239 files changed, 2071 insertions, 1306 deletions
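A recurring pattern in the hunks below is the addition of dma_mmap_coherent() and dma_get_sgtable() to architectures that were still missing them (avr32, blackfin, c6x, cris, frv, m68k, mn10300, parisc, xtensa): either thin wrappers around the common helpers in drivers/base/dma-mapping.c, or -EINVAL stubs where the API cannot be implemented. The sketch below is illustrative only and condenses the two variants those hunks add; each real header lives in arch/<arch>/include/asm/dma-mapping.h and defines exactly one of the two. CONFIG_EXAMPLE_ARCH_HAS_COHERENT_MMAP is a placeholder symbol, not a real Kconfig option.

/*
 * Illustrative sketch (not part of the diff itself): the two shapes of the
 * dma_mmap_coherent()/dma_get_sgtable() additions in the arch headers below.
 */
#include <linux/types.h>	/* dma_addr_t, size_t */
#include <linux/errno.h>	/* EINVAL */

struct device;
struct vm_area_struct;
struct sg_table;

#ifdef CONFIG_EXAMPLE_ARCH_HAS_COHERENT_MMAP	/* placeholder symbol */

/* Variant 1: reuse the generic helpers from drivers/base/dma-mapping.c. */
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
				  void *cpu_addr, dma_addr_t dma_addr,
				  size_t size);

#define dma_mmap_coherent(d, v, c, h, s)	dma_common_mmap(d, v, c, h, s)
#define dma_get_sgtable(d, t, v, h, s)		dma_common_get_sgtable(d, t, v, h, s)

#else

/* Variant 2: the API is not supported, so callers simply get -EINVAL. */
static inline int dma_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma, void *cpu_addr,
				    dma_addr_t dma_addr, size_t size)
{
	return -EINVAL;
}

static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
				  void *cpu_addr, dma_addr_t dma_addr,
				  size_t size)
{
	return -EINVAL;
}

#endif

Drivers reach these through the generic dma-mapping API, so on the stubbed architectures an mmap of a coherent buffer fails cleanly with -EINVAL instead of breaking the build.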
diff --git a/Documentation/hid/hid-sensor.txt b/Documentation/hid/hid-sensor.txt index 948b0989c43..948b0989c43 100755..100644 --- a/Documentation/hid/hid-sensor.txt +++ b/Documentation/hid/hid-sensor.txt diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 363e348bff9..6c723811c0a 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2438,7 +2438,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. real-time workloads. It can also improve energy efficiency for asymmetric multiprocessors. - rcu_nocbs_poll [KNL,BOOT] + rcu_nocb_poll [KNL,BOOT] Rather than requiring that offloaded CPUs (specified by rcu_nocbs= above) explicitly awaken the corresponding "rcuoN" kthreads, diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt index 3edb4c2887a..e540fd67f76 100644 --- a/Documentation/x86/boot.txt +++ b/Documentation/x86/boot.txt @@ -57,7 +57,7 @@ Protocol 2.10: (Kernel 2.6.31) Added a protocol for relaxed alignment Protocol 2.11: (Kernel 3.6) Added a field for offset of EFI handover protocol entry point. -Protocol 2.12: (Kernel 3.9) Added the xloadflags field and extension fields +Protocol 2.12: (Kernel 3.8) Added the xloadflags field and extension fields to struct boot_params for for loading bzImage and ramdisk above 4G in 64bit. diff --git a/MAINTAINERS b/MAINTAINERS index 38fe0ffc5f1..cfceb75af8c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1489,7 +1489,7 @@ AVR32 ARCHITECTURE M: Haavard Skinnemoen <hskinnemoen@gmail.com> M: Hans-Christian Egtvedt <egtvedt@samfundet.no> W: http://www.atmel.com/products/AVR32/ -W: http://avr32linux.org/ +W: http://mirror.egtvedt.no/avr32linux.org/ W: http://avrfreaks.net/ S: Maintained F: arch/avr32/ @@ -1,8 +1,8 @@ VERSION = 3 PATCHLEVEL = 8 SUBLEVEL = 0 -EXTRAVERSION = -rc5 -NAME = Terrified Chipmunk +EXTRAVERSION = -rc7 +NAME = Unicycling Gorilla # *DOCUMENTATION* # To see a list of typical targets execute "make help" diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile index abfce280f57..71768b8a1ab 100644 --- a/arch/arm/boot/Makefile +++ b/arch/arm/boot/Makefile @@ -68,8 +68,8 @@ else endif check_for_multiple_loadaddr = \ -if [ $(words $(UIMAGE_LOADADDR)) -gt 1 ]; then \ - echo 'multiple load addresses: $(UIMAGE_LOADADDR)'; \ +if [ $(words $(UIMAGE_LOADADDR)) -ne 1 ]; then \ + echo 'multiple (or no) load addresses: $(UIMAGE_LOADADDR)'; \ echo 'This is incompatible with uImages'; \ echo 'Specify LOADADDR on the commandline to build an uImage'; \ false; \ diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index 36ae03a3f5d..87dfa9026c5 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c @@ -351,6 +351,25 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) irq_set_chained_handler(irq, gic_handle_cascade_irq); } +static u8 gic_get_cpumask(struct gic_chip_data *gic) +{ + void __iomem *base = gic_data_dist_base(gic); + u32 mask, i; + + for (i = mask = 0; i < 32; i += 4) { + mask = readl_relaxed(base + GIC_DIST_TARGET + i); + mask |= mask >> 16; + mask |= mask >> 8; + if (mask) + break; + } + + if (!mask) + pr_crit("GIC CPU mask not found - kernel will fail to boot.\n"); + + return mask; +} + static void __init gic_dist_init(struct gic_chip_data *gic) { unsigned int i; @@ -369,7 +388,9 @@ static void __init gic_dist_init(struct gic_chip_data *gic) /* * Set all global interrupts to this CPU only. 
*/ - cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0); + cpumask = gic_get_cpumask(gic); + cpumask |= cpumask << 8; + cpumask |= cpumask << 16; for (i = 32; i < gic_irqs; i += 4) writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); @@ -400,7 +421,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic) * Get what the GIC says our CPU mask is. */ BUG_ON(cpu >= NR_GIC_CPU_IF); - cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0); + cpu_mask = gic_get_cpumask(gic); gic_cpu_map[cpu] = cpu_mask; /* diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h index ab98fdd083b..720799fd3a8 100644 --- a/arch/arm/include/asm/delay.h +++ b/arch/arm/include/asm/delay.h @@ -24,6 +24,7 @@ extern struct arm_delay_ops { void (*delay)(unsigned long); void (*const_udelay)(unsigned long); void (*udelay)(unsigned long); + bool const_clock; } arm_delay_ops; #define __delay(n) arm_delay_ops.delay(n) diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 73cf03aa981..1c4df27f933 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -37,7 +37,7 @@ */ #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000)) -#define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3) +#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) /* * The maximum size of a 26-bit user space task. diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index f30ac3b55ba..80d6fc4dbe4 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -247,7 +247,8 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { - const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE; + const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | + L_PTE_NONE | L_PTE_VALID; pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index fc6692e2b60..bd6f56b9ec2 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c @@ -93,11 +93,11 @@ static void notrace update_sched_clock(void) * detectable in cyc_to_fixed_sched_clock(). 
*/ raw_local_irq_save(flags); - cd.epoch_cyc = cyc; + cd.epoch_cyc_copy = cyc; smp_wmb(); cd.epoch_ns = ns; smp_wmb(); - cd.epoch_cyc_copy = cyc; + cd.epoch_cyc = cyc; raw_local_irq_restore(flags); } diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index ab9458d62cb..87d30e704fe 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -686,6 +686,9 @@ static int cpufreq_callback(struct notifier_block *nb, if (freq->flags & CPUFREQ_CONST_LOOPS) return NOTIFY_OK; + if (arm_delay_ops.const_clock) + return NOTIFY_OK; + if (!per_cpu(l_p_j_ref, cpu)) { per_cpu(l_p_j_ref, cpu) = per_cpu(cpu_data, cpu).loops_per_jiffy; diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c index 0dc53854a5d..6b93f6a1a3c 100644 --- a/arch/arm/lib/delay.c +++ b/arch/arm/lib/delay.c @@ -77,6 +77,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer) arm_delay_ops.delay = __timer_delay; arm_delay_ops.const_udelay = __timer_const_udelay; arm_delay_ops.udelay = __timer_udelay; + arm_delay_ops.const_clock = true; delay_calibrated = true; } else { pr_info("Ignoring duplicate/late registration of read_current_timer delay\n"); diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index e103c290bc9..85afb031b67 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -414,7 +414,7 @@ config MACH_EXYNOS4_DT select CPU_EXYNOS4210 select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD select PINCTRL - select PINCTRL_EXYNOS4 + select PINCTRL_EXYNOS select USE_OF help Machine support for Samsung Exynos4 machine with device tree enabled. diff --git a/arch/arm/mach-realview/include/mach/irqs-eb.h b/arch/arm/mach-realview/include/mach/irqs-eb.h index d6b5073692d..44754230fdc 100644 --- a/arch/arm/mach-realview/include/mach/irqs-eb.h +++ b/arch/arm/mach-realview/include/mach/irqs-eb.h @@ -115,7 +115,7 @@ /* * Only define NR_IRQS if less than NR_IRQS_EB */ -#define NR_IRQS_EB (IRQ_EB_GIC_START + 96) +#define NR_IRQS_EB (IRQ_EB_GIC_START + 128) #if defined(CONFIG_MACH_REALVIEW_EB) \ && (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB)) diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index b820edaf318..db26e2e543f 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -749,7 +749,6 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) unsigned long instr = 0, instrptr; int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs); unsigned int type; - mm_segment_t fs; unsigned int fault; u16 tinstr = 0; int isize = 4; @@ -760,16 +759,15 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) instrptr = instruction_pointer(regs); - fs = get_fs(); - set_fs(KERNEL_DS); if (thumb_mode(regs)) { - fault = __get_user(tinstr, (u16 *)(instrptr & ~1)); + u16 *ptr = (u16 *)(instrptr & ~1); + fault = probe_kernel_address(ptr, tinstr); if (!fault) { if (cpu_architecture() >= CPU_ARCH_ARMv7 && IS_T32(tinstr)) { /* Thumb-2 32-bit */ u16 tinst2 = 0; - fault = __get_user(tinst2, (u16 *)(instrptr+2)); + fault = probe_kernel_address(ptr + 1, tinst2); instr = (tinstr << 16) | tinst2; thumb2_32b = 1; } else { @@ -778,8 +776,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) } } } else - fault = __get_user(instr, (u32 *)instrptr); - set_fs(fs); + fault = probe_kernel_address(instrptr, instr); if (fault) { type = TYPE_FAULT; diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 076c26d4386..dda3904dc64 100644 --- a/arch/arm/mm/dma-mapping.c +++ 
b/arch/arm/mm/dma-mapping.c @@ -640,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, if (is_coherent || nommu()) addr = __alloc_simple_buffer(dev, size, gfp, &page); - else if (gfp & GFP_ATOMIC) + else if (!(gfp & __GFP_WAIT)) addr = __alloc_from_pool(size, &page); else if (!IS_ENABLED(CONFIG_CMA)) addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S index dd5e56f95f3..8d10dc8a1e1 100644 --- a/arch/arm/vfp/vfphw.S +++ b/arch/arm/vfp/vfphw.S @@ -22,12 +22,14 @@ .macro DBGSTR, str #ifdef DEBUG stmfd sp!, {r0-r3, ip, lr} - add r0, pc, #4 + ldr r0, =1f bl printk - b 1f - .asciz KERN_DEBUG "VFP: \str\n" - .balign 4 -1: ldmfd sp!, {r0-r3, ip, lr} + ldmfd sp!, {r0-r3, ip, lr} + + .pushsection .rodata, "a" +1: .ascii KERN_DEBUG "VFP: \str\n" + .byte 0 + .previous #endif .endm @@ -35,12 +37,14 @@ #ifdef DEBUG stmfd sp!, {r0-r3, ip, lr} mov r1, \arg - add r0, pc, #4 + ldr r0, =1f bl printk - b 1f - .asciz KERN_DEBUG "VFP: \str\n" - .balign 4 -1: ldmfd sp!, {r0-r3, ip, lr} + ldmfd sp!, {r0-r3, ip, lr} + + .pushsection .rodata, "a" +1: .ascii KERN_DEBUG "VFP: \str\n" + .byte 0 + .previous #endif .endm @@ -50,12 +54,14 @@ mov r3, \arg3 mov r2, \arg2 mov r1, \arg1 - add r0, pc, #4 + ldr r0, =1f bl printk - b 1f - .asciz KERN_DEBUG "VFP: \str\n" - .balign 4 -1: ldmfd sp!, {r0-r3, ip, lr} + ldmfd sp!, {r0-r3, ip, lr} + + .pushsection .rodata, "a" +1: .ascii KERN_DEBUG "VFP: \str\n" + .byte 0 + .previous #endif .endm diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 3b44e0dd0a9..5dfbb0b8e7f 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -413,7 +413,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) * If there isn't a second FP instruction, exit now. Note that * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1. 
*/ - if (fpexc ^ (FPEXC_EX | FPEXC_FP2V)) + if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V)) goto exit; /* diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h index aaf5199d8fc..b3d18f9f3e8 100644 --- a/arch/avr32/include/asm/dma-mapping.h +++ b/arch/avr32/include/asm/dma-mapping.h @@ -336,4 +336,14 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) +/* drivers/base/dma-mapping.c */ +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size); +extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, + size_t size); + +#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) +#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) + #endif /* __ASM_AVR32_DMA_MAPPING_H */ diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h index bbf461076a0..054d9ec57d9 100644 --- a/arch/blackfin/include/asm/dma-mapping.h +++ b/arch/blackfin/include/asm/dma-mapping.h @@ -154,4 +154,14 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size, _dma_sync((dma_addr_t)vaddr, size, dir); } +/* drivers/base/dma-mapping.c */ +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size); +extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, + size_t size); + +#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) +#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) + #endif /* _BLACKFIN_DMA_MAPPING_H */ diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h index 3c694065030..88bd0d899bd 100644 --- a/arch/c6x/include/asm/dma-mapping.h +++ b/arch/c6x/include/asm/dma-mapping.h @@ -89,4 +89,19 @@ extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t); #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f)) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h)) +/* Not supported for now */ +static inline int dma_mmap_coherent(struct device *dev, + struct vm_area_struct *vma, void *cpu_addr, + dma_addr_t dma_addr, size_t size) +{ + return -EINVAL; +} + +static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, + size_t size) +{ + return -EINVAL; +} + #endif /* _ASM_C6X_DMA_MAPPING_H */ diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h index 8588b2ccf85..2f0f654f1b4 100644 --- a/arch/cris/include/asm/dma-mapping.h +++ b/arch/cris/include/asm/dma-mapping.h @@ -158,5 +158,15 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size, { } +/* drivers/base/dma-mapping.c */ +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size); +extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, + size_t size); + +#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) +#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) + #endif diff --git a/arch/frv/include/asm/dma-mapping.h 
b/arch/frv/include/asm/dma-mapping.h index dfb811002c6..1746a2b8e6e 100644 --- a/arch/frv/include/asm/dma-mapping.h +++ b/arch/frv/include/asm/dma-mapping.h @@ -132,4 +132,19 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, flush_write_buffers(); } +/* Not supported for now */ +static inline int dma_mmap_coherent(struct device *dev, + struct vm_area_struct *vma, void *cpu_addr, + dma_addr_t dma_addr, size_t size) +{ + return -EINVAL; +} + +static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, + size_t size) +{ + return -EINVAL; +} + #endif /* _ASM_DMA_MAPPING_H */ diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h index 3e6b8445af6..292805f0762 100644 --- a/arch/m68k/include/asm/dma-mapping.h +++ b/arch/m68k/include/asm/dma-mapping.h @@ -115,4 +115,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t handle) #include <asm-generic/dma-mapping-broken.h> #endif +/* drivers/base/dma-mapping.c */ +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size); +extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, + size_t size); + +#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) +#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) + #endif /* _M68K_DMA_MAPPING_H */ diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig index d7af29f1fcf..ba611927749 100644 --- a/arch/mips/bcm47xx/Kconfig +++ b/arch/mips/bcm47xx/Kconfig @@ -8,8 +8,10 @@ config BCM47XX_SSB select SSB_DRIVER_EXTIF select SSB_EMBEDDED select SSB_B43_PCI_BRIDGE if PCI + select SSB_DRIVER_PCICORE if PCI select SSB_PCICORE_HOSTMODE if PCI select SSB_DRIVER_GPIO + select GPIOLIB default y help Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support. @@ -25,6 +27,7 @@ config BCM47XX_BCMA select BCMA_HOST_PCI if PCI select BCMA_DRIVER_PCI_HOSTMODE if PCI select BCMA_DRIVER_GPIO + select GPIOLIB default y help Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus. diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c index 9f883bf7695..33b72144db3 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c +++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c @@ -30,6 +30,7 @@ * measurement, and debugging facilities. */ +#include <linux/compiler.h> #include <linux/irqflags.h> #include <asm/octeon/cvmx.h> #include <asm/octeon/cvmx-l2c.h> @@ -285,22 +286,22 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter) */ static void fault_in(uint64_t addr, int len) { - volatile char *ptr; - volatile char dummy; + char *ptr; + /* * Adjust addr and length so we get all cache lines even for * small ranges spanning two cache lines. */ len += addr & CVMX_CACHE_LINE_MASK; addr &= ~CVMX_CACHE_LINE_MASK; - ptr = (volatile char *)cvmx_phys_to_ptr(addr); + ptr = cvmx_phys_to_ptr(addr); /* * Invalidate L1 cache to make sure all loads result in data * being in L2. 
*/ CVMX_DCACHE_INVALIDATE; while (len > 0) { - dummy += *ptr; + ACCESS_ONCE(*ptr); len -= CVMX_CACHE_LINE_SIZE; ptr += CVMX_CACHE_LINE_SIZE; } diff --git a/arch/mips/include/asm/dsp.h b/arch/mips/include/asm/dsp.h index e9bfc0813c7..7bfad0520e2 100644 --- a/arch/mips/include/asm/dsp.h +++ b/arch/mips/include/asm/dsp.h @@ -16,7 +16,7 @@ #include <asm/mipsregs.h> #define DSP_DEFAULT 0x00000000 -#define DSP_MASK 0x3ff +#define DSP_MASK 0x3f #define __enable_dsp_hazard() \ do { \ diff --git a/arch/mips/include/asm/inst.h b/arch/mips/include/asm/inst.h index ab84064283d..33c34adbecf 100644 --- a/arch/mips/include/asm/inst.h +++ b/arch/mips/include/asm/inst.h @@ -353,6 +353,7 @@ union mips_instruction { struct u_format u_format; struct c_format c_format; struct r_format r_format; + struct p_format p_format; struct f_format f_format; struct ma_format ma_format; struct b_format b_format; diff --git a/arch/mips/include/asm/mach-pnx833x/war.h b/arch/mips/include/asm/mach-pnx833x/war.h index edaa06d9d49..e410df4e1b3 100644 --- a/arch/mips/include/asm/mach-pnx833x/war.h +++ b/arch/mips/include/asm/mach-pnx833x/war.h @@ -21,4 +21,4 @@ #define R10000_LLSC_WAR 0 #define MIPS34K_MISSED_ITLB_WAR 0 -#endif /* __ASM_MIPS_MACH_PNX8550_WAR_H */ +#endif /* __ASM_MIPS_MACH_PNX833X_WAR_H */ diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index c63191055e6..013d5f78126 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -230,6 +230,7 @@ static inline void pud_clear(pud_t *pudp) #else #define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT)) #define pfn_pte(pfn, prot) __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot)) +#define pfn_pmd(pfn, prot) __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot)) #endif #define __pgd_offset(address) pgd_index(address) diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild index a1a0452ac18..77d4fb33f75 100644 --- a/arch/mips/include/uapi/asm/Kbuild +++ b/arch/mips/include/uapi/asm/Kbuild @@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm header-y += auxvec.h header-y += bitsperlong.h +header-y += break.h header-y += byteorder.h header-y += cachectl.h header-y += errno.h diff --git a/arch/mips/include/asm/break.h b/arch/mips/include/uapi/asm/break.h index 9161e684cb4..9161e684cb4 100644 --- a/arch/mips/include/asm/break.h +++ b/arch/mips/include/uapi/asm/break.h diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 6a2d758dd8e..83fa1460e29 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -25,6 +25,12 @@ #define MCOUNT_OFFSET_INSNS 4 #endif +/* Arch override because MIPS doesn't need to run this from stop_machine() */ +void arch_ftrace_update_code(int command) +{ + ftrace_modify_all_code(command); +} + /* * Check if the address is in kernel space * @@ -89,6 +95,24 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code) return 0; } +#ifndef CONFIG_64BIT +static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, + unsigned int new_code2) +{ + int faulted; + + safe_store_code(new_code1, ip, faulted); + if (unlikely(faulted)) + return -EFAULT; + ip += 4; + safe_store_code(new_code2, ip, faulted); + if (unlikely(faulted)) + return -EFAULT; + flush_icache_range(ip, ip + 8); /* original ip + 12 */ + return 0; +} +#endif + /* * The details about the calling site of mcount on MIPS * @@ -131,8 +155,18 @@ int ftrace_make_nop(struct module *mod, * needed. */ new = in_kernel_space(ip) ? 
INSN_NOP : INSN_B_1F; - +#ifdef CONFIG_64BIT return ftrace_modify_code(ip, new); +#else + /* + * On 32 bit MIPS platforms, gcc adds a stack adjust + * instruction in the delay slot after the branch to + * mcount and expects mcount to restore the sp on return. + * This is based on a legacy API and does nothing but + * waste instructions so it's being removed at runtime. + */ + return ftrace_modify_code_2(ip, new, INSN_NOP); +#endif } int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 4c968e7efb7..16586767335 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S @@ -46,9 +46,8 @@ PTR_L a5, PT_R9(sp) PTR_L a6, PT_R10(sp) PTR_L a7, PT_R11(sp) - PTR_ADDIU sp, PT_SIZE #else - PTR_ADDIU sp, (PT_SIZE + 8) + PTR_ADDIU sp, PT_SIZE #endif .endm @@ -69,7 +68,9 @@ NESTED(ftrace_caller, PT_SIZE, ra) .globl _mcount _mcount: b ftrace_stub - nop + addiu sp,sp,8 + + /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ lw t1, function_trace_stop bnez t1, ftrace_stub nop diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index eec690af658..147cec19621 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -705,7 +705,7 @@ static int vpe_run(struct vpe * v) printk(KERN_WARNING "VPE loader: TC %d is already in use.\n", - t->index); + v->tc->index); return -ENOEXEC; } } else { diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index f36acd1b380..a7935bf0fec 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -408,7 +408,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) #endif /* tell oprofile which irq to use */ - cp0_perfcount_irq = LTQ_PERF_IRQ; + cp0_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); /* * if the timer irq is not one of the mips irqs we need to diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c index dc81ca8dc0d..288f7954988 100644 --- a/arch/mips/lib/delay.c +++ b/arch/mips/lib/delay.c @@ -21,7 +21,7 @@ void __delay(unsigned long loops) " .set noreorder \n" " .align 3 \n" "1: bnez %0, 1b \n" -#if __SIZEOF_LONG__ == 4 +#if BITS_PER_LONG == 32 " subu %0, 1 \n" #else " dsubu %0, 1 \n" diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c index 7657fd21cd3..cacfd31e8ec 100644 --- a/arch/mips/mm/ioremap.c +++ b/arch/mips/mm/ioremap.c @@ -190,9 +190,3 @@ void __iounmap(const volatile void __iomem *addr) EXPORT_SYMBOL(__ioremap); EXPORT_SYMBOL(__iounmap); - -int __virt_addr_valid(const volatile void *kaddr) -{ - return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); -} -EXPORT_SYMBOL_GPL(__virt_addr_valid); diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index d9be7540a6b..7e5fe2790d8 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -192,3 +192,9 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) return ret; } + +int __virt_addr_valid(const volatile void *kaddr) +{ + return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); +} +EXPORT_SYMBOL_GPL(__virt_addr_valid); diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c index 4e7f49d3d5a..c5ce6992ac4 100644 --- a/arch/mips/netlogic/xlr/setup.c +++ b/arch/mips/netlogic/xlr/setup.c @@ -193,8 +193,11 @@ static void nlm_init_node(void) void __init prom_init(void) { - int i, *argv, *envp; /* passed as 32 bit ptrs */ + int *argv, *envp; /* passed as 32 bit ptrs */ struct psb_info *prom_infop; +#ifdef CONFIG_SMP + int i; +#endif /* truncate to 32 bit and sign extend all args */ argv = (int 
*)(long)(int)fw_arg1; diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c index 1552522b871..6eaa4f2d0e3 100644 --- a/arch/mips/pci/pci-ar71xx.c +++ b/arch/mips/pci/pci-ar71xx.c @@ -24,7 +24,7 @@ #include <asm/mach-ath79/pci.h> #define AR71XX_PCI_MEM_BASE 0x10000000 -#define AR71XX_PCI_MEM_SIZE 0x08000000 +#define AR71XX_PCI_MEM_SIZE 0x07000000 #define AR71XX_PCI_WIN0_OFFS 0x10000000 #define AR71XX_PCI_WIN1_OFFS 0x11000000 diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c index 86d77a66645..c11c75be2d7 100644 --- a/arch/mips/pci/pci-ar724x.c +++ b/arch/mips/pci/pci-ar724x.c @@ -21,7 +21,7 @@ #define AR724X_PCI_CTRL_SIZE 0x100 #define AR724X_PCI_MEM_BASE 0x10000000 -#define AR724X_PCI_MEM_SIZE 0x08000000 +#define AR724X_PCI_MEM_SIZE 0x04000000 #define AR724X_PCI_REG_RESET 0x18 #define AR724X_PCI_REG_INT_STATUS 0x4c diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h index c1be4397b1e..a18abfc558e 100644 --- a/arch/mn10300/include/asm/dma-mapping.h +++ b/arch/mn10300/include/asm/dma-mapping.h @@ -168,4 +168,19 @@ void dma_cache_sync(void *vaddr, size_t size, mn10300_dcache_flush_inv(); } +/* Not supported for now */ +static inline int dma_mmap_coherent(struct device *dev, + struct vm_area_struct *vma, void *cpu_addr, + dma_addr_t dma_addr, size_t size) +{ + return -EINVAL; +} + +static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, + size_t size) +{ + return -EINVAL; +} + #endif diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h index 467bbd510ea..106b395688e 100644 --- a/arch/parisc/include/asm/dma-mapping.h +++ b/arch/parisc/include/asm/dma-mapping.h @@ -238,4 +238,19 @@ void * sba_get_iommu(struct parisc_device *dev); /* At the moment, we panic on error for IOMMU resource exaustion */ #define dma_mapping_error(dev, x) 0 +/* This API cannot be supported on PA-RISC */ +static inline int dma_mmap_coherent(struct device *dev, + struct vm_area_struct *vma, void *cpu_addr, + dma_addr_t dma_addr, size_t size) +{ + return -EINVAL; +} + +static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, + size_t size) +{ + return -EINVAL; +} + #endif diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S index 56585086413..7443481a315 100644 --- a/arch/powerpc/mm/hash_low_64.S +++ b/arch/powerpc/mm/hash_low_64.S @@ -115,11 +115,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) sldi r29,r5,SID_SHIFT - VPN_SHIFT rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) or r29,r28,r29 - - /* Calculate hash value for primary slot and store it in r28 */ - rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ - rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */ - xor r28,r5,r0 + /* + * Calculate hash value for primary slot and store it in r28 + * r3 = va, r5 = vsid + * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) + */ + rldicl r0,r3,64-12,48 + xor r28,r5,r0 /* hash */ b 4f 3: /* Calc vpn and put it in r29 */ @@ -130,11 +132,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) /* * calculate hash value for primary slot and * store it in r28 for 1T segment + * r3 = va, r5 = vsid */ - rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ - clrldi r5,r5,40 /* vsid & 0xffffff */ - rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ - xor r28,r28,r5 + sldi r28,r5,25 /* vsid << 25 */ + /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */ + rldicl r0,r3,64-12,36 + xor r28,r28,r5 /* 
vsid ^ ( vsid << 25) */ xor r28,r28,r0 /* hash */ /* Convert linux PTE bits into HW equivalents */ @@ -407,11 +410,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) */ rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) or r29,r28,r29 - - /* Calculate hash value for primary slot and store it in r28 */ - rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ - rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */ - xor r28,r5,r0 + /* + * Calculate hash value for primary slot and store it in r28 + * r3 = va, r5 = vsid + * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) + */ + rldicl r0,r3,64-12,48 + xor r28,r5,r0 /* hash */ b 4f 3: /* Calc vpn and put it in r29 */ @@ -426,11 +431,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) /* * Calculate hash value for primary slot and * store it in r28 for 1T segment + * r3 = va, r5 = vsid */ - rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ - clrldi r5,r5,40 /* vsid & 0xffffff */ - rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ - xor r28,r28,r5 + sldi r28,r5,25 /* vsid << 25 */ + /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */ + rldicl r0,r3,64-12,36 + xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ xor r28,r28,r0 /* hash */ /* Convert linux PTE bits into HW equivalents */ @@ -752,25 +758,27 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) or r29,r28,r29 - /* Calculate hash value for primary slot and store it in r28 */ - rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ - rldicl r0,r3,64-16,52 /* (ea >> 16) & 0xfff */ - xor r28,r5,r0 + /* Calculate hash value for primary slot and store it in r28 + * r3 = va, r5 = vsid + * r0 = (va >> 16) & ((1ul << (28 - 16)) -1) + */ + rldicl r0,r3,64-16,52 + xor r28,r5,r0 /* hash */ b 4f 3: /* Calc vpn and put it in r29 */ sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) or r29,r28,r29 - /* * calculate hash value for primary slot and * store it in r28 for 1T segment + * r3 = va, r5 = vsid */ - rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ - clrldi r5,r5,40 /* vsid & 0xffffff */ - rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */ - xor r28,r28,r5 + sldi r28,r5,25 /* vsid << 25 */ + /* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */ + rldicl r0,r3,64-16,40 + xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ xor r28,r28,r0 /* hash */ /* Convert linux PTE bits into HW equivalents */ diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 102ff7cb3e4..142c4ceff11 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -207,7 +207,7 @@ sysexit_from_sys_call: testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) jnz ia32_ret_from_sys_call TRACE_IRQS_ON - sti + ENABLE_INTERRUPTS(CLBR_NONE) movl %eax,%esi /* second arg, syscall return value */ cmpl $-MAX_ERRNO,%eax /* is it an error ? 
*/ jbe 1f @@ -217,7 +217,7 @@ sysexit_from_sys_call: call __audit_syscall_exit movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi - cli + DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) jz \exit diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index fe9edec6698..84c1309c4c0 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -298,8 +298,7 @@ struct _cache_attr { unsigned int); }; -#ifdef CONFIG_AMD_NB - +#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS) /* * L3 cache descriptors */ @@ -524,9 +523,9 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count, static struct _cache_attr subcaches = __ATTR(subcaches, 0644, show_subcaches, store_subcaches); -#else /* CONFIG_AMD_NB */ +#else #define amd_init_l3_cache(x, y) -#endif /* CONFIG_AMD_NB */ +#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */ static int __cpuinit cpuid4_cache_lookup_regs(int index, diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 93b9e1181f8..4914e94ad6e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -2019,7 +2019,10 @@ __init int intel_pmu_init(void) break; case 28: /* Atom */ - case 54: /* Cedariew */ + case 38: /* Lincroft */ + case 39: /* Penwell */ + case 53: /* Cloverview */ + case 54: /* Cedarview */ memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, sizeof(hw_cache_event_ids)); @@ -2084,6 +2087,7 @@ __init int intel_pmu_init(void) pr_cont("SandyBridge events, "); break; case 58: /* IvyBridge */ + case 62: /* IvyBridge EP */ memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c index f2af39f5dc3..4820c232a0b 100644 --- a/arch/x86/kernel/cpu/perf_event_p6.c +++ b/arch/x86/kernel/cpu/perf_event_p6.c @@ -19,7 +19,7 @@ static const u64 p6_perfmon_event_map[] = }; -static __initconst u64 p6_hw_cache_event_ids +static u64 p6_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c index cc2f8c13128..872eb60e780 100644 --- a/arch/x86/tools/insn_sanity.c +++ b/arch/x86/tools/insn_sanity.c @@ -55,7 +55,7 @@ static FILE *input_file; /* Input file name */ static void usage(const char *err) { if (err) - fprintf(stderr, "Error: %s\n\n", err); + fprintf(stderr, "%s: Error: %s\n\n", prog, err); fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog); fprintf(stderr, "\t-y 64bit mode\n"); fprintf(stderr, "\t-n 32bit mode\n"); @@ -269,7 +269,13 @@ int main(int argc, char **argv) insns++; } - fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed); + fprintf(stdout, "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", + prog, + (errors) ? "Failure" : "Success", + insns, + (input_file) ? "given" : "random", + errors, + seed); return errors ? 
1 : 0; } diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 4acb5feba1f..172a02a6ad1 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -170,4 +170,19 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size, consistent_sync(vaddr, size, direction); } +/* Not supported for now */ +static inline int dma_mmap_coherent(struct device *dev, + struct vm_area_struct *vma, void *cpu_addr, + dma_addr_t dma_addr, size_t size) +{ + return -EINVAL; +} + +static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, + size_t size) +{ + return -EINVAL; +} + #endif /* _XTENSA_DMA_MAPPING_H */ diff --git a/block/genhd.c b/block/genhd.c index 9a289d7c84b..3993ebf4135 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -35,6 +35,8 @@ static DEFINE_IDR(ext_devt_idr); static struct device_type disk_type; +static void disk_check_events(struct disk_events *ev, + unsigned int *clearing_ptr); static void disk_alloc_events(struct gendisk *disk); static void disk_add_events(struct gendisk *disk); static void disk_del_events(struct gendisk *disk); @@ -1549,6 +1551,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask) const struct block_device_operations *bdops = disk->fops; struct disk_events *ev = disk->ev; unsigned int pending; + unsigned int clearing = mask; if (!ev) { /* for drivers still using the old ->media_changed method */ @@ -1558,34 +1561,53 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask) return 0; } - /* tell the workfn about the events being cleared */ + disk_block_events(disk); + + /* + * store the union of mask and ev->clearing on the stack so that the + * race with disk_flush_events does not cause ambiguity (ev->clearing + * can still be modified even if events are blocked). + */ spin_lock_irq(&ev->lock); - ev->clearing |= mask; + clearing |= ev->clearing; + ev->clearing = 0; spin_unlock_irq(&ev->lock); - /* uncondtionally schedule event check and wait for it to finish */ - disk_block_events(disk); - queue_delayed_work(system_freezable_wq, &ev->dwork, 0); - flush_delayed_work(&ev->dwork); - __disk_unblock_events(disk, false); + disk_check_events(ev, &clearing); + /* + * if ev->clearing is not 0, the disk_flush_events got called in the + * middle of this function, so we want to run the workfn without delay. + */ + __disk_unblock_events(disk, ev->clearing ? true : false); /* then, fetch and clear pending events */ spin_lock_irq(&ev->lock); - WARN_ON_ONCE(ev->clearing & mask); /* cleared by workfn */ pending = ev->pending & mask; ev->pending &= ~mask; spin_unlock_irq(&ev->lock); + WARN_ON_ONCE(clearing & mask); return pending; } +/* + * Separate this part out so that a different pointer for clearing_ptr can be + * passed in for disk_clear_events. 
+ */ static void disk_events_workfn(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct disk_events *ev = container_of(dwork, struct disk_events, dwork); + + disk_check_events(ev, &ev->clearing); +} + +static void disk_check_events(struct disk_events *ev, + unsigned int *clearing_ptr) +{ struct gendisk *disk = ev->disk; char *envp[ARRAY_SIZE(disk_uevents) + 1] = { }; - unsigned int clearing = ev->clearing; + unsigned int clearing = *clearing_ptr; unsigned int events; unsigned long intv; int nr_events = 0, i; @@ -1598,7 +1620,7 @@ static void disk_events_workfn(struct work_struct *work) events &= ~ev->pending; ev->pending |= events; - ev->clearing &= ~clearing; + *clearing_ptr &= ~clearing; intv = disk_events_poll_jiffies(disk); if (!ev->block && intv) diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c index 536c166f425..1247052b9db 100644 --- a/drivers/amba/tegra-ahb.c +++ b/drivers/amba/tegra-ahb.c @@ -129,7 +129,7 @@ static inline void gizmo_writel(struct tegra_ahb *ahb, u32 value, u32 offset) writel(value, ahb->regs + offset); } -#ifdef CONFIG_ARCH_TEGRA_3x_SOC +#ifdef CONFIG_TEGRA_IOMMU_SMMU static int tegra_ahb_match_by_smmu(struct device *dev, void *data) { struct tegra_ahb *ahb = dev_get_drvdata(dev); diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h index 6a0955e6d4f..53ecac5a216 100644 --- a/drivers/atm/iphase.h +++ b/drivers/atm/iphase.h @@ -636,82 +636,82 @@ struct rx_buf_desc { #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE -typedef volatile u_int freg_t; +typedef volatile u_int ffreg_t; typedef u_int rreg_t; typedef struct _ffredn_t { - freg_t idlehead_high; /* Idle cell header (high) */ - freg_t idlehead_low; /* Idle cell header (low) */ - freg_t maxrate; /* Maximum rate */ - freg_t stparms; /* Traffic Management Parameters */ - freg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */ - freg_t rm_type; /* */ - u_int filler5[0x17 - 0x06]; - freg_t cmd_reg; /* Command register */ - u_int filler18[0x20 - 0x18]; - freg_t cbr_base; /* CBR Pointer Base */ - freg_t vbr_base; /* VBR Pointer Base */ - freg_t abr_base; /* ABR Pointer Base */ - freg_t ubr_base; /* UBR Pointer Base */ - u_int filler24; - freg_t vbrwq_base; /* VBR Wait Queue Base */ - freg_t abrwq_base; /* ABR Wait Queue Base */ - freg_t ubrwq_base; /* UBR Wait Queue Base */ - freg_t vct_base; /* Main VC Table Base */ - freg_t vcte_base; /* Extended Main VC Table Base */ - u_int filler2a[0x2C - 0x2A]; - freg_t cbr_tab_beg; /* CBR Table Begin */ - freg_t cbr_tab_end; /* CBR Table End */ - freg_t cbr_pointer; /* CBR Pointer */ - u_int filler2f[0x30 - 0x2F]; - freg_t prq_st_adr; /* Packet Ready Queue Start Address */ - freg_t prq_ed_adr; /* Packet Ready Queue End Address */ - freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */ - freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */ - freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/ - freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */ - freg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */ - freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/ - u_int filler38[0x40 - 0x38]; - freg_t queue_base; /* Base address for PRQ and TCQ */ - freg_t desc_base; /* Base address of descriptor table */ - u_int filler42[0x45 - 0x42]; - freg_t mode_reg_0; /* Mode register 0 */ - freg_t mode_reg_1; /* Mode register 1 */ - freg_t intr_status_reg;/* Interrupt Status register */ - freg_t mask_reg; /* Mask Register */ - freg_t 
cell_ctr_high1; /* Total cell transfer count (high) */ - freg_t cell_ctr_lo1; /* Total cell transfer count (low) */ - freg_t state_reg; /* Status register */ - u_int filler4c[0x58 - 0x4c]; - freg_t curr_desc_num; /* Contains the current descriptor num */ - freg_t next_desc; /* Next descriptor */ - freg_t next_vc; /* Next VC */ - u_int filler5b[0x5d - 0x5b]; - freg_t present_slot_cnt;/* Present slot count */ - u_int filler5e[0x6a - 0x5e]; - freg_t new_desc_num; /* New descriptor number */ - freg_t new_vc; /* New VC */ - freg_t sched_tbl_ptr; /* Schedule table pointer */ - freg_t vbrwq_wptr; /* VBR wait queue write pointer */ - freg_t vbrwq_rptr; /* VBR wait queue read pointer */ - freg_t abrwq_wptr; /* ABR wait queue write pointer */ - freg_t abrwq_rptr; /* ABR wait queue read pointer */ - freg_t ubrwq_wptr; /* UBR wait queue write pointer */ - freg_t ubrwq_rptr; /* UBR wait queue read pointer */ - freg_t cbr_vc; /* CBR VC */ - freg_t vbr_sb_vc; /* VBR SB VC */ - freg_t abr_sb_vc; /* ABR SB VC */ - freg_t ubr_sb_vc; /* UBR SB VC */ - freg_t vbr_next_link; /* VBR next link */ - freg_t abr_next_link; /* ABR next link */ - freg_t ubr_next_link; /* UBR next link */ - u_int filler7a[0x7c-0x7a]; - freg_t out_rate_head; /* Out of rate head */ - u_int filler7d[0xca-0x7d]; /* pad out to full address space */ - freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */ - freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */ - u_int fillercc[0x100-0xcc]; /* pad out to full address space */ + ffreg_t idlehead_high; /* Idle cell header (high) */ + ffreg_t idlehead_low; /* Idle cell header (low) */ + ffreg_t maxrate; /* Maximum rate */ + ffreg_t stparms; /* Traffic Management Parameters */ + ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */ + ffreg_t rm_type; /* */ + u_int filler5[0x17 - 0x06]; + ffreg_t cmd_reg; /* Command register */ + u_int filler18[0x20 - 0x18]; + ffreg_t cbr_base; /* CBR Pointer Base */ + ffreg_t vbr_base; /* VBR Pointer Base */ + ffreg_t abr_base; /* ABR Pointer Base */ + ffreg_t ubr_base; /* UBR Pointer Base */ + u_int filler24; + ffreg_t vbrwq_base; /* VBR Wait Queue Base */ + ffreg_t abrwq_base; /* ABR Wait Queue Base */ + ffreg_t ubrwq_base; /* UBR Wait Queue Base */ + ffreg_t vct_base; /* Main VC Table Base */ + ffreg_t vcte_base; /* Extended Main VC Table Base */ + u_int filler2a[0x2C - 0x2A]; + ffreg_t cbr_tab_beg; /* CBR Table Begin */ + ffreg_t cbr_tab_end; /* CBR Table End */ + ffreg_t cbr_pointer; /* CBR Pointer */ + u_int filler2f[0x30 - 0x2F]; + ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */ + ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */ + ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */ + ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */ + ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/ + ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */ + ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */ + ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/ + u_int filler38[0x40 - 0x38]; + ffreg_t queue_base; /* Base address for PRQ and TCQ */ + ffreg_t desc_base; /* Base address of descriptor table */ + u_int filler42[0x45 - 0x42]; + ffreg_t mode_reg_0; /* Mode register 0 */ + ffreg_t mode_reg_1; /* Mode register 1 */ + ffreg_t intr_status_reg;/* Interrupt Status register */ + ffreg_t mask_reg; /* Mask Register */ + ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */ + ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */ + ffreg_t state_reg; /* Status 
register */ + u_int filler4c[0x58 - 0x4c]; + ffreg_t curr_desc_num; /* Contains the current descriptor num */ + ffreg_t next_desc; /* Next descriptor */ + ffreg_t next_vc; /* Next VC */ + u_int filler5b[0x5d - 0x5b]; + ffreg_t present_slot_cnt;/* Present slot count */ + u_int filler5e[0x6a - 0x5e]; + ffreg_t new_desc_num; /* New descriptor number */ + ffreg_t new_vc; /* New VC */ + ffreg_t sched_tbl_ptr; /* Schedule table pointer */ + ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */ + ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */ + ffreg_t abrwq_wptr; /* ABR wait queue write pointer */ + ffreg_t abrwq_rptr; /* ABR wait queue read pointer */ + ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */ + ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */ + ffreg_t cbr_vc; /* CBR VC */ + ffreg_t vbr_sb_vc; /* VBR SB VC */ + ffreg_t abr_sb_vc; /* ABR SB VC */ + ffreg_t ubr_sb_vc; /* UBR SB VC */ + ffreg_t vbr_next_link; /* VBR next link */ + ffreg_t abr_next_link; /* ABR next link */ + ffreg_t ubr_next_link; /* UBR next link */ + u_int filler7a[0x7c-0x7a]; + ffreg_t out_rate_head; /* Out of rate head */ + u_int filler7d[0xca-0x7d]; /* pad out to full address space */ + ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */ + ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */ + u_int fillercc[0x100-0xcc]; /* pad out to full address space */ } ffredn_t; typedef struct _rfredn_t { diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index 19e3fbfd575..cb0c4548857 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h @@ -94,11 +94,16 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc); #ifdef CONFIG_BCMA_DRIVER_GPIO /* driver_gpio.c */ int bcma_gpio_init(struct bcma_drv_cc *cc); +int bcma_gpio_unregister(struct bcma_drv_cc *cc); #else static inline int bcma_gpio_init(struct bcma_drv_cc *cc) { return -ENOTSUPP; } +static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc) +{ + return 0; +} #endif /* CONFIG_BCMA_DRIVER_GPIO */ #endif diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c index dbda91e4dff..1f0b83e18f6 100644 --- a/drivers/bcma/driver_chipcommon_nflash.c +++ b/drivers/bcma/driver_chipcommon_nflash.c @@ -21,7 +21,7 @@ int bcma_nflash_init(struct bcma_drv_cc *cc) struct bcma_bus *bus = cc->core->bus; if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 && - cc->core->id.rev != 0x38) { + cc->core->id.rev != 38) { bcma_err(bus, "NAND flash on unsupported board!\n"); return -ENOTSUPP; } diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index 9a6f585da2d..71f755c06fc 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c @@ -96,3 +96,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) return gpiochip_add(chip); } + +int bcma_gpio_unregister(struct bcma_drv_cc *cc) +{ + return gpiochip_remove(&cc->gpio); +} diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 4a92f647b58..324f9debda8 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -268,6 +268,13 @@ int bcma_bus_register(struct bcma_bus *bus) void bcma_bus_unregister(struct bcma_bus *bus) { struct bcma_device *cores[3]; + int err; + + err = bcma_gpio_unregister(&bus->drv_cc); + if (err == -EBUSY) + bcma_err(bus, "Some GPIOs are still in use.\n"); + else if (err) + bcma_err(bus, "Can not unregister GPIO driver: %i\n", err); cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K); cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE); diff --git a/drivers/block/drbd/drbd_req.c 
b/drivers/block/drbd/drbd_req.c index f58a4a4b4df..2b8303ad63c 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -168,7 +168,7 @@ static void wake_all_senders(struct drbd_tconn *tconn) { } /* must hold resource->req_lock */ -static void start_new_tl_epoch(struct drbd_tconn *tconn) +void start_new_tl_epoch(struct drbd_tconn *tconn) { /* no point closing an epoch, if it is empty, anyways. */ if (tconn->current_tle_writes == 0) diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index 016de6b8bb5..c08d22964d0 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -267,6 +267,7 @@ struct bio_and_error { int error; }; +extern void start_new_tl_epoch(struct drbd_tconn *tconn); extern void drbd_req_destroy(struct kref *kref); extern void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m); diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index 53bf6182bac..0fe220cfb9e 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c @@ -931,6 +931,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, enum drbd_state_rv rv = SS_SUCCESS; enum sanitize_state_warnings ssw; struct after_state_chg_work *ascw; + bool did_remote, should_do_remote; os = drbd_read_state(mdev); @@ -981,11 +982,17 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, (os.disk != D_DISKLESS && ns.disk == D_DISKLESS)) atomic_inc(&mdev->local_cnt); + did_remote = drbd_should_do_remote(mdev->state); mdev->state.i = ns.i; + should_do_remote = drbd_should_do_remote(mdev->state); mdev->tconn->susp = ns.susp; mdev->tconn->susp_nod = ns.susp_nod; mdev->tconn->susp_fen = ns.susp_fen; + /* put replicated vs not-replicated requests in seperate epochs */ + if (did_remote != should_do_remote) + start_new_tl_epoch(mdev->tconn); + if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING) drbd_print_uuids(mdev, "attached to UUIDs"); diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 9694dd99bbb..3fd10099045 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -626,12 +626,13 @@ static void mtip_timeout_function(unsigned long int data) } } - if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { + if (cmdto_cnt) { print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); - - mtip_restart_port(port); + if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { + mtip_restart_port(port); + wake_up_interruptible(&port->svc_wait); + } clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); - wake_up_interruptible(&port->svc_wait); } if (port->ic_pause_timer) { @@ -3887,7 +3888,12 @@ static int mtip_block_remove(struct driver_data *dd) * Delete our gendisk structure. This also removes the device * from /dev */ - del_gendisk(dd->disk); + if (dd->disk) { + if (dd->disk->queue) + del_gendisk(dd->disk); + else + put_disk(dd->disk); + } spin_lock(&rssd_index_lock); ida_remove(&rssd_index_ida, dd->index); @@ -3921,7 +3927,13 @@ static int mtip_block_shutdown(struct driver_data *dd) "Shutting down %s ...\n", dd->disk->disk_name); /* Delete our gendisk structure, and cleanup the blk queue. 
*/ - del_gendisk(dd->disk); + if (dd->disk) { + if (dd->disk->queue) + del_gendisk(dd->disk); + else + put_disk(dd->disk); + } + spin_lock(&rssd_index_lock); ida_remove(&rssd_index_ida, dd->index); diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 74374fb762a..5ac841ff6cc 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -161,10 +161,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, static void make_response(struct xen_blkif *blkif, u64 id, unsigned short op, int st); -#define foreach_grant(pos, rbtree, node) \ - for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node); \ +#define foreach_grant_safe(pos, n, rbtree, node) \ + for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \ + (n) = rb_next(&(pos)->node); \ &(pos)->node != NULL; \ - (pos) = container_of(rb_next(&(pos)->node), typeof(*(pos)), node)) + (pos) = container_of(n, typeof(*(pos)), node), \ + (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL) static void add_persistent_gnt(struct rb_root *root, @@ -217,10 +219,11 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num) struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct persistent_gnt *persistent_gnt; + struct rb_node *n; int ret = 0; int segs_to_unmap = 0; - foreach_grant(persistent_gnt, root, node) { + foreach_grant_safe(persistent_gnt, n, root, node) { BUG_ON(persistent_gnt->handle == BLKBACK_INVALID_HANDLE); gnttab_set_unmap_op(&unmap[segs_to_unmap], @@ -230,9 +233,6 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num) persistent_gnt->handle); pages[segs_to_unmap] = persistent_gnt->page; - rb_erase(&persistent_gnt->node, root); - kfree(persistent_gnt); - num--; if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST || !rb_next(&persistent_gnt->node)) { @@ -241,6 +241,10 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num) BUG_ON(ret); segs_to_unmap = 0; } + + rb_erase(&persistent_gnt->node, root); + kfree(persistent_gnt); + num--; } BUG_ON(num != 0); } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 96e9b00db08..11043c18ac5 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -792,6 +792,7 @@ static void blkif_free(struct blkfront_info *info, int suspend) { struct llist_node *all_gnts; struct grant *persistent_gnt; + struct llist_node *n; /* Prevent new requests being issued until we fix things up. 
*/ spin_lock_irq(&info->io_lock); @@ -804,7 +805,7 @@ static void blkif_free(struct blkfront_info *info, int suspend) /* Remove all persistent grants */ if (info->persistent_gnts_c) { all_gnts = llist_del_all(&info->persistent_gnts); - llist_for_each_entry(persistent_gnt, all_gnts, node) { + llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) { gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); __free_page(pfn_to_page(persistent_gnt->pfn)); kfree(persistent_gnt); @@ -835,7 +836,7 @@ static void blkif_free(struct blkfront_info *info, int suspend) static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, struct blkif_response *bret) { - int i; + int i = 0; struct bio_vec *bvec; struct req_iterator iter; unsigned long flags; @@ -852,7 +853,8 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, */ rq_for_each_segment(bvec, s->request, iter) { BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE); - i = offset >> PAGE_SHIFT; + if (bvec->bv_offset < offset) + i++; BUG_ON(i >= s->req.u.rw.nr_segments); shared_data = kmap_atomic( pfn_to_page(s->grants_used[i]->pfn)); @@ -861,7 +863,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, bvec->bv_len); bvec_kunmap_irq(bvec_data, &flags); kunmap_atomic(shared_data); - offset += bvec->bv_len; + offset = bvec->bv_offset + bvec->bv_len; } } /* Add the persistent grant into the list of free grants */ diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 684b0d53764..ee4dbeafb37 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -2062,7 +2062,8 @@ static void virtcons_remove(struct virtio_device *vdev) /* Disable interrupts for vqs */ vdev->config->reset(vdev); /* Finish up work that's lined up */ - cancel_work_sync(&portdev->control_work); + if (use_multiport(portdev)) + cancel_work_sync(&portdev->control_work); list_for_each_entry_safe(port, port2, &portdev->ports, list) unplug_port(port); diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 4d0e60adbc6..a2d478e8692 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1313,14 +1313,18 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) { radeon_wait_for_vblank(rdev, i); tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); } } else { tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) { radeon_wait_for_vblank(rdev, i); tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); } } /* wait for the next frame */ @@ -1345,6 +1349,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav blackout &= ~BLACKOUT_MODE_MASK; WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); } + /* wait for the MC to settle */ + udelay(100); } void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) @@ -1378,11 +1384,15 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s if (ASIC_IS_DCE6(rdev)) { tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); tmp |= 
EVERGREEN_CRTC_BLANK_DATA_EN; + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); } else { tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); } /* wait for the next frame */ frame_count = radeon_get_vblank_counter(rdev, i); @@ -2036,9 +2046,20 @@ static void evergreen_gpu_init(struct radeon_device *rdev) WREG32(HDP_ADDR_CONFIG, gb_addr_config); WREG32(DMA_TILING_CONFIG, gb_addr_config); - tmp = gb_addr_config & NUM_PIPES_MASK; - tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, - EVERGREEN_MAX_BACKENDS, disabled_rb_mask); + if ((rdev->config.evergreen.max_backends == 1) && + (rdev->flags & RADEON_IS_IGP)) { + if ((disabled_rb_mask & 3) == 1) { + /* RB0 disabled, RB1 enabled */ + tmp = 0x11111111; + } else { + /* RB1 disabled, RB0 enabled */ + tmp = 0x00000000; + } + } else { + tmp = gb_addr_config & NUM_PIPES_MASK; + tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, + EVERGREEN_MAX_BACKENDS, disabled_rb_mask); + } WREG32(GB_BACKEND_MAP, tmp); WREG32(CGTS_SYS_TCC_DISABLE, 0); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index bc2540b17c5..becb03e8b32 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1462,12 +1462,15 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev, u32 disabled_rb_mask) { u32 rendering_pipe_num, rb_num_width, req_rb_num; - u32 pipe_rb_ratio, pipe_rb_remain; + u32 pipe_rb_ratio, pipe_rb_remain, tmp; u32 data = 0, mask = 1 << (max_rb_num - 1); unsigned i, j; /* mask out the RBs that don't exist on that asic */ - disabled_rb_mask |= (0xff << max_rb_num) & 0xff; + tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff); + /* make sure at least one RB is available */ + if ((tmp & 0xff) != 0xff) + disabled_rb_mask = tmp; rendering_pipe_num = 1 << tiling_pipe_num; req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask); diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 9056fafb00e..0b202c07fe5 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -1445,7 +1445,7 @@ static struct radeon_asic cayman_asic = { .vm = { .init = &cayman_vm_init, .fini = &cayman_vm_fini, - .pt_ring_index = R600_RING_TYPE_DMA_INDEX, + .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, .set_page = &cayman_vm_set_page, }, .ring = { @@ -1572,7 +1572,7 @@ static struct radeon_asic trinity_asic = { .vm = { .init = &cayman_vm_init, .fini = &cayman_vm_fini, - .pt_ring_index = R600_RING_TYPE_DMA_INDEX, + .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, .set_page = &cayman_vm_set_page, }, .ring = { @@ -1699,7 +1699,7 @@ static struct radeon_asic si_asic = { .vm = { .init = &si_vm_init, .fini = &si_vm_fini, - .pt_ring_index = R600_RING_TYPE_DMA_INDEX, + .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, .set_page = &si_vm_set_page, }, .ring = { diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 33a56a09ff1..3e403bdda58 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -2470,6 +2470,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device 
*dev) 1), ATOM_DEVICE_CRT1_SUPPORT); } + /* RV100 board with external TDMS bit mis-set. + * Actually uses internal TMDS, clear the bit. + */ + if (dev->pdev->device == 0x5159 && + dev->pdev->subsystem_vendor == 0x1014 && + dev->pdev->subsystem_device == 0x029A) { + tmp &= ~(1 << 4); + } if ((tmp >> 4) & 0x1) { devices |= ATOM_DEVICE_DFP2_SUPPORT; radeon_add_legacy_encoder(dev, diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index ff3def78461..05c96fa0b05 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1115,8 +1115,10 @@ radeon_user_framebuffer_create(struct drm_device *dev, } radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); - if (radeon_fb == NULL) + if (radeon_fb == NULL) { + drm_gem_object_unreference_unlocked(obj); return ERR_PTR(-ENOMEM); + } ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj); if (ret) { diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 2430d80b187..cd72062d5a9 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -377,6 +377,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi { int r; + /* make sure we aren't trying to allocate more space than there is on the ring */ + if (ndw > (ring->ring_size / 4)) + return -ENOMEM; /* Align requested size with padding so unlock_commit can * pad safely */ ndw = (ndw + ring->align_mask) & ~ring->align_mask; diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman index 0f656b111c1..a072fa8c46b 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/cayman +++ b/drivers/gpu/drm/radeon/reg_srcs/cayman @@ -1,5 +1,6 @@ cayman 0x9400 0x0000802C GRBM_GFX_INDEX +0x00008040 WAIT_UNTIL 0x000084FC CP_STRMOUT_CNTL 0x000085F0 CP_COHER_CNTL 0x000085F4 CP_COHER_SIZE diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 2bb6d0e84b3..435ed355136 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -336,6 +336,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save) WREG32(R600_CITF_CNTL, blackout); } } + /* wait for the MC to settle */ + udelay(100); } void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 44420fca7df..8be35c809c7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -429,7 +429,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_driver *driver = bdev->driver; - fbo = kzalloc(sizeof(*fbo), GFP_KERNEL); + fbo = kmalloc(sizeof(*fbo), GFP_KERNEL); if (!fbo) return -ENOMEM; @@ -448,7 +448,12 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, fbo->vm_node = NULL; atomic_set(&fbo->cpu_writers, 0); - fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); + spin_lock(&bdev->fence_lock); + if (bo->sync_obj) + fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); + else + fbo->sync_obj = NULL; + spin_unlock(&bdev->fence_lock); kref_init(&fbo->list_kref); kref_init(&fbo->kref); fbo->destroy = &ttm_transfered_destroy; @@ -661,13 +666,11 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, */ set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); - - /* ttm_buffer_object_transfer accesses bo->sync_obj */ - ret = ttm_buffer_object_transfer(bo, &ghost_obj); 
spin_unlock(&bdev->fence_lock); if (tmp_obj) driver->sync_obj_unref(&tmp_obj); + ret = ttm_buffer_object_transfer(bo, &ghost_obj); if (ret) return ret; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 4dfa605e2d1..34e25471aea 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -306,6 +306,9 @@ #define USB_VENDOR_ID_EZKEY 0x0518 #define USB_DEVICE_ID_BTC_8193 0x0002 +#define USB_VENDOR_ID_FORMOSA 0x147a +#define USB_DEVICE_ID_FORMOSA_IR_RECEIVER 0xe03e + #define USB_VENDOR_ID_FREESCALE 0x15A2 #define USB_DEVICE_ID_FREESCALE_MX28 0x004F diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 12e4fdc810b..e766b5614ef 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -540,13 +540,24 @@ static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf, { struct i2c_client *client = hid->driver_data; int report_id = buf[0]; + int ret; if (report_type == HID_INPUT_REPORT) return -EINVAL; - return i2c_hid_set_report(client, + if (report_id) { + buf++; + count--; + } + + ret = i2c_hid_set_report(client, report_type == HID_FEATURE_REPORT ? 0x03 : 0x02, report_id, buf, count); + + if (report_id && ret >= 0) + ret++; /* add report_id to the number of transferred bytes */ + + return ret; } static int i2c_hid_parse(struct hid_device *hid) diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index ac9e3522825..e0e6abf1cd3 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -70,6 +70,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 4850d03870c..35275099caf 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c @@ -263,20 +263,15 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) struct qib_qp __rcu **qpp; qpp = &dev->qp_table[n]; - q = rcu_dereference_protected(*qpp, - lockdep_is_held(&dev->qpt_lock)); - for (; q; qpp = &q->next) { + for (; (q = rcu_dereference_protected(*qpp, + lockdep_is_held(&dev->qpt_lock))) != NULL; + qpp = &q->next) if (q == qp) { atomic_dec(&qp->refcount); *qpp = qp->next; rcu_assign_pointer(qp->next, NULL); - q = rcu_dereference_protected(*qpp, - lockdep_is_held(&dev->qpt_lock)); break; } - q = rcu_dereference_protected(*qpp, - lockdep_is_held(&dev->qpt_lock)); - } } spin_unlock_irqrestore(&dev->qpt_lock, flags); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 03103d2bd64..67b0c1d2367 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -741,6 +741,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ tx_req->mapping = addr; + skb_orphan(skb); + skb_dst_drop(skb); + rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), addr, skb->len); if (unlikely(rc)) { @@ -752,9 +755,6 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
dev->trans_start = jiffies; ++tx->tx_head; - skb_orphan(skb); - skb_dst_drop(skb); - if (++priv->tx_outstanding == ipoib_sendq_size) { ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", tx->qp->qp_num); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index a1bca70e20a..2cfa76f5d99 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -600,6 +600,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, netif_stop_queue(dev); } + skb_orphan(skb); + skb_dst_drop(skb); + rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), address->ah, qpn, tx_req, phead, hlen); if (unlikely(rc)) { @@ -615,9 +618,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, address->last_send = priv->tx_head; ++priv->tx_head; - - skb_orphan(skb); - skb_dst_drop(skb); } if (unlikely(priv->tx_outstanding > MAX_SEND_CQE)) diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 675ae527401..5409607d487 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2746,19 +2746,9 @@ static int thin_iterate_devices(struct dm_target *ti, return 0; } -/* - * A thin device always inherits its queue limits from its pool. - */ -static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) -{ - struct thin_c *tc = ti->private; - - *limits = bdev_get_queue(tc->pool_dev->bdev)->limits; -} - static struct target_type thin_target = { .name = "thin", - .version = {1, 6, 0}, + .version = {1, 7, 0}, .module = THIS_MODULE, .ctr = thin_ctr, .dtr = thin_dtr, @@ -2767,7 +2757,6 @@ static struct target_type thin_target = { .postsuspend = thin_postsuspend, .status = thin_status, .iterate_devices = thin_iterate_devices, - .io_hints = thin_io_hints, }; /*----------------------------------------------------------------*/ diff --git a/drivers/md/dm.c b/drivers/md/dm.c index c72e4d5a961..314a0e2faf7 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1188,6 +1188,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci, { struct dm_target *ti; sector_t len; + unsigned num_requests; do { ti = dm_table_find_target(ci->map, ci->sector); @@ -1200,7 +1201,8 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci, * reconfiguration might also have changed that since the * check was performed. */ - if (!get_num_requests || !get_num_requests(ti)) + num_requests = get_num_requests ? 
get_num_requests(ti) : 0; + if (!num_requests) return -EOPNOTSUPP; if (is_split_required && !is_split_required(ti)) @@ -1208,7 +1210,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci, else len = min(ci->sector_count, max_io_len(ci->sector, ti)); - __issue_target_requests(ci, ti, ti->num_discard_requests, len); + __issue_target_requests(ci, ti, num_requests, len); ci->sector += len; } while (ci->sector_count -= len); diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c index e10e525f33e..296941a9ae2 100644 --- a/drivers/media/radio/radio-keene.c +++ b/drivers/media/radio/radio-keene.c @@ -374,6 +374,7 @@ static int usb_keene_probe(struct usb_interface *intf, radio->vdev.ioctl_ops = &usb_keene_ioctl_ops; radio->vdev.lock = &radio->lock; radio->vdev.release = video_device_release_empty; + radio->vdev.vfl_dir = VFL_DIR_TX; radio->usbdev = interface_to_usbdev(intf); radio->intf = intf; diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/radio-si4713.c index a082e400ed0..1507c9d508d 100644 --- a/drivers/media/radio/radio-si4713.c +++ b/drivers/media/radio/radio-si4713.c @@ -250,6 +250,7 @@ static struct video_device radio_si4713_vdev_template = { .name = "radio-si4713", .release = video_device_release, .ioctl_ops = &radio_si4713_ioctl_ops, + .vfl_dir = VFL_DIR_TX, }; /* Platform driver interface */ diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c index c48be195bba..cabbe3adf43 100644 --- a/drivers/media/radio/radio-wl1273.c +++ b/drivers/media/radio/radio-wl1273.c @@ -1971,6 +1971,7 @@ static struct video_device wl1273_viddev_template = { .ioctl_ops = &wl1273_ioctl_ops, .name = WL1273_FM_DRIVER_NAME, .release = wl1273_vdev_release, + .vfl_dir = VFL_DIR_TX, }; static int wl1273_fm_radio_remove(struct platform_device *pdev) diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c index 048de453603..0a8ee8fab92 100644 --- a/drivers/media/radio/wl128x/fmdrv_v4l2.c +++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c @@ -518,6 +518,16 @@ static struct video_device fm_viddev_template = { .ioctl_ops = &fm_drv_ioctl_ops, .name = FM_DRV_NAME, .release = video_device_release, + /* + * To ensure both the tuner and modulator ioctls are accessible we + * set the vfl_dir to M2M to indicate this. + * + * It is not really a mem2mem device of course, but it can both receive + * and transmit using the same radio device. It's the only radio driver + * that does this and it should really be split in two radio devices, + * but that would affect applications using this driver. 
+ */ + .vfl_dir = VFL_DIR_M2M, }; int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr) diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 150772395cc..372e921389c 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -20,6 +20,7 @@ #include <linux/err.h> #include <linux/highmem.h> #include <linux/log2.h> +#include <linux/mmc/pm.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> #include <linux/amba/bus.h> @@ -59,6 +60,7 @@ static unsigned int fmax = 515633; * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register * @pwrreg_powerup: power up value for MMCIPOWER register * @signal_direction: input/out direction of bus signals can be indicated + * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock */ struct variant_data { unsigned int clkreg; @@ -71,6 +73,7 @@ struct variant_data { bool blksz_datactrl16; u32 pwrreg_powerup; bool signal_direction; + bool pwrreg_clkgate; }; static struct variant_data variant_arm = { @@ -87,6 +90,14 @@ static struct variant_data variant_arm_extended_fifo = { .pwrreg_powerup = MCI_PWR_UP, }; +static struct variant_data variant_arm_extended_fifo_hwfc = { + .fifosize = 128 * 4, + .fifohalfsize = 64 * 4, + .clkreg_enable = MCI_ARM_HWFCEN, + .datalength_bits = 16, + .pwrreg_powerup = MCI_PWR_UP, +}; + static struct variant_data variant_u300 = { .fifosize = 16 * 4, .fifohalfsize = 8 * 4, @@ -95,6 +106,7 @@ static struct variant_data variant_u300 = { .sdio = true, .pwrreg_powerup = MCI_PWR_ON, .signal_direction = true, + .pwrreg_clkgate = true, }; static struct variant_data variant_nomadik = { @@ -106,6 +118,7 @@ static struct variant_data variant_nomadik = { .st_clkdiv = true, .pwrreg_powerup = MCI_PWR_ON, .signal_direction = true, + .pwrreg_clkgate = true, }; static struct variant_data variant_ux500 = { @@ -118,6 +131,7 @@ static struct variant_data variant_ux500 = { .st_clkdiv = true, .pwrreg_powerup = MCI_PWR_ON, .signal_direction = true, + .pwrreg_clkgate = true, }; static struct variant_data variant_ux500v2 = { @@ -131,9 +145,28 @@ static struct variant_data variant_ux500v2 = { .blksz_datactrl16 = true, .pwrreg_powerup = MCI_PWR_ON, .signal_direction = true, + .pwrreg_clkgate = true, }; /* + * Validate mmc prerequisites + */ +static int mmci_validate_data(struct mmci_host *host, + struct mmc_data *data) +{ + if (!data) + return 0; + + if (!is_power_of_2(data->blksz)) { + dev_err(mmc_dev(host->mmc), + "unsupported block size (%d bytes)\n", data->blksz); + return -EINVAL; + } + + return 0; +} + +/* * This must be called with host->lock held */ static void mmci_write_clkreg(struct mmci_host *host, u32 clk) @@ -202,6 +235,9 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) clk |= MCI_ST_8BIT_BUS; + if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50) + clk |= MCI_ST_UX500_NEG_EDGE; + mmci_write_clkreg(host, clk); } @@ -352,10 +388,33 @@ static inline void mmci_dma_release(struct mmci_host *host) host->dma_rx_channel = host->dma_tx_channel = NULL; } +static void mmci_dma_data_error(struct mmci_host *host) +{ + dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); + dmaengine_terminate_all(host->dma_current); + host->dma_current = NULL; + host->dma_desc_current = NULL; + host->data->host_cookie = 0; +} + static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) { - struct dma_chan *chan = host->dma_current; + struct dma_chan *chan; enum dma_data_direction dir; + + if 
(data->flags & MMC_DATA_READ) { + dir = DMA_FROM_DEVICE; + chan = host->dma_rx_channel; + } else { + dir = DMA_TO_DEVICE; + chan = host->dma_tx_channel; + } + + dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); +} + +static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data) +{ u32 status; int i; @@ -374,19 +433,13 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) * contiguous buffers. On TX, we'll get a FIFO underrun error. */ if (status & MCI_RXDATAAVLBLMASK) { - dmaengine_terminate_all(chan); + mmci_dma_data_error(host); if (!data->error) data->error = -EIO; } - if (data->flags & MMC_DATA_WRITE) { - dir = DMA_TO_DEVICE; - } else { - dir = DMA_FROM_DEVICE; - } - if (!data->host_cookie) - dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); + mmci_dma_unmap(host, data); /* * Use of DMA with scatter-gather is impossible. @@ -396,16 +449,15 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n"); mmci_dma_release(host); } -} -static void mmci_dma_data_error(struct mmci_host *host) -{ - dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); - dmaengine_terminate_all(host->dma_current); + host->dma_current = NULL; + host->dma_desc_current = NULL; } -static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, - struct mmci_host_next *next) +/* prepares DMA channel and DMA descriptor, returns non-zero on failure */ +static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, + struct dma_chan **dma_chan, + struct dma_async_tx_descriptor **dma_desc) { struct variant_data *variant = host->variant; struct dma_slave_config conf = { @@ -423,16 +475,6 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, enum dma_data_direction buffer_dirn; int nr_sg; - /* Check if next job is already prepared */ - if (data->host_cookie && !next && - host->dma_current && host->dma_desc_current) - return 0; - - if (!next) { - host->dma_current = NULL; - host->dma_desc_current = NULL; - } - if (data->flags & MMC_DATA_READ) { conf.direction = DMA_DEV_TO_MEM; buffer_dirn = DMA_FROM_DEVICE; @@ -462,29 +504,41 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, if (!desc) goto unmap_exit; - if (next) { - next->dma_chan = chan; - next->dma_desc = desc; - } else { - host->dma_current = chan; - host->dma_desc_current = desc; - } + *dma_chan = chan; + *dma_desc = desc; return 0; unmap_exit: - if (!next) - dmaengine_terminate_all(chan); dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn); return -ENOMEM; } +static inline int mmci_dma_prep_data(struct mmci_host *host, + struct mmc_data *data) +{ + /* Check if next job is already prepared. */ + if (host->dma_current && host->dma_desc_current) + return 0; + + /* No job was prepared, thus do it now. 
*/ + return __mmci_dma_prep_data(host, data, &host->dma_current, + &host->dma_desc_current); +} + +static inline int mmci_dma_prep_next(struct mmci_host *host, + struct mmc_data *data) +{ + struct mmci_host_next *nd = &host->next_data; + return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc); +} + static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) { int ret; struct mmc_data *data = host->data; - ret = mmci_dma_prep_data(host, host->data, NULL); + ret = mmci_dma_prep_data(host, host->data); if (ret) return ret; @@ -514,19 +568,11 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) { struct mmci_host_next *next = &host->next_data; - if (data->host_cookie && data->host_cookie != next->cookie) { - pr_warning("[%s] invalid cookie: data->host_cookie %d" - " host->next_data.cookie %d\n", - __func__, data->host_cookie, host->next_data.cookie); - data->host_cookie = 0; - } - - if (!data->host_cookie) - return; + WARN_ON(data->host_cookie && data->host_cookie != next->cookie); + WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan)); host->dma_desc_current = next->dma_desc; host->dma_current = next->dma_chan; - next->dma_desc = NULL; next->dma_chan = NULL; } @@ -541,19 +587,13 @@ static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq, if (!data) return; - if (data->host_cookie) { - data->host_cookie = 0; + BUG_ON(data->host_cookie); + + if (mmci_validate_data(host, data)) return; - } - /* if config for dma */ - if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) || - ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) { - if (mmci_dma_prep_data(host, data, nd)) - data->host_cookie = 0; - else - data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie; - } + if (!mmci_dma_prep_next(host, data)) + data->host_cookie = ++nd->cookie < 0 ? 
1 : nd->cookie; } static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, @@ -561,29 +601,23 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, { struct mmci_host *host = mmc_priv(mmc); struct mmc_data *data = mrq->data; - struct dma_chan *chan; - enum dma_data_direction dir; - if (!data) + if (!data || !data->host_cookie) return; - if (data->flags & MMC_DATA_READ) { - dir = DMA_FROM_DEVICE; - chan = host->dma_rx_channel; - } else { - dir = DMA_TO_DEVICE; - chan = host->dma_tx_channel; - } + mmci_dma_unmap(host, data); + if (err) { + struct mmci_host_next *next = &host->next_data; + struct dma_chan *chan; + if (data->flags & MMC_DATA_READ) + chan = host->dma_rx_channel; + else + chan = host->dma_tx_channel; + dmaengine_terminate_all(chan); - /* if config for dma */ - if (chan) { - if (err) - dmaengine_terminate_all(chan); - if (data->host_cookie) - dma_unmap_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, dir); - mrq->data->host_cookie = 0; + next->dma_desc = NULL; + next->dma_chan = NULL; } } @@ -604,6 +638,11 @@ static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) { } +static inline void mmci_dma_finalize(struct mmci_host *host, + struct mmc_data *data) +{ +} + static inline void mmci_dma_data_error(struct mmci_host *host) { } @@ -680,6 +719,9 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) mmci_write_clkreg(host, clk); } + if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50) + datactrl |= MCI_ST_DPSM_DDRMODE; + /* * Attempt to use DMA operation mode, if this * should fail, fall back to PIO mode @@ -751,8 +793,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, u32 remain, success; /* Terminate the DMA transfer */ - if (dma_inprogress(host)) + if (dma_inprogress(host)) { mmci_dma_data_error(host); + mmci_dma_unmap(host, data); + } /* * Calculate how far we are into the transfer. 
Note that @@ -791,7 +835,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, if (status & MCI_DATAEND || data->error) { if (dma_inprogress(host)) - mmci_dma_unmap(host, data); + mmci_dma_finalize(host, data); mmci_stop_data(host); if (!data->error) @@ -828,8 +872,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, if (!cmd->data || cmd->error) { if (host->data) { /* Terminate the DMA transfer */ - if (dma_inprogress(host)) + if (dma_inprogress(host)) { mmci_dma_data_error(host); + mmci_dma_unmap(host, host->data); + } mmci_stop_data(host); } mmci_request_end(host, cmd->mrq); @@ -1055,10 +1101,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) WARN_ON(host->mrq != NULL); - if (mrq->data && !is_power_of_2(mrq->data->blksz)) { - dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n", - mrq->data->blksz); - mrq->cmd->error = -EINVAL; + mrq->cmd->error = mmci_validate_data(host, mrq->data); + if (mrq->cmd->error) { mmc_request_done(mmc, mrq); return; } @@ -1086,7 +1130,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) struct variant_data *variant = host->variant; u32 pwr = 0; unsigned long flags; - int ret; pm_runtime_get_sync(mmc_dev(mmc)); @@ -1096,23 +1139,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) switch (ios->power_mode) { case MMC_POWER_OFF: - if (host->vcc) - ret = mmc_regulator_set_ocr(mmc, host->vcc, 0); + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); break; case MMC_POWER_UP: - if (host->vcc) { - ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd); - if (ret) { - dev_err(mmc_dev(mmc), "unable to set OCR\n"); - /* - * The .set_ios() function in the mmc_host_ops - * struct return void, and failing to set the - * power should be rare so we print an error - * and return here. - */ - goto out; - } - } + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); + /* * The ST Micro variant doesn't have the PL180s MCI_PWR_UP * and instead uses MCI_PWR_ON so apply whatever value is @@ -1154,6 +1187,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) } } + /* + * If clock = 0 and the variant requires the MMCIPOWER to be used for + * gating the clock, the MCI_PWR_ON bit is cleared. 
+ */ + if (!ios->clock && variant->pwrreg_clkgate) + pwr &= ~MCI_PWR_ON; + spin_lock_irqsave(&host->lock, flags); mmci_set_clkreg(host, ios->clock); @@ -1161,7 +1201,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) spin_unlock_irqrestore(&host->lock, flags); - out: pm_runtime_mark_last_busy(mmc_dev(mmc)); pm_runtime_put_autosuspend(mmc_dev(mmc)); } @@ -1384,32 +1423,19 @@ static int mmci_probe(struct amba_device *dev, } else dev_warn(&dev->dev, "could not get default pinstate\n"); -#ifdef CONFIG_REGULATOR - /* If we're using the regulator framework, try to fetch a regulator */ - host->vcc = regulator_get(&dev->dev, "vmmc"); - if (IS_ERR(host->vcc)) - host->vcc = NULL; - else { - int mask = mmc_regulator_get_ocrmask(host->vcc); - - if (mask < 0) - dev_err(&dev->dev, "error getting OCR mask (%d)\n", - mask); - else { - host->mmc->ocr_avail = (u32) mask; - if (plat->ocr_mask) - dev_warn(&dev->dev, - "Provided ocr_mask/setpower will not be used " - "(using regulator instead)\n"); - } - } -#endif - /* Fall back to platform data if no regulator is found */ - if (host->vcc == NULL) + /* Get regulators and the supported OCR mask */ + mmc_regulator_get_supply(mmc); + if (!mmc->ocr_avail) mmc->ocr_avail = plat->ocr_mask; + else if (plat->ocr_mask) + dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); + mmc->caps = plat->capabilities; mmc->caps2 = plat->capabilities2; + /* We support these PM capabilities. */ + mmc->pm_caps = MMC_PM_KEEP_POWER; + /* * We can do SGIO */ @@ -1585,10 +1611,6 @@ static int mmci_remove(struct amba_device *dev) clk_disable_unprepare(host->clk); clk_put(host->clk); - if (host->vcc) - mmc_regulator_set_ocr(mmc, host->vcc, 0); - regulator_put(host->vcc); - mmc_free_host(mmc); amba_release_regions(dev); @@ -1636,8 +1658,37 @@ static int mmci_resume(struct device *dev) } #endif +#ifdef CONFIG_PM_RUNTIME +static int mmci_runtime_suspend(struct device *dev) +{ + struct amba_device *adev = to_amba_device(dev); + struct mmc_host *mmc = amba_get_drvdata(adev); + + if (mmc) { + struct mmci_host *host = mmc_priv(mmc); + clk_disable_unprepare(host->clk); + } + + return 0; +} + +static int mmci_runtime_resume(struct device *dev) +{ + struct amba_device *adev = to_amba_device(dev); + struct mmc_host *mmc = amba_get_drvdata(adev); + + if (mmc) { + struct mmci_host *host = mmc_priv(mmc); + clk_prepare_enable(host->clk); + } + + return 0; +} +#endif + static const struct dev_pm_ops mmci_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume) + SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL) }; static struct amba_id mmci_ids[] = { @@ -1652,6 +1703,11 @@ static struct amba_id mmci_ids[] = { .data = &variant_arm_extended_fifo, }, { + .id = 0x02041180, + .mask = 0xff0fffff, + .data = &variant_arm_extended_fifo_hwfc, + }, + { .id = 0x00041181, .mask = 0x000fffff, .data = &variant_arm, diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index d34d8c0add8..1f33ad5333a 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -28,6 +28,8 @@ #define MCI_ST_UX500_NEG_EDGE (1 << 13) #define MCI_ST_UX500_HWFCEN (1 << 14) #define MCI_ST_UX500_CLK_INV (1 << 15) +/* Modified PL180 on Versatile Express platform */ +#define MCI_ARM_HWFCEN (1 << 12) #define MMCIARGUMENT 0x008 #define MMCICOMMAND 0x00c @@ -193,7 +195,6 @@ struct mmci_host { /* pio stuff */ struct sg_mapping_iter sg_miter; unsigned int size; - struct regulator *vcc; /* pinctrl handles */ struct pinctrl *pinctrl; diff --git a/drivers/mtd/devices/Kconfig 
b/drivers/mtd/devices/Kconfig index 27f80cd8aef..46dcb54c32e 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -272,6 +272,7 @@ config MTD_DOCG3 tristate "M-Systems Disk-On-Chip G3" select BCH select BCH_CONST_PARAMS + select BITREVERSE ---help--- This provides an MTD device driver for the M-Systems DiskOnChip G3 devices. diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index 67cc73c18dd..7901d72c924 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c @@ -170,7 +170,7 @@ static int of_flash_probe(struct platform_device *dev) resource_size_t res_size; struct mtd_part_parser_data ppdata; bool map_indirect; - const char *mtd_name; + const char *mtd_name = NULL; match = of_match_device(of_flash_match, &dev->dev); if (!match) diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c index 86c9a79b89b..595de4012e7 100644 --- a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c +++ b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c @@ -17,8 +17,8 @@ #include "bcm47xxnflash.h" /* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has - * shown 164 retries as maxiumum. */ -#define NFLASH_READY_RETRIES 1000 + * shown ~1000 retries as maximum. */ +#define NFLASH_READY_RETRIES 10000 #define NFLASH_SECTOR_SIZE 512 diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index 3502606f648..feae55c7b88 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c @@ -523,7 +523,7 @@ static struct nand_ecclayout hwecc4_2048 __initconst = { static const struct of_device_id davinci_nand_of_match[] = { {.compatible = "ti,davinci-nand", }, {}, -} +}; MODULE_DEVICE_TABLE(of, davinci_nand_of_match); static struct davinci_nand_pdata diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 8323ac991ad..3766682a028 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2857,8 +2857,11 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, int i; int val; - /* ONFI need to be probed in 8 bits mode */ - WARN_ON(chip->options & NAND_BUSWIDTH_16); + /* ONFI needs to be probed in 8-bit mode, and 16-bit mode should be selected with NAND_BUSWIDTH_AUTO */ + if (chip->options & NAND_BUSWIDTH_16) { + pr_err("Trying ONFI probe in 16 bits mode, aborting !\n"); + return 0; + } /* Try ONFI for unknown chip or LP */ chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1); if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' || diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 1877ed7ca08..1c9e09fbdff 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d, pr_info("%s: Setting primary slave to None.\n", bond->dev->name); bond->primary_slave = NULL; + memset(bond->params.primary, 0, sizeof(bond->params.primary)); bond_select_active_slave(bond); goto out; } diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 58607f196c9..2282b1ae976 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -488,8 +488,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface, priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), IFX_WRITE_LOW_16BIT(mask)); + + /* According to C_CAN documentation, the reserved bit + * in IFx_MASK2 register is fixed 1 + */ priv->write_reg(priv, 
C_CAN_IFACE(MASK2_REG, iface), - IFX_WRITE_HIGH_16BIT(mask)); + IFX_WRITE_HIGH_16BIT(mask) | BIT(13)); priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), IFX_WRITE_LOW_16BIT(id)); diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 4eba17b83ba..f1b3df167ff 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -36,13 +36,13 @@ #define DRV_VER "4.4.161.0u" #define DRV_NAME "be2net" -#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" -#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" -#define OC_NAME "Emulex OneConnect 10Gbps NIC" +#define BE_NAME "Emulex BladeEngine2" +#define BE3_NAME "Emulex BladeEngine3" +#define OC_NAME "Emulex OneConnect" #define OC_NAME_BE OC_NAME "(be3)" #define OC_NAME_LANCER OC_NAME "(Lancer)" #define OC_NAME_SH OC_NAME "(Skyhawk)" -#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver" +#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver" #define BE_VENDOR_ID 0x19a2 #define EMULEX_VENDOR_ID 0x10df diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 5c995700e53..4d6f3c54427 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -25,7 +25,7 @@ MODULE_VERSION(DRV_VER); MODULE_DEVICE_TABLE(pci, be_dev_ids); MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); -MODULE_AUTHOR("ServerEngines Corporation"); +MODULE_AUTHOR("Emulex Corporation"); MODULE_LICENSE("GPL"); static unsigned int num_vfs; diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 02a12b69555..4dab6fc265a 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -232,6 +232,7 @@ #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ #define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */ #define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */ +#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */ #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ #define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ @@ -389,6 +390,12 @@ #define E1000_PBS_16K E1000_PBA_16K +/* Uncorrectable/correctable ECC Error counts and enable bits */ +#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF +#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00 +#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8 +#define E1000_PBECCSTS_ECC_ENABLE 0x00010000 + #define IFS_MAX 80 #define IFS_MIN 40 #define IFS_RATIO 4 @@ -408,6 +415,7 @@ #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ +#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ #define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ #define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ #define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ @@ -443,6 +451,7 @@ #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. 
threshold */ #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ +#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ #define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ #define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ #define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 6782a2eea1b..7e95f221d60 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -309,6 +309,8 @@ struct e1000_adapter { struct napi_struct napi; + unsigned int uncorr_errors; /* uncorrectable ECC errors */ + unsigned int corr_errors; /* correctable ECC errors */ unsigned int restart_queue; u32 txd_cmd; diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index f95bc6ee1c2..fd4772a2691 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -108,6 +108,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = { E1000_STAT("dropped_smbus", stats.mgpdc), E1000_STAT("rx_dma_failed", rx_dma_failed), E1000_STAT("tx_dma_failed", tx_dma_failed), + E1000_STAT("uncorr_ecc_errors", uncorr_errors), + E1000_STAT("corr_ecc_errors", corr_errors), }; #define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index cf217777586..b88676ff3d8 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -77,6 +77,7 @@ enum e1e_registers { #define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ E1000_PBS = 0x01008, /* Packet Buffer Size */ + E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */ E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */ E1000_FLOP = 0x0103C, /* FLASH Opcode Register */ diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 97633654760..24d9f61956f 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -3624,6 +3624,17 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) if (hw->mac.type == e1000_ich8lan) reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); ew32(RFCTL, reg); + + /* Enable ECC on Lynxpoint */ + if (hw->mac.type == e1000_pch_lpt) { + reg = er32(PBECCSTS); + reg |= E1000_PBECCSTS_ECC_ENABLE; + ew32(PBECCSTS, reg); + + reg = er32(CTRL); + reg |= E1000_CTRL_MEHE; + ew32(CTRL, reg); + } } /** diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index fbf75fdca99..643c883dd79 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1678,6 +1678,23 @@ static irqreturn_t e1000_intr_msi(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } + /* Reset on uncorrectable ECC error */ + if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { + u32 pbeccsts = er32(PBECCSTS); + + adapter->corr_errors += + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; + adapter->uncorr_errors += + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); + + /* return immediately since 
reset is imminent */ + return IRQ_HANDLED; + } + if (napi_schedule_prep(&adapter->napi)) { adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; @@ -1741,6 +1758,23 @@ static irqreturn_t e1000_intr(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } + /* Reset on uncorrectable ECC error */ + if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { + u32 pbeccsts = er32(PBECCSTS); + + adapter->corr_errors += + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; + adapter->uncorr_errors += + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); + + /* return immediately since reset is imminent */ + return IRQ_HANDLED; + } + if (napi_schedule_prep(&adapter->napi)) { adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; @@ -2104,6 +2138,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) if (adapter->msix_entries) { ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); + } else if (hw->mac.type == e1000_pch_lpt) { + ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); } else { ew32(IMS, IMS_ENABLE_MASK); } @@ -4251,6 +4287,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) adapter->stats.mgptc += er32(MGTPTC); adapter->stats.mgprc += er32(MGTPRC); adapter->stats.mgpdc += er32(MGTPDC); + + /* Correctable ECC Errors */ + if (hw->mac.type == e1000_pch_lpt) { + u32 pbeccsts = er32(PBECCSTS); + adapter->corr_errors += + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; + adapter->uncorr_errors += + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + } } /** diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index a6542d75374..5163af31499 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -380,7 +380,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) } } - if ((dev_cap->flags & + if ((dev->caps.flags & (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) && mlx4_is_master(dev)) dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 7992b3e05d3..78ace59efd2 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -1801,7 +1801,7 @@ static void rhine_tx(struct net_device *dev) rp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE); } - dev_kfree_skb_irq(rp->tx_skbuff[entry]); + dev_kfree_skb(rp->tx_skbuff[entry]); rp->tx_skbuff[entry] = NULL; entry = (++rp->dirty_tx) % TX_RING_SIZE; } @@ -2010,11 +2010,7 @@ static void rhine_slow_event_task(struct work_struct *work) if (intr_status & IntrPCIErr) netif_warn(rp, hw, dev, "PCI error\n"); - napi_disable(&rp->napi); - rhine_irq_disable(rp); - /* Slow and safe. Consider __napi_schedule as a replacement ? 
*/ - napi_enable(&rp->napi); - napi_schedule(&rp->napi); + iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable); out_unlock: mutex_unlock(&rp->task_lock); } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index cc09b67c23b..2917a86f4c4 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -298,11 +298,12 @@ static void tun_flow_cleanup(unsigned long data) } static void tun_flow_update(struct tun_struct *tun, u32 rxhash, - u16 queue_index) + struct tun_file *tfile) { struct hlist_head *head; struct tun_flow_entry *e; unsigned long delay = tun->ageing_time; + u16 queue_index = tfile->queue_index; if (!rxhash) return; @@ -311,7 +312,9 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash, rcu_read_lock(); - if (tun->numqueues == 1) + /* We may get a very small possibility of OOO during switching, not + * worth optimizing. */ + if (tun->numqueues == 1 || tfile->detached) goto unlock; e = tun_flow_find(head, rxhash); @@ -411,21 +414,21 @@ static void __tun_detach(struct tun_file *tfile, bool clean) tun = rtnl_dereference(tfile->tun); - if (tun) { + if (tun && !tfile->detached) { u16 index = tfile->queue_index; BUG_ON(index >= tun->numqueues); dev = tun->dev; rcu_assign_pointer(tun->tfiles[index], tun->tfiles[tun->numqueues - 1]); - rcu_assign_pointer(tfile->tun, NULL); ntfile = rtnl_dereference(tun->tfiles[index]); ntfile->queue_index = index; --tun->numqueues; - if (clean) + if (clean) { + rcu_assign_pointer(tfile->tun, NULL); sock_put(&tfile->sk); - else + } else tun_disable_queue(tun, tfile); synchronize_net(); @@ -439,10 +442,13 @@ static void __tun_detach(struct tun_file *tfile, bool clean) } if (clean) { - if (tun && tun->numqueues == 0 && tun->numdisabled == 0 && - !(tun->flags & TUN_PERSIST)) - if (tun->dev->reg_state == NETREG_REGISTERED) + if (tun && tun->numqueues == 0 && tun->numdisabled == 0) { + netif_carrier_off(tun->dev); + + if (!(tun->flags & TUN_PERSIST) && + tun->dev->reg_state == NETREG_REGISTERED) unregister_netdevice(tun->dev); + } BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags)); @@ -470,6 +476,10 @@ static void tun_detach_all(struct net_device *dev) rcu_assign_pointer(tfile->tun, NULL); --tun->numqueues; } + list_for_each_entry(tfile, &tun->disabled, next) { + wake_up_all(&tfile->wq.wait); + rcu_assign_pointer(tfile->tun, NULL); + } BUG_ON(tun->numqueues != 0); synchronize_net(); @@ -500,7 +510,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file) goto out; err = -EINVAL; - if (rtnl_dereference(tfile->tun)) + if (rtnl_dereference(tfile->tun) && !tfile->detached) goto out; err = -EBUSY; @@ -1199,7 +1209,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, tun->dev->stats.rx_packets++; tun->dev->stats.rx_bytes += len; - tun_flow_update(tun, rxhash, tfile->queue_index); + tun_flow_update(tun, rxhash, tfile); return total_len; } @@ -1658,10 +1668,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) device_create_file(&tun->dev->dev, &dev_attr_owner) || device_create_file(&tun->dev->dev, &dev_attr_group)) pr_err("Failed to create tun sysfs files\n"); - - netif_carrier_on(tun->dev); } + netif_carrier_on(tun->dev); + tun_debug(KERN_INFO, tun, "tun_set_iff\n"); if (ifr->ifr_flags & IFF_NO_PI) @@ -1813,7 +1823,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr) ret = tun_attach(tun, file); } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { tun = rtnl_dereference(tfile->tun); - if (!tun || !(tun->flags & TUN_TAP_MQ) 
|| tfile->detached) ret = -EINVAL; else __tun_detach(tfile, false); diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 9197b2c72ca..00d3b2d3782 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1215,6 +1215,9 @@ static const struct usb_device_id cdc_devs[] = { { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46), .driver_info = (unsigned long)&wwan_info, }, + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76), + .driver_info = (unsigned long)&wwan_info, + }, /* Infineon(now Intel) HSPA Modem platform */ { USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 575a5839ee3..c8e05e27f38 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -351,6 +351,10 @@ static const struct usb_device_id products[] = { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57), .driver_info = (unsigned long)&qmi_wwan_info, }, + { /* HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM */ + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69), + .driver_info = (unsigned long)&qmi_wwan_info, + }, /* 2. Combined interface devices matching on class+protocol */ { /* Huawei E367 and possibly others in "Windows mode" */ @@ -361,6 +365,14 @@ static const struct usb_device_id products[] = { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17), .driver_info = (unsigned long)&qmi_wwan_info, }, + { /* HUAWEI_NDIS_SINGLE_INTERFACE_VDF */ + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x37), + .driver_info = (unsigned long)&qmi_wwan_info, + }, + { /* HUAWEI_INTERFACE_NDIS_HW_QUALCOMM */ + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x67), + .driver_info = (unsigned long)&qmi_wwan_info, + }, { /* Pantech UML290, P4200 and more */ USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff), .driver_info = (unsigned long)&qmi_wwan_info, @@ -461,6 +473,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ + {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index f34b2ebee81..5e33606c136 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) unsigned long lockflags; size_t size = dev->rx_urb_size; + /* prevent rx skb allocation when error ratio is high */ + if (test_bit(EVENT_RX_KILL, &dev->flags)) { + usb_free_urb(urb); + return -ENOLINK; + } + skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); if (!skb) { netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); @@ -539,6 +545,17 @@ block: break; } + /* stop rx if packet error rate is high */ + if (++dev->pkt_cnt > 30) { + dev->pkt_cnt = 0; + dev->pkt_err = 0; + } else { + if (state == rx_cleanup) + dev->pkt_err++; + if (dev->pkt_err > 20) + set_bit(EVENT_RX_KILL, &dev->flags); + } + state = defer_bh(dev, skb, &dev->rxq, state); if (urb) { @@ -791,6 +808,11 @@ int usbnet_open (struct net_device *net) (dev->driver_info->flags & FLAG_FRAMING_AX) ? 
"ASIX" : "simple"); + /* reset rx error state */ + dev->pkt_cnt = 0; + dev->pkt_err = 0; + clear_bit(EVENT_RX_KILL, &dev->flags); + // delay posting reads until we're fully open tasklet_schedule (&dev->bh); if (info->manage_power) { @@ -1103,13 +1125,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, if (info->tx_fixup) { skb = info->tx_fixup (dev, skb, GFP_ATOMIC); if (!skb) { - if (netif_msg_tx_err(dev)) { - netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); - goto drop; - } else { - /* cdc_ncm collected packet; waits for more */ + /* packet collected; minidriver waiting for more */ + if (info->flags & FLAG_MULTI_PACKET) goto not_drop; - } + netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); + goto drop; } } length = skb->len; @@ -1254,6 +1274,9 @@ static void usbnet_bh (unsigned long param) } } + /* restart RX again after disabling due to high error rate */ + clear_bit(EVENT_RX_KILL, &dev->flags); + // waiting for all pending urbs to complete? if (dev->wait) { if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) { diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index dc8913c6238..12c6440d164 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -154,8 +154,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) if (ret & 1) { /* Link is up. */ printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", adapter->netdev->name, adapter->link_speed); - if (!netif_carrier_ok(adapter->netdev)) - netif_carrier_on(adapter->netdev); + netif_carrier_on(adapter->netdev); if (affectTxQueue) { for (i = 0; i < adapter->num_tx_queues; i++) @@ -165,8 +164,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) } else { printk(KERN_INFO "%s: NIC Link is Down\n", adapter->netdev->name); - if (netif_carrier_ok(adapter->netdev)) - netif_carrier_off(adapter->netdev); + netif_carrier_off(adapter->netdev); if (affectTxQueue) { for (i = 0; i < adapter->num_tx_queues; i++) @@ -3061,6 +3059,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); + netif_carrier_off(netdev); err = register_netdev(netdev); if (err) { diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 0f71d1d4339..e5fd20994be 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -36,6 +36,7 @@ #include "debug.h" #define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */ +#define BRCMS_FLUSH_TIMEOUT 500 /* msec */ /* Flags we support */ #define MAC_FILTERS (FIF_PROMISC_IN_BSS | \ @@ -708,16 +709,29 @@ static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw) wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked); } +static bool brcms_tx_flush_completed(struct brcms_info *wl) +{ + bool result; + + spin_lock_bh(&wl->lock); + result = brcms_c_tx_flush_completed(wl->wlc); + spin_unlock_bh(&wl->lock); + return result; +} + static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop) { struct brcms_info *wl = hw->priv; + int ret; no_printk("%s: drop = %s\n", __func__, drop ? 
"true" : "false"); - /* wait for packet queue and dma fifos to run empty */ - spin_lock_bh(&wl->lock); - brcms_c_wait_for_tx_completion(wl->wlc, drop); - spin_unlock_bh(&wl->lock); + ret = wait_event_timeout(wl->tx_flush_wq, + brcms_tx_flush_completed(wl), + msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT)); + + brcms_dbg_mac80211(wl->wlc->hw->d11core, + "ret=%d\n", jiffies_to_msecs(ret)); } static const struct ieee80211_ops brcms_ops = { @@ -772,6 +786,7 @@ void brcms_dpc(unsigned long data) done: spin_unlock_bh(&wl->lock); + wake_up(&wl->tx_flush_wq); } /* @@ -1020,6 +1035,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev) atomic_set(&wl->callbacks, 0); + init_waitqueue_head(&wl->tx_flush_wq); + /* setup the bottom half handler */ tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl); @@ -1609,13 +1626,3 @@ bool brcms_rfkill_set_hw_state(struct brcms_info *wl) spin_lock_bh(&wl->lock); return blocked; } - -/* - * precondition: perimeter lock has been acquired - */ -void brcms_msleep(struct brcms_info *wl, uint ms) -{ - spin_unlock_bh(&wl->lock); - msleep(ms); - spin_lock_bh(&wl->lock); -} diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h index 9358bd5ebd3..947ccacf43e 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h @@ -68,6 +68,8 @@ struct brcms_info { spinlock_t lock; /* per-device perimeter lock */ spinlock_t isr_lock; /* per-device ISR synchronization lock */ + /* tx flush */ + wait_queue_head_t tx_flush_wq; /* timer related fields */ atomic_t callbacks; /* # outstanding callback functions */ @@ -100,7 +102,6 @@ extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl, extern void brcms_free_timer(struct brcms_timer *timer); extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic); extern bool brcms_del_timer(struct brcms_timer *timer); -extern void brcms_msleep(struct brcms_info *wl, uint ms); extern void brcms_dpc(unsigned long data); extern void brcms_timer(struct brcms_timer *t); extern void brcms_fatal_error(struct brcms_info *wl); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c index 17594de4199..8b5839008af 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c @@ -1027,7 +1027,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs) static bool brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) { - bool morepending = false; struct bcma_device *core; struct tx_status txstatus, *txs; u32 s1, s2; @@ -1041,23 +1040,20 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) txs = &txstatus; core = wlc_hw->d11core; *fatal = false; - s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); - while (!(*fatal) - && (s1 & TXS_V)) { - /* !give others some time to run! 
*/ - if (n >= max_tx_num) { - morepending = true; - break; - } + while (n < max_tx_num) { + s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); if (s1 == 0xffffffff) { brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit, __func__); *fatal = true; return false; } - s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2)); + /* only process when valid */ + if (!(s1 & TXS_V)) + break; + s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2)); txs->status = s1 & TXS_STATUS_MASK; txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT; txs->sequence = s2 & TXS_SEQ_MASK; @@ -1065,15 +1061,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) txs->lasttxtime = 0; *fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs); - - s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); + if (*fatal == true) + return false; n++; } - if (*fatal) - return false; - - return morepending; + return n >= max_tx_num; } static void brcms_c_tbtt(struct brcms_c_info *wlc) @@ -7518,25 +7511,16 @@ int brcms_c_get_curband(struct brcms_c_info *wlc) return wlc->band->bandunit; } -void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop) +bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc) { - int timeout = 20; int i; /* Kick DMA to send any pending AMPDU */ for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++) if (wlc->hw->di[i]) - dma_txflush(wlc->hw->di[i]); - - /* wait for queue and DMA fifos to run dry */ - while (brcms_txpktpendtot(wlc) > 0) { - brcms_msleep(wlc->wl, 1); - - if (--timeout == 0) - break; - } + dma_kick_tx(wlc->hw->di[i]); - WARN_ON_ONCE(timeout == 0); + return !brcms_txpktpendtot(wlc); } void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval) diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h index 4fb2834f4e6..b0f14b7b861 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h +++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h @@ -314,8 +314,6 @@ extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state); extern void brcms_c_scan_start(struct brcms_c_info *wlc); extern void brcms_c_scan_stop(struct brcms_c_info *wlc); extern int brcms_c_get_curband(struct brcms_c_info *wlc); -extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, - bool drop); extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel); extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl); extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc, @@ -332,5 +330,6 @@ extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr); extern int brcms_c_get_tx_power(struct brcms_c_info *wlc); extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc); extern void brcms_c_mute(struct brcms_c_info *wlc, bool on); +extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc); #endif /* _BRCM_PUB_H_ */ diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index 31534f7c054..279796419ea 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -1153,6 +1153,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, next_reclaimed = ssn; } + if (tid != IWL_TID_NON_QOS) { + priv->tid_data[sta_id][tid].next_reclaimed = + next_reclaimed; + IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", + next_reclaimed); + } + iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs); iwlagn_check_ratid_empty(priv, sta_id, tid); @@ -1203,28 +1210,11 @@ int iwlagn_rx_reply_tx(struct iwl_priv 
*priv, struct iwl_rx_cmd_buffer *rxb, if (!is_agg) iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1); - /* - * W/A for FW bug - the seq_ctl isn't updated when the - * queues are flushed. Fetch it from the packet itself - */ - if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) { - next_reclaimed = le16_to_cpu(hdr->seq_ctrl); - next_reclaimed = - SEQ_TO_SN(next_reclaimed + 0x10); - } - is_offchannel_skb = (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN); freed++; } - if (tid != IWL_TID_NON_QOS) { - priv->tid_data[sta_id][tid].next_reclaimed = - next_reclaimed; - IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", - next_reclaimed); - } - WARN_ON(!is_agg && freed != 1); /* diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index 9189a32b784..973a9d90e9e 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -1563,7 +1563,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n", scan_rsp->number_of_sets); ret = -1; - goto done; + goto check_next_scan; } bytes_left = le16_to_cpu(scan_rsp->bss_descript_size); @@ -1634,7 +1634,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, if (!beacon_size || beacon_size > bytes_left) { bss_info += bytes_left; bytes_left = 0; - return -1; + ret = -1; + goto check_next_scan; } /* Initialize the current working beacon pointer for this BSS @@ -1690,7 +1691,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, dev_err(priv->adapter->dev, "%s: bytes left < IE length\n", __func__); - goto done; + goto check_next_scan; } if (element_id == WLAN_EID_DS_PARAMS) { channel = *(current_ptr + sizeof(struct ieee_types_header)); @@ -1753,6 +1754,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, } } +check_next_scan: spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); if (list_empty(&adapter->scan_pending_q)) { spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); @@ -1813,7 +1815,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, } } -done: return ret; } diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 4494d130b37..0f8b05185ed 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c @@ -1004,7 +1004,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) is_tx ? "Tx" : "Rx"); if (is_tx) { - rtl_lps_leave(hw); + schedule_work(&rtlpriv-> + works.lps_leave_work); ppsc->last_delaylps_stamp_jiffies = jiffies; } @@ -1014,7 +1015,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) } } else if (ETH_P_ARP == ether_type) { if (is_tx) { - rtl_lps_leave(hw); + schedule_work(&rtlpriv->works.lps_leave_work); ppsc->last_delaylps_stamp_jiffies = jiffies; } @@ -1024,7 +1025,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) "802.1X %s EAPOL pkt!!\n", is_tx ? 
"Tx" : "Rx"); if (is_tx) { - rtl_lps_leave(hw); + schedule_work(&rtlpriv->works.lps_leave_work); ppsc->last_delaylps_stamp_jiffies = jiffies; } diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index f2ecdeb3a90..1535efda3d5 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb) WARN_ON(skb_queue_empty(&rx_queue)); while (!skb_queue_empty(&rx_queue)) { _skb = skb_dequeue(&rx_queue); - _rtl_usb_rx_process_agg(hw, skb); - ieee80211_rx_irqsafe(hw, skb); + _rtl_usb_rx_process_agg(hw, _skb); + ieee80211_rx_irqsafe(hw, _skb); } } diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 94b79c3338c..9d7f1723dd8 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb); /* Notify xenvif that ring now has space to send an skb to the frontend */ void xenvif_notify_tx_completion(struct xenvif *vif); +/* Prevent the device from generating any further traffic. */ +void xenvif_carrier_off(struct xenvif *vif); + /* Returns number of ring slots required to send an skb to the frontend */ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index b7d41f8c338..b8c5193bd42 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -343,17 +343,22 @@ err: return err; } -void xenvif_disconnect(struct xenvif *vif) +void xenvif_carrier_off(struct xenvif *vif) { struct net_device *dev = vif->dev; - if (netif_carrier_ok(dev)) { - rtnl_lock(); - netif_carrier_off(dev); /* discard queued packets */ - if (netif_running(dev)) - xenvif_down(vif); - rtnl_unlock(); - xenvif_put(vif); - } + + rtnl_lock(); + netif_carrier_off(dev); /* discard queued packets */ + if (netif_running(dev)) + xenvif_down(vif); + rtnl_unlock(); + xenvif_put(vif); +} + +void xenvif_disconnect(struct xenvif *vif) +{ + if (netif_carrier_ok(vif->dev)) + xenvif_carrier_off(vif); atomic_dec(&vif->refcnt); wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index f2d6b78d901..2b9520c46e9 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif) atomic_dec(&netbk->netfront_count); } -static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx); +static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, + u8 status); static void make_tx_response(struct xenvif *vif, struct xen_netif_tx_request *txp, s8 st); @@ -879,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif, do { make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); - if (cons >= end) + if (cons == end) break; txp = RING_GET_REQUEST(&vif->tx, cons++); } while (1); @@ -888,6 +889,13 @@ static void netbk_tx_err(struct xenvif *vif, xenvif_put(vif); } +static void netbk_fatal_tx_err(struct xenvif *vif) +{ + netdev_err(vif->dev, "fatal error; disabling device\n"); + xenvif_carrier_off(vif); + xenvif_put(vif); +} + static int netbk_count_requests(struct xenvif *vif, struct xen_netif_tx_request *first, struct xen_netif_tx_request *txp, @@ -901,19 +909,22 @@ static int netbk_count_requests(struct xenvif *vif, do { if (frags >= 
work_to_do) { - netdev_dbg(vif->dev, "Need more frags\n"); + netdev_err(vif->dev, "Need more frags\n"); + netbk_fatal_tx_err(vif); return -frags; } if (unlikely(frags >= MAX_SKB_FRAGS)) { - netdev_dbg(vif->dev, "Too many frags\n"); + netdev_err(vif->dev, "Too many frags\n"); + netbk_fatal_tx_err(vif); return -frags; } memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags), sizeof(*txp)); if (txp->size > first->size) { - netdev_dbg(vif->dev, "Frags galore\n"); + netdev_err(vif->dev, "Frag is bigger than frame.\n"); + netbk_fatal_tx_err(vif); return -frags; } @@ -921,8 +932,9 @@ static int netbk_count_requests(struct xenvif *vif, frags++; if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { - netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n", + netdev_err(vif->dev, "txp->offset: %x, size: %u\n", txp->offset, txp->size); + netbk_fatal_tx_err(vif); return -frags; } } while ((txp++)->flags & XEN_NETTXF_more_data); @@ -966,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, pending_idx = netbk->pending_ring[index]; page = xen_netbk_alloc_page(netbk, skb, pending_idx); if (!page) - return NULL; + goto err; gop->source.u.ref = txp->gref; gop->source.domid = vif->domid; @@ -988,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, } return gop; +err: + /* Unwind, freeing all pages and sending error responses. */ + while (i-- > start) { + xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]), + XEN_NETIF_RSP_ERROR); + } + /* The head too, if necessary. */ + if (start) + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); + + return NULL; } static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, @@ -996,30 +1019,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, { struct gnttab_copy *gop = *gopp; u16 pending_idx = *((u16 *)skb->data); - struct pending_tx_info *pending_tx_info = netbk->pending_tx_info; - struct xenvif *vif = pending_tx_info[pending_idx].vif; - struct xen_netif_tx_request *txp; struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; int i, err, start; /* Check status of header. */ err = gop->status; - if (unlikely(err)) { - pending_ring_idx_t index; - index = pending_index(netbk->pending_prod++); - txp = &pending_tx_info[pending_idx].req; - make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); - netbk->pending_ring[index] = pending_idx; - xenvif_put(vif); - } + if (unlikely(err)) + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); /* Skip first skb fragment if it is on same page as header fragment. */ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); for (i = start; i < nr_frags; i++) { int j, newerr; - pending_ring_idx_t index; pending_idx = frag_get_pending_idx(&shinfo->frags[i]); @@ -1028,16 +1041,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, if (likely(!newerr)) { /* Had a previous error? Invalidate this fragment. */ if (unlikely(err)) - xen_netbk_idx_release(netbk, pending_idx); + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); continue; } /* Error on this fragment: respond to client with an error. */ - txp = &netbk->pending_tx_info[pending_idx].req; - make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); - index = pending_index(netbk->pending_prod++); - netbk->pending_ring[index] = pending_idx; - xenvif_put(vif); + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); /* Not the first error? Preceding frags already invalidated. 
*/ if (err) @@ -1045,10 +1054,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, /* First error: invalidate header and preceding fragments. */ pending_idx = *((u16 *)skb->data); - xen_netbk_idx_release(netbk, pending_idx); + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); for (j = start; j < i; j++) { pending_idx = frag_get_pending_idx(&shinfo->frags[j]); - xen_netbk_idx_release(netbk, pending_idx); + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); } /* Remember the error: invalidate all subsequent fragments. */ @@ -1082,7 +1091,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb) /* Take an extra reference to offset xen_netbk_idx_release */ get_page(netbk->mmap_pages[pending_idx]); - xen_netbk_idx_release(netbk, pending_idx); + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); } } @@ -1095,7 +1104,8 @@ static int xen_netbk_get_extras(struct xenvif *vif, do { if (unlikely(work_to_do-- <= 0)) { - netdev_dbg(vif->dev, "Missing extra info\n"); + netdev_err(vif->dev, "Missing extra info\n"); + netbk_fatal_tx_err(vif); return -EBADR; } @@ -1104,8 +1114,9 @@ static int xen_netbk_get_extras(struct xenvif *vif, if (unlikely(!extra.type || extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { vif->tx.req_cons = ++cons; - netdev_dbg(vif->dev, + netdev_err(vif->dev, "Invalid extra type: %d\n", extra.type); + netbk_fatal_tx_err(vif); return -EINVAL; } @@ -1121,13 +1132,15 @@ static int netbk_set_skb_gso(struct xenvif *vif, struct xen_netif_extra_info *gso) { if (!gso->u.gso.size) { - netdev_dbg(vif->dev, "GSO size must not be zero.\n"); + netdev_err(vif->dev, "GSO size must not be zero.\n"); + netbk_fatal_tx_err(vif); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { - netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); + netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); + netbk_fatal_tx_err(vif); return -EINVAL; } @@ -1264,9 +1277,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) /* Get a netif from the list with work to do. */ vif = poll_net_schedule_list(netbk); + /* This can sometimes happen because the test of + * list_empty(net_schedule_list) at the top of the + * loop is unlocked. Just go back and have another + * look. + */ if (!vif) continue; + if (vif->tx.sring->req_prod - vif->tx.req_cons > + XEN_NETIF_TX_RING_SIZE) { + netdev_err(vif->dev, + "Impossible number of requests. " + "req_prod %d, req_cons %d, size %ld\n", + vif->tx.sring->req_prod, vif->tx.req_cons, + XEN_NETIF_TX_RING_SIZE); + netbk_fatal_tx_err(vif); + continue; + } + RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); if (!work_to_do) { xenvif_put(vif); @@ -1294,17 +1323,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) work_to_do = xen_netbk_get_extras(vif, extras, work_to_do); idx = vif->tx.req_cons; - if (unlikely(work_to_do < 0)) { - netbk_tx_err(vif, &txreq, idx); + if (unlikely(work_to_do < 0)) continue; - } } ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); - if (unlikely(ret < 0)) { - netbk_tx_err(vif, &txreq, idx - ret); + if (unlikely(ret < 0)) continue; - } + idx += ret; if (unlikely(txreq.size < ETH_HLEN)) { @@ -1316,11 +1342,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) /* No crossing a page as the payload mustn't fragment. 
*/ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { - netdev_dbg(vif->dev, + netdev_err(vif->dev, "txreq.offset: %x, size: %u, end: %lu\n", txreq.offset, txreq.size, (txreq.offset&~PAGE_MASK) + txreq.size); - netbk_tx_err(vif, &txreq, idx); + netbk_fatal_tx_err(vif); continue; } @@ -1348,8 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (netbk_set_skb_gso(vif, skb, gso)) { + /* Failure in netbk_set_skb_gso is fatal. */ kfree_skb(skb); - netbk_tx_err(vif, &txreq, idx); continue; } } @@ -1448,7 +1474,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk) txp->size -= data_len; } else { /* Schedule a response immediately. */ - xen_netbk_idx_release(netbk, pending_idx); + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); } if (txp->flags & XEN_NETTXF_csum_blank) @@ -1500,7 +1526,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk) xen_netbk_tx_submit(netbk); } -static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx) +static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, + u8 status) { struct xenvif *vif; struct pending_tx_info *pending_tx_info; @@ -1514,7 +1541,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx) vif = pending_tx_info->vif; - make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY); + make_tx_response(vif, &pending_tx_info->req, status); index = pending_index(netbk->pending_prod++); netbk->pending_ring[index] = pending_idx; diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index efaecefe3f8..a5f3c8ca480 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -184,8 +184,8 @@ config PINCTRL_SAMSUNG select PINMUX select PINCONF -config PINCTRL_EXYNOS4 - bool "Pinctrl driver data for Exynos4 SoC" +config PINCTRL_EXYNOS + bool "Pinctrl driver data for Samsung EXYNOS SoCs" depends on OF && GPIOLIB select PINCTRL_SAMSUNG diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index fc4606f27dc..6e87e52eab5 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -36,7 +36,7 @@ obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o -obj-$(CONFIG_PINCTRL_EXYNOS4) += pinctrl-exynos.o +obj-$(CONFIG_PINCTRL_EXYNOS) += pinctrl-exynos.o obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c index 498b2ba905d..d02498b30c6 100644 --- a/drivers/pinctrl/pinctrl-sirf.c +++ b/drivers/pinctrl/pinctrl-sirf.c @@ -1246,6 +1246,22 @@ static void __iomem *sirfsoc_rsc_of_iomap(void) return of_iomap(np, 0); } +static int sirfsoc_gpio_of_xlate(struct gpio_chip *gc, + const struct of_phandle_args *gpiospec, + u32 *flags) +{ + if (gpiospec->args[0] > SIRFSOC_GPIO_NO_OF_BANKS * SIRFSOC_GPIO_BANK_SIZE) + return -EINVAL; + + if (gc != &sgpio_bank[gpiospec->args[0] / SIRFSOC_GPIO_BANK_SIZE].chip.gc) + return -EINVAL; + + if (flags) + *flags = gpiospec->args[1]; + + return gpiospec->args[0] % SIRFSOC_GPIO_BANK_SIZE; +} + static int sirfsoc_pinmux_probe(struct platform_device *pdev) { int ret; @@ -1736,6 +1752,8 @@ static int sirfsoc_gpio_probe(struct device_node *np) bank->chip.gc.ngpio = SIRFSOC_GPIO_BANK_SIZE; bank->chip.gc.label = kstrdup(np->full_name, 
GFP_KERNEL); bank->chip.gc.of_node = np; + bank->chip.gc.of_xlate = sirfsoc_gpio_of_xlate; + bank->chip.gc.of_gpio_n_cells = 2; bank->chip.regs = regs; bank->id = i; bank->is_marco = is_marco; diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c index b85040caaea..cca18a3c029 100644 --- a/drivers/regulator/max77686.c +++ b/drivers/regulator/max77686.c @@ -379,9 +379,10 @@ static struct regulator_desc regulators[] = { }; #ifdef CONFIG_OF -static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, +static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev, struct max77686_platform_data *pdata) { + struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent); struct device_node *pmic_np, *regulators_np; struct max77686_regulator_data *rdata; struct of_regulator_match rmatch; @@ -390,15 +391,15 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, pmic_np = iodev->dev->of_node; regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators"); if (!regulators_np) { - dev_err(iodev->dev, "could not find regulators sub-node\n"); + dev_err(&pdev->dev, "could not find regulators sub-node\n"); return -EINVAL; } pdata->num_regulators = ARRAY_SIZE(regulators); - rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) * + rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) * pdata->num_regulators, GFP_KERNEL); if (!rdata) { - dev_err(iodev->dev, + dev_err(&pdev->dev, "could not allocate memory for regulator data\n"); return -ENOMEM; } @@ -407,7 +408,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, rmatch.name = regulators[i].name; rmatch.init_data = NULL; rmatch.of_node = NULL; - of_regulator_match(iodev->dev, regulators_np, &rmatch, 1); + of_regulator_match(&pdev->dev, regulators_np, &rmatch, 1); rdata[i].initdata = rmatch.init_data; rdata[i].of_node = rmatch.of_node; } @@ -417,7 +418,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, return 0; } #else -static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, +static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev, struct max77686_platform_data *pdata) { return 0; @@ -440,7 +441,7 @@ static int max77686_pmic_probe(struct platform_device *pdev) } if (iodev->dev->of_node) { - ret = max77686_pmic_dt_parse_pdata(iodev, pdata); + ret = max77686_pmic_dt_parse_pdata(pdev, pdata); if (ret) return ret; } diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c index d1a77512d83..d40cf7fdb54 100644 --- a/drivers/regulator/max8907-regulator.c +++ b/drivers/regulator/max8907-regulator.c @@ -237,8 +237,7 @@ static int max8907_regulator_parse_dt(struct platform_device *pdev) return -EINVAL; } - ret = of_regulator_match(pdev->dev.parent, regulators, - max8907_matches, + ret = of_regulator_match(&pdev->dev, regulators, max8907_matches, ARRAY_SIZE(max8907_matches)); if (ret < 0) { dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c index 02be7fcae32..836908ce505 100644 --- a/drivers/regulator/max8997.c +++ b/drivers/regulator/max8997.c @@ -934,7 +934,7 @@ static struct regulator_desc regulators[] = { }; #ifdef CONFIG_OF -static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev, +static int max8997_pmic_dt_parse_dvs_gpio(struct platform_device *pdev, struct max8997_platform_data *pdata, struct device_node *pmic_np) { @@ -944,7 +944,7 @@ static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev, gpio = 
of_get_named_gpio(pmic_np, "max8997,pmic-buck125-dvs-gpios", i); if (!gpio_is_valid(gpio)) { - dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio); + dev_err(&pdev->dev, "invalid gpio[%d]: %d\n", i, gpio); return -EINVAL; } pdata->buck125_gpios[i] = gpio; @@ -952,22 +952,23 @@ static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev, return 0; } -static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, +static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev, struct max8997_platform_data *pdata) { + struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent); struct device_node *pmic_np, *regulators_np, *reg_np; struct max8997_regulator_data *rdata; unsigned int i, dvs_voltage_nr = 1, ret; pmic_np = iodev->dev->of_node; if (!pmic_np) { - dev_err(iodev->dev, "could not find pmic sub-node\n"); + dev_err(&pdev->dev, "could not find pmic sub-node\n"); return -ENODEV; } regulators_np = of_find_node_by_name(pmic_np, "regulators"); if (!regulators_np) { - dev_err(iodev->dev, "could not find regulators sub-node\n"); + dev_err(&pdev->dev, "could not find regulators sub-node\n"); return -EINVAL; } @@ -976,11 +977,10 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, for_each_child_of_node(regulators_np, reg_np) pdata->num_regulators++; - rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) * + rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) * pdata->num_regulators, GFP_KERNEL); if (!rdata) { - dev_err(iodev->dev, "could not allocate memory for " - "regulator data\n"); + dev_err(&pdev->dev, "could not allocate memory for regulator data\n"); return -ENOMEM; } @@ -991,14 +991,14 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, break; if (i == ARRAY_SIZE(regulators)) { - dev_warn(iodev->dev, "don't know how to configure " - "regulator %s\n", reg_np->name); + dev_warn(&pdev->dev, "don't know how to configure regulator %s\n", + reg_np->name); continue; } rdata->id = i; - rdata->initdata = of_get_regulator_init_data( - iodev->dev, reg_np); + rdata->initdata = of_get_regulator_init_data(&pdev->dev, + reg_np); rdata->reg_node = reg_np; rdata++; } @@ -1014,7 +1014,7 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs || pdata->buck5_gpiodvs) { - ret = max8997_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np); + ret = max8997_pmic_dt_parse_dvs_gpio(pdev, pdata, pmic_np); if (ret) return -EINVAL; @@ -1025,8 +1025,7 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, } else { if (pdata->buck125_default_idx >= 8) { pdata->buck125_default_idx = 0; - dev_info(iodev->dev, "invalid value for " - "default dvs index, using 0 instead\n"); + dev_info(&pdev->dev, "invalid value for default dvs index, using 0 instead\n"); } } @@ -1040,28 +1039,28 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, if (of_property_read_u32_array(pmic_np, "max8997,pmic-buck1-dvs-voltage", pdata->buck1_voltage, dvs_voltage_nr)) { - dev_err(iodev->dev, "buck1 voltages not specified\n"); + dev_err(&pdev->dev, "buck1 voltages not specified\n"); return -EINVAL; } if (of_property_read_u32_array(pmic_np, "max8997,pmic-buck2-dvs-voltage", pdata->buck2_voltage, dvs_voltage_nr)) { - dev_err(iodev->dev, "buck2 voltages not specified\n"); + dev_err(&pdev->dev, "buck2 voltages not specified\n"); return -EINVAL; } if (of_property_read_u32_array(pmic_np, "max8997,pmic-buck5-dvs-voltage", pdata->buck5_voltage, dvs_voltage_nr)) { - dev_err(iodev->dev, "buck5 voltages not specified\n"); + 
dev_err(&pdev->dev, "buck5 voltages not specified\n"); return -EINVAL; } return 0; } #else -static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, +static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev, struct max8997_platform_data *pdata) { return 0; @@ -1085,7 +1084,7 @@ static int max8997_pmic_probe(struct platform_device *pdev) } if (iodev->dev->of_node) { - ret = max8997_pmic_dt_parse_pdata(iodev, pdata); + ret = max8997_pmic_dt_parse_pdata(pdev, pdata); if (ret) return ret; } diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c index 1f0df4046b8..0a8dd1cbee6 100644 --- a/drivers/regulator/max8998.c +++ b/drivers/regulator/max8998.c @@ -65,7 +65,7 @@ static const struct voltage_map_desc ldo9_voltage_map_desc = { .min = 2800000, .step = 100000, .max = 3100000, }; static const struct voltage_map_desc ldo10_voltage_map_desc = { - .min = 95000, .step = 50000, .max = 1300000, + .min = 950000, .step = 50000, .max = 1300000, }; static const struct voltage_map_desc ldo1213_voltage_map_desc = { .min = 800000, .step = 100000, .max = 3300000, diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 6f684916fd7..66ca769287a 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -120,6 +120,12 @@ int of_regulator_match(struct device *dev, struct device_node *node, if (!dev || !node) return -EINVAL; + for (i = 0; i < num_matches; i++) { + struct of_regulator_match *match = &matches[i]; + match->init_data = NULL; + match->of_node = NULL; + } + for_each_child_of_node(node, child) { name = of_get_property(child, "regulator-compatible", NULL); diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index bd062a2ffbe..cd9ea2ea182 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -174,9 +174,9 @@ static struct regulator_ops s2mps11_buck_ops = { .min_uV = S2MPS11_BUCK_MIN2, \ .uV_step = S2MPS11_BUCK_STEP2, \ .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \ - .vsel_reg = S2MPS11_REG_B9CTRL2, \ + .vsel_reg = S2MPS11_REG_B10CTRL2, \ .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \ - .enable_reg = S2MPS11_REG_B9CTRL1, \ + .enable_reg = S2MPS11_REG_B10CTRL1, \ .enable_mask = S2MPS11_ENABLE_MASK \ } diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c index 73dce766412..df395187c06 100644 --- a/drivers/regulator/tps65217-regulator.c +++ b/drivers/regulator/tps65217-regulator.c @@ -305,8 +305,8 @@ static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev) if (!regs) return NULL; - count = of_regulator_match(pdev->dev.parent, regs, - reg_matches, TPS65217_NUM_REGULATOR); + count = of_regulator_match(&pdev->dev, regs, reg_matches, + TPS65217_NUM_REGULATOR); of_node_put(regs); if ((count < 0) || (count > TPS65217_NUM_REGULATOR)) return NULL; diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c index 59c3770fa77..b0e4c0bc85c 100644 --- a/drivers/regulator/tps65910-regulator.c +++ b/drivers/regulator/tps65910-regulator.c @@ -998,7 +998,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data( return NULL; } - ret = of_regulator_match(pdev->dev.parent, regulators, matches, count); + ret = of_regulator_match(&pdev->dev, regulators, matches, count); if (ret < 0) { dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret); diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c index afb7cfa85cc..c016ad81767 100644 --- a/drivers/rtc/rtc-isl1208.c +++ 
b/drivers/rtc/rtc-isl1208.c @@ -506,6 +506,7 @@ isl1208_rtc_interrupt(int irq, void *data) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); struct i2c_client *client = data; + struct rtc_device *rtc = i2c_get_clientdata(client); int handled = 0, sr, err; /* @@ -528,6 +529,8 @@ isl1208_rtc_interrupt(int irq, void *data) if (sr & ISL1208_REG_SR_ALM) { dev_dbg(&client->dev, "alarm!\n"); + rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF); + /* Clear the alarm */ sr &= ~ISL1208_REG_SR_ALM; sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr); diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index 08378e3cc21..10c1a3454e4 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -44,6 +44,7 @@ #define RTC_YMR 0x34 /* Year match register */ #define RTC_YLR 0x38 /* Year data load register */ +#define RTC_CR_EN (1 << 0) /* counter enable bit */ #define RTC_CR_CWEN (1 << 26) /* Clockwatch enable bit */ #define RTC_TCR_EN (1 << 1) /* Periodic timer enable bit */ @@ -320,7 +321,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id) struct pl031_local *ldata; struct pl031_vendor_data *vendor = id->data; struct rtc_class_ops *ops = &vendor->ops; - unsigned long time; + unsigned long time, data; ret = amba_request_regions(adev, NULL); if (ret) @@ -345,10 +346,11 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id) dev_dbg(&adev->dev, "designer ID = 0x%02x\n", amba_manf(adev)); dev_dbg(&adev->dev, "revision = 0x%01x\n", amba_rev(adev)); + data = readl(ldata->base + RTC_CR); /* Enable the clockwatch on ST Variants */ if (vendor->clockwatch) - writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN, - ldata->base + RTC_CR); + data |= RTC_CR_CWEN; + writel(data | RTC_CR_EN, ldata->base + RTC_CR); /* * On ST PL031 variants, the RTC reset value does not provide correct diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c index 00c930f4b6f..2730533e2d2 100644 --- a/drivers/rtc/rtc-vt8500.c +++ b/drivers/rtc/rtc-vt8500.c @@ -137,7 +137,7 @@ static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm) return -EINVAL; } - writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S) + writel((bin2bcd(tm->tm_year % 100) << DATE_YEAR_S) | (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S) | (bin2bcd(tm->tm_mday)) | ((tm->tm_year >= 200) << DATE_CENTURY_S), diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c index 97ac0a38e3d..eb2753008ef 100644 --- a/drivers/ssb/driver_gpio.c +++ b/drivers/ssb/driver_gpio.c @@ -174,3 +174,15 @@ int ssb_gpio_init(struct ssb_bus *bus) return -1; } + +int ssb_gpio_unregister(struct ssb_bus *bus) +{ + if (ssb_chipco_available(&bus->chipco) || + ssb_extif_available(&bus->extif)) { + return gpiochip_remove(&bus->gpio); + } else { + SSB_WARN_ON(1); + } + + return -1; +} diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index 772ad9b5c30..24dc331b470 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c @@ -443,6 +443,15 @@ static void ssb_devices_unregister(struct ssb_bus *bus) void ssb_bus_unregister(struct ssb_bus *bus) { + int err; + + err = ssb_gpio_unregister(bus); + if (err == -EBUSY) + ssb_dprintk(KERN_ERR PFX "Some GPIOs are still in use.\n"); + else if (err) + ssb_dprintk(KERN_ERR PFX + "Can not unregister GPIO driver: %i\n", err); + ssb_buses_lock(); ssb_devices_unregister(bus); list_del(&bus->list); diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h index 6c10b66c796..da38305a2d2 100644 --- a/drivers/ssb/ssb_private.h +++ b/drivers/ssb/ssb_private.h @@ -252,11 
+252,16 @@ static inline void ssb_extif_init(struct ssb_extif *extif) #ifdef CONFIG_SSB_DRIVER_GPIO extern int ssb_gpio_init(struct ssb_bus *bus); +extern int ssb_gpio_unregister(struct ssb_bus *bus); #else /* CONFIG_SSB_DRIVER_GPIO */ static inline int ssb_gpio_init(struct ssb_bus *bus) { return -ENOTSUPP; } +static inline int ssb_gpio_unregister(struct ssb_bus *bus) +{ + return 0; +} #endif /* CONFIG_SSB_DRIVER_GPIO */ #endif /* LINUX_SSB_PRIVATE_H_ */ diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index e2695101bb9..f2aa7543d20 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -941,6 +941,8 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) { + int block_size = dev->dev_attrib.block_size; + if (dev->export_count) { pr_err("dev[%p]: Unable to change SE Device" " fabric_max_sectors while export_count is %d\n", @@ -978,8 +980,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) /* * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() */ + if (!block_size) { + block_size = 512; + pr_warn("Defaulting to 512 for zero block_size\n"); + } fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, - dev->dev_attrib.block_size); + block_size); dev->dev_attrib.fabric_max_sectors = fabric_max_sectors; pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 810263dfa4a..c57bbbc7a7d 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -754,6 +754,11 @@ static int target_fabric_port_link( return -EFAULT; } + if (!(dev->dev_flags & DF_CONFIGURED)) { + pr_err("se_device not configured yet, cannot port link\n"); + return -ENODEV; + } + tpg_ci = &lun_ci->ci_parent->ci_group->cg_item; se_tpg = container_of(to_config_group(tpg_ci), struct se_portal_group, tpg_group); diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 26a6d183ccb..a664c664a31 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -58,11 +58,10 @@ sbc_emulate_readcapacity(struct se_cmd *cmd) buf[7] = dev->dev_attrib.block_size & 0xff; rbuf = transport_kmap_data_sg(cmd); - if (!rbuf) - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - - memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); - transport_kunmap_data_sg(cmd); + if (rbuf) { + memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); + transport_kunmap_data_sg(cmd); + } target_complete_cmd(cmd, GOOD); return 0; @@ -97,11 +96,10 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd) buf[14] = 0x80; rbuf = transport_kmap_data_sg(cmd); - if (!rbuf) - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - - memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); - transport_kunmap_data_sg(cmd); + if (rbuf) { + memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); + transport_kunmap_data_sg(cmd); + } target_complete_cmd(cmd, GOOD); return 0; diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 84f9e96e8ac..2d88f087d96 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -641,11 +641,10 @@ spc_emulate_inquiry(struct se_cmd *cmd) out: rbuf = transport_kmap_data_sg(cmd); - if (!rbuf) - return 
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - - memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); - transport_kunmap_data_sg(cmd); + if (rbuf) { + memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); + transport_kunmap_data_sg(cmd); + } if (!ret) target_complete_cmd(cmd, GOOD); @@ -851,7 +850,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; char *cdb = cmd->t_task_cdb; - unsigned char *buf, *map_buf; + unsigned char buf[SE_MODE_PAGE_BUF], *rbuf; int type = dev->transport->get_device_type(dev); int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10); bool dbd = !!(cdb[1] & 0x08); @@ -863,26 +862,8 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) int ret; int i; - map_buf = transport_kmap_data_sg(cmd); - if (!map_buf) - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - /* - * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we - * know we actually allocated a full page. Otherwise, if the - * data buffer is too small, allocate a temporary buffer so we - * don't have to worry about overruns in all our INQUIRY - * emulation handling. - */ - if (cmd->data_length < SE_MODE_PAGE_BUF && - (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { - buf = kzalloc(SE_MODE_PAGE_BUF, GFP_KERNEL); - if (!buf) { - transport_kunmap_data_sg(cmd); - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - } - } else { - buf = map_buf; - } + memset(buf, 0, SE_MODE_PAGE_BUF); + /* * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for * MODE_SENSE_10 and byte 2 for MODE_SENSE (6). @@ -934,8 +915,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) if (page == 0x3f) { if (subpage != 0x00 && subpage != 0xff) { pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage); - kfree(buf); - transport_kunmap_data_sg(cmd); return TCM_INVALID_CDB_FIELD; } @@ -972,7 +951,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", page, subpage); - transport_kunmap_data_sg(cmd); return TCM_UNKNOWN_MODE_PAGE; set_length: @@ -981,12 +959,12 @@ set_length: else buf[0] = length - 1; - if (buf != map_buf) { - memcpy(map_buf, buf, cmd->data_length); - kfree(buf); + rbuf = transport_kmap_data_sg(cmd); + if (rbuf) { + memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length)); + transport_kunmap_data_sg(cmd); } - transport_kunmap_data_sg(cmd); target_complete_cmd(cmd, GOOD); return 0; } diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 4225d5e7213..8e64adf8e4d 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -39,6 +39,7 @@ #include <asm/unaligned.h> #include <linux/platform_device.h> #include <linux/workqueue.h> +#include <linux/pm_runtime.h> #include <linux/usb.h> #include <linux/usb/hcd.h> @@ -1025,6 +1026,49 @@ static int register_root_hub(struct usb_hcd *hcd) return retval; } +/* + * usb_hcd_start_port_resume - a root-hub port is sending a resume signal + * @bus: the bus which the root hub belongs to + * @portnum: the port which is being resumed + * + * HCDs should call this function when they know that a resume signal is + * being sent to a root-hub port. The root hub will be prevented from + * going into autosuspend until usb_hcd_end_port_resume() is called. + * + * The bus's private lock must be held by the caller. 
+ */ +void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum) +{ + unsigned bit = 1 << portnum; + + if (!(bus->resuming_ports & bit)) { + bus->resuming_ports |= bit; + pm_runtime_get_noresume(&bus->root_hub->dev); + } +} +EXPORT_SYMBOL_GPL(usb_hcd_start_port_resume); + +/* + * usb_hcd_end_port_resume - a root-hub port has stopped sending a resume signal + * @bus: the bus which the root hub belongs to + * @portnum: the port which is being resumed + * + * HCDs should call this function when they know that a resume signal has + * stopped being sent to a root-hub port. The root hub will be allowed to + * autosuspend again. + * + * The bus's private lock must be held by the caller. + */ +void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum) +{ + unsigned bit = 1 << portnum; + + if (bus->resuming_ports & bit) { + bus->resuming_ports &= ~bit; + pm_runtime_put_noidle(&bus->root_hub->dev); + } +} +EXPORT_SYMBOL_GPL(usb_hcd_end_port_resume); /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 957ed2c4148..cbf7168e3ce 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -2838,6 +2838,23 @@ void usb_enable_ltm(struct usb_device *udev) EXPORT_SYMBOL_GPL(usb_enable_ltm); #ifdef CONFIG_USB_SUSPEND +/* + * usb_disable_function_remotewakeup - disable usb3.0 + * device's function remote wakeup + * @udev: target device + * + * Assume there's only one function on the USB 3.0 + * device and disable remote wake for the first + * interface. FIXME if the interface association + * descriptor shows there's more than one function. + */ +static int usb_disable_function_remotewakeup(struct usb_device *udev) +{ + return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE, + USB_INTRF_FUNC_SUSPEND, 0, NULL, 0, + USB_CTRL_SET_TIMEOUT); +} /* * usb_port_suspend - suspend a usb device's upstream port @@ -2955,12 +2972,19 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n", port1, status); /* paranoia: "should not happen" */ - if (udev->do_remote_wakeup) - (void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0), - USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, - USB_DEVICE_REMOTE_WAKEUP, 0, - NULL, 0, - USB_CTRL_SET_TIMEOUT); + if (udev->do_remote_wakeup) { + if (!hub_is_superspeed(hub->hdev)) { + (void) usb_control_msg(udev, + usb_sndctrlpipe(udev, 0), + USB_REQ_CLEAR_FEATURE, + USB_RECIP_DEVICE, + USB_DEVICE_REMOTE_WAKEUP, 0, + NULL, 0, + USB_CTRL_SET_TIMEOUT); + } else + (void) usb_disable_function_remotewakeup(udev); + + } /* Try to enable USB2 hardware LPM again */ if (udev->usb2_hw_lpm_capable == 1) @@ -3052,20 +3076,30 @@ static int finish_port_resume(struct usb_device *udev) * udev->reset_resume */ } else if (udev->actconfig && !udev->reset_resume) { - le16_to_cpus(&devstatus); - if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) { - status = usb_control_msg(udev, - usb_sndctrlpipe(udev, 0), - USB_REQ_CLEAR_FEATURE, + if (!hub_is_superspeed(udev->parent)) { + le16_to_cpus(&devstatus); + if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) + status = usb_control_msg(udev, + usb_sndctrlpipe(udev, 0), + USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, - USB_DEVICE_REMOTE_WAKEUP, 0, - NULL, 0, - USB_CTRL_SET_TIMEOUT); - if (status) - dev_dbg(&udev->dev, - "disable remote wakeup, status %d\n", - status); + USB_DEVICE_REMOTE_WAKEUP, 0, + NULL, 0, + USB_CTRL_SET_TIMEOUT); + } else { + status = 
usb_get_status(udev, USB_RECIP_INTERFACE, 0, + &devstatus); + le16_to_cpus(&devstatus); + if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP + | USB_INTRF_STAT_FUNC_RW)) + status = + usb_disable_function_remotewakeup(udev); } + + if (status) + dev_dbg(&udev->dev, + "disable remote wakeup, status %d\n", + status); status = 0; } return status; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 09537b2f100..b416a3fc995 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -797,6 +797,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) ehci->reset_done[i] = jiffies + msecs_to_jiffies(25); set_bit(i, &ehci->resuming_ports); ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); + usb_hcd_start_port_resume(&hcd->self, i); mod_timer(&hcd->rh_timer, ehci->reset_done[i]); } } diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 4ccb97c0678..4d3b294f203 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -649,7 +649,11 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf) status = STS_PCD; } } - /* FIXME autosuspend idle root hubs */ + + /* If a resume is in progress, make sure it can finish */ + if (ehci->resuming_ports) + mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25)); + spin_unlock_irqrestore (&ehci->lock, flags); return status ? retval : 0; } @@ -851,6 +855,7 @@ static int ehci_hub_control ( /* resume signaling for 20 msec */ ehci->reset_done[wIndex] = jiffies + msecs_to_jiffies(20); + usb_hcd_start_port_resume(&hcd->self, wIndex); /* check the port again */ mod_timer(&ehci_to_hcd(ehci)->rh_timer, ehci->reset_done[wIndex]); @@ -862,6 +867,7 @@ static int ehci_hub_control ( clear_bit(wIndex, &ehci->suspended_ports); set_bit(wIndex, &ehci->port_c_suspend); ehci->reset_done[wIndex] = 0; + usb_hcd_end_port_resume(&hcd->self, wIndex); /* stop resume signaling */ temp = ehci_readl(ehci, status_reg); @@ -950,6 +956,7 @@ static int ehci_hub_control ( ehci->reset_done[wIndex] = 0; if (temp & PORT_PE) set_bit(wIndex, &ehci->port_c_suspend); + usb_hcd_end_port_resume(&hcd->self, wIndex); } if (temp & PORT_OC) diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 3d989028c83..fd252f0cfb3 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -1197,17 +1197,26 @@ static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested) if (ehci->async_iaa || ehci->async_unlinking) return; - /* Do all the waiting QHs at once */ - ehci->async_iaa = ehci->async_unlink; - ehci->async_unlink = NULL; - /* If the controller isn't running, we don't have to wait for it */ if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) { + + /* Do all the waiting QHs */ + ehci->async_iaa = ehci->async_unlink; + ehci->async_unlink = NULL; + if (!nested) /* Avoid recursion */ end_unlink_async(ehci); /* Otherwise start a new IAA cycle */ } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) { + struct ehci_qh *qh; + + /* Do only the first waiting QH (nVidia bug?) 
*/ + qh = ehci->async_unlink; + ehci->async_iaa = qh; + ehci->async_unlink = qh->unlink_next; + qh->unlink_next = NULL; + /* Make sure the unlinks are all visible to the hardware */ wmb(); @@ -1255,34 +1264,35 @@ static void end_unlink_async(struct ehci_hcd *ehci) } } +static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh); + static void unlink_empty_async(struct ehci_hcd *ehci) { - struct ehci_qh *qh, *next; - bool stopped = (ehci->rh_state < EHCI_RH_RUNNING); + struct ehci_qh *qh; + struct ehci_qh *qh_to_unlink = NULL; bool check_unlinks_later = false; + int count = 0; - /* Unlink all the async QHs that have been empty for a timer cycle */ - next = ehci->async->qh_next.qh; - while (next) { - qh = next; - next = qh->qh_next.qh; - + /* Find the last async QH which has been empty for a timer cycle */ + for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) { if (list_empty(&qh->qtd_list) && qh->qh_state == QH_STATE_LINKED) { - if (!stopped && qh->unlink_cycle == - ehci->async_unlink_cycle) + ++count; + if (qh->unlink_cycle == ehci->async_unlink_cycle) check_unlinks_later = true; else - single_unlink_async(ehci, qh); + qh_to_unlink = qh; } } - /* Start a new IAA cycle if any QHs are waiting for it */ - if (ehci->async_unlink) - start_iaa_cycle(ehci, false); + /* If nothing else is being unlinked, unlink the last empty QH */ + if (!ehci->async_iaa && !ehci->async_unlink && qh_to_unlink) { + start_unlink_async(ehci, qh_to_unlink); + --count; + } - /* QHs that haven't been empty for long enough will be handled later */ - if (check_unlinks_later) { + /* Other QHs will be handled later */ + if (count > 0) { ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true); ++ehci->async_unlink_cycle; } diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 69ebee73c0c..b476daf49f6 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -213,7 +213,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask) } static const unsigned char -max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 }; +max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 }; /* carryover low/fullspeed bandwidth that crosses uframe boundries */ static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8]) @@ -2212,11 +2212,11 @@ static void scan_isoc(struct ehci_hcd *ehci) } ehci->now_frame = now_frame; + frame = ehci->last_iso_frame; for (;;) { union ehci_shadow q, *q_p; __hc32 type, *hw_p; - frame = ehci->last_iso_frame; restart: /* scan each element in frame's queue for completions */ q_p = &ehci->pshadow [frame]; @@ -2321,6 +2321,9 @@ restart: /* Stop when we have reached the current frame */ if (frame == now_frame) break; - ehci->last_iso_frame = (frame + 1) & fmask; + + /* The last frame may still have active siTDs */ + ehci->last_iso_frame = frame; + frame = (frame + 1) & fmask; } } diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c index 20dbdcbe9b0..f904071d70d 100644 --- a/drivers/usb/host/ehci-timer.c +++ b/drivers/usb/host/ehci-timer.c @@ -113,14 +113,15 @@ static void ehci_poll_ASS(struct ehci_hcd *ehci) if (want != actual) { - /* Poll again later, but give up after about 20 ms */ - if (ehci->ASS_poll_count++ < 20) { - ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true); - return; - } - ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n", - want, actual); + /* Poll again later */ + ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true); + 
++ehci->ASS_poll_count; + return; } + + if (ehci->ASS_poll_count > 20) + ehci_dbg(ehci, "ASS poll count reached %d\n", + ehci->ASS_poll_count); ehci->ASS_poll_count = 0; /* The status is up-to-date; restart or stop the schedule as needed */ @@ -159,14 +160,14 @@ static void ehci_poll_PSS(struct ehci_hcd *ehci) if (want != actual) { - /* Poll again later, but give up after about 20 ms */ - if (ehci->PSS_poll_count++ < 20) { - ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true); - return; - } - ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n", - want, actual); + /* Poll again later */ + ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true); + return; } + + if (ehci->PSS_poll_count > 20) + ehci_dbg(ehci, "PSS poll count reached %d\n", + ehci->PSS_poll_count); ehci->PSS_poll_count = 0; /* The status is up-to-date; restart or stop the schedule as needed */ diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index a3b6d7104ae..4c338ec03a0 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -780,6 +780,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) "defaulting to EHCI.\n"); dev_warn(&xhci_pdev->dev, "USB 3.0 devices will work at USB 2.0 speeds.\n"); + usb_disable_xhci_ports(xhci_pdev); return; } diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c index 768d54295a2..15d13229ddb 100644 --- a/drivers/usb/host/uhci-hub.c +++ b/drivers/usb/host/uhci-hub.c @@ -116,6 +116,7 @@ static void uhci_finish_suspend(struct uhci_hcd *uhci, int port, } } clear_bit(port, &uhci->resuming_ports); + usb_hcd_end_port_resume(&uhci_to_hcd(uhci)->self, port); } /* Wait for the UHCI controller in HP's iLO2 server management chip. @@ -167,6 +168,8 @@ static void uhci_check_ports(struct uhci_hcd *uhci) set_bit(port, &uhci->resuming_ports); uhci->ports_timeout = jiffies + msecs_to_jiffies(25); + usb_hcd_start_port_resume( + &uhci_to_hcd(uhci)->self, port); /* Make sure we see the port again * after the resuming period is over. */ diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 59fb5c677db..7f76a49e90d 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1698,7 +1698,7 @@ static void handle_port_status(struct xhci_hcd *xhci, faked_port_index + 1); if (slot_id && xhci->devs[slot_id]) xhci_ring_device(xhci, slot_id); - if (bus_state->port_remote_wakeup && (1 << faked_port_index)) { + if (bus_state->port_remote_wakeup & (1 << faked_port_index)) { bus_state->port_remote_wakeup &= ~(1 << faked_port_index); xhci_test_and_clear_bit(xhci, port_array, @@ -2589,6 +2589,8 @@ cleanup: (trb_comp_code != COMP_STALL && trb_comp_code != COMP_BABBLE)) xhci_urb_free_priv(xhci, urb_priv); + else + kfree(urb_priv); usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); if ((urb->actual_length != urb->transfer_buffer_length && @@ -3108,7 +3110,7 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len, * running_total. 
*/ packets_transferred = (running_total + trb_buff_len) / - usb_endpoint_maxp(&urb->ep->desc); + GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); if ((total_packet_count - packets_transferred) > 31) return 31 << 17; @@ -3642,7 +3644,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, td_len = urb->iso_frame_desc[i].length; td_remain_len = td_len; total_packet_count = DIV_ROUND_UP(td_len, - usb_endpoint_maxp(&urb->ep->desc)); + GET_MAX_PACKET( + usb_endpoint_maxp(&urb->ep->desc))); /* A zero-length transfer still involves at least one packet. */ if (total_packet_count == 0) total_packet_count++; @@ -3664,9 +3667,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, td = urb_priv->td[i]; for (j = 0; j < trbs_per_td; j++) { u32 remainder = 0; - field = TRB_TBC(burst_count) | TRB_TLBPC(residue); + field = 0; if (first_trb) { + field = TRB_TBC(burst_count) | + TRB_TLBPC(residue); /* Queue the isoc TRB */ field |= TRB_TYPE(TRB_ISOC); /* Assume URB_ISO_ASAP is set */ diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index f14736f647f..edc0f0dcad8 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -60,6 +60,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */ { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ + { USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */ { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */ { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index ba68835d06a..90ceef1776c 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -584,6 +584,7 @@ static struct usb_device_id id_table_combined [] = { /* * ELV devices: */ + { USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) }, @@ -670,6 +671,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index fa5d5603827..9d359e189a6 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -147,6 +147,11 @@ #define XSENS_CONVERTER_6_PID 0xD38E #define XSENS_CONVERTER_7_PID 0xD38F +/** + * Zolix (www.zolix.com.cb) product ids + */ +#define FTDI_OMNI1509 0xD491 /* Omni1509 embedded USB-serial */ + /* * NDI (www.ndigital.com) product ids */ @@ -204,7 +209,7 @@ /* * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). - * All of these devices use FTDI's vendor ID (0x0403). + * Almost all of these devices use FTDI's vendor ID (0x0403). * Further IDs taken from ELV Windows .inf file. * * The previously included PID for the UO 100 module was incorrect. @@ -212,6 +217,8 @@ * * Armin Laeuger originally sent the PID for the UM 100 module. 
*/ +#define FTDI_ELV_VID 0x1B1F /* ELV AG */ +#define FTDI_ELV_WS300_PID 0xC006 /* eQ3 WS 300 PC II */ #define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */ #define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */ #define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 0d9dac9e7f9..567bc77d639 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -242,6 +242,7 @@ static void option_instat_callback(struct urb *urb); #define TELIT_PRODUCT_CC864_DUAL 0x1005 #define TELIT_PRODUCT_CC864_SINGLE 0x1006 #define TELIT_PRODUCT_DE910_DUAL 0x1010 +#define TELIT_PRODUCT_LE920 0x1200 /* ZTE PRODUCTS */ #define ZTE_VENDOR_ID 0x19d2 @@ -453,6 +454,10 @@ static void option_instat_callback(struct urb *urb); #define TPLINK_VENDOR_ID 0x2357 #define TPLINK_PRODUCT_MA180 0x0201 +/* Changhong products */ +#define CHANGHONG_VENDOR_ID 0x2077 +#define CHANGHONG_PRODUCT_CH690 0x7001 + /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, @@ -534,6 +539,11 @@ static const struct option_blacklist_info zte_1255_blacklist = { .reserved = BIT(3) | BIT(4), }; +static const struct option_blacklist_info telit_le920_blacklist = { + .sendsetup = BIT(0), + .reserved = BIT(1) | BIT(5), +}; + static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, @@ -784,6 +794,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), + .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, @@ -1318,6 +1330,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) }, { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index aa148c21ea4..24662547dc5 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -53,6 +53,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ {DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ {DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ + {DEVICE_G1K(0x1bc7, 0x900e)}, /* Telit Gobi QDL device */ /* Gobi 2000 devices */ {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */ diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c index 105d900150c..16b0bf055ee 100644 --- a/drivers/usb/storage/initializers.c +++ b/drivers/usb/storage/initializers.c @@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us) return 0; } -/* This places the HUAWEI E220 devices in multi-port mode */ -int usb_stor_huawei_e220_init(struct us_data *us) +/* This places the HUAWEI usb dongles in 
multi-port mode */ +static int usb_stor_huawei_feature_init(struct us_data *us) { int result; @@ -104,3 +104,75 @@ int usb_stor_huawei_e220_init(struct us_data *us) US_DEBUGP("Huawei mode set result is %d\n", result); return 0; } + +/* + * It will send a SCSI switch command called 'rewind' to the Huawei dongle. + * When the dongle receives this command for the first time, + * it will reboot immediately. After rebooting, it will ignore this command. + * So it is unnecessary to read its response. + */ +static int usb_stor_huawei_scsi_init(struct us_data *us) +{ + int result = 0; + int act_len = 0; + struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf; + char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + + bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN); + bcbw->Tag = 0; + bcbw->DataTransferLength = 0; + bcbw->Flags = bcbw->Lun = 0; + bcbw->Length = sizeof(rewind_cmd); + memset(bcbw->CDB, 0, sizeof(bcbw->CDB)); + memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd)); + + result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw, + US_BULK_CB_WRAP_LEN, &act_len); + US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result); + return result; +} + +/* + * It tries to find the supported Huawei USB dongles. + * Huawei assigns the following product IDs + * to all of its mobile broadband dongles, + * including future dongles. + * So if the product ID is not included in this list, + * the device is not a Huawei mobile broadband dongle. + */ +static int usb_stor_huawei_dongles_pid(struct us_data *us) +{ + struct usb_interface_descriptor *idesc; + int idProduct; + + idesc = &us->pusb_intf->cur_altsetting->desc; + idProduct = us->pusb_dev->descriptor.idProduct; + /* If the first port is CDROM, + * the dongle is in single-port mode, + * and a switch command needs to be sent. 
*/ + if (idesc && idesc->bInterfaceNumber == 0) { + if ((idProduct == 0x1001) + || (idProduct == 0x1003) + || (idProduct == 0x1004) + || (idProduct >= 0x1401 && idProduct <= 0x1500) + || (idProduct >= 0x1505 && idProduct <= 0x1600) + || (idProduct >= 0x1c02 && idProduct <= 0x2202)) { + return 1; + } + } + return 0; +} + +int usb_stor_huawei_init(struct us_data *us) +{ + int result = 0; + + if (usb_stor_huawei_dongles_pid(us)) { + if (us->pusb_dev->descriptor.idProduct >= 0x1446) + result = usb_stor_huawei_scsi_init(us); + else + result = usb_stor_huawei_feature_init(us); + } + return result; +} diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h index 529327fbb06..5376d4fc76f 100644 --- a/drivers/usb/storage/initializers.h +++ b/drivers/usb/storage/initializers.h @@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us); * flash reader */ int usb_stor_ucr61s2b_init(struct us_data *us); -/* This places the HUAWEI E220 devices in multi-port mode */ -int usb_stor_huawei_e220_init(struct us_data *us); +/* This places the HUAWEI usb dongles in multi-port mode */ +int usb_stor_huawei_init(struct us_data *us); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index d305a5aa3a5..72923b56bbf 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1527,335 +1527,10 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100, /* Reported by fangxiaozhi <huananhu@huawei.com> * This brings the HUAWEI data card devices into multi-port mode */ -UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000, +UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50, "HUAWEI MOBILE", "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 
0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 
0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 
0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, - 0), -UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000, - "HUAWEI MOBILE", - "Mass Storage", - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init, 0), /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */ diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 31b3e1a61bb..cf09b6ba71f 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -120,6 +120,17 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks"); .useTransport = use_transport, \ } +#define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \ + vendor_name, product_name, use_protocol, use_transport, \ + init_function, Flags) \ +{ \ + .vendorName = vendor_name, \ + .productName = product_name, \ + .useProtocol = use_protocol, \ + .useTransport = use_transport, \ + .initFunction = init_function, \ +} + static struct us_unusual_dev us_unusual_dev_list[] = { # include "unusual_devs.h" { } /* Terminating entry */ @@ -131,6 +142,7 @@ static struct us_unusual_dev for_dynamic_ids = #undef UNUSUAL_DEV #undef COMPLIANT_DEV #undef USUAL_DEV +#undef UNUSUAL_VENDOR_INTF #ifdef CONFIG_LOCKDEP diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c index b78a526910f..5ef8ce74aae 100644 --- a/drivers/usb/storage/usual-tables.c +++ b/drivers/usb/storage/usual-tables.c @@ -41,6 +41,20 @@ #define USUAL_DEV(useProto, useTrans) \ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans) } +/* Define the device is matched with Vendor ID and interface descriptors */ +#define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \ + vendorName, productName, useProtocol, useTransport, \ + initFunction, flags) \ +{ \ + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \ + | USB_DEVICE_ID_MATCH_VENDOR, \ + .idVendor = (id_vendor), \ + .bInterfaceClass = (cl), \ + .bInterfaceSubClass = (sc), \ + .bInterfaceProtocol = (pr), \ + .driver_info = (flags) \ +} + struct usb_device_id usb_storage_usb_ids[] = { # include "unusual_devs.h" { } /* Terminating entry */ @@ -50,6 +64,7 @@ MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids); #undef UNUSUAL_DEV #undef COMPLIANT_DEV #undef USUAL_DEV +#undef UNUSUAL_VENDOR_INTF /* * The table of devices to ignore diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index ebd08b21b23..959b1cd89e6 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -165,12 +165,16 @@ static void tx_poll_stop(struct vhost_net *net) } /* Caller must have TX VQ lock */ -static void tx_poll_start(struct vhost_net *net, struct socket *sock) +static int tx_poll_start(struct vhost_net *net, struct socket *sock) { + int ret; + if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED)) - return; - vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file); - net->tx_poll_state = VHOST_NET_POLL_STARTED; + return 0; + ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file); + if (!ret) + net->tx_poll_state = VHOST_NET_POLL_STARTED; + return ret; } /* In case of DMA done 
not in order in lower device driver for some reason. @@ -642,20 +646,23 @@ static void vhost_net_disable_vq(struct vhost_net *n, vhost_poll_stop(n->poll + VHOST_NET_VQ_RX); } -static void vhost_net_enable_vq(struct vhost_net *n, +static int vhost_net_enable_vq(struct vhost_net *n, struct vhost_virtqueue *vq) { struct socket *sock; + int ret; sock = rcu_dereference_protected(vq->private_data, lockdep_is_held(&vq->mutex)); if (!sock) - return; + return 0; if (vq == n->vqs + VHOST_NET_VQ_TX) { n->tx_poll_state = VHOST_NET_POLL_STOPPED; - tx_poll_start(n, sock); + ret = tx_poll_start(n, sock); } else - vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file); + ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file); + + return ret; } static struct socket *vhost_net_stop_vq(struct vhost_net *n, @@ -827,15 +834,18 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) r = PTR_ERR(ubufs); goto err_ubufs; } - oldubufs = vq->ubufs; - vq->ubufs = ubufs; + vhost_net_disable_vq(n, vq); rcu_assign_pointer(vq->private_data, sock); - vhost_net_enable_vq(n, vq); - r = vhost_init_used(vq); if (r) - goto err_vq; + goto err_used; + r = vhost_net_enable_vq(n, vq); + if (r) + goto err_used; + + oldubufs = vq->ubufs; + vq->ubufs = ubufs; n->tx_packets = 0; n->tx_zcopy_err = 0; @@ -859,6 +869,11 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) mutex_unlock(&n->dev.mutex); return 0; +err_used: + rcu_assign_pointer(vq->private_data, oldsock); + vhost_net_enable_vq(n, vq); + if (ubufs) + vhost_ubuf_put_and_wait(ubufs); err_ubufs: fput(sock->file); err_vq: diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c index b20df5c829f..22321cf84fb 100644 --- a/drivers/vhost/tcm_vhost.c +++ b/drivers/vhost/tcm_vhost.c @@ -575,10 +575,8 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs) /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */ tv_tpg = vs->vs_tpg; - if (unlikely(!tv_tpg)) { - pr_err("%s endpoint not set\n", __func__); + if (unlikely(!tv_tpg)) return; - } mutex_lock(&vq->mutex); vhost_disable_notify(&vs->dev, vq); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 34389f75fe6..9759249e6d9 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -77,26 +77,38 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, init_poll_funcptr(&poll->table, vhost_poll_func); poll->mask = mask; poll->dev = dev; + poll->wqh = NULL; vhost_work_init(&poll->work, fn); } /* Start polling a file. We add ourselves to file's wait queue. The caller must * keep a reference to a file until after vhost_poll_stop is called. */ -void vhost_poll_start(struct vhost_poll *poll, struct file *file) +int vhost_poll_start(struct vhost_poll *poll, struct file *file) { unsigned long mask; + int ret = 0; mask = file->f_op->poll(file, &poll->table); if (mask) vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); + if (mask & POLLERR) { + if (poll->wqh) + remove_wait_queue(poll->wqh, &poll->wait); + ret = -EINVAL; + } + + return ret; } /* Stop polling a file. After this function returns, it becomes safe to drop the * file reference. You must also flush afterwards. 
*/ void vhost_poll_stop(struct vhost_poll *poll) { - remove_wait_queue(poll->wqh, &poll->wait); + if (poll->wqh) { + remove_wait_queue(poll->wqh, &poll->wait); + poll->wqh = NULL; + } } static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, @@ -792,7 +804,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp) fput(filep); if (pollstart && vq->handle_kick) - vhost_poll_start(&vq->poll, vq->kick); + r = vhost_poll_start(&vq->poll, vq->kick); mutex_unlock(&vq->mutex); diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 2639c58b23a..17261e277c0 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -42,7 +42,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work); void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, unsigned long mask, struct vhost_dev *dev); -void vhost_poll_start(struct vhost_poll *poll, struct file *file); +int vhost_poll_start(struct vhost_poll *poll, struct file *file); void vhost_poll_stop(struct vhost_poll *poll); void vhost_poll_flush(struct vhost_poll *poll); void vhost_poll_queue(struct vhost_poll *poll); diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 0be4df39e95..74d77dfa5f6 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -840,7 +840,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) if (irq == -1) { irq = xen_allocate_irq_dynamic(); - if (irq == -1) + if (irq < 0) goto out; irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, @@ -944,7 +944,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) if (irq == -1) { irq = xen_allocate_irq_dynamic(); - if (irq == -1) + if (irq < 0) goto out; irq_set_chip_and_handler_name(irq, &xen_percpu_chip, diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c index 97f5d264c31..37c1f825f51 100644 --- a/drivers/xen/xen-pciback/pciback_ops.c +++ b/drivers/xen/xen-pciback/pciback_ops.c @@ -135,7 +135,6 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev, struct pci_dev *dev, struct xen_pci_op *op) { struct xen_pcibk_dev_data *dev_data; - int otherend = pdev->xdev->otherend_id; int status; if (unlikely(verbose_request)) @@ -144,8 +143,9 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev, status = pci_enable_msi(dev); if (status) { - printk(KERN_ERR "error enable msi for guest %x status %x\n", - otherend, status); + pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI for guest %u: err %d\n", + pci_name(dev), pdev->xdev->otherend_id, + status); op->value = 0; return XEN_PCI_ERR_op_failed; } @@ -223,10 +223,10 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev, pci_name(dev), i, op->msix_entries[i].vector); } - } else { - printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n", - pci_name(dev), result); - } + } else + pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI-X for guest %u: err %d!\n", + pci_name(dev), pdev->xdev->otherend_id, + result); kfree(entries); op->value = result; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a8b8adc0507..5a3327b8f90 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4534,7 +4534,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) unsigned nr_extents = 0; int extra_reserve = 0; enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL; - int ret; + int ret = 0; bool delalloc_lock = true; /* If we are a free space inode we need to not flush since we will be in @@ -4579,20 +4579,18 @@ int 
btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) csum_bytes = BTRFS_I(inode)->csum_bytes; spin_unlock(&BTRFS_I(inode)->lock); - if (root->fs_info->quota_enabled) { + if (root->fs_info->quota_enabled) ret = btrfs_qgroup_reserve(root, num_bytes + nr_extents * root->leafsize); - if (ret) { - spin_lock(&BTRFS_I(inode)->lock); - calc_csum_metadata_size(inode, num_bytes, 0); - spin_unlock(&BTRFS_I(inode)->lock); - if (delalloc_lock) - mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); - return ret; - } - } - ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); + /* + * ret != 0 here means the qgroup reservation failed, we go straight to + * the shared error handling then. + */ + if (ret == 0) + ret = reserve_metadata_bytes(root, block_rsv, + to_reserve, flush); + if (ret) { u64 to_free = 0; unsigned dropped; diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 2e8cae63d24..fdb7a8db3b5 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -288,7 +288,8 @@ out: void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em) { clear_bit(EXTENT_FLAG_LOGGING, &em->flags); - try_merge_map(tree, em); + if (em->in_tree) + try_merge_map(tree, em); } /** diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index f76b1fd160d..aeb84469d2c 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -293,15 +293,24 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, struct btrfs_key key; struct btrfs_ioctl_defrag_range_args range; int num_defrag; + int index; + int ret; /* get the inode */ key.objectid = defrag->root; btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); key.offset = (u64)-1; + + index = srcu_read_lock(&fs_info->subvol_srcu); + inode_root = btrfs_read_fs_root_no_name(fs_info, &key); if (IS_ERR(inode_root)) { - kmem_cache_free(btrfs_inode_defrag_cachep, defrag); - return PTR_ERR(inode_root); + ret = PTR_ERR(inode_root); + goto cleanup; + } + if (btrfs_root_refs(&inode_root->root_item) == 0) { + ret = -ENOENT; + goto cleanup; } key.objectid = defrag->ino; @@ -309,9 +318,10 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, key.offset = 0; inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL); if (IS_ERR(inode)) { - kmem_cache_free(btrfs_inode_defrag_cachep, defrag); - return PTR_ERR(inode); + ret = PTR_ERR(inode); + goto cleanup; } + srcu_read_unlock(&fs_info->subvol_srcu, index); /* do a chunk of defrag */ clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags); @@ -346,6 +356,10 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, iput(inode); return 0; +cleanup: + srcu_read_unlock(&fs_info->subvol_srcu, index); + kmem_cache_free(btrfs_inode_defrag_cachep, defrag); + return ret; } /* @@ -1594,9 +1608,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, if (err < 0 && num_written > 0) num_written = err; } -out: + if (sync) atomic_dec(&BTRFS_I(inode)->sync_writers); +out: sb_end_write(inode->i_sb); current->backing_dev_info = NULL; return num_written ? 
num_written : err; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 5b22d45d3c6..338f2597bf7 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -515,7 +515,6 @@ static noinline int create_subvol(struct btrfs_root *root, BUG_ON(ret); - d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry)); fail: if (async_transid) { *async_transid = trans->transid; @@ -525,6 +524,10 @@ fail: } if (err && !ret) ret = err; + + if (!ret) + d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry)); + return ret; } diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index f1073129704..e5ed5672960 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -836,9 +836,16 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, * if the disk i_size is already at the inode->i_size, or * this ordered extent is inside the disk i_size, we're done */ - if (disk_i_size == i_size || offset <= disk_i_size) { + if (disk_i_size == i_size) + goto out; + + /* + * We still need to update disk_i_size if outstanding_isize is greater + * than disk_i_size. + */ + if (offset <= disk_i_size && + (!ordered || ordered->outstanding_isize <= disk_i_size)) goto out; - } /* * walk backward from this ordered extent to disk_i_size. @@ -870,7 +877,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, break; if (test->file_offset >= i_size) break; - if (test->file_offset >= disk_i_size) { + if (entry_end(test) > disk_i_size) { /* * we don't update disk_i_size now, so record this * undealt i_size. Or we will not know the real diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index bdbb94f245c..67783e03d12 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -580,20 +580,29 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx) int corrected = 0; struct btrfs_key key; struct inode *inode = NULL; + struct btrfs_fs_info *fs_info; u64 end = offset + PAGE_SIZE - 1; struct btrfs_root *local_root; + int srcu_index; key.objectid = root; key.type = BTRFS_ROOT_ITEM_KEY; key.offset = (u64)-1; - local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key); - if (IS_ERR(local_root)) + + fs_info = fixup->root->fs_info; + srcu_index = srcu_read_lock(&fs_info->subvol_srcu); + + local_root = btrfs_read_fs_root_no_name(fs_info, &key); + if (IS_ERR(local_root)) { + srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); return PTR_ERR(local_root); + } key.type = BTRFS_INODE_ITEM_KEY; key.objectid = inum; key.offset = 0; - inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL); + inode = btrfs_iget(fs_info->sb, &key, local_root, NULL); + srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); if (IS_ERR(inode)) return PTR_ERR(inode); @@ -606,7 +615,6 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx) } if (PageUptodate(page)) { - struct btrfs_fs_info *fs_info; if (PageDirty(page)) { /* * we need to write the data to the defect sector. 
the @@ -3180,18 +3188,25 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx) u64 physical_for_dev_replace; u64 len; struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info; + int srcu_index; key.objectid = root; key.type = BTRFS_ROOT_ITEM_KEY; key.offset = (u64)-1; + + srcu_index = srcu_read_lock(&fs_info->subvol_srcu); + local_root = btrfs_read_fs_root_no_name(fs_info, &key); - if (IS_ERR(local_root)) + if (IS_ERR(local_root)) { + srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); return PTR_ERR(local_root); + } key.type = BTRFS_INODE_ITEM_KEY; key.objectid = inum; key.offset = 0; inode = btrfs_iget(fs_info->sb, &key, local_root, NULL); + srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); if (IS_ERR(inode)) return PTR_ERR(inode); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index f15494699f3..fc03aa60b68 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -333,12 +333,14 @@ start_transaction(struct btrfs_root *root, u64 num_items, int type, &root->fs_info->trans_block_rsv, num_bytes, flush); if (ret) - return ERR_PTR(ret); + goto reserve_fail; } again: h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); - if (!h) - return ERR_PTR(-ENOMEM); + if (!h) { + ret = -ENOMEM; + goto alloc_fail; + } /* * If we are JOIN_NOLOCK we're already committing a transaction and @@ -365,11 +367,7 @@ again: if (ret < 0) { /* We must get the transaction if we are JOIN_NOLOCK. */ BUG_ON(type == TRANS_JOIN_NOLOCK); - - if (type < TRANS_JOIN_NOLOCK) - sb_end_intwrite(root->fs_info->sb); - kmem_cache_free(btrfs_trans_handle_cachep, h); - return ERR_PTR(ret); + goto join_fail; } cur_trans = root->fs_info->running_transaction; @@ -410,6 +408,19 @@ got_it: if (!current->journal_info && type != TRANS_USERSPACE) current->journal_info = h; return h; + +join_fail: + if (type < TRANS_JOIN_NOLOCK) + sb_end_intwrite(root->fs_info->sb); + kmem_cache_free(btrfs_trans_handle_cachep, h); +alloc_fail: + if (num_bytes) + btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv, + num_bytes); +reserve_fail: + if (qgroup_reserved) + btrfs_qgroup_free(root, qgroup_reserved); + return ERR_PTR(ret); } struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 15f6efdf646..5cbb7f4b167 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1556,7 +1556,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) ret = 0; /* Notify udev that device has changed */ - btrfs_kobject_uevent(bdev, KOBJ_CHANGE); + if (bdev) + btrfs_kobject_uevent(bdev, KOBJ_CHANGE); error_brelse: brelse(bh); diff --git a/fs/dlm/user.c b/fs/dlm/user.c index 7ff49852b0c..911649a47dd 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -503,11 +503,11 @@ static ssize_t device_write(struct file *file, const char __user *buf, #endif return -EINVAL; -#ifdef CONFIG_COMPAT - if (count > sizeof(struct dlm_write_request32) + DLM_RESNAME_MAXLEN) -#else + /* + * can't compare against COMPAT/dlm_write_request32 because + * we don't yet know if is64bit is zero + */ if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN) -#endif return -EINVAL; kbuf = kzalloc(count + 1, GFP_NOFS); diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index dd057bc6b65..fc8dc20fdeb 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -177,11 +177,31 @@ out_nofree: return mnt; } +static int +nfs_namespace_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +{ + if 
(NFS_FH(dentry->d_inode)->size != 0) + return nfs_getattr(mnt, dentry, stat); + generic_fillattr(dentry->d_inode, stat); + return 0; +} + +static int +nfs_namespace_setattr(struct dentry *dentry, struct iattr *attr) +{ + if (NFS_FH(dentry->d_inode)->size != 0) + return nfs_setattr(dentry, attr); + return -EACCES; +} + const struct inode_operations nfs_mountpoint_inode_operations = { .getattr = nfs_getattr, + .setattr = nfs_setattr, }; const struct inode_operations nfs_referral_inode_operations = { + .getattr = nfs_namespace_getattr, + .setattr = nfs_namespace_setattr, }; static void nfs_expire_automounts(struct work_struct *work) diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index acc34726812..2e9779b58b7 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -236,11 +236,10 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp, error = nfs4_discover_server_trunking(clp, &old); if (error < 0) goto error; + nfs_put_client(clp); if (clp != old) { clp->cl_preserve_clid = true; - nfs_put_client(clp); clp = old; - atomic_inc(&clp->cl_count); } return clp; @@ -306,7 +305,7 @@ int nfs40_walk_client_list(struct nfs_client *new, .clientid = new->cl_clientid, .confirm = new->cl_confirm, }; - int status; + int status = -NFS4ERR_STALE_CLIENTID; spin_lock(&nn->nfs_client_lock); list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { @@ -332,40 +331,33 @@ int nfs40_walk_client_list(struct nfs_client *new, if (prev) nfs_put_client(prev); + prev = pos; status = nfs4_proc_setclientid_confirm(pos, &clid, cred); - if (status == 0) { + switch (status) { + case -NFS4ERR_STALE_CLIENTID: + break; + case 0: nfs4_swap_callback_idents(pos, new); - nfs_put_client(pos); + prev = NULL; *result = pos; dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n", __func__, pos, atomic_read(&pos->cl_count)); - return 0; - } - if (status != -NFS4ERR_STALE_CLIENTID) { - nfs_put_client(pos); - dprintk("NFS: <-- %s status = %d, no result\n", - __func__, status); - return status; + default: + goto out; } spin_lock(&nn->nfs_client_lock); - prev = pos; } + spin_unlock(&nn->nfs_client_lock); - /* - * No matching nfs_client found. This should be impossible, - * because the new nfs_client has already been added to - * nfs_client_list by nfs_get_client(). - * - * Don't BUG(), since the caller is holding a mutex. - */ + /* No match found. 
The server lost our clientid */ +out: if (prev) nfs_put_client(prev); - spin_unlock(&nn->nfs_client_lock); - pr_err("NFS: %s Error: no matching nfs_client found\n", __func__); - return -NFS4ERR_STALE_CLIENTID; + dprintk("NFS: <-- %s status = %d\n", __func__, status); + return status; } #ifdef CONFIG_NFS_V4_1 @@ -432,7 +424,7 @@ int nfs41_walk_client_list(struct nfs_client *new, { struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); struct nfs_client *pos, *n, *prev = NULL; - int error; + int status = -NFS4ERR_STALE_CLIENTID; spin_lock(&nn->nfs_client_lock); list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { @@ -448,14 +440,17 @@ int nfs41_walk_client_list(struct nfs_client *new, nfs_put_client(prev); prev = pos; - error = nfs_wait_client_init_complete(pos); - if (error < 0) { + nfs4_schedule_lease_recovery(pos); + status = nfs_wait_client_init_complete(pos); + if (status < 0) { nfs_put_client(pos); spin_lock(&nn->nfs_client_lock); continue; } - + status = pos->cl_cons_state; spin_lock(&nn->nfs_client_lock); + if (status < 0) + continue; } if (pos->rpc_ops != new->rpc_ops) @@ -473,6 +468,7 @@ int nfs41_walk_client_list(struct nfs_client *new, if (!nfs4_match_serverowners(pos, new)) continue; + atomic_inc(&pos->cl_count); spin_unlock(&nn->nfs_client_lock); dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n", __func__, pos, atomic_read(&pos->cl_count)); @@ -481,16 +477,10 @@ int nfs41_walk_client_list(struct nfs_client *new, return 0; } - /* - * No matching nfs_client found. This should be impossible, - * because the new nfs_client has already been added to - * nfs_client_list by nfs_get_client(). - * - * Don't BUG(), since the caller is holding a mutex. - */ + /* No matching nfs_client found. */ spin_unlock(&nn->nfs_client_lock); - pr_err("NFS: %s Error: no matching nfs_client found\n", __func__); - return -NFS4ERR_STALE_CLIENTID; + dprintk("NFS: <-- %s status = %d\n", __func__, status); + return status; } #endif /* CONFIG_NFS_V4_1 */ diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 9448c579d41..e61f68d5ef2 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -136,16 +136,11 @@ int nfs40_discover_server_trunking(struct nfs_client *clp, clp->cl_confirm = clid.confirm; status = nfs40_walk_client_list(clp, result, cred); - switch (status) { - case -NFS4ERR_STALE_CLIENTID: - set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); - case 0: + if (status == 0) { /* Sustain the lease, even if it's empty. If the clientid4 * goes stale it's of no use for trunking discovery. 
*/ nfs4_schedule_state_renewal(*result); - break; } - out: return status; } @@ -1863,6 +1858,7 @@ again: case -ETIMEDOUT: case -EAGAIN: ssleep(1); + case -NFS4ERR_STALE_CLIENTID: dprintk("NFS: %s after status %d, retrying\n", __func__, status); goto again; @@ -2022,8 +2018,18 @@ static int nfs4_reset_session(struct nfs_client *clp) nfs4_begin_drain_session(clp); cred = nfs4_get_exchange_id_cred(clp); status = nfs4_proc_destroy_session(clp->cl_session, cred); - if (status && status != -NFS4ERR_BADSESSION && - status != -NFS4ERR_DEADSESSION) { + switch (status) { + case 0: + case -NFS4ERR_BADSESSION: + case -NFS4ERR_DEADSESSION: + break; + case -NFS4ERR_BACK_CHAN_BUSY: + case -NFS4ERR_DELAY: + set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); + status = 0; + ssleep(1); + goto out; + default: status = nfs4_recovery_handle_error(clp, status); goto out; } diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 2e7e8c878e5..b056b162872 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2589,27 +2589,23 @@ nfs_xdev_mount(struct file_system_type *fs_type, int flags, struct nfs_server *server; struct dentry *mntroot = ERR_PTR(-ENOMEM); struct nfs_subversion *nfs_mod = NFS_SB(data->sb)->nfs_client->cl_nfs_mod; - int error; - dprintk("--> nfs_xdev_mount_common()\n"); + dprintk("--> nfs_xdev_mount()\n"); mount_info.mntfh = mount_info.cloned->fh; /* create a new volume representation */ server = nfs_mod->rpc_ops->clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor); - if (IS_ERR(server)) { - error = PTR_ERR(server); - goto out_err; - } - mntroot = nfs_fs_mount_common(server, flags, dev_name, &mount_info, nfs_mod); - dprintk("<-- nfs_xdev_mount_common() = 0\n"); -out: - return mntroot; + if (IS_ERR(server)) + mntroot = ERR_CAST(server); + else + mntroot = nfs_fs_mount_common(server, flags, + dev_name, &mount_info, nfs_mod); -out_err: - dprintk("<-- nfs_xdev_mount_common() = %d [error]\n", error); - goto out; + dprintk("<-- nfs_xdev_mount() = %ld\n", + IS_ERR(mntroot) ? PTR_ERR(mntroot) : 0L); + return mntroot; } #if IS_ENABLED(CONFIG_NFS_V4) diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index fdb18076948..f3859354e41 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -664,8 +664,11 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, if (ret < 0) printk(KERN_ERR "NILFS: GC failed during preparation: " "cannot read source blocks: err=%d\n", ret); - else + else { + if (nilfs_sb_need_update(nilfs)) + set_nilfs_discontinued(nilfs); ret = nilfs_clean_segments(inode->i_sb, argv, kbufs); + } nilfs_remove_all_gcinodes(nilfs); clear_nilfs_gc_running(nilfs); diff --git a/include/linux/llist.h b/include/linux/llist.h index a5199f6d0e8..d0ab98f73d3 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -125,6 +125,31 @@ static inline void init_llist_head(struct llist_head *list) (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member)) /** + * llist_for_each_entry_safe - iterate safely against remove over some entries + * of lock-less list of given type. + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as a temporary storage. + * @node: the fist entry of deleted list entries. + * @member: the name of the llist_node with the struct. + * + * In general, some entries of the lock-less list can be traversed + * safely only after being removed from list, so start with an entry + * instead of list head. This variant allows removal of entries + * as we iterate. 
+ * + * If being used on entries deleted from lock-less list directly, the + * traverse order is from the newest to the oldest added entry. If + * you want to traverse from the oldest to the newest, you must + * reverse the order by yourself before traversing. + */ +#define llist_for_each_entry_safe(pos, n, node, member) \ + for ((pos) = llist_entry((node), typeof(*(pos)), member), \ + (n) = (pos)->member.next; \ + &(pos)->member != NULL; \ + (pos) = llist_entry(n, typeof(*(pos)), member), \ + (n) = (&(pos)->member != NULL) ? (pos)->member.next : NULL) + +/** * llist_empty - tests whether a lock-less list is empty * @head: the list to test * diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 0108a56f814..28bd5fa2ff2 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -429,7 +429,7 @@ extern int memcg_limited_groups_array_size; * the slab_mutex must be held when looping through those caches */ #define for_each_memcg_cache_index(_idx) \ - for ((_idx) = 0; i < memcg_limited_groups_array_size; (_idx)++) + for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++) static inline bool memcg_kmem_enabled(void) { diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index bc823c4c028..deca8745252 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -151,7 +151,7 @@ struct mmu_notifier_ops { * Therefore notifier chains can only be traversed when either * * 1. mmap_sem is held. - * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->mutex). + * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->rwsem). * 3. No other concurrent thread can access the list (release) */ struct mmu_notifier { diff --git a/include/linux/usb.h b/include/linux/usb.h index 689b14b26c8..4d22d0f6167 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -357,6 +357,8 @@ struct usb_bus { int bandwidth_int_reqs; /* number of Interrupt requests */ int bandwidth_isoc_reqs; /* number of Isoc. requests */ + unsigned resuming_ports; /* bit array: resuming root-hub ports */ + #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) struct mon_bus *mon_bus; /* non-null when associated */ int monitored; /* non-zero when monitored */ diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 608050b2545..0a78df5f6cf 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -430,6 +430,9 @@ extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd); extern void usb_wakeup_notification(struct usb_device *hdev, unsigned int portnum); +extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum); +extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum); + /* The D0/D1 toggle bits ... 
USE WITH CAUTION (they're almost hcd-internal) */ #define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1) #define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep))) diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 5de7a220e98..0e5ac93bab1 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -33,6 +33,7 @@ struct usbnet { wait_queue_head_t *wait; struct mutex phy_mutex; unsigned char suspend_count; + unsigned char pkt_cnt, pkt_err; /* i/o info: pipes etc */ unsigned in, out; @@ -70,6 +71,7 @@ struct usbnet { # define EVENT_DEV_OPEN 7 # define EVENT_DEVICE_REPORT_IDLE 8 # define EVENT_NO_RUNTIME_PM 9 +# define EVENT_RX_KILL 10 }; static inline struct usb_driver *driver_of(struct usb_interface *intf) @@ -100,7 +102,6 @@ struct driver_info { #define FLAG_LINK_INTR 0x0800 /* updates link (carrier) status */ #define FLAG_POINTTOPOINT 0x1000 /* possibly use "usb%d" names */ -#define FLAG_NOARP 0x2000 /* device can't do ARP */ /* * Indicates to usbnet, that USB driver accumulates multiple IP packets. @@ -108,6 +109,7 @@ struct driver_info { */ #define FLAG_MULTI_PACKET 0x2000 #define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */ +#define FLAG_NOARP 0x8000 /* device can't do ARP */ /* init device ... can sleep, or cause probe() failure */ int (*bind)(struct usbnet *, struct usb_interface *); diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index 498433dd067..938b7fd1120 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -34,17 +34,17 @@ extern int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); -extern int datagram_recv_ctl(struct sock *sk, - struct msghdr *msg, - struct sk_buff *skb); - -extern int datagram_send_ctl(struct net *net, - struct sock *sk, - struct msghdr *msg, - struct flowi6 *fl6, - struct ipv6_txoptions *opt, - int *hlimit, int *tclass, - int *dontfrag); +extern int ip6_datagram_recv_ctl(struct sock *sk, + struct msghdr *msg, + struct sk_buff *skb); + +extern int ip6_datagram_send_ctl(struct net *net, + struct sock *sk, + struct msghdr *msg, + struct flowi6 *fl6, + struct ipv6_txoptions *opt, + int *hlimit, int *tclass, + int *dontfrag); #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 50598472dc4..f738e25377f 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -152,6 +152,12 @@ #define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0)) #define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1)) +/* + * Interface status, Figure 9-5 USB 3.0 spec + */ +#define USB_INTRF_STAT_FUNC_RW_CAP 1 +#define USB_INTRF_STAT_FUNC_RW 2 + #define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */ /* Bit array elements as returned by the USB_REQ_GET_STATUS request. */ diff --git a/kernel/events/core.c b/kernel/events/core.c index 301079d06f2..7b6646a8c06 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -908,6 +908,15 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx) } /* + * Initialize event state based on the perf_event_attr::disabled. + */ +static inline void perf_event__state_init(struct perf_event *event) +{ + event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : + PERF_EVENT_STATE_INACTIVE; +} + +/* * Called at perf_event creation and when events are attached/detached from a * group. 
*/ @@ -6179,8 +6188,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, event->overflow_handler = overflow_handler; event->overflow_handler_context = context; - if (attr->disabled) - event->state = PERF_EVENT_STATE_OFF; + perf_event__state_init(event); pmu = NULL; @@ -6609,9 +6617,17 @@ SYSCALL_DEFINE5(perf_event_open, mutex_lock(&gctx->mutex); perf_remove_from_context(group_leader); + + /* + * Removing from the context ends up with disabled + * event. What we want here is event in the initial + * startup state, ready to be add into new context. + */ + perf_event__state_init(group_leader); list_for_each_entry(sibling, &group_leader->sibling_list, group_entry) { perf_remove_from_context(sibling); + perf_event__state_init(sibling); put_ctx(gctx); } mutex_unlock(&gctx->mutex); diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index f6e5ec2932b..c1cc7e17ff9 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h @@ -40,8 +40,7 @@ #ifdef CONFIG_RCU_NOCB_CPU static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */ static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */ -static bool rcu_nocb_poll; /* Offload kthread are to poll. */ -module_param(rcu_nocb_poll, bool, 0444); +static bool __read_mostly rcu_nocb_poll; /* Offload kthread are to poll. */ static char __initdata nocb_buf[NR_CPUS * 5]; #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ @@ -2159,6 +2158,13 @@ static int __init rcu_nocb_setup(char *str) } __setup("rcu_nocbs=", rcu_nocb_setup); +static int __init parse_rcu_nocb_poll(char *arg) +{ + rcu_nocb_poll = 1; + return 0; +} +early_param("rcu_nocb_poll", parse_rcu_nocb_poll); + /* Is the specified CPU a no-CPUs CPU? */ static bool is_nocb_cpu(int cpu) { @@ -2366,10 +2372,11 @@ static int rcu_nocb_kthread(void *arg) for (;;) { /* If not polling, wait for next batch of callbacks. 
*/ if (!rcu_nocb_poll) - wait_event(rdp->nocb_wq, rdp->nocb_head); + wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head); list = ACCESS_ONCE(rdp->nocb_head); if (!list) { schedule_timeout_interruptible(1); + flush_signals(current); continue; } diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 2cd3c1b4e58..7ae4c4c5420 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -222,8 +222,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) cfs_rq->runnable_load_avg); SEQ_printf(m, " .%-30s: %lld\n", "blocked_load_avg", cfs_rq->blocked_load_avg); - SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg", - atomic64_read(&cfs_rq->tg->load_avg)); + SEQ_printf(m, " .%-30s: %lld\n", "tg_load_avg", + (unsigned long long)atomic64_read(&cfs_rq->tg->load_avg)); SEQ_printf(m, " .%-30s: %lld\n", "tg_load_contrib", cfs_rq->tg_load_contrib); SEQ_printf(m, " .%-30s: %d\n", "tg_runnable_contrib", diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 5eea8707234..81fa5364340 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2663,7 +2663,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) hrtimer_cancel(&cfs_b->slack_timer); } -static void unthrottle_offline_cfs_rqs(struct rq *rq) +static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) { struct cfs_rq *cfs_rq; diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 418feb01344..4f02b284735 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -566,7 +566,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) static int do_balance_runtime(struct rt_rq *rt_rq) { struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); - struct root_domain *rd = cpu_rq(smp_processor_id())->rd; + struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd; int i, weight, more = 0; u64 rt_period; diff --git a/lib/digsig.c b/lib/digsig.c index 8c0e62975c8..dc2be7ed176 100644 --- a/lib/digsig.c +++ b/lib/digsig.c @@ -162,6 +162,8 @@ static int digsig_verify_rsa(struct key *key, memset(out1, 0, head); memcpy(out1 + head, p, l); + kfree(p); + err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len); if (err) goto err; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 6001ee6347a..b5783d81eda 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1257,6 +1257,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, if (flags & FOLL_WRITE && !pmd_write(*pmd)) goto out; + /* Avoid dumping huge zero page */ + if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) + return ERR_PTR(-EFAULT); + page = pmd_page(*pmd); VM_BUG_ON(!PageHead(page)); if (flags & FOLL_TOUCH) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 4f3ea0b1e57..546db81820e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3033,6 +3033,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, if (!huge_pte_none(huge_ptep_get(ptep))) { pte = huge_ptep_get_and_clear(mm, address, ptep); pte = pte_mkhuge(pte_modify(pte, newprot)); + pte = arch_make_huge_pte(pte, vma, NULL, 0); set_huge_pte_at(mm, address, ptep, pte); pages++; } diff --git a/mm/migrate.c b/mm/migrate.c index c38778610aa..2fd8b4af474 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -160,8 +160,10 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, if (is_write_migration_entry(entry)) pte = pte_mkwrite(pte); #ifdef CONFIG_HUGETLB_PAGE - if (PageHuge(new)) + if (PageHuge(new)) { pte = pte_mkhuge(pte); + pte = arch_make_huge_pte(pte, vma, new, 0); + } #endif flush_cache_page(vma, addr, pte_pfn(pte)); 
set_pte_at(mm, addr, ptep, pte); diff --git a/mm/mmap.c b/mm/mmap.c index 35730ee9d51..d1e4124f3d0 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2943,7 +2943,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) * vma in this mm is backed by the same anon_vma or address_space. * * We can take all the locks in random order because the VM code - * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never + * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never * takes more than one of them in a row. Secondly we're protected * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex. * diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 25bfce0666e..4925a02ae7e 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -249,12 +249,12 @@ static void hci_conn_disconnect(struct hci_conn *conn) __u8 reason = hci_proto_disconn_ind(conn); switch (conn->type) { - case ACL_LINK: - hci_acl_disconn(conn, reason); - break; case AMP_LINK: hci_amp_disconn(conn, reason); break; + default: + hci_acl_disconn(conn, reason); + break; } } diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 68a9587c969..5abefb12891 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -859,6 +859,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) skb_pull(skb, sizeof(code)); + /* + * The SMP context must be initialized for all other PDUs except + * pairing and security requests. If we get any other PDU when + * not initialized simply disconnect (done if this function + * returns an error). + */ + if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ && + !conn->smp_chan) { + BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code); + kfree_skb(skb); + return -ENOTSUPP; + } + switch (code) { case SMP_CMD_PAIRING_REQ: reason = smp_cmd_pairing_req(conn, skb); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index b29dacf900f..e6e1cbe863f 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -1781,10 +1781,13 @@ static ssize_t pktgen_thread_write(struct file *file, return -EFAULT; i += len; mutex_lock(&pktgen_thread_lock); - pktgen_add_device(t, f); + ret = pktgen_add_device(t, f); mutex_unlock(&pktgen_thread_lock); - ret = count; - sprintf(pg_result, "OK: add_device=%s", f); + if (!ret) { + ret = count; + sprintf(pg_result, "OK: add_device=%s", f); + } else + sprintf(pg_result, "ERROR: can not add device %s", f); goto out; } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index a9a2ae3e221..32443ebc3e8 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -683,7 +683,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) new->network_header = old->network_header; new->mac_header = old->mac_header; new->inner_transport_header = old->inner_transport_header; - new->inner_network_header = old->inner_transport_header; + new->inner_network_header = old->inner_network_header; skb_dst_copy(new, old); new->rxhash = old->rxhash; new->ooo_okay = old->ooo_okay; diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 291f2ed7cc3..cdf2e707bb1 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -310,6 +310,12 @@ void tcp_slow_start(struct tcp_sock *tp) { int cnt; /* increase in packets */ unsigned int delta = 0; + u32 snd_cwnd = tp->snd_cwnd; + + if (unlikely(!snd_cwnd)) { + pr_err_once("snd_cwnd is nul, please report this bug.\n"); + snd_cwnd = 1U; + } /* RFC3465: ABC Slow start * Increase only after a full MSS of bytes is acked @@ -324,7 +330,7 
@@ void tcp_slow_start(struct tcp_sock *tp) if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh) cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */ else - cnt = tp->snd_cwnd; /* exponential increase */ + cnt = snd_cwnd; /* exponential increase */ /* RFC3465: ABC * We MAY increase by 2 if discovered delayed ack @@ -334,11 +340,11 @@ void tcp_slow_start(struct tcp_sock *tp) tp->bytes_acked = 0; tp->snd_cwnd_cnt += cnt; - while (tp->snd_cwnd_cnt >= tp->snd_cwnd) { - tp->snd_cwnd_cnt -= tp->snd_cwnd; + while (tp->snd_cwnd_cnt >= snd_cwnd) { + tp->snd_cwnd_cnt -= snd_cwnd; delta++; } - tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp); + tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp); } EXPORT_SYMBOL_GPL(tcp_slow_start); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 18f97ca76b0..ad70a962c20 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3504,6 +3504,11 @@ static bool tcp_process_frto(struct sock *sk, int flag) } } else { if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { + if (!tcp_packets_in_flight(tp)) { + tcp_enter_frto_loss(sk, 2, flag); + return true; + } + /* Prevent sending of new data. */ tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)); @@ -5649,8 +5654,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, * the remote receives only the retransmitted (regular) SYNs: either * the original SYN-data or the corresponding SYN-ACK is lost. */ - syn_drop = (cookie->len <= 0 && data && - inet_csk(sk)->icsk_retransmits); + syn_drop = (cookie->len <= 0 && data && tp->total_retrans); tcp_fastopen_cache_set(sk, mss, cookie, syn_drop); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 70b09ef2463..eadb693eef5 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -496,6 +496,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) * errors returned from accept(). */ inet_csk_reqsk_queue_drop(sk, req, prev); + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); goto out; case TCP_SYN_SENT: @@ -1500,8 +1501,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) * clogging syn queue with openreqs with exponentially increasing * timeout. 
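The net/ipv4/tcp_cong.c change above makes tcp_slow_start() work on a local snapshot of snd_cwnd, guarding against an unexpected zero value that would otherwise stall the credit loop, and writes back only the final clamped result. A simplified userspace rendering of that arithmetic (field names mirror the kernel's, but this is an illustration, not the kernel code):

#include <stdio.h>
#include <stdint.h>

struct tcp_state {
	uint32_t snd_cwnd;
	uint32_t snd_cwnd_cnt;
	uint32_t snd_cwnd_clamp;
};

static void slow_start_step(struct tcp_state *tp)
{
	uint32_t snd_cwnd = tp->snd_cwnd;	/* local snapshot */
	uint32_t cnt, delta = 0;

	if (snd_cwnd == 0) {
		fprintf(stderr, "snd_cwnd is zero, clamping to 1\n");
		snd_cwnd = 1;
	}

	cnt = snd_cwnd;			/* exponential increase */
	tp->snd_cwnd_cnt += cnt;

	/* Convert accumulated credit into whole-segment cwnd growth. */
	while (tp->snd_cwnd_cnt >= snd_cwnd) {
		tp->snd_cwnd_cnt -= snd_cwnd;
		delta++;
	}
	tp->snd_cwnd = (snd_cwnd + delta < tp->snd_cwnd_clamp) ?
			snd_cwnd + delta : tp->snd_cwnd_clamp;
}

int main(void)
{
	struct tcp_state tp = { .snd_cwnd = 10, .snd_cwnd_cnt = 0,
				.snd_cwnd_clamp = 100 };

	slow_start_step(&tp);
	printf("cwnd=%u cnt=%u\n", tp.snd_cwnd, tp.snd_cwnd_cnt);
	return 0;
}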
*/ - if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) + if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); goto drop; + } req = inet_reqsk_alloc(&tcp_request_sock_ops); if (!req) @@ -1666,6 +1669,7 @@ drop_and_release: drop_and_free: reqsk_free(req); drop: + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); return 0; } EXPORT_SYMBOL(tcp_v4_conn_request); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 420e5632638..1b5d8cb9b12 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1660,6 +1660,7 @@ static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev) if (dev->addr_len != IEEE802154_ADDR_LEN) return -1; memcpy(eui, dev->dev_addr, 8); + eui[0] ^= 2; return 0; } diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 8edf2601065..7a778b9a7b8 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -380,7 +380,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) if (skb->protocol == htons(ETH_P_IPV6)) { sin->sin6_addr = ipv6_hdr(skb)->saddr; if (np->rxopt.all) - datagram_recv_ctl(sk, msg, skb); + ip6_datagram_recv_ctl(sk, msg, skb); if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) sin->sin6_scope_id = IP6CB(skb)->iif; } else { @@ -468,7 +468,8 @@ out: } -int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) +int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, + struct sk_buff *skb) { struct ipv6_pinfo *np = inet6_sk(sk); struct inet6_skb_parm *opt = IP6CB(skb); @@ -597,11 +598,12 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) } return 0; } +EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl); -int datagram_send_ctl(struct net *net, struct sock *sk, - struct msghdr *msg, struct flowi6 *fl6, - struct ipv6_txoptions *opt, - int *hlimit, int *tclass, int *dontfrag) +int ip6_datagram_send_ctl(struct net *net, struct sock *sk, + struct msghdr *msg, struct flowi6 *fl6, + struct ipv6_txoptions *opt, + int *hlimit, int *tclass, int *dontfrag) { struct in6_pktinfo *src_info; struct cmsghdr *cmsg; @@ -871,4 +873,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk, exit_f: return err; } -EXPORT_SYMBOL_GPL(datagram_send_ctl); +EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl); diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 29124b7a04c..d6de4b44725 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -365,8 +365,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, msg.msg_control = (void*)(fl->opt+1); memset(&flowi6, 0, sizeof(flowi6)); - err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk, - &junk, &junk); + err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, + &junk, &junk, &junk); if (err) goto done; err = -EINVAL; diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index c727e471275..131dd097736 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -960,7 +960,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb, int ret; if (!ip6_tnl_xmit_ctl(t)) - return -1; + goto tx_err; switch (skb->protocol) { case htons(ETH_P_IP): diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index ee94d31c9d4..d1e2e8ef29c 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -476,8 +476,8 @@ sticky_done: msg.msg_controllen = optlen; msg.msg_control = (void*)(opt+1); - retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, - &junk); + retv = 
ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, + &junk, &junk); if (retv) goto done; update: @@ -1002,7 +1002,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, release_sock(sk); if (skb) { - int err = datagram_recv_ctl(sk, &msg, skb); + int err = ip6_datagram_recv_ctl(sk, &msg, skb); kfree_skb(skb); if (err) return err; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 6cd29b1e8b9..70fa8144999 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -507,7 +507,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, sock_recv_ts_and_drops(msg, sk, skb); if (np->rxopt.all) - datagram_recv_ctl(sk, msg, skb); + ip6_datagram_recv_ctl(sk, msg, skb); err = copied; if (flags & MSG_TRUNC) @@ -822,8 +822,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(struct ipv6_txoptions); - err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, - &hlimit, &tclass, &dontfrag); + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, + &hlimit, &tclass, &dontfrag); if (err < 0) { fl6_sock_release(flowlabel); return err; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index e229a3bc345..363d8b7772e 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -928,7 +928,7 @@ restart: dst_hold(&rt->dst); read_unlock_bh(&table->tb6_lock); - if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP)) + if (!rt->n && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL))) nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); else if (!(rt->dst.flags & DST_HOST)) nrt = rt6_alloc_clone(rt, &fl6->daddr); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 93825dd3a7c..4f43537197e 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -423,6 +423,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, } inet_csk_reqsk_queue_drop(sk, req, prev); + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); goto out; case TCP_SYN_SENT: @@ -958,8 +959,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) goto drop; } - if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) + if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); goto drop; + } req = inet6_reqsk_alloc(&tcp6_request_sock_ops); if (req == NULL) @@ -1108,6 +1111,7 @@ drop_and_release: drop_and_free: reqsk_free(req); drop: + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); return 0; /* don't send reset */ } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index dfaa29b8b29..fb083295ff0 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -443,7 +443,7 @@ try_again: ip_cmsg_recv(msg, skb); } else { if (np->rxopt.all) - datagram_recv_ctl(sk, msg, skb); + ip6_datagram_recv_ctl(sk, msg, skb); } err = copied; @@ -1153,8 +1153,8 @@ do_udp_sendmsg: memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(*opt); - err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, - &hlimit, &tclass, &dontfrag); + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, + &hlimit, &tclass, &dontfrag); if (err < 0) { fl6_sock_release(flowlabel); return err; diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 1a9f3723c13..2ac884d0e89 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -168,6 +168,51 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id) } +/* Lookup the tunnel socket, possibly involving the fs code if the socket is + * owned by userspace. 
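The addrconf_ifid_eui64() change above (net/ipv6/addrconf.c) starts inverting the universal/local bit when an interface identifier is derived from an IEEE 802.15.4 long address, which is the usual EUI-64 derivation described in RFC 4291. A small standalone sketch of that derivation (illustrative only):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Derive a 64-bit IPv6 interface identifier from an 8-byte EUI-64:
 * copy the address and invert the universal/local bit (bit 0x02 of
 * the first byte), per RFC 4291 Appendix A. */
static void eui64_to_ifid(uint8_t ifid[8], const uint8_t eui64[8])
{
	memcpy(ifid, eui64, 8);
	ifid[0] ^= 0x02;
}

int main(void)
{
	const uint8_t addr[8] = { 0x00, 0x12, 0x4b, 0x00, 0x01, 0x02, 0x03, 0x04 };
	uint8_t ifid[8];
	int i;

	eui64_to_ifid(ifid, addr);
	for (i = 0; i < 8; i++)
		printf("%02x%s", ifid[i], i == 7 ? "\n" : ":");
	return 0;
}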
A struct sock returned from this function must be + * released using l2tp_tunnel_sock_put once you're done with it. + */ +struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel) +{ + int err = 0; + struct socket *sock = NULL; + struct sock *sk = NULL; + + if (!tunnel) + goto out; + + if (tunnel->fd >= 0) { + /* Socket is owned by userspace, who might be in the process + * of closing it. Look the socket up using the fd to ensure + * consistency. + */ + sock = sockfd_lookup(tunnel->fd, &err); + if (sock) + sk = sock->sk; + } else { + /* Socket is owned by kernelspace */ + sk = tunnel->sock; + } + +out: + return sk; +} +EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup); + +/* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */ +void l2tp_tunnel_sock_put(struct sock *sk) +{ + struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); + if (tunnel) { + if (tunnel->fd >= 0) { + /* Socket is owned by userspace */ + sockfd_put(sk->sk_socket); + } + sock_put(sk); + } +} +EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); + /* Lookup a session by id in the global session list */ static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) @@ -1123,8 +1168,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len struct udphdr *uh; struct inet_sock *inet; __wsum csum; - int old_headroom; - int new_headroom; int headroom; int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; int udp_len; @@ -1136,16 +1179,12 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len */ headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + hdr_len; - old_headroom = skb_headroom(skb); if (skb_cow_head(skb, headroom)) { kfree_skb(skb); return NET_XMIT_DROP; } - new_headroom = skb_headroom(skb); skb_orphan(skb); - skb->truesize += new_headroom - old_headroom; - /* Setup L2TP header */ session->build_header(session, __skb_push(skb, hdr_len)); @@ -1607,6 +1646,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 tunnel->old_sk_destruct = sk->sk_destruct; sk->sk_destruct = &l2tp_tunnel_destruct; tunnel->sock = sk; + tunnel->fd = fd; lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); sk->sk_allocation = GFP_ATOMIC; @@ -1642,24 +1682,32 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create); */ int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) { - int err = 0; - struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL; + int err = -EBADF; + struct socket *sock = NULL; + struct sock *sk = NULL; + + sk = l2tp_tunnel_sock_lookup(tunnel); + if (!sk) + goto out; + + sock = sk->sk_socket; + BUG_ON(!sock); /* Force the tunnel socket to close. This will eventually * cause the tunnel to be deleted via the normal socket close * mechanisms when userspace closes the tunnel socket. */ - if (sock != NULL) { - err = inet_shutdown(sock, 2); + err = inet_shutdown(sock, 2); - /* If the tunnel's socket was created by the kernel, - * close the socket here since the socket was not - * created by userspace. - */ - if (sock->file == NULL) - err = inet_release(sock); - } + /* If the tunnel's socket was created by the kernel, + * close the socket here since the socket was not + * created by userspace. 
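The l2tp hunks above pair l2tp_tunnel_sock_lookup() with l2tp_tunnel_sock_put(): the lookup takes an extra file reference only when the tunnel socket is owned by userspace (tunnel->fd >= 0), and the put releases whatever the matching lookup took. A toy refcounting analogue of that asymmetric get/put discipline (plain C, all names invented for illustration):

#include <stdlib.h>
#include <stdbool.h>

struct tunnel_sock {
	int refs;		/* object references */
	int file_refs;		/* stand-in for the fd/file reference */
	bool user_owned;	/* created from a userspace fd? */
};

/* Take a reference for the caller; for user-owned sockets this also
 * takes the extra file reference (the sockfd_lookup() branch in spirit). */
static struct tunnel_sock *tunnel_sock_get(struct tunnel_sock *ts)
{
	if (!ts)
		return NULL;
	if (ts->user_owned)
		ts->file_refs++;
	ts->refs++;
	return ts;
}

/* Release exactly what the matching get took. */
static void tunnel_sock_put(struct tunnel_sock *ts)
{
	if (!ts)
		return;
	if (ts->user_owned)
		ts->file_refs--;
	if (--ts->refs == 0)
		free(ts);
}

int main(void)
{
	struct tunnel_sock *ts = calloc(1, sizeof(*ts));

	if (!ts)
		return 1;
	ts->refs = 1;		/* creation reference */
	ts->file_refs = 1;	/* the creating fd also pins the file */
	ts->user_owned = true;

	tunnel_sock_put(tunnel_sock_get(ts));	/* balanced lookup/put */
	tunnel_sock_put(ts);			/* drop the creation reference */
	return 0;
}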
+ */ + if (sock->file == NULL) + err = inet_release(sock); + l2tp_tunnel_sock_put(sk); +out: return err; } EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 56d583e083a..e62204cad4f 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -188,7 +188,8 @@ struct l2tp_tunnel { int (*recv_payload_hook)(struct sk_buff *skb); void (*old_sk_destruct)(struct sock *); struct sock *sock; /* Parent socket */ - int fd; + int fd; /* Parent fd, if tunnel socket + * was created by userspace */ uint8_t priv[0]; /* private data */ }; @@ -228,6 +229,8 @@ out: return tunnel; } +extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel); +extern void l2tp_tunnel_sock_put(struct sock *sk); extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 927547171bc..8ee4a86ae99 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -554,8 +554,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk, memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(struct ipv6_txoptions); - err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, - &hlimit, &tclass, &dontfrag); + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, + &hlimit, &tclass, &dontfrag); if (err < 0) { fl6_sock_release(flowlabel); return err; @@ -646,7 +646,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { - struct inet_sock *inet = inet_sk(sk); + struct ipv6_pinfo *np = inet6_sk(sk); struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name; size_t copied = 0; int err = -EOPNOTSUPP; @@ -688,8 +688,8 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk, lsa->l2tp_scope_id = IP6CB(skb)->iif; } - if (inet->cmsg_flags) - ip_cmsg_recv(msg, skb); + if (np->rxopt.all) + ip6_datagram_recv_ctl(sk, msg, skb); if (flags & MSG_TRUNC) copied = skb->len; diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 286366ef893..716605c241f 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -388,8 +388,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) struct l2tp_session *session; struct l2tp_tunnel *tunnel; struct pppol2tp_session *ps; - int old_headroom; - int new_headroom; int uhlen, headroom; if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) @@ -408,7 +406,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) if (tunnel == NULL) goto abort_put_sess; - old_headroom = skb_headroom(skb); uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; headroom = NET_SKB_PAD + sizeof(struct iphdr) + /* IP header */ @@ -418,9 +415,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) if (skb_cow_head(skb, headroom)) goto abort_put_sess_tun; - new_headroom = skb_headroom(skb); - skb->truesize += new_headroom - old_headroom; - /* Setup PPP header */ __skb_push(skb, sizeof(ppph)); skb->data[0] = ppph[0]; diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index a9327e2e48c..670cbc3518d 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -35,10 +35,11 @@ /* Must be called with rcu_read_lock. 
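The l2tp_xmit_skb() and pppol2tp_xmit() hunks above keep the pattern of computing the worst-case header space once and growing the buffer's headroom before any header is pushed, while dropping the manual truesize adjustment. A rough standalone sketch of that headroom bookkeeping with a simplified buffer model (sizes and names are illustrative):

#include <stdlib.h>
#include <string.h>

struct buf {
	unsigned char *head;	/* start of the allocation */
	unsigned char *data;	/* start of the payload */
	size_t len;		/* payload length */
};

/* Make sure at least 'headroom' bytes are free in front of the payload,
 * reallocating and shifting it once if necessary. */
static int ensure_headroom(struct buf *b, size_t headroom)
{
	unsigned char *nhead;

	if ((size_t)(b->data - b->head) >= headroom)
		return 0;

	nhead = malloc(headroom + b->len);
	if (!nhead)
		return -1;
	memcpy(nhead + headroom, b->data, b->len);
	free(b->head);
	b->head = nhead;
	b->data = nhead + headroom;
	return 0;
}

/* Reserve space for a header of 'hlen' bytes in front of the payload. */
static unsigned char *buf_push(struct buf *b, size_t hlen)
{
	b->data -= hlen;
	b->len += hlen;
	return b->data;
}

int main(void)
{
	struct buf b;
	size_t headroom = 16 + 20 + 8 + 6;	/* pad + IPv4 + UDP + session hdr */

	b.head = malloc(64);
	b.data = b.head;
	b.len = 64;
	if (!b.head || ensure_headroom(&b, headroom))
		return 1;
	memset(buf_push(&b, 6), 0, 6);		/* session header placeholder */
	free(b.head);
	return 0;
}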
*/ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) { - if (unlikely(!vport)) { - kfree_skb(skb); - return; - } + if (unlikely(!vport)) + goto error; + + if (unlikely(skb_warn_if_lro(skb))) + goto error; /* Make our own copy of the packet. Otherwise we will mangle the * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). @@ -50,6 +51,10 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) skb_push(skb, ETH_HLEN); ovs_vport_receive(vport, skb); + return; + +error: + kfree_skb(skb); } /* Called with rcu_read_lock and bottom-halves disabled. */ @@ -169,9 +174,6 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb) goto error; } - if (unlikely(skb_warn_if_lro(skb))) - goto error; - skb->dev = netdev_vport->dev; len = skb->len; dev_queue_xmit(skb); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index e639645e8fe..c111bd0e083 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2361,13 +2361,15 @@ static int packet_release(struct socket *sock) packet_flush_mclist(sk); - memset(&req_u, 0, sizeof(req_u)); - - if (po->rx_ring.pg_vec) + if (po->rx_ring.pg_vec) { + memset(&req_u, 0, sizeof(req_u)); packet_set_ring(sk, &req_u, 1, 0); + } - if (po->tx_ring.pg_vec) + if (po->tx_ring.pg_vec) { + memset(&req_u, 0, sizeof(req_u)); packet_set_ring(sk, &req_u, 1, 1); + } fanout_release(sk); diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 298c0ddfb57..3d2acc7a9c8 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -438,18 +438,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (q->rate) { struct sk_buff_head *list = &sch->q; - delay += packet_len_2_sched_time(skb->len, q); - if (!skb_queue_empty(list)) { /* - * Last packet in queue is reference point (now). - * First packet in queue is already in flight, - * calculate this time bonus and substract + * Last packet in queue is reference point (now), + * calculate this time bonus and subtract * from delay. */ - delay -= now - netem_skb_cb(skb_peek(list))->time_to_send; + delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now; + delay = max_t(psched_tdiff_t, 0, delay); now = netem_skb_cb(skb_peek_tail(list))->time_to_send; } + + delay += packet_len_2_sched_time(skb->len, q); } cb->time_to_send = now + delay; diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 159b9bc5d63..d8420ae614d 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -71,7 +71,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key) return; if (atomic_dec_and_test(&key->refcnt)) { - kfree(key); + kzfree(key); SCTP_DBG_OBJCNT_DEC(keys); } } diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 17a001bac2c..1a9c5fb7731 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -249,6 +249,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep) /* Final destructor for endpoint. */ static void sctp_endpoint_destroy(struct sctp_endpoint *ep) { + int i; + SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); /* Free up the HMAC transform. 
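The sch_netem hunk above reworks rate shaping so that, when the queue is not empty, the tail packet's departure time becomes the reference point, the portion of the delay already covered by waiting for it is subtracted (clamped at zero), and the per-byte transmission time is added last. A simplified rendering of that computation (times in microseconds; helper names are invented):

#include <stdio.h>
#include <stdint.h>

/* Transmission time for 'len' bytes at 'rate' bytes per second, in usec. */
static int64_t len_to_time_us(uint32_t len, uint32_t rate)
{
	return (int64_t)len * 1000000 / rate;
}

/* Compute when a newly enqueued packet should leave the qdisc.
 * last_tts is the departure time of the queue tail, or -1 if empty. */
static int64_t departure_time(int64_t now, int64_t delay, int64_t last_tts,
			      uint32_t len, uint32_t rate)
{
	if (last_tts >= 0) {
		/* The tail packet is the reference point: subtract the part
		 * of the delay already spent waiting for it to leave. */
		delay -= last_tts - now;
		if (delay < 0)
			delay = 0;
		now = last_tts;
	}
	delay += len_to_time_us(len, rate);
	return now + delay;
}

int main(void)
{
	/* 1500-byte packet, 1 MB/s, 10 ms extra delay, tail leaves 4 ms from now */
	printf("%lld\n", (long long)departure_time(0, 10000, 4000, 1500, 1000000));
	return 0;
}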
*/ @@ -271,6 +273,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep) sctp_inq_free(&ep->base.inqueue); sctp_bind_addr_free(&ep->base.bind_addr); + for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i) + memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE); + /* Remove and free the port */ if (sctp_sk(ep->base.sk)->bind_hash) sctp_put_port(ep->base.sk); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9e65758cb03..cedd9bf67b8 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3390,7 +3390,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk, ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey); out: - kfree(authkey); + kzfree(authkey); return ret; } diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index bfa31714581..fb20f25ddec 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -98,9 +98,25 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task) list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); } +static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue) +{ + struct list_head *q = &queue->tasks[queue->priority]; + struct rpc_task *task; + + if (!list_empty(q)) { + task = list_first_entry(q, struct rpc_task, u.tk_wait.list); + if (task->tk_owner == queue->owner) + list_move_tail(&task->u.tk_wait.list, q); + } +} + static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) { - queue->priority = priority; + if (queue->priority != priority) { + /* Fairness: rotate the list when changing priority */ + rpc_rotate_queue_owner(queue); + queue->priority = priority; + } } static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid) diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 0a148c9d2a5..0f679df7d07 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -465,7 +465,7 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp, } /* - * See net/ipv6/datagram.c : datagram_recv_ctl + * See net/ipv6/datagram.c : ip6_datagram_recv_ctl */ static int svc_udp_get_dest_address6(struct svc_rqst *rqstp, struct cmsghdr *cmh) diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 01592d7d478..45f1618c8e2 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -1358,7 +1358,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info, &iwe, IW_EV_UINT_LEN); } - buf = kmalloc(30, GFP_ATOMIC); + buf = kmalloc(31, GFP_ATOMIC); if (buf) { memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVCUSTOM; diff --git a/samples/seccomp/Makefile b/samples/seccomp/Makefile index bbbd276659b..7203e66dcd6 100644 --- a/samples/seccomp/Makefile +++ b/samples/seccomp/Makefile @@ -19,6 +19,7 @@ bpf-direct-objs := bpf-direct.o # Try to match the kernel target. 
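The SCTP hunks above free key material with kzfree() and memset the endpoint's shared secrets before the structure goes away, so stale key bytes do not linger in freed memory. A minimal userspace sketch of the same zero-before-free idea; a plain memset right before free() may be optimized out, so the loop below writes through a volatile pointer (explicit_bzero() or memset_s() are alternatives where available):

#include <stdlib.h>
#include <stddef.h>

/* Zero a buffer in a way the compiler is not allowed to elide,
 * then release it. */
static void zero_and_free(void *p, size_t n)
{
	volatile unsigned char *vp = p;

	if (!p)
		return;
	while (n--)
		*vp++ = 0;
	free(p);
}

int main(void)
{
	unsigned char *key = malloc(32);

	if (!key)
		return 1;
	/* ... use key as secret material ... */
	zero_and_free(key, 32);
	return 0;
}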
ifndef CONFIG_64BIT +ifndef CROSS_COMPILE # s390 has -m31 flag to build 31 bit binaries ifndef CONFIG_S390 @@ -35,6 +36,7 @@ HOSTLOADLIBES_bpf-direct += $(MFLAG) HOSTLOADLIBES_bpf-fancy += $(MFLAG) HOSTLOADLIBES_dropper += $(MFLAG) endif +endif # Tell kbuild to always build the programs always := $(hostprogs-y) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 4d2c7dfdaab..2bb08a962ce 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -230,12 +230,12 @@ our $Inline = qr{inline|__always_inline|noinline}; our $Member = qr{->$Ident|\.$Ident|\[[^]]*\]}; our $Lval = qr{$Ident(?:$Member)*}; -our $Float_hex = qr{(?i:0x[0-9a-f]+p-?[0-9]+[fl]?)}; -our $Float_dec = qr{(?i:((?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?))}; -our $Float_int = qr{(?i:[0-9]+e-?[0-9]+[fl]?)}; +our $Float_hex = qr{(?i)0x[0-9a-f]+p-?[0-9]+[fl]?}; +our $Float_dec = qr{(?i)(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?}; +our $Float_int = qr{(?i)[0-9]+e-?[0-9]+[fl]?}; our $Float = qr{$Float_hex|$Float_dec|$Float_int}; -our $Constant = qr{(?:$Float|(?i:(?:0x[0-9a-f]+|[0-9]+)[ul]*))}; -our $Assignment = qr{(?:\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=)}; +our $Constant = qr{$Float|(?i)(?:0x[0-9a-f]+|[0-9]+)[ul]*}; +our $Assignment = qr{\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=}; our $Compare = qr{<=|>=|==|!=|<|>}; our $Operators = qr{ <=|>=|==|!=| diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig index a210c8d7b4b..3b98159d964 100644 --- a/sound/soc/fsl/Kconfig +++ b/sound/soc/fsl/Kconfig @@ -108,13 +108,18 @@ if SND_IMX_SOC config SND_SOC_IMX_SSI tristate -config SND_SOC_IMX_PCM_FIQ +config SND_SOC_IMX_PCM tristate + +config SND_SOC_IMX_PCM_FIQ + bool select FIQ + select SND_SOC_IMX_PCM config SND_SOC_IMX_PCM_DMA - tristate + bool select SND_SOC_DMAENGINE_PCM + select SND_SOC_IMX_PCM config SND_SOC_IMX_AUDMUX tristate diff --git a/sound/soc/fsl/Makefile b/sound/soc/fsl/Makefile index ec1457915d7..afd34794db5 100644 --- a/sound/soc/fsl/Makefile +++ b/sound/soc/fsl/Makefile @@ -41,10 +41,7 @@ endif obj-$(CONFIG_SND_SOC_IMX_SSI) += snd-soc-imx-ssi.o obj-$(CONFIG_SND_SOC_IMX_AUDMUX) += snd-soc-imx-audmux.o -obj-$(CONFIG_SND_SOC_IMX_PCM_FIQ) += snd-soc-imx-pcm-fiq.o -snd-soc-imx-pcm-fiq-y := imx-pcm-fiq.o imx-pcm.o -obj-$(CONFIG_SND_SOC_IMX_PCM_DMA) += snd-soc-imx-pcm-dma.o -snd-soc-imx-pcm-dma-y := imx-pcm-dma.o imx-pcm.o +obj-$(CONFIG_SND_SOC_IMX_PCM) += snd-soc-imx-pcm.o # i.MX Machine Support snd-soc-eukrea-tlv320-objs := eukrea-tlv320.o diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c index bf363d8d044..500f8ce55d7 100644 --- a/sound/soc/fsl/imx-pcm-dma.c +++ b/sound/soc/fsl/imx-pcm-dma.c @@ -154,26 +154,7 @@ static struct snd_soc_platform_driver imx_soc_platform_mx2 = { .pcm_free = imx_pcm_free, }; -static int imx_soc_platform_probe(struct platform_device *pdev) +int imx_pcm_dma_init(struct platform_device *pdev) { return snd_soc_register_platform(&pdev->dev, &imx_soc_platform_mx2); } - -static int imx_soc_platform_remove(struct platform_device *pdev) -{ - snd_soc_unregister_platform(&pdev->dev); - return 0; -} - -static struct platform_driver imx_pcm_driver = { - .driver = { - .name = "imx-pcm-audio", - .owner = THIS_MODULE, - }, - .probe = imx_soc_platform_probe, - .remove = imx_soc_platform_remove, -}; - -module_platform_driver(imx_pcm_driver); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:imx-pcm-audio"); diff --git a/sound/soc/fsl/imx-pcm-fiq.c b/sound/soc/fsl/imx-pcm-fiq.c index 5ec362ae4d0..920f945cb2f 100644 --- a/sound/soc/fsl/imx-pcm-fiq.c +++ 
b/sound/soc/fsl/imx-pcm-fiq.c @@ -281,7 +281,7 @@ static struct snd_soc_platform_driver imx_soc_platform_fiq = { .pcm_free = imx_pcm_fiq_free, }; -static int imx_soc_platform_probe(struct platform_device *pdev) +int imx_pcm_fiq_init(struct platform_device *pdev) { struct imx_ssi *ssi = platform_get_drvdata(pdev); int ret; @@ -314,23 +314,3 @@ failed_register: return ret; } - -static int imx_soc_platform_remove(struct platform_device *pdev) -{ - snd_soc_unregister_platform(&pdev->dev); - return 0; -} - -static struct platform_driver imx_pcm_driver = { - .driver = { - .name = "imx-fiq-pcm-audio", - .owner = THIS_MODULE, - }, - - .probe = imx_soc_platform_probe, - .remove = imx_soc_platform_remove, -}; - -module_platform_driver(imx_pcm_driver); - -MODULE_LICENSE("GPL"); diff --git a/sound/soc/fsl/imx-pcm.c b/sound/soc/fsl/imx-pcm.c index 0c9f188ddc6..0d0625bfcb6 100644 --- a/sound/soc/fsl/imx-pcm.c +++ b/sound/soc/fsl/imx-pcm.c @@ -31,6 +31,7 @@ int snd_imx_pcm_mmap(struct snd_pcm_substream *substream, runtime->dma_bytes); return ret; } +EXPORT_SYMBOL_GPL(snd_imx_pcm_mmap); static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream) { @@ -79,6 +80,7 @@ int imx_pcm_new(struct snd_soc_pcm_runtime *rtd) out: return ret; } +EXPORT_SYMBOL_GPL(imx_pcm_new); void imx_pcm_free(struct snd_pcm *pcm) { @@ -100,6 +102,39 @@ void imx_pcm_free(struct snd_pcm *pcm) buf->area = NULL; } } +EXPORT_SYMBOL_GPL(imx_pcm_free); + +static int imx_pcm_probe(struct platform_device *pdev) +{ + if (strcmp(pdev->id_entry->name, "imx-fiq-pcm-audio") == 0) + return imx_pcm_fiq_init(pdev); + + return imx_pcm_dma_init(pdev); +} + +static int imx_pcm_remove(struct platform_device *pdev) +{ + snd_soc_unregister_platform(&pdev->dev); + return 0; +} + +static struct platform_device_id imx_pcm_devtype[] = { + { .name = "imx-pcm-audio", }, + { .name = "imx-fiq-pcm-audio", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, imx_pcm_devtype); + +static struct platform_driver imx_pcm_driver = { + .driver = { + .name = "imx-pcm", + .owner = THIS_MODULE, + }, + .id_table = imx_pcm_devtype, + .probe = imx_pcm_probe, + .remove = imx_pcm_remove, +}; +module_platform_driver(imx_pcm_driver); MODULE_DESCRIPTION("Freescale i.MX PCM driver"); MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); diff --git a/sound/soc/fsl/imx-pcm.h b/sound/soc/fsl/imx-pcm.h index 83c0ed7d55c..5ae13a13a35 100644 --- a/sound/soc/fsl/imx-pcm.h +++ b/sound/soc/fsl/imx-pcm.h @@ -30,4 +30,22 @@ int snd_imx_pcm_mmap(struct snd_pcm_substream *substream, int imx_pcm_new(struct snd_soc_pcm_runtime *rtd); void imx_pcm_free(struct snd_pcm *pcm); +#ifdef CONFIG_SND_SOC_IMX_PCM_DMA +int imx_pcm_dma_init(struct platform_device *pdev); +#else +static inline int imx_pcm_dma_init(struct platform_device *pdev) +{ + return -ENODEV; +} +#endif + +#ifdef CONFIG_SND_SOC_IMX_PCM_FIQ +int imx_pcm_fiq_init(struct platform_device *pdev); +#else +static inline int imx_pcm_fiq_init(struct platform_device *pdev) +{ + return -ENODEV; +} +#endif + #endif /* _IMX_PCM_H */ diff --git a/tools/vm/.gitignore b/tools/vm/.gitignore new file mode 100644 index 00000000000..44f095fa260 --- /dev/null +++ b/tools/vm/.gitignore @@ -0,0 +1,2 @@ +slabinfo +page-types |
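The new imx-pcm.h block above uses the common header pattern of declaring the real function when the backing option is enabled and providing a static inline stub returning -ENODEV when it is not, so callers need no #ifdefs of their own. A generic sketch of that pattern (the option and function names below are placeholders, not the real ones):

/* foo_backend.h, illustrative header-only example */
#ifndef FOO_BACKEND_H
#define FOO_BACKEND_H

#include <errno.h>

struct foo_device;	/* opaque to callers */

#ifdef CONFIG_FOO_BACKEND
int foo_backend_init(struct foo_device *dev);
#else
/* Backend compiled out: callers still compile and link, and simply
 * see -ENODEV at run time. */
static inline int foo_backend_init(struct foo_device *dev)
{
	return -ENODEV;
}
#endif

#endif /* FOO_BACKEND_H */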