182 files changed, 1612 insertions, 934 deletions
diff --git a/.mailmap b/.mailmap
@@ -51,6 +51,7 @@ Greg Kroah-Hartman <gregkh@suse.de> Greg Kroah-Hartman <greg@kroah.com> Henk Vergonet <Henk.Vergonet@gmail.com> Henrik Kretzschmar <henne@nachtwindheim.de> +Henrik Rydberg <rydberg@bitmath.org> Herbert Xu <herbert@gondor.apana.org.au> Jacob Shin <Jacob.Shin@amd.com> James Bottomley <jejb@mulgrave.(none)> diff --git a/MAINTAINERS b/MAINTAINERS index ddb9ac8d32b..3589d67437f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -724,15 +724,15 @@ F: include/uapi/linux/apm_bios.h F: drivers/char/apm-emulation.c APPLE BCM5974 MULTITOUCH DRIVER -M: Henrik Rydberg <rydberg@euromail.se> +M: Henrik Rydberg <rydberg@bitmath.org> L: linux-input@vger.kernel.org -S: Maintained +S: Odd fixes F: drivers/input/mouse/bcm5974.c APPLE SMC DRIVER -M: Henrik Rydberg <rydberg@euromail.se> +M: Henrik Rydberg <rydberg@bitmath.org> L: lm-sensors@lm-sensors.org -S: Maintained +S: Odd fixes F: drivers/hwmon/applesmc.c APPLETALK NETWORK LAYER @@ -2259,6 +2259,7 @@ F: drivers/gpio/gpio-bt8xx.c BTRFS FILE SYSTEM M: Chris Mason <clm@fb.com> M: Josef Bacik <jbacik@fb.com> +M: David Sterba <dsterba@suse.cz> L: linux-btrfs@vger.kernel.org W: http://btrfs.wiki.kernel.org/ Q: http://patchwork.kernel.org/project/linux-btrfs/list/ @@ -4940,10 +4941,10 @@ F: include/uapi/linux/input.h F: include/linux/input/ INPUT MULTITOUCH (MT) PROTOCOL -M: Henrik Rydberg <rydberg@euromail.se> +M: Henrik Rydberg <rydberg@bitmath.org> L: linux-input@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/rydberg/input-mt.git -S: Maintained +S: Odd fixes F: Documentation/input/multi-touch-protocol.txt F: drivers/input/input-mt.c K: \b(ABS|SYN)_MT_ @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 19 SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = -rc3 NAME = Diseased Newt # *DOCUMENTATION* @@ -391,6 +391,7 @@ USERINCLUDE := \ # Needed to be compatible with the O= option LINUXINCLUDE := \ -I$(srctree)/arch/$(hdr-arch)/include \ + -Iarch/$(hdr-arch)/include/generated/uapi \ -Iarch/$(hdr-arch)/include/generated \ $(if $(KBUILD_SRC), -I$(srctree)/include) \ -Iinclude \ diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index f9c86391103..715ae19bc7c 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -1046,6 +1046,15 @@ static int c_show(struct seq_file *m, void *v) seq_printf(m, "model name\t: %s rev %d (%s)\n", cpu_name, cpuid & 15, elf_platform); +#if defined(CONFIG_SMP) + seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", + per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ), + (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100); +#else + seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", + loops_per_jiffy / (500000/HZ), + (loops_per_jiffy / (5000/HZ)) % 100); +#endif /* dump out the processor features */ seq_puts(m, "Features\t: "); diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 5e6052e1885..86ef244c5a2 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -387,6 +387,18 @@ asmlinkage void secondary_start_kernel(void) void __init smp_cpus_done(unsigned int max_cpus) { + int cpu; + unsigned long bogosum = 0; + + for_each_online_cpu(cpu) + bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; + + printk(KERN_INFO "SMP: Total of %d processors activated " + "(%lu.%02lu BogoMIPS).\n", + num_online_cpus(), + bogosum / (500000/HZ), + (bogosum / (5000/HZ)) % 100); + hyp_mode_check(); } diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h index b1fa4e61471..fbe0ca31a99 100644 --- a/arch/arm64/include/asm/arch_timer.h +++ 
b/arch/arm64/include/asm/arch_timer.h @@ -21,6 +21,7 @@ #include <asm/barrier.h> +#include <linux/bug.h> #include <linux/init.h> #include <linux/types.h> diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index ace70682499..8e797b2fcc0 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -39,6 +39,7 @@ struct cpuinfo_arm64 { u64 reg_id_aa64pfr0; u64 reg_id_aa64pfr1; + u32 reg_id_dfr0; u32 reg_id_isar0; u32 reg_id_isar1; u32 reg_id_isar2; @@ -51,6 +52,10 @@ struct cpuinfo_arm64 { u32 reg_id_mmfr3; u32 reg_id_pfr0; u32 reg_id_pfr1; + + u32 reg_mvfr0; + u32 reg_mvfr1; + u32 reg_mvfr2; }; DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data); diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 286b1bec547..f9be30ea1cb 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -31,6 +31,7 @@ #include <asm/fpsimd.h> #include <asm/hw_breakpoint.h> +#include <asm/pgtable-hwdef.h> #include <asm/ptrace.h> #include <asm/types.h> @@ -123,9 +124,6 @@ struct task_struct; /* Free all resources held by a thread. */ extern void release_thread(struct task_struct *); -/* Prepare to copy thread state - unlazy all lazy status */ -#define prepare_to_copy(tsk) do { } while (0) - unsigned long get_wchan(struct task_struct *p); #define cpu_relax() barrier() diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 49c9aefd24a..b780c6c76ee 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -44,7 +44,7 @@ #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) -#define __NR_compat_syscalls 386 +#define __NR_compat_syscalls 387 #endif #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 57b64174753..07d435cf2ee 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -147,6 +147,7 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur) * If we have AArch32, we care about 32-bit features for compat. These * registers should be RES0 otherwise. */ + diff |= CHECK(id_dfr0, boot, cur, cpu); diff |= CHECK(id_isar0, boot, cur, cpu); diff |= CHECK(id_isar1, boot, cur, cpu); diff |= CHECK(id_isar2, boot, cur, cpu); @@ -165,6 +166,10 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur) diff |= CHECK(id_pfr0, boot, cur, cpu); diff |= CHECK(id_pfr1, boot, cur, cpu); + diff |= CHECK(mvfr0, boot, cur, cpu); + diff |= CHECK(mvfr1, boot, cur, cpu); + diff |= CHECK(mvfr2, boot, cur, cpu); + /* * Mismatched CPU features are a recipe for disaster. Don't even * pretend to support them. 
@@ -189,6 +194,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1); info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1); + info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1); info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1); info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1); info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1); @@ -202,6 +208,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1); info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1); + info->reg_mvfr0 = read_cpuid(MVFR0_EL1); + info->reg_mvfr1 = read_cpuid(MVFR1_EL1); + info->reg_mvfr2 = read_cpuid(MVFR2_EL1); + cpuinfo_detect_icache_policy(info); check_local_cpu_errata(); diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 6fac253bc78..2bb4347d0ed 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -326,6 +326,7 @@ void __init efi_idmap_init(void) /* boot time idmap_pg_dir is incomplete, so fill in missing parts */ efi_setup_idmap(); + early_memunmap(memmap.map, memmap.map_end - memmap.map); } static int __init remap_region(efi_memory_desc_t *md, void **new) @@ -380,7 +381,6 @@ static int __init arm64_enter_virtual_mode(void) } mapsize = memmap.map_end - memmap.map; - early_memunmap(memmap.map, mapsize); if (efi_runtime_disabled()) { pr_info("EFI runtime services will be disabled.\n"); diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index fd027b101de..9b6f71db270 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -25,6 +25,7 @@ #include <linux/mm.h> #include <linux/moduleloader.h> #include <linux/vmalloc.h> +#include <asm/alternative.h> #include <asm/insn.h> #include <asm/sections.h> diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index b8099116675..20fe2932ad0 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -402,6 +402,7 @@ void __init setup_arch(char **cmdline_p) request_standard_resources(); efi_idmap_init(); + early_ioremap_reset(); unflatten_device_tree(); diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c index 4f93c67e63d..14944e5b28d 100644 --- a/arch/arm64/kernel/smp_spin_table.c +++ b/arch/arm64/kernel/smp_spin_table.c @@ -25,6 +25,7 @@ #include <asm/cacheflush.h> #include <asm/cpu_ops.h> #include <asm/cputype.h> +#include <asm/io.h> #include <asm/smp_plat.h> extern void secondary_holding_pen(void); diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c index 6f4bac969bf..23eada79439 100644 --- a/arch/blackfin/mach-bf533/boards/stamp.c +++ b/arch/blackfin/mach-bf533/boards/stamp.c @@ -7,6 +7,7 @@ */ #include <linux/device.h> +#include <linux/delay.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index f3b51b57740..95c39b95e97 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ -11,7 +11,7 @@ -#define NR_syscalls 318 /* length of syscall table */ +#define NR_syscalls 319 /* length of syscall table */ /* * The following defines stop scripts/checksyscalls.sh from complaining about diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h index 4c2240c1b0c..461079560c7 100644 --- a/arch/ia64/include/uapi/asm/unistd.h +++ b/arch/ia64/include/uapi/asm/unistd.h @@ -331,5 +331,6 @@ #define __NR_getrandom 1339 #define __NR_memfd_create 1340 #define 
__NR_bpf 1341 +#define __NR_execveat 1342 #endif /* _UAPI_ASM_IA64_UNISTD_H */ diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 615ef81def4..e795cb84815 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -893,13 +893,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) } /* wrapper to silence section mismatch warning */ -int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) +int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu) { return _acpi_map_lsapic(handle, physid, pcpu); } -EXPORT_SYMBOL(acpi_map_lsapic); +EXPORT_SYMBOL(acpi_map_cpu); -int acpi_unmap_lsapic(int cpu) +int acpi_unmap_cpu(int cpu) { ia64_cpu_to_sapicid[cpu] = -1; set_cpu_present(cpu, false); @@ -910,8 +910,7 @@ int acpi_unmap_lsapic(int cpu) return (0); } - -EXPORT_SYMBOL(acpi_unmap_lsapic); +EXPORT_SYMBOL(acpi_unmap_cpu); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ #ifdef CONFIG_ACPI_NUMA diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index f5e96dffc63..fcf8b8cbca0 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -1779,6 +1779,7 @@ sys_call_table: data8 sys_getrandom data8 sys_memfd_create // 1340 data8 sys_bpf + data8 sys_execveat .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 19c36cba37c..a46f5f45570 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -86,6 +86,11 @@ extern int overlaps_crashkernel(unsigned long start, unsigned long size); extern void reserve_crashkernel(void); extern void machine_kexec_mask_interrupts(void); +static inline bool kdump_in_progress(void) +{ + return crashing_cpu >= 0; +} + #else /* !CONFIG_KEXEC */ static inline void crash_kexec_secondary(struct pt_regs *regs) { } @@ -106,6 +111,11 @@ static inline int crash_shutdown_unregister(crash_shutdown_t handler) return 0; } +static inline bool kdump_in_progress(void) +{ + return false; +} + #endif /* CONFIG_KEXEC */ #endif /* ! 
__ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index ce9577d693b..91062eef582 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -366,3 +366,4 @@ SYSCALL_SPU(seccomp) SYSCALL_SPU(getrandom) SYSCALL_SPU(memfd_create) SYSCALL_SPU(bpf) +COMPAT_SYS(execveat) diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index e0da021caa0..36b79c31eed 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -12,7 +12,7 @@ #include <uapi/asm/unistd.h> -#define __NR_syscalls 362 +#define __NR_syscalls 363 #define __NR__exit __NR_exit #define NR_syscalls __NR_syscalls diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index f55351f2e66..ef5b5b1f312 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h @@ -384,5 +384,6 @@ #define __NR_getrandom 359 #define __NR_memfd_create 360 #define __NR_bpf 361 +#define __NR_execveat 362 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 879b3aacac3..f96d1ec2418 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -330,7 +330,7 @@ void default_machine_kexec(struct kimage *image) * using debugger IPI. */ - if (crashing_cpu == -1) + if (!kdump_in_progress()) kexec_prepare_cpus(); pr_debug("kexec: Starting switchover sequence.\n"); diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 8ec017cb444..8b2d2dc8ef1 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -700,6 +700,7 @@ void start_secondary(void *unused) smp_store_cpu_info(cpu); set_dec(tb_ticks_per_jiffy); preempt_disable(); + cpu_callin_map[cpu] = 1; if (smp_ops->setup_cpu) smp_ops->setup_cpu(cpu); @@ -738,14 +739,6 @@ void start_secondary(void *unused) notify_cpu_starting(cpu); set_cpu_online(cpu, true); - /* - * CPU must be marked active and online before we signal back to the - * master, because the scheduler needs to see the cpu_online and - * cpu_active bits set. - */ - smp_wmb(); - cpu_callin_map[cpu] = 1; - local_irq_enable(); cpu_startup_entry(CPUHP_ONLINE); diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 469751d9200..b5682fd6c98 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -43,6 +43,7 @@ #include <asm/trace.h> #include <asm/firmware.h> #include <asm/plpar_wrappers.h> +#include <asm/kexec.h> #include <asm/fadump.h> #include "pseries.h" @@ -267,8 +268,13 @@ static void pSeries_lpar_hptab_clear(void) * out to the user, but at least this will stop us from * continuing on further and creating an even more * difficult to debug situation. + * + * There is a known problem when kdump'ing, if cpus are offline + * the above call will fail. Rather than panicking again, keep + * going and hope the kdump kernel is also little endian, which + * it usually is. 
*/ - if (rc) + if (rc && !kdump_in_progress()) panic("Could not enable big endian exceptions"); } #endif diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common index 87bc86821bc..d195a87ca54 100644 --- a/arch/um/Kconfig.common +++ b/arch/um/Kconfig.common @@ -3,6 +3,7 @@ config UML default y select HAVE_ARCH_AUDITSYSCALL select HAVE_UID16 + select HAVE_FUTEX_CMPXCHG if FUTEX select GENERIC_IRQ_SHOW select GENERIC_CPU_DEVICES select GENERIC_IO diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index fd0f848938c..5a4a089e8b1 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -26,7 +26,6 @@ obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o -obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/ obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o @@ -46,6 +45,7 @@ endif ifeq ($(avx2_supported),yes) obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o + obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/ endif aes-i586-y := aes-i586-asm_32.o aes_glue.o diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S index 2df2a0298f5..a916c4a6116 100644 --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S @@ -208,7 +208,7 @@ ddq_add_8: .if (klen == KEY_128) .if (load_keys) - vmovdqa 3*16(p_keys), xkeyA + vmovdqa 3*16(p_keys), xkey4 .endif .else vmovdqa 3*16(p_keys), xkeyA @@ -224,7 +224,7 @@ ddq_add_8: add $(16*by), p_in .if (klen == KEY_128) - vmovdqa 4*16(p_keys), xkey4 + vmovdqa 4*16(p_keys), xkeyB .else .if (load_keys) vmovdqa 4*16(p_keys), xkey4 @@ -234,7 +234,12 @@ ddq_add_8: .set i, 0 .rept by club XDATA, i - vaesenc xkeyA, var_xdata, var_xdata /* key 3 */ + /* key 3 */ + .if (klen == KEY_128) + vaesenc xkey4, var_xdata, var_xdata + .else + vaesenc xkeyA, var_xdata, var_xdata + .endif .set i, (i +1) .endr @@ -243,13 +248,18 @@ ddq_add_8: .set i, 0 .rept by club XDATA, i - vaesenc xkey4, var_xdata, var_xdata /* key 4 */ + /* key 4 */ + .if (klen == KEY_128) + vaesenc xkeyB, var_xdata, var_xdata + .else + vaesenc xkey4, var_xdata, var_xdata + .endif .set i, (i +1) .endr .if (klen == KEY_128) .if (load_keys) - vmovdqa 6*16(p_keys), xkeyB + vmovdqa 6*16(p_keys), xkey8 .endif .else vmovdqa 6*16(p_keys), xkeyB @@ -267,12 +277,17 @@ ddq_add_8: .set i, 0 .rept by club XDATA, i - vaesenc xkeyB, var_xdata, var_xdata /* key 6 */ + /* key 6 */ + .if (klen == KEY_128) + vaesenc xkey8, var_xdata, var_xdata + .else + vaesenc xkeyB, var_xdata, var_xdata + .endif .set i, (i +1) .endr .if (klen == KEY_128) - vmovdqa 8*16(p_keys), xkey8 + vmovdqa 8*16(p_keys), xkeyB .else .if (load_keys) vmovdqa 8*16(p_keys), xkey8 @@ -288,7 +303,7 @@ ddq_add_8: .if (klen == KEY_128) .if (load_keys) - vmovdqa 9*16(p_keys), xkeyA + vmovdqa 9*16(p_keys), xkey12 .endif .else vmovdqa 9*16(p_keys), xkeyA @@ -297,7 +312,12 @@ ddq_add_8: .set i, 0 .rept by club XDATA, i - vaesenc xkey8, var_xdata, var_xdata /* key 8 */ + /* key 8 */ + .if (klen == KEY_128) + vaesenc xkeyB, var_xdata, var_xdata + .else + vaesenc xkey8, var_xdata, var_xdata + .endif .set i, (i +1) .endr @@ -306,7 +326,12 @@ ddq_add_8: .set i, 0 .rept by club XDATA, i - vaesenc xkeyA, var_xdata, var_xdata /* key 9 */ + /* key 9 */ + .if (klen == KEY_128) + vaesenc xkey12, var_xdata, 
var_xdata + .else + vaesenc xkeyA, var_xdata, var_xdata + .endif .set i, (i +1) .endr @@ -412,7 +437,6 @@ ddq_add_8: /* main body of aes ctr load */ .macro do_aes_ctrmain key_len - cmp $16, num_bytes jb .Ldo_return2\key_len diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 4433a4be817..d1626364a28 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -750,13 +750,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) } /* wrapper to silence section mismatch warning */ -int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) +int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu) { return _acpi_map_lsapic(handle, physid, pcpu); } -EXPORT_SYMBOL(acpi_map_lsapic); +EXPORT_SYMBOL(acpi_map_cpu); -int acpi_unmap_lsapic(int cpu) +int acpi_unmap_cpu(int cpu) { #ifdef CONFIG_ACPI_NUMA set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE); @@ -768,8 +768,7 @@ int acpi_unmap_lsapic(int cpu) return (0); } - -EXPORT_SYMBOL(acpi_unmap_lsapic); +EXPORT_SYMBOL(acpi_unmap_cpu); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c index 531d4269e2e..bd16d6c370e 100644 --- a/arch/x86/um/sys_call_table_32.c +++ b/arch/x86/um/sys_call_table_32.c @@ -34,7 +34,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void); extern asmlinkage void sys_ni_syscall(void); -const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { +const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = { /* * Smells like a compiler bug -- it doesn't work * when the & below is removed. diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c index 20c3649d069..5cdfa9db221 100644 --- a/arch/x86/um/sys_call_table_64.c +++ b/arch/x86/um/sys_call_table_64.c @@ -47,7 +47,7 @@ typedef void (*sys_call_ptr_t)(void); extern void sys_ni_syscall(void); -const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { +const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = { /* * Smells like a compiler bug -- it doesn't work * when the & below is removed. 
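[Editor's note] The two arch/x86/um hunks above swap __cacheline_aligned for ____cacheline_aligned on the const syscall tables. The sketch below illustrates the difference as I understand it from include/linux/cache.h: the two-underscore variant also forces placement in the writable .data..cacheline_aligned section, which clashes with a const object, while the four-underscore variant applies alignment only. The macro bodies here are simplified stand-ins, not copied verbatim from the kernel headers, and SMP_CACHE_BYTES is hard-coded for illustration.

    /* Sketch (assumed, simplified from include/linux/cache.h) */
    #define SMP_CACHE_BYTES 64

    /* alignment only -- usable on const/read-only data */
    #define ____cacheline_aligned \
            __attribute__((__aligned__(SMP_CACHE_BYTES)))

    /* alignment plus placement in a writable .data section -- a poor fit
     * for a const table such as sys_call_table[]
     */
    #define __cacheline_aligned \
            __attribute__((__aligned__(SMP_CACHE_BYTES), \
                           __section__(".data..cacheline_aligned")))

    static const int example_table[] ____cacheline_aligned = { 1, 2, 3 };

With the section attribute dropped, the tables can remain const and be aligned without fighting the section placement.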
diff --git a/drivers/Makefile b/drivers/Makefile index 67d2334dc41..527a6da8d53 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -50,7 +50,10 @@ obj-$(CONFIG_RESET_CONTROLLER) += reset/ obj-y += tty/ obj-y += char/ -# gpu/ comes after char for AGP vs DRM startup +# iommu/ comes before gpu as gpu are using iommu controllers +obj-$(CONFIG_IOMMU_SUPPORT) += iommu/ + +# gpu/ comes after char for AGP vs DRM startup and after iommu obj-y += gpu/ obj-$(CONFIG_CONNECTOR) += connector/ @@ -141,7 +144,6 @@ obj-y += clk/ obj-$(CONFIG_MAILBOX) += mailbox/ obj-$(CONFIG_HWSPINLOCK) += hwspinlock/ -obj-$(CONFIG_IOMMU_SUPPORT) += iommu/ obj-$(CONFIG_REMOTEPROC) += remoteproc/ obj-$(CONFIG_RPMSG) += rpmsg/ diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 1fdf5e07a1c..1020b1b53a1 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr) acpi_status status; int ret; - if (pr->apic_id == -1) + if (pr->phys_id == -1) return -ENODEV; status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); @@ -180,13 +180,13 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr) cpu_maps_update_begin(); cpu_hotplug_begin(); - ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id); + ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id); if (ret) goto out; ret = arch_register_cpu(pr->id); if (ret) { - acpi_unmap_lsapic(pr->id); + acpi_unmap_cpu(pr->id); goto out; } @@ -215,7 +215,7 @@ static int acpi_processor_get_info(struct acpi_device *device) union acpi_object object = { 0 }; struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; struct acpi_processor *pr = acpi_driver_data(device); - int apic_id, cpu_index, device_declaration = 0; + int phys_id, cpu_index, device_declaration = 0; acpi_status status = AE_OK; static int cpu0_initialized; unsigned long long value; @@ -262,15 +262,18 @@ static int acpi_processor_get_info(struct acpi_device *device) pr->acpi_id = value; } - apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id); - if (apic_id < 0) - acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n"); - pr->apic_id = apic_id; + phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id); + if (phys_id < 0) + acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n"); + pr->phys_id = phys_id; - cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id); + cpu_index = acpi_map_cpuid(pr->phys_id, pr->acpi_id); if (!cpu0_initialized && !acpi_has_cpu_in_madt()) { cpu0_initialized = 1; - /* Handle UP system running SMP kernel, with no LAPIC in MADT */ + /* + * Handle UP system running SMP kernel, with no CPU + * entry in MADT + */ if ((cpu_index == -1) && (num_online_cpus() == 1)) cpu_index = 0; } @@ -458,7 +461,7 @@ static void acpi_processor_remove(struct acpi_device *device) /* Remove the CPU. 
*/ arch_unregister_cpu(pr->id); - acpi_unmap_lsapic(pr->id); + acpi_unmap_cpu(pr->id); cpu_hotplug_done(); cpu_maps_update_done(); diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index c2daa85fc9f..c0d44d394ca 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -257,7 +257,7 @@ int acpi_bus_init_power(struct acpi_device *device) device->power.state = ACPI_STATE_UNKNOWN; if (!acpi_device_is_present(device)) - return 0; + return -ENXIO; result = acpi_device_get_power(device, &state); if (result) diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 342942f90a1..02e48394276 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -69,7 +69,7 @@ static int map_madt_entry(int type, u32 acpi_id) unsigned long madt_end, entry; static struct acpi_table_madt *madt; static int read_madt; - int apic_id = -1; + int phys_id = -1; /* CPU hardware ID */ if (!read_madt) { if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0, @@ -79,7 +79,7 @@ static int map_madt_entry(int type, u32 acpi_id) } if (!madt) - return apic_id; + return phys_id; entry = (unsigned long)madt; madt_end = entry + madt->header.length; @@ -91,18 +91,18 @@ static int map_madt_entry(int type, u32 acpi_id) struct acpi_subtable_header *header = (struct acpi_subtable_header *)entry; if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) { - if (!map_lapic_id(header, acpi_id, &apic_id)) + if (!map_lapic_id(header, acpi_id, &phys_id)) break; } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) { - if (!map_x2apic_id(header, type, acpi_id, &apic_id)) + if (!map_x2apic_id(header, type, acpi_id, &phys_id)) break; } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) { - if (!map_lsapic_id(header, type, acpi_id, &apic_id)) + if (!map_lsapic_id(header, type, acpi_id, &phys_id)) break; } entry += header->length; } - return apic_id; + return phys_id; } static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id) @@ -110,7 +110,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id) struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; struct acpi_subtable_header *header; - int apic_id = -1; + int phys_id = -1; if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) goto exit; @@ -126,38 +126,38 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id) header = (struct acpi_subtable_header *)obj->buffer.pointer; if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) - map_lapic_id(header, acpi_id, &apic_id); + map_lapic_id(header, acpi_id, &phys_id); else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) - map_lsapic_id(header, type, acpi_id, &apic_id); + map_lsapic_id(header, type, acpi_id, &phys_id); else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) - map_x2apic_id(header, type, acpi_id, &apic_id); + map_x2apic_id(header, type, acpi_id, &phys_id); exit: kfree(buffer.pointer); - return apic_id; + return phys_id; } -int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id) +int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id) { - int apic_id; + int phys_id; - apic_id = map_mat_entry(handle, type, acpi_id); - if (apic_id == -1) - apic_id = map_madt_entry(type, acpi_id); + phys_id = map_mat_entry(handle, type, acpi_id); + if (phys_id == -1) + phys_id = map_madt_entry(type, acpi_id); - return apic_id; + return phys_id; } -int acpi_map_cpuid(int apic_id, u32 acpi_id) +int acpi_map_cpuid(int phys_id, u32 acpi_id) { #ifdef CONFIG_SMP int i; #endif - if (apic_id == -1) { + if (phys_id 
== -1) { /* * On UP processor, there is no _MAT or MADT table. - * So above apic_id is always set to -1. + * So above phys_id is always set to -1. * * BIOS may define multiple CPU handles even for UP processor. * For example, @@ -170,7 +170,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id) * Processor (CPU3, 0x03, 0x00000410, 0x06) {} * } * - * Ignores apic_id and always returns 0 for the processor + * Ignores phys_id and always returns 0 for the processor * handle with acpi id 0 if nr_cpu_ids is 1. * This should be the case if SMP tables are not found. * Return -1 for other CPU's handle. @@ -178,28 +178,28 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id) if (nr_cpu_ids <= 1 && acpi_id == 0) return acpi_id; else - return apic_id; + return phys_id; } #ifdef CONFIG_SMP for_each_possible_cpu(i) { - if (cpu_physical_id(i) == apic_id) + if (cpu_physical_id(i) == phys_id) return i; } #else /* In UP kernel, only processor 0 is valid */ - if (apic_id == 0) - return apic_id; + if (phys_id == 0) + return phys_id; #endif return -1; } int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) { - int apic_id; + int phys_id; - apic_id = acpi_get_apicid(handle, type, acpi_id); + phys_id = acpi_get_phys_id(handle, type, acpi_id); - return acpi_map_cpuid(apic_id, acpi_id); + return acpi_map_cpuid(phys_id, acpi_id); } EXPORT_SYMBOL_GPL(acpi_get_cpuid); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 16914cc3088..dc4d8960684 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1001,7 +1001,7 @@ static void acpi_free_power_resources_lists(struct acpi_device *device) if (device->wakeup.flags.valid) acpi_power_resources_list_free(&device->wakeup.resources); - if (!device->flags.power_manageable) + if (!device->power.flags.power_resources) return; for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { @@ -1744,10 +1744,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) device->power.flags.power_resources) device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1; - if (acpi_bus_init_power(device)) { - acpi_free_power_resources_lists(device); + if (acpi_bus_init_power(device)) device->flags.power_manageable = 0; - } } static void acpi_bus_get_flags(struct acpi_device *device) @@ -2371,13 +2369,18 @@ static void acpi_bus_attach(struct acpi_device *device) /* Skip devices that are not present. 
*/ if (!acpi_device_is_present(device)) { device->flags.visited = false; + device->flags.power_manageable = 0; return; } if (device->handler) goto ok; if (!device->flags.initialized) { - acpi_bus_update_power(device, NULL); + device->flags.power_manageable = + device->power.states[ACPI_STATE_D0].flags.valid; + if (acpi_bus_init_power(device)) + device->flags.power_manageable = 0; + device->flags.initialized = true; } device->flags.visited = false; diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index c72e79d2c5a..032db459370 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -522,6 +522,16 @@ static struct dmi_system_id video_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"), }, }, + + { + /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */ + .callback = video_disable_native_backlight, + .ident = "Dell XPS15 L521X", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"), + }, + }, {} }; diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index fd5a5e85d7d..982b96323f8 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -969,7 +969,8 @@ static void sender(void *send_info, do_gettimeofday(&t); pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n", - msg->data[0], msg->data[1], t.tv_sec, t.tv_usec); + msg->data[0], msg->data[1], + (long) t.tv_sec, (long) t.tv_usec); } } diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 66e40398b3d..e620807418e 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -37,6 +37,7 @@ obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o obj-$(CONFIG_DRM_TTM) += ttm/ obj-$(CONFIG_DRM_TDFX) += tdfx/ obj-$(CONFIG_DRM_R128) += r128/ +obj-$(CONFIG_HSA_AMD) += amd/amdkfd/ obj-$(CONFIG_DRM_RADEON)+= radeon/ obj-$(CONFIG_DRM_MGA) += mga/ obj-$(CONFIG_DRM_I810) += i810/ @@ -67,4 +68,3 @@ obj-$(CONFIG_DRM_IMX) += imx/ obj-y += i2c/ obj-y += panel/ obj-y += bridge/ -obj-$(CONFIG_HSA_AMD) += amd/amdkfd/ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 7d4974b83af..fcfdf23e191 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -31,7 +31,6 @@ #include <uapi/linux/kfd_ioctl.h> #include <linux/time.h> #include <linux/mm.h> -#include <linux/uaccess.h> #include <uapi/asm-generic/mman-common.h> #include <asm/processor.h> #include "kfd_priv.h" @@ -127,17 +126,14 @@ static int kfd_open(struct inode *inode, struct file *filep) return 0; } -static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p, - void __user *arg) +static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p, + void *data) { - struct kfd_ioctl_get_version_args args; + struct kfd_ioctl_get_version_args *args = data; int err = 0; - args.major_version = KFD_IOCTL_MAJOR_VERSION; - args.minor_version = KFD_IOCTL_MINOR_VERSION; - - if (copy_to_user(arg, &args, sizeof(args))) - err = -EFAULT; + args->major_version = KFD_IOCTL_MAJOR_VERSION; + args->minor_version = KFD_IOCTL_MINOR_VERSION; return err; } @@ -221,10 +217,10 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties, return 0; } -static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, - void __user *arg) +static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, + void *data) { - struct kfd_ioctl_create_queue_args args; + struct kfd_ioctl_create_queue_args *args = data; 
struct kfd_dev *dev; int err = 0; unsigned int queue_id; @@ -233,16 +229,13 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, memset(&q_properties, 0, sizeof(struct queue_properties)); - if (copy_from_user(&args, arg, sizeof(args))) - return -EFAULT; - pr_debug("kfd: creating queue ioctl\n"); - err = set_queue_properties_from_user(&q_properties, &args); + err = set_queue_properties_from_user(&q_properties, args); if (err) return err; - dev = kfd_device_by_id(args.gpu_id); + dev = kfd_device_by_id(args->gpu_id); if (dev == NULL) return -EINVAL; @@ -250,7 +243,7 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, pdd = kfd_bind_process_to_device(dev, p); if (IS_ERR(pdd)) { - err = PTR_ERR(pdd); + err = -ESRCH; goto err_bind_process; } @@ -263,33 +256,26 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, if (err != 0) goto err_create_queue; - args.queue_id = queue_id; + args->queue_id = queue_id; /* Return gpu_id as doorbell offset for mmap usage */ - args.doorbell_offset = args.gpu_id << PAGE_SHIFT; - - if (copy_to_user(arg, &args, sizeof(args))) { - err = -EFAULT; - goto err_copy_args_out; - } + args->doorbell_offset = args->gpu_id << PAGE_SHIFT; mutex_unlock(&p->mutex); - pr_debug("kfd: queue id %d was created successfully\n", args.queue_id); + pr_debug("kfd: queue id %d was created successfully\n", args->queue_id); pr_debug("ring buffer address == 0x%016llX\n", - args.ring_base_address); + args->ring_base_address); pr_debug("read ptr address == 0x%016llX\n", - args.read_pointer_address); + args->read_pointer_address); pr_debug("write ptr address == 0x%016llX\n", - args.write_pointer_address); + args->write_pointer_address); return 0; -err_copy_args_out: - pqm_destroy_queue(&p->pqm, queue_id); err_create_queue: err_bind_process: mutex_unlock(&p->mutex); @@ -297,99 +283,90 @@ err_bind_process: } static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p, - void __user *arg) + void *data) { int retval; - struct kfd_ioctl_destroy_queue_args args; - - if (copy_from_user(&args, arg, sizeof(args))) - return -EFAULT; + struct kfd_ioctl_destroy_queue_args *args = data; pr_debug("kfd: destroying queue id %d for PASID %d\n", - args.queue_id, + args->queue_id, p->pasid); mutex_lock(&p->mutex); - retval = pqm_destroy_queue(&p->pqm, args.queue_id); + retval = pqm_destroy_queue(&p->pqm, args->queue_id); mutex_unlock(&p->mutex); return retval; } static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, - void __user *arg) + void *data) { int retval; - struct kfd_ioctl_update_queue_args args; + struct kfd_ioctl_update_queue_args *args = data; struct queue_properties properties; - if (copy_from_user(&args, arg, sizeof(args))) - return -EFAULT; - - if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { + if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n"); return -EINVAL; } - if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) { + if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) { pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n"); return -EINVAL; } - if ((args.ring_base_address) && + if ((args->ring_base_address) && (!access_ok(VERIFY_WRITE, - (const void __user *) args.ring_base_address, + (const void __user *) args->ring_base_address, sizeof(uint64_t)))) { pr_err("kfd: can't access ring base address\n"); return -EFAULT; } - if (!is_power_of_2(args.ring_size) && 
(args.ring_size != 0)) { + if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) { pr_err("kfd: ring size must be a power of 2 or 0\n"); return -EINVAL; } - properties.queue_address = args.ring_base_address; - properties.queue_size = args.ring_size; - properties.queue_percent = args.queue_percentage; - properties.priority = args.queue_priority; + properties.queue_address = args->ring_base_address; + properties.queue_size = args->ring_size; + properties.queue_percent = args->queue_percentage; + properties.priority = args->queue_priority; pr_debug("kfd: updating queue id %d for PASID %d\n", - args.queue_id, p->pasid); + args->queue_id, p->pasid); mutex_lock(&p->mutex); - retval = pqm_update_queue(&p->pqm, args.queue_id, &properties); + retval = pqm_update_queue(&p->pqm, args->queue_id, &properties); mutex_unlock(&p->mutex); return retval; } -static long kfd_ioctl_set_memory_policy(struct file *filep, - struct kfd_process *p, void __user *arg) +static int kfd_ioctl_set_memory_policy(struct file *filep, + struct kfd_process *p, void *data) { - struct kfd_ioctl_set_memory_policy_args args; + struct kfd_ioctl_set_memory_policy_args *args = data; struct kfd_dev *dev; int err = 0; struct kfd_process_device *pdd; enum cache_policy default_policy, alternate_policy; - if (copy_from_user(&args, arg, sizeof(args))) - return -EFAULT; - - if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT - && args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { + if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT + && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { return -EINVAL; } - if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT - && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { + if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT + && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { return -EINVAL; } - dev = kfd_device_by_id(args.gpu_id); + dev = kfd_device_by_id(args->gpu_id); if (dev == NULL) return -EINVAL; @@ -397,23 +374,23 @@ static long kfd_ioctl_set_memory_policy(struct file *filep, pdd = kfd_bind_process_to_device(dev, p); if (IS_ERR(pdd)) { - err = PTR_ERR(pdd); + err = -ESRCH; goto out; } - default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT) + default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT) ? cache_policy_coherent : cache_policy_noncoherent; alternate_policy = - (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT) + (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT) ? 
cache_policy_coherent : cache_policy_noncoherent; if (!dev->dqm->set_cache_memory_policy(dev->dqm, &pdd->qpd, default_policy, alternate_policy, - (void __user *)args.alternate_aperture_base, - args.alternate_aperture_size)) + (void __user *)args->alternate_aperture_base, + args->alternate_aperture_size)) err = -EINVAL; out: @@ -422,53 +399,44 @@ out: return err; } -static long kfd_ioctl_get_clock_counters(struct file *filep, - struct kfd_process *p, void __user *arg) +static int kfd_ioctl_get_clock_counters(struct file *filep, + struct kfd_process *p, void *data) { - struct kfd_ioctl_get_clock_counters_args args; + struct kfd_ioctl_get_clock_counters_args *args = data; struct kfd_dev *dev; struct timespec time; - if (copy_from_user(&args, arg, sizeof(args))) - return -EFAULT; - - dev = kfd_device_by_id(args.gpu_id); + dev = kfd_device_by_id(args->gpu_id); if (dev == NULL) return -EINVAL; /* Reading GPU clock counter from KGD */ - args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd); + args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd); /* No access to rdtsc. Using raw monotonic time */ getrawmonotonic(&time); - args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time); + args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time); get_monotonic_boottime(&time); - args.system_clock_counter = (uint64_t)timespec_to_ns(&time); + args->system_clock_counter = (uint64_t)timespec_to_ns(&time); /* Since the counter is in nano-seconds we use 1GHz frequency */ - args.system_clock_freq = 1000000000; - - if (copy_to_user(arg, &args, sizeof(args))) - return -EFAULT; + args->system_clock_freq = 1000000000; return 0; } static int kfd_ioctl_get_process_apertures(struct file *filp, - struct kfd_process *p, void __user *arg) + struct kfd_process *p, void *data) { - struct kfd_ioctl_get_process_apertures_args args; + struct kfd_ioctl_get_process_apertures_args *args = data; struct kfd_process_device_apertures *pAperture; struct kfd_process_device *pdd; dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid); - if (copy_from_user(&args, arg, sizeof(args))) - return -EFAULT; - - args.num_of_nodes = 0; + args->num_of_nodes = 0; mutex_lock(&p->mutex); @@ -477,7 +445,8 @@ static int kfd_ioctl_get_process_apertures(struct file *filp, /* Run over all pdd of the process */ pdd = kfd_get_first_process_device_data(p); do { - pAperture = &args.process_apertures[args.num_of_nodes]; + pAperture = + &args->process_apertures[args->num_of_nodes]; pAperture->gpu_id = pdd->dev->id; pAperture->lds_base = pdd->lds_base; pAperture->lds_limit = pdd->lds_limit; @@ -487,7 +456,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp, pAperture->scratch_limit = pdd->scratch_limit; dev_dbg(kfd_device, - "node id %u\n", args.num_of_nodes); + "node id %u\n", args->num_of_nodes); dev_dbg(kfd_device, "gpu id %u\n", pdd->dev->id); dev_dbg(kfd_device, @@ -503,80 +472,131 @@ static int kfd_ioctl_get_process_apertures(struct file *filp, dev_dbg(kfd_device, "scratch_limit %llX\n", pdd->scratch_limit); - args.num_of_nodes++; + args->num_of_nodes++; } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL && - (args.num_of_nodes < NUM_OF_SUPPORTED_GPUS)); + (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS)); } mutex_unlock(&p->mutex); - if (copy_to_user(arg, &args, sizeof(args))) - return -EFAULT; - return 0; } +#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \ + [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl} + +/** Ioctl table */ +static const struct 
amdkfd_ioctl_desc amdkfd_ioctls[] = { + AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION, + kfd_ioctl_get_version, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE, + kfd_ioctl_create_queue, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE, + kfd_ioctl_destroy_queue, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY, + kfd_ioctl_set_memory_policy, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS, + kfd_ioctl_get_clock_counters, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES, + kfd_ioctl_get_process_apertures, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE, + kfd_ioctl_update_queue, 0), +}; + +#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls) + static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { struct kfd_process *process; - long err = -EINVAL; + amdkfd_ioctl_t *func; + const struct amdkfd_ioctl_desc *ioctl = NULL; + unsigned int nr = _IOC_NR(cmd); + char stack_kdata[128]; + char *kdata = NULL; + unsigned int usize, asize; + int retcode = -EINVAL; - dev_dbg(kfd_device, - "ioctl cmd 0x%x (#%d), arg 0x%lx\n", - cmd, _IOC_NR(cmd), arg); + if (nr >= AMDKFD_CORE_IOCTL_COUNT) + goto err_i1; + + if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) { + u32 amdkfd_size; + + ioctl = &amdkfd_ioctls[nr]; + + amdkfd_size = _IOC_SIZE(ioctl->cmd); + usize = asize = _IOC_SIZE(cmd); + if (amdkfd_size > asize) + asize = amdkfd_size; + + cmd = ioctl->cmd; + } else + goto err_i1; + + dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg); process = kfd_get_process(current); - if (IS_ERR(process)) - return PTR_ERR(process); + if (IS_ERR(process)) { + dev_dbg(kfd_device, "no process\n"); + goto err_i1; + } - switch (cmd) { - case KFD_IOC_GET_VERSION: - err = kfd_ioctl_get_version(filep, process, (void __user *)arg); - break; - case KFD_IOC_CREATE_QUEUE: - err = kfd_ioctl_create_queue(filep, process, - (void __user *)arg); - break; - - case KFD_IOC_DESTROY_QUEUE: - err = kfd_ioctl_destroy_queue(filep, process, - (void __user *)arg); - break; - - case KFD_IOC_SET_MEMORY_POLICY: - err = kfd_ioctl_set_memory_policy(filep, process, - (void __user *)arg); - break; - - case KFD_IOC_GET_CLOCK_COUNTERS: - err = kfd_ioctl_get_clock_counters(filep, process, - (void __user *)arg); - break; - - case KFD_IOC_GET_PROCESS_APERTURES: - err = kfd_ioctl_get_process_apertures(filep, process, - (void __user *)arg); - break; - - case KFD_IOC_UPDATE_QUEUE: - err = kfd_ioctl_update_queue(filep, process, - (void __user *)arg); - break; - - default: - dev_err(kfd_device, - "unknown ioctl cmd 0x%x, arg 0x%lx)\n", - cmd, arg); - err = -EINVAL; - break; + /* Do not trust userspace, use our own definition */ + func = ioctl->func; + + if (unlikely(!func)) { + dev_dbg(kfd_device, "no function\n"); + retcode = -EINVAL; + goto err_i1; } - if (err < 0) - dev_err(kfd_device, - "ioctl error %ld for ioctl cmd 0x%x (#%d)\n", - err, cmd, _IOC_NR(cmd)); + if (cmd & (IOC_IN | IOC_OUT)) { + if (asize <= sizeof(stack_kdata)) { + kdata = stack_kdata; + } else { + kdata = kmalloc(asize, GFP_KERNEL); + if (!kdata) { + retcode = -ENOMEM; + goto err_i1; + } + } + if (asize > usize) + memset(kdata + usize, 0, asize - usize); + } - return err; + if (cmd & IOC_IN) { + if (copy_from_user(kdata, (void __user *)arg, usize) != 0) { + retcode = -EFAULT; + goto err_i1; + } + } else if (cmd & IOC_OUT) { + memset(kdata, 0, usize); + } + + retcode = func(filep, process, kdata); + + if (cmd & IOC_OUT) + if (copy_to_user((void __user *)arg, kdata, usize) != 0) + retcode = -EFAULT; + +err_i1: + if 
(!ioctl) + dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n", + task_pid_nr(current), cmd, nr); + + if (kdata != stack_kdata) + kfree(kdata); + + if (retcode) + dev_dbg(kfd_device, "ret = %d\n", retcode); + + return retcode; } static int kfd_mmap(struct file *filp, struct vm_area_struct *vma) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 924e90c072e..9c8961d2236 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -161,6 +161,9 @@ static void deallocate_vmid(struct device_queue_manager *dqm, { int bit = qpd->vmid - KFD_VMID_START_OFFSET; + /* Release the vmid mapping */ + set_pasid_vmid_mapping(dqm, 0, qpd->vmid); + set_bit(bit, (unsigned long *)&dqm->vmid_bitmap); qpd->vmid = 0; q->properties.vmid = 0; @@ -272,6 +275,18 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, return retval; } + pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n", + q->pipe, + q->queue); + + retval = mqd->load_mqd(mqd, q->mqd, q->pipe, + q->queue, q->properties.write_ptr); + if (retval != 0) { + deallocate_hqd(dqm, q); + mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); + return retval; + } + return 0; } @@ -320,6 +335,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) { int retval; struct mqd_manager *mqd; + bool prev_active = false; BUG_ON(!dqm || !q || !q->mqd); @@ -330,10 +346,18 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) return -ENOMEM; } - retval = mqd->update_mqd(mqd, q->mqd, &q->properties); if (q->properties.is_active == true) + prev_active = true; + + /* + * + * check active state vs. the previous state + * and modify counter accordingly + */ + retval = mqd->update_mqd(mqd, q->mqd, &q->properties); + if ((q->properties.is_active == true) && (prev_active == false)) dqm->queue_count++; - else + else if ((q->properties.is_active == false) && (prev_active == true)) dqm->queue_count--; if (sched_policy != KFD_SCHED_POLICY_NO_HWS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index adc31474e78..4c3828cf45b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -184,7 +184,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { - return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address, + return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address, pipe_id, queue_id); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c index 71699ad97d7..4c25ef504f7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c @@ -32,7 +32,7 @@ int kfd_pasid_init(void) { pasid_limit = max_num_of_processes; - pasid_bitmap = kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL); + pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); if (!pasid_bitmap) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index f9fb81e3bb0..a5edb29507e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -463,6 +463,24 @@ struct kfd_process { bool is_32bit_user_mode; }; +/** + * Ioctl function type. + * + * \param filep pointer to file structure. + * \param p amdkfd process pointer. + * \param data pointer to arg that was copied from user. 
+ */ +typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p, + void *data); + +struct amdkfd_ioctl_desc { + unsigned int cmd; + int flags; + amdkfd_ioctl_t *func; + unsigned int cmd_drv; + const char *name; +}; + void kfd_process_create_wq(void); void kfd_process_destroy_wq(void); struct kfd_process *kfd_create_process(const struct task_struct *); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index b11792d7e70..cca1708fd81 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -921,7 +921,7 @@ static int kfd_build_sysfs_node_tree(void) uint32_t i = 0; list_for_each_entry(dev, &topology_device_list, list) { - ret = kfd_build_sysfs_node_entry(dev, 0); + ret = kfd_build_sysfs_node_entry(dev, i); if (ret < 0) return ret; i++; diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 47b551970a1..96a512208fa 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -183,7 +183,7 @@ struct kfd2kgd_calls { int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr); - bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address, + bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id); int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 70d0f0f06f1..e9f891c432f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1756,8 +1756,6 @@ struct drm_i915_private { */ struct workqueue_struct *dp_wq; - uint32_t bios_vgacntr; - /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ struct { int (*do_execbuf)(struct drm_device *dev, struct drm_file *file, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 52adcb680be..c11603b4cf1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1048,6 +1048,7 @@ int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_pwrite *args = data; struct drm_i915_gem_object *obj; int ret; @@ -1067,9 +1068,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, return -EFAULT; } + intel_runtime_pm_get(dev_priv); + ret = i915_mutex_lock_interruptible(dev); if (ret) - return ret; + goto put_rpm; obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (&obj->base == NULL) { @@ -1121,6 +1124,9 @@ out: drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); +put_rpm: + intel_runtime_pm_put(dev_priv); + return ret; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 996c2931c49..d0d3dfbe6d2 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3725,8 +3725,6 @@ static bool i8xx_handle_vblank(struct drm_device *dev, if ((iir & flip_pending) == 0) goto check_page_flip; - intel_prepare_page_flip(dev, plane); - /* We detect FlipDone by looking for the change in PendingFlip from '1' * to '0' on the following vblank, i.e. 
IIR has the Pendingflip * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence @@ -3736,6 +3734,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev, if (I915_READ16(ISR) & flip_pending) goto check_page_flip; + intel_prepare_page_flip(dev, plane); intel_finish_page_flip(dev, pipe); return true; @@ -3907,8 +3906,6 @@ static bool i915_handle_vblank(struct drm_device *dev, if ((iir & flip_pending) == 0) goto check_page_flip; - intel_prepare_page_flip(dev, plane); - /* We detect FlipDone by looking for the change in PendingFlip from '1' * to '0' on the following vblank, i.e. IIR has the Pendingflip * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence @@ -3918,6 +3915,7 @@ static bool i915_handle_vblank(struct drm_device *dev, if (I915_READ(ISR) & flip_pending) goto check_page_flip; + intel_prepare_page_flip(dev, plane); intel_finish_page_flip(dev, pipe); return true; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index fb3e3d42919..e2af1383b17 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13057,11 +13057,7 @@ static void i915_disable_vga(struct drm_device *dev) vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); udelay(300); - /* - * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming - * from S3 without preserving (some of?) the other bits. - */ - I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE); + I915_WRITE(vga_reg, VGA_DISP_DISABLE); POSTING_READ(vga_reg); } @@ -13146,8 +13142,6 @@ void intel_modeset_init(struct drm_device *dev) intel_shared_dpll_init(dev); - /* save the BIOS value before clobbering it */ - dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev)); /* Just disable it once at startup */ i915_disable_vga(dev); intel_setup_outputs(dev); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index f5a78d53e29..ac6da7102fb 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -615,29 +615,6 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, vlv_power_sequencer_reset(dev_priv); } -static void check_power_well_state(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - bool enabled = power_well->ops->is_enabled(dev_priv, power_well); - - if (power_well->always_on || !i915.disable_power_well) { - if (!enabled) - goto mismatch; - - return; - } - - if (enabled != (power_well->count > 0)) - goto mismatch; - - return; - -mismatch: - WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n", - power_well->name, power_well->always_on, enabled, - power_well->count, i915.disable_power_well); -} - /** * intel_display_power_get - grab a power domain reference * @dev_priv: i915 device instance @@ -669,8 +646,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv, power_well->ops->enable(dev_priv, power_well); power_well->hw_enabled = true; } - - check_power_well_state(dev_priv, power_well); } power_domains->domain_use_count[domain]++; @@ -709,8 +684,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, power_well->hw_enabled = false; power_well->ops->disable(dev_priv, power_well); } - - check_power_well_state(dev_priv, power_well); } mutex_unlock(&power_domains->lock); diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c index ff2b434b3db..760947e380c 100644 --- 
a/drivers/gpu/drm/nouveau/core/core/event.c +++ b/drivers/gpu/drm/nouveau/core/core/event.c @@ -26,7 +26,7 @@ void nvkm_event_put(struct nvkm_event *event, u32 types, int index) { - BUG_ON(!spin_is_locked(&event->refs_lock)); + assert_spin_locked(&event->refs_lock); while (types) { int type = __ffs(types); types &= ~(1 << type); if (--event->refs[index * event->types_nr + type] == 0) { @@ -39,7 +39,7 @@ nvkm_event_put(struct nvkm_event *event, u32 types, int index) void nvkm_event_get(struct nvkm_event *event, u32 types, int index) { - BUG_ON(!spin_is_locked(&event->refs_lock)); + assert_spin_locked(&event->refs_lock); while (types) { int type = __ffs(types); types &= ~(1 << type); if (++event->refs[index * event->types_nr + type] == 1) { diff --git a/drivers/gpu/drm/nouveau/core/core/notify.c b/drivers/gpu/drm/nouveau/core/core/notify.c index d1bcde55e9d..839a3257768 100644 --- a/drivers/gpu/drm/nouveau/core/core/notify.c +++ b/drivers/gpu/drm/nouveau/core/core/notify.c @@ -98,7 +98,7 @@ nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size) struct nvkm_event *event = notify->event; unsigned long flags; - BUG_ON(!spin_is_locked(&event->list_lock)); + assert_spin_locked(&event->list_lock); BUG_ON(size != notify->size); spin_lock_irqsave(&event->refs_lock, flags); diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c index 674da1f095b..73292269065 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c @@ -249,6 +249,39 @@ nve0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass; break; + case 0x106: + device->cname = "GK208B"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass; + device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; + device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass; + device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; + device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass; + device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass; + device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; + device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; + device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + break; case 0x108: 
device->cname = "GK208"; device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c index 5e58bba0dd5..a7a890fad1e 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c @@ -44,8 +44,10 @@ static void pramin_fini(void *data) { struct priv *priv = data; - nv_wr32(priv->bios, 0x001700, priv->bar0); - kfree(priv); + if (priv) { + nv_wr32(priv->bios, 0x001700, priv->bar0); + kfree(priv); + } } static void * diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c index 00f2ca7e44a..033a8e99949 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c @@ -24,34 +24,71 @@ #include "nv50.h" +struct nvaa_ram_priv { + struct nouveau_ram base; + u64 poller_base; +}; + static int nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 datasize, struct nouveau_object **pobject) { - const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ - const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ + u32 rsvd_head = ( 256 * 1024); /* vga memory */ + u32 rsvd_tail = (1024 * 1024); /* vbios etc */ struct nouveau_fb *pfb = nouveau_fb(parent); - struct nouveau_ram *ram; + struct nvaa_ram_priv *priv; int ret; - ret = nouveau_ram_create(parent, engine, oclass, &ram); - *pobject = nv_object(ram); + ret = nouveau_ram_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); if (ret) return ret; - ram->size = nv_rd32(pfb, 0x10020c); - ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32); + priv->base.type = NV_MEM_TYPE_STOLEN; + priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12; + priv->base.size = (u64)nv_rd32(pfb, 0x100e14) << 12; - ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) - - (rsvd_head + rsvd_tail), 1); + rsvd_tail += 0x1000; + priv->poller_base = priv->base.size - rsvd_tail; + + ret = nouveau_mm_init(&pfb->vram, rsvd_head >> 12, + (priv->base.size - (rsvd_head + rsvd_tail)) >> 12, + 1); if (ret) return ret; - ram->type = NV_MEM_TYPE_STOLEN; - ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12; - ram->get = nv50_ram_get; - ram->put = nv50_ram_put; + priv->base.get = nv50_ram_get; + priv->base.put = nv50_ram_put; + return 0; +} + +static int +nvaa_ram_init(struct nouveau_object *object) +{ + struct nouveau_fb *pfb = nouveau_fb(object); + struct nvaa_ram_priv *priv = (void *)object; + int ret; + u64 dniso, hostnb, flush; + + ret = nouveau_ram_init(&priv->base); + if (ret) + return ret; + + dniso = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1; + hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1; + flush = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1; + + /* Enable NISO poller for various clients and set their associated + * read address, only for MCP77/78 and MCP79/7A. 
(fd#25701) + */ + nv_wr32(pfb, 0x100c18, dniso); + nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001); + nv_wr32(pfb, 0x100c1c, hostnb); + nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002); + nv_wr32(pfb, 0x100c24, flush); + nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000); + return 0; } @@ -60,7 +97,7 @@ nvaa_ram_oclass = { .ofuncs = &(struct nouveau_ofuncs) { .ctor = nvaa_ram_ctor, .dtor = _nouveau_ram_dtor, - .init = _nouveau_ram_init, + .init = nvaa_ram_init, .fini = _nouveau_ram_fini, }, }; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c index a75c35ccf25..165401c4045 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c @@ -24,13 +24,6 @@ #include "nv04.h" -static void -nv4c_mc_msi_rearm(struct nouveau_mc *pmc) -{ - struct nv04_mc_priv *priv = (void *)pmc; - nv_wr08(priv, 0x088050, 0xff); -} - struct nouveau_oclass * nv4c_mc_oclass = &(struct nouveau_mc_oclass) { .base.handle = NV_SUBDEV(MC, 0x4c), @@ -41,5 +34,4 @@ nv4c_mc_oclass = &(struct nouveau_mc_oclass) { .fini = _nouveau_mc_fini, }, .intr = nv04_mc_intr, - .msi_rearm = nv4c_mc_msi_rearm, }.base; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 21ec561edc9..bba2960d3df 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1572,8 +1572,10 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) * so use the DMA API for them. */ if (!nv_device_is_cpu_coherent(device) && - ttm->caching_state == tt_uncached) + ttm->caching_state == tt_uncached) { ttm_dma_unpopulate(ttm_dma, dev->dev); + return; + } #if __OS_HAS_AGP if (drm->agp.stat == ENABLED) { diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 42c34babc2e..bf0f9e21d71 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -36,7 +36,14 @@ void nouveau_gem_object_del(struct drm_gem_object *gem) { struct nouveau_bo *nvbo = nouveau_gem_object(gem); + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct ttm_buffer_object *bo = &nvbo->bo; + struct device *dev = drm->dev->dev; + int ret; + + ret = pm_runtime_get_sync(dev); + if (WARN_ON(ret < 0 && ret != -EACCES)) + return; if (gem->import_attach) drm_prime_gem_destroy(gem, nvbo->bo.sg); @@ -46,6 +53,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem) /* reset filp so nouveau_bo_del_ttm() can test for it */ gem->filp = NULL; ttm_bo_unref(&bo); + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); } int @@ -53,7 +63,9 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) { struct nouveau_cli *cli = nouveau_cli(file_priv); struct nouveau_bo *nvbo = nouveau_gem_object(gem); + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct nouveau_vma *vma; + struct device *dev = drm->dev->dev; int ret; if (!cli->vm) @@ -71,11 +83,16 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) goto out; } + ret = pm_runtime_get_sync(dev); + if (ret < 0 && ret != -EACCES) + goto out; + ret = nouveau_bo_vma_add(nvbo, cli->vm, vma); - if (ret) { + if (ret) kfree(vma); - goto out; - } + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); } else { vma->refcount++; } @@ -129,6 +146,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) { struct nouveau_cli *cli = nouveau_cli(file_priv); struct nouveau_bo *nvbo = nouveau_gem_object(gem); + struct 
nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); + struct device *dev = drm->dev->dev; struct nouveau_vma *vma; int ret; @@ -141,8 +160,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) vma = nouveau_bo_vma_find(nvbo, cli->vm); if (vma) { - if (--vma->refcount == 0) - nouveau_gem_object_unmap(nvbo, vma); + if (--vma->refcount == 0) { + ret = pm_runtime_get_sync(dev); + if (!WARN_ON(ret < 0 && ret != -EACCES)) { + nouveau_gem_object_unmap(nvbo, vma); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + } + } } ttm_bo_unreserve(&nvbo->bo); } diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index d59ec491dbb..ed644a4f6f5 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1851,10 +1851,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) return pll; } /* otherwise, pick one of the plls */ - if ((rdev->family == CHIP_KAVERI) || - (rdev->family == CHIP_KABINI) || + if ((rdev->family == CHIP_KABINI) || (rdev->family == CHIP_MULLINS)) { - /* KB/KV/ML has PPLL1 and PPLL2 */ + /* KB/ML has PPLL1 and PPLL2 */ pll_in_use = radeon_get_pll_use_mask(crtc); if (!(pll_in_use & (1 << ATOM_PPLL2))) return ATOM_PPLL2; @@ -1863,7 +1862,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) DRM_ERROR("unable to allocate a PPLL\n"); return ATOM_PPLL_INVALID; } else { - /* CI has PPLL0, PPLL1, and PPLL2 */ + /* CI/KV has PPLL0, PPLL1, and PPLL2 */ pll_in_use = radeon_get_pll_use_mask(crtc); if (!(pll_in_use & (1 << ATOM_PPLL2))) return ATOM_PPLL2; @@ -2155,6 +2154,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) case ATOM_PPLL0: /* disable the ppll */ if ((rdev->family == CHIP_ARUBA) || + (rdev->family == CHIP_KAVERI) || (rdev->family == CHIP_BONAIRE) || (rdev->family == CHIP_HAWAII)) atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 11ba9d21b89..db42a670f99 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -492,6 +492,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector, struct radeon_connector_atom_dig *dig_connector; int dp_clock; + if ((mode->clock > 340000) && + (!radeon_connector_is_dp12_capable(connector))) + return MODE_CLOCK_HIGH; + if (!radeon_connector->con_priv) return MODE_CLOCK_HIGH; dig_connector = radeon_connector->con_priv; diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index ba85986febe..03003f8a6de 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -2156,4 +2156,6 @@ #define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu #define ATC_VM_APERTURE1_LOW_ADDR 0x3304u +#define IH_VMID_0_LUT 0x3D40u + #endif diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c index 2fe8cfc966d..bafdf92a573 100644 --- a/drivers/gpu/drm/radeon/dce3_1_afmt.c +++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c @@ -103,7 +103,7 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); - if (sad_count < 0) { + if (sad_count <= 0) { DRM_ERROR("Couldn't read SADs: %d\n", sad_count); return; } diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 9b42001295b..e3e9c10cfba 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -2745,13 +2745,11 @@ int kv_dpm_init(struct 
radeon_device *rdev) pi->enable_auto_thermal_throttling = true; pi->disable_nb_ps3_in_battery = false; if (radeon_bapm == -1) { - /* There are stability issues reported on with - * bapm enabled on an asrock system. - */ - if (rdev->pdev->subsystem_vendor == 0x1849) - pi->bapm_enable = false; - else + /* only enable bapm on KB, ML by default */ + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) pi->bapm_enable = true; + else + pi->bapm_enable = false; } else if (radeon_bapm == 0) { pi->bapm_enable = false; } else { diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index 242fd8b1b22..8bf87f1203c 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c @@ -72,7 +72,7 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr); -static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address, +static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id); static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, @@ -92,7 +92,7 @@ static const struct kfd2kgd_calls kfd2kgd = { .init_memory = kgd_init_memory, .init_pipeline = kgd_init_pipeline, .hqd_load = kgd_hqd_load, - .hqd_is_occupies = kgd_hqd_is_occupies, + .hqd_is_occupied = kgd_hqd_is_occupied, .hqd_destroy = kgd_hqd_destroy, .get_fw_version = get_fw_version }; @@ -101,6 +101,7 @@ static const struct kgd2kfd_calls *kgd2kfd; bool radeon_kfd_init(void) { +#if defined(CONFIG_HSA_AMD_MODULE) bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*, const struct kgd2kfd_calls**); @@ -117,6 +118,17 @@ bool radeon_kfd_init(void) } return true; +#elif defined(CONFIG_HSA_AMD) + if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) { + kgd2kfd = NULL; + + return false; + } + + return true; +#else + return false; +#endif } void radeon_kfd_fini(void) @@ -378,6 +390,10 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, cpu_relax(); write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid); + /* Mapping vmid to pasid also for IH block */ + write_register(kgd, IH_VMID_0_LUT + vmid * sizeof(uint32_t), + pasid_mapping); + return 0; } @@ -517,7 +533,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, return 0; } -static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address, +static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id) { uint32_t act; @@ -556,6 +572,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, if (timeout == 0) { pr_err("kfd: cp queue preemption time out (%dms)\n", temp); + release_queue(kgd); return -ETIME; } msleep(20); diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 535403e0c8a..15aee723db7 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c @@ -1703,7 +1703,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev, u32 format; u32 *buffer; const u8 __user *data; - int size, dwords, tex_width, blit_width, spitch; + unsigned int size, dwords, tex_width, blit_width, spitch; u32 height; int i; u32 texpitch, microtile; diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 230b6f887cd..dfdc2697002 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -27,7 +27,8 @@ if 
HID config HID_BATTERY_STRENGTH bool "Battery level reporting for HID devices" - depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY + depends on HID + select POWER_SUPPLY default n ---help--- This option adds support of reporting battery strength (for HID devices diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index c3d0ac1a098..8b638792cb4 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1805,6 +1805,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 7460f340229..9243359c182 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -526,6 +526,7 @@ #define USB_DEVICE_ID_KYE_GPEN_560 0x5003 #define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010 #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011 +#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2 0x501a #define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013 #define USB_VENDOR_ID_LABTEC 0x1020 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index e0a0f06ac5e..9505605b6e2 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -312,6 +312,9 @@ static const struct hid_device_id hid_battery_quirks[] = { USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, + USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO), + HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, {} diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c index b92bf01a1ae..158fcf577fa 100644 --- a/drivers/hid/hid-kye.c +++ b/drivers/hid/hid-kye.c @@ -323,6 +323,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc, } break; case USB_DEVICE_ID_KYE_MOUSEPEN_I608X: + case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2: if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) { rdesc = mousepen_i608x_rdesc_fixed; *rsize = sizeof(mousepen_i608x_rdesc_fixed); @@ -415,6 +416,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id) switch (id->product) { case USB_DEVICE_ID_KYE_EASYPEN_I405X: case USB_DEVICE_ID_KYE_MOUSEPEN_I608X: + case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2: case USB_DEVICE_ID_KYE_EASYPEN_M610X: ret = kye_tablet_enable(hdev); if (ret) { @@ -446,6 +448,8 @@ static const struct hid_device_id kye_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, + USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index c917ab61aaf..5bc6d80d5be 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -962,10 +962,24 @@ static int logi_dj_raw_event(struct hid_device *hdev, switch (data[0]) { case 
REPORT_ID_DJ_SHORT: + if (size != DJREPORT_SHORT_LENGTH) { + dev_err(&hdev->dev, "DJ report of bad size (%d)", size); + return false; + } return logi_dj_dj_event(hdev, report, data, size); case REPORT_ID_HIDPP_SHORT: - /* intentional fallthrough */ + if (size != HIDPP_REPORT_SHORT_LENGTH) { + dev_err(&hdev->dev, + "Short HID++ report of bad size (%d)", size); + return false; + } + return logi_dj_hidpp_event(hdev, report, data, size); case REPORT_ID_HIDPP_LONG: + if (size != HIDPP_REPORT_LONG_LENGTH) { + dev_err(&hdev->dev, + "Long HID++ report of bad size (%d)", size); + return false; + } return logi_dj_hidpp_event(hdev, report, data, size); } diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 2f420c0b660..a93cefe0e52 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -282,6 +282,33 @@ static inline bool hidpp_report_is_connect_event(struct hidpp_report *report) (report->rap.sub_id == 0x41); } +/** + * hidpp_prefix_name() prefixes the current given name with "Logitech ". + */ +static void hidpp_prefix_name(char **name, int name_length) +{ +#define PREFIX_LENGTH 9 /* "Logitech " */ + + int new_length; + char *new_name; + + if (name_length > PREFIX_LENGTH && + strncmp(*name, "Logitech ", PREFIX_LENGTH) == 0) + /* The prefix has is already in the name */ + return; + + new_length = PREFIX_LENGTH + name_length; + new_name = kzalloc(new_length, GFP_KERNEL); + if (!new_name) + return; + + snprintf(new_name, new_length, "Logitech %s", *name); + + kfree(*name); + + *name = new_name; +} + /* -------------------------------------------------------------------------- */ /* HIDP++ 1.0 commands */ /* -------------------------------------------------------------------------- */ @@ -321,6 +348,10 @@ static char *hidpp_get_unifying_name(struct hidpp_device *hidpp_dev) return NULL; memcpy(name, &response.rap.params[2], len); + + /* include the terminating '\0' */ + hidpp_prefix_name(&name, len + 1); + return name; } @@ -498,6 +529,9 @@ static char *hidpp_get_device_name(struct hidpp_device *hidpp) index += ret; } + /* include the terminating '\0' */ + hidpp_prefix_name(&name, __name_length + 1); + return name; } @@ -794,18 +828,25 @@ static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size) switch (data[0]) { case 0x02: + if (size < 2) { + hid_err(hdev, "Received HID report of bad size (%d)", + size); + return 1; + } if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) { input_event(wd->input, EV_KEY, BTN_LEFT, !!(data[1] & 0x01)); input_event(wd->input, EV_KEY, BTN_RIGHT, !!(data[1] & 0x02)); input_sync(wd->input); + return 0; } else { if (size < 21) return 1; return wtp_mouse_raw_xy_event(hidpp, &data[7]); } case REPORT_ID_HIDPP_LONG: + /* size is already checked in hidpp_raw_event. 
*/ if ((report->fap.feature_index != wd->mt_feature_index) || (report->fap.funcindex_clientid != EVENT_TOUCHPAD_RAW_XY)) return 1; diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c index 1a07e07d99a..47d7e74231e 100644 --- a/drivers/hid/hid-roccat-pyra.c +++ b/drivers/hid/hid-roccat-pyra.c @@ -35,6 +35,8 @@ static struct class *pyra_class; static void profile_activated(struct pyra_device *pyra, unsigned int new_profile) { + if (new_profile >= ARRAY_SIZE(pyra->profile_settings)) + return; pyra->actual_profile = new_profile; pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi; } @@ -257,9 +259,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp, if (off != 0 || count != PYRA_SIZE_SETTINGS) return -EINVAL; - mutex_lock(&pyra->pyra_lock); - settings = (struct pyra_settings const *)buf; + if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings)) + return -EINVAL; + + mutex_lock(&pyra->pyra_lock); retval = pyra_set_settings(usb_dev, settings); if (retval) { diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index d32037cbf9d..d43e967e753 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -706,12 +706,7 @@ static int i2c_hid_start(struct hid_device *hid) static void i2c_hid_stop(struct hid_device *hid) { - struct i2c_client *client = hid->driver_data; - struct i2c_hid *ihid = i2c_get_clientdata(client); - hid->claimed = 0; - - i2c_hid_free_buffers(ihid); } static int i2c_hid_open(struct hid_device *hid) diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index dc89be90b35..b27b3d33eba 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -124,6 +124,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS }, diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 1232336b960..40dfbc0444c 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -4029,14 +4029,6 @@ static int device_notifier(struct notifier_block *nb, if (action != BUS_NOTIFY_REMOVED_DEVICE) return 0; - /* - * If the device is still attached to a device driver we can't - * tear down the domain yet as DMA mappings may still be in use. - * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that. 
- */ - if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL) - return 0; - domain = find_domain(dev); if (!domain) return 0; @@ -4428,6 +4420,10 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, domain_remove_one_dev_info(old_domain, dev); else domain_remove_dev_info(old_domain); + + if (!domain_type_is_vm_or_si(old_domain) && + list_empty(&old_domain->devices)) + domain_exit(old_domain); } } diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 68dfb0fd5ee..748693192c2 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -558,7 +558,7 @@ static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd, static u64 ipmmu_page_prot(unsigned int prot, u64 type) { - u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF + u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV | ARM_VMSA_PTE_NS | type; @@ -568,8 +568,8 @@ static u64 ipmmu_page_prot(unsigned int prot, u64 type) if (prot & IOMMU_CACHE) pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT; - if (prot & IOMMU_EXEC) - pgprot &= ~ARM_VMSA_PTE_XN; + if (prot & IOMMU_NOEXEC) + pgprot |= ARM_VMSA_PTE_XN; else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) /* If no access create a faulting entry to avoid TLB fills. */ pgprot &= ~ARM_VMSA_PTE_PAGE; diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index b2023af384b..6a8b1ec4a48 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1009,7 +1009,6 @@ static struct platform_driver rk_iommu_driver = { .remove = rk_iommu_remove, .driver = { .name = "rk_iommu", - .owner = THIS_MODULE, .of_match_table = of_match_ptr(rk_iommu_dt_ids), }, }; diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 1fcd5568a35..f3470d96837 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -850,8 +850,10 @@ static int emac_probe(struct platform_device *pdev) } db->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(db->clk)) + if (IS_ERR(db->clk)) { + ret = PTR_ERR(db->clk); goto out; + } clk_prepare_enable(db->clk); diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 3498760dc22..760c72c6e2a 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -1170,10 +1170,6 @@ tx_request_irq_error: init_error: free_skbufs(dev); alloc_skbuf_error: - if (priv->phydev) { - phy_disconnect(priv->phydev); - priv->phydev = NULL; - } phy_error: return ret; } @@ -1186,12 +1182,9 @@ static int tse_shutdown(struct net_device *dev) int ret; unsigned long int flags; - /* Stop and disconnect the PHY */ - if (priv->phydev) { + /* Stop the PHY */ + if (priv->phydev) phy_stop(priv->phydev); - phy_disconnect(priv->phydev); - priv->phydev = NULL; - } netif_stop_queue(dev); napi_disable(&priv->napi); @@ -1525,6 +1518,10 @@ err_free_netdev: static int altera_tse_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); + struct altera_tse_private *priv = netdev_priv(ndev); + + if (priv->phydev) + phy_disconnect(priv->phydev); platform_set_drvdata(pdev, NULL); altera_tse_mdio_destroy(ndev); diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 705f334ebb8..b29e027c476 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ 
b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1616,7 +1616,7 @@ static int enic_open(struct net_device *netdev) if (vnic_rq_desc_used(&enic->rq[i]) == 0) { netdev_err(netdev, "Unable to alloc receive buffers\n"); err = -ENOMEM; - goto err_out_notify_unset; + goto err_out_free_rq; } } @@ -1649,7 +1649,9 @@ static int enic_open(struct net_device *netdev) return 0; -err_out_notify_unset: +err_out_free_rq: + for (i = 0; i < enic->rq_count; i++) + vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); enic_dev_notify_unset(enic); err_out_free_intr: enic_free_intr(enic); diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 781065eb543..e9c3a87e5b1 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1543,7 +1543,7 @@ static int e100_phy_init(struct nic *nic) mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && - !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) { + (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) { /* enable/disable MDI/MDI-X auto-switching. */ mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 433a55886ad..cb0de455683 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -829,7 +829,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, if (desc_n >= ring->count || desc_n < 0) { dev_info(&pf->pdev->dev, "descriptor %d not found\n", desc_n); - return; + goto out; } if (!is_rx_ring) { txd = I40E_TX_DESC(ring, desc_n); @@ -855,6 +855,8 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, } else { dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n"); } + +out: kfree(ring); } diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 051ea94bdcd..0f69ef81751 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1125,7 +1125,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) u32 swmask = mask; u32 fwmask = mask << 16; s32 ret_val = 0; - s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + s32 i = 0, timeout = 200; while (i < timeout) { if (igb_get_hw_semaphore(hw)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 943cbd47d83..03e9eb0dc76 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1829,7 +1829,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) err = mlx4_dev_cap(dev, &dev_cap); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); - goto err_stop_fw; + return err; } choose_steering_mode(dev, &dev_cap); @@ -1860,7 +1860,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) &init_hca); if ((long long) icm_size < 0) { err = icm_size; - goto err_stop_fw; + return err; } dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; @@ -1874,7 +1874,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); if (err) - goto err_stop_fw; + return err; err = mlx4_INIT_HCA(dev, &init_hca); if (err) { @@ -1886,7 +1886,7 @@ static int 
mlx4_init_hca(struct mlx4_dev *dev) err = mlx4_query_func(dev, &dev_cap); if (err < 0) { mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); - goto err_stop_fw; + goto err_close; } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { dev->caps.num_eqs = dev_cap.max_eqs; dev->caps.reserved_eqs = dev_cap.reserved_eqs; @@ -2006,11 +2006,6 @@ err_free_icm: if (!mlx4_is_slave(dev)) mlx4_free_icms(dev); -err_stop_fw: - if (!mlx4_is_slave(dev)) { - mlx4_UNMAP_FA(dev); - mlx4_free_icm(dev, priv->fw.fw_icm, 0); - } return err; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index d6f549685c0..7094a9c70fd 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -584,6 +584,7 @@ EXPORT_SYMBOL_GPL(mlx4_mr_free); void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr) { mlx4_mtt_cleanup(dev, &mr->mtt); + mr->mtt.order = -1; } EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup); @@ -593,14 +594,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, { int err; - mpt_entry->start = cpu_to_be64(iova); - mpt_entry->length = cpu_to_be64(size); - mpt_entry->entity_size = cpu_to_be32(page_shift); - err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); if (err) return err; + mpt_entry->start = cpu_to_be64(mr->iova); + mpt_entry->length = cpu_to_be64(mr->size); + mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); + mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK | MLX4_MPT_PD_FLAG_EN_INV); mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE | diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index af099057f0e..71af98bb72c 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -4033,8 +4033,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), &mgp->cmd_bus, GFP_KERNEL); - if (mgp->cmd == NULL) + if (!mgp->cmd) { + status = -ENOMEM; goto abort_with_enabled; + } mgp->board_span = pci_resource_len(pdev, 0); mgp->iomem_base = pci_resource_start(pdev, 0); diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index c2f09af5c25..4847713211c 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -146,10 +146,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) { int i = 0; - while (i < 10) { - if (i) - ssleep(1); - + do { if (ql_sem_lock(qdev, QL_DRVR_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) @@ -158,7 +155,8 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) "driver lock acquired\n"); return 1; } - } + ssleep(1); + } while (++i < 10); netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); return 0; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 9929b97cfb3..2528c3fb6b9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2605,6 +2605,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } else { dev_err(&pdev->dev, "%s: failed. 
Please Reboot\n", __func__); + err = -ENODEV; goto err_out_free_hw; } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index c560f9aeb55..e61ee835127 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -757,6 +757,14 @@ requeue: static irqreturn_t cpsw_interrupt(int irq, void *dev_id) { struct cpsw_priv *priv = dev_id; + int value = irq - priv->irqs_table[0]; + + /* NOTICE: Ending IRQ here. The trick with the 'value' variable above + * is to make sure we will always write the correct value to the EOI + * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2 + * for TX Interrupt and 3 for MISC Interrupt. + */ + cpdma_ctlr_eoi(priv->dma, value); cpsw_intr_disable(priv); if (priv->irq_enabled == true) { @@ -786,8 +794,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget) int num_tx, num_rx; num_tx = cpdma_chan_process(priv->txch, 128); - if (num_tx) - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); num_rx = cpdma_chan_process(priv->rxch, budget); if (num_rx < budget) { @@ -795,7 +801,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget) napi_complete(napi); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); prim_cpsw = cpsw_get_slave_priv(priv, 0); if (prim_cpsw->irq_enabled == false) { prim_cpsw->irq_enabled = true; @@ -1310,8 +1315,6 @@ static int cpsw_ndo_open(struct net_device *ndev) napi_enable(&priv->napi); cpdma_ctlr_start(priv->dma); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); prim_cpsw = cpsw_get_slave_priv(priv, 0); if (prim_cpsw->irq_enabled == false) { @@ -1578,9 +1581,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev) cpdma_chan_start(priv->txch); cpdma_ctlr_int_ctrl(priv->dma, true); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); - } static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) @@ -1620,9 +1620,6 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev) cpsw_interrupt(ndev->irq, priv); cpdma_ctlr_int_ctrl(priv->dma, true); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); - } #endif diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 9c2d91ea0af..dbcbf0c5bcf 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1043,6 +1043,7 @@ static int temac_of_probe(struct platform_device *op) lp->regs = of_iomap(op->dev.of_node, 0); if (!lp->regs) { dev_err(&op->dev, "could not map temac regs.\n"); + rc = -ENOMEM; goto nodev; } @@ -1062,6 +1063,7 @@ static int temac_of_probe(struct platform_device *op) np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); if (!np) { dev_err(&op->dev, "could not find DMA node\n"); + rc = -ENODEV; goto err_iounmap; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index c18a0c637c4..a6d2860b712 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1501,6 +1501,7 @@ static int axienet_of_probe(struct platform_device *op) lp->regs = of_iomap(op->dev.of_node, 0); if (!lp->regs) { dev_err(&op->dev, "could not map Axi Ethernet regs.\n"); + ret = -ENOMEM; goto nodev; } /* Setup checksum offload, but default to off if not specified */ @@ -1563,6 +1564,7 @@ static int 
axienet_of_probe(struct platform_device *op) np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0); if (!np) { dev_err(&op->dev, "could not find DMA node\n"); + ret = -ENODEV; goto err_iounmap; } lp->dma_regs = of_iomap(np, 0); diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 24858799c20..9d4ce388510 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1109,6 +1109,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(dev, "no IRQ found\n"); + rc = -ENXIO; goto error; } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index b8a82b86f90..602dc6668c3 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -56,6 +56,8 @@ struct qmi_wwan_state { /* default ethernet address used by the modem */ static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3}; +static const u8 buggy_fw_addr[ETH_ALEN] = {0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00}; + /* Make up an ethernet header if the packet doesn't have one. * * A firmware bug common among several devices cause them to send raw @@ -332,10 +334,12 @@ next_desc: usb_driver_release_interface(driver, info->data); } - /* Never use the same address on both ends of the link, even - * if the buggy firmware told us to. + /* Never use the same address on both ends of the link, even if the + * buggy firmware told us to. Or, if device is assigned the well-known + * buggy firmware MAC address, replace it with a random address, */ - if (ether_addr_equal(dev->net->dev_addr, default_modem_addr)) + if (ether_addr_equal(dev->net->dev_addr, default_modem_addr) || + ether_addr_equal(dev->net->dev_addr, buggy_fw_addr)) eth_hw_addr_random(dev->net); /* make MAC addr easily distinguishable from an IP header */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 2d1c77e8183..57ec23e8ccf 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1897,6 +1897,22 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev) netif_wake_queue(netdev); } +static netdev_features_t +rtl8152_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + u32 mss = skb_shinfo(skb)->gso_size; + int max_offset = mss ? 
GTTCPHO_MAX : TCPHO_MAX; + int offset = skb_transport_offset(skb); + + if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset) + features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz) + features &= ~NETIF_F_GSO_MASK; + + return features; +} + static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, struct net_device *netdev) { @@ -3706,6 +3722,7 @@ static const struct net_device_ops rtl8152_netdev_ops = { .ndo_set_mac_address = rtl8152_set_mac_address, .ndo_change_mtu = rtl8152_change_mtu, .ndo_validate_addr = eth_validate_addr, + .ndo_features_check = rtl8152_features_check, }; static void r8152b_get_version(struct r8152 *tp) diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index efbaf2ae199..794204e34fb 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -737,6 +737,7 @@ static void connect(struct backend_info *be) } queue->remaining_credit = credit_bytes; + queue->credit_usec = credit_usec; err = connect_rings(be, queue); if (err) { diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index ba74f0aa60c..3c22dbebc80 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -89,6 +89,7 @@ struct rockchip_iomux { * @reg_pull: optional separate register for additional pull settings * @clk: clock of the gpio bank * @irq: interrupt of the gpio bank + * @saved_enables: Saved content of GPIO_INTEN at suspend time. * @pin_base: first pin number * @nr_pins: number of pins in this bank * @name: name of the bank @@ -107,6 +108,7 @@ struct rockchip_pin_bank { struct regmap *regmap_pull; struct clk *clk; int irq; + u32 saved_enables; u32 pin_base; u8 nr_pins; char *name; @@ -1543,6 +1545,51 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type) return 0; } +static void rockchip_irq_suspend(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct rockchip_pin_bank *bank = gc->private; + + bank->saved_enables = irq_reg_readl(gc, GPIO_INTEN); + irq_reg_writel(gc, gc->wake_active, GPIO_INTEN); +} + +static void rockchip_irq_resume(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct rockchip_pin_bank *bank = gc->private; + + irq_reg_writel(gc, bank->saved_enables, GPIO_INTEN); +} + +static void rockchip_irq_disable(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + u32 val; + + irq_gc_lock(gc); + + val = irq_reg_readl(gc, GPIO_INTEN); + val &= ~d->mask; + irq_reg_writel(gc, val, GPIO_INTEN); + + irq_gc_unlock(gc); +} + +static void rockchip_irq_enable(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + u32 val; + + irq_gc_lock(gc); + + val = irq_reg_readl(gc, GPIO_INTEN); + val |= d->mask; + irq_reg_writel(gc, val, GPIO_INTEN); + + irq_gc_unlock(gc); +} + static int rockchip_interrupts_register(struct platform_device *pdev, struct rockchip_pinctrl *info) { @@ -1581,12 +1628,16 @@ static int rockchip_interrupts_register(struct platform_device *pdev, gc = irq_get_domain_generic_chip(bank->domain, 0); gc->reg_base = bank->reg_base; gc->private = bank; - gc->chip_types[0].regs.mask = GPIO_INTEN; + gc->chip_types[0].regs.mask = GPIO_INTMASK; gc->chip_types[0].regs.ack = GPIO_PORTS_EOI; gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit; - gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; - gc->chip_types[0].chip.irq_unmask = 
irq_gc_mask_set_bit; + gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit; + gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit; + gc->chip_types[0].chip.irq_enable = rockchip_irq_enable; + gc->chip_types[0].chip.irq_disable = rockchip_irq_disable; gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake; + gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend; + gc->chip_types[0].chip.irq_resume = rockchip_irq_resume; gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type; gc->wake_enabled = IRQ_MSK(bank->nr_pins); diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c index 7c9d5138224..9e5ec00084b 100644 --- a/drivers/pinctrl/pinctrl-st.c +++ b/drivers/pinctrl/pinctrl-st.c @@ -1012,8 +1012,10 @@ static void st_pinconf_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned pin_id) { unsigned long config; - st_pinconf_get(pctldev, pin_id, &config); + mutex_unlock(&pctldev->mutex); + st_pinconf_get(pctldev, pin_id, &config); + mutex_lock(&pctldev->mutex); seq_printf(s, "[OE:%ld,PU:%ld,OD:%ld]\n" "\t\t[retime:%ld,invclk:%ld,clknotdat:%ld," "de:%ld,rt-clk:%ld,rt-delay:%ld]", @@ -1443,6 +1445,7 @@ static struct gpio_chip st_gpio_template = { static struct irq_chip st_gpio_irqchip = { .name = "GPIO", + .irq_disable = st_gpio_irq_mask, .irq_mask = st_gpio_irq_mask, .irq_unmask = st_gpio_irq_unmask, .irq_set_type = st_gpio_irq_set_type, diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 3b73b96619e..26270c35162 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -39,7 +39,7 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.6.0.16" +#define DRV_VERSION "1.6.0.17" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 2097de42a14..155b286f1a9 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -1892,6 +1892,21 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) goto fnic_abort_cmd_end; } + /* IO out of order */ + + if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) { + spin_unlock_irqrestore(io_lock, flags); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "Issuing Host reset due to out of order IO\n"); + + if (fnic_host_reset(sc) == FAILED) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "fnic_host_reset failed.\n"); + } + ret = FAILED; + goto fnic_abort_cmd_end; + } + CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; /* diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 12ca291c138..cce1cbc1a92 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -734,7 +734,9 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) * Return target busy if we've received a non-zero retry_delay_timer * in a FCP_RSP. 
*/ - if (time_after(jiffies, fcport->retry_delay_timestamp)) + if (fcport->retry_delay_timestamp == 0) { + /* retry delay not set */ + } else if (time_after(jiffies, fcport->retry_delay_timestamp)) fcport->retry_delay_timestamp = 0; else goto qc24_target_busy; diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index e42fff6e8c1..8afb01604d5 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -1041,7 +1041,7 @@ retry: } /* signal not to enter either branch of the if () below */ timeleft = 0; - rtn = NEEDS_RETRY; + rtn = FAILED; } else { timeleft = wait_for_completion_timeout(&done, timeout); rtn = SUCCESS; @@ -1081,7 +1081,7 @@ retry: rtn = FAILED; break; } - } else if (!rtn) { + } else if (rtn != FAILED) { scsi_abort_eh_cmnd(scmd); rtn = FAILED; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index fedab3c21dd..399516925d8 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2623,8 +2623,9 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) sd_config_discard(sdkp, SD_LBP_WS16); } else { /* LBP VPD page tells us what to use */ - - if (sdkp->lbpws) + if (sdkp->lbpu && sdkp->max_unmap_blocks && !sdkp->lbprz) + sd_config_discard(sdkp, SD_LBP_UNMAP); + else if (sdkp->lbpws) sd_config_discard(sdkp, SD_LBP_WS16); else if (sdkp->lbpws10) sd_config_discard(sdkp, SD_LBP_WS10); diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 255201f2212..7cc0122a18c 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -840,13 +840,11 @@ static const struct vfio_device_ops vfio_pci_ops = { static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - u8 type; struct vfio_pci_device *vdev; struct iommu_group *group; int ret; - pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type); - if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) + if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL) return -EINVAL; group = iommu_group_get(&pdev->dev); diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 14419a8ccbb..d415d69dc23 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -538,7 +538,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq, ++headcount; seg += in; } - heads[headcount - 1].len = cpu_to_vhost32(vq, len - datalen); + heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen); *iovcount = seg; if (unlikely(log)) *log_num = nlogs; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index ed71b5347a7..cb807d0ea49 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -713,9 +713,13 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp) r = -EFAULT; break; } - if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) || - (a.used_user_addr & (sizeof *vq->used->ring - 1)) || - (a.log_guest_addr & (sizeof *vq->used->ring - 1))) { + + /* Make sure it's safe to cast pointers to vring types. 
*/ + BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE); + BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE); + if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) || + (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) || + (a.log_guest_addr & (sizeof(u64) - 1))) { r = -EINVAL; break; } diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 2ef9529809d..9756f21b809 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -282,6 +282,7 @@ void vp_del_vqs(struct virtio_device *vdev) vp_free_vectors(vdev); kfree(vp_dev->vqs); + vp_dev->vqs = NULL; } static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, @@ -421,15 +422,6 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) return 0; } -void virtio_pci_release_dev(struct device *_d) -{ - /* - * No need for a release method as we allocate/free - * all devices together with the pci devices. - * Provide an empty one to avoid getting a warning from core. - */ -} - #ifdef CONFIG_PM_SLEEP static int virtio_pci_freeze(struct device *dev) { diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index adddb647b21..5a497289b7e 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -126,7 +126,6 @@ const char *vp_bus_name(struct virtio_device *vdev); * - ignore the affinity request if we're using INTX */ int vp_set_vq_affinity(struct virtqueue *vq, int cpu); -void virtio_pci_release_dev(struct device *); int virtio_pci_legacy_probe(struct pci_dev *pci_dev, const struct pci_device_id *id); diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c index 6c76f0f5658..a5486e65e04 100644 --- a/drivers/virtio/virtio_pci_legacy.c +++ b/drivers/virtio/virtio_pci_legacy.c @@ -211,6 +211,17 @@ static const struct virtio_config_ops virtio_pci_config_ops = { .set_vq_affinity = vp_set_vq_affinity, }; +static void virtio_pci_release_dev(struct device *_d) +{ + struct virtio_device *vdev = dev_to_virtio(_d); + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + /* As struct device is a kobject, it's not safe to + * free the memory (including the reference counter itself) + * until it's release callback. 
*/ + kfree(vp_dev); +} + /* the PCI probing function */ int virtio_pci_legacy_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) @@ -302,5 +313,4 @@ void virtio_pci_legacy_remove(struct pci_dev *pci_dev) pci_iounmap(pci_dev, vp_dev->ioaddr); pci_release_regions(pci_dev); pci_disable_device(pci_dev); - kfree(vp_dev); } diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 2d3e32ebfd1..8729cf68d2f 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1552,7 +1552,6 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, { int ret; int type; - struct btrfs_tree_block_info *info; struct btrfs_extent_inline_ref *eiref; if (*ptr == (unsigned long)-1) @@ -1573,9 +1572,17 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, } /* we can treat both ref types equally here */ - info = (struct btrfs_tree_block_info *)(ei + 1); *out_root = btrfs_extent_inline_ref_offset(eb, eiref); - *out_level = btrfs_tree_block_level(eb, info); + + if (key->type == BTRFS_EXTENT_ITEM_KEY) { + struct btrfs_tree_block_info *info; + + info = (struct btrfs_tree_block_info *)(ei + 1); + *out_level = btrfs_tree_block_level(eb, info); + } else { + ASSERT(key->type == BTRFS_METADATA_ITEM_KEY); + *out_level = (u8)key->offset; + } if (ret == 1) *ptr = (unsigned long)-1; diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 054577bddaf..de4e70fb3cb 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1857,6 +1857,14 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode) { struct btrfs_delayed_node *delayed_node; + /* + * we don't do delayed inode updates during log recovery because it + * leads to enospc problems. This means we also can't do + * delayed inode refs + */ + if (BTRFS_I(inode)->root->fs_info->log_root_recovering) + return -EAGAIN; + delayed_node = btrfs_get_or_create_delayed_node(inode); if (IS_ERR(delayed_node)) return PTR_ERR(delayed_node); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a80b97100d9..15116585e71 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3139,9 +3139,11 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, struct extent_buffer *leaf; ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); - if (ret < 0) + if (ret) { + if (ret > 0) + ret = -ENOENT; goto fail; - BUG_ON(ret); /* Corruption */ + } leaf = path->nodes[0]; bi = btrfs_item_ptr_offset(leaf, path->slots[0]); @@ -3149,11 +3151,9 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); fail: - if (ret) { + if (ret) btrfs_abort_transaction(trans, root, ret); - return ret; - } - return 0; + return ret; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e687bb0dc73..8bf326affb9 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6255,8 +6255,10 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) out_fail: btrfs_end_transaction(trans, root); - if (drop_on_err) + if (drop_on_err) { + inode_dec_link_count(inode); iput(inode); + } btrfs_balance_delayed_items(root); btrfs_btree_balance_dirty(root); return err; diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index f2bb13a23f8..9e1569ffbf6 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -2607,9 +2607,9 @@ static int scrub_extent_for_parity(struct scrub_parity *sparity, ret = scrub_pages_for_parity(sparity, logical, l, physical, dev, flags, gen, mirror_num, have_csum ? 
csum : NULL); -skip: if (ret) return ret; +skip: len -= l; logical += l; physical += l; diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index f5013d92a7e..c81c0e00458 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -1416,7 +1416,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, } } - dout("fill_inline_data %p %llx.%llx len %lu locked_page %p\n", + dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n", inode, ceph_vinop(inode), len, locked_page); if (len > 0) { diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index e5d3eadf47b..bed43081720 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -5166,8 +5166,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, /* fallback to generic here if not in extents fmt */ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) - return __generic_block_fiemap(inode, fieinfo, start, len, - ext4_get_block); + return generic_block_fiemap(inode, fieinfo, start, len, + ext4_get_block); if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) return -EBADR; diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 513c12cf444..8131be8c0af 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -273,19 +273,24 @@ static int ext4_file_open(struct inode * inode, struct file * filp) * we determine this extent as a data or a hole according to whether the * page cache has data or not. */ -static int ext4_find_unwritten_pgoff(struct inode *inode, int whence, - loff_t endoff, loff_t *offset) +static int ext4_find_unwritten_pgoff(struct inode *inode, + int whence, + struct ext4_map_blocks *map, + loff_t *offset) { struct pagevec pvec; + unsigned int blkbits; pgoff_t index; pgoff_t end; + loff_t endoff; loff_t startoff; loff_t lastoff; int found = 0; + blkbits = inode->i_sb->s_blocksize_bits; startoff = *offset; lastoff = startoff; - + endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits; index = startoff >> PAGE_CACHE_SHIFT; end = endoff >> PAGE_CACHE_SHIFT; @@ -403,144 +408,147 @@ out: static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) { struct inode *inode = file->f_mapping->host; - struct fiemap_extent_info fie; - struct fiemap_extent ext[2]; - loff_t next; - int i, ret = 0; + struct ext4_map_blocks map; + struct extent_status es; + ext4_lblk_t start, last, end; + loff_t dataoff, isize; + int blkbits; + int ret = 0; mutex_lock(&inode->i_mutex); - if (offset >= inode->i_size) { + + isize = i_size_read(inode); + if (offset >= isize) { mutex_unlock(&inode->i_mutex); return -ENXIO; } - fie.fi_flags = 0; - fie.fi_extents_max = 2; - fie.fi_extents_start = (struct fiemap_extent __user *) &ext; - while (1) { - mm_segment_t old_fs = get_fs(); - - fie.fi_extents_mapped = 0; - memset(ext, 0, sizeof(*ext) * fie.fi_extents_max); - - set_fs(get_ds()); - ret = ext4_fiemap(inode, &fie, offset, maxsize - offset); - set_fs(old_fs); - if (ret) + + blkbits = inode->i_sb->s_blocksize_bits; + start = offset >> blkbits; + last = start; + end = isize >> blkbits; + dataoff = offset; + + do { + map.m_lblk = last; + map.m_len = end - last + 1; + ret = ext4_map_blocks(NULL, inode, &map, 0); + if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { + if (last != start) + dataoff = (loff_t)last << blkbits; break; + } - /* No extents found, EOF */ - if (!fie.fi_extents_mapped) { - ret = -ENXIO; + /* + * If there is a delay extent at this offset, + * it will be as a data. 
+ */ + ext4_es_find_delayed_extent_range(inode, last, last, &es); + if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { + if (last != start) + dataoff = (loff_t)last << blkbits; break; } - for (i = 0; i < fie.fi_extents_mapped; i++) { - next = (loff_t)(ext[i].fe_length + ext[i].fe_logical); - if (offset < (loff_t)ext[i].fe_logical) - offset = (loff_t)ext[i].fe_logical; - /* - * If extent is not unwritten, then it contains valid - * data, mapped or delayed. - */ - if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) - goto out; + /* + * If there is a unwritten extent at this offset, + * it will be as a data or a hole according to page + * cache that has data or not. + */ + if (map.m_flags & EXT4_MAP_UNWRITTEN) { + int unwritten; + unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA, + &map, &dataoff); + if (unwritten) + break; + } - /* - * If there is a unwritten extent at this offset, - * it will be as a data or a hole according to page - * cache that has data or not. - */ - if (ext4_find_unwritten_pgoff(inode, SEEK_DATA, - next, &offset)) - goto out; + last++; + dataoff = (loff_t)last << blkbits; + } while (last <= end); - if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) { - ret = -ENXIO; - goto out; - } - offset = next; - } - } - if (offset > inode->i_size) - offset = inode->i_size; -out: mutex_unlock(&inode->i_mutex); - if (ret) - return ret; - return vfs_setpos(file, offset, maxsize); + if (dataoff > isize) + return -ENXIO; + + return vfs_setpos(file, dataoff, maxsize); } /* - * ext4_seek_hole() retrieves the offset for SEEK_HOLE + * ext4_seek_hole() retrieves the offset for SEEK_HOLE. */ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) { struct inode *inode = file->f_mapping->host; - struct fiemap_extent_info fie; - struct fiemap_extent ext[2]; - loff_t next; - int i, ret = 0; + struct ext4_map_blocks map; + struct extent_status es; + ext4_lblk_t start, last, end; + loff_t holeoff, isize; + int blkbits; + int ret = 0; mutex_lock(&inode->i_mutex); - if (offset >= inode->i_size) { + + isize = i_size_read(inode); + if (offset >= isize) { mutex_unlock(&inode->i_mutex); return -ENXIO; } - fie.fi_flags = 0; - fie.fi_extents_max = 2; - fie.fi_extents_start = (struct fiemap_extent __user *)&ext; - while (1) { - mm_segment_t old_fs = get_fs(); - - fie.fi_extents_mapped = 0; - memset(ext, 0, sizeof(*ext)); + blkbits = inode->i_sb->s_blocksize_bits; + start = offset >> blkbits; + last = start; + end = isize >> blkbits; + holeoff = offset; - set_fs(get_ds()); - ret = ext4_fiemap(inode, &fie, offset, maxsize - offset); - set_fs(old_fs); - if (ret) - break; + do { + map.m_lblk = last; + map.m_len = end - last + 1; + ret = ext4_map_blocks(NULL, inode, &map, 0); + if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { + last += ret; + holeoff = (loff_t)last << blkbits; + continue; + } - /* No extents found */ - if (!fie.fi_extents_mapped) - break; + /* + * If there is a delay extent at this offset, + * we will skip this extent. + */ + ext4_es_find_delayed_extent_range(inode, last, last, &es); + if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { + last = es.es_lblk + es.es_len; + holeoff = (loff_t)last << blkbits; + continue; + } - for (i = 0; i < fie.fi_extents_mapped; i++) { - next = (loff_t)(ext[i].fe_logical + ext[i].fe_length); - /* - * If extent is not unwritten, then it contains valid - * data, mapped or delayed. 
- */ - if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) { - if (offset < (loff_t)ext[i].fe_logical) - goto out; - offset = next; + /* + * If there is a unwritten extent at this offset, + * it will be as a data or a hole according to page + * cache that has data or not. + */ + if (map.m_flags & EXT4_MAP_UNWRITTEN) { + int unwritten; + unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE, + &map, &holeoff); + if (!unwritten) { + last += ret; + holeoff = (loff_t)last << blkbits; continue; } - /* - * If there is a unwritten extent at this offset, - * it will be as a data or a hole according to page - * cache that has data or not. - */ - if (ext4_find_unwritten_pgoff(inode, SEEK_HOLE, - next, &offset)) - goto out; - - offset = next; - if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) - goto out; } - } - if (offset > inode->i_size) - offset = inode->i_size; -out: + + /* find a hole */ + break; + } while (last <= end); + mutex_unlock(&inode->i_mutex); - if (ret) - return ret; - return vfs_setpos(file, offset, maxsize); + if (holeoff > isize) + holeoff = isize; + + return vfs_setpos(file, holeoff, maxsize); } /* diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index bf76f405a5f..8a8ec6293b1 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -24,6 +24,18 @@ int ext4_resize_begin(struct super_block *sb) return -EPERM; /* + * If we are not using the primary superblock/GDT copy don't resize, + * because the user tools have no way of handling this. Probably a + * bad time to do it anyways. + */ + if (EXT4_SB(sb)->s_sbh->b_blocknr != + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { + ext4_warning(sb, "won't resize using backup superblock at %llu", + (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); + return -EPERM; + } + + /* * We are not allowed to do online-resizing on a filesystem mounted * with error, because it can destroy the filesystem easily. */ @@ -758,18 +770,6 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n", gdb_num); - /* - * If we are not using the primary superblock/GDT copy don't resize, - * because the user tools have no way of handling this. Probably a - * bad time to do it anyways. - */ - if (EXT4_SB(sb)->s_sbh->b_blocknr != - le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { - ext4_warning(sb, "won't resize using backup superblock at %llu", - (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); - return -EPERM; - } - gdb_bh = sb_bread(sb, gdblock); if (!gdb_bh) return -EIO; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 43c92b1685c..74c5f53595f 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -3482,7 +3482,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) - ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are " + ext4_warning(sb, "metadata_csum and uninit_bg are " "redundant flags; please run fsck."); /* Check for a known checksum algorithm */ diff --git a/fs/fcntl.c b/fs/fcntl.c index 99d440a4a6b..ee85cd4e136 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -740,14 +740,15 @@ static int __init fcntl_init(void) * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY * is defined as O_NONBLOCK on some platforms and not on others. 
*/ - BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( + BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | O_APPEND | /* O_NONBLOCK | */ __O_SYNC | O_DSYNC | FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | O_NOATIME | O_CLOEXEC | - __FMODE_EXEC | O_PATH | __O_TMPFILE + __FMODE_EXEC | O_PATH | __O_TMPFILE | + __FMODE_NONOTIFY )); fasync_cache = kmem_cache_create("fasync_cache", diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 3550a9c8761..c06a1ba80d7 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3897,11 +3897,11 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, status = nfs4_setlease(dp); goto out; } - atomic_inc(&fp->fi_delegees); if (fp->fi_had_conflict) { status = -EAGAIN; goto out_unlock; } + atomic_inc(&fp->fi_delegees); hash_delegation_locked(dp, fp); status = 0; out_unlock: diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 79b5af5e6a7..cecd875653e 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -2023,11 +2023,8 @@ leave: dlm_lockres_drop_inflight_ref(dlm, res); spin_unlock(&res->spinlock); - if (ret < 0) { + if (ret < 0) mlog_errno(ret); - if (newlock) - dlm_lock_put(newlock); - } return ret; } diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index b931e04e338..914c121ec89 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -94,6 +94,14 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb, struct inode *inode, const char *symname); +static int ocfs2_double_lock(struct ocfs2_super *osb, + struct buffer_head **bh1, + struct inode *inode1, + struct buffer_head **bh2, + struct inode *inode2, + int rename); + +static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2); /* An orphan dir name is an 8 byte value, printed as a hex string */ #define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64))) @@ -678,8 +686,10 @@ static int ocfs2_link(struct dentry *old_dentry, { handle_t *handle; struct inode *inode = old_dentry->d_inode; + struct inode *old_dir = old_dentry->d_parent->d_inode; int err; struct buffer_head *fe_bh = NULL; + struct buffer_head *old_dir_bh = NULL; struct buffer_head *parent_fe_bh = NULL; struct ocfs2_dinode *fe = NULL; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); @@ -696,19 +706,33 @@ static int ocfs2_link(struct dentry *old_dentry, dquot_initialize(dir); - err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT); + err = ocfs2_double_lock(osb, &old_dir_bh, old_dir, + &parent_fe_bh, dir, 0); if (err < 0) { if (err != -ENOENT) mlog_errno(err); return err; } + /* make sure both dirs have bhs + * get an extra ref on old_dir_bh if old==new */ + if (!parent_fe_bh) { + if (old_dir_bh) { + parent_fe_bh = old_dir_bh; + get_bh(parent_fe_bh); + } else { + mlog(ML_ERROR, "%s: no old_dir_bh!\n", osb->uuid_str); + err = -EIO; + goto out; + } + } + if (!dir->i_nlink) { err = -ENOENT; goto out; } - err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name, + err = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name, old_dentry->d_name.len, &old_de_ino); if (err) { err = -ENOENT; @@ -801,10 +825,11 @@ out_unlock_inode: ocfs2_inode_unlock(inode, 1); out: - ocfs2_inode_unlock(dir, 1); + ocfs2_double_unlock(old_dir, dir); brelse(fe_bh); brelse(parent_fe_bh); + brelse(old_dir_bh); ocfs2_free_dir_lookup_result(&lookup); @@ -1072,14 +1097,15 @@ static int ocfs2_check_if_ancestor(struct ocfs2_super *osb, } /* - * The only place this should be 
used is rename! + * The only place this should be used is rename and link! * if they have the same id, then the 1st one is the only one locked. */ static int ocfs2_double_lock(struct ocfs2_super *osb, struct buffer_head **bh1, struct inode *inode1, struct buffer_head **bh2, - struct inode *inode2) + struct inode *inode2, + int rename) { int status; int inode1_is_ancestor, inode2_is_ancestor; @@ -1127,7 +1153,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb, } /* lock id2 */ status = ocfs2_inode_lock_nested(inode2, bh2, 1, - OI_LS_RENAME1); + rename == 1 ? OI_LS_RENAME1 : OI_LS_PARENT); if (status < 0) { if (status != -ENOENT) mlog_errno(status); @@ -1136,7 +1162,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb, } /* lock id1 */ - status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2); + status = ocfs2_inode_lock_nested(inode1, bh1, 1, + rename == 1 ? OI_LS_RENAME2 : OI_LS_PARENT); if (status < 0) { /* * An error return must mean that no cluster locks @@ -1252,7 +1279,7 @@ static int ocfs2_rename(struct inode *old_dir, /* if old and new are the same, this'll just do one lock. */ status = ocfs2_double_lock(osb, &old_dir_bh, old_dir, - &new_dir_bh, new_dir); + &new_dir_bh, new_dir, 1); if (status < 0) { mlog_errno(status); goto bail; diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 3ca9b751f12..b95dc32a6e6 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -196,8 +196,8 @@ struct acpi_processor_flags { struct acpi_processor { acpi_handle handle; u32 acpi_id; - u32 apic_id; - u32 id; + u32 phys_id; /* CPU hardware ID such as APIC ID for x86 */ + u32 id; /* CPU logical ID allocated by OS */ u32 pblk; int performance_platform_limit; int throttling_platform_limit; @@ -310,8 +310,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) #endif /* CONFIG_CPU_FREQ */ /* in processor_core.c */ -int acpi_get_apicid(acpi_handle, int type, u32 acpi_id); -int acpi_map_cpuid(int apic_id, u32 acpi_id); +int acpi_get_phys_id(acpi_handle, int type, u32 acpi_id); +int acpi_map_cpuid(int phys_id, u32 acpi_id); int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id); /* in processor_pdc.c */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 856d381b1d5..d459cd17b47 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -147,8 +147,8 @@ void acpi_numa_arch_fixup(void); #ifdef CONFIG_ACPI_HOTPLUG_CPU /* Arch dependent functions for cpu hotplug support */ -int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu); -int acpi_unmap_lsapic(int cpu); +int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu); +int acpi_unmap_cpu(int cpu); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 5d86416d35f..61b19c46bdb 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -87,8 +87,8 @@ struct ceph_osd_req_op { struct ceph_osd_data osd_data; } extent; struct { - __le32 name_len; - __le32 value_len; + u32 name_len; + u32 value_len; __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ struct ceph_osd_data osd_data; diff --git a/include/linux/fs.h b/include/linux/fs.h index f90c0282c11..42efe13077b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -135,7 +135,7 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, #define FMODE_CAN_WRITE ((__force fmode_t)0x40000) /* File was 
opened by fanotify and shouldn't generate fanotify events */ -#define FMODE_NONOTIFY ((__force fmode_t)0x1000000) +#define FMODE_NONOTIFY ((__force fmode_t)0x4000000) /* * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector diff --git a/include/linux/kdb.h b/include/linux/kdb.h index 290db1269c4..75ae2e2631f 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -13,11 +13,54 @@ * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com> */ +/* Shifted versions of the command enable bits are be used if the command + * has no arguments (see kdb_check_flags). This allows commands, such as + * go, to have different permissions depending upon whether it is called + * with an argument. + */ +#define KDB_ENABLE_NO_ARGS_SHIFT 10 + typedef enum { - KDB_REPEAT_NONE = 0, /* Do not repeat this command */ - KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */ - KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */ -} kdb_repeat_t; + KDB_ENABLE_ALL = (1 << 0), /* Enable everything */ + KDB_ENABLE_MEM_READ = (1 << 1), + KDB_ENABLE_MEM_WRITE = (1 << 2), + KDB_ENABLE_REG_READ = (1 << 3), + KDB_ENABLE_REG_WRITE = (1 << 4), + KDB_ENABLE_INSPECT = (1 << 5), + KDB_ENABLE_FLOW_CTRL = (1 << 6), + KDB_ENABLE_SIGNAL = (1 << 7), + KDB_ENABLE_REBOOT = (1 << 8), + /* User exposed values stop here, all remaining flags are + * exclusively used to describe a commands behaviour. + */ + + KDB_ENABLE_ALWAYS_SAFE = (1 << 9), + KDB_ENABLE_MASK = (1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1, + + KDB_ENABLE_ALL_NO_ARGS = KDB_ENABLE_ALL << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_MEM_READ_NO_ARGS = KDB_ENABLE_MEM_READ + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_MEM_WRITE_NO_ARGS = KDB_ENABLE_MEM_WRITE + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_REG_READ_NO_ARGS = KDB_ENABLE_REG_READ + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_REG_WRITE_NO_ARGS = KDB_ENABLE_REG_WRITE + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_INSPECT_NO_ARGS = KDB_ENABLE_INSPECT + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_FLOW_CTRL_NO_ARGS = KDB_ENABLE_FLOW_CTRL + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_SIGNAL_NO_ARGS = KDB_ENABLE_SIGNAL + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_REBOOT_NO_ARGS = KDB_ENABLE_REBOOT + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = KDB_ENABLE_ALWAYS_SAFE + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_MASK_NO_ARGS = KDB_ENABLE_MASK << KDB_ENABLE_NO_ARGS_SHIFT, + + KDB_REPEAT_NO_ARGS = 0x40000000, /* Repeat the command w/o arguments */ + KDB_REPEAT_WITH_ARGS = 0x80000000, /* Repeat the command with args */ +} kdb_cmdflags_t; typedef int (*kdb_func_t)(int, const char **); @@ -62,6 +105,7 @@ extern atomic_t kdb_event; #define KDB_BADLENGTH (-19) #define KDB_NOBP (-20) #define KDB_BADADDR (-21) +#define KDB_NOPERM (-22) /* * kdb_diemsg @@ -146,17 +190,17 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos) /* Dynamic kdb shell command registration */ extern int kdb_register(char *, kdb_func_t, char *, char *, short); -extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, - short, kdb_repeat_t); +extern int kdb_register_flags(char *, kdb_func_t, char *, char *, + short, kdb_cmdflags_t); extern int kdb_unregister(char *); #else /* ! CONFIG_KGDB_KDB */ static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) 
{ return 0; } static inline void kdb_init(int level) {} static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, char *help, short minlen) { return 0; } -static inline int kdb_register_repeat(char *cmd, kdb_func_t func, char *usage, - char *help, short minlen, - kdb_repeat_t repeat) { return 0; } +static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage, + char *help, short minlen, + kdb_cmdflags_t flags) { return 0; } static inline int kdb_unregister(char *cmd) { return 0; } #endif /* CONFIG_KGDB_KDB */ enum { diff --git a/include/linux/mm.h b/include/linux/mm.h index f80d0194c9b..80fc92a4964 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1952,7 +1952,7 @@ extern int expand_downwards(struct vm_area_struct *vma, #if VM_GROWSUP extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); #else - #define expand_upwards(vma, address) do { } while (0) + #define expand_upwards(vma, address) (0) #endif /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ diff --git a/include/linux/rmap.h b/include/linux/rmap.h index c0c2bce6b0b..d9d7e7e5635 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -37,6 +37,16 @@ struct anon_vma { atomic_t refcount; /* + * Count of child anon_vmas and VMAs which points to this anon_vma. + * + * This counter is used for making decision about reusing anon_vma + * instead of forking new one. See comments in function anon_vma_clone. + */ + unsigned degree; + + struct anon_vma *parent; /* Parent of this anon_vma */ + + /* * NOTE: the LSB of the rb_root.rb_node is set by * mm_take_all_locks() _after_ taking the above lock. So the * rb_root must only be read/written after taking the above lock diff --git a/include/linux/writeback.h b/include/linux/writeback.h index a219be961c0..00048339c23 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -177,7 +177,6 @@ int write_cache_pages(struct address_space *mapping, struct writeback_control *wbc, writepage_t writepage, void *data); int do_writepages(struct address_space *mapping, struct writeback_control *wbc); -void set_page_dirty_balance(struct page *page); void writeback_set_ratelimit(void); void tag_pages_for_writeback(struct address_space *mapping, pgoff_t start, pgoff_t end); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 58d719ddaa6..29c7be8808d 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1270,8 +1270,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); * * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the * driver to indicate that it requires IV generation for this - * particular key. Setting this flag does not necessarily mean that SKBs - * will have sufficient tailroom for ICV or MIC. + * particular key. * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by * the driver for a TKIP key if it requires Michael MIC * generation in software. @@ -1283,9 +1282,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver * if space should be prepared for the IV, but the IV * itself should not be generated. Do not set together with - * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does - * not necessarily mean that SKBs will have sufficient tailroom for ICV or - * MIC. + * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received * management frames. 
The flag can help drivers that have a hardware * crypto implementation that doesn't deal with management frames diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 1e7f74acc2e..b429b73e875 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -857,7 +857,7 @@ static inline unsigned int params_channels(const struct snd_pcm_hw_params *p) } /** - * params_channels - Get the sample rate from the hw params + * params_rate - Get the sample rate from the hw params * @p: hw params */ static inline unsigned int params_rate(const struct snd_pcm_hw_params *p) @@ -866,7 +866,7 @@ static inline unsigned int params_rate(const struct snd_pcm_hw_params *p) } /** - * params_channels - Get the period size (in frames) from the hw params + * params_period_size - Get the period size (in frames) from the hw params * @p: hw params */ static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p) @@ -875,7 +875,7 @@ static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p) } /** - * params_channels - Get the number of periods from the hw params + * params_periods - Get the number of periods from the hw params * @p: hw params */ static inline unsigned int params_periods(const struct snd_pcm_hw_params *p) @@ -884,7 +884,7 @@ static inline unsigned int params_periods(const struct snd_pcm_hw_params *p) } /** - * params_channels - Get the buffer size (in frames) from the hw params + * params_buffer_size - Get the buffer size (in frames) from the hw params * @p: hw params */ static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p) @@ -893,7 +893,7 @@ static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p) } /** - * params_channels - Get the buffer size (in bytes) from the hw params + * params_buffer_bytes - Get the buffer size (in bytes) from the hw params * @p: hw params */ static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p) diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h index 7543b3e5133..e063effe0cc 100644 --- a/include/uapi/asm-generic/fcntl.h +++ b/include/uapi/asm-generic/fcntl.h @@ -5,7 +5,7 @@ /* * FMODE_EXEC is 0x20 - * FMODE_NONOTIFY is 0x1000000 + * FMODE_NONOTIFY is 0x4000000 * These cannot be used by userspace O_* until internal and external open * flags are split. 
* -Eric Paris diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 7acef41fc20..af94f31e33a 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -128,27 +128,34 @@ struct kfd_ioctl_get_process_apertures_args { uint32_t pad; }; -#define KFD_IOC_MAGIC 'K' +#define AMDKFD_IOCTL_BASE 'K' +#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr) +#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type) +#define AMDKFD_IOW(nr, type) _IOW(AMDKFD_IOCTL_BASE, nr, type) +#define AMDKFD_IOWR(nr, type) _IOWR(AMDKFD_IOCTL_BASE, nr, type) -#define KFD_IOC_GET_VERSION \ - _IOR(KFD_IOC_MAGIC, 1, struct kfd_ioctl_get_version_args) +#define AMDKFD_IOC_GET_VERSION \ + AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args) -#define KFD_IOC_CREATE_QUEUE \ - _IOWR(KFD_IOC_MAGIC, 2, struct kfd_ioctl_create_queue_args) +#define AMDKFD_IOC_CREATE_QUEUE \ + AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args) -#define KFD_IOC_DESTROY_QUEUE \ - _IOWR(KFD_IOC_MAGIC, 3, struct kfd_ioctl_destroy_queue_args) +#define AMDKFD_IOC_DESTROY_QUEUE \ + AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args) -#define KFD_IOC_SET_MEMORY_POLICY \ - _IOW(KFD_IOC_MAGIC, 4, struct kfd_ioctl_set_memory_policy_args) +#define AMDKFD_IOC_SET_MEMORY_POLICY \ + AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args) -#define KFD_IOC_GET_CLOCK_COUNTERS \ - _IOWR(KFD_IOC_MAGIC, 5, struct kfd_ioctl_get_clock_counters_args) +#define AMDKFD_IOC_GET_CLOCK_COUNTERS \ + AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args) -#define KFD_IOC_GET_PROCESS_APERTURES \ - _IOR(KFD_IOC_MAGIC, 6, struct kfd_ioctl_get_process_apertures_args) +#define AMDKFD_IOC_GET_PROCESS_APERTURES \ + AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args) -#define KFD_IOC_UPDATE_QUEUE \ - _IOW(KFD_IOC_MAGIC, 7, struct kfd_ioctl_update_queue_args) +#define AMDKFD_IOC_UPDATE_QUEUE \ + AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args) + +#define AMDKFD_COMMAND_START 0x01 +#define AMDKFD_COMMAND_END 0x08 #endif diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h index 61c818a7fe7..a3318f31e8e 100644 --- a/include/uapi/linux/virtio_ring.h +++ b/include/uapi/linux/virtio_ring.h @@ -101,6 +101,13 @@ struct vring { struct vring_used *used; }; +/* Alignment requirements for vring elements. + * When using pre-virtio 1.0 layout, these fall out naturally. + */ +#define VRING_AVAIL_ALIGN_SIZE 2 +#define VRING_USED_ALIGN_SIZE 4 +#define VRING_DESC_ALIGN_SIZE 16 + /* The standard layout for the ring is a continuous chunk of memory which looks * like this. We assume num is a power of 2. * diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 1adf62b39b9..07ce18ca71e 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -27,6 +27,9 @@ * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. 
*/ + +#define pr_fmt(fmt) "KGDB: " fmt + #include <linux/pid_namespace.h> #include <linux/clocksource.h> #include <linux/serial_core.h> @@ -196,8 +199,8 @@ int __weak kgdb_validate_break_address(unsigned long addr) return err; err = kgdb_arch_remove_breakpoint(&tmp); if (err) - printk(KERN_ERR "KGDB: Critical breakpoint error, kernel " - "memory destroyed at: %lx", addr); + pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n", + addr); return err; } @@ -256,8 +259,8 @@ int dbg_activate_sw_breakpoints(void) error = kgdb_arch_set_breakpoint(&kgdb_break[i]); if (error) { ret = error; - printk(KERN_INFO "KGDB: BP install failed: %lx", - kgdb_break[i].bpt_addr); + pr_info("BP install failed: %lx\n", + kgdb_break[i].bpt_addr); continue; } @@ -319,8 +322,8 @@ int dbg_deactivate_sw_breakpoints(void) continue; error = kgdb_arch_remove_breakpoint(&kgdb_break[i]); if (error) { - printk(KERN_INFO "KGDB: BP remove failed: %lx\n", - kgdb_break[i].bpt_addr); + pr_info("BP remove failed: %lx\n", + kgdb_break[i].bpt_addr); ret = error; } @@ -367,7 +370,7 @@ int dbg_remove_all_break(void) goto setundefined; error = kgdb_arch_remove_breakpoint(&kgdb_break[i]); if (error) - printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n", + pr_err("breakpoint remove failed: %lx\n", kgdb_break[i].bpt_addr); setundefined: kgdb_break[i].state = BP_UNDEFINED; @@ -400,9 +403,9 @@ static int kgdb_io_ready(int print_wait) if (print_wait) { #ifdef CONFIG_KGDB_KDB if (!dbg_kdb_mode) - printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n"); + pr_crit("waiting... or $3#33 for KDB\n"); #else - printk(KERN_CRIT "KGDB: Waiting for remote debugger\n"); + pr_crit("Waiting for remote debugger\n"); #endif } return 1; @@ -430,8 +433,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks) exception_level = 0; kgdb_skipexception(ks->ex_vector, ks->linux_regs); dbg_activate_sw_breakpoints(); - printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n", - addr); + pr_crit("re-enter error: breakpoint removed %lx\n", addr); WARN_ON_ONCE(1); return 1; @@ -444,7 +446,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks) panic("Recursive entry to debugger"); } - printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n"); + pr_crit("re-enter exception: ALL breakpoints killed\n"); #ifdef CONFIG_KGDB_KDB /* Allow kdb to debug itself one level */ return 0; @@ -471,6 +473,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, int cpu; int trace_on = 0; int online_cpus = num_online_cpus(); + u64 time_left; kgdb_info[ks->cpu].enter_kgdb++; kgdb_info[ks->cpu].exception_state |= exception_state; @@ -595,9 +598,13 @@ return_normal: /* * Wait for the other CPUs to be notified and be waiting for us: */ - while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) + - atomic_read(&slaves_in_kgdb)) != online_cpus) + time_left = loops_per_jiffy * HZ; + while (kgdb_do_roundup && --time_left && + (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) != + online_cpus) cpu_relax(); + if (!time_left) + pr_crit("KGDB: Timed out waiting for secondary CPUs.\n"); /* * At this point the primary processor is completely @@ -795,15 +802,15 @@ static struct console kgdbcons = { static void sysrq_handle_dbg(int key) { if (!dbg_io_ops) { - printk(KERN_CRIT "ERROR: No KGDB I/O module available\n"); + pr_crit("ERROR: No KGDB I/O module available\n"); return; } if (!kgdb_connected) { #ifdef CONFIG_KGDB_KDB if (!dbg_kdb_mode) - printk(KERN_CRIT "KGDB or $3#33 for KDB\n"); + pr_crit("KGDB or $3#33 for KDB\n"); 
#else - printk(KERN_CRIT "Entering KGDB\n"); + pr_crit("Entering KGDB\n"); #endif } @@ -945,7 +952,7 @@ static void kgdb_initial_breakpoint(void) { kgdb_break_asap = 0; - printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n"); + pr_crit("Waiting for connection from remote gdb...\n"); kgdb_breakpoint(); } @@ -964,8 +971,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops) if (dbg_io_ops) { spin_unlock(&kgdb_registration_lock); - printk(KERN_ERR "kgdb: Another I/O driver is already " - "registered with KGDB.\n"); + pr_err("Another I/O driver is already registered with KGDB\n"); return -EBUSY; } @@ -981,8 +987,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops) spin_unlock(&kgdb_registration_lock); - printk(KERN_INFO "kgdb: Registered I/O driver %s.\n", - new_dbg_io_ops->name); + pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name); /* Arm KGDB now. */ kgdb_register_callbacks(); @@ -1017,8 +1022,7 @@ void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops) spin_unlock(&kgdb_registration_lock); - printk(KERN_INFO - "kgdb: Unregistered I/O driver %s, debugger disabled.\n", + pr_info("Unregistered I/O driver %s, debugger disabled\n", old_dbg_io_ops->name); } EXPORT_SYMBOL_GPL(kgdb_unregister_io_module); diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c index b20d544f20c..e1dbf4a2c69 100644 --- a/kernel/debug/kdb/kdb_bp.c +++ b/kernel/debug/kdb/kdb_bp.c @@ -531,22 +531,29 @@ void __init kdb_initbptab(void) for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) bp->bp_free = 1; - kdb_register_repeat("bp", kdb_bp, "[<vaddr>]", - "Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS); - kdb_register_repeat("bl", kdb_bp, "[<vaddr>]", - "Display breakpoints", 0, KDB_REPEAT_NO_ARGS); + kdb_register_flags("bp", kdb_bp, "[<vaddr>]", + "Set/Display breakpoints", 0, + KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); + kdb_register_flags("bl", kdb_bp, "[<vaddr>]", + "Display breakpoints", 0, + KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) - kdb_register_repeat("bph", kdb_bp, "[<vaddr>]", - "[datar [length]|dataw [length]] Set hw brk", 0, KDB_REPEAT_NO_ARGS); - kdb_register_repeat("bc", kdb_bc, "<bpnum>", - "Clear Breakpoint", 0, KDB_REPEAT_NONE); - kdb_register_repeat("be", kdb_bc, "<bpnum>", - "Enable Breakpoint", 0, KDB_REPEAT_NONE); - kdb_register_repeat("bd", kdb_bc, "<bpnum>", - "Disable Breakpoint", 0, KDB_REPEAT_NONE); - - kdb_register_repeat("ss", kdb_ss, "", - "Single Step", 1, KDB_REPEAT_NO_ARGS); + kdb_register_flags("bph", kdb_bp, "[<vaddr>]", + "[datar [length]|dataw [length]] Set hw brk", 0, + KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); + kdb_register_flags("bc", kdb_bc, "<bpnum>", + "Clear Breakpoint", 0, + KDB_ENABLE_FLOW_CTRL); + kdb_register_flags("be", kdb_bc, "<bpnum>", + "Enable Breakpoint", 0, + KDB_ENABLE_FLOW_CTRL); + kdb_register_flags("bd", kdb_bc, "<bpnum>", + "Disable Breakpoint", 0, + KDB_ENABLE_FLOW_CTRL); + + kdb_register_flags("ss", kdb_ss, "", + "Single Step", 1, + KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); /* * Architecture dependent initialization. 
*/ diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c index 8859ca34dcf..15e1a7af5dd 100644 --- a/kernel/debug/kdb/kdb_debugger.c +++ b/kernel/debug/kdb/kdb_debugger.c @@ -129,6 +129,10 @@ int kdb_stub(struct kgdb_state *ks) ks->pass_exception = 1; KDB_FLAG_SET(CATASTROPHIC); } + /* set CATASTROPHIC if the system contains unresponsive processors */ + for_each_online_cpu(i) + if (!kgdb_info[i].enter_kgdb) + KDB_FLAG_SET(CATASTROPHIC); if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) { KDB_STATE_CLEAR(SSBPT); KDB_STATE_CLEAR(DOING_SS); diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 379650b984f..f191bddf64b 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -12,6 +12,7 @@ */ #include <linux/ctype.h> +#include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/kmsg_dump.h> @@ -23,6 +24,7 @@ #include <linux/vmalloc.h> #include <linux/atomic.h> #include <linux/module.h> +#include <linux/moduleparam.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/kallsyms.h> @@ -42,6 +44,12 @@ #include <linux/slab.h> #include "kdb_private.h" +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "kdb." + +static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE; +module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600); + #define GREP_LEN 256 char kdb_grep_string[GREP_LEN]; int kdb_grepping_flag; @@ -121,6 +129,7 @@ static kdbmsg_t kdbmsgs[] = { KDBMSG(BADLENGTH, "Invalid length field"), KDBMSG(NOBP, "No Breakpoint exists"), KDBMSG(BADADDR, "Invalid address"), + KDBMSG(NOPERM, "Permission denied"), }; #undef KDBMSG @@ -188,6 +197,26 @@ struct task_struct *kdb_curr_task(int cpu) } /* + * Check whether the flags of the current command and the permissions + * of the kdb console has allow a command to be run. + */ +static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions, + bool no_args) +{ + /* permissions comes from userspace so needs massaging slightly */ + permissions &= KDB_ENABLE_MASK; + permissions |= KDB_ENABLE_ALWAYS_SAFE; + + /* some commands change group when launched with no arguments */ + if (no_args) + permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT; + + flags |= KDB_ENABLE_ALL; + + return permissions & flags; +} + +/* * kdbgetenv - This function will return the character string value of * an environment variable. * Parameters: @@ -476,6 +505,15 @@ int kdbgetaddrarg(int argc, const char **argv, int *nextarg, kdb_symtab_t symtab; /* + * If the enable flags prohibit both arbitrary memory access + * and flow control then there are no reasonable grounds to + * provide symbol lookup. + */ + if (!kdb_check_flags(KDB_ENABLE_MEM_READ | KDB_ENABLE_FLOW_CTRL, + kdb_cmd_enabled, false)) + return KDB_NOPERM; + + /* * Process arguments which follow the following syntax: * * symbol | numeric-address [+/- numeric-offset] @@ -641,8 +679,13 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0) if (!s->count) s->usable = 0; if (s->usable) - kdb_register(s->name, kdb_exec_defcmd, - s->usage, s->help, 0); + /* macros are always safe because when executed each + * internal command re-enters kdb_parse() and is + * safety checked individually. 
+ */ + kdb_register_flags(s->name, kdb_exec_defcmd, s->usage, + s->help, 0, + KDB_ENABLE_ALWAYS_SAFE); return 0; } if (!s->usable) @@ -1003,25 +1046,22 @@ int kdb_parse(const char *cmdstr) if (i < kdb_max_commands) { int result; + + if (!kdb_check_flags(tp->cmd_flags, kdb_cmd_enabled, argc <= 1)) + return KDB_NOPERM; + KDB_STATE_SET(CMD); result = (*tp->cmd_func)(argc-1, (const char **)argv); if (result && ignore_errors && result > KDB_CMD_GO) result = 0; KDB_STATE_CLEAR(CMD); - switch (tp->cmd_repeat) { - case KDB_REPEAT_NONE: - argc = 0; - if (argv[0]) - *(argv[0]) = '\0'; - break; - case KDB_REPEAT_NO_ARGS: - argc = 1; - if (argv[1]) - *(argv[1]) = '\0'; - break; - case KDB_REPEAT_WITH_ARGS: - break; - } + + if (tp->cmd_flags & KDB_REPEAT_WITH_ARGS) + return result; + + argc = tp->cmd_flags & KDB_REPEAT_NO_ARGS ? 1 : 0; + if (argv[argc]) + *(argv[argc]) = '\0'; return result; } @@ -1921,10 +1961,14 @@ static int kdb_rm(int argc, const char **argv) */ static int kdb_sr(int argc, const char **argv) { + bool check_mask = + !kdb_check_flags(KDB_ENABLE_ALL, kdb_cmd_enabled, false); + if (argc != 1) return KDB_ARGCOUNT; + kdb_trap_printk++; - __handle_sysrq(*argv[1], false); + __handle_sysrq(*argv[1], check_mask); kdb_trap_printk--; return 0; @@ -2157,6 +2201,8 @@ static void kdb_cpu_status(void) for (start_cpu = -1, i = 0; i < NR_CPUS; i++) { if (!cpu_online(i)) { state = 'F'; /* cpu is offline */ + } else if (!kgdb_info[i].enter_kgdb) { + state = 'D'; /* cpu is online but unresponsive */ } else { state = ' '; /* cpu is responding to kdb */ if (kdb_task_state_char(KDB_TSK(i)) == 'I') @@ -2210,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv) /* * Validate cpunum */ - if ((cpunum > NR_CPUS) || !cpu_online(cpunum)) + if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb) return KDB_BADCPUNUM; dbg_switch_cpu = cpunum; @@ -2375,6 +2421,8 @@ static int kdb_help(int argc, const char **argv) return 0; if (!kt->cmd_name) continue; + if (!kdb_check_flags(kt->cmd_flags, kdb_cmd_enabled, true)) + continue; if (strlen(kt->cmd_usage) > 20) space = "\n "; kdb_printf("%-15.15s %-20s%s%s\n", kt->cmd_name, @@ -2629,7 +2677,7 @@ static int kdb_grep_help(int argc, const char **argv) } /* - * kdb_register_repeat - This function is used to register a kernel + * kdb_register_flags - This function is used to register a kernel * debugger command. * Inputs: * cmd Command name @@ -2641,12 +2689,12 @@ static int kdb_grep_help(int argc, const char **argv) * zero for success, one if a duplicate command. */ #define kdb_command_extend 50 /* arbitrary */ -int kdb_register_repeat(char *cmd, - kdb_func_t func, - char *usage, - char *help, - short minlen, - kdb_repeat_t repeat) +int kdb_register_flags(char *cmd, + kdb_func_t func, + char *usage, + char *help, + short minlen, + kdb_cmdflags_t flags) { int i; kdbtab_t *kp; @@ -2694,19 +2742,18 @@ int kdb_register_repeat(char *cmd, kp->cmd_func = func; kp->cmd_usage = usage; kp->cmd_help = help; - kp->cmd_flags = 0; kp->cmd_minlen = minlen; - kp->cmd_repeat = repeat; + kp->cmd_flags = flags; return 0; } -EXPORT_SYMBOL_GPL(kdb_register_repeat); +EXPORT_SYMBOL_GPL(kdb_register_flags); /* * kdb_register - Compatibility register function for commands that do * not need to specify a repeat state. Equivalent to - * kdb_register_repeat with KDB_REPEAT_NONE. + * kdb_register_flags with flags set to 0. 
* Inputs: * cmd Command name * func Function to execute the command @@ -2721,8 +2768,7 @@ int kdb_register(char *cmd, char *help, short minlen) { - return kdb_register_repeat(cmd, func, usage, help, minlen, - KDB_REPEAT_NONE); + return kdb_register_flags(cmd, func, usage, help, minlen, 0); } EXPORT_SYMBOL_GPL(kdb_register); @@ -2764,80 +2810,109 @@ static void __init kdb_inittab(void) for_each_kdbcmd(kp, i) kp->cmd_name = NULL; - kdb_register_repeat("md", kdb_md, "<vaddr>", + kdb_register_flags("md", kdb_md, "<vaddr>", "Display Memory Contents, also mdWcN, e.g. md8c1", 1, - KDB_REPEAT_NO_ARGS); - kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>", - "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS); - kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>", - "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS); - kdb_register_repeat("mds", kdb_md, "<vaddr>", - "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS); - kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>", - "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS); - kdb_register_repeat("go", kdb_go, "[<vaddr>]", - "Continue Execution", 1, KDB_REPEAT_NONE); - kdb_register_repeat("rd", kdb_rd, "", - "Display Registers", 0, KDB_REPEAT_NONE); - kdb_register_repeat("rm", kdb_rm, "<reg> <contents>", - "Modify Registers", 0, KDB_REPEAT_NONE); - kdb_register_repeat("ef", kdb_ef, "<vaddr>", - "Display exception frame", 0, KDB_REPEAT_NONE); - kdb_register_repeat("bt", kdb_bt, "[<vaddr>]", - "Stack traceback", 1, KDB_REPEAT_NONE); - kdb_register_repeat("btp", kdb_bt, "<pid>", - "Display stack for process <pid>", 0, KDB_REPEAT_NONE); - kdb_register_repeat("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]", - "Backtrace all processes matching state flag", 0, KDB_REPEAT_NONE); - kdb_register_repeat("btc", kdb_bt, "", - "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE); - kdb_register_repeat("btt", kdb_bt, "<vaddr>", + KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); + kdb_register_flags("mdr", kdb_md, "<vaddr> <bytes>", + "Display Raw Memory", 0, + KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); + kdb_register_flags("mdp", kdb_md, "<paddr> <bytes>", + "Display Physical Memory", 0, + KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); + kdb_register_flags("mds", kdb_md, "<vaddr>", + "Display Memory Symbolically", 0, + KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); + kdb_register_flags("mm", kdb_mm, "<vaddr> <contents>", + "Modify Memory Contents", 0, + KDB_ENABLE_MEM_WRITE | KDB_REPEAT_NO_ARGS); + kdb_register_flags("go", kdb_go, "[<vaddr>]", + "Continue Execution", 1, + KDB_ENABLE_REG_WRITE | KDB_ENABLE_ALWAYS_SAFE_NO_ARGS); + kdb_register_flags("rd", kdb_rd, "", + "Display Registers", 0, + KDB_ENABLE_REG_READ); + kdb_register_flags("rm", kdb_rm, "<reg> <contents>", + "Modify Registers", 0, + KDB_ENABLE_REG_WRITE); + kdb_register_flags("ef", kdb_ef, "<vaddr>", + "Display exception frame", 0, + KDB_ENABLE_MEM_READ); + kdb_register_flags("bt", kdb_bt, "[<vaddr>]", + "Stack traceback", 1, + KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS); + kdb_register_flags("btp", kdb_bt, "<pid>", + "Display stack for process <pid>", 0, + KDB_ENABLE_INSPECT); + kdb_register_flags("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]", + "Backtrace all processes matching state flag", 0, + KDB_ENABLE_INSPECT); + kdb_register_flags("btc", kdb_bt, "", + "Backtrace current process on each cpu", 0, + KDB_ENABLE_INSPECT); + kdb_register_flags("btt", kdb_bt, "<vaddr>", "Backtrace process given its struct task address", 0, - KDB_REPEAT_NONE); - kdb_register_repeat("env", kdb_env, "", - "Show environment 
variables", 0, KDB_REPEAT_NONE); - kdb_register_repeat("set", kdb_set, "", - "Set environment variables", 0, KDB_REPEAT_NONE); - kdb_register_repeat("help", kdb_help, "", - "Display Help Message", 1, KDB_REPEAT_NONE); - kdb_register_repeat("?", kdb_help, "", - "Display Help Message", 0, KDB_REPEAT_NONE); - kdb_register_repeat("cpu", kdb_cpu, "<cpunum>", - "Switch to new cpu", 0, KDB_REPEAT_NONE); - kdb_register_repeat("kgdb", kdb_kgdb, "", - "Enter kgdb mode", 0, KDB_REPEAT_NONE); - kdb_register_repeat("ps", kdb_ps, "[<flags>|A]", - "Display active task list", 0, KDB_REPEAT_NONE); - kdb_register_repeat("pid", kdb_pid, "<pidnum>", - "Switch to another task", 0, KDB_REPEAT_NONE); - kdb_register_repeat("reboot", kdb_reboot, "", - "Reboot the machine immediately", 0, KDB_REPEAT_NONE); + KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS); + kdb_register_flags("env", kdb_env, "", + "Show environment variables", 0, + KDB_ENABLE_ALWAYS_SAFE); + kdb_register_flags("set", kdb_set, "", + "Set environment variables", 0, + KDB_ENABLE_ALWAYS_SAFE); + kdb_register_flags("help", kdb_help, "", + "Display Help Message", 1, + KDB_ENABLE_ALWAYS_SAFE); + kdb_register_flags("?", kdb_help, "", + "Display Help Message", 0, + KDB_ENABLE_ALWAYS_SAFE); + kdb_register_flags("cpu", kdb_cpu, "<cpunum>", + "Switch to new cpu", 0, + KDB_ENABLE_ALWAYS_SAFE_NO_ARGS); + kdb_register_flags("kgdb", kdb_kgdb, "", + "Enter kgdb mode", 0, 0); + kdb_register_flags("ps", kdb_ps, "[<flags>|A]", + "Display active task list", 0, + KDB_ENABLE_INSPECT); + kdb_register_flags("pid", kdb_pid, "<pidnum>", + "Switch to another task", 0, + KDB_ENABLE_INSPECT); + kdb_register_flags("reboot", kdb_reboot, "", + "Reboot the machine immediately", 0, + KDB_ENABLE_REBOOT); #if defined(CONFIG_MODULES) - kdb_register_repeat("lsmod", kdb_lsmod, "", - "List loaded kernel modules", 0, KDB_REPEAT_NONE); + kdb_register_flags("lsmod", kdb_lsmod, "", + "List loaded kernel modules", 0, + KDB_ENABLE_INSPECT); #endif #if defined(CONFIG_MAGIC_SYSRQ) - kdb_register_repeat("sr", kdb_sr, "<key>", - "Magic SysRq key", 0, KDB_REPEAT_NONE); + kdb_register_flags("sr", kdb_sr, "<key>", + "Magic SysRq key", 0, + KDB_ENABLE_ALWAYS_SAFE); #endif #if defined(CONFIG_PRINTK) - kdb_register_repeat("dmesg", kdb_dmesg, "[lines]", - "Display syslog buffer", 0, KDB_REPEAT_NONE); + kdb_register_flags("dmesg", kdb_dmesg, "[lines]", + "Display syslog buffer", 0, + KDB_ENABLE_ALWAYS_SAFE); #endif if (arch_kgdb_ops.enable_nmi) { - kdb_register_repeat("disable_nmi", kdb_disable_nmi, "", - "Disable NMI entry to KDB", 0, KDB_REPEAT_NONE); - } - kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"", - "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE); - kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>", - "Send a signal to a process", 0, KDB_REPEAT_NONE); - kdb_register_repeat("summary", kdb_summary, "", - "Summarize the system", 4, KDB_REPEAT_NONE); - kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]", - "Display per_cpu variables", 3, KDB_REPEAT_NONE); - kdb_register_repeat("grephelp", kdb_grep_help, "", - "Display help on | grep", 0, KDB_REPEAT_NONE); + kdb_register_flags("disable_nmi", kdb_disable_nmi, "", + "Disable NMI entry to KDB", 0, + KDB_ENABLE_ALWAYS_SAFE); + } + kdb_register_flags("defcmd", kdb_defcmd, "name \"usage\" \"help\"", + "Define a set of commands, down to endefcmd", 0, + KDB_ENABLE_ALWAYS_SAFE); + kdb_register_flags("kill", kdb_kill, "<-signal> <pid>", + "Send a signal to a process", 0, + KDB_ENABLE_SIGNAL); + 
kdb_register_flags("summary", kdb_summary, "", + "Summarize the system", 4, + KDB_ENABLE_ALWAYS_SAFE); + kdb_register_flags("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]", + "Display per_cpu variables", 3, + KDB_ENABLE_MEM_READ); + kdb_register_flags("grephelp", kdb_grep_help, "", + "Display help on | grep", 0, + KDB_ENABLE_ALWAYS_SAFE); } /* Execute any commands defined in kdb_cmds. */ diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h index 7afd3c8c41d..eaacd169395 100644 --- a/kernel/debug/kdb/kdb_private.h +++ b/kernel/debug/kdb/kdb_private.h @@ -172,10 +172,9 @@ typedef struct _kdbtab { kdb_func_t cmd_func; /* Function to execute command */ char *cmd_usage; /* Usage String for this command */ char *cmd_help; /* Help message for this command */ - short cmd_flags; /* Parsing flags */ short cmd_minlen; /* Minimum legal # command * chars required */ - kdb_repeat_t cmd_repeat; /* Does command auto repeat on enter? */ + kdb_cmdflags_t cmd_flags; /* Command behaviour flags */ } kdbtab_t; extern int kdb_bt(int, const char **); /* KDB display back trace */ diff --git a/kernel/exit.c b/kernel/exit.c index 1ea4369890a..6806c55475e 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1287,9 +1287,15 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) static int wait_consider_task(struct wait_opts *wo, int ptrace, struct task_struct *p) { + /* + * We can race with wait_task_zombie() from another thread. + * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition + * can't confuse the checks below. + */ + int exit_state = ACCESS_ONCE(p->exit_state); int ret; - if (unlikely(p->exit_state == EXIT_DEAD)) + if (unlikely(exit_state == EXIT_DEAD)) return 0; ret = eligible_child(wo, p); @@ -1310,7 +1316,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, return 0; } - if (unlikely(p->exit_state == EXIT_TRACE)) { + if (unlikely(exit_state == EXIT_TRACE)) { /* * ptrace == 0 means we are the natural parent. In this case * we should clear notask_error, debugger will notify us. @@ -1337,7 +1343,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, } /* slay zombie? */ - if (p->exit_state == EXIT_ZOMBIE) { + if (exit_state == EXIT_ZOMBIE) { /* we don't reap group leaders with subthreads */ if (!delay_group_leader(p)) { /* diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c index 5cf6731b98e..3ef3736002d 100644 --- a/kernel/locking/mutex-debug.c +++ b/kernel/locking/mutex-debug.c @@ -80,13 +80,13 @@ void debug_mutex_unlock(struct mutex *lock) DEBUG_LOCKS_WARN_ON(lock->owner != current); DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); - mutex_clear_owner(lock); } /* * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug * mutexes so that we can do it here after we've verified state. 
*/ + mutex_clear_owner(lock); atomic_set(&lock->count, 1); } diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c index b0b1c44e923..3ccf5c2c132 100644 --- a/kernel/trace/trace_kdb.c +++ b/kernel/trace/trace_kdb.c @@ -132,8 +132,8 @@ static int kdb_ftdump(int argc, const char **argv) static __init int kdb_ftrace_register(void) { - kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", - "Dump ftrace log", 0, KDB_REPEAT_NONE); + kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", + "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE); return 0; } diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index 358eb81fa28..c635a107a7d 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb @@ -73,6 +73,31 @@ config KGDB_KDB help KDB frontend for kernel +config KDB_DEFAULT_ENABLE + hex "KDB: Select kdb command functions to be enabled by default" + depends on KGDB_KDB + default 0x1 + help + Specifiers which kdb commands are enabled by default. This may + be set to 1 or 0 to enable all commands or disable almost all + commands. + + Alternatively the following bitmask applies: + + 0x0002 - allow arbitrary reads from memory and symbol lookup + 0x0004 - allow arbitrary writes to memory + 0x0008 - allow current register state to be inspected + 0x0010 - allow current register state to be modified + 0x0020 - allow passive inspection (backtrace, process list, lsmod) + 0x0040 - allow flow control management (breakpoint, single step) + 0x0080 - enable signalling of processes + 0x0100 - allow machine to be rebooted + + The config option merely sets the default at boot time. Both + issuing 'echo X > /sys/module/kdb/parameters/cmd_enable' or + setting with kdb.cmd_enable=X kernel command line option will + override the default settings. + config KDB_KEYBOARD bool "KGDB_KDB: keyboard as input device" depends on VT && KGDB_KDB diff --git a/lib/assoc_array.c b/lib/assoc_array.c index 2404d03e251..03dd576e677 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c @@ -11,6 +11,7 @@ * 2 of the Licence, or (at your option) any later version. */ //#define DEBUG +#include <linux/rcupdate.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/assoc_array_priv.h> diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug index 56badfc4810..957d3da53dd 100644 --- a/mm/Kconfig.debug +++ b/mm/Kconfig.debug @@ -14,7 +14,6 @@ config DEBUG_PAGEALLOC depends on !KMEMCHECK select PAGE_EXTENSION select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC - select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC ---help--- Unmap pages from the kernel linear mapping after free_pages(). This results in a large slowdown, but helps to find certain types @@ -27,13 +26,5 @@ config DEBUG_PAGEALLOC that would result in incorrect warnings of memory corruption after a resume because free pages are not saved to the suspend image. -config WANT_PAGE_DEBUG_FLAGS - bool - config PAGE_POISONING bool - select WANT_PAGE_DEBUG_FLAGS - -config PAGE_GUARD - bool - select WANT_PAGE_DEBUG_FLAGS diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ef91e856c7e..851924fa517 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3043,18 +3043,6 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry, if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { mem_cgroup_swap_statistics(from, false); mem_cgroup_swap_statistics(to, true); - /* - * This function is only called from task migration context now. 
- * It postpones page_counter and refcount handling till the end - * of task migration(mem_cgroup_clear_mc()) for performance - * improvement. But we cannot postpone css_get(to) because if - * the process that has been moved to @to does swap-in, the - * refcount of @to might be decreased to 0. - * - * We are in attach() phase, so the cgroup is guaranteed to be - * alive, so we can just call css_get(). - */ - css_get(&to->css); return 0; } return -EINVAL; @@ -4679,6 +4667,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) if (parent_css == NULL) { root_mem_cgroup = memcg; page_counter_init(&memcg->memory, NULL); + memcg->soft_limit = PAGE_COUNTER_MAX; page_counter_init(&memcg->memsw, NULL); page_counter_init(&memcg->kmem, NULL); } @@ -4724,6 +4713,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) if (parent->use_hierarchy) { page_counter_init(&memcg->memory, &parent->memory); + memcg->soft_limit = PAGE_COUNTER_MAX; page_counter_init(&memcg->memsw, &parent->memsw); page_counter_init(&memcg->kmem, &parent->kmem); @@ -4733,6 +4723,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) */ } else { page_counter_init(&memcg->memory, NULL); + memcg->soft_limit = PAGE_COUNTER_MAX; page_counter_init(&memcg->memsw, NULL); page_counter_init(&memcg->kmem, NULL); /* @@ -4807,7 +4798,7 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX); mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX); memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX); - memcg->soft_limit = 0; + memcg->soft_limit = PAGE_COUNTER_MAX; } #ifdef CONFIG_MMU diff --git a/mm/memory.c b/mm/memory.c index ca920d1fd31..c6565f00fb3 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2137,17 +2137,24 @@ reuse: if (!dirty_page) return ret; - /* - * Yes, Virginia, this is actually required to prevent a race - * with clear_page_dirty_for_io() from clearing the page dirty - * bit after it clear all dirty ptes, but before a racing - * do_wp_page installs a dirty pte. - * - * do_shared_fault is protected similarly. - */ if (!page_mkwrite) { - wait_on_page_locked(dirty_page); - set_page_dirty_balance(dirty_page); + struct address_space *mapping; + int dirtied; + + lock_page(dirty_page); + dirtied = set_page_dirty(dirty_page); + VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page); + mapping = dirty_page->mapping; + unlock_page(dirty_page); + + if (dirtied && mapping) { + /* + * Some device drivers do not set page.mapping + * but still dirty their pages + */ + balance_dirty_pages_ratelimited(mapping); + } + /* file_update_time outside page_lock */ if (vma->vm_file) file_update_time(vma->vm_file); @@ -2593,7 +2600,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo if (prev && prev->vm_end == address) return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; - expand_downwards(vma, address - PAGE_SIZE); + return expand_downwards(vma, address - PAGE_SIZE); } if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { struct vm_area_struct *next = vma->vm_next; @@ -2602,7 +2609,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo if (next && next->vm_start == address + PAGE_SIZE) return next->vm_flags & VM_GROWSUP ? 
0 : -ENOMEM; - expand_upwards(vma, address + PAGE_SIZE); + return expand_upwards(vma, address + PAGE_SIZE); } return 0; } diff --git a/mm/mmap.c b/mm/mmap.c index 7b36aa7cc89..7f684d5a808 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -778,10 +778,12 @@ again: remove_next = 1 + (end > next->vm_end); if (exporter && exporter->anon_vma && !importer->anon_vma) { int error; + importer->anon_vma = exporter->anon_vma; error = anon_vma_clone(importer, exporter); - if (error) + if (error) { + importer->anon_vma = NULL; return error; - importer->anon_vma = exporter->anon_vma; + } } } @@ -2099,14 +2101,17 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns { struct mm_struct *mm = vma->vm_mm; struct rlimit *rlim = current->signal->rlim; - unsigned long new_start; + unsigned long new_start, actual_size; /* address space limit tests */ if (!may_expand_vm(mm, grow)) return -ENOMEM; /* Stack limit test */ - if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) + actual_size = size; + if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) + actual_size -= PAGE_SIZE; + if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) return -ENOMEM; /* mlock limit tests */ diff --git a/mm/page-writeback.c b/mm/page-writeback.c index d5d81f5384d..6f4335238e3 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -1541,16 +1541,6 @@ pause: bdi_start_background_writeback(bdi); } -void set_page_dirty_balance(struct page *page) -{ - if (set_page_dirty(page)) { - struct address_space *mapping = page_mapping(page); - - if (mapping) - balance_dirty_pages_ratelimited(mapping); - } -} - static DEFINE_PER_CPU(int, bdp_ratelimits); /* @@ -2123,32 +2113,25 @@ EXPORT_SYMBOL(account_page_dirtied); * page dirty in that case, but not all the buffers. This is a "bottom-up" * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying. * - * Most callers have locked the page, which pins the address_space in memory. - * But zap_pte_range() does not lock the page, however in that case the - * mapping is pinned by the vma's ->vm_file reference. - * - * We take care to handle the case where the page was truncated from the - * mapping by re-checking page_mapping() inside tree_lock. + * The caller must ensure this doesn't race with truncation. Most will simply + * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and + * the pte lock held, which also locks out truncation. */ int __set_page_dirty_nobuffers(struct page *page) { if (!TestSetPageDirty(page)) { struct address_space *mapping = page_mapping(page); - struct address_space *mapping2; unsigned long flags; if (!mapping) return 1; spin_lock_irqsave(&mapping->tree_lock, flags); - mapping2 = page_mapping(page); - if (mapping2) { /* Race with truncate? */ - BUG_ON(mapping2 != mapping); - WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); - account_page_dirtied(page, mapping); - radix_tree_tag_set(&mapping->page_tree, - page_index(page), PAGECACHE_TAG_DIRTY); - } + BUG_ON(page_mapping(page) != mapping); + WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); + account_page_dirtied(page, mapping); + radix_tree_tag_set(&mapping->page_tree, page_index(page), + PAGECACHE_TAG_DIRTY); spin_unlock_irqrestore(&mapping->tree_lock, flags); if (mapping->host) { /* !PageAnon && !swapper_space */ @@ -2305,12 +2288,10 @@ int clear_page_dirty_for_io(struct page *page) /* * We carefully synchronise fault handlers against * installing a dirty pte and marking the page dirty - * at this point. 
We do this by having them hold the - * page lock at some point after installing their - * pte, but before marking the page dirty. - * Pages are always locked coming in here, so we get - * the desired exclusion. See mm/memory.c:do_wp_page() - * for more comments. + * at this point. We do this by having them hold the + * page lock while dirtying the page, and pages are + * always locked coming in here, so we get the desired + * exclusion. */ if (TestClearPageDirty(page)) { dec_zone_page_state(page, NR_FILE_DIRTY); diff --git a/mm/rmap.c b/mm/rmap.c index c5bc241127b..71cd5bd0c17 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -72,6 +72,8 @@ static inline struct anon_vma *anon_vma_alloc(void) anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); if (anon_vma) { atomic_set(&anon_vma->refcount, 1); + anon_vma->degree = 1; /* Reference for first vma */ + anon_vma->parent = anon_vma; /* * Initialise the anon_vma root to point to itself. If called * from fork, the root will be reset to the parents anon_vma. @@ -188,6 +190,8 @@ int anon_vma_prepare(struct vm_area_struct *vma) if (likely(!vma->anon_vma)) { vma->anon_vma = anon_vma; anon_vma_chain_link(vma, avc, anon_vma); + /* vma reference or self-parent link for new root */ + anon_vma->degree++; allocated = NULL; avc = NULL; } @@ -236,6 +240,14 @@ static inline void unlock_anon_vma_root(struct anon_vma *root) /* * Attach the anon_vmas from src to dst. * Returns 0 on success, -ENOMEM on failure. + * + * If dst->anon_vma is NULL this function tries to find and reuse existing + * anon_vma which has no vmas and only one child anon_vma. This prevents + * degradation of anon_vma hierarchy to endless linear chain in case of + * constantly forking task. On the other hand, an anon_vma with more than one + * child isn't reused even if there was no alive vma, thus rmap walker has a + * good chance of avoiding scanning the whole hierarchy when it searches where + * page is mapped. */ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) { @@ -256,7 +268,21 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) anon_vma = pavc->anon_vma; root = lock_anon_vma_root(root, anon_vma); anon_vma_chain_link(dst, avc, anon_vma); + + /* + * Reuse existing anon_vma if its degree lower than two, + * that means it has no vma and only one anon_vma child. + * + * Do not chose parent anon_vma, otherwise first child + * will always reuse it. Root anon_vma is never reused: + * it has self-parent reference and at least one child. + */ + if (!dst->anon_vma && anon_vma != src->anon_vma && + anon_vma->degree < 2) + dst->anon_vma = anon_vma; } + if (dst->anon_vma) + dst->anon_vma->degree++; unlock_anon_vma_root(root); return 0; @@ -280,6 +306,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) if (!pvma->anon_vma) return 0; + /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ + vma->anon_vma = NULL; + /* * First, attach the new VMA to the parent VMA's anon_vmas, * so rmap can find non-COWed pages in child processes. @@ -288,6 +317,10 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) if (error) return error; + /* An existing anon_vma has been reused, all done then. */ + if (vma->anon_vma) + return 0; + /* Then add our own anon_vma. */ anon_vma = anon_vma_alloc(); if (!anon_vma) @@ -301,6 +334,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) * lock any of the anon_vmas in this anon_vma tree. 
*/ anon_vma->root = pvma->anon_vma->root; + anon_vma->parent = pvma->anon_vma; /* * With refcounts, an anon_vma can stay around longer than the * process it belongs to. The root anon_vma needs to be pinned until @@ -311,6 +345,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) vma->anon_vma = anon_vma; anon_vma_lock_write(anon_vma); anon_vma_chain_link(vma, avc, anon_vma); + anon_vma->parent->degree++; anon_vma_unlock_write(anon_vma); return 0; @@ -341,12 +376,16 @@ void unlink_anon_vmas(struct vm_area_struct *vma) * Leave empty anon_vmas on the list - we'll need * to free them outside the lock. */ - if (RB_EMPTY_ROOT(&anon_vma->rb_root)) + if (RB_EMPTY_ROOT(&anon_vma->rb_root)) { + anon_vma->parent->degree--; continue; + } list_del(&avc->same_vma); anon_vma_chain_free(avc); } + if (vma->anon_vma) + vma->anon_vma->degree--; unlock_anon_vma_root(root); /* @@ -357,6 +396,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma) list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { struct anon_vma *anon_vma = avc->anon_vma; + BUG_ON(anon_vma->degree); put_anon_vma(anon_vma); list_del(&avc->same_vma); diff --git a/mm/vmscan.c b/mm/vmscan.c index bd9a72bc4a1..ab2505c3ef5 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2921,18 +2921,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining, return false; /* - * There is a potential race between when kswapd checks its watermarks - * and a process gets throttled. There is also a potential race if - * processes get throttled, kswapd wakes, a large process exits therby - * balancing the zones that causes kswapd to miss a wakeup. If kswapd - * is going to sleep, no process should be sleeping on pfmemalloc_wait - * so wake them now if necessary. If necessary, processes will wake - * kswapd and get throttled again + * The throttled processes are normally woken up in balance_pgdat() as + * soon as pfmemalloc_watermark_ok() is true. But there is a potential + * race between when kswapd checks the watermarks and a process gets + * throttled. There is also a potential race if processes get + * throttled, kswapd wakes, a large process exits thereby balancing the + * zones, which causes kswapd to exit balance_pgdat() before reaching + * the wake up checks. If kswapd is going to sleep, no process should + * be sleeping on pfmemalloc_wait, so wake them now if necessary. If + * the wake up is premature, processes will wake kswapd and get + * throttled again. The difference from wake ups in balance_pgdat() is + * that here we are under prepare_to_wait(). */ - if (waitqueue_active(&pgdat->pfmemalloc_wait)) { - wake_up(&pgdat->pfmemalloc_wait); - return false; - } + if (waitqueue_active(&pgdat->pfmemalloc_wait)) + wake_up_all(&pgdat->pfmemalloc_wait); return pgdat_balanced(pgdat, order, classzone_idx); } diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index ab6bb2af1d4..b24e4bb64fb 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -685,11 +685,13 @@ static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, if (orig_initialized) atomic_dec(&bat_priv->mcast.num_disabled); orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST; - /* If mcast support is being switched off increase the disabled - * mcast node counter. + /* If mcast support is being switched off or if this is an initial + * OGM without mcast support then increase the disabled mcast + * node counter. 
*/ } else if (!orig_mcast_enabled && - orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) { + (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST || + !orig_initialized)) { atomic_inc(&bat_priv->mcast.num_disabled); orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST; } @@ -738,7 +740,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig) { struct batadv_priv *bat_priv = orig->bat_priv; - if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST)) + if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) && + orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST) atomic_dec(&bat_priv->mcast.num_disabled); batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS); diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 8d04d174669..fab47f1f3ef 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -133,7 +133,7 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv) if (!bat_priv->nc.decoding_hash) goto err; - batadv_hash_set_lock_class(bat_priv->nc.coding_hash, + batadv_hash_set_lock_class(bat_priv->nc.decoding_hash, &batadv_nc_decoding_hash_lock_class_key); INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker); diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 6a484514cd3..bea8198d019 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -570,9 +570,6 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu) batadv_frag_purge_orig(orig_node, NULL); - batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1, - "originator timed out"); - if (orig_node->bat_priv->bat_algo_ops->bat_orig_free) orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node); @@ -678,6 +675,7 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv, atomic_set(&orig_node->last_ttvn, 0); orig_node->tt_buff = NULL; orig_node->tt_buff_len = 0; + orig_node->last_seen = jiffies; reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS); orig_node->bcast_seqno_reset = reset_time; #ifdef CONFIG_BATMAN_ADV_MCAST @@ -977,6 +975,9 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv) if (batadv_purge_orig_node(bat_priv, orig_node)) { batadv_gw_node_delete(bat_priv, orig_node); hlist_del_rcu(&orig_node->hash_entry); + batadv_tt_global_del_orig(orig_node->bat_priv, + orig_node, -1, + "originator timed out"); batadv_orig_node_free_ref(orig_node); continue; } diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 35f76f2f782..6648f321864 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -443,11 +443,13 @@ batadv_find_router(struct batadv_priv *bat_priv, router = batadv_orig_router_get(orig_node, recv_if); + if (!router) + return router; + /* only consider bonding for recv_if == BATADV_IF_DEFAULT (first hop) * and if activated. 
*/ - if (recv_if == BATADV_IF_DEFAULT || !atomic_read(&bat_priv->bonding) || - !router) + if (!(recv_if == BATADV_IF_DEFAULT && atomic_read(&bat_priv->bonding))) return router; /* bonding: loop through the list of possible routers found diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c index 15845814a0f..ba6eb17226d 100644 --- a/net/ceph/auth_x.c +++ b/net/ceph/auth_x.c @@ -676,7 +676,7 @@ static int calcu_signature(struct ceph_x_authorizer *au, int ret; char tmp_enc[40]; __le32 tmp[5] = { - 16u, msg->hdr.crc, msg->footer.front_crc, + cpu_to_le32(16), msg->hdr.crc, msg->footer.front_crc, msg->footer.middle_crc, msg->footer.data_crc, }; ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp), diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index a83062ceeec..f2148e22b14 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -717,7 +717,7 @@ static int get_poolop_reply_buf(const char *src, size_t src_len, if (src_len != sizeof(u32) + dst_len) return -EINVAL; - buf_len = le32_to_cpu(*(u32 *)src); + buf_len = le32_to_cpu(*(__le32 *)src); if (buf_len != dst_len) return -EINVAL; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 7f18262e232..65caf8b95e1 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2019,7 +2019,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) break; - if (tso_segs == 1) { + if (tso_segs == 1 || !max_segs) { if (unlikely(!tcp_nagle_test(tp, skb, mss_now, (tcp_skb_is_last(sk, skb) ? nonagle : TCP_NAGLE_PUSH)))) @@ -2032,7 +2032,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, } limit = mss_now; - if (tso_segs > 1 && !tcp_urg_mode(tp)) + if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp)) limit = tcp_mss_split_point(sk, skb, mss_now, min_t(unsigned int, cwnd_quota, diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 0bb7038121a..bd4e46ec32b 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -140,7 +140,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) if (!ret) { key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; - if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) + if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || + (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) sdata->crypto_tx_tailroom_needed_cnt--; WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && @@ -188,7 +190,9 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) sta = key->sta; sdata = key->sdata; - if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) + if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || + (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) increment_tailroom_need_count(sdata); ret = drv_set_key(key->local, DISABLE_KEY, sdata, @@ -884,7 +888,9 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf) if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; - if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) + if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || + (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) increment_tailroom_need_count(key->sdata); } diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 70bef2ab7f2..da2fae0873a 100644 --- a/net/openvswitch/flow.c +++ 
b/net/openvswitch/flow.c @@ -70,6 +70,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, { struct flow_stats *stats; int node = numa_node_id(); + int len = skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); stats = rcu_dereference(flow->stats[node]); @@ -105,7 +106,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, if (likely(new_stats)) { new_stats->used = jiffies; new_stats->packet_count = 1; - new_stats->byte_count = skb->len; + new_stats->byte_count = len; new_stats->tcp_flags = tcp_flags; spin_lock_init(&new_stats->lock); @@ -120,7 +121,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, stats->used = jiffies; stats->packet_count++; - stats->byte_count += skb->len; + stats->byte_count += len; stats->tcp_flags |= tcp_flags; unlock: spin_unlock(&stats->lock); diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 53f3ebbfcea..2034c6d9cb5 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -480,7 +480,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, stats = this_cpu_ptr(vport->percpu_stats); u64_stats_update_begin(&stats->syncp); stats->rx_packets++; - stats->rx_bytes += skb->len; + stats->rx_bytes += skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); u64_stats_update_end(&stats->syncp); OVS_CB(skb)->input_vport = vport; diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 1cb61242e55..4439ac4c1b5 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -606,7 +606,7 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len) struct kvec *head = buf->head; struct kvec *tail = buf->tail; int fraglen; - int new, old; + int new; if (len > buf->len) { WARN_ON_ONCE(1); @@ -629,8 +629,8 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len) buf->len -= fraglen; new = buf->page_base + buf->page_len; - old = new + fraglen; - xdr->page_ptr -= (old >> PAGE_SHIFT) - (new >> PAGE_SHIFT); + + xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT); if (buf->page_len) { xdr->p = page_address(*xdr->page_ptr); diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean index 1bca180db8a..627f8cbbedb 100644 --- a/scripts/Makefile.clean +++ b/scripts/Makefile.clean @@ -42,19 +42,19 @@ __clean-files := $(extra-y) $(extra-m) $(extra-) \ __clean-files := $(filter-out $(no-clean-files), $(__clean-files)) -# as clean-files is given relative to the current directory, this adds -# a $(obj) prefix, except for absolute paths +# clean-files is given relative to the current directory, unless it +# starts with $(objtree)/ (which means "./", so do not add "./" unless +# you want to delete a file from the toplevel object directory). 
__clean-files := $(wildcard \ - $(addprefix $(obj)/, $(filter-out /%, $(__clean-files))) \ - $(filter /%, $(__clean-files))) + $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(__clean-files))) \ + $(filter $(objtree)/%, $(__clean-files))) -# as clean-dirs is given relative to the current directory, this adds -# a $(obj) prefix, except for absolute paths +# same as clean-files __clean-dirs := $(wildcard \ - $(addprefix $(obj)/, $(filter-out /%, $(clean-dirs))) \ - $(filter /%, $(clean-dirs))) + $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(clean-dirs))) \ + $(filter $(objtree)/%, $(clean-dirs))) # ========================================================================== diff --git a/security/keys/gc.c b/security/keys/gc.c index 9609a7f0fae..c7952375ac5 100644 --- a/security/keys/gc.c +++ b/security/keys/gc.c @@ -148,12 +148,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys) if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) atomic_dec(&key->user->nikeys); - key_user_put(key->user); - /* now throw away the key memory */ if (key->type->destroy) key->type->destroy(key); + key_user_put(key->user); + kfree(key->description); #ifdef KEY_DEBUGGING diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c index 255dabc6fc3..2a85e4209f0 100644 --- a/sound/firewire/fireworks/fireworks_transaction.c +++ b/sound/firewire/fireworks/fireworks_transaction.c @@ -124,7 +124,7 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode) spin_lock_irq(&efw->lock); t = (struct snd_efw_transaction *)data; - length = min_t(size_t, t->length * sizeof(t->length), length); + length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length); if (efw->push_ptr < efw->pull_ptr) capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr); diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 5f13d2d1807..b422e406a9c 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -3353,6 +3353,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = { { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, { .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi }, { .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, +{ .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi }, { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, { .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, { .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, @@ -3413,6 +3414,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0060"); MODULE_ALIAS("snd-hda-codec-id:10de0067"); MODULE_ALIAS("snd-hda-codec-id:10de0070"); MODULE_ALIAS("snd-hda-codec-id:10de0071"); +MODULE_ALIAS("snd-hda-codec-id:10de0072"); MODULE_ALIAS("snd-hda-codec-id:10de8001"); MODULE_ALIAS("snd-hda-codec-id:11069f80"); MODULE_ALIAS("snd-hda-codec-id:11069f81"); diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 4f6413e01c1..605d14003d2 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -568,9 +568,9 @@ static void stac_store_hints(struct hda_codec *codec) spec->gpio_mask; } if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir)) - spec->gpio_mask &= spec->gpio_mask; - if (get_int_hint(codec, "gpio_data", &spec->gpio_data)) spec->gpio_dir &= spec->gpio_mask; + if (get_int_hint(codec, "gpio_data", &spec->gpio_data)) + spec->gpio_data &= spec->gpio_mask; if (get_int_hint(codec, 
"eapd_mask", &spec->eapd_mask)) spec->eapd_mask &= spec->gpio_mask; if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute)) diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c index 81fe1464d26..c0fbe188143 100644 --- a/sound/soc/codecs/rt5677.c +++ b/sound/soc/codecs/rt5677.c @@ -784,8 +784,8 @@ static unsigned int bst_tlv[] = { static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { - struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); - struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); + struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); + struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component); ucontrol->value.integer.value[0] = rt5677->dsp_vad_en; @@ -795,8 +795,9 @@ static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol, static int rt5677_dsp_vad_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { - struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); - struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); + struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); + struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component); + struct snd_soc_codec *codec = snd_soc_component_to_codec(component); rt5677->dsp_vad_en = !!ucontrol->value.integer.value[0]; diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c index b93168d4f64..8d18bbda661 100644 --- a/sound/soc/dwc/designware_i2s.c +++ b/sound/soc/dwc/designware_i2s.c @@ -209,16 +209,9 @@ static int dw_i2s_hw_params(struct snd_pcm_substream *substream, switch (config->chan_nr) { case EIGHT_CHANNEL_SUPPORT: - ch_reg = 3; - break; case SIX_CHANNEL_SUPPORT: - ch_reg = 2; - break; case FOUR_CHANNEL_SUPPORT: - ch_reg = 1; - break; case TWO_CHANNEL_SUPPORT: - ch_reg = 0; break; default: dev_err(dev->dev, "channel not supported\n"); @@ -227,18 +220,22 @@ static int dw_i2s_hw_params(struct snd_pcm_substream *substream, i2s_disable_channels(dev, substream->stream); - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { - i2s_write_reg(dev->i2s_base, TCR(ch_reg), xfer_resolution); - i2s_write_reg(dev->i2s_base, TFCR(ch_reg), 0x02); - irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg)); - i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30); - i2s_write_reg(dev->i2s_base, TER(ch_reg), 1); - } else { - i2s_write_reg(dev->i2s_base, RCR(ch_reg), xfer_resolution); - i2s_write_reg(dev->i2s_base, RFCR(ch_reg), 0x07); - irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg)); - i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03); - i2s_write_reg(dev->i2s_base, RER(ch_reg), 1); + for (ch_reg = 0; ch_reg < (config->chan_nr / 2); ch_reg++) { + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + i2s_write_reg(dev->i2s_base, TCR(ch_reg), + xfer_resolution); + i2s_write_reg(dev->i2s_base, TFCR(ch_reg), 0x02); + irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg)); + i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30); + i2s_write_reg(dev->i2s_base, TER(ch_reg), 1); + } else { + i2s_write_reg(dev->i2s_base, RCR(ch_reg), + xfer_resolution); + i2s_write_reg(dev->i2s_base, RFCR(ch_reg), 0x07); + irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg)); + i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03); + i2s_write_reg(dev->i2s_base, RER(ch_reg), 1); + } } i2s_write_reg(dev->i2s_base, CCR, ccr); @@ -263,6 +260,19 @@ static void dw_i2s_shutdown(struct snd_pcm_substream *substream, snd_soc_dai_set_dma_data(dai, substream, NULL); } +static int dw_i2s_prepare(struct snd_pcm_substream 
*substream, + struct snd_soc_dai *dai) +{ + struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai); + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + i2s_write_reg(dev->i2s_base, TXFFR, 1); + else + i2s_write_reg(dev->i2s_base, RXFFR, 1); + + return 0; +} + static int dw_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { @@ -294,6 +304,7 @@ static struct snd_soc_dai_ops dw_i2s_dai_ops = { .startup = dw_i2s_startup, .shutdown = dw_i2s_shutdown, .hw_params = dw_i2s_hw_params, + .prepare = dw_i2s_prepare, .trigger = dw_i2s_trigger, }; diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig index e989ecf046c..f86de1211b9 100644 --- a/sound/soc/intel/Kconfig +++ b/sound/soc/intel/Kconfig @@ -89,7 +89,7 @@ config SND_SOC_INTEL_BROADWELL_MACH config SND_SOC_INTEL_BYTCR_RT5640_MACH tristate "ASoC Audio DSP Support for MID BYT Platform" - depends on X86 + depends on X86 && I2C select SND_SOC_RT5640 select SND_SST_MFLD_PLATFORM select SND_SST_IPC_ACPI @@ -101,7 +101,7 @@ config SND_SOC_INTEL_BYTCR_RT5640_MACH config SND_SOC_INTEL_CHT_BSW_RT5672_MACH tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec" - depends on X86_INTEL_LPSS + depends on X86_INTEL_LPSS && I2C select SND_SOC_RT5670 select SND_SST_MFLD_PLATFORM select SND_SST_IPC_ACPI diff --git a/sound/soc/intel/bytcr_dpcm_rt5640.c b/sound/soc/intel/bytcr_dpcm_rt5640.c index f5d0fc1ab10..eef0c56ec32 100644 --- a/sound/soc/intel/bytcr_dpcm_rt5640.c +++ b/sound/soc/intel/bytcr_dpcm_rt5640.c @@ -227,4 +227,4 @@ module_platform_driver(snd_byt_mc_driver); MODULE_DESCRIPTION("ASoC Intel(R) Baytrail CR Machine driver"); MODULE_AUTHOR("Subhransu S. Prusty <subhransu.s.prusty@intel.com>"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:bytrt5640-audio"); +MODULE_ALIAS("platform:bytt100_rt5640"); diff --git a/sound/soc/intel/sst-firmware.c b/sound/soc/intel/sst-firmware.c index 4a5bde9c686..ef2e8b5766a 100644 --- a/sound/soc/intel/sst-firmware.c +++ b/sound/soc/intel/sst-firmware.c @@ -763,8 +763,12 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba /* does block span more than 1 section */ if (ba->offset >= block->offset && ba->offset < block_end) { + /* add block */ + list_move(&block->list, &dsp->used_block_list); + list_add(&block->module_list, block_list); /* align ba to block boundary */ - ba->offset = block->offset; + ba->size -= block_end - ba->offset; + ba->offset = block_end; err = block_alloc_contiguous(dsp, ba, block_list); if (err < 0) diff --git a/sound/soc/intel/sst/sst_acpi.c b/sound/soc/intel/sst/sst_acpi.c index 3abc29e8a92..2ac72eb5e75 100644 --- a/sound/soc/intel/sst/sst_acpi.c +++ b/sound/soc/intel/sst/sst_acpi.c @@ -343,7 +343,7 @@ int sst_acpi_remove(struct platform_device *pdev) } static struct sst_machines sst_acpi_bytcr[] = { - {"10EC5640", "T100", "bytt100_rt5640", NULL, "fw_sst_0f28.bin", + {"10EC5640", "T100", "bytt100_rt5640", NULL, "intel/fw_sst_0f28.bin", &byt_rvp_platform_data }, {}, }; diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c index 26ec5117b35..13d8507333b 100644 --- a/sound/soc/rockchip/rockchip_i2s.c +++ b/sound/soc/rockchip/rockchip_i2s.c @@ -454,11 +454,11 @@ static int rockchip_i2s_probe(struct platform_device *pdev) i2s->playback_dma_data.addr = res->start + I2S_TXDR; i2s->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - i2s->playback_dma_data.maxburst = 16; + i2s->playback_dma_data.maxburst = 4; i2s->capture_dma_data.addr = res->start + I2S_RXDR; 
i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - i2s->capture_dma_data.maxburst = 16; + i2s->capture_dma_data.maxburst = 4; i2s->dev = &pdev->dev; dev_set_drvdata(&pdev->dev, i2s); diff --git a/sound/soc/rockchip/rockchip_i2s.h b/sound/soc/rockchip/rockchip_i2s.h index 89a5d8bc6ee..93f456f518a 100644 --- a/sound/soc/rockchip/rockchip_i2s.h +++ b/sound/soc/rockchip/rockchip_i2s.h @@ -127,7 +127,7 @@ #define I2S_DMACR_TDE_DISABLE (0 << I2S_DMACR_TDE_SHIFT) #define I2S_DMACR_TDE_ENABLE (1 << I2S_DMACR_TDE_SHIFT) #define I2S_DMACR_TDL_SHIFT 0 -#define I2S_DMACR_TDL(x) ((x - 1) << I2S_DMACR_TDL_SHIFT) +#define I2S_DMACR_TDL(x) ((x) << I2S_DMACR_TDL_SHIFT) #define I2S_DMACR_TDL_MASK (0x1f << I2S_DMACR_TDL_SHIFT) /* diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 985052b3fbe..2c62620abca 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -3230,7 +3230,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, const char *propname) { struct device_node *np = card->dev->of_node; - int num_routes, old_routes; + int num_routes; struct snd_soc_dapm_route *routes; int i, ret; @@ -3248,9 +3248,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, return -EINVAL; } - old_routes = card->num_dapm_routes; - routes = devm_kzalloc(card->dev, - (old_routes + num_routes) * sizeof(*routes), + routes = devm_kzalloc(card->dev, num_routes * sizeof(*routes), GFP_KERNEL); if (!routes) { dev_err(card->dev, @@ -3258,11 +3256,9 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, return -EINVAL; } - memcpy(routes, card->dapm_routes, old_routes * sizeof(*routes)); - for (i = 0; i < num_routes; i++) { ret = of_property_read_string_index(np, propname, - 2 * i, &routes[old_routes + i].sink); + 2 * i, &routes[i].sink); if (ret) { dev_err(card->dev, "ASoC: Property '%s' index %d could not be read: %d\n", @@ -3270,7 +3266,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, return -EINVAL; } ret = of_property_read_string_index(np, propname, - (2 * i) + 1, &routes[old_routes + i].source); + (2 * i) + 1, &routes[i].source); if (ret) { dev_err(card->dev, "ASoC: Property '%s' index %d could not be read: %d\n", @@ -3279,7 +3275,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, } } - card->num_dapm_routes += num_routes; + card->num_dapm_routes = num_routes; card->dapm_routes = routes; return 0; diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c index 27284474613..327f8642ca8 100644 --- a/sound/usb/caiaq/audio.c +++ b/sound/usb/caiaq/audio.c @@ -816,7 +816,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *cdev) return -EINVAL; } - if (cdev->n_streams < 2) { + if (cdev->n_streams < 1) { dev_err(dev, "bogus number of streams: %d\n", cdev->n_streams); return -EINVAL; } diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c index 6f803609e49..0b0112c80f2 100644 --- a/tools/lib/lockdep/preload.c +++ b/tools/lib/lockdep/preload.c @@ -317,7 +317,7 @@ int pthread_mutex_destroy(pthread_mutex_t *mutex) * * TODO: Hook into free() and add that check there as well. 
*/ - debug_check_no_locks_freed(mutex, mutex + sizeof(*mutex)); + debug_check_no_locks_freed(mutex, sizeof(*mutex)); __del_lock(__get_lock(mutex)); return ll_pthread_mutex_destroy(mutex); } @@ -341,7 +341,7 @@ int pthread_rwlock_destroy(pthread_rwlock_t *rwlock) { try_init_preload(); - debug_check_no_locks_freed(rwlock, rwlock + sizeof(*rwlock)); + debug_check_no_locks_freed(rwlock, sizeof(*rwlock)); __del_lock(__get_lock(rwlock)); return ll_pthread_rwlock_destroy(rwlock); } |
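The final hunk, in tools/lib/lockdep/preload.c, is worth a closer look: the second argument of debug_check_no_locks_freed() is a byte length (which is what the fixed calls now pass), while the old calls handed it "mutex + sizeof(*mutex)" — not a length at all, and not even a correct end pointer, because pointer addition scales by the element size. The short user-space sketch below only illustrates that scaling under those assumptions; the array, names and printed values are hypothetical and not part of the patch.

/*
 * Illustration of the pointer-arithmetic mistake fixed above in
 * tools/lib/lockdep/preload.c: "p + sizeof(*p)" advances p by
 * sizeof(*p) elements, i.e. sizeof(*p) * sizeof(*p) bytes, whereas
 * the callee wants a plain byte count.
 */
#include <pthread.h>
#include <stdio.h>

int main(void)
{
	/* An array only so the miscomputed pointer stays in bounds here. */
	pthread_mutex_t locks[64];
	pthread_mutex_t *p = locks;

	/* The byte length the fixed calls now pass. */
	size_t len = sizeof(*p);

	/* What the old calls passed instead of a length. */
	pthread_mutex_t *bogus = p + sizeof(*p);

	printf("sizeof(pthread_mutex_t) = %zu bytes\n", len);
	printf("p + sizeof(*p) lies %td bytes past p\n",
	       (char *)bogus - (char *)p);
	return 0;
}

On a typical 64-bit glibc build this would print 40 and 1600: the old expression landed sizeof(*mutex) * sizeof(*mutex) bytes past the lock rather than covering its 40 bytes. The same one-line correction is applied to both the mutex and rwlock destroy paths, so lockdep checks the intended memory range in each case.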