Diffstat (limited to 'include')
99 files changed, 1197 insertions, 551 deletions
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h index 9a62937c56c..51969436b8b 100644 --- a/include/asm-generic/cputime.h +++ b/include/asm-generic/cputime.h @@ -4,66 +4,12 @@ #include <linux/time.h> #include <linux/jiffies.h> -typedef unsigned long __nocast cputime_t; - -#define cputime_one_jiffy jiffies_to_cputime(1) -#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct) -#define cputime_to_scaled(__ct) (__ct) -#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz) - -typedef u64 __nocast cputime64_t; - -#define cputime64_to_jiffies64(__ct) (__force u64)(__ct) -#define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif) - -#define nsecs_to_cputime64(__ct) \ - jiffies64_to_cputime64(nsecs_to_jiffies64(__ct)) - - -/* - * Convert cputime to microseconds and back. - */ -#define cputime_to_usecs(__ct) \ - jiffies_to_usecs(cputime_to_jiffies(__ct)) -#define usecs_to_cputime(__usec) \ - jiffies_to_cputime(usecs_to_jiffies(__usec)) -#define usecs_to_cputime64(__usec) \ - jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000)) - -/* - * Convert cputime to seconds and back. - */ -#define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ) -#define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ) - -/* - * Convert cputime to timespec and back. - */ -#define timespec_to_cputime(__val) \ - jiffies_to_cputime(timespec_to_jiffies(__val)) -#define cputime_to_timespec(__ct,__val) \ - jiffies_to_timespec(cputime_to_jiffies(__ct),__val) - -/* - * Convert cputime to timeval and back. - */ -#define timeval_to_cputime(__val) \ - jiffies_to_cputime(timeval_to_jiffies(__val)) -#define cputime_to_timeval(__ct,__val) \ - jiffies_to_timeval(cputime_to_jiffies(__ct),__val) - -/* - * Convert cputime to clock and back. - */ -#define cputime_to_clock_t(__ct) \ - jiffies_to_clock_t(cputime_to_jiffies(__ct)) -#define clock_t_to_cputime(__x) \ - jiffies_to_cputime(clock_t_to_jiffies(__x)) +#ifndef CONFIG_VIRT_CPU_ACCOUNTING +# include <asm-generic/cputime_jiffies.h> +#endif -/* - * Convert cputime64 to clock. - */ -#define cputime64_to_clock_t(__ct) \ - jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct)) +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +# include <asm-generic/cputime_nsecs.h> +#endif #endif diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h new file mode 100644 index 00000000000..272ecba9f58 --- /dev/null +++ b/include/asm-generic/cputime_jiffies.h @@ -0,0 +1,72 @@ +#ifndef _ASM_GENERIC_CPUTIME_JIFFIES_H +#define _ASM_GENERIC_CPUTIME_JIFFIES_H + +typedef unsigned long __nocast cputime_t; + +#define cputime_one_jiffy jiffies_to_cputime(1) +#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct) +#define cputime_to_scaled(__ct) (__ct) +#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz) + +typedef u64 __nocast cputime64_t; + +#define cputime64_to_jiffies64(__ct) (__force u64)(__ct) +#define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif) + + +/* + * Convert nanoseconds to cputime + */ +#define nsecs_to_cputime64(__nsec) \ + jiffies64_to_cputime64(nsecs_to_jiffies64(__nsec)) +#define nsecs_to_cputime(__nsec) \ + jiffies_to_cputime(nsecs_to_jiffies(__nsec)) + + +/* + * Convert cputime to microseconds and back. 
+ */ +#define cputime_to_usecs(__ct) \ + jiffies_to_usecs(cputime_to_jiffies(__ct)) +#define usecs_to_cputime(__usec) \ + jiffies_to_cputime(usecs_to_jiffies(__usec)) +#define usecs_to_cputime64(__usec) \ + jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000)) + +/* + * Convert cputime to seconds and back. + */ +#define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ) +#define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ) + +/* + * Convert cputime to timespec and back. + */ +#define timespec_to_cputime(__val) \ + jiffies_to_cputime(timespec_to_jiffies(__val)) +#define cputime_to_timespec(__ct,__val) \ + jiffies_to_timespec(cputime_to_jiffies(__ct),__val) + +/* + * Convert cputime to timeval and back. + */ +#define timeval_to_cputime(__val) \ + jiffies_to_cputime(timeval_to_jiffies(__val)) +#define cputime_to_timeval(__ct,__val) \ + jiffies_to_timeval(cputime_to_jiffies(__ct),__val) + +/* + * Convert cputime to clock and back. + */ +#define cputime_to_clock_t(__ct) \ + jiffies_to_clock_t(cputime_to_jiffies(__ct)) +#define clock_t_to_cputime(__x) \ + jiffies_to_cputime(clock_t_to_jiffies(__x)) + +/* + * Convert cputime64 to clock. + */ +#define cputime64_to_clock_t(__ct) \ + jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct)) + +#endif diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h new file mode 100644 index 00000000000..b6485cafb7b --- /dev/null +++ b/include/asm-generic/cputime_nsecs.h @@ -0,0 +1,104 @@ +/* + * Definitions for measuring cputime in nsecs resolution. + * + * Based on <arch/ia64/include/asm/cputime.h> + * + * Copyright (C) 2007 FUJITSU LIMITED + * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ + +#ifndef _ASM_GENERIC_CPUTIME_NSECS_H +#define _ASM_GENERIC_CPUTIME_NSECS_H + +typedef u64 __nocast cputime_t; +typedef u64 __nocast cputime64_t; + +#define cputime_one_jiffy jiffies_to_cputime(1) + +/* + * Convert cputime <-> jiffies (HZ) + */ +#define cputime_to_jiffies(__ct) \ + ((__force u64)(__ct) / (NSEC_PER_SEC / HZ)) +#define cputime_to_scaled(__ct) (__ct) +#define jiffies_to_cputime(__jif) \ + (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ)) +#define cputime64_to_jiffies64(__ct) \ + ((__force u64)(__ct) / (NSEC_PER_SEC / HZ)) +#define jiffies64_to_cputime64(__jif) \ + (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ)) + + +/* + * Convert cputime <-> nanoseconds + */ +#define nsecs_to_cputime(__nsecs) ((__force u64)(__nsecs)) + + +/* + * Convert cputime <-> microseconds + */ +#define cputime_to_usecs(__ct) \ + ((__force u64)(__ct) / NSEC_PER_USEC) +#define usecs_to_cputime(__usecs) \ + (__force cputime_t)((__usecs) * NSEC_PER_USEC) +#define usecs_to_cputime64(__usecs) \ + (__force cputime64_t)((__usecs) * NSEC_PER_USEC) + +/* + * Convert cputime <-> seconds + */ +#define cputime_to_secs(__ct) \ + ((__force u64)(__ct) / NSEC_PER_SEC) +#define secs_to_cputime(__secs) \ + (__force cputime_t)((__secs) * NSEC_PER_SEC) + +/* + * Convert cputime <-> timespec (nsec) + */ +static inline cputime_t timespec_to_cputime(const struct timespec *val) +{ + u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec; + return (__force cputime_t) ret; +} +static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) +{ + val->tv_sec = (__force u64) ct / NSEC_PER_SEC; + val->tv_nsec = (__force u64) ct % NSEC_PER_SEC; +} + +/* + * Convert cputime <-> timeval (msec) + */ +static inline cputime_t timeval_to_cputime(struct timeval *val) +{ + u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC; + return (__force cputime_t) ret; +} +static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) +{ + val->tv_sec = (__force u64) ct / NSEC_PER_SEC; + val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC; +} + +/* + * Convert cputime <-> clock (USER_HZ) + */ +#define cputime_to_clock_t(__ct) \ + ((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ)) +#define clock_t_to_cputime(__x) \ + (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ)) + +/* + * Convert cputime64 to clock. 
+ */ +#define cputime64_to_clock_t(__ct) \ + cputime_to_clock_t((__force cputime_t)__ct) + +#endif diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h index ccf7b4f34a3..6c32af918c2 100644 --- a/include/asm-generic/dma-mapping-broken.h +++ b/include/asm-generic/dma-mapping-broken.h @@ -16,6 +16,22 @@ extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle); +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) +{ + /* attrs is not supported and ignored */ + return dma_alloc_coherent(dev, size, dma_handle, flag); +} + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + struct dma_attrs *attrs) +{ + /* attrs is not supported and ignored */ + dma_free_coherent(dev, size, cpu_addr, dma_handle); +} + #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) diff --git a/include/asm-generic/parport.h b/include/asm-generic/parport.h index 40528cb977e..2c9f9d4336c 100644 --- a/include/asm-generic/parport.h +++ b/include/asm-generic/parport.h @@ -10,8 +10,8 @@ * to devices on the PCI bus. */ -static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma); -static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma) +static int parport_pc_find_isa_ports(int autoirq, int autodma); +static int parport_pc_find_nonpci_ports(int autoirq, int autodma) { #ifdef CONFIG_ISA return parport_pc_find_isa_ports(autoirq, autodma); diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 701beab27aa..5cf680a98f9 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -461,10 +461,8 @@ static inline int is_zero_pfn(unsigned long pfn) return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); } -static inline unsigned long my_zero_pfn(unsigned long addr) -{ - return page_to_pfn(ZERO_PAGE(addr)); -} +#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) + #else static inline int is_zero_pfn(unsigned long pfn) { diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h index 58f466ff00d..1db51b8524e 100644 --- a/include/asm-generic/syscalls.h +++ b/include/asm-generic/syscalls.h @@ -21,10 +21,12 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long fd, off_t pgoff); #endif +#ifndef CONFIG_GENERIC_SIGALTSTACK #ifndef sys_sigaltstack asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *, struct pt_regs *); #endif +#endif #ifndef sys_rt_sigreturn asmlinkage long sys_rt_sigreturn(struct pt_regs *regs); diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index ed6642ad03e..25f01d0bc14 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -78,6 +78,14 @@ struct mmu_gather_batch { #define MAX_GATHER_BATCH \ ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *)) +/* + * Limit the maximum number of mmu_gather batches to reduce a risk of soft + * lockups for non-preemptible kernels on huge machines when a lot of memory + * is zapped during unmapping. + * 10K pages freed at once should be safe even without a preemption point. + */ +#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH) + /* struct mmu_gather is an opaque type used by the mm code for passing around * any data needed by arch specific code for tlb_remove_page. 
*/ @@ -96,6 +104,7 @@ struct mmu_gather { struct mmu_gather_batch *active; struct mmu_gather_batch local; struct page *__pages[MMU_GATHER_BUNDLE]; + unsigned int batch_count; }; #define HAVE_GENERIC_MMU_GATHER diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index 0f4a366f6fa..3527fb3f75b 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -70,7 +70,7 @@ struct drm_mm { unsigned long scan_color; unsigned long scan_size; unsigned long scan_hit_start; - unsigned scan_hit_size; + unsigned long scan_hit_end; unsigned scanned_blocks; unsigned long scan_start; unsigned long scan_end; diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h index 808dad29607..d6aeaf3c6d6 100644 --- a/include/drm/exynos_drm.h +++ b/include/drm/exynos_drm.h @@ -6,24 +6,10 @@ * Joonyoung Shim <jy0922.shim@samsung.com> * Seung-Woo Kim <sw0312.kim@samsung.com> * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
*/ #ifndef _EXYNOS_DRM_H_ #define _EXYNOS_DRM_H_ diff --git a/include/linux/aer.h b/include/linux/aer.h index 544abdb2238..ec10e1b24c1 100644 --- a/include/linux/aer.h +++ b/include/linux/aer.h @@ -49,8 +49,8 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) } #endif -extern void cper_print_aer(const char *prefix, int cper_severity, - struct aer_capability_regs *aer); +extern void cper_print_aer(const char *prefix, struct pci_dev *dev, + int cper_severity, struct aer_capability_regs *aer); extern int cper_severity_to_aer(int cper_severity); extern void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, int severity); diff --git a/include/linux/async.h b/include/linux/async.h index 7a24fe9b44b..a2e3f18b2ad 100644 --- a/include/linux/async.h +++ b/include/linux/async.h @@ -19,8 +19,7 @@ typedef u64 async_cookie_t; typedef void (async_func_ptr) (void *data, async_cookie_t cookie); struct async_domain { struct list_head node; - struct list_head domain; - int count; + struct list_head pending; unsigned registered:1; }; @@ -29,8 +28,7 @@ struct async_domain { */ #define ASYNC_DOMAIN(_name) \ struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node), \ - .domain = LIST_HEAD_INIT(_name.domain), \ - .count = 0, \ + .pending = LIST_HEAD_INIT(_name.pending), \ .registered = 1 } /* @@ -39,8 +37,7 @@ struct async_domain { */ #define ASYNC_DOMAIN_EXCLUSIVE(_name) \ struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node), \ - .domain = LIST_HEAD_INIT(_name.domain), \ - .count = 0, \ + .pending = LIST_HEAD_INIT(_name.pending), \ .registered = 0 } extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data); @@ -52,4 +49,5 @@ extern void async_synchronize_full_domain(struct async_domain *domain); extern void async_synchronize_cookie(async_cookie_t cookie); extern void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain); +extern bool current_is_async(void); #endif diff --git a/include/linux/ata.h b/include/linux/ata.h index 408da950217..8f7a3d68371 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -297,10 +297,12 @@ enum { ATA_LOG_SATA_NCQ = 0x10, ATA_LOG_SATA_ID_DEV_DATA = 0x30, ATA_LOG_SATA_SETTINGS = 0x08, - ATA_LOG_DEVSLP_MDAT = 0x30, + ATA_LOG_DEVSLP_OFFSET = 0x30, + ATA_LOG_DEVSLP_SIZE = 0x08, + ATA_LOG_DEVSLP_MDAT = 0x00, ATA_LOG_DEVSLP_MDAT_MASK = 0x1F, - ATA_LOG_DEVSLP_DETO = 0x31, - ATA_LOG_DEVSLP_VALID = 0x37, + ATA_LOG_DEVSLP_DETO = 0x01, + ATA_LOG_DEVSLP_VALID = 0x07, ATA_LOG_DEVSLP_VALID_MASK = 0x80, /* READ/WRITE LONG (obsolete) */ diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h index fe9989636b6..b9fde17f767 100644 --- a/include/linux/ata_platform.h +++ b/include/linux/ata_platform.h @@ -15,12 +15,12 @@ struct pata_platform_info { unsigned int irq_flags; }; -extern int __devinit __pata_platform_probe(struct device *dev, - struct resource *io_res, - struct resource *ctl_res, - struct resource *irq_res, - unsigned int ioport_shift, - int __pio_mask); +extern int __pata_platform_probe(struct device *dev, + struct resource *io_res, + struct resource *ctl_res, + struct resource *irq_res, + unsigned int ioport_shift, + int __pio_mask); /* * Marvell SATA private data diff --git a/include/linux/audit.h b/include/linux/audit.h index bce729afbcf..5a6d718adf3 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -24,6 +24,7 @@ #define _LINUX_AUDIT_H_ #include <linux/sched.h> +#include <linux/ptrace.h> #include <uapi/linux/audit.h> struct 
audit_sig_info { @@ -157,7 +158,8 @@ void audit_core_dumps(long signr); static inline void audit_seccomp(unsigned long syscall, long signr, int code) { - if (unlikely(!audit_dummy_context())) + /* Force a record to be reported if a signal was delivered. */ + if (signr || unlikely(!audit_dummy_context())) __audit_seccomp(syscall, signr, code); } diff --git a/include/linux/bcma/bcma_driver_gmac_cmn.h b/include/linux/bcma/bcma_driver_gmac_cmn.h index def894b83b0..4dd1f33e36a 100644 --- a/include/linux/bcma/bcma_driver_gmac_cmn.h +++ b/include/linux/bcma/bcma_driver_gmac_cmn.h @@ -92,7 +92,7 @@ struct bcma_drv_gmac_cmn { #define gmac_cmn_write32(gc, offset, val) bcma_write32((gc)->core, offset, val) #ifdef CONFIG_BCMA_DRIVER_GMAC_CMN -extern void __devinit bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc); +extern void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc); #else static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc) { } #endif diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h index 41da581e161..c48d98d27b7 100644 --- a/include/linux/bcma/bcma_driver_pci.h +++ b/include/linux/bcma/bcma_driver_pci.h @@ -214,7 +214,7 @@ struct bcma_drv_pci { #define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val) #define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val) -extern void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc); +extern void bcma_core_pci_init(struct bcma_drv_pci *pc); extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, bool enable); extern void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend); diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 8a7096fcb01..66346521cb6 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -161,6 +161,15 @@ clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec) extern void clockevents_suspend(void); extern void clockevents_resume(void); +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST +#ifdef CONFIG_ARCH_HAS_TICK_BROADCAST +extern void tick_broadcast(const struct cpumask *mask); +#else +#define tick_broadcast NULL +#endif +extern int tick_receive_broadcast(void); +#endif + #ifdef CONFIG_GENERIC_CLOCKEVENTS extern void clockevents_notify(unsigned long reason, void *arg); #else diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 6ecb6dc2f30..cc7bddeaf55 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write, extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask, - bool sync, bool *contended, struct page **page); + bool sync, bool *contended); extern int compact_pgdat(pg_data_t *pgdat, int order); extern void reset_isolation_suitable(pg_data_t *pgdat); extern unsigned long compaction_suitable(struct zone *zone, int order); @@ -75,7 +75,7 @@ static inline bool compaction_restarting(struct zone *zone, int order) #else static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - bool sync, bool *contended, struct page **page) + bool sync, bool *contended) { return COMPACT_CONTINUE; } diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index e24339ccb7f..b28d161c109 100644 --- 
a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -3,12 +3,40 @@ #ifdef CONFIG_CONTEXT_TRACKING #include <linux/sched.h> +#include <linux/percpu.h> + +struct context_tracking { + /* + * When active is false, probes are unset in order + * to minimize overhead: TIF flags are cleared + * and calls to user_enter/exit are ignored. This + * may be further optimized using static keys. + */ + bool active; + enum { + IN_KERNEL = 0, + IN_USER, + } state; +}; + +DECLARE_PER_CPU(struct context_tracking, context_tracking); + +static inline bool context_tracking_in_user(void) +{ + return __this_cpu_read(context_tracking.state) == IN_USER; +} + +static inline bool context_tracking_active(void) +{ + return __this_cpu_read(context_tracking.active); +} extern void user_enter(void); extern void user_exit(void); extern void context_tracking_task_switch(struct task_struct *prev, struct task_struct *next); #else +static inline bool context_tracking_in_user(void) { return false; } static inline void user_enter(void) { } static inline void user_exit(void) { } static inline void context_tracking_task_switch(struct task_struct *prev, diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h index ac3bbb5b950..1739510d899 100644 --- a/include/linux/cpu_rmap.h +++ b/include/linux/cpu_rmap.h @@ -13,9 +13,11 @@ #include <linux/cpumask.h> #include <linux/gfp.h> #include <linux/slab.h> +#include <linux/kref.h> /** * struct cpu_rmap - CPU affinity reverse-map + * @refcount: kref for object * @size: Number of objects to be reverse-mapped * @used: Number of objects added * @obj: Pointer to array of object pointers @@ -23,6 +25,7 @@ * based on affinity masks */ struct cpu_rmap { + struct kref refcount; u16 size, used; void **obj; struct { @@ -33,15 +36,7 @@ struct cpu_rmap { #define CPU_RMAP_DIST_INF 0xffff extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags); - -/** - * free_cpu_rmap - free CPU affinity reverse-map - * @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL - */ -static inline void free_cpu_rmap(struct cpu_rmap *rmap) -{ - kfree(rmap); -} +extern int cpu_rmap_put(struct cpu_rmap *rmap); extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj); extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index, diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 3711b34dc4f..24cd1037b6d 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -126,9 +126,9 @@ struct cpuidle_driver { struct module *owner; int refcnt; - unsigned int power_specified:1; /* set to 1 to use the core cpuidle time keeping (for all states). 
*/ unsigned int en_core_tk_irqen:1; + /* states array must be ordered in decreasing power consumption */ struct cpuidle_state states[CPUIDLE_STATE_MAX]; int state_count; int safe_state_index; diff --git a/include/linux/cred.h b/include/linux/cred.h index abb2cd50f6b..04421e82536 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -128,7 +128,6 @@ struct cred { struct key *process_keyring; /* keyring private to this process */ struct key *thread_keyring; /* keyring private to this thread */ struct key *request_key_auth; /* assumed request_key authority */ - struct thread_group_cred *tgcred; /* thread-group shared credentials */ #endif #ifdef CONFIG_SECURITY void *security; /* subjective LSM security */ diff --git a/include/linux/efi.h b/include/linux/efi.h index 8b84916dc67..7a9498ab3c2 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -618,18 +618,30 @@ extern int __init efi_setup_pcdp_console(char *); #endif /* - * We play games with efi_enabled so that the compiler will, if possible, remove - * EFI-related code altogether. + * We play games with efi_enabled so that the compiler will, if + * possible, remove EFI-related code altogether. */ +#define EFI_BOOT 0 /* Were we booted from EFI? */ +#define EFI_SYSTEM_TABLES 1 /* Can we use EFI system tables? */ +#define EFI_CONFIG_TABLES 2 /* Can we use EFI config tables? */ +#define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */ +#define EFI_MEMMAP 4 /* Can we use EFI memory map? */ +#define EFI_64BIT 5 /* Is the firmware 64-bit? */ + #ifdef CONFIG_EFI # ifdef CONFIG_X86 - extern int efi_enabled; - extern bool efi_64bit; +extern int efi_enabled(int facility); # else -# define efi_enabled 1 +static inline int efi_enabled(int facility) +{ + return 1; +} # endif #else -# define efi_enabled 0 +static inline int efi_enabled(int facility) +{ + return 0; +} #endif /* diff --git a/include/linux/elevator.h b/include/linux/elevator.h index c03af7687bb..18662063175 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -138,6 +138,7 @@ extern void elv_drain_elevator(struct request_queue *); /* * io scheduler registration */ +extern void __init load_default_elevator_module(void); extern int elv_register(struct elevator_type *); extern void elv_unregister(struct elevator_type *); @@ -206,5 +207,9 @@ enum { INIT_LIST_HEAD(&(rq)->csd.list); \ } while (0) +#else /* CONFIG_BLOCK */ + +static inline void load_default_elevator_module(void) { } + #endif /* CONFIG_BLOCK */ #endif diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 92691d85c32..e5ca8ef50e9 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -74,7 +74,7 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, * SAVE_REGS - The ftrace_ops wants regs saved at each function called * and passed to the callback. If this flag is set, but the * architecture does not support passing regs - * (ARCH_SUPPORTS_FTRACE_SAVE_REGS is not defined), then the + * (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the * ftrace_ops will fail to register, unless the next flag * is set. 
* SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the @@ -418,7 +418,7 @@ void ftrace_modify_all_code(int command); #endif #ifndef FTRACE_REGS_ADDR -#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller) #else # define FTRACE_REGS_ADDR FTRACE_ADDR @@ -480,7 +480,7 @@ extern int ftrace_make_nop(struct module *mod, */ extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); -#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS /** * ftrace_modify_call - convert from one addr to another (no nop) * @rec: the mcount call site record diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index a3d489531d8..13a54d0bdfa 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -49,7 +49,6 @@ struct trace_entry { unsigned char flags; unsigned char preempt_count; int pid; - int padding; }; #define FTRACE_MAX_EVENT \ @@ -84,6 +83,9 @@ struct trace_iterator { long idx; cpumask_var_t started; + + /* it's true when current open file is snapshot */ + bool snapshot; }; enum trace_iter_flags { @@ -272,7 +274,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type, extern int trace_add_event_call(struct ftrace_event_call *call); extern void trace_remove_event_call(struct ftrace_event_call *call); -#define is_signed_type(type) (((type)(-1)) < 0) +#define is_signed_type(type) (((type)(-1)) < (type)0) int trace_set_clr_event(const char *system, const char *event, int set); diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 624ef3f45c8..29eb805ea4a 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -153,7 +153,7 @@ extern void rcu_nmi_exit(void); */ #define __irq_enter() \ do { \ - vtime_account_irq_enter(current); \ + account_irq_enter_time(current); \ add_preempt_count(HARDIRQ_OFFSET); \ trace_hardirq_enter(); \ } while (0) @@ -169,7 +169,7 @@ extern void irq_enter(void); #define __irq_exit() \ do { \ trace_hardirq_exit(); \ - vtime_account_irq_exit(current); \ + account_irq_exit_time(current); \ sub_preempt_count(HARDIRQ_OFFSET); \ } while (0) @@ -180,10 +180,10 @@ extern void irq_exit(void); #define nmi_enter() \ do { \ + lockdep_off(); \ ftrace_nmi_enter(); \ BUG_ON(in_nmi()); \ add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ - lockdep_off(); \ rcu_nmi_enter(); \ trace_hardirq_enter(); \ } while (0) @@ -192,10 +192,10 @@ extern void irq_exit(void); do { \ trace_hardirq_exit(); \ rcu_nmi_exit(); \ - lockdep_on(); \ BUG_ON(!in_nmi()); \ sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ ftrace_nmi_exit(); \ + lockdep_on(); \ } while (0) #endif /* LINUX_HARDIRQ_H */ diff --git a/include/linux/init.h b/include/linux/init.h index a799273714a..861814710d5 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -93,14 +93,6 @@ #define __exit __section(.exit.text) __exitused __cold notrace -/* Used for HOTPLUG, but that is always enabled now, so just make them noops */ -#define __devinit -#define __devinitdata -#define __devinitconst -#define __devexit -#define __devexitdata -#define __devexitconst - /* Used for HOTPLUG_CPU */ #define __cpuinit __section(.cpuinit.text) __cold notrace #define __cpuinitdata __section(.cpuinit.data) @@ -161,6 +153,7 @@ extern unsigned int reset_devices; /* used by init/main.c */ void setup_arch(char **); void prepare_namespace(void); +void __init load_default_modules(void); extern void (*late_time_init)(void); @@ -337,18 +330,6 @@ 
void __init parse_early_options(char *cmdline); #define __INITRODATA_OR_MODULE __INITRODATA #endif /*CONFIG_MODULES*/ -/* Functions marked as __devexit may be discarded at kernel link time, depending - on config options. Newer versions of binutils detect references from - retained sections to discarded sections and flag an error. Pointers to - __devexit functions must use __devexit_p(function_name), the wrapper will - insert either the function_name or NULL, depending on the config options. - */ -#if defined(MODULE) || defined(CONFIG_HOTPLUG) -#define __devexit_p(x) x -#else -#define __devexit_p(x) NULL -#endif - #ifdef MODULE #define __exit_p(x) x #else diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 6d087c5f57f..5cd0f094992 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -10,7 +10,9 @@ #include <linux/pid_namespace.h> #include <linux/user_namespace.h> #include <linux/securebits.h> +#include <linux/seqlock.h> #include <net/net_namespace.h> +#include <linux/sched/rt.h> #ifdef CONFIG_SMP # define INIT_PUSHABLE_TASKS(tsk) \ @@ -141,6 +143,15 @@ extern struct task_group root_task_group; # define INIT_PERF_EVENTS(tsk) #endif +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +# define INIT_VTIME(tsk) \ + .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \ + .vtime_snap = 0, \ + .vtime_snap_whence = VTIME_SYS, +#else +# define INIT_VTIME(tsk) +#endif + #define INIT_TASK_COMM "swapper" /* @@ -210,6 +221,7 @@ extern struct task_group root_task_group; INIT_TRACE_RECURSION \ INIT_TASK_RCU_PREEMPT(tsk) \ INIT_CPUSET_SEQ \ + INIT_VTIME(tsk) \ } diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 5e4e6170f43..5fa5afeeb75 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -268,11 +268,6 @@ struct irq_affinity_notify { extern int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); -static inline void irq_run_affinity_notifiers(void) -{ - flush_scheduled_work(); -} - #else /* CONFIG_SMP */ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index fe771978e87..ae221a7b509 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -24,6 +24,7 @@ struct ipc_ids { unsigned short seq_max; struct rw_semaphore rw_mutex; struct idr ipcs_idr; + int next_id; }; struct ipc_namespace { diff --git a/include/linux/irq.h b/include/linux/irq.h index fdf2c4a238c..bc4e0661195 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -509,8 +509,11 @@ static inline void irq_set_percpu_devid_flags(unsigned int irq) /* Handle dynamic irq creation and destruction */ extern unsigned int create_irq_nr(unsigned int irq_want, int node); +extern unsigned int __create_irqs(unsigned int from, unsigned int count, + int node); extern int create_irq(void); extern void destroy_irq(unsigned int irq); +extern void destroy_irqs(unsigned int irq, unsigned int count); /* * Dynamic irq helper functions. Obsolete. 
Use irq_alloc_desc* and @@ -528,6 +531,8 @@ extern int irq_set_handler_data(unsigned int irq, void *data); extern int irq_set_chip_data(unsigned int irq, void *data); extern int irq_set_irq_type(unsigned int irq, unsigned int type); extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); +extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, + struct msi_desc *entry); extern struct irq_data *irq_get_irq_data(unsigned int irq); static inline struct irq_chip *irq_get_chip(unsigned int irq) @@ -590,6 +595,9 @@ int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, #define irq_alloc_desc_from(from, node) \ irq_alloc_descs(-1, from, 1, node) +#define irq_alloc_descs_from(from, cnt, node) \ + irq_alloc_descs(-1, from, cnt, node) + void irq_free_descs(unsigned int irq, unsigned int cnt); int irq_reserve_irqs(unsigned int from, unsigned int cnt); diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index 6a9e8f5399e..f5dbce50466 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -3,6 +3,20 @@ #include <linux/llist.h> +/* + * An entry can be in one of four states: + * + * free NULL, 0 -> {claimed} : free to be used + * claimed NULL, 3 -> {pending} : claimed to be enqueued + * pending next, 3 -> {busy} : queued, pending callback + * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed + */ + +#define IRQ_WORK_PENDING 1UL +#define IRQ_WORK_BUSY 2UL +#define IRQ_WORK_FLAGS 3UL +#define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */ + struct irq_work { unsigned long flags; struct llist_node llnode; @@ -16,8 +30,14 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *)) work->func = func; } -bool irq_work_queue(struct irq_work *work); +void irq_work_queue(struct irq_work *work); void irq_work_run(void); void irq_work_sync(struct irq_work *work); +#ifdef CONFIG_IRQ_WORK +bool irq_work_needs_cpu(void); +#else +static bool irq_work_needs_cpu(void) { return false; } +#endif + #endif /* _LINUX_IRQ_WORK_H */ diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 66b70780e91..ed5f6ed6eb7 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -127,7 +127,7 @@ extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t) extern void account_steal_time(cputime_t); extern void account_idle_time(cputime_t); -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE static inline void account_process_tick(struct task_struct *tsk, int user) { vtime_account_user(tsk); diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 23755ba42ab..4b6ef4d33cc 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -49,16 +49,6 @@ #define KPROBE_REENTER 0x00000004 #define KPROBE_HIT_SSDONE 0x00000008 -/* - * If function tracer is enabled and the arch supports full - * passing of pt_regs to function tracing, then kprobes can - * optimize on top of function tracing. 
- */ -#if defined(CONFIG_FUNCTION_TRACER) && defined(ARCH_SUPPORTS_FTRACE_SAVE_REGS) \ - && defined(ARCH_SUPPORTS_KPROBES_ON_FTRACE) -# define KPROBES_CAN_USE_FTRACE -#endif - /* Attach to insert probes on any functions which should be ignored*/ #define __kprobes __attribute__((__section__(".kprobes.text"))) @@ -316,7 +306,7 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table, #endif #endif /* CONFIG_OPTPROBES */ -#ifdef KPROBES_CAN_USE_FTRACE +#ifdef CONFIG_KPROBES_ON_FTRACE extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *ops, struct pt_regs *regs); extern int arch_prepare_kprobe_ftrace(struct kprobe *p); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 2c497ab0d03..b7996a768eb 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -22,6 +22,7 @@ #include <linux/rcupdate.h> #include <linux/ratelimit.h> #include <linux/err.h> +#include <linux/irqflags.h> #include <asm/signal.h> #include <linux/kvm.h> @@ -740,15 +741,52 @@ static inline int kvm_deassign_device(struct kvm *kvm, } #endif /* CONFIG_IOMMU_API */ -static inline void kvm_guest_enter(void) +static inline void __guest_enter(void) { - BUG_ON(preemptible()); /* * This is running in ioctl context so we can avoid * the call to vtime_account() with its unnecessary idle check. */ - vtime_account_system_irqsafe(current); + vtime_account_system(current); current->flags |= PF_VCPU; +} + +static inline void __guest_exit(void) +{ + /* + * This is running in ioctl context so we can avoid + * the call to vtime_account() with its unnecessary idle check. + */ + vtime_account_system(current); + current->flags &= ~PF_VCPU; +} + +#ifdef CONFIG_CONTEXT_TRACKING +extern void guest_enter(void); +extern void guest_exit(void); + +#else /* !CONFIG_CONTEXT_TRACKING */ +static inline void guest_enter(void) +{ + __guest_enter(); +} + +static inline void guest_exit(void) +{ + __guest_exit(); +} +#endif /* !CONFIG_CONTEXT_TRACKING */ + +static inline void kvm_guest_enter(void) +{ + unsigned long flags; + + BUG_ON(preemptible()); + + local_irq_save(flags); + guest_enter(); + local_irq_restore(flags); + /* KVM does not hold any references to rcu protected data when it * switches CPU into a guest mode. In fact switching to a guest mode * is very similar to exiting to userspase from rcu point of view. In @@ -761,12 +799,11 @@ static inline void kvm_guest_enter(void) static inline void kvm_guest_exit(void) { - /* - * This is running in ioctl context so we can avoid - * the call to vtime_account() with its unnecessary idle check. 
- */ - vtime_account_system_irqsafe(current); - current->flags &= ~PF_VCPU; + unsigned long flags; + + local_irq_save(flags); + guest_exit(); + local_irq_restore(flags); } /* diff --git a/include/linux/libata.h b/include/linux/libata.h index 83ba0ab2c91..649e5f86b5f 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -652,8 +652,8 @@ struct ata_device { u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ }; - /* Identify Device Data Log (30h), SATA Settings (page 08h) */ - u8 sata_settings[ATA_SECT_SIZE]; + /* DEVSLP Timing Variables from Identify Device Data Log */ + u8 devslp_timing[ATA_LOG_DEVSLP_SIZE]; /* error history */ int spdn_cnt; diff --git a/include/linux/llist.h b/include/linux/llist.h index a5199f6d0e8..d0ab98f73d3 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -125,6 +125,31 @@ static inline void init_llist_head(struct llist_head *list) (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member)) /** + * llist_for_each_entry_safe - iterate safely against remove over some entries + * of lock-less list of given type. + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as a temporary storage. + * @node: the fist entry of deleted list entries. + * @member: the name of the llist_node with the struct. + * + * In general, some entries of the lock-less list can be traversed + * safely only after being removed from list, so start with an entry + * instead of list head. This variant allows removal of entries + * as we iterate. + * + * If being used on entries deleted from lock-less list directly, the + * traverse order is from the newest to the oldest added entry. If + * you want to traverse from the oldest to the newest, you must + * reverse the order by yourself before traversing. + */ +#define llist_for_each_entry_safe(pos, n, node, member) \ + for ((pos) = llist_entry((node), typeof(*(pos)), member), \ + (n) = (pos)->member.next; \ + &(pos)->member != NULL; \ + (pos) = llist_entry(n, typeof(*(pos)), member), \ + (n) = (&(pos)->member != NULL) ? 
(pos)->member.next : NULL) + +/** * llist_empty - tests whether a lock-less list is empty * @head: the list to test * diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 00e46376e28..2bca44b0893 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -524,14 +524,17 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING # define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) # define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) # else # define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) # define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) # endif # define rwsem_release(l, n, i) lock_release(l, n, i) #else # define rwsem_acquire(l, s, t, i) do { } while (0) +# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0) # define rwsem_acquire_read(l, s, t, i) do { } while (0) # define rwsem_release(l, n, i) do { } while (0) #endif diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 0108a56f814..28bd5fa2ff2 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -429,7 +429,7 @@ extern int memcg_limited_groups_array_size; * the slab_mutex must be held when looping through those caches */ #define for_each_memcg_cache_index(_idx) \ - for ((_idx) = 0; i < memcg_limited_groups_array_size; (_idx)++) + for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++) static inline bool memcg_kmem_enabled(void) { diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h index 2138bd33021..e53dcfeaee6 100644 --- a/include/linux/mfd/abx500.h +++ b/include/linux/mfd/abx500.h @@ -272,8 +272,6 @@ struct abx500_bm_data { const struct abx500_fg_parameters *fg_params; }; -extern struct abx500_bm_data ab8500_bm_data; - enum { NTC_EXTERNAL = 0, NTC_INTERNAL, diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h index 44310c98ee6..9bd037df97d 100644 --- a/include/linux/mfd/abx500/ab8500-bm.h +++ b/include/linux/mfd/abx500/ab8500-bm.h @@ -422,7 +422,10 @@ struct ab8500_chargalg_platform_data { struct ab8500_btemp; struct ab8500_gpadc; struct ab8500_fg; + #ifdef CONFIG_AB8500_BM +extern struct abx500_bm_data ab8500_bm_data; + void ab8500_fg_reinit(void); void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA); struct ab8500_btemp *ab8500_btemp_get(void); @@ -434,31 +437,7 @@ int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res); int ab8500_fg_inst_curr_done(struct ab8500_fg *di); #else -int ab8500_fg_inst_curr_done(struct ab8500_fg *di) -{ -} -static void ab8500_fg_reinit(void) -{ -} -static void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA) -{ -} -static struct ab8500_btemp *ab8500_btemp_get(void) -{ - return NULL; -} -static int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp) -{ - return 0; -} -struct ab8500_fg *ab8500_fg_get(void) -{ - return NULL; -} -static int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev) -{ - return -ENODEV; -} +static struct abx500_bm_data ab8500_bm_data; static inline int ab8500_fg_inst_curr_start(struct ab8500_fg *di) { diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h index 86dd93de6ff..786d02eb79d 100644 --- a/include/linux/mfd/da9052/da9052.h +++ 
b/include/linux/mfd/da9052/da9052.h @@ -99,6 +99,9 @@ struct da9052 { u8 chip_id; int chip_irq; + + /* SOC I/O transfer related fixes for DA9052/53 */ + int (*fix_io) (struct da9052 *da9052, unsigned char reg); }; /* ADC API */ @@ -113,32 +116,87 @@ static inline int da9052_reg_read(struct da9052 *da9052, unsigned char reg) ret = regmap_read(da9052->regmap, reg, &val); if (ret < 0) return ret; + + if (da9052->fix_io) { + ret = da9052->fix_io(da9052, reg); + if (ret < 0) + return ret; + } + return val; } static inline int da9052_reg_write(struct da9052 *da9052, unsigned char reg, unsigned char val) { - return regmap_write(da9052->regmap, reg, val); + int ret; + + ret = regmap_write(da9052->regmap, reg, val); + if (ret < 0) + return ret; + + if (da9052->fix_io) { + ret = da9052->fix_io(da9052, reg); + if (ret < 0) + return ret; + } + + return ret; } static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg, unsigned reg_cnt, unsigned char *val) { - return regmap_bulk_read(da9052->regmap, reg, val, reg_cnt); + int ret; + + ret = regmap_bulk_read(da9052->regmap, reg, val, reg_cnt); + if (ret < 0) + return ret; + + if (da9052->fix_io) { + ret = da9052->fix_io(da9052, reg); + if (ret < 0) + return ret; + } + + return ret; } static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg, unsigned reg_cnt, unsigned char *val) { - return regmap_raw_write(da9052->regmap, reg, val, reg_cnt); + int ret; + + ret = regmap_raw_write(da9052->regmap, reg, val, reg_cnt); + if (ret < 0) + return ret; + + if (da9052->fix_io) { + ret = da9052->fix_io(da9052, reg); + if (ret < 0) + return ret; + } + + return ret; } static inline int da9052_reg_update(struct da9052 *da9052, unsigned char reg, unsigned char bit_mask, unsigned char reg_val) { - return regmap_update_bits(da9052->regmap, reg, bit_mask, reg_val); + int ret; + + ret = regmap_update_bits(da9052->regmap, reg, bit_mask, reg_val); + if (ret < 0) + return ret; + + if (da9052->fix_io) { + ret = da9052->fix_io(da9052, reg); + if (ret < 0) + return ret; + } + + return ret; } int da9052_device_init(struct da9052 *da9052, u8 chip_id); diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h index b97f7309d7f..c4dd3a8add2 100644 --- a/include/linux/mfd/da9052/reg.h +++ b/include/linux/mfd/da9052/reg.h @@ -34,6 +34,9 @@ #define DA9052_STATUS_C_REG 3 #define DA9052_STATUS_D_REG 4 +/* PARK REGISTER */ +#define DA9052_PARK_REGISTER DA9052_STATUS_D_REG + /* EVENT REGISTERS */ #define DA9052_EVENT_A_REG 5 #define DA9052_EVENT_B_REG 6 diff --git a/include/linux/mfd/rtsx_common.h b/include/linux/mfd/rtsx_common.h index a8d393e3066..2b13970596f 100644 --- a/include/linux/mfd/rtsx_common.h +++ b/include/linux/mfd/rtsx_common.h @@ -38,6 +38,9 @@ #define RTSX_SD_CARD 0 #define RTSX_MS_CARD 1 +#define CLK_TO_DIV_N 0 +#define DIV_N_TO_CLK 1 + struct platform_device; struct rtsx_slot { diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h index 060b721fcbf..4b117a3f54d 100644 --- a/include/linux/mfd/rtsx_pci.h +++ b/include/linux/mfd/rtsx_pci.h @@ -158,10 +158,9 @@ #define SG_TRANS_DATA (0x02 << 4) #define SG_LINK_DESC (0x03 << 4) -/* SD bank voltage */ -#define SD_IO_3V3 0 -#define SD_IO_1V8 1 - +/* Output voltage */ +#define OUTPUT_3V3 0 +#define OUTPUT_1V8 1 /* Card Clock Enable Register */ #define SD_CLK_EN 0x04 @@ -201,6 +200,20 @@ #define CHANGE_CLK 0x01 /* LDO_CTL */ +#define BPP_ASIC_1V7 0x00 +#define BPP_ASIC_1V8 0x01 +#define BPP_ASIC_1V9 0x02 +#define BPP_ASIC_2V0 0x03 +#define BPP_ASIC_2V7 
0x04 +#define BPP_ASIC_2V8 0x05 +#define BPP_ASIC_3V2 0x06 +#define BPP_ASIC_3V3 0x07 +#define BPP_REG_TUNED18 0x07 +#define BPP_TUNED18_SHIFT_8402 5 +#define BPP_TUNED18_SHIFT_8411 4 +#define BPP_PAD_MASK 0x04 +#define BPP_PAD_3V3 0x04 +#define BPP_PAD_1V8 0x00 #define BPP_LDO_POWB 0x03 #define BPP_LDO_ON 0x00 #define BPP_LDO_SUSPEND 0x02 @@ -688,7 +701,10 @@ struct pcr_ops { int (*disable_auto_blink)(struct rtsx_pcr *pcr); int (*card_power_on)(struct rtsx_pcr *pcr, int card); int (*card_power_off)(struct rtsx_pcr *pcr, int card); + int (*switch_output_voltage)(struct rtsx_pcr *pcr, + u8 voltage); unsigned int (*cd_deglitch)(struct rtsx_pcr *pcr); + int (*conv_clk_and_div_n)(int clk, int dir); }; enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN}; @@ -783,6 +799,7 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk); int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card); int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card); +int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage); unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr); void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr); diff --git a/include/linux/mm.h b/include/linux/mm.h index 63204078f72..66e2f7c61e5 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -455,7 +455,6 @@ void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); int split_free_page(struct page *page); -int capture_free_page(struct page *page, int alloc_order, int migratetype); /* * Compound pages have a destructor function. Provide a diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index bc823c4c028..deca8745252 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -151,7 +151,7 @@ struct mmu_notifier_ops { * Therefore notifier chains can only be traversed when either * * 1. mmap_sem is held. - * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->mutex). + * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->rwsem). * 3. No other concurrent thread can access the list (release) */ struct mmu_notifier { diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 4bec5be82ca..73b64a38b98 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -503,14 +503,6 @@ struct zone { * rarely used fields: */ const char *name; -#ifdef CONFIG_MEMORY_ISOLATION - /* - * the number of MIGRATE_ISOLATE *pageblock*. - * We need this for free page counting. Look at zone_watermark_ok_safe. - * It's protected by zone->lock - */ - int nr_pageblock_isolate; -#endif } ____cacheline_internodealigned_in_smp; typedef enum { diff --git a/include/linux/module.h b/include/linux/module.h index 7760c6d344a..1375ee3f03a 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -199,11 +199,11 @@ struct module_use { struct module *source, *target; }; -enum module_state -{ - MODULE_STATE_LIVE, - MODULE_STATE_COMING, - MODULE_STATE_GOING, +enum module_state { + MODULE_STATE_LIVE, /* Normal state. */ + MODULE_STATE_COMING, /* Full formed, running module_init. */ + MODULE_STATE_GOING, /* Going away. */ + MODULE_STATE_UNFORMED, /* Still setting it up. 
*/ }; /** diff --git a/include/linux/msg.h b/include/linux/msg.h index 7a4b9e97d29..391af8d11cc 100644 --- a/include/linux/msg.h +++ b/include/linux/msg.h @@ -34,7 +34,9 @@ struct msg_queue { /* Helper routines for sys_msgsnd and sys_msgrcv */ extern long do_msgsnd(int msqid, long mtype, void __user *mtext, size_t msgsz, int msgflg); -extern long do_msgrcv(int msqid, long *pmtype, void __user *mtext, - size_t msgsz, long msgtyp, int msgflg); +extern long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, + int msgflg, + long (*msg_fill)(void __user *, struct msg_msg *, + size_t)); #endif /* _LINUX_MSG_H */ diff --git a/include/linux/namei.h b/include/linux/namei.h index e998c030061..5a5ff57ceed 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -2,6 +2,7 @@ #define _LINUX_NAMEI_H #include <linux/dcache.h> +#include <linux/errno.h> #include <linux/linkage.h> #include <linux/path.h> diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c599e4782d4..9ef07d0868b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -60,6 +60,9 @@ struct wireless_dev; #define SET_ETHTOOL_OPS(netdev,ops) \ ( (netdev)->ethtool_ops = (ops) ) +extern void netdev_set_default_ethtool_ops(struct net_device *dev, + const struct ethtool_ops *ops); + /* hardware address assignment types */ #define NET_ADDR_PERM 0 /* address is permanent (default) */ #define NET_ADDR_RANDOM 1 /* address is generated randomly */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 15472d691ee..6fa4dd2a3b9 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1101,6 +1101,12 @@ static inline int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec) return -1; } +static inline int +pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec) +{ + return -1; +} + static inline void pci_msi_shutdown(struct pci_dev *dev) { } static inline void pci_disable_msi(struct pci_dev *dev) @@ -1132,6 +1138,7 @@ static inline int pci_msi_enabled(void) } #else extern int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec); +extern int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec); extern void pci_msi_shutdown(struct pci_dev *dev); extern void pci_disable_msi(struct pci_dev *dev); extern int pci_msix_table_size(struct pci_dev *dev); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 6bfb2faa0b1..e47ee462c2f 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -135,16 +135,21 @@ struct hw_perf_event { struct { /* software */ struct hrtimer hrtimer; }; + struct { /* tracepoint */ + struct task_struct *tp_target; + /* for tp_event->class */ + struct list_head tp_list; + }; #ifdef CONFIG_HAVE_HW_BREAKPOINT struct { /* breakpoint */ - struct arch_hw_breakpoint info; - struct list_head bp_list; /* * Crufty hack to avoid the chicken and egg * problem hw_breakpoint has with context * creation and event initalization. 
*/ struct task_struct *bp_target; + struct arch_hw_breakpoint info; + struct list_head bp_list; }; #endif }; @@ -817,6 +822,17 @@ do { \ } while (0) +struct perf_pmu_events_attr { + struct device_attribute attr; + u64 id; +}; + +#define PMU_EVENT_ATTR(_name, _var, _id, _show) \ +static struct perf_pmu_events_attr _var = { \ + .attr = __ATTR(_name, 0444, _show, NULL), \ + .id = _id, \ +}; + #define PMU_FORMAT_ATTR(_name, _format) \ static ssize_t \ _name##_show(struct device *dev, \ diff --git a/include/linux/platform_data/imx-iram.h b/include/linux/platform_data/imx-iram.h new file mode 100644 index 00000000000..022690c3370 --- /dev/null +++ b/include/linux/platform_data/imx-iram.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, + * MA 02110-1301, USA. + */ +#include <linux/errno.h> + +#ifdef CONFIG_IRAM_ALLOC + +int __init iram_init(unsigned long base, unsigned long size); +void __iomem *iram_alloc(unsigned int size, unsigned long *dma_addr); +void iram_free(unsigned long dma_addr, unsigned int size); + +#else + +static inline int __init iram_init(unsigned long base, unsigned long size) +{ + return -ENOMEM; +} + +static inline void __iomem *iram_alloc(unsigned int size, unsigned long *dma_addr) +{ + return NULL; +} + +static inline void iram_free(unsigned long base, unsigned long size) {} + +#endif diff --git a/include/linux/printk.h b/include/linux/printk.h index 9afc01e5a0a..86c4b629471 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -98,9 +98,6 @@ int no_printk(const char *fmt, ...) 
extern asmlinkage __printf(1, 2) void early_printk(const char *fmt, ...); -extern int printk_needs_cpu(int cpu); -extern void printk_tick(void); - #ifdef CONFIG_PRINTK asmlinkage __printf(5, 0) int vprintk_emit(int facility, int level, diff --git a/include/linux/profile.h b/include/linux/profile.h index a0fc32279fc..21123902366 100644 --- a/include/linux/profile.h +++ b/include/linux/profile.h @@ -82,9 +82,6 @@ int task_handoff_unregister(struct notifier_block * n); int profile_event_register(enum profile_type, struct notifier_block * n); int profile_event_unregister(enum profile_type, struct notifier_block * n); -int register_timer_hook(int (*hook)(struct pt_regs *)); -void unregister_timer_hook(int (*hook)(struct pt_regs *)); - struct pt_regs; #else @@ -135,16 +132,6 @@ static inline int profile_event_unregister(enum profile_type t, struct notifier_ #define profile_handoff_task(a) (0) #define profile_munmap(a) do { } while (0) -static inline int register_timer_hook(int (*hook)(struct pt_regs *)) -{ - return -ENOSYS; -} - -static inline void unregister_timer_hook(int (*hook)(struct pt_regs *)) -{ - return; -} - #endif /* CONFIG_PROFILING */ #endif /* _LINUX_PROFILE_H */ diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index 098d2a83829..cb6ab5feab6 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h @@ -46,9 +46,8 @@ struct persistent_ram_zone { size_t old_log_size; }; -struct persistent_ram_zone * __devinit persistent_ram_new(phys_addr_t start, - size_t size, u32 sig, - int ecc_size); +struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, + u32 sig, int ecc_size); void persistent_ram_free(struct persistent_ram_zone *prz); void persistent_ram_zap(struct persistent_ram_zone *prz); diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 1693775ecfe..89573a33ab3 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -45,7 +45,6 @@ extern long arch_ptrace(struct task_struct *child, long request, extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); extern void ptrace_disable(struct task_struct *); -extern int ptrace_check_attach(struct task_struct *task, bool ignore_state); extern int ptrace_request(struct task_struct *child, long request, unsigned long addr, unsigned long data); extern void ptrace_notify(int exit_code); diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index 2ac60c9cf64..fea49b5da12 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -123,9 +123,9 @@ __rb_change_child(struct rb_node *old, struct rb_node *new, extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); -static __always_inline void -rb_erase_augmented(struct rb_node *node, struct rb_root *root, - const struct rb_augment_callbacks *augment) +static __always_inline struct rb_node * +__rb_erase_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) { struct rb_node *child = node->rb_right, *tmp = node->rb_left; struct rb_node *parent, *rebalance; @@ -217,6 +217,14 @@ rb_erase_augmented(struct rb_node *node, struct rb_root *root, } augment->propagate(tmp, NULL); + return rebalance; +} + +static __always_inline void +rb_erase_augmented(struct rb_node *node, struct rb_root *root, + const struct 
rb_augment_callbacks *augment) +{ + struct rb_node *rebalance = __rb_erase_augmented(node, root, augment); if (rebalance) __rb_erase_color(rebalance, root, augment->rotate); } diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 275aa3f1062..b758ce17b30 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -53,7 +53,10 @@ extern int rcutorture_runnable; /* for sysctl */ extern void rcutorture_record_test_transition(void); extern void rcutorture_record_progress(unsigned long vernum); extern void do_trace_rcu_torture_read(char *rcutorturename, - struct rcu_head *rhp); + struct rcu_head *rhp, + unsigned long secs, + unsigned long c_old, + unsigned long c); #else static inline void rcutorture_record_test_transition(void) { @@ -63,9 +66,13 @@ static inline void rcutorture_record_progress(unsigned long vernum) } #ifdef CONFIG_RCU_TRACE extern void do_trace_rcu_torture_read(char *rcutorturename, - struct rcu_head *rhp); + struct rcu_head *rhp, + unsigned long secs, + unsigned long c_old, + unsigned long c); #else -#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0) +#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ + do { } while (0) #endif #endif @@ -749,7 +756,7 @@ static inline void rcu_preempt_sleep_check(void) * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU) * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may * be preempted, but explicit blocking is illegal. Finally, in preemptible - * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds, + * RCU implementations in real-time (with -rt patchset) kernel builds, * RCU read-side critical sections may be preempted and they may also * block, but only when acquiring spinlocks that are subject to priority * inheritance. 
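For illustration, here is a minimal sketch of how a caller drives the reworked augmented-erase path shown in the rbtree_augmented.h hunk above: rb_erase_augmented() is now a thin wrapper that unlinks the node via __rb_erase_augmented() and only then recolors if needed, so existing callers keep the same entry point. The struct mynode and the no-op mynode_* callbacks below are hypothetical and not part of the patch; they merely stand in for a tree that carries no augmented data.

#include <linux/rbtree_augmented.h>

struct mynode {
        struct rb_node rb;
        unsigned long key;
};

/* No augmented data to maintain, so the callbacks do nothing. */
static void mynode_propagate(struct rb_node *rb, struct rb_node *stop) { }
static void mynode_copy(struct rb_node *old, struct rb_node *new) { }
static void mynode_rotate(struct rb_node *old, struct rb_node *new) { }

static const struct rb_augment_callbacks mynode_callbacks = {
        .propagate = mynode_propagate,
        .copy      = mynode_copy,
        .rotate    = mynode_rotate,
};

static void mynode_erase(struct mynode *node, struct rb_root *root)
{
        /* Unlink the node, then rebalance only if __rb_erase_augmented() asked for it. */
        rb_erase_augmented(&node->rb, root, &mynode_callbacks);
}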
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 519777e3fa0..1342e69542f 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -167,6 +167,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu); unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu); u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 9531845c419..11d05f9fe8b 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -138,6 +138,7 @@ extern void rtc_device_unregister(struct rtc_device *rtc); extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs); +extern int rtc_set_ntp_time(struct timespec now); int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm); extern int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 54bd7cd7ecb..8da67d625e1 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -125,8 +125,17 @@ extern void downgrade_write(struct rw_semaphore *sem); */ extern void down_read_nested(struct rw_semaphore *sem, int subclass); extern void down_write_nested(struct rw_semaphore *sem, int subclass); +extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock); + +# define down_write_nest_lock(sem, nest_lock) \ +do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ + _down_write_nest_lock(sem, &(nest_lock)->dep_map); \ +} while (0); + #else # define down_read_nested(sem, subclass) down_read(sem) +# define down_write_nest_lock(sem, nest_lock) down_write(sem) # define down_write_nested(sem, subclass) down_write(sem) #endif diff --git a/include/linux/sched.h b/include/linux/sched.h index 577eb973de7..e4112aad296 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -304,19 +304,6 @@ static inline void lockup_detector_init(void) } #endif -#ifdef CONFIG_DETECT_HUNG_TASK -extern unsigned int sysctl_hung_task_panic; -extern unsigned long sysctl_hung_task_check_count; -extern unsigned long sysctl_hung_task_timeout_secs; -extern unsigned long sysctl_hung_task_warnings; -extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos); -#else -/* Avoid need for ifdefs elsewhere in the code */ -enum { sysctl_hung_task_timeout_secs = 0 }; -#endif - /* Attach to any functions which should be ignored in wchan output. */ #define __sched __attribute__((__section__(".sched.text"))) @@ -338,23 +325,6 @@ extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner); struct nsproxy; struct user_namespace; -/* - * Default maximum number of active map areas, this limits the number of vmas - * per mm struct. Users can overwrite this number by sysctl but there is a - * problem. - * - * When a program's coredump is generated as ELF format, a section is created - * per a vma. In ELF, the number of sections is represented in unsigned short. 
- * This means the number of sections should be smaller than 65535 at coredump. - * Because the kernel adds some informative sections to a image of program at - * generating coredump, we need some margin. The number of extra sections is - * 1-3 now and depends on arch. We use "5" as safe margin, here. - */ -#define MAPCOUNT_ELF_CORE_MARGIN (5) -#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) - -extern int sysctl_max_map_count; - #include <linux/aio.h> #ifdef CONFIG_MMU @@ -1194,6 +1164,7 @@ struct sched_entity { /* rq "owned" by this entity/group: */ struct cfs_rq *my_q; #endif + /* * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be * removed when useful for applications beyond shares distribution (e.g. @@ -1208,6 +1179,7 @@ struct sched_entity { struct sched_rt_entity { struct list_head run_list; unsigned long timeout; + unsigned long watchdog_stamp; unsigned int time_slice; struct sched_rt_entity *back; @@ -1220,11 +1192,6 @@ struct sched_rt_entity { #endif }; -/* - * default timeslice is 100 msecs (used only for SCHED_RR tasks). - * Timeslices get refilled after they expire. - */ -#define RR_TIMESLICE (100 * HZ / 1000) struct rcu_node; @@ -1368,6 +1335,15 @@ struct task_struct { #ifndef CONFIG_VIRT_CPU_ACCOUNTING struct cputime prev_cputime; #endif +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN + seqlock_t vtime_seqlock; + unsigned long long vtime_snap; + enum { + VTIME_SLEEPING = 0, + VTIME_USER, + VTIME_SYS, + } vtime_snap_whence; +#endif unsigned long nvcsw, nivcsw; /* context switch counts */ struct timespec start_time; /* monotonic time */ struct timespec real_start_time; /* boot based time */ @@ -1622,37 +1598,6 @@ static inline void set_numabalancing_state(bool enabled) } #endif -/* - * Priority of a process goes from 0..MAX_PRIO-1, valid RT - * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH - * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority - * values are inverted: lower p->prio value means higher priority. - * - * The MAX_USER_RT_PRIO value allows the actual maximum - * RT priority to be separate from the value exported to - * user-space. This allows kernel threads to set their - * priority to a value higher than any user task. Note: - * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. 
- */ - -#define MAX_USER_RT_PRIO 100 -#define MAX_RT_PRIO MAX_USER_RT_PRIO - -#define MAX_PRIO (MAX_RT_PRIO + 40) -#define DEFAULT_PRIO (MAX_RT_PRIO + 20) - -static inline int rt_prio(int prio) -{ - if (unlikely(prio < MAX_RT_PRIO)) - return 1; - return 0; -} - -static inline int rt_task(struct task_struct *p) -{ - return rt_prio(p->prio); -} - static inline struct pid *task_pid(struct task_struct *task) { return task->pids[PIDTYPE_PID].pid; @@ -1792,6 +1737,37 @@ static inline void put_task_struct(struct task_struct *t) __put_task_struct(t); } +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +extern void task_cputime(struct task_struct *t, + cputime_t *utime, cputime_t *stime); +extern void task_cputime_scaled(struct task_struct *t, + cputime_t *utimescaled, cputime_t *stimescaled); +extern cputime_t task_gtime(struct task_struct *t); +#else +static inline void task_cputime(struct task_struct *t, + cputime_t *utime, cputime_t *stime) +{ + if (utime) + *utime = t->utime; + if (stime) + *stime = t->stime; +} + +static inline void task_cputime_scaled(struct task_struct *t, + cputime_t *utimescaled, + cputime_t *stimescaled) +{ + if (utimescaled) + *utimescaled = t->utimescaled; + if (stimescaled) + *stimescaled = t->stimescaled; +} + +static inline cputime_t task_gtime(struct task_struct *t) +{ + return t->gtime; +} +#endif extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); @@ -1810,6 +1786,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, #define PF_MEMALLOC 0x00000800 /* Allocating memory */ #define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ +#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */ #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ #define PF_FROZEN 0x00010000 /* frozen for system suspend */ #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ @@ -2032,58 +2009,7 @@ extern void wake_up_idle_cpu(int cpu); static inline void wake_up_idle_cpu(int cpu) { } #endif -extern unsigned int sysctl_sched_latency; -extern unsigned int sysctl_sched_min_granularity; -extern unsigned int sysctl_sched_wakeup_granularity; -extern unsigned int sysctl_sched_child_runs_first; - -enum sched_tunable_scaling { - SCHED_TUNABLESCALING_NONE, - SCHED_TUNABLESCALING_LOG, - SCHED_TUNABLESCALING_LINEAR, - SCHED_TUNABLESCALING_END, -}; -extern enum sched_tunable_scaling sysctl_sched_tunable_scaling; - -extern unsigned int sysctl_numa_balancing_scan_delay; -extern unsigned int sysctl_numa_balancing_scan_period_min; -extern unsigned int sysctl_numa_balancing_scan_period_max; -extern unsigned int sysctl_numa_balancing_scan_period_reset; -extern unsigned int sysctl_numa_balancing_scan_size; -extern unsigned int sysctl_numa_balancing_settle_count; - -#ifdef CONFIG_SCHED_DEBUG -extern unsigned int sysctl_sched_migration_cost; -extern unsigned int sysctl_sched_nr_migrate; -extern unsigned int sysctl_sched_time_avg; -extern unsigned int sysctl_timer_migration; -extern unsigned int sysctl_sched_shares_window; - -int sched_proc_update_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *length, - loff_t *ppos); -#endif -#ifdef CONFIG_SCHED_DEBUG -static inline unsigned int get_sysctl_timer_migration(void) -{ - return sysctl_timer_migration; -} -#else 
-static inline unsigned int get_sysctl_timer_migration(void) -{ - return 1; -} -#endif -extern unsigned int sysctl_sched_rt_period; -extern int sysctl_sched_rt_runtime; - -int sched_rt_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); - #ifdef CONFIG_SCHED_AUTOGROUP -extern unsigned int sysctl_sched_autogroup_enabled; - extern void sched_autogroup_create_attach(struct task_struct *p); extern void sched_autogroup_detach(struct task_struct *p); extern void sched_autogroup_fork(struct signal_struct *sig); @@ -2099,30 +2025,6 @@ static inline void sched_autogroup_fork(struct signal_struct *sig) { } static inline void sched_autogroup_exit(struct signal_struct *sig) { } #endif -#ifdef CONFIG_CFS_BANDWIDTH -extern unsigned int sysctl_sched_cfs_bandwidth_slice; -#endif - -#ifdef CONFIG_RT_MUTEXES -extern int rt_mutex_getprio(struct task_struct *p); -extern void rt_mutex_setprio(struct task_struct *p, int prio); -extern void rt_mutex_adjust_pi(struct task_struct *p); -static inline bool tsk_is_pi_blocked(struct task_struct *tsk) -{ - return tsk->pi_blocked_on != NULL; -} -#else -static inline int rt_mutex_getprio(struct task_struct *p) -{ - return p->normal_prio; -} -# define rt_mutex_adjust_pi(p) do { } while (0) -static inline bool tsk_is_pi_blocked(struct task_struct *tsk) -{ - return false; -} -#endif - extern bool yield_to(struct task_struct *p, bool preempt); extern void set_user_nice(struct task_struct *p, long nice); extern int task_prio(const struct task_struct *p); @@ -2713,7 +2615,16 @@ static inline void thread_group_cputime_init(struct signal_struct *sig) extern void recalc_sigpending_and_wake(struct task_struct *t); extern void recalc_sigpending(void); -extern void signal_wake_up(struct task_struct *t, int resume_stopped); +extern void signal_wake_up_state(struct task_struct *t, unsigned int state); + +static inline void signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); +} +static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? __TASK_TRACED : 0); +} /* * Wrappers for p->thread_info->cpu access. No-op on UP. @@ -2743,8 +2654,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); extern long sched_getaffinity(pid_t pid, struct cpumask *mask); -extern void normalize_rt_tasks(void); - #ifdef CONFIG_CGROUP_SCHED extern struct task_group root_task_group; diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h new file mode 100644 index 00000000000..94e19ea28fc --- /dev/null +++ b/include/linux/sched/rt.h @@ -0,0 +1,58 @@ +#ifndef _SCHED_RT_H +#define _SCHED_RT_H + +/* + * Priority of a process goes from 0..MAX_PRIO-1, valid RT + * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH + * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority + * values are inverted: lower p->prio value means higher priority. + * + * The MAX_USER_RT_PRIO value allows the actual maximum + * RT priority to be separate from the value exported to + * user-space. This allows kernel threads to set their + * priority to a value higher than any user task. Note: + * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. 
+ */ + +#define MAX_USER_RT_PRIO 100 +#define MAX_RT_PRIO MAX_USER_RT_PRIO + +#define MAX_PRIO (MAX_RT_PRIO + 40) +#define DEFAULT_PRIO (MAX_RT_PRIO + 20) + +static inline int rt_prio(int prio) +{ + if (unlikely(prio < MAX_RT_PRIO)) + return 1; + return 0; +} + +static inline int rt_task(struct task_struct *p) +{ + return rt_prio(p->prio); +} + +#ifdef CONFIG_RT_MUTEXES +extern int rt_mutex_getprio(struct task_struct *p); +extern void rt_mutex_setprio(struct task_struct *p, int prio); +extern void rt_mutex_adjust_pi(struct task_struct *p); +static inline bool tsk_is_pi_blocked(struct task_struct *tsk) +{ + return tsk->pi_blocked_on != NULL; +} +#else +static inline int rt_mutex_getprio(struct task_struct *p) +{ + return p->normal_prio; +} +# define rt_mutex_adjust_pi(p) do { } while (0) +static inline bool tsk_is_pi_blocked(struct task_struct *tsk) +{ + return false; +} +#endif + +extern void normalize_rt_tasks(void); + + +#endif /* _SCHED_RT_H */ diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h new file mode 100644 index 00000000000..d2bb0ae979d --- /dev/null +++ b/include/linux/sched/sysctl.h @@ -0,0 +1,110 @@ +#ifndef _SCHED_SYSCTL_H +#define _SCHED_SYSCTL_H + +#ifdef CONFIG_DETECT_HUNG_TASK +extern unsigned int sysctl_hung_task_panic; +extern unsigned long sysctl_hung_task_check_count; +extern unsigned long sysctl_hung_task_timeout_secs; +extern unsigned long sysctl_hung_task_warnings; +extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos); +#else +/* Avoid need for ifdefs elsewhere in the code */ +enum { sysctl_hung_task_timeout_secs = 0 }; +#endif + +/* + * Default maximum number of active map areas, this limits the number of vmas + * per mm struct. Users can overwrite this number by sysctl but there is a + * problem. + * + * When a program's coredump is generated as ELF format, a section is created + * per a vma. In ELF, the number of sections is represented in unsigned short. + * This means the number of sections should be smaller than 65535 at coredump. + * Because the kernel adds some informative sections to a image of program at + * generating coredump, we need some margin. The number of extra sections is + * 1-3 now and depends on arch. We use "5" as safe margin, here. 
+ */ +#define MAPCOUNT_ELF_CORE_MARGIN (5) +#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) + +extern int sysctl_max_map_count; + +extern unsigned int sysctl_sched_latency; +extern unsigned int sysctl_sched_min_granularity; +extern unsigned int sysctl_sched_wakeup_granularity; +extern unsigned int sysctl_sched_child_runs_first; + +enum sched_tunable_scaling { + SCHED_TUNABLESCALING_NONE, + SCHED_TUNABLESCALING_LOG, + SCHED_TUNABLESCALING_LINEAR, + SCHED_TUNABLESCALING_END, +}; +extern enum sched_tunable_scaling sysctl_sched_tunable_scaling; + +extern unsigned int sysctl_numa_balancing_scan_delay; +extern unsigned int sysctl_numa_balancing_scan_period_min; +extern unsigned int sysctl_numa_balancing_scan_period_max; +extern unsigned int sysctl_numa_balancing_scan_period_reset; +extern unsigned int sysctl_numa_balancing_scan_size; +extern unsigned int sysctl_numa_balancing_settle_count; + +#ifdef CONFIG_SCHED_DEBUG +extern unsigned int sysctl_sched_migration_cost; +extern unsigned int sysctl_sched_nr_migrate; +extern unsigned int sysctl_sched_time_avg; +extern unsigned int sysctl_timer_migration; +extern unsigned int sysctl_sched_shares_window; + +int sched_proc_update_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *length, + loff_t *ppos); +#endif +#ifdef CONFIG_SCHED_DEBUG +static inline unsigned int get_sysctl_timer_migration(void) +{ + return sysctl_timer_migration; +} +#else +static inline unsigned int get_sysctl_timer_migration(void) +{ + return 1; +} +#endif + +/* + * control realtime throttling: + * + * /proc/sys/kernel/sched_rt_period_us + * /proc/sys/kernel/sched_rt_runtime_us + */ +extern unsigned int sysctl_sched_rt_period; +extern int sysctl_sched_rt_runtime; + +#ifdef CONFIG_CFS_BANDWIDTH +extern unsigned int sysctl_sched_cfs_bandwidth_slice; +#endif + +#ifdef CONFIG_SCHED_AUTOGROUP +extern unsigned int sysctl_sched_autogroup_enabled; +#endif + +/* + * default timeslice is 100 msecs (used only for SCHED_RR tasks). + * Timeslices get refilled after they expire. + */ +#define RR_TIMESLICE (100 * HZ / 1000) + +extern int sched_rr_timeslice; + +extern int sched_rr_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + +extern int sched_rt_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + +#endif /* _SCHED_SYSCTL_H */ diff --git a/include/linux/security.h b/include/linux/security.h index 0f6afc657f7..eee7478cda7 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -989,17 +989,29 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * tells the LSM to decrement the number of secmark labeling rules loaded * @req_classify_flow: * Sets the flow's sid to the openreq sid. + * @tun_dev_alloc_security: + * This hook allows a module to allocate a security structure for a TUN + * device. + * @security pointer to a security structure pointer. + * Returns a zero on success, negative values on failure. + * @tun_dev_free_security: + * This hook allows a module to free the security structure for a TUN + * device. + * @security pointer to the TUN device's security structure * @tun_dev_create: * Check permissions prior to creating a new TUN device. - * @tun_dev_post_create: - * This hook allows a module to update or allocate a per-socket security - * structure. - * @sk contains the newly created sock structure. + * @tun_dev_attach_queue: + * Check permissions prior to attaching to a TUN device queue. 
+ * @security pointer to the TUN device's security structure. * @tun_dev_attach: - * Check permissions prior to attaching to a persistent TUN device. This - * hook can also be used by the module to update any security state + * This hook can be used by the module to update any security state * associated with the TUN device's sock structure. * @sk contains the existing sock structure. + * @security pointer to the TUN device's security structure. + * @tun_dev_open: + * This hook can be used by the module to update any security state + * associated with the TUN device's security structure. + * @security pointer to the TUN devices's security structure. * * Security hooks for XFRM operations. * @@ -1620,9 +1632,12 @@ struct security_operations { void (*secmark_refcount_inc) (void); void (*secmark_refcount_dec) (void); void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); - int (*tun_dev_create)(void); - void (*tun_dev_post_create)(struct sock *sk); - int (*tun_dev_attach)(struct sock *sk); + int (*tun_dev_alloc_security) (void **security); + void (*tun_dev_free_security) (void *security); + int (*tun_dev_create) (void); + int (*tun_dev_attach_queue) (void *security); + int (*tun_dev_attach) (struct sock *sk, void *security); + int (*tun_dev_open) (void *security); #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM @@ -2566,9 +2581,12 @@ void security_inet_conn_established(struct sock *sk, int security_secmark_relabel_packet(u32 secid); void security_secmark_refcount_inc(void); void security_secmark_refcount_dec(void); +int security_tun_dev_alloc_security(void **security); +void security_tun_dev_free_security(void *security); int security_tun_dev_create(void); -void security_tun_dev_post_create(struct sock *sk); -int security_tun_dev_attach(struct sock *sk); +int security_tun_dev_attach_queue(void *security); +int security_tun_dev_attach(struct sock *sk, void *security); +int security_tun_dev_open(void *security); #else /* CONFIG_SECURITY_NETWORK */ static inline int security_unix_stream_connect(struct sock *sock, @@ -2733,16 +2751,31 @@ static inline void security_secmark_refcount_dec(void) { } +static inline int security_tun_dev_alloc_security(void **security) +{ + return 0; +} + +static inline void security_tun_dev_free_security(void *security) +{ +} + static inline int security_tun_dev_create(void) { return 0; } -static inline void security_tun_dev_post_create(struct sock *sk) +static inline int security_tun_dev_attach_queue(void *security) +{ + return 0; +} + +static inline int security_tun_dev_attach(struct sock *sk, void *security) { + return 0; } -static inline int security_tun_dev_attach(struct sock *sk) +static inline int security_tun_dev_open(void *security) { return 0; } diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h index e0106d8581d..c65dee05991 100644 --- a/include/linux/smpboot.h +++ b/include/linux/smpboot.h @@ -14,6 +14,8 @@ struct smpboot_thread_data; * @thread_should_run: Check whether the thread should run or not. Called with * preemption disabled. 
* @thread_fn: The associated thread function + * @create: Optional setup function, called when the thread gets + * created (Not called from the thread context) + * @setup: Optional setup function, called when the thread gets * operational the first time * @cleanup: Optional cleanup function, called when the thread @@ -22,6 +24,7 @@ struct smpboot_thread_data; * parked (cpu offline) * @unpark: Optional unpark function, called when the thread is * unparked (cpu online) + * @selfparking: Thread is not parked by the park function. * @thread_comm: The base name of the thread */ struct smp_hotplug_thread { @@ -29,10 +32,12 @@ struct smp_hotplug_thread { struct list_head list; int (*thread_should_run)(unsigned int cpu); void (*thread_fn)(unsigned int cpu); + void (*create)(unsigned int cpu); void (*setup)(unsigned int cpu); void (*cleanup)(unsigned int cpu, bool online); void (*park)(unsigned int cpu); void (*unpark)(unsigned int cpu); + bool selfparking; const char *thread_comm; }; diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 6eb691b0835..04f4121a23a 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -151,30 +151,14 @@ void srcu_barrier(struct srcu_struct *sp); * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot * and while lockdep is disabled. * - * Note that if the CPU is in the idle loop from an RCU point of view - * (ie: that we are in the section between rcu_idle_enter() and - * rcu_idle_exit()) then srcu_read_lock_held() returns false even if - * the CPU did an srcu_read_lock(). The reason for this is that RCU - * ignores CPUs that are in such a section, considering these as in - * extended quiescent state, so such a CPU is effectively never in an - * RCU read-side critical section regardless of what RCU primitives it - * invokes. This state of affairs is required --- we need to keep an - * RCU-free window in idle where the CPU may possibly enter into low - * power mode. This way we can notice an extended quiescent state to - * other CPUs that started a grace period. Otherwise we would delay any - * grace period as long as we run in the idle task. - * - * Similarly, we avoid claiming an SRCU read lock held if the current - * CPU is offline. + * Note that SRCU is based on its own state machine and does not + * rely on normal RCU; it can be called from a CPU which, from an + * RCU point of view, is in the idle loop or offline.
*/ static inline int srcu_read_lock_held(struct srcu_struct *sp) { if (!debug_lockdep_rcu_enabled()) return 1; - if (rcu_is_cpu_idle()) - return 0; - if (!rcu_lockdep_current_cpu_online()) - return 0; return lock_is_held(&sp->dep_map); } @@ -236,8 +220,6 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) int retval = __srcu_read_lock(sp); rcu_lock_acquire(&(sp)->dep_map); - rcu_lockdep_assert(!rcu_is_cpu_idle(), - "srcu_read_lock() used illegally while idle"); return retval; } @@ -251,8 +233,6 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp) { - rcu_lockdep_assert(!rcu_is_cpu_idle(), - "srcu_read_unlock() used illegally while idle"); rcu_lock_release(&(sp)->dep_map); __srcu_read_unlock(sp, idx); } diff --git a/include/linux/tick.h b/include/linux/tick.h index 1a6567b4849..553272e6af5 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -8,6 +8,8 @@ #include <linux/clockchips.h> #include <linux/irqflags.h> +#include <linux/percpu.h> +#include <linux/hrtimer.h> #ifdef CONFIG_GENERIC_CLOCKEVENTS @@ -122,13 +124,26 @@ static inline int tick_oneshot_mode_active(void) { return 0; } #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ # ifdef CONFIG_NO_HZ +DECLARE_PER_CPU(struct tick_sched, tick_cpu_sched); + +static inline int tick_nohz_tick_stopped(void) +{ + return __this_cpu_read(tick_cpu_sched.tick_stopped); +} + extern void tick_nohz_idle_enter(void); extern void tick_nohz_idle_exit(void); extern void tick_nohz_irq_exit(void); extern ktime_t tick_nohz_get_sleep_length(void); extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); -# else + +# else /* !CONFIG_NO_HZ */ +static inline int tick_nohz_tick_stopped(void) +{ + return 0; +} + static inline void tick_nohz_idle_enter(void) { } static inline void tick_nohz_idle_exit(void) { } diff --git a/include/linux/time.h b/include/linux/time.h index 4d358e9d10f..a3ab6a814a9 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -115,8 +115,20 @@ static inline bool timespec_valid_strict(const struct timespec *ts) return true; } +extern bool persistent_clock_exist; + +#ifdef ALWAYS_USE_PERSISTENT_CLOCK +#define has_persistent_clock() true +#else +static inline bool has_persistent_clock(void) +{ + return persistent_clock_exist; +} +#endif + extern void read_persistent_clock(struct timespec *ts); extern void read_boot_clock(struct timespec *ts); +extern int persistent_clock_is_local; extern int update_persistent_clock(struct timespec now); void timekeeping_init(void); extern int timekeeping_suspended; @@ -158,6 +170,7 @@ extern int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue); extern unsigned int alarm_setitimer(unsigned int seconds); extern int do_getitimer(int which, struct itimerval *value); +extern int __getnstimeofday(struct timespec *tv); extern void getnstimeofday(struct timespec *tv); extern void getrawmonotonic(struct timespec *ts); extern void getnstime_raw_and_real(struct timespec *ts_raw, diff --git a/include/linux/tsacct_kern.h b/include/linux/tsacct_kern.h index 44893e5ec8f..3251965bf4c 100644 --- a/include/linux/tsacct_kern.h +++ b/include/linux/tsacct_kern.h @@ -23,12 +23,15 @@ static inline void bacct_add_tsk(struct user_namespace *user_ns, #ifdef CONFIG_TASK_XACCT extern void xacct_add_tsk(struct taskstats *stats, struct task_struct *p); extern void acct_update_integrals(struct task_struct 
*tsk); +extern void acct_account_cputime(struct task_struct *tsk); extern void acct_clear_integrals(struct task_struct *tsk); #else static inline void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) {} static inline void acct_update_integrals(struct task_struct *tsk) {} +static inline void acct_account_cputime(struct task_struct *tsk) +{} static inline void acct_clear_integrals(struct task_struct *tsk) {} #endif /* CONFIG_TASK_XACCT */ diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 4f628a6fc5b..02b83db8e2c 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -35,13 +35,20 @@ struct inode; # include <asm/uprobes.h> #endif +#define UPROBE_HANDLER_REMOVE 1 +#define UPROBE_HANDLER_MASK 1 + +enum uprobe_filter_ctx { + UPROBE_FILTER_REGISTER, + UPROBE_FILTER_UNREGISTER, + UPROBE_FILTER_MMAP, +}; + struct uprobe_consumer { int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs); - /* - * filter is optional; If a filter exists, handler is run - * if and only if filter returns true. - */ - bool (*filter)(struct uprobe_consumer *self, struct task_struct *task); + bool (*filter)(struct uprobe_consumer *self, + enum uprobe_filter_ctx ctx, + struct mm_struct *mm); struct uprobe_consumer *next; }; @@ -94,6 +101,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); extern bool __weak is_swbp_insn(uprobe_opcode_t *insn); extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); +extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool); extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); extern int uprobe_mmap(struct vm_area_struct *vma); extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end); @@ -117,6 +125,11 @@ uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) { return -ENOSYS; } +static inline int +uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool add) +{ + return -ENOSYS; +} static inline void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) { diff --git a/include/linux/usb.h b/include/linux/usb.h index 689b14b26c8..4d22d0f6167 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -357,6 +357,8 @@ struct usb_bus { int bandwidth_int_reqs; /* number of Interrupt requests */ int bandwidth_isoc_reqs; /* number of Isoc. requests */ + unsigned resuming_ports; /* bit array: resuming root-hub ports */ + #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) struct mon_bus *mon_bus; /* non-null when associated */ int monitored; /* non-zero when monitored */ diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 608050b2545..0a78df5f6cf 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -430,6 +430,9 @@ extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd); extern void usb_wakeup_notification(struct usb_device *hdev, unsigned int portnum); +extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum); +extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum); + /* The D0/D1 toggle bits ... 
USE WITH CAUTION (they're almost hcd-internal) */ #define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1) #define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep))) diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index bd45eb7bedc..0e5ac93bab1 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -33,6 +33,7 @@ struct usbnet { wait_queue_head_t *wait; struct mutex phy_mutex; unsigned char suspend_count; + unsigned char pkt_cnt, pkt_err; /* i/o info: pipes etc */ unsigned in, out; @@ -70,6 +71,7 @@ struct usbnet { # define EVENT_DEV_OPEN 7 # define EVENT_DEVICE_REPORT_IDLE 8 # define EVENT_NO_RUNTIME_PM 9 +# define EVENT_RX_KILL 10 }; static inline struct usb_driver *driver_of(struct usb_interface *intf) @@ -107,6 +109,7 @@ struct driver_info { */ #define FLAG_MULTI_PACKET 0x2000 #define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */ +#define FLAG_NOARP 0x8000 /* device can't do ARP */ /* init device ... can sleep, or cause probe() failure */ int (*bind)(struct usbnet *, struct usb_interface *); diff --git a/include/linux/vtime.h b/include/linux/vtime.h index ae30ab58431..71a5782d8c5 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -6,15 +6,46 @@ struct task_struct; #ifdef CONFIG_VIRT_CPU_ACCOUNTING extern void vtime_task_switch(struct task_struct *prev); extern void vtime_account_system(struct task_struct *tsk); -extern void vtime_account_system_irqsafe(struct task_struct *tsk); extern void vtime_account_idle(struct task_struct *tsk); extern void vtime_account_user(struct task_struct *tsk); -extern void vtime_account(struct task_struct *tsk); -#else +extern void vtime_account_irq_enter(struct task_struct *tsk); + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE +static inline bool vtime_accounting_enabled(void) { return true; } +#endif + +#else /* !CONFIG_VIRT_CPU_ACCOUNTING */ + static inline void vtime_task_switch(struct task_struct *prev) { } static inline void vtime_account_system(struct task_struct *tsk) { } -static inline void vtime_account_system_irqsafe(struct task_struct *tsk) { } -static inline void vtime_account(struct task_struct *tsk) { } +static inline void vtime_account_user(struct task_struct *tsk) { } +static inline void vtime_account_irq_enter(struct task_struct *tsk) { } +static inline bool vtime_accounting_enabled(void) { return false; } +#endif + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +extern void arch_vtime_task_switch(struct task_struct *tsk); +extern void vtime_account_irq_exit(struct task_struct *tsk); +extern bool vtime_accounting_enabled(void); +extern void vtime_user_enter(struct task_struct *tsk); +static inline void vtime_user_exit(struct task_struct *tsk) +{ + vtime_account_user(tsk); +} +extern void vtime_guest_enter(struct task_struct *tsk); +extern void vtime_guest_exit(struct task_struct *tsk); +extern void vtime_init_idle(struct task_struct *tsk); +#else +static inline void vtime_account_irq_exit(struct task_struct *tsk) +{ + /* On hard|softirq exit we always account to hard|softirq cputime */ + vtime_account_system(tsk); +} +static inline void vtime_user_enter(struct task_struct *tsk) { } +static inline void vtime_user_exit(struct task_struct *tsk) { } +static inline void vtime_guest_enter(struct task_struct *tsk) { } +static inline void vtime_guest_exit(struct task_struct *tsk) { } +static inline void vtime_init_idle(struct task_struct *tsk) { } #endif #ifdef CONFIG_IRQ_TIME_ACCOUNTING @@ -23,25 +54,15 @@ extern void irqtime_account_irq(struct task_struct 
*tsk); static inline void irqtime_account_irq(struct task_struct *tsk) { } #endif -static inline void vtime_account_irq_enter(struct task_struct *tsk) +static inline void account_irq_enter_time(struct task_struct *tsk) { - /* - * Hardirq can interrupt idle task anytime. So we need vtime_account() - * that performs the idle check in CONFIG_VIRT_CPU_ACCOUNTING. - * Softirq can also interrupt idle task directly if it calls - * local_bh_enable(). Such case probably don't exist but we never know. - * Ksoftirqd is not concerned because idle time is flushed on context - * switch. Softirqs in the end of hardirqs are also not a problem because - * the idle time is flushed on hardirq time already. - */ - vtime_account(tsk); + vtime_account_irq_enter(tsk); irqtime_account_irq(tsk); } -static inline void vtime_account_irq_exit(struct task_struct *tsk) +static inline void account_irq_exit_time(struct task_struct *tsk) { - /* On hard|softirq exit we always account to hard|softirq cputime */ - vtime_account_system(tsk); + vtime_account_irq_exit(tsk); irqtime_account_irq(tsk); } diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 2b58905d350..8afab27cdbc 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -27,7 +27,7 @@ void delayed_work_timer_fn(unsigned long __data); enum { WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */ - WORK_STRUCT_CWQ_BIT = 2, /* data points to cwq */ + WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */ WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */ #ifdef CONFIG_DEBUG_OBJECTS_WORK WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */ @@ -40,7 +40,7 @@ enum { WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT, - WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT, + WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT, WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, #ifdef CONFIG_DEBUG_OBJECTS_WORK WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, @@ -57,29 +57,36 @@ enum { /* special cpu IDs */ WORK_CPU_UNBOUND = NR_CPUS, - WORK_CPU_NONE = NR_CPUS + 1, - WORK_CPU_LAST = WORK_CPU_NONE, + WORK_CPU_END = NR_CPUS + 1, /* - * Reserve 7 bits off of cwq pointer w/ debugobjects turned - * off. This makes cwqs aligned to 256 bytes and allows 15 - * workqueue flush colors. + * Reserve 7 bits off of pwq pointer w/ debugobjects turned off. + * This makes pwqs aligned to 256 bytes and allows 15 workqueue + * flush colors. */ WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + WORK_STRUCT_COLOR_BITS, - /* data contains off-queue information when !WORK_STRUCT_CWQ */ + /* data contains off-queue information when !WORK_STRUCT_PWQ */ WORK_OFFQ_FLAG_BASE = WORK_STRUCT_FLAG_BITS, WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE), + /* + * When a work item is off queue, its high bits point to the last + * pool it was on. Cap at 31 bits and use the highest number to + * indicate that no pool is associated. + */ WORK_OFFQ_FLAG_BITS = 1, - WORK_OFFQ_CPU_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS, + WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS, + WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT, + WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? 
WORK_OFFQ_LEFT : 31, + WORK_OFFQ_POOL_NONE = (1LU << WORK_OFFQ_POOL_BITS) - 1, /* convenience constants */ WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1, WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, - WORK_STRUCT_NO_CPU = (unsigned long)WORK_CPU_NONE << WORK_OFFQ_CPU_SHIFT, + WORK_STRUCT_NO_POOL = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT, /* bit mask for work_busy() return values */ WORK_BUSY_PENDING = 1 << 0, @@ -95,13 +102,16 @@ struct work_struct { #endif }; -#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU) +#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL) #define WORK_DATA_STATIC_INIT() \ - ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC) + ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC) struct delayed_work { struct work_struct work; struct timer_list timer; + + /* target workqueue and CPU ->timer uses to queue ->work */ + struct workqueue_struct *wq; int cpu; }; @@ -426,7 +436,6 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork); extern void workqueue_set_max_active(struct workqueue_struct *wq, int max_active); extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq); -extern unsigned int work_cpu(struct work_struct *work); extern unsigned int work_busy(struct work_struct *work); /* diff --git a/include/net/ip.h b/include/net/ip.h index 0707fb9551a..a68f838a132 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -143,6 +143,8 @@ static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4) extern int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); +extern void ip4_datagram_release_cb(struct sock *sk); + struct ip_reply_arg { struct kvec iov[1]; int flags; diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index d8f5b9f5216..e98aeb3da03 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -31,6 +31,8 @@ extern void nf_conntrack_cleanup(struct net *net); extern int nf_conntrack_proto_init(struct net *net); extern void nf_conntrack_proto_fini(struct net *net); +extern void nf_conntrack_cleanup_end(void); + extern bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff, diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index a1d83cc8bf8..923cb20051e 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -71,6 +71,7 @@ struct netns_ct { struct hlist_head *expect_hash; struct hlist_nulls_head unconfirmed; struct hlist_nulls_head dying; + struct hlist_nulls_head tmpl; struct ip_conntrack_stat __percpu *stat; struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb; struct nf_exp_event_notifier __rcu *nf_expect_event_cb; diff --git a/include/net/netns/x_tables.h b/include/net/netns/x_tables.h index 591db7d657a..c24060ee411 100644 --- a/include/net/netns/x_tables.h +++ b/include/net/netns/x_tables.h @@ -8,6 +8,7 @@ struct ebt_table; struct netns_xt { struct list_head tables[NFPROTO_NUMPROTO]; + bool notrack_deprecated_warning; #if defined(CONFIG_BRIDGE_NF_EBTABLES) || \ defined(CONFIG_BRIDGE_NF_EBTABLES_MODULE) struct ebt_table *broute_table; diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index 498433dd067..938b7fd1120 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -34,17 +34,17 @@ extern int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); -extern int datagram_recv_ctl(struct sock *sk, - struct msghdr 
*msg, - struct sk_buff *skb); - -extern int datagram_send_ctl(struct net *net, - struct sock *sk, - struct msghdr *msg, - struct flowi6 *fl6, - struct ipv6_txoptions *opt, - int *hlimit, int *tclass, - int *dontfrag); +extern int ip6_datagram_recv_ctl(struct sock *sk, + struct msghdr *msg, + struct sk_buff *skb); + +extern int ip6_datagram_send_ctl(struct net *net, + struct sock *sk, + struct msghdr *msg, + struct flowi6 *fl6, + struct ipv6_txoptions *opt, + int *hlimit, int *tclass, + int *dontfrag); #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) diff --git a/include/sound/cs4271.h b/include/sound/cs4271.h index 6d9e15ed1dc..dd8c48d14ed 100644 --- a/include/sound/cs4271.h +++ b/include/sound/cs4271.h @@ -19,7 +19,7 @@ struct cs4271_platform_data { int gpio_nreset; /* GPIO driving Reset pin, if any */ - int amutec_eq_bmutec:1; /* flag to enable AMUTEC=BMUTEC */ + bool amutec_eq_bmutec; /* flag to enable AMUTEC=BMUTEC */ }; #endif /* __CS4271_H */ diff --git a/include/sound/soc.h b/include/sound/soc.h index 769e27c774a..bc56738cb10 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -58,8 +58,9 @@ .info = snd_soc_info_volsw_range, .get = snd_soc_get_volsw_range, \ .put = snd_soc_put_volsw_range, \ .private_value = (unsigned long)&(struct soc_mixer_control) \ - {.reg = xreg, .shift = xshift, .min = xmin,\ - .max = xmax, .platform_max = xmax, .invert = xinvert} } + {.reg = xreg, .rreg = xreg, .shift = xshift, \ + .rshift = xshift, .min = xmin, .max = xmax, \ + .platform_max = xmax, .invert = xinvert} } #define SOC_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\ @@ -88,8 +89,9 @@ .info = snd_soc_info_volsw_range, \ .get = snd_soc_get_volsw_range, .put = snd_soc_put_volsw_range, \ .private_value = (unsigned long)&(struct soc_mixer_control) \ - {.reg = xreg, .shift = xshift, .min = xmin,\ - .max = xmax, .platform_max = xmax, .invert = xinvert} } + {.reg = xreg, .rreg = xreg, .shift = xshift, \ + .rshift = xshift, .min = xmin, .max = xmax, \ + .platform_max = xmax, .invert = xinvert} } #define SOC_DOUBLE(xname, reg, shift_left, shift_right, max, invert) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\ .info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \ diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 7cae2360221..663e34a5383 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -174,6 +174,7 @@ typedef unsigned __bitwise__ sense_reason_t; enum tcm_sense_reason_table { #define R(x) (__force sense_reason_t )(x) + TCM_NO_SENSE = R(0x00), TCM_NON_EXISTENT_LUN = R(0x01), TCM_UNSUPPORTED_SCSI_OPCODE = R(0x02), TCM_INCORRECT_AMOUNT_OF_DATA = R(0x03), diff --git a/include/trace/events/ras.h b/include/trace/events/ras.h new file mode 100644 index 00000000000..88b87838379 --- /dev/null +++ b/include/trace/events/ras.h @@ -0,0 +1,77 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ras + +#if !defined(_TRACE_AER_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_AER_H + +#include <linux/tracepoint.h> +#include <linux/edac.h> + + +/* + * PCIe AER Trace event + * + * These events are generated when hardware detects a corrected or + * uncorrected event on a PCIe device. The event report has + * the following structure: + * + * char * dev_name - The name of the slot where the device resides + * ([domain:]bus:device.function). 
+ * u32 status - Either the correctable or uncorrectable register + * indicating what error or errors have been seen + * u8 severity - error severity 0:NONFATAL 1:FATAL 2:CORRECTED + */ + +#define aer_correctable_errors \ + {BIT(0), "Receiver Error"}, \ + {BIT(6), "Bad TLP"}, \ + {BIT(7), "Bad DLLP"}, \ + {BIT(8), "RELAY_NUM Rollover"}, \ + {BIT(12), "Replay Timer Timeout"}, \ + {BIT(13), "Advisory Non-Fatal"} + +#define aer_uncorrectable_errors \ + {BIT(4), "Data Link Protocol"}, \ + {BIT(12), "Poisoned TLP"}, \ + {BIT(13), "Flow Control Protocol"}, \ + {BIT(14), "Completion Timeout"}, \ + {BIT(15), "Completer Abort"}, \ + {BIT(16), "Unexpected Completion"}, \ + {BIT(17), "Receiver Overflow"}, \ + {BIT(18), "Malformed TLP"}, \ + {BIT(19), "ECRC"}, \ + {BIT(20), "Unsupported Request"} + +TRACE_EVENT(aer_event, + TP_PROTO(const char *dev_name, + const u32 status, + const u8 severity), + + TP_ARGS(dev_name, status, severity), + + TP_STRUCT__entry( + __string( dev_name, dev_name ) + __field( u32, status ) + __field( u8, severity ) + ), + + TP_fast_assign( + __assign_str(dev_name, dev_name); + __entry->status = status; + __entry->severity = severity; + ), + + TP_printk("%s PCIe Bus Error: severity=%s, %s\n", + __get_str(dev_name), + __entry->severity == HW_EVENT_ERR_CORRECTED ? "Corrected" : + __entry->severity == HW_EVENT_ERR_FATAL ? + "Fatal" : "Uncorrected", + __entry->severity == HW_EVENT_ERR_CORRECTED ? + __print_flags(__entry->status, "|", aer_correctable_errors) : + __print_flags(__entry->status, "|", aer_uncorrectable_errors)) +); + +#endif /* _TRACE_AER_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index d4f559b1ec3..1918e832da4 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -44,8 +44,10 @@ TRACE_EVENT(rcu_utilization, * of a new grace period or the end of an old grace period ("cpustart" * and "cpuend", respectively), a CPU passing through a quiescent * state ("cpuqs"), a CPU coming online or going offline ("cpuonl" - * and "cpuofl", respectively), and a CPU being kicked for being too - * long in dyntick-idle mode ("kick"). + * and "cpuofl", respectively), a CPU being kicked for being too + * long in dyntick-idle mode ("kick"), a CPU accelerating its new + * callbacks to RCU_NEXT_READY_TAIL ("AccReadyCB"), and a CPU + * accelerating its new callbacks to RCU_WAIT_TAIL ("AccWaitCB"). 
*/ TRACE_EVENT(rcu_grace_period, @@ -393,7 +395,7 @@ TRACE_EVENT(rcu_kfree_callback, */ TRACE_EVENT(rcu_batch_start, - TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit), + TP_PROTO(char *rcuname, long qlen_lazy, long qlen, long blimit), TP_ARGS(rcuname, qlen_lazy, qlen, blimit), @@ -401,7 +403,7 @@ TRACE_EVENT(rcu_batch_start, __field(char *, rcuname) __field(long, qlen_lazy) __field(long, qlen) - __field(int, blimit) + __field(long, blimit) ), TP_fast_assign( @@ -411,7 +413,7 @@ TRACE_EVENT(rcu_batch_start, __entry->blimit = blimit; ), - TP_printk("%s CBs=%ld/%ld bl=%d", + TP_printk("%s CBs=%ld/%ld bl=%ld", __entry->rcuname, __entry->qlen_lazy, __entry->qlen, __entry->blimit) ); @@ -523,22 +525,30 @@ TRACE_EVENT(rcu_batch_end, */ TRACE_EVENT(rcu_torture_read, - TP_PROTO(char *rcutorturename, struct rcu_head *rhp), + TP_PROTO(char *rcutorturename, struct rcu_head *rhp, + unsigned long secs, unsigned long c_old, unsigned long c), - TP_ARGS(rcutorturename, rhp), + TP_ARGS(rcutorturename, rhp, secs, c_old, c), TP_STRUCT__entry( __field(char *, rcutorturename) __field(struct rcu_head *, rhp) + __field(unsigned long, secs) + __field(unsigned long, c_old) + __field(unsigned long, c) ), TP_fast_assign( __entry->rcutorturename = rcutorturename; __entry->rhp = rhp; + __entry->secs = secs; + __entry->c_old = c_old; + __entry->c = c; ), - TP_printk("%s torture read %p", - __entry->rcutorturename, __entry->rhp) + TP_printk("%s torture read %p %luus c: %lu %lu", + __entry->rcutorturename, __entry->rhp, + __entry->secs, __entry->c_old, __entry->c) ); /* @@ -608,7 +618,8 @@ TRACE_EVENT(rcu_barrier, #define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0) #define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \ do { } while (0) -#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0) +#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ + do { } while (0) #define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0) #endif /* #else #ifdef CONFIG_RCU_TRACE */ diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h index f28d1b65f17..bf0e18ba6cf 100644 --- a/include/trace/events/workqueue.h +++ b/include/trace/events/workqueue.h @@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(workqueue_work, /** * workqueue_queue_work - called when a work gets queued * @req_cpu: the requested cpu - * @cwq: pointer to struct cpu_workqueue_struct + * @pwq: pointer to struct pool_workqueue * @work: pointer to struct work_struct * * This event occurs when a work is queued immediately or once a @@ -36,10 +36,10 @@ DECLARE_EVENT_CLASS(workqueue_work, */ TRACE_EVENT(workqueue_queue_work, - TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq, + TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq, struct work_struct *work), - TP_ARGS(req_cpu, cwq, work), + TP_ARGS(req_cpu, pwq, work), TP_STRUCT__entry( __field( void *, work ) @@ -52,9 +52,9 @@ TRACE_EVENT(workqueue_queue_work, TP_fast_assign( __entry->work = work; __entry->function = work->func; - __entry->workqueue = cwq->wq; + __entry->workqueue = pwq->wq; __entry->req_cpu = req_cpu; - __entry->cpu = cwq->pool->gcwq->cpu; + __entry->cpu = pwq->pool->cpu; ), TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u", diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h index e7f52c33400..d5844122ff3 100644 --- a/include/uapi/drm/exynos_drm.h +++ b/include/uapi/drm/exynos_drm.h @@ -6,24 +6,10 @@ * Joonyoung Shim 
<jy0922.shim@samsung.com> * Seung-Woo Kim <sw0312.kim@samsung.com> * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. */ #ifndef _UAPI_EXYNOS_DRM_H_ @@ -185,6 +171,8 @@ enum drm_exynos_flip { EXYNOS_DRM_FLIP_NONE = (0 << 0), EXYNOS_DRM_FLIP_VERTICAL = (1 << 0), EXYNOS_DRM_FLIP_HORIZONTAL = (1 << 1), + EXYNOS_DRM_FLIP_BOTH = EXYNOS_DRM_FLIP_VERTICAL | + EXYNOS_DRM_FLIP_HORIZONTAL, }; enum drm_exynos_degree { diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index 76352ac45f2..9f096f1c090 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -26,7 +26,6 @@ #include <linux/types.h> #include <linux/elf-em.h> -#include <linux/ptrace.h> /* The netlink messages for the audit system is divided into blocks: * 1000 - 1099 are for commanding the audit system @@ -106,6 +105,7 @@ #define AUDIT_MMAP 1323 /* Record showing descriptor and flags in mmap */ #define AUDIT_NETFILTER_PKT 1324 /* Packets traversing netfilter chains */ #define AUDIT_NETFILTER_CFG 1325 /* Netfilter chain modifications */ +#define AUDIT_SECCOMP 1326 /* Secure Computing event */ #define AUDIT_AVC 1400 /* SE Linux avc denial or grant */ #define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */ diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h index 77cdba9df27..bb991dfe134 100644 --- a/include/uapi/linux/auto_fs.h +++ b/include/uapi/linux/auto_fs.h @@ -28,25 +28,16 @@ #define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION /* - * Architectures where both 32- and 64-bit binaries can be executed - * on 64-bit kernels need this. This keeps the structure format - * uniform, and makes sure the wait_queue_token isn't too big to be - * passed back down to the kernel. - * - * This assumes that on these architectures: - * mode 32 bit 64 bit - * ------------------------- - * int 32 bit 32 bit - * long 32 bit 64 bit - * - * If so, 32-bit user-space code should be backwards compatible. + * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed + * back to the kernel via ioctl from userspace. 
On architectures where 32- and + * 64-bit userspace binaries can be executed it's important that the size of + * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we + * do not break the binary ABI interface by changing the structure size. */ - -#if defined(__sparc__) || defined(__mips__) || defined(__x86_64__) \ - || defined(__powerpc__) || defined(__s390__) -typedef unsigned int autofs_wqt_t; -#else +#if defined(__ia64__) || defined(__alpha__) /* pure 64bit architectures */ typedef unsigned long autofs_wqt_t; +#else +typedef unsigned int autofs_wqt_t; #endif /* Packet types */ diff --git a/include/uapi/linux/msg.h b/include/uapi/linux/msg.h index 78dbd2f996a..22d95c6854e 100644 --- a/include/uapi/linux/msg.h +++ b/include/uapi/linux/msg.h @@ -10,6 +10,7 @@ /* msgrcv options */ #define MSG_NOERROR 010000 /* no error if message is too big */ #define MSG_EXCEPT 020000 /* recv any msg except of specified type.*/ +#define MSG_COPY 040000 /* copy (not remove) all queue messages */ /* Obsolete, used only for backwards compatibility and libc5 compiles */ struct msqid_ds { diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 4f63c05d27c..9fa9c622a7f 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -579,7 +579,8 @@ enum perf_event_type { * { u32 size; * char data[size];}&& PERF_SAMPLE_RAW * - * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK + * { u64 nr; + * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK * * { u64 abi; # enum perf_sample_regs_abi * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index 78f99d97475..2c6c85f18ea 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -50,7 +50,8 @@ #define PORT_LPC3220 22 /* NXP LPC32xx SoC "Standard" UART */ #define PORT_8250_CIR 23 /* CIR infrared port, has its own driver */ #define PORT_XR17V35X 24 /* Exar XR17V35x UARTs */ -#define PORT_MAX_8250 24 /* max port ID */ +#define PORT_BRCM_TRUMANAGE 24 +#define PORT_MAX_8250 25 /* max port ID */ /* * ARM specific type numbers. These are not currently guaranteed diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 50598472dc4..f738e25377f 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -152,6 +152,12 @@ #define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0)) #define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1)) +/* + * Interface status, Figure 9-5 USB 3.0 spec + */ +#define USB_INTRF_STAT_FUNC_RW_CAP 1 +#define USB_INTRF_STAT_FUNC_RW 2 + #define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */ /* Bit array elements as returned by the USB_REQ_GET_STATUS request. */ |
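For illustration, the MSG_COPY flag added to include/uapi/linux/msg.h above can be exercised from userspace roughly as in the sketch below. The peek_message() helper and its names are hypothetical; what the flag itself requires is a kernel built with CONFIG_CHECKPOINT_RESTORE, IPC_NOWAIT passed alongside MSG_COPY, and msgtyp reinterpreted as the position of the message to copy, so the message is read without being removed from the queue.

#include <stdio.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>

#ifndef MSG_COPY
#define MSG_COPY 040000         /* matches the value added above */
#endif

struct peek_buf {
        long mtype;
        char mtext[128];
};

/* Copy the message at queue position 'index' without dequeueing it. */
static int peek_message(int msqid, long index)
{
        struct peek_buf m;
        ssize_t n;

        n = msgrcv(msqid, &m, sizeof(m.mtext), index, MSG_COPY | IPC_NOWAIT);
        if (n < 0) {
                perror("msgrcv(MSG_COPY)");
                return -1;
        }
        printf("msg[%ld]: type=%ld len=%zd\n", index, m.mtype, n);
        return 0;
}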