author     Paul Mundt <lethal@linux-sh.org>    2012-01-09 09:56:37 +0900
committer  Paul Mundt <lethal@linux-sh.org>    2012-01-09 09:56:37 +0900
commit     04cf399640b7acfa9abe2eb7900cd934db8af697 (patch)
tree       f9a055f2f0170550f5f0b0507b06ffce8d98945d /include
parent     17f0056e6a2f3d1818801705f5e12b71217bf4ef (diff)
parent     a0e86bd4252519321b0d102dc4ed90557aa7bee9 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux into rmobile-latest
Conflicts:
arch/arm/mach-shmobile/Makefile
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'include')
261 files changed, 7409 insertions, 2224 deletions
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h index 62ce6823c0f..9a62937c56c 100644 --- a/include/asm-generic/cputime.h +++ b/include/asm-generic/cputime.h @@ -4,70 +4,66 @@ #include <linux/time.h> #include <linux/jiffies.h> -typedef unsigned long cputime_t; +typedef unsigned long __nocast cputime_t; -#define cputime_zero (0UL) #define cputime_one_jiffy jiffies_to_cputime(1) -#define cputime_max ((~0UL >> 1) - 1) -#define cputime_add(__a, __b) ((__a) + (__b)) -#define cputime_sub(__a, __b) ((__a) - (__b)) -#define cputime_div(__a, __n) ((__a) / (__n)) -#define cputime_halve(__a) ((__a) >> 1) -#define cputime_eq(__a, __b) ((__a) == (__b)) -#define cputime_gt(__a, __b) ((__a) > (__b)) -#define cputime_ge(__a, __b) ((__a) >= (__b)) -#define cputime_lt(__a, __b) ((__a) < (__b)) -#define cputime_le(__a, __b) ((__a) <= (__b)) -#define cputime_to_jiffies(__ct) (__ct) +#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct) #define cputime_to_scaled(__ct) (__ct) -#define jiffies_to_cputime(__hz) (__hz) +#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz) -typedef u64 cputime64_t; +typedef u64 __nocast cputime64_t; -#define cputime64_zero (0ULL) -#define cputime64_add(__a, __b) ((__a) + (__b)) -#define cputime64_sub(__a, __b) ((__a) - (__b)) -#define cputime64_to_jiffies64(__ct) (__ct) -#define jiffies64_to_cputime64(__jif) (__jif) -#define cputime_to_cputime64(__ct) ((u64) __ct) -#define cputime64_gt(__a, __b) ((__a) > (__b)) +#define cputime64_to_jiffies64(__ct) (__force u64)(__ct) +#define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif) -#define nsecs_to_cputime64(__ct) nsecs_to_jiffies64(__ct) +#define nsecs_to_cputime64(__ct) \ + jiffies64_to_cputime64(nsecs_to_jiffies64(__ct)) /* * Convert cputime to microseconds and back. */ -#define cputime_to_usecs(__ct) jiffies_to_usecs(__ct) -#define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs) +#define cputime_to_usecs(__ct) \ + jiffies_to_usecs(cputime_to_jiffies(__ct)) +#define usecs_to_cputime(__usec) \ + jiffies_to_cputime(usecs_to_jiffies(__usec)) +#define usecs_to_cputime64(__usec) \ + jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000)) /* * Convert cputime to seconds and back. */ -#define cputime_to_secs(jif) ((jif) / HZ) -#define secs_to_cputime(sec) ((sec) * HZ) +#define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ) +#define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ) /* * Convert cputime to timespec and back. */ -#define timespec_to_cputime(__val) timespec_to_jiffies(__val) -#define cputime_to_timespec(__ct,__val) jiffies_to_timespec(__ct,__val) +#define timespec_to_cputime(__val) \ + jiffies_to_cputime(timespec_to_jiffies(__val)) +#define cputime_to_timespec(__ct,__val) \ + jiffies_to_timespec(cputime_to_jiffies(__ct),__val) /* * Convert cputime to timeval and back. */ -#define timeval_to_cputime(__val) timeval_to_jiffies(__val) -#define cputime_to_timeval(__ct,__val) jiffies_to_timeval(__ct,__val) +#define timeval_to_cputime(__val) \ + jiffies_to_cputime(timeval_to_jiffies(__val)) +#define cputime_to_timeval(__ct,__val) \ + jiffies_to_timeval(cputime_to_jiffies(__ct),__val) /* * Convert cputime to clock and back. */ -#define cputime_to_clock_t(__ct) jiffies_to_clock_t(__ct) -#define clock_t_to_cputime(__x) clock_t_to_jiffies(__x) +#define cputime_to_clock_t(__ct) \ + jiffies_to_clock_t(cputime_to_jiffies(__ct)) +#define clock_t_to_cputime(__x) \ + jiffies_to_cputime(clock_t_to_jiffies(__x)) /* * Convert cputime64 to clock. 
*/ -#define cputime64_to_clock_t(__ct) jiffies_64_to_clock_t(__ct) +#define cputime64_to_clock_t(__ct) \ + jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct)) #endif diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index 8c8621097fa..d466c8d8826 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h @@ -4,6 +4,7 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> +#include <linux/of.h> #ifdef CONFIG_GPIOLIB @@ -128,13 +129,14 @@ struct gpio_chip { */ struct device_node *of_node; int of_gpio_n_cells; - int (*of_xlate)(struct gpio_chip *gc, struct device_node *np, - const void *gpio_spec, u32 *flags); + int (*of_xlate)(struct gpio_chip *gc, + const struct of_phandle_args *gpiospec, u32 *flags); #endif }; extern const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset); +extern struct gpio_chip *gpio_to_chip(unsigned gpio); extern int __must_check gpiochip_reserve(int start, int ngpio); /* add/remove chips */ diff --git a/include/asm-generic/socket.h b/include/asm-generic/socket.h index 9a6115e7cf6..49c1704173e 100644 --- a/include/asm-generic/socket.h +++ b/include/asm-generic/socket.h @@ -64,4 +64,7 @@ #define SO_DOMAIN 39 #define SO_RXQ_OVFL 40 + +#define SO_WIFI_STATUS 41 +#define SCM_WIFI_STATUS SO_WIFI_STATUS #endif /* __ASM_GENERIC_SOCKET_H */ diff --git a/include/asm-generic/types.h b/include/asm-generic/types.h index 7a0f69e6c61..bd39806013b 100644 --- a/include/asm-generic/types.h +++ b/include/asm-generic/types.h @@ -6,10 +6,4 @@ */ #include <asm-generic/int-ll64.h> -#ifndef __ASSEMBLY__ - -typedef unsigned short umode_t; - -#endif /* __ASSEMBLY__ */ - #endif /* _ASM_GENERIC_TYPES_H */ diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h index f4c38d8c667..2292d1af9d7 100644 --- a/include/asm-generic/unistd.h +++ b/include/asm-generic/unistd.h @@ -685,9 +685,15 @@ __SYSCALL(__NR_syncfs, sys_syncfs) __SYSCALL(__NR_setns, sys_setns) #define __NR_sendmmsg 269 __SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg) +#define __NR_process_vm_readv 270 +__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \ + compat_sys_process_vm_readv) +#define __NR_process_vm_writev 271 +__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \ + compat_sys_process_vm_writev) #undef __NR_syscalls -#define __NR_syscalls 270 +#define __NR_syscalls 272 /* * All syscalls below here should go away really, diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 1f9e9516e2b..e8acca892af 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -820,7 +820,7 @@ struct drm_driver { * Specifically, the timestamp in @vblank_time should correspond as * closely as possible to the time when the first video scanline of * the video frame after the end of VBLANK will start scanning out, - * the time immmediately after end of the VBLANK interval. If the + * the time immediately after end of the VBLANK interval. If the * @crtc is currently inside VBLANK, this will be a time in the future. * If the @crtc is currently scanning out a frame, this will be the * past start time of the current scanout. This is meant to adhere diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h index d30bedfeb7e..ddd46db65b5 100644 --- a/include/drm/drm_mode.h +++ b/include/drm/drm_mode.h @@ -235,6 +235,8 @@ struct drm_mode_fb_cmd { #define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 #define DRM_MODE_FB_DIRTY_FLAGS 0x03 +#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256 + /* * Mark a region of a framebuffer as dirty. 
* diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index f81676f1b31..14b6cd02228 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -182,8 +182,11 @@ {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x675B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x675D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ @@ -195,8 +198,18 @@ {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x677B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ @@ -238,6 +251,7 @@ {0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68fa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \ {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ @@ -480,6 +494,8 @@ {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ {0x1002, 
0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ {0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ @@ -494,6 +510,8 @@ {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0, 0, 0} #define r128_PCI_IDS \ diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h index 1d161cb3aca..12050434d57 100644 --- a/include/drm/exynos_drm.h +++ b/include/drm/exynos_drm.h @@ -32,17 +32,16 @@ /** * User-desired buffer creation information structure. * - * @size: requested size for the object. + * @size: user-desired memory allocation size. * - this size value would be page-aligned internally. * @flags: user request for setting memory type or cache attributes. - * @handle: returned handle for the object. - * @pad: just padding to be 64-bit aligned. + * @handle: returned a handle to created gem object. + * - this handle will be set by gem module of kernel side. */ struct drm_exynos_gem_create { - unsigned int size; + uint64_t size; unsigned int flags; unsigned int handle; - unsigned int pad; }; /** diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index b65be6054a1..be94be6d6f1 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h @@ -874,6 +874,10 @@ struct drm_radeon_gem_pwrite { #define RADEON_CHUNK_ID_RELOCS 0x01 #define RADEON_CHUNK_ID_IB 0x02 +#define RADEON_CHUNK_ID_FLAGS 0x03 + +/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */ +#define RADEON_CS_KEEP_TILING_FLAGS 0x01 struct drm_radeon_cs_chunk { uint32_t chunk_id; diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 619b5657af7..c94e71781b7 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -185,6 +185,7 @@ header-y += if_pppol2tp.h header-y += if_pppox.h header-y += if_slip.h header-y += if_strip.h +header-y += if_team.h header-y += if_tr.h header-y += if_tun.h header-y += if_tunnel.h @@ -194,7 +195,9 @@ header-y += igmp.h header-y += in.h header-y += in6.h header-y += in_route.h +header-y += sock_diag.h header-y += inet_diag.h +header-y += unix_diag.h header-y += inotify.h header-y += input.h header-y += ioctl.h diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index fcbbe71a3cc..724c69c40bb 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -16,6 +16,7 @@ #include <linux/clk.h> #include <linux/device.h> +#include <linux/mod_devicetable.h> #include <linux/err.h> #include <linux/resource.h> #include <linux/regulator/consumer.h> @@ -35,12 +36,6 @@ struct amba_device { unsigned int irq[AMBA_NR_IRQS]; }; -struct amba_id { - unsigned int id; - unsigned int mask; - void *data; -}; - struct amba_driver { struct device_driver drv; int 
(*probe)(struct amba_device *, const struct amba_id *); diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h index 4ce98f54186..572f637299c 100644 --- a/include/linux/amba/pl022.h +++ b/include/linux/amba/pl022.h @@ -238,6 +238,9 @@ struct dma_chan; * @enable_dma: if true enables DMA driven transfers. * @dma_rx_param: parameter to locate an RX DMA channel. * @dma_tx_param: parameter to locate a TX DMA channel. + * @autosuspend_delay: delay in ms following transfer completion before the + * runtime power management system suspends the device. A setting of 0 + * indicates no delay and the device will be suspended immediately. */ struct pl022_ssp_controller { u16 bus_id; @@ -246,6 +249,7 @@ struct pl022_ssp_controller { bool (*dma_filter)(struct dma_chan *chan, void *filter_param); void *dma_rx_param; void *dma_tx_param; + int autosuspend_delay; }; /** diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index 49a83ca900b..f4ff882cb2d 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -445,16 +445,6 @@ void vcc_insert_socket(struct sock *sk); void atm_dev_release_vccs(struct atm_dev *dev); -/* - * This is approximately the algorithm used by alloc_skb. - * - */ - -static inline int atm_guess_pdu2truesize(int size) -{ - return SKB_DATA_ALIGN(size) + sizeof(struct skb_shared_info); -} - static inline void atm_force_charge(struct atm_vcc *vcc,int truesize) { diff --git a/include/linux/audit.h b/include/linux/audit.h index 2f81c6f3b63..426ab9f4dd8 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -468,13 +468,13 @@ extern int audit_set_loginuid(struct task_struct *task, uid_t loginuid); #define audit_get_sessionid(t) ((t)->sessionid) extern void audit_log_task_context(struct audit_buffer *ab); extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); -extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode); +extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); extern int audit_bprm(struct linux_binprm *bprm); extern void audit_socketcall(int nargs, unsigned long *args); extern int audit_sockaddr(int len, void *addr); extern void __audit_fd_pair(int fd1, int fd2); extern int audit_set_macxattr(const char *name); -extern void __audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr); +extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr); extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout); extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification); extern void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat); @@ -494,12 +494,12 @@ static inline void audit_fd_pair(int fd1, int fd2) if (unlikely(!audit_dummy_context())) __audit_fd_pair(fd1, fd2); } -static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode) +static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode) { if (unlikely(!audit_dummy_context())) __audit_ipc_set_perm(qbytes, uid, gid, mode); } -static inline void audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr) +static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr) { if (unlikely(!audit_dummy_context())) __audit_mq_open(oflag, mode, attr); diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 4d4b59de946..f4b8346b1a3 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -205,61 +205,82 @@ struct 
bcma_bus { struct ssb_sprom sprom; }; -extern inline u32 bcma_read8(struct bcma_device *core, u16 offset) +static inline u32 bcma_read8(struct bcma_device *core, u16 offset) { return core->bus->ops->read8(core, offset); } -extern inline u32 bcma_read16(struct bcma_device *core, u16 offset) +static inline u32 bcma_read16(struct bcma_device *core, u16 offset) { return core->bus->ops->read16(core, offset); } -extern inline u32 bcma_read32(struct bcma_device *core, u16 offset) +static inline u32 bcma_read32(struct bcma_device *core, u16 offset) { return core->bus->ops->read32(core, offset); } -extern inline +static inline void bcma_write8(struct bcma_device *core, u16 offset, u32 value) { core->bus->ops->write8(core, offset, value); } -extern inline +static inline void bcma_write16(struct bcma_device *core, u16 offset, u32 value) { core->bus->ops->write16(core, offset, value); } -extern inline +static inline void bcma_write32(struct bcma_device *core, u16 offset, u32 value) { core->bus->ops->write32(core, offset, value); } #ifdef CONFIG_BCMA_BLOCKIO -extern inline void bcma_block_read(struct bcma_device *core, void *buffer, +static inline void bcma_block_read(struct bcma_device *core, void *buffer, size_t count, u16 offset, u8 reg_width) { core->bus->ops->block_read(core, buffer, count, offset, reg_width); } -extern inline void bcma_block_write(struct bcma_device *core, const void *buffer, - size_t count, u16 offset, u8 reg_width) +static inline void bcma_block_write(struct bcma_device *core, + const void *buffer, size_t count, + u16 offset, u8 reg_width) { core->bus->ops->block_write(core, buffer, count, offset, reg_width); } #endif -extern inline u32 bcma_aread32(struct bcma_device *core, u16 offset) +static inline u32 bcma_aread32(struct bcma_device *core, u16 offset) { return core->bus->ops->aread32(core, offset); } -extern inline +static inline void bcma_awrite32(struct bcma_device *core, u16 offset, u32 value) { core->bus->ops->awrite32(core, offset, value); } -#define bcma_mask32(cc, offset, mask) \ - bcma_write32(cc, offset, bcma_read32(cc, offset) & (mask)) -#define bcma_set32(cc, offset, set) \ - bcma_write32(cc, offset, bcma_read32(cc, offset) | (set)) -#define bcma_maskset32(cc, offset, mask, set) \ - bcma_write32(cc, offset, (bcma_read32(cc, offset) & (mask)) | (set)) +static inline void bcma_mask32(struct bcma_device *cc, u16 offset, u32 mask) +{ + bcma_write32(cc, offset, bcma_read32(cc, offset) & mask); +} +static inline void bcma_set32(struct bcma_device *cc, u16 offset, u32 set) +{ + bcma_write32(cc, offset, bcma_read32(cc, offset) | set); +} +static inline void bcma_maskset32(struct bcma_device *cc, + u16 offset, u32 mask, u32 set) +{ + bcma_write32(cc, offset, (bcma_read32(cc, offset) & mask) | set); +} +static inline void bcma_mask16(struct bcma_device *cc, u16 offset, u16 mask) +{ + bcma_write16(cc, offset, bcma_read16(cc, offset) & mask); +} +static inline void bcma_set16(struct bcma_device *cc, u16 offset, u16 set) +{ + bcma_write16(cc, offset, bcma_read16(cc, offset) | set); +} +static inline void bcma_maskset16(struct bcma_device *cc, + u16 offset, u16 mask, u16 set) +{ + bcma_write16(cc, offset, (bcma_read16(cc, offset) & mask) | set); +} extern bool bcma_core_is_enabled(struct bcma_device *core); extern void bcma_core_disable(struct bcma_device *core, u32 flags); diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index 1526d965ed0..a33086a7530 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ 
b/include/linux/bcma/bcma_driver_chipcommon.h @@ -203,6 +203,7 @@ #define BCMA_CC_PMU_CTL 0x0600 /* PMU control */ #define BCMA_CC_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */ #define BCMA_CC_PMU_CTL_ILP_DIV_SHIFT 16 +#define BCMA_CC_PMU_CTL_PLL_UPD 0x00000400 #define BCMA_CC_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */ #define BCMA_CC_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */ #define BCMA_CC_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */ diff --git a/include/linux/bio.h b/include/linux/bio.h index a3c071c9e18..847994aef0e 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -211,8 +211,8 @@ extern void bio_pair_release(struct bio_pair *dbio); extern struct bio_set *bioset_create(unsigned int, unsigned int); extern void bioset_free(struct bio_set *); -extern struct bio *bio_alloc(gfp_t, int); -extern struct bio *bio_kmalloc(gfp_t, int); +extern struct bio *bio_alloc(gfp_t, unsigned int); +extern struct bio *bio_kmalloc(gfp_t, unsigned int); extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *); extern void bio_put(struct bio *); extern void bio_free(struct bio *, struct bio_set *); @@ -519,7 +519,11 @@ extern void bio_integrity_init(void); #define bioset_integrity_create(a, b) (0) #define bio_integrity_prep(a) (0) #define bio_integrity_enabled(a) (0) -#define bio_integrity_clone(a, b, c, d) (0) +static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src, + gfp_t gfp_mask, struct bio_set *bs) +{ + return 0; +} #define bioset_integrity_free(a) do { } while (0) #define bio_integrity_free(a, b) do { } while (0) #define bio_integrity_endio(a, b) do { } while (0) diff --git a/include/linux/bitops.h b/include/linux/bitops.h index a3ef66a2a08..3c1063acb2a 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -22,8 +22,14 @@ extern unsigned long __sw_hweight64(__u64 w); #include <asm/bitops.h> #define for_each_set_bit(bit, addr, size) \ - for ((bit) = find_first_bit((addr), (size)); \ - (bit) < (size); \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) + +/* same as for_each_set_bit() but use bit as value to start with */ +#define for_each_set_bit_cont(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ (bit) = find_next_bit((addr), (size), (bit) + 1)) static __inline__ int get_bitmask_order(unsigned int count) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c7a6d3b5bc7..94acd8172b5 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -805,9 +805,6 @@ extern void blk_unprep_request(struct request *); */ extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id); -extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *, - request_fn_proc *, - spinlock_t *, int node_id); extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); extern struct request_queue *blk_init_allocated_queue(struct request_queue *, request_fn_proc *, spinlock_t *); diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index ab344a52110..66d3e954eb6 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -44,7 +44,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat, unsigned long endpfn); extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); -unsigned long free_all_memory_core_early(int nodeid); +extern unsigned long free_low_memory_core_early(int nodeid); extern unsigned long 
free_all_bootmem_node(pg_data_t *pgdat); extern unsigned long free_all_bootmem(void); diff --git a/include/linux/can/platform/cc770.h b/include/linux/can/platform/cc770.h new file mode 100644 index 00000000000..7702641f87e --- /dev/null +++ b/include/linux/can/platform/cc770.h @@ -0,0 +1,33 @@ +#ifndef _CAN_PLATFORM_CC770_H_ +#define _CAN_PLATFORM_CC770_H_ + +/* CPU Interface Register (0x02) */ +#define CPUIF_CEN 0x01 /* Clock Out Enable */ +#define CPUIF_MUX 0x04 /* Multiplex */ +#define CPUIF_SLP 0x08 /* Sleep */ +#define CPUIF_PWD 0x10 /* Power Down Mode */ +#define CPUIF_DMC 0x20 /* Divide Memory Clock */ +#define CPUIF_DSC 0x40 /* Divide System Clock */ +#define CPUIF_RST 0x80 /* Hardware Reset Status */ + +/* Clock Out Register (0x1f) */ +#define CLKOUT_CD_MASK 0x0f /* Clock Divider mask */ +#define CLKOUT_SL_MASK 0x30 /* Slew Rate mask */ +#define CLKOUT_SL_SHIFT 4 + +/* Bus Configuration Register (0x2f) */ +#define BUSCFG_DR0 0x01 /* Disconnect RX0 Input / Select RX input */ +#define BUSCFG_DR1 0x02 /* Disconnect RX1 Input / Silent mode */ +#define BUSCFG_DT1 0x08 /* Disconnect TX1 Output */ +#define BUSCFG_POL 0x20 /* Polarity dominant or recessive */ +#define BUSCFG_CBY 0x40 /* Input Comparator Bypass */ + +struct cc770_platform_data { + u32 osc_freq; /* CAN bus oscillator frequency in Hz */ + + u8 cir; /* CPU Interface Register */ + u8 cor; /* Clock Out Register */ + u8 bcr; /* Bus Configuration Register */ +}; + +#endif /* !_CAN_PLATFORM_CC770_H_ */ diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index f88eacb111d..7c05ac202d9 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -10,6 +10,12 @@ #include "osdmap.h" #include "messenger.h" +/* + * Maximum object name size + * (must be at least as big as RBD_MAX_MD_NAME_LEN -- currently 100) + */ +#define MAX_OBJ_NAME_SIZE 100 + struct ceph_msg; struct ceph_snap_context; struct ceph_osd_request; @@ -75,7 +81,7 @@ struct ceph_osd_request { struct inode *r_inode; /* for use by callbacks */ void *r_priv; /* ditto */ - char r_oid[40]; /* object name */ + char r_oid[MAX_OBJ_NAME_SIZE]; /* object name */ int r_oid_len; unsigned long r_stamp; /* send OR check time */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 1b7f9d52501..a17becc36ca 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -319,7 +319,7 @@ struct cftype { * If not 0, file mode is set to this value, otherwise it will * be figured out automatically */ - mode_t mode; + umode_t mode; /* * If non-zero, defines the maximum length of string that can diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index ac663c18776..0bd390ce98b 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -59,8 +59,16 @@ SUBSYS(net_cls) SUBSYS(blkio) #endif +/* */ + #ifdef CONFIG_CGROUP_PERF SUBSYS(perf) #endif /* */ + +#ifdef CONFIG_NETPRIO_CGROUP +SUBSYS(net_prio) +#endif + +/* */ diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 139c4db55f1..081147da056 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -71,7 +71,7 @@ struct timecounter { /** * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds - * @tc: Pointer to cycle counter. + * @cc: Pointer to cycle counter. * @cycles: Cycles * * XXX - This could use some mult_lxl_ll() asm optimization. 
Same code @@ -114,7 +114,7 @@ extern u64 timecounter_read(struct timecounter *tc); * time base as values returned by * timecounter_read() * @tc: Pointer to time counter. - * @cycle: a value returned by tc->cc->read() + * @cycle_tstamp: a value returned by tc->cc->read() * * Cycle counts that are converted correctly as long as they * fall into the interval [-1/2 max cycle count, +1/2 max cycle count], @@ -156,10 +156,12 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, * @mult: cycle to nanosecond multiplier * @shift: cycle to nanosecond divisor (power of two) * @max_idle_ns: max idle time permitted by the clocksource (nsecs) + * @maxadj: maximum adjustment value to mult (~11%) * @flags: flags describing special properties * @archdata: arch-specific data * @suspend: suspend function for the clocksource, if necessary * @resume: resume function for the clocksource, if necessary + * @cycle_last: most recent cycle counter value seen by ::read() */ struct clocksource { /* @@ -172,7 +174,7 @@ struct clocksource { u32 mult; u32 shift; u64 max_idle_ns; - + u32 maxadj; #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA struct arch_clocksource_data archdata; #endif @@ -186,6 +188,7 @@ struct clocksource { void (*suspend)(struct clocksource *cs); void (*resume)(struct clocksource *cs); + /* private: */ #ifdef CONFIG_CLOCKSOURCE_WATCHDOG /* Watchdog related data, used by the framework */ struct list_head wd_list; @@ -260,6 +263,9 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) /** * clocksource_cyc2ns - converts clocksource cycles to nanoseconds + * @cycles: cycles + * @mult: cycle to nanosecond multiplier + * @shift: cycle to nanosecond divisor (power of two) * * Converts cycles to nanoseconds, using the given mult and shift. * diff --git a/include/linux/compat.h b/include/linux/compat.h index 154bf568301..41c9f6515f4 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -422,9 +422,9 @@ asmlinkage long compat_sys_getdents64(unsigned int fd, asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *, unsigned int nr_segs, unsigned int flags); asmlinkage long compat_sys_open(const char __user *filename, int flags, - int mode); + umode_t mode); asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename, - int flags, int mode); + int flags, umode_t mode); asmlinkage long compat_sys_open_by_handle_at(int mountdirfd, struct file_handle __user *handle, int flags); @@ -552,5 +552,14 @@ extern ssize_t compat_rw_copy_check_uvector(int type, extern void __user *compat_alloc_user_space(unsigned long len); +asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid, + const struct compat_iovec __user *lvec, + unsigned long liovcnt, const struct compat_iovec __user *rvec, + unsigned long riovcnt, unsigned long flags); +asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid, + const struct compat_iovec __user *lvec, + unsigned long liovcnt, const struct compat_iovec __user *rvec, + unsigned long riovcnt, unsigned long flags); + #endif /* CONFIG_COMPAT */ #endif /* _LINUX_COMPAT_H */ diff --git a/include/linux/configfs.h b/include/linux/configfs.h index 3081c58d696..34025df6182 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -124,7 +124,7 @@ extern struct config_item *config_group_find_item(struct config_group *, struct configfs_attribute { const char *ca_name; struct module *ca_owner; - mode_t ca_mode; + umode_t ca_mode; }; /* diff --git a/include/linux/cordic.h b/include/linux/cordic.h index 
f932093e20c..cf68ca4a508 100644 --- a/include/linux/cordic.h +++ b/include/linux/cordic.h @@ -35,8 +35,8 @@ struct cordic_iq { * @theta: angle in degrees for which i/q coordinate is to be calculated. * @coord: function output parameter holding the i/q coordinate. * - * The function calculates the i/q coordinate for a given angle using - * cordic algorithm. The coordinate consists of a real (i) and an + * The function calculates the i/q coordinate for a given angle using the + * CORDIC algorithm. The coordinate consists of a real (i) and an * imaginary (q) part. The real part is essentially the cosine of the * angle and the imaginary part is the sine of the angle. The returned * values are scaled by 2^16 for precision. The range for theta is diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 6cb60fd2ea8..1f6587590a1 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -14,7 +14,7 @@ #ifndef _LINUX_CPU_H_ #define _LINUX_CPU_H_ -#include <linux/sysdev.h> +#include <linux/device.h> #include <linux/node.h> #include <linux/compiler.h> #include <linux/cpumask.h> @@ -22,19 +22,20 @@ struct cpu { int node_id; /* The node which contains the CPU */ int hotpluggable; /* creates sysfs control file if hotpluggable */ - struct sys_device sysdev; + struct device dev; }; extern int register_cpu(struct cpu *cpu, int num); -extern struct sys_device *get_cpu_sysdev(unsigned cpu); +extern struct device *get_cpu_device(unsigned cpu); +extern bool cpu_is_hotpluggable(unsigned cpu); -extern int cpu_add_sysdev_attr(struct sysdev_attribute *attr); -extern void cpu_remove_sysdev_attr(struct sysdev_attribute *attr); +extern int cpu_add_dev_attr(struct device_attribute *attr); +extern void cpu_remove_dev_attr(struct device_attribute *attr); -extern int cpu_add_sysdev_attr_group(struct attribute_group *attrs); -extern void cpu_remove_sysdev_attr_group(struct attribute_group *attrs); +extern int cpu_add_dev_attr_group(struct attribute_group *attrs); +extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); -extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls); +extern int sched_create_sysfs_power_savings_entries(struct device *dev); #ifdef CONFIG_HOTPLUG_CPU extern void unregister_cpu(struct cpu *cpu); @@ -160,7 +161,7 @@ static inline void cpu_maps_update_done(void) } #endif /* CONFIG_SMP */ -extern struct sysdev_class cpu_sysdev_class; +extern struct bus_type cpu_subsys; #ifdef CONFIG_HOTPLUG_CPU /* Stop CPUs going up and down. 
*/ diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 7408af843b8..23f81de5182 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -130,7 +130,6 @@ struct cpuidle_driver { #ifdef CONFIG_CPU_IDLE extern void disable_cpuidle(void); extern int cpuidle_idle_call(void); - extern int cpuidle_register_driver(struct cpuidle_driver *drv); struct cpuidle_driver *cpuidle_get_driver(void); extern void cpuidle_unregister_driver(struct cpuidle_driver *drv); @@ -145,7 +144,6 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev); #else static inline void disable_cpuidle(void) { } static inline int cpuidle_idle_call(void) { return -ENODEV; } - static inline int cpuidle_register_driver(struct cpuidle_driver *drv) {return -ENODEV; } static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; } diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 4df92619936..ed9f74f6c51 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -339,7 +339,8 @@ extern int d_validate(struct dentry *, struct dentry *); */ extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); -extern char *__d_path(const struct path *path, struct path *root, char *, int); +extern char *__d_path(const struct path *, const struct path *, char *, int); +extern char *d_absolute_path(const struct path *, char *, int); extern char *d_path(const struct path *, char *, int); extern char *d_path_with_unreachable(const struct path *, char *, int); extern char *dentry_path_raw(struct dentry *, char *, int); diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index e7d9b20ddc5..6169c26fd8c 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -16,6 +16,7 @@ #define _DEBUGFS_H_ #include <linux/fs.h> +#include <linux/seq_file.h> #include <linux/types.h> @@ -26,6 +27,17 @@ struct debugfs_blob_wrapper { unsigned long size; }; +struct debugfs_reg32 { + char *name; + unsigned long offset; +}; + +struct debugfs_regset32 { + struct debugfs_reg32 *regs; + int nregs; + void __iomem *base; +}; + extern struct dentry *arch_debugfs_dir; #if defined(CONFIG_DEBUG_FS) @@ -34,7 +46,7 @@ extern struct dentry *arch_debugfs_dir; extern const struct file_operations debugfs_file_operations; extern const struct inode_operations debugfs_link_operations; -struct dentry *debugfs_create_file(const char *name, mode_t mode, +struct dentry *debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops); @@ -49,31 +61,38 @@ void debugfs_remove_recursive(struct dentry *dentry); struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, const char *new_name); -struct dentry *debugfs_create_u8(const char *name, mode_t mode, +struct dentry *debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent, u8 *value); -struct dentry *debugfs_create_u16(const char *name, mode_t mode, +struct dentry *debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent, u16 *value); -struct dentry *debugfs_create_u32(const char *name, mode_t mode, +struct dentry *debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent, u32 *value); -struct dentry *debugfs_create_u64(const char *name, mode_t mode, +struct dentry *debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent, u64 *value); -struct dentry *debugfs_create_x8(const char *name, mode_t mode, +struct dentry *debugfs_create_x8(const char *name, umode_t mode, 
struct dentry *parent, u8 *value); -struct dentry *debugfs_create_x16(const char *name, mode_t mode, +struct dentry *debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent, u16 *value); -struct dentry *debugfs_create_x32(const char *name, mode_t mode, +struct dentry *debugfs_create_x32(const char *name, umode_t mode, struct dentry *parent, u32 *value); -struct dentry *debugfs_create_x64(const char *name, mode_t mode, +struct dentry *debugfs_create_x64(const char *name, umode_t mode, struct dentry *parent, u64 *value); -struct dentry *debugfs_create_size_t(const char *name, mode_t mode, +struct dentry *debugfs_create_size_t(const char *name, umode_t mode, struct dentry *parent, size_t *value); -struct dentry *debugfs_create_bool(const char *name, mode_t mode, +struct dentry *debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent, u32 *value); -struct dentry *debugfs_create_blob(const char *name, mode_t mode, +struct dentry *debugfs_create_blob(const char *name, umode_t mode, struct dentry *parent, struct debugfs_blob_wrapper *blob); +struct dentry *debugfs_create_regset32(const char *name, mode_t mode, + struct dentry *parent, + struct debugfs_regset32 *regset); + +int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, + int nregs, void __iomem *base, char *prefix); + bool debugfs_initialized(void); #else @@ -86,7 +105,7 @@ bool debugfs_initialized(void); * want to duplicate the design decision mistakes of procfs and devfs again. */ -static inline struct dentry *debugfs_create_file(const char *name, mode_t mode, +static inline struct dentry *debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { @@ -118,76 +137,83 @@ static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentr return ERR_PTR(-ENODEV); } -static inline struct dentry *debugfs_create_u8(const char *name, mode_t mode, +static inline struct dentry *debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent, u8 *value) { return ERR_PTR(-ENODEV); } -static inline struct dentry *debugfs_create_u16(const char *name, mode_t mode, +static inline struct dentry *debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent, u16 *value) { return ERR_PTR(-ENODEV); } -static inline struct dentry *debugfs_create_u32(const char *name, mode_t mode, +static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent, u32 *value) { return ERR_PTR(-ENODEV); } -static inline struct dentry *debugfs_create_u64(const char *name, mode_t mode, +static inline struct dentry *debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent, u64 *value) { return ERR_PTR(-ENODEV); } -static inline struct dentry *debugfs_create_x8(const char *name, mode_t mode, +static inline struct dentry *debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent, u8 *value) { return ERR_PTR(-ENODEV); } -static inline struct dentry *debugfs_create_x16(const char *name, mode_t mode, +static inline struct dentry *debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent, u16 *value) { return ERR_PTR(-ENODEV); } -static inline struct dentry *debugfs_create_x32(const char *name, mode_t mode, +static inline struct dentry *debugfs_create_x32(const char *name, umode_t mode, struct dentry *parent, u32 *value) { return ERR_PTR(-ENODEV); } -static inline struct dentry *debugfs_create_size_t(const char *name, mode_t mode, +static inline 
struct dentry *debugfs_create_size_t(const char *name, umode_t mode, struct dentry *parent, size_t *value) { return ERR_PTR(-ENODEV); } -static inline struct dentry *debugfs_create_bool(const char *name, mode_t mode, +static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent, u32 *value) { return ERR_PTR(-ENODEV); } -static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode, +static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode, struct dentry *parent, struct debugfs_blob_wrapper *blob) { return ERR_PTR(-ENODEV); } +static inline struct dentry *debugfs_create_regset32(const char *name, + mode_t mode, struct dentry *parent, + struct debugfs_regset32 *regset) +{ + return ERR_PTR(-ENODEV); +} + static inline bool debugfs_initialized(void) { return false; diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h index 65970b811e2..0e5f5785d9f 100644 --- a/include/linux/debugobjects.h +++ b/include/linux/debugobjects.h @@ -46,6 +46,8 @@ struct debug_obj { * fails * @fixup_free: fixup function, which is called when the free check * fails + * @fixup_assert_init: fixup function, which is called when the assert_init + * check fails */ struct debug_obj_descr { const char *name; @@ -54,6 +56,7 @@ struct debug_obj_descr { int (*fixup_activate) (void *addr, enum debug_obj_state state); int (*fixup_destroy) (void *addr, enum debug_obj_state state); int (*fixup_free) (void *addr, enum debug_obj_state state); + int (*fixup_assert_init)(void *addr, enum debug_obj_state state); }; #ifdef CONFIG_DEBUG_OBJECTS @@ -64,6 +67,7 @@ extern void debug_object_activate (void *addr, struct debug_obj_descr *descr); extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr); extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr); extern void debug_object_free (void *addr, struct debug_obj_descr *descr); +extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr); /* * Active state: @@ -89,6 +93,8 @@ static inline void debug_object_destroy (void *addr, struct debug_obj_descr *descr) { } static inline void debug_object_free (void *addr, struct debug_obj_descr *descr) { } +static inline void +debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { } static inline void debug_objects_early_init(void) { } static inline void debug_objects_mem_init(void) { } diff --git a/include/linux/device.h b/include/linux/device.h index ffbcf95cd97..5b3adb8f958 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -53,6 +53,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *); * struct bus_type - The bus type of the device * * @name: The name of the bus. + * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id). + * @dev_root: Default device to use as the parent. * @bus_attrs: Default attributes of the bus. * @dev_attrs: Default attributes of the devices on the bus. * @drv_attrs: Default attributes of the device drivers on the bus. @@ -69,7 +71,7 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *); * @resume: Called to bring a device on this bus out of sleep mode. * @pm: Power management operations of this bus, callback the specific * device driver's pm-ops. 
- * @iommu_ops IOMMU specific operations for this bus, used to attach IOMMU + * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU * driver implementations to a bus and allow the driver to do * bus-specific setup * @p: The private data of the driver core, only the driver core can @@ -86,6 +88,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *); */ struct bus_type { const char *name; + const char *dev_name; + struct device *dev_root; struct bus_attribute *bus_attrs; struct device_attribute *dev_attrs; struct driver_attribute *drv_attrs; @@ -106,12 +110,30 @@ struct bus_type { struct subsys_private *p; }; -extern int __must_check bus_register(struct bus_type *bus); +/* This is a #define to keep the compiler from merging different + * instances of the __key variable */ +#define bus_register(subsys) \ +({ \ + static struct lock_class_key __key; \ + __bus_register(subsys, &__key); \ +}) +extern int __must_check __bus_register(struct bus_type *bus, + struct lock_class_key *key); extern void bus_unregister(struct bus_type *bus); extern int __must_check bus_rescan_devices(struct bus_type *bus); /* iterator helpers for buses */ +struct subsys_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; +void subsys_dev_iter_init(struct subsys_dev_iter *iter, + struct bus_type *subsys, + struct device *start, + const struct device_type *type); +struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter); +void subsys_dev_iter_exit(struct subsys_dev_iter *iter); int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, int (*fn)(struct device *dev, void *data)); @@ -121,10 +143,10 @@ struct device *bus_find_device(struct bus_type *bus, struct device *start, struct device *bus_find_device_by_name(struct bus_type *bus, struct device *start, const char *name); - +struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id, + struct device *hint); int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, void *data, int (*fn)(struct device_driver *, void *)); - void bus_sort_breadthfirst(struct bus_type *bus, int (*compare)(const struct device *a, const struct device *b)); @@ -256,6 +278,33 @@ struct device *driver_find_device(struct device_driver *drv, int (*match)(struct device *dev, void *data)); /** + * struct subsys_interface - interfaces to device functions + * @name name of the device function + * @subsystem subsytem of the devices to attach to + * @node the list of functions registered at the subsystem + * @add device hookup to device function handler + * @remove device hookup to device function handler + * + * Simple interfaces attached to a subsystem. Multiple interfaces can + * attach to a subsystem and its devices. Unlike drivers, they do not + * exclusively claim or control devices. Interfaces usually represent + * a specific functionality of a subsystem/class of devices. + */ +struct subsys_interface { + const char *name; + struct bus_type *subsys; + struct list_head node; + int (*add_dev)(struct device *dev, struct subsys_interface *sif); + int (*remove_dev)(struct device *dev, struct subsys_interface *sif); +}; + +int subsys_interface_register(struct subsys_interface *sif); +void subsys_interface_unregister(struct subsys_interface *sif); + +int subsys_system_register(struct bus_type *subsys, + const struct attribute_group **groups); + +/** * struct class - device classes * @name: Name of the class. * @owner: The module owner. 
@@ -294,7 +343,7 @@ struct class { struct kobject *dev_kobj; int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env); - char *(*devnode)(struct device *dev, mode_t *mode); + char *(*devnode)(struct device *dev, umode_t *mode); void (*class_release)(struct class *class); void (*dev_release)(struct device *dev); @@ -423,7 +472,7 @@ struct device_type { const char *name; const struct attribute_group **groups; int (*uevent)(struct device *dev, struct kobj_uevent_env *env); - char *(*devnode)(struct device *dev, mode_t *mode); + char *(*devnode)(struct device *dev, umode_t *mode); void (*release)(struct device *dev); const struct dev_pm_ops *pm; @@ -438,11 +487,31 @@ struct device_attribute { const char *buf, size_t count); }; -#define DEVICE_ATTR(_name, _mode, _show, _store) \ -struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) +struct dev_ext_attribute { + struct device_attribute attr; + void *var; +}; -extern int __must_check device_create_file(struct device *device, - const struct device_attribute *entry); +ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr, + char *buf); +ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); +ssize_t device_show_int(struct device *dev, struct device_attribute *attr, + char *buf); +ssize_t device_store_int(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); + +#define DEVICE_ATTR(_name, _mode, _show, _store) \ + struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define DEVICE_ULONG_ATTR(_name, _mode, _var) \ + struct dev_ext_attribute dev_attr_##_name = \ + { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) } +#define DEVICE_INT_ATTR(_name, _mode, _var) \ + struct dev_ext_attribute dev_attr_##_name = \ + { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) } + +extern int device_create_file(struct device *device, + const struct device_attribute *entry); extern void device_remove_file(struct device *dev, const struct device_attribute *attr); extern int __must_check device_create_bin_file(struct device *dev, @@ -490,6 +559,9 @@ extern int devres_release_group(struct device *dev, void *id); extern void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp); extern void devm_kfree(struct device *dev, void *p); +void __iomem *devm_request_and_ioremap(struct device *dev, + struct resource *res); + struct device_dma_parameters { /* * a low level driver may set these to teach IOMMU code about @@ -600,6 +672,7 @@ struct device { struct device_node *of_node; /* associated device tree node */ dev_t devt; /* dev_t, creates the sysfs "dev" */ + u32 id; /* device instance */ spinlock_t devres_lock; struct list_head devres_head; @@ -682,6 +755,11 @@ static inline bool device_async_suspend_enabled(struct device *dev) return !!dev->power.async_suspend; } +static inline void pm_suspend_ignore_children(struct device *dev, bool enable) +{ + dev->power.ignore_children = enable; +} + static inline void device_lock(struct device *dev) { mutex_lock(&dev->mutex); @@ -715,7 +793,7 @@ extern int device_rename(struct device *dev, const char *new_name); extern int device_move(struct device *dev, struct device *new_parent, enum dpm_order dpm_order); extern const char *device_get_devnode(struct device *dev, - mode_t *mode, const char **tmp); + umode_t *mode, const char **tmp); extern void *dev_get_drvdata(const struct device *dev); extern int dev_set_drvdata(struct 
device *dev, void *data); @@ -919,4 +997,25 @@ extern long sysfs_deprecated; #define sysfs_deprecated 0 #endif +/** + * module_driver() - Helper macro for drivers that don't do anything + * special in module init/exit. This eliminates a lot of boilerplate. + * Each module may only use this macro once, and calling it replaces + * module_init() and module_exit(). + * + * Use this macro to construct bus specific macros for registering + * drivers, and do not use it on its own. + */ +#define module_driver(__driver, __register, __unregister) \ +static int __init __driver##_init(void) \ +{ \ + return __register(&(__driver)); \ +} \ +module_init(__driver##_init); \ +static void __exit __driver##_exit(void) \ +{ \ + __unregister(&(__driver)); \ +} \ +module_exit(__driver##_exit); + #endif /* _DEVICE_H_ */ diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h new file mode 100644 index 00000000000..f8ac076afa5 --- /dev/null +++ b/include/linux/dma-buf.h @@ -0,0 +1,176 @@ +/* + * Header file for dma buffer sharing framework. + * + * Copyright(C) 2011 Linaro Limited. All rights reserved. + * Author: Sumit Semwal <sumit.semwal@ti.com> + * + * Many thanks to linaro-mm-sig list, and specially + * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and + * Daniel Vetter <daniel@ffwll.ch> for their support in creation and + * refining of this idea. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __DMA_BUF_H__ +#define __DMA_BUF_H__ + +#include <linux/file.h> +#include <linux/err.h> +#include <linux/device.h> +#include <linux/scatterlist.h> +#include <linux/list.h> +#include <linux/dma-mapping.h> + +struct dma_buf; +struct dma_buf_attachment; + +/** + * struct dma_buf_ops - operations possible on struct dma_buf + * @attach: [optional] allows different devices to 'attach' themselves to the + * given buffer. It might return -EBUSY to signal that backing storage + * is already allocated and incompatible with the requirements + * of requesting device. + * @detach: [optional] detach a given device from this buffer. + * @map_dma_buf: returns list of scatter pages allocated, increases usecount + * of the buffer. Requires atleast one attach to be called + * before. Returned sg list should already be mapped into + * _device_ address space. This call may sleep. May also return + * -EINTR. Should return -EINVAL if attach hasn't been called yet. + * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter + * pages. + * @release: release this buffer; to be called after the last dma_buf_put. + */ +struct dma_buf_ops { + int (*attach)(struct dma_buf *, struct device *, + struct dma_buf_attachment *); + + void (*detach)(struct dma_buf *, struct dma_buf_attachment *); + + /* For {map,unmap}_dma_buf below, any specific buffer attributes + * required should get added to device_dma_parameters accessible + * via dev->dma_params. 
+ */ + struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, + enum dma_data_direction); + void (*unmap_dma_buf)(struct dma_buf_attachment *, + struct sg_table *); + /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY + * if the call would block. + */ + + /* after final dma_buf_put() */ + void (*release)(struct dma_buf *); + +}; + +/** + * struct dma_buf - shared buffer object + * @size: size of the buffer + * @file: file pointer used for sharing buffers across, and for refcounting. + * @attachments: list of dma_buf_attachment that denotes all devices attached. + * @ops: dma_buf_ops associated with this buffer object. + * @priv: exporter specific private data for this buffer object. + */ +struct dma_buf { + size_t size; + struct file *file; + struct list_head attachments; + const struct dma_buf_ops *ops; + /* mutex to serialize list manipulation and other ops */ + struct mutex lock; + void *priv; +}; + +/** + * struct dma_buf_attachment - holds device-buffer attachment data + * @dmabuf: buffer for this attachment. + * @dev: device attached to the buffer. + * @node: list of dma_buf_attachment. + * @priv: exporter specific attachment data. + * + * This structure holds the attachment information between the dma_buf buffer + * and its user device(s). The list contains one attachment struct per device + * attached to the buffer. + */ +struct dma_buf_attachment { + struct dma_buf *dmabuf; + struct device *dev; + struct list_head node; + void *priv; +}; + +#ifdef CONFIG_DMA_SHARED_BUFFER +struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, + struct device *dev); +void dma_buf_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *dmabuf_attach); +struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops, + size_t size, int flags); +int dma_buf_fd(struct dma_buf *dmabuf); +struct dma_buf *dma_buf_get(int fd); +void dma_buf_put(struct dma_buf *dmabuf); + +struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *, + enum dma_data_direction); +void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *); +#else + +static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, + struct device *dev) +{ + return ERR_PTR(-ENODEV); +} + +static inline void dma_buf_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *dmabuf_attach) +{ + return; +} + +static inline struct dma_buf *dma_buf_export(void *priv, + struct dma_buf_ops *ops, + size_t size, int flags) +{ + return ERR_PTR(-ENODEV); +} + +static inline int dma_buf_fd(struct dma_buf *dmabuf) +{ + return -ENODEV; +} + +static inline struct dma_buf *dma_buf_get(int fd) +{ + return ERR_PTR(-ENODEV); +} + +static inline void dma_buf_put(struct dma_buf *dmabuf) +{ + return; +} + +static inline struct sg_table *dma_buf_map_attachment( + struct dma_buf_attachment *attach, enum dma_data_direction write) +{ + return ERR_PTR(-ENODEV); +} + +static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, + struct sg_table *sg) +{ + return; +} + +#endif /* CONFIG_DMA_SHARED_BUFFER */ + +#endif /* __DMA_BUF_H__ */ diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h index ef90cbd8e17..57c9a8ae4f2 100644 --- a/include/linux/dma_remapping.h +++ b/include/linux/dma_remapping.h @@ -31,6 +31,7 @@ extern void free_dmar_iommu(struct intel_iommu *iommu); extern int iommu_calculate_agaw(struct intel_iommu *iommu); extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); extern int dmar_disabled; +extern int intel_iommu_enabled; 
#else static inline int iommu_calculate_agaw(struct intel_iommu *iommu) { @@ -44,6 +45,7 @@ static inline void free_dmar_iommu(struct intel_iommu *iommu) { } #define dmar_disabled (1) +#define intel_iommu_enabled (0) #endif diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h new file mode 100644 index 00000000000..5621547d631 --- /dev/null +++ b/include/linux/dynamic_queue_limits.h @@ -0,0 +1,97 @@ +/* + * Dynamic queue limits (dql) - Definitions + * + * Copyright (c) 2011, Tom Herbert <therbert@google.com> + * + * This header file contains the definitions for dynamic queue limits (dql). + * dql would be used in conjunction with a producer/consumer type queue + * (possibly a HW queue). Such a queue would have these general properties: + * + * 1) Objects are queued up to some limit specified as number of objects. + * 2) Periodically a completion process executes which retires consumed + * objects. + * 3) Starvation occurs when limit has been reached, all queued data has + * actually been consumed, but completion processing has not yet run + * so queuing new data is blocked. + * 4) Minimizing the amount of queued data is desirable. + * + * The goal of dql is to calculate the limit as the minimum number of objects + * needed to prevent starvation. + * + * The primary functions of dql are: + * dql_queued - called when objects are enqueued to record number of objects + * dql_avail - returns how many objects are available to be queued based + * on the object limit and how many objects are already enqueued + * dql_completed - called at completion time to indicate how many objects + * were retired from the queue + * + * The dql implementation does not implement any locking for the dql data + * structures, the higher layer should provide this. dql_queued should + * be serialized to prevent concurrent execution of the function; this + * is also true for dql_completed. However, dql_queued and dlq_completed can + * be executed concurrently (i.e. they can be protected by different locks). + */ + +#ifndef _LINUX_DQL_H +#define _LINUX_DQL_H + +#ifdef __KERNEL__ + +struct dql { + /* Fields accessed in enqueue path (dql_queued) */ + unsigned int num_queued; /* Total ever queued */ + unsigned int adj_limit; /* limit + num_completed */ + unsigned int last_obj_cnt; /* Count at last queuing */ + + /* Fields accessed only by completion path (dql_completed) */ + + unsigned int limit ____cacheline_aligned_in_smp; /* Current limit */ + unsigned int num_completed; /* Total ever completed */ + + unsigned int prev_ovlimit; /* Previous over limit */ + unsigned int prev_num_queued; /* Previous queue total */ + unsigned int prev_last_obj_cnt; /* Previous queuing cnt */ + + unsigned int lowest_slack; /* Lowest slack found */ + unsigned long slack_start_time; /* Time slacks seen */ + + /* Configuration */ + unsigned int max_limit; /* Max limit */ + unsigned int min_limit; /* Minimum limit */ + unsigned int slack_hold_time; /* Time to measure slack */ +}; + +/* Set some static maximums */ +#define DQL_MAX_OBJECT (UINT_MAX / 16) +#define DQL_MAX_LIMIT ((UINT_MAX / 2) - DQL_MAX_OBJECT) + +/* + * Record number of objects queued. Assumes that caller has already checked + * availability in the queue with dql_avail. + */ +static inline void dql_queued(struct dql *dql, unsigned int count) +{ + BUG_ON(count > DQL_MAX_OBJECT); + + dql->num_queued += count; + dql->last_obj_cnt = count; +} + +/* Returns how many objects can be queued, < 0 indicates over limit. 
*/ +static inline int dql_avail(const struct dql *dql) +{ + return dql->adj_limit - dql->num_queued; +} + +/* Record number of completed objects and recalculate the limit. */ +void dql_completed(struct dql *dql, unsigned int count); + +/* Reset dql state */ +void dql_reset(struct dql *dql); + +/* Initialize dql state */ +int dql_init(struct dql *dql, unsigned hold_time); + +#endif /* _KERNEL_ */ + +#endif /* _LINUX_DQL_H */ diff --git a/include/linux/edac.h b/include/linux/edac.h index 055b248bdd5..1cd3947987e 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -13,7 +13,7 @@ #define _LINUX_EDAC_H_ #include <linux/atomic.h> -#include <linux/sysdev.h> +#include <linux/device.h> #define EDAC_OPSTATE_INVAL -1 #define EDAC_OPSTATE_POLL 0 @@ -23,12 +23,12 @@ extern int edac_op_state; extern int edac_err_assert; extern atomic_t edac_handlers; -extern struct sysdev_class edac_class; +extern struct bus_type edac_subsys; extern int edac_handler_set(void); extern void edac_atomic_assert_error(void); -extern struct sysdev_class *edac_get_sysfs_class(void); -extern void edac_put_sysfs_class(void); +extern struct bus_type *edac_get_sysfs_subsys(void); +extern void edac_put_sysfs_subsys(void); static inline void opstate_init(void) { diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h index c4627cbdb8e..e50f98b0297 100644 --- a/include/linux/eeprom_93cx6.h +++ b/include/linux/eeprom_93cx6.h @@ -33,6 +33,7 @@ #define PCI_EEPROM_WIDTH_93C86 8 #define PCI_EEPROM_WIDTH_OPCODE 3 #define PCI_EEPROM_WRITE_OPCODE 0x05 +#define PCI_EEPROM_ERASE_OPCODE 0x07 #define PCI_EEPROM_READ_OPCODE 0x06 #define PCI_EEPROM_EWDS_OPCODE 0x10 #define PCI_EEPROM_EWEN_OPCODE 0x13 @@ -46,6 +47,7 @@ * @register_write(struct eeprom_93cx6 *eeprom): handler to * write to the eeprom register by using all reg_* fields. * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines + * @drive_data: Set if we're driving the data line. 
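/*
 * Minimal sketch of the dql pattern defined above: the enqueue path checks
 * dql_avail() and records with dql_queued(), the completion path calls
 * dql_completed(). Locking and the surrounding driver are assumed.
 */
static void example_enqueue(struct dql *dql, unsigned int objs)
{
	if (dql_avail(dql) < 0)
		return;			/* over limit: stop the queue instead */
	dql_queued(dql, objs);
}

static void example_complete(struct dql *dql, unsigned int objs)
{
	dql_completed(dql, objs);	/* retire objects, recompute the limit */
}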
* @reg_data_in: register field to indicate data input * @reg_data_out: register field to indicate data output * @reg_data_clock: register field to set the data clock @@ -62,6 +64,7 @@ struct eeprom_93cx6 { int width; + char drive_data; char reg_data_in; char reg_data_out; char reg_data_clock; @@ -72,3 +75,8 @@ extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, const u8 word, u16 *data); extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word, __le16 *data, const u16 words); + +extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable); + +extern void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, + u8 addr, u16 data); diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h index 034072cea85..fd0628be45c 100644 --- a/include/linux/errqueue.h +++ b/include/linux/errqueue.h @@ -17,14 +17,15 @@ struct sock_extended_err { #define SO_EE_ORIGIN_LOCAL 1 #define SO_EE_ORIGIN_ICMP 2 #define SO_EE_ORIGIN_ICMP6 3 -#define SO_EE_ORIGIN_TIMESTAMPING 4 +#define SO_EE_ORIGIN_TXSTATUS 4 +#define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS #define SO_EE_OFFENDER(ee) ((struct sockaddr*)((ee)+1)) #ifdef __KERNEL__ #include <net/ip.h> -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) #include <linux/ipv6.h> #endif @@ -33,7 +34,7 @@ struct sock_extended_err { struct sock_exterr_skb { union { struct inet_skb_parm h4; -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) struct inet6_skb_parm h6; #endif } header; diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index de33de1e205..da5b2de99ae 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -489,7 +489,10 @@ struct ethtool_rx_flow_spec { * on return. * * For %ETHTOOL_GRXCLSRLCNT, @rule_cnt is set to the number of defined - * rules on return. + * rules on return. If @data is non-zero on return then it is the + * size of the rule table, plus the flag %RX_CLS_LOC_SPECIAL if the + * driver supports any special location values. If that flag is not + * set in @data then special location values should not be used. * * For %ETHTOOL_GRXCLSRULE, @fs.@location specifies the location of an * existing rule on entry and @fs contains the rule on return. @@ -501,10 +504,23 @@ struct ethtool_rx_flow_spec { * must use the second parameter to get_rxnfc() instead of @rule_locs. * * For %ETHTOOL_SRXCLSRLINS, @fs specifies the rule to add or update. - * @fs.@location specifies the location to use and must not be ignored. + * @fs.@location either specifies the location to use or is a special + * location value with %RX_CLS_LOC_SPECIAL flag set. On return, + * @fs.@location is the actual rule location. * * For %ETHTOOL_SRXCLSRLDEL, @fs.@location specifies the location of an * existing rule on entry. + * + * A driver supporting the special location values for + * %ETHTOOL_SRXCLSRLINS may add the rule at any suitable unused + * location, and may remove a rule at a later location (lower + * priority) that matches exactly the same set of flows. The special + * values are: %RX_CLS_LOC_ANY, selecting any location; + * %RX_CLS_LOC_FIRST, selecting the first suitable location (maximum + * priority); and %RX_CLS_LOC_LAST, selecting the last suitable + * location (minimum priority). Additional special values may be + * defined in future and drivers must return -%EINVAL for any + * unrecognised value. 
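/*
 * Illustrative use of the eeprom_93cx6 erase/write support added earlier in
 * this hunk; "eeprom" is assumed to be an already initialised
 * struct eeprom_93cx6.
 */
static void example_eeprom_update(struct eeprom_93cx6 *eeprom, u8 addr, u16 data)
{
	eeprom_93cx6_wren(eeprom, true);	/* raise write enable */
	eeprom_93cx6_write(eeprom, addr, data);
	eeprom_93cx6_wren(eeprom, false);	/* drop write enable again */
}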
*/ struct ethtool_rxnfc { __u32 cmd; @@ -543,9 +559,15 @@ struct compat_ethtool_rxnfc { /** * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR - * @size: On entry, the array size of the user buffer. On return from - * %ETHTOOL_GRXFHINDIR, the array size of the hardware indirection table. + * @size: On entry, the array size of the user buffer, which may be zero. + * On return from %ETHTOOL_GRXFHINDIR, the array size of the hardware + * indirection table. * @ring_index: RX ring/queue index for each hash value + * + * For %ETHTOOL_GRXFHINDIR, a @size of zero means that only the size + * should be returned. For %ETHTOOL_SRXFHINDIR, a @size of zero means + * the table should be reset to default values. This last feature + * is not supported by the original implementations. */ struct ethtool_rxfh_indir { __u32 cmd; @@ -724,9 +746,6 @@ enum ethtool_sfeatures_retval_bits { #include <linux/rculist.h> -/* needed by dev_disable_lro() */ -extern int __ethtool_set_flags(struct net_device *dev, u32 flags); - extern int __ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); @@ -750,19 +769,18 @@ struct net_device; /* Some generic methods drivers may use in their ethtool_ops */ u32 ethtool_op_get_link(struct net_device *dev); -u32 ethtool_op_get_tx_csum(struct net_device *dev); -int ethtool_op_set_tx_csum(struct net_device *dev, u32 data); -int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data); -int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data); -u32 ethtool_op_get_sg(struct net_device *dev); -int ethtool_op_set_sg(struct net_device *dev, u32 data); -u32 ethtool_op_get_tso(struct net_device *dev); -int ethtool_op_set_tso(struct net_device *dev, u32 data); -u32 ethtool_op_get_ufo(struct net_device *dev); -int ethtool_op_set_ufo(struct net_device *dev, u32 data); -u32 ethtool_op_get_flags(struct net_device *dev); -int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported); -bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported); + +/** + * ethtool_rxfh_indir_default - get default value for RX flow hash indirection + * @index: Index in RX flow hash indirection table + * @n_rx_rings: Number of RX rings to use + * + * This function provides the default policy for RX flow hash indirection. + */ +static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) +{ + return index % n_rx_rings; +} /** * struct ethtool_ops - optional netdev operations @@ -807,22 +825,6 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported); * @get_pauseparam: Report pause parameters * @set_pauseparam: Set pause parameters. Returns a negative error code * or zero. - * @get_rx_csum: Deprecated in favour of the netdev feature %NETIF_F_RXCSUM. - * Report whether receive checksums are turned on or off. - * @set_rx_csum: Deprecated in favour of generic netdev features. Turn - * receive checksum on or off. Returns a negative error code or zero. - * @get_tx_csum: Deprecated as redundant. Report whether transmit checksums - * are turned on or off. - * @set_tx_csum: Deprecated in favour of generic netdev features. Turn - * transmit checksums on or off. Returns a negative error code or zero. - * @get_sg: Deprecated as redundant. Report whether scatter-gather is - * enabled. - * @set_sg: Deprecated in favour of generic netdev features. Turn - * scatter-gather on or off. Returns a negative error code or zero. 
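/*
 * Sketch of a driver seeding its RX indirection table with the default
 * policy helper ethtool_rxfh_indir_default() introduced above; "indir" and
 * its size are assumed driver-private state.
 */
static void example_fill_indir(u32 *indir, u32 table_size, u32 n_rx_rings)
{
	u32 i;

	for (i = 0; i < table_size; i++)
		indir[i] = ethtool_rxfh_indir_default(i, n_rx_rings);
}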
- * @get_tso: Deprecated as redundant. Report whether TCP segmentation - * offload is enabled. - * @set_tso: Deprecated in favour of generic netdev features. Turn TCP - * segmentation offload on or off. Returns a negative error code or zero. * @self_test: Run specified self-tests * @get_strings: Return a set of strings that describe the requested objects * @set_phys_id: Identify the physical devices, e.g. by flashing an LED @@ -844,15 +846,6 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported); * negative error code or zero. * @complete: Function to be called after any other operation except * @begin. Will be called even if the other operation failed. - * @get_ufo: Deprecated as redundant. Report whether UDP fragmentation - * offload is enabled. - * @set_ufo: Deprecated in favour of generic netdev features. Turn UDP - * fragmentation offload on or off. Returns a negative error code or zero. - * @get_flags: Deprecated as redundant. Report features included in - * &enum ethtool_flags that are enabled. - * @set_flags: Deprecated in favour of generic netdev features. Turn - * features included in &enum ethtool_flags on or off. Returns a - * negative error code or zero. * @get_priv_flags: Report driver-specific feature flags. * @set_priv_flags: Set driver-specific feature flags. Returns a negative * error code or zero. @@ -866,11 +859,13 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported); * @reset: Reset (part of) the device, as specified by a bitmask of * flags from &enum ethtool_reset_flags. Returns a negative * error code or zero. - * @set_rx_ntuple: Set an RX n-tuple rule. Returns a negative error code - * or zero. + * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. + * Returns zero if not supported for this specific device. * @get_rxfh_indir: Get the contents of the RX flow hash indirection table. + * Will not be called if @get_rxfh_indir_size returns zero. * Returns a negative error code or zero. * @set_rxfh_indir: Set the contents of the RX flow hash indirection table. + * Will not be called if @get_rxfh_indir_size returns zero. * Returns a negative error code or zero. * @get_channels: Get number of channels. * @set_channels: Set number of channels. 
Returns a negative error code or @@ -917,14 +912,6 @@ struct ethtool_ops { struct ethtool_pauseparam*); int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam*); - u32 (*get_rx_csum)(struct net_device *); - int (*set_rx_csum)(struct net_device *, u32); - u32 (*get_tx_csum)(struct net_device *); - int (*set_tx_csum)(struct net_device *, u32); - u32 (*get_sg)(struct net_device *); - int (*set_sg)(struct net_device *, u32); - u32 (*get_tso)(struct net_device *); - int (*set_tso)(struct net_device *, u32); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32 stringset, u8 *); int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state); @@ -932,10 +919,6 @@ struct ethtool_ops { struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); - u32 (*get_ufo)(struct net_device *); - int (*set_ufo)(struct net_device *, u32); - u32 (*get_flags)(struct net_device *); - int (*set_flags)(struct net_device *, u32); u32 (*get_priv_flags)(struct net_device *); int (*set_priv_flags)(struct net_device *, u32); int (*get_sset_count)(struct net_device *, int); @@ -944,12 +927,9 @@ struct ethtool_ops { int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); int (*flash_device)(struct net_device *, struct ethtool_flash *); int (*reset)(struct net_device *, u32 *); - int (*set_rx_ntuple)(struct net_device *, - struct ethtool_rx_ntuple *); - int (*get_rxfh_indir)(struct net_device *, - struct ethtool_rxfh_indir *); - int (*set_rxfh_indir)(struct net_device *, - const struct ethtool_rxfh_indir *); + u32 (*get_rxfh_indir_size)(struct net_device *); + int (*get_rxfh_indir)(struct net_device *, u32 *); + int (*set_rxfh_indir)(struct net_device *, const u32 *); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); @@ -1173,6 +1153,12 @@ struct ethtool_ops { #define RX_CLS_FLOW_DISC 0xffffffffffffffffULL +/* Special RX classification rule insert location values */ +#define RX_CLS_LOC_SPECIAL 0x80000000 /* flag */ +#define RX_CLS_LOC_ANY 0xffffffff +#define RX_CLS_LOC_FIRST 0xfffffffe +#define RX_CLS_LOC_LAST 0xfffffffd + /* Reset flags */ /* The reset() operation must clear the flags for the components which * were actually reset. 
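/*
 * Userspace-flavoured sketch using the special rule locations defined
 * above: ask the driver to pick any free slot for a new TCP/IPv4 rule.
 * The SIOCETHTOOL ioctl plumbing is omitted.
 */
struct ethtool_rxnfc example_nfc = {
	.cmd		= ETHTOOL_SRXCLSRLINS,
	.fs = {
		.flow_type	= TCP_V4_FLOW,
		.ring_cookie	= 1,			/* steer to RX ring 1 */
		.location	= RX_CLS_LOC_ANY,	/* driver chooses the slot */
	},
};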
On successful return, the flags indicate the diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index dec99116a0e..f957085d40e 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -884,7 +884,7 @@ extern int ext3fs_dirhash(const char *name, int len, struct /* ialloc.c */ extern struct inode * ext3_new_inode (handle_t *, struct inode *, - const struct qstr *, int); + const struct qstr *, umode_t); extern void ext3_free_inode (handle_t *, struct inode *); extern struct inode * ext3_orphan_get (struct super_block *, unsigned long); extern unsigned long ext3_count_free_inodes (struct super_block *); diff --git a/include/linux/freezer.h b/include/linux/freezer.h index a5386e3ee75..0ab54e16a91 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -5,71 +5,58 @@ #include <linux/sched.h> #include <linux/wait.h> +#include <linux/atomic.h> #ifdef CONFIG_FREEZER +extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */ +extern bool pm_freezing; /* PM freezing in effect */ +extern bool pm_nosig_freezing; /* PM nosig freezing in effect */ + /* * Check if a process has been frozen */ -static inline int frozen(struct task_struct *p) +static inline bool frozen(struct task_struct *p) { return p->flags & PF_FROZEN; } -/* - * Check if there is a request to freeze a process - */ -static inline int freezing(struct task_struct *p) -{ - return test_tsk_thread_flag(p, TIF_FREEZE); -} - -/* - * Request that a process be frozen - */ -static inline void set_freeze_flag(struct task_struct *p) -{ - set_tsk_thread_flag(p, TIF_FREEZE); -} +extern bool freezing_slow_path(struct task_struct *p); /* - * Sometimes we may need to cancel the previous 'freeze' request + * Check if there is a request to freeze a process */ -static inline void clear_freeze_flag(struct task_struct *p) -{ - clear_tsk_thread_flag(p, TIF_FREEZE); -} - -static inline bool should_send_signal(struct task_struct *p) +static inline bool freezing(struct task_struct *p) { - return !(p->flags & PF_FREEZER_NOSIG); + if (likely(!atomic_read(&system_freezing_cnt))) + return false; + return freezing_slow_path(p); } /* Takes and releases task alloc lock using task_lock() */ -extern int thaw_process(struct task_struct *p); +extern void __thaw_task(struct task_struct *t); -extern void refrigerator(void); +extern bool __refrigerator(bool check_kthr_stop); extern int freeze_processes(void); extern int freeze_kernel_threads(void); extern void thaw_processes(void); -static inline int try_to_freeze(void) +static inline bool try_to_freeze(void) { - if (freezing(current)) { - refrigerator(); - return 1; - } else - return 0; + might_sleep(); + if (likely(!freezing(current))) + return false; + return __refrigerator(false); } -extern bool freeze_task(struct task_struct *p, bool sig_only); -extern void cancel_freezing(struct task_struct *p); +extern bool freeze_task(struct task_struct *p); +extern bool set_freezable(void); #ifdef CONFIG_CGROUP_FREEZER -extern int cgroup_freezing_or_frozen(struct task_struct *task); +extern bool cgroup_freezing(struct task_struct *task); #else /* !CONFIG_CGROUP_FREEZER */ -static inline int cgroup_freezing_or_frozen(struct task_struct *task) +static inline bool cgroup_freezing(struct task_struct *task) { - return 0; + return false; } #endif /* !CONFIG_CGROUP_FREEZER */ @@ -80,33 +67,27 @@ static inline int cgroup_freezing_or_frozen(struct task_struct *task) * appropriately in case the child has exited before the freezing of tasks is * complete. 
However, we don't want kernel threads to be frozen in unexpected * places, so we allow them to block freeze_processes() instead or to set - * PF_NOFREEZE if needed and PF_FREEZER_SKIP is only set for userland vfork - * parents. Fortunately, in the ____call_usermodehelper() case the parent won't - * really block freeze_processes(), since ____call_usermodehelper() (the child) - * does a little before exec/exit and it can't be frozen before waking up the - * parent. + * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the + * parent won't really block freeze_processes(), since ____call_usermodehelper() + * (the child) does a little before exec/exit and it can't be frozen before + * waking up the parent. */ -/* - * If the current task is a user space one, tell the freezer not to count it as - * freezable. - */ + +/* Tell the freezer not to count the current task as freezable. */ static inline void freezer_do_not_count(void) { - if (current->mm) - current->flags |= PF_FREEZER_SKIP; + current->flags |= PF_FREEZER_SKIP; } /* - * If the current task is a user space one, tell the freezer to count it as - * freezable again and try to freeze it. + * Tell the freezer to count the current task as freezable again and try to + * freeze it. */ static inline void freezer_count(void) { - if (current->mm) { - current->flags &= ~PF_FREEZER_SKIP; - try_to_freeze(); - } + current->flags &= ~PF_FREEZER_SKIP; + try_to_freeze(); } /* @@ -118,21 +99,29 @@ static inline int freezer_should_skip(struct task_struct *p) } /* - * Tell the freezer that the current task should be frozen by it + * These macros are intended to be used whenever you want allow a task that's + * sleeping in TASK_UNINTERRUPTIBLE or TASK_KILLABLE state to be frozen. Note + * that neither return any clear indication of whether a freeze event happened + * while in this function. */ -static inline void set_freezable(void) -{ - current->flags &= ~PF_NOFREEZE; -} -/* - * Tell the freezer that the current task should be frozen by it and that it - * should send a fake signal to the task to freeze it. - */ -static inline void set_freezable_with_signal(void) -{ - current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG); -} +/* Like schedule(), but should not block the freezer. */ +#define freezable_schedule() \ +({ \ + freezer_do_not_count(); \ + schedule(); \ + freezer_count(); \ +}) + +/* Like schedule_timeout_killable(), but should not block the freezer. 
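/*
 * Minimal sketch of a freezer-aware kernel thread using the reworked
 * helpers above (needs <linux/kthread.h> and <linux/freezer.h>); the
 * per-iteration work is a placeholder and the kthread setup is assumed
 * to happen elsewhere.
 */
static int example_kthread(void *unused)
{
	set_freezable();			/* allow the freezer to freeze us */

	while (!kthread_should_stop()) {
		try_to_freeze();		/* enters __refrigerator() if needed */
		/* ... perform one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}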
*/ +#define freezable_schedule_timeout_killable(timeout) \ +({ \ + long __retval; \ + freezer_do_not_count(); \ + __retval = schedule_timeout_killable(timeout); \ + freezer_count(); \ + __retval; \ +}) /* * Freezer-friendly wrappers around wait_event_interruptible(), @@ -152,47 +141,51 @@ static inline void set_freezable_with_signal(void) #define wait_event_freezable(wq, condition) \ ({ \ int __retval; \ - do { \ + for (;;) { \ __retval = wait_event_interruptible(wq, \ (condition) || freezing(current)); \ - if (__retval && !freezing(current)) \ + if (__retval || (condition)) \ break; \ - else if (!(condition)) \ - __retval = -ERESTARTSYS; \ - } while (try_to_freeze()); \ + try_to_freeze(); \ + } \ __retval; \ }) - #define wait_event_freezable_timeout(wq, condition, timeout) \ ({ \ long __retval = timeout; \ - do { \ + for (;;) { \ __retval = wait_event_interruptible_timeout(wq, \ (condition) || freezing(current), \ __retval); \ - } while (try_to_freeze()); \ + if (__retval <= 0 || (condition)) \ + break; \ + try_to_freeze(); \ + } \ __retval; \ }) + #else /* !CONFIG_FREEZER */ -static inline int frozen(struct task_struct *p) { return 0; } -static inline int freezing(struct task_struct *p) { return 0; } -static inline void set_freeze_flag(struct task_struct *p) {} -static inline void clear_freeze_flag(struct task_struct *p) {} -static inline int thaw_process(struct task_struct *p) { return 1; } +static inline bool frozen(struct task_struct *p) { return false; } +static inline bool freezing(struct task_struct *p) { return false; } +static inline void __thaw_task(struct task_struct *t) {} -static inline void refrigerator(void) {} +static inline bool __refrigerator(bool check_kthr_stop) { return false; } static inline int freeze_processes(void) { return -ENOSYS; } static inline int freeze_kernel_threads(void) { return -ENOSYS; } static inline void thaw_processes(void) {} -static inline int try_to_freeze(void) { return 0; } +static inline bool try_to_freeze(void) { return false; } static inline void freezer_do_not_count(void) {} static inline void freezer_count(void) {} static inline int freezer_should_skip(struct task_struct *p) { return 0; } static inline void set_freezable(void) {} -static inline void set_freezable_with_signal(void) {} + +#define freezable_schedule() schedule() + +#define freezable_schedule_timeout_killable(timeout) \ + schedule_timeout_killable(timeout) #define wait_event_freezable(wq, condition) \ wait_event_interruptible(wq, condition) diff --git a/include/linux/fs.h b/include/linux/fs.h index 0c4df261af7..7aacf31418f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -393,8 +393,8 @@ struct inodes_stat_t { #include <linux/semaphore.h> #include <linux/fiemap.h> #include <linux/rculist_bl.h> -#include <linux/shrinker.h> #include <linux/atomic.h> +#include <linux/shrinker.h> #include <asm/byteorder.h> @@ -1428,6 +1428,7 @@ struct super_block { #else struct list_head s_files; #endif + struct list_head s_mounts; /* list of mounts; _not_ for fs use */ /* s_dentry_lru, s_nr_dentry_unused protected by dcache.c lru locks */ struct list_head s_dentry_lru; /* unused dentry lru */ int s_nr_dentry_unused; /* # of dentry on lru */ @@ -1440,7 +1441,7 @@ struct super_block { struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; - struct list_head s_instances; + struct hlist_node s_instances; struct quota_info s_dquot; /* Diskquota specific options */ int s_frozen; @@ -1481,6 +1482,12 @@ struct super_block { int cleancache_poolid; struct shrinker 
s_shrink; /* per-sb shrinker handle */ + + /* Number of inodes with nlink == 0 but still referenced */ + atomic_long_t s_remove_count; + + /* Being remounted read-only */ + int s_readonly_remount; }; /* superblock cache pruning functions */ @@ -1516,9 +1523,9 @@ extern void unlock_super(struct super_block *); /* * VFS helper functions.. */ -extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *); -extern int vfs_mkdir(struct inode *, struct dentry *, int); -extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t); +extern int vfs_create(struct inode *, struct dentry *, umode_t, struct nameidata *); +extern int vfs_mkdir(struct inode *, struct dentry *, umode_t); +extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t); extern int vfs_symlink(struct inode *, struct dentry *, const char *); extern int vfs_link(struct dentry *, struct inode *, struct dentry *); extern int vfs_rmdir(struct inode *, struct dentry *); @@ -1534,7 +1541,7 @@ extern void dentry_unhash(struct dentry *dentry); * VFS file helper functions. */ extern void inode_init_owner(struct inode *inode, const struct inode *dir, - mode_t mode); + umode_t mode); /* * VFS FS_IOC_FIEMAP helper definitions. */ @@ -1619,13 +1626,13 @@ struct inode_operations { int (*readlink) (struct dentry *, char __user *,int); void (*put_link) (struct dentry *, struct nameidata *, void *); - int (*create) (struct inode *,struct dentry *,int, struct nameidata *); + int (*create) (struct inode *,struct dentry *,umode_t,struct nameidata *); int (*link) (struct dentry *,struct inode *,struct dentry *); int (*unlink) (struct inode *,struct dentry *); int (*symlink) (struct inode *,struct dentry *,const char *); - int (*mkdir) (struct inode *,struct dentry *,int); + int (*mkdir) (struct inode *,struct dentry *,umode_t); int (*rmdir) (struct inode *,struct dentry *); - int (*mknod) (struct inode *,struct dentry *,int,dev_t); + int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t); int (*rename) (struct inode *, struct dentry *, struct inode *, struct dentry *); void (*truncate) (struct inode *); @@ -1672,10 +1679,10 @@ struct super_operations { int (*remount_fs) (struct super_block *, int *, char *); void (*umount_begin) (struct super_block *); - int (*show_options)(struct seq_file *, struct vfsmount *); - int (*show_devname)(struct seq_file *, struct vfsmount *); - int (*show_path)(struct seq_file *, struct vfsmount *); - int (*show_stats)(struct seq_file *, struct vfsmount *); + int (*show_options)(struct seq_file *, struct dentry *); + int (*show_devname)(struct seq_file *, struct dentry *); + int (*show_path)(struct seq_file *, struct dentry *); + int (*show_stats)(struct seq_file *, struct dentry *); #ifdef CONFIG_QUOTA ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); @@ -1764,31 +1771,10 @@ static inline void mark_inode_dirty_sync(struct inode *inode) __mark_inode_dirty(inode, I_DIRTY_SYNC); } -/** - * set_nlink - directly set an inode's link count - * @inode: inode - * @nlink: new nlink (should be non-zero) - * - * This is a low-level filesystem helper to replace any - * direct filesystem manipulation of i_nlink. 
- */ -static inline void set_nlink(struct inode *inode, unsigned int nlink) -{ - inode->__i_nlink = nlink; -} - -/** - * inc_nlink - directly increment an inode's link count - * @inode: inode - * - * This is a low-level filesystem helper to replace any - * direct filesystem manipulation of i_nlink. Currently, - * it is only here for parity with dec_nlink(). - */ -static inline void inc_nlink(struct inode *inode) -{ - inode->__i_nlink++; -} +extern void inc_nlink(struct inode *inode); +extern void drop_nlink(struct inode *inode); +extern void clear_nlink(struct inode *inode); +extern void set_nlink(struct inode *inode, unsigned int nlink); static inline void inode_inc_link_count(struct inode *inode) { @@ -1796,35 +1782,6 @@ static inline void inode_inc_link_count(struct inode *inode) mark_inode_dirty(inode); } -/** - * drop_nlink - directly drop an inode's link count - * @inode: inode - * - * This is a low-level filesystem helper to replace any - * direct filesystem manipulation of i_nlink. In cases - * where we are attempting to track writes to the - * filesystem, a decrement to zero means an imminent - * write when the file is truncated and actually unlinked - * on the filesystem. - */ -static inline void drop_nlink(struct inode *inode) -{ - inode->__i_nlink--; -} - -/** - * clear_nlink - directly zero an inode's link count - * @inode: inode - * - * This is a low-level filesystem helper to replace any - * direct filesystem manipulation of i_nlink. See - * drop_nlink() for why we care about i_nlink hitting zero. - */ -static inline void clear_nlink(struct inode *inode) -{ - inode->__i_nlink = 0; -} - static inline void inode_dec_link_count(struct inode *inode) { drop_nlink(inode); @@ -1864,7 +1821,7 @@ struct file_system_type { void (*kill_sb) (struct super_block *); struct module *owner; struct file_system_type * next; - struct list_head fs_supers; + struct hlist_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; @@ -1886,6 +1843,7 @@ extern struct dentry *mount_single(struct file_system_type *fs_type, extern struct dentry *mount_nodev(struct file_system_type *fs_type, int flags, void *data, int (*fill_super)(struct super_block *, void *, int)); +extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path); void generic_shutdown_super(struct super_block *sb); void kill_block_super(struct super_block *sb); void kill_anon_super(struct super_block *sb); @@ -1938,9 +1896,10 @@ extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, extern int vfs_statfs(struct path *, struct kstatfs *); extern int user_statfs(const char __user *, struct kstatfs *); extern int fd_statfs(int, struct kstatfs *); -extern int statfs_by_dentry(struct dentry *, struct kstatfs *); +extern int vfs_ustat(dev_t, struct kstatfs *); extern int freeze_super(struct super_block *super); extern int thaw_super(struct super_block *super); +extern bool our_mnt(struct vfsmount *mnt); extern int current_umask(void); @@ -2052,8 +2011,8 @@ extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, extern int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len); extern long do_sys_open(int dfd, const char __user *filename, int flags, - int mode); -extern struct file *filp_open(const char *, int, int); + umode_t mode); +extern struct file *filp_open(const char *, int, umode_t); extern struct file *file_open_root(struct dentry *, struct vfsmount *, const char *, int); extern struct file * dentry_open(struct dentry *, struct vfsmount *, 
int, @@ -2090,6 +2049,7 @@ extern void bd_forget(struct inode *inode); extern void bdput(struct block_device *); extern void invalidate_bdev(struct block_device *); extern int sync_blockdev(struct block_device *bdev); +extern void kill_bdev(struct block_device *); extern struct super_block *freeze_bdev(struct block_device *); extern void emergency_thaw_all(void); extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); @@ -2097,6 +2057,7 @@ extern int fsync_bdev(struct block_device *); #else static inline void bd_forget(struct inode *inode) {} static inline int sync_blockdev(struct block_device *bdev) { return 0; } +static inline void kill_bdev(struct block_device *bdev) {} static inline void invalidate_bdev(struct block_device *bdev) {} static inline struct super_block *freeze_bdev(struct block_device *sb) @@ -2189,8 +2150,6 @@ extern const struct file_operations read_pipefifo_fops; extern const struct file_operations write_pipefifo_fops; extern const struct file_operations rdwr_pipefifo_fops; -extern int fs_may_remount_ro(struct super_block *); - #ifdef CONFIG_BLOCK /* * return READ, READA, or WRITE @@ -2413,6 +2372,7 @@ extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync); +extern void block_sync_page(struct page *page); /* fs/splice.c */ extern ssize_t generic_file_splice_read(struct file *, loff_t *, @@ -2529,7 +2489,6 @@ extern void put_filesystem(struct file_system_type *fs); extern struct file_system_type *get_fs_type(const char *name); extern struct super_block *get_super(struct block_device *); extern struct super_block *get_active_super(struct block_device *bdev); -extern struct super_block *user_get_super(dev_t); extern void drop_super(struct super_block *sb); extern void iterate_supers(void (*)(struct super_block *, void *), void *); extern void iterate_supers_type(struct file_system_type *, @@ -2588,7 +2547,7 @@ extern void setattr_copy(struct inode *inode, const struct iattr *attr); extern void file_update_time(struct file *file); -extern int generic_show_options(struct seq_file *m, struct vfsmount *mnt); +extern int generic_show_options(struct seq_file *m, struct dentry *root); extern void save_mount_options(struct super_block *sb, char *options); extern void replace_mount_options(struct super_block *sb, char *options); @@ -2689,7 +2648,7 @@ int __init get_filesystem_list(char *buf); #define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \ (flag & __FMODE_NONOTIFY))) -static inline int is_sxid(mode_t mode) +static inline int is_sxid(umode_t mode) { return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP)); } diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 96efa6794ea..c3da42dd22b 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -172,6 +172,7 @@ enum { TRACE_EVENT_FL_FILTERED_BIT, TRACE_EVENT_FL_RECORDED_CMD_BIT, TRACE_EVENT_FL_CAP_ANY_BIT, + TRACE_EVENT_FL_NO_SET_FILTER_BIT, }; enum { @@ -179,6 +180,7 @@ enum { TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT), TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT), + TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), }; struct ftrace_event_call { diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h index 61549b26ad6..73c28dea10a 100644 --- 
a/include/linux/genetlink.h +++ b/include/linux/genetlink.h @@ -85,6 +85,30 @@ enum { /* All generic netlink requests are serialized by a global lock. */ extern void genl_lock(void); extern void genl_unlock(void); +#ifdef CONFIG_PROVE_LOCKING +extern int lockdep_genl_is_held(void); +#endif + +/** + * rcu_dereference_genl - rcu_dereference with debug checking + * @p: The pointer to read, prior to dereferencing + * + * Do an rcu_dereference(p), but check caller either holds rcu_read_lock() + * or genl mutex. Note : Please prefer genl_dereference() or rcu_dereference() + */ +#define rcu_dereference_genl(p) \ + rcu_dereference_check(p, lockdep_genl_is_held()) + +/** + * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex + * @p: The pointer to read, prior to dereferencing + * + * Return the value of the specified RCU-protected pointer, but omit + * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because + * caller holds genl mutex. + */ +#define genl_dereference(p) \ + rcu_dereference_protected(p, lockdep_genl_is_held()) #endif /* __KERNEL__ */ diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 9de31bc98c8..fe23ee76858 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -21,8 +21,6 @@ #define dev_to_part(device) container_of((device), struct hd_struct, __dev) #define disk_to_dev(disk) (&(disk)->part0.__dev) #define part_to_dev(part) (&((part)->__dev)) -#define alias_name(disk) ((disk)->alias ? (disk)->alias : \ - (disk)->disk_name) extern struct device_type part_type; extern struct kobject *block_depr; @@ -60,7 +58,6 @@ enum { #define DISK_MAX_PARTS 256 #define DISK_NAME_LEN 32 -#define ALIAS_LEN 256 #include <linux/major.h> #include <linux/device.h> @@ -166,8 +163,7 @@ struct gendisk { * disks that can't be partitioned. 
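/*
 * Hedged example of the genl_dereference() helper added above;
 * "example_cfg_ptr" stands for an RCU-managed pointer that is only updated
 * under the genl mutex, which the caller is assumed to hold.
 */
struct example_cfg;
static struct example_cfg __rcu *example_cfg_ptr;

static struct example_cfg *example_cfg_get_locked(void)
{
	return genl_dereference(example_cfg_ptr);	/* genl_lock() held */
}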
*/ char disk_name[DISK_NAME_LEN]; /* name of major driver */ - char *alias; /* alias name of disk */ - char *(*devnode)(struct gendisk *gd, mode_t *mode); + char *(*devnode)(struct gendisk *gd, umode_t *mode); unsigned int events; /* supported events */ unsigned int async_events; /* async events, subset of all */ diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index f743883f769..bb7f3097185 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -139,20 +139,7 @@ static inline void account_system_vtime(struct task_struct *tsk) extern void account_system_vtime(struct task_struct *tsk); #endif -#if defined(CONFIG_NO_HZ) #if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) -extern void rcu_enter_nohz(void); -extern void rcu_exit_nohz(void); - -static inline void rcu_irq_enter(void) -{ - rcu_exit_nohz(); -} - -static inline void rcu_irq_exit(void) -{ - rcu_enter_nohz(); -} static inline void rcu_nmi_enter(void) { @@ -163,17 +150,9 @@ static inline void rcu_nmi_exit(void) } #else -extern void rcu_irq_enter(void); -extern void rcu_irq_exit(void); extern void rcu_nmi_enter(void); extern void rcu_nmi_exit(void); #endif -#else -# define rcu_irq_enter() do { } while (0) -# define rcu_irq_exit() do { } while (0) -# define rcu_nmi_enter() do { } while (0) -# define rcu_nmi_exit() do { } while (0) -#endif /* #if defined(CONFIG_NO_HZ) */ /* * It is safe to do non-atomic ops on ->hardirq_context, diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 19644e0016b..d9d6c868b86 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -110,11 +110,6 @@ static inline void copy_huge_page(struct page *dst, struct page *src) #define hugetlb_change_protection(vma, address, end, newprot) -#ifndef HPAGE_MASK -#define HPAGE_MASK PAGE_MASK /* Keep the compiler happy */ -#define HPAGE_SIZE PAGE_SIZE -#endif - #endif /* !CONFIG_HUGETLB_PAGE */ #define HUGETLB_ANON_FILE "anon_hugepage" diff --git a/include/linux/i2c.h b/include/linux/i2c.h index a81bf6d23b3..8e25a9167f1 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -432,9 +432,6 @@ void i2c_unlock_adapter(struct i2c_adapter *); /* Internal numbers to terminate lists */ #define I2C_CLIENT_END 0xfffeU -/* The numbers to use to set I2C bus address */ -#define ANY_I2C_BUS 0xffff - /* Construct an I2C_CLIENT_END-terminated array of i2c addresses */ #define I2C_ADDRS(addr, addrs...) \ ((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END }) @@ -485,6 +482,19 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap) { return adap->nr; } + +/** + * module_i2c_driver() - Helper macro for registering a I2C driver + * @__i2c_driver: i2c_driver struct + * + * Helper macro for I2C drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. 
Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_i2c_driver(__i2c_driver) \ + module_driver(__i2c_driver, i2c_add_driver, \ + i2c_del_driver) + #endif /* I2C */ #endif /* __KERNEL__ */ diff --git a/include/linux/ide.h b/include/linux/ide.h index 42557851b12..501370b61ee 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -920,7 +920,7 @@ __IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL) typedef struct { const char *name; - mode_t mode; + umode_t mode; const struct file_operations *proc_fops; } ide_proc_entry_t; diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 48363c3c40f..210e2c32553 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -128,6 +128,7 @@ #define IEEE80211_QOS_CTL_ACK_POLICY_NOACK 0x0020 #define IEEE80211_QOS_CTL_ACK_POLICY_NO_EXPL 0x0040 #define IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK 0x0060 +#define IEEE80211_QOS_CTL_ACK_POLICY_MASK 0x0060 /* A-MSDU 802.11n */ #define IEEE80211_QOS_CTL_A_MSDU_PRESENT 0x0080 /* Mesh Control 802.11s */ @@ -543,6 +544,15 @@ static inline int ieee80211_is_qos_nullfunc(__le16 fc) cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); } +/** + * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set + * @seq_ctrl: frame sequence control bytes in little-endian byteorder + */ +static inline int ieee80211_is_first_frag(__le16 seq_ctrl) +{ + return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0; +} + struct ieee80211s_hdr { u8 flags; u8 ttl; @@ -770,6 +780,9 @@ struct ieee80211_mgmt { } u; } __attribute__ ((packed)); +/* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */ +#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 + /* mgmt header + 1 byte category code */ #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) @@ -1552,6 +1565,8 @@ enum ieee80211_sa_query_action { #define WLAN_CIPHER_SUITE_WEP104 0x000FAC05 #define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06 +#define WLAN_CIPHER_SUITE_SMS4 0x00147201 + /* AKM suite selectors */ #define WLAN_AKM_SUITE_8021X 0x000FAC01 #define WLAN_AKM_SUITE_PSK 0x000FAC02 @@ -1689,6 +1704,23 @@ static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) } /** + * ieee80211_is_public_action - check if frame is a public action frame + * @hdr: the frame + * @len: length of the frame + */ +static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr, + size_t len) +{ + struct ieee80211_mgmt *mgmt = (void *)hdr; + + if (len < IEEE80211_MIN_ACTION_SIZE) + return false; + if (!ieee80211_is_action(hdr->frame_control)) + return false; + return mgmt->u.action.category == WLAN_CATEGORY_PUBLIC; +} + +/** * ieee80211_fhss_chan_to_freq - get channel frequency * @channel: the FHSS channel * diff --git a/include/linux/if.h b/include/linux/if.h index db20bd4fd16..06b6ef60c82 100644 --- a/include/linux/if.h +++ b/include/linux/if.h @@ -79,6 +79,7 @@ #define IFF_TX_SKB_SHARING 0x10000 /* The interface supports sharing * skbs on transmit */ #define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */ +#define IFF_TEAM_PORT 0x40000 /* device used as team port */ #define IF_GET_IFACE 0x0001 /* for querying only */ #define IF_GET_PROTO 0x0002 diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index e473003e4bd..56d907a2c80 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -79,6 +79,7 @@ #define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ 
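/*
 * Sketch of a driver using the module_i2c_driver() helper introduced
 * earlier in this patch; "example" is a stand-in driver with no callbacks
 * filled in.
 */
static struct i2c_driver example_i2c_driver = {
	.driver = {
		.name	= "example",
	},
	/* .probe, .remove and .id_table would be set by a real driver */
};
module_i2c_driver(example_i2c_driver);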
#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ +#define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. */ #define ETH_P_TIPC 0x88CA /* TIPC */ #define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */ #define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */ diff --git a/include/linux/if_team.h b/include/linux/if_team.h new file mode 100644 index 00000000000..828181fbad5 --- /dev/null +++ b/include/linux/if_team.h @@ -0,0 +1,242 @@ +/* + * include/linux/if_team.h - Network team device driver header + * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _LINUX_IF_TEAM_H_ +#define _LINUX_IF_TEAM_H_ + +#ifdef __KERNEL__ + +struct team_pcpu_stats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_multicast; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; + u32 rx_dropped; + u32 tx_dropped; +}; + +struct team; + +struct team_port { + struct net_device *dev; + struct hlist_node hlist; /* node in hash list */ + struct list_head list; /* node in ordinary list */ + struct team *team; + int index; + + /* + * A place for storing original values of the device before it + * become a port. + */ + struct { + unsigned char dev_addr[MAX_ADDR_LEN]; + unsigned int mtu; + } orig; + + bool linkup; + u32 speed; + u8 duplex; + + struct rcu_head rcu; +}; + +struct team_mode_ops { + int (*init)(struct team *team); + void (*exit)(struct team *team); + rx_handler_result_t (*receive)(struct team *team, + struct team_port *port, + struct sk_buff *skb); + bool (*transmit)(struct team *team, struct sk_buff *skb); + int (*port_enter)(struct team *team, struct team_port *port); + void (*port_leave)(struct team *team, struct team_port *port); + void (*port_change_mac)(struct team *team, struct team_port *port); +}; + +enum team_option_type { + TEAM_OPTION_TYPE_U32, + TEAM_OPTION_TYPE_STRING, +}; + +struct team_option { + struct list_head list; + const char *name; + enum team_option_type type; + int (*getter)(struct team *team, void *arg); + int (*setter)(struct team *team, void *arg); +}; + +struct team_mode { + struct list_head list; + const char *kind; + struct module *owner; + size_t priv_size; + const struct team_mode_ops *ops; +}; + +#define TEAM_PORT_HASHBITS 4 +#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS) + +#define TEAM_MODE_PRIV_LONGS 4 +#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS) + +struct team { + struct net_device *dev; /* associated netdevice */ + struct team_pcpu_stats __percpu *pcpu_stats; + + struct mutex lock; /* used for overall locking, e.g. 
port lists write */ + + /* + * port lists with port count + */ + int port_count; + struct hlist_head port_hlist[TEAM_PORT_HASHENTRIES]; + struct list_head port_list; + + struct list_head option_list; + + const struct team_mode *mode; + struct team_mode_ops ops; + long mode_priv[TEAM_MODE_PRIV_LONGS]; +}; + +static inline struct hlist_head *team_port_index_hash(struct team *team, + int port_index) +{ + return &team->port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)]; +} + +static inline struct team_port *team_get_port_by_index(struct team *team, + int port_index) +{ + struct hlist_node *p; + struct team_port *port; + struct hlist_head *head = team_port_index_hash(team, port_index); + + hlist_for_each_entry(port, p, head, hlist) + if (port->index == port_index) + return port; + return NULL; +} +static inline struct team_port *team_get_port_by_index_rcu(struct team *team, + int port_index) +{ + struct hlist_node *p; + struct team_port *port; + struct hlist_head *head = team_port_index_hash(team, port_index); + + hlist_for_each_entry_rcu(port, p, head, hlist) + if (port->index == port_index) + return port; + return NULL; +} + +extern int team_port_set_team_mac(struct team_port *port); +extern int team_options_register(struct team *team, + const struct team_option *option, + size_t option_count); +extern void team_options_unregister(struct team *team, + const struct team_option *option, + size_t option_count); +extern int team_mode_register(struct team_mode *mode); +extern int team_mode_unregister(struct team_mode *mode); + +#endif /* __KERNEL__ */ + +#define TEAM_STRING_MAX_LEN 32 + +/********************************** + * NETLINK_GENERIC netlink family. + **********************************/ + +enum { + TEAM_CMD_NOOP, + TEAM_CMD_OPTIONS_SET, + TEAM_CMD_OPTIONS_GET, + TEAM_CMD_PORT_LIST_GET, + + __TEAM_CMD_MAX, + TEAM_CMD_MAX = (__TEAM_CMD_MAX - 1), +}; + +enum { + TEAM_ATTR_UNSPEC, + TEAM_ATTR_TEAM_IFINDEX, /* u32 */ + TEAM_ATTR_LIST_OPTION, /* nest */ + TEAM_ATTR_LIST_PORT, /* nest */ + + __TEAM_ATTR_MAX, + TEAM_ATTR_MAX = __TEAM_ATTR_MAX - 1, +}; + +/* Nested layout of get/set msg: + * + * [TEAM_ATTR_LIST_OPTION] + * [TEAM_ATTR_ITEM_OPTION] + * [TEAM_ATTR_OPTION_*], ... + * [TEAM_ATTR_ITEM_OPTION] + * [TEAM_ATTR_OPTION_*], ... + * ... + * [TEAM_ATTR_LIST_PORT] + * [TEAM_ATTR_ITEM_PORT] + * [TEAM_ATTR_PORT_*], ... + * [TEAM_ATTR_ITEM_PORT] + * [TEAM_ATTR_PORT_*], ... + * ... 
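/*
 * Sketch of a team mode built on the interface above; only the transmit
 * hook is shown, it simply forwards every skb through port 0. A real mode
 * would register "example_mode" with team_mode_register() from module init.
 */
static bool example_transmit(struct team *team, struct sk_buff *skb)
{
	struct team_port *port = team_get_port_by_index_rcu(team, 0);

	if (!port)
		return false;
	skb->dev = port->dev;
	return dev_queue_xmit(skb) == 0;
}

static const struct team_mode_ops example_mode_ops = {
	.transmit	= example_transmit,
};

static struct team_mode example_mode = {
	.kind		= "example",
	.owner		= THIS_MODULE,
	.ops		= &example_mode_ops,
};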
+ */ + +enum { + TEAM_ATTR_ITEM_OPTION_UNSPEC, + TEAM_ATTR_ITEM_OPTION, /* nest */ + + __TEAM_ATTR_ITEM_OPTION_MAX, + TEAM_ATTR_ITEM_OPTION_MAX = __TEAM_ATTR_ITEM_OPTION_MAX - 1, +}; + +enum { + TEAM_ATTR_OPTION_UNSPEC, + TEAM_ATTR_OPTION_NAME, /* string */ + TEAM_ATTR_OPTION_CHANGED, /* flag */ + TEAM_ATTR_OPTION_TYPE, /* u8 */ + TEAM_ATTR_OPTION_DATA, /* dynamic */ + + __TEAM_ATTR_OPTION_MAX, + TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1, +}; + +enum { + TEAM_ATTR_ITEM_PORT_UNSPEC, + TEAM_ATTR_ITEM_PORT, /* nest */ + + __TEAM_ATTR_ITEM_PORT_MAX, + TEAM_ATTR_ITEM_PORT_MAX = __TEAM_ATTR_ITEM_PORT_MAX - 1, +}; + +enum { + TEAM_ATTR_PORT_UNSPEC, + TEAM_ATTR_PORT_IFINDEX, /* u32 */ + TEAM_ATTR_PORT_CHANGED, /* flag */ + TEAM_ATTR_PORT_LINKUP, /* flag */ + TEAM_ATTR_PORT_SPEED, /* u32 */ + TEAM_ATTR_PORT_DUPLEX, /* u8 */ + + __TEAM_ATTR_PORT_MAX, + TEAM_ATTR_PORT_MAX = __TEAM_ATTR_PORT_MAX - 1, +}; + +/* + * NETLINK_GENERIC related info + */ +#define TEAM_GENL_NAME "team" +#define TEAM_GENL_VERSION 0x1 +#define TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME "change_event" + +#endif /* _LINUX_IF_TEAM_H_ */ diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 12d5543b14f..13aff1e2183 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -74,22 +74,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) /* found in socket.c */ extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); -/* if this changes, algorithm will have to be reworked because this - * depends on completely exhausting the VLAN identifier space. Thus - * it gives constant time look-up, but in many cases it wastes memory. - */ -#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8 -#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS) - -struct vlan_group { - struct net_device *real_dev; /* The ethernet(like) device - * the vlan is attached to. 
- */ - unsigned int nr_vlans; - struct hlist_node hlist; /* linked list */ - struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS]; - struct rcu_head rcu; -}; +struct vlan_info; static inline int is_vlan_dev(struct net_device *dev) { @@ -109,6 +94,13 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev); extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler); extern struct sk_buff *vlan_untag(struct sk_buff *skb); +extern int vlan_vid_add(struct net_device *dev, unsigned short vid); +extern void vlan_vid_del(struct net_device *dev, unsigned short vid); + +extern int vlan_vids_add_by_dev(struct net_device *dev, + const struct net_device *by_dev); +extern void vlan_vids_del_by_dev(struct net_device *dev, + const struct net_device *by_dev); #else static inline struct net_device * __vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id) @@ -139,6 +131,26 @@ static inline struct sk_buff *vlan_untag(struct sk_buff *skb) { return skb; } + +static inline int vlan_vid_add(struct net_device *dev, unsigned short vid) +{ + return 0; +} + +static inline void vlan_vid_del(struct net_device *dev, unsigned short vid) +{ +} + +static inline int vlan_vids_add_by_dev(struct net_device *dev, + const struct net_device *by_dev) +{ + return 0; +} + +static inline void vlan_vids_del_by_dev(struct net_device *dev, + const struct net_device *by_dev) +{ +} #endif /** @@ -310,6 +322,40 @@ static inline __be16 vlan_get_protocol(const struct sk_buff *skb) return protocol; } + +static inline void vlan_set_encap_proto(struct sk_buff *skb, + struct vlan_hdr *vhdr) +{ + __be16 proto; + unsigned char *rawp; + + /* + * Was a VLAN packet, grab the encapsulated protocol, which the layer + * three protocols care about. + */ + + proto = vhdr->h_vlan_encapsulated_proto; + if (ntohs(proto) >= 1536) { + skb->protocol = proto; + return; + } + + rawp = skb->data; + if (*(unsigned short *) rawp == 0xFFFF) + /* + * This is a magic hack to spot IPX packets. Older Novell + * breaks the protocol design and runs IPX over 802.3 without + * an 802.2 LLC layer. We look for FFFF which isn't a used + * 802.2 SSAP/DSAP. This won't work for fault tolerant netware + * but does for the rest. + */ + skb->protocol = htons(ETH_P_802_3); + else + /* + * Real 802.2 LLC + */ + skb->protocol = htons(ETH_P_802_2); +} #endif /* __KERNEL__ */ /* VLAN IOCTLs are found in sockios.h */ @@ -352,7 +398,7 @@ struct vlan_ioctl_args { unsigned int skb_priority; unsigned int name_type; unsigned int bind_type; - unsigned int flag; /* Matches vlan_dev_info flags */ + unsigned int flag; /* Matches vlan_dev_priv flags */ } u; short vlan_qos; diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h index 80b480c9753..34e8d52c192 100644 --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h @@ -22,7 +22,7 @@ struct inet_diag_sockid { /* Request structure */ -struct inet_diag_req { +struct inet_diag_req_compat { __u8 idiag_family; /* Family of addresses. 
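/*
 * Sketch of an upper device (e.g. a bonding/team-like master) propagating
 * its VLAN IDs to a newly added lower device with the helpers above; the
 * error path is assumed to mirror this with vlan_vids_del_by_dev().
 */
static int example_enslave_vlans(struct net_device *lower,
				 const struct net_device *master)
{
	return vlan_vids_add_by_dev(lower, master);
}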
*/ __u8 idiag_src_len; __u8 idiag_dst_len; @@ -34,6 +34,15 @@ struct inet_diag_req { __u32 idiag_dbs; /* Tables to dump (NI) */ }; +struct inet_diag_req { + __u8 sdiag_family; + __u8 sdiag_protocol; + __u8 idiag_ext; + __u8 pad; + __u32 idiag_states; + struct inet_diag_sockid id; +}; + enum { INET_DIAG_REQ_NONE, INET_DIAG_REQ_BYTECODE, @@ -98,9 +107,11 @@ enum { INET_DIAG_VEGASINFO, INET_DIAG_CONG, INET_DIAG_TOS, + INET_DIAG_TCLASS, + INET_DIAG_SKMEMINFO, }; -#define INET_DIAG_MAX INET_DIAG_TOS +#define INET_DIAG_MAX INET_DIAG_SKMEMINFO /* INET_DIAG_MEM */ @@ -124,16 +135,41 @@ struct tcpvegas_info { #ifdef __KERNEL__ struct sock; struct inet_hashinfo; +struct nlattr; +struct nlmsghdr; +struct sk_buff; +struct netlink_callback; struct inet_diag_handler { - struct inet_hashinfo *idiag_hashinfo; + void (*dump)(struct sk_buff *skb, + struct netlink_callback *cb, + struct inet_diag_req *r, + struct nlattr *bc); + + int (*dump_one)(struct sk_buff *in_skb, + const struct nlmsghdr *nlh, + struct inet_diag_req *req); + void (*idiag_get_info)(struct sock *sk, struct inet_diag_msg *r, void *info); - __u16 idiag_info_size; __u16 idiag_type; }; +struct inet_connection_sock; +int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, + struct sk_buff *skb, struct inet_diag_req *req, + u32 pid, u32 seq, u16 nlmsg_flags, + const struct nlmsghdr *unlh); +void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb, + struct netlink_callback *cb, struct inet_diag_req *r, + struct nlattr *bc); +int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, + struct sk_buff *in_skb, const struct nlmsghdr *nlh, + struct inet_diag_req *req); + +int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk); + extern int inet_diag_register(const struct inet_diag_handler *handler); extern void inet_diag_unregister(const struct inet_diag_handler *handler); #endif /* __KERNEL__ */ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 08ffab01e76..32574eef939 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -126,6 +126,8 @@ extern struct cred init_cred; # define INIT_PERF_EVENTS(tsk) #endif +#define INIT_TASK_COMM "swapper" + /* * INIT_TASK is used to set up the first task table, touch at * your own risk!. 
Base=0, limit=0x1fffff (=2MB) @@ -162,7 +164,7 @@ extern struct cred init_cred; .group_leader = &tsk, \ RCU_INIT_POINTER(.real_cred, &init_cred), \ RCU_INIT_POINTER(.cred, &init_cred), \ - .comm = "swapper", \ + .comm = INIT_TASK_COMM, \ .thread = INIT_THREAD, \ .fs = &init_fs, \ .files = &init_files, \ @@ -184,7 +186,6 @@ extern struct cred init_cred; [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ }, \ .thread_group = LIST_HEAD_INIT(tsk.thread_group), \ - .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ INIT_IDS \ INIT_PERF_EVENTS(tsk) \ INIT_TRACE_IRQFLAGS \ diff --git a/include/linux/ipc.h b/include/linux/ipc.h index 3b1594d662b..30e816148df 100644 --- a/include/linux/ipc.h +++ b/include/linux/ipc.h @@ -93,7 +93,7 @@ struct kern_ipc_perm gid_t gid; uid_t cuid; gid_t cgid; - mode_t mode; + umode_t mode; unsigned long seq; void *security; }; diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 0c997767429..6318268dcaf 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -404,7 +404,7 @@ struct tcp6_sock { extern int inet6_sk_rebuild_header(struct sock *sk); -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk) { return inet_sk(__sk)->pinet6; @@ -515,7 +515,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk) #define inet6_rcv_saddr(__sk) NULL #define tcp_twsk_ipv6only(__sk) 0 #define inet_v6_ipv6only(__sk) 0 -#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ +#endif /* IS_ENABLED(CONFIG_IPV6) */ #define INET6_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif)\ (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net) && \ diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 99834e581b9..bd4272b61a1 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -91,10 +91,11 @@ static inline unsigned int irq_domain_to_irq(struct irq_domain *d, extern void irq_domain_add(struct irq_domain *domain); extern void irq_domain_del(struct irq_domain *domain); + +extern struct irq_domain_ops irq_domain_simple_ops; #endif /* CONFIG_IRQ_DOMAIN */ #if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ) -extern struct irq_domain_ops irq_domain_simple_ops; extern void irq_domain_add_simple(struct device_node *controller, int irq_base); extern void irq_domain_generate_simple(const struct of_device_id *match, u64 phys_base, unsigned int irq_start); diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h index f0a2f8b0aa1..2a8b1659bf3 100644 --- a/include/linux/iscsi_boot_sysfs.h +++ b/include/linux/iscsi_boot_sysfs.h @@ -91,7 +91,7 @@ struct iscsi_boot_kobj { * The enum of the type. This can be any value of the above * properties. */ - mode_t (*is_visible) (void *data, int type); + umode_t (*is_visible) (void *data, int type); /* * Driver specific release function. 
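The ipv6.h hunk above (and the later lockd.h hunk) replaces the two-macro test defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) with IS_ENABLED(CONFIG_IPV6) from linux/kconfig.h, which is true when the option is built in or modular. A before/after sketch; the EXAMPLE_HAVE_IPV6 macro is purely illustrative and not part of the commit:

#include <linux/kconfig.h>      /* IS_ENABLED() */

/* Old spelling, as removed by the hunk above: test the built-in and
 * modular configurations separately. */
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define EXAMPLE_HAVE_IPV6 1     /* illustrative macro */
#endif

/* New spelling: one test covers CONFIG_IPV6=y and CONFIG_IPV6=m. */
#if IS_ENABLED(CONFIG_IPV6)
#define EXAMPLE_HAVE_IPV6 1
#endif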
@@ -110,20 +110,20 @@ struct iscsi_boot_kobj * iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index, void *data, ssize_t (*show) (void *data, int type, char *buf), - mode_t (*is_visible) (void *data, int type), + umode_t (*is_visible) (void *data, int type), void (*release) (void *data)); struct iscsi_boot_kobj * iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index, void *data, ssize_t (*show) (void *data, int type, char *buf), - mode_t (*is_visible) (void *data, int type), + umode_t (*is_visible) (void *data, int type), void (*release) (void *data)); struct iscsi_boot_kobj * iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index, void *data, ssize_t (*show) (void *data, int type, char *buf), - mode_t (*is_visible) (void *data, int type), + umode_t (*is_visible) (void *data, int type), void (*release) (void *data)); struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name); diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 388b0d425b5..5ce8b140428 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -3,6 +3,7 @@ #include <linux/types.h> #include <linux/compiler.h> +#include <linux/workqueue.h> #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) @@ -14,6 +15,12 @@ struct jump_label_key { #endif }; +struct jump_label_key_deferred { + struct jump_label_key key; + unsigned long timeout; + struct delayed_work work; +}; + # include <asm/jump_label.h> # define HAVE_JUMP_LABEL #endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */ @@ -51,8 +58,11 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry, extern int jump_label_text_reserved(void *start, void *end); extern void jump_label_inc(struct jump_label_key *key); extern void jump_label_dec(struct jump_label_key *key); +extern void jump_label_dec_deferred(struct jump_label_key_deferred *key); extern bool jump_label_enabled(struct jump_label_key *key); extern void jump_label_apply_nops(struct module *mod); +extern void jump_label_rate_limit(struct jump_label_key_deferred *key, + unsigned long rl); #else /* !HAVE_JUMP_LABEL */ @@ -68,6 +78,10 @@ static __always_inline void jump_label_init(void) { } +struct jump_label_key_deferred { + struct jump_label_key key; +}; + static __always_inline bool static_branch(struct jump_label_key *key) { if (unlikely(atomic_read(&key->enabled))) @@ -85,6 +99,11 @@ static inline void jump_label_dec(struct jump_label_key *key) atomic_dec(&key->enabled); } +static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key) +{ + jump_label_dec(&key->key); +} + static inline int jump_label_text_reserved(void *start, void *end) { return 0; @@ -102,6 +121,14 @@ static inline int jump_label_apply_nops(struct module *mod) { return 0; } + +static inline void jump_label_rate_limit(struct jump_label_key_deferred *key, + unsigned long rl) +{ +} #endif /* HAVE_JUMP_LABEL */ +#define jump_label_key_enabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(1), }) +#define jump_label_key_disabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(0), }) + #endif /* _LINUX_JUMP_LABEL_H */ diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 0cce2db580c..2fbd9053c2d 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -6,6 +6,7 @@ #include <linux/percpu.h> #include <linux/cpumask.h> #include <linux/interrupt.h> +#include <linux/sched.h> #include <asm/irq.h> #include <asm/cputime.h> @@ -15,21 +16,25 @@ * used by rstatd/perfmeter */ -struct 
cpu_usage_stat { - cputime64_t user; - cputime64_t nice; - cputime64_t system; - cputime64_t softirq; - cputime64_t irq; - cputime64_t idle; - cputime64_t iowait; - cputime64_t steal; - cputime64_t guest; - cputime64_t guest_nice; +enum cpu_usage_stat { + CPUTIME_USER, + CPUTIME_NICE, + CPUTIME_SYSTEM, + CPUTIME_SOFTIRQ, + CPUTIME_IRQ, + CPUTIME_IDLE, + CPUTIME_IOWAIT, + CPUTIME_STEAL, + CPUTIME_GUEST, + CPUTIME_GUEST_NICE, + NR_STATS, +}; + +struct kernel_cpustat { + u64 cpustat[NR_STATS]; }; struct kernel_stat { - struct cpu_usage_stat cpustat; #ifndef CONFIG_GENERIC_HARDIRQS unsigned int irqs[NR_IRQS]; #endif @@ -38,10 +43,13 @@ struct kernel_stat { }; DECLARE_PER_CPU(struct kernel_stat, kstat); +DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat); -#define kstat_cpu(cpu) per_cpu(kstat, cpu) /* Must have preemption disabled for this to be meaningful. */ -#define kstat_this_cpu __get_cpu_var(kstat) +#define kstat_this_cpu (&__get_cpu_var(kstat)) +#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat)) +#define kstat_cpu(cpu) per_cpu(kstat, cpu) +#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu) extern unsigned long long nr_context_switches(void); diff --git a/include/linux/kmod.h b/include/linux/kmod.h index b16f6539073..722f477c4ef 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h @@ -117,5 +117,7 @@ extern void usermodehelper_init(void); extern int usermodehelper_disable(void); extern void usermodehelper_enable(void); extern bool usermodehelper_is_disabled(void); +extern void read_lock_usermodehelper(void); +extern void read_unlock_usermodehelper(void); #endif /* __LINUX_KMOD_H__ */ diff --git a/include/linux/kobject.h b/include/linux/kobject.h index ad81e1c5148..fc615a97e2d 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -191,8 +191,6 @@ static inline struct kobj_type *get_ktype(struct kobject *kobj) } extern struct kobject *kset_find_obj(struct kset *, const char *); -extern struct kobject *kset_find_obj_hinted(struct kset *, const char *, - struct kobject *); /* The global /sys/kernel/ kobject for people to chain off of */ extern struct kobject *kernel_kobj; diff --git a/include/linux/kref.h b/include/linux/kref.h index d4a62ab2ee5..abc0120b09b 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h @@ -15,16 +15,81 @@ #ifndef _KREF_H_ #define _KREF_H_ -#include <linux/types.h> +#include <linux/bug.h> +#include <linux/atomic.h> struct kref { atomic_t refcount; }; -void kref_init(struct kref *kref); -void kref_get(struct kref *kref); -int kref_put(struct kref *kref, void (*release) (struct kref *kref)); -int kref_sub(struct kref *kref, unsigned int count, - void (*release) (struct kref *kref)); +/** + * kref_init - initialize object. + * @kref: object in question. + */ +static inline void kref_init(struct kref *kref) +{ + atomic_set(&kref->refcount, 1); +} + +/** + * kref_get - increment refcount for object. + * @kref: object. + */ +static inline void kref_get(struct kref *kref) +{ + WARN_ON(!atomic_read(&kref->refcount)); + atomic_inc(&kref->refcount); +} +/** + * kref_sub - subtract a number of refcounts for object. + * @kref: object. + * @count: Number of recounts to subtract. + * @release: pointer to the function that will clean up the object when the + * last reference to the object is released. + * This pointer is required, and it is not acceptable to pass kfree + * in as this function. 
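The kernel_stat.h hunk above replaces the per-field struct cpu_usage_stat with a plain u64 array indexed by enum cpu_usage_stat, so per-CPU time buckets are now reached through kcpustat_cpu()/kcpustat_this_cpu rather than kstat_cpu(cpu).cpustat.<field>. A minimal sketch of a reader under the new layout; the summing helper itself is illustrative, not part of the commit:

#include <linux/kernel_stat.h>

/* Hypothetical helper: idle + iowait time for one CPU, using the
 * array-indexed layout introduced in the hunk above. */
static u64 example_cpu_idle_time(int cpu)
{
        struct kernel_cpustat *kcs = &kcpustat_cpu(cpu);

        return kcs->cpustat[CPUTIME_IDLE] + kcs->cpustat[CPUTIME_IOWAIT];
}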
If the caller does pass kfree to this + * function, you will be publicly mocked mercilessly by the kref + * maintainer, and anyone else who happens to notice it. You have + * been warned. + * + * Subtract @count from the refcount, and if 0, call release(). + * Return 1 if the object was removed, otherwise return 0. Beware, if this + * function returns 0, you still can not count on the kref from remaining in + * memory. Only use the return value if you want to see if the kref is now + * gone, not present. + */ +static inline int kref_sub(struct kref *kref, unsigned int count, + void (*release)(struct kref *kref)) +{ + WARN_ON(release == NULL); + + if (atomic_sub_and_test((int) count, &kref->refcount)) { + release(kref); + return 1; + } + return 0; +} + +/** + * kref_put - decrement refcount for object. + * @kref: object. + * @release: pointer to the function that will clean up the object when the + * last reference to the object is released. + * This pointer is required, and it is not acceptable to pass kfree + * in as this function. If the caller does pass kfree to this + * function, you will be publicly mocked mercilessly by the kref + * maintainer, and anyone else who happens to notice it. You have + * been warned. + * + * Decrement the refcount, and if 0, call release(). + * Return 1 if the object was removed, otherwise return 0. Beware, if this + * function returns 0, you still can not count on the kref from remaining in + * memory. Only use the return value if you want to see if the kref is now + * gone, not present. + */ +static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)) +{ + return kref_sub(kref, 1, release); +} #endif /* _KREF_H_ */ diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 5cac19b3a26..0714b24c0e4 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -35,6 +35,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), void kthread_bind(struct task_struct *k, unsigned int cpu); int kthread_stop(struct task_struct *k); int kthread_should_stop(void); +bool kthread_freezable_should_stop(bool *was_frozen); void *kthread_data(struct task_struct *k); int kthreadd(void *unused); diff --git a/include/linux/kvm.h b/include/linux/kvm.h index f47fcd30273..68e67e50d02 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -555,9 +555,9 @@ struct kvm_ppc_pvinfo { #define KVM_CAP_PPC_SMT 64 #define KVM_CAP_PPC_RMA 65 #define KVM_CAP_MAX_VCPUS 66 /* returns max vcpus per vm */ -#define KVM_CAP_PPC_HIOR 67 #define KVM_CAP_PPC_PAPR 68 #define KVM_CAP_S390_GMAP 71 +#define KVM_CAP_TSC_DEADLINE_TIMER 72 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h index b0e99898527..e23121f9d82 100644 --- a/include/linux/latencytop.h +++ b/include/linux/latencytop.h @@ -10,6 +10,8 @@ #define _INCLUDE_GUARD_LATENCYTOP_H_ #include <linux/compiler.h> +struct task_struct; + #ifdef CONFIG_LATENCYTOP #define LT_SAVECOUNT 32 @@ -23,7 +25,6 @@ struct latency_record { }; -struct task_struct; extern int latencytop_enabled; void __account_scheduler_latency(struct task_struct *task, int usecs, int inter); diff --git a/include/linux/lglock.h b/include/linux/lglock.h index f549056fb20..87f402ccec5 100644 --- a/include/linux/lglock.h +++ b/include/linux/lglock.h @@ -22,6 +22,7 @@ #include <linux/spinlock.h> #include <linux/lockdep.h> #include <linux/percpu.h> +#include <linux/cpu.h> /* can make br locks by using local lock for read side, global lock for write */ #define 
br_lock_init(name) name##_lock_init() @@ -72,9 +73,31 @@ #define DEFINE_LGLOCK(name) \ \ + DEFINE_SPINLOCK(name##_cpu_lock); \ + cpumask_t name##_cpus __read_mostly; \ DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \ DEFINE_LGLOCK_LOCKDEP(name); \ \ + static int \ + name##_lg_cpu_callback(struct notifier_block *nb, \ + unsigned long action, void *hcpu) \ + { \ + switch (action & ~CPU_TASKS_FROZEN) { \ + case CPU_UP_PREPARE: \ + spin_lock(&name##_cpu_lock); \ + cpu_set((unsigned long)hcpu, name##_cpus); \ + spin_unlock(&name##_cpu_lock); \ + break; \ + case CPU_UP_CANCELED: case CPU_DEAD: \ + spin_lock(&name##_cpu_lock); \ + cpu_clear((unsigned long)hcpu, name##_cpus); \ + spin_unlock(&name##_cpu_lock); \ + } \ + return NOTIFY_OK; \ + } \ + static struct notifier_block name##_lg_cpu_notifier = { \ + .notifier_call = name##_lg_cpu_callback, \ + }; \ void name##_lock_init(void) { \ int i; \ LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \ @@ -83,6 +106,11 @@ lock = &per_cpu(name##_lock, i); \ *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \ } \ + register_hotcpu_notifier(&name##_lg_cpu_notifier); \ + get_online_cpus(); \ + for_each_online_cpu(i) \ + cpu_set(i, name##_cpus); \ + put_online_cpus(); \ } \ EXPORT_SYMBOL(name##_lock_init); \ \ @@ -124,9 +152,9 @@ \ void name##_global_lock_online(void) { \ int i; \ - preempt_disable(); \ + spin_lock(&name##_cpu_lock); \ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ - for_each_online_cpu(i) { \ + for_each_cpu(i, &name##_cpus) { \ arch_spinlock_t *lock; \ lock = &per_cpu(name##_lock, i); \ arch_spin_lock(lock); \ @@ -137,12 +165,12 @@ void name##_global_unlock_online(void) { \ int i; \ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ - for_each_online_cpu(i) { \ + for_each_cpu(i, &name##_cpus) { \ arch_spinlock_t *lock; \ lock = &per_cpu(name##_lock, i); \ arch_spin_unlock(lock); \ } \ - preempt_enable(); \ + spin_unlock(&name##_cpu_lock); \ } \ EXPORT_SYMBOL(name##_global_unlock_online); \ \ diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index ff9abff55aa..90b0656a869 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -301,7 +301,7 @@ static inline int __nlm_privileged_request4(const struct sockaddr *sap) return ipv4_is_loopback(sin->sin_addr.s_addr); } -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) static inline int __nlm_privileged_request6(const struct sockaddr *sap) { const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; @@ -314,12 +314,12 @@ static inline int __nlm_privileged_request6(const struct sockaddr *sap) return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK; } -#else /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ +#else /* IS_ENABLED(CONFIG_IPV6) */ static inline int __nlm_privileged_request6(const struct sockaddr *sap) { return 0; } -#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ +#endif /* IS_ENABLED(CONFIG_IPV6) */ /* * Ensure incoming requests are from local privileged callers. 
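Stepping back to the kref.h hunk above: kref_init(), kref_get(), kref_put() and kref_sub() become static inlines over atomic_t with unchanged semantics, so the out-of-line kref implementation goes away while callers stay untouched. A minimal usage sketch under the inline API; struct foo and its helpers are illustrative:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
        struct kref refcount;
        /* ... payload ... */
};

static void foo_release(struct kref *kref)
{
        struct foo *f = container_of(kref, struct foo, refcount);

        kfree(f);       /* a real release function, never bare kfree */
}

static struct foo *foo_alloc(void)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (f)
                kref_init(&f->refcount);        /* refcount starts at 1 */
        return f;
}

static void foo_get(struct foo *f)
{
        kref_get(&f->refcount);
}

static void foo_put(struct foo *f)
{
        kref_put(&f->refcount, foo_release);    /* frees on the last put */
}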
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index b6a56e37284..d36619ead3b 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -343,6 +343,8 @@ extern void lockdep_trace_alloc(gfp_t mask); #define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l)) +#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) + #else /* !LOCKDEP */ static inline void lockdep_off(void) @@ -392,6 +394,8 @@ struct lock_class_key { }; #define lockdep_assert_held(l) do { } while (0) +#define lockdep_recursing(tsk) (0) + #endif /* !LOCKDEP */ #ifdef CONFIG_LOCK_STAT diff --git a/include/linux/log2.h b/include/linux/log2.h index 25b808631cd..fd7ff3d91e6 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -185,7 +185,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n) #define rounddown_pow_of_two(n) \ ( \ __builtin_constant_p(n) ? ( \ - (n == 1) ? 0 : \ (1UL << ilog2(n))) : \ __rounddown_pow_of_two(n) \ ) diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h index 0fe00cd4c93..76f52bbbb2f 100644 --- a/include/linux/mdio-bitbang.h +++ b/include/linux/mdio-bitbang.h @@ -32,6 +32,8 @@ struct mdiobb_ops { struct mdiobb_ctrl { const struct mdiobb_ops *ops; + /* reset callback */ + int (*reset)(struct mii_bus *bus); }; /* The returned bus is not yet registered with the phy layer. */ diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h index e9d3fdfe41d..7c9fe3c2be7 100644 --- a/include/linux/mdio-gpio.h +++ b/include/linux/mdio-gpio.h @@ -20,6 +20,8 @@ struct mdio_gpio_platform_data { unsigned int phy_mask; int irqs[PHY_MAX_ADDR]; + /* reset callback */ + int (*reset)(struct mii_bus *bus); }; #endif /* __LINUX_MDIO_GPIO_H */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index e6b843e16e8..a6bb1023514 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -2,8 +2,6 @@ #define _LINUX_MEMBLOCK_H #ifdef __KERNEL__ -#define MEMBLOCK_ERROR 0 - #ifdef CONFIG_HAVE_MEMBLOCK /* * Logical memory blocks. @@ -19,81 +17,161 @@ #include <linux/init.h> #include <linux/mm.h> -#include <asm/memblock.h> - #define INIT_MEMBLOCK_REGIONS 128 struct memblock_region { phys_addr_t base; phys_addr_t size; +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP + int nid; +#endif }; struct memblock_type { unsigned long cnt; /* number of regions */ unsigned long max; /* size of the allocated array */ + phys_addr_t total_size; /* size of all regions */ struct memblock_region *regions; }; struct memblock { phys_addr_t current_limit; - phys_addr_t memory_size; /* Updated by memblock_analyze() */ struct memblock_type memory; struct memblock_type reserved; }; extern struct memblock memblock; extern int memblock_debug; -extern int memblock_can_resize; #define memblock_dbg(fmt, ...) 
\ if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) -u64 memblock_find_in_range(u64 start, u64 end, u64 size, u64 align); +phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end, + phys_addr_t size, phys_addr_t align, int nid); +phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, + phys_addr_t size, phys_addr_t align); int memblock_free_reserved_regions(void); int memblock_reserve_reserved_regions(void); -extern void memblock_init(void); -extern void memblock_analyze(void); -extern long memblock_add(phys_addr_t base, phys_addr_t size); -extern long memblock_remove(phys_addr_t base, phys_addr_t size); -extern long memblock_free(phys_addr_t base, phys_addr_t size); -extern long memblock_reserve(phys_addr_t base, phys_addr_t size); +void memblock_allow_resize(void); +int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid); +int memblock_add(phys_addr_t base, phys_addr_t size); +int memblock_remove(phys_addr_t base, phys_addr_t size); +int memblock_free(phys_addr_t base, phys_addr_t size); +int memblock_reserve(phys_addr_t base, phys_addr_t size); + +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, + unsigned long *out_end_pfn, int *out_nid); + +/** + * for_each_mem_pfn_range - early memory pfn range iterator + * @i: an integer used as loop variable + * @nid: node selector, %MAX_NUMNODES for all nodes + * @p_start: ptr to ulong for start pfn of the range, can be %NULL + * @p_end: ptr to ulong for end pfn of the range, can be %NULL + * @p_nid: ptr to int for nid of the range, can be %NULL + * + * Walks over configured memory ranges. Available after early_node_map is + * populated. + */ +#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \ + for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \ + i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid)) +#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ + +void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start, + phys_addr_t *out_end, int *out_nid); + +/** + * for_each_free_mem_range - iterate through free memblock areas + * @i: u64 used as loop variable + * @nid: node selector, %MAX_NUMNODES for all nodes + * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL + * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL + * @p_nid: ptr to int for nid of the range, can be %NULL + * + * Walks over free (memory && !reserved) areas of memblock. Available as + * soon as memblock is initialized. + */ +#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \ + for (i = 0, \ + __next_free_mem_range(&i, nid, p_start, p_end, p_nid); \ + i != (u64)ULLONG_MAX; \ + __next_free_mem_range(&i, nid, p_start, p_end, p_nid)) + +void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start, + phys_addr_t *out_end, int *out_nid); -/* The numa aware allocator is only available if - * CONFIG_ARCH_POPULATES_NODE_MAP is set +/** + * for_each_free_mem_range_reverse - rev-iterate through free memblock areas + * @i: u64 used as loop variable + * @nid: node selector, %MAX_NUMNODES for all nodes + * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL + * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL + * @p_nid: ptr to int for nid of the range, can be %NULL + * + * Walks over free (memory && !reserved) areas of memblock in reverse + * order. Available as soon as memblock is initialized. 
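The memblock.h rework above also adds the free-range iterators used by the no-bootmem code: for_each_free_mem_range() visits every (memory && !reserved) region, optionally filtered by node. A sketch of an early-boot consumer; the summing helper is illustrative, not part of the commit:

#include <linux/memblock.h>

/* Hypothetical early-boot helper: total free (memory && !reserved) bytes,
 * walked with the iterator introduced in the hunk above. */
static phys_addr_t __init example_free_memory(void)
{
        phys_addr_t start, end, total = 0;
        u64 i;

        for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
                total += end - start;

        return total;
}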
*/ -extern phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, - int nid); -extern phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, - int nid); +#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \ + for (i = (u64)ULLONG_MAX, \ + __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid); \ + i != (u64)ULLONG_MAX; \ + __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid)) -extern phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align); +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +int memblock_set_node(phys_addr_t base, phys_addr_t size, int nid); + +static inline void memblock_set_region_node(struct memblock_region *r, int nid) +{ + r->nid = nid; +} + +static inline int memblock_get_region_node(const struct memblock_region *r) +{ + return r->nid; +} +#else +static inline void memblock_set_region_node(struct memblock_region *r, int nid) +{ +} + +static inline int memblock_get_region_node(const struct memblock_region *r) +{ + return 0; +} +#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ + +phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid); +phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); + +phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align); /* Flags for memblock_alloc_base() amd __memblock_alloc_base() */ #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) #define MEMBLOCK_ALLOC_ACCESSIBLE 0 -extern phys_addr_t memblock_alloc_base(phys_addr_t size, - phys_addr_t align, - phys_addr_t max_addr); -extern phys_addr_t __memblock_alloc_base(phys_addr_t size, - phys_addr_t align, - phys_addr_t max_addr); -extern phys_addr_t memblock_phys_mem_size(void); -extern phys_addr_t memblock_start_of_DRAM(void); -extern phys_addr_t memblock_end_of_DRAM(void); -extern void memblock_enforce_memory_limit(phys_addr_t memory_limit); -extern int memblock_is_memory(phys_addr_t addr); -extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); -extern int memblock_is_reserved(phys_addr_t addr); -extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); - -extern void memblock_dump_all(void); - -/* Provided by the architecture */ -extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid); -extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, - phys_addr_t addr2, phys_addr_t size2); +phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align, + phys_addr_t max_addr); +phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align, + phys_addr_t max_addr); +phys_addr_t memblock_phys_mem_size(void); +phys_addr_t memblock_start_of_DRAM(void); +phys_addr_t memblock_end_of_DRAM(void); +void memblock_enforce_memory_limit(phys_addr_t memory_limit); +int memblock_is_memory(phys_addr_t addr); +int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); +int memblock_is_reserved(phys_addr_t addr); +int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); + +extern void __memblock_dump_all(void); + +static inline void memblock_dump_all(void) +{ + if (memblock_debug) + __memblock_dump_all(); +} /** * memblock_set_current_limit - Set the current allocation limit to allow @@ -101,7 +179,7 @@ extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, * accessible during boot * @limit: New limit value (physical address) */ -extern void memblock_set_current_limit(phys_addr_t limit); +void memblock_set_current_limit(phys_addr_t limit); /* @@ -154,9 +232,9 @@ static 
inline unsigned long memblock_region_reserved_end_pfn(const struct memblo region++) -#ifdef ARCH_DISCARD_MEMBLOCK -#define __init_memblock __init -#define __initdata_memblock __initdata +#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK +#define __init_memblock __meminit +#define __initdata_memblock __meminitdata #else #define __init_memblock #define __initdata_memblock @@ -165,7 +243,7 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo #else static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) { - return MEMBLOCK_ERROR; + return 0; } #endif /* CONFIG_HAVE_MEMBLOCK */ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index b87068a1a09..9b296ea41bb 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -85,6 +85,9 @@ extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm); +extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); +extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont); + static inline int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) { @@ -381,5 +384,25 @@ mem_cgroup_print_bad_page(struct page *page) } #endif +enum { + UNDER_LIMIT, + SOFT_LIMIT, + OVER_LIMIT, +}; + +#ifdef CONFIG_INET +struct sock; +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM +void sock_update_memcg(struct sock *sk); +void sock_release_memcg(struct sock *sk); +#else +static inline void sock_update_memcg(struct sock *sk) +{ +} +static inline void sock_release_memcg(struct sock *sk) +{ +} +#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */ +#endif /* CONFIG_INET */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/memory.h b/include/linux/memory.h index 935699b30b7..1ac7f6e405f 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -15,7 +15,6 @@ #ifndef _LINUX_MEMORY_H_ #define _LINUX_MEMORY_H_ -#include <linux/sysdev.h> #include <linux/node.h> #include <linux/compiler.h> #include <linux/mutex.h> @@ -38,7 +37,7 @@ struct memory_block { int phys_device; /* to which fru does this belong? */ void *hw; /* optional pointer to fw/hw data */ int (*phys_callback)(struct memory_block *); - struct sys_device sysdev; + struct device dev; }; int arch_get_memory_phys_device(unsigned long start_pfn); diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h new file mode 100644 index 00000000000..5702d1be13b --- /dev/null +++ b/include/linux/mfd/da9052/da9052.h @@ -0,0 +1,131 @@ +/* + * da9052 declarations for DA9052 PMICs. + * + * Copyright(c) 2011 Dialog Semiconductor Ltd. + * + * Author: David Dajun Chen <dchen@diasemi.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +#ifndef __MFD_DA9052_DA9052_H +#define __MFD_DA9052_DA9052_H + +#include <linux/interrupt.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/completion.h> +#include <linux/list.h> +#include <linux/mfd/core.h> + +#include <linux/mfd/da9052/reg.h> + +#define DA9052_IRQ_DCIN 0 +#define DA9052_IRQ_VBUS 1 +#define DA9052_IRQ_DCINREM 2 +#define DA9052_IRQ_VBUSREM 3 +#define DA9052_IRQ_VDDLOW 4 +#define DA9052_IRQ_ALARM 5 +#define DA9052_IRQ_SEQRDY 6 +#define DA9052_IRQ_COMP1V2 7 +#define DA9052_IRQ_NONKEY 8 +#define DA9052_IRQ_IDFLOAT 9 +#define DA9052_IRQ_IDGND 10 +#define DA9052_IRQ_CHGEND 11 +#define DA9052_IRQ_TBAT 12 +#define DA9052_IRQ_ADC_EOM 13 +#define DA9052_IRQ_PENDOWN 14 +#define DA9052_IRQ_TSIREADY 15 +#define DA9052_IRQ_GPI0 16 +#define DA9052_IRQ_GPI1 17 +#define DA9052_IRQ_GPI2 18 +#define DA9052_IRQ_GPI3 19 +#define DA9052_IRQ_GPI4 20 +#define DA9052_IRQ_GPI5 21 +#define DA9052_IRQ_GPI6 22 +#define DA9052_IRQ_GPI7 23 +#define DA9052_IRQ_GPI8 24 +#define DA9052_IRQ_GPI9 25 +#define DA9052_IRQ_GPI10 26 +#define DA9052_IRQ_GPI11 27 +#define DA9052_IRQ_GPI12 28 +#define DA9052_IRQ_GPI13 29 +#define DA9052_IRQ_GPI14 30 +#define DA9052_IRQ_GPI15 31 + +enum da9052_chip_id { + DA9052, + DA9053_AA, + DA9053_BA, + DA9053_BB, +}; + +struct da9052_pdata; + +struct da9052 { + struct mutex io_lock; + + struct device *dev; + struct regmap *regmap; + + int irq_base; + u8 chip_id; + + int chip_irq; +}; + +/* Device I/O API */ +static inline int da9052_reg_read(struct da9052 *da9052, unsigned char reg) +{ + int val, ret; + + ret = regmap_read(da9052->regmap, reg, &val); + if (ret < 0) + return ret; + return val; +} + +static inline int da9052_reg_write(struct da9052 *da9052, unsigned char reg, + unsigned char val) +{ + return regmap_write(da9052->regmap, reg, val); +} + +static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg, + unsigned reg_cnt, unsigned char *val) +{ + return regmap_bulk_read(da9052->regmap, reg, val, reg_cnt); +} + +static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg, + unsigned reg_cnt, unsigned char *val) +{ + return regmap_raw_write(da9052->regmap, reg, val, reg_cnt); +} + +static inline int da9052_reg_update(struct da9052 *da9052, unsigned char reg, + unsigned char bit_mask, + unsigned char reg_val) +{ + return regmap_update_bits(da9052->regmap, reg, bit_mask, reg_val); +} + +int da9052_device_init(struct da9052 *da9052, u8 chip_id); +void da9052_device_exit(struct da9052 *da9052); + +extern struct regmap_config da9052_regmap_config; + +#endif /* __MFD_DA9052_DA9052_H */ diff --git a/include/linux/mfd/da9052/pdata.h b/include/linux/mfd/da9052/pdata.h new file mode 100644 index 00000000000..62c5c3c2992 --- /dev/null +++ b/include/linux/mfd/da9052/pdata.h @@ -0,0 +1,40 @@ +/* + * Platform data declarations for DA9052 PMICs. + * + * Copyright(c) 2011 Dialog Semiconductor Ltd. + * + * Author: David Dajun Chen <dchen@diasemi.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
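The new da9052.h core header above deliberately hides regmap behind small typed accessors (da9052_reg_read(), da9052_reg_write(), da9052_group_read()/da9052_group_write(), da9052_reg_update()), which is what the DA9052 cell drivers are meant to use. A hedged sub-driver fragment built only on those accessors and on register/bit names from the reg.h header added below; the poke logic itself is illustrative:

#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>

/* Hypothetical sub-driver fragment: read a status register and set one
 * control bit through the accessors defined in da9052.h above. */
static int example_da9052_poke(struct da9052 *da9052)
{
        int status;

        status = da9052_reg_read(da9052, DA9052_STATUS_A_REG);
        if (status < 0)
                return status;  /* regmap error code */

        /* Set SYSEN in CONTROL_A without disturbing the other bits. */
        return da9052_reg_update(da9052, DA9052_CONTROL_A_REG,
                                 DA9052_CONTROLA_SYSEN,
                                 DA9052_CONTROLA_SYSEN);
}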
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef __MFD_DA9052_PDATA_H__ +#define __MFD_DA9052_PDATA_H__ + +#define DA9052_MAX_REGULATORS 14 + +struct da9052; + +struct da9052_pdata { + struct led_platform_data *pled; + int (*init) (struct da9052 *da9052); + int irq_base; + int gpio_base; + int use_for_apm; + struct regulator_init_data *regulators[DA9052_MAX_REGULATORS]; +}; + +#endif diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h new file mode 100644 index 00000000000..b97f7309d7f --- /dev/null +++ b/include/linux/mfd/da9052/reg.h @@ -0,0 +1,749 @@ +/* + * Register declarations for DA9052 PMICs. + * + * Copyright(c) 2011 Dialog Semiconductor Ltd. + * + * Author: David Dajun Chen <dchen@diasemi.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef __LINUX_MFD_DA9052_REG_H +#define __LINUX_MFD_DA9052_REG_H + +/* PAGE REGISTERS */ +#define DA9052_PAGE0_CON_REG 0 +#define DA9052_PAGE1_CON_REG 128 + +/* STATUS REGISTERS */ +#define DA9052_STATUS_A_REG 1 +#define DA9052_STATUS_B_REG 2 +#define DA9052_STATUS_C_REG 3 +#define DA9052_STATUS_D_REG 4 + +/* EVENT REGISTERS */ +#define DA9052_EVENT_A_REG 5 +#define DA9052_EVENT_B_REG 6 +#define DA9052_EVENT_C_REG 7 +#define DA9052_EVENT_D_REG 8 +#define DA9052_FAULTLOG_REG 9 + +/* IRQ REGISTERS */ +#define DA9052_IRQ_MASK_A_REG 10 +#define DA9052_IRQ_MASK_B_REG 11 +#define DA9052_IRQ_MASK_C_REG 12 +#define DA9052_IRQ_MASK_D_REG 13 + +/* CONTROL REGISTERS */ +#define DA9052_CONTROL_A_REG 14 +#define DA9052_CONTROL_B_REG 15 +#define DA9052_CONTROL_C_REG 16 +#define DA9052_CONTROL_D_REG 17 + +#define DA9052_PDDIS_REG 18 +#define DA9052_INTERFACE_REG 19 +#define DA9052_RESET_REG 20 + +/* GPIO REGISTERS */ +#define DA9052_GPIO_0_1_REG 21 +#define DA9052_GPIO_2_3_REG 22 +#define DA9052_GPIO_4_5_REG 23 +#define DA9052_GPIO_6_7_REG 24 +#define DA9052_GPIO_14_15_REG 28 + +/* POWER SEQUENCER CONTROL REGISTERS */ +#define DA9052_ID_0_1_REG 29 +#define DA9052_ID_2_3_REG 30 +#define DA9052_ID_4_5_REG 31 +#define DA9052_ID_6_7_REG 32 +#define DA9052_ID_8_9_REG 33 +#define DA9052_ID_10_11_REG 34 +#define DA9052_ID_12_13_REG 35 +#define DA9052_ID_14_15_REG 36 +#define DA9052_ID_16_17_REG 37 +#define DA9052_ID_18_19_REG 38 +#define DA9052_ID_20_21_REG 39 +#define DA9052_SEQ_STATUS_REG 40 +#define DA9052_SEQ_A_REG 41 +#define DA9052_SEQ_B_REG 42 +#define DA9052_SEQ_TIMER_REG 43 + +/* LDO AND BUCK REGISTERS */ +#define DA9052_BUCKA_REG 44 +#define DA9052_BUCKB_REG 45 +#define DA9052_BUCKCORE_REG 46 +#define DA9052_BUCKPRO_REG 47 +#define DA9052_BUCKMEM_REG 48 +#define DA9052_BUCKPERI_REG 49 +#define DA9052_LDO1_REG 50 +#define DA9052_LDO2_REG 51 +#define DA9052_LDO3_REG 52 +#define DA9052_LDO4_REG 53 
+#define DA9052_LDO5_REG 54 +#define DA9052_LDO6_REG 55 +#define DA9052_LDO7_REG 56 +#define DA9052_LDO8_REG 57 +#define DA9052_LDO9_REG 58 +#define DA9052_LDO10_REG 59 +#define DA9052_SUPPLY_REG 60 +#define DA9052_PULLDOWN_REG 61 +#define DA9052_CHGBUCK_REG 62 +#define DA9052_WAITCONT_REG 63 +#define DA9052_ISET_REG 64 +#define DA9052_BATCHG_REG 65 + +/* BATTERY CONTROL REGISTRS */ +#define DA9052_CHG_CONT_REG 66 +#define DA9052_INPUT_CONT_REG 67 +#define DA9052_CHG_TIME_REG 68 +#define DA9052_BBAT_CONT_REG 69 + +/* LED CONTROL REGISTERS */ +#define DA9052_BOOST_REG 70 +#define DA9052_LED_CONT_REG 71 +#define DA9052_LEDMIN123_REG 72 +#define DA9052_LED1_CONF_REG 73 +#define DA9052_LED2_CONF_REG 74 +#define DA9052_LED3_CONF_REG 75 +#define DA9052_LED1CONT_REG 76 +#define DA9052_LED2CONT_REG 77 +#define DA9052_LED3CONT_REG 78 +#define DA9052_LED_CONT_4_REG 79 +#define DA9052_LED_CONT_5_REG 80 + +/* ADC CONTROL REGISTERS */ +#define DA9052_ADC_MAN_REG 81 +#define DA9052_ADC_CONT_REG 82 +#define DA9052_ADC_RES_L_REG 83 +#define DA9052_ADC_RES_H_REG 84 +#define DA9052_VDD_RES_REG 85 +#define DA9052_VDD_MON_REG 86 + +#define DA9052_ICHG_AV_REG 87 +#define DA9052_ICHG_THD_REG 88 +#define DA9052_ICHG_END_REG 89 +#define DA9052_TBAT_RES_REG 90 +#define DA9052_TBAT_HIGHP_REG 91 +#define DA9052_TBAT_HIGHN_REG 92 +#define DA9052_TBAT_LOW_REG 93 +#define DA9052_T_OFFSET_REG 94 + +#define DA9052_ADCIN4_RES_REG 95 +#define DA9052_AUTO4_HIGH_REG 96 +#define DA9052_AUTO4_LOW_REG 97 +#define DA9052_ADCIN5_RES_REG 98 +#define DA9052_AUTO5_HIGH_REG 99 +#define DA9052_AUTO5_LOW_REG 100 +#define DA9052_ADCIN6_RES_REG 101 +#define DA9052_AUTO6_HIGH_REG 102 +#define DA9052_AUTO6_LOW_REG 103 + +#define DA9052_TJUNC_RES_REG 104 + +/* TSI CONTROL REGISTERS */ +#define DA9052_TSI_CONT_A_REG 105 +#define DA9052_TSI_CONT_B_REG 106 +#define DA9052_TSI_X_MSB_REG 107 +#define DA9052_TSI_Y_MSB_REG 108 +#define DA9052_TSI_LSB_REG 109 +#define DA9052_TSI_Z_MSB_REG 110 + +/* RTC COUNT REGISTERS */ +#define DA9052_COUNT_S_REG 111 +#define DA9052_COUNT_MI_REG 112 +#define DA9052_COUNT_H_REG 113 +#define DA9052_COUNT_D_REG 114 +#define DA9052_COUNT_MO_REG 115 +#define DA9052_COUNT_Y_REG 116 + +/* RTC CONTROL REGISTERS */ +#define DA9052_ALARM_MI_REG 117 +#define DA9052_ALARM_H_REG 118 +#define DA9052_ALARM_D_REG 119 +#define DA9052_ALARM_MO_REG 120 +#define DA9052_ALARM_Y_REG 121 +#define DA9052_SECOND_A_REG 122 +#define DA9052_SECOND_B_REG 123 +#define DA9052_SECOND_C_REG 124 +#define DA9052_SECOND_D_REG 125 + +/* PAGE CONFIGURATION BIT */ +#define DA9052_PAGE_CONF 0X80 + +/* STATUS REGISTER A BITS */ +#define DA9052_STATUSA_VDATDET 0X80 +#define DA9052_STATUSA_VBUSSEL 0X40 +#define DA9052_STATUSA_DCINSEL 0X20 +#define DA9052_STATUSA_VBUSDET 0X10 +#define DA9052_STATUSA_DCINDET 0X08 +#define DA9052_STATUSA_IDGND 0X04 +#define DA9052_STATUSA_IDFLOAT 0X02 +#define DA9052_STATUSA_NONKEY 0X01 + +/* STATUS REGISTER B BITS */ +#define DA9052_STATUSB_COMPDET 0X80 +#define DA9052_STATUSB_SEQUENCING 0X40 +#define DA9052_STATUSB_GPFB2 0X20 +#define DA9052_STATUSB_CHGTO 0X10 +#define DA9052_STATUSB_CHGEND 0X08 +#define DA9052_STATUSB_CHGLIM 0X04 +#define DA9052_STATUSB_CHGPRE 0X02 +#define DA9052_STATUSB_CHGATT 0X01 + +/* STATUS REGISTER C BITS */ +#define DA9052_STATUSC_GPI7 0X80 +#define DA9052_STATUSC_GPI6 0X40 +#define DA9052_STATUSC_GPI5 0X20 +#define DA9052_STATUSC_GPI4 0X10 +#define DA9052_STATUSC_GPI3 0X08 +#define DA9052_STATUSC_GPI2 0X04 +#define DA9052_STATUSC_GPI1 0X02 +#define DA9052_STATUSC_GPI0 0X01 + +/* STATUS REGISTER D 
BITS */ +#define DA9052_STATUSD_GPI15 0X80 +#define DA9052_STATUSD_GPI14 0X40 +#define DA9052_STATUSD_GPI13 0X20 +#define DA9052_STATUSD_GPI12 0X10 +#define DA9052_STATUSD_GPI11 0X08 +#define DA9052_STATUSD_GPI10 0X04 +#define DA9052_STATUSD_GPI9 0X02 +#define DA9052_STATUSD_GPI8 0X01 + +/* EVENT REGISTER A BITS */ +#define DA9052_EVENTA_ECOMP1V2 0X80 +#define DA9052_EVENTA_ESEQRDY 0X40 +#define DA9052_EVENTA_EALRAM 0X20 +#define DA9052_EVENTA_EVDDLOW 0X10 +#define DA9052_EVENTA_EVBUSREM 0X08 +#define DA9052_EVENTA_EDCINREM 0X04 +#define DA9052_EVENTA_EVBUSDET 0X02 +#define DA9052_EVENTA_EDCINDET 0X01 + +/* EVENT REGISTER B BITS */ +#define DA9052_EVENTB_ETSIREADY 0X80 +#define DA9052_EVENTB_EPENDOWN 0X40 +#define DA9052_EVENTB_EADCEOM 0X20 +#define DA9052_EVENTB_ETBAT 0X10 +#define DA9052_EVENTB_ECHGEND 0X08 +#define DA9052_EVENTB_EIDGND 0X04 +#define DA9052_EVENTB_EIDFLOAT 0X02 +#define DA9052_EVENTB_ENONKEY 0X01 + +/* EVENT REGISTER C BITS */ +#define DA9052_EVENTC_EGPI7 0X80 +#define DA9052_EVENTC_EGPI6 0X40 +#define DA9052_EVENTC_EGPI5 0X20 +#define DA9052_EVENTC_EGPI4 0X10 +#define DA9052_EVENTC_EGPI3 0X08 +#define DA9052_EVENTC_EGPI2 0X04 +#define DA9052_EVENTC_EGPI1 0X02 +#define DA9052_EVENTC_EGPI0 0X01 + +/* EVENT REGISTER D BITS */ +#define DA9052_EVENTD_EGPI15 0X80 +#define DA9052_EVENTD_EGPI14 0X40 +#define DA9052_EVENTD_EGPI13 0X20 +#define DA9052_EVENTD_EGPI12 0X10 +#define DA9052_EVENTD_EGPI11 0X08 +#define DA9052_EVENTD_EGPI10 0X04 +#define DA9052_EVENTD_EGPI9 0X02 +#define DA9052_EVENTD_EGPI8 0X01 + +/* IRQ MASK REGISTERS BITS */ +#define DA9052_M_NONKEY 0X0100 + +/* TSI EVENT REGISTERS BITS */ +#define DA9052_E_PEN_DOWN 0X4000 +#define DA9052_E_TSI_READY 0X8000 + +/* FAULT LOG REGISTER BITS */ +#define DA9052_FAULTLOG_WAITSET 0X80 +#define DA9052_FAULTLOG_NSDSET 0X40 +#define DA9052_FAULTLOG_KEYSHUT 0X20 +#define DA9052_FAULTLOG_TEMPOVER 0X08 +#define DA9052_FAULTLOG_VDDSTART 0X04 +#define DA9052_FAULTLOG_VDDFAULT 0X02 +#define DA9052_FAULTLOG_TWDERROR 0X01 + +/* CONTROL REGISTER A BITS */ +#define DA9052_CONTROLA_GPIV 0X80 +#define DA9052_CONTROLA_PMOTYPE 0X20 +#define DA9052_CONTROLA_PMOV 0X10 +#define DA9052_CONTROLA_PMIV 0X08 +#define DA9052_CONTROLA_PMIFV 0X08 +#define DA9052_CONTROLA_PWR1EN 0X04 +#define DA9052_CONTROLA_PWREN 0X02 +#define DA9052_CONTROLA_SYSEN 0X01 + +/* CONTROL REGISTER B BITS */ +#define DA9052_CONTROLB_SHUTDOWN 0X80 +#define DA9052_CONTROLB_DEEPSLEEP 0X40 +#define DA9052_CONTROL_B_WRITEMODE 0X20 +#define DA9052_CONTROLB_BBATEN 0X10 +#define DA9052_CONTROLB_OTPREADEN 0X08 +#define DA9052_CONTROLB_AUTOBOOT 0X04 +#define DA9052_CONTROLB_ACTDIODE 0X02 +#define DA9052_CONTROLB_BUCKMERGE 0X01 + +/* CONTROL REGISTER C BITS */ +#define DA9052_CONTROLC_BLINKDUR 0X80 +#define DA9052_CONTROLC_BLINKFRQ 0X60 +#define DA9052_CONTROLC_DEBOUNCING 0X1C +#define DA9052_CONTROLC_PMFB2PIN 0X02 +#define DA9052_CONTROLC_PMFB1PIN 0X01 + +/* CONTROL REGISTER D BITS */ +#define DA9052_CONTROLD_WATCHDOG 0X80 +#define DA9052_CONTROLD_ACCDETEN 0X40 +#define DA9052_CONTROLD_GPI1415SD 0X20 +#define DA9052_CONTROLD_NONKEYSD 0X10 +#define DA9052_CONTROLD_KEEPACTEN 0X08 +#define DA9052_CONTROLD_TWDSCALE 0X07 + +/* POWER DOWN DISABLE REGISTER BITS */ +#define DA9052_PDDIS_PMCONTPD 0X80 +#define DA9052_PDDIS_OUT32KPD 0X40 +#define DA9052_PDDIS_CHGBBATPD 0X20 +#define DA9052_PDDIS_CHGPD 0X10 +#define DA9052_PDDIS_HS2WIREPD 0X08 +#define DA9052_PDDIS_PMIFPD 0X04 +#define DA9052_PDDIS_GPADCPD 0X02 +#define DA9052_PDDIS_GPIOPD 0X01 + +/* CONTROL REGISTER D BITS */ +#define 
DA9052_INTERFACE_IFBASEADDR 0XE0 +#define DA9052_INTERFACE_NCSPOL 0X10 +#define DA9052_INTERFACE_RWPOL 0X08 +#define DA9052_INTERFACE_CPHA 0X04 +#define DA9052_INTERFACE_CPOL 0X02 +#define DA9052_INTERFACE_IFTYPE 0X01 + +/* CONTROL REGISTER D BITS */ +#define DA9052_RESET_RESETEVENT 0XC0 +#define DA9052_RESET_RESETTIMER 0X3F + +/* GPIO REGISTERS */ +/* GPIO CONTROL REGISTER BITS */ +#define DA9052_GPIO_EVEN_PORT_PIN 0X03 +#define DA9052_GPIO_EVEN_PORT_TYPE 0X04 +#define DA9052_GPIO_EVEN_PORT_MODE 0X08 + +#define DA9052_GPIO_ODD_PORT_PIN 0X30 +#define DA9052_GPIO_ODD_PORT_TYPE 0X40 +#define DA9052_GPIO_ODD_PORT_MODE 0X80 + +/*POWER SEQUENCER REGISTER BITS */ +/* SEQ CONTROL REGISTER BITS FOR ID 0 AND 1 */ +#define DA9052_ID01_LDO1STEP 0XF0 +#define DA9052_ID01_SYSPRE 0X04 +#define DA9052_ID01_DEFSUPPLY 0X02 +#define DA9052_ID01_NRESMODE 0X01 + +/* SEQ CONTROL REGISTER BITS FOR ID 2 AND 3 */ +#define DA9052_ID23_LDO3STEP 0XF0 +#define DA9052_ID23_LDO2STEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 4 AND 5 */ +#define DA9052_ID45_LDO5STEP 0XF0 +#define DA9052_ID45_LDO4STEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 6 AND 7 */ +#define DA9052_ID67_LDO7STEP 0XF0 +#define DA9052_ID67_LDO6STEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 8 AND 9 */ +#define DA9052_ID89_LDO9STEP 0XF0 +#define DA9052_ID89_LDO8STEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 10 AND 11 */ +#define DA9052_ID1011_PDDISSTEP 0XF0 +#define DA9052_ID1011_LDO10STEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 12 AND 13 */ +#define DA9052_ID1213_VMEMSWSTEP 0XF0 +#define DA9052_ID1213_VPERISWSTEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 14 AND 15 */ +#define DA9052_ID1415_BUCKPROSTEP 0XF0 +#define DA9052_ID1415_BUCKCORESTEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 16 AND 17 */ +#define DA9052_ID1617_BUCKPERISTEP 0XF0 +#define DA9052_ID1617_BUCKMEMSTEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 18 AND 19 */ +#define DA9052_ID1819_GPRISE2STEP 0XF0 +#define DA9052_ID1819_GPRISE1STEP 0X0F + +/* SEQ CONTROL REGISTER BITS FOR ID 20 AND 21 */ +#define DA9052_ID2021_GPFALL2STEP 0XF0 +#define DA9052_ID2021_GPFALL1STEP 0X0F + +/* POWER SEQ STATUS REGISTER BITS */ +#define DA9052_SEQSTATUS_SEQPOINTER 0XF0 +#define DA9052_SEQSTATUS_WAITSTEP 0X0F + +/* POWER SEQ A REGISTER BITS */ +#define DA9052_SEQA_POWEREND 0XF0 +#define DA9052_SEQA_SYSTEMEND 0X0F + +/* POWER SEQ B REGISTER BITS */ +#define DA9052_SEQB_PARTDOWN 0XF0 +#define DA9052_SEQB_MAXCOUNT 0X0F + +/* POWER SEQ TIMER REGISTER BITS */ +#define DA9052_SEQTIMER_SEQDUMMY 0XF0 +#define DA9052_SEQTIMER_SEQTIME 0X0F + +/*POWER SUPPLY CONTROL REGISTER BITS */ +/* BUCK REGISTER A BITS */ +#define DA9052_BUCKA_BPROILIM 0XC0 +#define DA9052_BUCKA_BPROMODE 0X30 +#define DA9052_BUCKA_BCOREILIM 0X0C +#define DA9052_BUCKA_BCOREMODE 0X03 + +/* BUCK REGISTER B BITS */ +#define DA9052_BUCKB_BERIILIM 0XC0 +#define DA9052_BUCKB_BPERIMODE 0X30 +#define DA9052_BUCKB_BMEMILIM 0X0C +#define DA9052_BUCKB_BMEMMODE 0X03 + +/* BUCKCORE REGISTER BITS */ +#define DA9052_BUCKCORE_BCORECONF 0X80 +#define DA9052_BUCKCORE_BCOREEN 0X40 +#define DA9052_BUCKCORE_VBCORE 0X3F + +/* BUCKPRO REGISTER BITS */ +#define DA9052_BUCKPRO_BPROCONF 0X80 +#define DA9052_BUCKPRO_BPROEN 0X40 +#define DA9052_BUCKPRO_VBPRO 0X3F + +/* BUCKMEM REGISTER BITS */ +#define DA9052_BUCKMEM_BMEMCONF 0X80 +#define DA9052_BUCKMEM_BMEMEN 0X40 +#define DA9052_BUCKMEM_VBMEM 0X3F + +/* BUCKPERI REGISTER BITS */ +#define DA9052_BUCKPERI_BPERICONF 0X80 +#define DA9052_BUCKPERI_BPERIEN 0X40 +#define DA9052_BUCKPERI_BPERIHS 0X20 +#define 
DA9052_BUCKPERI_VBPERI 0X1F + +/* LDO1 REGISTER BITS */ +#define DA9052_LDO1_LDO1CONF 0X80 +#define DA9052_LDO1_LDO1EN 0X40 +#define DA9052_LDO1_VLDO1 0X1F + +/* LDO2 REGISTER BITS */ +#define DA9052_LDO2_LDO2CONF 0X80 +#define DA9052_LDO2_LDO2EN 0X40 +#define DA9052_LDO2_VLDO2 0X3F + +/* LDO3 REGISTER BITS */ +#define DA9052_LDO3_LDO3CONF 0X80 +#define DA9052_LDO3_LDO3EN 0X40 +#define DA9052_LDO3_VLDO3 0X3F + +/* LDO4 REGISTER BITS */ +#define DA9052_LDO4_LDO4CONF 0X80 +#define DA9052_LDO4_LDO4EN 0X40 +#define DA9052_LDO4_VLDO4 0X3F + +/* LDO5 REGISTER BITS */ +#define DA9052_LDO5_LDO5CONF 0X80 +#define DA9052_LDO5_LDO5EN 0X40 +#define DA9052_LDO5_VLDO5 0X3F + +/* LDO6 REGISTER BITS */ +#define DA9052_LDO6_LDO6CONF 0X80 +#define DA9052_LDO6_LDO6EN 0X40 +#define DA9052_LDO6_VLDO6 0X3F + +/* LDO7 REGISTER BITS */ +#define DA9052_LDO7_LDO7CONF 0X80 +#define DA9052_LDO7_LDO7EN 0X40 +#define DA9052_LDO7_VLDO7 0X3F + +/* LDO8 REGISTER BITS */ +#define DA9052_LDO8_LDO8CONF 0X80 +#define DA9052_LDO8_LDO8EN 0X40 +#define DA9052_LDO8_VLDO8 0X3F + +/* LDO9 REGISTER BITS */ +#define DA9052_LDO9_LDO9CONF 0X80 +#define DA9052_LDO9_LDO9EN 0X40 +#define DA9052_LDO9_VLDO9 0X3F + +/* LDO10 REGISTER BITS */ +#define DA9052_LDO10_LDO10CONF 0X80 +#define DA9052_LDO10_LDO10EN 0X40 +#define DA9052_LDO10_VLDO10 0X3F + +/* SUPPLY REGISTER BITS */ +#define DA9052_SUPPLY_VLOCK 0X80 +#define DA9052_SUPPLY_VMEMSWEN 0X40 +#define DA9052_SUPPLY_VPERISWEN 0X20 +#define DA9052_SUPPLY_VLDO3GO 0X10 +#define DA9052_SUPPLY_VLDO2GO 0X08 +#define DA9052_SUPPLY_VBMEMGO 0X04 +#define DA9052_SUPPLY_VBPROGO 0X02 +#define DA9052_SUPPLY_VBCOREGO 0X01 + +/* PULLDOWN REGISTER BITS */ +#define DA9052_PULLDOWN_LDO5PDDIS 0X20 +#define DA9052_PULLDOWN_LDO2PDDIS 0X10 +#define DA9052_PULLDOWN_LDO1PDDIS 0X08 +#define DA9052_PULLDOWN_MEMPDDIS 0X04 +#define DA9052_PULLDOWN_PROPDDIS 0X02 +#define DA9052_PULLDOWN_COREPDDIS 0X01 + +/* BAT CHARGER REGISTER BITS */ +/* CHARGER BUCK REGISTER BITS */ +#define DA9052_CHGBUCK_CHGTEMP 0X80 +#define DA9052_CHGBUCK_CHGUSBILIM 0X40 +#define DA9052_CHGBUCK_CHGBUCKLP 0X20 +#define DA9052_CHGBUCK_CHGBUCKEN 0X10 +#define DA9052_CHGBUCK_ISETBUCK 0X0F + +/* WAIT COUNTER REGISTER BITS */ +#define DA9052_WAITCONT_WAITDIR 0X80 +#define DA9052_WAITCONT_RTCCLOCK 0X40 +#define DA9052_WAITCONT_WAITMODE 0X20 +#define DA9052_WAITCONT_EN32KOUT 0X10 +#define DA9052_WAITCONT_DELAYTIME 0X0F + +/* ISET CONTROL REGISTER BITS */ +#define DA9052_ISET_ISETDCIN 0XF0 +#define DA9052_ISET_ISETVBUS 0X0F + +/* BATTERY CHARGER CONTROL REGISTER BITS */ +#define DA9052_BATCHG_ICHGPRE 0XC0 +#define DA9052_BATCHG_ICHGBAT 0X3F + +/* CHARGER COUNTER REGISTER BITS */ +#define DA9052_CHG_CONT_VCHG_BAT 0XF8 +#define DA9052_CHG_CONT_TCTR 0X07 + +/* INPUT CONTROL REGISTER BITS */ +#define DA9052_INPUT_CONT_TCTR_MODE 0X80 +#define DA9052_INPUT_CONT_VBUS_SUSP 0X10 +#define DA9052_INPUT_CONT_DCIN_SUSP 0X08 + +/* CHARGING TIME REGISTER BITS */ +#define DA9052_CHGTIME_CHGTIME 0XFF + +/* BACKUP BATTERY CONTROL REGISTER BITS */ +#define DA9052_BBATCONT_BCHARGERISET 0XF0 +#define DA9052_BBATCONT_BCHARGERVSET 0X0F + +/* LED REGISTERS BITS */ +/* LED BOOST REGISTER BITS */ +#define DA9052_BOOST_EBFAULT 0X80 +#define DA9052_BOOST_MBFAULT 0X40 +#define DA9052_BOOST_BOOSTFRQ 0X20 +#define DA9052_BOOST_BOOSTILIM 0X10 +#define DA9052_BOOST_LED3INEN 0X08 +#define DA9052_BOOST_LED2INEN 0X04 +#define DA9052_BOOST_LED1INEN 0X02 +#define DA9052_BOOST_BOOSTEN 0X01 + +/* LED CONTROL REGISTER BITS */ +#define DA9052_LEDCONT_SELLEDMODE 0X80 +#define 
DA9052_LEDCONT_LED3ICONT 0X40 +#define DA9052_LEDCONT_LED3RAMP 0X20 +#define DA9052_LEDCONT_LED3EN 0X10 +#define DA9052_LEDCONT_LED2RAMP 0X08 +#define DA9052_LEDCONT_LED2EN 0X04 +#define DA9052_LEDCONT_LED1RAMP 0X02 +#define DA9052_LEDCONT_LED1EN 0X01 + +/* LEDMIN123 REGISTER BIT */ +#define DA9052_LEDMIN123_LEDMINCURRENT 0XFF + +/* LED1CONF REGISTER BIT */ +#define DA9052_LED1CONF_LED1CURRENT 0XFF + +/* LED2CONF REGISTER BIT */ +#define DA9052_LED2CONF_LED2CURRENT 0XFF + +/* LED3CONF REGISTER BIT */ +#define DA9052_LED3CONF_LED3CURRENT 0XFF + +/* LED COUNT REGISTER BIT */ +#define DA9052_LED_CONT_DIM 0X80 + +/* ADC MAN REGISTERS BITS */ +#define DA9052_ADC_MAN_MAN_CONV 0X10 +#define DA9052_ADC_MAN_MUXSEL_VDDOUT 0X00 +#define DA9052_ADC_MAN_MUXSEL_ICH 0X01 +#define DA9052_ADC_MAN_MUXSEL_TBAT 0X02 +#define DA9052_ADC_MAN_MUXSEL_VBAT 0X03 +#define DA9052_ADC_MAN_MUXSEL_AD4 0X04 +#define DA9052_ADC_MAN_MUXSEL_AD5 0X05 +#define DA9052_ADC_MAN_MUXSEL_AD6 0X06 +#define DA9052_ADC_MAN_MUXSEL_VBBAT 0X09 + +/* ADC CONTROL REGSISTERS BITS */ +#define DA9052_ADCCONT_COMP1V2EN 0X80 +#define DA9052_ADCCONT_ADCMODE 0X40 +#define DA9052_ADCCONT_TBATISRCEN 0X20 +#define DA9052_ADCCONT_AD4ISRCEN 0X10 +#define DA9052_ADCCONT_AUTOAD6EN 0X08 +#define DA9052_ADCCONT_AUTOAD5EN 0X04 +#define DA9052_ADCCONT_AUTOAD4EN 0X02 +#define DA9052_ADCCONT_AUTOVDDEN 0X01 + +/* ADC 10 BIT MANUAL CONVERSION RESULT LOW REGISTER */ +#define DA9052_ADC_RES_LSB 0X03 + +/* ADC 10 BIT MANUAL CONVERSION RESULT HIGH REGISTER */ +#define DA9052_ADCRESH_ADCRESMSB 0XFF + +/* VDD RES REGSISTER BIT*/ +#define DA9052_VDDRES_VDDOUTRES 0XFF + +/* VDD MON REGSISTER BIT */ +#define DA9052_VDDMON_VDDOUTMON 0XFF + +/* ICHG_AV REGSISTER BIT */ +#define DA9052_ICHGAV_ICHGAV 0XFF + +/* ICHG_THD REGSISTER BIT */ +#define DA9052_ICHGTHD_ICHGTHD 0XFF + +/* ICHG_END REGSISTER BIT */ +#define DA9052_ICHGEND_ICHGEND 0XFF + +/* TBAT_RES REGSISTER BIT */ +#define DA9052_TBATRES_TBATRES 0XFF + +/* TBAT_HIGHP REGSISTER BIT */ +#define DA9052_TBATHIGHP_TBATHIGHP 0XFF + +/* TBAT_HIGHN REGSISTER BIT */ +#define DA9052_TBATHIGHN_TBATHIGHN 0XFF + +/* TBAT_LOW REGSISTER BIT */ +#define DA9052_TBATLOW_TBATLOW 0XFF + +/* T_OFFSET REGSISTER BIT */ +#define DA9052_TOFFSET_TOFFSET 0XFF + +/* ADCIN4_RES REGSISTER BIT */ +#define DA9052_ADCIN4RES_ADCIN4RES 0XFF + +/* ADCIN4_HIGH REGSISTER BIT */ +#define DA9052_AUTO4HIGH_AUTO4HIGH 0XFF + +/* ADCIN4_LOW REGSISTER BIT */ +#define DA9052_AUTO4LOW_AUTO4LOW 0XFF + +/* ADCIN5_RES REGSISTER BIT */ +#define DA9052_ADCIN5RES_ADCIN5RES 0XFF + +/* ADCIN5_HIGH REGSISTER BIT */ +#define DA9052_AUTO5HIGH_AUTOHIGH 0XFF + +/* ADCIN5_LOW REGSISTER BIT */ +#define DA9052_AUTO5LOW_AUTO5LOW 0XFF + +/* ADCIN6_RES REGSISTER BIT */ +#define DA9052_ADCIN6RES_ADCIN6RES 0XFF + +/* ADCIN6_HIGH REGSISTER BIT */ +#define DA9052_AUTO6HIGH_AUTO6HIGH 0XFF + +/* ADCIN6_LOW REGSISTER BIT */ +#define DA9052_AUTO6LOW_AUTO6LOW 0XFF + +/* TJUNC_RES REGSISTER BIT*/ +#define DA9052_TJUNCRES_TJUNCRES 0XFF + +/* TSI REGISTER */ +/* TSI CONTROL REGISTER A BITS */ +#define DA9052_TSICONTA_TSIDELAY 0XC0 +#define DA9052_TSICONTA_TSISKIP 0X38 +#define DA9052_TSICONTA_TSIMODE 0X04 +#define DA9052_TSICONTA_PENDETEN 0X02 +#define DA9052_TSICONTA_AUTOTSIEN 0X01 + +/* TSI CONTROL REGISTER B BITS */ +#define DA9052_TSICONTB_ADCREF 0X80 +#define DA9052_TSICONTB_TSIMAN 0X40 +#define DA9052_TSICONTB_TSIMUX 0X30 +#define DA9052_TSICONTB_TSISEL3 0X08 +#define DA9052_TSICONTB_TSISEL2 0X04 +#define DA9052_TSICONTB_TSISEL1 0X02 +#define DA9052_TSICONTB_TSISEL0 0X01 + +/* TSI X 
CO-ORDINATE MSB RESULT REGISTER BITS */ +#define DA9052_TSIXMSB_TSIXM 0XFF + +/* TSI Y CO-ORDINATE MSB RESULT REGISTER BITS */ +#define DA9052_TSIYMSB_TSIYM 0XFF + +/* TSI CO-ORDINATE LSB RESULT REGISTER BITS */ +#define DA9052_TSILSB_PENDOWN 0X40 +#define DA9052_TSILSB_TSIZL 0X30 +#define DA9052_TSILSB_TSIYL 0X0C +#define DA9052_TSILSB_TSIXL 0X03 + +/* TSI Z MEASUREMENT MSB RESULT REGISTER BIT */ +#define DA9052_TSIZMSB_TSIZM 0XFF + +/* RTC REGISTER */ +/* RTC TIMER SECONDS REGISTER BITS */ +#define DA9052_COUNTS_MONITOR 0X40 +#define DA9052_RTC_SEC 0X3F + +/* RTC TIMER MINUTES REGISTER BIT */ +#define DA9052_RTC_MIN 0X3F + +/* RTC TIMER HOUR REGISTER BIT */ +#define DA9052_RTC_HOUR 0X1F + +/* RTC TIMER DAYS REGISTER BIT */ +#define DA9052_RTC_DAY 0X1F + +/* RTC TIMER MONTHS REGISTER BIT */ +#define DA9052_RTC_MONTH 0X0F + +/* RTC TIMER YEARS REGISTER BIT */ +#define DA9052_RTC_YEAR 0X3F + +/* RTC ALARM MINUTES REGISTER BITS */ +#define DA9052_ALARMM_I_TICK_TYPE 0X80 +#define DA9052_ALARMMI_ALARMTYPE 0X40 + +/* RTC ALARM YEARS REGISTER BITS */ +#define DA9052_ALARM_Y_TICK_ON 0X80 +#define DA9052_ALARM_Y_ALARM_ON 0X40 + +/* RTC SECONDS REGISTER A BITS */ +#define DA9052_SECONDA_SECONDSA 0XFF + +/* RTC SECONDS REGISTER B BITS */ +#define DA9052_SECONDB_SECONDSB 0XFF + +/* RTC SECONDS REGISTER C BITS */ +#define DA9052_SECONDC_SECONDSC 0XFF + +/* RTC SECONDS REGISTER D BITS */ +#define DA9052_SECONDD_SECONDSD 0XFF + +#endif +/* __LINUX_MFD_DA9052_REG_H */ diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h index 82b4c8801a4..8bf2cb9502d 100644 --- a/include/linux/mfd/tps65910.h +++ b/include/linux/mfd/tps65910.h @@ -243,7 +243,8 @@ /*Registers VDD1, VDD2 voltage values definitions */ -#define VDD1_2_NUM_VOLTS 73 +#define VDD1_2_NUM_VOLT_FINE 73 +#define VDD1_2_NUM_VOLT_COARSE 3 #define VDD1_2_MIN_VOLT 6000 #define VDD1_2_OFFSET 125 diff --git a/include/linux/mii.h b/include/linux/mii.h index 27748230aa6..2783eca629a 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -9,6 +9,7 @@ #define __LINUX_MII_H__ #include <linux/types.h> +#include <linux/ethtool.h> /* Generic MII registers. */ #define MII_BMCR 0x00 /* Basic mode control register */ @@ -240,6 +241,205 @@ static inline unsigned int mii_duplex (unsigned int duplex_lock, } /** + * ethtool_adv_to_mii_adv_t + * @ethadv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement + * settings to phy autonegotiation advertisements for the + * MII_ADVERTISE register. + */ +static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv) +{ + u32 result = 0; + + if (ethadv & ADVERTISED_10baseT_Half) + result |= ADVERTISE_10HALF; + if (ethadv & ADVERTISED_10baseT_Full) + result |= ADVERTISE_10FULL; + if (ethadv & ADVERTISED_100baseT_Half) + result |= ADVERTISE_100HALF; + if (ethadv & ADVERTISED_100baseT_Full) + result |= ADVERTISE_100FULL; + if (ethadv & ADVERTISED_Pause) + result |= ADVERTISE_PAUSE_CAP; + if (ethadv & ADVERTISED_Asym_Pause) + result |= ADVERTISE_PAUSE_ASYM; + + return result; +} + +/** + * mii_adv_to_ethtool_adv_t + * @adv: value of the MII_ADVERTISE register + * + * A small helper function that translates MII_ADVERTISE bits + * to ethtool advertisement settings. 
+ */ +static inline u32 mii_adv_to_ethtool_adv_t(u32 adv) +{ + u32 result = 0; + + if (adv & ADVERTISE_10HALF) + result |= ADVERTISED_10baseT_Half; + if (adv & ADVERTISE_10FULL) + result |= ADVERTISED_10baseT_Full; + if (adv & ADVERTISE_100HALF) + result |= ADVERTISED_100baseT_Half; + if (adv & ADVERTISE_100FULL) + result |= ADVERTISED_100baseT_Full; + if (adv & ADVERTISE_PAUSE_CAP) + result |= ADVERTISED_Pause; + if (adv & ADVERTISE_PAUSE_ASYM) + result |= ADVERTISED_Asym_Pause; + + return result; +} + +/** + * ethtool_adv_to_mii_ctrl1000_t + * @ethadv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement + * settings to phy autonegotiation advertisements for the + * MII_CTRL1000 register when in 1000T mode. + */ +static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv) +{ + u32 result = 0; + + if (ethadv & ADVERTISED_1000baseT_Half) + result |= ADVERTISE_1000HALF; + if (ethadv & ADVERTISED_1000baseT_Full) + result |= ADVERTISE_1000FULL; + + return result; +} + +/** + * mii_ctrl1000_to_ethtool_adv_t + * @adv: value of the MII_CTRL1000 register + * + * A small helper function that translates MII_CTRL1000 + * bits, when in 1000Base-T mode, to ethtool + * advertisement settings. + */ +static inline u32 mii_ctrl1000_to_ethtool_adv_t(u32 adv) +{ + u32 result = 0; + + if (adv & ADVERTISE_1000HALF) + result |= ADVERTISED_1000baseT_Half; + if (adv & ADVERTISE_1000FULL) + result |= ADVERTISED_1000baseT_Full; + + return result; +} + +/** + * mii_lpa_to_ethtool_lpa_t + * @adv: value of the MII_LPA register + * + * A small helper function that translates MII_LPA + * bits, when in 1000Base-T mode, to ethtool + * LP advertisement settings. + */ +static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa) +{ + u32 result = 0; + + if (lpa & LPA_LPACK) + result |= ADVERTISED_Autoneg; + + return result | mii_adv_to_ethtool_adv_t(lpa); +} + +/** + * mii_stat1000_to_ethtool_lpa_t + * @adv: value of the MII_STAT1000 register + * + * A small helper function that translates MII_STAT1000 + * bits, when in 1000Base-T mode, to ethtool + * advertisement settings. + */ +static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa) +{ + u32 result = 0; + + if (lpa & LPA_1000HALF) + result |= ADVERTISED_1000baseT_Half; + if (lpa & LPA_1000FULL) + result |= ADVERTISED_1000baseT_Full; + + return result; +} + +/** + * ethtool_adv_to_mii_adv_x + * @ethadv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement + * settings to phy autonegotiation advertisements for the + * MII_CTRL1000 register when in 1000Base-X mode. + */ +static inline u32 ethtool_adv_to_mii_adv_x(u32 ethadv) +{ + u32 result = 0; + + if (ethadv & ADVERTISED_1000baseT_Half) + result |= ADVERTISE_1000XHALF; + if (ethadv & ADVERTISED_1000baseT_Full) + result |= ADVERTISE_1000XFULL; + if (ethadv & ADVERTISED_Pause) + result |= ADVERTISE_1000XPAUSE; + if (ethadv & ADVERTISED_Asym_Pause) + result |= ADVERTISE_1000XPSE_ASYM; + + return result; +} + +/** + * mii_adv_to_ethtool_adv_x + * @adv: value of the MII_CTRL1000 register + * + * A small helper function that translates MII_CTRL1000 + * bits, when in 1000Base-X mode, to ethtool + * advertisement settings. 
+ */ +static inline u32 mii_adv_to_ethtool_adv_x(u32 adv) +{ + u32 result = 0; + + if (adv & ADVERTISE_1000XHALF) + result |= ADVERTISED_1000baseT_Half; + if (adv & ADVERTISE_1000XFULL) + result |= ADVERTISED_1000baseT_Full; + if (adv & ADVERTISE_1000XPAUSE) + result |= ADVERTISED_Pause; + if (adv & ADVERTISE_1000XPSE_ASYM) + result |= ADVERTISED_Asym_Pause; + + return result; +} + +/** + * mii_lpa_to_ethtool_lpa_x + * @adv: value of the MII_LPA register + * + * A small helper function that translates MII_LPA + * bits, when in 1000Base-X mode, to ethtool + * LP advertisement settings. + */ +static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa) +{ + u32 result = 0; + + if (lpa & LPA_LPACK) + result |= ADVERTISED_Autoneg; + + return result | mii_adv_to_ethtool_adv_x(lpa); +} + +/** * mii_advertise_flowctrl - get flow control advertisement flags * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both) */ diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index c41d7270c6c..32085249e9c 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -54,7 +54,7 @@ struct miscdevice { struct device *parent; struct device *this_device; const char *nodename; - mode_t mode; + umode_t mode; }; extern int misc_register(struct miscdevice * misc); diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index b56e4587208..9958ff2cad3 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -59,12 +59,15 @@ enum { MLX4_CMD_HW_HEALTH_CHECK = 0x50, MLX4_CMD_SET_PORT = 0xc, MLX4_CMD_SET_NODE = 0x5a, + MLX4_CMD_QUERY_FUNC = 0x56, MLX4_CMD_ACCESS_DDR = 0x2e, MLX4_CMD_MAP_ICM = 0xffa, MLX4_CMD_UNMAP_ICM = 0xff9, MLX4_CMD_MAP_ICM_AUX = 0xffc, MLX4_CMD_UNMAP_ICM_AUX = 0xffb, MLX4_CMD_SET_ICM_SIZE = 0xffd, + /*master notify fw on finish for slave's flr*/ + MLX4_CMD_INFORM_FLR_DONE = 0x5b, /* TPT commands */ MLX4_CMD_SW2HW_MPT = 0xd, @@ -119,6 +122,26 @@ enum { /* miscellaneous commands */ MLX4_CMD_DIAG_RPRT = 0x30, MLX4_CMD_NOP = 0x31, + MLX4_CMD_ACCESS_MEM = 0x2e, + MLX4_CMD_SET_VEP = 0x52, + + /* Ethernet specific commands */ + MLX4_CMD_SET_VLAN_FLTR = 0x47, + MLX4_CMD_SET_MCAST_FLTR = 0x48, + MLX4_CMD_DUMP_ETH_STATS = 0x49, + + /* Communication channel commands */ + MLX4_CMD_ARM_COMM_CHANNEL = 0x57, + MLX4_CMD_GEN_EQE = 0x58, + + /* virtual commands */ + MLX4_CMD_ALLOC_RES = 0xf00, + MLX4_CMD_FREE_RES = 0xf01, + MLX4_CMD_MCAST_ATTACH = 0xf05, + MLX4_CMD_UCAST_ATTACH = 0xf06, + MLX4_CMD_PROMISC = 0xf08, + MLX4_CMD_QUERY_FUNC_CAP = 0xf0a, + MLX4_CMD_QP_ATTACH = 0xf0b, /* debug commands */ MLX4_CMD_QUERY_DEBUG_MSG = 0x2a, @@ -126,6 +149,7 @@ enum { /* statistics commands */ MLX4_CMD_QUERY_IF_STAT = 0X54, + MLX4_CMD_SET_IF_STAT = 0X55, }; enum { @@ -135,7 +159,8 @@ enum { }; enum { - MLX4_MAILBOX_SIZE = 4096 + MLX4_MAILBOX_SIZE = 4096, + MLX4_ACCESS_MEM_ALIGN = 256, }; enum { @@ -148,6 +173,11 @@ enum { MLX4_SET_PORT_GID_TABLE = 0x5, }; +enum { + MLX4_CMD_WRAPPED, + MLX4_CMD_NATIVE +}; + struct mlx4_dev; struct mlx4_cmd_mailbox { @@ -157,23 +187,24 @@ struct mlx4_cmd_mailbox { int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, int out_is_imm, u32 in_modifier, u8 op_modifier, - u16 op, unsigned long timeout); + u16 op, unsigned long timeout, int native); /* Invoke a command with no output parameter */ static inline int mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u32 in_modifier, - u8 op_modifier, u16 op, unsigned long timeout) + u8 op_modifier, u16 op, unsigned long timeout, + int native) { return __mlx4_cmd(dev, in_param, NULL, 0, 
in_modifier, - op_modifier, op, timeout); + op_modifier, op, timeout, native); } /* Invoke a command with an output mailbox */ static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param, u32 in_modifier, u8 op_modifier, u16 op, - unsigned long timeout) + unsigned long timeout, int native) { return __mlx4_cmd(dev, in_param, &out_param, 0, in_modifier, - op_modifier, op, timeout); + op_modifier, op, timeout, native); } /* @@ -183,13 +214,17 @@ static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param */ static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_param, u32 in_modifier, u8 op_modifier, u16 op, - unsigned long timeout) + unsigned long timeout, int native) { return __mlx4_cmd(dev, in_param, out_param, 1, in_modifier, - op_modifier, op, timeout); + op_modifier, op, timeout, native); } struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev); void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox); +u32 mlx4_comm_get_version(void); + +#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8) + #endif /* MLX4_CMD_H */ diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 84b0b1848f1..5c4fe8e5bfe 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -47,6 +47,9 @@ enum { MLX4_FLAG_MSI_X = 1 << 0, MLX4_FLAG_OLD_PORT_CMDS = 1 << 1, + MLX4_FLAG_MASTER = 1 << 2, + MLX4_FLAG_SLAVE = 1 << 3, + MLX4_FLAG_SRIOV = 1 << 4, }; enum { @@ -58,6 +61,15 @@ enum { }; enum { + MLX4_MAX_NUM_PF = 16, + MLX4_MAX_NUM_VF = 64, + MLX4_MFUNC_MAX = 80, + MLX4_MFUNC_EQ_NUM = 4, + MLX4_MFUNC_MAX_EQES = 8, + MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1) +}; + +enum { MLX4_DEV_CAP_FLAG_RC = 1LL << 0, MLX4_DEV_CAP_FLAG_UC = 1LL << 1, MLX4_DEV_CAP_FLAG_UD = 1LL << 2, @@ -77,11 +89,13 @@ enum { MLX4_DEV_CAP_FLAG_IBOE = 1LL << 30, MLX4_DEV_CAP_FLAG_UC_LOOPBACK = 1LL << 32, MLX4_DEV_CAP_FLAG_FCS_KEEP = 1LL << 34, - MLX4_DEV_CAP_FLAG_WOL = 1LL << 38, + MLX4_DEV_CAP_FLAG_WOL_PORT1 = 1LL << 37, + MLX4_DEV_CAP_FLAG_WOL_PORT2 = 1LL << 38, MLX4_DEV_CAP_FLAG_UDP_RSS = 1LL << 40, MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, - MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48 + MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48, + MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55 }; #define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) @@ -116,7 +130,11 @@ enum mlx4_event { MLX4_EVENT_TYPE_PORT_CHANGE = 0x09, MLX4_EVENT_TYPE_EQ_OVERFLOW = 0x0f, MLX4_EVENT_TYPE_ECC_DETECT = 0x0e, - MLX4_EVENT_TYPE_CMD = 0x0a + MLX4_EVENT_TYPE_CMD = 0x0a, + MLX4_EVENT_TYPE_VEP_UPDATE = 0x19, + MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18, + MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, + MLX4_EVENT_TYPE_NONE = 0xff, }; enum { @@ -183,6 +201,7 @@ enum mlx4_qp_region { }; enum mlx4_port_type { + MLX4_PORT_TYPE_NONE = 0, MLX4_PORT_TYPE_IB = 1, MLX4_PORT_TYPE_ETH = 2, MLX4_PORT_TYPE_AUTO = 3 @@ -215,6 +234,7 @@ static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) struct mlx4_caps { u64 fw_ver; + u32 function; int num_ports; int vl_cap[MLX4_MAX_PORTS + 1]; int ib_mtu_cap[MLX4_MAX_PORTS + 1]; @@ -229,6 +249,7 @@ struct mlx4_caps { u64 trans_code[MLX4_MAX_PORTS + 1]; int local_ca_ack_delay; int num_uars; + u32 uar_page_size; int bf_reg_size; int bf_regs_per_page; int max_sq_sg; @@ -252,8 +273,7 @@ struct mlx4_caps { int num_comp_vectors; int comp_pool; int num_mpts; - int num_mtt_segs; - int mtts_per_seg; + int num_mtts; int fmr_reserved_mtts; int reserved_mtts; int reserved_mrws; 
@@ -283,7 +303,9 @@ struct mlx4_caps { int log_num_prios; enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; u8 supported_type[MLX4_MAX_PORTS + 1]; - u32 port_mask; + u8 suggested_type[MLX4_MAX_PORTS + 1]; + u8 default_sense[MLX4_MAX_PORTS + 1]; + u32 port_mask[MLX4_MAX_PORTS + 1]; enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1]; u32 max_counters; u8 ext_port_cap[MLX4_MAX_PORTS + 1]; @@ -303,7 +325,7 @@ struct mlx4_buf { }; struct mlx4_mtt { - u32 first_seg; + u32 offset; int order; int page_shift; }; @@ -465,10 +487,12 @@ struct mlx4_counter { struct mlx4_dev { struct pci_dev *pdev; unsigned long flags; + unsigned long num_slaves; struct mlx4_caps caps; struct radix_tree_root qp_table_tree; u8 rev_id; char board_id[MLX4_BOARD_ID_LEN]; + int num_vfs; }; struct mlx4_init_port_param { @@ -487,14 +511,32 @@ struct mlx4_init_port_param { #define mlx4_foreach_port(port, dev, type) \ for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ - if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \ - ~(dev)->caps.port_mask) & 1 << ((port) - 1)) + if ((type) == (dev)->caps.port_mask[(port)]) -#define mlx4_foreach_ib_transport_port(port, dev) \ - for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ - if (((dev)->caps.port_mask & 1 << ((port) - 1)) || \ - ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) +#define mlx4_foreach_ib_transport_port(port, dev) \ + for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ + if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \ + ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) +static inline int mlx4_is_master(struct mlx4_dev *dev) +{ + return dev->flags & MLX4_FLAG_MASTER; +} + +static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) +{ + return (qpn < dev->caps.sqp_start + 8); +} + +static inline int mlx4_is_mfunc(struct mlx4_dev *dev) +{ + return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER); +} + +static inline int mlx4_is_slave(struct mlx4_dev *dev) +{ + return dev->flags & MLX4_FLAG_SLAVE; +} int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, struct mlx4_buf *buf); @@ -560,6 +602,10 @@ int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_waterm int mlx4_INIT_PORT(struct mlx4_dev *dev, int port); int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port); +int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + int block_mcast_loopback, enum mlx4_protocol prot); +int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + enum mlx4_protocol prot); int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol protocol); int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], @@ -570,9 +616,11 @@ int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port); int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port); int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); -int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap); -void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn); -int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap); +int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac); +void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); +int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); +int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn); +void mlx4_put_eth_qp(struct 
mlx4_dev *dev, u8 port, u64 mac, int qpn); int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 48cc4cb9785..bee8fa23127 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -97,6 +97,33 @@ enum { MLX4_QP_BIT_RIC = 1 << 4, }; +enum { + MLX4_RSS_HASH_XOR = 0, + MLX4_RSS_HASH_TOP = 1, + + MLX4_RSS_UDP_IPV6 = 1 << 0, + MLX4_RSS_UDP_IPV4 = 1 << 1, + MLX4_RSS_TCP_IPV6 = 1 << 2, + MLX4_RSS_IPV6 = 1 << 3, + MLX4_RSS_TCP_IPV4 = 1 << 4, + MLX4_RSS_IPV4 = 1 << 5, + + /* offset of mlx4_rss_context within mlx4_qp_context.pri_path */ + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH = 0x24, + /* offset of being RSS indirection QP within mlx4_qp_context.flags */ + MLX4_RSS_QPC_FLAG_OFFSET = 13, +}; + +struct mlx4_rss_context { + __be32 base_qpn; + __be32 default_qpn; + u16 reserved; + u8 hash_fn; + u8 flags; + __be32 rss_key[10]; + __be32 base_qpn_udp; +}; + struct mlx4_qp_path { u8 fl; u8 reserved1[2]; @@ -183,6 +210,7 @@ struct mlx4_wqe_ctrl_seg { * [4] IP checksum * [3:2] C (generate completion queue entry) * [1] SE (solicited event) + * [0] FL (force loopback) */ __be32 srcrb_flags; /* diff --git a/include/linux/mm.h b/include/linux/mm.h index 3dc3a8c2c48..5d9b4c9813b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -10,6 +10,7 @@ #include <linux/mmzone.h> #include <linux/rbtree.h> #include <linux/prio_tree.h> +#include <linux/atomic.h> #include <linux/debug_locks.h> #include <linux/mm_types.h> #include <linux/range.h> @@ -1252,41 +1253,34 @@ static inline void pgtable_page_dtor(struct page *page) extern void free_area_init(unsigned long * zones_size); extern void free_area_init_node(int nid, unsigned long * zones_size, unsigned long zone_start_pfn, unsigned long *zholes_size); -#ifdef CONFIG_ARCH_POPULATES_NODE_MAP +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP /* - * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its + * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its * zones, allocate the backing mem_map and account for memory holes in a more * architecture independent manner. This is a substitute for creating the * zone_sizes[] and zholes_size[] arrays and passing them to * free_area_init_node() * * An architecture is expected to register range of page frames backed by - * physical memory with add_active_range() before calling + * physical memory with memblock_add[_node]() before calling * free_area_init_nodes() passing in the PFN each zone ends at. At a basic * usage, an architecture is expected to do something like * * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn, * max_highmem_pfn}; * for_each_valid_physical_page_range() - * add_active_range(node_id, start_pfn, end_pfn) + * memblock_add_node(base, size, nid) * free_area_init_nodes(max_zone_pfns); * - * If the architecture guarantees that there are no holes in the ranges - * registered with add_active_range(), free_bootmem_active_regions() - * will call free_bootmem_node() for each registered physical page range. - * Similarly sparse_memory_present_with_active_regions() calls - * memory_present() for each range when SPARSEMEM is enabled. + * free_bootmem_with_active_regions() calls free_bootmem_node() for each + * registered physical page range. Similarly + * sparse_memory_present_with_active_regions() calls memory_present() for + * each range when SPARSEMEM is enabled. 
* * See mm/page_alloc.c for more information on each function exposed by - * CONFIG_ARCH_POPULATES_NODE_MAP + * CONFIG_HAVE_MEMBLOCK_NODE_MAP. */ extern void free_area_init_nodes(unsigned long *max_zone_pfn); -extern void add_active_range(unsigned int nid, unsigned long start_pfn, - unsigned long end_pfn); -extern void remove_active_range(unsigned int nid, unsigned long start_pfn, - unsigned long end_pfn); -extern void remove_all_active_ranges(void); -void sort_node_map(void); unsigned long node_map_pfn_alignment(void); unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, unsigned long end_pfn); @@ -1299,14 +1293,11 @@ extern void free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn); int add_from_early_node_map(struct range *range, int az, int nr_range, int nid); -u64 __init find_memory_core_early(int nid, u64 size, u64 align, - u64 goal, u64 limit); -typedef int (*work_fn_t)(unsigned long, unsigned long, void *); -extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data); extern void sparse_memory_present_with_active_regions(int nid); -#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ -#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \ +#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ + +#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \ !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) static inline int __early_pfn_to_nid(unsigned long pfn) { diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 415f2db414e..c8ef9bc54d5 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -218,6 +218,7 @@ struct mmc_card { #define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */ #define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */ #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */ +#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ /* byte mode */ unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */ #define MMC_NO_POWER_NOTIFICATION 0 @@ -433,6 +434,11 @@ static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c) return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512; } +static inline int mmc_card_long_read_time(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_LONG_READ_TIME; +} + #define mmc_card_name(c) ((c)->cid.prod_name) #define mmc_card_id(c) (dev_name(&(c)->dev)) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 188cb2ffe8d..3ac040f1936 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -598,13 +598,13 @@ struct zonelist { #endif }; -#ifdef CONFIG_ARCH_POPULATES_NODE_MAP +#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP struct node_active_region { unsigned long start_pfn; unsigned long end_pfn; int nid; }; -#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ +#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ #ifndef CONFIG_DISCONTIGMEM /* The array of struct pages - for discontigmem use pgdat->lmem_map */ @@ -720,7 +720,7 @@ extern int movable_zone; static inline int zone_movable_is_highmem(void) { -#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP) +#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE) return movable_zone == ZONE_HIGHMEM; #else return 0; @@ -938,7 +938,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, #endif #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \ - !defined(CONFIG_ARCH_POPULATES_NODE_MAP) + !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) static inline unsigned long early_pfn_to_nid(unsigned long pfn) { return 0; 
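The rewritten mm.h comment above only outlines the CONFIG_HAVE_MEMBLOCK_NODE_MAP flow; a minimal sketch of that sequence follows. It assumes an invented example_zone_init() hook and made-up bank addresses, sizes, node ids and zone limits -- only memblock_add_node() and free_area_init_nodes() are interfaces from the tree itself.

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>

/* Hypothetical early-boot path: register RAM with memblock, then let the
 * core build zones, mem_map and hole accounting from the registered ranges. */
static void __init example_zone_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };

	/* One call per discovered RAM bank, tagged with its node id
	 * (base/size values here are made up for illustration). */
	memblock_add_node(0x00000000, 0x10000000, 0);	/* 256 MB on node 0 */
	memblock_add_node(0x40000000, 0x10000000, 1);	/* 256 MB on node 1 */

	/* PFN at which each zone ends; 0x50000 covers both banks above. */
	max_zone_pfns[ZONE_NORMAL] = 0x50000;
	free_area_init_nodes(max_zone_pfns);
}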
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h index 29304855652..5a8e3903d77 100644 --- a/include/linux/mnt_namespace.h +++ b/include/linux/mnt_namespace.h @@ -2,39 +2,16 @@ #define _NAMESPACE_H_ #ifdef __KERNEL__ -#include <linux/path.h> -#include <linux/seq_file.h> -#include <linux/wait.h> - -struct mnt_namespace { - atomic_t count; - struct vfsmount * root; - struct list_head list; - wait_queue_head_t poll; - int event; -}; - -struct proc_mounts { - struct seq_file m; /* must be the first element */ - struct mnt_namespace *ns; - struct path root; -}; - +struct mnt_namespace; struct fs_struct; -extern struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt); extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *, struct fs_struct *); extern void put_mnt_ns(struct mnt_namespace *ns); -static inline void get_mnt_ns(struct mnt_namespace *ns) -{ - atomic_inc(&ns->count); -} -extern const struct seq_operations mounts_op; -extern const struct seq_operations mountinfo_op; -extern const struct seq_operations mountstats_op; -extern int mnt_had_events(struct proc_mounts *); +extern const struct file_operations proc_mounts_operations; +extern const struct file_operations proc_mountinfo_operations; +extern const struct file_operations proc_mountstats_operations; #endif #endif diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 468819cdde8..83ac0713ed0 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -542,4 +542,22 @@ struct isapnp_device_id { kernel_ulong_t driver_data; /* data private to the driver */ }; +/** + * struct amba_id - identifies a device on an AMBA bus + * @id: The significant bits if the hardware device ID + * @mask: Bitmask specifying which bits of the id field are significant when + * matching. A driver binds to a device when ((hardware device ID) & mask) + * == id. + * @data: Private data used by the driver. + */ +struct amba_id { + unsigned int id; + unsigned int mask; +#ifndef __KERNEL__ + kernel_ulong_t data; +#else + void *data; +#endif +}; + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/mount.h b/include/linux/mount.h index 33fe53d7811..d7029f4a191 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -47,45 +47,10 @@ struct mnt_namespace; #define MNT_INTERNAL 0x4000 -struct mnt_pcp { - int mnt_count; - int mnt_writers; -}; - struct vfsmount { - struct list_head mnt_hash; - struct vfsmount *mnt_parent; /* fs we are mounted on */ - struct dentry *mnt_mountpoint; /* dentry of mountpoint */ struct dentry *mnt_root; /* root of the mounted tree */ struct super_block *mnt_sb; /* pointer to superblock */ -#ifdef CONFIG_SMP - struct mnt_pcp __percpu *mnt_pcp; - atomic_t mnt_longterm; /* how many of the refs are longterm */ -#else - int mnt_count; - int mnt_writers; -#endif - struct list_head mnt_mounts; /* list of children, anchored here */ - struct list_head mnt_child; /* and going through their mnt_child */ int mnt_flags; - /* 4 bytes hole on 64bits arches without fsnotify */ -#ifdef CONFIG_FSNOTIFY - __u32 mnt_fsnotify_mask; - struct hlist_head mnt_fsnotify_marks; -#endif - const char *mnt_devname; /* Name of device e.g. 
/dev/dsk/hda1 */ - struct list_head mnt_list; - struct list_head mnt_expire; /* link in fs-specific expiry list */ - struct list_head mnt_share; /* circular list of shared mounts */ - struct list_head mnt_slave_list;/* list of slave mounts */ - struct list_head mnt_slave; /* slave list entry */ - struct vfsmount *mnt_master; /* slave is on master->mnt_slave_list */ - struct mnt_namespace *mnt_ns; /* containing namespace */ - int mnt_id; /* mount identifier */ - int mnt_group_id; /* peer group identifier */ - int mnt_expiry_mark; /* true if marked for expiry */ - int mnt_pinned; - int mnt_ghosts; }; struct file; /* forward dec */ @@ -94,15 +59,13 @@ extern int mnt_want_write(struct vfsmount *mnt); extern int mnt_want_write_file(struct file *file); extern int mnt_clone_write(struct vfsmount *mnt); extern void mnt_drop_write(struct vfsmount *mnt); +extern void mnt_drop_write_file(struct file *file); extern void mntput(struct vfsmount *mnt); extern struct vfsmount *mntget(struct vfsmount *mnt); extern void mnt_pin(struct vfsmount *mnt); extern void mnt_unpin(struct vfsmount *mnt); extern int __mnt_is_readonly(struct vfsmount *mnt); -extern struct vfsmount *do_kern_mount(const char *fstype, int flags, - const char *name, void *data); - struct file_system_type; extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, int flags, const char *name, diff --git a/include/linux/neighbour.h b/include/linux/neighbour.h index a7003b7a695..b188f68a08c 100644 --- a/include/linux/neighbour.h +++ b/include/linux/neighbour.h @@ -116,6 +116,7 @@ enum { NDTPA_PROXY_DELAY, /* u64, msecs */ NDTPA_PROXY_QLEN, /* u32 */ NDTPA_LOCKTIME, /* u64, msecs */ + NDTPA_QUEUE_LENBYTES, /* u32 */ __NDTPA_MAX }; #define NDTPA_MAX (__NDTPA_MAX - 1) diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h new file mode 100644 index 00000000000..77f5202977c --- /dev/null +++ b/include/linux/netdev_features.h @@ -0,0 +1,146 @@ +/* + * Network device features. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#ifndef _LINUX_NETDEV_FEATURES_H +#define _LINUX_NETDEV_FEATURES_H + +#include <linux/types.h> + +typedef u64 netdev_features_t; + +enum { + NETIF_F_SG_BIT, /* Scatter/gather IO. */ + NETIF_F_IP_CSUM_BIT, /* Can checksum TCP/UDP over IPv4. */ + __UNUSED_NETIF_F_1, + NETIF_F_HW_CSUM_BIT, /* Can checksum all the packets. */ + NETIF_F_IPV6_CSUM_BIT, /* Can checksum TCP/UDP over IPV6 */ + NETIF_F_HIGHDMA_BIT, /* Can DMA to high memory. */ + NETIF_F_FRAGLIST_BIT, /* Scatter/gather IO. */ + NETIF_F_HW_VLAN_TX_BIT, /* Transmit VLAN hw acceleration */ + NETIF_F_HW_VLAN_RX_BIT, /* Receive VLAN hw acceleration */ + NETIF_F_HW_VLAN_FILTER_BIT, /* Receive filtering on VLAN */ + NETIF_F_VLAN_CHALLENGED_BIT, /* Device cannot handle VLAN packets */ + NETIF_F_GSO_BIT, /* Enable software GSO. */ + NETIF_F_LLTX_BIT, /* LockLess TX - deprecated. Please */ + /* do not use LLTX in new drivers */ + NETIF_F_NETNS_LOCAL_BIT, /* Does not change network namespaces */ + NETIF_F_GRO_BIT, /* Generic receive offload */ + NETIF_F_LRO_BIT, /* large receive offload */ + + /**/NETIF_F_GSO_SHIFT, /* keep the order of SKB_GSO_* bits */ + NETIF_F_TSO_BIT /* ... TCPv4 segmentation */ + = NETIF_F_GSO_SHIFT, + NETIF_F_UFO_BIT, /* ... UDPv4 fragmentation */ + NETIF_F_GSO_ROBUST_BIT, /* ... 
->SKB_GSO_DODGY */ + NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */ + NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */ + NETIF_F_FSO_BIT, /* ... FCoE segmentation */ + NETIF_F_GSO_RESERVED1, /* ... free (fill GSO_MASK to 8 bits) */ + /**/NETIF_F_GSO_LAST, /* [can't be last bit, see GSO_MASK] */ + NETIF_F_GSO_RESERVED2 /* ... free (fill GSO_MASK to 8 bits) */ + = NETIF_F_GSO_LAST, + + NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ + NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ + NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes*/ + NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */ + NETIF_F_RXHASH_BIT, /* Receive hashing offload */ + NETIF_F_RXCSUM_BIT, /* Receive checksumming offload */ + NETIF_F_NOCACHE_COPY_BIT, /* Use no-cache copyfromuser */ + NETIF_F_LOOPBACK_BIT, /* Enable loopback */ + + /* + * Add your fresh new feature above and remember to update + * netdev_features_strings[] in net/core/ethtool.c and maybe + * some feature mask #defines below. Please also describe it + * in Documentation/networking/netdev-features.txt. + */ + + /**/NETDEV_FEATURE_COUNT +}; + +/* copy'n'paste compression ;) */ +#define __NETIF_F_BIT(bit) ((netdev_features_t)1 << (bit)) +#define __NETIF_F(name) __NETIF_F_BIT(NETIF_F_##name##_BIT) + +#define NETIF_F_FCOE_CRC __NETIF_F(FCOE_CRC) +#define NETIF_F_FCOE_MTU __NETIF_F(FCOE_MTU) +#define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST) +#define NETIF_F_FSO __NETIF_F(FSO) +#define NETIF_F_GRO __NETIF_F(GRO) +#define NETIF_F_GSO __NETIF_F(GSO) +#define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST) +#define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA) +#define NETIF_F_HW_CSUM __NETIF_F(HW_CSUM) +#define NETIF_F_HW_VLAN_FILTER __NETIF_F(HW_VLAN_FILTER) +#define NETIF_F_HW_VLAN_RX __NETIF_F(HW_VLAN_RX) +#define NETIF_F_HW_VLAN_TX __NETIF_F(HW_VLAN_TX) +#define NETIF_F_IP_CSUM __NETIF_F(IP_CSUM) +#define NETIF_F_IPV6_CSUM __NETIF_F(IPV6_CSUM) +#define NETIF_F_LLTX __NETIF_F(LLTX) +#define NETIF_F_LOOPBACK __NETIF_F(LOOPBACK) +#define NETIF_F_LRO __NETIF_F(LRO) +#define NETIF_F_NETNS_LOCAL __NETIF_F(NETNS_LOCAL) +#define NETIF_F_NOCACHE_COPY __NETIF_F(NOCACHE_COPY) +#define NETIF_F_NTUPLE __NETIF_F(NTUPLE) +#define NETIF_F_RXCSUM __NETIF_F(RXCSUM) +#define NETIF_F_RXHASH __NETIF_F(RXHASH) +#define NETIF_F_SCTP_CSUM __NETIF_F(SCTP_CSUM) +#define NETIF_F_SG __NETIF_F(SG) +#define NETIF_F_TSO6 __NETIF_F(TSO6) +#define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN) +#define NETIF_F_TSO __NETIF_F(TSO) +#define NETIF_F_UFO __NETIF_F(UFO) +#define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED) + +/* Features valid for ethtool to change */ +/* = all defined minus driver/device-class-related */ +#define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \ + NETIF_F_LLTX | NETIF_F_NETNS_LOCAL) + +/* remember that ((t)1 << t_BITS) is undefined in C99 */ +#define NETIF_F_ETHTOOL_BITS ((__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) | \ + (__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) - 1)) & \ + ~NETIF_F_NEVER_CHANGE) + +/* Segmentation offload feature mask */ +#define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \ + __NETIF_F_BIT(NETIF_F_GSO_SHIFT)) + +/* List of features with software fallbacks. 
*/ +#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \ + NETIF_F_TSO6 | NETIF_F_UFO) + +#define NETIF_F_GEN_CSUM NETIF_F_HW_CSUM +#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM) +#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM) +#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM) + +#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) + +#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ + NETIF_F_FSO) + +/* + * If one device supports one of these features, then enable them + * for all in netdev_increment_features. + */ +#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ + NETIF_F_SG | NETIF_F_HIGHDMA | \ + NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED) +/* + * If one device doesn't support one of these features, then disable it + * for all in netdev_increment_features. + */ +#define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO) + +/* changeable features with no special hardware requirements */ +#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) + +#endif /* _LINUX_NETDEV_FEATURES_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index cbeb5867cff..a1d109590da 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -43,6 +43,7 @@ #include <linux/rculist.h> #include <linux/dmaengine.h> #include <linux/workqueue.h> +#include <linux/dynamic_queue_limits.h> #include <linux/ethtool.h> #include <net/net_namespace.h> @@ -50,8 +51,10 @@ #ifdef CONFIG_DCB #include <net/dcbnl.h> #endif +#include <net/netprio_cgroup.h> + +#include <linux/netdev_features.h> -struct vlan_group; struct netpoll_info; struct phy_device; /* 802.11 specific */ @@ -141,22 +144,20 @@ static inline bool dev_xmit_complete(int rc) * used. */ -#if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) +#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) # if defined(CONFIG_MAC80211_MESH) # define LL_MAX_HEADER 128 # else # define LL_MAX_HEADER 96 # endif -#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) +#elif IS_ENABLED(CONFIG_TR) # define LL_MAX_HEADER 48 #else # define LL_MAX_HEADER 32 #endif -#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \ - !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \ - !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \ - !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE) +#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \ + !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL) #define MAX_HEADER LL_MAX_HEADER #else #define MAX_HEADER (LL_MAX_HEADER + 48) @@ -212,6 +213,11 @@ enum { #include <linux/cache.h> #include <linux/skbuff.h> +#ifdef CONFIG_RPS +#include <linux/jump_label.h> +extern struct jump_label_key rps_needed; +#endif + struct neighbour; struct neigh_parms; struct sk_buff; @@ -272,16 +278,11 @@ struct hh_cache { * * We could use other alignment values, but we must maintain the * relationship HH alignment <= LL alignment. - * - * LL_ALLOCATED_SPACE also takes into account the tailroom the device - * may need. 
*/ #define LL_RESERVED_SPACE(dev) \ ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) -#define LL_ALLOCATED_SPACE(dev) \ - ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) struct header_ops { int (*create) (struct sk_buff *skb, struct net_device *dev, @@ -516,11 +517,23 @@ static inline void napi_synchronize(const struct napi_struct *n) #endif enum netdev_queue_state_t { - __QUEUE_STATE_XOFF, + __QUEUE_STATE_DRV_XOFF, + __QUEUE_STATE_STACK_XOFF, __QUEUE_STATE_FROZEN, -#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \ - (1 << __QUEUE_STATE_FROZEN)) +#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \ + (1 << __QUEUE_STATE_STACK_XOFF)) +#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \ + (1 << __QUEUE_STATE_FROZEN)) }; +/* + * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The + * netif_tx_* functions below are used to manipulate this flag. The + * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit + * queue independently. The netif_xmit_*stopped functions below are called + * to check if the queue has been stopped by the driver or stack (either + * of the XOFF bits are set in the state). Drivers should not need to call + * netif_xmit*stopped functions, they should only be using netif_tx_*. + */ struct netdev_queue { /* @@ -528,9 +541,8 @@ struct netdev_queue { */ struct net_device *dev; struct Qdisc *qdisc; - unsigned long state; struct Qdisc *qdisc_sleeping; -#if defined(CONFIG_RPS) || defined(CONFIG_XPS) +#ifdef CONFIG_SYSFS struct kobject kobj; #endif #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) @@ -545,6 +557,18 @@ struct netdev_queue { * please use this field instead of dev->trans_start */ unsigned long trans_start; + + /* + * Number of TX timeouts for this queue + * (/sys/class/net/DEV/Q/trans_timeout) + */ + unsigned long trans_timeout; + + unsigned long state; + +#ifdef CONFIG_BQL + struct dql dql; +#endif } ____cacheline_aligned_in_smp; static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) @@ -573,7 +597,7 @@ struct rps_map { struct rcu_head rcu; u16 cpus[0]; }; -#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16))) +#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) /* * The rps_dev_flow structure contains the mapping of a flow to a CPU, the @@ -597,7 +621,7 @@ struct rps_dev_flow_table { struct rps_dev_flow flows[0]; }; #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ - (_num * sizeof(struct rps_dev_flow))) + ((_num) * sizeof(struct rps_dev_flow))) /* * The rps_sock_flow_table contains mappings of flows to the last CPU @@ -608,7 +632,7 @@ struct rps_sock_flow_table { u16 ents[0]; }; #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \ - (_num * sizeof(u16))) + ((_num) * sizeof(u16))) #define RPS_NO_CPU 0xffff @@ -660,7 +684,7 @@ struct xps_map { struct rcu_head rcu; u16 queues[0]; }; -#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16))) +#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \ / sizeof(u16)) @@ -683,6 +707,23 @@ struct netdev_tc_txq { u16 offset; }; +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +/* + * 
This structure is to hold information about the device + * configured to run FCoE protocol stack. + */ +struct netdev_fcoe_hbainfo { + char manufacturer[64]; + char serial_number[64]; + char hardware_version[64]; + char driver_version[64]; + char optionrom_version[64]; + char firmware_version[64]; + char model[256]; + char model_description[256]; +}; +#endif + /* * This structure defines the management hooks for network devices. * The following hooks can be defined; unless noted otherwise, they are @@ -767,11 +808,11 @@ struct netdev_tc_txq { * 3. Update dev->stats asynchronously and atomically, and define * neither operation. * - * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); + * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) * this function is called when a VLAN id is registered. * - * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); + * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) * this function is called when a VLAN id is unregistered. * @@ -823,6 +864,13 @@ struct netdev_tc_txq { * perform necessary setup and returns 1 to indicate the device is set up * successfully to perform DDP on this I/O, otherwise this returns 0. * + * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, + * struct netdev_fcoe_hbainfo *hbainfo); + * Called when the FCoE Protocol stack wants information on the underlying + * device. This information is utilized by the FCoE protocol stack to + * register attributes with Fiber Channel management service as per the + * FC-GS Fabric Device Management Information(FDMI) specification. + * * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); * Called when the underlying device wants to override default World Wide * Name (WWN) generation mechanism in FCoE protocol stack to pass its own @@ -845,12 +893,13 @@ struct netdev_tc_txq { * Called to release previously enslaved netdev. * * Feature/offload setting functions. - * u32 (*ndo_fix_features)(struct net_device *dev, u32 features); + * netdev_features_t (*ndo_fix_features)(struct net_device *dev, + * netdev_features_t features); * Adjusts the requested feature flags according to device-specific * constraints, and returns the resulting flags. Must not modify * the device state. * - * int (*ndo_set_features)(struct net_device *dev, u32 features); + * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); * Called to update device configuration to new features. Passed * feature set might be less than what was returned by ndo_fix_features()). * Must return >0 or -errno if it changed dev->features itself. 
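The two feature hooks documented above change type in this series; the short sketch below shows how a driver pair honouring that contract might look. The "foo" device and foo_hw_set_rx_csum() are hypothetical names invented for illustration, not interfaces added by this patch.

#include <linux/netdevice.h>

/* Hypothetical hardware helper: stands in for the real register write
 * that turns RX checksum offload on or off. */
static void foo_hw_set_rx_csum(struct net_device *dev, bool on)
{
}

/* ndo_fix_features: clamp the requested set to what the hardware supports;
 * must not touch device state. */
static netdev_features_t foo_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	if (!(features & NETIF_F_SG))
		features &= ~NETIF_F_ALL_TSO;	/* TSO needs scatter/gather */
	return features;
}

/* ndo_set_features: program the hardware for the accepted set.  Returning 0
 * (rather than >0 or -errno) tells the core to update dev->features itself. */
static int foo_set_features(struct net_device *dev,
			    netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM)
		foo_hw_set_rx_csum(dev, !!(features & NETIF_F_RXCSUM));

	return 0;
}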
@@ -885,9 +934,9 @@ struct net_device_ops { struct rtnl_link_stats64 *storage); struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); - void (*ndo_vlan_rx_add_vid)(struct net_device *dev, + int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); - void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, + int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); #ifdef CONFIG_NET_POLL_CONTROLLER void (*ndo_poll_controller)(struct net_device *dev); @@ -912,7 +961,7 @@ struct net_device_ops { int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); int (*ndo_setup_tc)(struct net_device *dev, u8 tc); -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#if IS_ENABLED(CONFIG_FCOE) int (*ndo_fcoe_enable)(struct net_device *dev); int (*ndo_fcoe_disable)(struct net_device *dev); int (*ndo_fcoe_ddp_setup)(struct net_device *dev, @@ -925,9 +974,11 @@ struct net_device_ops { u16 xid, struct scatterlist *sgl, unsigned int sgc); + int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, + struct netdev_fcoe_hbainfo *hbainfo); #endif -#if defined(CONFIG_LIBFCOE) || defined(CONFIG_LIBFCOE_MODULE) +#if IS_ENABLED(CONFIG_LIBFCOE) #define NETDEV_FCOE_WWNN 0 #define NETDEV_FCOE_WWPN 1 int (*ndo_fcoe_get_wwn)(struct net_device *dev, @@ -944,10 +995,12 @@ struct net_device_ops { struct net_device *slave_dev); int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); - u32 (*ndo_fix_features)(struct net_device *dev, - u32 features); + netdev_features_t (*ndo_fix_features)(struct net_device *dev, + netdev_features_t features); int (*ndo_set_features)(struct net_device *dev, - u32 features); + netdev_features_t features); + int (*ndo_neigh_construct)(struct neighbour *n); + void (*ndo_neigh_destroy)(struct neighbour *n); }; /* @@ -997,91 +1050,13 @@ struct net_device { struct list_head unreg_list; /* currently active device features */ - u32 features; + netdev_features_t features; /* user-changeable features */ - u32 hw_features; + netdev_features_t hw_features; /* user-requested features */ - u32 wanted_features; + netdev_features_t wanted_features; /* mask of features inheritable by VLAN devices */ - u32 vlan_features; - - /* Net device feature bits; if you change something, - * also update netdev_features_strings[] in ethtool.c */ - -#define NETIF_F_SG 1 /* Scatter/gather IO. */ -#define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */ -#define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */ -#define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */ -#define NETIF_F_IPV6_CSUM 16 /* Can checksum TCP/UDP over IPV6 */ -#define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */ -#define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */ -#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */ -#define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */ -#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */ -#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ -#define NETIF_F_GSO 2048 /* Enable software GSO. */ -#define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. 
Please */ - /* do not use LLTX in new drivers */ -#define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */ -#define NETIF_F_GRO 16384 /* Generic receive offload */ -#define NETIF_F_LRO 32768 /* large receive offload */ - -/* the GSO_MASK reserves bits 16 through 23 */ -#define NETIF_F_FCOE_CRC (1 << 24) /* FCoE CRC32 */ -#define NETIF_F_SCTP_CSUM (1 << 25) /* SCTP checksum offload */ -#define NETIF_F_FCOE_MTU (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/ -#define NETIF_F_NTUPLE (1 << 27) /* N-tuple filters supported */ -#define NETIF_F_RXHASH (1 << 28) /* Receive hashing offload */ -#define NETIF_F_RXCSUM (1 << 29) /* Receive checksumming offload */ -#define NETIF_F_NOCACHE_COPY (1 << 30) /* Use no-cache copyfromuser */ -#define NETIF_F_LOOPBACK (1 << 31) /* Enable loopback */ - - /* Segmentation offload features */ -#define NETIF_F_GSO_SHIFT 16 -#define NETIF_F_GSO_MASK 0x00ff0000 -#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT) -#define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT) -#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT) -#define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT) -#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT) -#define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT) - - /* Features valid for ethtool to change */ - /* = all defined minus driver/device-class-related */ -#define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \ - NETIF_F_LLTX | NETIF_F_NETNS_LOCAL) -#define NETIF_F_ETHTOOL_BITS (0xff3fffff & ~NETIF_F_NEVER_CHANGE) - - /* List of features with software fallbacks. */ -#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \ - NETIF_F_TSO6 | NETIF_F_UFO) - - -#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) -#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM) -#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM) -#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM) - -#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) - -#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ - NETIF_F_FSO) - - /* - * If one device supports one of these features, then enable them - * for all in netdev_increment_features. - */ -#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ - NETIF_F_SG | NETIF_F_HIGHDMA | \ - NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED) - /* - * If one device doesn't support one of these features, then disable it - * for all in netdev_increment_features. - */ -#define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO) - - /* changeable features with no special hardware requirements */ -#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) + netdev_features_t vlan_features; /* Interface index. 
Unique device identifier */ int ifindex; @@ -1132,6 +1107,7 @@ struct net_device { unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ unsigned char addr_assign_type; /* hw address assignment type */ unsigned char addr_len; /* hardware address length */ + unsigned char neigh_priv_len; unsigned short dev_id; /* for shared network cards */ spinlock_t addr_list_lock; @@ -1144,11 +1120,11 @@ struct net_device { /* Protocol specific pointers */ -#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) - struct vlan_group __rcu *vlgrp; /* VLAN group */ +#if IS_ENABLED(CONFIG_VLAN_8021Q) + struct vlan_info __rcu *vlan_info; /* VLAN info */ #endif -#ifdef CONFIG_NET_DSA - void *dsa_ptr; /* dsa specific data */ +#if IS_ENABLED(CONFIG_NET_DSA) + struct dsa_switch_tree *dsa_ptr; /* dsa specific data */ #endif void *atalk_ptr; /* AppleTalk link */ struct in_device __rcu *ip_ptr; /* IPv4 specific data */ @@ -1184,9 +1160,11 @@ struct net_device { unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ -#if defined(CONFIG_RPS) || defined(CONFIG_XPS) +#ifdef CONFIG_SYSFS struct kset *queues_kset; +#endif +#ifdef CONFIG_RPS struct netdev_rx_queue *_rx; /* Number of RX queues allocated at register_netdev() time */ @@ -1308,10 +1286,13 @@ struct net_device { struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; u8 prio_tc_map[TC_BITMASK + 1]; -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#if IS_ENABLED(CONFIG_FCOE) /* max exchange id for FCoE LRO by ddp */ unsigned int fcoe_ddp_xid; #endif +#if IS_ENABLED(CONFIG_NETPRIO_CGROUP) + struct netprio_map __rcu *priomap; +#endif /* phy device may attach itself for hardware timestamping */ struct phy_device *phydev; @@ -1515,7 +1496,7 @@ struct packet_type { struct packet_type *, struct net_device *); struct sk_buff *(*gso_segment)(struct sk_buff *skb, - u32 features); + netdev_features_t features); int (*gso_send_check)(struct sk_buff *skb); struct sk_buff **(*gro_receive)(struct sk_buff **head, struct sk_buff *skb); @@ -1783,7 +1764,7 @@ extern void __netif_schedule(struct Qdisc *q); static inline void netif_schedule_queue(struct netdev_queue *txq) { - if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) + if (!(txq->state & QUEUE_STATE_ANY_XOFF)) __netif_schedule(txq->qdisc); } @@ -1797,7 +1778,7 @@ static inline void netif_tx_schedule_all(struct net_device *dev) static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) { - clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state); + clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); } /** @@ -1829,7 +1810,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) return; } #endif - if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state)) + if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) __netif_schedule(dev_queue->qdisc); } @@ -1861,7 +1842,7 @@ static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) pr_info("netif_stop_queue() cannot be called before register_netdev()\n"); return; } - set_bit(__QUEUE_STATE_XOFF, &dev_queue->state); + set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); } /** @@ -1888,7 +1869,7 @@ static inline void netif_tx_stop_all_queues(struct net_device *dev) static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue) { - return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state); + return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); } /** @@ -1902,9 +1883,68 @@ static inline int netif_queue_stopped(const struct net_device *dev) return 
netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); } -static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue) +static inline int netif_xmit_stopped(const struct netdev_queue *dev_queue) +{ + return dev_queue->state & QUEUE_STATE_ANY_XOFF; +} + +static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) +{ + return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; +} + +static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, + unsigned int bytes) +{ +#ifdef CONFIG_BQL + dql_queued(&dev_queue->dql, bytes); + if (unlikely(dql_avail(&dev_queue->dql) < 0)) { + set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); + if (unlikely(dql_avail(&dev_queue->dql) >= 0)) + clear_bit(__QUEUE_STATE_STACK_XOFF, + &dev_queue->state); + } +#endif +} + +static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) { - return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN; + netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); +} + +static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, + unsigned pkts, unsigned bytes) +{ +#ifdef CONFIG_BQL + if (likely(bytes)) { + dql_completed(&dev_queue->dql, bytes); + if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF, + &dev_queue->state) && + dql_avail(&dev_queue->dql) >= 0)) { + if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, + &dev_queue->state)) + netif_schedule_queue(dev_queue); + } + } +#endif +} + +static inline void netdev_completed_queue(struct net_device *dev, + unsigned pkts, unsigned bytes) +{ + netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); +} + +static inline void netdev_tx_reset_queue(struct netdev_queue *q) +{ +#ifdef CONFIG_BQL + dql_reset(&q->dql); +#endif +} + +static inline void netdev_reset_queue(struct net_device *dev_queue) +{ + netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); } /** @@ -1991,7 +2031,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) if (netpoll_trap()) return; #endif - if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state)) + if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) __netif_schedule(txq->qdisc); } @@ -2520,7 +2560,8 @@ extern int netdev_set_master(struct net_device *dev, struct net_device *master) extern int netdev_set_bond_master(struct net_device *dev, struct net_device *master); extern int skb_checksum_help(struct sk_buff *skb); -extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features); +extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, + netdev_features_t features); #ifdef CONFIG_BUG extern void netdev_rx_csum_fault(struct net_device *dev); #else @@ -2536,6 +2577,8 @@ extern void net_disable_timestamp(void); extern void *dev_seq_start(struct seq_file *seq, loff_t *pos); extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos); extern void dev_seq_stop(struct seq_file *seq, void *v); +extern int dev_seq_open_ops(struct inode *inode, struct file *file, + const struct seq_operations *ops); #endif extern int netdev_class_create_file(struct class_attribute *class_attr); @@ -2547,11 +2590,13 @@ extern const char *netdev_drivername(const struct net_device *dev); extern void linkwatch_run_queue(void); -static inline u32 netdev_get_wanted_features(struct net_device *dev) +static inline netdev_features_t netdev_get_wanted_features( + struct net_device *dev) { return (dev->features & ~dev->hw_features) | dev->wanted_features; } -u32 netdev_increment_features(u32 all, u32 one, u32 mask); 
+netdev_features_t netdev_increment_features(netdev_features_t all, + netdev_features_t one, netdev_features_t mask); int __netdev_update_features(struct net_device *dev); void netdev_update_features(struct net_device *dev); void netdev_change_features(struct net_device *dev); @@ -2559,21 +2604,31 @@ void netdev_change_features(struct net_device *dev); void netif_stacked_transfer_operstate(const struct net_device *rootdev, struct net_device *dev); -u32 netif_skb_features(struct sk_buff *skb); +netdev_features_t netif_skb_features(struct sk_buff *skb); -static inline int net_gso_ok(u32 features, int gso_type) +static inline int net_gso_ok(netdev_features_t features, int gso_type) { - int feature = gso_type << NETIF_F_GSO_SHIFT; + netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT; + + /* check flags correspondence */ + BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); + return (features & feature) == feature; } -static inline int skb_gso_ok(struct sk_buff *skb, u32 features) +static inline int skb_gso_ok(struct sk_buff *skb, netdev_features_t features) { return net_gso_ok(features, skb_shinfo(skb)->gso_type) && (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); } -static inline int netif_needs_gso(struct sk_buff *skb, int features) +static inline int netif_needs_gso(struct sk_buff *skb, + netdev_features_t features) { return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); @@ -2592,22 +2647,6 @@ static inline int netif_is_bond_slave(struct net_device *dev) extern struct pernet_operations __net_initdata loopback_net_ops; -static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev) -{ - if (dev->features & NETIF_F_RXCSUM) - return 1; - if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum) - return 0; - return dev->ethtool_ops->get_rx_csum(dev); -} - -static inline u32 dev_ethtool_get_flags(struct net_device *dev) -{ - if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags) - return 0; - return dev->ethtool_ops->get_flags(dev); -} - /* Logging, debugging and troubleshooting/diagnostic helpers. 
*/ /* netdev_printk helpers, similar to dev_printk */ diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 857f5026ced..b809265607d 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -162,6 +162,24 @@ extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[]; extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; +#if defined(CONFIG_JUMP_LABEL) +#include <linux/jump_label.h> +extern struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; +static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) +{ + if (__builtin_constant_p(pf) && + __builtin_constant_p(hook)) + return static_branch(&nf_hooks_needed[pf][hook]); + + return !list_empty(&nf_hooks[pf][hook]); +} +#else +static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) +{ + return !list_empty(&nf_hooks[pf][hook]); +} +#endif + int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct sk_buff *), int thresh); @@ -179,11 +197,9 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, struct net_device *outdev, int (*okfn)(struct sk_buff *), int thresh) { -#ifndef CONFIG_NETFILTER_DEBUG - if (list_empty(&nf_hooks[pf][hook])) - return 1; -#endif - return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh); + if (nf_hooks_active(pf, hook)) + return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh); + return 1; } static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild index a1b410c76fc..e144f54185c 100644 --- a/include/linux/netfilter/Kbuild +++ b/include/linux/netfilter/Kbuild @@ -5,7 +5,9 @@ header-y += nf_conntrack_ftp.h header-y += nf_conntrack_sctp.h header-y += nf_conntrack_tcp.h header-y += nf_conntrack_tuple_common.h +header-y += nf_nat.h header-y += nfnetlink.h +header-y += nfnetlink_acct.h header-y += nfnetlink_compat.h header-y += nfnetlink_conntrack.h header-y += nfnetlink_log.h @@ -21,6 +23,7 @@ header-y += xt_DSCP.h header-y += xt_IDLETIMER.h header-y += xt_LED.h header-y += xt_MARK.h +header-y += xt_nfacct.h header-y += xt_NFLOG.h header-y += xt_NFQUEUE.h header-y += xt_RATEEST.h @@ -40,6 +43,7 @@ header-y += xt_cpu.h header-y += xt_dccp.h header-y += xt_devgroup.h header-y += xt_dscp.h +header-y += xt_ecn.h header-y += xt_esp.h header-y += xt_hashlimit.h header-y += xt_helper.h diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index 0d3dd66322e..9e3a2838291 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -83,6 +83,10 @@ enum ip_conntrack_status { /* Conntrack is a fake untracked entry */ IPS_UNTRACKED_BIT = 12, IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT), + + /* Conntrack has a userspace helper. */ + IPS_USERSPACE_HELPER_BIT = 13, + IPS_USERSPACE_HELPER = (1 << IPS_USERSPACE_HELPER_BIT), }; /* Connection tracking event types */ diff --git a/include/linux/netfilter/nf_conntrack_tuple_common.h b/include/linux/netfilter/nf_conntrack_tuple_common.h index 2ea22b018a8..2f6bbc5b812 100644 --- a/include/linux/netfilter/nf_conntrack_tuple_common.h +++ b/include/linux/netfilter/nf_conntrack_tuple_common.h @@ -7,6 +7,33 @@ enum ip_conntrack_dir { IP_CT_DIR_MAX }; +/* The protocol-specific manipulable parts of the tuple: always in + * network order + */ +union nf_conntrack_man_proto { + /* Add other protocols here. 
*/ + __be16 all; + + struct { + __be16 port; + } tcp; + struct { + __be16 port; + } udp; + struct { + __be16 id; + } icmp; + struct { + __be16 port; + } dccp; + struct { + __be16 port; + } sctp; + struct { + __be16 key; /* GRE key is 32bit, PPtP only uses 16bit */ + } gre; +}; + #define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL) #endif /* _NF_CONNTRACK_TUPLE_COMMON_H */ diff --git a/include/linux/netfilter/nf_nat.h b/include/linux/netfilter/nf_nat.h new file mode 100644 index 00000000000..8df2d13730b --- /dev/null +++ b/include/linux/netfilter/nf_nat.h @@ -0,0 +1,25 @@ +#ifndef _NETFILTER_NF_NAT_H +#define _NETFILTER_NF_NAT_H + +#include <linux/netfilter.h> +#include <linux/netfilter/nf_conntrack_tuple_common.h> + +#define NF_NAT_RANGE_MAP_IPS 1 +#define NF_NAT_RANGE_PROTO_SPECIFIED 2 +#define NF_NAT_RANGE_PROTO_RANDOM 4 +#define NF_NAT_RANGE_PERSISTENT 8 + +struct nf_nat_ipv4_range { + unsigned int flags; + __be32 min_ip; + __be32 max_ip; + union nf_conntrack_man_proto min; + union nf_conntrack_man_proto max; +}; + +struct nf_nat_ipv4_multi_range_compat { + unsigned int rangesize; + struct nf_nat_ipv4_range range[1]; +}; + +#endif /* _NETFILTER_NF_NAT_H */ diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 74d33861473..b64454c2f79 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -48,7 +48,8 @@ struct nfgenmsg { #define NFNL_SUBSYS_ULOG 4 #define NFNL_SUBSYS_OSF 5 #define NFNL_SUBSYS_IPSET 6 -#define NFNL_SUBSYS_COUNT 7 +#define NFNL_SUBSYS_ACCT 7 +#define NFNL_SUBSYS_COUNT 8 #ifdef __KERNEL__ diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h new file mode 100644 index 00000000000..7c4279b4ae7 --- /dev/null +++ b/include/linux/netfilter/nfnetlink_acct.h @@ -0,0 +1,36 @@ +#ifndef _NFNL_ACCT_H_ +#define _NFNL_ACCT_H_ + +#ifndef NFACCT_NAME_MAX +#define NFACCT_NAME_MAX 32 +#endif + +enum nfnl_acct_msg_types { + NFNL_MSG_ACCT_NEW, + NFNL_MSG_ACCT_GET, + NFNL_MSG_ACCT_GET_CTRZERO, + NFNL_MSG_ACCT_DEL, + NFNL_MSG_ACCT_MAX +}; + +enum nfnl_acct_type { + NFACCT_UNSPEC, + NFACCT_NAME, + NFACCT_PKTS, + NFACCT_BYTES, + NFACCT_USE, + __NFACCT_MAX +}; +#define NFACCT_MAX (__NFACCT_MAX - 1) + +#ifdef __KERNEL__ + +struct nf_acct; + +extern struct nf_acct *nfnl_acct_find_get(const char *filter_name); +extern void nfnl_acct_put(struct nf_acct *acct); +extern void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct); + +#endif /* __KERNEL__ */ + +#endif /* _NFNL_ACCT_H */ diff --git a/include/linux/netfilter/xt_CT.h b/include/linux/netfilter/xt_CT.h index b56e76811c0..6390f0992f3 100644 --- a/include/linux/netfilter/xt_CT.h +++ b/include/linux/netfilter/xt_CT.h @@ -3,7 +3,8 @@ #include <linux/types.h> -#define XT_CT_NOTRACK 0x1 +#define XT_CT_NOTRACK 0x1 +#define XT_CT_USERSPACE_HELPER 0x2 struct xt_ct_target_info { __u16 flags; diff --git a/include/linux/netfilter/xt_ecn.h b/include/linux/netfilter/xt_ecn.h new file mode 100644 index 00000000000..7158fca364f --- /dev/null +++ b/include/linux/netfilter/xt_ecn.h @@ -0,0 +1,35 @@ +/* iptables module for matching the ECN header in IPv4 and TCP header + * + * (C) 2002 Harald Welte <laforge@gnumonks.org> + * + * This software is distributed under GNU GPL v2, 1991 + * + * ipt_ecn.h,v 1.4 2002/08/05 19:39:00 laforge Exp +*/ +#ifndef _XT_ECN_H +#define _XT_ECN_H + +#include <linux/types.h> +#include <linux/netfilter/xt_dscp.h> + +#define XT_ECN_IP_MASK (~XT_DSCP_MASK) + 
+#define XT_ECN_OP_MATCH_IP 0x01 +#define XT_ECN_OP_MATCH_ECE 0x10 +#define XT_ECN_OP_MATCH_CWR 0x20 + +#define XT_ECN_OP_MATCH_MASK 0xce + +/* match info */ +struct xt_ecn_info { + __u8 operation; + __u8 invert; + __u8 ip_ect; + union { + struct { + __u8 ect; + } tcp; + } proto; +}; + +#endif /* _XT_ECN_H */ diff --git a/include/linux/netfilter/xt_nfacct.h b/include/linux/netfilter/xt_nfacct.h new file mode 100644 index 00000000000..3e19c8a8657 --- /dev/null +++ b/include/linux/netfilter/xt_nfacct.h @@ -0,0 +1,13 @@ +#ifndef _XT_NFACCT_MATCH_H +#define _XT_NFACCT_MATCH_H + +#include <linux/netfilter/nfnetlink_acct.h> + +struct nf_acct; + +struct xt_nfacct_match_info { + char name[NFACCT_NAME_MAX]; + struct nf_acct *nfacct; +}; + +#endif /* _XT_NFACCT_MATCH_H */ diff --git a/include/linux/netfilter/xt_rpfilter.h b/include/linux/netfilter/xt_rpfilter.h new file mode 100644 index 00000000000..8358d4f7195 --- /dev/null +++ b/include/linux/netfilter/xt_rpfilter.h @@ -0,0 +1,23 @@ +#ifndef _XT_RPATH_H +#define _XT_RPATH_H + +#include <linux/types.h> + +enum { + XT_RPFILTER_LOOSE = 1 << 0, + XT_RPFILTER_VALID_MARK = 1 << 1, + XT_RPFILTER_ACCEPT_LOCAL = 1 << 2, + XT_RPFILTER_INVERT = 1 << 3, +#ifdef __KERNEL__ + XT_RPFILTER_OPTION_MASK = XT_RPFILTER_LOOSE | + XT_RPFILTER_VALID_MARK | + XT_RPFILTER_ACCEPT_LOCAL | + XT_RPFILTER_INVERT, +#endif +}; + +struct xt_rpfilter_info { + __u8 flags; +}; + +#endif diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild index c3b45480ecf..f9930c87fff 100644 --- a/include/linux/netfilter_ipv4/Kbuild +++ b/include/linux/netfilter_ipv4/Kbuild @@ -12,4 +12,3 @@ header-y += ipt_ah.h header-y += ipt_ecn.h header-y += ipt_realm.h header-y += ipt_ttl.h -header-y += nf_nat.h diff --git a/include/linux/netfilter_ipv4/ipt_ecn.h b/include/linux/netfilter_ipv4/ipt_ecn.h index eabf95fb7d3..0e0c063dbf6 100644 --- a/include/linux/netfilter_ipv4/ipt_ecn.h +++ b/include/linux/netfilter_ipv4/ipt_ecn.h @@ -1,35 +1,15 @@ -/* iptables module for matching the ECN header in IPv4 and TCP header - * - * (C) 2002 Harald Welte <laforge@gnumonks.org> - * - * This software is distributed under GNU GPL v2, 1991 - * - * ipt_ecn.h,v 1.4 2002/08/05 19:39:00 laforge Exp -*/ #ifndef _IPT_ECN_H #define _IPT_ECN_H -#include <linux/types.h> -#include <linux/netfilter/xt_dscp.h> +#include <linux/netfilter/xt_ecn.h> +#define ipt_ecn_info xt_ecn_info -#define IPT_ECN_IP_MASK (~XT_DSCP_MASK) - -#define IPT_ECN_OP_MATCH_IP 0x01 -#define IPT_ECN_OP_MATCH_ECE 0x10 -#define IPT_ECN_OP_MATCH_CWR 0x20 - -#define IPT_ECN_OP_MATCH_MASK 0xce - -/* match info */ -struct ipt_ecn_info { - __u8 operation; - __u8 invert; - __u8 ip_ect; - union { - struct { - __u8 ect; - } tcp; - } proto; +enum { + IPT_ECN_IP_MASK = XT_ECN_IP_MASK, + IPT_ECN_OP_MATCH_IP = XT_ECN_OP_MATCH_IP, + IPT_ECN_OP_MATCH_ECE = XT_ECN_OP_MATCH_ECE, + IPT_ECN_OP_MATCH_CWR = XT_ECN_OP_MATCH_CWR, + IPT_ECN_OP_MATCH_MASK = XT_ECN_OP_MATCH_MASK, }; -#endif /* _IPT_ECN_H */ +#endif /* IPT_ECN_H */ diff --git a/include/linux/netfilter_ipv4/nf_nat.h b/include/linux/netfilter_ipv4/nf_nat.h deleted file mode 100644 index 7a861d09fc8..00000000000 --- a/include/linux/netfilter_ipv4/nf_nat.h +++ /dev/null @@ -1,58 +0,0 @@ -#ifndef _LINUX_NF_NAT_H -#define _LINUX_NF_NAT_H - -#include <linux/types.h> - -#define IP_NAT_RANGE_MAP_IPS 1 -#define IP_NAT_RANGE_PROTO_SPECIFIED 2 -#define IP_NAT_RANGE_PROTO_RANDOM 4 -#define IP_NAT_RANGE_PERSISTENT 8 - -/* The protocol-specific manipulable parts of the tuple. 
*/ -union nf_conntrack_man_proto { - /* Add other protocols here. */ - __be16 all; - - struct { - __be16 port; - } tcp; - struct { - __be16 port; - } udp; - struct { - __be16 id; - } icmp; - struct { - __be16 port; - } dccp; - struct { - __be16 port; - } sctp; - struct { - __be16 key; /* GRE key is 32bit, PPtP only uses 16bit */ - } gre; -}; - -/* Single range specification. */ -struct nf_nat_range { - /* Set to OR of flags above. */ - unsigned int flags; - - /* Inclusive: network order. */ - __be32 min_ip, max_ip; - - /* Inclusive: network order */ - union nf_conntrack_man_proto min, max; -}; - -/* For backwards compat: don't use in modern code. */ -struct nf_nat_multi_range_compat { - unsigned int rangesize; /* Must be 1. */ - - /* hangs off end. */ - struct nf_nat_range range[1]; -}; - -#define nf_nat_multi_range nf_nat_multi_range_compat - -#endif diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 8374d296736..52e48959cfa 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -8,7 +8,7 @@ #define NETLINK_UNUSED 1 /* Unused number */ #define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */ #define NETLINK_FIREWALL 3 /* Firewalling hook */ -#define NETLINK_INET_DIAG 4 /* INET socket monitoring */ +#define NETLINK_SOCK_DIAG 4 /* socket monitoring */ #define NETLINK_NFLOG 5 /* netfilter/iptables ULOG */ #define NETLINK_XFRM 6 /* ipsec */ #define NETLINK_SELINUX 7 /* SELinux event notifications */ @@ -27,6 +27,8 @@ #define NETLINK_RDMA 20 #define NETLINK_CRYPTO 21 /* Crypto layer */ +#define NETLINK_INET_DIAG NETLINK_SOCK_DIAG + #define MAX_LINKS 32 struct sockaddr_nl { diff --git a/include/linux/nfc.h b/include/linux/nfc.h index 36cb955b05c..01d4e5d6032 100644 --- a/include/linux/nfc.h +++ b/include/linux/nfc.h @@ -62,6 +62,8 @@ enum nfc_commands { NFC_CMD_GET_DEVICE, NFC_CMD_DEV_UP, NFC_CMD_DEV_DOWN, + NFC_CMD_DEP_LINK_UP, + NFC_CMD_DEP_LINK_DOWN, NFC_CMD_START_POLL, NFC_CMD_STOP_POLL, NFC_CMD_GET_TARGET, @@ -86,6 +88,9 @@ enum nfc_commands { * @NFC_ATTR_TARGET_SENS_RES: NFC-A targets extra information such as NFCID * @NFC_ATTR_TARGET_SEL_RES: NFC-A targets extra information (useful if the * target is not NFC-Forum compliant) + * @NFC_ATTR_TARGET_NFCID1: NFC-A targets identifier, max 10 bytes + * @NFC_ATTR_COMM_MODE: Passive or active mode + * @NFC_ATTR_RF_MODE: Initiator or target */ enum nfc_attrs { NFC_ATTR_UNSPEC, @@ -95,6 +100,9 @@ enum nfc_attrs { NFC_ATTR_TARGET_INDEX, NFC_ATTR_TARGET_SENS_RES, NFC_ATTR_TARGET_SEL_RES, + NFC_ATTR_TARGET_NFCID1, + NFC_ATTR_COMM_MODE, + NFC_ATTR_RF_MODE, /* private: internal use only */ __NFC_ATTR_AFTER_LAST }; @@ -111,6 +119,14 @@ enum nfc_attrs { #define NFC_PROTO_MAX 6 +/* NFC communication modes */ +#define NFC_COMM_ACTIVE 0 +#define NFC_COMM_PASSIVE 1 + +/* NFC RF modes */ +#define NFC_RF_INITIATOR 0 +#define NFC_RF_TARGET 1 + /* NFC protocols masks used in bitsets */ #define NFC_PROTO_JEWEL_MASK (1 << NFC_PROTO_JEWEL) #define NFC_PROTO_MIFARE_MASK (1 << NFC_PROTO_MIFARE) @@ -125,9 +141,22 @@ struct sockaddr_nfc { __u32 nfc_protocol; }; +#define NFC_LLCP_MAX_SERVICE_NAME 63 +struct sockaddr_nfc_llcp { + sa_family_t sa_family; + __u32 dev_idx; + __u32 target_idx; + __u32 nfc_protocol; + __u8 dsap; /* Destination SAP, if known */ + __u8 ssap; /* Source SAP to be bound to */ + char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */; + size_t service_name_len; +}; + /* NFC socket protocols */ #define NFC_SOCKPROTO_RAW 0 -#define NFC_SOCKPROTO_MAX 1 +#define NFC_SOCKPROTO_LLCP 1 +#define 
NFC_SOCKPROTO_MAX 2 #define NFC_HEADER_SIZE 1 diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index ab2c6343361..8c29950d2fa 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -373,7 +373,7 @@ extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); extern void put_nfs_open_context(struct nfs_open_context *ctx); extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); -extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred, fmode_t f_mode); +extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode); extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx); extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx); extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); @@ -410,6 +410,9 @@ extern const struct inode_operations nfs_file_inode_operations; extern const struct inode_operations nfs3_file_inode_operations; #endif /* CONFIG_NFS_V3 */ extern const struct file_operations nfs_file_operations; +#ifdef CONFIG_NFS_V4 +extern const struct file_operations nfs4_file_operations; +#endif /* CONFIG_NFS_V4 */ extern const struct address_space_operations nfs_file_aops; extern const struct address_space_operations nfs_dir_aops; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index c74595ba709..2a7c533be5d 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1192,6 +1192,7 @@ struct nfs_rpc_ops { const struct dentry_operations *dentry_ops; const struct inode_operations *dir_inode_ops; const struct inode_operations *file_inode_ops; + const struct file_operations *file_ops; int (*getroot) (struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index 8049bf77d79..0f5ff373982 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h @@ -509,6 +509,38 @@ * @NL80211_CMD_TDLS_OPER: Perform a high-level TDLS command (e.g. link setup). * @NL80211_CMD_TDLS_MGMT: Send a TDLS management frame. * + * @NL80211_CMD_UNEXPECTED_FRAME: Used by an application controlling an AP + * (or GO) interface (i.e. hostapd) to ask for unexpected frames to + * implement sending deauth to stations that send unexpected class 3 + * frames. Also used as the event sent by the kernel when such a frame + * is received. + * For the event, the %NL80211_ATTR_MAC attribute carries the TA and + * other attributes like the interface index are present. + * If used as the command it must have an interface index and you can + * only unsubscribe from the event by closing the socket. Subscription + * is also for %NL80211_CMD_UNEXPECTED_4ADDR_FRAME events. + * + * @NL80211_CMD_UNEXPECTED_4ADDR_FRAME: Sent as an event indicating that the + * associated station identified by %NL80211_ATTR_MAC sent a 4addr frame + * and wasn't already in a 4-addr VLAN. The event will be sent similarly + * to the %NL80211_CMD_UNEXPECTED_FRAME event, to the same listener. + * + * @NL80211_CMD_PROBE_CLIENT: Probe an associated station on an AP interface + * by sending a null data frame to it and reporting when the frame is + * acknowleged. This is used to allow timing out inactive clients. Uses + * %NL80211_ATTR_IFINDEX and %NL80211_ATTR_MAC. 
The command returns a + * direct reply with an %NL80211_ATTR_COOKIE that is later used to match + * up the event with the request. The event includes the same data and + * has %NL80211_ATTR_ACK set if the frame was ACKed. + * + * @NL80211_CMD_REGISTER_BEACONS: Register this socket to receive beacons from + * other BSSes when any interfaces are in AP mode. This helps implement + * OLBC handling in hostapd. Beacons are reported in %NL80211_CMD_FRAME + * messages. Note that per PHY only one application may register. + * + * @NL80211_CMD_SET_NOACK_MAP: sets a bitmap for the individual TIDs whether + * No Acknowledgement Policy should be applied. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -638,6 +670,16 @@ enum nl80211_commands { NL80211_CMD_TDLS_OPER, NL80211_CMD_TDLS_MGMT, + NL80211_CMD_UNEXPECTED_FRAME, + + NL80211_CMD_PROBE_CLIENT, + + NL80211_CMD_REGISTER_BEACONS, + + NL80211_CMD_UNEXPECTED_4ADDR_FRAME, + + NL80211_CMD_SET_NOACK_MAP, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -658,6 +700,8 @@ enum nl80211_commands { #define NL80211_CMD_DISASSOCIATE NL80211_CMD_DISASSOCIATE #define NL80211_CMD_REG_BEACON_HINT NL80211_CMD_REG_BEACON_HINT +#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS + /* source-level API compatibility */ #define NL80211_CMD_GET_MESH_PARAMS NL80211_CMD_GET_MESH_CONFIG #define NL80211_CMD_SET_MESH_PARAMS NL80211_CMD_SET_MESH_CONFIG @@ -1109,6 +1153,46 @@ enum nl80211_commands { * %NL80211_CMD_TDLS_MGMT. Otherwise %NL80211_CMD_TDLS_OPER should be * used for asking the driver to perform a TDLS operation. * + * @NL80211_ATTR_DEVICE_AP_SME: This u32 attribute may be listed for devices + * that have AP support to indicate that they have the AP SME integrated + * with support for the features listed in this attribute, see + * &enum nl80211_ap_sme_features. + * + * @NL80211_ATTR_DONT_WAIT_FOR_ACK: Used with %NL80211_CMD_FRAME, this tells + * the driver to not wait for an acknowledgement. Note that due to this, + * it will also not give a status callback nor return a cookie. This is + * mostly useful for probe responses to save airtime. + * + * @NL80211_ATTR_FEATURE_FLAGS: This u32 attribute contains flags from + * &enum nl80211_feature_flags and is advertised in wiphy information. + * @NL80211_ATTR_PROBE_RESP_OFFLOAD: Indicates that the HW responds to probe + * + * requests while operating in AP-mode. + * This attribute holds a bitmap of the supported protocols for + * offloading (see &enum nl80211_probe_resp_offload_support_attr). + * + * @NL80211_ATTR_PROBE_RESP: Probe Response template data. Contains the entire + * probe-response frame. The DA field in the 802.11 header is zero-ed out, + * to be filled by the FW. + * @NL80211_ATTR_DISABLE_HT: Force HT capable interfaces to disable + * this feature. Currently, only supported in mac80211 drivers. + * @NL80211_ATTR_HT_CAPABILITY_MASK: Specify which bits of the + * ATTR_HT_CAPABILITY to which attention should be paid. + * Currently, only mac80211 NICs support this feature. + * The values that may be configured are: + * MCS rates, MAX-AMSDU, HT-20-40 and HT_CAP_SGI_40 + * AMPDU density and AMPDU factor. + * All values are treated as suggestions and may be ignored + * by the driver as required. The actual values may be seen in + * the station debugfs ht_caps file. + * + * @NL80211_ATTR_DFS_REGION: region for regulatory rules which this country + * abides to when initiating radiation on DFS channels. 
A country maps + * to one DFS region. + * + * @NL80211_ATTR_NOACK_MAP: This u16 bitmap contains the No Ack Policy of + * up to 16 TIDs. + * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -1337,6 +1421,23 @@ enum nl80211_attrs { NL80211_ATTR_TDLS_SUPPORT, NL80211_ATTR_TDLS_EXTERNAL_SETUP, + NL80211_ATTR_DEVICE_AP_SME, + + NL80211_ATTR_DONT_WAIT_FOR_ACK, + + NL80211_ATTR_FEATURE_FLAGS, + + NL80211_ATTR_PROBE_RESP_OFFLOAD, + + NL80211_ATTR_PROBE_RESP, + + NL80211_ATTR_DFS_REGION, + + NL80211_ATTR_DISABLE_HT, + NL80211_ATTR_HT_CAPABILITY_MASK, + + NL80211_ATTR_NOACK_MAP, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -1371,6 +1472,7 @@ enum nl80211_attrs { #define NL80211_ATTR_AKM_SUITES NL80211_ATTR_AKM_SUITES #define NL80211_ATTR_KEY NL80211_ATTR_KEY #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS +#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS #define NL80211_MAX_SUPP_RATES 32 #define NL80211_MAX_SUPP_REG_RULES 32 @@ -1434,7 +1536,11 @@ enum nl80211_iftype { * @NL80211_STA_FLAG_WME: station is WME/QoS capable * @NL80211_STA_FLAG_MFP: station uses management frame protection * @NL80211_STA_FLAG_AUTHENTICATED: station is authenticated - * @NL80211_STA_FLAG_TDLS_PEER: station is a TDLS peer + * @NL80211_STA_FLAG_TDLS_PEER: station is a TDLS peer -- this flag should + * only be used in managed mode (even in the flags mask). Note that the + * flag can't be changed, it is only valid while adding a station, and + * attempts to change it will silently be ignored (rather than rejected + * as errors.) * @NL80211_STA_FLAG_MAX: highest station flag number currently defined * @__NL80211_STA_FLAG_AFTER_LAST: internal use */ @@ -1549,6 +1655,7 @@ enum nl80211_sta_bss_param { * containing info as possible, see &enum nl80211_sta_bss_param * @NL80211_STA_INFO_CONNECTED_TIME: time since the station is last connected * @NL80211_STA_INFO_STA_FLAGS: Contains a struct nl80211_sta_flag_update. + * @NL80211_STA_INFO_BEACON_LOSS: count of times beacon loss was detected (u32) * @__NL80211_STA_INFO_AFTER_LAST: internal * @NL80211_STA_INFO_MAX: highest possible station info attribute */ @@ -1571,6 +1678,7 @@ enum nl80211_sta_info { NL80211_STA_INFO_BSS_PARAM, NL80211_STA_INFO_CONNECTED_TIME, NL80211_STA_INFO_STA_FLAGS, + NL80211_STA_INFO_BEACON_LOSS, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, @@ -1845,6 +1953,21 @@ enum nl80211_reg_rule_flags { }; /** + * enum nl80211_dfs_regions - regulatory DFS regions + * + * @NL80211_DFS_UNSET: Country has no DFS master region specified + * @NL80211_DFS_FCC: Country follows DFS master rules from FCC + * @NL80211_DFS_ETSI: Country follows DFS master rules from ETSI + * @NL80211_DFS_JP: Country follows DFS master rules from JP/MKK/Telec + */ +enum nl80211_dfs_regions { + NL80211_DFS_UNSET = 0, + NL80211_DFS_FCC = 1, + NL80211_DFS_ETSI = 2, + NL80211_DFS_JP = 3, +}; + +/** * enum nl80211_survey_info - survey information * * These attribute types are used with %NL80211_ATTR_SURVEY_INFO @@ -1977,6 +2100,10 @@ enum nl80211_mntr_flags { * access to a broader network beyond the MBSS. This is done via Root * Announcement frames. * + * @NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL: The minimum interval of time (in + * TUs) during which a mesh STA can send only one Action frame containing a + * PERR element. 
+ * * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute * * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use @@ -2000,6 +2127,7 @@ enum nl80211_meshconf_params { NL80211_MESHCONF_ELEMENT_TTL, NL80211_MESHCONF_HWMP_RANN_INTERVAL, NL80211_MESHCONF_GATE_ANNOUNCEMENTS, + NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, /* keep last */ __NL80211_MESHCONF_ATTR_AFTER_LAST, @@ -2650,4 +2778,45 @@ enum nl80211_tdls_operation { NL80211_TDLS_DISABLE_LINK, }; +/* + * enum nl80211_ap_sme_features - device-integrated AP features + * Reserved for future use, no bits are defined in + * NL80211_ATTR_DEVICE_AP_SME yet. +enum nl80211_ap_sme_features { +}; + */ + +/** + * enum nl80211_feature_flags - device/driver features + * @NL80211_FEATURE_SK_TX_STATUS: This driver supports reflecting back + * TX status to the socket error queue when requested with the + * socket option. + * @NL80211_FEATURE_HT_IBSS: This driver supports IBSS with HT datarates. + */ +enum nl80211_feature_flags { + NL80211_FEATURE_SK_TX_STATUS = 1 << 0, + NL80211_FEATURE_HT_IBSS = 1 << 1, +}; + +/** + * enum nl80211_probe_resp_offload_support_attr - optional supported + * protocols for probe-response offloading by the driver/FW. + * To be used with the %NL80211_ATTR_PROBE_RESP_OFFLOAD attribute. + * Each enum value represents a bit in the bitmap of supported + * protocols. Typically a subset of probe-requests belonging to a + * supported protocol will be excluded from offload and uploaded + * to the host. + * + * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS: Support for WPS ver. 1 + * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2: Support for WPS ver. 2 + * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P: Support for P2P + * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U: Support for 802.11u + */ +enum nl80211_probe_resp_offload_support_attr { + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS = 1<<0, + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 = 1<<1, + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P = 1<<2, + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U = 1<<3, +}; + #endif /* __LINUX_NL80211_H */ diff --git a/include/linux/node.h b/include/linux/node.h index 92370e22343..624e53cecc0 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -14,12 +14,12 @@ #ifndef _LINUX_NODE_H_ #define _LINUX_NODE_H_ -#include <linux/sysdev.h> +#include <linux/device.h> #include <linux/cpumask.h> #include <linux/workqueue.h> struct node { - struct sys_device sysdev; + struct device dev; #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS) struct work_struct node_work; @@ -80,6 +80,6 @@ static inline void register_hugetlbfs_with_node(node_registration_func_t reg, } #endif -#define to_node(sys_device) container_of(sys_device, struct node, sysdev) +#define to_node(device) container_of(device, struct node, dev) #endif /* _LINUX_NODE_H_ */ diff --git a/include/linux/of.h b/include/linux/of.h index 4948552d60f..a75a831e205 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -65,6 +65,27 @@ struct device_node { #endif }; +#define MAX_PHANDLE_ARGS 8 +struct of_phandle_args { + struct device_node *np; + int args_count; + uint32_t args[MAX_PHANDLE_ARGS]; +}; + +#if defined(CONFIG_SPARC) || !defined(CONFIG_OF) +/* Dummy ref counting routines - to be implemented later */ +static inline struct device_node *of_node_get(struct device_node *node) +{ + return node; +} +static inline void of_node_put(struct device_node *node) +{ +} +#else +extern struct device_node *of_node_get(struct device_node *node); +extern void of_node_put(struct device_node *node); +#endif + 
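The of.h hunk above adds struct of_phandle_args, which the reworked of_parse_phandle_with_args() (further down in this patch) fills in instead of returning a raw node/argument pair. A minimal consumer sketch, assuming a hypothetical "example-list" property whose target nodes carry a "#example-cells" specifier (neither name is part of this patch):

#include <linux/kernel.h>
#include <linux/of.h>

static int example_parse_phandle(struct device_node *np)
{
	struct of_phandle_args args;
	int err;

	/* Resolve entry 0 of the hypothetical "example-list" property;
	 * "#example-cells" in the target node gives the argument count. */
	err = of_parse_phandle_with_args(np, "example-list", "#example-cells",
					 0, &args);
	if (err)
		return err;

	pr_info("target %s, %d argument cells\n",
		args.np->full_name, args.args_count);

	/* args.np is handed back with a reference held (of_node_get());
	 * drop it once the arguments have been consumed. */
	of_node_put(args.np);
	return 0;
}
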
#ifdef CONFIG_OF /* Pointer for first entry in chain of all nodes. */ @@ -95,21 +116,6 @@ static inline void of_node_set_flag(struct device_node *n, unsigned long flag) extern struct device_node *of_find_all_nodes(struct device_node *prev); -#if defined(CONFIG_SPARC) -/* Dummy ref counting routines - to be implemented later */ -static inline struct device_node *of_node_get(struct device_node *node) -{ - return node; -} -static inline void of_node_put(struct device_node *node) -{ -} - -#else -extern struct device_node *of_node_get(struct device_node *node); -extern void of_node_put(struct device_node *node); -#endif - /* * OF address retrieval & translation */ @@ -219,8 +225,8 @@ extern int of_device_is_available(const struct device_node *device); extern const void *of_get_property(const struct device_node *node, const char *name, int *lenp); -#define for_each_property(pp, properties) \ - for (pp = properties; pp != NULL; pp = pp->next) +#define for_each_property_of_node(dn, pp) \ + for (pp = dn->properties; pp != NULL; pp = pp->next) extern int of_n_addr_cells(struct device_node *np); extern int of_n_size_cells(struct device_node *np); @@ -230,9 +236,9 @@ extern int of_modalias_node(struct device_node *node, char *modalias, int len); extern struct device_node *of_parse_phandle(struct device_node *np, const char *phandle_name, int index); -extern int of_parse_phandles_with_args(struct device_node *np, +extern int of_parse_phandle_with_args(struct device_node *np, const char *list_name, const char *cells_name, int index, - struct device_node **out_node, const void **out_args); + struct of_phandle_args *out_args); extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)); extern int of_alias_get_id(struct device_node *np, const char *stem); diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index c84d900fbbb..ed136ad698c 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -71,7 +71,7 @@ extern int of_fdt_is_compatible(struct boot_param_header *blob, unsigned long node, const char *compat); extern int of_fdt_match(struct boot_param_header *blob, unsigned long node, - const char **compat); + const char *const *compat); extern void of_fdt_unflatten_tree(unsigned long *blob, struct device_node **mynodes); @@ -88,7 +88,7 @@ extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname, extern void *of_get_flat_dt_prop(unsigned long node, const char *name, unsigned long *size); extern int of_flat_dt_is_compatible(unsigned long node, const char *name); -extern int of_flat_dt_match(unsigned long node, const char **matches); +extern int of_flat_dt_match(unsigned long node, const char *const *matches); extern unsigned long of_get_flat_dt_root(void); extern int early_init_dt_scan_chosen(unsigned long node, const char *uname, diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index 52280a2b5e6..b254052a49d 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -18,6 +18,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/gpio.h> +#include <linux/of.h> struct device_node; @@ -57,8 +58,9 @@ extern int of_mm_gpiochip_add(struct device_node *np, extern void of_gpiochip_add(struct gpio_chip *gc); extern void of_gpiochip_remove(struct gpio_chip *gc); extern struct gpio_chip *of_node_to_gpiochip(struct device_node *np); -extern int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np, - const void *gpio_spec, u32 *flags); +extern int of_gpio_simple_xlate(struct gpio_chip *gc, + const struct 
of_phandle_args *gpiospec, + u32 *flags); #else /* CONFIG_OF_GPIO */ @@ -75,8 +77,8 @@ static inline unsigned int of_gpio_count(struct device_node *np) } static inline int of_gpio_simple_xlate(struct gpio_chip *gc, - struct device_node *np, - const void *gpio_spec, u32 *flags) + const struct of_phandle_args *gpiospec, + u32 *flags) { return -ENOSYS; } diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h new file mode 100644 index 00000000000..eb1efa54fe8 --- /dev/null +++ b/include/linux/openvswitch.h @@ -0,0 +1,452 @@ +/* + * Copyright (c) 2007-2011 Nicira Networks. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + */ + +#ifndef _LINUX_OPENVSWITCH_H +#define _LINUX_OPENVSWITCH_H 1 + +#include <linux/types.h> + +/** + * struct ovs_header - header for OVS Generic Netlink messages. + * @dp_ifindex: ifindex of local port for datapath (0 to make a request not + * specific to a datapath). + * + * Attributes following the header are specific to a particular OVS Generic + * Netlink family, but all of the OVS families use this header. + */ + +struct ovs_header { + int dp_ifindex; +}; + +/* Datapaths. */ + +#define OVS_DATAPATH_FAMILY "ovs_datapath" +#define OVS_DATAPATH_MCGROUP "ovs_datapath" +#define OVS_DATAPATH_VERSION 0x1 + +enum ovs_datapath_cmd { + OVS_DP_CMD_UNSPEC, + OVS_DP_CMD_NEW, + OVS_DP_CMD_DEL, + OVS_DP_CMD_GET, + OVS_DP_CMD_SET +}; + +/** + * enum ovs_datapath_attr - attributes for %OVS_DP_* commands. + * @OVS_DP_ATTR_NAME: Name of the network device that serves as the "local + * port". This is the name of the network device whose dp_ifindex is given in + * the &struct ovs_header. Always present in notifications. Required in + * %OVS_DP_NEW requests. May be used as an alternative to specifying + * dp_ifindex in other requests (with a dp_ifindex of 0). + * @OVS_DP_ATTR_UPCALL_PID: The Netlink socket in userspace that is initially + * set on the datapath port (for OVS_ACTION_ATTR_MISS). Only valid on + * %OVS_DP_CMD_NEW requests. A value of zero indicates that upcalls should + * not be sent. + * @OVS_DP_ATTR_STATS: Statistics about packets that have passed through the + * datapath. Always present in notifications. + * + * These attributes follow the &struct ovs_header within the Generic Netlink + * payload for %OVS_DP_* commands. + */ +enum ovs_datapath_attr { + OVS_DP_ATTR_UNSPEC, + OVS_DP_ATTR_NAME, /* name of dp_ifindex netdev */ + OVS_DP_ATTR_UPCALL_PID, /* Netlink PID to receive upcalls */ + OVS_DP_ATTR_STATS, /* struct ovs_dp_stats */ + __OVS_DP_ATTR_MAX +}; + +#define OVS_DP_ATTR_MAX (__OVS_DP_ATTR_MAX - 1) + +struct ovs_dp_stats { + __u64 n_hit; /* Number of flow table matches. */ + __u64 n_missed; /* Number of flow table misses. */ + __u64 n_lost; /* Number of misses not sent to userspace. 
*/ + __u64 n_flows; /* Number of flows present */ +}; + +struct ovs_vport_stats { + __u64 rx_packets; /* total packets received */ + __u64 tx_packets; /* total packets transmitted */ + __u64 rx_bytes; /* total bytes received */ + __u64 tx_bytes; /* total bytes transmitted */ + __u64 rx_errors; /* bad packets received */ + __u64 tx_errors; /* packet transmit problems */ + __u64 rx_dropped; /* no space in linux buffers */ + __u64 tx_dropped; /* no space available in linux */ +}; + +/* Fixed logical ports. */ +#define OVSP_LOCAL ((__u16)0) + +/* Packet transfer. */ + +#define OVS_PACKET_FAMILY "ovs_packet" +#define OVS_PACKET_VERSION 0x1 + +enum ovs_packet_cmd { + OVS_PACKET_CMD_UNSPEC, + + /* Kernel-to-user notifications. */ + OVS_PACKET_CMD_MISS, /* Flow table miss. */ + OVS_PACKET_CMD_ACTION, /* OVS_ACTION_ATTR_USERSPACE action. */ + + /* Userspace commands. */ + OVS_PACKET_CMD_EXECUTE /* Apply actions to a packet. */ +}; + +/** + * enum ovs_packet_attr - attributes for %OVS_PACKET_* commands. + * @OVS_PACKET_ATTR_PACKET: Present for all notifications. Contains the entire + * packet as received, from the start of the Ethernet header onward. For + * %OVS_PACKET_CMD_ACTION, %OVS_PACKET_ATTR_PACKET reflects changes made by + * actions preceding %OVS_ACTION_ATTR_USERSPACE, but %OVS_PACKET_ATTR_KEY is + * the flow key extracted from the packet as originally received. + * @OVS_PACKET_ATTR_KEY: Present for all notifications. Contains the flow key + * extracted from the packet as nested %OVS_KEY_ATTR_* attributes. This allows + * userspace to adapt its flow setup strategy by comparing its notion of the + * flow key against the kernel's. + * @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet. Used + * for %OVS_PACKET_CMD_EXECUTE. It has nested %OVS_ACTION_ATTR_* attributes. + * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION + * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an + * %OVS_USERSPACE_ATTR_USERDATA attribute. + * + * These attributes follow the &struct ovs_header within the Generic Netlink + * payload for %OVS_PACKET_* commands. + */ +enum ovs_packet_attr { + OVS_PACKET_ATTR_UNSPEC, + OVS_PACKET_ATTR_PACKET, /* Packet data. */ + OVS_PACKET_ATTR_KEY, /* Nested OVS_KEY_ATTR_* attributes. */ + OVS_PACKET_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ + OVS_PACKET_ATTR_USERDATA, /* u64 OVS_ACTION_ATTR_USERSPACE arg. */ + __OVS_PACKET_ATTR_MAX +}; + +#define OVS_PACKET_ATTR_MAX (__OVS_PACKET_ATTR_MAX - 1) + +/* Virtual ports. */ + +#define OVS_VPORT_FAMILY "ovs_vport" +#define OVS_VPORT_MCGROUP "ovs_vport" +#define OVS_VPORT_VERSION 0x1 + +enum ovs_vport_cmd { + OVS_VPORT_CMD_UNSPEC, + OVS_VPORT_CMD_NEW, + OVS_VPORT_CMD_DEL, + OVS_VPORT_CMD_GET, + OVS_VPORT_CMD_SET +}; + +enum ovs_vport_type { + OVS_VPORT_TYPE_UNSPEC, + OVS_VPORT_TYPE_NETDEV, /* network device */ + OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */ + __OVS_VPORT_TYPE_MAX +}; + +#define OVS_VPORT_TYPE_MAX (__OVS_VPORT_TYPE_MAX - 1) + +/** + * enum ovs_vport_attr - attributes for %OVS_VPORT_* commands. + * @OVS_VPORT_ATTR_PORT_NO: 32-bit port number within datapath. + * @OVS_VPORT_ATTR_TYPE: 32-bit %OVS_VPORT_TYPE_* constant describing the type + * of vport. + * @OVS_VPORT_ATTR_NAME: Name of vport. For a vport based on a network device + * this is the name of the network device. Maximum length %IFNAMSIZ-1 bytes + * plus a null terminator. + * @OVS_VPORT_ATTR_OPTIONS: Vport-specific configuration information. 
+ * @OVS_VPORT_ATTR_UPCALL_PID: The Netlink socket in userspace that + * OVS_PACKET_CMD_MISS upcalls will be directed to for packets received on + * this port. A value of zero indicates that upcalls should not be sent. + * @OVS_VPORT_ATTR_STATS: A &struct ovs_vport_stats giving statistics for + * packets sent or received through the vport. + * + * These attributes follow the &struct ovs_header within the Generic Netlink + * payload for %OVS_VPORT_* commands. + * + * For %OVS_VPORT_CMD_NEW requests, the %OVS_VPORT_ATTR_TYPE and + * %OVS_VPORT_ATTR_NAME attributes are required. %OVS_VPORT_ATTR_PORT_NO is + * optional; if not specified a free port number is automatically selected. + * Whether %OVS_VPORT_ATTR_OPTIONS is required or optional depends on the type + * of vport. + * and other attributes are ignored. + * + * For other requests, if %OVS_VPORT_ATTR_NAME is specified then it is used to + * look up the vport to operate on; otherwise dp_idx from the &struct + * ovs_header plus %OVS_VPORT_ATTR_PORT_NO determine the vport. + */ +enum ovs_vport_attr { + OVS_VPORT_ATTR_UNSPEC, + OVS_VPORT_ATTR_PORT_NO, /* u32 port number within datapath */ + OVS_VPORT_ATTR_TYPE, /* u32 OVS_VPORT_TYPE_* constant. */ + OVS_VPORT_ATTR_NAME, /* string name, up to IFNAMSIZ bytes long */ + OVS_VPORT_ATTR_OPTIONS, /* nested attributes, varies by vport type */ + OVS_VPORT_ATTR_UPCALL_PID, /* u32 Netlink PID to receive upcalls */ + OVS_VPORT_ATTR_STATS, /* struct ovs_vport_stats */ + __OVS_VPORT_ATTR_MAX +}; + +#define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1) + +/* Flows. */ + +#define OVS_FLOW_FAMILY "ovs_flow" +#define OVS_FLOW_MCGROUP "ovs_flow" +#define OVS_FLOW_VERSION 0x1 + +enum ovs_flow_cmd { + OVS_FLOW_CMD_UNSPEC, + OVS_FLOW_CMD_NEW, + OVS_FLOW_CMD_DEL, + OVS_FLOW_CMD_GET, + OVS_FLOW_CMD_SET +}; + +struct ovs_flow_stats { + __u64 n_packets; /* Number of matched packets. */ + __u64 n_bytes; /* Number of matched bytes. */ +}; + +enum ovs_key_attr { + OVS_KEY_ATTR_UNSPEC, + OVS_KEY_ATTR_ENCAP, /* Nested set of encapsulated attributes. */ + OVS_KEY_ATTR_PRIORITY, /* u32 skb->priority */ + OVS_KEY_ATTR_IN_PORT, /* u32 OVS dp port number */ + OVS_KEY_ATTR_ETHERNET, /* struct ovs_key_ethernet */ + OVS_KEY_ATTR_VLAN, /* be16 VLAN TCI */ + OVS_KEY_ATTR_ETHERTYPE, /* be16 Ethernet type */ + OVS_KEY_ATTR_IPV4, /* struct ovs_key_ipv4 */ + OVS_KEY_ATTR_IPV6, /* struct ovs_key_ipv6 */ + OVS_KEY_ATTR_TCP, /* struct ovs_key_tcp */ + OVS_KEY_ATTR_UDP, /* struct ovs_key_udp */ + OVS_KEY_ATTR_ICMP, /* struct ovs_key_icmp */ + OVS_KEY_ATTR_ICMPV6, /* struct ovs_key_icmpv6 */ + OVS_KEY_ATTR_ARP, /* struct ovs_key_arp */ + OVS_KEY_ATTR_ND, /* struct ovs_key_nd */ + __OVS_KEY_ATTR_MAX +}; + +#define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1) + +/** + * enum ovs_frag_type - IPv4 and IPv6 fragment type + * @OVS_FRAG_TYPE_NONE: Packet is not a fragment. + * @OVS_FRAG_TYPE_FIRST: Packet is a fragment with offset 0. + * @OVS_FRAG_TYPE_LATER: Packet is a fragment with nonzero offset. + * + * Used as the @ipv4_frag in &struct ovs_key_ipv4 and as @ipv6_frag &struct + * ovs_key_ipv6. + */ +enum ovs_frag_type { + OVS_FRAG_TYPE_NONE, + OVS_FRAG_TYPE_FIRST, + OVS_FRAG_TYPE_LATER, + __OVS_FRAG_TYPE_MAX +}; + +#define OVS_FRAG_TYPE_MAX (__OVS_FRAG_TYPE_MAX - 1) + +struct ovs_key_ethernet { + __u8 eth_src[6]; + __u8 eth_dst[6]; +}; + +struct ovs_key_ipv4 { + __be32 ipv4_src; + __be32 ipv4_dst; + __u8 ipv4_proto; + __u8 ipv4_tos; + __u8 ipv4_ttl; + __u8 ipv4_frag; /* One of OVS_FRAG_TYPE_*. 
*/ +}; + +struct ovs_key_ipv6 { + __be32 ipv6_src[4]; + __be32 ipv6_dst[4]; + __be32 ipv6_label; /* 20-bits in least-significant bits. */ + __u8 ipv6_proto; + __u8 ipv6_tclass; + __u8 ipv6_hlimit; + __u8 ipv6_frag; /* One of OVS_FRAG_TYPE_*. */ +}; + +struct ovs_key_tcp { + __be16 tcp_src; + __be16 tcp_dst; +}; + +struct ovs_key_udp { + __be16 udp_src; + __be16 udp_dst; +}; + +struct ovs_key_icmp { + __u8 icmp_type; + __u8 icmp_code; +}; + +struct ovs_key_icmpv6 { + __u8 icmpv6_type; + __u8 icmpv6_code; +}; + +struct ovs_key_arp { + __be32 arp_sip; + __be32 arp_tip; + __be16 arp_op; + __u8 arp_sha[6]; + __u8 arp_tha[6]; +}; + +struct ovs_key_nd { + __u32 nd_target[4]; + __u8 nd_sll[6]; + __u8 nd_tll[6]; +}; + +/** + * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands. + * @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow + * key. Always present in notifications. Required for all requests (except + * dumps). + * @OVS_FLOW_ATTR_ACTIONS: Nested %OVS_ACTION_ATTR_* attributes specifying + * the actions to take for packets that match the key. Always present in + * notifications. Required for %OVS_FLOW_CMD_NEW requests, optional for + * %OVS_FLOW_CMD_SET requests. + * @OVS_FLOW_ATTR_STATS: &struct ovs_flow_stats giving statistics for this + * flow. Present in notifications if the stats would be nonzero. Ignored in + * requests. + * @OVS_FLOW_ATTR_TCP_FLAGS: An 8-bit value giving the OR'd value of all of the + * TCP flags seen on packets in this flow. Only present in notifications for + * TCP flows, and only if it would be nonzero. Ignored in requests. + * @OVS_FLOW_ATTR_USED: A 64-bit integer giving the time, in milliseconds on + * the system monotonic clock, at which a packet was last processed for this + * flow. Only present in notifications if a packet has been processed for this + * flow. Ignored in requests. + * @OVS_FLOW_ATTR_CLEAR: If present in a %OVS_FLOW_CMD_SET request, clears the + * last-used time, accumulated TCP flags, and statistics for this flow. + * Otherwise ignored in requests. Never present in notifications. + * + * These attributes follow the &struct ovs_header within the Generic Netlink + * payload for %OVS_FLOW_* commands. + */ +enum ovs_flow_attr { + OVS_FLOW_ATTR_UNSPEC, + OVS_FLOW_ATTR_KEY, /* Sequence of OVS_KEY_ATTR_* attributes. */ + OVS_FLOW_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ + OVS_FLOW_ATTR_STATS, /* struct ovs_flow_stats. */ + OVS_FLOW_ATTR_TCP_FLAGS, /* 8-bit OR'd TCP flags. */ + OVS_FLOW_ATTR_USED, /* u64 msecs last used in monotonic time. */ + OVS_FLOW_ATTR_CLEAR, /* Flag to clear stats, tcp_flags, used. */ + __OVS_FLOW_ATTR_MAX +}; + +#define OVS_FLOW_ATTR_MAX (__OVS_FLOW_ATTR_MAX - 1) + +/** + * enum ovs_sample_attr - Attributes for %OVS_ACTION_ATTR_SAMPLE action. + * @OVS_SAMPLE_ATTR_PROBABILITY: 32-bit fraction of packets to sample with + * @OVS_ACTION_ATTR_SAMPLE. A value of 0 samples no packets, a value of + * %UINT32_MAX samples all packets and intermediate values sample intermediate + * fractions of packets. + * @OVS_SAMPLE_ATTR_ACTIONS: Set of actions to execute in sampling event. + * Actions are passed as nested attributes. + * + * Executes the specified actions with the given probability on a per-packet + * basis. + */ +enum ovs_sample_attr { + OVS_SAMPLE_ATTR_UNSPEC, + OVS_SAMPLE_ATTR_PROBABILITY, /* u32 number */ + OVS_SAMPLE_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. 
*/ + __OVS_SAMPLE_ATTR_MAX, +}; + +#define OVS_SAMPLE_ATTR_MAX (__OVS_SAMPLE_ATTR_MAX - 1) + +/** + * enum ovs_userspace_attr - Attributes for %OVS_ACTION_ATTR_USERSPACE action. + * @OVS_USERSPACE_ATTR_PID: u32 Netlink PID to which the %OVS_PACKET_CMD_ACTION + * message should be sent. Required. + * @OVS_USERSPACE_ATTR_USERDATA: If present, its u64 argument is copied to the + * %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA. + */ +enum ovs_userspace_attr { + OVS_USERSPACE_ATTR_UNSPEC, + OVS_USERSPACE_ATTR_PID, /* u32 Netlink PID to receive upcalls. */ + OVS_USERSPACE_ATTR_USERDATA, /* u64 optional user-specified cookie. */ + __OVS_USERSPACE_ATTR_MAX +}; + +#define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1) + +/** + * struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument. + * @vlan_tpid: Tag protocol identifier (TPID) to push. + * @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set + * (but it will not be set in the 802.1Q header that is pushed). + * + * The @vlan_tpid value is typically %ETH_P_8021Q. The only acceptable TPID + * values are those that the kernel module also parses as 802.1Q headers, to + * prevent %OVS_ACTION_ATTR_PUSH_VLAN followed by %OVS_ACTION_ATTR_POP_VLAN + * from having surprising results. + */ +struct ovs_action_push_vlan { + __be16 vlan_tpid; /* 802.1Q TPID. */ + __be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */ +}; + +/** + * enum ovs_action_attr - Action types. + * + * @OVS_ACTION_ATTR_OUTPUT: Output packet to port. + * @OVS_ACTION_ATTR_USERSPACE: Send packet to userspace according to nested + * %OVS_USERSPACE_ATTR_* attributes. + * @OVS_ACTION_ATTR_SET: Replaces the contents of an existing header. The + * single nested %OVS_KEY_ATTR_* attribute specifies a header to modify and its + * value. + * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the + * packet. + * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet. + * @OVS_ACTION_ATTR_SAMPLE: Probabilistically executes actions, as specified in + * the nested %OVS_SAMPLE_ATTR_* attributes. + * + * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all + * fields within a header are modifiable, e.g. the IPv4 protocol and fragment + * type may not be changed. + */ + +enum ovs_action_attr { + OVS_ACTION_ATTR_UNSPEC, + OVS_ACTION_ATTR_OUTPUT, /* u32 port number. */ + OVS_ACTION_ATTR_USERSPACE, /* Nested OVS_USERSPACE_ATTR_*. */ + OVS_ACTION_ATTR_SET, /* One nested OVS_KEY_ATTR_*. */ + OVS_ACTION_ATTR_PUSH_VLAN, /* struct ovs_action_push_vlan. */ + OVS_ACTION_ATTR_POP_VLAN, /* No argument. */ + OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. 
*/ + __OVS_ACTION_ATTR_MAX +}; + +#define OVS_ACTION_ATTR_MAX (__OVS_ACTION_ATTR_MAX - 1) + +#endif /* _LINUX_OPENVSWITCH_H */ diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h index e3d0b389024..7ef68724f0f 100644 --- a/include/linux/pci-ats.h +++ b/include/linux/pci-ats.h @@ -12,7 +12,7 @@ struct pci_ats { unsigned int is_enabled:1; /* Enable bit is set */ }; -#ifdef CONFIG_PCI_IOV +#ifdef CONFIG_PCI_ATS extern int pci_enable_ats(struct pci_dev *dev, int ps); extern void pci_disable_ats(struct pci_dev *dev); @@ -29,7 +29,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev) return dev->ats && dev->ats->is_enabled; } -#else /* CONFIG_PCI_IOV */ +#else /* CONFIG_PCI_ATS */ static inline int pci_enable_ats(struct pci_dev *dev, int ps) { @@ -50,7 +50,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev) return 0; } -#endif /* CONFIG_PCI_IOV */ +#endif /* CONFIG_PCI_ATS */ #ifdef CONFIG_PCI_PRI diff --git a/include/linux/pci.h b/include/linux/pci.h index 337df0d5d5f..7cda65b5f79 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -338,7 +338,7 @@ struct pci_dev { struct list_head msi_list; #endif struct pci_vpd *vpd; -#ifdef CONFIG_PCI_IOV +#ifdef CONFIG_PCI_ATS union { struct pci_sriov *sriov; /* SR-IOV capability related */ struct pci_dev *physfn; /* the PF this VF is associated with */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 172ba70306d..2aaee0ca9da 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -517,8 +517,12 @@ #define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302 #define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303 #define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304 +#define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600 +#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 +#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 #define PCI_DEVICE_ID_AMD_15H_NB_F3 0x1603 #define PCI_DEVICE_ID_AMD_15H_NB_F4 0x1604 +#define PCI_DEVICE_ID_AMD_15H_NB_F5 0x1605 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1e9ebe5e009..08855613ceb 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -54,6 +54,7 @@ enum perf_hw_id { PERF_COUNT_HW_BUS_CYCLES = 6, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, + PERF_COUNT_HW_REF_CPU_CYCLES = 9, PERF_COUNT_HW_MAX, /* non-ABI */ }; @@ -822,6 +823,7 @@ struct perf_event { int mmap_locked; struct user_struct *mmap_user; struct ring_buffer *rb; + struct list_head rb_entry; /* poll related */ wait_queue_head_t waitq; @@ -889,6 +891,7 @@ struct perf_event_context { int nr_active; int is_active; int nr_stat; + int nr_freq; int rotate_disable; atomic_t refcount; struct task_struct *task; @@ -1062,12 +1065,12 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) } } -extern struct jump_label_key perf_sched_events; +extern struct jump_label_key_deferred perf_sched_events; static inline void perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task) { - if (static_branch(&perf_sched_events)) + if (static_branch(&perf_sched_events.key)) __perf_event_task_sched_in(prev, task); } @@ -1076,7 +1079,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev, { perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); - if (static_branch(&perf_sched_events)) + if (static_branch(&perf_sched_events.key)) __perf_event_task_sched_out(prev, next); } diff --git a/include/linux/phonet.h 
b/include/linux/phonet.h index f53a4167c5f..f48bfc80cb4 100644 --- a/include/linux/phonet.h +++ b/include/linux/phonet.h @@ -38,6 +38,7 @@ #define PNPIPE_ENCAP 1 #define PNPIPE_IFINDEX 2 #define PNPIPE_HANDLE 3 +#define PNPIPE_INITSTATE 4 #define PNADDR_ANY 0 #define PNADDR_BROADCAST 0xFC @@ -49,6 +50,7 @@ /* ioctls */ #define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0) +#define SIOCPNENABLEPIPE (SIOCPROTOPRIVATE + 13) #define SIOCPNADDRESOURCE (SIOCPROTOPRIVATE + 14) #define SIOCPNDELRESOURCE (SIOCPROTOPRIVATE + 15) diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h index c5336705921..8f1b928f777 100644 --- a/include/linux/pkt_sched.h +++ b/include/linux/pkt_sched.h @@ -30,7 +30,7 @@ */ struct tc_stats { - __u64 bytes; /* NUmber of enqueues bytes */ + __u64 bytes; /* Number of enqueued bytes */ __u32 packets; /* Number of enqueued packets */ __u32 drops; /* Packets dropped because of lack of resources */ __u32 overlimits; /* Number of throttle events when this @@ -162,25 +162,24 @@ struct tc_sfq_qopt { unsigned flows; /* Maximal number of flows */ }; +struct tc_sfq_qopt_v1 { + struct tc_sfq_qopt v0; + unsigned int depth; /* max number of packets per flow */ + unsigned int headdrop; +}; + + struct tc_sfq_xstats { __s32 allot; }; -/* - * NOTE: limit, divisor and flows are hardwired to code at the moment. - * - * limit=flows=128, divisor=1024; - * - * The only reason for this is efficiency, it is possible - * to change these parameters in compile time. - */ - /* RED section */ enum { TCA_RED_UNSPEC, TCA_RED_PARMS, TCA_RED_STAB, + TCA_RED_MAX_P, __TCA_RED_MAX, }; @@ -194,8 +193,9 @@ struct tc_red_qopt { unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ unsigned char Scell_log; /* cell size for idle damping */ unsigned char flags; -#define TC_RED_ECN 1 -#define TC_RED_HARDDROP 2 +#define TC_RED_ECN 1 +#define TC_RED_HARDDROP 2 +#define TC_RED_ADAPTATIVE 4 }; struct tc_red_xstats { @@ -214,6 +214,7 @@ enum { TCA_GRED_PARMS, TCA_GRED_STAB, TCA_GRED_DPS, + TCA_GRED_MAX_P, __TCA_GRED_MAX, }; @@ -253,6 +254,7 @@ enum { TCA_CHOKE_UNSPEC, TCA_CHOKE_PARMS, TCA_CHOKE_STAB, + TCA_CHOKE_MAX_P, __TCA_CHOKE_MAX, }; @@ -297,7 +299,7 @@ struct tc_htb_glob { __u32 debug; /* debug flags */ /* stats */ - __u32 direct_pkts; /* count of non shapped packets */ + __u32 direct_pkts; /* count of non shaped packets */ }; enum { TCA_HTB_UNSPEC, @@ -465,6 +467,7 @@ enum { TCA_NETEM_REORDER, TCA_NETEM_CORRUPT, TCA_NETEM_LOSS, + TCA_NETEM_RATE, __TCA_NETEM_MAX, }; @@ -495,6 +498,13 @@ struct tc_netem_corrupt { __u32 correlation; }; +struct tc_netem_rate { + __u32 rate; /* byte/s */ + __s32 packet_overhead; + __u32 cell_size; + __s32 cell_overhead; +}; + enum { NETEM_LOSS_UNSPEC, NETEM_LOSS_GI, /* General Intuitive - 4 state model */ @@ -503,7 +513,7 @@ enum { }; #define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1) -/* State transition probablities for 4 state model */ +/* State transition probabilities for 4 state model */ struct tc_netem_gimodel { __u32 p13; __u32 p31; diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 2a23f7d1a82..60e9994ef40 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -63,7 +63,7 @@ struct platform_device_info { u64 dma_mask; }; extern struct platform_device *platform_device_register_full( - struct platform_device_info *pdevinfo); + const struct platform_device_info *pdevinfo); /** * platform_device_register_resndata - add a platform-level device with @@ -196,16 +196,8 @@ static inline void platform_set_drvdata(struct 
platform_device *pdev, void *data * calling it replaces module_init() and module_exit() */ #define module_platform_driver(__platform_driver) \ -static int __init __platform_driver##_init(void) \ -{ \ - return platform_driver_register(&(__platform_driver)); \ -} \ -module_init(__platform_driver##_init); \ -static void __exit __platform_driver##_exit(void) \ -{ \ - platform_driver_unregister(&(__platform_driver)); \ -} \ -module_exit(__platform_driver##_exit); + module_driver(__platform_driver, platform_driver_register, \ + platform_driver_unregister) extern struct platform_device *platform_create_bundle(struct platform_driver *driver, int (*probe)(struct platform_device *), @@ -264,62 +256,34 @@ static inline char *early_platform_driver_setup_func(void) \ } #endif /* MODULE */ -#ifdef CONFIG_PM_SLEEP -extern int platform_pm_prepare(struct device *dev); -extern void platform_pm_complete(struct device *dev); -#else -#define platform_pm_prepare NULL -#define platform_pm_complete NULL -#endif - #ifdef CONFIG_SUSPEND extern int platform_pm_suspend(struct device *dev); -extern int platform_pm_suspend_noirq(struct device *dev); extern int platform_pm_resume(struct device *dev); -extern int platform_pm_resume_noirq(struct device *dev); #else #define platform_pm_suspend NULL #define platform_pm_resume NULL -#define platform_pm_suspend_noirq NULL -#define platform_pm_resume_noirq NULL #endif #ifdef CONFIG_HIBERNATE_CALLBACKS extern int platform_pm_freeze(struct device *dev); -extern int platform_pm_freeze_noirq(struct device *dev); extern int platform_pm_thaw(struct device *dev); -extern int platform_pm_thaw_noirq(struct device *dev); extern int platform_pm_poweroff(struct device *dev); -extern int platform_pm_poweroff_noirq(struct device *dev); extern int platform_pm_restore(struct device *dev); -extern int platform_pm_restore_noirq(struct device *dev); #else #define platform_pm_freeze NULL #define platform_pm_thaw NULL #define platform_pm_poweroff NULL #define platform_pm_restore NULL -#define platform_pm_freeze_noirq NULL -#define platform_pm_thaw_noirq NULL -#define platform_pm_poweroff_noirq NULL -#define platform_pm_restore_noirq NULL #endif #ifdef CONFIG_PM_SLEEP #define USE_PLATFORM_PM_SLEEP_OPS \ - .prepare = platform_pm_prepare, \ - .complete = platform_pm_complete, \ .suspend = platform_pm_suspend, \ .resume = platform_pm_resume, \ .freeze = platform_pm_freeze, \ .thaw = platform_pm_thaw, \ .poweroff = platform_pm_poweroff, \ - .restore = platform_pm_restore, \ - .suspend_noirq = platform_pm_suspend_noirq, \ - .resume_noirq = platform_pm_resume_noirq, \ - .freeze_noirq = platform_pm_freeze_noirq, \ - .thaw_noirq = platform_pm_thaw_noirq, \ - .poweroff_noirq = platform_pm_poweroff_noirq, \ - .restore_noirq = platform_pm_restore_noirq, + .restore = platform_pm_restore, #else #define USE_PLATFORM_PM_SLEEP_OPS #endif diff --git a/include/linux/pm.h b/include/linux/pm.h index f15acb64681..e4982ac3fbb 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -54,118 +54,145 @@ typedef struct pm_message { /** * struct dev_pm_ops - device PM callbacks * - * Several driver power state transitions are externally visible, affecting + * Several device power state transitions are externally visible, affecting * the state of pending I/O queues and (for drivers that touch hardware) * interrupts, wakeups, DMA, and other hardware state. 
There may also be - * internal transitions to various low power modes, which are transparent + * internal transitions to various low-power modes which are transparent * to the rest of the driver stack (such as a driver that's ON gating off * clocks which are not in active use). * - * The externally visible transitions are handled with the help of the following - * callbacks included in this structure: - * - * @prepare: Prepare the device for the upcoming transition, but do NOT change - * its hardware state. Prevent new children of the device from being - * registered after @prepare() returns (the driver's subsystem and - * generally the rest of the kernel is supposed to prevent new calls to the - * probe method from being made too once @prepare() has succeeded). If - * @prepare() detects a situation it cannot handle (e.g. registration of a - * child already in progress), it may return -EAGAIN, so that the PM core - * can execute it once again (e.g. after the new child has been registered) - * to recover from the race condition. This method is executed for all - * kinds of suspend transitions and is followed by one of the suspend - * callbacks: @suspend(), @freeze(), or @poweroff(). - * The PM core executes @prepare() for all devices before starting to - * execute suspend callbacks for any of them, so drivers may assume all of - * the other devices to be present and functional while @prepare() is being - * executed. In particular, it is safe to make GFP_KERNEL memory - * allocations from within @prepare(). However, drivers may NOT assume - * anything about the availability of the user space at that time and it - * is not correct to request firmware from within @prepare() (it's too - * late to do that). [To work around this limitation, drivers may - * register suspend and hibernation notifiers that are executed before the - * freezing of tasks.] + * The externally visible transitions are handled with the help of callbacks + * included in this structure in such a way that two levels of callbacks are + * involved. First, the PM core executes callbacks provided by PM domains, + * device types, classes and bus types. They are the subsystem-level callbacks + * supposed to execute callbacks provided by device drivers, although they may + * choose not to do that. If the driver callbacks are executed, they have to + * collaborate with the subsystem-level callbacks to achieve the goals + * appropriate for the given system transition, given transition phase and the + * subsystem the device belongs to. + * + * @prepare: The principal role of this callback is to prevent new children of + * the device from being registered after it has returned (the driver's + * subsystem and generally the rest of the kernel is supposed to prevent + * new calls to the probe method from being made too once @prepare() has + * succeeded). If @prepare() detects a situation it cannot handle (e.g. + * registration of a child already in progress), it may return -EAGAIN, so + * that the PM core can execute it once again (e.g. after a new child has + * been registered) to recover from the race condition. + * This method is executed for all kinds of suspend transitions and is + * followed by one of the suspend callbacks: @suspend(), @freeze(), or + * @poweroff(). The PM core executes subsystem-level @prepare() for all + * devices before starting to invoke suspend callbacks for any of them, so + * generally devices may be assumed to be functional or to respond to + * runtime resume requests while @prepare() is being executed. 
However, + * device drivers may NOT assume anything about the availability of user + * space at that time and it is NOT valid to request firmware from within + * @prepare() (it's too late to do that). It also is NOT valid to allocate + * substantial amounts of memory from @prepare() in the GFP_KERNEL mode. + * [To work around these limitations, drivers may register suspend and + * hibernation notifiers to be executed before the freezing of tasks.] * * @complete: Undo the changes made by @prepare(). This method is executed for * all kinds of resume transitions, following one of the resume callbacks: * @resume(), @thaw(), @restore(). Also called if the state transition - * fails before the driver's suspend callback (@suspend(), @freeze(), - * @poweroff()) can be executed (e.g. if the suspend callback fails for one + * fails before the driver's suspend callback: @suspend(), @freeze() or + * @poweroff(), can be executed (e.g. if the suspend callback fails for one * of the other devices that the PM core has unsuccessfully attempted to * suspend earlier). - * The PM core executes @complete() after it has executed the appropriate - * resume callback for all devices. + * The PM core executes subsystem-level @complete() after it has executed + * the appropriate resume callbacks for all devices. * * @suspend: Executed before putting the system into a sleep state in which the - * contents of main memory are preserved. Quiesce the device, put it into - * a low power state appropriate for the upcoming system state (such as - * PCI_D3hot), and enable wakeup events as appropriate. + * contents of main memory are preserved. The exact action to perform + * depends on the device's subsystem (PM domain, device type, class or bus + * type), but generally the device must be quiescent after subsystem-level + * @suspend() has returned, so that it doesn't do any I/O or DMA. + * Subsystem-level @suspend() is executed for all devices after invoking + * subsystem-level @prepare() for all of them. * * @resume: Executed after waking the system up from a sleep state in which the - * contents of main memory were preserved. Put the device into the - * appropriate state, according to the information saved in memory by the - * preceding @suspend(). The driver starts working again, responding to - * hardware events and software requests. The hardware may have gone - * through a power-off reset, or it may have maintained state from the - * previous suspend() which the driver may rely on while resuming. On most - * platforms, there are no restrictions on availability of resources like - * clocks during @resume(). + * contents of main memory were preserved. The exact action to perform + * depends on the device's subsystem, but generally the driver is expected + * to start working again, responding to hardware events and software + * requests (the device itself may be left in a low-power state, waiting + * for a runtime resume to occur). The state of the device at the time its + * driver's @resume() callback is run depends on the platform and subsystem + * the device belongs to. On most platforms, there are no restrictions on + * availability of resources like clocks during @resume(). + * Subsystem-level @resume() is executed for all devices after invoking + * subsystem-level @resume_noirq() for all of them. * * @freeze: Hibernation-specific, executed before creating a hibernation image. 
- * Quiesce operations so that a consistent image can be created, but do NOT - * otherwise put the device into a low power device state and do NOT emit - * system wakeup events. Save in main memory the device settings to be - * used by @restore() during the subsequent resume from hibernation or by - * the subsequent @thaw(), if the creation of the image or the restoration - * of main memory contents from it fails. + * Analogous to @suspend(), but it should not enable the device to signal + * wakeup events or change its power state. The majority of subsystems + * (with the notable exception of the PCI bus type) expect the driver-level + * @freeze() to save the device settings in memory to be used by @restore() + * during the subsequent resume from hibernation. + * Subsystem-level @freeze() is executed for all devices after invoking + * subsystem-level @prepare() for all of them. * * @thaw: Hibernation-specific, executed after creating a hibernation image OR - * if the creation of the image fails. Also executed after a failing + * if the creation of an image has failed. Also executed after a failing * attempt to restore the contents of main memory from such an image. * Undo the changes made by the preceding @freeze(), so the device can be * operated in the same way as immediately before the call to @freeze(). + * Subsystem-level @thaw() is executed for all devices after invoking + * subsystem-level @thaw_noirq() for all of them. It also may be executed + * directly after @freeze() in case of a transition error. * * @poweroff: Hibernation-specific, executed after saving a hibernation image. - * Quiesce the device, put it into a low power state appropriate for the - * upcoming system state (such as PCI_D3hot), and enable wakeup events as - * appropriate. + * Analogous to @suspend(), but it need not save the device's settings in + * memory. + * Subsystem-level @poweroff() is executed for all devices after invoking + * subsystem-level @prepare() for all of them. * * @restore: Hibernation-specific, executed after restoring the contents of main - * memory from a hibernation image. Driver starts working again, - * responding to hardware events and software requests. Drivers may NOT - * make ANY assumptions about the hardware state right prior to @restore(). - * On most platforms, there are no restrictions on availability of - * resources like clocks during @restore(). - * - * @suspend_noirq: Complete the operations of ->suspend() by carrying out any - * actions required for suspending the device that need interrupts to be - * disabled - * - * @resume_noirq: Prepare for the execution of ->resume() by carrying out any - * actions required for resuming the device that need interrupts to be - * disabled - * - * @freeze_noirq: Complete the operations of ->freeze() by carrying out any - * actions required for freezing the device that need interrupts to be - * disabled - * - * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any - * actions required for thawing the device that need interrupts to be - * disabled - * - * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any - * actions required for handling the device that need interrupts to be - * disabled - * - * @restore_noirq: Prepare for the execution of ->restore() by carrying out any - * actions required for restoring the operations of the device that need - * interrupts to be disabled + * memory from a hibernation image, analogous to @resume(). 
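As an aside, a minimal sketch of how a driver might satisfy the sleep callbacks documented above; foo_suspend()/foo_resume() are hypothetical stubs, and SIMPLE_DEV_PM_OPS() (defined elsewhere in this header) reuses the same pair for the hibernation transitions:

#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* quiesce the hypothetical device: stop DMA, mask its interrupt */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* reprogram registers and let I/O flow again */
	return 0;
}

/* Maps foo_suspend() to .suspend/.freeze/.poweroff and foo_resume() to
 * .resume/.thaw/.restore, matching the semantics described above. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

A driver would then point its .driver.pm field at &foo_pm_ops.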
+ * + * @suspend_noirq: Complete the actions started by @suspend(). Carry out any + * additional operations required for suspending the device that might be + * racing with its driver's interrupt handler, which is guaranteed not to + * run while @suspend_noirq() is being executed. + * It generally is expected that the device will be in a low-power state + * (appropriate for the target system sleep state) after subsystem-level + * @suspend_noirq() has returned successfully. If the device can generate + * system wakeup signals and is enabled to wake up the system, it should be + * configured to do so at that time. However, depending on the platform + * and device's subsystem, @suspend() may be allowed to put the device into + * the low-power state and configure it to generate wakeup signals, in + * which case it generally is not necessary to define @suspend_noirq(). + * + * @resume_noirq: Prepare for the execution of @resume() by carrying out any + * operations required for resuming the device that might be racing with + * its driver's interrupt handler, which is guaranteed not to run while + * @resume_noirq() is being executed. + * + * @freeze_noirq: Complete the actions started by @freeze(). Carry out any + * additional operations required for freezing the device that might be + * racing with its driver's interrupt handler, which is guaranteed not to + * run while @freeze_noirq() is being executed. + * The power state of the device should not be changed by either @freeze() + * or @freeze_noirq() and it should not be configured to signal system + * wakeup by any of these callbacks. + * + * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any + * operations required for thawing the device that might be racing with its + * driver's interrupt handler, which is guaranteed not to run while + * @thaw_noirq() is being executed. + * + * @poweroff_noirq: Complete the actions started by @poweroff(). Analogous to + * @suspend_noirq(), but it need not save the device's settings in memory. + * + * @restore_noirq: Prepare for the execution of @restore() by carrying out any + * operations required for thawing the device that might be racing with its + * driver's interrupt handler, which is guaranteed not to run while + * @restore_noirq() is being executed. Analogous to @resume_noirq(). * * All of the above callbacks, except for @complete(), return error codes. * However, the error codes returned by the resume operations, @resume(), - * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq() do + * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do * not cause the PM core to abort the resume transition during which they are - * returned. The error codes returned in that cases are only printed by the PM + * returned. The error codes returned in those cases are only printed by the PM * core to the system logs for debugging purposes. Still, it is recommended * that drivers only return error codes from their resume methods in case of an * unrecoverable failure (i.e. when the device being handled refuses to resume @@ -174,31 +201,43 @@ typedef struct pm_message { * their children. * * It is allowed to unregister devices while the above callbacks are being - * executed. However, it is not allowed to unregister a device from within any - * of its own callbacks. + * executed. 
However, a callback routine must NOT try to unregister the device + * it was called for, although it may unregister children of that device (for + * example, if it detects that a child was unplugged while the system was + * asleep). * - * There also are the following callbacks related to run-time power management - * of devices: + * Refer to Documentation/power/devices.txt for more information about the role + * of the above callbacks in the system suspend process. + * + * There also are callbacks related to runtime power management of devices. + * Again, these callbacks are executed by the PM core only for subsystems + * (PM domains, device types, classes and bus types) and the subsystem-level + * callbacks are supposed to invoke the driver callbacks. Moreover, the exact + * actions to be performed by a device driver's callbacks generally depend on + * the platform and subsystem the device belongs to. * * @runtime_suspend: Prepare the device for a condition in which it won't be * able to communicate with the CPU(s) and RAM due to power management. - * This need not mean that the device should be put into a low power state. + * This need not mean that the device should be put into a low-power state. * For example, if the device is behind a link which is about to be turned * off, the device may remain at full power. If the device does go to low - * power and is capable of generating run-time wake-up events, remote - * wake-up (i.e., a hardware mechanism allowing the device to request a - * change of its power state via a wake-up event, such as PCI PME) should - * be enabled for it. + * power and is capable of generating runtime wakeup events, remote wakeup + * (i.e., a hardware mechanism allowing the device to request a change of + * its power state via an interrupt) should be enabled for it. * * @runtime_resume: Put the device into the fully active state in response to a - * wake-up event generated by hardware or at the request of software. If - * necessary, put the device into the full power state and restore its + * wakeup event generated by hardware or at the request of software. If + * necessary, put the device into the full-power state and restore its * registers, so that it is fully operational. * - * @runtime_idle: Device appears to be inactive and it might be put into a low - * power state if all of the necessary conditions are satisfied. Check + * @runtime_idle: Device appears to be inactive and it might be put into a + * low-power state if all of the necessary conditions are satisfied. Check * these conditions and handle the device as appropriate, possibly queueing * a suspend request for it. The return value is ignored by the PM core. + * + * Refer to Documentation/power/runtime_pm.txt for more information about the + * role of the above callbacks in device runtime power management. + * */ struct dev_pm_ops { @@ -261,19 +300,6 @@ const struct dev_pm_ops name = { \ SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ } -/* - * Use this for subsystems (bus types, device types, device classes) that don't - * need any special suspend/resume handling in addition to invoking the PM - * callbacks provided by device drivers supporting both the system sleep PM and - * runtime PM, make the pm member point to generic_subsys_pm_ops. 
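A companion sketch for the runtime PM callbacks just described, using the SET_RUNTIME_PM_OPS() initializer visible in the hunk above; the foo_runtime_* names are hypothetical and the bodies are stubs:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* the device may stay at full power if its link must remain up */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* restore registers so the device is fully operational again */
	return 0;
}

static int foo_runtime_idle(struct device *dev)
{
	/* conditions look right: queue a suspend request for the device;
	 * the return value is ignored by the PM core, as noted above */
	return pm_schedule_suspend(dev, 0);
}

/* Replaces the SIMPLE_DEV_PM_OPS() form from the earlier sketch. */
static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
			   foo_runtime_idle)
};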
- */ -#ifdef CONFIG_PM -extern struct dev_pm_ops generic_subsys_pm_ops; -#define GENERIC_SUBSYS_PM_OPS (&generic_subsys_pm_ops) -#else -#define GENERIC_SUBSYS_PM_OPS NULL -#endif - /** * PM_EVENT_ messages * @@ -447,6 +473,7 @@ struct dev_pm_info { unsigned int async_suspend:1; bool is_prepared:1; /* Owned by the PM core */ bool is_suspended:1; /* Ditto */ + bool ignore_children:1; spinlock_t lock; #ifdef CONFIG_PM_SLEEP struct list_head entry; @@ -464,7 +491,6 @@ struct dev_pm_info { atomic_t usage_count; atomic_t child_count; unsigned int disable_depth:3; - unsigned int ignore_children:1; unsigned int idle_notification:1; unsigned int request_pending:1; unsigned int deferred_resume:1; @@ -482,6 +508,8 @@ struct dev_pm_info { unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; + ktime_t suspend_time; + s64 max_time_suspended_ns; #endif struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ struct pm_qos_constraints *constraints; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 65633e5a2bc..a03a0ad998b 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -10,6 +10,7 @@ #define _LINUX_PM_DOMAIN_H #include <linux/device.h> +#include <linux/err.h> enum gpd_status { GPD_STATE_ACTIVE = 0, /* PM domain is active */ @@ -21,6 +22,23 @@ enum gpd_status { struct dev_power_governor { bool (*power_down_ok)(struct dev_pm_domain *domain); + bool (*stop_ok)(struct device *dev); +}; + +struct gpd_dev_ops { + int (*start)(struct device *dev); + int (*stop)(struct device *dev); + int (*save_state)(struct device *dev); + int (*restore_state)(struct device *dev); + int (*suspend)(struct device *dev); + int (*suspend_late)(struct device *dev); + int (*resume_early)(struct device *dev); + int (*resume)(struct device *dev); + int (*freeze)(struct device *dev); + int (*freeze_late)(struct device *dev); + int (*thaw_early)(struct device *dev); + int (*thaw)(struct device *dev); + bool (*active_wakeup)(struct device *dev); }; struct generic_pm_domain { @@ -32,6 +50,7 @@ struct generic_pm_domain { struct mutex lock; struct dev_power_governor *gov; struct work_struct power_off_work; + char *name; unsigned int in_progress; /* Number of devices being suspended now */ atomic_t sd_count; /* Number of subdomains with power "on" */ enum gpd_status status; /* Current state of the domain */ @@ -44,10 +63,13 @@ struct generic_pm_domain { bool suspend_power_off; /* Power status before system suspend */ bool dev_irq_safe; /* Device callbacks are IRQ-safe */ int (*power_off)(struct generic_pm_domain *domain); + s64 power_off_latency_ns; int (*power_on)(struct generic_pm_domain *domain); - int (*start_device)(struct device *dev); - int (*stop_device)(struct device *dev); - bool (*active_wakeup)(struct device *dev); + s64 power_on_latency_ns; + struct gpd_dev_ops dev_ops; + s64 break_even_ns; /* Power break even for the entire domain. */ + s64 max_off_time_ns; /* Maximum allowed "suspended" time. 
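As an aside, a minimal sketch of a platform describing a power domain with the extended generic_pm_domain above; the foo_pd_* callbacks are hypothetical stubs and the governor is left NULL for simplicity:

#include <linux/pm_domain.h>

static int foo_pd_power_off(struct generic_pm_domain *domain)
{
	/* gate the domain's power rail or root clock here */
	return 0;
}

static int foo_pd_power_on(struct generic_pm_domain *domain)
{
	return 0;
}

static int foo_pd_stop(struct device *dev)
{
	/* per-device gating while the domain itself stays powered */
	return 0;
}

static int foo_pd_start(struct device *dev)
{
	return 0;
}

static struct generic_pm_domain foo_pd = {
	.name		= "foo_pd",
	.power_off	= foo_pd_power_off,
	.power_on	= foo_pd_power_on,
	.dev_ops	= {
		.stop	= foo_pd_stop,
		.start	= foo_pd_start,
	},
};

static int __init foo_pd_attach(struct device *dev)
{
	pm_genpd_init(&foo_pd, NULL, false);	/* NULL: no special governor */
	return pm_genpd_add_device(&foo_pd, dev);
}

In practice the new latency and break-even fields would be filled in as well, so that governors such as the ones declared here can make use of them.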
*/ + ktime_t power_off_time; }; static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) @@ -62,8 +84,18 @@ struct gpd_link { struct list_head slave_node; }; +struct gpd_timing_data { + s64 stop_latency_ns; + s64 start_latency_ns; + s64 save_state_latency_ns; + s64 restore_state_latency_ns; + s64 break_even_ns; +}; + struct generic_pm_domain_data { struct pm_domain_data base; + struct gpd_dev_ops ops; + struct gpd_timing_data td; bool need_restore; }; @@ -73,18 +105,54 @@ static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data * } #ifdef CONFIG_PM_GENERIC_DOMAINS -extern int pm_genpd_add_device(struct generic_pm_domain *genpd, - struct device *dev); +static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) +{ + return to_gpd_data(dev->power.subsys_data->domain_data); +} + +extern struct dev_power_governor simple_qos_governor; + +extern struct generic_pm_domain *dev_to_genpd(struct device *dev); +extern int __pm_genpd_add_device(struct generic_pm_domain *genpd, + struct device *dev, + struct gpd_timing_data *td); + +static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, + struct device *dev) +{ + return __pm_genpd_add_device(genpd, dev, NULL); +} + extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, struct device *dev); extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *new_subdomain); extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *target); +extern int pm_genpd_add_callbacks(struct device *dev, + struct gpd_dev_ops *ops, + struct gpd_timing_data *td); +extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td); extern void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off); + extern int pm_genpd_poweron(struct generic_pm_domain *genpd); + +extern bool default_stop_ok(struct device *dev); + +extern struct dev_power_governor pm_domain_always_on_gov; #else + +static inline struct generic_pm_domain *dev_to_genpd(struct device *dev) +{ + return ERR_PTR(-ENOSYS); +} +static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd, + struct device *dev, + struct gpd_timing_data *td) +{ + return -ENOSYS; +} static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) { @@ -105,14 +173,35 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, { return -ENOSYS; } -static inline void pm_genpd_init(struct generic_pm_domain *genpd, - struct dev_power_governor *gov, bool is_off) {} +static inline int pm_genpd_add_callbacks(struct device *dev, + struct gpd_dev_ops *ops, + struct gpd_timing_data *td) +{ + return -ENOSYS; +} +static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) +{ + return -ENOSYS; +} +static inline void pm_genpd_init(struct generic_pm_domain *genpd, bool is_off) +{ +} static inline int pm_genpd_poweron(struct generic_pm_domain *genpd) { return -ENOSYS; } +static inline bool default_stop_ok(struct device *dev) +{ + return false; +} +#define pm_domain_always_on_gov NULL #endif +static inline int pm_genpd_remove_callbacks(struct device *dev) +{ + return __pm_genpd_remove_callbacks(dev, true); +} + #ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd); extern void pm_genpd_poweroff_unused(void); diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 83b0ea302a8..e5bbcbaa6f5 100644 --- 
a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -78,6 +78,7 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier); int pm_qos_request_active(struct pm_qos_request *req); s32 pm_qos_read_value(struct pm_qos_constraints *c); +s32 __dev_pm_qos_read_value(struct device *dev); s32 dev_pm_qos_read_value(struct device *dev); int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, s32 value); @@ -91,6 +92,8 @@ int dev_pm_qos_add_global_notifier(struct notifier_block *notifier); int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier); void dev_pm_qos_constraints_init(struct device *dev); void dev_pm_qos_constraints_destroy(struct device *dev); +int dev_pm_qos_add_ancestor_request(struct device *dev, + struct dev_pm_qos_request *req, s32 value); #else static inline int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, @@ -119,6 +122,8 @@ static inline int pm_qos_request_active(struct pm_qos_request *req) static inline s32 pm_qos_read_value(struct pm_qos_constraints *c) { return 0; } +static inline s32 __dev_pm_qos_read_value(struct device *dev) + { return 0; } static inline s32 dev_pm_qos_read_value(struct device *dev) { return 0; } static inline int dev_pm_qos_add_request(struct device *dev, @@ -150,6 +155,9 @@ static inline void dev_pm_qos_constraints_destroy(struct device *dev) { dev->power.power_state = PMSG_INVALID; } +static inline int dev_pm_qos_add_ancestor_request(struct device *dev, + struct dev_pm_qos_request *req, s32 value) + { return 0; } #endif #endif diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index d8d90361964..609daae7a01 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -45,6 +45,8 @@ extern void pm_runtime_irq_safe(struct device *dev); extern void __pm_runtime_use_autosuspend(struct device *dev, bool use); extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay); extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev); +extern void pm_runtime_update_max_time_suspended(struct device *dev, + s64 delta_ns); static inline bool pm_children_suspended(struct device *dev) { @@ -52,11 +54,6 @@ static inline bool pm_children_suspended(struct device *dev) || !atomic_read(&dev->power.child_count); } -static inline void pm_suspend_ignore_children(struct device *dev, bool enable) -{ - dev->power.ignore_children = enable; -} - static inline void pm_runtime_get_noresume(struct device *dev) { atomic_inc(&dev->power.usage_count); @@ -130,7 +127,6 @@ static inline void pm_runtime_allow(struct device *dev) {} static inline void pm_runtime_forbid(struct device *dev) {} static inline bool pm_children_suspended(struct device *dev) { return false; } -static inline void pm_suspend_ignore_children(struct device *dev, bool en) {} static inline void pm_runtime_get_noresume(struct device *dev) {} static inline void pm_runtime_put_noidle(struct device *dev) {} static inline bool device_run_wake(struct device *dev) { return false; } @@ -154,6 +150,9 @@ static inline void pm_runtime_set_autosuspend_delay(struct device *dev, static inline unsigned long pm_runtime_autosuspend_expiration( struct device *dev) { return 0; } +static inline void pm_runtime_update_max_time_suspended(struct device *dev, + s64 delta_ns) {} + #endif /* !CONFIG_PM_RUNTIME */ static inline int pm_runtime_idle(struct device *dev) diff --git a/include/linux/poison.h b/include/linux/poison.h index 79159de0e34..2110a81c5e2 100644 --- a/include/linux/poison.h 
+++ b/include/linux/poison.h @@ -40,12 +40,6 @@ #define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */ #define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */ -#ifdef CONFIG_PHYS_ADDR_T_64BIT -#define MEMBLOCK_INACTIVE 0x3a84fb0144c9e71bULL -#else -#define MEMBLOCK_INACTIVE 0x44c9e71bUL -#endif - #define SLUB_RED_INACTIVE 0xbb #define SLUB_RED_ACTIVE 0xcc diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 643b96c7a94..6d9e575519c 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -50,7 +50,7 @@ typedef int (write_proc_t)(struct file *file, const char __user *buffer, struct proc_dir_entry { unsigned int low_ino; - mode_t mode; + umode_t mode; nlink_t nlink; uid_t uid; gid_t gid; @@ -106,9 +106,9 @@ extern void proc_root_init(void); void proc_flush_task(struct task_struct *task); -extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, +extern struct proc_dir_entry *create_proc_entry(const char *name, umode_t mode, struct proc_dir_entry *parent); -struct proc_dir_entry *proc_create_data(const char *name, mode_t mode, +struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct file_operations *proc_fops, void *data); @@ -146,17 +146,17 @@ extern void proc_device_tree_update_prop(struct proc_dir_entry *pde, extern struct proc_dir_entry *proc_symlink(const char *, struct proc_dir_entry *, const char *); extern struct proc_dir_entry *proc_mkdir(const char *,struct proc_dir_entry *); -extern struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode, +extern struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode, struct proc_dir_entry *parent); -static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode, +static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct file_operations *proc_fops) { return proc_create_data(name, mode, parent, proc_fops, NULL); } static inline struct proc_dir_entry *create_proc_read_entry(const char *name, - mode_t mode, struct proc_dir_entry *base, + umode_t mode, struct proc_dir_entry *base, read_proc_t *read_proc, void * data) { struct proc_dir_entry *res=create_proc_entry(name,mode,base); @@ -168,7 +168,7 @@ static inline struct proc_dir_entry *create_proc_read_entry(const char *name, } extern struct proc_dir_entry *proc_net_fops_create(struct net *net, - const char *name, mode_t mode, const struct file_operations *fops); + const char *name, umode_t mode, const struct file_operations *fops); extern void proc_net_remove(struct net *net, const char *name); extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name, struct proc_dir_entry *parent); @@ -185,15 +185,15 @@ static inline void proc_flush_task(struct task_struct *task) } static inline struct proc_dir_entry *create_proc_entry(const char *name, - mode_t mode, struct proc_dir_entry *parent) { return NULL; } + umode_t mode, struct proc_dir_entry *parent) { return NULL; } static inline struct proc_dir_entry *proc_create(const char *name, - mode_t mode, struct proc_dir_entry *parent, + umode_t mode, struct proc_dir_entry *parent, const struct file_operations *proc_fops) { return NULL; } static inline struct proc_dir_entry *proc_create_data(const char *name, - mode_t mode, struct proc_dir_entry *parent, + umode_t mode, struct proc_dir_entry *parent, const struct file_operations *proc_fops, void *data) { return NULL; @@ -205,10 +205,10 @@ static 
inline struct proc_dir_entry *proc_symlink(const char *name, static inline struct proc_dir_entry *proc_mkdir(const char *name, struct proc_dir_entry *parent) {return NULL;} static inline struct proc_dir_entry *proc_mkdir_mode(const char *name, - mode_t mode, struct proc_dir_entry *parent) { return NULL; } + umode_t mode, struct proc_dir_entry *parent) { return NULL; } static inline struct proc_dir_entry *create_proc_read_entry(const char *name, - mode_t mode, struct proc_dir_entry *base, + umode_t mode, struct proc_dir_entry *base, read_proc_t *read_proc, void * data) { return NULL; } struct tty_driver; diff --git a/include/linux/pstore.h b/include/linux/pstore.h index ea567321ae3..e1461e143be 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -22,6 +22,9 @@ #ifndef _LINUX_PSTORE_H #define _LINUX_PSTORE_H +#include <linux/time.h> +#include <linux/kmsg_dump.h> + /* types */ enum pstore_type_id { PSTORE_TYPE_DMESG = 0, @@ -35,11 +38,14 @@ struct pstore_info { spinlock_t buf_lock; /* serialize access to 'buf' */ char *buf; size_t bufsize; + struct mutex read_mutex; /* serialize open/read/close */ int (*open)(struct pstore_info *psi); int (*close)(struct pstore_info *psi); ssize_t (*read)(u64 *id, enum pstore_type_id *type, - struct timespec *time, struct pstore_info *psi); - int (*write)(enum pstore_type_id type, u64 *id, + struct timespec *time, char **buf, + struct pstore_info *psi); + int (*write)(enum pstore_type_id type, + enum kmsg_dump_reason reason, u64 *id, unsigned int part, size_t size, struct pstore_info *psi); int (*erase)(enum pstore_type_id type, u64 id, struct pstore_info *psi); @@ -48,18 +54,12 @@ struct pstore_info { #ifdef CONFIG_PSTORE extern int pstore_register(struct pstore_info *); -extern int pstore_write(enum pstore_type_id type, char *buf, size_t size); #else static inline int pstore_register(struct pstore_info *psi) { return -ENODEV; } -static inline int -pstore_write(enum pstore_type_id type, char *buf, size_t size) -{ - return -ENODEV; -} #endif #endif /*_LINUX_PSTORE_H*/ diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h index 9e65d9e2066..6f6df86f1ae 100644 --- a/include/linux/raid/md_p.h +++ b/include/linux/raid/md_p.h @@ -277,7 +277,10 @@ struct mdp_superblock_1 { */ #define MD_FEATURE_RESHAPE_ACTIVE 4 #define MD_FEATURE_BAD_BLOCKS 8 /* badblock list is not empty */ - -#define MD_FEATURE_ALL (1|2|4|8) +#define MD_FEATURE_REPLACEMENT 16 /* This device is replacing an + * active device with same 'role'. + * 'recovery_offset' is also set. 
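Stepping back to the pstore.h hunk above: a hedged sketch of a backend filling in the reworked read/write hooks (note the new char **buf and kmsg_dump_reason parameters); the foo_pstore_* names are hypothetical and the actual storage is elided:

#include <linux/module.h>
#include <linux/pstore.h>
#include <linux/kmsg_dump.h>

static ssize_t foo_pstore_read(u64 *id, enum pstore_type_id *type,
			       struct timespec *time, char **buf,
			       struct pstore_info *psi)
{
	return 0;	/* 0 bytes: no (more) records to hand back */
}

static int foo_pstore_write(enum pstore_type_id type,
			    enum kmsg_dump_reason reason, u64 *id,
			    unsigned int part, size_t size,
			    struct pstore_info *psi)
{
	/* psi->buf holds 'size' bytes; 'reason' says why the dump fired */
	return 0;
}

static int foo_pstore_erase(enum pstore_type_id type, u64 id,
			    struct pstore_info *psi)
{
	return 0;
}

static struct pstore_info foo_pstore_info = {
	.owner	= THIS_MODULE,
	.name	= "foo",
	.read	= foo_pstore_read,
	.write	= foo_pstore_write,
	.erase	= foo_pstore_erase,
};

The backend would set up .buf/.bufsize and then call pstore_register(&foo_pstore_info).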
+ */ +#define MD_FEATURE_ALL (1|2|4|8|16) #endif diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h index 2b59cc82439..53272e9860a 100644 --- a/include/linux/raid/pq.h +++ b/include/linux/raid/pq.h @@ -132,7 +132,7 @@ void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, PROT_READ|PROT_WRITE, \ MAP_PRIVATE|MAP_ANONYMOUS,\ 0, 0)) -# define free_pages(x, y) munmap((void *)(x), (y)*PAGE_SIZE) +# define free_pages(x, y) munmap((void *)(x), PAGE_SIZE << (y)) static inline void cpu_relax(void) { diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h index 3a8f0c9b293..5bf5500db83 100644 --- a/include/linux/ramfs.h +++ b/include/linux/ramfs.h @@ -2,7 +2,7 @@ #define _LINUX_RAMFS_H struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir, - int mode, dev_t dev); + umode_t mode, dev_t dev); extern struct dentry *ramfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data); diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 2cf4226ade7..81c04f4348e 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -51,6 +51,8 @@ extern int rcutorture_runnable; /* for sysctl */ #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) extern void rcutorture_record_test_transition(void); extern void rcutorture_record_progress(unsigned long vernum); +extern void do_trace_rcu_torture_read(char *rcutorturename, + struct rcu_head *rhp); #else static inline void rcutorture_record_test_transition(void) { @@ -58,6 +60,12 @@ static inline void rcutorture_record_test_transition(void) static inline void rcutorture_record_progress(unsigned long vernum) { } +#ifdef CONFIG_RCU_TRACE +extern void do_trace_rcu_torture_read(char *rcutorturename, + struct rcu_head *rhp); +#else +#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0) +#endif #endif #define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b)) @@ -177,23 +185,10 @@ extern void rcu_sched_qs(int cpu); extern void rcu_bh_qs(int cpu); extern void rcu_check_callbacks(int cpu, int user); struct notifier_block; - -#ifdef CONFIG_NO_HZ - -extern void rcu_enter_nohz(void); -extern void rcu_exit_nohz(void); - -#else /* #ifdef CONFIG_NO_HZ */ - -static inline void rcu_enter_nohz(void) -{ -} - -static inline void rcu_exit_nohz(void) -{ -} - -#endif /* #else #ifdef CONFIG_NO_HZ */ +extern void rcu_idle_enter(void); +extern void rcu_idle_exit(void); +extern void rcu_irq_enter(void); +extern void rcu_irq_exit(void); /* * Infrastructure to implement the synchronize_() primitives in @@ -233,22 +228,30 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head) #ifdef CONFIG_DEBUG_LOCK_ALLOC -extern struct lockdep_map rcu_lock_map; -# define rcu_read_acquire() \ - lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) -# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) +#ifdef CONFIG_PROVE_RCU +extern int rcu_is_cpu_idle(void); +#else /* !CONFIG_PROVE_RCU */ +static inline int rcu_is_cpu_idle(void) +{ + return 0; +} +#endif /* else !CONFIG_PROVE_RCU */ -extern struct lockdep_map rcu_bh_lock_map; -# define rcu_read_acquire_bh() \ - lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) -# define rcu_read_release_bh() lock_release(&rcu_bh_lock_map, 1, _THIS_IP_) +static inline void rcu_lock_acquire(struct lockdep_map *map) +{ + WARN_ON_ONCE(rcu_is_cpu_idle()); + lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_); +} -extern struct lockdep_map rcu_sched_lock_map; -# define rcu_read_acquire_sched() \ - 
lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) -# define rcu_read_release_sched() \ - lock_release(&rcu_sched_lock_map, 1, _THIS_IP_) +static inline void rcu_lock_release(struct lockdep_map *map) +{ + WARN_ON_ONCE(rcu_is_cpu_idle()); + lock_release(map, 1, _THIS_IP_); +} +extern struct lockdep_map rcu_lock_map; +extern struct lockdep_map rcu_bh_lock_map; +extern struct lockdep_map rcu_sched_lock_map; extern int debug_lockdep_rcu_enabled(void); /** @@ -262,11 +265,18 @@ extern int debug_lockdep_rcu_enabled(void); * * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot * and while lockdep is disabled. + * + * Note that rcu_read_lock() and the matching rcu_read_unlock() must + * occur in the same context, for example, it is illegal to invoke + * rcu_read_unlock() in process context if the matching rcu_read_lock() + * was invoked from within an irq handler. */ static inline int rcu_read_lock_held(void) { if (!debug_lockdep_rcu_enabled()) return 1; + if (rcu_is_cpu_idle()) + return 0; return lock_is_held(&rcu_lock_map); } @@ -290,6 +300,19 @@ extern int rcu_read_lock_bh_held(void); * * Check debug_lockdep_rcu_enabled() to prevent false positives during boot * and while lockdep is disabled. + * + * Note that if the CPU is in the idle loop from an RCU point of + * view (ie: that we are in the section between rcu_idle_enter() and + * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU + * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs + * that are in such a section, considering these as in extended quiescent + * state, so such a CPU is effectively never in an RCU read-side critical + * section regardless of what RCU primitives it invokes. This state of + * affairs is required --- we need to keep an RCU-free window in idle + * where the CPU may possibly enter into low power mode. This way we can + * notice an extended quiescent state to other CPUs that started a grace + * period. Otherwise we would delay any grace period as long as we run in + * the idle task. 
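As an aside, a sketch of the lockdep-checked access pattern that these *_held() helpers back; my_lock, my_data and the global pointer gp are assumed purely for illustration:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct my_data {
	int value;
};

static struct my_data __rcu *gp;	/* assumed RCU-protected pointer */
static DEFINE_SPINLOCK(my_lock);	/* also guards updates to gp */

static int read_value(void)
{
	struct my_data *p;
	int ret = 0;

	rcu_read_lock();
	/* rcu_read_lock_held() feeds this check; with the change above it
	 * also returns false when the CPU is idle from RCU's viewpoint. */
	p = rcu_dereference_check(gp, rcu_read_lock_held() ||
				      lockdep_is_held(&my_lock));
	if (p)
		ret = p->value;
	rcu_read_unlock();

	return ret;
}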
*/ #ifdef CONFIG_PREEMPT_COUNT static inline int rcu_read_lock_sched_held(void) @@ -298,6 +321,8 @@ static inline int rcu_read_lock_sched_held(void) if (!debug_lockdep_rcu_enabled()) return 1; + if (rcu_is_cpu_idle()) + return 0; if (debug_locks) lockdep_opinion = lock_is_held(&rcu_sched_lock_map); return lockdep_opinion || preempt_count() != 0 || irqs_disabled(); @@ -311,12 +336,8 @@ static inline int rcu_read_lock_sched_held(void) #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -# define rcu_read_acquire() do { } while (0) -# define rcu_read_release() do { } while (0) -# define rcu_read_acquire_bh() do { } while (0) -# define rcu_read_release_bh() do { } while (0) -# define rcu_read_acquire_sched() do { } while (0) -# define rcu_read_release_sched() do { } while (0) +# define rcu_lock_acquire(a) do { } while (0) +# define rcu_lock_release(a) do { } while (0) static inline int rcu_read_lock_held(void) { @@ -637,7 +658,7 @@ static inline void rcu_read_lock(void) { __rcu_read_lock(); __acquire(RCU); - rcu_read_acquire(); + rcu_lock_acquire(&rcu_lock_map); } /* @@ -657,7 +678,7 @@ static inline void rcu_read_lock(void) */ static inline void rcu_read_unlock(void) { - rcu_read_release(); + rcu_lock_release(&rcu_lock_map); __release(RCU); __rcu_read_unlock(); } @@ -673,12 +694,17 @@ static inline void rcu_read_unlock(void) * critical sections in interrupt context can use just rcu_read_lock(), * though this should at least be commented to avoid confusing people * reading the code. + * + * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() + * must occur in the same context, for example, it is illegal to invoke + * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh() + * was invoked from some other task. */ static inline void rcu_read_lock_bh(void) { local_bh_disable(); __acquire(RCU_BH); - rcu_read_acquire_bh(); + rcu_lock_acquire(&rcu_bh_lock_map); } /* @@ -688,7 +714,7 @@ static inline void rcu_read_lock_bh(void) */ static inline void rcu_read_unlock_bh(void) { - rcu_read_release_bh(); + rcu_lock_release(&rcu_bh_lock_map); __release(RCU_BH); local_bh_enable(); } @@ -700,12 +726,17 @@ static inline void rcu_read_unlock_bh(void) * are being done using call_rcu_sched() or synchronize_rcu_sched(). * Read-side critical sections can also be introduced by anything that * disables preemption, including local_irq_disable() and friends. + * + * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched() + * must occur in the same context, for example, it is illegal to invoke + * rcu_read_unlock_sched() from process context if the matching + * rcu_read_lock_sched() was invoked from an NMI handler. */ static inline void rcu_read_lock_sched(void) { preempt_disable(); __acquire(RCU_SCHED); - rcu_read_acquire_sched(); + rcu_lock_acquire(&rcu_sched_lock_map); } /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. 
*/ @@ -722,7 +753,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void) */ static inline void rcu_read_unlock_sched(void) { - rcu_read_release_sched(); + rcu_lock_release(&rcu_sched_lock_map); __release(RCU_SCHED); preempt_enable(); } diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 690276a642c..eb93921cdd3 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -23,9 +23,8 @@ struct spi_device; /* An enum of all the supported cache types */ enum regcache_type { REGCACHE_NONE, - REGCACHE_INDEXED, REGCACHE_RBTREE, - REGCACHE_LZO + REGCACHE_COMPRESSED }; /** @@ -83,7 +82,7 @@ struct regmap_config { bool (*precious_reg)(struct device *dev, unsigned int reg); unsigned int max_register; - struct reg_default *reg_defaults; + const struct reg_default *reg_defaults; unsigned int num_reg_defaults; enum regcache_type cache_type; const void *reg_defaults_raw; @@ -129,6 +128,8 @@ struct regmap *regmap_init_spi(struct spi_device *dev, const struct regmap_config *config); void regmap_exit(struct regmap *map); +int regmap_reinit_cache(struct regmap *map, + const struct regmap_config *config); int regmap_write(struct regmap *map, unsigned int reg, unsigned int val); int regmap_raw_write(struct regmap *map, unsigned int reg, const void *val, size_t val_len); @@ -139,9 +140,61 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, size_t val_count); int regmap_update_bits(struct regmap *map, unsigned int reg, unsigned int mask, unsigned int val); +int regmap_update_bits_check(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change); int regcache_sync(struct regmap *map); void regcache_cache_only(struct regmap *map, bool enable); void regcache_cache_bypass(struct regmap *map, bool enable); +void regcache_mark_dirty(struct regmap *map); + +/** + * Description of an IRQ for the generic regmap irq_chip. + * + * @reg_offset: Offset of the status/mask register within the bank + * @mask: Mask used to flag/control the register. + */ +struct regmap_irq { + unsigned int reg_offset; + unsigned int mask; +}; + +/** + * Description of a generic regmap irq_chip. This is not intended to + * handle every possible interrupt controller, but it should handle a + * substantial proportion of those that are found in the wild. + * + * @name: Descriptive name for IRQ controller. + * + * @status_base: Base status register address. + * @mask_base: Base mask register address. + * @ack_base: Base ack address. If zero then the chip is clear on read. + * + * @num_regs: Number of registers in each control bank. + * @irqs: Descriptors for individual IRQs. Interrupt numbers are + * assigned based on the index in the array of the interrupt. + * @num_irqs: Number of descriptors. 
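As an aside, a hedged sketch of how a chip driver might describe its interrupt registers with the new generic regmap irq_chip and hand them to regmap_add_irq_chip() (added just below); the register addresses and the foo_* names are assumptions:

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>

/* Hypothetical chip: one status/mask/ack register set, two sources. */
static const struct regmap_irq foo_irqs[] = {
	{ .reg_offset = 0, .mask = BIT(0) },	/* index 0, e.g. "ready" */
	{ .reg_offset = 0, .mask = BIT(1) },	/* index 1, e.g. "error" */
};

static struct regmap_irq_chip foo_irq_chip = {
	.name		= "foo",
	.status_base	= 0x10,		/* assumed register addresses */
	.mask_base	= 0x11,
	.ack_base	= 0x12,
	.num_regs	= 1,
	.irqs		= foo_irqs,
	.num_irqs	= ARRAY_SIZE(foo_irqs),
};

static struct regmap_irq_chip_data *foo_irq_data;

static int foo_init_irqs(struct regmap *map, int irq, int irq_base)
{
	/* The demultiplexing handler sleeps for bus I/O, hence IRQF_ONESHOT;
	 * irq_base would typically come from platform data. */
	return regmap_add_irq_chip(map, irq,
				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				   irq_base, &foo_irq_chip, &foo_irq_data);
}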
+ */ +struct regmap_irq_chip { + const char *name; + + unsigned int status_base; + unsigned int mask_base; + unsigned int ack_base; + + int num_regs; + + const struct regmap_irq *irqs; + int num_irqs; +}; + +struct regmap_irq_chip_data; + +int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, + int irq_base, struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data); +void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data); +int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data); #endif diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index 96d465f8d3e..2213ddcce20 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -1759,13 +1759,14 @@ struct reiserfs_journal_header { REISERFS_QUOTA_TRANS_BLOCKS(sb))) #ifdef CONFIG_QUOTA +#define REISERFS_QUOTA_OPTS ((1 << REISERFS_USRQUOTA) | (1 << REISERFS_GRPQUOTA)) /* We need to update data and inode (atime) */ -#define REISERFS_QUOTA_TRANS_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & (1<<REISERFS_QUOTA) ? 2 : 0) +#define REISERFS_QUOTA_TRANS_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & REISERFS_QUOTA_OPTS ? 2 : 0) /* 1 balancing, 1 bitmap, 1 data per write + stat data update */ -#define REISERFS_QUOTA_INIT_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & (1<<REISERFS_QUOTA) ? \ +#define REISERFS_QUOTA_INIT_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & REISERFS_QUOTA_OPTS ? \ (DQUOT_INIT_ALLOC*(JOURNAL_PER_BALANCE_CNT+2)+DQUOT_INIT_REWRITE+1) : 0) /* same as with INIT */ -#define REISERFS_QUOTA_DEL_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & (1<<REISERFS_QUOTA) ? \ +#define REISERFS_QUOTA_DEL_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & REISERFS_QUOTA_OPTS ? \ (DQUOT_DEL_ALLOC*(JOURNAL_PER_BALANCE_CNT+2)+DQUOT_DEL_REWRITE+1) : 0) #else #define REISERFS_QUOTA_TRANS_BLOCKS(s) 0 @@ -2056,7 +2057,7 @@ struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key); struct reiserfs_security_handle; int reiserfs_new_inode(struct reiserfs_transaction_handle *th, - struct inode *dir, int mode, + struct inode *dir, umode_t mode, const char *symname, loff_t i_size, struct dentry *dentry, struct inode *inode, struct reiserfs_security_handle *security); diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h index 52c83b6a758..8c9e85c64b4 100644 --- a/include/linux/reiserfs_fs_sb.h +++ b/include/linux/reiserfs_fs_sb.h @@ -417,6 +417,7 @@ struct reiserfs_sb_info { char *s_qf_names[MAXQUOTAS]; int s_jquota_fmt; #endif + char *s_jdev; /* Stored jdev for mount option showing */ #ifdef CONFIG_REISERFS_CHECK struct tree_balance *cur_tb; /* @@ -482,7 +483,8 @@ enum reiserfs_mount_options { REISERFS_ERROR_RO, REISERFS_ERROR_CONTINUE, - REISERFS_QUOTA, /* Some quota option specified */ + REISERFS_USRQUOTA, /* User quota option specified */ + REISERFS_GRPQUOTA, /* Group quota option specified */ REISERFS_TEST1, REISERFS_TEST2, diff --git a/include/linux/relay.h b/include/linux/relay.h index 14a86bc7102..a822fd71fd6 100644 --- a/include/linux/relay.h +++ b/include/linux/relay.h @@ -144,7 +144,7 @@ struct rchan_callbacks */ struct dentry *(*create_buf_file)(const char *filename, struct dentry *parent, - int mode, + umode_t mode, struct rchan_buf *buf, int *is_global); diff --git a/include/linux/sched.h b/include/linux/sched.h index 68daf4f27e2..ad93e1ec8c6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -220,7 +220,7 @@ extern char ___assert_task_state[1 - 2*!!( ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) #define task_contributes_to_load(task) \ 
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ - (task->flags & PF_FREEZING) == 0) + (task->flags & PF_FROZEN) == 0) #define __set_task_state(tsk, state_value) \ do { (tsk)->state = (state_value); } while (0) @@ -273,9 +273,11 @@ extern int runqueue_is_locked(int cpu); #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) extern void select_nohz_load_balancer(int stop_tick); +extern void set_cpu_sd_state_idle(void); extern int get_nohz_timer_target(void); #else static inline void select_nohz_load_balancer(int stop_tick) { } +static inline void set_cpu_sd_state_idle(void) { } #endif /* @@ -483,8 +485,8 @@ struct task_cputime { #define INIT_CPUTIME \ (struct task_cputime) { \ - .utime = cputime_zero, \ - .stime = cputime_zero, \ + .utime = 0, \ + .stime = 0, \ .sum_exec_runtime = 0, \ } @@ -901,6 +903,10 @@ struct sched_group_power { * single CPU. */ unsigned int power, power_orig; + /* + * Number of busy cpus in this group. + */ + atomic_t nr_busy_cpus; }; struct sched_group { @@ -925,6 +931,15 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg) return to_cpumask(sg->cpumask); } +/** + * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. + * @group: The group whose first cpu is to be returned. + */ +static inline unsigned int group_first_cpu(struct sched_group *group) +{ + return cpumask_first(sched_group_cpus(group)); +} + struct sched_domain_attr { int relax_domain_level; }; @@ -1315,8 +1330,8 @@ struct task_struct { * older sibling, respectively. (p->father can be replaced with * p->real_parent->pid) */ - struct task_struct *real_parent; /* real parent process */ - struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */ + struct task_struct __rcu *real_parent; /* real parent process */ + struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */ /* * children/sibling forms the list of my natural children */ @@ -1521,7 +1536,6 @@ struct task_struct { #ifdef CONFIG_FAULT_INJECTION int make_it_fail; #endif - struct prop_local_single dirties; /* * when (nr_dirtied >= nr_dirtied_pause), it's time to call * balance_dirty_pages() for some dirty throttling pause @@ -1773,7 +1787,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * #define PF_MEMALLOC 0x00000800 /* Allocating memory */ #define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ -#define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */ #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ #define PF_FROZEN 0x00010000 /* frozen for system suspend */ #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ @@ -1789,7 +1802,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ -#define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ /* * Only the _current_ task can read/write to tsk->flags, but other @@ -2071,6 +2083,14 @@ extern int sched_setscheduler(struct task_struct *, int, extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); extern struct task_struct *idle_task(int cpu); +/** + * is_idle_task - is the specified task an idle task? 
+ * @tsk: the task in question. + */ +static inline bool is_idle_task(struct task_struct *p) +{ + return p->pid == 0; +} extern struct task_struct *curr_task(int cpu); extern void set_curr_task(int cpu, struct task_struct *p); diff --git a/include/linux/security.h b/include/linux/security.h index 19d8e04e168..98112cf9388 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -186,7 +186,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Security module identifier. * * @name: - * A string that acts as a unique identifeir for the LSM with max number + * A string that acts as a unique identifier for the LSM with max number * of characters = SECURITY_NAME_MAX. * * Security hooks for program execution operations. @@ -275,7 +275,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @copy copied data which will be passed to the security module. * Returns 0 if the copy was successful. * @sb_remount: - * Extracts security system specifc mount options and verifys no changes + * Extracts security system specific mount options and verifies no changes * are being made to those options. * @sb superblock being remounted * @data contains the filesystem-specific data. @@ -380,15 +380,15 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Return 0 if permission is granted. * @inode_mkdir: * Check permissions to create a new directory in the existing directory - * associated with inode strcture @dir. - * @dir containst the inode structure of parent of the directory to be created. + * associated with inode structure @dir. + * @dir contains the inode structure of parent of the directory to be created. * @dentry contains the dentry structure of new directory. * @mode contains the mode of new directory. * Return 0 if permission is granted. * @path_mkdir: * Check permissions to create a new directory in the existing directory - * associated with path strcture @path. - * @dir containst the path structure of parent of the directory + * associated with path structure @path. + * @dir contains the path structure of parent of the directory * to be created. * @dentry contains the dentry structure of new directory. * @mode contains the mode of new directory. @@ -578,7 +578,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @file contains the file structure. * @cmd contains the operation to perform. * @arg contains the operational arguments. - * Check permission for an ioctl operation on @file. Note that @arg can + * Check permission for an ioctl operation on @file. Note that @arg * sometimes represents a user space pointer; in other cases, it may be a * simple integer value. When @arg represents a user space pointer, it * should never be used by the security module. @@ -606,7 +606,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Return 0 if permission is granted. * @file_fcntl: * Check permission before allowing the file operation specified by @cmd - * from being performed on the file @file. Note that @arg can sometimes + * from being performed on the file @file. Note that @arg sometimes * represents a user space pointer; in other cases, it may be a simple * integer value. When @arg represents a user space pointer, it should * never be used by the security module. @@ -793,7 +793,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * information can be saved using the eff_cap field of the * netlink_skb_parms structure. 
Also may be used to provide fine * grained control over message transmission. - * @sk associated sock of task sending the message., + * @sk associated sock of task sending the message. * @skb contains the sk_buff structure for the netlink message. * Return 0 if the information was successfully saved and message * is allowed to be transmitted. @@ -1080,9 +1080,9 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * should free it. * @key points to the key to be queried. * @_buffer points to a pointer that should be set to point to the - * resulting string (if no label or an error occurs). + * resulting string (if no label or an error occurs). * Return the length of the string (including terminating NUL) or -ve if - * an error. + * an error. * May also return 0 (and a NULL buffer pointer) if there is no label. * * Security hooks affecting all System V IPC operations. @@ -1268,7 +1268,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * credentials. * @tsk contains the task_struct for the process. * @cred contains the credentials to use. - * @ns contains the user namespace we want the capability in + * @ns contains the user namespace we want the capability in * @cap contains the capability <include/linux/capability.h>. * @audit: Whether to write an audit message or not * Return 0 if the capability is granted for @tsk. @@ -1370,7 +1370,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @ctxlen contains the length of @ctx. * * @inode_getsecctx: - * Returns a string containing all relavent security context information + * Returns a string containing all relevant security context information * * @inode we wish to get the security context of. * @ctx is a pointer in which to place the allocated security context. 
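The hunk just below switches the security_operations creation hooks from int to umode_t; as a hedged sketch, a hypothetical LSM hook with the new prototype (the foo_* names and the trivial policy are assumptions):

#include <linux/security.h>
#include <linux/stat.h>

static int foo_inode_mkdir(struct inode *dir, struct dentry *dentry,
			   umode_t mode)
{
	/* 'mode' is now umode_t, matching the VFS; arbitrary demo policy */
	if (mode & S_ISVTX)
		return -EPERM;
	return 0;
}

static struct security_operations foo_security_ops = {
	.name		= "foo",
	.inode_mkdir	= foo_inode_mkdir,
};

Registration would go through register_security(&foo_security_ops) as for any LSM; only the hook prototypes change in this patch, not the calling convention.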
@@ -1424,9 +1424,9 @@ struct security_operations { #ifdef CONFIG_SECURITY_PATH int (*path_unlink) (struct path *dir, struct dentry *dentry); - int (*path_mkdir) (struct path *dir, struct dentry *dentry, int mode); + int (*path_mkdir) (struct path *dir, struct dentry *dentry, umode_t mode); int (*path_rmdir) (struct path *dir, struct dentry *dentry); - int (*path_mknod) (struct path *dir, struct dentry *dentry, int mode, + int (*path_mknod) (struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev); int (*path_truncate) (struct path *path); int (*path_symlink) (struct path *dir, struct dentry *dentry, @@ -1435,8 +1435,7 @@ struct security_operations { struct dentry *new_dentry); int (*path_rename) (struct path *old_dir, struct dentry *old_dentry, struct path *new_dir, struct dentry *new_dentry); - int (*path_chmod) (struct dentry *dentry, struct vfsmount *mnt, - mode_t mode); + int (*path_chmod) (struct path *path, umode_t mode); int (*path_chown) (struct path *path, uid_t uid, gid_t gid); int (*path_chroot) (struct path *path); #endif @@ -1447,16 +1446,16 @@ struct security_operations { const struct qstr *qstr, char **name, void **value, size_t *len); int (*inode_create) (struct inode *dir, - struct dentry *dentry, int mode); + struct dentry *dentry, umode_t mode); int (*inode_link) (struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry); int (*inode_unlink) (struct inode *dir, struct dentry *dentry); int (*inode_symlink) (struct inode *dir, struct dentry *dentry, const char *old_name); - int (*inode_mkdir) (struct inode *dir, struct dentry *dentry, int mode); + int (*inode_mkdir) (struct inode *dir, struct dentry *dentry, umode_t mode); int (*inode_rmdir) (struct inode *dir, struct dentry *dentry); int (*inode_mknod) (struct inode *dir, struct dentry *dentry, - int mode, dev_t dev); + umode_t mode, dev_t dev); int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry); int (*inode_readlink) (struct dentry *dentry); @@ -1716,15 +1715,15 @@ int security_inode_init_security(struct inode *inode, struct inode *dir, int security_old_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, char **name, void **value, size_t *len); -int security_inode_create(struct inode *dir, struct dentry *dentry, int mode); +int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode); int security_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry); int security_inode_unlink(struct inode *dir, struct dentry *dentry); int security_inode_symlink(struct inode *dir, struct dentry *dentry, const char *old_name); -int security_inode_mkdir(struct inode *dir, struct dentry *dentry, int mode); +int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode); int security_inode_rmdir(struct inode *dir, struct dentry *dentry); -int security_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev); +int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev); int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry); int security_inode_readlink(struct dentry *dentry); @@ -2056,12 +2055,12 @@ static inline int security_old_inode_init_security(struct inode *inode, char **name, void **value, size_t *len) { - return 0; + return -EOPNOTSUPP; } static inline int security_inode_create(struct inode *dir, struct dentry 
*dentry, - int mode) + umode_t mode) { return 0; } @@ -2855,9 +2854,9 @@ static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi #ifdef CONFIG_SECURITY_PATH int security_path_unlink(struct path *dir, struct dentry *dentry); -int security_path_mkdir(struct path *dir, struct dentry *dentry, int mode); +int security_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode); int security_path_rmdir(struct path *dir, struct dentry *dentry); -int security_path_mknod(struct path *dir, struct dentry *dentry, int mode, +int security_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev); int security_path_truncate(struct path *path); int security_path_symlink(struct path *dir, struct dentry *dentry, @@ -2866,8 +2865,7 @@ int security_path_link(struct dentry *old_dentry, struct path *new_dir, struct dentry *new_dentry); int security_path_rename(struct path *old_dir, struct dentry *old_dentry, struct path *new_dir, struct dentry *new_dentry); -int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt, - mode_t mode); +int security_path_chmod(struct path *path, umode_t mode); int security_path_chown(struct path *path, uid_t uid, gid_t gid); int security_path_chroot(struct path *path); #else /* CONFIG_SECURITY_PATH */ @@ -2877,7 +2875,7 @@ static inline int security_path_unlink(struct path *dir, struct dentry *dentry) } static inline int security_path_mkdir(struct path *dir, struct dentry *dentry, - int mode) + umode_t mode) { return 0; } @@ -2888,7 +2886,7 @@ static inline int security_path_rmdir(struct path *dir, struct dentry *dentry) } static inline int security_path_mknod(struct path *dir, struct dentry *dentry, - int mode, unsigned int dev) + umode_t mode, unsigned int dev) { return 0; } @@ -2919,9 +2917,7 @@ static inline int security_path_rename(struct path *old_dir, return 0; } -static inline int security_path_chmod(struct dentry *dentry, - struct vfsmount *mnt, - mode_t mode) +static inline int security_path_chmod(struct path *path, umode_t mode) { return 0; } @@ -3010,7 +3006,7 @@ static inline void security_audit_rule_free(void *lsmrule) #ifdef CONFIG_SECURITYFS -extern struct dentry *securityfs_create_file(const char *name, mode_t mode, +extern struct dentry *securityfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops); extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent); @@ -3025,7 +3021,7 @@ static inline struct dentry *securityfs_create_dir(const char *name, } static inline struct dentry *securityfs_create_file(const char *name, - mode_t mode, + umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 0b69a468421..44f1514b00b 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -74,7 +74,7 @@ static inline void seq_commit(struct seq_file *m, int num) } } -char *mangle_path(char *s, char *p, char *esc); +char *mangle_path(char *s, const char *p, const char *esc); int seq_open(struct file *, const struct seq_operations *); ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); loff_t seq_lseek(struct file *, loff_t, int); @@ -86,10 +86,10 @@ int seq_write(struct seq_file *seq, const void *data, size_t len); __printf(2, 3) int seq_printf(struct seq_file *, const char *, ...); -int seq_path(struct seq_file *, struct path *, char *); -int seq_dentry(struct seq_file *, struct dentry *, char *); -int 
seq_path_root(struct seq_file *m, struct path *path, struct path *root, - char *esc); +int seq_path(struct seq_file *, const struct path *, const char *); +int seq_dentry(struct seq_file *, struct dentry *, const char *); +int seq_path_root(struct seq_file *m, const struct path *path, + const struct path *root, const char *esc); int seq_bitmap(struct seq_file *m, const unsigned long *bits, unsigned int nr_bits); static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask) diff --git a/include/linux/serial.h b/include/linux/serial.h index 97ff8e27a6c..3d86517fe7d 100644 --- a/include/linux/serial.h +++ b/include/linux/serial.h @@ -207,13 +207,15 @@ struct serial_icounter_struct { struct serial_rs485 { __u32 flags; /* RS485 feature flags */ -#define SER_RS485_ENABLED (1 << 0) -#define SER_RS485_RTS_ON_SEND (1 << 1) -#define SER_RS485_RTS_AFTER_SEND (1 << 2) -#define SER_RS485_RTS_BEFORE_SEND (1 << 3) +#define SER_RS485_ENABLED (1 << 0) /* If enabled */ +#define SER_RS485_RTS_ON_SEND (1 << 1) /* Logical level for + RTS pin when + sending */ +#define SER_RS485_RTS_AFTER_SEND (1 << 2) /* Logical level for + RTS pin after sent*/ #define SER_RS485_RX_DURING_TX (1 << 4) - __u32 delay_rts_before_send; /* Milliseconds */ - __u32 delay_rts_after_send; /* Milliseconds */ + __u32 delay_rts_before_send; /* Delay before send (milliseconds) */ + __u32 delay_rts_after_send; /* Delay after send (milliseconds) */ __u32 padding[5]; /* Memory is cheap, new structs are a royal PITA .. */ }; diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h index 5812fefbced..b160645f559 100644 --- a/include/linux/sh_intc.h +++ b/include/linux/sh_intc.h @@ -95,6 +95,7 @@ struct intc_desc { unsigned int num_resources; intc_enum force_enable; intc_enum force_disable; + bool skip_syscore_suspend; struct intc_hw_desc hw; }; diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 9291ac3cc62..e4c711c6f32 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -30,7 +30,7 @@ struct shmem_sb_info { spinlock_t stat_lock; /* Serialize shmem_sb_info changes */ uid_t uid; /* Mount uid for root directory */ gid_t gid; /* Mount gid for root directory */ - mode_t mode; /* Mount mode for root directory */ + umode_t mode; /* Mount mode for root directory */ struct mempolicy *mpol; /* default memory policy for mappings */ }; diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index a83833a1f7a..07ceb97d53f 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -35,7 +35,7 @@ struct shrinker { /* These are for internal use */ struct list_head list; - long nr; /* objs pending delete */ + atomic_long_t nr_in_batch; /* objs pending delete */ }; #define DEFAULT_SEEKS 2 /* A good number if you don't know better. 
*/ extern void register_shrinker(struct shrinker *); diff --git a/include/linux/sigma.h b/include/linux/sigma.h index e2accb3164d..d0de882c0d9 100644 --- a/include/linux/sigma.h +++ b/include/linux/sigma.h @@ -24,7 +24,7 @@ struct sigma_firmware { struct sigma_firmware_header { unsigned char magic[7]; u8 version; - u32 crc; + __le32 crc; }; enum { @@ -40,19 +40,14 @@ enum { struct sigma_action { u8 instr; u8 len_hi; - u16 len; - u16 addr; + __le16 len; + __be16 addr; unsigned char payload[]; }; static inline u32 sigma_action_len(struct sigma_action *sa) { - return (sa->len_hi << 16) | sa->len; -} - -static inline size_t sigma_action_size(struct sigma_action *sa, u32 payload_len) -{ - return sizeof(*sa) + payload_len + (payload_len % 2); + return (sa->len_hi << 16) | le16_to_cpu(sa->len); } extern int process_sigma_firmware(struct i2c_client *client, const char *name); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index fe864885c1e..50db9b04a55 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -30,6 +30,7 @@ #include <linux/dmaengine.h> #include <linux/hrtimer.h> #include <linux/dma-mapping.h> +#include <linux/netdev_features.h> /* Don't change this without changing skb_csum_unnecessary! */ #define CHECKSUM_NONE 0 @@ -87,7 +88,6 @@ * at device setup time. * NETIF_F_HW_CSUM - it is clever device, it is able to checksum * everything. - * NETIF_F_NO_CSUM - loopback or reliable single hop media. * NETIF_F_IP_CSUM - device is dumb. It is able to csum only * TCP/UDP over IPv4. Sigh. Vendors like this * way by an unknown reason. Though, see comment above @@ -128,13 +128,17 @@ struct sk_buff_head { struct sk_buff; -/* To allow 64K frame to be packed as single skb without frag_list. Since - * GRO uses frags we allocate at least 16 regardless of page size. +/* To allow 64K frame to be packed as single skb without frag_list we + * require 64K/PAGE_SIZE pages plus 1 additional page to allow for + * buffers which do not start on a page boundary. + * + * Since GRO uses frags we allocate at least 16 regardless of page + * size. */ -#if (65536/PAGE_SIZE + 2) < 16 +#if (65536/PAGE_SIZE + 1) < 16 #define MAX_SKB_FRAGS 16UL #else -#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2) +#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1) #endif typedef struct skb_frag_struct skb_frag_t; @@ -218,6 +222,9 @@ enum { /* device driver supports TX zero-copy buffers */ SKBTX_DEV_ZEROCOPY = 1 << 4, + + /* generate wifi status information (where possible) */ + SKBTX_WIFI_STATUS = 1 << 5, }; /* @@ -235,15 +242,15 @@ struct ubuf_info { * the end of the header data, ie. at skb->end. */ struct skb_shared_info { - unsigned short nr_frags; + unsigned char nr_frags; + __u8 tx_flags; unsigned short gso_size; /* Warning: this field is not always filled in (UFO)! */ unsigned short gso_segs; unsigned short gso_type; - __be32 ip6_frag_id; - __u8 tx_flags; struct sk_buff *frag_list; struct skb_shared_hwtstamps hwtstamps; + __be32 ip6_frag_id; /* * Warning : all fields before dataref are cleared in __alloc_skb() @@ -352,6 +359,8 @@ typedef unsigned char *sk_buff_data_t; * @ooo_okay: allow the mapping of a socket to a queue to be changed * @l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport * ports. 
+ * @wifi_acked_valid: wifi_acked was set + * @wifi_acked: whether frame was acked on wifi or not * @dma_cookie: a cookie to one of several possible DMA operations * done by skb DMA functions * @secmark: security marking @@ -445,10 +454,11 @@ struct sk_buff { #endif __u8 ooo_okay:1; __u8 l4_rxhash:1; + __u8 wifi_acked_valid:1; + __u8 wifi_acked:1; + /* 10/12 bit hole (depending on ndisc_nodetype presence) */ kmemcheck_bitfield_end(flags2); - /* 0/13 bit hole */ - #ifdef CONFIG_NET_DMA dma_cookie_t dma_cookie; #endif @@ -540,6 +550,7 @@ extern void consume_skb(struct sk_buff *skb); extern void __kfree_skb(struct sk_buff *skb); extern struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int fclone, int node); +extern struct sk_buff *build_skb(void *data); static inline struct sk_buff *alloc_skb(unsigned int size, gfp_t priority) { @@ -561,8 +572,9 @@ extern struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); extern struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority); -extern struct sk_buff *pskb_copy(struct sk_buff *skb, - gfp_t gfp_mask); +extern struct sk_buff *__pskb_copy(struct sk_buff *skb, + int headroom, gfp_t gfp_mask); + extern int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); @@ -1662,38 +1674,6 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, } /** - * __netdev_alloc_page - allocate a page for ps-rx on a specific device - * @dev: network device to receive on - * @gfp_mask: alloc_pages_node mask - * - * Allocate a new page. dev currently unused. - * - * %NULL is returned if there is no free memory. - */ -static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask) -{ - return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0); -} - -/** - * netdev_alloc_page - allocate a page for ps-rx on a specific device - * @dev: network device to receive on - * - * Allocate a new page. dev currently unused. - * - * %NULL is returned if there is no free memory. 
- */ -static inline struct page *netdev_alloc_page(struct net_device *dev) -{ - return __netdev_alloc_page(dev, GFP_ATOMIC); -} - -static inline void netdev_free_page(struct net_device *dev, struct page *page) -{ - __free_page(page); -} - -/** * skb_frag_page - retrieve the page refered to by a paged fragment * @frag: the paged fragment * @@ -1824,6 +1804,12 @@ static inline dma_addr_t skb_frag_dma_map(struct device *dev, frag->page_offset + offset, size, dir); } +static inline struct sk_buff *pskb_copy(struct sk_buff *skb, + gfp_t gfp_mask) +{ + return __pskb_copy(skb, skb_headroom(skb), gfp_mask); +} + /** * skb_clone_writable - is the header of a clone writable * @skb: buffer to check @@ -2105,7 +2091,8 @@ extern void skb_split(struct sk_buff *skb, extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); -extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features); +extern struct sk_buff *skb_segment(struct sk_buff *skb, + netdev_features_t features); static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) @@ -2263,6 +2250,15 @@ static inline void skb_tx_timestamp(struct sk_buff *skb) sw_tx_timestamp(skb); } +/** + * skb_complete_wifi_ack - deliver skb with wifi status + * + * @skb: the original outgoing packet + * @acked: ack status + * + */ +void skb_complete_wifi_ack(struct sk_buff *skb, bool acked); + extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); extern __sum16 __skb_checksum_complete(struct sk_buff *skb); diff --git a/include/linux/smscphy.h b/include/linux/smscphy.h new file mode 100644 index 00000000000..ce718cbce43 --- /dev/null +++ b/include/linux/smscphy.h @@ -0,0 +1,25 @@ +#ifndef __LINUX_SMSCPHY_H__ +#define __LINUX_SMSCPHY_H__ + +#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */ +#define MII_LAN83C185_IM 30 /* Interrupt Mask */ +#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */ + +#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */ +#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */ +#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */ +#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */ +#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */ +#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */ +#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */ + +#define MII_LAN83C185_ISF_INT_ALL (0x0e) + +#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \ + (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \ + MII_LAN83C185_ISF_INT7) + +#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */ +#define MII_LAN83C185_ENERGYON (1 << 1) /* ENERGYON */ + +#endif /* __LINUX_SMSCPHY_H__ */ diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h new file mode 100644 index 00000000000..251729a4788 --- /dev/null +++ b/include/linux/sock_diag.h @@ -0,0 +1,48 @@ +#ifndef __SOCK_DIAG_H__ +#define __SOCK_DIAG_H__ + +#include <linux/types.h> + +#define SOCK_DIAG_BY_FAMILY 20 + +struct sock_diag_req { + __u8 sdiag_family; + __u8 sdiag_protocol; +}; + +enum { + SK_MEMINFO_RMEM_ALLOC, + SK_MEMINFO_RCVBUF, + SK_MEMINFO_WMEM_ALLOC, + SK_MEMINFO_SNDBUF, + SK_MEMINFO_FWD_ALLOC, + SK_MEMINFO_WMEM_QUEUED, + SK_MEMINFO_OPTMEM, + + SK_MEMINFO_VARS, +}; + +#ifdef __KERNEL__ +struct sk_buff; +struct nlmsghdr; +struct sock; + +struct sock_diag_handler { + __u8 family; + int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); +}; + +int sock_diag_register(struct sock_diag_handler *h); +void 
sock_diag_unregister(struct sock_diag_handler *h); + +void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); +void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); + +int sock_diag_check_cookie(void *sk, __u32 *cookie); +void sock_diag_save_cookie(void *sk, __u32 *cookie); + +int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr); + +extern struct sock *sock_diag_nlsk; +#endif /* KERNEL */ +#endif diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index bb4f5fbbbd8..176fce9cc6b 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -200,6 +200,17 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) driver_unregister(&sdrv->driver); } +/** + * module_spi_driver() - Helper macro for registering a SPI driver + * @__spi_driver: spi_driver struct + * + * Helper macro for SPI drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_spi_driver(__spi_driver) \ + module_driver(__spi_driver, spi_register_driver, \ + spi_unregister_driver) /** * struct spi_master - interface to SPI master controller diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 58971e891f4..e1b005918bb 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -28,6 +28,7 @@ #define _LINUX_SRCU_H #include <linux/mutex.h> +#include <linux/rcupdate.h> struct srcu_struct_array { int c[2]; @@ -60,18 +61,10 @@ int __init_srcu_struct(struct srcu_struct *sp, const char *name, __init_srcu_struct((sp), #sp, &__srcu_key); \ }) -# define srcu_read_acquire(sp) \ - lock_acquire(&(sp)->dep_map, 0, 0, 2, 1, NULL, _THIS_IP_) -# define srcu_read_release(sp) \ - lock_release(&(sp)->dep_map, 1, _THIS_IP_) - #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ int init_srcu_struct(struct srcu_struct *sp); -# define srcu_read_acquire(sp) do { } while (0) -# define srcu_read_release(sp) do { } while (0) - #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ void cleanup_srcu_struct(struct srcu_struct *sp); @@ -90,12 +83,32 @@ long srcu_batches_completed(struct srcu_struct *sp); * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, * this assumes we are in an SRCU read-side critical section unless it can * prove otherwise. + * + * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot + * and while lockdep is disabled. + * + * Note that if the CPU is in the idle loop from an RCU point of view + * (ie: that we are in the section between rcu_idle_enter() and + * rcu_idle_exit()) then srcu_read_lock_held() returns false even if + * the CPU did an srcu_read_lock(). The reason for this is that RCU + * ignores CPUs that are in such a section, considering these as in + * extended quiescent state, so such a CPU is effectively never in an + * RCU read-side critical section regardless of what RCU primitives it + * invokes. This state of affairs is required --- we need to keep an + * RCU-free window in idle where the CPU may possibly enter into low + * power mode. This way we can notice an extended quiescent state to + * other CPUs that started a grace period. Otherwise we would delay any + * grace period as long as we run in the idle task. 
*/ static inline int srcu_read_lock_held(struct srcu_struct *sp) { - if (debug_locks) - return lock_is_held(&sp->dep_map); - return 1; + if (rcu_is_cpu_idle()) + return 0; + + if (!debug_lockdep_rcu_enabled()) + return 1; + + return lock_is_held(&sp->dep_map); } #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ @@ -145,12 +158,17 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp) * one way to indirectly wait on an SRCU grace period is to acquire * a mutex that is held elsewhere while calling synchronize_srcu() or * synchronize_srcu_expedited(). + * + * Note that srcu_read_lock() and the matching srcu_read_unlock() must + * occur in the same context, for example, it is illegal to invoke + * srcu_read_unlock() in an irq handler if the matching srcu_read_lock() + * was invoked in process context. */ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) { int retval = __srcu_read_lock(sp); - srcu_read_acquire(sp); + rcu_lock_acquire(&(sp)->dep_map); return retval; } @@ -164,8 +182,51 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp) { - srcu_read_release(sp); + rcu_lock_release(&(sp)->dep_map); + __srcu_read_unlock(sp, idx); +} + +/** + * srcu_read_lock_raw - register a new reader for an SRCU-protected structure. + * @sp: srcu_struct in which to register the new reader. + * + * Enter an SRCU read-side critical section. Similar to srcu_read_lock(), + * but avoids the RCU-lockdep checking. This means that it is legal to + * use srcu_read_lock_raw() in one context, for example, in an exception + * handler, and then have the matching srcu_read_unlock_raw() in another + * context, for example in the task that took the exception. + * + * However, the entire SRCU read-side critical section must reside within a + * single task. For example, beware of using srcu_read_lock_raw() in + * a device interrupt handler and srcu_read_unlock() in the interrupted + * task: This will not work if interrupts are threaded. + */ +static inline int srcu_read_lock_raw(struct srcu_struct *sp) +{ + unsigned long flags; + int ret; + + local_irq_save(flags); + ret = __srcu_read_lock(sp); + local_irq_restore(flags); + return ret; +} + +/** + * srcu_read_unlock_raw - unregister reader from an SRCU-protected structure. + * @sp: srcu_struct in which to unregister the old reader. + * @idx: return value from corresponding srcu_read_lock_raw(). + * + * Exit an SRCU read-side critical section without lockdep-RCU checking. + * See srcu_read_lock_raw() for more details. 
+ */ +static inline void srcu_read_unlock_raw(struct srcu_struct *sp, int idx) +{ + unsigned long flags; + + local_irq_save(flags); __srcu_read_unlock(sp, idx); + local_irq_restore(flags); } #endif diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 061e560251b..dcf35b0f303 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -94,6 +94,15 @@ struct ssb_sprom { } ghz5; /* 5GHz band */ } antenna_gain; + struct { + struct { + u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut; + } ghz2; + struct { + u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut; + } ghz5; + } fem; + /* TODO - add any parameters needed from rev 2, 3, 4, 5 or 8 SPROMs */ }; diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h index 98941203a27..c814ae6eeb2 100644 --- a/include/linux/ssb/ssb_regs.h +++ b/include/linux/ssb/ssb_regs.h @@ -432,6 +432,23 @@ #define SSB_SPROM8_RXPO2G 0x00FF /* 2GHz RX power offset */ #define SSB_SPROM8_RXPO5G 0xFF00 /* 5GHz RX power offset */ #define SSB_SPROM8_RXPO5G_SHIFT 8 +#define SSB_SPROM8_FEM2G 0x00AE +#define SSB_SPROM8_FEM5G 0x00B0 +#define SSB_SROM8_FEM_TSSIPOS 0x0001 +#define SSB_SROM8_FEM_TSSIPOS_SHIFT 0 +#define SSB_SROM8_FEM_EXTPA_GAIN 0x0006 +#define SSB_SROM8_FEM_EXTPA_GAIN_SHIFT 1 +#define SSB_SROM8_FEM_PDET_RANGE 0x00F8 +#define SSB_SROM8_FEM_PDET_RANGE_SHIFT 3 +#define SSB_SROM8_FEM_TR_ISO 0x0700 +#define SSB_SROM8_FEM_TR_ISO_SHIFT 8 +#define SSB_SROM8_FEM_ANTSWLUT 0xF800 +#define SSB_SROM8_FEM_ANTSWLUT_SHIFT 11 +#define SSB_SPROM8_THERMAL 0x00B2 +#define SSB_SPROM8_MPWR_RAWTS 0x00B4 +#define SSB_SPROM8_TS_SLP_OPT_CORRX 0x00B6 +#define SSB_SPROM8_FOC_HWIQ_IQSWP 0x00B8 +#define SSB_SPROM8_PHYCAL_TEMPDELTA 0x00BA #define SSB_SPROM8_MAXP_BG 0x00C0 /* Max Power 2GHz in path 1 */ #define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power 2GHz */ #define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */ diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 5efd8cef389..57531f8e595 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -203,7 +203,7 @@ extern void cache_unregister(struct cache_detail *cd); extern void cache_unregister_net(struct cache_detail *cd, struct net *net); extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *, - mode_t, struct cache_detail *); + umode_t, struct cache_detail *); extern void sunrpc_cache_unregister_pipefs(struct cache_detail *); extern void qword_add(char **bpp, int *lp, char *str); diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 3d8f9c44e27..2c5993a17c3 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -215,7 +215,7 @@ static inline bool __rpc_copy_addr4(struct sockaddr *dst, return true; } -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, const struct sockaddr *sap2) { @@ -237,10 +237,10 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst, struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst; dsin6->sin6_family = ssin6->sin6_family; - ipv6_addr_copy(&dsin6->sin6_addr, &ssin6->sin6_addr); + dsin6->sin6_addr = ssin6->sin6_addr; return true; } -#else /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ +#else /* !(IS_ENABLED(CONFIG_IPV6) */ static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, const struct sockaddr *sap2) { @@ -252,7 +252,7 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst, { return false; } -#endif /* !(CONFIG_IPV6 
|| CONFIG_IPV6_MODULE) */ +#endif /* !(IS_ENABLED(CONFIG_IPV6) */ /** * rpc_cmp_addr - compare the address portion of two sockaddrs. diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index e4ea43058d8..2bb03d77375 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -55,7 +55,7 @@ extern int rpc_remove_client_dir(struct dentry *); struct cache_detail; extern struct dentry *rpc_create_cache_dir(struct dentry *, struct qstr *, - mode_t umode, + umode_t umode, struct cache_detail *); extern void rpc_remove_cache_dir(struct dentry *); diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 57a692432f8..95040cc3310 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -6,6 +6,7 @@ #include <linux/init.h> #include <linux/pm.h> #include <linux/mm.h> +#include <linux/freezer.h> #include <asm/errno.h> #ifdef CONFIG_VT @@ -331,6 +332,8 @@ static inline bool system_entering_hibernation(void) { return false; } #define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */ #define PM_POST_RESTORE 0x0006 /* Restore failed */ +extern struct mutex pm_mutex; + #ifdef CONFIG_PM_SLEEP void save_processor_state(void); void restore_processor_state(void); @@ -351,6 +354,19 @@ extern bool events_check_enabled; extern bool pm_wakeup_pending(void); extern bool pm_get_wakeup_count(unsigned int *count); extern bool pm_save_wakeup_count(unsigned int count); + +static inline void lock_system_sleep(void) +{ + freezer_do_not_count(); + mutex_lock(&pm_mutex); +} + +static inline void unlock_system_sleep(void) +{ + mutex_unlock(&pm_mutex); + freezer_count(); +} + #else /* !CONFIG_PM_SLEEP */ static inline int register_pm_notifier(struct notifier_block *nb) @@ -366,28 +382,11 @@ static inline int unregister_pm_notifier(struct notifier_block *nb) #define pm_notifier(fn, pri) do { (void)(fn); } while (0) static inline bool pm_wakeup_pending(void) { return false; } -#endif /* !CONFIG_PM_SLEEP */ - -extern struct mutex pm_mutex; -#ifndef CONFIG_HIBERNATE_CALLBACKS static inline void lock_system_sleep(void) {} static inline void unlock_system_sleep(void) {} -#else - -/* Let some subsystems like memory hotadd exclude hibernation */ - -static inline void lock_system_sleep(void) -{ - mutex_lock(&pm_mutex); -} - -static inline void unlock_system_sleep(void) -{ - mutex_unlock(&pm_mutex); -} -#endif +#endif /* !CONFIG_PM_SLEEP */ #ifdef CONFIG_ARCH_SAVE_PAGE_KEYS /* diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 86a24b1166d..515669fa3c1 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -475,7 +475,7 @@ asmlinkage long sys_mincore(unsigned long start, size_t len, asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *put_old); asmlinkage long sys_chroot(const char __user *filename); -asmlinkage long sys_mknod(const char __user *filename, int mode, +asmlinkage long sys_mknod(const char __user *filename, umode_t mode, unsigned dev); asmlinkage long sys_link(const char __user *oldname, const char __user *newname); @@ -483,8 +483,8 @@ asmlinkage long sys_symlink(const char __user *old, const char __user *new); asmlinkage long sys_unlink(const char __user *pathname); asmlinkage long sys_rename(const char __user *oldname, const char __user *newname); -asmlinkage long sys_chmod(const char __user *filename, mode_t mode); -asmlinkage long sys_fchmod(unsigned int fd, mode_t mode); +asmlinkage long sys_chmod(const char __user *filename, umode_t mode); +asmlinkage long 
sys_fchmod(unsigned int fd, umode_t mode); asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg); #if BITS_PER_LONG == 32 @@ -517,9 +517,9 @@ asmlinkage long sys_sendfile64(int out_fd, int in_fd, loff_t __user *offset, size_t count); asmlinkage long sys_readlink(const char __user *path, char __user *buf, int bufsiz); -asmlinkage long sys_creat(const char __user *pathname, int mode); +asmlinkage long sys_creat(const char __user *pathname, umode_t mode); asmlinkage long sys_open(const char __user *filename, - int flags, int mode); + int flags, umode_t mode); asmlinkage long sys_close(unsigned int fd); asmlinkage long sys_access(const char __user *filename, int mode); asmlinkage long sys_vhangup(void); @@ -582,7 +582,7 @@ asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec, asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h); asmlinkage long sys_getcwd(char __user *buf, unsigned long size); -asmlinkage long sys_mkdir(const char __user *pathname, int mode); +asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode); asmlinkage long sys_chdir(const char __user *filename); asmlinkage long sys_fchdir(unsigned int fd); asmlinkage long sys_rmdir(const char __user *pathname); @@ -679,7 +679,7 @@ asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second, unsigned long third, void __user *ptr, long fifth); -asmlinkage long sys_mq_open(const char __user *name, int oflag, mode_t mode, struct mq_attr __user *attr); +asmlinkage long sys_mq_open(const char __user *name, int oflag, umode_t mode, struct mq_attr __user *attr); asmlinkage long sys_mq_unlink(const char __user *name); asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec __user *abs_timeout); asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct timespec __user *abs_timeout); @@ -753,11 +753,11 @@ asmlinkage long sys_inotify_rm_watch(int fd, __s32 wd); asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus); asmlinkage long sys_spu_create(const char __user *name, - unsigned int flags, mode_t mode, int fd); + unsigned int flags, umode_t mode, int fd); -asmlinkage long sys_mknodat(int dfd, const char __user * filename, int mode, +asmlinkage long sys_mknodat(int dfd, const char __user * filename, umode_t mode, unsigned dev); -asmlinkage long sys_mkdirat(int dfd, const char __user * pathname, int mode); +asmlinkage long sys_mkdirat(int dfd, const char __user * pathname, umode_t mode); asmlinkage long sys_unlinkat(int dfd, const char __user * pathname, int flag); asmlinkage long sys_symlinkat(const char __user * oldname, int newdfd, const char __user * newname); @@ -769,11 +769,11 @@ asmlinkage long sys_futimesat(int dfd, const char __user *filename, struct timeval __user *utimes); asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode); asmlinkage long sys_fchmodat(int dfd, const char __user * filename, - mode_t mode); + umode_t mode); asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group, int flag); asmlinkage long sys_openat(int dfd, const char __user *filename, int flags, - int mode); + umode_t mode); asmlinkage long sys_newfstatat(int dfd, const char __user *filename, 
struct stat __user *statbuf, int flag); asmlinkage long sys_fstatat64(int dfd, const char __user *filename, diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 703cfa33a3c..bb9127dd814 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -1038,7 +1038,7 @@ struct ctl_table const char *procname; /* Text ID for /proc/sys, or zero */ void *data; int maxlen; - mode_t mode; + umode_t mode; struct ctl_table *child; struct ctl_table *parent; /* Automatically set */ proc_handler *proc_handler; /* Callback for text formatting */ diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index dac0859e644..0010009b2f0 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -25,7 +25,7 @@ enum kobj_ns_type; struct attribute { const char *name; - mode_t mode; + umode_t mode; #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lock_class_key *key; struct lock_class_key skey; @@ -55,7 +55,7 @@ do { \ struct attribute_group { const char *name; - mode_t (*is_visible)(struct kobject *, + umode_t (*is_visible)(struct kobject *, struct attribute *, int); struct attribute **attrs; }; @@ -133,7 +133,7 @@ int __must_check sysfs_create_file(struct kobject *kobj, int __must_check sysfs_create_files(struct kobject *kobj, const struct attribute **attr); int __must_check sysfs_chmod_file(struct kobject *kobj, - const struct attribute *attr, mode_t mode); + const struct attribute *attr, umode_t mode); void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr); void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr); @@ -221,7 +221,7 @@ static inline int sysfs_create_files(struct kobject *kobj, } static inline int sysfs_chmod_file(struct kobject *kobj, - const struct attribute *attr, mode_t mode) + const struct attribute *attr, umode_t mode) { return 0; } diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 7f59ee94698..46a85c9e1f2 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -238,6 +238,11 @@ struct tcp_sack_block { u32 end_seq; }; +/*These are used to set the sack_ok field in struct tcp_options_received */ +#define TCP_SACK_SEEN (1 << 0) /*1 = peer is SACK capable, */ +#define TCP_FACK_ENABLED (1 << 1) /*1 = FACK is enabled locally*/ +#define TCP_DSACK_SEEN (1 << 2) /*1 = DSACK was received from peer*/ + struct tcp_options_received { /* PAWS/RTTM data */ long ts_recent_stamp;/* Time we stored ts_recent (for aging) */ diff --git a/include/linux/tick.h b/include/linux/tick.h index b232ccc0ee2..ab8be90b5cc 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -7,6 +7,7 @@ #define _LINUX_TICK_H #include <linux/clockchips.h> +#include <linux/irqflags.h> #ifdef CONFIG_GENERIC_CLOCKEVENTS @@ -121,14 +122,16 @@ static inline int tick_oneshot_mode_active(void) { return 0; } #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ # ifdef CONFIG_NO_HZ -extern void tick_nohz_stop_sched_tick(int inidle); -extern void tick_nohz_restart_sched_tick(void); +extern void tick_nohz_idle_enter(void); +extern void tick_nohz_idle_exit(void); +extern void tick_nohz_irq_exit(void); extern ktime_t tick_nohz_get_sleep_length(void); extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); # else -static inline void tick_nohz_stop_sched_tick(int inidle) { } -static inline void tick_nohz_restart_sched_tick(void) { } +static inline void tick_nohz_idle_enter(void) { } +static inline void tick_nohz_idle_exit(void) { } + static inline ktime_t tick_nohz_get_sleep_length(void) { ktime_t len = { 
.tv64 = NSEC_PER_SEC/HZ }; diff --git a/include/linux/types.h b/include/linux/types.h index 57a97234bec..e5fa5034551 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -24,6 +24,7 @@ typedef __kernel_fd_set fd_set; typedef __kernel_dev_t dev_t; typedef __kernel_ino_t ino_t; typedef __kernel_mode_t mode_t; +typedef unsigned short umode_t; typedef __kernel_nlink_t nlink_t; typedef __kernel_off_t off_t; typedef __kernel_pid_t pid_t; @@ -188,7 +189,7 @@ typedef __u32 __bitwise __wsum; * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid * common 32/64-bit compat problems. * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other - * architectures) and to 8-byte boundaries on 64-bit architetures. The new + * architectures) and to 8-byte boundaries on 64-bit architectures. The new * aligned_64 type enforces 8-byte alignment so that structs containing * aligned_64 values have the same alignment on 32-bit and 64-bit architectures. * No conversions are necessary between 32-bit user-space and a 64-bit kernel. diff --git a/include/linux/unix_diag.h b/include/linux/unix_diag.h new file mode 100644 index 00000000000..b1d2bf16b33 --- /dev/null +++ b/include/linux/unix_diag.h @@ -0,0 +1,54 @@ +#ifndef __UNIX_DIAG_H__ +#define __UNIX_DIAG_H__ + +#include <linux/types.h> + +struct unix_diag_req { + __u8 sdiag_family; + __u8 sdiag_protocol; + __u16 pad; + __u32 udiag_states; + __u32 udiag_ino; + __u32 udiag_show; + __u32 udiag_cookie[2]; +}; + +#define UDIAG_SHOW_NAME 0x00000001 /* show name (not path) */ +#define UDIAG_SHOW_VFS 0x00000002 /* show VFS inode info */ +#define UDIAG_SHOW_PEER 0x00000004 /* show peer socket info */ +#define UDIAG_SHOW_ICONS 0x00000008 /* show pending connections */ +#define UDIAG_SHOW_RQLEN 0x00000010 /* show skb receive queue len */ +#define UDIAG_SHOW_MEMINFO 0x00000020 /* show memory info of a socket */ + +struct unix_diag_msg { + __u8 udiag_family; + __u8 udiag_type; + __u8 udiag_state; + __u8 pad; + + __u32 udiag_ino; + __u32 udiag_cookie[2]; +}; + +enum { + UNIX_DIAG_NAME, + UNIX_DIAG_VFS, + UNIX_DIAG_PEER, + UNIX_DIAG_ICONS, + UNIX_DIAG_RQLEN, + UNIX_DIAG_MEMINFO, + + UNIX_DIAG_MAX, +}; + +struct unix_diag_vfs { + __u32 udiag_vfs_ino; + __u32 udiag_vfs_dev; +}; + +struct unix_diag_rqlen { + __u32 udiag_rqueue; + __u32 udiag_wqueue; +}; + +#endif diff --git a/include/linux/usb.h b/include/linux/usb.h index d3d0c137433..7f8d4d61ca4 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -935,7 +935,7 @@ extern struct bus_type usb_bus_type; */ struct usb_class_driver { char *name; - char *(*devnode)(struct device *dev, mode_t *mode); + char *(*devnode)(struct device *dev, umode_t *mode); const struct file_operations *fops; int minor_base; }; @@ -953,6 +953,18 @@ extern int usb_register_driver(struct usb_driver *, struct module *, extern void usb_deregister(struct usb_driver *); +/** + * module_usb_driver() - Helper macro for registering a USB driver + * @__usb_driver: usb_driver struct + * + * Helper macro for USB drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. 
Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_usb_driver(__usb_driver) \ + module_driver(__usb_driver, usb_register, \ + usb_deregister) + extern int usb_register_device_driver(struct usb_device_driver *, struct module *); extern void usb_deregister_device_driver(struct usb_device_driver *); diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index add4790b21f..5206d6541da 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -85,6 +85,8 @@ * @reset: reset the device * vdev: the virtio device * After this, status and feature negotiation must be done again + * Device must not be reset from its vq/config callbacks, or in + * parallel with being added/removed. * @find_vqs: find virtqueues and instantiate them. * vdev: the virtio_device * nvqs: the number of virtqueues to find @@ -100,6 +102,10 @@ * vdev: the virtio_device * This gives the final feature bits for the device: it can change * the dev->feature bits if it wants. + * @bus_name: return the bus name associated with the device + * vdev: the virtio_device + * This returns a pointer to the bus name a la pci_name from which + * the caller can then copy. */ typedef void vq_callback_t(struct virtqueue *); struct virtio_config_ops { @@ -117,6 +123,7 @@ struct virtio_config_ops { void (*del_vqs)(struct virtio_device *); u32 (*get_features)(struct virtio_device *vdev); void (*finalize_features)(struct virtio_device *vdev); + const char *(*bus_name)(struct virtio_device *vdev); }; /* If driver didn't advertise the feature, it will never appear. */ @@ -182,5 +189,14 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, return ERR_PTR(err); return vq; } + +static inline +const char *virtio_bus_name(struct virtio_device *vdev) +{ + if (!vdev->config->bus_name) + return "virtio"; + return vdev->config->bus_name(vdev); +} + #endif /* __KERNEL__ */ #endif /* _LINUX_VIRTIO_CONFIG_H */ diff --git a/include/linux/virtio_mmio.h b/include/linux/virtio_mmio.h index 27c7edefbc8..5c7b6f0daef 100644 --- a/include/linux/virtio_mmio.h +++ b/include/linux/virtio_mmio.h @@ -63,7 +63,7 @@ #define VIRTIO_MMIO_GUEST_FEATURES 0x020 /* Activated features set selector - Write Only */ -#define VIRTIO_MMIO_GUEST_FEATURES_SET 0x024 +#define VIRTIO_MMIO_GUEST_FEATURES_SEL 0x024 /* Guest's memory page size in bytes - Write Only */ #define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 687fb11e201..dcdfc2bda92 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -119,7 +119,7 @@ unmap_kernel_range(unsigned long addr, unsigned long size) #endif /* Allocate/destroy a 'vmalloc' VM area. 
*/ -extern struct vm_struct *alloc_vm_area(size_t size); +extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes); extern void free_vm_area(struct vm_struct *area); /* for /dev/kmem */ @@ -131,6 +131,7 @@ extern long vwrite(char *buf, char *addr, unsigned long count); */ extern rwlock_t vmlist_lock; extern struct vm_struct *vmlist; +extern __init void vm_area_add_early(struct vm_struct *vm); extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); #ifdef CONFIG_SMP diff --git a/include/linux/wait.h b/include/linux/wait.h index 3efc9f3f43a..a9ce45e8501 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -77,13 +77,13 @@ struct task_struct; #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ { .flags = word, .bit_nr = bit, } -extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *); +extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *); #define init_waitqueue_head(q) \ do { \ static struct lock_class_key __key; \ \ - __init_waitqueue_head((q), &__key); \ + __init_waitqueue_head((q), #q, &__key); \ } while (0) #ifdef CONFIG_LOCKDEP diff --git a/include/linux/wanrouter.h b/include/linux/wanrouter.h index e0aa39612eb..3157cc1fada 100644 --- a/include/linux/wanrouter.h +++ b/include/linux/wanrouter.h @@ -309,7 +309,7 @@ typedef struct wandev_conf #define WANOPT_EVEN 2 /* CHDLC Protocol Options */ -/* DF Commmented out for now. +/* DF Commented out for now. #define WANOPT_CHDLC_NO_DCD IGNORE_DCD_FOR_LINK_STAT #define WANOPT_CHDLC_NO_CTS IGNORE_CTS_FOR_LINK_STAT diff --git a/include/linux/wl12xx.h b/include/linux/wl12xx.h index 4b697395326..0d6373195d3 100644 --- a/include/linux/wl12xx.h +++ b/include/linux/wl12xx.h @@ -54,6 +54,9 @@ struct wl12xx_platform_data { int board_ref_clock; int board_tcxo_clock; unsigned long platform_quirks; + bool pwr_in_suspend; + + struct wl1271_if_operations *ops; }; /* Platform does not support level trigger interrupts */ @@ -73,6 +76,6 @@ int wl12xx_set_platform_data(const struct wl12xx_platform_data *data) #endif -const struct wl12xx_platform_data *wl12xx_get_platform_data(void); +struct wl12xx_platform_data *wl12xx_get_platform_data(void); #endif diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index b1377b931eb..5fb2c3d10c0 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -254,7 +254,7 @@ unsigned long soc_camera_apply_board_flags(struct soc_camera_link *icl, static inline struct video_device *soc_camera_i2c_to_vdev(const struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); - struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id; + struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd); return icd ? 
icd->vdev : NULL; } @@ -279,6 +279,11 @@ static inline struct soc_camera_device *soc_camera_from_vbq(const struct videobu return container_of(vq, struct soc_camera_device, vb_vidq); } +static inline u32 soc_camera_grp_id(const struct soc_camera_device *icd) +{ + return (icd->iface << 8) | (icd->devnum + 1); +} + void soc_camera_lock(struct vb2_queue *vq); void soc_camera_unlock(struct vb2_queue *vq); diff --git a/include/net/addrconf.h b/include/net/addrconf.h index cbc6bb0a683..f68dce2d8d8 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -151,7 +151,8 @@ extern int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *src_addr); extern int ipv6_is_mld(struct sk_buff *skb, int nexthdr); -extern void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len); +extern void addrconf_prefix_rcv(struct net_device *dev, + u8 *opt, int len, bool sllao); /* * anycast prototypes (anycast.c) diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 91ab5b01678..5a4e29b168c 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -11,10 +11,13 @@ extern void unix_notinflight(struct file *fp); extern void unix_gc(void); extern void wait_for_unix_gc(void); extern struct sock *unix_get_socket(struct file *filp); +extern struct sock *unix_peer_get(struct sock *); #define UNIX_HASH_SIZE 256 extern unsigned int unix_tot_inflight; +extern spinlock_t unix_table_lock; +extern struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1]; struct unix_address { atomic_t refcnt; @@ -63,6 +66,9 @@ struct unix_sock { #define peer_wait peer_wq.wait +long unix_inq_len(struct sock *sk); +long unix_outq_len(struct sock *sk); + #ifdef CONFIG_SYSCTL extern int unix_sysctl_register(struct net *net); extern void unix_sysctl_unregister(struct net *net); diff --git a/include/net/arp.h b/include/net/arp.h index 4979af8b155..0013dc87940 100644 --- a/include/net/arp.h +++ b/include/net/arp.h @@ -23,7 +23,7 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct neigh_table *tbl, str rcu_read_lock_bh(); nht = rcu_dereference_bh(tbl->nht); - hash_val = arp_hashfn(key, dev, nht->hash_rnd) >> (32 - nht->hash_shift); + hash_val = arp_hashfn(key, dev, nht->hash_rnd[0]) >> (32 - nht->hash_shift); for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); n != NULL; n = rcu_dereference_bh(n->next)) { diff --git a/include/net/atmclip.h b/include/net/atmclip.h index 497ef6444a7..5865924d4aa 100644 --- a/include/net/atmclip.h +++ b/include/net/atmclip.h @@ -15,7 +15,6 @@ #define CLIP_VCC(vcc) ((struct clip_vcc *) ((vcc)->user_back)) -#define NEIGH2ENTRY(neigh) ((struct atmarp_entry *) (neigh)->primary_key) struct sk_buff; @@ -36,24 +35,18 @@ struct clip_vcc { struct atmarp_entry { - __be32 ip; /* IP address */ struct clip_vcc *vccs; /* active VCCs; NULL if resolution is pending */ unsigned long expires; /* entry expiration time */ struct neighbour *neigh; /* neighbour back-pointer */ }; - #define PRIV(dev) ((struct clip_priv *) netdev_priv(dev)) - struct clip_priv { int number; /* for convenience ... 
*/ spinlock_t xoff_lock; /* ensures that pop is atomic (SMP) */ struct net_device *next; /* next CLIP interface */ }; - -extern struct neigh_table *clip_tbl_hook; - #endif diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index e86af08293a..abaad6ed9b8 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -36,6 +36,11 @@ #define PF_BLUETOOTH AF_BLUETOOTH #endif +/* Bluetooth versions */ +#define BLUETOOTH_VER_1_1 1 +#define BLUETOOTH_VER_1_2 2 +#define BLUETOOTH_VER_2_0 3 + /* Reserv for core and drivers use */ #define BT_SKB_RESERVE 8 @@ -77,6 +82,33 @@ struct bt_power { #define BT_POWER_FORCE_ACTIVE_OFF 0 #define BT_POWER_FORCE_ACTIVE_ON 1 +#define BT_CHANNEL_POLICY 10 + +/* BR/EDR only (default policy) + * AMP controllers cannot be used. + * Channel move requests from the remote device are denied. + * If the L2CAP channel is currently using AMP, move the channel to BR/EDR. + */ +#define BT_CHANNEL_POLICY_BREDR_ONLY 0 + +/* BR/EDR Preferred + * Allow use of AMP controllers. + * If the L2CAP channel is currently on AMP, move it to BR/EDR. + * Channel move requests from the remote device are allowed. + */ +#define BT_CHANNEL_POLICY_BREDR_PREFERRED 1 + +/* AMP Preferred + * Allow use of AMP controllers + * If the L2CAP channel is currently on BR/EDR and AMP controller + * resources are available, initiate a channel move to AMP. + * Channel move requests from the remote device are allowed. + * If the L2CAP socket has not been connected yet, try to create + * and configure the channel directly on an AMP controller rather + * than BR/EDR. + */ +#define BT_CHANNEL_POLICY_AMP_PREFERRED 2 + __printf(2, 3) int bt_printk(const char *level, const char *fmt, ...); @@ -158,7 +190,7 @@ struct bt_skb_cb { __u8 pkt_type; __u8 incoming; __u16 expect; - __u8 tx_seq; + __u16 tx_seq; __u8 retries; __u8 sar; unsigned short channel; @@ -218,32 +250,10 @@ extern void bt_sysfs_cleanup(void); extern struct dentry *bt_debugfs; -#ifdef CONFIG_BT_L2CAP int l2cap_init(void); void l2cap_exit(void); -#else -static inline int l2cap_init(void) -{ - return 0; -} - -static inline void l2cap_exit(void) -{ -} -#endif -#ifdef CONFIG_BT_SCO int sco_init(void); void sco_exit(void); -#else -static inline int sco_init(void) -{ - return 0; -} - -static inline void sco_exit(void) -{ -} -#endif #endif /* __BLUETOOTH_H */ diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index aaf79af7243..5b2fed5eebf 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -88,6 +88,14 @@ enum { HCI_RESET, }; +/* + * BR/EDR and/or LE controller flags: the flags defined here should represent + * states from the controller. 
+ */ +enum { + HCI_LE_SCAN, +}; + /* HCI ioctl defines */ #define HCIDEVUP _IOW('H', 201, int) #define HCIDEVDOWN _IOW('H', 202, int) @@ -202,6 +210,7 @@ enum { #define LMP_EV4 0x01 #define LMP_EV5 0x02 +#define LMP_NO_BREDR 0x20 #define LMP_LE 0x40 #define LMP_SNIFF_SUBR 0x02 @@ -264,6 +273,17 @@ enum { #define HCI_LK_SMP_IRK 0x82 #define HCI_LK_SMP_CSRK 0x83 +/* ---- HCI Error Codes ---- */ +#define HCI_ERROR_AUTH_FAILURE 0x05 +#define HCI_ERROR_REJ_BAD_ADDR 0x0f +#define HCI_ERROR_REMOTE_USER_TERM 0x13 +#define HCI_ERROR_LOCAL_HOST_TERM 0x16 +#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18 + +/* Flow control modes */ +#define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00 +#define HCI_FLOW_CTL_MODE_BLOCK_BASED 0x01 + /* ----- HCI Commands ---- */ #define HCI_OP_NOP 0x0000 @@ -446,6 +466,14 @@ struct hci_rp_user_confirm_reply { #define HCI_OP_USER_CONFIRM_NEG_REPLY 0x042d +#define HCI_OP_USER_PASSKEY_REPLY 0x042e +struct hci_cp_user_passkey_reply { + bdaddr_t bdaddr; + __le32 passkey; +} __packed; + +#define HCI_OP_USER_PASSKEY_NEG_REPLY 0x042f + #define HCI_OP_REMOTE_OOB_DATA_REPLY 0x0430 struct hci_cp_remote_oob_data_reply { bdaddr_t bdaddr; @@ -662,6 +690,12 @@ struct hci_rp_read_local_oob_data { #define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58 +#define HCI_OP_READ_FLOW_CONTROL_MODE 0x0c66 +struct hci_rp_read_flow_control_mode { + __u8 status; + __u8 mode; +} __packed; + #define HCI_OP_WRITE_LE_HOST_SUPPORTED 0x0c6d struct hci_cp_write_le_host_supported { __u8 le; @@ -716,6 +750,14 @@ struct hci_rp_read_bd_addr { bdaddr_t bdaddr; } __packed; +#define HCI_OP_READ_DATA_BLOCK_SIZE 0x100a +struct hci_rp_read_data_block_size { + __u8 status; + __le16 max_acl_len; + __le16 block_len; + __le16 num_blocks; +} __packed; + #define HCI_OP_WRITE_PAGE_SCAN_ACTIVITY 0x0c1c struct hci_cp_write_page_scan_activity { __le16 interval; @@ -726,6 +768,21 @@ struct hci_cp_write_page_scan_activity { #define PAGE_SCAN_TYPE_STANDARD 0x00 #define PAGE_SCAN_TYPE_INTERLACED 0x01 +#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409 +struct hci_rp_read_local_amp_info { + __u8 status; + __u8 amp_status; + __le32 total_bw; + __le32 max_bw; + __le32 min_latency; + __le32 max_pdu; + __u8 amp_type; + __le16 pal_cap; + __le16 max_assoc_size; + __le32 max_flush_to; + __le32 be_flush_to; +} __packed; + #define HCI_OP_LE_SET_EVENT_MASK 0x2001 struct hci_cp_le_set_event_mask { __u8 mask[8]; @@ -738,6 +795,18 @@ struct hci_rp_le_read_buffer_size { __u8 le_max_pkt; } __packed; +#define HCI_OP_LE_SET_SCAN_PARAM 0x200b +struct hci_cp_le_set_scan_param { + __u8 type; + __le16 interval; + __le16 window; + __u8 own_address_type; + __u8 filter_policy; +} __packed; + +#define LE_SCANNING_DISABLED 0x00 +#define LE_SCANNING_ENABLED 0x01 + #define HCI_OP_LE_SET_SCAN_ENABLE 0x200c struct hci_cp_le_set_scan_enable { __u8 enable; @@ -913,9 +982,14 @@ struct hci_ev_role_change { } __packed; #define HCI_EV_NUM_COMP_PKTS 0x13 +struct hci_comp_pkts_info { + __le16 handle; + __le16 count; +} __packed; + struct hci_ev_num_comp_pkts { __u8 num_hndl; - /* variable length part */ + struct hci_comp_pkts_info handles[0]; } __packed; #define HCI_EV_MODE_CHANGE 0x14 @@ -1054,6 +1128,11 @@ struct hci_ev_user_confirm_req { __le32 passkey; } __packed; +#define HCI_EV_USER_PASSKEY_REQUEST 0x34 +struct hci_ev_user_passkey_req { + bdaddr_t bdaddr; +} __packed; + #define HCI_EV_REMOTE_OOB_DATA_REQUEST 0x35 struct hci_ev_remote_oob_data_request { bdaddr_t bdaddr; @@ -1309,4 +1388,6 @@ struct hci_inquiry_req { }; #define IREQ_CACHE_FLUSH 0x0001 +extern int enable_hs; + #endif /* __HCI_H */ 
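Just above, the hci.h hunk replaces the opaque "variable length part" of struct hci_ev_num_comp_pkts with an explicit struct hci_comp_pkts_info handles[0] flexible array, one little-endian handle/count pair per connection. A brief sketch of how an event handler could walk that array is below; the function name is illustrative only and not taken from the patch.

#include <linux/kernel.h>
#include <net/bluetooth/hci.h>

/* Hypothetical handler fragment: report completed packets per handle. */
static void example_num_comp_pkts(struct hci_ev_num_comp_pkts *ev)
{
	int i;

	for (i = 0; i < ev->num_hndl; i++) {
		__u16 handle = __le16_to_cpu(ev->handles[i].handle);
		__u16 count  = __le16_to_cpu(ev->handles[i].count);

		pr_debug("handle 0x%4.4x completed %u packets\n",
			 handle, count);
	}
}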
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 3779ea36225..5e2e9845849 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -28,9 +28,8 @@ #include <linux/interrupt.h> #include <net/bluetooth/hci.h> -/* HCI upper protocols */ -#define HCI_PROTO_L2CAP 0 -#define HCI_PROTO_SCO 1 +/* HCI priority */ +#define HCI_PRIO_MAX 7 /* HCI Core structures */ struct inquiry_data { @@ -51,14 +50,12 @@ struct inquiry_entry { }; struct inquiry_cache { - spinlock_t lock; __u32 timestamp; struct inquiry_entry *list; }; struct hci_conn_hash { struct list_head list; - spinlock_t lock; unsigned int acl_num; unsigned int sco_num; unsigned int le_num; @@ -115,7 +112,7 @@ struct adv_entry { #define NUM_REASSEMBLY 4 struct hci_dev { struct list_head list; - spinlock_t lock; + struct mutex lock; atomic_t refcnt; char name[8]; @@ -150,6 +147,19 @@ struct hci_dev { __u16 sniff_min_interval; __u16 sniff_max_interval; + __u8 amp_status; + __u32 amp_total_bw; + __u32 amp_max_bw; + __u32 amp_min_latency; + __u32 amp_max_pdu; + __u8 amp_type; + __u16 amp_pal_cap; + __u16 amp_assoc_size; + __u32 amp_max_flush_to; + __u32 amp_be_flush_to; + + __u8 flow_ctl_mode; + unsigned int auto_accept_delay; unsigned long quirks; @@ -166,6 +176,11 @@ struct hci_dev { unsigned int sco_pkts; unsigned int le_pkts; + __u16 block_len; + __u16 block_mtu; + __u16 num_blocks; + __u16 block_cnt; + unsigned long acl_last_tx; unsigned long sco_last_tx; unsigned long le_last_tx; @@ -173,13 +188,18 @@ struct hci_dev { struct workqueue_struct *workqueue; struct work_struct power_on; - struct work_struct power_off; - struct timer_list off_timer; + struct delayed_work power_off; + + __u16 discov_timeout; + struct delayed_work discov_off; + + struct delayed_work service_cache; struct timer_list cmd_timer; - struct tasklet_struct cmd_task; - struct tasklet_struct rx_task; - struct tasklet_struct tx_task; + + struct work_struct rx_work; + struct work_struct cmd_work; + struct work_struct tx_work; struct sk_buff_head rx_q; struct sk_buff_head raw_q; @@ -195,6 +215,8 @@ struct hci_dev { __u16 init_last_cmd; + struct list_head mgmt_pending; + struct inquiry_cache inq_cache; struct hci_conn_hash conn_hash; struct list_head blacklist; @@ -206,7 +228,7 @@ struct hci_dev { struct list_head remote_oob_data; struct list_head adv_entries; - struct timer_list adv_timer; + struct delayed_work adv_work; struct hci_dev_stats stat; @@ -226,6 +248,8 @@ struct hci_dev { struct module *owner; + unsigned long dev_flags; + int (*open)(struct hci_dev *hdev); int (*close)(struct hci_dev *hdev); int (*flush)(struct hci_dev *hdev); @@ -273,20 +297,19 @@ struct hci_conn { unsigned int sent; struct sk_buff_head data_q; + struct list_head chan_list; - struct timer_list disc_timer; + struct delayed_work disc_work; struct timer_list idle_timer; struct timer_list auto_accept_timer; - struct work_struct work_add; - struct work_struct work_del; - struct device dev; atomic_t devref; struct hci_dev *hdev; void *l2cap_data; void *sco_data; + void *smp_conn; struct hci_conn *link; @@ -295,25 +318,39 @@ struct hci_conn { void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason); }; -extern struct hci_proto *hci_proto[]; +struct hci_chan { + struct list_head list; + + struct hci_conn *conn; + struct sk_buff_head data_q; + unsigned int sent; +}; + extern struct list_head hci_dev_list; extern struct list_head hci_cb_list; extern rwlock_t hci_dev_list_lock; extern rwlock_t hci_cb_list_lock; +/* ----- HCI interface to 
upper protocols ----- */ +extern int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); +extern int l2cap_connect_cfm(struct hci_conn *hcon, u8 status); +extern int l2cap_disconn_ind(struct hci_conn *hcon); +extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason); +extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt); +extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); + +extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); +extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status); +extern int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason); +extern int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb); + /* ----- Inquiry cache ----- */ #define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */ #define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */ -#define inquiry_cache_lock(c) spin_lock(&c->lock) -#define inquiry_cache_unlock(c) spin_unlock(&c->lock) -#define inquiry_cache_lock_bh(c) spin_lock_bh(&c->lock) -#define inquiry_cache_unlock_bh(c) spin_unlock_bh(&c->lock) - static inline void inquiry_cache_init(struct hci_dev *hdev) { struct inquiry_cache *c = &hdev->inq_cache; - spin_lock_init(&c->lock); c->list = NULL; } @@ -353,15 +390,15 @@ static inline void hci_conn_hash_init(struct hci_dev *hdev) { struct hci_conn_hash *h = &hdev->conn_hash; INIT_LIST_HEAD(&h->list); - spin_lock_init(&h->lock); h->acl_num = 0; h->sco_num = 0; + h->le_num = 0; } static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) { struct hci_conn_hash *h = &hdev->conn_hash; - list_add(&c->list, &h->list); + list_add_rcu(&c->list, &h->list); switch (c->type) { case ACL_LINK: h->acl_num++; @@ -379,7 +416,10 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c) { struct hci_conn_hash *h = &hdev->conn_hash; - list_del(&c->list); + + list_del_rcu(&c->list); + synchronize_rcu(); + switch (c->type) { case ACL_LINK: h->acl_num--; @@ -414,14 +454,18 @@ static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev, __u16 handle) { struct hci_conn_hash *h = &hdev->conn_hash; - struct list_head *p; struct hci_conn *c; - list_for_each(p, &h->list) { - c = list_entry(p, struct hci_conn, list); - if (c->handle == handle) + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->handle == handle) { + rcu_read_unlock(); return c; + } } + rcu_read_unlock(); + return NULL; } @@ -429,14 +473,19 @@ static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev, __u8 type, bdaddr_t *ba) { struct hci_conn_hash *h = &hdev->conn_hash; - struct list_head *p; struct hci_conn *c; - list_for_each(p, &h->list) { - c = list_entry(p, struct hci_conn, list); - if (c->type == type && !bacmp(&c->dst, ba)) + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type == type && !bacmp(&c->dst, ba)) { + rcu_read_unlock(); return c; + } } + + rcu_read_unlock(); + return NULL; } @@ -444,14 +493,19 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, __u8 type, __u16 state) { struct hci_conn_hash *h = &hdev->conn_hash; - struct list_head *p; struct hci_conn *c; - list_for_each(p, &h->list) { - c = list_entry(p, struct hci_conn, list); - if (c->type == type && c->state == state) + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type == type && c->state == state) { + rcu_read_unlock(); return c; + } } 
+ + rcu_read_unlock(); + return NULL; } @@ -466,6 +520,10 @@ int hci_conn_del(struct hci_conn *conn); void hci_conn_hash_flush(struct hci_dev *hdev); void hci_conn_check_pending(struct hci_dev *hdev); +struct hci_chan *hci_chan_create(struct hci_conn *conn); +int hci_chan_del(struct hci_chan *chan); +void hci_chan_list_flush(struct hci_conn *conn); + struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type); int hci_conn_check_link_mode(struct hci_conn *conn); @@ -475,7 +533,6 @@ int hci_conn_change_link_key(struct hci_conn *conn); int hci_conn_switch_role(struct hci_conn *conn, __u8 role); void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active); -void hci_conn_enter_sniff_mode(struct hci_conn *conn); void hci_conn_hold_device(struct hci_conn *conn); void hci_conn_put_device(struct hci_conn *conn); @@ -483,7 +540,7 @@ void hci_conn_put_device(struct hci_conn *conn); static inline void hci_conn_hold(struct hci_conn *conn) { atomic_inc(&conn->refcnt); - del_timer(&conn->disc_timer); + cancel_delayed_work_sync(&conn->disc_work); } static inline void hci_conn_put(struct hci_conn *conn) @@ -502,7 +559,9 @@ static inline void hci_conn_put(struct hci_conn *conn) } else { timeo = msecs_to_jiffies(10); } - mod_timer(&conn->disc_timer, jiffies + timeo); + cancel_delayed_work_sync(&conn->disc_work); + queue_delayed_work(conn->hdev->workqueue, + &conn->disc_work, jiffies + timeo); } } @@ -534,10 +593,8 @@ static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d) try_module_get(d->owner) ? __hci_dev_hold(d) : NULL; \ }) -#define hci_dev_lock(d) spin_lock(&d->lock) -#define hci_dev_unlock(d) spin_unlock(&d->lock) -#define hci_dev_lock_bh(d) spin_lock_bh(&d->lock) -#define hci_dev_unlock_bh(d) spin_unlock_bh(&d->lock) +#define hci_dev_lock(d) mutex_lock(&d->lock) +#define hci_dev_unlock(d) mutex_unlock(&d->lock) struct hci_dev *hci_dev_get(int index); struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst); @@ -545,7 +602,7 @@ struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst); struct hci_dev *hci_alloc_dev(void); void hci_free_dev(struct hci_dev *hdev); int hci_register_dev(struct hci_dev *hdev); -int hci_unregister_dev(struct hci_dev *hdev); +void hci_unregister_dev(struct hci_dev *hdev); int hci_suspend_dev(struct hci_dev *hdev); int hci_resume_dev(struct hci_dev *hdev); int hci_dev_open(__u16 dev); @@ -599,8 +656,9 @@ int hci_recv_frame(struct sk_buff *skb); int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count); int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count); -int hci_register_sysfs(struct hci_dev *hdev); -void hci_unregister_sysfs(struct hci_dev *hdev); +void hci_init_sysfs(struct hci_dev *hdev); +int hci_add_sysfs(struct hci_dev *hdev); +void hci_del_sysfs(struct hci_dev *hdev); void hci_conn_init_sysfs(struct hci_conn *conn); void hci_conn_add_sysfs(struct hci_conn *conn); void hci_conn_del_sysfs(struct hci_conn *conn); @@ -621,53 +679,40 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define lmp_host_le_capable(dev) ((dev)->extfeatures[0] & LMP_HOST_LE) /* ----- HCI protocols ----- */ -struct hci_proto { - char *name; - unsigned int id; - unsigned long flags; - - void *priv; - - int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, - __u8 type); - int (*connect_cfm) (struct hci_conn *conn, __u8 status); - int (*disconn_ind) (struct hci_conn *conn); - int (*disconn_cfm) (struct hci_conn *conn, __u8 reason); - int (*recv_acldata) (struct hci_conn *conn, 
struct sk_buff *skb, - __u16 flags); - int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb); - int (*security_cfm) (struct hci_conn *conn, __u8 status, - __u8 encrypt); -}; - static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type) { - register struct hci_proto *hp; - int mask = 0; - - hp = hci_proto[HCI_PROTO_L2CAP]; - if (hp && hp->connect_ind) - mask |= hp->connect_ind(hdev, bdaddr, type); + switch (type) { + case ACL_LINK: + return l2cap_connect_ind(hdev, bdaddr); - hp = hci_proto[HCI_PROTO_SCO]; - if (hp && hp->connect_ind) - mask |= hp->connect_ind(hdev, bdaddr, type); + case SCO_LINK: + case ESCO_LINK: + return sco_connect_ind(hdev, bdaddr); - return mask; + default: + BT_ERR("unknown link type %d", type); + return -EINVAL; + } } static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status) { - register struct hci_proto *hp; + switch (conn->type) { + case ACL_LINK: + case LE_LINK: + l2cap_connect_cfm(conn, status); + break; - hp = hci_proto[HCI_PROTO_L2CAP]; - if (hp && hp->connect_cfm) - hp->connect_cfm(conn, status); + case SCO_LINK: + case ESCO_LINK: + sco_connect_cfm(conn, status); + break; - hp = hci_proto[HCI_PROTO_SCO]; - if (hp && hp->connect_cfm) - hp->connect_cfm(conn, status); + default: + BT_ERR("unknown link type %d", conn->type); + break; + } if (conn->connect_cfm_cb) conn->connect_cfm_cb(conn, status); @@ -675,31 +720,29 @@ static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status) static inline int hci_proto_disconn_ind(struct hci_conn *conn) { - register struct hci_proto *hp; - int reason = 0x13; - - hp = hci_proto[HCI_PROTO_L2CAP]; - if (hp && hp->disconn_ind) - reason = hp->disconn_ind(conn); + if (conn->type != ACL_LINK && conn->type != LE_LINK) + return HCI_ERROR_REMOTE_USER_TERM; - hp = hci_proto[HCI_PROTO_SCO]; - if (hp && hp->disconn_ind) - reason = hp->disconn_ind(conn); - - return reason; + return l2cap_disconn_ind(conn); } static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason) { - register struct hci_proto *hp; + switch (conn->type) { + case ACL_LINK: + case LE_LINK: + l2cap_disconn_cfm(conn, reason); + break; - hp = hci_proto[HCI_PROTO_L2CAP]; - if (hp && hp->disconn_cfm) - hp->disconn_cfm(conn, reason); + case SCO_LINK: + case ESCO_LINK: + sco_disconn_cfm(conn, reason); + break; - hp = hci_proto[HCI_PROTO_SCO]; - if (hp && hp->disconn_cfm) - hp->disconn_cfm(conn, reason); + default: + BT_ERR("unknown link type %d", conn->type); + break; + } if (conn->disconn_cfm_cb) conn->disconn_cfm_cb(conn, reason); @@ -707,21 +750,16 @@ static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason) static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status) { - register struct hci_proto *hp; __u8 encrypt; + if (conn->type != ACL_LINK && conn->type != LE_LINK) + return; + if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) return; encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 
0x01 : 0x00; - - hp = hci_proto[HCI_PROTO_L2CAP]; - if (hp && hp->security_cfm) - hp->security_cfm(conn, status, encrypt); - - hp = hci_proto[HCI_PROTO_SCO]; - if (hp && hp->security_cfm) - hp->security_cfm(conn, status, encrypt); + l2cap_security_cfm(conn, status, encrypt); if (conn->security_cfm_cb) conn->security_cfm_cb(conn, status); @@ -730,23 +768,15 @@ static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status) static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt) { - register struct hci_proto *hp; - - hp = hci_proto[HCI_PROTO_L2CAP]; - if (hp && hp->security_cfm) - hp->security_cfm(conn, status, encrypt); + if (conn->type != ACL_LINK && conn->type != LE_LINK) + return; - hp = hci_proto[HCI_PROTO_SCO]; - if (hp && hp->security_cfm) - hp->security_cfm(conn, status, encrypt); + l2cap_security_cfm(conn, status, encrypt); if (conn->security_cfm_cb) conn->security_cfm_cb(conn, status); } -int hci_register_proto(struct hci_proto *hproto); -int hci_unregister_proto(struct hci_proto *hproto); - /* ----- HCI callbacks ----- */ struct hci_cb { struct list_head list; @@ -771,13 +801,13 @@ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00; - read_lock_bh(&hci_cb_list_lock); + read_lock(&hci_cb_list_lock); list_for_each(p, &hci_cb_list) { struct hci_cb *cb = list_entry(p, struct hci_cb, list); if (cb->security_cfm) cb->security_cfm(conn, status, encrypt); } - read_unlock_bh(&hci_cb_list_lock); + read_unlock(&hci_cb_list_lock); } static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, @@ -793,26 +823,26 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, hci_proto_encrypt_cfm(conn, status, encrypt); - read_lock_bh(&hci_cb_list_lock); + read_lock(&hci_cb_list_lock); list_for_each(p, &hci_cb_list) { struct hci_cb *cb = list_entry(p, struct hci_cb, list); if (cb->security_cfm) cb->security_cfm(conn, status, encrypt); } - read_unlock_bh(&hci_cb_list_lock); + read_unlock(&hci_cb_list_lock); } static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status) { struct list_head *p; - read_lock_bh(&hci_cb_list_lock); + read_lock(&hci_cb_list_lock); list_for_each(p, &hci_cb_list) { struct hci_cb *cb = list_entry(p, struct hci_cb, list); if (cb->key_change_cfm) cb->key_change_cfm(conn, status); } - read_unlock_bh(&hci_cb_list_lock); + read_unlock(&hci_cb_list_lock); } static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, @@ -820,13 +850,13 @@ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, { struct list_head *p; - read_lock_bh(&hci_cb_list_lock); + read_lock(&hci_cb_list_lock); list_for_each(p, &hci_cb_list) { struct hci_cb *cb = list_entry(p, struct hci_cb, list); if (cb->role_switch_cfm) cb->role_switch_cfm(conn, status, role); } - read_unlock_bh(&hci_cb_list_lock); + read_unlock(&hci_cb_list_lock); } int hci_register_cb(struct hci_cb *hcb); @@ -836,7 +866,7 @@ int hci_register_notifier(struct notifier_block *nb); int hci_unregister_notifier(struct notifier_block *nb); int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param); -void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags); +void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags); void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb); void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); @@ -849,44 +879,63 @@ void hci_send_to_sock(struct hci_dev *hdev, 
struct sk_buff *skb, /* Management interface */ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len); -int mgmt_index_added(u16 index); -int mgmt_index_removed(u16 index); -int mgmt_powered(u16 index, u8 powered); -int mgmt_discoverable(u16 index, u8 discoverable); -int mgmt_connectable(u16 index, u8 connectable); -int mgmt_new_key(u16 index, struct link_key *key, u8 persistent); -int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type); -int mgmt_disconnected(u16 index, bdaddr_t *bdaddr); -int mgmt_disconnect_failed(u16 index); -int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status); -int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure); -int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status); -int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status); -int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value, - u8 confirm_hint); -int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status); -int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, +int mgmt_index_added(struct hci_dev *hdev); +int mgmt_index_removed(struct hci_dev *hdev); +int mgmt_powered(struct hci_dev *hdev, u8 powered); +int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable); +int mgmt_connectable(struct hci_dev *hdev, u8 connectable); +int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status); +int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, + u8 persistent); +int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, + u8 addr_type); +int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, + u8 addr_type); +int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status); +int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, + u8 addr_type, u8 status); +int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure); +int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status); -int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status); -int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status); -int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer, +int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status); -int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi, - u8 *eir); -int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name); -int mgmt_discovering(u16 index, u8 discovering); -int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr); -int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr); +int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, + __le32 value, u8 confirm_hint); +int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 status); +int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, + bdaddr_t *bdaddr, u8 status); +int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr); +int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 status); +int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, + bdaddr_t *bdaddr, u8 status); +int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status); +int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status); +int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash, + u8 *randomizer, u8 status); +int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, + u8 
addr_type, u8 *dev_class, s8 rssi, u8 *eir); +int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name); +int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status); +int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status); +int mgmt_discovering(struct hci_dev *hdev, u8 discovering); +int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr); +int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr); /* HCI info for socket */ #define hci_pi(sk) ((struct hci_pinfo *) sk) +/* HCI socket flags */ +#define HCI_PI_MGMT_INIT 0 + struct hci_pinfo { struct bt_sock bt; struct hci_dev *hdev; struct hci_filter filter; __u32 cmsg_mask; unsigned short channel; + unsigned long flags; }; /* HCI security filter */ @@ -915,4 +964,7 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16]); void hci_le_ltk_neg_reply(struct hci_conn *conn); +int hci_do_inquiry(struct hci_dev *hdev, u8 length); +int hci_cancel_inquiry(struct hci_dev *hdev); + #endif /* __HCI_CORE_H */ diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index ab90ae0970a..68f58915069 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -27,20 +27,29 @@ #ifndef __L2CAP_H #define __L2CAP_H +#include <asm/unaligned.h> + /* L2CAP defaults */ #define L2CAP_DEFAULT_MTU 672 #define L2CAP_DEFAULT_MIN_MTU 48 #define L2CAP_DEFAULT_FLUSH_TO 0xffff #define L2CAP_DEFAULT_TX_WINDOW 63 +#define L2CAP_DEFAULT_EXT_WINDOW 0x3FFF #define L2CAP_DEFAULT_MAX_TX 3 #define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */ #define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */ #define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */ #define L2CAP_DEFAULT_ACK_TO 200 #define L2CAP_LE_DEFAULT_MTU 23 +#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF +#define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF +#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF -#define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */ -#define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */ +#define L2CAP_DISC_TIMEOUT (100) +#define L2CAP_DISC_REJ_TIMEOUT (5000) /* 5 seconds */ +#define L2CAP_ENC_TIMEOUT (5000) /* 5 seconds */ +#define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */ +#define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */ /* L2CAP socket address */ struct sockaddr_l2 { @@ -88,52 +97,82 @@ struct l2cap_conninfo { #define L2CAP_ECHO_RSP 0x09 #define L2CAP_INFO_REQ 0x0a #define L2CAP_INFO_RSP 0x0b +#define L2CAP_CREATE_CHAN_REQ 0x0c +#define L2CAP_CREATE_CHAN_RSP 0x0d +#define L2CAP_MOVE_CHAN_REQ 0x0e +#define L2CAP_MOVE_CHAN_RSP 0x0f +#define L2CAP_MOVE_CHAN_CFM 0x10 +#define L2CAP_MOVE_CHAN_CFM_RSP 0x11 #define L2CAP_CONN_PARAM_UPDATE_REQ 0x12 #define L2CAP_CONN_PARAM_UPDATE_RSP 0x13 -/* L2CAP feature mask */ +/* L2CAP extended feature mask */ #define L2CAP_FEAT_FLOWCTL 0x00000001 #define L2CAP_FEAT_RETRANS 0x00000002 +#define L2CAP_FEAT_BIDIR_QOS 0x00000004 #define L2CAP_FEAT_ERTM 0x00000008 #define L2CAP_FEAT_STREAMING 0x00000010 #define L2CAP_FEAT_FCS 0x00000020 +#define L2CAP_FEAT_EXT_FLOW 0x00000040 #define L2CAP_FEAT_FIXED_CHAN 0x00000080 +#define L2CAP_FEAT_EXT_WINDOW 0x00000100 +#define L2CAP_FEAT_UCD 0x00000200 /* L2CAP checksum option */ #define L2CAP_FCS_NONE 0x00 #define L2CAP_FCS_CRC16 0x01 +/* L2CAP fixed channels */ +#define L2CAP_FC_L2CAP 0x02 +#define L2CAP_FC_A2MP 0x08 + /* L2CAP Control Field bit masks */ -#define L2CAP_CTRL_SAR 0xC000 -#define L2CAP_CTRL_REQSEQ 0x3F00 -#define L2CAP_CTRL_TXSEQ 0x007E -#define L2CAP_CTRL_RETRANS 0x0080 
-#define L2CAP_CTRL_FINAL 0x0080 -#define L2CAP_CTRL_POLL 0x0010 -#define L2CAP_CTRL_SUPERVISE 0x000C -#define L2CAP_CTRL_FRAME_TYPE 0x0001 /* I- or S-Frame */ - -#define L2CAP_CTRL_TXSEQ_SHIFT 1 -#define L2CAP_CTRL_REQSEQ_SHIFT 8 -#define L2CAP_CTRL_SAR_SHIFT 14 +#define L2CAP_CTRL_SAR 0xC000 +#define L2CAP_CTRL_REQSEQ 0x3F00 +#define L2CAP_CTRL_TXSEQ 0x007E +#define L2CAP_CTRL_SUPERVISE 0x000C + +#define L2CAP_CTRL_RETRANS 0x0080 +#define L2CAP_CTRL_FINAL 0x0080 +#define L2CAP_CTRL_POLL 0x0010 +#define L2CAP_CTRL_FRAME_TYPE 0x0001 /* I- or S-Frame */ + +#define L2CAP_CTRL_TXSEQ_SHIFT 1 +#define L2CAP_CTRL_SUPER_SHIFT 2 +#define L2CAP_CTRL_REQSEQ_SHIFT 8 +#define L2CAP_CTRL_SAR_SHIFT 14 + +/* L2CAP Extended Control Field bit mask */ +#define L2CAP_EXT_CTRL_TXSEQ 0xFFFC0000 +#define L2CAP_EXT_CTRL_SAR 0x00030000 +#define L2CAP_EXT_CTRL_SUPERVISE 0x00030000 +#define L2CAP_EXT_CTRL_REQSEQ 0x0000FFFC + +#define L2CAP_EXT_CTRL_POLL 0x00040000 +#define L2CAP_EXT_CTRL_FINAL 0x00000002 +#define L2CAP_EXT_CTRL_FRAME_TYPE 0x00000001 /* I- or S-Frame */ + +#define L2CAP_EXT_CTRL_REQSEQ_SHIFT 2 +#define L2CAP_EXT_CTRL_SAR_SHIFT 16 +#define L2CAP_EXT_CTRL_SUPER_SHIFT 16 +#define L2CAP_EXT_CTRL_TXSEQ_SHIFT 18 /* L2CAP Supervisory Function */ -#define L2CAP_SUPER_RCV_READY 0x0000 -#define L2CAP_SUPER_REJECT 0x0004 -#define L2CAP_SUPER_RCV_NOT_READY 0x0008 -#define L2CAP_SUPER_SELECT_REJECT 0x000C +#define L2CAP_SUPER_RR 0x00 +#define L2CAP_SUPER_REJ 0x01 +#define L2CAP_SUPER_RNR 0x02 +#define L2CAP_SUPER_SREJ 0x03 /* L2CAP Segmentation and Reassembly */ -#define L2CAP_SDU_UNSEGMENTED 0x0000 -#define L2CAP_SDU_START 0x4000 -#define L2CAP_SDU_END 0x8000 -#define L2CAP_SDU_CONTINUE 0xC000 +#define L2CAP_SAR_UNSEGMENTED 0x00 +#define L2CAP_SAR_START 0x01 +#define L2CAP_SAR_END 0x02 +#define L2CAP_SAR_CONTINUE 0x03 /* L2CAP Command rej. 
reasons */ -#define L2CAP_REJ_NOT_UNDERSTOOD 0x0000 -#define L2CAP_REJ_MTU_EXCEEDED 0x0001 -#define L2CAP_REJ_INVALID_CID 0x0002 - +#define L2CAP_REJ_NOT_UNDERSTOOD 0x0000 +#define L2CAP_REJ_MTU_EXCEEDED 0x0001 +#define L2CAP_REJ_INVALID_CID 0x0002 /* L2CAP structures */ struct l2cap_hdr { @@ -141,6 +180,12 @@ struct l2cap_hdr { __le16 cid; } __packed; #define L2CAP_HDR_SIZE 4 +#define L2CAP_ENH_HDR_SIZE 6 +#define L2CAP_EXT_HDR_SIZE 8 + +#define L2CAP_FCS_SIZE 2 +#define L2CAP_SDULEN_SIZE 2 +#define L2CAP_PSMLEN_SIZE 2 struct l2cap_cmd_hdr { __u8 code; @@ -185,14 +230,15 @@ struct l2cap_conn_rsp { #define L2CAP_CID_DYN_START 0x0040 #define L2CAP_CID_DYN_END 0xffff -/* connect result */ +/* connect/create channel results */ #define L2CAP_CR_SUCCESS 0x0000 #define L2CAP_CR_PEND 0x0001 #define L2CAP_CR_BAD_PSM 0x0002 #define L2CAP_CR_SEC_BLOCK 0x0003 #define L2CAP_CR_NO_MEM 0x0004 +#define L2CAP_CR_BAD_AMP 0x0005 -/* connect status */ +/* connect/create channel status */ #define L2CAP_CS_NO_INFO 0x0000 #define L2CAP_CS_AUTHEN_PEND 0x0001 #define L2CAP_CS_AUTHOR_PEND 0x0002 @@ -214,6 +260,8 @@ struct l2cap_conf_rsp { #define L2CAP_CONF_UNACCEPT 0x0001 #define L2CAP_CONF_REJECT 0x0002 #define L2CAP_CONF_UNKNOWN 0x0003 +#define L2CAP_CONF_PENDING 0x0004 +#define L2CAP_CONF_EFS_REJECT 0x0005 struct l2cap_conf_opt { __u8 type; @@ -230,6 +278,8 @@ struct l2cap_conf_opt { #define L2CAP_CONF_QOS 0x03 #define L2CAP_CONF_RFC 0x04 #define L2CAP_CONF_FCS 0x05 +#define L2CAP_CONF_EFS 0x06 +#define L2CAP_CONF_EWS 0x07 #define L2CAP_CONF_MAX_SIZE 22 @@ -248,6 +298,21 @@ struct l2cap_conf_rfc { #define L2CAP_MODE_ERTM 0x03 #define L2CAP_MODE_STREAMING 0x04 +struct l2cap_conf_efs { + __u8 id; + __u8 stype; + __le16 msdu; + __le32 sdu_itime; + __le32 acc_lat; + __le32 flush_to; +} __packed; + +#define L2CAP_SERV_NOTRAFIC 0x00 +#define L2CAP_SERV_BESTEFFORT 0x01 +#define L2CAP_SERV_GUARANTEED 0x02 + +#define L2CAP_BESTEFFORT_ID 0x01 + struct l2cap_disconn_req { __le16 dcid; __le16 scid; @@ -268,14 +333,57 @@ struct l2cap_info_rsp { __u8 data[0]; } __packed; +struct l2cap_create_chan_req { + __le16 psm; + __le16 scid; + __u8 amp_id; +} __packed; + +struct l2cap_create_chan_rsp { + __le16 dcid; + __le16 scid; + __le16 result; + __le16 status; +} __packed; + +struct l2cap_move_chan_req { + __le16 icid; + __u8 dest_amp_id; +} __packed; + +struct l2cap_move_chan_rsp { + __le16 icid; + __le16 result; +} __packed; + +#define L2CAP_MR_SUCCESS 0x0000 +#define L2CAP_MR_PEND 0x0001 +#define L2CAP_MR_BAD_ID 0x0002 +#define L2CAP_MR_SAME_ID 0x0003 +#define L2CAP_MR_NOT_SUPP 0x0004 +#define L2CAP_MR_COLLISION 0x0005 +#define L2CAP_MR_NOT_ALLOWED 0x0006 + +struct l2cap_move_chan_cfm { + __le16 icid; + __le16 result; +} __packed; + +#define L2CAP_MC_CONFIRMED 0x0000 +#define L2CAP_MC_UNCONFIRMED 0x0001 + +struct l2cap_move_chan_cfm_rsp { + __le16 icid; +} __packed; + /* info type */ -#define L2CAP_IT_CL_MTU 0x0001 -#define L2CAP_IT_FEAT_MASK 0x0002 -#define L2CAP_IT_FIXED_CHAN 0x0003 +#define L2CAP_IT_CL_MTU 0x0001 +#define L2CAP_IT_FEAT_MASK 0x0002 +#define L2CAP_IT_FIXED_CHAN 0x0003 /* info result */ -#define L2CAP_IR_SUCCESS 0x0000 -#define L2CAP_IR_NOTSUPP 0x0001 +#define L2CAP_IR_SUCCESS 0x0000 +#define L2CAP_IR_NOTSUPP 0x0001 struct l2cap_conn_param_update_req { __le16 min; @@ -294,7 +402,7 @@ struct l2cap_conn_param_update_rsp { /* ----- L2CAP channels and connections ----- */ struct srej_list { - __u8 tx_seq; + __u16 tx_seq; struct list_head list; }; @@ -316,14 +424,11 @@ struct l2cap_chan { __u16 flush_to; __u8 mode; 
__u8 chan_type; + __u8 chan_policy; __le16 sport; __u8 sec_level; - __u8 role_switch; - __u8 force_reliable; - __u8 flushable; - __u8 force_active; __u8 ident; @@ -334,7 +439,8 @@ struct l2cap_chan { __u8 fcs; - __u8 tx_win; + __u16 tx_win; + __u16 tx_win_max; __u8 max_tx; __u16 retrans_timeout; __u16 monitor_timeout; @@ -342,29 +448,45 @@ struct l2cap_chan { unsigned long conf_state; unsigned long conn_state; - - __u8 next_tx_seq; - __u8 expected_ack_seq; - __u8 expected_tx_seq; - __u8 buffer_seq; - __u8 buffer_seq_srej; - __u8 srej_save_reqseq; - __u8 frames_sent; - __u8 unacked_frames; + unsigned long flags; + + __u16 next_tx_seq; + __u16 expected_ack_seq; + __u16 expected_tx_seq; + __u16 buffer_seq; + __u16 buffer_seq_srej; + __u16 srej_save_reqseq; + __u16 frames_sent; + __u16 unacked_frames; __u8 retry_count; __u8 num_acked; __u16 sdu_len; struct sk_buff *sdu; struct sk_buff *sdu_last_frag; - __u8 remote_tx_win; + __u16 remote_tx_win; __u8 remote_max_tx; __u16 remote_mps; - struct timer_list chan_timer; - struct timer_list retrans_timer; - struct timer_list monitor_timer; - struct timer_list ack_timer; + __u8 local_id; + __u8 local_stype; + __u16 local_msdu; + __u32 local_sdu_itime; + __u32 local_acc_lat; + __u32 local_flush_to; + + __u8 remote_id; + __u8 remote_stype; + __u16 remote_msdu; + __u32 remote_sdu_itime; + __u32 remote_acc_lat; + __u32 remote_flush_to; + + struct delayed_work chan_timer; + struct delayed_work retrans_timer; + struct delayed_work monitor_timer; + struct delayed_work ack_timer; + struct sk_buff *tx_send_head; struct sk_buff_head tx_q; struct sk_buff_head srej_q; @@ -388,6 +510,7 @@ struct l2cap_ops { struct l2cap_conn { struct hci_conn *hcon; + struct hci_chan *hchan; bdaddr_t *dst; bdaddr_t *src; @@ -399,7 +522,7 @@ struct l2cap_conn { __u8 info_state; __u8 info_ident; - struct timer_list info_timer; + struct delayed_work info_timer; spinlock_t lock; @@ -409,11 +532,11 @@ struct l2cap_conn { __u8 disc_reason; - struct timer_list security_timer; + struct delayed_work security_timer; struct smp_chan *smp_chan; struct list_head chan_l; - rwlock_t chan_lock; + struct mutex chan_lock; }; #define L2CAP_INFO_CL_MTU_REQ_SENT 0x01 @@ -442,6 +565,9 @@ enum { CONF_CONNECT_PEND, CONF_NO_FCS_RECV, CONF_STATE2_DEVICE, + CONF_EWS_RECV, + CONF_LOC_CONF_PEND, + CONF_REM_CONF_PEND, }; #define L2CAP_CONF_MAX_CONF_REQ 2 @@ -459,6 +585,44 @@ enum { CONN_RNR_SENT, }; +/* Definitions for flags in l2cap_chan */ +enum { + FLAG_ROLE_SWITCH, + FLAG_FORCE_ACTIVE, + FLAG_FORCE_RELIABLE, + FLAG_FLUSHABLE, + FLAG_EXT_CTRL, + FLAG_EFS_ENABLE, +}; + +static inline void l2cap_chan_hold(struct l2cap_chan *c) +{ + atomic_inc(&c->refcnt); +} + +static inline void l2cap_chan_put(struct l2cap_chan *c) +{ + if (atomic_dec_and_test(&c->refcnt)) + kfree(c); +} + +static inline void l2cap_set_timer(struct l2cap_chan *chan, + struct delayed_work *work, long timeout) +{ + BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout); + + if (!__cancel_delayed_work(work)) + l2cap_chan_hold(chan); + schedule_delayed_work(work, timeout); +} + +static inline void l2cap_clear_timer(struct l2cap_chan *chan, + struct delayed_work *work) +{ + if (__cancel_delayed_work(work)) + l2cap_chan_put(chan); +} + #define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t)) #define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer) #define __set_retrans_timer(c) l2cap_set_timer(c, &c->retrans_timer, \ @@ -471,6 +635,22 @@ enum { L2CAP_DEFAULT_ACK_TO); #define __clear_ack_timer(c) 
l2cap_clear_timer(c, &c->ack_timer) +static inline int __seq_offset(struct l2cap_chan *chan, __u16 seq1, __u16 seq2) +{ + int offset; + + offset = (seq1 - seq2) % (chan->tx_win_max + 1); + if (offset < 0) + offset += (chan->tx_win_max + 1); + + return offset; +} + +static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq) +{ + return (seq + 1) % (chan->tx_win_max + 1); +} + static inline int l2cap_tx_window_full(struct l2cap_chan *ch) { int sub; @@ -483,13 +663,164 @@ static inline int l2cap_tx_window_full(struct l2cap_chan *ch) return sub == ch->remote_tx_win; } -#define __get_txseq(ctrl) (((ctrl) & L2CAP_CTRL_TXSEQ) >> 1) -#define __get_reqseq(ctrl) (((ctrl) & L2CAP_CTRL_REQSEQ) >> 8) -#define __is_iframe(ctrl) (!((ctrl) & L2CAP_CTRL_FRAME_TYPE)) -#define __is_sframe(ctrl) ((ctrl) & L2CAP_CTRL_FRAME_TYPE) -#define __is_sar_start(ctrl) (((ctrl) & L2CAP_CTRL_SAR) == L2CAP_SDU_START) +static inline __u16 __get_reqseq(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (ctrl & L2CAP_EXT_CTRL_REQSEQ) >> + L2CAP_EXT_CTRL_REQSEQ_SHIFT; + else + return (ctrl & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT; +} + +static inline __u32 __set_reqseq(struct l2cap_chan *chan, __u32 reqseq) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) & + L2CAP_EXT_CTRL_REQSEQ; + else + return (reqseq << L2CAP_CTRL_REQSEQ_SHIFT) & L2CAP_CTRL_REQSEQ; +} + +static inline __u16 __get_txseq(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (ctrl & L2CAP_EXT_CTRL_TXSEQ) >> + L2CAP_EXT_CTRL_TXSEQ_SHIFT; + else + return (ctrl & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT; +} + +static inline __u32 __set_txseq(struct l2cap_chan *chan, __u32 txseq) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) & + L2CAP_EXT_CTRL_TXSEQ; + else + return (txseq << L2CAP_CTRL_TXSEQ_SHIFT) & L2CAP_CTRL_TXSEQ; +} + +static inline bool __is_sframe(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return ctrl & L2CAP_EXT_CTRL_FRAME_TYPE; + else + return ctrl & L2CAP_CTRL_FRAME_TYPE; +} + +static inline __u32 __set_sframe(struct l2cap_chan *chan) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return L2CAP_EXT_CTRL_FRAME_TYPE; + else + return L2CAP_CTRL_FRAME_TYPE; +} + +static inline __u8 __get_ctrl_sar(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (ctrl & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT; + else + return (ctrl & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT; +} + +static inline __u32 __set_ctrl_sar(struct l2cap_chan *chan, __u32 sar) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (sar << L2CAP_EXT_CTRL_SAR_SHIFT) & L2CAP_EXT_CTRL_SAR; + else + return (sar << L2CAP_CTRL_SAR_SHIFT) & L2CAP_CTRL_SAR; +} + +static inline bool __is_sar_start(struct l2cap_chan *chan, __u32 ctrl) +{ + return __get_ctrl_sar(chan, ctrl) == L2CAP_SAR_START; +} + +static inline __u32 __get_sar_mask(struct l2cap_chan *chan) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return L2CAP_EXT_CTRL_SAR; + else + return L2CAP_CTRL_SAR; +} + +static inline __u8 __get_ctrl_super(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (ctrl & L2CAP_EXT_CTRL_SUPERVISE) >> + L2CAP_EXT_CTRL_SUPER_SHIFT; + else + return (ctrl & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT; +} + +static inline __u32 __set_ctrl_super(struct l2cap_chan *chan, __u32 super) +{ 
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (super << L2CAP_EXT_CTRL_SUPER_SHIFT) & + L2CAP_EXT_CTRL_SUPERVISE; + else + return (super << L2CAP_CTRL_SUPER_SHIFT) & + L2CAP_CTRL_SUPERVISE; +} + +static inline __u32 __set_ctrl_final(struct l2cap_chan *chan) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return L2CAP_EXT_CTRL_FINAL; + else + return L2CAP_CTRL_FINAL; +} + +static inline bool __is_ctrl_final(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return ctrl & L2CAP_EXT_CTRL_FINAL; + else + return ctrl & L2CAP_CTRL_FINAL; +} + +static inline __u32 __set_ctrl_poll(struct l2cap_chan *chan) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return L2CAP_EXT_CTRL_POLL; + else + return L2CAP_CTRL_POLL; +} + +static inline bool __is_ctrl_poll(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return ctrl & L2CAP_EXT_CTRL_POLL; + else + return ctrl & L2CAP_CTRL_POLL; +} + +static inline __u32 __get_control(struct l2cap_chan *chan, void *p) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return get_unaligned_le32(p); + else + return get_unaligned_le16(p); +} + +static inline void __put_control(struct l2cap_chan *chan, __u32 control, + void *p) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return put_unaligned_le32(control, p); + else + return put_unaligned_le16(control, p); +} + +static inline __u8 __ctrl_size(struct l2cap_chan *chan) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return L2CAP_EXT_HDR_SIZE - L2CAP_HDR_SIZE; + else + return L2CAP_ENH_HDR_SIZE - L2CAP_HDR_SIZE; +} -extern int disable_ertm; +extern bool disable_ertm; int l2cap_init_sockets(void); void l2cap_cleanup_sockets(void); @@ -503,8 +834,11 @@ int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid); struct l2cap_chan *l2cap_chan_create(struct sock *sk); void l2cap_chan_close(struct l2cap_chan *chan, int reason); void l2cap_chan_destroy(struct l2cap_chan *chan); -int l2cap_chan_connect(struct l2cap_chan *chan); -int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len); +inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, + bdaddr_t *dst); +int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, + u32 priority); void l2cap_chan_busy(struct l2cap_chan *chan, int busy); +int l2cap_chan_check_security(struct l2cap_chan *chan); #endif /* __L2CAP_H */ diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index d66da0f94f9..be65d341788 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -23,6 +23,23 @@ #define MGMT_INDEX_NONE 0xFFFF +#define MGMT_STATUS_SUCCESS 0x00 +#define MGMT_STATUS_UNKNOWN_COMMAND 0x01 +#define MGMT_STATUS_NOT_CONNECTED 0x02 +#define MGMT_STATUS_FAILED 0x03 +#define MGMT_STATUS_CONNECT_FAILED 0x04 +#define MGMT_STATUS_AUTH_FAILED 0x05 +#define MGMT_STATUS_NOT_PAIRED 0x06 +#define MGMT_STATUS_NO_RESOURCES 0x07 +#define MGMT_STATUS_TIMEOUT 0x08 +#define MGMT_STATUS_ALREADY_CONNECTED 0x09 +#define MGMT_STATUS_BUSY 0x0a +#define MGMT_STATUS_REJECTED 0x0b +#define MGMT_STATUS_NOT_SUPPORTED 0x0c +#define MGMT_STATUS_INVALID_PARAMS 0x0d +#define MGMT_STATUS_DISCONNECTED 0x0e +#define MGMT_STATUS_NOT_POWERED 0x0f + struct mgmt_hdr { __le16 opcode; __le16 index; @@ -44,22 +61,29 @@ struct mgmt_rp_read_index_list { /* Reserve one extra byte for names in management messages so that they * are always guaranteed to be nul-terminated */ #define MGMT_MAX_NAME_LENGTH (HCI_MAX_NAME_LENGTH + 1) +#define 
MGMT_MAX_SHORT_NAME_LENGTH (10 + 1) + +#define MGMT_SETTING_POWERED 0x00000001 +#define MGMT_SETTING_CONNECTABLE 0x00000002 +#define MGMT_SETTING_FAST_CONNECTABLE 0x00000004 +#define MGMT_SETTING_DISCOVERABLE 0x00000008 +#define MGMT_SETTING_PAIRABLE 0x00000010 +#define MGMT_SETTING_LINK_SECURITY 0x00000020 +#define MGMT_SETTING_SSP 0x00000040 +#define MGMT_SETTING_BREDR 0x00000080 +#define MGMT_SETTING_HS 0x00000100 +#define MGMT_SETTING_LE 0x00000200 #define MGMT_OP_READ_INFO 0x0004 struct mgmt_rp_read_info { - __u8 type; - __u8 powered; - __u8 connectable; - __u8 discoverable; - __u8 pairable; - __u8 sec_mode; bdaddr_t bdaddr; + __u8 version; + __le16 manufacturer; + __le32 supported_settings; + __le32 current_settings; __u8 dev_class[3]; - __u8 features[8]; - __u16 manufacturer; - __u8 hci_ver; - __u16 hci_rev; __u8 name[MGMT_MAX_NAME_LENGTH]; + __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH]; } __packed; struct mgmt_mode { @@ -69,70 +93,97 @@ struct mgmt_mode { #define MGMT_OP_SET_POWERED 0x0005 #define MGMT_OP_SET_DISCOVERABLE 0x0006 +struct mgmt_cp_set_discoverable { + __u8 val; + __u16 timeout; +} __packed; #define MGMT_OP_SET_CONNECTABLE 0x0007 -#define MGMT_OP_SET_PAIRABLE 0x0008 +#define MGMT_OP_SET_FAST_CONNECTABLE 0x0008 -#define MGMT_OP_ADD_UUID 0x0009 -struct mgmt_cp_add_uuid { - __u8 uuid[16]; - __u8 svc_hint; -} __packed; +#define MGMT_OP_SET_PAIRABLE 0x0009 -#define MGMT_OP_REMOVE_UUID 0x000A -struct mgmt_cp_remove_uuid { - __u8 uuid[16]; -} __packed; +#define MGMT_OP_SET_LINK_SECURITY 0x000A + +#define MGMT_OP_SET_SSP 0x000B + +#define MGMT_OP_SET_HS 0x000C + +#define MGMT_OP_SET_LE 0x000D -#define MGMT_OP_SET_DEV_CLASS 0x000B +#define MGMT_OP_SET_DEV_CLASS 0x000E struct mgmt_cp_set_dev_class { __u8 major; __u8 minor; } __packed; -#define MGMT_OP_SET_SERVICE_CACHE 0x000C -struct mgmt_cp_set_service_cache { - __u8 enable; +#define MGMT_OP_SET_LOCAL_NAME 0x000F +struct mgmt_cp_set_local_name { + __u8 name[MGMT_MAX_NAME_LENGTH]; +} __packed; + +#define MGMT_OP_ADD_UUID 0x0010 +struct mgmt_cp_add_uuid { + __u8 uuid[16]; + __u8 svc_hint; +} __packed; + +#define MGMT_OP_REMOVE_UUID 0x0011 +struct mgmt_cp_remove_uuid { + __u8 uuid[16]; } __packed; -struct mgmt_key_info { +struct mgmt_link_key_info { bdaddr_t bdaddr; u8 type; u8 val[16]; u8 pin_len; - u8 dlen; - u8 data[0]; } __packed; -#define MGMT_OP_LOAD_KEYS 0x000D -struct mgmt_cp_load_keys { +#define MGMT_OP_LOAD_LINK_KEYS 0x0012 +struct mgmt_cp_load_link_keys { __u8 debug_keys; __le16 key_count; - struct mgmt_key_info keys[0]; + struct mgmt_link_key_info keys[0]; } __packed; -#define MGMT_OP_REMOVE_KEY 0x000E -struct mgmt_cp_remove_key { +#define MGMT_OP_REMOVE_KEYS 0x0013 +struct mgmt_cp_remove_keys { bdaddr_t bdaddr; __u8 disconnect; } __packed; +struct mgmt_rp_remove_keys { + bdaddr_t bdaddr; + __u8 status; +}; -#define MGMT_OP_DISCONNECT 0x000F +#define MGMT_OP_DISCONNECT 0x0014 struct mgmt_cp_disconnect { bdaddr_t bdaddr; } __packed; struct mgmt_rp_disconnect { bdaddr_t bdaddr; + __u8 status; } __packed; -#define MGMT_OP_GET_CONNECTIONS 0x0010 +#define MGMT_ADDR_BREDR 0x00 +#define MGMT_ADDR_LE_PUBLIC 0x01 +#define MGMT_ADDR_LE_RANDOM 0x02 +#define MGMT_ADDR_INVALID 0xff + +struct mgmt_addr_info { + bdaddr_t bdaddr; + __u8 type; +} __packed; + +#define MGMT_OP_GET_CONNECTIONS 0x0015 struct mgmt_rp_get_connections { __le16 conn_count; - bdaddr_t conn[0]; + struct mgmt_addr_info addr[0]; } __packed; -#define MGMT_OP_PIN_CODE_REPLY 0x0011 +#define MGMT_OP_PIN_CODE_REPLY 0x0016 struct mgmt_cp_pin_code_reply { bdaddr_t bdaddr; 
__u8 pin_len; @@ -143,27 +194,27 @@ struct mgmt_rp_pin_code_reply { uint8_t status; } __packed; -#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0012 +#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0017 struct mgmt_cp_pin_code_neg_reply { bdaddr_t bdaddr; } __packed; -#define MGMT_OP_SET_IO_CAPABILITY 0x0013 +#define MGMT_OP_SET_IO_CAPABILITY 0x0018 struct mgmt_cp_set_io_capability { __u8 io_capability; } __packed; -#define MGMT_OP_PAIR_DEVICE 0x0014 +#define MGMT_OP_PAIR_DEVICE 0x0019 struct mgmt_cp_pair_device { - bdaddr_t bdaddr; + struct mgmt_addr_info addr; __u8 io_cap; } __packed; struct mgmt_rp_pair_device { - bdaddr_t bdaddr; + struct mgmt_addr_info addr; __u8 status; } __packed; -#define MGMT_OP_USER_CONFIRM_REPLY 0x0015 +#define MGMT_OP_USER_CONFIRM_REPLY 0x001A struct mgmt_cp_user_confirm_reply { bdaddr_t bdaddr; } __packed; @@ -172,48 +223,69 @@ struct mgmt_rp_user_confirm_reply { __u8 status; } __packed; -#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x0016 +#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x001B +struct mgmt_cp_user_confirm_neg_reply { + bdaddr_t bdaddr; +} __packed; -#define MGMT_OP_SET_LOCAL_NAME 0x0017 -struct mgmt_cp_set_local_name { - __u8 name[MGMT_MAX_NAME_LENGTH]; +#define MGMT_OP_USER_PASSKEY_REPLY 0x001C +struct mgmt_cp_user_passkey_reply { + bdaddr_t bdaddr; + __le32 passkey; +} __packed; +struct mgmt_rp_user_passkey_reply { + bdaddr_t bdaddr; + __u8 status; } __packed; -#define MGMT_OP_READ_LOCAL_OOB_DATA 0x0018 +#define MGMT_OP_USER_PASSKEY_NEG_REPLY 0x001D +struct mgmt_cp_user_passkey_neg_reply { + bdaddr_t bdaddr; +} __packed; + +#define MGMT_OP_READ_LOCAL_OOB_DATA 0x001E struct mgmt_rp_read_local_oob_data { __u8 hash[16]; __u8 randomizer[16]; } __packed; -#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x0019 +#define MGMT_OP_ADD_REMOTE_OOB_DATA 0x001F struct mgmt_cp_add_remote_oob_data { bdaddr_t bdaddr; __u8 hash[16]; __u8 randomizer[16]; } __packed; -#define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x001A +#define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x0020 struct mgmt_cp_remove_remote_oob_data { bdaddr_t bdaddr; } __packed; -#define MGMT_OP_START_DISCOVERY 0x001B +#define MGMT_OP_START_DISCOVERY 0x0021 +struct mgmt_cp_start_discovery { + __u8 type; +} __packed; -#define MGMT_OP_STOP_DISCOVERY 0x001C +#define MGMT_OP_STOP_DISCOVERY 0x0022 -#define MGMT_OP_BLOCK_DEVICE 0x001D -struct mgmt_cp_block_device { +#define MGMT_OP_CONFIRM_NAME 0x0023 +struct mgmt_cp_confirm_name { bdaddr_t bdaddr; + __u8 name_known; +} __packed; +struct mgmt_rp_confirm_name { + bdaddr_t bdaddr; + __u8 status; } __packed; -#define MGMT_OP_UNBLOCK_DEVICE 0x001E -struct mgmt_cp_unblock_device { +#define MGMT_OP_BLOCK_DEVICE 0x0024 +struct mgmt_cp_block_device { bdaddr_t bdaddr; } __packed; -#define MGMT_OP_SET_FAST_CONNECTABLE 0x001F -struct mgmt_cp_set_fast_connectable { - __u8 enable; +#define MGMT_OP_UNBLOCK_DEVICE 0x0025 +struct mgmt_cp_unblock_device { + bdaddr_t bdaddr; } __packed; #define MGMT_EV_CMD_COMPLETE 0x0001 @@ -237,83 +309,82 @@ struct mgmt_ev_controller_error { #define MGMT_EV_INDEX_REMOVED 0x0005 -#define MGMT_EV_POWERED 0x0006 +#define MGMT_EV_NEW_SETTINGS 0x0006 -#define MGMT_EV_DISCOVERABLE 0x0007 - -#define MGMT_EV_CONNECTABLE 0x0008 +#define MGMT_EV_CLASS_OF_DEV_CHANGED 0x0007 +struct mgmt_ev_class_of_dev_changed { + __u8 dev_class[3]; +}; -#define MGMT_EV_PAIRABLE 0x0009 +#define MGMT_EV_LOCAL_NAME_CHANGED 0x0008 +struct mgmt_ev_local_name_changed { + __u8 name[MGMT_MAX_NAME_LENGTH]; + __u8 short_name[MGMT_MAX_SHORT_NAME_LENGTH]; +} __packed; -#define MGMT_EV_NEW_KEY 0x000A -struct mgmt_ev_new_key { +#define 
MGMT_EV_NEW_LINK_KEY 0x0009 +struct mgmt_ev_new_link_key { __u8 store_hint; - struct mgmt_key_info key; + struct mgmt_link_key_info key; } __packed; -#define MGMT_EV_CONNECTED 0x000B -struct mgmt_ev_connected { - bdaddr_t bdaddr; - __u8 link_type; -} __packed; +#define MGMT_EV_CONNECTED 0x000A -#define MGMT_EV_DISCONNECTED 0x000C -struct mgmt_ev_disconnected { - bdaddr_t bdaddr; -} __packed; +#define MGMT_EV_DISCONNECTED 0x000B -#define MGMT_EV_CONNECT_FAILED 0x000D +#define MGMT_EV_CONNECT_FAILED 0x000C struct mgmt_ev_connect_failed { - bdaddr_t bdaddr; + struct mgmt_addr_info addr; __u8 status; } __packed; -#define MGMT_EV_PIN_CODE_REQUEST 0x000E +#define MGMT_EV_PIN_CODE_REQUEST 0x000D struct mgmt_ev_pin_code_request { bdaddr_t bdaddr; __u8 secure; } __packed; -#define MGMT_EV_USER_CONFIRM_REQUEST 0x000F +#define MGMT_EV_USER_CONFIRM_REQUEST 0x000E struct mgmt_ev_user_confirm_request { bdaddr_t bdaddr; __u8 confirm_hint; __le32 value; } __packed; +#define MGMT_EV_USER_PASSKEY_REQUEST 0x000F +struct mgmt_ev_user_passkey_request { + bdaddr_t bdaddr; +} __packed; + #define MGMT_EV_AUTH_FAILED 0x0010 struct mgmt_ev_auth_failed { bdaddr_t bdaddr; __u8 status; } __packed; -#define MGMT_EV_LOCAL_NAME_CHANGED 0x0011 -struct mgmt_ev_local_name_changed { - __u8 name[MGMT_MAX_NAME_LENGTH]; -} __packed; - -#define MGMT_EV_DEVICE_FOUND 0x0012 +#define MGMT_EV_DEVICE_FOUND 0x0011 struct mgmt_ev_device_found { - bdaddr_t bdaddr; + struct mgmt_addr_info addr; __u8 dev_class[3]; __s8 rssi; + __u8 confirm_name; __u8 eir[HCI_MAX_EIR_LENGTH]; } __packed; -#define MGMT_EV_REMOTE_NAME 0x0013 +#define MGMT_EV_REMOTE_NAME 0x0012 struct mgmt_ev_remote_name { bdaddr_t bdaddr; __u8 name[MGMT_MAX_NAME_LENGTH]; } __packed; -#define MGMT_EV_DISCOVERING 0x0014 +#define MGMT_EV_DISCOVERING 0x0013 -#define MGMT_EV_DEVICE_BLOCKED 0x0015 +#define MGMT_EV_DEVICE_BLOCKED 0x0014 struct mgmt_ev_device_blocked { bdaddr_t bdaddr; } __packed; -#define MGMT_EV_DEVICE_UNBLOCKED 0x0016 +#define MGMT_EV_DEVICE_UNBLOCKED 0x0015 struct mgmt_ev_device_unblocked { bdaddr_t bdaddr; } __packed; diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h index 15b97d54944..aeaf5fa2b9f 100644 --- a/include/net/bluetooth/smp.h +++ b/include/net/bluetooth/smp.h @@ -115,6 +115,10 @@ struct smp_cmd_security_req { #define SMP_MIN_ENC_KEY_SIZE 7 #define SMP_MAX_ENC_KEY_SIZE 16 +#define SMP_FLAG_TK_VALID 1 +#define SMP_FLAG_CFM_PENDING 2 +#define SMP_FLAG_MITM_AUTH 3 + struct smp_chan { struct l2cap_conn *conn; u8 preq[7]; /* SMP Pairing Request */ @@ -124,6 +128,7 @@ struct smp_chan { u8 pcnf[16]; /* SMP Pairing Confirm */ u8 tk[16]; /* SMP Temporary Key */ u8 smp_key_size; + unsigned long smp_flags; struct crypto_blkcipher *tfm; struct work_struct confirm; struct work_struct random; @@ -134,6 +139,7 @@ struct smp_chan { int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level); int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb); int smp_distribute_keys(struct l2cap_conn *conn, __u8 force); +int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey); void smp_chan_destroy(struct l2cap_conn *conn); diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h index c011281d92c..ef2dd9438bb 100644 --- a/include/net/caif/caif_dev.h +++ b/include/net/caif/caif_dev.h @@ -9,6 +9,7 @@ #include <net/caif/caif_layer.h> #include <net/caif/cfcnfg.h> +#include <net/caif/caif_device.h> #include <linux/caif/caif_socket.h> #include <linux/if.h> #include <linux/net.h> @@ -104,4 +105,24 @@ void 
caif_client_register_refcnt(struct cflayer *adapt_layer, */ void caif_free_client(struct cflayer *adap_layer); +/** + * struct caif_enroll_dev - Enroll a net-device as a CAIF Link layer + * @dev: Network device to enroll. + * @caifdev: Configuration information from CAIF Link Layer + * @link_support: Link layer support layer + * @head_room: Head room needed by link support layer + * @layer: Lowest layer in CAIF stack + * @rcv_fun: Receive function for CAIF stack. + * + * This function enroll a CAIF link layer into CAIF Stack and + * expects the interface to be able to handle CAIF payload. + * The link_support layer is used to add any Link Layer specific + * framing. + */ +void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, + struct cflayer *link_support, int head_room, + struct cflayer **layer, int (**rcv_func)( + struct sk_buff *, struct net_device *, + struct packet_type *, struct net_device *)); + #endif /* CAIF_DEV_H_ */ diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h index 35bc7883cf9..0f3a39125f9 100644 --- a/include/net/caif/caif_layer.h +++ b/include/net/caif/caif_layer.h @@ -121,9 +121,7 @@ enum caif_direction { * @transmit: Packet transmit funciton. * @ctrlcmd: Used for control signalling upwards in the stack. * @modemcmd: Used for control signaling downwards in the stack. - * @prio: Priority of this layer. * @id: The identity of this layer - * @type: The type of this layer * @name: Name of the layer. * * This structure defines the layered structure in CAIF. @@ -230,9 +228,7 @@ struct cflayer { */ int (*modemcmd) (struct cflayer *layr, enum caif_modemcmd ctrl); - unsigned short prio; unsigned int id; - unsigned int type; char name[CAIF_LAYER_NAME_SZ]; }; diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h index 87c3d11b8e5..aa6a485b054 100644 --- a/include/net/caif/caif_spi.h +++ b/include/net/caif/caif_spi.h @@ -55,8 +55,8 @@ struct cfspi_xfer { u16 tx_dma_len; u16 rx_dma_len; - void *va_tx; - dma_addr_t pa_tx; + void *va_tx[2]; + dma_addr_t pa_tx[2]; void *va_rx; dma_addr_t pa_rx; }; diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h index 3e93a4a4b67..90b4ff8bad8 100644 --- a/include/net/caif/cfcnfg.h +++ b/include/net/caif/cfcnfg.h @@ -14,18 +14,6 @@ struct cfcnfg; /** - * enum cfcnfg_phy_type - Types of physical layers defined in CAIF Stack - * - * @CFPHYTYPE_FRAG: Fragmented frames physical interface. - * @CFPHYTYPE_CAIF: Generic CAIF physical interface - */ -enum cfcnfg_phy_type { - CFPHYTYPE_FRAG = 1, - CFPHYTYPE_CAIF, - CFPHYTYPE_MAX -}; - -/** * enum cfcnfg_phy_preference - Physical preference HW Abstraction * * @CFPHYPREF_UNSPECIFIED: Default physical interface @@ -66,21 +54,20 @@ void cfcnfg_remove(struct cfcnfg *cfg); * cfcnfg_add_phy_layer() - Adds a physical layer to the CAIF stack. * @cnfg: Pointer to a CAIF configuration object, created by * cfcnfg_create(). - * @phy_type: Specifies the type of physical interface, e.g. - * CFPHYTYPE_FRAG. * @dev: Pointer to link layer device * @phy_layer: Specify the physical layer. The transmit function * MUST be set in the structure. * @pref: The phy (link layer) preference. + * @link_support: Protocol implementation for link layer specific protocol. * @fcs: Specify if checksum is used in CAIF Framing Layer. - * @stx: Specify if Start Of Frame eXtention is used. + * @head_room: Head space needed by link specific protocol. 
*/ - void -cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, +cfcnfg_add_phy_layer(struct cfcnfg *cnfg, struct net_device *dev, struct cflayer *phy_layer, enum cfcnfg_phy_preference pref, - bool fcs, bool stx); + struct cflayer *link_support, + bool fcs, int head_room); /** * cfcnfg_del_phy_layer - Deletes an phy layer from the CAIF stack. diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h index b8374321b36..f121299a342 100644 --- a/include/net/caif/cfserl.h +++ b/include/net/caif/cfserl.h @@ -8,5 +8,5 @@ #define CFSERL_H_ #include <net/caif/caif_layer.h> -struct cflayer *cfserl_create(int type, int instance, bool use_stx); -#endif /* CFSERL_H_ */ +struct cflayer *cfserl_create(int instance, bool use_stx); +#endif diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 92cf1c2c30c..15f4be7d768 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -391,6 +391,8 @@ struct cfg80211_crypto_settings { * @assocresp_ies: extra information element(s) to add into (Re)Association * Response frames or %NULL * @assocresp_ies_len: length of assocresp_ies in octets + * @probe_resp_len: length of probe response template (@probe_resp) + * @probe_resp: probe response template (AP mode only) */ struct beacon_parameters { u8 *head, *tail; @@ -408,6 +410,8 @@ struct beacon_parameters { size_t proberesp_ies_len; const u8 *assocresp_ies; size_t assocresp_ies_len; + int probe_resp_len; + u8 *probe_resp; }; /** @@ -456,6 +460,9 @@ enum station_parameters_apply_mask { * as the AC bitmap in the QoS info field * @max_sp: max Service Period. same format as the MAX_SP in the * QoS info field (but already shifted down) + * @sta_modify_mask: bitmap indicating which parameters changed + * (for those that don't have a natural "no change" value), + * see &enum station_parameters_apply_mask */ struct station_parameters { u8 *supported_rates; @@ -498,6 +505,7 @@ struct station_parameters { * @STATION_INFO_CONNECTED_TIME: @connected_time filled * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled * @STATION_INFO_STA_FLAGS: @sta_flags filled + * @STATION_INFO_BEACON_LOSS_COUNT: @beacon_loss_count filled */ enum station_info_flags { STATION_INFO_INACTIVE_TIME = 1<<0, @@ -518,7 +526,8 @@ enum station_info_flags { STATION_INFO_BSS_PARAM = 1<<15, STATION_INFO_CONNECTED_TIME = 1<<16, STATION_INFO_ASSOC_REQ_IES = 1<<17, - STATION_INFO_STA_FLAGS = 1<<18 + STATION_INFO_STA_FLAGS = 1<<18, + STATION_INFO_BEACON_LOSS_COUNT = 1<<19 }; /** @@ -615,6 +624,8 @@ struct sta_bss_parameters { * user space MLME/SME implementation. The information is provided for * the cfg80211_new_sta() calls to notify user space of the IEs. * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets. + * @sta_flags: station flags mask & values + * @beacon_loss_count: Number of times beacon loss event has triggered. */ struct station_info { u32 filled; @@ -642,6 +653,8 @@ struct station_info { const u8 *assoc_req_ies; size_t assoc_req_ies_len; + u32 beacon_loss_count; + /* * Note: Add a new enum station_info_flags value for each new field and * use it to check which fields are initialized. 
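The station_info hunk just above adds a beacon_loss_count field together with a STATION_INFO_BEACON_LOSS_COUNT bit, following the rule restated in its comment: every optional field is paired with a bit in the filled mask, and consumers check that bit before reading the field. The sketch below illustrates only that convention with cut-down stand-in types; it is not the real cfg80211 structure, and the helper names are invented for the example.

/* Sketch of the "filled bitmap" convention that the new
 * beacon_loss_count field follows; cut-down stand-in types only. */
#include <stdint.h>
#include <stdio.h>

#define STATION_INFO_BEACON_LOSS_COUNT	(1u << 19)	/* bit added in the diff */

struct station_info_sketch {
	uint32_t filled;		/* which optional fields are valid */
	uint32_t beacon_loss_count;	/* field added in the diff */
};

/* A driver reports only what it measured and flags it in ->filled. */
static void driver_fill_sta(struct station_info_sketch *sinfo,
			    uint32_t beacon_losses)
{
	sinfo->beacon_loss_count = beacon_losses;
	sinfo->filled |= STATION_INFO_BEACON_LOSS_COUNT;
}

static void dump_sta(const struct station_info_sketch *sinfo)
{
	/* Consumers test the bit before trusting the field. */
	if (sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT)
		printf("beacon losses: %u\n",
		       (unsigned int)sinfo->beacon_loss_count);
	else
		printf("beacon loss count not reported\n");
}

int main(void)
{
	struct station_info_sketch sinfo = { 0 };

	dump_sta(&sinfo);		/* not reported yet */
	driver_fill_sta(&sinfo, 2);
	dump_sta(&sinfo);		/* beacon losses: 2 */
	return 0;
}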
@@ -774,6 +787,7 @@ struct mesh_config { u16 min_discovery_timeout; u32 dot11MeshHWMPactivePathTimeout; u16 dot11MeshHWMPpreqMinInterval; + u16 dot11MeshHWMPperrMinInterval; u16 dot11MeshHWMPnetDiameterTraversalTime; u8 dot11MeshHWMPRootMode; u16 dot11MeshHWMPRannInterval; @@ -794,6 +808,7 @@ struct mesh_config { * @ie_len: length of vendor information elements * @is_authenticated: this mesh requires authentication * @is_secure: this mesh uses security + * @mcast_rate: multicat rate for Mesh Node [6Mbps is the default for 802.11a] * * These parameters are fixed when the mesh is created. */ @@ -806,6 +821,7 @@ struct mesh_setup { u8 ie_len; bool is_authenticated; bool is_secure; + int mcast_rate[IEEE80211_NUM_BANDS]; }; /** @@ -1036,6 +1052,15 @@ struct cfg80211_auth_request { }; /** + * enum cfg80211_assoc_req_flags - Over-ride default behaviour in association. + * + * @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n) + */ +enum cfg80211_assoc_req_flags { + ASSOC_REQ_DISABLE_HT = BIT(0), +}; + +/** * struct cfg80211_assoc_request - (Re)Association request data * * This structure provides information needed to complete IEEE 802.11 @@ -1046,6 +1071,10 @@ struct cfg80211_auth_request { * @use_mfp: Use management frame protection (IEEE 802.11w) in this association * @crypto: crypto settings * @prev_bssid: previous BSSID, if not %NULL use reassociate frame + * @flags: See &enum cfg80211_assoc_req_flags + * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask + * will be used in ht_capa. Un-supported values will be ignored. + * @ht_capa_mask: The bits of ht_capa which are to be used. */ struct cfg80211_assoc_request { struct cfg80211_bss *bss; @@ -1053,6 +1082,9 @@ struct cfg80211_assoc_request { size_t ie_len; struct cfg80211_crypto_settings crypto; bool use_mfp; + u32 flags; + struct ieee80211_ht_cap ht_capa; + struct ieee80211_ht_cap ht_capa_mask; }; /** @@ -1122,6 +1154,7 @@ struct cfg80211_ibss_params { u8 *ssid; u8 *bssid; struct ieee80211_channel *channel; + enum nl80211_channel_type channel_type; u8 *ie; u8 ssid_len, ie_len; u16 beacon_interval; @@ -1151,6 +1184,10 @@ struct cfg80211_ibss_params { * @key_len: length of WEP key for shared key authentication * @key_idx: index of WEP key for shared key authentication * @key: WEP key for shared key authentication + * @flags: See &enum cfg80211_assoc_req_flags + * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask + * will be used in ht_capa. Un-supported values will be ignored. + * @ht_capa_mask: The bits of ht_capa which are to be used. */ struct cfg80211_connect_params { struct ieee80211_channel *channel; @@ -1164,6 +1201,9 @@ struct cfg80211_connect_params { struct cfg80211_crypto_settings crypto; const u8 *key; u8 key_len, key_idx; + u32 flags; + struct ieee80211_ht_cap ht_capa; + struct ieee80211_ht_cap ht_capa_mask; }; /** @@ -1311,7 +1351,12 @@ struct cfg80211_gtk_rekey_data { * * @add_station: Add a new station. * @del_station: Remove a station; @mac may be NULL to remove all stations. - * @change_station: Modify a given station. + * @change_station: Modify a given station. Note that flags changes are not much + * validated in cfg80211, in particular the auth/assoc/authorized flags + * might come to the driver in invalid combinations -- make sure to check + * them, also against the existing state! Also, supported_rates changes are + * not checked in station mode -- drivers need to reject (or ignore) them + * for anything but TDLS peers. 
* @get_station: get station information for the station identified by @mac * @dump_station: dump station callback -- resume dump at index @idx * @@ -1338,6 +1383,9 @@ struct cfg80211_gtk_rekey_data { * doesn't verify much. Note, however, that the passed netdev may be * %NULL as well if the user requested changing the channel for the * device itself, or for a monitor interface. + * @get_channel: Get the current operating channel, should return %NULL if + * there's no single defined operating channel if for example the + * device implements channel hopping for multi-channel virtual interfaces. * * @scan: Request to do a scan. If returning zero, the scan request is given * the driver, and will be valid until passed to cfg80211_scan_done(). @@ -1365,7 +1413,8 @@ struct cfg80211_gtk_rekey_data { * have changed. The actual parameter values are available in * struct wiphy. If returning an error, no value should be changed. * - * @set_tx_power: set the transmit power according to the parameters + * @set_tx_power: set the transmit power according to the parameters, + * the power passed is in mBm, to get dBm use MBM_TO_DBM(). * @get_tx_power: store the current TX power into the dbm variable; * return 0 if successful * @@ -1428,6 +1477,11 @@ struct cfg80211_gtk_rekey_data { * * @tdls_mgmt: Transmit a TDLS management frame. * @tdls_oper: Perform a high-level TDLS operation (e.g. TDLS link setup). + * + * @probe_client: probe an associated client, must return a cookie that it + * later passes to cfg80211_probe_status(). + * + * @set_noack_map: Set the NoAck Map for the TIDs. */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -1581,7 +1635,7 @@ struct cfg80211_ops { enum nl80211_channel_type channel_type, bool channel_type_valid, unsigned int wait, const u8 *buf, size_t len, bool no_cck, - u64 *cookie); + bool dont_wait_for_ack, u64 *cookie); int (*mgmt_tx_cancel_wait)(struct wiphy *wiphy, struct net_device *dev, u64 cookie); @@ -1617,6 +1671,15 @@ struct cfg80211_ops { u16 status_code, const u8 *buf, size_t len); int (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev, u8 *peer, enum nl80211_tdls_operation oper); + + int (*probe_client)(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, u64 *cookie); + + int (*set_noack_map)(struct wiphy *wiphy, + struct net_device *dev, + u16 noack_map); + + struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy); }; /* @@ -1641,7 +1704,9 @@ struct cfg80211_ops { * regulatory domain no user regulatory domain can enable these channels * at a later time. This can be used for devices which do not have * calibration information guaranteed for frequencies or settings - * outside of its regulatory domain. + * outside of its regulatory domain. If used in combination with + * WIPHY_FLAG_CUSTOM_REGULATORY the inspected country IE power settings + * will be followed. * @WIPHY_FLAG_DISABLE_BEACON_HINTS: enable this if your driver needs to ensure * that passive scan flags and beaconing flags may not be lifted by * cfg80211 due to regulatory beacon hints. For more information on beacon @@ -1675,6 +1740,14 @@ struct cfg80211_ops { * teardown packets should be sent through the @NL80211_CMD_TDLS_MGMT * command. When this flag is not set, @NL80211_CMD_TDLS_OPER should be * used for asking the driver/firmware to perform a TDLS operation. 
+ * @WIPHY_FLAG_HAVE_AP_SME: device integrates AP SME + * @WIPHY_FLAG_REPORTS_OBSS: the device will report beacons from other BSSes + * when there are virtual interfaces in AP mode by calling + * cfg80211_report_obss_beacon(). + * @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD: When operating as an AP, the device + * responds to probe-requests in hardware. + * @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX. + * @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call. */ enum wiphy_flags { WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0), @@ -1693,6 +1766,11 @@ enum wiphy_flags { WIPHY_FLAG_AP_UAPSD = BIT(14), WIPHY_FLAG_SUPPORTS_TDLS = BIT(15), WIPHY_FLAG_TDLS_EXTERNAL_SETUP = BIT(16), + WIPHY_FLAG_HAVE_AP_SME = BIT(17), + WIPHY_FLAG_REPORTS_OBSS = BIT(18), + WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = BIT(19), + WIPHY_FLAG_OFFCHAN_TX = BIT(20), + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21), }; /** @@ -1865,6 +1943,7 @@ struct wiphy_wowlan_support { * @software_iftypes: bitmask of software interface types, these are not * subject to any restrictions since they are purely managed in SW. * @flags: wiphy flags, see &enum wiphy_flags + * @features: features advertised to nl80211, see &enum nl80211_feature_flags. * @bss_priv_size: each BSS struct has private data allocated with it, * this variable determines its size * @max_scan_ssids: maximum number of SSIDs the device can scan for in @@ -1903,6 +1982,10 @@ struct wiphy_wowlan_support { * may request, if implemented. * * @wowlan: WoWLAN support information + * + * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features. + * @ht_capa_mod_mask: Specify what ht_cap values can be over-ridden. + * If null, then none can be over-ridden. */ struct wiphy { /* assign these fields before you register the wiphy */ @@ -1924,7 +2007,9 @@ struct wiphy { /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */ u16 interface_modes; - u32 flags; + u32 flags, features; + + u32 ap_sme_capa; enum cfg80211_signal_type signal_type; @@ -1956,6 +2041,13 @@ struct wiphy { u32 available_antennas_tx; u32 available_antennas_rx; + /* + * Bitmap of supported protocols for probe response offloading + * see &enum nl80211_probe_resp_offload_support_attr. Only valid + * when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set. + */ + u32 probe_resp_offload; + /* If multiple wiphys are registered and you're handed e.g. 
* a regular netdev with assigned ieee80211_ptr, you won't * know whether it points to a wiphy your driver has registered @@ -1983,6 +2075,8 @@ struct wiphy { /* dir in debugfs: ieee80211/<wiphyname> */ struct dentry *debugfsdir; + const struct ieee80211_ht_cap *ht_capa_mod_mask; + #ifdef CONFIG_NET_NS /* the network namespace this phy lives in currently */ struct net *_net; @@ -2179,6 +2273,8 @@ struct wireless_dev { int beacon_interval; + u32 ap_unexpected_nlpid; + #ifdef CONFIG_CFG80211_WEXT /* wext data */ struct { @@ -2345,69 +2441,6 @@ extern int ieee80211_radiotap_iterator_next( extern const unsigned char rfc1042_header[6]; extern const unsigned char bridge_tunnel_header[6]; -/* Parsed Information Elements */ -struct ieee802_11_elems { - u8 *ie_start; - size_t total_len; - - /* pointers to IEs */ - u8 *ssid; - u8 *supp_rates; - u8 *fh_params; - u8 *ds_params; - u8 *cf_params; - struct ieee80211_tim_ie *tim; - u8 *ibss_params; - u8 *challenge; - u8 *wpa; - u8 *rsn; - u8 *erp_info; - u8 *ext_supp_rates; - u8 *wmm_info; - u8 *wmm_param; - struct ieee80211_ht_cap *ht_cap_elem; - struct ieee80211_ht_info *ht_info_elem; - struct ieee80211_meshconf_ie *mesh_config; - u8 *mesh_id; - u8 *peering; - u8 *preq; - u8 *prep; - u8 *perr; - struct ieee80211_rann_ie *rann; - u8 *ch_switch_elem; - u8 *country_elem; - u8 *pwr_constr_elem; - u8 *quiet_elem; /* first quite element */ - u8 *timeout_int; - - /* length of them, respectively */ - u8 ssid_len; - u8 supp_rates_len; - u8 fh_params_len; - u8 ds_params_len; - u8 cf_params_len; - u8 tim_len; - u8 ibss_params_len; - u8 challenge_len; - u8 wpa_len; - u8 rsn_len; - u8 erp_info_len; - u8 ext_supp_rates_len; - u8 wmm_info_len; - u8 wmm_param_len; - u8 mesh_id_len; - u8 peering_len; - u8 preq_len; - u8 prep_len; - u8 perr_len; - u8 ch_switch_elem_len; - u8 country_elem_len; - u8 pwr_constr_elem_len; - u8 quiet_elem_len; - u8 num_of_quiet_elem; /* can be more the one */ - u8 timeout_int_len; -}; - /** * ieee80211_get_hdrlen_from_skb - get header length from data * @@ -2632,8 +2665,10 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy); * * This informs cfg80211 that BSS information was found and * the BSS should be updated/added. + * + * NOTE: Returns a referenced struct, must be released with cfg80211_put_bss()! */ -struct cfg80211_bss* +struct cfg80211_bss * __must_check cfg80211_inform_bss_frame(struct wiphy *wiphy, struct ieee80211_channel *channel, struct ieee80211_mgmt *mgmt, size_t len, @@ -2655,8 +2690,10 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, * * This informs cfg80211 that BSS information was found and * the BSS should be updated/added. + * + * NOTE: Returns a referenced struct, must be released with cfg80211_put_bss()! */ -struct cfg80211_bss* +struct cfg80211_bss * __must_check cfg80211_inform_bss(struct wiphy *wiphy, struct ieee80211_channel *channel, const u8 *bssid, @@ -3039,6 +3076,32 @@ void cfg80211_roamed(struct net_device *dev, const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp); /** + * cfg80211_roamed_bss - notify cfg80211 of roaming + * + * @dev: network device + * @bss: entry of bss to which STA got roamed + * @req_ie: association request IEs (maybe be %NULL) + * @req_ie_len: association request IEs length + * @resp_ie: association response IEs (may be %NULL) + * @resp_ie_len: assoc response IEs length + * @gfp: allocation flags + * + * This is just a wrapper to notify cfg80211 of roaming event with driver + * passing bss to avoid a race in timeout of the bss entry. 
It should be + called by the underlying driver whenever it roamed from one AP to another + while connected. Drivers which have roaming implemented in firmware + may use this function to avoid a race in bss entry timeout where the bss + entry of the new AP is seen in the driver, but gets timed out by the time + it is accessed in __cfg80211_roamed() due to delay in scheduling + rdev->event_work. In case of any failures, the reference is released + either in cfg80211_roamed_bss() or in __cfg80211_roamed(). Otherwise, + it will be released while disconnecting from the current bss. + */ +void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss, + const u8 *req_ie, size_t req_ie_len, + const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp); + +/** * cfg80211_disconnected - notify cfg80211 that connection was dropped * * @dev: network device @@ -3185,6 +3248,74 @@ void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid, void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index, const u8 *bssid, bool preauth, gfp_t gfp); +/** + * cfg80211_rx_spurious_frame - inform userspace about a spurious frame + * @dev: The device the frame matched to + * @addr: the transmitter address + * @gfp: context flags + * + * This function is used in AP mode (only!) to inform userspace that + * a spurious class 3 frame was received, to be able to deauth the + * sender. + * Returns %true if the frame was passed to userspace (or this failed + * for a reason other than not having a subscription.) + */ +bool cfg80211_rx_spurious_frame(struct net_device *dev, + const u8 *addr, gfp_t gfp); + +/** + * cfg80211_rx_unexpected_4addr_frame - inform about unexpected WDS frame + * @dev: The device the frame matched to + * @addr: the transmitter address + * @gfp: context flags + * + * This function is used in AP mode (only!) to inform userspace that + * an associated station sent a 4addr frame but that wasn't expected. + * It is allowed and desirable to send this event only once for each + * station to avoid event flooding. + * Returns %true if the frame was passed to userspace (or this failed + * for a reason other than not having a subscription.) + */ +bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev, + const u8 *addr, gfp_t gfp); + +/** + * cfg80211_probe_status - notify userspace about probe status + * @dev: the device the probe was sent on + * @addr: the address of the peer + * @cookie: the cookie filled in @probe_client previously + * @acked: indicates whether probe was acked or not + * @gfp: allocation flags + */ +void cfg80211_probe_status(struct net_device *dev, const u8 *addr, + u64 cookie, bool acked, gfp_t gfp); + +/** + * cfg80211_report_obss_beacon - report beacon from other APs + * @wiphy: The wiphy that received the beacon + * @frame: the frame + * @len: length of the frame + * @freq: frequency the frame was received on + * @gfp: allocation flags + * + * Use this function to report to userspace when a beacon was + * received. It is not useful to call this when there is no + * netdev that is in AP/GO mode.
+ */ +void cfg80211_report_obss_beacon(struct wiphy *wiphy, + const u8 *frame, size_t len, + int freq, gfp_t gfp); + +/* + * cfg80211_can_beacon_sec_chan - test if ht40 on extension channel can be used + * @wiphy: the wiphy + * @chan: main channel + * @channel_type: HT mode + */ +int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type); + /* Logging, debugging and troubleshooting/diagnostic helpers. */ /* wiphy_printk helpers, similar to dev_printk */ diff --git a/include/net/dsa.h b/include/net/dsa.h index 839f768f9e3..7828ebf99ee 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -11,6 +11,11 @@ #ifndef __LINUX_NET_DSA_H #define __LINUX_NET_DSA_H +#include <linux/if_ether.h> +#include <linux/list.h> +#include <linux/timer.h> +#include <linux/workqueue.h> + #define DSA_MAX_SWITCHES 4 #define DSA_MAX_PORTS 12 @@ -54,8 +59,143 @@ struct dsa_platform_data { struct dsa_chip_data *chip; }; -extern bool dsa_uses_dsa_tags(void *dsa_ptr); -extern bool dsa_uses_trailer_tags(void *dsa_ptr); +struct dsa_switch_tree { + /* + * Configuration data for the platform device that owns + * this dsa switch tree instance. + */ + struct dsa_platform_data *pd; + + /* + * Reference to network device to use, and which tagging + * protocol to use. + */ + struct net_device *master_netdev; + __be16 tag_protocol; + + /* + * The switch and port to which the CPU is attached. + */ + s8 cpu_switch; + s8 cpu_port; + + /* + * Link state polling. + */ + int link_poll_needed; + struct work_struct link_poll_work; + struct timer_list link_poll_timer; + + /* + * Data for the individual switch chips. + */ + struct dsa_switch *ds[DSA_MAX_SWITCHES]; +}; + +struct dsa_switch { + /* + * Parent switch tree, and switch index. + */ + struct dsa_switch_tree *dst; + int index; + + /* + * Configuration data for this switch. + */ + struct dsa_chip_data *pd; + + /* + * The used switch driver. + */ + struct dsa_switch_driver *drv; + + /* + * Reference to mii bus to use. + */ + struct mii_bus *master_mii_bus; + + /* + * Slave mii_bus and devices for the individual ports. + */ + u32 dsa_port_mask; + u32 phys_port_mask; + struct mii_bus *slave_mii_bus; + struct net_device *ports[DSA_MAX_PORTS]; +}; + +static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p) +{ + return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port); +} + +static inline u8 dsa_upstream_port(struct dsa_switch *ds) +{ + struct dsa_switch_tree *dst = ds->dst; + + /* + * If this is the root switch (i.e. the switch that connects + * to the CPU), return the cpu port number on this switch. + * Else return the (DSA) port number that connects to the + * switch that is one hop closer to the cpu. + */ + if (dst->cpu_switch == ds->index) + return dst->cpu_port; + else + return ds->pd->rtable[dst->cpu_switch]; +} + +struct dsa_switch_driver { + struct list_head list; + + __be16 tag_protocol; + int priv_size; + + /* + * Probing and setup. + */ + char *(*probe)(struct mii_bus *bus, int sw_addr); + int (*setup)(struct dsa_switch *ds); + int (*set_addr)(struct dsa_switch *ds, u8 *addr); + + /* + * Access to the switch's PHY registers. + */ + int (*phy_read)(struct dsa_switch *ds, int port, int regnum); + int (*phy_write)(struct dsa_switch *ds, int port, + int regnum, u16 val); + + /* + * Link state polling and IRQ handling. + */ + void (*poll_link)(struct dsa_switch *ds); + + /* + * ethtool hardware statistics. 
+ */ + void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data); + void (*get_ethtool_stats)(struct dsa_switch *ds, + int port, uint64_t *data); + int (*get_sset_count)(struct dsa_switch *ds); +}; + +void register_switch_driver(struct dsa_switch_driver *type); +void unregister_switch_driver(struct dsa_switch_driver *type); + +/* + * The original DSA tag format and some other tag formats have no + * ethertype, which means that we need to add a little hack to the + * networking receive path to make sure that received frames get + * the right ->protocol assigned to them when one of those tag + * formats is in use. + */ +static inline bool dsa_uses_dsa_tags(struct dsa_switch_tree *dst) +{ + return !!(dst->tag_protocol == htons(ETH_P_DSA)); +} +static inline bool dsa_uses_trailer_tags(struct dsa_switch_tree *dst) +{ + return !!(dst->tag_protocol == htons(ETH_P_TRAILER)); +} #endif diff --git a/include/net/dst.h b/include/net/dst.h index 4fb6c438179..344c8dd0287 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -53,6 +53,7 @@ struct dst_entry { #define DST_NOHASH 0x0008 #define DST_NOCACHE 0x0010 #define DST_NOCOUNT 0x0020 +#define DST_NOPEER 0x0040 short error; short obsolete; @@ -86,12 +87,12 @@ struct dst_entry { }; }; -static inline struct neighbour *dst_get_neighbour(struct dst_entry *dst) +static inline struct neighbour *dst_get_neighbour_noref(struct dst_entry *dst) { return rcu_dereference(dst->_neighbour); } -static inline struct neighbour *dst_get_neighbour_raw(struct dst_entry *dst) +static inline struct neighbour *dst_get_neighbour_noref_raw(struct dst_entry *dst) { return rcu_dereference_raw(dst->_neighbour); } @@ -205,12 +206,7 @@ dst_feature(const struct dst_entry *dst, u32 feature) static inline u32 dst_mtu(const struct dst_entry *dst) { - u32 mtu = dst_metric_raw(dst, RTAX_MTU); - - if (!mtu) - mtu = dst->ops->default_mtu(dst); - - return mtu; + return dst->ops->mtu(dst); } /* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */ @@ -397,7 +393,7 @@ static inline void dst_confirm(struct dst_entry *dst) struct neighbour *n; rcu_read_lock(); - n = dst_get_neighbour(dst); + n = dst_get_neighbour_noref(dst); neigh_confirm(n); rcu_read_unlock(); } diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index 9adb99845a5..e1c2ee0eef4 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -17,7 +17,7 @@ struct dst_ops { int (*gc)(struct dst_ops *ops); struct dst_entry * (*check)(struct dst_entry *, __u32 cookie); unsigned int (*default_advmss)(const struct dst_entry *); - unsigned int (*default_mtu)(const struct dst_entry *); + unsigned int (*mtu)(const struct dst_entry *); u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, diff --git a/include/net/flow.h b/include/net/flow.h index a09447749e2..da1f064a81b 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -59,8 +59,11 @@ struct flowi4 { #define flowi4_proto __fl_common.flowic_proto #define flowi4_flags __fl_common.flowic_flags #define flowi4_secid __fl_common.flowic_secid - __be32 daddr; + + /* (saddr,daddr) must be grouped, same order as in IP header */ __be32 saddr; + __be32 daddr; + union flowi_uli uli; #define fl4_sport uli.ports.sport #define fl4_dport uli.ports.dport @@ -207,6 +210,7 @@ extern struct flow_cache_object *flow_cache_lookup( u8 dir, flow_resolve_t resolver, void *ctx); extern void flow_cache_flush(void); +extern void flow_cache_flush_deferred(void); extern atomic_t 
flow_cache_genid; #endif diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h new file mode 100644 index 00000000000..80461c1ae9e --- /dev/null +++ b/include/net/flow_keys.h @@ -0,0 +1,16 @@ +#ifndef _NET_FLOW_KEYS_H +#define _NET_FLOW_KEYS_H + +struct flow_keys { + /* (src,dst) must be grouped, in the same way than in IP header */ + __be32 src; + __be32 dst; + union { + __be32 ports; + __be16 port16[2]; + }; + u8 ip_proto; +}; + +extern bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow); +#endif diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 82d8d09faa4..7db32995ccd 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -128,6 +128,8 @@ extern int genl_register_mc_group(struct genl_family *family, struct genl_multicast_group *grp); extern void genl_unregister_mc_group(struct genl_family *family, struct genl_multicast_group *grp); +extern void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, + u32 group, struct nlmsghdr *nlh, gfp_t flags); /** * genlmsg_put - Add generic netlink header to netlink message diff --git a/include/net/icmp.h b/include/net/icmp.h index f0698b955b7..75d61564907 100644 --- a/include/net/icmp.h +++ b/include/net/icmp.h @@ -31,8 +31,8 @@ struct icmp_err { extern const struct icmp_err icmp_err_convert[]; #define ICMP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmp_statistics, field) #define ICMP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field) -#define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmpmsg_statistics, field+256) -#define ICMPMSGIN_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmpmsg_statistics, field) +#define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256) +#define ICMPMSGIN_INC_STATS_BH(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field) struct dst_entry; struct net_proto_family; diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index 7e2c4d483ad..71392545d0a 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -271,14 +271,6 @@ enum ieee80211_radiotap_type { #define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10 -/* Ugly macro to convert literal channel numbers into their mhz equivalents - * There are certianly some conditions that will break this (like feeding it '30') - * but they shouldn't arise since nothing talks on channel 30. */ -#define ieee80211chan2mhz(x) \ - (((x) <= 14) ? \ - (((x) == 14) ? 
2484 : ((x) * 5) + 2407) : \ - ((x) + 1000) * 5) - /* helpers */ static inline int ieee80211_get_radiotap_len(unsigned char *data) { diff --git a/include/net/ieee802154.h b/include/net/ieee802154.h index d52685defb1..ee59f8b188d 100644 --- a/include/net/ieee802154.h +++ b/include/net/ieee802154.h @@ -21,11 +21,14 @@ * Maxim Gorbachyov <maxim.gorbachev@siemens.com> * Maxim Osipov <maxim.osipov@siemens.com> * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> */ #ifndef NET_IEEE802154_H #define NET_IEEE802154_H +#define IEEE802154_MTU 127 + #define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */ #define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */ #define IEEE802154_FC_TYPE_ACK 0x2 /* Frame is acknowledgment */ @@ -56,6 +59,9 @@ (((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT) +/* MAC footer size */ +#define IEEE802154_MFR_SIZE 2 /* 2 octets */ + /* MAC's Command Frames Identifiers */ #define IEEE802154_CMD_ASSOCIATION_REQ 0x01 #define IEEE802154_CMD_ASSOCIATION_RESP 0x02 diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index e46674d5dae..00cbb4384c7 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -15,7 +15,7 @@ #define _INET6_HASHTABLES_H -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) #include <linux/in6.h> #include <linux/ipv6.h> #include <linux/types.h> @@ -110,5 +110,5 @@ extern struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, const __be16 dport, const int dif); -#endif /* defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) */ +#endif /* IS_ENABLED(CONFIG_IPV6) */ #endif /* _INET6_HASHTABLES_H */ diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index e6db62e756d..dbf9aab34c8 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -143,9 +143,9 @@ static inline void *inet_csk_ca(const struct sock *sk) return (void *)inet_csk(sk)->icsk_ca_priv; } -extern struct sock *inet_csk_clone(struct sock *sk, - const struct request_sock *req, - const gfp_t priority); +extern struct sock *inet_csk_clone_lock(const struct sock *sk, + const struct request_sock *req, + const gfp_t priority); enum inet_csk_ack_state_t { ICSK_ACK_SCHED = 1, diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index b897d6e6d0a..e3e405106af 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -31,6 +31,7 @@ /** struct ip_options - IP Options * * @faddr - Saved first hop address + * @nexthop - Saved nexthop address in LSRR and SSRR * @is_data - Options in __data, rather than skb * @is_strictroute - Strict source route * @srr_is_hit - Packet destination addr was our one @@ -41,6 +42,7 @@ */ struct ip_options { __be32 faddr; + __be32 nexthop; unsigned char optlen; unsigned char srr; unsigned char rr; @@ -69,7 +71,7 @@ struct ip_options_data { struct inet_request_sock { struct request_sock req; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) u16 inet6_rsk_offset; #endif __be16 loc_port; @@ -137,7 +139,7 @@ struct rtable; struct inet_sock { /* sk and pinet6 has to be the first two members of inet_sock */ struct sock sk; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) struct ipv6_pinfo *pinet6; #endif /* Socket demultiplex comparisons on incoming packets. 
*/ @@ -186,7 +188,7 @@ static inline void __inet_sk_copy_descendant(struct sock *sk_to, memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1, sk_from->sk_prot->obj_size - ancestor_size); } -#if !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) +#if !(IS_ENABLED(CONFIG_IPV6)) static inline void inet_sk_copy_descendant(struct sock *sk_to, const struct sock *sk_from) { diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index e8c25b98120..ba52c830a7a 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -218,20 +218,12 @@ extern void inet_twsk_purge(struct inet_hashinfo *hashinfo, static inline struct net *twsk_net(const struct inet_timewait_sock *twsk) { -#ifdef CONFIG_NET_NS - return rcu_dereference_raw(twsk->tw_net); /* protected by locking, */ - /* reference counting, */ - /* initialization, or RCU. */ -#else - return &init_net; -#endif + return read_pnet(&twsk->tw_net); } static inline void twsk_net_set(struct inet_timewait_sock *twsk, struct net *net) { -#ifdef CONFIG_NET_NS - rcu_assign_pointer(twsk->tw_net, net); -#endif + write_pnet(&twsk->tw_net, net); } #endif /* _INET_TIMEWAIT_SOCK_ */ diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 78c83e62218..06b795dd590 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h @@ -35,6 +35,7 @@ struct inet_peer { u32 metrics[RTAX_MAX]; u32 rate_tokens; /* rate limiting for ICMP */ + int redirect_genid; unsigned long rate_last; unsigned long pmtu_expires; u32 pmtu_orig; @@ -86,7 +87,7 @@ static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr, { struct inetpeer_addr daddr; - ipv6_addr_copy((struct in6_addr *)daddr.addr.a6, v6daddr); + *(struct in6_addr *)daddr.addr.a6 = *v6daddr; daddr.family = AF_INET6; return inet_getpeer(&daddr, create); } diff --git a/include/net/ip.h b/include/net/ip.h index eca0ef7a495..775009f9eab 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -353,14 +353,14 @@ static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, memcpy(buf, &naddr, sizeof(naddr)); } -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) #include <linux/ipv6.h> #endif static __inline__ void inet_reset_saddr(struct sock *sk) { inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == PF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); @@ -379,7 +379,7 @@ static inline int sk_mc_loop(struct sock *sk) switch (sk->sk_family) { case AF_INET: return inet_sk(sk)->mc_loop; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) case AF_INET6: return inet6_sk(sk)->mc_loop; #endif @@ -450,7 +450,7 @@ extern int ip_options_rcv_srr(struct sk_buff *skb); * Functions provided by ip_sockglue.c */ -extern int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); +extern void ipv4_pktinfo_prepare(struct sk_buff *skb); extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb); extern int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc); diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 5735a0f979c..b26bb810198 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -86,9 +86,6 @@ struct fib6_table; struct rt6_info { struct dst_entry dst; -#define rt6i_dev dst.dev -#define rt6i_expires dst.expires - /* * Tail elements of dst_entry (__refcnt etc.) 
* and these elements (rarely used in hot path) are in @@ -202,6 +199,10 @@ struct fib6_node *fib6_locate(struct fib6_node *root, const struct in6_addr *daddr, int dst_len, const struct in6_addr *saddr, int src_len); +extern void fib6_clean_all_ro(struct net *net, + int (*func)(struct rt6_info *, void *arg), + int prune, void *arg); + extern void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg), int prune, void *arg); diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 5e91b72fc71..2ad92ca4e6f 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -70,6 +70,8 @@ extern void ip6_route_input(struct sk_buff *skb); extern struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk, struct flowi6 *fl6); +extern struct dst_entry * ip6_route_lookup(struct net *net, + struct flowi6 *fl6, int flags); extern int ip6_route_init(void); extern void ip6_route_cleanup(void); @@ -95,14 +97,14 @@ extern struct rt6_info *rt6_lookup(struct net *net, extern struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct neighbour *neigh, - const struct in6_addr *addr); + struct flowi6 *fl6); extern int icmp6_dst_gc(void); extern void fib6_force_start_gc(struct net *net); extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, const struct in6_addr *addr, - int anycast); + bool anycast); extern int ip6_dst_hoplimit(struct dst_entry *dst); diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 873d5be7926..ebe517f2da9 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -21,7 +21,7 @@ #include <linux/netfilter.h> /* for union nf_inet_addr */ #include <linux/ip.h> #include <linux/ipv6.h> /* for struct ipv6hdr */ -#include <net/ipv6.h> /* for ipv6_addr_copy */ +#include <net/ipv6.h> #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) #include <net/netfilter/nf_conntrack.h> #endif @@ -119,8 +119,8 @@ ip_vs_fill_iphdr(int af, const void *nh, struct ip_vs_iphdr *iphdr) const struct ipv6hdr *iph = nh; iphdr->len = sizeof(struct ipv6hdr); iphdr->protocol = iph->nexthdr; - ipv6_addr_copy(&iphdr->saddr.in6, &iph->saddr); - ipv6_addr_copy(&iphdr->daddr.in6, &iph->daddr); + iphdr->saddr.in6 = iph->saddr; + iphdr->daddr.in6 = iph->daddr; } else #endif { @@ -137,7 +137,7 @@ static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst, { #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) - ipv6_addr_copy(&dst->in6, &src->in6); + dst->in6 = src->in6; else #endif dst->ip = src->ip; @@ -1207,7 +1207,7 @@ extern void ip_vs_control_cleanup(void); extern struct ip_vs_dest * ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr, __be16 dport, const union nf_inet_addr *vaddr, __be16 vport, - __u16 protocol, __u32 fwmark); + __u16 protocol, __u32 fwmark, __u32 flags); extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index a366a8a1fe2..e4170a22fc6 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -132,6 +132,15 @@ extern struct ctl_path net_ipv6_ctl_path[]; SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\ }) +/* per device and per net counters are atomic_long_t */ +#define _DEVINC_ATOMIC_ATOMIC(net, statname, idev, field) \ +({ \ + struct inet6_dev *_idev = (idev); \ + if (likely(_idev != NULL)) \ + SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \ + SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\ +}) + #define _DEVADD(net, statname, modifier, idev, 
field, val) \ ({ \ struct inet6_dev *_idev = (idev); \ @@ -168,11 +177,11 @@ extern struct ctl_path net_ipv6_ctl_path[]; _DEVINCATOMIC(net, icmpv6, _BH, idev, field) #define ICMP6MSGOUT_INC_STATS(net, idev, field) \ - _DEVINCATOMIC(net, icmpv6msg, , idev, field +256) + _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256) #define ICMP6MSGOUT_INC_STATS_BH(net, idev, field) \ - _DEVINCATOMIC(net, icmpv6msg, _BH, idev, field +256) + _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256) #define ICMP6MSGIN_INC_STATS_BH(net, idev, field) \ - _DEVINCATOMIC(net, icmpv6msg, _BH, idev, field) + _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field) struct ip6_ra_chain { struct ip6_ra_chain *next; @@ -300,11 +309,6 @@ ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m, ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3])); } -static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2) -{ - memcpy(a1, a2, sizeof(struct in6_addr)); -} - static inline void ipv6_addr_prefix(struct in6_addr *pfx, const struct in6_addr *addr, int plen) @@ -554,7 +558,7 @@ extern void ipv6_push_frag_opts(struct sk_buff *skb, u8 *proto); extern int ipv6_skip_exthdr(const struct sk_buff *, int start, - u8 *nexthdrp); + u8 *nexthdrp, __be16 *frag_offp); extern int ipv6_ext_hdr(u8 nexthdr); diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h index f2419cf44ce..0954ec95915 100644 --- a/include/net/iucv/af_iucv.h +++ b/include/net/iucv/af_iucv.h @@ -27,7 +27,6 @@ enum { IUCV_OPEN, IUCV_BOUND, IUCV_LISTEN, - IUCV_SEVERED, IUCV_DISCONN, IUCV_CLOSING, IUCV_CLOSED @@ -146,7 +145,6 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock, poll_table *wait); void iucv_sock_link(struct iucv_sock_list *l, struct sock *s); void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s); -int iucv_sock_wait_cnt(struct sock *sk, unsigned long timeo); void iucv_accept_enqueue(struct sock *parent, struct sock *sk); void iucv_accept_unlink(struct sock *sk); struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 72eddd1b410..d49928ba5d0 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -166,6 +166,7 @@ struct ieee80211_low_level_stats { * that it is only ever disabled for station mode. * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface. 
* @BSS_CHANGED_SSID: SSID changed for this BSS (AP mode) + * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode) */ enum ieee80211_bss_change { BSS_CHANGED_ASSOC = 1<<0, @@ -184,6 +185,7 @@ enum ieee80211_bss_change { BSS_CHANGED_QOS = 1<<13, BSS_CHANGED_IDLE = 1<<14, BSS_CHANGED_SSID = 1<<15, + BSS_CHANGED_AP_PROBE_RESP = 1<<16, /* when adding here, make sure to change ieee80211_reconfig */ }; @@ -518,7 +520,7 @@ struct ieee80211_tx_rate { * @flags: transmit info flags, defined above * @band: the band to transmit on (use for checking for races) * @antenna_sel_tx: antenna to use, 0 for automatic diversity - * @pad: padding, ignore + * @ack_frame_id: internal frame ID for TX status, used internally * @control: union for control data * @status: union for status data * @driver_data: array of driver_data pointers @@ -535,8 +537,7 @@ struct ieee80211_tx_info { u8 antenna_sel_tx; - /* 2 byte hole */ - u8 pad[2]; + u16 ack_frame_id; union { struct { @@ -901,6 +902,10 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif) * @IEEE80211_KEY_FLAG_SW_MGMT: This flag should be set by the driver for a * CCMP key if it requires CCMP encryption of management frames (MFP) to * be done in software. + * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver + * for a CCMP key if space should be prepared for the IV, but the IV + * itself should not be generated. Do not set together with + * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. */ enum ieee80211_key_flags { IEEE80211_KEY_FLAG_WMM_STA = 1<<0, @@ -908,6 +913,7 @@ enum ieee80211_key_flags { IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2, IEEE80211_KEY_FLAG_PAIRWISE = 1<<3, IEEE80211_KEY_FLAG_SW_MGMT = 1<<4, + IEEE80211_KEY_FLAG_PUT_IV_SPACE = 1<<5, }; /** @@ -1304,6 +1310,16 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw, } /** + * ieee80211_free_txskb - free TX skb + * @hw: the hardware + * @skb: the skb + * + * Free a transmit skb. Use this function when some failure + * to transmit happened and thus status cannot be reported. + */ +void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); + +/** * DOC: Hardware crypto acceleration * * mac80211 is capable of taking advantage of many hardware @@ -1423,7 +1439,7 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw, * DOC: Beacon filter support * * Some hardware have beacon filter support to reduce host cpu wakeups - * which will reduce system power consumption. It usuallly works so that + * which will reduce system power consumption. It usually works so that * the firmware creates a checksum of the beacon but omits all constantly * changing elements (TSF, TIM etc). Whenever the checksum changes the * beacon is forwarded to the host, otherwise it will be just dropped. That @@ -1744,11 +1760,21 @@ enum ieee80211_frame_release_type { * skb contains the buffer starting from the IEEE 802.11 header. * The low-level driver should send the frame out based on * configuration in the TX control data. This handler should, - * preferably, never fail and stop queues appropriately, more - * importantly, however, it must never fail for A-MPDU-queues. - * This function should return NETDEV_TX_OK except in very - * limited cases. - * Must be implemented and atomic. + * preferably, never fail and stop queues appropriately. + * This must be implemented if @tx_frags is not. + * Must be atomic. + * + * @tx_frags: Called to transmit multiple fragments of a single MSDU.
+ * This handler must consume all fragments, sending out some of + * them only is useless and it can't ask for some of them to be + * queued again. If the frame is not fragmented the queue has a + * single SKB only. To avoid issues with the networking stack + * when TX status is reported the frames should be removed from + * the skb queue. + * If this is used, the tx_info @vif and @sta pointers will be + * invalid -- you must not use them in that case. + * This must be implemented if @tx isn't. + * Must be atomic. * * @start: Called before the first netdevice attached to the hardware * is enabled. This should turn on the hardware and must turn on @@ -2085,6 +2111,8 @@ enum ieee80211_frame_release_type { */ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); + void (*tx_frags)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct sk_buff_head *skbs); int (*start)(struct ieee80211_hw *hw); void (*stop)(struct ieee80211_hw *hw); #ifdef CONFIG_PM @@ -2661,6 +2689,19 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, } /** + * ieee80211_proberesp_get - retrieve a Probe Response template + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * Creates a Probe Response template which can, for example, be uploaded to + * hardware. The destination address should be set by the caller. + * + * Can only be called in AP mode. + */ +struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + +/** * ieee80211_pspoll_get - retrieve a PS Poll template * @hw: pointer obtained from ieee80211_alloc_hw(). * @vif: &struct ieee80211_vif pointer from the add_interface callback. @@ -3461,9 +3502,12 @@ void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn); * * @IEEE80211_RC_HT_CHANGED: The HT parameters of the operating channel have * changed, rate control algorithm can update its internal state if needed. + * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed, the rate + * control algorithm needs to adjust accordingly. 
*/ enum rate_control_changed { - IEEE80211_RC_HT_CHANGED = BIT(0) + IEEE80211_RC_HT_CHANGED = BIT(0), + IEEE80211_RC_SMPS_CHANGED = BIT(1), }; /** diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 62beeb97c4b..e3133c23980 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -79,6 +79,42 @@ struct nd_opt_hdr { __u8 nd_opt_len; } __packed; +static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, __u32 *hash_rnd) +{ + const u32 *p32 = pkey; + + return (((p32[0] ^ dev->ifindex) * hash_rnd[0]) + + (p32[1] * hash_rnd[1]) + + (p32[2] * hash_rnd[2]) + + (p32[3] * hash_rnd[3])); +} + +static inline struct neighbour *__ipv6_neigh_lookup(struct neigh_table *tbl, struct net_device *dev, const void *pkey) +{ + struct neigh_hash_table *nht; + const u32 *p32 = pkey; + struct neighbour *n; + u32 hash_val; + + rcu_read_lock_bh(); + nht = rcu_dereference_bh(tbl->nht); + hash_val = ndisc_hashfn(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift); + for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); + n != NULL; + n = rcu_dereference_bh(n->next)) { + u32 *n32 = (u32 *) n->primary_key; + if (n->dev == dev && + ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) | + (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0) { + if (!atomic_inc_not_zero(&n->refcnt)) + n = NULL; + break; + } + } + rcu_read_unlock_bh(); + + return n; +} extern int ndisc_init(void); @@ -145,13 +181,4 @@ int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl, extern void inet6_ifinfo_notify(int event, struct inet6_dev *idev); -static inline struct neighbour * ndisc_get_neigh(struct net_device *dev, const struct in6_addr *addr) -{ - - if (dev) - return __neigh_lookup_errno(&nd_tbl, addr, dev); - - return ERR_PTR(-ENODEV); -} - #endif diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 2720884287c..34c996f4618 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -59,7 +59,7 @@ struct neigh_parms { int reachable_time; int delay_probe_time; - int queue_len; + int queue_len_bytes; int ucast_probes; int app_probes; int mcast_probes; @@ -99,6 +99,7 @@ struct neighbour { rwlock_t lock; atomic_t refcnt; struct sk_buff_head arp_queue; + unsigned int arp_queue_len_bytes; struct timer_list timer; unsigned long used; atomic_t probes; @@ -138,10 +139,12 @@ struct pneigh_entry { * neighbour table manipulation */ +#define NEIGH_NUM_HASH_RND 4 + struct neigh_hash_table { struct neighbour __rcu **hash_buckets; unsigned int hash_shift; - __u32 hash_rnd; + __u32 hash_rnd[NEIGH_NUM_HASH_RND]; struct rcu_head rcu; }; @@ -153,7 +156,7 @@ struct neigh_table { int key_len; __u32 (*hash)(const void *pkey, const struct net_device *dev, - __u32 hash_rnd); + __u32 *hash_rnd); int (*constructor)(struct neighbour *); int (*pconstructor)(struct pneigh_entry *); void (*pdestructor)(struct pneigh_entry *); @@ -172,12 +175,18 @@ struct neigh_table { atomic_t entries; rwlock_t lock; unsigned long last_rand; - struct kmem_cache *kmem_cachep; struct neigh_statistics __percpu *stats; struct neigh_hash_table __rcu *nht; struct pneigh_entry **phash_buckets; }; +#define NEIGH_PRIV_ALIGN sizeof(long long) + +static inline void *neighbour_priv(const struct neighbour *n) +{ + return (char *)n + ALIGN(sizeof(*n) + n->tbl->key_len, NEIGH_PRIV_ALIGN); +} + /* flags for neigh_update() */ #define NEIGH_UPDATE_F_OVERRIDE 0x00000001 #define NEIGH_UPDATE_F_WEAK_OVERRIDE 0x00000002 diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 3bb6fa0eace..ee547c14981 100644 --- a/include/net/net_namespace.h +++ 
b/include/net/net_namespace.h @@ -77,7 +77,7 @@ struct net { struct netns_packet packet; struct netns_unix unx; struct netns_ipv4 ipv4; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) struct netns_ipv6 ipv6; #endif #if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE) diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h index 4e9c63a20db..463ae8e1669 100644 --- a/include/net/netfilter/nf_conntrack_acct.h +++ b/include/net/netfilter/nf_conntrack_acct.h @@ -15,8 +15,8 @@ #include <net/netfilter/nf_conntrack_extend.h> struct nf_conn_counter { - u_int64_t packets; - u_int64_t bytes; + atomic64_t packets; + atomic64_t bytes; }; static inline diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index 4283508b3e1..a88fb693938 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h @@ -67,18 +67,18 @@ struct nf_ct_event_notifier { int (*fcn)(unsigned int events, struct nf_ct_event *item); }; -extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb; -extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb); -extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb); +extern int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *nb); +extern void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb); extern void nf_ct_deliver_cached_events(struct nf_conn *ct); static inline void nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) { + struct net *net = nf_ct_net(ct); struct nf_conntrack_ecache *e; - if (nf_conntrack_event_cb == NULL) + if (net->ct.nf_conntrack_event_cb == NULL) return; e = nf_ct_ecache_find(ct); @@ -95,11 +95,12 @@ nf_conntrack_eventmask_report(unsigned int eventmask, int report) { int ret = 0; + struct net *net = nf_ct_net(ct); struct nf_ct_event_notifier *notify; struct nf_conntrack_ecache *e; rcu_read_lock(); - notify = rcu_dereference(nf_conntrack_event_cb); + notify = rcu_dereference(net->ct.nf_conntrack_event_cb); if (notify == NULL) goto out_unlock; @@ -164,9 +165,8 @@ struct nf_exp_event_notifier { int (*fcn)(unsigned int events, struct nf_exp_event *item); }; -extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb; -extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb); -extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb); +extern int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *nb); +extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb); static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events event, @@ -174,11 +174,12 @@ nf_ct_expect_event_report(enum ip_conntrack_expect_events event, u32 pid, int report) { + struct net *net = nf_ct_exp_net(exp); struct nf_exp_event_notifier *notify; struct nf_conntrack_ecache *e; rcu_read_lock(); - notify = rcu_dereference(nf_expect_event_cb); + notify = rcu_dereference(net->ct.nf_expect_event_cb); if (notify == NULL) goto out_unlock; diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h index 0f8a8c58753..4619caadd9d 100644 --- a/include/net/netfilter/nf_conntrack_expect.h +++ b/include/net/netfilter/nf_conntrack_expect.h @@ -91,7 +91,6 @@ static inline void nf_ct_unlink_expect(struct nf_conntrack_expect *exp) void 
nf_ct_remove_expectations(struct nf_conn *ct); void nf_ct_unexpect_related(struct nf_conntrack_expect *exp); -void nf_ct_remove_userspace_expectations(void); /* Allocate space for an expectation: this is mandatory before calling nf_ct_expect_related. You will have to call put afterwards. */ diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h index 2f8fb77bfdd..aea3f8221be 100644 --- a/include/net/netfilter/nf_conntrack_tuple.h +++ b/include/net/netfilter/nf_conntrack_tuple.h @@ -12,7 +12,6 @@ #include <linux/netfilter/x_tables.h> #include <linux/netfilter/nf_conntrack_tuple_common.h> -#include <linux/netfilter_ipv4/nf_nat.h> #include <linux/list_nulls.h> /* A `tuple' is a structure containing the information to uniquely diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h index b8872df7285..b4de990b55f 100644 --- a/include/net/netfilter/nf_nat.h +++ b/include/net/netfilter/nf_nat.h @@ -1,14 +1,12 @@ #ifndef _NF_NAT_H #define _NF_NAT_H #include <linux/netfilter_ipv4.h> -#include <linux/netfilter_ipv4/nf_nat.h> +#include <linux/netfilter/nf_nat.h> #include <net/netfilter/nf_conntrack_tuple.h> -#define NF_NAT_MAPPING_TYPE_MAX_NAMELEN 16 - enum nf_nat_manip_type { - IP_NAT_MANIP_SRC, - IP_NAT_MANIP_DST + NF_NAT_MANIP_SRC, + NF_NAT_MANIP_DST }; /* SRC manip occurs POST_ROUTING or LOCAL_IN */ @@ -52,7 +50,7 @@ struct nf_conn_nat { /* Set up the info structure to map into this range. */ extern unsigned int nf_nat_setup_info(struct nf_conn *ct, - const struct nf_nat_range *range, + const struct nf_nat_ipv4_range *range, enum nf_nat_manip_type maniptype); /* Is this tuple already taken? (not by us)*/ diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h index 3dc7b98effe..b13d8d18d59 100644 --- a/include/net/netfilter/nf_nat_core.h +++ b/include/net/netfilter/nf_nat_core.h @@ -20,7 +20,7 @@ extern int nf_nat_icmp_reply_translation(struct nf_conn *ct, static inline int nf_nat_initialized(struct nf_conn *ct, enum nf_nat_manip_type manip) { - if (manip == IP_NAT_MANIP_SRC) + if (manip == NF_NAT_MANIP_SRC) return ct->status & IPS_SRC_NAT_DONE; else return ct->status & IPS_DST_NAT_DONE; diff --git a/include/net/netfilter/nf_nat_protocol.h b/include/net/netfilter/nf_nat_protocol.h index 93cc90d28e6..7b0b51165f7 100644 --- a/include/net/netfilter/nf_nat_protocol.h +++ b/include/net/netfilter/nf_nat_protocol.h @@ -4,14 +4,12 @@ #include <net/netfilter/nf_nat.h> #include <linux/netfilter/nfnetlink_conntrack.h> -struct nf_nat_range; +struct nf_nat_ipv4_range; struct nf_nat_protocol { /* Protocol number. */ unsigned int protonum; - struct module *me; - /* Translate a packet to the target according to manip type. Return true if succeeded. */ bool (*manip_pkt)(struct sk_buff *skb, @@ -30,15 +28,12 @@ struct nf_nat_protocol { possible. Per-protocol part of tuple is initialized to the incoming packet. */ void (*unique_tuple)(struct nf_conntrack_tuple *tuple, - const struct nf_nat_range *range, + const struct nf_nat_ipv4_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct); - int (*range_to_nlattr)(struct sk_buff *skb, - const struct nf_nat_range *range); - int (*nlattr_to_range)(struct nlattr *tb[], - struct nf_nat_range *range); + struct nf_nat_ipv4_range *range); }; /* Protocol registration. 
*/ @@ -61,14 +56,12 @@ extern bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple, const union nf_conntrack_man_proto *max); extern void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, - const struct nf_nat_range *range, + const struct nf_nat_ipv4_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct, u_int16_t *rover); -extern int nf_nat_proto_range_to_nlattr(struct sk_buff *skb, - const struct nf_nat_range *range); extern int nf_nat_proto_nlattr_to_range(struct nlattr *tb[], - struct nf_nat_range *range); + struct nf_nat_ipv4_range *range); #endif /*_NF_NAT_PROTO_H*/ diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h index e505358d899..75ca9291cf2 100644 --- a/include/net/netfilter/nf_tproxy_core.h +++ b/include/net/netfilter/nf_tproxy_core.h @@ -131,7 +131,7 @@ nf_tproxy_get_sock_v4(struct net *net, const u8 protocol, return sk; } -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) static inline struct sock * nf_tproxy_get_sock_v6(struct net *net, const u8 protocol, const struct in6_addr *saddr, const struct in6_addr *daddr, diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index 0249399e51a..7a911eca0f1 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -18,6 +18,8 @@ struct netns_ct { struct hlist_nulls_head unconfirmed; struct hlist_nulls_head dying; struct ip_conntrack_stat __percpu *stat; + struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb; + struct nf_exp_event_notifier __rcu *nf_expect_event_cb; int sysctl_events; unsigned int sysctl_events_retry_timeout; int sysctl_acct; diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index d786b4fc02a..bbd023a1c9b 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -55,6 +55,7 @@ struct netns_ipv4 { int current_rt_cache_rebuild_count; unsigned int sysctl_ping_group_range[2]; + long sysctl_tcp_mem[3]; atomic_t rt_genid; atomic_t dev_addr_genid; diff --git a/include/net/netns/mib.h b/include/net/netns/mib.h index 0b44112e236..d542a4b28cc 100644 --- a/include/net/netns/mib.h +++ b/include/net/netns/mib.h @@ -10,15 +10,15 @@ struct netns_mib { DEFINE_SNMP_STAT(struct udp_mib, udp_statistics); DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics); DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics); - DEFINE_SNMP_STAT(struct icmpmsg_mib, icmpmsg_statistics); + DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics); -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) struct proc_dir_entry *proc_net_devsnmp6; DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6); DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6); DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics); DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics); - DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics); + DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib, icmpv6msg_statistics); #endif #ifdef CONFIG_XFRM_STATISTICS DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics); diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index 748f91f87cd..5299e69a32a 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -56,7 +56,7 @@ struct netns_xfrm { #endif struct dst_ops xfrm4_dst_ops; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) struct dst_ops xfrm6_dst_ops; #endif }; diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h new file mode 100644 
index 00000000000..e503b87c4c1 --- /dev/null +++ b/include/net/netprio_cgroup.h @@ -0,0 +1,57 @@ +/* + * netprio_cgroup.h Control Group Priority set + * + * + * Authors: Neil Horman <nhorman@tuxdriver.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef _NETPRIO_CGROUP_H +#define _NETPRIO_CGROUP_H +#include <linux/module.h> +#include <linux/cgroup.h> +#include <linux/hardirq.h> +#include <linux/rcupdate.h> + + +struct netprio_map { + struct rcu_head rcu; + u32 priomap_len; + u32 priomap[]; +}; + +#ifdef CONFIG_CGROUPS + +struct cgroup_netprio_state { + struct cgroup_subsys_state css; + u32 prioidx; +}; + +#ifndef CONFIG_NETPRIO_CGROUP +extern int net_prio_subsys_id; +#endif + +extern void sock_update_netprioidx(struct sock *sk); + +static inline struct cgroup_netprio_state + *task_netprio_state(struct task_struct *p) +{ +#if IS_ENABLED(CONFIG_NETPRIO_CGROUP) + return container_of(task_subsys_state(p, net_prio_subsys_id), + struct cgroup_netprio_state, css); +#else + return NULL; +#endif +} + +#else + +#define sock_update_netprioidx(sk) +#endif + +#endif /* _NET_CLS_CGROUP_H */ diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h index 39b85bc0804..2be95e2626c 100644 --- a/include/net/nfc/nci.h +++ b/include/net/nfc/nci.h @@ -34,32 +34,30 @@ #define NCI_MAX_NUM_CONN 10 /* NCI Status Codes */ -#define NCI_STATUS_OK 0x00 -#define NCI_STATUS_REJECTED 0x01 -#define NCI_STATUS_MESSAGE_CORRUPTED 0x02 -#define NCI_STATUS_BUFFER_FULL 0x03 -#define NCI_STATUS_FAILED 0x04 -#define NCI_STATUS_NOT_INITIALIZED 0x05 -#define NCI_STATUS_SYNTAX_ERROR 0x06 -#define NCI_STATUS_SEMANTIC_ERROR 0x07 -#define NCI_STATUS_UNKNOWN_GID 0x08 -#define NCI_STATUS_UNKNOWN_OID 0x09 -#define NCI_STATUS_INVALID_PARAM 0x0a -#define NCI_STATUS_MESSAGE_SIZE_EXCEEDED 0x0b +#define NCI_STATUS_OK 0x00 +#define NCI_STATUS_REJECTED 0x01 +#define NCI_STATUS_RF_FRAME_CORRUPTED 0x02 +#define NCI_STATUS_FAILED 0x03 +#define NCI_STATUS_NOT_INITIALIZED 0x04 +#define NCI_STATUS_SYNTAX_ERROR 0x05 +#define NCI_STATUS_SEMANTIC_ERROR 0x06 +#define NCI_STATUS_UNKNOWN_GID 0x07 +#define NCI_STATUS_UNKNOWN_OID 0x08 +#define NCI_STATUS_INVALID_PARAM 0x09 +#define NCI_STATUS_MESSAGE_SIZE_EXCEEDED 0x0a /* Discovery Specific Status Codes */ -#define NCI_STATUS_DISCOVERY_ALREADY_STARTED 0xa0 -#define NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED 0xa1 +#define NCI_STATUS_DISCOVERY_ALREADY_STARTED 0xa0 +#define NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED 0xa1 +#define NCI_STATUS_DISCOVERY_TEAR_DOWN 0xa2 /* RF Interface Specific Status Codes */ -#define NCI_STATUS_RF_TRANSMISSION_ERROR 0xb0 -#define NCI_STATUS_RF_PROTOCOL_ERROR 0xb1 -#define NCI_STATUS_RF_TIMEOUT_ERROR 0xb2 -#define NCI_STATUS_RF_LINK_LOSS_ERROR 0xb3 +#define NCI_STATUS_RF_TRANSMISSION_ERROR 0xb0 +#define NCI_STATUS_RF_PROTOCOL_ERROR 0xb1 +#define NCI_STATUS_RF_TIMEOUT_ERROR 0xb2 /* NFCEE Interface Specific Status Codes */ -#define NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED 0xc0 -#define NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED 0xc1 -#define NCI_STATUS_NFCEE_TRANSMISSION_ERROR 0xc2 -#define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc3 -#define NCI_STATUS_NFCEE_TIMEOUT_ERROR 0xc4 +#define NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED 0xc0 +#define NCI_STATUS_NFCEE_TRANSMISSION_ERROR 0xc1 +#define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc2 +#define 
NCI_STATUS_NFCEE_TIMEOUT_ERROR 0xc3 /* NCI RF Technology and Mode */ #define NCI_NFC_A_PASSIVE_POLL_MODE 0x00 @@ -67,11 +65,28 @@ #define NCI_NFC_F_PASSIVE_POLL_MODE 0x02 #define NCI_NFC_A_ACTIVE_POLL_MODE 0x03 #define NCI_NFC_F_ACTIVE_POLL_MODE 0x05 +#define NCI_NFC_15693_PASSIVE_POLL_MODE 0x06 #define NCI_NFC_A_PASSIVE_LISTEN_MODE 0x80 #define NCI_NFC_B_PASSIVE_LISTEN_MODE 0x81 #define NCI_NFC_F_PASSIVE_LISTEN_MODE 0x82 #define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83 #define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85 +#define NCI_NFC_15693_PASSIVE_LISTEN_MODE 0x86 + +/* NCI RF Technologies */ +#define NCI_NFC_RF_TECHNOLOGY_A 0x00 +#define NCI_NFC_RF_TECHNOLOGY_B 0x01 +#define NCI_NFC_RF_TECHNOLOGY_F 0x02 +#define NCI_NFC_RF_TECHNOLOGY_15693 0x03 + +/* NCI Bit Rates */ +#define NCI_NFC_BIT_RATE_106 0x00 +#define NCI_NFC_BIT_RATE_212 0x01 +#define NCI_NFC_BIT_RATE_424 0x02 +#define NCI_NFC_BIT_RATE_848 0x03 +#define NCI_NFC_BIT_RATE_1695 0x04 +#define NCI_NFC_BIT_RATE_3390 0x05 +#define NCI_NFC_BIT_RATE_6780 0x06 /* NCI RF Protocols */ #define NCI_RF_PROTOCOL_UNKNOWN 0x00 @@ -82,37 +97,30 @@ #define NCI_RF_PROTOCOL_NFC_DEP 0x05 /* NCI RF Interfaces */ -#define NCI_RF_INTERFACE_RFU 0x00 -#define NCI_RF_INTERFACE_FRAME 0x01 -#define NCI_RF_INTERFACE_ISO_DEP 0x02 -#define NCI_RF_INTERFACE_NFC_DEP 0x03 +#define NCI_RF_INTERFACE_NFCEE_DIRECT 0x00 +#define NCI_RF_INTERFACE_FRAME 0x01 +#define NCI_RF_INTERFACE_ISO_DEP 0x02 +#define NCI_RF_INTERFACE_NFC_DEP 0x03 + +/* NCI Reset types */ +#define NCI_RESET_TYPE_KEEP_CONFIG 0x00 +#define NCI_RESET_TYPE_RESET_CONFIG 0x01 + +/* NCI Static RF connection ID */ +#define NCI_STATIC_RF_CONN_ID 0x00 + +/* NCI Data Flow Control */ +#define NCI_DATA_FLOW_CONTROL_NOT_USED 0xff /* NCI RF_DISCOVER_MAP_CMD modes */ #define NCI_DISC_MAP_MODE_POLL 0x01 #define NCI_DISC_MAP_MODE_LISTEN 0x02 -#define NCI_DISC_MAP_MODE_BOTH 0x03 - -/* NCI Discovery Types */ -#define NCI_DISCOVERY_TYPE_POLL_A_PASSIVE 0x00 -#define NCI_DISCOVERY_TYPE_POLL_B_PASSIVE 0x01 -#define NCI_DISCOVERY_TYPE_POLL_F_PASSIVE 0x02 -#define NCI_DISCOVERY_TYPE_POLL_A_ACTIVE 0x03 -#define NCI_DISCOVERY_TYPE_POLL_F_ACTIVE 0x05 -#define NCI_DISCOVERY_TYPE_WAKEUP_A_PASSIVE 0x06 -#define NCI_DISCOVERY_TYPE_WAKEUP_B_PASSIVE 0x07 -#define NCI_DISCOVERY_TYPE_WAKEUP_A_ACTIVE 0x09 -#define NCI_DISCOVERY_TYPE_LISTEN_A_PASSIVE 0x80 -#define NCI_DISCOVERY_TYPE_LISTEN_B_PASSIVE 0x81 -#define NCI_DISCOVERY_TYPE_LISTEN_F_PASSIVE 0x82 -#define NCI_DISCOVERY_TYPE_LISTEN_A_ACTIVE 0x83 -#define NCI_DISCOVERY_TYPE_LISTEN_F_ACTIVE 0x85 /* NCI Deactivation Type */ -#define NCI_DEACTIVATE_TYPE_IDLE_MODE 0x00 -#define NCI_DEACTIVATE_TYPE_SLEEP_MODE 0x01 -#define NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE 0x02 -#define NCI_DEACTIVATE_TYPE_RF_LINK_LOSS 0x03 -#define NCI_DEACTIVATE_TYPE_DISCOVERY_ERROR 0x04 +#define NCI_DEACTIVATE_TYPE_IDLE_MODE 0x00 +#define NCI_DEACTIVATE_TYPE_SLEEP_MODE 0x01 +#define NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE 0x02 +#define NCI_DEACTIVATE_TYPE_DISCOVERY 0x03 /* Message Type (MT) */ #define NCI_MT_DATA_PKT 0x00 @@ -144,10 +152,10 @@ #define nci_conn_id(hdr) (__u8)(((hdr)[0])&0x0f) /* GID values */ -#define NCI_GID_CORE 0x0 -#define NCI_GID_RF_MGMT 0x1 -#define NCI_GID_NFCEE_MGMT 0x2 -#define NCI_GID_PROPRIETARY 0xf +#define NCI_GID_CORE 0x0 +#define NCI_GID_RF_MGMT 0x1 +#define NCI_GID_NFCEE_MGMT 0x2 +#define NCI_GID_PROPRIETARY 0xf /* ---- NCI Packet structures ---- */ #define NCI_CTRL_HDR_SIZE 3 @@ -169,24 +177,17 @@ struct nci_data_hdr { /* ----- NCI Commands ---- */ /* ------------------------ */ #define 
NCI_OP_CORE_RESET_CMD nci_opcode_pack(NCI_GID_CORE, 0x00) - -#define NCI_OP_CORE_INIT_CMD nci_opcode_pack(NCI_GID_CORE, 0x01) - -#define NCI_OP_CORE_SET_CONFIG_CMD nci_opcode_pack(NCI_GID_CORE, 0x02) - -#define NCI_OP_CORE_CONN_CREATE_CMD nci_opcode_pack(NCI_GID_CORE, 0x04) -struct nci_core_conn_create_cmd { - __u8 target_handle; - __u8 num_target_specific_params; +struct nci_core_reset_cmd { + __u8 reset_type; } __packed; -#define NCI_OP_CORE_CONN_CLOSE_CMD nci_opcode_pack(NCI_GID_CORE, 0x06) +#define NCI_OP_CORE_INIT_CMD nci_opcode_pack(NCI_GID_CORE, 0x01) #define NCI_OP_RF_DISCOVER_MAP_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x00) struct disc_map_config { __u8 rf_protocol; __u8 mode; - __u8 rf_interface_type; + __u8 rf_interface; } __packed; struct nci_rf_disc_map_cmd { @@ -197,7 +198,7 @@ struct nci_rf_disc_map_cmd { #define NCI_OP_RF_DISCOVER_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x03) struct disc_config { - __u8 type; + __u8 rf_tech_and_mode; __u8 frequency; } __packed; @@ -218,6 +219,7 @@ struct nci_rf_deactivate_cmd { struct nci_core_reset_rsp { __u8 status; __u8 nci_ver; + __u8 config_status; } __packed; #define NCI_OP_CORE_INIT_RSP nci_opcode_pack(NCI_GID_CORE, 0x01) @@ -232,24 +234,12 @@ struct nci_core_init_rsp_1 { struct nci_core_init_rsp_2 { __u8 max_logical_connections; __le16 max_routing_table_size; - __u8 max_control_packet_payload_length; - __le16 rf_sending_buffer_size; - __le16 rf_receiving_buffer_size; - __le16 manufacturer_id; + __u8 max_ctrl_pkt_payload_len; + __le16 max_size_for_large_params; + __u8 manufact_id; + __le32 manufact_specific_info; } __packed; -#define NCI_OP_CORE_SET_CONFIG_RSP nci_opcode_pack(NCI_GID_CORE, 0x02) - -#define NCI_OP_CORE_CONN_CREATE_RSP nci_opcode_pack(NCI_GID_CORE, 0x04) -struct nci_core_conn_create_rsp { - __u8 status; - __u8 max_pkt_payload_size; - __u8 initial_num_credits; - __u8 conn_id; -} __packed; - -#define NCI_OP_CORE_CONN_CLOSE_RSP nci_opcode_pack(NCI_GID_CORE, 0x06) - #define NCI_OP_RF_DISCOVER_MAP_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x00) #define NCI_OP_RF_DISCOVER_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x03) @@ -259,7 +249,7 @@ struct nci_core_conn_create_rsp { /* --------------------------- */ /* ---- NCI Notifications ---- */ /* --------------------------- */ -#define NCI_OP_CORE_CONN_CREDITS_NTF nci_opcode_pack(NCI_GID_CORE, 0x07) +#define NCI_OP_CORE_CONN_CREDITS_NTF nci_opcode_pack(NCI_GID_CORE, 0x06) struct conn_credit_entry { __u8 conn_id; __u8 credits; @@ -270,12 +260,13 @@ struct nci_core_conn_credit_ntf { struct conn_credit_entry conn_entries[NCI_MAX_NUM_CONN]; } __packed; -#define NCI_OP_RF_FIELD_INFO_NTF nci_opcode_pack(NCI_GID_CORE, 0x08) -struct nci_rf_field_info_ntf { - __u8 rf_field_status; +#define NCI_OP_CORE_INTF_ERROR_NTF nci_opcode_pack(NCI_GID_CORE, 0x08) +struct nci_core_intf_error_ntf { + __u8 status; + __u8 conn_id; } __packed; -#define NCI_OP_RF_ACTIVATE_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x05) +#define NCI_OP_RF_INTF_ACTIVATED_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x05) struct rf_tech_specific_params_nfca_poll { __u16 sens_res; __u8 nfcid1_len; /* 0, 4, 7, or 10 Bytes */ @@ -289,17 +280,22 @@ struct activation_params_nfca_poll_iso_dep { __u8 rats_res[20]; }; -struct nci_rf_activate_ntf { - __u8 target_handle; +struct nci_rf_intf_activated_ntf { + __u8 rf_discovery_id; + __u8 rf_interface; __u8 rf_protocol; - __u8 rf_tech_and_mode; + __u8 activation_rf_tech_and_mode; + __u8 max_data_pkt_payload_size; + __u8 initial_num_credits; __u8 rf_tech_specific_params_len; union { struct 
rf_tech_specific_params_nfca_poll nfca_poll; } rf_tech_specific_params; - __u8 rf_interface_type; + __u8 data_exch_rf_tech_and_mode; + __u8 data_exch_tx_bit_rate; + __u8 data_exch_rx_bit_rate; __u8 activation_params_len; union { @@ -309,5 +305,9 @@ struct nci_rf_activate_ntf { } __packed; #define NCI_OP_RF_DEACTIVATE_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x06) +struct nci_rf_deactivate_ntf { + __u8 type; + __u8 reason; +} __packed; #endif /* __NCI_H */ diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index b8b4bbd7e0f..bccd89e9d4c 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -109,15 +109,14 @@ struct nci_dev { [NCI_MAX_SUPPORTED_RF_INTERFACES]; __u8 max_logical_connections; __u16 max_routing_table_size; - __u8 max_control_packet_payload_length; - __u16 rf_sending_buffer_size; - __u16 rf_receiving_buffer_size; - __u16 manufacturer_id; + __u8 max_ctrl_pkt_payload_len; + __u16 max_size_for_large_params; + __u8 manufact_id; + __u32 manufact_specific_info; - /* received during NCI_OP_CORE_CONN_CREATE_RSP for static conn 0 */ - __u8 max_pkt_payload_size; + /* received during NCI_OP_RF_INTF_ACTIVATED_NTF */ + __u8 max_data_pkt_payload_size; __u8 initial_num_credits; - __u8 conn_id; /* stored during nci_data_exchange */ data_exchange_cb_t data_exchange_cb; diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 6a7f602aa84..8696b773a69 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -52,6 +52,9 @@ struct nfc_ops { int (*dev_down)(struct nfc_dev *dev); int (*start_poll)(struct nfc_dev *dev, u32 protocols); void (*stop_poll)(struct nfc_dev *dev); + int (*dep_link_up)(struct nfc_dev *dev, int target_idx, + u8 comm_mode, u8 rf_mode); + int (*dep_link_down)(struct nfc_dev *dev); int (*activate_target)(struct nfc_dev *dev, u32 target_idx, u32 protocol); void (*deactivate_target)(struct nfc_dev *dev, u32 target_idx); @@ -60,11 +63,17 @@ struct nfc_ops { void *cb_context); }; +#define NFC_TARGET_IDX_ANY -1 +#define NFC_MAX_GT_LEN 48 +#define NFC_MAX_NFCID1_LEN 10 + struct nfc_target { u32 idx; u32 supported_protocols; u16 sens_res; u8 sel_res; + u8 nfcid1_len; + u8 nfcid1[NFC_MAX_NFCID1_LEN]; }; struct nfc_genl_data { @@ -83,6 +92,8 @@ struct nfc_dev { bool dev_up; bool polling; bool remote_activated; + bool dep_link_up; + u32 dep_rf_mode; struct nfc_genl_data genl_data; u32 supported_protocols; @@ -157,9 +168,20 @@ static inline const char *nfc_device_name(struct nfc_dev *dev) return dev_name(&dev->dev); } -struct sk_buff *nfc_alloc_skb(unsigned int size, gfp_t gfp); +struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk, + unsigned int flags, unsigned int size, + unsigned int *err); +struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp); + +int nfc_set_remote_general_bytes(struct nfc_dev *dev, + u8 *gt, u8 gt_len); + +u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, u8 *gt_len); int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets, int ntargets); +int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx, + u8 comm_mode, u8 rf_mode); + #endif /* __NET_NFC_H */ diff --git a/include/net/protocol.h b/include/net/protocol.h index 6f7eb800974..875f4895b03 100644 --- a/include/net/protocol.h +++ b/include/net/protocol.h @@ -25,7 +25,7 @@ #define _PROTOCOL_H #include <linux/in6.h> -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) #include <linux/ipv6.h> #endif @@ -38,7 +38,7 @@ struct net_protocol { void (*err_handler)(struct sk_buff *skb, u32 
info); int (*gso_send_check)(struct sk_buff *skb); struct sk_buff *(*gso_segment)(struct sk_buff *skb, - u32 features); + netdev_features_t features); struct sk_buff **(*gro_receive)(struct sk_buff **head, struct sk_buff *skb); int (*gro_complete)(struct sk_buff *skb); @@ -46,7 +46,7 @@ struct net_protocol { netns_ok:1; }; -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) struct inet6_protocol { int (*handler)(struct sk_buff *skb); @@ -57,7 +57,7 @@ struct inet6_protocol { int (*gso_send_check)(struct sk_buff *skb); struct sk_buff *(*gso_segment)(struct sk_buff *skb, - u32 features); + netdev_features_t features); struct sk_buff **(*gro_receive)(struct sk_buff **head, struct sk_buff *skb); int (*gro_complete)(struct sk_buff *skb); @@ -91,7 +91,7 @@ struct inet_protosw { extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS]; -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS]; #endif @@ -100,7 +100,7 @@ extern int inet_del_protocol(const struct net_protocol *prot, unsigned char num) extern void inet_register_protosw(struct inet_protosw *p); extern void inet_unregister_protosw(struct inet_protosw *p); -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) extern int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num); extern int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num); extern int inet6_register_protosw(struct inet_protosw *p); diff --git a/include/net/red.h b/include/net/red.h index 3319f16b3be..baab385a473 100644 --- a/include/net/red.h +++ b/include/net/red.h @@ -5,6 +5,7 @@ #include <net/pkt_sched.h> #include <net/inet_ecn.h> #include <net/dsfield.h> +#include <linux/reciprocal_div.h> /* Random Early Detection (RED) algorithm. ======================================= @@ -87,6 +88,29 @@ etc. */ +/* + * Adaptative RED : An Algorithm for Increasing the Robustness of RED's AQM + * (Sally FLoyd, Ramakrishna Gummadi, and Scott Shenker) August 2001 + * + * Every 500 ms: + * if (avg > target and max_p <= 0.5) + * increase max_p : max_p += alpha; + * else if (avg < target and max_p >= 0.01) + * decrease max_p : max_p *= beta; + * + * target :[qth_min + 0.4*(qth_min - qth_max), + * qth_min + 0.6*(qth_min - qth_max)]. + * alpha : min(0.01, max_p / 4) + * beta : 0.9 + * max_P is a Q0.32 fixed point number (with 32 bits mantissa) + * max_P between 0.01 and 0.5 (1% - 50%) [ Its no longer a negative power of two ] + */ +#define RED_ONE_PERCENT ((u32)DIV_ROUND_CLOSEST(1ULL<<32, 100)) + +#define MAX_P_MIN (1 * RED_ONE_PERCENT) +#define MAX_P_MAX (50 * RED_ONE_PERCENT) +#define MAX_P_ALPHA(val) min(MAX_P_MIN, val / 4) + #define RED_STAB_SIZE 256 #define RED_STAB_MASK (RED_STAB_SIZE - 1) @@ -101,82 +125,112 @@ struct red_stats { struct red_parms { /* Parameters */ - u32 qth_min; /* Min avg length threshold: A scaled */ - u32 qth_max; /* Max avg length threshold: A scaled */ + u32 qth_min; /* Min avg length threshold: Wlog scaled */ + u32 qth_max; /* Max avg length threshold: Wlog scaled */ u32 Scell_max; - u32 Rmask; /* Cached random mask, see red_rmask */ + u32 max_P; /* probability, [0 .. 
1.0] 32 scaled */ + u32 max_P_reciprocal; /* reciprocal_value(max_P / qth_delta) */ + u32 qth_delta; /* max_th - min_th */ + u32 target_min; /* min_th + 0.4*(max_th - min_th) */ + u32 target_max; /* min_th + 0.6*(max_th - min_th) */ u8 Scell_log; u8 Wlog; /* log(W) */ u8 Plog; /* random number bits */ u8 Stab[RED_STAB_SIZE]; +}; +struct red_vars { /* Variables */ int qcount; /* Number of packets since last random number generation */ u32 qR; /* Cached random number */ - unsigned long qavg; /* Average queue length: A scaled */ - psched_time_t qidlestart; /* Start of current idle period */ + unsigned long qavg; /* Average queue length: Wlog scaled */ + ktime_t qidlestart; /* Start of current idle period */ }; -static inline u32 red_rmask(u8 Plog) +static inline u32 red_maxp(u8 Plog) { - return Plog < 32 ? ((1 << Plog) - 1) : ~0UL; + return Plog < 32 ? (~0U >> Plog) : ~0U; } -static inline void red_set_parms(struct red_parms *p, - u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog, - u8 Scell_log, u8 *stab) +static inline void red_set_vars(struct red_vars *v) { /* Reset average queue length, the value is strictly bound * to the parameters below, reseting hurts a bit but leaving * it might result in an unreasonable qavg for a while. --TGR */ - p->qavg = 0; + v->qavg = 0; + + v->qcount = -1; +} + +static inline void red_set_parms(struct red_parms *p, + u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog, + u8 Scell_log, u8 *stab, u32 max_P) +{ + int delta = qth_max - qth_min; + u32 max_p_delta; - p->qcount = -1; p->qth_min = qth_min << Wlog; p->qth_max = qth_max << Wlog; p->Wlog = Wlog; p->Plog = Plog; - p->Rmask = red_rmask(Plog); + if (delta < 0) + delta = 1; + p->qth_delta = delta; + if (!max_P) { + max_P = red_maxp(Plog); + max_P *= delta; /* max_P = (qth_max - qth_min)/2^Plog */ + } + p->max_P = max_P; + max_p_delta = max_P / delta; + max_p_delta = max(max_p_delta, 1U); + p->max_P_reciprocal = reciprocal_value(max_p_delta); + + /* RED Adaptative target : + * [min_th + 0.4*(min_th - max_th), + * min_th + 0.6*(min_th - max_th)]. 
+ */ + delta /= 5; + p->target_min = qth_min + 2*delta; + p->target_max = qth_min + 3*delta; + p->Scell_log = Scell_log; p->Scell_max = (255 << Scell_log); memcpy(p->Stab, stab, sizeof(p->Stab)); } -static inline int red_is_idling(struct red_parms *p) +static inline int red_is_idling(const struct red_vars *v) { - return p->qidlestart != PSCHED_PASTPERFECT; + return v->qidlestart.tv64 != 0; } -static inline void red_start_of_idle_period(struct red_parms *p) +static inline void red_start_of_idle_period(struct red_vars *v) { - p->qidlestart = psched_get_time(); + v->qidlestart = ktime_get(); } -static inline void red_end_of_idle_period(struct red_parms *p) +static inline void red_end_of_idle_period(struct red_vars *v) { - p->qidlestart = PSCHED_PASTPERFECT; + v->qidlestart.tv64 = 0; } -static inline void red_restart(struct red_parms *p) +static inline void red_restart(struct red_vars *v) { - red_end_of_idle_period(p); - p->qavg = 0; - p->qcount = -1; + red_end_of_idle_period(v); + v->qavg = 0; + v->qcount = -1; } -static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p) +static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p, + const struct red_vars *v) { - psched_time_t now; - long us_idle; + s64 delta = ktime_us_delta(ktime_get(), v->qidlestart); + long us_idle = min_t(s64, delta, p->Scell_max); int shift; - now = psched_get_time(); - us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max); - /* * The problem: ideally, average length queue recalcultion should * be done over constant clock intervals. This is too expensive, so @@ -200,7 +254,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p) shift = p->Stab[(us_idle >> p->Scell_log) & RED_STAB_MASK]; if (shift) - return p->qavg >> shift; + return v->qavg >> shift; else { /* Approximate initial part of exponent with linear function: * @@ -209,16 +263,17 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p) * Seems, it is the best solution to * problem of too coarse exponent tabulation. 
*/ - us_idle = (p->qavg * (u64)us_idle) >> p->Scell_log; + us_idle = (v->qavg * (u64)us_idle) >> p->Scell_log; - if (us_idle < (p->qavg >> 1)) - return p->qavg - us_idle; + if (us_idle < (v->qavg >> 1)) + return v->qavg - us_idle; else - return p->qavg >> 1; + return v->qavg >> 1; } } -static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p, +static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p, + const struct red_vars *v, unsigned int backlog) { /* @@ -230,42 +285,46 @@ static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p, * * --ANK (980924) */ - return p->qavg + (backlog - (p->qavg >> p->Wlog)); + return v->qavg + (backlog - (v->qavg >> p->Wlog)); } -static inline unsigned long red_calc_qavg(struct red_parms *p, +static inline unsigned long red_calc_qavg(const struct red_parms *p, + const struct red_vars *v, unsigned int backlog) { - if (!red_is_idling(p)) - return red_calc_qavg_no_idle_time(p, backlog); + if (!red_is_idling(v)) + return red_calc_qavg_no_idle_time(p, v, backlog); else - return red_calc_qavg_from_idle_time(p); + return red_calc_qavg_from_idle_time(p, v); } -static inline u32 red_random(struct red_parms *p) + +static inline u32 red_random(const struct red_parms *p) { - return net_random() & p->Rmask; + return reciprocal_divide(net_random(), p->max_P_reciprocal); } -static inline int red_mark_probability(struct red_parms *p, unsigned long qavg) +static inline int red_mark_probability(const struct red_parms *p, + const struct red_vars *v, + unsigned long qavg) { /* The formula used below causes questions. - OK. qR is random number in the interval 0..Rmask + OK. qR is random number in the interval + (0..1/max_P)*(qth_max-qth_min) i.e. 0..(2^Plog). If we used floating point arithmetics, it would be: (2^Plog)*rnd_num, where rnd_num is less 1. Taking into account, that qavg have fixed - point at Wlog, and Plog is related to max_P by - max_P = (qth_max-qth_min)/2^Plog; two lines + point at Wlog, two lines below have the following floating point equivalent: max_P*(qavg - qth_min)/(qth_max-qth_min) < rnd/qcount Any questions? 
--ANK (980924) */ - return !(((qavg - p->qth_min) >> p->Wlog) * p->qcount < p->qR); + return !(((qavg - p->qth_min) >> p->Wlog) * v->qcount < v->qR); } enum { @@ -274,7 +333,7 @@ enum { RED_ABOVE_MAX_TRESH, }; -static inline int red_cmp_thresh(struct red_parms *p, unsigned long qavg) +static inline int red_cmp_thresh(const struct red_parms *p, unsigned long qavg) { if (qavg < p->qth_min) return RED_BELOW_MIN_THRESH; @@ -290,27 +349,29 @@ enum { RED_HARD_MARK, }; -static inline int red_action(struct red_parms *p, unsigned long qavg) +static inline int red_action(const struct red_parms *p, + struct red_vars *v, + unsigned long qavg) { switch (red_cmp_thresh(p, qavg)) { case RED_BELOW_MIN_THRESH: - p->qcount = -1; + v->qcount = -1; return RED_DONT_MARK; case RED_BETWEEN_TRESH: - if (++p->qcount) { - if (red_mark_probability(p, qavg)) { - p->qcount = 0; - p->qR = red_random(p); + if (++v->qcount) { + if (red_mark_probability(p, v, qavg)) { + v->qcount = 0; + v->qR = red_random(p); return RED_PROB_MARK; } } else - p->qR = red_random(p); + v->qR = red_random(p); return RED_DONT_MARK; case RED_ABOVE_MAX_TRESH: - p->qcount = -1; + v->qcount = -1; return RED_HARD_MARK; } @@ -318,4 +379,25 @@ static inline int red_action(struct red_parms *p, unsigned long qavg) return RED_DONT_MARK; } +static inline void red_adaptative_algo(struct red_parms *p, struct red_vars *v) +{ + unsigned long qavg; + u32 max_p_delta; + + qavg = v->qavg; + if (red_is_idling(v)) + qavg = red_calc_qavg_from_idle_time(p, v); + + /* p->qavg is fixed point number with point at Wlog */ + qavg >>= p->Wlog; + + if (qavg > p->target_max && p->max_P <= MAX_P_MAX) + p->max_P += MAX_P_ALPHA(p->max_P); /* maxp = maxp + alpha */ + else if (qavg < p->target_min && p->max_P >= MAX_P_MIN) + p->max_P = (p->max_P/10)*9; /* maxp = maxp * Beta */ + + max_p_delta = DIV_ROUND_CLOSEST(p->max_P, p->qth_delta); + max_p_delta = max(max_p_delta, 1U); + p->max_P_reciprocal = reciprocal_value(max_p_delta); +} #endif diff --git a/include/net/regulatory.h b/include/net/regulatory.h index eb7d3c2d427..a5f79933e21 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h @@ -48,6 +48,10 @@ enum environment_cap { * 99 - built by driver but a specific alpha2 cannot be determined * 98 - result of an intersection between two regulatory domains * 97 - regulatory domain has not yet been configured + * @dfs_region: If CRDA responded with a regulatory domain that requires + * DFS master operation on a known DFS region (NL80211_DFS_*), + * dfs_region represents that region. Drivers can use this and the + * @alpha2 to adjust their device's DFS parameters as required. * @intersect: indicates whether the wireless core should intersect * the requested regulatory domain with the presently set regulatory * domain. 
@@ -67,6 +71,7 @@ struct regulatory_request { int wiphy_idx; enum nl80211_reg_initiator initiator; char alpha2[2]; + u8 dfs_region; bool intersect; bool processed; enum environment_cap country_ie_env; @@ -93,6 +98,7 @@ struct ieee80211_reg_rule { struct ieee80211_regdomain { u32 n_reg_rules; char alpha2[2]; + u8 dfs_region; struct ieee80211_reg_rule reg_rules[]; }; diff --git a/include/net/route.h b/include/net/route.h index db7b3432f07..91855d185b5 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -71,12 +71,12 @@ struct rtable { struct fib_info *fi; /* for client ref to shared metrics */ }; -static inline bool rt_is_input_route(struct rtable *rt) +static inline bool rt_is_input_route(const struct rtable *rt) { return rt->rt_route_iif != 0; } -static inline bool rt_is_output_route(struct rtable *rt) +static inline bool rt_is_output_route(const struct rtable *rt) { return rt->rt_route_iif == 0; } diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 6a72a58cde5..d3685615a8b 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -71,7 +71,7 @@ #include <linux/jiffies.h> #include <linux/idr.h> -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) #include <net/ipv6.h> #include <net/ip6_route.h> #endif @@ -383,7 +383,7 @@ static inline void sctp_sysctl_unregister(void) { return; } /* Size of Supported Address Parameter for 'x' address types. */ #define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16)) -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) void sctp_v6_pf_init(void); void sctp_v6_pf_exit(void); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index e90e7a9935d..88949a99453 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -235,12 +235,15 @@ extern struct sctp_globals { /* Flag to indicate whether computing and verifying checksum * is disabled. */ - int checksum_disable; + bool checksum_disable; /* Threshold for rwnd update SACKS. Receive buffer shifted this many * bits is an indicator of when to send and window update SACK. */ int rwnd_update_shift; + + /* Threshold for autoclose timeout, in seconds. */ + unsigned long max_autoclose; } sctp_globals; #define sctp_rto_initial (sctp_globals.rto_initial) @@ -281,6 +284,7 @@ extern struct sctp_globals { #define sctp_auth_enable (sctp_globals.auth_enable) #define sctp_checksum_disable (sctp_globals.checksum_disable) #define sctp_rwnd_upd_shift (sctp_globals.rwnd_update_shift) +#define sctp_max_autoclose (sctp_globals.max_autoclose) /* SCTP Socket type: UDP or TCP style. 
*/ typedef enum { @@ -365,7 +369,7 @@ static inline struct sock *sctp_opt2sk(const struct sctp_sock *sp) return (struct sock *)sp; } -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) struct sctp6_sock { struct sctp_sock sctp; struct ipv6_pinfo inet6; @@ -1085,6 +1089,7 @@ void sctp_transport_burst_reset(struct sctp_transport *); unsigned long sctp_transport_timeout(struct sctp_transport *); void sctp_transport_reset(struct sctp_transport *); void sctp_transport_update_pmtu(struct sctp_transport *, u32); +void sctp_transport_immediate_rtx(struct sctp_transport *); /* This is the structure we use to queue packets as they come into diff --git a/include/net/snmp.h b/include/net/snmp.h index 8f0f9ac0307..2f65e1686fc 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -67,7 +67,7 @@ struct icmp_mib { #define ICMPMSG_MIB_MAX __ICMPMSG_MIB_MAX struct icmpmsg_mib { - unsigned long mibs[ICMPMSG_MIB_MAX]; + atomic_long_t mibs[ICMPMSG_MIB_MAX]; }; /* ICMP6 (IPv6-ICMP) */ @@ -84,7 +84,7 @@ struct icmpv6_mib_device { #define ICMP6MSG_MIB_MAX __ICMP6MSG_MIB_MAX /* per network ns counters */ struct icmpv6msg_mib { - unsigned long mibs[ICMP6MSG_MIB_MAX]; + atomic_long_t mibs[ICMP6MSG_MIB_MAX]; }; /* per device counters, (shared on all cpus) */ struct icmpv6msg_mib_device { diff --git a/include/net/sock.h b/include/net/sock.h index abb6e0f0c3c..bb972d254df 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -53,6 +53,8 @@ #include <linux/security.h> #include <linux/slab.h> #include <linux/uaccess.h> +#include <linux/memcontrol.h> +#include <linux/res_counter.h> #include <linux/filter.h> #include <linux/rculist_nulls.h> @@ -62,6 +64,22 @@ #include <net/dst.h> #include <net/checksum.h> +struct cgroup; +struct cgroup_subsys; +#ifdef CONFIG_NET +int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss); +void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss); +#else +static inline +int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss) +{ + return 0; +} +static inline +void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss) +{ +} +#endif /* * This structure really needs to be cleaned up. 
* Most of it is for TCP, and not used by any of @@ -167,6 +185,7 @@ struct sock_common { /* public: */ }; +struct cg_proto; /** * struct sock - network layer representation of sockets * @__sk_common: shared layout with inet_timewait_sock @@ -227,6 +246,7 @@ struct sock_common { * @sk_security: used by security modules * @sk_mark: generic packet mark * @sk_classid: this socket's cgroup classid + * @sk_cgrp: this socket's cgroup-specific proto data * @sk_write_pending: a write to stream socket waits to start * @sk_state_change: callback to indicate change in the state of the sock * @sk_data_ready: callback to indicate there is data to be processed @@ -306,8 +326,8 @@ struct sock { kmemcheck_bitfield_end(flags); int sk_wmem_queued; gfp_t sk_allocation; - int sk_route_caps; - int sk_route_nocaps; + netdev_features_t sk_route_caps; + netdev_features_t sk_route_nocaps; int sk_gso_type; unsigned int sk_gso_max_size; int sk_rcvlowat; @@ -320,6 +340,9 @@ struct sock { unsigned short sk_ack_backlog; unsigned short sk_max_ack_backlog; __u32 sk_priority; +#ifdef CONFIG_CGROUPS + __u32 sk_cgrp_prioidx; +#endif struct pid *sk_peer_pid; const struct cred *sk_peer_cred; long sk_rcvtimeo; @@ -338,6 +361,7 @@ struct sock { #endif __u32 sk_mark; u32 sk_classid; + struct cg_proto *sk_cgrp; void (*sk_state_change)(struct sock *sk); void (*sk_data_ready)(struct sock *sk, int bytes); void (*sk_write_space)(struct sock *sk); @@ -563,6 +587,7 @@ enum sock_flags { SOCK_FASYNC, /* fasync() active */ SOCK_RXQ_OVFL, SOCK_ZEROCOPY, /* buffers from userspace */ + SOCK_WIFI_STATUS, /* push wifi status to userspace */ }; static inline void sock_copy_flags(struct sock *nsk, struct sock *osk) @@ -637,12 +662,14 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) /* * Take into account size of receive queue and backlog queue + * Do not take into account this skb truesize, + * to allow even a single big packet to come. */ static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb) { unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); - return qsize + skb->truesize > sk->sk_rcvbuf; + return qsize > sk->sk_rcvbuf; } /* The per-socket spinlock must be held here. */ @@ -833,6 +860,37 @@ struct proto { #ifdef SOCK_REFCNT_DEBUG atomic_t socks; #endif +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM + /* + * cgroup specific init/deinit functions. Called once for all + * protocols that implement it, from cgroups populate function. + * This function has to setup any files the protocol want to + * appear in the kmem cgroup filesystem. + */ + int (*init_cgroup)(struct cgroup *cgrp, + struct cgroup_subsys *ss); + void (*destroy_cgroup)(struct cgroup *cgrp, + struct cgroup_subsys *ss); + struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg); +#endif +}; + +struct cg_proto { + void (*enter_memory_pressure)(struct sock *sk); + struct res_counter *memory_allocated; /* Current allocated memory. */ + struct percpu_counter *sockets_allocated; /* Current number of sockets. */ + int *memory_pressure; + long *sysctl_mem; + /* + * memcg field is used to find which memcg we belong directly + * Each memcg struct can hold more than one cg_proto, so container_of + * won't really cut. + * + * The elegant solution would be having an inverse function to + * proto_cgroup in struct proto, but that means polluting the structure + * for everybody, instead of just for memcg users. 
+ */ + struct mem_cgroup *memcg; }; extern int proto_register(struct proto *prot, int alloc_slab); @@ -851,7 +909,7 @@ static inline void sk_refcnt_debug_dec(struct sock *sk) sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks)); } -static inline void sk_refcnt_debug_release(const struct sock *sk) +inline void sk_refcnt_debug_release(const struct sock *sk) { if (atomic_read(&sk->sk_refcnt) != 1) printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n", @@ -863,6 +921,208 @@ static inline void sk_refcnt_debug_release(const struct sock *sk) #define sk_refcnt_debug_release(sk) do { } while (0) #endif /* SOCK_REFCNT_DEBUG */ +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM +extern struct jump_label_key memcg_socket_limit_enabled; +static inline struct cg_proto *parent_cg_proto(struct proto *proto, + struct cg_proto *cg_proto) +{ + return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg)); +} +#define mem_cgroup_sockets_enabled static_branch(&memcg_socket_limit_enabled) +#else +#define mem_cgroup_sockets_enabled 0 +static inline struct cg_proto *parent_cg_proto(struct proto *proto, + struct cg_proto *cg_proto) +{ + return NULL; +} +#endif + + +static inline bool sk_has_memory_pressure(const struct sock *sk) +{ + return sk->sk_prot->memory_pressure != NULL; +} + +static inline bool sk_under_memory_pressure(const struct sock *sk) +{ + if (!sk->sk_prot->memory_pressure) + return false; + + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) + return !!*sk->sk_cgrp->memory_pressure; + + return !!*sk->sk_prot->memory_pressure; +} + +static inline void sk_leave_memory_pressure(struct sock *sk) +{ + int *memory_pressure = sk->sk_prot->memory_pressure; + + if (!memory_pressure) + return; + + if (*memory_pressure) + *memory_pressure = 0; + + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { + struct cg_proto *cg_proto = sk->sk_cgrp; + struct proto *prot = sk->sk_prot; + + for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) + if (*cg_proto->memory_pressure) + *cg_proto->memory_pressure = 0; + } + +} + +static inline void sk_enter_memory_pressure(struct sock *sk) +{ + if (!sk->sk_prot->enter_memory_pressure) + return; + + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { + struct cg_proto *cg_proto = sk->sk_cgrp; + struct proto *prot = sk->sk_prot; + + for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) + cg_proto->enter_memory_pressure(sk); + } + + sk->sk_prot->enter_memory_pressure(sk); +} + +static inline long sk_prot_mem_limits(const struct sock *sk, int index) +{ + long *prot = sk->sk_prot->sysctl_mem; + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) + prot = sk->sk_cgrp->sysctl_mem; + return prot[index]; +} + +static inline void memcg_memory_allocated_add(struct cg_proto *prot, + unsigned long amt, + int *parent_status) +{ + struct res_counter *fail; + int ret; + + ret = res_counter_charge(prot->memory_allocated, + amt << PAGE_SHIFT, &fail); + + if (ret < 0) + *parent_status = OVER_LIMIT; +} + +static inline void memcg_memory_allocated_sub(struct cg_proto *prot, + unsigned long amt) +{ + res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT); +} + +static inline u64 memcg_memory_allocated_read(struct cg_proto *prot) +{ + u64 ret; + ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE); + return ret >> PAGE_SHIFT; +} + +static inline long +sk_memory_allocated(const struct sock *sk) +{ + struct proto *prot = sk->sk_prot; + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) + return memcg_memory_allocated_read(sk->sk_cgrp); + + return 
atomic_long_read(prot->memory_allocated); +} + +static inline long +sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status) +{ + struct proto *prot = sk->sk_prot; + + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { + memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status); + /* update the root cgroup regardless */ + atomic_long_add_return(amt, prot->memory_allocated); + return memcg_memory_allocated_read(sk->sk_cgrp); + } + + return atomic_long_add_return(amt, prot->memory_allocated); +} + +static inline void +sk_memory_allocated_sub(struct sock *sk, int amt, int parent_status) +{ + struct proto *prot = sk->sk_prot; + + if (mem_cgroup_sockets_enabled && sk->sk_cgrp && + parent_status != OVER_LIMIT) /* Otherwise was uncharged already */ + memcg_memory_allocated_sub(sk->sk_cgrp, amt); + + atomic_long_sub(amt, prot->memory_allocated); +} + +static inline void sk_sockets_allocated_dec(struct sock *sk) +{ + struct proto *prot = sk->sk_prot; + + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { + struct cg_proto *cg_proto = sk->sk_cgrp; + + for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) + percpu_counter_dec(cg_proto->sockets_allocated); + } + + percpu_counter_dec(prot->sockets_allocated); +} + +static inline void sk_sockets_allocated_inc(struct sock *sk) +{ + struct proto *prot = sk->sk_prot; + + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { + struct cg_proto *cg_proto = sk->sk_cgrp; + + for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) + percpu_counter_inc(cg_proto->sockets_allocated); + } + + percpu_counter_inc(prot->sockets_allocated); +} + +static inline int +sk_sockets_allocated_read_positive(struct sock *sk) +{ + struct proto *prot = sk->sk_prot; + + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) + return percpu_counter_sum_positive(sk->sk_cgrp->sockets_allocated); + + return percpu_counter_sum_positive(prot->sockets_allocated); +} + +static inline int +proto_sockets_allocated_sum_positive(struct proto *prot) +{ + return percpu_counter_sum_positive(prot->sockets_allocated); +} + +static inline long +proto_memory_allocated(struct proto *prot) +{ + return atomic_long_read(prot->memory_allocated); +} + +static inline bool +proto_memory_pressure(struct proto *prot) +{ + if (!prot->memory_pressure) + return false; + return !!*prot->memory_pressure; +} + #ifdef CONFIG_PROC_FS /* Called with local bh disabled */ @@ -1089,8 +1349,8 @@ extern struct sock *sk_alloc(struct net *net, int family, struct proto *prot); extern void sk_free(struct sock *sk); extern void sk_release_kernel(struct sock *sk); -extern struct sock *sk_clone(const struct sock *sk, - const gfp_t priority); +extern struct sock *sk_clone_lock(const struct sock *sk, + const gfp_t priority); extern struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, @@ -1393,7 +1653,7 @@ static inline int sk_can_gso(const struct sock *sk) extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst); -static inline void sk_nocaps_add(struct sock *sk, int flags) +static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags) { sk->sk_route_nocaps |= flags; sk->sk_route_caps &= ~flags; @@ -1670,7 +1930,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk) page = alloc_pages(sk->sk_allocation, 0); if (!page) { - sk->sk_prot->enter_memory_pressure(sk); + sk_enter_memory_pressure(sk); sk_stream_moderate_sndbuf(sk); } return page; @@ -1714,6 +1974,8 @@ static inline int sock_intr_errno(long timeo) extern void __sock_recv_timestamp(struct msghdr *msg, 
struct sock *sk, struct sk_buff *skb); +extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb); static __inline__ void sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) @@ -1741,6 +2003,9 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) __sock_recv_timestamp(msg, sk, skb); else sk->sk_stamp = kt; + + if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid) + __sock_recv_wifi_status(msg, sk, skb); } extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, diff --git a/include/net/tcp.h b/include/net/tcp.h index bb18c4d69ab..0118ea999f6 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -44,6 +44,7 @@ #include <net/dst.h> #include <linux/seq_file.h> +#include <linux/memcontrol.h> extern struct inet_hashinfo tcp_hashinfo; @@ -229,7 +230,6 @@ extern int sysctl_tcp_fack; extern int sysctl_tcp_reordering; extern int sysctl_tcp_ecn; extern int sysctl_tcp_dsack; -extern long sysctl_tcp_mem[3]; extern int sysctl_tcp_wmem[3]; extern int sysctl_tcp_rmem[3]; extern int sysctl_tcp_app_win; @@ -285,7 +285,7 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift) } if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && - atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) + sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2)) return true; return false; } @@ -628,7 +628,7 @@ extern u32 __tcp_select_window(struct sock *sk); struct tcp_skb_cb { union { struct inet_skb_parm h4; -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) struct inet6_skb_parm h6; #endif } header; /* For incoming frames */ @@ -773,12 +773,12 @@ static inline int tcp_is_reno(const struct tcp_sock *tp) static inline int tcp_is_fack(const struct tcp_sock *tp) { - return tp->rx_opt.sack_ok & 2; + return tp->rx_opt.sack_ok & TCP_FACK_ENABLED; } static inline void tcp_enable_fack(struct tcp_sock *tp) { - tp->rx_opt.sack_ok |= 2; + tp->rx_opt.sack_ok |= TCP_FACK_ENABLED; } static inline unsigned int tcp_left_out(const struct tcp_sock *tp) @@ -834,6 +834,14 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk) extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh); extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst); +/* The maximum number of MSS of available cwnd for which TSO defers + * sending if not using sysctl_tcp_tso_win_divisor. + */ +static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp) +{ + return 3; +} + /* Slow start with delack produces 3 packets of burst, so that * it is safe "de facto". 
This will be the default - same as * the default reordering threshold - but if reordering increases, @@ -1144,7 +1152,7 @@ struct tcp6_md5sig_key { /* - sock block */ struct tcp_md5sig_info { struct tcp4_md5sig_key *keys4; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) struct tcp6_md5sig_key *keys6; u32 entries6; u32 alloced6; @@ -1171,7 +1179,7 @@ struct tcp6_pseudohdr { union tcp_md5sum_block { struct tcp4_pseudohdr ip4; -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) struct tcp6_pseudohdr ip6; #endif }; @@ -1430,7 +1438,8 @@ extern struct request_sock_ops tcp6_request_sock_ops; extern void tcp_v4_destroy_sock(struct sock *sk); extern int tcp_v4_gso_send_check(struct sk_buff *skb); -extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features); +extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, + netdev_features_t features); extern struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb); extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head, diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h new file mode 100644 index 00000000000..3512082fa90 --- /dev/null +++ b/include/net/tcp_memcontrol.h @@ -0,0 +1,19 @@ +#ifndef _TCP_MEMCG_H +#define _TCP_MEMCG_H + +struct tcp_memcontrol { + struct cg_proto cg_proto; + /* per-cgroup tcp memory pressure knobs */ + struct res_counter tcp_memory_allocated; + struct percpu_counter tcp_sockets_allocated; + /* those two are read-mostly, leave them at the end */ + long tcp_prot_mem[3]; + int tcp_memory_pressure; +}; + +struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg); +int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss); +void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss); +unsigned long long tcp_max_memory(const struct mem_cgroup *memcg); +void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx); +#endif /* _TCP_MEMCG_H */ diff --git a/include/net/udp.h b/include/net/udp.h index 3b285f402f4..e39592f682c 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -41,7 +41,7 @@ struct udp_skb_cb { union { struct inet_skb_parm h4; -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) struct inet6_skb_parm h6; #endif } header; @@ -194,9 +194,15 @@ extern int udp_lib_setsockopt(struct sock *sk, int level, int optname, extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif); +extern struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, + __be32 daddr, __be16 dport, + int dif, struct udp_table *tbl); extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, int dif); +extern struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, + const struct in6_addr *daddr, __be16 dport, + int dif, struct udp_table *tbl); /* * SNMP statistics for UDP and UDP-Lite @@ -217,7 +223,7 @@ extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *sadd else SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field); \ } while(0) -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#if IS_ENABLED(CONFIG_IPV6) #define UDPX_INC_STATS_BH(sk, field) \ do { \ if ((sk)->sk_family == AF_INET) \ @@ -258,5 +264,6 @@ extern void udp4_proc_exit(void); extern void udp_init(void); extern int udp4_ufo_send_check(struct sk_buff *skb); -extern struct 
sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features); +extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, + netdev_features_t features); #endif /* _UDP_H */ diff --git a/include/net/xfrm.h b/include/net/xfrm.h index b203e14d26b..89174e29dca 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -827,6 +827,14 @@ static inline bool addr_match(const void *token1, const void *token2, return true; } +static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen) +{ + /* C99 6.5.7 (3): u32 << 32 is undefined behaviour */ + if (prefixlen == 0) + return true; + return !((a1 ^ a2) & htonl(0xFFFFFFFFu << (32 - prefixlen))); +} + static __inline__ __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli) { @@ -1209,8 +1217,8 @@ void xfrm_flowi_addr_get(const struct flowi *fl, memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4)); break; case AF_INET6: - ipv6_addr_copy((struct in6_addr *)&saddr->a6, &fl->u.ip6.saddr); - ipv6_addr_copy((struct in6_addr *)&daddr->a6, &fl->u.ip6.daddr); + *(struct in6_addr *)saddr->a6 = fl->u.ip6.saddr; + *(struct in6_addr *)daddr->a6 = fl->u.ip6.daddr; break; } } diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index 639a4491fc0..99965395c5f 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -281,7 +281,7 @@ static inline u16 rdma_get_vlan_id(union ib_gid *dgid) static inline struct net_device *rdma_vlan_dev_real_dev(const struct net_device *dev) { return dev->priv_flags & IFF_802_1Q_VLAN ? - vlan_dev_real_dev(dev) : 0; + vlan_dev_real_dev(dev) : NULL; } #endif /* IB_ADDR_H */ diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h index c8f94e8db69..83f77ac3395 100644 --- a/include/rdma/ib_cm.h +++ b/include/rdma/ib_cm.h @@ -38,6 +38,9 @@ #include <rdma/ib_mad.h> #include <rdma/ib_sa.h> +/* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */ +extern struct class cm_class; + enum ib_cm_state { IB_CM_IDLE, IB_CM_LISTEN, diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index d1e95c6ac77..5a35a2a2d3c 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -147,6 +147,7 @@ struct fcoe_ctlr { u8 map_dest; u8 spma; u8 probe_tries; + u8 priority; u8 dest_addr[ETH_ALEN]; u8 ctl_src_addr[ETH_ALEN]; @@ -301,6 +302,7 @@ struct fcoe_percpu_s { * @lport: The associated local port * @fcoe_pending_queue: The pending Rx queue of skbs * @fcoe_pending_queue_active: Indicates if the pending queue is active + * @priority: Packet priority (DCB) * @max_queue_depth: Max queue depth of pending queue * @min_queue_depth: Min queue depth of pending queue * @timer: The queue timer @@ -316,6 +318,7 @@ struct fcoe_port { struct fc_lport *lport; struct sk_buff_head fcoe_pending_queue; u8 fcoe_pending_queue_active; + u8 priority; u32 max_queue_depth; u32 min_queue_depth; struct timer_list timer; diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index 5994bcc1b01..87f34c3d447 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -142,7 +142,7 @@ struct iscsi_transport { int (*get_iface_param) (struct iscsi_iface *iface, enum iscsi_param_type param_type, int param, char *buf); - mode_t (*attr_is_visible)(int param_type, int param); + umode_t (*attr_is_visible)(int param_type, int param); int (*bsg_request)(struct bsg_job *job); }; diff --git a/include/sound/info.h b/include/sound/info.h index 5492cc40dc5..9ca1a493d37 100644 --- a/include/sound/info.h +++ b/include/sound/info.h @@ -72,7 +72,7 @@ struct 
snd_info_entry_ops { struct snd_info_entry { const char *name; - mode_t mode; + umode_t mode; long size; unsigned short content; union { diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 7f5fed3c89e..6873c7dd914 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -103,9 +103,10 @@ enum se_cmd_flags_table { SCF_SCSI_NON_DATA_CDB = 0x00000040, SCF_SCSI_CDB_EXCEPTION = 0x00000080, SCF_SCSI_RESERVATION_CONFLICT = 0x00000100, - SCF_SE_CMD_FAILED = 0x00000400, + SCF_FUA = 0x00000200, SCF_SE_LUN_CMD = 0x00000800, SCF_SE_ALLOW_EOO = 0x00001000, + SCF_BIDI = 0x00002000, SCF_SENT_CHECK_CONDITION = 0x00004000, SCF_OVERFLOW_BIT = 0x00008000, SCF_UNDERFLOW_BIT = 0x00010000, @@ -154,6 +155,7 @@ enum tcm_sense_reason_table { TCM_CHECK_CONDITION_ABORT_CMD = 0x0d, TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e, TCM_CHECK_CONDITION_NOT_READY = 0x0f, + TCM_RESERVATION_CONFLICT = 0x10, }; struct se_obj { @@ -211,7 +213,6 @@ struct t10_alua_lu_gp { u16 lu_gp_id; int lu_gp_valid_id; u32 lu_gp_members; - atomic_t lu_gp_shutdown; atomic_t lu_gp_ref_cnt; spinlock_t lu_gp_lock; struct config_group lu_gp_group; @@ -422,11 +423,9 @@ struct se_cmd { int sam_task_attr; /* Transport protocol dependent state, see transport_state_table */ enum transport_state_table t_state; - /* Transport specific error status */ - int transport_error_status; /* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */ - int check_release:1; - int cmd_wait_set:1; + unsigned check_release:1; + unsigned cmd_wait_set:1; /* See se_cmd_flags_table */ u32 se_cmd_flags; u32 se_ordered_id; @@ -441,13 +440,10 @@ struct se_cmd { /* Used for sense data */ void *sense_buffer; struct list_head se_delayed_node; - struct list_head se_ordered_node; struct list_head se_lun_node; struct list_head se_qf_node; struct se_device *se_dev; struct se_dev_entry *se_deve; - struct se_device *se_obj_ptr; - struct se_device *se_orig_obj_ptr; struct se_lun *se_lun; /* Only used for internal passthrough and legacy TCM fabric modules */ struct se_session *se_sess; @@ -463,8 +459,6 @@ struct se_cmd { unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; unsigned long long t_task_lba; int t_tasks_failed; - int t_tasks_fua; - bool t_tasks_bidi; u32 t_tasks_sg_chained_no; atomic_t t_fe_count; atomic_t t_se_count; @@ -489,14 +483,6 @@ struct se_cmd { struct work_struct work; - /* - * Used for pre-registered fabric SGL passthrough WRITE and READ - * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop - * and other HW target mode fabric modules. 
- */ - struct scatterlist *t_task_pt_sgl; - u32 t_task_pt_sgl_num; - struct scatterlist *t_data_sg; unsigned int t_data_nents; struct scatterlist *t_bidi_data_sg; @@ -562,7 +548,7 @@ struct se_node_acl { } ____cacheline_aligned; struct se_session { - int sess_tearing_down:1; + unsigned sess_tearing_down:1; u64 sess_bin_isid; struct se_node_acl *se_node_acl; struct se_portal_group *se_tpg; @@ -683,7 +669,6 @@ struct se_subsystem_dev { struct t10_reservation t10_pr; spinlock_t se_dev_lock; void *se_dev_su_ptr; - struct list_head se_dev_node; struct config_group se_dev_group; /* For T10 Reservations */ struct config_group se_dev_pr_group; @@ -692,9 +677,6 @@ struct se_subsystem_dev { } ____cacheline_aligned; struct se_device { - /* Set to 1 if thread is NOT sleeping on thread_sem */ - u8 thread_active; - u8 dev_status_timer_flags; /* RELATIVE TARGET PORT IDENTIFER Counter */ u16 dev_rpti_counter; /* Used for SAM Task Attribute ordering */ @@ -719,14 +701,10 @@ struct se_device { u64 write_bytes; spinlock_t stats_lock; /* Active commands on this virtual SE device */ - atomic_t active_cmds; atomic_t simple_cmds; atomic_t depth_left; atomic_t dev_ordered_id; - atomic_t dev_tur_active; atomic_t execute_tasks; - atomic_t dev_status_thr_count; - atomic_t dev_hoq_count; atomic_t dev_ordered_sync; atomic_t dev_qf_count; struct se_obj dev_obj; @@ -734,14 +712,9 @@ struct se_device { struct se_obj dev_export_obj; struct se_queue_obj dev_queue_obj; spinlock_t delayed_cmd_lock; - spinlock_t ordered_cmd_lock; spinlock_t execute_task_lock; - spinlock_t state_task_lock; - spinlock_t dev_alua_lock; spinlock_t dev_reservation_lock; - spinlock_t dev_state_lock; spinlock_t dev_status_lock; - spinlock_t dev_status_thr_lock; spinlock_t se_port_lock; spinlock_t se_tmr_lock; spinlock_t qf_cmd_lock; @@ -753,14 +726,10 @@ struct se_device { struct t10_pr_registration *dev_pr_res_holder; struct list_head dev_sep_list; struct list_head dev_tmr_list; - struct timer_list dev_status_timer; /* Pointer to descriptor for processing thread */ struct task_struct *process_thread; - pid_t process_thread_pid; - struct task_struct *dev_mgmt_thread; struct work_struct qf_work_queue; struct list_head delayed_cmd_list; - struct list_head ordered_cmd_list; struct list_head execute_task_list; struct list_head state_task_list; struct list_head qf_cmd_list; @@ -771,8 +740,6 @@ struct se_device { struct se_subsystem_api *transport; /* Linked list for struct se_hba struct se_device list */ struct list_head dev_list; - /* Linked list for struct se_global->g_se_dev_list */ - struct list_head g_se_dev_list; } ____cacheline_aligned; struct se_hba { @@ -834,7 +801,6 @@ struct se_port { u32 sep_index; struct scsi_port_stats sep_stats; /* Used for ALUA Target Port Groups membership */ - atomic_t sep_tg_pt_gp_active; atomic_t sep_tg_pt_secondary_offline; /* Used for PR ALL_TG_PT=1 */ atomic_t sep_tg_pt_ref_cnt; diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index c16e9431dd0..dac4f2d859f 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -10,29 +10,6 @@ #define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */ -#define PYX_TRANSPORT_SENT_TO_TRANSPORT 0 -#define PYX_TRANSPORT_WRITE_PENDING 1 - -#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE -1 -#define PYX_TRANSPORT_HBA_QUEUE_FULL -2 -#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS -3 -#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES -4 -#define PYX_TRANSPORT_INVALID_CDB_FIELD -5 -#define 
PYX_TRANSPORT_INVALID_PARAMETER_LIST -6 -#define PYX_TRANSPORT_LU_COMM_FAILURE -7 -#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE -8 -#define PYX_TRANSPORT_WRITE_PROTECTED -9 -#define PYX_TRANSPORT_RESERVATION_CONFLICT -10 -#define PYX_TRANSPORT_ILLEGAL_REQUEST -11 -#define PYX_TRANSPORT_USE_SENSE_REASON -12 - -#ifndef SAM_STAT_RESERVATION_CONFLICT -#define SAM_STAT_RESERVATION_CONFLICT 0x18 -#endif - -#define TRANSPORT_PLUGIN_FREE 0 -#define TRANSPORT_PLUGIN_REGISTERED 1 - #define TRANSPORT_PLUGIN_PHBA_PDEV 1 #define TRANSPORT_PLUGIN_VHBA_PDEV 2 #define TRANSPORT_PLUGIN_VHBA_VDEV 3 @@ -158,7 +135,6 @@ extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *); extern int transport_handle_cdb_direct(struct se_cmd *); extern int transport_generic_handle_cdb_map(struct se_cmd *); extern int transport_generic_handle_data(struct se_cmd *); -extern void transport_new_cmd_failure(struct se_cmd *); extern int transport_generic_handle_tmr(struct se_cmd *); extern bool target_stop_task(struct se_task *task, unsigned long *flags); extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 669fbd62ec2..d2d88bed891 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -241,24 +241,73 @@ TRACE_EVENT(rcu_fqs, /* * Tracepoint for dyntick-idle entry/exit events. These take a string - * as argument: "Start" for entering dyntick-idle mode and "End" for - * leaving it. + * as argument: "Start" for entering dyntick-idle mode, "End" for + * leaving it, "--=" for events moving towards idle, and "++=" for events + * moving away from idle. "Error on entry: not idle task" and "Error on + * exit: not idle task" indicate that a non-idle task is erroneously + * toying with the idle loop. + * + * These events also take a pair of numbers, which indicate the nesting + * depth before and after the event of interest. Note that task-related + * events use the upper bits of each number, while interrupt-related + * events use the lower bits. */ TRACE_EVENT(rcu_dyntick, - TP_PROTO(char *polarity), + TP_PROTO(char *polarity, long long oldnesting, long long newnesting), - TP_ARGS(polarity), + TP_ARGS(polarity, oldnesting, newnesting), TP_STRUCT__entry( __field(char *, polarity) + __field(long long, oldnesting) + __field(long long, newnesting) ), TP_fast_assign( __entry->polarity = polarity; + __entry->oldnesting = oldnesting; + __entry->newnesting = newnesting; + ), + + TP_printk("%s %llx %llx", __entry->polarity, + __entry->oldnesting, __entry->newnesting) +); + +/* + * Tracepoint for RCU preparation for idle, the goal being to get RCU + * processing done so that the current CPU can shut off its scheduling + * clock and enter dyntick-idle mode. One way to accomplish this is + * to drain all RCU callbacks from this CPU, and the other is to have + * done everything RCU requires for the current grace period. In this + * latter case, the CPU will be awakened at the end of the current grace + * period in order to process the remainder of its callbacks. + * + * These tracepoints take a string as argument: + * + * "No callbacks": Nothing to do, no callbacks on this CPU. + * "In holdoff": Nothing to do, holding off after unsuccessful attempt. + * "Begin holdoff": Attempt failed, don't retry until next jiffy. + * "Dyntick with callbacks": Entering dyntick-idle despite callbacks. + * "More callbacks": Still more callbacks, try again to clear them out. 
+ * "Callbacks drained": All callbacks processed, off to dyntick idle! + * "Timer": Timer fired to cause CPU to continue processing callbacks. + */ +TRACE_EVENT(rcu_prep_idle, + + TP_PROTO(char *reason), + + TP_ARGS(reason), + + TP_STRUCT__entry( + __field(char *, reason) + ), + + TP_fast_assign( + __entry->reason = reason; ), - TP_printk("%s", __entry->polarity) + TP_printk("%s", __entry->reason) ); /* @@ -412,27 +461,71 @@ TRACE_EVENT(rcu_invoke_kfree_callback, /* * Tracepoint for exiting rcu_do_batch after RCU callbacks have been - * invoked. The first argument is the name of the RCU flavor and - * the second argument is number of callbacks actually invoked. + * invoked. The first argument is the name of the RCU flavor, + * the second argument is number of callbacks actually invoked, + * the third argument (cb) is whether or not any of the callbacks that + * were ready to invoke at the beginning of this batch are still + * queued, the fourth argument (nr) is the return value of need_resched(), + * the fifth argument (iit) is 1 if the current task is the idle task, + * and the sixth argument (risk) is the return value from + * rcu_is_callbacks_kthread(). */ TRACE_EVENT(rcu_batch_end, - TP_PROTO(char *rcuname, int callbacks_invoked), + TP_PROTO(char *rcuname, int callbacks_invoked, + bool cb, bool nr, bool iit, bool risk), - TP_ARGS(rcuname, callbacks_invoked), + TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk), TP_STRUCT__entry( __field(char *, rcuname) __field(int, callbacks_invoked) + __field(bool, cb) + __field(bool, nr) + __field(bool, iit) + __field(bool, risk) ), TP_fast_assign( __entry->rcuname = rcuname; __entry->callbacks_invoked = callbacks_invoked; + __entry->cb = cb; + __entry->nr = nr; + __entry->iit = iit; + __entry->risk = risk; + ), + + TP_printk("%s CBs-invoked=%d idle=%c%c%c%c", + __entry->rcuname, __entry->callbacks_invoked, + __entry->cb ? 'C' : '.', + __entry->nr ? 'S' : '.', + __entry->iit ? 'I' : '.', + __entry->risk ? 'R' : '.') +); + +/* + * Tracepoint for rcutorture readers. The first argument is the name + * of the RCU flavor from rcutorture's viewpoint and the second argument + * is the callback address. 
+ */ +TRACE_EVENT(rcu_torture_read, + + TP_PROTO(char *rcutorturename, struct rcu_head *rhp), + + TP_ARGS(rcutorturename, rhp), + + TP_STRUCT__entry( + __field(char *, rcutorturename) + __field(struct rcu_head *, rhp) + ), + + TP_fast_assign( + __entry->rcutorturename = rcutorturename; + __entry->rhp = rhp; ), - TP_printk("%s CBs-invoked=%d", - __entry->rcuname, __entry->callbacks_invoked) + TP_printk("%s torture read %p", + __entry->rcutorturename, __entry->rhp) ); #else /* #ifdef CONFIG_RCU_TRACE */ @@ -443,13 +536,16 @@ TRACE_EVENT(rcu_batch_end, #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0) #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0) #define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0) -#define trace_rcu_dyntick(polarity) do { } while (0) +#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0) +#define trace_rcu_prep_idle(reason) do { } while (0) #define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0) #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0) #define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0) #define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0) #define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0) -#define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0) +#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \ + do { } while (0) +#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0) #endif /* #else #ifdef CONFIG_RCU_TRACE */ diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h index 1e3193b8fcc..12fbf43524e 100644 --- a/include/trace/events/regmap.h +++ b/include/trace/events/regmap.h @@ -55,6 +55,15 @@ DEFINE_EVENT(regmap_reg, regmap_reg_read, ); +DEFINE_EVENT(regmap_reg, regmap_reg_read_cache, + + TP_PROTO(struct device *dev, unsigned int reg, + unsigned int val), + + TP_ARGS(dev, reg, val) + +); + DECLARE_EVENT_CLASS(regmap_block, TP_PROTO(struct device *dev, unsigned int reg, int count), diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 959ff18b63b..6ba596b07a7 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -331,6 +331,13 @@ DEFINE_EVENT(sched_stat_template, sched_stat_iowait, TP_ARGS(tsk, delay)); /* + * Tracepoint for accounting blocked time (time the task is in uninterruptible). + */ +DEFINE_EVENT(sched_stat_template, sched_stat_blocked, + TP_PROTO(struct task_struct *tsk, u64 delay), + TP_ARGS(tsk, delay)); + +/* * Tracepoint for accounting runtime (time the task is executing * on a CPU). */ @@ -363,6 +370,56 @@ TRACE_EVENT(sched_stat_runtime, (unsigned long long)__entry->vruntime) ); +#ifdef CREATE_TRACE_POINTS +static inline u64 trace_get_sleeptime(struct task_struct *tsk) +{ +#ifdef CONFIG_SCHEDSTATS + u64 block, sleep; + + block = tsk->se.statistics.block_start; + sleep = tsk->se.statistics.sleep_start; + tsk->se.statistics.block_start = 0; + tsk->se.statistics.sleep_start = 0; + + return block ? block : sleep ? sleep : 0; +#else + return 0; +#endif +} +#endif + +/* + * Tracepoint for accounting sleeptime (time the task is sleeping + * or waiting for I/O). 
+ */ +TRACE_EVENT(sched_stat_sleeptime, + + TP_PROTO(struct task_struct *tsk, u64 now), + + TP_ARGS(tsk, now), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( u64, sleeptime ) + ), + + TP_fast_assign( + memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); + __entry->pid = tsk->pid; + __entry->sleeptime = trace_get_sleeptime(tsk); + __entry->sleeptime = __entry->sleeptime ? + now - __entry->sleeptime : 0; + ) + TP_perf_assign( + __perf_count(__entry->sleeptime); + ), + + TP_printk("comm=%s pid=%d sleeptime=%Lu [ns]", + __entry->comm, __entry->pid, + (unsigned long long)__entry->sleeptime) +); + /* * Tracepoint for showing priority inheritance modifying a tasks * priority. diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index b99caa8b780..99d1d0decf8 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -21,6 +21,16 @@ {I_REFERENCED, "I_REFERENCED"} \ ) +#define WB_WORK_REASON \ + {WB_REASON_BACKGROUND, "background"}, \ + {WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages"}, \ + {WB_REASON_SYNC, "sync"}, \ + {WB_REASON_PERIODIC, "periodic"}, \ + {WB_REASON_LAPTOP_TIMER, "laptop_timer"}, \ + {WB_REASON_FREE_MORE_MEM, "free_more_memory"}, \ + {WB_REASON_FS_FREE_SPACE, "fs_free_space"}, \ + {WB_REASON_FORKER_THREAD, "forker_thread"} + struct wb_writeback_work; DECLARE_EVENT_CLASS(writeback_work_class, @@ -55,7 +65,7 @@ DECLARE_EVENT_CLASS(writeback_work_class, __entry->for_kupdate, __entry->range_cyclic, __entry->for_background, - wb_reason_name[__entry->reason] + __print_symbolic(__entry->reason, WB_WORK_REASON) ) ); #define DEFINE_WRITEBACK_WORK_EVENT(name) \ @@ -184,7 +194,8 @@ TRACE_EVENT(writeback_queue_io, __entry->older, /* older_than_this in jiffies */ __entry->age, /* older_than_this in relative milliseconds */ __entry->moved, - wb_reason_name[__entry->reason]) + __print_symbolic(__entry->reason, WB_WORK_REASON) + ) ); TRACE_EVENT(global_dirty_state, diff --git a/include/video/omapdss.h b/include/video/omapdss.h index b66ebb2032c..378c7ed6760 100644 --- a/include/video/omapdss.h +++ b/include/video/omapdss.h @@ -307,15 +307,8 @@ struct omap_dss_board_info { void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask); }; -#if defined(CONFIG_OMAP2_DSS_MODULE) || defined(CONFIG_OMAP2_DSS) /* Init with the board info */ extern int omap_display_init(struct omap_dss_board_info *board_data); -#else -static inline int omap_display_init(struct omap_dss_board_info *board_data) -{ - return 0; -} -#endif struct omap_display_platform_data { struct omap_dss_board_info *board_data; diff --git a/include/xen/balloon.h b/include/xen/balloon.h index d29c153705b..cc2e1a7e44e 100644 --- a/include/xen/balloon.h +++ b/include/xen/balloon.h @@ -29,11 +29,11 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem); void free_xenballooned_pages(int nr_pages, struct page **pages); -struct sys_device; +struct device; #ifdef CONFIG_XEN_SELFBALLOONING -extern int register_xen_selfballooning(struct sys_device *sysdev); +extern int register_xen_selfballooning(struct device *dev); #else -static inline int register_xen_selfballooning(struct sys_device *sysdev) +static inline int register_xen_selfballooning(struct device *dev) { return -ENOSYS; } diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h index f0b6890370b..f6f07aa35af 100644 --- a/include/xen/interface/io/xs_wire.h +++ b/include/xen/interface/io/xs_wire.h @@ -29,8 +29,7 @@ enum xsd_sockmsg_type 
XS_IS_DOMAIN_INTRODUCED, XS_RESUME, XS_SET_TARGET, - XS_RESTRICT, - XS_RESET_WATCHES + XS_RESTRICT }; #define XS_WRITE_NONE "NONE" diff --git a/include/xen/platform_pci.h b/include/xen/platform_pci.h index a785a3b0c8c..438c256c274 100644 --- a/include/xen/platform_pci.h +++ b/include/xen/platform_pci.h @@ -29,8 +29,7 @@ static inline int xen_must_unplug_nics(void) { #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \ defined(CONFIG_XEN_NETDEV_FRONTEND_MODULE)) && \ - (defined(CONFIG_XEN_PLATFORM_PCI) || \ - defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) + defined(CONFIG_XEN_PVHVM) return 1; #else return 0; @@ -40,8 +39,7 @@ static inline int xen_must_unplug_nics(void) { static inline int xen_must_unplug_disks(void) { #if (defined(CONFIG_XEN_BLKDEV_FRONTEND) || \ defined(CONFIG_XEN_BLKDEV_FRONTEND_MODULE)) && \ - (defined(CONFIG_XEN_PLATFORM_PCI) || \ - defined(CONFIG_XEN_PLATFORM_PCI_MODULE)) + defined(CONFIG_XEN_PVHVM) return 1; #else return 0;
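
The expanded rcu_batch_end tracepoint in the rcu.h hunk above packs four booleans into a compact "idle=" flag string. The following is a minimal standalone userspace sketch (not kernel code; the sample values are hypothetical) that mirrors the TP_printk() format from that hunk, just to show how the new cb/nr/iit/risk arguments end up rendered in the trace output.

/* Standalone sketch mirroring the new rcu_batch_end TP_printk() format.
 * Sample values below are made up, not taken from a real trace.
 */
#include <stdbool.h>
#include <stdio.h>

static void print_rcu_batch_end(const char *rcuname, int callbacks_invoked,
                                bool cb, bool nr, bool iit, bool risk)
{
        /* 'C' = callbacks still queued, 'S' = need_resched() was set,
         * 'I' = running in the idle task, 'R' = callbacks kthread ran.
         */
        printf("%s CBs-invoked=%d idle=%c%c%c%c\n",
               rcuname, callbacks_invoked,
               cb ? 'C' : '.', nr ? 'S' : '.',
               iit ? 'I' : '.', risk ? 'R' : '.');
}

int main(void)
{
        print_rcu_batch_end("rcu_sched", 10, false, true, false, false);
        return 0;
}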
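
Similarly, the writeback.h hunks replace the wb_reason_name[] array lookup with __print_symbolic() driven by the WB_WORK_REASON table. __print_symbolic() is a tracing helper; the sketch below is only a plain-C approximation of the value-to-string mapping it performs. The enum values here are hypothetical placeholders; only the reason strings are taken from the WB_WORK_REASON table above.

/* Userspace approximation of __print_symbolic() over WB_WORK_REASON:
 * map a numeric writeback reason to its short name.
 */
#include <stdio.h>

enum wb_reason {
        WB_REASON_BACKGROUND, WB_REASON_TRY_TO_FREE_PAGES, WB_REASON_SYNC,
        WB_REASON_PERIODIC, WB_REASON_LAPTOP_TIMER, WB_REASON_FREE_MORE_MEM,
        WB_REASON_FS_FREE_SPACE, WB_REASON_FORKER_THREAD
};

static const char *wb_reason_str(enum wb_reason r)
{
        switch (r) {
        case WB_REASON_BACKGROUND:         return "background";
        case WB_REASON_TRY_TO_FREE_PAGES:  return "try_to_free_pages";
        case WB_REASON_SYNC:               return "sync";
        case WB_REASON_PERIODIC:           return "periodic";
        case WB_REASON_LAPTOP_TIMER:       return "laptop_timer";
        case WB_REASON_FREE_MORE_MEM:      return "free_more_memory";
        case WB_REASON_FS_FREE_SPACE:      return "fs_free_space";
        case WB_REASON_FORKER_THREAD:      return "forker_thread";
        }
        return "unknown";
}

int main(void)
{
        printf("reason=%s\n", wb_reason_str(WB_REASON_PERIODIC));
        return 0;
}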