Diffstat (limited to 'include/linux')
202 files changed, 4459 insertions, 1428 deletions
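One small but widely used change in this set is the bitops.h rewrite of GENMASK()/GENMASK_ULL(), which drops the shift-of-one form in favour of a mask-and-shift form so that a mask covering the whole word no longer depends on an undefined full-width shift. Below is a minimal stand-alone sketch of the new macros; the macro bodies are copied from the bitops.h hunk further down, while the BITS_PER_LONG/BITS_PER_LONG_LONG values are assumed here for a 64-bit build.

#include <stdio.h>

/* Assumed for a 64-bit build; in the kernel these come from asm headers. */
#define BITS_PER_LONG		64
#define BITS_PER_LONG_LONG	64

/* New definitions, as added by the bitops.h hunk below. */
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

int main(void)
{
	/* Example from the header comment: bits 39..21 set. */
	printf("GENMASK_ULL(39, 21) = 0x%016llx\n",
	       GENMASK_ULL(39, 21));		/* 0x000000ffffe00000 */

	/* A small range works as before. */
	printf("GENMASK(7, 4)       = 0x%lx\n",
	       GENMASK(7, 4));			/* 0xf0 */

	/* Covering the whole word is now well defined; the old
	 * ((1 << (h - l + 1)) - 1) form shifted by the type width here. */
	printf("GENMASK_ULL(63, 0)  = 0x%016llx\n",
	       GENMASK_ULL(63, 0));		/* 0xffffffffffffffff */
	return 0;
}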
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 407a12f663e..6bff83b1f29 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -28,6 +28,7 @@ #include <linux/errno.h> #include <linux/ioport.h> /* for struct resource */ #include <linux/device.h> +#include <linux/property.h> #ifndef _LINUX #define _LINUX @@ -123,6 +124,10 @@ int acpi_numa_init (void); int acpi_table_init (void); int acpi_table_parse(char *id, acpi_tbl_table_handler handler); +int __init acpi_parse_entries(char *id, unsigned long table_size, + acpi_tbl_entry_handler handler, + struct acpi_table_header *table_header, + int entry_id, unsigned int max_entries); int __init acpi_table_parse_entries(char *id, unsigned long table_size, int entry_id, acpi_tbl_entry_handler handler, @@ -423,14 +428,11 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, const struct device *dev); -static inline bool acpi_driver_match_device(struct device *dev, - const struct device_driver *drv) -{ - return !!acpi_match_device(drv->acpi_match_table, dev); -} - +extern bool acpi_driver_match_device(struct device *dev, + const struct device_driver *drv); int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); int acpi_device_modalias(struct device *, char *, int); +void acpi_walk_dep_device_list(acpi_handle handle); struct platform_device *acpi_create_platform_device(struct acpi_device *); #define ACPI_PTR(_ptr) (_ptr) @@ -443,6 +445,23 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *); #define ACPI_COMPANION_SET(dev, adev) do { } while (0) #define ACPI_HANDLE(dev) (NULL) +struct fwnode_handle; + +static inline bool is_acpi_node(struct fwnode_handle *fwnode) +{ + return false; +} + +static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode) +{ + return NULL; +} + +static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev) +{ + return NULL; +} + static inline const char *acpi_dev_name(struct acpi_device *adev) { return NULL; @@ -553,16 +572,26 @@ static inline void arch_reserve_mem_area(acpi_physical_address addr, #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) #endif -#if defined(CONFIG_ACPI) && defined(CONFIG_PM_RUNTIME) +#if defined(CONFIG_ACPI) && defined(CONFIG_PM) int acpi_dev_runtime_suspend(struct device *dev); int acpi_dev_runtime_resume(struct device *dev); int acpi_subsys_runtime_suspend(struct device *dev); int acpi_subsys_runtime_resume(struct device *dev); +struct acpi_device *acpi_dev_pm_get_node(struct device *dev); +int acpi_dev_pm_attach(struct device *dev, bool power_on); #else static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; } static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; } static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } +static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) +{ + return NULL; +} +static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) +{ + return -ENODEV; +} #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) @@ -585,20 +614,6 @@ static inline int acpi_subsys_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_freeze(struct device *dev) { return 0; } #endif -#if defined(CONFIG_ACPI) && defined(CONFIG_PM) -struct acpi_device *acpi_dev_pm_get_node(struct 
device *dev); -int acpi_dev_pm_attach(struct device *dev, bool power_on); -#else -static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) -{ - return NULL; -} -static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) -{ - return -ENODEV; -} -#endif - #ifdef CONFIG_ACPI __printf(3, 4) void acpi_handle_printk(const char *level, acpi_handle handle, @@ -659,4 +674,114 @@ do { \ #endif #endif +struct acpi_gpio_params { + unsigned int crs_entry_index; + unsigned int line_index; + bool active_low; +}; + +struct acpi_gpio_mapping { + const char *name; + const struct acpi_gpio_params *data; + unsigned int size; +}; + +#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) +int acpi_dev_add_driver_gpios(struct acpi_device *adev, + const struct acpi_gpio_mapping *gpios); + +static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) +{ + if (adev) + adev->driver_gpios = NULL; +} +#else +static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev, + const struct acpi_gpio_mapping *gpios) +{ + return -ENXIO; +} +static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {} +#endif + +/* Device properties */ + +#define MAX_ACPI_REFERENCE_ARGS 8 +struct acpi_reference_args { + struct acpi_device *adev; + size_t nargs; + u64 args[MAX_ACPI_REFERENCE_ARGS]; +}; + +#ifdef CONFIG_ACPI +int acpi_dev_get_property(struct acpi_device *adev, const char *name, + acpi_object_type type, const union acpi_object **obj); +int acpi_dev_get_property_array(struct acpi_device *adev, const char *name, + acpi_object_type type, + const union acpi_object **obj); +int acpi_dev_get_property_reference(struct acpi_device *adev, + const char *name, size_t index, + struct acpi_reference_args *args); + +int acpi_dev_prop_get(struct acpi_device *adev, const char *propname, + void **valptr); +int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname, + enum dev_prop_type proptype, void *val); +int acpi_dev_prop_read(struct acpi_device *adev, const char *propname, + enum dev_prop_type proptype, void *val, size_t nval); + +struct acpi_device *acpi_get_next_child(struct device *dev, + struct acpi_device *child); +#else +static inline int acpi_dev_get_property(struct acpi_device *adev, + const char *name, acpi_object_type type, + const union acpi_object **obj) +{ + return -ENXIO; +} +static inline int acpi_dev_get_property_array(struct acpi_device *adev, + const char *name, + acpi_object_type type, + const union acpi_object **obj) +{ + return -ENXIO; +} +static inline int acpi_dev_get_property_reference(struct acpi_device *adev, + const char *name, const char *cells_name, + size_t index, struct acpi_reference_args *args) +{ + return -ENXIO; +} + +static inline int acpi_dev_prop_get(struct acpi_device *adev, + const char *propname, + void **valptr) +{ + return -ENXIO; +} + +static inline int acpi_dev_prop_read_single(struct acpi_device *adev, + const char *propname, + enum dev_prop_type proptype, + void *val) +{ + return -ENXIO; +} + +static inline int acpi_dev_prop_read(struct acpi_device *adev, + const char *propname, + enum dev_prop_type proptype, + void *val, size_t nval) +{ + return -ENXIO; +} + +static inline struct acpi_device *acpi_get_next_child(struct device *dev, + struct acpi_device *child) +{ + return NULL; +} + +#endif + #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h index a495a959e8a..33eb274cd0e 100644 --- a/include/linux/ath9k_platform.h +++ b/include/linux/ath9k_platform.h @@ -31,8 
+31,11 @@ struct ath9k_platform_data { u32 gpio_mask; u32 gpio_val; + bool endian_check; bool is_clk_25mhz; bool tx_gain_buffalo; + bool disable_2ghz; + bool disable_5ghz; int (*get_mac_revision)(void); int (*external_reset)(void); diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h index 91b77f8d495..9177947bf03 100644 --- a/include/linux/atmel-mci.h +++ b/include/linux/atmel-mci.h @@ -11,6 +11,7 @@ * @detect_pin: GPIO pin wired to the card detect switch * @wp_pin: GPIO pin wired to the write protect sensor * @detect_is_active_high: The state of the detect pin when it is active + * @non_removable: The slot is not removable, only detect once * * If a given slot is not present on the board, @bus_width should be * set to 0. The other fields are ignored in this case. @@ -26,6 +27,7 @@ struct mci_slot_pdata { int detect_pin; int wp_pin; bool detect_is_active_high; + bool non_removable; }; /** diff --git a/include/linux/audit.h b/include/linux/audit.h index e58fe7df8b9..0c04917c2f1 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -130,6 +130,7 @@ extern void audit_putname(struct filename *name); #define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ extern void __audit_inode(struct filename *name, const struct dentry *dentry, unsigned int flags); +extern void __audit_file(const struct file *); extern void __audit_inode_child(const struct inode *parent, const struct dentry *dentry, const unsigned char type); @@ -183,6 +184,11 @@ static inline void audit_inode(struct filename *name, __audit_inode(name, dentry, flags); } } +static inline void audit_file(struct file *file) +{ + if (unlikely(!audit_dummy_context())) + __audit_file(file); +} static inline void audit_inode_parent_hidden(struct filename *name, const struct dentry *dentry) { @@ -357,6 +363,9 @@ static inline void audit_inode(struct filename *name, const struct dentry *dentry, unsigned int parent) { } +static inline void audit_file(struct file *file) +{ +} static inline void audit_inode_parent_hidden(struct filename *name, const struct dentry *dentry) { } diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 729f48e6b20..eb1c6a47b67 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -447,4 +447,6 @@ extern u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset); #define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */ extern u32 bcma_core_dma_translation(struct bcma_device *core); +extern unsigned int bcma_core_irq(struct bcma_device *core, int num); + #endif /* LINUX_BCMA_H_ */ diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h index fb61f3fb4dd..0b3b32aeeb8 100644 --- a/include/linux/bcma/bcma_driver_mips.h +++ b/include/linux/bcma/bcma_driver_mips.h @@ -43,12 +43,12 @@ struct bcma_drv_mips { extern void bcma_core_mips_init(struct bcma_drv_mips *mcore); extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore); -extern unsigned int bcma_core_irq(struct bcma_device *core); +extern unsigned int bcma_core_mips_irq(struct bcma_device *dev); #else static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { } static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { } -static inline unsigned int bcma_core_irq(struct bcma_device *core) +static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev) { return 0; } diff --git a/include/linux/bitops.h b/include/linux/bitops.h index be5fd38bd5a..5d858e02997 100644 --- 
a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -18,8 +18,11 @@ * position @h. For example * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. */ -#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l)) -#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)) +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) + +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) extern unsigned int __sw_hweight8(unsigned int w); extern unsigned int __sw_hweight16(unsigned int w); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index c9be1589415..15f7034aa37 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -167,6 +167,23 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved); struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); +enum { + BLK_MQ_UNIQUE_TAG_BITS = 16, + BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1, +}; + +u32 blk_mq_unique_tag(struct request *rq); + +static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag) +{ + return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS; +} + +static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) +{ + return unique_tag & BLK_MQ_UNIQUE_TAG_MASK; +} + struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index aac0f9ea952..0495e385424 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -398,7 +398,7 @@ struct request_queue { */ struct kobject mq_kobj; -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM struct device *dev; int rpm_status; unsigned int nr_pending; @@ -1057,7 +1057,7 @@ extern void blk_put_queue(struct request_queue *); /* * block layer runtime pm functions */ -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); extern int blk_pre_runtime_suspend(struct request_queue *q); extern void blk_post_runtime_suspend(struct request_queue *q, int err); @@ -1136,7 +1136,6 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) /* * tag stuff */ -#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) extern int blk_queue_start_tag(struct request_queue *, struct request *); extern struct request *blk_queue_find_tag(struct request_queue *, int); extern void blk_queue_end_tag(struct request_queue *, struct request *); diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3cf91754a95..bbfceb75645 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -22,7 +22,7 @@ struct bpf_map_ops { /* funcs callable from userspace and from eBPF programs */ void *(*map_lookup_elem)(struct bpf_map *map, void *key); - int (*map_update_elem)(struct bpf_map *map, void *key, void *value); + int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); int (*map_delete_elem)(struct bpf_map *map, void *key); }; @@ -128,9 +128,18 @@ struct bpf_prog_aux { struct work_struct work; }; +#ifdef CONFIG_BPF_SYSCALL void bpf_prog_put(struct bpf_prog *prog); +#else +static inline void bpf_prog_put(struct bpf_prog *prog) {} +#endif struct bpf_prog *bpf_prog_get(u32 ufd); /* verify correctness of eBPF program */ int bpf_check(struct bpf_prog *fp, union bpf_attr *attr); +/* verifier prototypes for helper functions called from eBPF programs */ +extern struct 
bpf_func_proto bpf_map_lookup_elem_proto; +extern struct bpf_func_proto bpf_map_update_elem_proto; +extern struct bpf_func_proto bpf_map_delete_elem_proto; + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 6992afc6ba7..c05ff0f9f9a 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -99,6 +99,12 @@ inval_skb: return 1; } +static inline bool can_is_canfd_skb(const struct sk_buff *skb) +{ + /* the CAN specific type of skb is identified by its data length */ + return skb->len == CANFD_MTU; +} + /* get data length from can_dlc with sanitized can_dlc */ u8 can_dlc2len(u8 can_dlc); @@ -121,6 +127,9 @@ void unregister_candev(struct net_device *dev); int can_restart_now(struct net_device *dev); void can_bus_off(struct net_device *dev); +void can_change_state(struct net_device *dev, struct can_frame *cf, + enum can_state tx_state, enum can_state rx_state); + void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, unsigned int idx); unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 1d519688904..da0dae0600e 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -113,6 +113,19 @@ static inline void css_get(struct cgroup_subsys_state *css) } /** + * css_get_many - obtain references on the specified css + * @css: target css + * @n: number of references to get + * + * The caller must already have a reference. + */ +static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n) +{ + if (!(css->flags & CSS_NO_REF)) + percpu_ref_get_many(&css->refcnt, n); +} + +/** * css_tryget - try to obtain a reference on the specified css * @css: target css * @@ -159,6 +172,19 @@ static inline void css_put(struct cgroup_subsys_state *css) percpu_ref_put(&css->refcnt); } +/** + * css_put_many - put css references + * @css: target css + * @n: number of references to put + * + * Put references obtained via css_get() and css_tryget_online(). 
+ */ +static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n) +{ + if (!(css->flags & CSS_NO_REF)) + percpu_ref_put_many(&css->refcnt, n); +} + /* bits in struct cgroup flags field */ enum { /* Control Group requires release notifications to userspace */ @@ -367,8 +393,8 @@ struct css_set { * struct cftype: handler definitions for cgroup control files * * When reading/writing to a file: - * - the cgroup to use is file->f_dentry->d_parent->d_fsdata - * - the 'cftype' of the file is file->f_dentry->d_fsdata + * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata + * - the 'cftype' of the file is file->f_path.dentry->d_fsdata */ /* cftype->flags */ @@ -612,8 +638,10 @@ struct cgroup_subsys { struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); int (*css_online)(struct cgroup_subsys_state *css); void (*css_offline)(struct cgroup_subsys_state *css); + void (*css_released)(struct cgroup_subsys_state *css); void (*css_free)(struct cgroup_subsys_state *css); void (*css_reset)(struct cgroup_subsys_state *css); + void (*css_e_css_changed)(struct cgroup_subsys_state *css); int (*can_attach)(struct cgroup_subsys_state *css, struct cgroup_taskset *tset); @@ -908,6 +936,8 @@ void css_task_iter_end(struct css_task_iter *it); int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); +struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, + struct cgroup_subsys *ss); struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, struct cgroup_subsys *ss); diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index be21af149f1..2839c639f09 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -352,7 +352,6 @@ struct clk_divider { #define CLK_DIVIDER_READ_ONLY BIT(5) extern const struct clk_ops clk_divider_ops; -extern const struct clk_ops clk_divider_ro_ops; struct clk *clk_register_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index f75acbf70e9..74e5341463c 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -254,13 +254,26 @@ extern const struct clk_ops ti_clk_mux_ops; void omap2_init_clk_hw_omap_clocks(struct clk *clk); int omap3_noncore_dpll_enable(struct clk_hw *hw); void omap3_noncore_dpll_disable(struct clk_hw *hw); +int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index); int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate); +int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw, + unsigned long rate, + unsigned long parent_rate, + u8 index); +long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *best_parent_rate, + struct clk **best_parent_clk); unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw, unsigned long parent_rate); long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, unsigned long target_rate, unsigned long *parent_rate); +long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *best_parent_rate, + struct clk **best_parent_clk); u8 omap2_init_dpll_parent(struct clk_hw *hw); unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate); long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate, @@ -278,6 +291,8 @@ int 
omap2_clk_disable_autoidle_all(void); void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks); int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate, unsigned long parent_rate); +int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate, u8 index); int omap2_dflt_clk_enable(struct clk_hw *hw); void omap2_dflt_clk_disable(struct clk_hw *hw); int omap2_dflt_clk_is_enabled(struct clk_hw *hw); diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 60bdf8dc02a..3238ffa33f6 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -33,10 +33,11 @@ extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask, enum migrate_mode mode, int *contended, - struct zone **candidate_zone); + int alloc_flags, int classzone_idx); extern void compact_pgdat(pg_data_t *pgdat, int order); extern void reset_isolation_suitable(pg_data_t *pgdat); -extern unsigned long compaction_suitable(struct zone *zone, int order); +extern unsigned long compaction_suitable(struct zone *zone, int order, + int alloc_flags, int classzone_idx); /* Do not skip compaction more than 64 times */ #define COMPACT_MAX_DEFER_SHIFT 6 @@ -103,7 +104,7 @@ static inline bool compaction_restarting(struct zone *zone, int order) static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, enum migrate_mode mode, int *contended, - struct zone **candidate_zone) + int alloc_flags, int classzone_idx) { return COMPACT_CONTINUE; } @@ -116,7 +117,8 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat) { } -static inline unsigned long compaction_suitable(struct zone *zone, int order) +static inline unsigned long compaction_suitable(struct zone *zone, int order, + int alloc_flags, int classzone_idx) { return COMPACT_SKIPPED; } diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 503b085b783..4d078cebafd 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -217,26 +217,26 @@ __ATTR(_name, 0644, show_##_name, store_##_name) struct cpufreq_driver { - char name[CPUFREQ_NAME_LEN]; - u8 flags; - void *driver_data; + char name[CPUFREQ_NAME_LEN]; + u8 flags; + void *driver_data; /* needed by all drivers */ - int (*init) (struct cpufreq_policy *policy); - int (*verify) (struct cpufreq_policy *policy); + int (*init)(struct cpufreq_policy *policy); + int (*verify)(struct cpufreq_policy *policy); /* define one out of two */ - int (*setpolicy) (struct cpufreq_policy *policy); + int (*setpolicy)(struct cpufreq_policy *policy); /* * On failure, should always restore frequency to policy->restore_freq * (i.e. old freq). */ - int (*target) (struct cpufreq_policy *policy, /* Deprecated */ - unsigned int target_freq, - unsigned int relation); - int (*target_index) (struct cpufreq_policy *policy, - unsigned int index); + int (*target)(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); /* Deprecated */ + int (*target_index)(struct cpufreq_policy *policy, + unsigned int index); /* * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION * unset. @@ -252,27 +252,31 @@ struct cpufreq_driver { * wish to switch to intermediate frequency for some target frequency. * In that case core will directly call ->target_index(). 
*/ - unsigned int (*get_intermediate)(struct cpufreq_policy *policy, - unsigned int index); - int (*target_intermediate)(struct cpufreq_policy *policy, - unsigned int index); + unsigned int (*get_intermediate)(struct cpufreq_policy *policy, + unsigned int index); + int (*target_intermediate)(struct cpufreq_policy *policy, + unsigned int index); /* should be defined, if possible */ - unsigned int (*get) (unsigned int cpu); + unsigned int (*get)(unsigned int cpu); /* optional */ - int (*bios_limit) (int cpu, unsigned int *limit); + int (*bios_limit)(int cpu, unsigned int *limit); + + int (*exit)(struct cpufreq_policy *policy); + void (*stop_cpu)(struct cpufreq_policy *policy); + int (*suspend)(struct cpufreq_policy *policy); + int (*resume)(struct cpufreq_policy *policy); + + /* Will be called after the driver is fully initialized */ + void (*ready)(struct cpufreq_policy *policy); - int (*exit) (struct cpufreq_policy *policy); - void (*stop_cpu) (struct cpufreq_policy *policy); - int (*suspend) (struct cpufreq_policy *policy); - int (*resume) (struct cpufreq_policy *policy); - struct freq_attr **attr; + struct freq_attr **attr; /* platform specific boost support code */ - bool boost_supported; - bool boost_enabled; - int (*set_boost) (int state); + bool boost_supported; + bool boost_enabled; + int (*set_boost)(int state); }; /* flags */ diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 25e0df6155a..a07e087f54b 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -53,7 +53,7 @@ struct cpuidle_state { }; /* Idle State Flags */ -#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */ +#define CPUIDLE_FLAG_TIME_INVALID (0x01) /* is residency time measurable? */ #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ #define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */ @@ -90,7 +90,7 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev); * cpuidle_get_last_residency - retrieves the last state's residency time * @dev: the target CPU * - * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_VALID isn't set + * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_INVALID is set */ static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) { diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 2f073db7392..1b357997cac 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -48,29 +48,16 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p); void cpuset_init_current_mems_allowed(void); int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); -extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask); -extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask); +extern int __cpuset_node_allowed(int node, gfp_t gfp_mask); -static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) +static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) { - return nr_cpusets() <= 1 || - __cpuset_node_allowed_softwall(node, gfp_mask); + return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask); } -static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) +static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { - return nr_cpusets() <= 1 || - __cpuset_node_allowed_hardwall(node, gfp_mask); -} - -static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) -{ - return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask); -} - -static inline int cpuset_zone_allowed_hardwall(struct 
zone *z, gfp_t gfp_mask) -{ - return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask); + return cpuset_node_allowed(zone_to_nid(z), gfp_mask); } extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, @@ -179,22 +166,12 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) return 1; } -static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) -{ - return 1; -} - -static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) -{ - return 1; -} - -static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) +static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) { return 1; } -static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) +static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { return 1; } diff --git a/include/linux/dcache.h b/include/linux/dcache.h index b2a2a08523b..5a813988e6d 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -124,15 +124,15 @@ struct dentry { void *d_fsdata; /* fs-specific data */ struct list_head d_lru; /* LRU list */ + struct list_head d_child; /* child of parent list */ + struct list_head d_subdirs; /* our children */ /* - * d_child and d_rcu can share memory + * d_alias and d_rcu can share memory */ union { - struct list_head d_child; /* child of parent list */ + struct hlist_node d_alias; /* inode alias list */ struct rcu_head d_rcu; } d_u; - struct list_head d_subdirs; /* our children */ - struct hlist_node d_alias; /* inode alias list */ }; /* @@ -230,7 +230,6 @@ extern seqlock_t rename_lock; */ extern void d_instantiate(struct dentry *, struct inode *); extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); -extern struct dentry * d_materialise_unique(struct dentry *, struct inode *); extern int d_instantiate_no_diralias(struct dentry *, struct inode *); extern void __d_drop(struct dentry *dentry); extern void d_drop(struct dentry *dentry); diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 4d0b4d1aa13..d84f8c254a8 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -92,8 +92,8 @@ struct dentry *debugfs_create_regset32(const char *name, umode_t mode, struct dentry *parent, struct debugfs_regset32 *regset); -int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, - int nregs, void __iomem *base, char *prefix); +void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, + int nregs, void __iomem *base, char *prefix); struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, struct dentry *parent, @@ -233,10 +233,9 @@ static inline struct dentry *debugfs_create_regset32(const char *name, return ERR_PTR(-ENODEV); } -static inline int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, +static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, int nregs, void __iomem *base, char *prefix) { - return 0; } static inline bool debugfs_initialized(void) diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index e1707de043a..ca6d2acc5eb 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -64,6 +64,7 @@ typedef int (*dm_request_endio_fn) (struct dm_target *ti, union map_info *map_context); typedef void (*dm_presuspend_fn) (struct dm_target *ti); +typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti); typedef void (*dm_postsuspend_fn) (struct dm_target *ti); typedef int 
(*dm_preresume_fn) (struct dm_target *ti); typedef void (*dm_resume_fn) (struct dm_target *ti); @@ -145,6 +146,7 @@ struct target_type { dm_endio_fn end_io; dm_request_endio_fn rq_end_io; dm_presuspend_fn presuspend; + dm_presuspend_undo_fn presuspend_undo; dm_postsuspend_fn postsuspend; dm_preresume_fn preresume; dm_resume_fn resume; diff --git a/include/linux/device.h b/include/linux/device.h index ce1f21608b1..41d6a7555c6 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -911,6 +911,11 @@ static inline void device_unlock(struct device *dev) mutex_unlock(&dev->mutex); } +static inline void device_lock_assert(struct device *dev) +{ + lockdep_assert_held(&dev->mutex); +} + void driver_init(void); /* diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 653a1fd07ae..40cd75e21ea 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -447,7 +447,8 @@ struct dmaengine_unmap_data { * communicate status * @phys: physical address of the descriptor * @chan: target channel for this operation - * @tx_submit: set the prepared descriptor(s) to be executed by the engine + * @tx_submit: accept the descriptor, assign ordered cookie and mark the + * descriptor pending. To be pushed on .issue_pending() call * @callback: routine to call after this operation is complete * @callback_param: general parameter to pass to the callback routine * ---async_tx api specific fields--- diff --git a/include/linux/edac.h b/include/linux/edac.h index e1e68da6f35..da3b72e95db 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -194,7 +194,8 @@ static inline char *mc_event_error_type(const unsigned int err_type) * @MEM_DDR3: DDR3 RAM * @MEM_RDDR3: Registered DDR3 RAM * This is a variant of the DDR3 memories. - * @MEM_DDR4: DDR4 RAM + * @MEM_LRDDR3 Load-Reduced DDR3 memory. + * @MEM_DDR4: Unbuffered DDR4 RAM * @MEM_RDDR4: Registered DDR4 RAM * This is a variant of the DDR4 memories. 
*/ @@ -216,6 +217,7 @@ enum mem_type { MEM_XDR, MEM_DDR3, MEM_RDDR3, + MEM_LRDDR3, MEM_DDR4, MEM_RDDR4, }; diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h index e50f98b0297..eb0b1988050 100644 --- a/include/linux/eeprom_93cx6.h +++ b/include/linux/eeprom_93cx6.h @@ -75,6 +75,10 @@ extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, const u8 word, u16 *data); extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word, __le16 *data, const u16 words); +extern void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom, + const u8 byte, u8 *data); +extern void eeprom_93cx6_multireadb(struct eeprom_93cx6 *eeprom, + const u8 byte, u8 *data, const u16 bytes); extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable); diff --git a/include/linux/efi.h b/include/linux/efi.h index 0949f9c7e87..0238d612750 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -547,6 +547,9 @@ void efi_native_runtime_setup(void); #define SMBIOS_TABLE_GUID \ EFI_GUID( 0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) +#define SMBIOS3_TABLE_GUID \ + EFI_GUID( 0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94 ) + #define SAL_SYSTEM_TABLE_GUID \ EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) @@ -810,7 +813,8 @@ extern struct efi { unsigned long mps; /* MPS table */ unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ unsigned long acpi20; /* ACPI table (ACPI 2.0) */ - unsigned long smbios; /* SM BIOS table */ + unsigned long smbios; /* SMBIOS table (32 bit entry point) */ + unsigned long smbios3; /* SMBIOS table (64 bit entry point) */ unsigned long sal_systab; /* SAL system table */ unsigned long boot_info; /* boot info table */ unsigned long hcdp; /* HCDP table */ diff --git a/include/linux/elf.h b/include/linux/elf.h index 67a5fa7830c..20fa8d8ae31 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -15,6 +15,11 @@ set_personality(PER_LINUX | (current->personality & (~PER_MASK))) #endif +#ifndef SET_PERSONALITY2 +#define SET_PERSONALITY2(ex, state) \ + SET_PERSONALITY(ex) +#endif + #if ELF_CLASS == ELFCLASS32 extern Elf32_Dyn _DYNAMIC []; diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 733980fce8e..41c891d05f0 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -392,4 +392,16 @@ static inline unsigned long compare_ether_header(const void *a, const void *b) #endif } +/** + * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame + * @skb: Buffer to pad + * + * An Ethernet frame should have a minimum size of 60 bytes. This function + * takes short frames and pads them with zeros up to the 60 byte limit. 
+ */ +static inline int eth_skb_pad(struct sk_buff *skb) +{ + return skb_put_padto(skb, ETH_ZLEN); +} + #endif /* _LINUX_ETHERDEVICE_H */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index c1a2d60dfb8..653dc9c4eba 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -59,6 +59,26 @@ enum ethtool_phys_id_state { ETHTOOL_ID_OFF }; +enum { + ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */ + ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */ + + /* + * Add your fresh new hash function bits above and remember to update + * rss_hash_func_strings[] in ethtool.c + */ + ETH_RSS_HASH_FUNCS_COUNT +}; + +#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit)) +#define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT) + +#define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP) +#define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR) + +#define ETH_RSS_HASH_UNKNOWN 0 +#define ETH_RSS_HASH_NO_CHANGE 0 + struct net_device; /* Some generic methods drivers may use in their ethtool_ops */ @@ -158,17 +178,14 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) * Returns zero if not supported for this specific device. * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. * Returns zero if not supported for this specific device. - * @get_rxfh: Get the contents of the RX flow hash indirection table and hash - * key. - * Will only be called if one or both of @get_rxfh_indir_size and - * @get_rxfh_key_size are implemented and return non-zero. - * Returns a negative error code or zero. - * @set_rxfh: Set the contents of the RX flow hash indirection table and/or - * hash key. In case only the indirection table or hash key is to be - * changed, the other argument will be %NULL. - * Will only be called if one or both of @get_rxfh_indir_size and - * @get_rxfh_key_size are implemented and return non-zero. + * @get_rxfh: Get the contents of the RX flow hash indirection table, hash key + * and/or hash function. * Returns a negative error code or zero. + * @set_rxfh: Set the contents of the RX flow hash indirection table, hash + * key, and/or hash function. Arguments which are set to %NULL or zero + * will remain unchanged. + * Returns a negative error code or zero. An error code must be returned + * if at least one unsupported change was requested. * @get_channels: Get number of channels. * @set_channels: Set number of channels. Returns a negative error code or * zero. 
@@ -241,9 +258,10 @@ struct ethtool_ops { int (*reset)(struct net_device *, u32 *); u32 (*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); - int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key); + int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key, + u8 *hfunc); int (*set_rxfh)(struct net_device *, const u32 *indir, - const u8 *key); + const u8 *key, const u8 hfunc); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 860313a33a4..87f14e90e98 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -33,7 +33,8 @@ #define F2FS_META_INO(sbi) (sbi->meta_ino_num) /* This flag is used by node and meta inodes, and by recovery */ -#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) +#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) +#define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM) /* * For further optimization on multi-head logs, on-disk layout supports maximum @@ -170,14 +171,12 @@ struct f2fs_extent { #define F2FS_INLINE_XATTR 0x01 /* file inline xattr flag */ #define F2FS_INLINE_DATA 0x02 /* file inline data flag */ +#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */ +#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */ #define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \ F2FS_INLINE_XATTR_ADDRS - 1)) -#define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) -\ - sizeof(__le32) * (DEF_ADDRS_PER_INODE + \ - DEF_NIDS_PER_INODE - 1)) - struct f2fs_inode { __le16 i_mode; /* file mode */ __u8 i_advise; /* file hints */ @@ -435,6 +434,24 @@ struct f2fs_dentry_block { __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN]; } __packed; +/* for inline dir */ +#define NR_INLINE_DENTRY (MAX_INLINE_DATA * BITS_PER_BYTE / \ + ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ + BITS_PER_BYTE + 1)) +#define INLINE_DENTRY_BITMAP_SIZE ((NR_INLINE_DENTRY + \ + BITS_PER_BYTE - 1) / BITS_PER_BYTE) +#define INLINE_RESERVED_SIZE (MAX_INLINE_DATA - \ + ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ + NR_INLINE_DENTRY + INLINE_DENTRY_BITMAP_SIZE)) + +/* inline directory entry structure */ +struct f2fs_inline_dentry { + __u8 dentry_bitmap[INLINE_DENTRY_BITMAP_SIZE]; + __u8 reserved[INLINE_RESERVED_SIZE]; + struct f2fs_dir_entry dentry[NR_INLINE_DENTRY]; + __u8 filename[NR_INLINE_DENTRY][F2FS_SLOT_LEN]; +} __packed; + /* file types used in inode_info->flags */ enum { F2FS_FT_UNKNOWN, diff --git a/include/linux/fence.h b/include/linux/fence.h index d174585b874..39efee130d2 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h @@ -128,8 +128,8 @@ struct fence_cb { * from irq context, so normal spinlocks can be used. * * A return value of false indicates the fence already passed, - * or some failure occured that made it impossible to enable - * signaling. True indicates succesful enabling. + * or some failure occurred that made it impossible to enable + * signaling. True indicates successful enabling. * * fence->status may be set in enable_signaling, but only when false is * returned. 
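The ethtool.h hunk just above widens ->get_rxfh()/->set_rxfh() to carry the RSS hash function alongside the indirection table and key, with NULL arguments (or ETH_RSS_HASH_NO_CHANGE) meaning "leave that part unchanged". Here is a hedged sketch of a driver filling in the new signatures; the foo_* names, table and key sizes, and the private struct are invented for illustration, and only the ethtool_ops member signatures, the ETH_RSS_HASH_* values and the unchanged/error semantics come from the hunk itself.

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

#define FOO_RSS_INDIR_SIZE	128	/* made-up table size for this sketch */
#define FOO_RSS_KEY_SIZE	40	/* made-up hash key size */

/* Hypothetical driver-private state; not part of any kernel API. */
struct foo_priv {
	u32 rss_indir[FOO_RSS_INDIR_SIZE];
	u8  rss_key[FOO_RSS_KEY_SIZE];
	u8  rss_hfunc;			/* ETH_RSS_HASH_TOP or ETH_RSS_HASH_XOR */
};

static u32 foo_get_rxfh_indir_size(struct net_device *dev)
{
	return FOO_RSS_INDIR_SIZE;
}

static u32 foo_get_rxfh_key_size(struct net_device *dev)
{
	return FOO_RSS_KEY_SIZE;
}

static int foo_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (indir)
		memcpy(indir, priv->rss_indir, sizeof(priv->rss_indir));
	if (key)
		memcpy(key, priv->rss_key, sizeof(priv->rss_key));
	if (hfunc)
		*hfunc = priv->rss_hfunc;	/* report the hash function in use */
	return 0;
}

static int foo_set_rxfh(struct net_device *dev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* An unsupported hash-function change must be reported as an error. */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	/* NULL (or ETH_RSS_HASH_NO_CHANGE) means "leave this part alone". */
	if (indir)
		memcpy(priv->rss_indir, indir, sizeof(priv->rss_indir));
	if (key)
		memcpy(priv->rss_key, key, sizeof(priv->rss_key));
	if (hfunc != ETH_RSS_HASH_NO_CHANGE)
		priv->rss_hfunc = hfunc;

	/* ...reprogram the NIC's RSS state from priv here... */
	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_rxfh_indir_size	= foo_get_rxfh_indir_size,
	.get_rxfh_key_size	= foo_get_rxfh_key_size,
	.get_rxfh		= foo_get_rxfh,
	.set_rxfh		= foo_set_rxfh,
};

The size callbacks are included because, per the kerneldoc in the same hunk, the rxfh callbacks are only invoked when at least one of get_rxfh_indir_size()/get_rxfh_key_size() is implemented and returns non-zero.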
diff --git a/include/linux/file.h b/include/linux/file.h index 4d69123377a..f87d30882a2 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -66,7 +66,6 @@ extern void set_close_on_exec(unsigned int fd, int flag); extern bool get_close_on_exec(unsigned int fd); extern void put_filp(struct file *); extern int get_unused_fd_flags(unsigned flags); -#define get_unused_fd() get_unused_fd_flags(0) extern void put_unused_fd(unsigned int fd); extern void fd_install(unsigned int fd, struct file *file); diff --git a/include/linux/filter.h b/include/linux/filter.h index ca95abd2bed..caac2087a4d 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -381,6 +381,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); void bpf_prog_destroy(struct bpf_prog *fp); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); +int sk_attach_bpf(u32 ufd, struct sock *sk); int sk_detach_filter(struct sock *sk); int bpf_check_classic(const struct sock_filter *filter, unsigned int flen); diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 7fd81b8c489..6b7fd9cf5ea 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -246,15 +246,6 @@ static inline int freezable_schedule_hrtimeout_range(ktime_t *expires, * defined in <linux/wait.h> */ -#define wait_event_freezekillable(wq, condition) \ -({ \ - int __retval; \ - freezer_do_not_count(); \ - __retval = wait_event_killable(wq, (condition)); \ - freezer_count(); \ - __retval; \ -}) - /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ #define wait_event_freezekillable_unsafe(wq, condition) \ ({ \ @@ -265,35 +256,6 @@ static inline int freezable_schedule_hrtimeout_range(ktime_t *expires, __retval; \ }) -#define wait_event_freezable(wq, condition) \ -({ \ - int __retval; \ - freezer_do_not_count(); \ - __retval = wait_event_interruptible(wq, (condition)); \ - freezer_count(); \ - __retval; \ -}) - -#define wait_event_freezable_timeout(wq, condition, timeout) \ -({ \ - long __retval = timeout; \ - freezer_do_not_count(); \ - __retval = wait_event_interruptible_timeout(wq, (condition), \ - __retval); \ - freezer_count(); \ - __retval; \ -}) - -#define wait_event_freezable_exclusive(wq, condition) \ -({ \ - int __retval; \ - freezer_do_not_count(); \ - __retval = wait_event_interruptible_exclusive(wq, condition); \ - freezer_count(); \ - __retval; \ -}) - - #else /* !CONFIG_FREEZER */ static inline bool frozen(struct task_struct *p) { return false; } static inline bool freezing(struct task_struct *p) { return false; } @@ -331,18 +293,6 @@ static inline void set_freezable(void) {} #define freezable_schedule_hrtimeout_range(expires, delta, mode) \ schedule_hrtimeout_range(expires, delta, mode) -#define wait_event_freezable(wq, condition) \ - wait_event_interruptible(wq, condition) - -#define wait_event_freezable_timeout(wq, condition, timeout) \ - wait_event_interruptible_timeout(wq, condition, timeout) - -#define wait_event_freezable_exclusive(wq, condition) \ - wait_event_interruptible_exclusive(wq, condition) - -#define wait_event_freezekillable(wq, condition) \ - wait_event_killable(wq, condition) - #define wait_event_freezekillable_unsafe(wq, condition) \ wait_event_killable(wq, condition) diff --git a/include/linux/fs.h b/include/linux/fs.h index 9ab779e8a63..bb29b02d9bb 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -606,9 +606,6 @@ struct inode { const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ struct file_lock *i_flock; struct address_space 
i_data; -#ifdef CONFIG_QUOTA - struct dquot *i_dquot[MAXQUOTAS]; -#endif struct list_head i_devices; union { struct pipe_inode_info *i_pipe; @@ -789,7 +786,6 @@ struct file { struct rcu_head fu_rcuhead; } f_u; struct path f_path; -#define f_dentry f_path.dentry struct inode *f_inode; /* cached value */ const struct file_operations *f_op; @@ -1224,6 +1220,7 @@ struct super_block { struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; + unsigned int s_quota_types; /* Bitmask of supported quota types */ struct quota_info s_dquot; /* Diskquota specific options */ struct sb_writers s_writers; @@ -1467,7 +1464,10 @@ int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); * This allows the kernel to read directories into kernel space or * to have different dirent layouts depending on the binary type. */ -typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned); +struct dir_context; +typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, + unsigned); + struct dir_context { const filldir_t actor; loff_t pos; @@ -1513,7 +1513,7 @@ struct file_operations { int (*setlease)(struct file *, long, struct file_lock **, void **); long (*fallocate)(struct file *file, int mode, loff_t offset, loff_t len); - int (*show_fdinfo)(struct seq_file *m, struct file *f); + void (*show_fdinfo)(struct seq_file *m, struct file *f); }; struct inode_operations { @@ -1577,7 +1577,9 @@ struct super_operations { void (*evict_inode) (struct inode *); void (*put_super) (struct super_block *); int (*sync_fs)(struct super_block *sb, int wait); + int (*freeze_super) (struct super_block *); int (*freeze_fs) (struct super_block *); + int (*thaw_super) (struct super_block *); int (*unfreeze_fs) (struct super_block *); int (*statfs) (struct dentry *, struct kstatfs *); int (*remount_fs) (struct super_block *, int *, char *); @@ -1590,6 +1592,7 @@ struct super_operations { #ifdef CONFIG_QUOTA ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); + struct dquot **(*get_dquots)(struct inode *); #endif int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); long (*nr_cached_objects)(struct super_block *, int); @@ -2786,6 +2789,11 @@ static inline void inode_has_no_xattr(struct inode *inode) inode->i_flags |= S_NOSEC; } +static inline bool is_root_inode(struct inode *inode) +{ + return inode == inode->i_sb->s_root->d_inode; +} + static inline bool dir_emit(struct dir_context *ctx, const char *name, int namelen, u64 ino, unsigned type) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 662697babd4..ed501953f0b 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -61,6 +61,11 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); /* * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are * set in the flags member. + * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and + * IPMODIFY are a kind of attribute flags which can be set only before + * registering the ftrace_ops, and can not be modified while registered. + * Changing those attribute flags after regsitering ftrace_ops will + * cause unexpected results. 
* * ENABLED - set/unset when ftrace_ops is registered/unregistered * DYNAMIC - set when ftrace_ops is registered to denote dynamically @@ -94,6 +99,17 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); * ADDING - The ops is in the process of being added. * REMOVING - The ops is in the process of being removed. * MODIFYING - The ops is in the process of changing its filter functions. + * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code. + * The arch specific code sets this flag when it allocated a + * trampoline. This lets the arch know that it can update the + * trampoline in case the callback function changes. + * The ftrace_ops trampoline can be set by the ftrace users, and + * in such cases the arch must not modify it. Only the arch ftrace + * core code should set this flag. + * IPMODIFY - The ops can modify the IP register. This can only be set with + * SAVE_REGS. If another ops with this flag set is already registered + * for any of the functions that this ops will be registered for, then + * this ops will fail to register or set_filter_ip. */ enum { FTRACE_OPS_FL_ENABLED = 1 << 0, @@ -108,6 +124,8 @@ enum { FTRACE_OPS_FL_ADDING = 1 << 9, FTRACE_OPS_FL_REMOVING = 1 << 10, FTRACE_OPS_FL_MODIFYING = 1 << 11, + FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, + FTRACE_OPS_FL_IPMODIFY = 1 << 13, }; #ifdef CONFIG_DYNAMIC_FTRACE @@ -142,6 +160,7 @@ struct ftrace_ops { struct ftrace_ops_hash *func_hash; struct ftrace_ops_hash old_hash; unsigned long trampoline; + unsigned long trampoline_size; #endif }; @@ -255,7 +274,9 @@ struct ftrace_func_command { int ftrace_arch_code_modify_prepare(void); int ftrace_arch_code_modify_post_process(void); -void ftrace_bug(int err, unsigned long ip); +struct dyn_ftrace; + +void ftrace_bug(int err, struct dyn_ftrace *rec); struct seq_file; @@ -287,6 +308,8 @@ extern int ftrace_text_reserved(const void *start, const void *end); extern int ftrace_nr_registered_ops(void); +bool is_ftrace_trampoline(unsigned long addr); + /* * The dyn_ftrace record's flags field is split into two parts. * the first part which is '0-FTRACE_REF_MAX' is a counter of @@ -297,6 +320,7 @@ extern int ftrace_nr_registered_ops(void); * ENABLED - the function is being traced * REGS - the record wants the function to save regs * REGS_EN - the function is set up to save regs. + * IPMODIFY - the record allows for the IP address to be changed. * * When a new ftrace_ops is registered and wants a function to save * pt_regs, the rec->flag REGS is set. 
When the function has been @@ -310,10 +334,11 @@ enum { FTRACE_FL_REGS_EN = (1UL << 29), FTRACE_FL_TRAMP = (1UL << 28), FTRACE_FL_TRAMP_EN = (1UL << 27), + FTRACE_FL_IPMODIFY = (1UL << 26), }; -#define FTRACE_REF_MAX_SHIFT 27 -#define FTRACE_FL_BITS 5 +#define FTRACE_REF_MAX_SHIFT 26 +#define FTRACE_FL_BITS 6 #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) @@ -586,6 +611,11 @@ static inline ssize_t ftrace_notrace_write(struct file *file, const char __user size_t cnt, loff_t *ppos) { return -ENODEV; } static inline int ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } + +static inline bool is_ftrace_trampoline(unsigned long addr) +{ + return false; +} #endif /* CONFIG_DYNAMIC_FTRACE */ /* totally disable ftrace - can not re-enable after this */ diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 28672e87e91..0bebb5c348b 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -138,6 +138,17 @@ enum print_line_t { TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ }; +/* + * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq + * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function + * simplifies those functions and keeps them in sync. + */ +static inline enum print_line_t trace_handle_return(struct trace_seq *s) +{ + return trace_seq_has_overflowed(s) ? + TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; +} + void tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, int pc); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 41b30fd4d04..07d2699cdb5 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -381,8 +381,8 @@ extern void free_kmem_pages(unsigned long addr, unsigned int order); void page_alloc_init(void); void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); -void drain_all_pages(void); -void drain_local_pages(void *dummy); +void drain_all_pages(struct zone *zone); +void drain_local_pages(struct zone *zone); /* * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 12f146fa660..00b1b70d68b 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -94,6 +94,13 @@ int gpiod_to_irq(const struct gpio_desc *desc); struct gpio_desc *gpio_to_desc(unsigned gpio); int desc_to_gpio(const struct gpio_desc *desc); +/* Child properties interface */ +struct fwnode_handle; + +struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, + const char *propname); +struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, + struct fwnode_handle *child); #else /* CONFIG_GPIOLIB */ static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev, diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h index 8b622468952..ee2d8c6f913 100644 --- a/include/linux/gpio_keys.h +++ b/include/linux/gpio_keys.h @@ -2,6 +2,7 @@ #define _GPIO_KEYS_H struct device; +struct gpio_desc; /** * struct gpio_keys_button - configuration parameters @@ -17,6 +18,7 @@ struct device; * disable button via sysfs * @value: axis value for %EV_ABS * @irq: Irq number in case of interrupt keys + * @gpiod: GPIO descriptor */ struct gpio_keys_button { unsigned int code; @@ -29,6 +31,7 @@ struct gpio_keys_button { bool can_disable; int value; unsigned int irq; + 
struct gpio_desc *gpiod; }; /** diff --git a/include/linux/hash.h b/include/linux/hash.h index d0494c39939..1afde47e152 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h @@ -15,7 +15,6 @@ */ #include <asm/types.h> -#include <asm/hash.h> #include <linux/compiler.h> /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ @@ -84,38 +83,4 @@ static inline u32 hash32_ptr(const void *ptr) return (u32)val; } -struct fast_hash_ops { - u32 (*hash)(const void *data, u32 len, u32 seed); - u32 (*hash2)(const u32 *data, u32 len, u32 seed); -}; - -/** - * arch_fast_hash - Caclulates a hash over a given buffer that can have - * arbitrary size. This function will eventually use an - * architecture-optimized hashing implementation if - * available, and trades off distribution for speed. - * - * @data: buffer to hash - * @len: length of buffer in bytes - * @seed: start seed - * - * Returns 32bit hash. - */ -extern u32 arch_fast_hash(const void *data, u32 len, u32 seed); - -/** - * arch_fast_hash2 - Caclulates a hash over a given buffer that has a - * size that is of a multiple of 32bit words. This - * function will eventually use an architecture- - * optimized hashing implementation if available, - * and trades off distribution for speed. - * - * @data: buffer to hash (must be 32bit padded) - * @len: number of 32bit words - * @seed: start seed - * - * Returns 32bit hash. - */ -extern u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed); - #endif /* _LINUX_HASH_H */ diff --git a/include/linux/hid.h b/include/linux/hid.h index 78ea9bf941c..06c4607744f 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -234,6 +234,33 @@ struct hid_item { #define HID_DG_BARRELSWITCH 0x000d0044 #define HID_DG_ERASER 0x000d0045 #define HID_DG_TABLETPICK 0x000d0046 + +#define HID_CP_CONSUMERCONTROL 0x000c0001 +#define HID_CP_NUMERICKEYPAD 0x000c0002 +#define HID_CP_PROGRAMMABLEBUTTONS 0x000c0003 +#define HID_CP_MICROPHONE 0x000c0004 +#define HID_CP_HEADPHONE 0x000c0005 +#define HID_CP_GRAPHICEQUALIZER 0x000c0006 +#define HID_CP_FUNCTIONBUTTONS 0x000c0036 +#define HID_CP_SELECTION 0x000c0080 +#define HID_CP_MEDIASELECTION 0x000c0087 +#define HID_CP_SELECTDISC 0x000c00ba +#define HID_CP_PLAYBACKSPEED 0x000c00f1 +#define HID_CP_PROXIMITY 0x000c0109 +#define HID_CP_SPEAKERSYSTEM 0x000c0160 +#define HID_CP_CHANNELLEFT 0x000c0161 +#define HID_CP_CHANNELRIGHT 0x000c0162 +#define HID_CP_CHANNELCENTER 0x000c0163 +#define HID_CP_CHANNELFRONT 0x000c0164 +#define HID_CP_CHANNELCENTERFRONT 0x000c0165 +#define HID_CP_CHANNELSIDE 0x000c0166 +#define HID_CP_CHANNELSURROUND 0x000c0167 +#define HID_CP_CHANNELLOWFREQUENCYENHANCEMENT 0x000c0168 +#define HID_CP_CHANNELTOP 0x000c0169 +#define HID_CP_CHANNELUNKNOWN 0x000c016a +#define HID_CP_APPLICATIONLAUNCHBUTTONS 0x000c0180 +#define HID_CP_GENERICGUIAPPLICATIONCONTROLS 0x000c0200 + #define HID_DG_CONFIDENCE 0x000d0047 #define HID_DG_WIDTH 0x000d0048 #define HID_DG_HEIGHT 0x000d0049 @@ -312,11 +339,8 @@ struct hid_item { * Vendor specific HID device groups */ #define HID_GROUP_RMI 0x0100 - -/* - * Vendor specific HID device groups - */ #define HID_GROUP_WACOM 0x0101 +#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102 /* * This is the global environment of the parser. 
This information is @@ -1063,6 +1087,17 @@ static inline void hid_hw_wait(struct hid_device *hdev) hdev->ll_driver->wait(hdev); } +/** + * hid_report_len - calculate the report length + * + * @report: the report we want to know the length + */ +static inline int hid_report_len(struct hid_report *report) +{ + /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */ + return ((report->size - 1) >> 3) + 1 + (report->id > 0); +} + int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, int interrupt); diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 6e6d338641f..431b7fc605c 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -175,6 +175,52 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb, } #endif /* !CONFIG_HUGETLB_PAGE */ +/* + * hugepages at page global directory. If arch support + * hugepages at pgd level, they need to define this. + */ +#ifndef pgd_huge +#define pgd_huge(x) 0 +#endif + +#ifndef pgd_write +static inline int pgd_write(pgd_t pgd) +{ + BUG(); + return 0; +} +#endif + +#ifndef pud_write +static inline int pud_write(pud_t pud) +{ + BUG(); + return 0; +} +#endif + +#ifndef is_hugepd +/* + * Some architectures requires a hugepage directory format that is + * required to support multiple hugepage sizes. For example + * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables" + * introduced the same on powerpc. This allows for a more flexible hugepage + * pagetable layout. + */ +typedef struct { unsigned long pd; } hugepd_t; +#define is_hugepd(hugepd) (0) +#define __hugepd(x) ((hugepd_t) { (x) }) +static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr, + unsigned pdshift, unsigned long end, + int write, struct page **pages, int *nr) +{ + return 0; +} +#else +extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr, + unsigned pdshift, unsigned long end, + int write, struct page **pages, int *nr); +#endif #define HUGETLB_ANON_FILE "anon_hugepage" @@ -311,7 +357,8 @@ static inline struct hstate *hstate_sizelog(int page_size_log) { if (!page_size_log) return &default_hstate; - return size_to_hstate(1 << page_size_log); + + return size_to_hstate(1UL << page_size_log); } static inline struct hstate *hstate_vma(struct vm_area_struct *vma) diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index 0129f89cf98..bcc853eccc8 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -16,7 +16,6 @@ #define _LINUX_HUGETLB_CGROUP_H #include <linux/mmdebug.h> -#include <linux/res_counter.h> struct hugetlb_cgroup; /* diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 08cfaff8a07..476c685ca6f 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -650,6 +650,8 @@ struct vmbus_channel { u8 monitor_grp; u8 monitor_bit; + bool rescind; /* got rescind msg */ + u32 ringbuffer_gpadlhandle; /* Allocated memory for ring buffer */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index b556e0ab946..70ee0d3a2be 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -359,7 +359,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, * to name two of the most common. * * The return codes from the @master_xfer field should indicate the type of - * error code that occured during the transfer, as documented in the kernel + * error code that occurred during the transfer, as documented in the kernel * Documentation file Documentation/i2c/fault-codes. 
*/ struct i2c_algorithm { diff --git a/include/linux/i2c/pmbus.h b/include/linux/i2c/pmbus.h index 69280db02c4..ee3c2aba2a8 100644 --- a/include/linux/i2c/pmbus.h +++ b/include/linux/i2c/pmbus.h @@ -40,6 +40,10 @@ struct pmbus_platform_data { u32 flags; /* Device specific flags */ + + /* regulator support */ + int num_regulators; + struct regulator_init_data *reg_init_data; }; #endif /* _PMBUS_H_ */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index b1be39c7693..4f4eea8a628 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -19,6 +19,7 @@ #include <linux/types.h> #include <linux/if_ether.h> #include <asm/byteorder.h> +#include <asm/unaligned.h> /* * DS bit usage @@ -1066,6 +1067,12 @@ struct ieee80211_pspoll { /* TDLS */ +/* Channel switch timing */ +struct ieee80211_ch_switch_timing { + __le16 switch_time; + __le16 switch_timeout; +} __packed; + /* Link-id information element */ struct ieee80211_tdls_lnkie { u8 ie_type; /* Link Identifier IE */ @@ -1107,6 +1114,15 @@ struct ieee80211_tdls_data { u8 dialog_token; u8 variable[0]; } __packed discover_req; + struct { + u8 target_channel; + u8 oper_class; + u8 variable[0]; + } __packed chan_switch_req; + struct { + __le16 status_code; + u8 variable[0]; + } __packed chan_switch_resp; } u; } __packed; @@ -1274,7 +1290,7 @@ struct ieee80211_ht_cap { #define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2 /* - * Maximum length of AMPDU that the STA can receive. + * Maximum length of AMPDU that the STA can receive in high-throughput (HT). * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) */ enum ieee80211_max_ampdu_length_exp { @@ -1284,6 +1300,21 @@ enum ieee80211_max_ampdu_length_exp { IEEE80211_HT_MAX_AMPDU_64K = 3 }; +/* + * Maximum length of AMPDU that the STA can receive in VHT. 
+ * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) + */ +enum ieee80211_vht_max_ampdu_length_exp { + IEEE80211_VHT_MAX_AMPDU_8K = 0, + IEEE80211_VHT_MAX_AMPDU_16K = 1, + IEEE80211_VHT_MAX_AMPDU_32K = 2, + IEEE80211_VHT_MAX_AMPDU_64K = 3, + IEEE80211_VHT_MAX_AMPDU_128K = 4, + IEEE80211_VHT_MAX_AMPDU_256K = 5, + IEEE80211_VHT_MAX_AMPDU_512K = 6, + IEEE80211_VHT_MAX_AMPDU_1024K = 7 +}; + #define IEEE80211_HT_MAX_AMPDU_FACTOR 13 /* Minimum MPDU start spacing */ @@ -1998,6 +2029,16 @@ enum ieee80211_tdls_actioncode { WLAN_TDLS_DISCOVERY_REQUEST = 10, }; +/* Extended Channel Switching capability to be set in the 1st byte of + * the @WLAN_EID_EXT_CAPABILITY information element + */ +#define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2) + +/* TDLS capabilities in the the 4th byte of @WLAN_EID_EXT_CAPABILITY */ +#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4) +#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5) +#define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6) + /* Interworking capabilities are set in 7th bit of 4th byte of the * @WLAN_EID_EXT_CAPABILITY information element */ @@ -2009,6 +2050,7 @@ enum ieee80211_tdls_actioncode { */ #define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5) #define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6) +#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7) #define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6) #define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7) @@ -2016,6 +2058,9 @@ enum ieee80211_tdls_actioncode { /* TDLS specific payload type in the LLC/SNAP header */ #define WLAN_TDLS_SNAP_RFTYPE 0x2 +/* BSS Coex IE information field bits */ +#define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0) + /** * enum - mesh synchronization method identifier * @@ -2398,6 +2443,30 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim, return !!(tim->virtual_map[index] & mask); } +/** + * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet) + * @skb: the skb containing the frame, length will not be checked + * @hdr_size: the size of the ieee80211_hdr that starts at skb->data + * + * This function assumes the frame is a data frame, and that the network header + * is in the correct place. + */ +static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size) +{ + if (!skb_is_nonlinear(skb) && + skb->len > (skb_network_offset(skb) + 2)) { + /* Point to where the indication of TDLS should start */ + const u8 *tdls_data = skb_network_header(skb) - 2; + + if (get_unaligned_be16(tdls_data) == ETH_P_TDLS && + tdls_data[2] == WLAN_TDLS_SNAP_RFTYPE && + tdls_data[3] == WLAN_CATEGORY_TDLS) + return tdls_data[4]; + } + + return -1; +} + /* convert time units */ #define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) #define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h new file mode 100644 index 00000000000..6e82d888287 --- /dev/null +++ b/include/linux/ieee802154.h @@ -0,0 +1,242 @@ +/* + * IEEE802.15.4-2003 specification + * + * Copyright (C) 2007, 2008 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Written by: + * Pavel Smolenskiy <pavel.smolenskiy@gmail.com> + * Maxim Gorbachyov <maxim.gorbachev@siemens.com> + * Maxim Osipov <maxim.osipov@siemens.com> + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> + */ + +#ifndef LINUX_IEEE802154_H +#define LINUX_IEEE802154_H + +#include <linux/types.h> +#include <linux/random.h> +#include <asm/byteorder.h> + +#define IEEE802154_MTU 127 +#define IEEE802154_MIN_PSDU_LEN 5 + +#define IEEE802154_PAN_ID_BROADCAST 0xffff +#define IEEE802154_ADDR_SHORT_BROADCAST 0xffff +#define IEEE802154_ADDR_SHORT_UNSPEC 0xfffe + +#define IEEE802154_EXTENDED_ADDR_LEN 8 + +#define IEEE802154_LIFS_PERIOD 40 +#define IEEE802154_SIFS_PERIOD 12 + +#define IEEE802154_MAX_CHANNEL 26 +#define IEEE802154_MAX_PAGE 31 + +#define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */ +#define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */ +#define IEEE802154_FC_TYPE_ACK 0x2 /* Frame is acknowledgment */ +#define IEEE802154_FC_TYPE_MAC_CMD 0x3 /* Frame is MAC command */ + +#define IEEE802154_FC_TYPE_SHIFT 0 +#define IEEE802154_FC_TYPE_MASK ((1 << 3) - 1) +#define IEEE802154_FC_TYPE(x) ((x & IEEE802154_FC_TYPE_MASK) >> IEEE802154_FC_TYPE_SHIFT) +#define IEEE802154_FC_SET_TYPE(v, x) do { \ + v = (((v) & ~IEEE802154_FC_TYPE_MASK) | \ + (((x) << IEEE802154_FC_TYPE_SHIFT) & IEEE802154_FC_TYPE_MASK)); \ + } while (0) + +#define IEEE802154_FC_SECEN_SHIFT 3 +#define IEEE802154_FC_SECEN (1 << IEEE802154_FC_SECEN_SHIFT) +#define IEEE802154_FC_FRPEND_SHIFT 4 +#define IEEE802154_FC_FRPEND (1 << IEEE802154_FC_FRPEND_SHIFT) +#define IEEE802154_FC_ACK_REQ_SHIFT 5 +#define IEEE802154_FC_ACK_REQ (1 << IEEE802154_FC_ACK_REQ_SHIFT) +#define IEEE802154_FC_INTRA_PAN_SHIFT 6 +#define IEEE802154_FC_INTRA_PAN (1 << IEEE802154_FC_INTRA_PAN_SHIFT) + +#define IEEE802154_FC_SAMODE_SHIFT 14 +#define IEEE802154_FC_SAMODE_MASK (3 << IEEE802154_FC_SAMODE_SHIFT) +#define IEEE802154_FC_DAMODE_SHIFT 10 +#define IEEE802154_FC_DAMODE_MASK (3 << IEEE802154_FC_DAMODE_SHIFT) + +#define IEEE802154_FC_VERSION_SHIFT 12 +#define IEEE802154_FC_VERSION_MASK (3 << IEEE802154_FC_VERSION_SHIFT) +#define IEEE802154_FC_VERSION(x) ((x & IEEE802154_FC_VERSION_MASK) >> IEEE802154_FC_VERSION_SHIFT) + +#define IEEE802154_FC_SAMODE(x) \ + (((x) & IEEE802154_FC_SAMODE_MASK) >> IEEE802154_FC_SAMODE_SHIFT) + +#define IEEE802154_FC_DAMODE(x) \ + (((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT) + +#define IEEE802154_SCF_SECLEVEL_MASK 7 +#define IEEE802154_SCF_SECLEVEL_SHIFT 0 +#define IEEE802154_SCF_SECLEVEL(x) (x & IEEE802154_SCF_SECLEVEL_MASK) +#define IEEE802154_SCF_KEY_ID_MODE_SHIFT 3 +#define IEEE802154_SCF_KEY_ID_MODE_MASK (3 << IEEE802154_SCF_KEY_ID_MODE_SHIFT) +#define IEEE802154_SCF_KEY_ID_MODE(x) \ + ((x & IEEE802154_SCF_KEY_ID_MODE_MASK) >> IEEE802154_SCF_KEY_ID_MODE_SHIFT) + +#define IEEE802154_SCF_KEY_IMPLICIT 0 +#define IEEE802154_SCF_KEY_INDEX 1 +#define IEEE802154_SCF_KEY_SHORT_INDEX 2 +#define IEEE802154_SCF_KEY_HW_INDEX 3 + +#define IEEE802154_SCF_SECLEVEL_NONE 0 +#define IEEE802154_SCF_SECLEVEL_MIC32 1 +#define IEEE802154_SCF_SECLEVEL_MIC64 2 +#define IEEE802154_SCF_SECLEVEL_MIC128 3 +#define IEEE802154_SCF_SECLEVEL_ENC 4 +#define IEEE802154_SCF_SECLEVEL_ENC_MIC32 5 +#define IEEE802154_SCF_SECLEVEL_ENC_MIC64 6 +#define IEEE802154_SCF_SECLEVEL_ENC_MIC128 7 + +/* MAC footer size */ +#define IEEE802154_MFR_SIZE 2 /* 2 octets */ + +/* MAC's Command Frames Identifiers */ +#define IEEE802154_CMD_ASSOCIATION_REQ 0x01 +#define 
IEEE802154_CMD_ASSOCIATION_RESP 0x02 +#define IEEE802154_CMD_DISASSOCIATION_NOTIFY 0x03 +#define IEEE802154_CMD_DATA_REQ 0x04 +#define IEEE802154_CMD_PANID_CONFLICT_NOTIFY 0x05 +#define IEEE802154_CMD_ORPHAN_NOTIFY 0x06 +#define IEEE802154_CMD_BEACON_REQ 0x07 +#define IEEE802154_CMD_COORD_REALIGN_NOTIFY 0x08 +#define IEEE802154_CMD_GTS_REQ 0x09 + +/* + * The return values of MAC operations + */ +enum { + /* + * The requested operation was completed successfully. + * For a transmission request, this value indicates + * a successful transmission. + */ + IEEE802154_SUCCESS = 0x0, + + /* The beacon was lost following a synchronization request. */ + IEEE802154_BEACON_LOSS = 0xe0, + /* + * A transmission could not take place due to activity on the + * channel, i.e., the CSMA-CA mechanism has failed. + */ + IEEE802154_CHNL_ACCESS_FAIL = 0xe1, + /* The GTS request has been denied by the PAN coordinator. */ + IEEE802154_DENINED = 0xe2, + /* The attempt to disable the transceiver has failed. */ + IEEE802154_DISABLE_TRX_FAIL = 0xe3, + /* + * The received frame induces a failed security check according to + * the security suite. + */ + IEEE802154_FAILED_SECURITY_CHECK = 0xe4, + /* + * The frame resulting from secure processing has a length that is + * greater than aMACMaxFrameSize. + */ + IEEE802154_FRAME_TOO_LONG = 0xe5, + /* + * The requested GTS transmission failed because the specified GTS + * either did not have a transmit GTS direction or was not defined. + */ + IEEE802154_INVALID_GTS = 0xe6, + /* + * A request to purge an MSDU from the transaction queue was made using + * an MSDU handle that was not found in the transaction table. + */ + IEEE802154_INVALID_HANDLE = 0xe7, + /* A parameter in the primitive is out of the valid range.*/ + IEEE802154_INVALID_PARAMETER = 0xe8, + /* No acknowledgment was received after aMaxFrameRetries. */ + IEEE802154_NO_ACK = 0xe9, + /* A scan operation failed to find any network beacons.*/ + IEEE802154_NO_BEACON = 0xea, + /* No response data were available following a request. */ + IEEE802154_NO_DATA = 0xeb, + /* The operation failed because a short address was not allocated. */ + IEEE802154_NO_SHORT_ADDRESS = 0xec, + /* + * A receiver enable request was unsuccessful because it could not be + * completed within the CAP. + */ + IEEE802154_OUT_OF_CAP = 0xed, + /* + * A PAN identifier conflict has been detected and communicated to the + * PAN coordinator. + */ + IEEE802154_PANID_CONFLICT = 0xee, + /* A coordinator realignment command has been received. */ + IEEE802154_REALIGMENT = 0xef, + /* The transaction has expired and its information discarded. */ + IEEE802154_TRANSACTION_EXPIRED = 0xf0, + /* There is no capacity to store the transaction. */ + IEEE802154_TRANSACTION_OVERFLOW = 0xf1, + /* + * The transceiver was in the transmitter enabled state when the + * receiver was requested to be enabled. + */ + IEEE802154_TX_ACTIVE = 0xf2, + /* The appropriate key is not available in the ACL. */ + IEEE802154_UNAVAILABLE_KEY = 0xf3, + /* + * A SET/GET request was issued with the identifier of a PIB attribute + * that is not supported. + */ + IEEE802154_UNSUPPORTED_ATTR = 0xf4, + /* + * A request to perform a scan operation failed because the MLME was + * in the process of performing a previously initiated scan operation. 
+ */ + IEEE802154_SCAN_IN_PROGRESS = 0xfc, +}; + +/** + * ieee802154_is_valid_psdu_len - check if psdu len is valid + * @len: psdu len with (MHR + payload + MFR) + */ +static inline bool ieee802154_is_valid_psdu_len(const u8 len) +{ + return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU); +} + +/** + * ieee802154_is_valid_psdu_len - check if extended addr is valid + * @addr: extended addr to check + */ +static inline bool ieee802154_is_valid_extended_addr(const __le64 addr) +{ + /* These EUI-64 addresses are reserved by IEEE. 0xffffffffffffffff + * is used internally as extended to short address broadcast mapping. + * This is currently a workaround because neighbor discovery can't + * deal with short addresses types right now. + */ + return ((addr != cpu_to_le64(0x0000000000000000ULL)) && + (addr != cpu_to_le64(0xffffffffffffffffULL))); +} + +/** + * ieee802154_random_extended_addr - generates a random extended address + * @addr: extended addr pointer to place the random address + */ +static inline void ieee802154_random_extended_addr(__le64 *addr) +{ + get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN); + + /* toggle some bit if we hit an invalid extended addr */ + if (!ieee802154_is_valid_extended_addr(*addr)) + ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01; +} + +#endif /* LINUX_IEEE802154_H */ diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 808dcb8cc04..0a8ce762a47 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -15,6 +15,7 @@ #include <linux/netdevice.h> #include <uapi/linux/if_bridge.h> +#include <linux/bitops.h> struct br_ip { union { @@ -32,11 +33,41 @@ struct br_ip_list { struct br_ip addr; }; +#define BR_HAIRPIN_MODE BIT(0) +#define BR_BPDU_GUARD BIT(1) +#define BR_ROOT_BLOCK BIT(2) +#define BR_MULTICAST_FAST_LEAVE BIT(3) +#define BR_ADMIN_COST BIT(4) +#define BR_LEARNING BIT(5) +#define BR_FLOOD BIT(6) +#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING) +#define BR_PROMISC BIT(7) +#define BR_PROXYARP BIT(8) +#define BR_LEARNING_SYNC BIT(9) + extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); typedef int br_should_route_hook_t(struct sk_buff *skb); extern br_should_route_hook_t __rcu *br_should_route_hook; +#if IS_ENABLED(CONFIG_BRIDGE) +int br_fdb_external_learn_add(struct net_device *dev, + const unsigned char *addr, u16 vid); +int br_fdb_external_learn_del(struct net_device *dev, + const unsigned char *addr, u16 vid); +#else +static inline int br_fdb_external_learn_add(struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + return 0; +} +static inline int br_fdb_external_learn_del(struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + return 0; +} +#endif + #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list); diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index d69f0577a31..515a35e2a48 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -282,28 +282,24 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features, } /** - * vlan_insert_tag - regular VLAN tag inserting + * __vlan_insert_tag - regular VLAN tag inserting * @skb: skbuff to tag * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload - * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. 
- * - * Following the skb_unshare() example, in case of error, the calling function - * doesn't have to worry about freeing the original skb. + * Returns error if skb_cow_head failes. * * Does not change skb->protocol so this function can be used during receive. */ -static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, - __be16 vlan_proto, u16 vlan_tci) +static inline int __vlan_insert_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) { struct vlan_ethhdr *veth; - if (skb_cow_head(skb, VLAN_HLEN) < 0) { - dev_kfree_skb_any(skb); - return NULL; - } + if (skb_cow_head(skb, VLAN_HLEN) < 0) + return -ENOMEM; + veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); /* Move the mac addresses to the beginning of the new header. */ @@ -316,12 +312,40 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, /* now, the TCI */ veth->h_vlan_TCI = htons(vlan_tci); + return 0; +} + +/** + * vlan_insert_tag - regular VLAN tag inserting + * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol + * @vlan_tci: VLAN TCI to insert + * + * Inserts the VLAN tag into @skb as part of the payload + * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. + * + * Does not change skb->protocol so this function can be used during receive. + */ +static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) +{ + int err; + + err = __vlan_insert_tag(skb, vlan_proto, vlan_tci); + if (err) { + dev_kfree_skb_any(skb); + return NULL; + } return skb; } /** - * __vlan_put_tag - regular VLAN tag inserting + * vlan_insert_tag_set_proto - regular VLAN tag inserting * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload @@ -330,8 +354,9 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, * Following the skb_unshare() example, in case of error, the calling function * doesn't have to worry about freeing the original skb. */ -static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, - __be16 vlan_proto, u16 vlan_tci) +static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb, + __be16 vlan_proto, + u16 vlan_tci) { skb = vlan_insert_tag(skb, vlan_proto, vlan_tci); if (skb) @@ -339,39 +364,53 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, return skb; } -/** - * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting +/* + * __vlan_hwaccel_push_inside - pushes vlan tag to the payload * @skb: skbuff to tag - * @vlan_proto: VLAN encapsulation protocol - * @vlan_tci: VLAN TCI to insert * - * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest + * Pushes the VLAN tag from @skb->vlan_tci inside to the payload. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. 
*/ -static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, - __be16 vlan_proto, - u16 vlan_tci) +static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) { - skb->vlan_proto = vlan_proto; - skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; + skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, + vlan_tx_tag_get(skb)); + if (likely(skb)) + skb->vlan_tci = 0; + return skb; +} +/* + * vlan_hwaccel_push_inside - pushes vlan tag to the payload + * @skb: skbuff to tag + * + * Checks is tag is present in @skb->vlan_tci and if it is, it pushes the + * VLAN tag from @skb->vlan_tci inside to the payload. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. + */ +static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb)) + skb = __vlan_hwaccel_push_inside(skb); return skb; } /** - * vlan_put_tag - inserts VLAN tag according to device features + * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * - * Assumes skb->dev is the target that will xmit this frame. - * Returns a VLAN tagged skb. + * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest */ -static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, - __be16 vlan_proto, u16 vlan_tci) +static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) { - if (vlan_hw_offload_capable(skb->dev->features, vlan_proto)) { - return __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); - } else { - return __vlan_put_tag(skb, vlan_proto, vlan_tci); - } + skb->vlan_proto = vlan_proto; + skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; } /** diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h index 8bbd7bc1043..03fa332ad2a 100644 --- a/include/linux/iio/events.h +++ b/include/linux/iio/events.h @@ -72,7 +72,7 @@ struct iio_event_data { #define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF) -#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF) +#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F) #define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF) diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 0068708161f..0a21fbefdfb 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -242,7 +242,7 @@ static inline void in_dev_put(struct in_device *idev) static __inline__ __be32 inet_make_mask(int logmask) { if (logmask) - return htonl(~((1<<(32-logmask))-1)); + return htonl(~((1U<<(32-logmask))-1)); return 0; } diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 77fc43f8fb7..3037fc085e8 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -102,7 +102,7 @@ extern struct group_info init_groups; #define INIT_IDS #endif -#ifdef CONFIG_TREE_PREEMPT_RCU +#ifdef CONFIG_PREEMPT_RCU #define INIT_TASK_RCU_TREE_PREEMPT() \ .rcu_blocked_node = NULL, #else @@ -166,6 +166,15 @@ extern struct task_group root_task_group; # define INIT_RT_MUTEXES(tsk) #endif +#ifdef CONFIG_NUMA_BALANCING +# define INIT_NUMA_BALANCING(tsk) \ + .numa_preferred_nid = -1, \ + .numa_group = NULL, \ + .numa_faults = NULL, +#else +# define INIT_NUMA_BALANCING(tsk) +#endif + /* * INIT_TASK is used to set up the first task table, touch at * your own risk!. 
Base=0, limit=0x1fffff (=2MB) @@ -237,6 +246,7 @@ extern struct task_group root_task_group; INIT_CPUSET_SEQ(tsk) \ INIT_RT_MUTEXES(tsk) \ INIT_VTIME(tsk) \ + INIT_NUMA_BALANCING(tsk) \ } diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 76d2acbfa7c..838dbfa3c33 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -37,6 +37,7 @@ #include <linux/list.h> #include <linux/proc_fs.h> +#include <linux/acpi.h> /* For acpi_handle */ struct module; struct device; @@ -278,15 +279,18 @@ enum ipmi_addr_src { SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, SI_PCI, SI_DEVICETREE, SI_DEFAULT }; +const char *ipmi_addr_src_to_str(enum ipmi_addr_src src); union ipmi_smi_info_union { +#ifdef CONFIG_ACPI /* * the acpi_info element is defined for the SI_ACPI * address type */ struct { - void *acpi_handle; + acpi_handle acpi_handle; } acpi_info; +#endif }; struct ipmi_smi_info { diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index bd349240d50..0b1e569f5ff 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h @@ -98,12 +98,11 @@ struct ipmi_smi_handlers { operation is not allowed to fail. If an error occurs, it should report back the error in a received message. It may do this in the current call context, since no write locks - are held when this is run. If the priority is > 0, the - message will go into a high-priority queue and be sent - first. Otherwise, it goes into a normal-priority queue. */ + are held when this is run. Message are delivered one at + a time by the message handler, a new message will not be + delivered until the previous message is returned. */ void (*sender)(void *send_info, - struct ipmi_smi_msg *msg, - int priority); + struct ipmi_smi_msg *msg); /* Called by the upper layer to request that we try to get events from the BMC we are attached to. 
*/ @@ -212,7 +211,6 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, void *send_info, struct ipmi_device_id *device_id, struct device *dev, - const char *sysfs_name, unsigned char slave_addr); /* diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ff560537dd6..c694e7baa62 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -42,6 +42,7 @@ struct ipv6_devconf { __s32 accept_ra_from_local; #ifdef CONFIG_IPV6_OPTIMISTIC_DAD __s32 optimistic_dad; + __s32 use_optimistic; #endif #ifdef CONFIG_IPV6_MROUTE __s32 mc_forwarding; @@ -316,14 +317,4 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk) #define tcp_twsk_ipv6only(__sk) 0 #define inet_v6_ipv6only(__sk) 0 #endif /* IS_ENABLED(CONFIG_IPV6) */ - -#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \ - (((__sk)->sk_portpair == (__ports)) && \ - ((__sk)->sk_family == AF_INET6) && \ - ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \ - ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \ - (!(__sk)->sk_bound_dev_if || \ - ((__sk)->sk_bound_dev_if == (__dif))) && \ - net_eq(sock_net(__sk), (__net))) - #endif /* _IPV6_H */ diff --git a/include/linux/irq.h b/include/linux/irq.h index 03f48d936f6..d09ec7a1243 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -15,11 +15,13 @@ #include <linux/spinlock.h> #include <linux/cpumask.h> #include <linux/gfp.h> +#include <linux/irqhandler.h> #include <linux/irqreturn.h> #include <linux/irqnr.h> #include <linux/errno.h> #include <linux/topology.h> #include <linux/wait.h> +#include <linux/io.h> #include <asm/irq.h> #include <asm/ptrace.h> @@ -27,11 +29,7 @@ struct seq_file; struct module; -struct irq_desc; -struct irq_data; -typedef void (*irq_flow_handler_t)(unsigned int irq, - struct irq_desc *desc); -typedef void (*irq_preflow_handler_t)(struct irq_data *data); +struct msi_msg; /* * IRQ line status. @@ -113,10 +111,14 @@ enum { * * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity + * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to + * support stacked irqchips, which indicates skipping + * all descendent irqchips. */ enum { IRQ_SET_MASK_OK = 0, IRQ_SET_MASK_OK_NOCOPY, + IRQ_SET_MASK_OK_DONE, }; struct msi_desc; @@ -133,6 +135,8 @@ struct irq_domain; * @chip: low level interrupt hardware access * @domain: Interrupt translation domain; responsible for mapping * between hwirq number and linux irq number. 
+ * @parent_data: pointer to parent struct irq_data to support hierarchy + * irq_domain * @handler_data: per-IRQ data for the irq_chip methods * @chip_data: platform-specific per-chip private data for the chip * methods, to allow shared chip implementations @@ -151,6 +155,9 @@ struct irq_data { unsigned int state_use_accessors; struct irq_chip *chip; struct irq_domain *domain; +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY + struct irq_data *parent_data; +#endif void *handler_data; void *chip_data; struct msi_desc *msi_desc; @@ -315,6 +322,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) * any other callback related to this irq * @irq_release_resources: optional to release resources acquired with * irq_request_resources + * @irq_compose_msi_msg: optional to compose message content for MSI + * @irq_write_msi_msg: optional to write message content for MSI * @flags: chip specific flags */ struct irq_chip { @@ -351,6 +360,9 @@ struct irq_chip { int (*irq_request_resources)(struct irq_data *data); void (*irq_release_resources)(struct irq_data *data); + void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg); + void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg); + unsigned long flags; }; @@ -438,6 +450,18 @@ extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); extern void handle_nested_irq(unsigned int irq); +extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY +extern void irq_chip_ack_parent(struct irq_data *data); +extern int irq_chip_retrigger_hierarchy(struct irq_data *data); +extern void irq_chip_mask_parent(struct irq_data *data); +extern void irq_chip_unmask_parent(struct irq_data *data); +extern void irq_chip_eoi_parent(struct irq_data *data); +extern int irq_chip_set_affinity_parent(struct irq_data *data, + const struct cpumask *dest, + bool force); +#endif + /* Handling of unhandled and spurious interrupts: */ extern void note_interrupt(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret); @@ -639,13 +663,6 @@ void arch_teardown_hwirq(unsigned int irq); void irq_init_desc(unsigned int irq); #endif -#ifndef irq_reg_writel -# define irq_reg_writel(val, addr) writel(val, addr) -#endif -#ifndef irq_reg_readl -# define irq_reg_readl(addr) readl(addr) -#endif - /** * struct irq_chip_regs - register offsets for struct irq_gci * @enable: Enable register offset to reg_base @@ -692,6 +709,8 @@ struct irq_chip_type { * struct irq_chip_generic - Generic irq chip data structure * @lock: Lock to protect register and cache data access * @reg_base: Register base address (virtual) + * @reg_readl: Alternate I/O accessor (defaults to readl if NULL) + * @reg_writel: Alternate I/O accessor (defaults to writel if NULL) * @irq_base: Interrupt base nr for this chip * @irq_cnt: Number of interrupts handled by this chip * @mask_cache: Cached mask register shared between all chip types @@ -716,6 +735,8 @@ struct irq_chip_type { struct irq_chip_generic { raw_spinlock_t lock; void __iomem *reg_base; + u32 (*reg_readl)(void __iomem *addr); + void (*reg_writel)(u32 val, void __iomem *addr); unsigned int irq_base; unsigned int irq_cnt; u32 mask_cache; @@ -740,12 +761,14 @@ struct irq_chip_generic { * the parent irq. 
Usually GPIO implementations * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask + * @IRQ_GC_BE_IO: Use big-endian register accesses (default: LE) */ enum irq_gc_flags { IRQ_GC_INIT_MASK_CACHE = 1 << 0, IRQ_GC_INIT_NESTED_LOCK = 1 << 1, IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2, IRQ_GC_NO_MASK = 1 << 3, + IRQ_GC_BE_IO = 1 << 4, }; /* @@ -821,4 +844,22 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { } static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } #endif +static inline void irq_reg_writel(struct irq_chip_generic *gc, + u32 val, int reg_offset) +{ + if (gc->reg_writel) + gc->reg_writel(val, gc->reg_base + reg_offset); + else + writel(val, gc->reg_base + reg_offset); +} + +static inline u32 irq_reg_readl(struct irq_chip_generic *gc, + int reg_offset) +{ + if (gc->reg_readl) + return gc->reg_readl(gc->reg_base + reg_offset); + else + return readl(gc->reg_base + reg_offset); +} + #endif /* _LINUX_IRQ_H */ diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h new file mode 100644 index 00000000000..420f77b34d0 --- /dev/null +++ b/include/linux/irqchip/mips-gic.h @@ -0,0 +1,249 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2000, 07 MIPS Technologies, Inc. + */ +#ifndef __LINUX_IRQCHIP_MIPS_GIC_H +#define __LINUX_IRQCHIP_MIPS_GIC_H + +#include <linux/clocksource.h> + +#define GIC_MAX_INTRS 256 + +/* Constants */ +#define GIC_POL_POS 1 +#define GIC_POL_NEG 0 +#define GIC_TRIG_EDGE 1 +#define GIC_TRIG_LEVEL 0 +#define GIC_TRIG_DUAL_ENABLE 1 +#define GIC_TRIG_DUAL_DISABLE 0 + +#define MSK(n) ((1 << (n)) - 1) + +/* Accessors */ +#define GIC_REG(segment, offset) (segment##_##SECTION_OFS + offset##_##OFS) + +/* GIC Address Space */ +#define SHARED_SECTION_OFS 0x0000 +#define SHARED_SECTION_SIZE 0x8000 +#define VPE_LOCAL_SECTION_OFS 0x8000 +#define VPE_LOCAL_SECTION_SIZE 0x4000 +#define VPE_OTHER_SECTION_OFS 0xc000 +#define VPE_OTHER_SECTION_SIZE 0x4000 +#define USM_VISIBLE_SECTION_OFS 0x10000 +#define USM_VISIBLE_SECTION_SIZE 0x10000 + +/* Register Map for Shared Section */ + +#define GIC_SH_CONFIG_OFS 0x0000 + +/* Shared Global Counter */ +#define GIC_SH_COUNTER_31_00_OFS 0x0010 +#define GIC_SH_COUNTER_63_32_OFS 0x0014 +#define GIC_SH_REVISIONID_OFS 0x0020 + +/* Convert an interrupt number to a byte offset/bit for multi-word registers */ +#define GIC_INTR_OFS(intr) (((intr) / 32) * 4) +#define GIC_INTR_BIT(intr) ((intr) % 32) + +/* Polarity : Reset Value is always 0 */ +#define GIC_SH_SET_POLARITY_OFS 0x0100 + +/* Triggering : Reset Value is always 0 */ +#define GIC_SH_SET_TRIGGER_OFS 0x0180 + +/* Dual edge triggering : Reset Value is always 0 */ +#define GIC_SH_SET_DUAL_OFS 0x0200 + +/* Set/Clear corresponding bit in Edge Detect Register */ +#define GIC_SH_WEDGE_OFS 0x0280 + +/* Mask manipulation */ +#define GIC_SH_RMASK_OFS 0x0300 +#define GIC_SH_SMASK_OFS 0x0380 + +/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */ +#define GIC_SH_MASK_OFS 0x0400 + +/* Pending Global Interrupts (RO) */ +#define GIC_SH_PEND_OFS 0x0480 + +/* Maps Interrupt X to a Pin */ +#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500 +#define GIC_SH_MAP_TO_PIN(intr) (4 * (intr)) + +/* Maps Interrupt X to a VPE */ +#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000 +#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \ + ((32 * (intr)) + (((vpe) / 32) * 
4)) +#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32)) + +/* Register Map for Local Section */ +#define GIC_VPE_CTL_OFS 0x0000 +#define GIC_VPE_PEND_OFS 0x0004 +#define GIC_VPE_MASK_OFS 0x0008 +#define GIC_VPE_RMASK_OFS 0x000c +#define GIC_VPE_SMASK_OFS 0x0010 +#define GIC_VPE_WD_MAP_OFS 0x0040 +#define GIC_VPE_COMPARE_MAP_OFS 0x0044 +#define GIC_VPE_TIMER_MAP_OFS 0x0048 +#define GIC_VPE_FDC_MAP_OFS 0x004c +#define GIC_VPE_PERFCTR_MAP_OFS 0x0050 +#define GIC_VPE_SWINT0_MAP_OFS 0x0054 +#define GIC_VPE_SWINT1_MAP_OFS 0x0058 +#define GIC_VPE_OTHER_ADDR_OFS 0x0080 +#define GIC_VPE_WD_CONFIG0_OFS 0x0090 +#define GIC_VPE_WD_COUNT0_OFS 0x0094 +#define GIC_VPE_WD_INITIAL0_OFS 0x0098 +#define GIC_VPE_COMPARE_LO_OFS 0x00a0 +#define GIC_VPE_COMPARE_HI_OFS 0x00a4 + +#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100 +#define GIC_VPE_EIC_SS(intr) (4 * (intr)) + +#define GIC_VPE_EIC_VEC_BASE_OFS 0x0800 +#define GIC_VPE_EIC_VEC(intr) (4 * (intr)) + +#define GIC_VPE_TENABLE_NMI_OFS 0x1000 +#define GIC_VPE_TENABLE_YQ_OFS 0x1004 +#define GIC_VPE_TENABLE_INT_31_0_OFS 0x1080 +#define GIC_VPE_TENABLE_INT_63_32_OFS 0x1084 + +/* User Mode Visible Section Register Map */ +#define GIC_UMV_SH_COUNTER_31_00_OFS 0x0000 +#define GIC_UMV_SH_COUNTER_63_32_OFS 0x0004 + +/* Masks */ +#define GIC_SH_CONFIG_COUNTSTOP_SHF 28 +#define GIC_SH_CONFIG_COUNTSTOP_MSK (MSK(1) << GIC_SH_CONFIG_COUNTSTOP_SHF) + +#define GIC_SH_CONFIG_COUNTBITS_SHF 24 +#define GIC_SH_CONFIG_COUNTBITS_MSK (MSK(4) << GIC_SH_CONFIG_COUNTBITS_SHF) + +#define GIC_SH_CONFIG_NUMINTRS_SHF 16 +#define GIC_SH_CONFIG_NUMINTRS_MSK (MSK(8) << GIC_SH_CONFIG_NUMINTRS_SHF) + +#define GIC_SH_CONFIG_NUMVPES_SHF 0 +#define GIC_SH_CONFIG_NUMVPES_MSK (MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF) + +#define GIC_SH_WEDGE_SET(intr) ((intr) | (0x1 << 31)) +#define GIC_SH_WEDGE_CLR(intr) ((intr) & ~(0x1 << 31)) + +#define GIC_MAP_TO_PIN_SHF 31 +#define GIC_MAP_TO_PIN_MSK (MSK(1) << GIC_MAP_TO_PIN_SHF) +#define GIC_MAP_TO_NMI_SHF 30 +#define GIC_MAP_TO_NMI_MSK (MSK(1) << GIC_MAP_TO_NMI_SHF) +#define GIC_MAP_TO_YQ_SHF 29 +#define GIC_MAP_TO_YQ_MSK (MSK(1) << GIC_MAP_TO_YQ_SHF) +#define GIC_MAP_SHF 0 +#define GIC_MAP_MSK (MSK(6) << GIC_MAP_SHF) + +/* GIC_VPE_CTL Masks */ +#define GIC_VPE_CTL_FDC_RTBL_SHF 4 +#define GIC_VPE_CTL_FDC_RTBL_MSK (MSK(1) << GIC_VPE_CTL_FDC_RTBL_SHF) +#define GIC_VPE_CTL_SWINT_RTBL_SHF 3 +#define GIC_VPE_CTL_SWINT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_SWINT_RTBL_SHF) +#define GIC_VPE_CTL_PERFCNT_RTBL_SHF 2 +#define GIC_VPE_CTL_PERFCNT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF) +#define GIC_VPE_CTL_TIMER_RTBL_SHF 1 +#define GIC_VPE_CTL_TIMER_RTBL_MSK (MSK(1) << GIC_VPE_CTL_TIMER_RTBL_SHF) +#define GIC_VPE_CTL_EIC_MODE_SHF 0 +#define GIC_VPE_CTL_EIC_MODE_MSK (MSK(1) << GIC_VPE_CTL_EIC_MODE_SHF) + +/* GIC_VPE_PEND Masks */ +#define GIC_VPE_PEND_WD_SHF 0 +#define GIC_VPE_PEND_WD_MSK (MSK(1) << GIC_VPE_PEND_WD_SHF) +#define GIC_VPE_PEND_CMP_SHF 1 +#define GIC_VPE_PEND_CMP_MSK (MSK(1) << GIC_VPE_PEND_CMP_SHF) +#define GIC_VPE_PEND_TIMER_SHF 2 +#define GIC_VPE_PEND_TIMER_MSK (MSK(1) << GIC_VPE_PEND_TIMER_SHF) +#define GIC_VPE_PEND_PERFCOUNT_SHF 3 +#define GIC_VPE_PEND_PERFCOUNT_MSK (MSK(1) << GIC_VPE_PEND_PERFCOUNT_SHF) +#define GIC_VPE_PEND_SWINT0_SHF 4 +#define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF) +#define GIC_VPE_PEND_SWINT1_SHF 5 +#define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF) + +/* GIC_VPE_RMASK Masks */ +#define GIC_VPE_RMASK_WD_SHF 0 +#define GIC_VPE_RMASK_WD_MSK (MSK(1) << GIC_VPE_RMASK_WD_SHF) +#define 
GIC_VPE_RMASK_CMP_SHF 1 +#define GIC_VPE_RMASK_CMP_MSK (MSK(1) << GIC_VPE_RMASK_CMP_SHF) +#define GIC_VPE_RMASK_TIMER_SHF 2 +#define GIC_VPE_RMASK_TIMER_MSK (MSK(1) << GIC_VPE_RMASK_TIMER_SHF) +#define GIC_VPE_RMASK_PERFCNT_SHF 3 +#define GIC_VPE_RMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_RMASK_PERFCNT_SHF) +#define GIC_VPE_RMASK_SWINT0_SHF 4 +#define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF) +#define GIC_VPE_RMASK_SWINT1_SHF 5 +#define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF) + +/* GIC_VPE_SMASK Masks */ +#define GIC_VPE_SMASK_WD_SHF 0 +#define GIC_VPE_SMASK_WD_MSK (MSK(1) << GIC_VPE_SMASK_WD_SHF) +#define GIC_VPE_SMASK_CMP_SHF 1 +#define GIC_VPE_SMASK_CMP_MSK (MSK(1) << GIC_VPE_SMASK_CMP_SHF) +#define GIC_VPE_SMASK_TIMER_SHF 2 +#define GIC_VPE_SMASK_TIMER_MSK (MSK(1) << GIC_VPE_SMASK_TIMER_SHF) +#define GIC_VPE_SMASK_PERFCNT_SHF 3 +#define GIC_VPE_SMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_SMASK_PERFCNT_SHF) +#define GIC_VPE_SMASK_SWINT0_SHF 4 +#define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF) +#define GIC_VPE_SMASK_SWINT1_SHF 5 +#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF) + +/* GIC nomenclature for Core Interrupt Pins. */ +#define GIC_CPU_INT0 0 /* Core Interrupt 2 */ +#define GIC_CPU_INT1 1 /* . */ +#define GIC_CPU_INT2 2 /* . */ +#define GIC_CPU_INT3 3 /* . */ +#define GIC_CPU_INT4 4 /* . */ +#define GIC_CPU_INT5 5 /* Core Interrupt 7 */ + +/* Add 2 to convert GIC CPU pin to core interrupt */ +#define GIC_CPU_PIN_OFFSET 2 + +/* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */ +#define GIC_CPU_TO_VEC_OFFSET 2 + +/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */ +#define GIC_PIN_TO_VEC_OFFSET 1 + +/* Local GIC interrupts. */ +#define GIC_LOCAL_INT_WD 0 /* GIC watchdog */ +#define GIC_LOCAL_INT_COMPARE 1 /* GIC count and compare timer */ +#define GIC_LOCAL_INT_TIMER 2 /* CPU timer interrupt */ +#define GIC_LOCAL_INT_PERFCTR 3 /* CPU performance counter */ +#define GIC_LOCAL_INT_SWINT0 4 /* CPU software interrupt 0 */ +#define GIC_LOCAL_INT_SWINT1 5 /* CPU software interrupt 1 */ +#define GIC_LOCAL_INT_FDC 6 /* CPU fast debug channel */ +#define GIC_NUM_LOCAL_INTRS 7 + +/* Convert between local/shared IRQ number and GIC HW IRQ number. 
*/ +#define GIC_LOCAL_HWIRQ_BASE 0 +#define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x)) +#define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE) +#define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS +#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x)) +#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE) + +extern unsigned int gic_present; + +extern void gic_init(unsigned long gic_base_addr, + unsigned long gic_addrspace_size, unsigned int cpu_vec, + unsigned int irqbase); +extern void gic_clocksource_init(unsigned int); +extern cycle_t gic_read_count(void); +extern unsigned int gic_get_count_width(void); +extern cycle_t gic_read_compare(void); +extern void gic_write_compare(cycle_t cnt); +extern void gic_write_cpu_compare(cycle_t cnt, int cpu); +extern void gic_send_ipi(unsigned int intr); +extern unsigned int plat_ipi_call_int_xlate(unsigned int); +extern unsigned int plat_ipi_resched_int_xlate(unsigned int); +extern unsigned int gic_get_timer_pending(void); +extern int gic_get_c0_compare_int(void); +extern int gic_get_c0_perfcount_int(void); +#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */ diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index b0f9d16e48f..676d7306a36 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -33,11 +33,14 @@ #define _LINUX_IRQDOMAIN_H #include <linux/types.h> +#include <linux/irqhandler.h> #include <linux/radix-tree.h> struct device_node; struct irq_domain; struct of_device_id; +struct irq_chip; +struct irq_data; /* Number of irqs reserved for a legacy isa controller */ #define NUM_ISA_INTERRUPTS 16 @@ -64,6 +67,16 @@ struct irq_domain_ops { int (*xlate)(struct irq_domain *d, struct device_node *node, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type); + +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY + /* extended V2 interfaces to support hierarchy irq_domains */ + int (*alloc)(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs, void *arg); + void (*free)(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs); + void (*activate)(struct irq_domain *d, struct irq_data *irq_data); + void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); +#endif }; extern struct irq_domain_ops irq_generic_chip_ops; @@ -77,6 +90,7 @@ struct irq_domain_chip_generic; * @ops: pointer to irq_domain methods * @host_data: private data pointer for use by owner. Not touched by irq_domain * core code. + * @flags: host per irq_domain flags * * Optional elements * @of_node: Pointer to device tree nodes associated with the irq_domain. Used @@ -84,6 +98,7 @@ struct irq_domain_chip_generic; * @gc: Pointer to a list of generic chips. There is a helper function for * setting up one or more generic chips for interrupt controllers * drivers using the generic chip library which uses this pointer. + * @parent: Pointer to parent irq_domain to support hierarchy irq_domains * * Revmap data, used internally by irq_domain * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that @@ -97,10 +112,14 @@ struct irq_domain { const char *name; const struct irq_domain_ops *ops; void *host_data; + unsigned int flags; /* Optional data */ struct device_node *of_node; struct irq_domain_chip_generic *gc; +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY + struct irq_domain *parent; +#endif /* reverse map data. 
The linear map gets appended to the irq_domain */ irq_hw_number_t hwirq_max; @@ -110,6 +129,22 @@ struct irq_domain { unsigned int linear_revmap[]; }; +/* Irq domain flags */ +enum { + /* Irq domain is hierarchical */ + IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0), + + /* Core calls alloc/free recursive through the domain hierarchy. */ + IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1), + + /* + * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved + * for implementation specific purposes and ignored by the + * core code. + */ + IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), +}; + #ifdef CONFIG_IRQ_DOMAIN struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, irq_hw_number_t hwirq_max, int direct_max, @@ -220,8 +255,74 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type); +/* V2 interfaces to support hierarchy IRQ domains. */ +extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, + unsigned int virq); +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY +extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, + unsigned int flags, unsigned int size, + struct device_node *node, + const struct irq_domain_ops *ops, void *host_data); +extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, + unsigned int nr_irqs, int node, void *arg, + bool realloc); +extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); +extern void irq_domain_activate_irq(struct irq_data *irq_data); +extern void irq_domain_deactivate_irq(struct irq_data *irq_data); + +static inline int irq_domain_alloc_irqs(struct irq_domain *domain, + unsigned int nr_irqs, int node, void *arg) +{ + return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false); +} + +extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, + unsigned int virq, + irq_hw_number_t hwirq, + struct irq_chip *chip, + void *chip_data); +extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, + irq_hw_number_t hwirq, struct irq_chip *chip, + void *chip_data, irq_flow_handler_t handler, + void *handler_data, const char *handler_name); +extern void irq_domain_reset_irq_data(struct irq_data *irq_data); +extern void irq_domain_free_irqs_common(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs); +extern void irq_domain_free_irqs_top(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs); + +extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain, + unsigned int irq_base, + unsigned int nr_irqs, void *arg); + +extern void irq_domain_free_irqs_parent(struct irq_domain *domain, + unsigned int irq_base, + unsigned int nr_irqs); + +static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) +{ + return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY; +} +#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ +static inline void irq_domain_activate_irq(struct irq_data *data) { } +static inline void irq_domain_deactivate_irq(struct irq_data *data) { } +static inline int irq_domain_alloc_irqs(struct irq_domain *domain, + unsigned int nr_irqs, int node, void *arg) +{ + return -1; +} + +static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) +{ + return false; +} +#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ + #else /* CONFIG_IRQ_DOMAIN */ static inline void irq_dispose_mapping(unsigned int virq) { } +static inline void irq_domain_activate_irq(struct irq_data *data) { } +static inline void 
irq_domain_deactivate_irq(struct irq_data *data) { } #endif /* !CONFIG_IRQ_DOMAIN */ #endif /* _LINUX_IRQDOMAIN_H */ diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h new file mode 100644 index 00000000000..62d54300419 --- /dev/null +++ b/include/linux/irqhandler.h @@ -0,0 +1,14 @@ +#ifndef _LINUX_IRQHANDLER_H +#define _LINUX_IRQHANDLER_H + +/* + * Interrupt flow handler typedefs are defined here to avoid circular + * include dependencies. + */ + +struct irq_desc; +struct irq_data; +typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc); +typedef void (*irq_preflow_handler_t)(struct irq_data *data); + +#endif diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h index 866caaa9e2b..c2ce155d83c 100644 --- a/include/linux/kern_levels.h +++ b/include/linux/kern_levels.h @@ -22,4 +22,17 @@ */ #define KERN_CONT "" +/* integer equivalents of KERN_<LEVEL> */ +#define LOGLEVEL_SCHED -2 /* Deferred messages from sched code + * are set to this special level */ +#define LOGLEVEL_DEFAULT -1 /* default (or last) loglevel */ +#define LOGLEVEL_EMERG 0 /* system is unusable */ +#define LOGLEVEL_ALERT 1 /* action must be taken immediately */ +#define LOGLEVEL_CRIT 2 /* critical conditions */ +#define LOGLEVEL_ERR 3 /* error conditions */ +#define LOGLEVEL_WARNING 4 /* warning conditions */ +#define LOGLEVEL_NOTICE 5 /* normal but significant condition */ +#define LOGLEVEL_INFO 6 /* informational */ +#define LOGLEVEL_DEBUG 7 /* debug-level messages */ + #endif diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 3d770f5564b..233ea810703 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -162,6 +162,7 @@ extern int _cond_resched(void); #endif #ifdef CONFIG_DEBUG_ATOMIC_SLEEP + void ___might_sleep(const char *file, int line, int preempt_offset); void __might_sleep(const char *file, int line, int preempt_offset); /** * might_sleep - annotation for functions that can sleep @@ -175,10 +176,14 @@ extern int _cond_resched(void); */ # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) +# define sched_annotate_sleep() __set_current_state(TASK_RUNNING) #else + static inline void ___might_sleep(const char *file, int line, + int preempt_offset) { } static inline void __might_sleep(const char *file, int line, int preempt_offset) { } # define might_sleep() do { might_resched(); } while (0) +# define sched_annotate_sleep() do { } while (0) #endif #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) @@ -422,6 +427,7 @@ extern int panic_timeout; extern int panic_on_oops; extern int panic_on_unrecovered_nmi; extern int panic_on_io_nmi; +extern int panic_on_warn; extern int sysctl_panic_on_stackoverflow; /* * Only to be used by arch init code. 
If the user over-wrote the default diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 8422b4ed688..b9376cd5a18 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -77,11 +77,6 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu) return kstat_cpu(cpu).irqs_sum; } -/* - * Lock/unlock the current runqueue - to extract task statistics: - */ -extern unsigned long long task_delta_exec(struct task_struct *); - extern void account_user_time(struct task_struct *, cputime_t, cputime_t); extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); extern void account_steal_time(cputime_t); diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index f7296e57d61..5297f9fa0ef 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -335,6 +335,7 @@ extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, extern int arch_prepare_kprobe_ftrace(struct kprobe *p); #endif +int arch_check_ftrace_location(struct kprobe *p); /* Get the kprobe at this addr (if any) - called with preemption disabled */ struct kprobe *get_kprobe(void *addr); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index ea53b04993f..a6059bdf7b0 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm); int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); void kvm_vcpu_kick(struct kvm_vcpu *vcpu); -bool kvm_is_mmio_pfn(pfn_t pfn); +bool kvm_is_reserved_pfn(pfn_t pfn); struct kvm_irq_ack_notifier { struct hlist_node link; diff --git a/include/linux/leds.h b/include/linux/leds.h index a57611d0c94..361101fef27 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -261,6 +261,7 @@ struct gpio_led { unsigned retain_state_suspended : 1; unsigned default_state : 2; /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ + struct gpio_desc *gpiod; }; #define LEDS_GPIO_DEFSTATE_OFF 0 #define LEDS_GPIO_DEFSTATE_ON 1 @@ -273,7 +274,7 @@ struct gpio_led_platform_data { #define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */ #define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */ #define GPIO_LED_BLINK 2 /* Please, blink */ - int (*gpio_blink_set)(unsigned gpio, int state, + int (*gpio_blink_set)(struct gpio_desc *desc, int state, unsigned long *delay_on, unsigned long *delay_off); }; diff --git a/include/linux/libata.h b/include/linux/libata.h index bd5fefeaf54..2d182413b1d 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -191,7 +191,8 @@ enum { ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */ ATA_DEV_SEMB = 7, /* SEMB */ ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */ - ATA_DEV_NONE = 9, /* no device */ + ATA_DEV_ZAC = 9, /* ZAC device */ + ATA_DEV_NONE = 10, /* no device */ /* struct ata_link flags */ ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ @@ -1191,9 +1192,9 @@ extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev); extern int ata_scsi_slave_config(struct scsi_device *sdev); extern void ata_scsi_slave_destroy(struct scsi_device *sdev); extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, - int queue_depth, int reason); + int queue_depth); extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, - int queue_depth, int reason); + int queue_depth); extern struct ata_device *ata_dev_pair(struct ata_device *adev); extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); 
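The libata hunk above removes the old "reason" argument from ata_scsi_change_queue_depth() and __ata_change_queue_depth(), so callers now pass only the requested depth. A minimal sketch of what a low-level driver hook might look like against the new prototype follows; the my_sata_* name is hypothetical and the scsi_host_template wiring is an assumption, not something this diff shows.

#include <scsi/scsi_device.h>
#include <linux/libata.h>

/* Hypothetical ->change_queue_depth hook for a SATA LLDD (illustration only). */
static int my_sata_change_queue_depth(struct scsi_device *sdev, int queue_depth)
{
	/* The former "reason" argument is gone; only the new depth is forwarded. */
	return ata_scsi_change_queue_depth(sdev, queue_depth);
}

/* In a real driver this would be wired up as .change_queue_depth in its scsi_host_template. */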
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); @@ -1491,7 +1492,8 @@ static inline unsigned int ata_tag_internal(unsigned int tag) static inline unsigned int ata_class_enabled(unsigned int class) { return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI || - class == ATA_DEV_PMP || class == ATA_DEV_SEMB; + class == ATA_DEV_PMP || class == ATA_DEV_SEMB || + class == ATA_DEV_ZAC; } static inline unsigned int ata_class_disabled(unsigned int class) diff --git a/include/linux/list.h b/include/linux/list.h index f33f831eb3c..feb773c76ee 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -346,7 +346,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_entry - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_entry(ptr, type, member) \ container_of(ptr, type, member) @@ -355,7 +355,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_first_entry - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Note, that list is expected to be not empty. */ @@ -366,7 +366,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_last_entry - get the last element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Note, that list is expected to be not empty. */ @@ -377,7 +377,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_first_entry_or_null - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Note that if the list is empty, it returns NULL. */ @@ -387,7 +387,7 @@ static inline void list_splice_tail_init(struct list_head *list, /** * list_next_entry - get the next element in list * @pos: the type * to cursor - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_next_entry(pos, member) \ list_entry((pos)->member.next, typeof(*(pos)), member) @@ -395,7 +395,7 @@ static inline void list_splice_tail_init(struct list_head *list, /** * list_prev_entry - get the prev element in list * @pos: the type * to cursor - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_prev_entry(pos, member) \ list_entry((pos)->member.prev, typeof(*(pos)), member) @@ -441,7 +441,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. 
*/ #define list_for_each_entry(pos, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member); \ @@ -452,7 +452,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_reverse - iterate backwards over list of given type. * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_for_each_entry_reverse(pos, head, member) \ for (pos = list_last_entry(head, typeof(*pos), member); \ @@ -463,7 +463,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() * @pos: the type * to use as a start point * @head: the head of the list - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). */ @@ -474,7 +474,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_continue - continue iteration over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Continue to iterate over list of given type, continuing after * the current position. @@ -488,7 +488,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_continue_reverse - iterate backwards from the given point * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Start to iterate over list of given type backwards, continuing after * the current position. @@ -502,7 +502,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_from - iterate over list of given type from the current point * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Iterate over list of given type, continuing from current position. */ @@ -515,7 +515,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_for_each_entry_safe(pos, n, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member), \ @@ -528,7 +528,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Iterate over list of given type, continuing after current point, * safe against removal of list entry. @@ -544,7 +544,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. 
+ * @member: the name of the list_head within the struct. * * Iterate over list of given type from current point, safe against * removal of list entry. @@ -559,7 +559,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Iterate backwards over list of given type, safe against removal * of list entry. @@ -574,7 +574,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_safe_reset_next - reset a stale list_for_each_entry_safe loop * @pos: the loop cursor used in the list_for_each_entry_safe loop * @n: temporary storage used in list_for_each_entry_safe - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * list_safe_reset_next is not safe to use in general if the list may be * modified concurrently (eg. the lock is dropped in the loop body). An diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h index 257d3779f2a..0ca8109934e 100644 --- a/include/linux/lockd/debug.h +++ b/include/linux/lockd/debug.h @@ -17,12 +17,8 @@ * Enable lockd debugging. * Requires RPC_DEBUG. */ -#ifdef RPC_DEBUG -# define LOCKD_DEBUG 1 -#endif - #undef ifdebug -#if defined(RPC_DEBUG) && defined(LOCKD_DEBUG) +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define ifdebug(flag) if (unlikely(nlm_debug & NLMDBG_##flag)) #else # define ifdebug(flag) if (0) diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h index 307d9cab202..1726ccbd800 100644 --- a/include/linux/mailbox_client.h +++ b/include/linux/mailbox_client.h @@ -25,6 +25,8 @@ struct mbox_chan; * if the client receives some ACK packet for transmission. * Unused if the controller already has TX_Done/RTR IRQ. * @rx_callback: Atomic callback to provide client the data received + * @tx_prepare: Atomic callback to ask client to prepare the payload + * before initiating the transmission if required. 
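/*
 * For context on the new @tx_prepare hook: it lets the client stage the
 * payload (for instance into controller-owned shared memory) right before
 * the core starts the transmission.  A minimal client sketch; the my_ipc
 * driver and its shmem window are hypothetical, only the mailbox client
 * API itself is taken from this header:
 */
#include <linux/mailbox_client.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>

struct my_ipc {
	struct mbox_client cl;
	struct mbox_chan *chan;
	void __iomem *shmem;		/* payload area the controller reads */
	u32 msg;
};

static void my_ipc_tx_prepare(struct mbox_client *cl, void *mssg)
{
	struct my_ipc *ipc = container_of(cl, struct my_ipc, cl);

	/* atomic context: only copy the payload into place */
	memcpy_toio(ipc->shmem, mssg, sizeof(ipc->msg));
}

static int my_ipc_ping(struct device *dev, struct my_ipc *ipc)
{
	ipc->cl.dev = dev;
	ipc->cl.tx_block = true;
	ipc->cl.tx_tout = 500;			/* ms */
	ipc->cl.tx_prepare = my_ipc_tx_prepare;

	ipc->chan = mbox_request_channel(&ipc->cl, 0);
	if (IS_ERR(ipc->chan))
		return PTR_ERR(ipc->chan);

	ipc->msg = 0xdeadbeef;
	return mbox_send_message(ipc->chan, &ipc->msg);
}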
* @tx_done: Atomic callback to tell client of data transmission */ struct mbox_client { @@ -34,6 +36,7 @@ struct mbox_client { bool knows_txdone; void (*rx_callback)(struct mbox_client *cl, void *mssg); + void (*tx_prepare)(struct mbox_client *cl, void *mssg); void (*tx_done)(struct mbox_client *cl, void *mssg, int r); }; diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 8e9a029e093..e6982ac3200 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -16,6 +16,7 @@ #define MARVELL_PHY_ID_88E1318S 0x01410e90 #define MARVELL_PHY_ID_88E1116R 0x01410e40 #define MARVELL_PHY_ID_88E1510 0x01410dd0 +#define MARVELL_PHY_ID_88E3016 0x01410e60 /* struct phy_device dev_flags definitions */ #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 diff --git a/include/linux/mbus.h b/include/linux/mbus.h index 550c88fb026..611b69fa859 100644 --- a/include/linux/mbus.h +++ b/include/linux/mbus.h @@ -61,6 +61,7 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void) } #endif +int mvebu_mbus_save_cpu_target(u32 *store_addr); void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); void mvebu_mbus_get_pcie_io_aperture(struct resource *res); int mvebu_mbus_add_window_remap_by_id(unsigned int target, diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 6b75640ef5a..6ea9f919e88 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -25,7 +25,6 @@ #include <linux/jump_label.h> struct mem_cgroup; -struct page_cgroup; struct page; struct mm_struct; struct kmem_cache; @@ -68,10 +67,9 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); -bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, - struct mem_cgroup *memcg); -bool task_in_mem_cgroup(struct task_struct *task, - const struct mem_cgroup *memcg); +bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, + struct mem_cgroup *root); +bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); @@ -79,15 +77,16 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css); -static inline -bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg) +static inline bool mm_match_cgroup(struct mm_struct *mm, + struct mem_cgroup *memcg) { struct mem_cgroup *task_memcg; - bool match; + bool match = false; rcu_read_lock(); task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); - match = __mem_cgroup_same_or_subtree(memcg, task_memcg); + if (task_memcg) + match = mem_cgroup_is_descendant(task_memcg, memcg); rcu_read_unlock(); return match; } @@ -141,8 +140,8 @@ static inline bool mem_cgroup_disabled(void) struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked, unsigned long *flags); -void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked, - unsigned long flags); +void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked, + unsigned long *flags); void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx, int val); @@ -174,10 +173,6 @@ static inline void 
mem_cgroup_count_vm_event(struct mm_struct *mm, void mem_cgroup_split_huge_fixup(struct page *head); #endif -#ifdef CONFIG_DEBUG_VM -bool mem_cgroup_bad_page_check(struct page *page); -void mem_cgroup_print_bad_page(struct page *page); -#endif #else /* CONFIG_MEMCG */ struct mem_cgroup; @@ -297,7 +292,7 @@ static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, } static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, - bool locked, unsigned long flags) + bool *locked, unsigned long *flags) { } @@ -347,19 +342,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) } #endif /* CONFIG_MEMCG */ -#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM) -static inline bool -mem_cgroup_bad_page_check(struct page *page) -{ - return false; -} - -static inline void -mem_cgroup_print_bad_page(struct page *page) -{ -} -#endif - enum { UNDER_LIMIT, SOFT_LIMIT, @@ -447,9 +429,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) /* * __GFP_NOFAIL allocations will move on even if charging is not * possible. Therefore we don't even try, and have this allocation - * unaccounted. We could in theory charge it with - * res_counter_charge_nofail, but we hope those allocations are rare, - * and won't be worth the trouble. + * unaccounted. We could in theory charge it forcibly, but we hope + * those allocations are rare, and won't be worth the trouble. */ if (gfp & __GFP_NOFAIL) return true; @@ -467,8 +448,6 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) * memcg_kmem_uncharge_pages: uncharge pages from memcg * @page: pointer to struct page being freed * @order: allocation order. - * - * there is no need to specify memcg here, since it is embedded in page_cgroup */ static inline void memcg_kmem_uncharge_pages(struct page *page, int order) @@ -485,8 +464,7 @@ memcg_kmem_uncharge_pages(struct page *page, int order) * * Needs to be called after memcg_kmem_newpage_charge, regardless of success or * failure of the allocation. if @page is NULL, this function will revert the - * charges. Otherwise, it will commit the memcg given by @memcg to the - * corresponding page_cgroup. + * charges. Otherwise, it will commit @page to @memcg. 
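/*
 * The calling sequence these comments describe (charge first, then
 * allocate, then commit the page, and uncharge again on free) boils down
 * to the following sketch; my_alloc_kmem_pages/my_free_kmem_pages are
 * hypothetical wrappers, not functions added by this patch:
 */
#include <linux/gfp.h>
#include <linux/memcontrol.h>

static struct page *my_alloc_kmem_pages(gfp_t gfp, unsigned int order)
{
	struct mem_cgroup *memcg;
	struct page *page;

	/* may refuse the allocation, or skip accounting (e.g. __GFP_NOFAIL) */
	if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
		return NULL;

	page = alloc_pages(gfp, order);

	/* commits @page to @memcg, or reverts the charge if @page is NULL */
	memcg_kmem_commit_charge(page, memcg, order);
	return page;
}

static void my_free_kmem_pages(struct page *page, unsigned int order)
{
	memcg_kmem_uncharge_pages(page, order);
	__free_pages(page, order);
}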
*/ static inline void memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) diff --git a/include/linux/mfd/abx500/ab8500-sysctrl.h b/include/linux/mfd/abx500/ab8500-sysctrl.h index adba89d9c66..689312745b2 100644 --- a/include/linux/mfd/abx500/ab8500-sysctrl.h +++ b/include/linux/mfd/abx500/ab8500-sysctrl.h @@ -12,7 +12,6 @@ int ab8500_sysctrl_read(u16 reg, u8 *value); int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value); -void ab8500_restart(char mode, const char *cmd); #else diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h index f34723f7663..910e3aa1e96 100644 --- a/include/linux/mfd/arizona/core.h +++ b/include/linux/mfd/arizona/core.h @@ -141,6 +141,7 @@ struct arizona { uint16_t dac_comp_coeff; uint8_t dac_comp_enabled; + struct mutex dac_comp_lock; }; int arizona_clk32k_enable(struct arizona *arizona); diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h index c0b075f6bc3..aacc10d7789 100644 --- a/include/linux/mfd/arizona/registers.h +++ b/include/linux/mfd/arizona/registers.h @@ -125,6 +125,8 @@ #define ARIZONA_MIC_BIAS_CTRL_1 0x218 #define ARIZONA_MIC_BIAS_CTRL_2 0x219 #define ARIZONA_MIC_BIAS_CTRL_3 0x21A +#define ARIZONA_HP_CTRL_1L 0x225 +#define ARIZONA_HP_CTRL_1R 0x226 #define ARIZONA_ACCESSORY_DETECT_MODE_1 0x293 #define ARIZONA_HEADPHONE_DETECT_1 0x29B #define ARIZONA_HEADPHONE_DETECT_2 0x29C @@ -279,8 +281,16 @@ #define ARIZONA_AIF2_FRAME_CTRL_2 0x548 #define ARIZONA_AIF2_FRAME_CTRL_3 0x549 #define ARIZONA_AIF2_FRAME_CTRL_4 0x54A +#define ARIZONA_AIF2_FRAME_CTRL_5 0x54B +#define ARIZONA_AIF2_FRAME_CTRL_6 0x54C +#define ARIZONA_AIF2_FRAME_CTRL_7 0x54D +#define ARIZONA_AIF2_FRAME_CTRL_8 0x54E #define ARIZONA_AIF2_FRAME_CTRL_11 0x551 #define ARIZONA_AIF2_FRAME_CTRL_12 0x552 +#define ARIZONA_AIF2_FRAME_CTRL_13 0x553 +#define ARIZONA_AIF2_FRAME_CTRL_14 0x554 +#define ARIZONA_AIF2_FRAME_CTRL_15 0x555 +#define ARIZONA_AIF2_FRAME_CTRL_16 0x556 #define ARIZONA_AIF2_TX_ENABLES 0x559 #define ARIZONA_AIF2_RX_ENABLES 0x55A #define ARIZONA_AIF2_FORCE_WRITE 0x55B @@ -2245,6 +2255,46 @@ #define ARIZONA_MICB3_ENA_WIDTH 1 /* MICB3_ENA */ /* + * R549 (0x225) - HP Ctrl 1L + */ +#define ARIZONA_RMV_SHRT_HP1L 0x4000 /* RMV_SHRT_HP1L */ +#define ARIZONA_RMV_SHRT_HP1L_MASK 0x4000 /* RMV_SHRT_HP1L */ +#define ARIZONA_RMV_SHRT_HP1L_SHIFT 14 /* RMV_SHRT_HP1L */ +#define ARIZONA_RMV_SHRT_HP1L_WIDTH 1 /* RMV_SHRT_HP1L */ +#define ARIZONA_HP1L_FLWR 0x0004 /* HP1L_FLWR */ +#define ARIZONA_HP1L_FLWR_MASK 0x0004 /* HP1L_FLWR */ +#define ARIZONA_HP1L_FLWR_SHIFT 2 /* HP1L_FLWR */ +#define ARIZONA_HP1L_FLWR_WIDTH 1 /* HP1L_FLWR */ +#define ARIZONA_HP1L_SHRTI 0x0002 /* HP1L_SHRTI */ +#define ARIZONA_HP1L_SHRTI_MASK 0x0002 /* HP1L_SHRTI */ +#define ARIZONA_HP1L_SHRTI_SHIFT 1 /* HP1L_SHRTI */ +#define ARIZONA_HP1L_SHRTI_WIDTH 1 /* HP1L_SHRTI */ +#define ARIZONA_HP1L_SHRTO 0x0001 /* HP1L_SHRTO */ +#define ARIZONA_HP1L_SHRTO_MASK 0x0001 /* HP1L_SHRTO */ +#define ARIZONA_HP1L_SHRTO_SHIFT 0 /* HP1L_SHRTO */ +#define ARIZONA_HP1L_SHRTO_WIDTH 1 /* HP1L_SHRTO */ + +/* + * R550 (0x226) - HP Ctrl 1R + */ +#define ARIZONA_RMV_SHRT_HP1R 0x4000 /* RMV_SHRT_HP1R */ +#define ARIZONA_RMV_SHRT_HP1R_MASK 0x4000 /* RMV_SHRT_HP1R */ +#define ARIZONA_RMV_SHRT_HP1R_SHIFT 14 /* RMV_SHRT_HP1R */ +#define ARIZONA_RMV_SHRT_HP1R_WIDTH 1 /* RMV_SHRT_HP1R */ +#define ARIZONA_HP1R_FLWR 0x0004 /* HP1R_FLWR */ +#define ARIZONA_HP1R_FLWR_MASK 0x0004 /* HP1R_FLWR */ +#define ARIZONA_HP1R_FLWR_SHIFT 2 /* HP1R_FLWR */ +#define ARIZONA_HP1R_FLWR_WIDTH 
1 /* HP1R_FLWR */ +#define ARIZONA_HP1R_SHRTI 0x0002 /* HP1R_SHRTI */ +#define ARIZONA_HP1R_SHRTI_MASK 0x0002 /* HP1R_SHRTI */ +#define ARIZONA_HP1R_SHRTI_SHIFT 1 /* HP1R_SHRTI */ +#define ARIZONA_HP1R_SHRTI_WIDTH 1 /* HP1R_SHRTI */ +#define ARIZONA_HP1R_SHRTO 0x0001 /* HP1R_SHRTO */ +#define ARIZONA_HP1R_SHRTO_MASK 0x0001 /* HP1R_SHRTO */ +#define ARIZONA_HP1R_SHRTO_SHIFT 0 /* HP1R_SHRTO */ +#define ARIZONA_HP1R_SHRTO_WIDTH 1 /* HP1R_SHRTO */ + +/* * R659 (0x293) - Accessory Detect Mode 1 */ #define ARIZONA_ACCDET_SRC 0x2000 /* ACCDET_SRC */ diff --git a/include/linux/mfd/atmel-hlcdc.h b/include/linux/mfd/atmel-hlcdc.h new file mode 100644 index 00000000000..1279ab1644b --- /dev/null +++ b/include/linux/mfd/atmel-hlcdc.h @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2014 Free Electrons + * Copyright (C) 2014 Atmel + * + * Author: Boris BREZILLON <boris.brezillon@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __LINUX_MFD_HLCDC_H +#define __LINUX_MFD_HLCDC_H + +#include <linux/clk.h> +#include <linux/regmap.h> + +#define ATMEL_HLCDC_CFG(i) ((i) * 0x4) +#define ATMEL_HLCDC_SIG_CFG LCDCFG(5) +#define ATMEL_HLCDC_HSPOL BIT(0) +#define ATMEL_HLCDC_VSPOL BIT(1) +#define ATMEL_HLCDC_VSPDLYS BIT(2) +#define ATMEL_HLCDC_VSPDLYE BIT(3) +#define ATMEL_HLCDC_DISPPOL BIT(4) +#define ATMEL_HLCDC_DITHER BIT(6) +#define ATMEL_HLCDC_DISPDLY BIT(7) +#define ATMEL_HLCDC_MODE_MASK GENMASK(9, 8) +#define ATMEL_HLCDC_PP BIT(10) +#define ATMEL_HLCDC_VSPSU BIT(12) +#define ATMEL_HLCDC_VSPHO BIT(13) +#define ATMEL_HLCDC_GUARDTIME_MASK GENMASK(20, 16) + +#define ATMEL_HLCDC_EN 0x20 +#define ATMEL_HLCDC_DIS 0x24 +#define ATMEL_HLCDC_SR 0x28 +#define ATMEL_HLCDC_IER 0x2c +#define ATMEL_HLCDC_IDR 0x30 +#define ATMEL_HLCDC_IMR 0x34 +#define ATMEL_HLCDC_ISR 0x38 + +#define ATMEL_HLCDC_CLKPOL BIT(0) +#define ATMEL_HLCDC_CLKSEL BIT(2) +#define ATMEL_HLCDC_CLKPWMSEL BIT(3) +#define ATMEL_HLCDC_CGDIS(i) BIT(8 + (i)) +#define ATMEL_HLCDC_CLKDIV_SHFT 16 +#define ATMEL_HLCDC_CLKDIV_MASK GENMASK(23, 16) +#define ATMEL_HLCDC_CLKDIV(div) ((div - 2) << ATMEL_HLCDC_CLKDIV_SHFT) + +#define ATMEL_HLCDC_PIXEL_CLK BIT(0) +#define ATMEL_HLCDC_SYNC BIT(1) +#define ATMEL_HLCDC_DISP BIT(2) +#define ATMEL_HLCDC_PWM BIT(3) +#define ATMEL_HLCDC_SIP BIT(4) + +#define ATMEL_HLCDC_SOF BIT(0) +#define ATMEL_HLCDC_SYNCDIS BIT(1) +#define ATMEL_HLCDC_FIFOERR BIT(4) +#define ATMEL_HLCDC_LAYER_STATUS(x) BIT((x) + 8) + +/** + * Structure shared by the MFD device and its subdevices. 
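/*
 * Illustration of how a subdevice is expected to consume this shared
 * state.  The sketch assumes the MFD parent publishes the structure as
 * its drvdata (a convention not spelled out in this header), and the
 * ATMEL_HLCDC_SR read / ATMEL_HLCDC_EN write is only an example:
 */
#include <linux/mfd/atmel-hlcdc.h>
#include <linux/platform_device.h>

static int my_hlcdc_sub_probe(struct platform_device *pdev)	/* hypothetical */
{
	struct atmel_hlcdc *hlcdc = dev_get_drvdata(pdev->dev.parent);
	u32 status;
	int ret;

	/* every register access goes through the shared regmap */
	ret = regmap_read(hlcdc->regmap, ATMEL_HLCDC_SR, &status);
	if (ret)
		return ret;

	if (!(status & ATMEL_HLCDC_DISP))
		regmap_write(hlcdc->regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP);

	return 0;
}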
+ * + * @regmap: register map used to access HLCDC IP registers + * @periph_clk: the hlcdc peripheral clock + * @sys_clk: the hlcdc system clock + * @slow_clk: the system slow clk + * @irq: the hlcdc irq + */ +struct atmel_hlcdc { + struct regmap *regmap; + struct clk *periph_clk; + struct clk *sys_clk; + struct clk *slow_clk; + int irq; +}; + +#endif /* __LINUX_MFD_HLCDC_H */ diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index d0e31a2287a..81589d176ae 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -14,6 +14,8 @@ enum { AXP202_ID = 0, AXP209_ID, + AXP288_ID, + NR_AXP20X_VARIANTS, }; #define AXP20X_DATACACHE(m) (0x04 + (m)) @@ -49,11 +51,13 @@ enum { #define AXP20X_IRQ3_EN 0x42 #define AXP20X_IRQ4_EN 0x43 #define AXP20X_IRQ5_EN 0x44 +#define AXP20X_IRQ6_EN 0x45 #define AXP20X_IRQ1_STATE 0x48 #define AXP20X_IRQ2_STATE 0x49 #define AXP20X_IRQ3_STATE 0x4a #define AXP20X_IRQ4_STATE 0x4b #define AXP20X_IRQ5_STATE 0x4c +#define AXP20X_IRQ6_STATE 0x4d /* ADC */ #define AXP20X_ACIN_V_ADC_H 0x56 @@ -116,6 +120,15 @@ enum { #define AXP20X_CC_CTRL 0xb8 #define AXP20X_FG_RES 0xb9 +/* AXP288 specific registers */ +#define AXP288_PMIC_ADC_H 0x56 +#define AXP288_PMIC_ADC_L 0x57 +#define AXP288_ADC_TS_PIN_CTRL 0x84 + +#define AXP288_PMIC_ADC_EN 0x84 +#define AXP288_FG_TUNE5 0xed + + /* Regulators IDs */ enum { AXP20X_LDO1 = 0, @@ -169,12 +182,58 @@ enum { AXP20X_IRQ_GPIO0_INPUT, }; +enum axp288_irqs { + AXP288_IRQ_VBUS_FALL = 2, + AXP288_IRQ_VBUS_RISE, + AXP288_IRQ_OV, + AXP288_IRQ_FALLING_ALT, + AXP288_IRQ_RISING_ALT, + AXP288_IRQ_OV_ALT, + AXP288_IRQ_DONE = 10, + AXP288_IRQ_CHARGING, + AXP288_IRQ_SAFE_QUIT, + AXP288_IRQ_SAFE_ENTER, + AXP288_IRQ_ABSENT, + AXP288_IRQ_APPEND, + AXP288_IRQ_QWBTU, + AXP288_IRQ_WBTU, + AXP288_IRQ_QWBTO, + AXP288_IRQ_WBTO, + AXP288_IRQ_QCBTU, + AXP288_IRQ_CBTU, + AXP288_IRQ_QCBTO, + AXP288_IRQ_CBTO, + AXP288_IRQ_WL2, + AXP288_IRQ_WL1, + AXP288_IRQ_GPADC, + AXP288_IRQ_OT = 31, + AXP288_IRQ_GPIO0, + AXP288_IRQ_GPIO1, + AXP288_IRQ_POKO, + AXP288_IRQ_POKL, + AXP288_IRQ_POKS, + AXP288_IRQ_POKN, + AXP288_IRQ_POKP, + AXP288_IRQ_TIMER, + AXP288_IRQ_MV_CHNG, + AXP288_IRQ_BC_USB_CHNG, +}; + +#define AXP288_TS_ADC_H 0x58 +#define AXP288_TS_ADC_L 0x59 +#define AXP288_GP_ADC_H 0x5a +#define AXP288_GP_ADC_L 0x5b + struct axp20x_dev { struct device *dev; struct i2c_client *i2c_client; struct regmap *regmap; struct regmap_irq_chip_data *regmap_irqc; long variant; + int nr_cells; + struct mfd_cell *cells; + const struct regmap_config *regmap_cfg; + const struct regmap_irq_chip *regmap_irq_chip; }; #endif /* __LINUX_MFD_AXP20X_H */ diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index 73e1709d4c0..a76bc100bf9 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -111,6 +111,13 @@ extern int mfd_add_devices(struct device *parent, int id, struct resource *mem_base, int irq_base, struct irq_domain *irq_domain); +static inline int mfd_add_hotplug_devices(struct device *parent, + const struct mfd_cell *cells, int n_devs) +{ + return mfd_add_devices(parent, PLATFORM_DEVID_AUTO, cells, n_devs, + NULL, 0, NULL); +} + extern void mfd_remove_devices(struct device *parent); #endif diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h index cb01496bfa4..8e1cdbef3da 100644 --- a/include/linux/mfd/davinci_voicecodec.h +++ b/include/linux/mfd/davinci_voicecodec.h @@ -99,12 +99,6 @@ struct davinci_vcif { dma_addr_t dma_rx_addr; }; -struct cq93vc { - struct platform_device *pdev; - struct 
snd_soc_codec *codec; - u32 sysclk; -}; - struct davinci_vc; struct davinci_vc { @@ -122,7 +116,6 @@ struct davinci_vc { /* Client devices */ struct davinci_vcif davinci_vcif; - struct cq93vc cq93vc; }; #endif diff --git a/include/linux/mfd/dln2.h b/include/linux/mfd/dln2.h new file mode 100644 index 00000000000..004b24576da --- /dev/null +++ b/include/linux/mfd/dln2.h @@ -0,0 +1,103 @@ +#ifndef __LINUX_USB_DLN2_H +#define __LINUX_USB_DLN2_H + +#define DLN2_CMD(cmd, id) ((cmd) | ((id) << 8)) + +struct dln2_platform_data { + u16 handle; /* sub-driver handle (internally used only) */ + u8 port; /* I2C/SPI port */ +}; + +/** + * dln2_event_cb_t - event callback function signature + * + * @pdev - the sub-device that registered this callback + * @echo - the echo header field received in the message + * @data - the data payload + * @len - the data payload length + * + * The callback function is called in interrupt context and the data payload is + * only valid during the call. If the user needs later access of the data, it + * must copy it. + */ + +typedef void (*dln2_event_cb_t)(struct platform_device *pdev, u16 echo, + const void *data, int len); + +/** + * dl2n_register_event_cb - register a callback function for an event + * + * @pdev - the sub-device that registers the callback + * @event - the event for which to register a callback + * @event_cb - the callback function + * + * @return 0 in case of success, negative value in case of error + */ +int dln2_register_event_cb(struct platform_device *pdev, u16 event, + dln2_event_cb_t event_cb); + +/** + * dln2_unregister_event_cb - unregister the callback function for an event + * + * @pdev - the sub-device that registered the callback + * @event - the event for which to register a callback + */ +void dln2_unregister_event_cb(struct platform_device *pdev, u16 event); + +/** + * dln2_transfer - issue a DLN2 command and wait for a response and the + * associated data + * + * @pdev - the sub-device which is issuing this transfer + * @cmd - the command to be sent to the device + * @obuf - the buffer to be sent to the device; it can be NULL if the user + * doesn't need to transmit data with this command + * @obuf_len - the size of the buffer to be sent to the device + * @ibuf - any data associated with the response will be copied here; it can be + * NULL if the user doesn't need the response data + * @ibuf_len - must be initialized to the input buffer size; it will be modified + * to indicate the actual data transferred; + * + * @return 0 for success, negative value for errors + */ +int dln2_transfer(struct platform_device *pdev, u16 cmd, + const void *obuf, unsigned obuf_len, + void *ibuf, unsigned *ibuf_len); + +/** + * dln2_transfer_rx - variant of @dln2_transfer() where TX buffer is not needed + * + * @pdev - the sub-device which is issuing this transfer + * @cmd - the command to be sent to the device + * @ibuf - any data associated with the response will be copied here; it can be + * NULL if the user doesn't need the response data + * @ibuf_len - must be initialized to the input buffer size; it will be modified + * to indicate the actual data transferred; + * + * @return 0 for success, negative value for errors + */ + +static inline int dln2_transfer_rx(struct platform_device *pdev, u16 cmd, + void *ibuf, unsigned *ibuf_len) +{ + return dln2_transfer(pdev, cmd, NULL, 0, ibuf, ibuf_len); +} + +/** + * dln2_transfer_tx - variant of @dln2_transfer() where RX buffer is not needed + * + * @pdev - the sub-device which is issuing this transfer + * @cmd 
- the command to be sent to the device + * @obuf - the buffer to be sent to the device; it can be NULL if the + * user doesn't need to transmit data with this command + * @obuf_len - the size of the buffer to be sent to the device + * + * @return 0 for success, negative value for errors + */ +static inline int dln2_transfer_tx(struct platform_device *pdev, u16 cmd, + const void *obuf, unsigned obuf_len) +{ + return dln2_transfer(pdev, cmd, obuf, obuf_len, NULL, NULL); +} + +#endif diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h index 7e6dc4b2b79..553f7d09258 100644 --- a/include/linux/mfd/max77686.h +++ b/include/linux/mfd/max77686.h @@ -131,13 +131,6 @@ enum max77686_opmode { MAX77686_OPMODE_STANDBY, }; -enum max77802_opmode { - MAX77802_OPMODE_OFF, - MAX77802_OPMODE_STANDBY, - MAX77802_OPMODE_LP, - MAX77802_OPMODE_NORMAL, -}; - struct max77686_opmode_data { int id; int mode; diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index 582e67f3405..08dae01258b 100644 --- a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h @@ -26,7 +26,6 @@ #include <linux/i2c.h> -#define MAX77693_NUM_IRQ_MUIC_REGS 3 #define MAX77693_REG_INVALID (0xff) /* Slave addr = 0xCC: PMIC, Charger, Flash LED */ diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h index 74346d5e789..0c12628e91c 100644 --- a/include/linux/mfd/rtsx_pci.h +++ b/include/linux/mfd/rtsx_pci.h @@ -558,6 +558,7 @@ #define SD_SAMPLE_POINT_CTL 0xFDA7 #define SD_PUSH_POINT_CTL 0xFDA8 #define SD_CMD0 0xFDA9 +#define SD_CMD_START 0x40 #define SD_CMD1 0xFDAA #define SD_CMD2 0xFDAB #define SD_CMD3 0xFDAC @@ -707,6 +708,14 @@ #define PM_CTRL1 0xFF44 #define PM_CTRL2 0xFF45 #define PM_CTRL3 0xFF46 +#define SDIO_SEND_PME_EN 0x80 +#define FORCE_RC_MODE_ON 0x40 +#define FORCE_RX50_LINK_ON 0x20 +#define D3_DELINK_MODE_EN 0x10 +#define USE_PESRTB_CTL_DELINK 0x08 +#define DELAY_PIN_WAKE 0x04 +#define RESET_PIN_WAKE 0x02 +#define PM_WAKE_EN 0x01 #define PM_CTRL4 0xFF47 /* Memory mapping */ @@ -752,6 +761,14 @@ #define PHY_DUM_REG 0x1F #define LCTLR 0x80 +#define LCTLR_EXT_SYNC 0x80 +#define LCTLR_COMMON_CLOCK_CFG 0x40 +#define LCTLR_RETRAIN_LINK 0x20 +#define LCTLR_LINK_DISABLE 0x10 +#define LCTLR_RCB 0x08 +#define LCTLR_RESERVED 0x04 +#define LCTLR_ASPM_CTL_MASK 0x03 + #define PCR_SETTING_REG1 0x724 #define PCR_SETTING_REG2 0x814 #define PCR_SETTING_REG3 0x747 @@ -967,4 +984,24 @@ static inline u8 *rtsx_pci_get_cmd_data(struct rtsx_pcr *pcr) return (u8 *)(pcr->host_cmds_ptr); } +static inline int rtsx_pci_update_cfg_byte(struct rtsx_pcr *pcr, int addr, + u8 mask, u8 append) +{ + int err; + u8 val; + + err = pci_read_config_byte(pcr->pci, addr, &val); + if (err < 0) + return err; + return pci_write_config_byte(pcr->pci, addr, (val & mask) | append); +} + +static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val) +{ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg, 0xFF, val >> 24); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 1, 0xFF, val >> 16); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 2, 0xFF, val >> 8); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 3, 0xFF, val); +} + #endif diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index 1825edacbda..3fdb7cfbffb 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -28,6 +28,7 @@ #define MIN_800_MV 800000 #define MIN_750_MV 750000 #define MIN_600_MV 600000 +#define MIN_500_MV 500000 /* Macros to represent steps for LDO/BUCK 
*/ #define STEP_50_MV 50000 @@ -41,6 +42,7 @@ enum sec_device_type { S5M8767X, S2MPA01, S2MPS11X, + S2MPS13X, S2MPS14X, S2MPU02, }; diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h new file mode 100644 index 00000000000..ce5dda8958f --- /dev/null +++ b/include/linux/mfd/samsung/s2mps13.h @@ -0,0 +1,186 @@ +/* + * s2mps13.h + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LINUX_MFD_S2MPS13_H +#define __LINUX_MFD_S2MPS13_H + +/* S2MPS13 registers */ +enum s2mps13_reg { + S2MPS13_REG_ID, + S2MPS13_REG_INT1, + S2MPS13_REG_INT2, + S2MPS13_REG_INT3, + S2MPS13_REG_INT1M, + S2MPS13_REG_INT2M, + S2MPS13_REG_INT3M, + S2MPS13_REG_ST1, + S2MPS13_REG_ST2, + S2MPS13_REG_PWRONSRC, + S2MPS13_REG_OFFSRC, + S2MPS13_REG_BU_CHG, + S2MPS13_REG_RTCCTRL, + S2MPS13_REG_CTRL1, + S2MPS13_REG_CTRL2, + S2MPS13_REG_RSVD1, + S2MPS13_REG_RSVD2, + S2MPS13_REG_RSVD3, + S2MPS13_REG_RSVD4, + S2MPS13_REG_RSVD5, + S2MPS13_REG_RSVD6, + S2MPS13_REG_CTRL3, + S2MPS13_REG_RSVD7, + S2MPS13_REG_RSVD8, + S2MPS13_REG_WRSTBI, + S2MPS13_REG_B1CTRL, + S2MPS13_REG_B1OUT, + S2MPS13_REG_B2CTRL, + S2MPS13_REG_B2OUT, + S2MPS13_REG_B3CTRL, + S2MPS13_REG_B3OUT, + S2MPS13_REG_B4CTRL, + S2MPS13_REG_B4OUT, + S2MPS13_REG_B5CTRL, + S2MPS13_REG_B5OUT, + S2MPS13_REG_B6CTRL, + S2MPS13_REG_B6OUT, + S2MPS13_REG_B7CTRL, + S2MPS13_REG_B7OUT, + S2MPS13_REG_B8CTRL, + S2MPS13_REG_B8OUT, + S2MPS13_REG_B9CTRL, + S2MPS13_REG_B9OUT, + S2MPS13_REG_B10CTRL, + S2MPS13_REG_B10OUT, + S2MPS13_REG_BB1CTRL, + S2MPS13_REG_BB1OUT, + S2MPS13_REG_BUCK_RAMP1, + S2MPS13_REG_BUCK_RAMP2, + S2MPS13_REG_LDO_DVS1, + S2MPS13_REG_LDO_DVS2, + S2MPS13_REG_LDO_DVS3, + S2MPS13_REG_B6OUT2, + S2MPS13_REG_L1CTRL, + S2MPS13_REG_L2CTRL, + S2MPS13_REG_L3CTRL, + S2MPS13_REG_L4CTRL, + S2MPS13_REG_L5CTRL, + S2MPS13_REG_L6CTRL, + S2MPS13_REG_L7CTRL, + S2MPS13_REG_L8CTRL, + S2MPS13_REG_L9CTRL, + S2MPS13_REG_L10CTRL, + S2MPS13_REG_L11CTRL, + S2MPS13_REG_L12CTRL, + S2MPS13_REG_L13CTRL, + S2MPS13_REG_L14CTRL, + S2MPS13_REG_L15CTRL, + S2MPS13_REG_L16CTRL, + S2MPS13_REG_L17CTRL, + S2MPS13_REG_L18CTRL, + S2MPS13_REG_L19CTRL, + S2MPS13_REG_L20CTRL, + S2MPS13_REG_L21CTRL, + S2MPS13_REG_L22CTRL, + S2MPS13_REG_L23CTRL, + S2MPS13_REG_L24CTRL, + S2MPS13_REG_L25CTRL, + S2MPS13_REG_L26CTRL, + S2MPS13_REG_L27CTRL, + S2MPS13_REG_L28CTRL, + S2MPS13_REG_L30CTRL, + S2MPS13_REG_L31CTRL, + S2MPS13_REG_L32CTRL, + S2MPS13_REG_L33CTRL, + S2MPS13_REG_L34CTRL, + S2MPS13_REG_L35CTRL, + S2MPS13_REG_L36CTRL, + S2MPS13_REG_L37CTRL, + S2MPS13_REG_L38CTRL, + S2MPS13_REG_L39CTRL, + S2MPS13_REG_L40CTRL, + S2MPS13_REG_LDODSCH1, + S2MPS13_REG_LDODSCH2, + S2MPS13_REG_LDODSCH3, + S2MPS13_REG_LDODSCH4, + S2MPS13_REG_LDODSCH5, +}; + +/* regulator ids */ +enum s2mps13_regulators { + S2MPS13_LDO1, + S2MPS13_LDO2, + S2MPS13_LDO3, + S2MPS13_LDO4, + S2MPS13_LDO5, + S2MPS13_LDO6, + S2MPS13_LDO7, + S2MPS13_LDO8, + S2MPS13_LDO9, + S2MPS13_LDO10, + S2MPS13_LDO11, + S2MPS13_LDO12, + S2MPS13_LDO13, + S2MPS13_LDO14, + S2MPS13_LDO15, + S2MPS13_LDO16, + 
S2MPS13_LDO17, + S2MPS13_LDO18, + S2MPS13_LDO19, + S2MPS13_LDO20, + S2MPS13_LDO21, + S2MPS13_LDO22, + S2MPS13_LDO23, + S2MPS13_LDO24, + S2MPS13_LDO25, + S2MPS13_LDO26, + S2MPS13_LDO27, + S2MPS13_LDO28, + S2MPS13_LDO29, + S2MPS13_LDO30, + S2MPS13_LDO31, + S2MPS13_LDO32, + S2MPS13_LDO33, + S2MPS13_LDO34, + S2MPS13_LDO35, + S2MPS13_LDO36, + S2MPS13_LDO37, + S2MPS13_LDO38, + S2MPS13_LDO39, + S2MPS13_LDO40, + S2MPS13_BUCK1, + S2MPS13_BUCK2, + S2MPS13_BUCK3, + S2MPS13_BUCK4, + S2MPS13_BUCK5, + S2MPS13_BUCK6, + S2MPS13_BUCK7, + S2MPS13_BUCK8, + S2MPS13_BUCK9, + S2MPS13_BUCK10, + + S2MPS13_REGULATOR_MAX, +}; + +/* + * Default ramp delay in uv/us. Datasheet says that ramp delay can be + * controlled however it does not specify which register is used for that. + * Let's assume that default value will be set. + */ +#define S2MPS13_BUCK_RAMP_DELAY 12500 + +#endif /* __LINUX_MFD_S2MPS13_H */ diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h index ff44374a1a4..c877cad61a1 100644 --- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h +++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h @@ -395,4 +395,43 @@ #define IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK (0x3 << 17) #define IMX6SL_GPR1_FEC_CLOCK_MUX2_SEL_MASK (0x1 << 14) +/* For imx6sx iomux gpr register field define */ +#define IMX6SX_GPR1_VDEC_SW_RST_MASK (0x1 << 20) +#define IMX6SX_GPR1_VDEC_SW_RST_RESET (0x1 << 20) +#define IMX6SX_GPR1_VDEC_SW_RST_RELEASE (0x0 << 20) +#define IMX6SX_GPR1_VADC_SW_RST_MASK (0x1 << 19) +#define IMX6SX_GPR1_VADC_SW_RST_RESET (0x1 << 19) +#define IMX6SX_GPR1_VADC_SW_RST_RELEASE (0x0 << 19) +#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_MASK (0x3 << 13) +#define IMX6SX_GPR1_FEC_CLOCK_PAD_DIR_MASK (0x3 << 17) +#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_EXT (0x3 << 13) + +#define IMX6SX_GPR4_FEC_ENET1_STOP_REQ (0x1 << 3) +#define IMX6SX_GPR4_FEC_ENET2_STOP_REQ (0x1 << 4) + +#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_MASK (0x1 << 3) +#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF1 (0x0 << 3) +#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF2 (0x1 << 3) + +#define IMX6SX_GPR5_CSI2_MUX_CTRL_MASK (0x3 << 27) +#define IMX6SX_GPR5_CSI2_MUX_CTRL_EXT_PIN (0x0 << 27) +#define IMX6SX_GPR5_CSI2_MUX_CTRL_CVD (0x1 << 27) +#define IMX6SX_GPR5_CSI2_MUX_CTRL_VDAC_TO_CSI (0x2 << 27) +#define IMX6SX_GPR5_CSI2_MUX_CTRL_GND (0x3 << 27) +#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_MASK (0x1 << 26) +#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_ENABLE (0x1 << 26) +#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_DISABLE (0x0 << 26) +#define IMX6SX_GPR5_CSI1_MUX_CTRL_MASK (0x3 << 4) +#define IMX6SX_GPR5_CSI1_MUX_CTRL_EXT_PIN (0x0 << 4) +#define IMX6SX_GPR5_CSI1_MUX_CTRL_CVD (0x1 << 4) +#define IMX6SX_GPR5_CSI1_MUX_CTRL_VDAC_TO_CSI (0x2 << 4) +#define IMX6SX_GPR5_CSI1_MUX_CTRL_GND (0x3 << 4) + +#define IMX6SX_GPR5_DISP_MUX_DCIC2_LCDIF2 (0x0 << 2) +#define IMX6SX_GPR5_DISP_MUX_DCIC2_LVDS (0x1 << 2) +#define IMX6SX_GPR5_DISP_MUX_DCIC2_MASK (0x1 << 2) +#define IMX6SX_GPR5_DISP_MUX_DCIC1_LCDIF1 (0x0 << 1) +#define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1) +#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1) + #endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */ diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h index e6088c2e209..e1c12d84c26 100644 --- a/include/linux/mfd/tc3589x.h +++ b/include/linux/mfd/tc3589x.h @@ -164,13 +164,10 @@ struct tc3589x_keypad_platform_data { /** * struct tc3589x_gpio_platform_data - TC3589x GPIO platform data - * @gpio_base: first gpio number assigned to TC3589x. 
A maximum of - * %TC3589x_NR_GPIOS GPIOs will be allocated. * @setup: callback for board-specific initialization * @remove: callback for board-specific teardown */ struct tc3589x_gpio_platform_data { - int gpio_base; void (*setup)(struct tc3589x *tc3589x, unsigned gpio_base); void (*remove)(struct tc3589x *tc3589x, unsigned gpio_base); }; @@ -178,18 +175,13 @@ struct tc3589x_gpio_platform_data { /** * struct tc3589x_platform_data - TC3589x platform data * @block: bitmask of blocks to enable (use TC3589x_BLOCK_*) - * @irq_base: base IRQ number. %TC3589x_NR_IRQS irqs will be used. * @gpio: GPIO-specific platform data * @keypad: keypad-specific platform data */ struct tc3589x_platform_data { unsigned int block; - int irq_base; struct tc3589x_gpio_platform_data *gpio; const struct tc3589x_keypad_platform_data *keypad; }; -#define TC3589x_NR_GPIOS 24 -#define TC3589x_NR_IRQS TC3589x_INT_GPIO(TC3589x_NR_GPIOS) - #endif diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 53d33dee70e..2e5b194b9b1 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -37,7 +37,6 @@ /* struct phy_device dev_flags definitions */ #define MICREL_PHY_50MHZ_CLK 0x00000001 -#define MICREL_PHY_25MHZ_CLK 0x00000002 #define MICREL_KSZ9021_EXTREG_CTRL 0xB #define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 379c02648ab..64d25941b32 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -67,6 +67,8 @@ enum { MLX4_CMD_MAP_ICM_AUX = 0xffc, MLX4_CMD_UNMAP_ICM_AUX = 0xffb, MLX4_CMD_SET_ICM_SIZE = 0xffd, + MLX4_CMD_ACCESS_REG = 0x3b, + /*master notify fw on finish for slave's flr*/ MLX4_CMD_INFORM_FLR_DONE = 0x5b, MLX4_CMD_GET_OP_REQ = 0x59, @@ -197,6 +199,33 @@ enum { MLX4_CMD_NATIVE }; +/* + * MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP - + * Receive checksum value is reported in CQE also for non TCP/UDP packets. + * + * MLX4_RX_CSUM_MODE_L4 - + * L4_CSUM bit in CQE, which indicates whether or not L4 checksum + * was validated correctly, is supported. + * + * MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP - + * IP_OK CQE's field is supported also for non TCP/UDP IP packets. + * + * MLX4_RX_CSUM_MODE_MULTI_VLAN - + * Receive Checksum offload is supported for packets with more than 2 vlan headers. 
+ */ +enum mlx4_rx_csum_mode { + MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP = 1UL << 0, + MLX4_RX_CSUM_MODE_L4 = 1UL << 1, + MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP = 1UL << 2, + MLX4_RX_CSUM_MODE_MULTI_VLAN = 1UL << 3 +}; + +struct mlx4_config_dev_params { + u16 vxlan_udp_dport; + u8 rx_csum_flags_port_1; + u8 rx_csum_flags_port_2; +}; + struct mlx4_dev; struct mlx4_cmd_mailbox { @@ -248,6 +277,8 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf); int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); +int mlx4_config_dev_retrieval(struct mlx4_dev *dev, + struct mlx4_config_dev_params *params); /* * mlx4_get_slave_default_vlan - * return true if VST ( default vlan) diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 37e4404d022..25c791e295f 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -95,7 +95,7 @@ enum { enum { MLX4_MAX_NUM_PF = 16, - MLX4_MAX_NUM_VF = 64, + MLX4_MAX_NUM_VF = 126, MLX4_MAX_NUM_VF_P_PORT = 64, MLX4_MFUNC_MAX = 80, MLX4_MAX_EQ_NUM = 1024, @@ -117,6 +117,14 @@ enum { MLX4_STEERING_MODE_DEVICE_MANAGED }; +enum { + MLX4_STEERING_DMFS_A0_DEFAULT, + MLX4_STEERING_DMFS_A0_DYNAMIC, + MLX4_STEERING_DMFS_A0_STATIC, + MLX4_STEERING_DMFS_A0_DISABLE, + MLX4_STEERING_DMFS_A0_NOT_SUPPORTED +}; + static inline const char *mlx4_steering_mode_str(int steering_mode) { switch (steering_mode) { @@ -186,7 +194,31 @@ enum { MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12, - MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13 + MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13, + MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL = 1LL << 14, + MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 15, + MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16, + MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17, + MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18, + MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19 +}; + +enum { + MLX4_QUERY_FUNC_FLAGS_BF_RES_QP = 1LL << 0, + MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1 +}; + +/* bit enums for an 8-bit flags field indicating special use + * QPs which require special handling in qp_reserve_range. + * Currently, this only includes QPs used by the ETH interface, + * where we expect to use blueflame. These QPs must not have + * bits 6 and 7 set in their qp number. + * + * This enum may use only bits 0..7. 
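/*
 * To make the new flags byte concrete: a caller that wants a
 * blueflame-capable Ethernet QP passes MLX4_RESERVE_ETH_BF_QP through the
 * extended mlx4_qp_reserve_range() shown further down, while existing
 * callers simply pass 0.  Hypothetical wrappers:
 */
#include <linux/mlx4/device.h>

static int my_reserve_bf_tx_qp(struct mlx4_dev *dev, int *qpn)
{
	/* the reservation keeps bits 6 and 7 of the QP number clear */
	return mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_ETH_BF_QP);
}

static void my_release_bf_tx_qp(struct mlx4_dev *dev, int qpn)
{
	mlx4_qp_release_range(dev, qpn, 1);
}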
+ */ +enum { + MLX4_RESERVE_A0_QP = 1 << 6, + MLX4_RESERVE_ETH_BF_QP = 1 << 7, }; enum { @@ -202,7 +234,8 @@ enum { enum { MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0, - MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1 + MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1, + MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2 }; @@ -328,6 +361,8 @@ enum { enum mlx4_qp_region { MLX4_QP_REGION_FW = 0, + MLX4_QP_REGION_RSS_RAW_ETH, + MLX4_QP_REGION_BOTTOM = MLX4_QP_REGION_RSS_RAW_ETH, MLX4_QP_REGION_ETH_ADDR, MLX4_QP_REGION_FC_ADDR, MLX4_QP_REGION_FC_EXCH, @@ -379,6 +414,13 @@ enum { #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) +enum mlx4_module_id { + MLX4_MODULE_ID_SFP = 0x3, + MLX4_MODULE_ID_QSFP = 0xC, + MLX4_MODULE_ID_QSFP_PLUS = 0xD, + MLX4_MODULE_ID_QSFP28 = 0x11, +}; + static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) { return (major << 32) | (minor << 16) | subminor; @@ -433,6 +475,7 @@ struct mlx4_caps { int num_cqs; int max_cqes; int reserved_cqs; + int num_sys_eqs; int num_eqs; int reserved_eqs; int num_comp_vectors; @@ -449,6 +492,7 @@ struct mlx4_caps { int reserved_mcgs; int num_qp_per_mgm; int steering_mode; + int dmfs_high_steer_mode; int fs_log_max_ucast_qp_range_size; int num_pds; int reserved_pds; @@ -487,6 +531,10 @@ struct mlx4_caps { u16 hca_core_clock; u64 phys_port_id[MLX4_MAX_PORTS + 1]; int tunnel_offload_mode; + u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1]; + u8 alloc_res_qp_mask; + u32 dmfs_high_rate_qpn_base; + u32 dmfs_high_rate_qpn_range; }; struct mlx4_buf_list { @@ -607,6 +655,11 @@ struct mlx4_cq { atomic_t refcount; struct completion free; + struct { + struct list_head list; + void (*comp)(struct mlx4_cq *); + void *priv; + } tasklet_ctx; }; struct mlx4_qp { @@ -799,6 +852,26 @@ struct mlx4_init_port_param { u64 si_guid; }; +#define MAD_IFC_DATA_SZ 192 +/* MAD IFC Mailbox */ +struct mlx4_mad_ifc { + u8 base_version; + u8 mgmt_class; + u8 class_version; + u8 method; + __be16 status; + __be16 class_specific; + __be64 tid; + __be16 attr_id; + __be16 resv; + __be32 attr_mod; + __be64 mkey; + __be16 dr_slid; + __be16 dr_dlid; + u8 reserved[28]; + u8 data[MAD_IFC_DATA_SZ]; +} __packed; + #define mlx4_foreach_port(port, dev, type) \ for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ if ((type) == (dev)->caps.port_mask[(port)]) @@ -835,7 +908,9 @@ static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev) static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) { return (qpn < dev->phys_caps.base_sqpn + 8 + - 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev)); + 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev) && + qpn >= dev->phys_caps.base_sqpn) || + (qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]); } static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) @@ -911,8 +986,8 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, unsigned vector, int collapsed, int timestamp_en); void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); - -int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); +int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, + int *base, u8 flags); void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, @@ -1283,10 +1358,50 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, u64 iova, u64 size, int npages, int page_shift, struct 
mlx4_mpt_entry *mpt_entry); +int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, + u16 offset, u16 size, u8 *data); + /* Returns true if running in low memory profile (kdump kernel) */ static inline bool mlx4_low_memory_profile(void) { return is_kdump_kernel(); } +/* ACCESS REG commands */ +enum mlx4_access_reg_method { + MLX4_ACCESS_REG_QUERY = 0x1, + MLX4_ACCESS_REG_WRITE = 0x2, +}; + +/* ACCESS PTYS Reg command */ +enum mlx4_ptys_proto { + MLX4_PTYS_IB = 1<<0, + MLX4_PTYS_EN = 1<<2, +}; + +struct mlx4_ptys_reg { + u8 resrvd1; + u8 local_port; + u8 resrvd2; + u8 proto_mask; + __be32 resrvd3[2]; + __be32 eth_proto_cap; + __be16 ib_width_cap; + __be16 ib_speed_cap; + __be32 resrvd4; + __be32 eth_proto_admin; + __be16 ib_width_admin; + __be16 ib_speed_admin; + __be32 resrvd5; + __be32 eth_proto_oper; + __be16 ib_width_oper; + __be16 ib_speed_oper; + __be32 resrvd6; + __be32 eth_proto_lp_adv; +} __packed; + +int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, + enum mlx4_access_reg_method method, + struct mlx4_ptys_reg *ptys_reg); + #endif /* MLX4_DEVICE_H */ diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 5f4e36cf009..467ccdf94c9 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -120,13 +120,15 @@ enum { MLX4_RSS_QPC_FLAG_OFFSET = 13, }; +#define MLX4_EN_RSS_KEY_SIZE 40 + struct mlx4_rss_context { __be32 base_qpn; __be32 default_qpn; u16 reserved; u8 hash_fn; u8 flags; - __be32 rss_key[10]; + __be32 rss_key[MLX4_EN_RSS_KEY_SIZE / sizeof(__be32)]; __be32 base_qpn_udp; }; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 1d67fd32e71..ea4f1c46f76 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -219,23 +219,15 @@ enum { }; enum { - MLX5_DEV_CAP_FLAG_RC = 1LL << 0, - MLX5_DEV_CAP_FLAG_UC = 1LL << 1, - MLX5_DEV_CAP_FLAG_UD = 1LL << 2, MLX5_DEV_CAP_FLAG_XRC = 1LL << 3, - MLX5_DEV_CAP_FLAG_SRQ = 1LL << 6, MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, MLX5_DEV_CAP_FLAG_APM = 1LL << 17, MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18, MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23, - MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, - MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, MLX5_DEV_CAP_FLAG_DCT = 1LL << 37, - MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38, - MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, }; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 246310dc8be..b1bf41556b3 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -633,14 +633,6 @@ static inline void *mlx5_vzalloc(unsigned long size) return rtn; } -static inline void mlx5_vfree(const void *addr) -{ - if (addr && is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); -} - static inline u32 mlx5_base_mkey(const u32 key) { return key & 0xffffff00u; diff --git a/include/linux/mm.h b/include/linux/mm.h index b46461116cd..3b337efbe53 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -56,6 +56,17 @@ extern int sysctl_legacy_va_layout; #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) #endif +/* + * To prevent common memory management code establishing + * a zero page mapping on a read fault. + * This macro should be defined within <asm/pgtable.h>. + * s390 does this to prevent multiplexing of hardware bits + * related to the physical page in case of virtualization. 
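/*
 * Both sides of this hook fit in a few lines.  An architecture that cannot
 * tolerate the shared zero page supplies its own mm_forbids_zeropage() in
 * its <asm/pgtable.h>; everyone else gets the 0 fallback defined below.
 * The helper here is a hypothetical sketch of the generic-code check:
 */
#include <linux/mm.h>

static bool my_use_zero_page(struct vm_area_struct *vma, bool write_fault)
{
	/* only a read fault qualifies, and only if the mm does not forbid it */
	return !write_fault && !mm_forbids_zeropage(vma->vm_mm);
}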
+ */ +#ifndef mm_forbids_zeropage +#define mm_forbids_zeropage(X) (0) +#endif + extern unsigned long sysctl_user_reserve_kbytes; extern unsigned long sysctl_admin_reserve_kbytes; @@ -128,6 +139,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */ #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */ +#define VM_ARCH_2 0x02000000 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */ #ifdef CONFIG_MEM_SOFT_DIRTY @@ -155,6 +167,11 @@ extern unsigned int kobjsize(const void *objp); # define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */ #endif +#if defined(CONFIG_X86) +/* MPX specific bounds table or bounds directory */ +# define VM_MPX VM_ARCH_2 +#endif + #ifndef VM_GROWSUP # define VM_GROWSUP VM_NONE #endif diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 6e0b286649f..bf9f57529dc 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -22,6 +22,7 @@ #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) struct address_space; +struct mem_cgroup; #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ @@ -167,6 +168,10 @@ struct page { struct page *first_page; /* Compound tail pages */ }; +#ifdef CONFIG_MEMCG + struct mem_cgroup *mem_cgroup; +#endif + /* * On machines where all RAM is mapped into kernel address space, * we can simply calculate the virtual address. On machines with @@ -454,6 +459,10 @@ struct mm_struct { bool tlb_flush_pending; #endif struct uprobes_state uprobes_state; +#ifdef CONFIG_X86_INTEL_MPX + /* address of the bounds directory */ + void __user *bd_addr; +#endif }; static inline void mm_init_cpumask(struct mm_struct *mm) diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index b0692d28f8e..4d69c00497b 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -88,6 +88,9 @@ struct mmc_ext_csd { unsigned int data_tag_unit_size; /* DATA TAG UNIT size */ unsigned int boot_ro_lock; /* ro lock support */ bool boot_ro_lockable; + bool ffu_capable; /* Firmware upgrade support */ +#define MMC_FIRMWARE_LEN 8 + u8 fwrev[MMC_FIRMWARE_LEN]; /* FW version */ u8 raw_exception_status; /* 54 */ u8 raw_partition_support; /* 160 */ u8 raw_rpmb_size_mult; /* 168 */ @@ -509,24 +512,8 @@ static inline int mmc_card_broken_irq_polling(const struct mmc_card *c) #define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev) -#define mmc_list_to_card(l) container_of(l, struct mmc_card, node) -#define mmc_get_drvdata(c) dev_get_drvdata(&(c)->dev) -#define mmc_set_drvdata(c,d) dev_set_drvdata(&(c)->dev, d) - -/* - * MMC device driver (e.g., Flash card, I/O card...) 
- */ -struct mmc_driver { - struct device_driver drv; - int (*probe)(struct mmc_card *); - void (*remove)(struct mmc_card *); - int (*suspend)(struct mmc_card *); - int (*resume)(struct mmc_card *); - void (*shutdown)(struct mmc_card *); -}; - -extern int mmc_register_driver(struct mmc_driver *); -extern void mmc_unregister_driver(struct mmc_driver *); +extern int mmc_register_driver(struct device_driver *); +extern void mmc_unregister_driver(struct device_driver *); extern void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table); diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index f206e29f94d..cb2b0400d28 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -154,7 +154,8 @@ extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool, bool, bool); extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); -extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd); +extern int mmc_send_tuning(struct mmc_host *host); +extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); #define MMC_ERASE_ARG 0x00000000 #define MMC_SECURE_ERASE_ARG 0x80000000 diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index 001366927cf..42b724e8d50 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h @@ -54,6 +54,7 @@ struct mmc_data; * transfer is in progress. * @use_dma: Whether DMA channel is initialized or not. * @using_dma: Whether DMA is in use for the current transfer. + * @dma_64bit_address: Whether DMA supports 64-bit address mode or not. * @sg_dma: Bus address of DMA buffer. * @sg_cpu: Virtual address of DMA buffer. * @dma_ops: Pointer to platform-specific DMA callbacks. @@ -96,6 +97,7 @@ struct mmc_data; * @quirks: Set of quirks that apply to specific versions of the IP. * @irq_flags: The flags to be passed to request_irq. * @irq: The irq value to be passed to request_irq. + * @sdio_id0: Number of slot0 in the SDIO interrupt registers. 
* * Locking * ======= @@ -135,11 +137,11 @@ struct dw_mci { struct mmc_command stop_abort; unsigned int prev_blksz; unsigned char timing; - struct workqueue_struct *card_workqueue; /* DMA interface members*/ int use_dma; int using_dma; + int dma_64bit_address; dma_addr_t sg_dma; void *sg_cpu; @@ -154,7 +156,6 @@ struct dw_mci { u32 stop_cmdr; u32 dir_status; struct tasklet_struct tasklet; - struct work_struct card_work; unsigned long pending_events; unsigned long completed_events; enum dw_mci_state state; @@ -193,6 +194,8 @@ struct dw_mci { bool vqmmc_enabled; unsigned long irq_flags; /* IRQ flags */ int irq; + + int sdio_id0; }; /* DMA ops for Internal/External DMAC interface */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index df0c15396bb..9f322706f7c 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -289,6 +289,7 @@ struct mmc_host { #define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */ #define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \ MMC_CAP2_HS400_1_2V) +#define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V) #define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17) mmc_pm_flag_t pm_caps; /* supported pm features */ diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index 1cd00b3a75b..49ad7a94363 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -296,6 +296,7 @@ struct _mmc_csd { #define EXT_CSD_SANITIZE_START 165 /* W */ #define EXT_CSD_WR_REL_PARAM 166 /* RO */ #define EXT_CSD_RPMB_MULT 168 /* RO */ +#define EXT_CSD_FW_CONFIG 169 /* R/W */ #define EXT_CSD_BOOT_WP 173 /* R/W */ #define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */ #define EXT_CSD_PART_CONFIG 179 /* R/W */ @@ -332,6 +333,8 @@ struct _mmc_csd { #define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */ #define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ #define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */ +#define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */ +#define EXT_CSD_SUPPORTED_MODE 493 /* RO */ #define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ #define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ #define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */ diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h index dba793e3a33..375af80bde7 100644 --- a/include/linux/mmc/sdhci.h +++ b/include/linux/mmc/sdhci.h @@ -100,6 +100,12 @@ struct sdhci_host { #define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7) /* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */ #define SDHCI_QUIRK2_STOP_WITH_TC (1<<8) +/* Controller does not support 64-bit DMA */ +#define SDHCI_QUIRK2_BROKEN_64_BIT_DMA (1<<9) +/* need clear transfer mode register before send cmd */ +#define SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD (1<<10) +/* Capability register bit-63 indicates HS400 support */ +#define SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 (1<<11) int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ @@ -130,6 +136,7 @@ struct sdhci_host { #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ #define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ #define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ +#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ unsigned int version; /* SDHCI spec. 
version */ @@ -155,12 +162,19 @@ struct sdhci_host { int sg_count; /* Mapped sg entries */ - u8 *adma_desc; /* ADMA descriptor table */ - u8 *align_buffer; /* Bounce buffer */ + void *adma_table; /* ADMA descriptor table */ + void *align_buffer; /* Bounce buffer */ + + size_t adma_table_sz; /* ADMA descriptor table size */ + size_t align_buffer_sz; /* Bounce buffer size */ dma_addr_t adma_addr; /* Mapped ADMA descr. table */ dma_addr_t align_addr; /* Mapped bounce buffer */ + unsigned int desc_sz; /* ADMA descriptor size */ + unsigned int align_sz; /* ADMA alignment */ + unsigned int align_mask; /* ADMA alignment mask */ + struct tasklet_struct finish_tasklet; /* Tasklet structures */ struct timer_list timer; /* Timer for timeouts */ diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h index 50f0bc95232..aab032a6ae6 100644 --- a/include/linux/mmc/sdio_func.h +++ b/include/linux/mmc/sdio_func.h @@ -84,8 +84,6 @@ struct sdio_driver { struct device_driver drv; }; -#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv) - /** * SDIO_DEVICE - macro used to describe a specific SDIO device * @vend: the 16 bit manufacturer code diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ffe66e381c0..3879d7664df 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -722,9 +722,6 @@ typedef struct pglist_data { int nr_zones; #ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ struct page *node_mem_map; -#ifdef CONFIG_MEMCG - struct page_cgroup *node_page_cgroup; -#endif #endif #ifndef CONFIG_NO_BOOTMEM struct bootmem_data *bdata; @@ -1078,7 +1075,6 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn) #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) struct page; -struct page_cgroup; struct mem_section { /* * This is, logically, a pointer to an array of struct @@ -1096,14 +1092,6 @@ struct mem_section { /* See declaration of similar field in struct zone */ unsigned long *pageblock_flags; -#ifdef CONFIG_MEMCG - /* - * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use - * section. (see memcontrol.h/page_cgroup.h about this.) - */ - struct page_cgroup *page_cgroup; - unsigned long pad; -#endif /* * WARNING: mem_section must be a power-of-2 in size for the * calculation and use of SECTION_ROOT_MASK to make sense. diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 44eeef0da18..745def86258 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -69,7 +69,7 @@ struct ieee1394_device_id { * @bDeviceClass: Class of device; numbers are assigned * by the USB forum. Products may choose to implement classes, * or be vendor-specific. Device classes specify behavior of all - * the interfaces on a devices. + * the interfaces on a device. * @bDeviceSubClass: Subclass of device; associated with bDeviceClass. * @bDeviceProtocol: Protocol of device; associated with bDeviceClass. 
* @bInterfaceClass: Class of interface; numbers are assigned diff --git a/include/linux/msi.h b/include/linux/msi.h index 44f4746d033..8ac4a68ffae 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -10,17 +10,12 @@ struct msi_msg { u32 data; /* 16 bits of msi message data */ }; +extern int pci_msi_ignore_mask; /* Helper functions */ struct irq_data; struct msi_desc; -void mask_msi_irq(struct irq_data *data); -void unmask_msi_irq(struct irq_data *data); -void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); -void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); -void read_msi_msg(unsigned int irq, struct msi_msg *msg); void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); -void write_msi_msg(unsigned int irq, struct msi_msg *msg); struct msi_desc { struct { @@ -48,6 +43,52 @@ struct msi_desc { struct msi_msg msg; }; +/* Helpers to hide struct msi_desc implementation details */ +#define msi_desc_to_dev(desc) (&(desc)->dev.dev) +#define dev_to_msi_list(dev) (&to_pci_dev((dev))->msi_list) +#define first_msi_entry(dev) \ + list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list) +#define for_each_msi_entry(desc, dev) \ + list_for_each_entry((desc), dev_to_msi_list((dev)), list) + +#ifdef CONFIG_PCI_MSI +#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev) +#define for_each_pci_msi_entry(desc, pdev) \ + for_each_msi_entry((desc), &(pdev)->dev) + +static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) +{ + return desc->dev; +} +#endif /* CONFIG_PCI_MSI */ + +void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); +void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); +void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg); + +u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); +u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); +void pci_msi_mask_irq(struct irq_data *data); +void pci_msi_unmask_irq(struct irq_data *data); + +/* Conversion helpers. Should be removed after merging */ +static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) +{ + __pci_write_msi_msg(entry, msg); +} +static inline void write_msi_msg(int irq, struct msi_msg *msg) +{ + pci_write_msi_msg(irq, msg); +} +static inline void mask_msi_irq(struct irq_data *data) +{ + pci_msi_mask_irq(data); +} +static inline void unmask_msi_irq(struct irq_data *data) +{ + pci_msi_unmask_irq(data); +} + /* * The arch hooks to setup up msi irqs. 
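[Editorial sketch, not from the patch: the msi.h hunk above introduces per-device msi_desc iteration helpers and the renamed PCI message accessors. A minimal illustration, with the dump helper name invented:]

#include <linux/pci.h>
#include <linux/msi.h>

/* Walk every MSI descriptor of a PCI device and print its message. */
static void my_dump_msi_msgs(struct pci_dev *pdev)
{
	struct msi_desc *desc;

	for_each_pci_msi_entry(desc, pdev) {
		struct msi_msg msg;

		__pci_read_msi_msg(desc, &msg);
		dev_info(&pdev->dev, "MSI: addr %#x:%#x data %#x\n",
			 msg.address_hi, msg.address_lo, msg.data);
	}
}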
Those functions are * implemented as weak symbols so that they /can/ be overriden by @@ -61,18 +102,142 @@ void arch_restore_msi_irqs(struct pci_dev *dev); void default_teardown_msi_irqs(struct pci_dev *dev); void default_restore_msi_irqs(struct pci_dev *dev); -u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); -u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag); -struct msi_chip { +struct msi_controller { struct module *owner; struct device *dev; struct device_node *of_node; struct list_head list; +#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN + struct irq_domain *domain; +#endif - int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev, + int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, struct msi_desc *desc); - void (*teardown_irq)(struct msi_chip *chip, unsigned int irq); + void (*teardown_irq)(struct msi_controller *chip, unsigned int irq); +}; + +#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN + +#include <linux/irqhandler.h> +#include <asm/msi.h> + +struct irq_domain; +struct irq_chip; +struct device_node; +struct msi_domain_info; + +/** + * struct msi_domain_ops - MSI interrupt domain callbacks + * @get_hwirq: Retrieve the resulting hw irq number + * @msi_init: Domain specific init function for MSI interrupts + * @msi_free: Domain specific function to free a MSI interrupts + * @msi_check: Callback for verification of the domain/info/dev data + * @msi_prepare: Prepare the allocation of the interrupts in the domain + * @msi_finish: Optional callbacl to finalize the allocation + * @set_desc: Set the msi descriptor for an interrupt + * @handle_error: Optional error handler if the allocation fails + * + * @get_hwirq, @msi_init and @msi_free are callbacks used by + * msi_create_irq_domain() and related interfaces + * + * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error + * are callbacks used by msi_irq_domain_alloc_irqs() and related + * interfaces which are based on msi_desc. 
+ */ +struct msi_domain_ops { + irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info, + msi_alloc_info_t *arg); + int (*msi_init)(struct irq_domain *domain, + struct msi_domain_info *info, + unsigned int virq, irq_hw_number_t hwirq, + msi_alloc_info_t *arg); + void (*msi_free)(struct irq_domain *domain, + struct msi_domain_info *info, + unsigned int virq); + int (*msi_check)(struct irq_domain *domain, + struct msi_domain_info *info, + struct device *dev); + int (*msi_prepare)(struct irq_domain *domain, + struct device *dev, int nvec, + msi_alloc_info_t *arg); + void (*msi_finish)(msi_alloc_info_t *arg, int retval); + void (*set_desc)(msi_alloc_info_t *arg, + struct msi_desc *desc); + int (*handle_error)(struct irq_domain *domain, + struct msi_desc *desc, int error); +}; + +/** + * struct msi_domain_info - MSI interrupt domain data + * @flags: Flags to decribe features and capabilities + * @ops: The callback data structure + * @chip: Optional: associated interrupt chip + * @chip_data: Optional: associated interrupt chip data + * @handler: Optional: associated interrupt flow handler + * @handler_data: Optional: associated interrupt flow handler data + * @handler_name: Optional: associated interrupt flow handler name + * @data: Optional: domain specific data + */ +struct msi_domain_info { + u32 flags; + struct msi_domain_ops *ops; + struct irq_chip *chip; + void *chip_data; + irq_flow_handler_t handler; + void *handler_data; + const char *handler_name; + void *data; +}; + +/* Flags for msi_domain_info */ +enum { + /* + * Init non implemented ops callbacks with default MSI domain + * callbacks. + */ + MSI_FLAG_USE_DEF_DOM_OPS = (1 << 0), + /* + * Init non implemented chip callbacks with default MSI chip + * callbacks. + */ + MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1), + /* Build identity map between hwirq and irq */ + MSI_FLAG_IDENTITY_MAP = (1 << 2), + /* Support multiple PCI MSI interrupts */ + MSI_FLAG_MULTI_PCI_MSI = (1 << 3), + /* Support PCI MSIX interrupts */ + MSI_FLAG_PCI_MSIX = (1 << 4), }; +int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, + bool force); + +struct irq_domain *msi_create_irq_domain(struct device_node *of_node, + struct msi_domain_info *info, + struct irq_domain *parent); +int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, + int nvec); +void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); +struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); + +#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ + +#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN +void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg); +struct irq_domain *pci_msi_create_irq_domain(struct device_node *node, + struct msi_domain_info *info, + struct irq_domain *parent); +int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, + int nvec, int type); +void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev); +struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node, + struct msi_domain_info *info, struct irq_domain *parent); + +irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, + struct msi_desc *desc); +int pci_msi_domain_check_cap(struct irq_domain *domain, + struct msi_domain_info *info, struct device *dev); +#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ + #endif /* LINUX_MSI_H */ diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index dcfdecbfa0b..8e30685affe 100644 --- a/include/linux/netdev_features.h +++ 
b/include/linux/netdev_features.h @@ -47,9 +47,9 @@ enum { NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */ NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ - NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */ + NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ - NETIF_F_GSO_MPLS_BIT, + NETIF_F_GSO_TUNNEL_REMCSUM_BIT, NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ @@ -118,7 +118,7 @@ enum { #define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) -#define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS) +#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) @@ -181,7 +181,6 @@ enum { NETIF_F_GSO_IPIP | \ NETIF_F_GSO_SIT | \ NETIF_F_GSO_UDP_TUNNEL | \ - NETIF_F_GSO_UDP_TUNNEL_CSUM | \ - NETIF_F_GSO_MPLS) + NETIF_F_GSO_UDP_TUNNEL_CSUM) #endif /* _LINUX_NETDEV_FEATURES_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 74fd5d37f15..c31f74d76eb 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -57,6 +57,8 @@ struct device; struct phy_device; /* 802.11 specific */ struct wireless_dev; +/* 802.15.4 specific */ +struct wpan_dev; void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops); @@ -314,6 +316,7 @@ struct napi_struct { struct net_device *dev; struct sk_buff *gro_list; struct sk_buff *skb; + struct hrtimer timer; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; @@ -386,6 +389,7 @@ typedef enum rx_handler_result rx_handler_result_t; typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); void __napi_schedule(struct napi_struct *n); +void __napi_schedule_irqoff(struct napi_struct *n); static inline bool napi_disable_pending(struct napi_struct *n) { @@ -420,6 +424,18 @@ static inline void napi_schedule(struct napi_struct *n) __napi_schedule(n); } +/** + * napi_schedule_irqoff - schedule NAPI poll + * @n: napi context + * + * Variant of napi_schedule(), assuming hard irqs are masked. + */ +static inline void napi_schedule_irqoff(struct napi_struct *n) +{ + if (napi_schedule_prep(n)) + __napi_schedule_irqoff(n); +} + /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ static inline bool napi_reschedule(struct napi_struct *napi) { @@ -430,14 +446,19 @@ static inline bool napi_reschedule(struct napi_struct *napi) return false; } +void __napi_complete(struct napi_struct *n); +void napi_complete_done(struct napi_struct *n, int work_done); /** * napi_complete - NAPI processing complete * @n: napi context * * Mark NAPI processing as complete. + * Consider using napi_complete_done() instead. */ -void __napi_complete(struct napi_struct *n); -void napi_complete(struct napi_struct *n); +static inline void napi_complete(struct napi_struct *n) +{ + return napi_complete_done(n, 0); +} /** * napi_by_id - lookup a NAPI by napi_id @@ -472,14 +493,7 @@ void napi_hash_del(struct napi_struct *napi); * Stop NAPI from being scheduled on this context. * Waits till any outstanding processing completes. 
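[Editorial sketch, not part of the patch: how a network driver would pair the new napi_schedule_irqoff()/napi_complete_done() helpers added above. The private structure, ISR and RX-clean routine are assumptions for illustration only.]

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;
	/* ... device state ... */
};

static int my_clean_rx(struct my_priv *priv, int budget)
{
	/* process up to 'budget' packets, return how many were handled */
	return 0;
}

static irqreturn_t my_isr(int irq, void *data)
{
	struct my_priv *priv = data;

	/* hard interrupts are already masked here, so the _irqoff
	 * variant avoids a redundant local_irq_save()/restore(). */
	napi_schedule_irqoff(&priv->napi);
	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int done = my_clean_rx(priv, budget);

	if (done < budget)
		napi_complete_done(napi, done);	/* reports work done */
	return done;
}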
*/ -static inline void napi_disable(struct napi_struct *n) -{ - might_sleep(); - set_bit(NAPI_STATE_DISABLE, &n->state); - while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) - msleep(1); - clear_bit(NAPI_STATE_DISABLE, &n->state); -} +void napi_disable(struct napi_struct *n); /** * napi_enable - enable NAPI scheduling @@ -740,13 +754,13 @@ struct netdev_fcoe_hbainfo { }; #endif -#define MAX_PHYS_PORT_ID_LEN 32 +#define MAX_PHYS_ITEM_ID_LEN 32 -/* This structure holds a unique identifier to identify the - * physical port used by a netdevice. +/* This structure holds a unique identifier to identify some + * physical item (port for example) used by a netdevice. */ -struct netdev_phys_port_id { - unsigned char id[MAX_PHYS_PORT_ID_LEN]; +struct netdev_phys_item_id { + unsigned char id[MAX_PHYS_ITEM_ID_LEN]; unsigned char id_len; }; @@ -937,11 +951,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], * struct net_device *dev, - * const unsigned char *addr, u16 flags) + * const unsigned char *addr, u16 vid, u16 flags) * Adds an FDB entry to dev for addr. * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], * struct net_device *dev, - * const unsigned char *addr) + * const unsigned char *addr, u16 vid) * Deletes the FDB entry from dev coresponding to addr. * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, * struct net_device *dev, struct net_device *filter_dev, @@ -962,7 +976,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. * * int (*ndo_get_phys_port_id)(struct net_device *dev, - * struct netdev_phys_port_id *ppid); + * struct netdev_phys_item_id *ppid); * Called to get ID of physical port of this device. If driver does * not implement this, it is assumed that the hw is not able to have * multiple net devices on single physical port. @@ -1004,6 +1018,15 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * performing GSO on a packet. The device returns true if it is * able to GSO the packet, false otherwise. If the return value is * false the stack will do software GSO. + * + * int (*ndo_switch_parent_id_get)(struct net_device *dev, + * struct netdev_phys_item_id *psid); + * Called to get an ID of the switch chip this port is part of. + * If driver implements this, it indicates that it represents a port + * of a switch chip. + * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state); + * Called to notify switch device port of bridge port STP + * state change. 
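[Editorial sketch: the kernel-doc above describes the new switchdev ndo and the netdev_phys_item_id rename. A hedged example of a port driver implementing the parent-ID callback; the private struct and 64-bit switch ID are invented for illustration.]

#include <linux/netdevice.h>
#include <linux/string.h>

struct my_port {
	u64 switch_id;		/* shared by all ports of one switch chip */
};

static int my_switch_parent_id_get(struct net_device *dev,
				   struct netdev_phys_item_id *psid)
{
	struct my_port *port = netdev_priv(dev);

	BUILD_BUG_ON(sizeof(port->switch_id) > MAX_PHYS_ITEM_ID_LEN);
	psid->id_len = sizeof(port->switch_id);
	memcpy(psid->id, &port->switch_id, psid->id_len);
	return 0;
}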
*/ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1114,11 +1137,13 @@ struct net_device_ops { struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, + u16 vid, u16 flags); int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr); + const unsigned char *addr, + u16 vid); int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, @@ -1136,7 +1161,7 @@ struct net_device_ops { int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); int (*ndo_get_phys_port_id)(struct net_device *dev, - struct netdev_phys_port_id *ppid); + struct netdev_phys_item_id *ppid); void (*ndo_add_vxlan_port)(struct net_device *dev, sa_family_t sa_family, __be16 port); @@ -1155,6 +1180,12 @@ struct net_device_ops { int (*ndo_get_lock_subclass)(struct net_device *dev); bool (*ndo_gso_check) (struct sk_buff *skb, struct net_device *dev); +#ifdef CONFIG_NET_SWITCHDEV + int (*ndo_switch_parent_id_get)(struct net_device *dev, + struct netdev_phys_item_id *psid); + int (*ndo_switch_port_stp_update)(struct net_device *dev, + u8 state); +#endif }; /** @@ -1216,6 +1247,8 @@ enum netdev_priv_flags { IFF_LIVE_ADDR_CHANGE = 1<<20, IFF_MACVLAN = 1<<21, IFF_XMIT_DST_RELEASE_PERM = 1<<22, + IFF_IPVLAN_MASTER = 1<<23, + IFF_IPVLAN_SLAVE = 1<<24, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1241,6 +1274,8 @@ enum netdev_priv_flags { #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE #define IFF_MACVLAN IFF_MACVLAN #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM +#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER +#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE /** * struct net_device - The DEVICE structure. @@ -1572,6 +1607,7 @@ struct net_device { struct inet6_dev __rcu *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; + struct wpan_dev *ieee802154_ptr; /* * Cache lines mostly used on receive path (including eth_type_trans()) @@ -1590,6 +1626,7 @@ struct net_device { #endif + unsigned long gro_flush_timeout; rx_handler_func_t __rcu *rx_handler; void __rcu *rx_handler_data; @@ -2316,10 +2353,7 @@ extern int netdev_flow_limit_table_len; * Incoming packets are placed on per-cpu queues */ struct softnet_data { - struct Qdisc *output_queue; - struct Qdisc **output_queue_tailp; struct list_head poll_list; - struct sk_buff *completion_queue; struct sk_buff_head process_queue; /* stats */ @@ -2327,10 +2361,17 @@ struct softnet_data { unsigned int time_squeeze; unsigned int cpu_collision; unsigned int received_rps; - #ifdef CONFIG_RPS struct softnet_data *rps_ipi_list; +#endif +#ifdef CONFIG_NET_FLOW_LIMIT + struct sd_flow_limit __rcu *flow_limit; +#endif + struct Qdisc *output_queue; + struct Qdisc **output_queue_tailp; + struct sk_buff *completion_queue; +#ifdef CONFIG_RPS /* Elements below can be accessed between CPUs for RPS */ struct call_single_data csd ____cacheline_aligned_in_smp; struct softnet_data *rps_ipi_next; @@ -2342,9 +2383,6 @@ struct softnet_data { struct sk_buff_head input_pkt_queue; struct napi_struct backlog; -#ifdef CONFIG_NET_FLOW_LIMIT - struct sd_flow_limit __rcu *flow_limit; -#endif }; static inline void input_queue_head_incr(struct softnet_data *sd) @@ -2748,23 +2786,6 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev, } #endif -static inline int netif_copy_real_num_queues(struct net_device *to_dev, - const struct net_device *from_dev) -{ - int err; - - err = netif_set_real_num_tx_queues(to_dev, - from_dev->real_num_tx_queues); - if (err) - 
return err; -#ifdef CONFIG_SYSFS - return netif_set_real_num_rx_queues(to_dev, - from_dev->real_num_rx_queues); -#else - return 0; -#endif -} - #ifdef CONFIG_SYSFS static inline unsigned int get_netdev_rx_queue_index( struct netdev_rx_queue *queue) @@ -2864,7 +2885,7 @@ void dev_set_group(struct net_device *, int); int dev_set_mac_address(struct net_device *, struct sockaddr *); int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, - struct netdev_phys_port_id *ppid); + struct netdev_phys_item_id *ppid); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); @@ -3425,6 +3446,12 @@ void netdev_upper_dev_unlink(struct net_device *dev, void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev); + +/* RSS keys are 40 or 52 bytes long */ +#define NETDEV_RSS_KEY_LEN 52 +extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN]; +void netdev_rss_key_fill(void *buffer, size_t len); + int dev_get_nest_level(struct net_device *dev, bool (*type_check)(struct net_device *dev)); int skb_checksum_help(struct sk_buff *skb); @@ -3569,7 +3596,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); return (features & feature) == feature; } @@ -3614,6 +3641,21 @@ static inline bool netif_is_macvlan(struct net_device *dev) return dev->priv_flags & IFF_MACVLAN; } +static inline bool netif_is_macvlan_port(struct net_device *dev) +{ + return dev->priv_flags & IFF_MACVLAN_PORT; +} + +static inline bool netif_is_ipvlan(struct net_device *dev) +{ + return dev->priv_flags & IFF_IPVLAN_SLAVE; +} + +static inline bool netif_is_ipvlan_port(struct net_device *dev) +{ + return dev->priv_flags & IFF_IPVLAN_MASTER; +} + static inline bool netif_is_bond_master(struct net_device *dev) { return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 356acc2846f..022b761dbf0 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -490,6 +490,8 @@ enum { /* nfs42 */ NFSPROC4_CLNT_SEEK, + NFSPROC4_CLNT_ALLOCATE, + NFSPROC4_CLNT_DEALLOCATE, }; /* nfs41 types */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index c72d1ad41ad..6d627b92df5 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -163,7 +163,7 @@ struct nfs_inode { */ __be32 cookieverf[2]; - unsigned long npages; + unsigned long nrequests; struct nfs_mds_commit_info commit_info; /* Open contexts for shared mmap writes */ @@ -520,7 +520,7 @@ extern void nfs_commit_free(struct nfs_commit_data *data); static inline int nfs_have_writebacks(struct inode *inode) { - return NFS_I(inode)->npages != 0; + return NFS_I(inode)->nrequests != 0; } /* diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index a32ba0d7a98..1e37fbb78f7 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -231,5 +231,7 @@ struct nfs_server { #define 
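[Editorial sketch, not from the patch: netdev_rss_key_fill() and NETDEV_RSS_KEY_LEN, added above, give drivers one boot-time random RSS key. The setup helper name is an assumption.]

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static void my_seed_rss_key(u8 *hw_key, unsigned int hw_key_len)
{
	u8 key[NETDEV_RSS_KEY_LEN];

	/* one system-wide random key, shared by all devices */
	netdev_rss_key_fill(key, sizeof(key));
	memcpy(hw_key, key, min_t(unsigned int, hw_key_len, sizeof(key)));
}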
NFS_CAP_ATOMIC_OPEN_V1 (1U << 17) #define NFS_CAP_SECURITY_LABEL (1U << 18) #define NFS_CAP_SEEK (1U << 19) +#define NFS_CAP_ALLOCATE (1U << 20) +#define NFS_CAP_DEALLOCATE (1U << 21) #endif diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 47ebb4fafd8..467c84efb59 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1243,6 +1243,20 @@ nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) #endif /* CONFIG_NFS_V4_1 */ #ifdef CONFIG_NFS_V4_2 +struct nfs42_falloc_args { + struct nfs4_sequence_args seq_args; + + struct nfs_fh *falloc_fh; + nfs4_stateid falloc_stateid; + u64 falloc_offset; + u64 falloc_length; +}; + +struct nfs42_falloc_res { + struct nfs4_sequence_res seq_res; + unsigned int status; +}; + struct nfs42_seek_args { struct nfs4_sequence_args seq_args; diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h index 20163b9a0ea..167342c2ce6 100644 --- a/include/linux/nl802154.h +++ b/include/linux/nl802154.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * */ #ifndef NL802154_H diff --git a/include/linux/of.h b/include/linux/of.h index 29f0adc5f3e..dfde07e77a6 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -23,6 +23,8 @@ #include <linux/spinlock.h> #include <linux/topology.h> #include <linux/notifier.h> +#include <linux/property.h> +#include <linux/list.h> #include <asm/byteorder.h> #include <asm/errno.h> @@ -49,14 +51,13 @@ struct device_node { const char *type; phandle phandle; const char *full_name; + struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; /* removed properties */ struct device_node *parent; struct device_node *child; struct device_node *sibling; - struct device_node *next; /* next device of same type */ - struct device_node *allnext; /* next in list of all nodes */ struct kobject kobj; unsigned long _flags; void *data; @@ -74,11 +75,18 @@ struct of_phandle_args { uint32_t args[MAX_PHANDLE_ARGS]; }; +struct of_reconfig_data { + struct device_node *dn; + struct property *prop; + struct property *old_prop; +}; + /* initialize a node */ extern struct kobj_type of_node_ktype; static inline void of_node_init(struct device_node *node) { kobject_init(&node->kobj, &of_node_ktype); + node->fwnode.type = FWNODE_OF; } /* true when node is initialized */ @@ -105,18 +113,27 @@ static inline struct device_node *of_node_get(struct device_node *node) static inline void of_node_put(struct device_node *node) { } #endif /* !CONFIG_OF_DYNAMIC */ -#ifdef CONFIG_OF - /* Pointer for first entry in chain of all nodes. */ -extern struct device_node *of_allnodes; +extern struct device_node *of_root; extern struct device_node *of_chosen; extern struct device_node *of_aliases; extern struct device_node *of_stdout; extern raw_spinlock_t devtree_lock; +#ifdef CONFIG_OF +static inline bool is_of_node(struct fwnode_handle *fwnode) +{ + return fwnode && fwnode->type == FWNODE_OF; +} + +static inline struct device_node *of_node(struct fwnode_handle *fwnode) +{ + return fwnode ? 
container_of(fwnode, struct device_node, fwnode) : NULL; +} + static inline bool of_have_populated_dt(void) { - return of_allnodes != NULL; + return of_root != NULL; } static inline bool of_node_is_root(const struct device_node *node) @@ -160,6 +177,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag clear_bit(flag, &p->_flags); } +extern struct device_node *__of_find_all_nodes(struct device_node *prev); extern struct device_node *of_find_all_nodes(struct device_node *prev); /* @@ -215,8 +233,9 @@ static inline const char *of_node_full_name(const struct device_node *np) return np ? np->full_name : "<no-node>"; } -#define for_each_of_allnodes(dn) \ - for (dn = of_allnodes; dn; dn = dn->allnext) +#define for_each_of_allnodes_from(from, dn) \ + for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn)) +#define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn) extern struct device_node *of_find_node_by_name(struct device_node *from, const char *name); extern struct device_node *of_find_node_by_type(struct device_node *from, @@ -228,7 +247,13 @@ extern struct device_node *of_find_matching_node_and_match( const struct of_device_id *matches, const struct of_device_id **match); -extern struct device_node *of_find_node_by_path(const char *path); +extern struct device_node *of_find_node_opts_by_path(const char *path, + const char **opts); +static inline struct device_node *of_find_node_by_path(const char *path) +{ + return of_find_node_opts_by_path(path, NULL); +} + extern struct device_node *of_find_node_by_phandle(phandle handle); extern struct device_node *of_get_parent(const struct device_node *node); extern struct device_node *of_get_next_parent(struct device_node *node); @@ -263,6 +288,10 @@ extern int of_property_read_u32_array(const struct device_node *np, size_t sz); extern int of_property_read_u64(const struct device_node *np, const char *propname, u64 *out_value); +extern int of_property_read_u64_array(const struct device_node *np, + const char *propname, + u64 *out_values, + size_t sz); extern int of_property_read_string(struct device_node *np, const char *propname, @@ -275,7 +304,7 @@ extern int of_property_read_string_helper(struct device_node *np, const char **out_strs, size_t sz, int index); extern int of_device_is_compatible(const struct device_node *device, const char *); -extern int of_device_is_available(const struct device_node *device); +extern bool of_device_is_available(const struct device_node *device); extern const void *of_get_property(const struct device_node *node, const char *name, int *lenp); @@ -317,16 +346,6 @@ extern int of_update_property(struct device_node *np, struct property *newprop); #define OF_RECONFIG_REMOVE_PROPERTY 0x0004 #define OF_RECONFIG_UPDATE_PROPERTY 0x0005 -struct of_prop_reconfig { - struct device_node *dn; - struct property *prop; - struct property *old_prop; -}; - -extern int of_reconfig_notifier_register(struct notifier_block *); -extern int of_reconfig_notifier_unregister(struct notifier_block *); -extern int of_reconfig_notify(unsigned long, void *); - extern int of_attach_node(struct device_node *); extern int of_detach_node(struct device_node *); @@ -355,6 +374,16 @@ bool of_console_check(struct device_node *dn, char *name, int index); #else /* CONFIG_OF */ +static inline bool is_of_node(struct fwnode_handle *fwnode) +{ + return false; +} + +static inline struct device_node *of_node(struct fwnode_handle *fwnode) +{ + return NULL; +} + static inline const char* of_node_full_name(const 
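[Editorial sketch: the of.h hunk above adds of_property_read_u64_array() and the options-aware path lookup. A short illustration with made-up property and alias names:]

#include <linux/of.h>

static int my_parse_node(struct device_node *np)
{
	struct device_node *con;
	const char *opts = NULL;
	u64 window[2];
	int ret;

	ret = of_property_read_u64_array(np, "example-window", window,
					 ARRAY_SIZE(window));
	if (ret)
		return ret;

	/* "serial0:115200n8": 'opts' ends up pointing past the colon */
	con = of_find_node_opts_by_path("serial0:115200n8", &opts);
	if (con)
		of_node_put(con);
	return 0;
}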
struct device_node *np) { return "<no-node>"; @@ -385,6 +414,12 @@ static inline struct device_node *of_find_node_by_path(const char *path) return NULL; } +static inline struct device_node *of_find_node_opts_by_path(const char *path, + const char **opts) +{ + return NULL; +} + static inline struct device_node *of_get_parent(const struct device_node *node) { return NULL; @@ -426,9 +461,9 @@ static inline int of_device_is_compatible(const struct device_node *device, return 0; } -static inline int of_device_is_available(const struct device_node *device) +static inline bool of_device_is_available(const struct device_node *device) { - return 0; + return false; } static inline struct property *of_find_property(const struct device_node *np, @@ -477,6 +512,13 @@ static inline int of_property_read_u32_array(const struct device_node *np, return -ENOSYS; } +static inline int of_property_read_u64_array(const struct device_node *np, + const char *propname, + u64 *out_values, size_t sz) +{ + return -ENOSYS; +} + static inline int of_property_read_string(struct device_node *np, const char *propname, const char **out_string) @@ -760,6 +802,13 @@ static inline int of_property_read_u32(const struct device_node *np, return of_property_read_u32_array(np, propname, out_value, 1); } +static inline int of_property_read_s32(const struct device_node *np, + const char *propname, + s32 *out_value) +{ + return of_property_read_u32(np, propname, (u32*) out_value); +} + #define of_property_for_each_u32(np, propname, prop, p, u) \ for (prop = of_find_property(np, propname, NULL), \ p = of_prop_next_u32(prop, NULL, &u); \ @@ -828,7 +877,7 @@ static inline int of_get_available_child_count(const struct device_node *np) = { .compatible = compat, \ .data = (fn == (fn_type)NULL) ? fn : fn } #else -#define _OF_DECLARE(table, name, compat, fn, fn_type) \ +#define _OF_DECLARE(table, name, compat, fn, fn_type) \ static const struct of_device_id __of_table_##name \ __attribute__((unused)) \ = { .compatible = compat, \ @@ -879,7 +928,19 @@ struct of_changeset { struct list_head entries; }; +enum of_reconfig_change { + OF_RECONFIG_NO_CHANGE = 0, + OF_RECONFIG_CHANGE_ADD, + OF_RECONFIG_CHANGE_REMOVE, +}; + #ifdef CONFIG_OF_DYNAMIC +extern int of_reconfig_notifier_register(struct notifier_block *); +extern int of_reconfig_notifier_unregister(struct notifier_block *); +extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd); +extern int of_reconfig_get_state_change(unsigned long action, + struct of_reconfig_data *arg); + extern void of_changeset_init(struct of_changeset *ocs); extern void of_changeset_destroy(struct of_changeset *ocs); extern int of_changeset_apply(struct of_changeset *ocs); @@ -917,9 +978,69 @@ static inline int of_changeset_update_property(struct of_changeset *ocs, { return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop); } -#endif +#else /* CONFIG_OF_DYNAMIC */ +static inline int of_reconfig_notifier_register(struct notifier_block *nb) +{ + return -EINVAL; +} +static inline int of_reconfig_notifier_unregister(struct notifier_block *nb) +{ + return -EINVAL; +} +static inline int of_reconfig_notify(unsigned long action, + struct of_reconfig_data *arg) +{ + return -EINVAL; +} +static inline int of_reconfig_get_state_change(unsigned long action, + struct of_reconfig_data *arg) +{ + return -EINVAL; +} +#endif /* CONFIG_OF_DYNAMIC */ /* CONFIG_OF_RESOLVE api */ extern int of_resolve_phandles(struct device_node *tree); +/** + * of_device_is_system_power_controller - Tells if 
system-power-controller is found for device_node + * @np: Pointer to the given device_node + * + * return true if present false otherwise + */ +static inline bool of_device_is_system_power_controller(const struct device_node *np) +{ + return of_property_read_bool(np, "system-power-controller"); +} + +/** + * Overlay support + */ + +#ifdef CONFIG_OF_OVERLAY + +/* ID based overlays; the API for external users */ +int of_overlay_create(struct device_node *tree); +int of_overlay_destroy(int id); +int of_overlay_destroy_all(void); + +#else + +static inline int of_overlay_create(struct device_node *tree) +{ + return -ENOTSUPP; +} + +static inline int of_overlay_destroy(int id) +{ + return -ENOTSUPP; +} + +static inline int of_overlay_destroy_all(void) +{ + return -ENOTSUPP; +} + +#endif + #endif /* _LINUX_OF_H */ diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 8cb14eb393d..d88e81be636 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -106,7 +106,7 @@ extern int of_address_to_resource(struct device_node *dev, int index, struct resource *r); void __iomem *of_iomap(struct device_node *node, int index); void __iomem *of_io_request_and_map(struct device_node *device, - int index, char *name); + int index, const char *name); #else #include <linux/io.h> @@ -123,7 +123,7 @@ static inline void __iomem *of_iomap(struct device_node *device, int index) } static inline void __iomem *of_io_request_and_map(struct device_node *device, - int index, char *name) + int index, const char *name) { return IOMEM_ERR_PTR(-EINVAL); } diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index 1fd207e7a84..ce0e5abeb45 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h @@ -59,13 +59,13 @@ int of_pci_get_host_bridge_resources(struct device_node *dev, #endif #if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI) -int of_pci_msi_chip_add(struct msi_chip *chip); -void of_pci_msi_chip_remove(struct msi_chip *chip); -struct msi_chip *of_pci_find_msi_chip_by_node(struct device_node *of_node); +int of_pci_msi_chip_add(struct msi_controller *chip); +void of_pci_msi_chip_remove(struct msi_controller *chip); +struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node); #else -static inline int of_pci_msi_chip_add(struct msi_chip *chip) { return -EINVAL; } -static inline void of_pci_msi_chip_remove(struct msi_chip *chip) { } -static inline struct msi_chip * +static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return -EINVAL; } +static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { } +static inline struct msi_controller * of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; } #endif diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h index c65a18a0cfd..7e09244bb67 100644 --- a/include/linux/of_pdt.h +++ b/include/linux/of_pdt.h @@ -39,7 +39,6 @@ extern void *prom_early_alloc(unsigned long size); /* for building the device tree */ extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); -extern void (*of_pdt_build_more)(struct device_node *dp, - struct device_node ***nextp); +extern void (*of_pdt_build_more)(struct device_node *dp); #endif /* _LINUX_OF_PDT_H */ diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index c2b0627a231..8a860f096c3 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -84,4 +84,10 @@ static inline int of_platform_populate(struct device_node *root, static inline void 
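[Editorial sketch of the new ID-based overlay API declared above; the caller is assumed to already hold an unflattened overlay tree, and the helper name is invented.]

#include <linux/of.h>

static int my_apply_and_remove_overlay(struct device_node *overlay_root)
{
	int ovcs_id;

	ovcs_id = of_overlay_create(overlay_root);	/* applies the overlay */
	if (ovcs_id < 0)
		return ovcs_id;

	/* ... use the devices created by the overlay ... */

	return of_overlay_destroy(ovcs_id);		/* removes it again */
}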
of_platform_depopulate(struct device *parent) { } #endif +#ifdef CONFIG_OF_DYNAMIC +extern void of_platform_register_reconfig_notifier(void); +#else +static inline void of_platform_register_reconfig_notifier(void) { } +#endif + #endif /* _LINUX_OF_PLATFORM_H */ diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h new file mode 100644 index 00000000000..c2080eebbb4 --- /dev/null +++ b/include/linux/omap-gpmc.h @@ -0,0 +1,199 @@ +/* + * OMAP GPMC (General Purpose Memory Controller) defines + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/* Maximum Number of Chip Selects */ +#define GPMC_CS_NUM 8 + +#define GPMC_CONFIG_WP 0x00000005 + +#define GPMC_IRQ_FIFOEVENTENABLE 0x01 +#define GPMC_IRQ_COUNT_EVENT 0x02 + +#define GPMC_BURST_4 4 /* 4 word burst */ +#define GPMC_BURST_8 8 /* 8 word burst */ +#define GPMC_BURST_16 16 /* 16 word burst */ +#define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */ +#define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */ +#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */ +#define GPMC_MUX_AD 2 /* Addr-Data multiplex */ + +/* bool type time settings */ +struct gpmc_bool_timings { + bool cycle2cyclediffcsen; + bool cycle2cyclesamecsen; + bool we_extra_delay; + bool oe_extra_delay; + bool adv_extra_delay; + bool cs_extra_delay; + bool time_para_granularity; +}; + +/* + * Note that all values in this struct are in nanoseconds except sync_clk + * (which is in picoseconds), while the register values are in gpmc_fck cycles. + */ +struct gpmc_timings { + /* Minimum clock period for synchronous mode (in picoseconds) */ + u32 sync_clk; + + /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */ + u32 cs_on; /* Assertion time */ + u32 cs_rd_off; /* Read deassertion time */ + u32 cs_wr_off; /* Write deassertion time */ + + /* ADV signal timings corresponding to GPMC_CONFIG3 */ + u32 adv_on; /* Assertion time */ + u32 adv_rd_off; /* Read deassertion time */ + u32 adv_wr_off; /* Write deassertion time */ + + /* WE signals timings corresponding to GPMC_CONFIG4 */ + u32 we_on; /* WE assertion time */ + u32 we_off; /* WE deassertion time */ + + /* OE signals timings corresponding to GPMC_CONFIG4 */ + u32 oe_on; /* OE assertion time */ + u32 oe_off; /* OE deassertion time */ + + /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */ + u32 page_burst_access; /* Multiple access word delay */ + u32 access; /* Start-cycle to first data valid delay */ + u32 rd_cycle; /* Total read cycle time */ + u32 wr_cycle; /* Total write cycle time */ + + u32 bus_turnaround; + u32 cycle2cycle_delay; + + u32 wait_monitoring; + u32 clk_activation; + + /* The following are only on OMAP3430 */ + u32 wr_access; /* WRACCESSTIME */ + u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */ + + struct gpmc_bool_timings bool_timings; +}; + +/* Device timings in picoseconds */ +struct gpmc_device_timings { + u32 t_ceasu; /* address setup to CS valid */ + u32 t_avdasu; /* address setup to ADV valid */ + /* XXX: try to combine t_avdp_r & t_avdp_w. Issue is + * of tusb using these timings even for sync whilst + * ideally for adv_rd/(wr)_off it should have considered + * t_avdh instead. This indirectly necessitates r/w + * variations of t_avdp as it is possible to have one + * sync & other async + */ + u32 t_avdp_r; /* ADV low time (what about t_cer ?) 
*/ + u32 t_avdp_w; + u32 t_aavdh; /* address hold time */ + u32 t_oeasu; /* address setup to OE valid */ + u32 t_aa; /* access time from ADV assertion */ + u32 t_iaa; /* initial access time */ + u32 t_oe; /* access time from OE assertion */ + u32 t_ce; /* access time from CS asertion */ + u32 t_rd_cycle; /* read cycle time */ + u32 t_cez_r; /* read CS deassertion to high Z */ + u32 t_cez_w; /* write CS deassertion to high Z */ + u32 t_oez; /* OE deassertion to high Z */ + u32 t_weasu; /* address setup to WE valid */ + u32 t_wpl; /* write assertion time */ + u32 t_wph; /* write deassertion time */ + u32 t_wr_cycle; /* write cycle time */ + + u32 clk; + u32 t_bacc; /* burst access valid clock to output delay */ + u32 t_ces; /* CS setup time to clk */ + u32 t_avds; /* ADV setup time to clk */ + u32 t_avdh; /* ADV hold time from clk */ + u32 t_ach; /* address hold time from clk */ + u32 t_rdyo; /* clk to ready valid */ + + u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */ + u32 t_ce_avd; /* CS on to ADV on delay */ + + /* XXX: check the possibility of combining + * cyc_aavhd_oe & cyc_aavdh_we + */ + u8 cyc_aavdh_oe;/* read address hold time in cycles */ + u8 cyc_aavdh_we;/* write address hold time in cycles */ + u8 cyc_oe; /* access time from OE assertion in cycles */ + u8 cyc_wpl; /* write deassertion time in cycles */ + u32 cyc_iaa; /* initial access time in cycles */ + + /* extra delays */ + bool ce_xdelay; + bool avd_xdelay; + bool oe_xdelay; + bool we_xdelay; +}; + +struct gpmc_settings { + bool burst_wrap; /* enables wrap bursting */ + bool burst_read; /* enables read page/burst mode */ + bool burst_write; /* enables write page/burst mode */ + bool device_nand; /* device is NAND */ + bool sync_read; /* enables synchronous reads */ + bool sync_write; /* enables synchronous writes */ + bool wait_on_read; /* monitor wait on reads */ + bool wait_on_write; /* monitor wait on writes */ + u32 burst_len; /* page/burst length */ + u32 device_width; /* device bus width (8 or 16 bit) */ + u32 mux_add_data; /* multiplex address & data */ + u32 wait_pin; /* wait-pin to be used */ +}; + +extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t, + struct gpmc_settings *gpmc_s, + struct gpmc_device_timings *dev_t); + +struct gpmc_nand_regs; +struct device_node; + +extern void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs); +extern int gpmc_get_client_irq(unsigned irq_config); + +extern unsigned int gpmc_ticks_to_ns(unsigned int ticks); + +extern void gpmc_cs_write_reg(int cs, int idx, u32 val); +extern int gpmc_calc_divider(unsigned int sync_clk); +extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t); +extern int gpmc_cs_program_settings(int cs, struct gpmc_settings *p); +extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base); +extern void gpmc_cs_free(int cs); +extern int gpmc_configure(int cmd, int wval); +extern void gpmc_read_settings_dt(struct device_node *np, + struct gpmc_settings *p); + +extern void omap3_gpmc_save_context(void); +extern void omap3_gpmc_restore_context(void); + +struct gpmc_timings; +struct omap_nand_platform_data; +struct omap_onenand_platform_data; + +#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2) +extern int gpmc_nand_init(struct omap_nand_platform_data *d, + struct gpmc_timings *gpmc_t); +#else +static inline int gpmc_nand_init(struct omap_nand_platform_data *d, + struct gpmc_timings *gpmc_t) +{ + return 0; +} +#endif + +#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2) +extern void gpmc_onenand_init(struct omap_onenand_platform_data 
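[Editorial sketch: how a board file or driver might chain the exported GPMC helpers above to program one chip-select. The CS number, window size and timing inputs are placeholders, not real board data.]

#include <linux/omap-gpmc.h>
#include <linux/sizes.h>

#define MY_GPMC_CS	1	/* placeholder chip-select */

static int my_gpmc_setup_cs(struct gpmc_settings *s,
			    struct gpmc_device_timings *dev_t)
{
	struct gpmc_timings t;
	unsigned long base;
	int ret;

	ret = gpmc_cs_request(MY_GPMC_CS, SZ_16M, &base);
	if (ret < 0)
		return ret;

	ret = gpmc_calc_timings(&t, s, dev_t);
	if (!ret)
		ret = gpmc_cs_set_timings(MY_GPMC_CS, &t);
	if (!ret)
		ret = gpmc_cs_program_settings(MY_GPMC_CS, s);
	if (ret)
		gpmc_cs_free(MY_GPMC_CS);
	return ret;
}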
*d); +#else +#define board_onenand_data NULL +static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d) +{ +} +#endif diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h index f8322d9cd23..587bbdd31f5 100644 --- a/include/linux/omap-mailbox.h +++ b/include/linux/omap-mailbox.h @@ -10,20 +10,20 @@ #define OMAP_MAILBOX_H typedef u32 mbox_msg_t; -struct omap_mbox; typedef int __bitwise omap_mbox_irq_t; #define IRQ_TX ((__force omap_mbox_irq_t) 1) #define IRQ_RX ((__force omap_mbox_irq_t) 2) -int omap_mbox_msg_send(struct omap_mbox *, mbox_msg_t msg); +struct mbox_chan; +struct mbox_client; -struct omap_mbox *omap_mbox_get(const char *, struct notifier_block *nb); -void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb); +struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl, + const char *chan_name); -void omap_mbox_save_ctx(struct omap_mbox *mbox); -void omap_mbox_restore_ctx(struct omap_mbox *mbox); -void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq); -void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq); +void omap_mbox_save_ctx(struct mbox_chan *chan); +void omap_mbox_restore_ctx(struct mbox_chan *chan); +void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); +void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); #endif /* OMAP_MAILBOX_H */ diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h deleted file mode 100644 index 5c831f1eca7..00000000000 --- a/include/linux/page_cgroup.h +++ /dev/null @@ -1,105 +0,0 @@ -#ifndef __LINUX_PAGE_CGROUP_H -#define __LINUX_PAGE_CGROUP_H - -enum { - /* flags for mem_cgroup */ - PCG_USED = 0x01, /* This page is charged to a memcg */ - PCG_MEM = 0x02, /* This page holds a memory charge */ - PCG_MEMSW = 0x04, /* This page holds a memory+swap charge */ -}; - -struct pglist_data; - -#ifdef CONFIG_MEMCG -struct mem_cgroup; - -/* - * Page Cgroup can be considered as an extended mem_map. - * A page_cgroup page is associated with every page descriptor. The - * page_cgroup helps us identify information about the cgroup - * All page cgroups are allocated at boot or memory hotplug event, - * then the page cgroup for pfn always exists. 
- */ -struct page_cgroup { - unsigned long flags; - struct mem_cgroup *mem_cgroup; -}; - -extern void pgdat_page_cgroup_init(struct pglist_data *pgdat); - -#ifdef CONFIG_SPARSEMEM -static inline void page_cgroup_init_flatmem(void) -{ -} -extern void page_cgroup_init(void); -#else -extern void page_cgroup_init_flatmem(void); -static inline void page_cgroup_init(void) -{ -} -#endif - -struct page_cgroup *lookup_page_cgroup(struct page *page); - -static inline int PageCgroupUsed(struct page_cgroup *pc) -{ - return !!(pc->flags & PCG_USED); -} -#else /* !CONFIG_MEMCG */ -struct page_cgroup; - -static inline void pgdat_page_cgroup_init(struct pglist_data *pgdat) -{ -} - -static inline struct page_cgroup *lookup_page_cgroup(struct page *page) -{ - return NULL; -} - -static inline void page_cgroup_init(void) -{ -} - -static inline void page_cgroup_init_flatmem(void) -{ -} -#endif /* CONFIG_MEMCG */ - -#include <linux/swap.h> - -#ifdef CONFIG_MEMCG_SWAP -extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, - unsigned short old, unsigned short new); -extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); -extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); -extern int swap_cgroup_swapon(int type, unsigned long max_pages); -extern void swap_cgroup_swapoff(int type); -#else - -static inline -unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) -{ - return 0; -} - -static inline -unsigned short lookup_swap_cgroup_id(swp_entry_t ent) -{ - return 0; -} - -static inline int -swap_cgroup_swapon(int type, unsigned long max_pages) -{ - return 0; -} - -static inline void swap_cgroup_swapoff(int type) -{ - return; -} - -#endif /* CONFIG_MEMCG_SWAP */ - -#endif /* __LINUX_PAGE_CGROUP_H */ diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h new file mode 100644 index 00000000000..955421575d1 --- /dev/null +++ b/include/linux/page_counter.h @@ -0,0 +1,51 @@ +#ifndef _LINUX_PAGE_COUNTER_H +#define _LINUX_PAGE_COUNTER_H + +#include <linux/atomic.h> +#include <linux/kernel.h> +#include <asm/page.h> + +struct page_counter { + atomic_long_t count; + unsigned long limit; + struct page_counter *parent; + + /* legacy */ + unsigned long watermark; + unsigned long failcnt; +}; + +#if BITS_PER_LONG == 32 +#define PAGE_COUNTER_MAX LONG_MAX +#else +#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE) +#endif + +static inline void page_counter_init(struct page_counter *counter, + struct page_counter *parent) +{ + atomic_long_set(&counter->count, 0); + counter->limit = PAGE_COUNTER_MAX; + counter->parent = parent; +} + +static inline unsigned long page_counter_read(struct page_counter *counter) +{ + return atomic_long_read(&counter->count); +} + +void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); +void page_counter_charge(struct page_counter *counter, unsigned long nr_pages); +int page_counter_try_charge(struct page_counter *counter, + unsigned long nr_pages, + struct page_counter **fail); +void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); +int page_counter_limit(struct page_counter *counter, unsigned long limit); +int page_counter_memparse(const char *buf, unsigned long *nr_pages); + +static inline void page_counter_reset_watermark(struct page_counter *counter) +{ + counter->watermark = page_counter_read(counter); +} + +#endif /* _LINUX_PAGE_COUNTER_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 5be8db45e36..44a27696ab6 100644 --- a/include/linux/pci.h +++ 
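[Editorial sketch, not from the patch: minimal usage of the new page_counter API added above. Counter names, the limit value and the error path are illustrative; a nonzero try-charge return is treated as "limit hit".]

#include <linux/page_counter.h>

static struct page_counter my_parent_counter;
static struct page_counter my_counter;

static void my_init_counters(void)
{
	page_counter_init(&my_parent_counter, NULL);
	page_counter_init(&my_counter, &my_parent_counter);
	page_counter_limit(&my_counter, 1024);	/* cap at 1024 pages */
}

static int my_account_pages(unsigned long nr_pages)
{
	struct page_counter *fail;
	int ret;

	ret = page_counter_try_charge(&my_counter, nr_pages, &fail);
	if (ret)
		return ret;	/* 'fail' is the counter whose limit was hit */
	return 0;
}

static void my_unaccount_pages(unsigned long nr_pages)
{
	page_counter_uncharge(&my_counter, nr_pages);
}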
b/include/linux/pci.h @@ -331,6 +331,7 @@ struct pci_dev { unsigned int is_added:1; unsigned int is_busmaster:1; /* device is busmaster */ unsigned int no_msi:1; /* device may not use msi */ + unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */ unsigned int block_cfg_access:1; /* config space access is blocked */ unsigned int broken_parity_status:1; /* Device generates false positive parity */ unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ @@ -449,7 +450,7 @@ struct pci_bus { struct resource busn_res; /* bus numbers routed to this bus */ struct pci_ops *ops; /* configuration access functions */ - struct msi_chip *msi; /* MSI controller */ + struct msi_controller *msi; /* MSI controller */ void *sysdata; /* hook for sys-specific extension */ struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ @@ -1003,6 +1004,8 @@ void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size); int pci_save_state(struct pci_dev *dev); void pci_restore_state(struct pci_dev *dev); struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev); +int pci_load_saved_state(struct pci_dev *dev, + struct pci_saved_state *state); int pci_load_and_free_saved_state(struct pci_dev *dev, struct pci_saved_state **state); struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap); diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 2706ee9a432..8c789506112 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -109,7 +109,6 @@ struct hotplug_slot { struct list_head slot_list; struct pci_slot *pci_slot; }; -#define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj) static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) { diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 1fa99a30181..97fb9f69aae 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -522,6 +522,8 @@ #define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403 #define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d #define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e +#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F3 0x1573 +#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F4 0x1574 #define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600 #define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 #define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index d5c89e0dd0e..b4337646388 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -128,12 +128,16 @@ static inline void percpu_ref_kill(struct percpu_ref *ref) static inline bool __ref_is_percpu(struct percpu_ref *ref, unsigned long __percpu **percpu_countp) { - unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr); - /* paired with smp_store_release() in percpu_ref_reinit() */ - smp_read_barrier_depends(); + unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr); - if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC)) + /* + * Theoretically, the following could test just ATOMIC; however, + * then we'd have to mask off DEAD separately as DEAD may be + * visible without ATOMIC if we race with percpu_ref_kill(). DEAD + * implies ATOMIC anyway. Test them together. 
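[Editorial sketch: pci_load_saved_state(), newly exported above, pairs with pci_store_saved_state(). A hedged example of wrapping a device reset with it; error handling is trimmed and the helper name is invented.]

#include <linux/pci.h>
#include <linux/slab.h>

static int my_reset_with_saved_state(struct pci_dev *pdev)
{
	struct pci_saved_state *state;
	int ret;

	pci_save_state(pdev);
	state = pci_store_saved_state(pdev);	/* kmalloc'ed snapshot */

	/* ... perform the actual device reset here ... */

	ret = pci_load_saved_state(pdev, state);
	if (!ret)
		pci_restore_state(pdev);
	kfree(state);
	return ret;
}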
+ */ + if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD)) return false; *percpu_countp = (unsigned long __percpu *)percpu_ptr; @@ -141,28 +145,42 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref, } /** - * percpu_ref_get - increment a percpu refcount + * percpu_ref_get_many - increment a percpu refcount * @ref: percpu_ref to get + * @nr: number of references to get * - * Analagous to atomic_long_inc(). + * Analogous to atomic_long_add(). * * This function is safe to call as long as @ref is between init and exit. */ -static inline void percpu_ref_get(struct percpu_ref *ref) +static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr) { unsigned long __percpu *percpu_count; rcu_read_lock_sched(); if (__ref_is_percpu(ref, &percpu_count)) - this_cpu_inc(*percpu_count); + this_cpu_add(*percpu_count, nr); else - atomic_long_inc(&ref->count); + atomic_long_add(nr, &ref->count); rcu_read_unlock_sched(); } /** + * percpu_ref_get - increment a percpu refcount + * @ref: percpu_ref to get + * + * Analagous to atomic_long_inc(). + * + * This function is safe to call as long as @ref is between init and exit. + */ +static inline void percpu_ref_get(struct percpu_ref *ref) +{ + percpu_ref_get_many(ref, 1); +} + +/** * percpu_ref_tryget - try to increment a percpu refcount * @ref: percpu_ref to try-get * @@ -225,29 +243,44 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) } /** - * percpu_ref_put - decrement a percpu refcount + * percpu_ref_put_many - decrement a percpu refcount * @ref: percpu_ref to put + * @nr: number of references to put * * Decrement the refcount, and if 0, call the release function (which was passed * to percpu_ref_init()) * * This function is safe to call as long as @ref is between init and exit. */ -static inline void percpu_ref_put(struct percpu_ref *ref) +static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr) { unsigned long __percpu *percpu_count; rcu_read_lock_sched(); if (__ref_is_percpu(ref, &percpu_count)) - this_cpu_dec(*percpu_count); - else if (unlikely(atomic_long_dec_and_test(&ref->count))) + this_cpu_sub(*percpu_count, nr); + else if (unlikely(atomic_long_sub_and_test(nr, &ref->count))) ref->release(ref); rcu_read_unlock_sched(); } /** + * percpu_ref_put - decrement a percpu refcount + * @ref: percpu_ref to put + * + * Decrement the refcount, and if 0, call the release function (which was passed + * to percpu_ref_init()) + * + * This function is safe to call as long as @ref is between init and exit. 
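[Editorial sketch: the *_many variants introduced above let callers take or drop a whole batch of references in one operation. The queueing context is hypothetical.]

#include <linux/percpu-refcount.h>

static void my_publish_batch(struct percpu_ref *ref, unsigned int nr)
{
	/* one reference per queued item, taken in a single operation */
	percpu_ref_get_many(ref, nr);
}

static void my_retire_batch(struct percpu_ref *ref, unsigned int nr)
{
	/* release() fires if this drops the count to zero */
	percpu_ref_put_many(ref, nr);
}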
+ */ +static inline void percpu_ref_put(struct percpu_ref *ref) +{ + percpu_ref_put_many(ref, 1); +} + +/** * percpu_ref_is_zero - test whether a percpu refcount reached zero * @ref: percpu_ref to test * diff --git a/include/linux/percpu.h b/include/linux/percpu.h index a3aa63e4763..caebf2a758d 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -5,6 +5,7 @@ #include <linux/preempt.h> #include <linux/smp.h> #include <linux/cpumask.h> +#include <linux/printk.h> #include <linux/pfn.h> #include <linux/init.h> @@ -134,4 +135,7 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr); (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ __alignof__(type)) +/* To avoid include hell, as printk can not declare this, we declare it here */ +DECLARE_PER_CPU(printk_func_t, printk_func); + #endif /* __LINUX_PERCPU_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 893a0d07986..486e84ccb1f 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -79,7 +79,7 @@ struct perf_branch_stack { struct perf_branch_entry entries[0]; }; -struct perf_regs_user { +struct perf_regs { __u64 abi; struct pt_regs *regs; }; @@ -580,34 +580,40 @@ extern u64 perf_event_read_value(struct perf_event *event, struct perf_sample_data { - u64 type; + /* + * Fields set by perf_sample_data_init(), group so as to + * minimize the cachelines touched. + */ + u64 addr; + struct perf_raw_record *raw; + struct perf_branch_stack *br_stack; + u64 period; + u64 weight; + u64 txn; + union perf_mem_data_src data_src; + /* + * The other fields, optionally {set,used} by + * perf_{prepare,output}_sample(). + */ + u64 type; u64 ip; struct { u32 pid; u32 tid; } tid_entry; u64 time; - u64 addr; u64 id; u64 stream_id; struct { u32 cpu; u32 reserved; } cpu_entry; - u64 period; - union perf_mem_data_src data_src; struct perf_callchain_entry *callchain; - struct perf_raw_record *raw; - struct perf_branch_stack *br_stack; - struct perf_regs_user regs_user; + struct perf_regs regs_user; + struct perf_regs regs_intr; u64 stack_user_size; - u64 weight; - /* - * Transaction flags for abort events: - */ - u64 txn; -}; +} ____cacheline_aligned; /* default value for data source */ #define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\ @@ -624,9 +630,6 @@ static inline void perf_sample_data_init(struct perf_sample_data *data, data->raw = NULL; data->br_stack = NULL; data->period = period; - data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE; - data->regs_user.regs = NULL; - data->stack_user_size = 0; data->weight = 0; data->data_src.val = PERF_MEM_NA; data->txn = 0; diff --git a/include/linux/phy.h b/include/linux/phy.h index d090cfcaa16..22af8f8f580 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -433,6 +433,7 @@ struct phy_device { * by this PHY * flags: A bitfield defining certain other features this PHY * supports (like interrupts) + * driver_data: static driver data * * The drivers must implement config_aneg and read_status. All * other functions are optional. Note that none of these @@ -448,6 +449,7 @@ struct phy_driver { unsigned int phy_id_mask; u32 features; u32 flags; + const void *driver_data; /* * Called to issue a PHY software reset @@ -772,4 +774,28 @@ int __init mdio_bus_init(void); void mdio_bus_exit(void); extern struct bus_type mdio_bus_type; + +/** + * module_phy_driver() - Helper macro for registering PHY drivers + * @__phy_drivers: array of PHY drivers to register + * + * Helper macro for PHY drivers which do not do anything special in module + * init/exit. 
Each module may only use this macro once, and calling it + * replaces module_init() and module_exit(). + */ +#define phy_module_driver(__phy_drivers, __count) \ +static int __init phy_module_init(void) \ +{ \ + return phy_drivers_register(__phy_drivers, __count); \ +} \ +module_init(phy_module_init); \ +static void __exit phy_module_exit(void) \ +{ \ + phy_drivers_unregister(__phy_drivers, __count); \ +} \ +module_exit(phy_module_exit) + +#define module_phy_driver(__phy_drivers) \ + phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers)) + #endif /* __PHY_H */ diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h index a6591c693eb..5e0bc779e6c 100644 --- a/include/linux/platform_data/asoc-s3c.h +++ b/include/linux/platform_data/asoc-s3c.h @@ -27,6 +27,7 @@ struct samsung_i2s { #define QUIRK_NO_MUXPSR (1 << 2) #define QUIRK_NEED_RSTCLR (1 << 3) #define QUIRK_SUPPORTS_TDM (1 << 4) +#define QUIRK_SUPPORTS_IDMA (1 << 5) /* Quirks of the I2S controller */ u32 quirks; dma_addr_t idma_addr; diff --git a/include/linux/platform_data/bcmgenet.h b/include/linux/platform_data/bcmgenet.h new file mode 100644 index 00000000000..26af5432195 --- /dev/null +++ b/include/linux/platform_data/bcmgenet.h @@ -0,0 +1,18 @@ +#ifndef __LINUX_PLATFORM_DATA_BCMGENET_H__ +#define __LINUX_PLATFORM_DATA_BCMGENET_H__ + +#include <linux/types.h> +#include <linux/if_ether.h> +#include <linux/phy.h> + +struct bcmgenet_platform_data { + bool mdio_enabled; + phy_interface_t phy_interface; + int phy_address; + int phy_speed; + int phy_duplex; + u8 mac_address[ETH_ALEN]; + int genet_version; +}; + +#endif diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index 6a1357d3187..7d964e78729 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h @@ -41,6 +41,7 @@ enum sdma_peripheral_type { IMX_DMATYPE_ESAI, /* ESAI */ IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */ IMX_DMATYPE_ASRC_SP, /* Shared ASRC */ + IMX_DMATYPE_SAI, /* SAI */ }; enum imx_dma_prio { diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h new file mode 100644 index 00000000000..67bbcf0785f --- /dev/null +++ b/include/linux/platform_data/hsmmc-omap.h @@ -0,0 +1,90 @@ +/* + * MMC definitions for OMAP2 + * + * Copyright (C) 2006 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * struct omap_hsmmc_dev_attr.flags possibilities + * + * OMAP_HSMMC_SUPPORTS_DUAL_VOLT: Some HSMMC controller instances can + * operate with either 1.8Vdc or 3.0Vdc card voltages; this flag + * should be set if this is the case. See for example Section 22.5.3 + * "MMC/SD/SDIO1 Bus Voltage Selection" of the OMAP34xx Multimedia + * Device Silicon Revision 3.1.x Revision ZR (July 2011) (SWPU223R). + * + * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ: Multiple-block read transfers + * don't work correctly on some MMC controller instances on some + * OMAP3 SoCs; this flag should be set if this is the case. See + * for example Advisory 2.1.1.128 "MMC: Multiple Block Read + * Operation Issue" in _OMAP3530/3525/3515/3503 Silicon Errata_ + * Revision F (October 2010) (SPRZ278F). 
+ */ +#define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(0) +#define OMAP_HSMMC_BROKEN_MULTIBLOCK_READ BIT(1) +#define OMAP_HSMMC_SWAKEUP_MISSING BIT(2) + +struct omap_hsmmc_dev_attr { + u8 flags; +}; + +struct mmc_card; + +struct omap_hsmmc_platform_data { + /* back-link to device */ + struct device *dev; + + /* set if your board has components or wiring that limits the + * maximum frequency on the MMC bus */ + unsigned int max_freq; + + /* Integrating attributes from the omap_hwmod layer */ + u8 controller_flags; + + /* Register offset deviation */ + u16 reg_offset; + + /* + * 4/8 wires and any additional host capabilities + * need to OR'd all capabilities (ref. linux/mmc/host.h) + */ + u32 caps; /* Used for the MMC driver on 2430 and later */ + u32 pm_caps; /* PM capabilities of the mmc */ + + /* switch pin can be for card detect (default) or card cover */ + unsigned cover:1; + + /* use the internal clock */ + unsigned internal_clock:1; + + /* nonremovable e.g. eMMC */ + unsigned nonremovable:1; + + /* eMMC does not handle power off when not in sleep state */ + unsigned no_regulator_off_init:1; + + /* we can put the features above into this variable */ +#define HSMMC_HAS_PBIAS (1 << 0) +#define HSMMC_HAS_UPDATED_RESET (1 << 1) +#define HSMMC_HAS_HSPE_SUPPORT (1 << 2) + unsigned features; + + int switch_pin; /* gpio (card detect) */ + int gpio_wp; /* gpio (write protect) */ + + int (*set_power)(struct device *dev, int power_on, int vdd); + void (*remux)(struct device *dev, int power_on); + /* Call back before enabling / disabling regulators */ + void (*before_set_reg)(struct device *dev, int power_on, int vdd); + /* Call back after enabling / disabling regulators */ + void (*after_set_reg)(struct device *dev, int power_on, int vdd); + /* if we have special card, init it using this callback */ + void (*init_card)(struct mmc_card *card); + + const char *name; + u32 ocr_mask; +}; diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h index 1b2ba24e4e0..9c7fd1efe49 100644 --- a/include/linux/platform_data/lp855x.h +++ b/include/linux/platform_data/lp855x.h @@ -136,6 +136,7 @@ struct lp855x_rom_data { Only valid when mode is PWM_BASED. 
* @size_program : total size of lp855x_rom_data * @rom_data : list of new eeprom/eprom registers + * @supply : regulator that supplies 3V input */ struct lp855x_platform_data { const char *name; @@ -144,6 +145,7 @@ struct lp855x_platform_data { unsigned int period_ns; int size_program; struct lp855x_rom_data *rom_data; + struct regulator *supply; }; #endif diff --git a/include/linux/platform_data/mmc-atmel-mci.h b/include/linux/platform_data/mmc-atmel-mci.h new file mode 100644 index 00000000000..399a2d5a14b --- /dev/null +++ b/include/linux/platform_data/mmc-atmel-mci.h @@ -0,0 +1,22 @@ +#ifndef __MMC_ATMEL_MCI_H +#define __MMC_ATMEL_MCI_H + +#include <linux/platform_data/dma-atmel.h> +#include <linux/platform_data/dma-dw.h> + +/** + * struct mci_dma_data - DMA data for MCI interface + */ +struct mci_dma_data { +#ifdef CONFIG_ARM + struct at_dma_slave sdata; +#else + struct dw_dma_slave sdata; +#endif +}; + +/* accessor macros */ +#define slave_data_ptr(s) (&(s)->sdata) +#define find_slave_dev(s) ((s)->sdata.dma_dev) + +#endif /* __MMC_ATMEL_MCI_H */ diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h index 51e70cf25cb..5c188f4e9be 100644 --- a/include/linux/platform_data/mmc-omap.h +++ b/include/linux/platform_data/mmc-omap.h @@ -10,32 +10,8 @@ #define OMAP_MMC_MAX_SLOTS 2 -/* - * struct omap_mmc_dev_attr.flags possibilities - * - * OMAP_HSMMC_SUPPORTS_DUAL_VOLT: Some HSMMC controller instances can - * operate with either 1.8Vdc or 3.0Vdc card voltages; this flag - * should be set if this is the case. See for example Section 22.5.3 - * "MMC/SD/SDIO1 Bus Voltage Selection" of the OMAP34xx Multimedia - * Device Silicon Revision 3.1.x Revision ZR (July 2011) (SWPU223R). - * - * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ: Multiple-block read transfers - * don't work correctly on some MMC controller instances on some - * OMAP3 SoCs; this flag should be set if this is the case. See - * for example Advisory 2.1.1.128 "MMC: Multiple Block Read - * Operation Issue" in _OMAP3530/3525/3515/3503 Silicon Errata_ - * Revision F (October 2010) (SPRZ278F). 
- */ -#define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(0) -#define OMAP_HSMMC_BROKEN_MULTIBLOCK_READ BIT(1) -#define OMAP_HSMMC_SWAKEUP_MISSING BIT(2) - struct mmc_card; -struct omap_mmc_dev_attr { - u8 flags; -}; - struct omap_mmc_platform_data { /* back-link to device */ struct device *dev; @@ -106,9 +82,6 @@ struct omap_mmc_platform_data { unsigned vcc_aux_disable_is_sleep:1; /* we can put the features above into this variable */ -#define HSMMC_HAS_PBIAS (1 << 0) -#define HSMMC_HAS_UPDATED_RESET (1 << 1) -#define HSMMC_HAS_HSPE_SUPPORT (1 << 2) #define MMC_OMAP7XX (1 << 3) #define MMC_OMAP15XX (1 << 4) #define MMC_OMAP16XX (1 << 5) diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h index 27d3156d093..9e20c2fb4ff 100644 --- a/include/linux/platform_data/pxa_sdhci.h +++ b/include/linux/platform_data/pxa_sdhci.h @@ -55,9 +55,4 @@ struct sdhci_pxa_platdata { unsigned int quirks2; unsigned int pm_caps; }; - -struct sdhci_pxa { - u8 clk_enable; - u8 power_mode; -}; #endif /* _PXA_SDHCI_H_ */ diff --git a/include/linux/platform_data/serial-omap.h b/include/linux/platform_data/serial-omap.h index c860c1b314c..d09275f3cde 100644 --- a/include/linux/platform_data/serial-omap.h +++ b/include/linux/platform_data/serial-omap.h @@ -38,9 +38,6 @@ struct omap_uart_port_info { unsigned int dma_rx_timeout; unsigned int autosuspend_timeout; unsigned int dma_rx_poll_rate; - int DTR_gpio; - int DTR_inverted; - int DTR_present; int (*get_context_loss_count)(struct device *); void (*enable_wakeup)(struct device *, bool); diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h index 1730312398f..5087fff96d8 100644 --- a/include/linux/platform_data/st21nfca.h +++ b/include/linux/platform_data/st21nfca.h @@ -24,7 +24,6 @@ #define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci" struct st21nfca_nfc_platform_data { - unsigned int gpio_irq; unsigned int gpio_ena; unsigned int irq_polarity; }; diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h index 2d11f1f5efa..c3b432f5b63 100644 --- a/include/linux/platform_data/st21nfcb.h +++ b/include/linux/platform_data/st21nfcb.h @@ -24,7 +24,6 @@ #define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci" struct st21nfcb_nfc_platform_data { - unsigned int gpio_irq; unsigned int gpio_reset; unsigned int irq_polarity; }; diff --git a/include/linux/plist.h b/include/linux/plist.h index 8b6c970cff6..97883604a3c 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -176,7 +176,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head); * plist_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop counter * @head: the head for your list - * @mem: the name of the list_struct within the struct + * @mem: the name of the list_head within the struct */ #define plist_for_each_entry(pos, head, mem) \ list_for_each_entry(pos, &(head)->node_list, mem.node_list) @@ -185,7 +185,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head); * plist_for_each_entry_continue - continue iteration over list of given type * @pos: the type * to use as a loop cursor * @head: the head for your list - * @m: the name of the list_struct within the struct + * @m: the name of the list_head within the struct * * Continue to iterate over list of given type, continuing after * the current position. 
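For reference, a minimal sketch of how the @mem argument described in the plist_for_each_entry() kerneldoc above is used by a caller; struct my_req and walk_reqs() are hypothetical names for illustration only and are not part of this patch:

    #include <linux/plist.h>
    #include <linux/printk.h>

    /* Hypothetical caller-side structure; "node" is the embedded plist_node
     * that the @mem/@m/@member parameters in the kerneldoc refer to. */
    struct my_req {
            int data;
            struct plist_node node;
    };

    static void walk_reqs(struct plist_head *head)
    {
            struct my_req *req;

            /* "node" names the plist_node member inside struct my_req. */
            plist_for_each_entry(req, head, node)
                    pr_info("data=%d prio=%d\n", req->data, req->node.prio);
    }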
@@ -198,7 +198,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head); * @pos: the type * to use as a loop counter * @n: another type * to use as temporary storage * @head: the head for your list - * @m: the name of the list_struct within the struct + * @m: the name of the list_head within the struct * * Iterate over list of given type, safe against removal of list entry. */ @@ -229,7 +229,7 @@ static inline int plist_node_empty(const struct plist_node *node) * plist_first_entry - get the struct for the first entry * @head: the &struct plist_head pointer * @type: the type of the struct this is embedded in - * @member: the name of the list_struct within the struct + * @member: the name of the list_head within the struct */ #ifdef CONFIG_DEBUG_PI_LIST # define plist_first_entry(head, type, member) \ @@ -246,7 +246,7 @@ static inline int plist_node_empty(const struct plist_node *node) * plist_last_entry - get the struct for the last entry * @head: the &struct plist_head pointer * @type: the type of the struct this is embedded in - * @member: the name of the list_struct within the struct + * @member: the name of the list_head within the struct */ #ifdef CONFIG_DEBUG_PI_LIST # define plist_last_entry(head, type, member) \ diff --git a/include/linux/pm.h b/include/linux/pm.h index 383fd68aaee..66a656eb335 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -342,7 +342,7 @@ struct dev_pm_ops { #define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) #endif -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ .runtime_suspend = suspend_fn, \ .runtime_resume = resume_fn, \ @@ -351,14 +351,7 @@ struct dev_pm_ops { #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) #endif -#ifdef CONFIG_PM -#define SET_PM_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ - .runtime_suspend = suspend_fn, \ - .runtime_resume = resume_fn, \ - .runtime_idle = idle_fn, -#else -#define SET_PM_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) -#endif +#define SET_PM_RUNTIME_PM_OPS SET_RUNTIME_PM_OPS /* * Use this if you want to use the same suspend and resume callbacks for suspend @@ -538,11 +531,7 @@ enum rpm_request { }; struct wakeup_source; - -struct pm_domain_data { - struct list_head list_node; - struct device *dev; -}; +struct pm_domain_data; struct pm_subsys_data { spinlock_t lock; @@ -576,7 +565,7 @@ struct dev_pm_info { #else unsigned int should_wakeup:1; #endif -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h index 8348866e7b0..0b003963441 100644 --- a/include/linux/pm_clock.h +++ b/include/linux/pm_clock.h @@ -18,6 +18,8 @@ struct pm_clk_notifier_block { char *con_ids[]; }; +struct clk; + #ifdef CONFIG_PM_CLK static inline bool pm_clk_no_clocks(struct device *dev) { @@ -29,6 +31,7 @@ extern void pm_clk_init(struct device *dev); extern int pm_clk_create(struct device *dev); extern void pm_clk_destroy(struct device *dev); extern int pm_clk_add(struct device *dev, const char *con_id); +extern int pm_clk_add_clk(struct device *dev, struct clk *clk); extern void pm_clk_remove(struct device *dev, const char *con_id); extern int pm_clk_suspend(struct device *dev); extern int pm_clk_resume(struct device *dev); @@ -51,6 +54,11 @@ static inline int pm_clk_add(struct device *dev, const char *con_id) { return -EINVAL; } + +static inline int pm_clk_add_clk(struct device *dev, struct clk 
*clk) +{ + return -EINVAL; +} static inline void pm_clk_remove(struct device *dev, const char *con_id) { } diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 2e0e06daf8c..6cd20d5e651 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -17,6 +17,9 @@ #include <linux/notifier.h> #include <linux/cpuidle.h> +/* Defines used for the flags field in the struct generic_pm_domain */ +#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ + enum gpd_status { GPD_STATE_ACTIVE = 0, /* PM domain is active */ GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */ @@ -76,6 +79,7 @@ struct generic_pm_domain { struct device *dev); void (*detach_dev)(struct generic_pm_domain *domain, struct device *dev); + unsigned int flags; /* Bit field of configs for genpd */ }; static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) @@ -100,6 +104,11 @@ struct gpd_timing_data { bool cached_stop_ok; }; +struct pm_domain_data { + struct list_head list_node; + struct device *dev; +}; + struct generic_pm_domain_data { struct pm_domain_data base; struct gpd_timing_data td; @@ -147,6 +156,7 @@ extern void pm_genpd_init(struct generic_pm_domain *genpd, extern int pm_genpd_poweron(struct generic_pm_domain *genpd); extern int pm_genpd_name_poweron(const char *domain_name); +extern void pm_genpd_poweroff_unused(void); extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; @@ -221,6 +231,7 @@ static inline int pm_genpd_name_poweron(const char *domain_name) { return -ENOSYS; } +static inline void pm_genpd_poweroff_unused(void) {} #define simple_qos_governor NULL #define pm_domain_always_on_gov NULL #endif @@ -237,12 +248,6 @@ static inline int pm_genpd_name_add_device(const char *domain_name, return __pm_genpd_name_add_device(domain_name, dev, NULL); } -#ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME -extern void pm_genpd_poweroff_unused(void); -#else -static inline void pm_genpd_poweroff_unused(void) {} -#endif - #ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP extern void pm_genpd_syscore_poweroff(struct device *dev); extern void pm_genpd_syscore_poweron(struct device *dev); diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 0330217abfa..cec2d454091 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -21,7 +21,7 @@ struct dev_pm_opp; struct device; enum dev_pm_opp_event { - OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, + OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, }; #if defined(CONFIG_PM_OPP) @@ -44,6 +44,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt); +void dev_pm_opp_remove(struct device *dev, unsigned long freq); int dev_pm_opp_enable(struct device *dev, unsigned long freq); @@ -90,6 +91,10 @@ static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, return -EINVAL; } +static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) +{ +} + static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) { return 0; @@ -109,11 +114,16 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) int of_init_opp_table(struct device *dev); +void of_free_opp_table(struct device *dev); #else static inline int of_init_opp_table(struct device *dev) { return -EINVAL; } + +static inline void of_free_opp_table(struct device *dev) +{ +} #endif #endif /* 
__LINUX_OPP_H__ */ diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 636e8283450..7b3ae0cffc0 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -154,6 +154,23 @@ void dev_pm_qos_constraints_destroy(struct device *dev); int dev_pm_qos_add_ancestor_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value); +int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); +void dev_pm_qos_hide_latency_limit(struct device *dev); +int dev_pm_qos_expose_flags(struct device *dev, s32 value); +void dev_pm_qos_hide_flags(struct device *dev); +int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); +s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); +int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); + +static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) +{ + return dev->power.qos->resume_latency_req->data.pnode.prio; +} + +static inline s32 dev_pm_qos_requested_flags(struct device *dev) +{ + return dev->power.qos->flags_req->data.flr.flags; +} #else static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) @@ -200,27 +217,6 @@ static inline int dev_pm_qos_add_ancestor_request(struct device *dev, enum dev_pm_qos_req_type type, s32 value) { return 0; } -#endif - -#ifdef CONFIG_PM_RUNTIME -int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); -void dev_pm_qos_hide_latency_limit(struct device *dev); -int dev_pm_qos_expose_flags(struct device *dev, s32 value); -void dev_pm_qos_hide_flags(struct device *dev); -int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); -s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); -int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); - -static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) -{ - return dev->power.qos->resume_latency_req->data.pnode.prio; -} - -static inline s32 dev_pm_qos_requested_flags(struct device *dev) -{ - return dev->power.qos->flags_req->data.flr.flags; -} -#else static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) { return 0; } static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 367f49b9a1c..eda4feede04 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -35,16 +35,6 @@ extern int pm_generic_runtime_suspend(struct device *dev); extern int pm_generic_runtime_resume(struct device *dev); extern int pm_runtime_force_suspend(struct device *dev); extern int pm_runtime_force_resume(struct device *dev); -#else -static inline bool queue_pm_work(struct work_struct *work) { return false; } - -static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } -static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } -static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } -static inline int pm_runtime_force_resume(struct device *dev) { return 0; } -#endif - -#ifdef CONFIG_PM_RUNTIME extern int __pm_runtime_idle(struct device *dev, int rpmflags); extern int __pm_runtime_suspend(struct device *dev, int rpmflags); @@ -128,7 +118,14 @@ static inline void pm_runtime_mark_last_busy(struct device *dev) ACCESS_ONCE(dev->power.last_busy) = jiffies; } -#else /* !CONFIG_PM_RUNTIME */ +#else /* !CONFIG_PM */ + +static inline bool queue_pm_work(struct work_struct *work) { return false; } + +static inline int 
pm_generic_runtime_suspend(struct device *dev) { return 0; } +static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } +static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } +static inline int pm_runtime_force_resume(struct device *dev) { return 0; } static inline int __pm_runtime_idle(struct device *dev, int rpmflags) { @@ -179,7 +176,7 @@ static inline unsigned long pm_runtime_autosuspend_expiration( static inline void pm_runtime_set_memalloc_noio(struct device *dev, bool enable){} -#endif /* !CONFIG_PM_RUNTIME */ +#endif /* !CONFIG_PM */ static inline int pm_runtime_idle(struct device *dev) { diff --git a/include/linux/printk.h b/include/linux/printk.h index d78125f73ac..c8f170324e6 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -118,12 +118,13 @@ int no_printk(const char *fmt, ...) #ifdef CONFIG_EARLY_PRINTK extern asmlinkage __printf(1, 2) void early_printk(const char *fmt, ...); -void early_vprintk(const char *fmt, va_list ap); #else static inline __printf(1, 2) __cold void early_printk(const char *s, ...) { } #endif +typedef int(*printk_func_t)(const char *fmt, va_list args); + #ifdef CONFIG_PRINTK asmlinkage __printf(5, 0) int vprintk_emit(int facility, int level, diff --git a/include/linux/property.h b/include/linux/property.h new file mode 100644 index 00000000000..a6a3d98bd7e --- /dev/null +++ b/include/linux/property.h @@ -0,0 +1,143 @@ +/* + * property.h - Unified device property interface. + * + * Copyright (C) 2014, Intel Corporation + * Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef _LINUX_PROPERTY_H_ +#define _LINUX_PROPERTY_H_ + +#include <linux/types.h> + +struct device; + +enum dev_prop_type { + DEV_PROP_U8, + DEV_PROP_U16, + DEV_PROP_U32, + DEV_PROP_U64, + DEV_PROP_STRING, + DEV_PROP_MAX, +}; + +bool device_property_present(struct device *dev, const char *propname); +int device_property_read_u8_array(struct device *dev, const char *propname, + u8 *val, size_t nval); +int device_property_read_u16_array(struct device *dev, const char *propname, + u16 *val, size_t nval); +int device_property_read_u32_array(struct device *dev, const char *propname, + u32 *val, size_t nval); +int device_property_read_u64_array(struct device *dev, const char *propname, + u64 *val, size_t nval); +int device_property_read_string_array(struct device *dev, const char *propname, + const char **val, size_t nval); +int device_property_read_string(struct device *dev, const char *propname, + const char **val); + +enum fwnode_type { + FWNODE_INVALID = 0, + FWNODE_OF, + FWNODE_ACPI, +}; + +struct fwnode_handle { + enum fwnode_type type; +}; + +bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname); +int fwnode_property_read_u8_array(struct fwnode_handle *fwnode, + const char *propname, u8 *val, + size_t nval); +int fwnode_property_read_u16_array(struct fwnode_handle *fwnode, + const char *propname, u16 *val, + size_t nval); +int fwnode_property_read_u32_array(struct fwnode_handle *fwnode, + const char *propname, u32 *val, + size_t nval); +int fwnode_property_read_u64_array(struct fwnode_handle *fwnode, + const char *propname, u64 *val, + size_t nval); +int fwnode_property_read_string_array(struct fwnode_handle *fwnode, + const char *propname, const char **val, + size_t nval); +int fwnode_property_read_string(struct fwnode_handle *fwnode, + const char *propname, const char **val); + +struct fwnode_handle *device_get_next_child_node(struct device *dev, + struct fwnode_handle *child); + +#define device_for_each_child_node(dev, child) \ + for (child = device_get_next_child_node(dev, NULL); child; \ + child = device_get_next_child_node(dev, child)) + +void fwnode_handle_put(struct fwnode_handle *fwnode); + +unsigned int device_get_child_node_count(struct device *dev); + +static inline bool device_property_read_bool(struct device *dev, + const char *propname) +{ + return device_property_present(dev, propname); +} + +static inline int device_property_read_u8(struct device *dev, + const char *propname, u8 *val) +{ + return device_property_read_u8_array(dev, propname, val, 1); +} + +static inline int device_property_read_u16(struct device *dev, + const char *propname, u16 *val) +{ + return device_property_read_u16_array(dev, propname, val, 1); +} + +static inline int device_property_read_u32(struct device *dev, + const char *propname, u32 *val) +{ + return device_property_read_u32_array(dev, propname, val, 1); +} + +static inline int device_property_read_u64(struct device *dev, + const char *propname, u64 *val) +{ + return device_property_read_u64_array(dev, propname, val, 1); +} + +static inline bool fwnode_property_read_bool(struct fwnode_handle *fwnode, + const char *propname) +{ + return fwnode_property_present(fwnode, propname); +} + +static inline int fwnode_property_read_u8(struct fwnode_handle *fwnode, + const char *propname, u8 *val) +{ + return fwnode_property_read_u8_array(fwnode, propname, val, 1); +} + +static inline int fwnode_property_read_u16(struct fwnode_handle *fwnode, + const char *propname, u16 *val) +{ + return 
fwnode_property_read_u16_array(fwnode, propname, val, 1); +} + +static inline int fwnode_property_read_u32(struct fwnode_handle *fwnode, + const char *propname, u32 *val) +{ + return fwnode_property_read_u32_array(fwnode, propname, val, 1); +} + +static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode, + const char *propname, u64 *val) +{ + return fwnode_property_read_u64_array(fwnode, propname, val, 1); +} + +#endif /* _LINUX_PROPERTY_H_ */ diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index 9974975d40d..4af3fdc85b0 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h @@ -53,7 +53,8 @@ struct persistent_ram_zone { }; struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, - u32 sig, struct persistent_ram_ecc_info *ecc_info); + u32 sig, struct persistent_ram_ecc_info *ecc_info, + unsigned int memtype); void persistent_ram_free(struct persistent_ram_zone *prz); void persistent_ram_zap(struct persistent_ram_zone *prz); @@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz, struct ramoops_platform_data { unsigned long mem_size; unsigned long mem_address; + unsigned int mem_type; unsigned long record_size; unsigned long console_size; unsigned long ftrace_size; diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index cc79eff4a1a..987a73a40ef 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -52,7 +52,7 @@ extern void ptrace_notify(int exit_code); extern void __ptrace_link(struct task_struct *child, struct task_struct *new_parent); extern void __ptrace_unlink(struct task_struct *child); -extern void exit_ptrace(struct task_struct *tracer); +extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); #define PTRACE_MODE_READ 0x01 #define PTRACE_MODE_ATTACH 0x02 #define PTRACE_MODE_NOAUDIT 0x04 diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h index 18d75e79560..e1ab6e86cdb 100644 --- a/include/linux/pxa168_eth.h +++ b/include/linux/pxa168_eth.h @@ -4,6 +4,8 @@ #ifndef __LINUX_PXA168_ETH_H #define __LINUX_PXA168_ETH_H +#include <linux/phy.h> + struct pxa168_eth_platform_data { int port_number; int phy_addr; @@ -13,6 +15,7 @@ struct pxa168_eth_platform_data { */ int speed; /* 0, SPEED_10, SPEED_100 */ int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ + phy_interface_t intf; /* * Override default RX/TX queue sizes if nonzero. 
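For reference, a minimal usage sketch of the unified device property API added in include/linux/property.h above; foo_probe_props() and the "vendor,bus-width"/"label" property names are hypothetical examples, not part of this patch:

    #include <linux/device.h>
    #include <linux/property.h>

    /* Hypothetical probe helper; property names are examples only. */
    static int foo_probe_props(struct device *dev)
    {
            struct fwnode_handle *child;
            u32 width;

            /* Single-value helpers wrap the *_array() variants with nval == 1. */
            if (device_property_read_u32(dev, "vendor,bus-width", &width))
                    width = 4;      /* property absent or unreadable: use a default */

            /* Walk child firmware nodes the same way regardless of backend. */
            device_for_each_child_node(dev, child) {
                    const char *label;

                    if (!fwnode_property_read_string(child, "label", &label))
                            dev_info(dev, "child node labelled %s\n", label);
            }

            return 0;
    }

The same calls work whether the values come from device tree or ACPI, which is the point of routing lookups through struct fwnode_handle with its FWNODE_OF/FWNODE_ACPI types.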
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index f2b40511616..77aed9ea1d2 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h @@ -108,6 +108,25 @@ #define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */ #endif +/* QUARK_X1000 SSCR0 bit definition */ +#define QUARK_X1000_SSCR0_DSS (0x1F) /* Data Size Select (mask) */ +#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */ +#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */ +#define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */ + +#define RX_THRESH_QUARK_X1000_DFLT 1 +#define TX_THRESH_QUARK_X1000_DFLT 16 + +#define QUARK_X1000_SSSR_TFL_MASK (0x1F << 8) /* Transmit FIFO Level mask */ +#define QUARK_X1000_SSSR_RFL_MASK (0x1F << 13) /* Receive FIFO Level mask */ + +#define QUARK_X1000_SSCR1_TFT (0x1F << 6) /* Transmit FIFO Threshold (mask) */ +#define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */ +#define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */ +#define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */ +#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */ +#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */ + /* extra bits in PXA255, PXA26x and PXA27x SSP ports */ #define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ #define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */ @@ -175,6 +194,7 @@ enum pxa_ssp_type { PXA910_SSP, CE4100_SSP, LPSS_SSP, + QUARK_X1000_SSP, }; struct ssp_device { diff --git a/include/linux/quota.h b/include/linux/quota.h index 80d345a3524..50978b781a1 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -56,6 +56,11 @@ enum quota_type { PRJQUOTA = 2, /* element used for project quotas */ }; +/* Masks for quota types when used as a bitmask */ +#define QTYPE_MASK_USR (1 << USRQUOTA) +#define QTYPE_MASK_GRP (1 << GRPQUOTA) +#define QTYPE_MASK_PRJ (1 << PRJQUOTA) + typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ typedef long long qsize_t; /* Type in which we store sizes */ diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 1d3eee594cd..f23538a6e41 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -64,10 +64,10 @@ void dquot_destroy(struct dquot *dquot); int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags); void __dquot_free_space(struct inode *inode, qsize_t number, int flags); -int dquot_alloc_inode(const struct inode *inode); +int dquot_alloc_inode(struct inode *inode); int dquot_claim_space_nodirty(struct inode *inode, qsize_t number); -void dquot_free_inode(const struct inode *inode); +void dquot_free_inode(struct inode *inode); void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number); int dquot_disable(struct super_block *sb, int type, unsigned int flags); @@ -213,12 +213,12 @@ static inline void dquot_drop(struct inode *inode) { } -static inline int dquot_alloc_inode(const struct inode *inode) +static inline int dquot_alloc_inode(struct inode *inode) { return 0; } -static inline void dquot_free_inode(const struct inode *inode) +static inline void dquot_free_inode(struct inode *inode) { } diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 372ad5e0dcb..529bc946f45 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -241,7 +241,7 @@ static inline void list_splice_init_rcu(struct list_head *list, * list_entry_rcu - get the 
struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * This primitive may safely run concurrently with the _rcu list-mutation * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). @@ -278,7 +278,7 @@ static inline void list_splice_init_rcu(struct list_head *list, * list_first_or_null_rcu - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Note that if the list is empty, it returns NULL. * @@ -296,7 +296,7 @@ static inline void list_splice_init_rcu(struct list_head *list, * list_for_each_entry_rcu - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as list_add_rcu() @@ -311,7 +311,7 @@ static inline void list_splice_init_rcu(struct list_head *list, * list_for_each_entry_continue_rcu - continue iteration over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Continue to iterate over list of given type, continuing after * the current position. @@ -542,6 +542,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ typeof(*(pos)), member)) +/** + * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point + * @pos: the type * to use as a loop cursor. + * @member: the name of the hlist_node within the struct. 
+ */ +#define hlist_for_each_entry_from_rcu(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ + typeof(*(pos)), member)) #endif /* __KERNEL__ */ #endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 53ff1a752d7..ed4f5939a45 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -57,7 +57,7 @@ enum rcutorture_type { INVALID_RCU_FLAVOR }; -#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) +#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, unsigned long *gpnum, unsigned long *completed); void rcutorture_record_test_transition(void); @@ -260,7 +260,7 @@ static inline int rcu_preempt_depth(void) void rcu_init(void); void rcu_sched_qs(void); void rcu_bh_qs(void); -void rcu_check_callbacks(int cpu, int user); +void rcu_check_callbacks(int user); struct notifier_block; void rcu_idle_enter(void); void rcu_idle_exit(void); @@ -348,8 +348,8 @@ extern struct srcu_struct tasks_rcu_exit_srcu; */ #define cond_resched_rcu_qs() \ do { \ - rcu_note_voluntary_context_switch(current); \ - cond_resched(); \ + if (!cond_resched()) \ + rcu_note_voluntary_context_switch(current); \ } while (0) #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) @@ -365,7 +365,7 @@ typedef void call_rcu_func_t(struct rcu_head *head, void (*func)(struct rcu_head *head)); void wait_rcu_gp(call_rcu_func_t crf); -#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) +#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) #include <linux/rcutree.h> #elif defined(CONFIG_TINY_RCU) #include <linux/rcutiny.h> @@ -867,7 +867,7 @@ static inline void rcu_preempt_sleep_check(void) * * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), * it is illegal to block while in an RCU read-side critical section. - * In preemptible RCU implementations (TREE_PREEMPT_RCU) in CONFIG_PREEMPT + * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT * kernel builds, RCU read-side critical sections may be preempted, * but explicit blocking is illegal. Finally, in preemptible RCU * implementations in real-time (with -rt patchset) kernel builds, RCU @@ -902,7 +902,9 @@ static inline void rcu_read_lock(void) * Unfortunately, this function acquires the scheduler's runqueue and * priority-inheritance spinlocks. This means that deadlock could result * if the caller of rcu_read_unlock() already holds one of these locks or - * any lock that is ever acquired while holding them. + * any lock that is ever acquired while holding them; or any lock which + * can be taken from interrupt context because rcu_boost()->rt_mutex_lock() + * does not disable irqs while taking ->wait_lock. * * That said, RCU readers are never priority boosted unless they were * preempted. 
Therefore, one way to avoid deadlock is to make sure @@ -1062,6 +1064,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) */ #define RCU_INIT_POINTER(p, v) \ do { \ + rcu_dereference_sparse(p, __rcu); \ p = RCU_INITIALIZER(v); \ } while (0) @@ -1118,7 +1121,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) -static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) +static inline int rcu_needs_cpu(unsigned long *delta_jiffies) { *delta_jiffies = ULONG_MAX; return 0; diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 38cc5b1e252..0e536620015 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -78,7 +78,7 @@ static inline void kfree_call_rcu(struct rcu_head *head, call_rcu(head, func); } -static inline void rcu_note_context_switch(int cpu) +static inline void rcu_note_context_switch(void) { rcu_sched_qs(); } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 3e2f5d43274..52953790dcc 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -30,9 +30,9 @@ #ifndef __LINUX_RCUTREE_H #define __LINUX_RCUTREE_H -void rcu_note_context_switch(int cpu); +void rcu_note_context_switch(void); #ifndef CONFIG_RCU_NOCB_CPU_ALL -int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies); +int rcu_needs_cpu(unsigned long *delta_jiffies); #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ void rcu_cpu_stall_reset(void); @@ -43,7 +43,7 @@ void rcu_cpu_stall_reset(void); */ static inline void rcu_virt_note_context_switch(int cpu) { - rcu_note_context_switch(cpu); + rcu_note_context_switch(); } void synchronize_rcu_bh(void); diff --git a/include/linux/regmap.h b/include/linux/regmap.h index c5ed83f49c4..4419b99d8d6 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -27,6 +27,7 @@ struct spmi_device; struct regmap; struct regmap_range_cfg; struct regmap_field; +struct snd_ac97; /* An enum of all the supported cache types */ enum regcache_type { @@ -340,6 +341,8 @@ struct regmap *regmap_init_spmi_ext(struct spmi_device *dev, struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id, void __iomem *regs, const struct regmap_config *config); +struct regmap *regmap_init_ac97(struct snd_ac97 *ac97, + const struct regmap_config *config); struct regmap *devm_regmap_init(struct device *dev, const struct regmap_bus *bus, @@ -356,6 +359,10 @@ struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *dev, struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id, void __iomem *regs, const struct regmap_config *config); +struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97, + const struct regmap_config *config); + +bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); /** * regmap_init_mmio(): Initialise register map diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index f540b1496e2..d17e1ff7ad0 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -101,6 +101,8 @@ struct regmap; * Data passed is "struct pre_voltage_change_data" * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason. * Data passed is old voltage cast to (void *). + * PRE_DISABLE Regulator is about to be disabled + * ABORT_DISABLE Regulator disable failed for some reason * * NOTE: These events can be OR'ed together when passed into handler. 
*/ @@ -115,6 +117,8 @@ struct regmap; #define REGULATOR_EVENT_DISABLE 0x80 #define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100 #define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200 +#define REGULATOR_EVENT_PRE_DISABLE 0x400 +#define REGULATOR_EVENT_ABORT_DISABLE 0x800 /** * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event @@ -284,7 +288,7 @@ devm_regulator_get(struct device *dev, const char *id) static inline struct regulator *__must_check regulator_get_exclusive(struct device *dev, const char *id) { - return NULL; + return ERR_PTR(-ENODEV); } static inline struct regulator *__must_check diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index fc0ee0ce832..5f1e9ca4741 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -243,6 +243,8 @@ enum regulator_type { * * @enable_time: Time taken for initial enable of regulator (in uS). * @off_on_delay: guard time (in uS), before re-enabling a regulator + * + * @of_map_mode: Maps a hardware mode defined in a DeviceTree to a standard mode */ struct regulator_desc { const char *name; @@ -285,6 +287,8 @@ struct regulator_desc { unsigned int enable_time; unsigned int off_on_delay; + + unsigned int (*of_map_mode)(unsigned int mode); }; /** @@ -301,6 +305,9 @@ struct regulator_desc { * NULL). * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is * insufficient. + * @ena_gpio_initialized: GPIO controlling regulator enable was properly + * initialized, meaning that >= 0 is a valid gpio + * identifier and < 0 is a non existent gpio. * @ena_gpio: GPIO controlling regulator enable. * @ena_gpio_invert: Sense for GPIO enable control. * @ena_gpio_flags: Flags to use when calling gpio_request_one() @@ -312,6 +319,7 @@ struct regulator_config { struct device_node *of_node; struct regmap *regmap; + bool ena_gpio_initialized; int ena_gpio; unsigned int ena_gpio_invert:1; unsigned int ena_gpio_flags; diff --git a/include/linux/regulator/of_regulator.h b/include/linux/regulator/of_regulator.h index f9217965aaa..763953f7e3b 100644 --- a/include/linux/regulator/of_regulator.h +++ b/include/linux/regulator/of_regulator.h @@ -6,24 +6,29 @@ #ifndef __LINUX_OF_REG_H #define __LINUX_OF_REG_H +struct regulator_desc; + struct of_regulator_match { const char *name; void *driver_data; struct regulator_init_data *init_data; struct device_node *of_node; + const struct regulator_desc *desc; }; #if defined(CONFIG_OF) extern struct regulator_init_data *of_get_regulator_init_data(struct device *dev, - struct device_node *node); + struct device_node *node, + const struct regulator_desc *desc); extern int of_regulator_match(struct device *dev, struct device_node *node, struct of_regulator_match *matches, unsigned int num_matches); #else static inline struct regulator_init_data *of_get_regulator_init_data(struct device *dev, - struct device_node *node) + struct device_node *node, + const struct regulator_desc *desc) { return NULL; } diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h deleted file mode 100644 index 56b7bc32db4..00000000000 --- a/include/linux/res_counter.h +++ /dev/null @@ -1,223 +0,0 @@ -#ifndef __RES_COUNTER_H__ -#define __RES_COUNTER_H__ - -/* - * Resource Counters - * Contain common data types and routines for resource accounting - * - * Copyright 2007 OpenVZ SWsoft Inc - * - * Author: Pavel Emelianov <xemul@openvz.org> - * - * See Documentation/cgroups/resource_counter.txt for more - * info about what this counter is. 
- */ - -#include <linux/spinlock.h> -#include <linux/errno.h> - -/* - * The core object. the cgroup that wishes to account for some - * resource may include this counter into its structures and use - * the helpers described beyond - */ - -struct res_counter { - /* - * the current resource consumption level - */ - unsigned long long usage; - /* - * the maximal value of the usage from the counter creation - */ - unsigned long long max_usage; - /* - * the limit that usage cannot exceed - */ - unsigned long long limit; - /* - * the limit that usage can be exceed - */ - unsigned long long soft_limit; - /* - * the number of unsuccessful attempts to consume the resource - */ - unsigned long long failcnt; - /* - * the lock to protect all of the above. - * the routines below consider this to be IRQ-safe - */ - spinlock_t lock; - /* - * Parent counter, used for hierarchial resource accounting - */ - struct res_counter *parent; -}; - -#define RES_COUNTER_MAX ULLONG_MAX - -/** - * Helpers to interact with userspace - * res_counter_read_u64() - returns the value of the specified member. - * res_counter_read/_write - put/get the specified fields from the - * res_counter struct to/from the user - * - * @counter: the counter in question - * @member: the field to work with (see RES_xxx below) - * @buf: the buffer to opeate on,... - * @nbytes: its size... - * @pos: and the offset. - */ - -u64 res_counter_read_u64(struct res_counter *counter, int member); - -ssize_t res_counter_read(struct res_counter *counter, int member, - const char __user *buf, size_t nbytes, loff_t *pos, - int (*read_strategy)(unsigned long long val, char *s)); - -int res_counter_memparse_write_strategy(const char *buf, - unsigned long long *res); - -/* - * the field descriptors. one for each member of res_counter - */ - -enum { - RES_USAGE, - RES_MAX_USAGE, - RES_LIMIT, - RES_FAILCNT, - RES_SOFT_LIMIT, -}; - -/* - * helpers for accounting - */ - -void res_counter_init(struct res_counter *counter, struct res_counter *parent); - -/* - * charge - try to consume more resource. - * - * @counter: the counter - * @val: the amount of the resource. each controller defines its own - * units, e.g. numbers, bytes, Kbytes, etc - * - * returns 0 on success and <0 if the counter->usage will exceed the - * counter->limit - * - * charge_nofail works the same, except that it charges the resource - * counter unconditionally, and returns < 0 if the after the current - * charge we are over limit. - */ - -int __must_check res_counter_charge(struct res_counter *counter, - unsigned long val, struct res_counter **limit_fail_at); -int res_counter_charge_nofail(struct res_counter *counter, - unsigned long val, struct res_counter **limit_fail_at); - -/* - * uncharge - tell that some portion of the resource is released - * - * @counter: the counter - * @val: the amount of the resource - * - * these calls check for usage underflow and show a warning on the console - * - * returns the total charges still present in @counter. - */ - -u64 res_counter_uncharge(struct res_counter *counter, unsigned long val); - -u64 res_counter_uncharge_until(struct res_counter *counter, - struct res_counter *top, - unsigned long val); -/** - * res_counter_margin - calculate chargeable space of a counter - * @cnt: the counter - * - * Returns the difference between the hard limit and the current usage - * of resource counter @cnt. 
- */ -static inline unsigned long long res_counter_margin(struct res_counter *cnt) -{ - unsigned long long margin; - unsigned long flags; - - spin_lock_irqsave(&cnt->lock, flags); - if (cnt->limit > cnt->usage) - margin = cnt->limit - cnt->usage; - else - margin = 0; - spin_unlock_irqrestore(&cnt->lock, flags); - return margin; -} - -/** - * Get the difference between the usage and the soft limit - * @cnt: The counter - * - * Returns 0 if usage is less than or equal to soft limit - * The difference between usage and soft limit, otherwise. - */ -static inline unsigned long long -res_counter_soft_limit_excess(struct res_counter *cnt) -{ - unsigned long long excess; - unsigned long flags; - - spin_lock_irqsave(&cnt->lock, flags); - if (cnt->usage <= cnt->soft_limit) - excess = 0; - else - excess = cnt->usage - cnt->soft_limit; - spin_unlock_irqrestore(&cnt->lock, flags); - return excess; -} - -static inline void res_counter_reset_max(struct res_counter *cnt) -{ - unsigned long flags; - - spin_lock_irqsave(&cnt->lock, flags); - cnt->max_usage = cnt->usage; - spin_unlock_irqrestore(&cnt->lock, flags); -} - -static inline void res_counter_reset_failcnt(struct res_counter *cnt) -{ - unsigned long flags; - - spin_lock_irqsave(&cnt->lock, flags); - cnt->failcnt = 0; - spin_unlock_irqrestore(&cnt->lock, flags); -} - -static inline int res_counter_set_limit(struct res_counter *cnt, - unsigned long long limit) -{ - unsigned long flags; - int ret = -EBUSY; - - spin_lock_irqsave(&cnt->lock, flags); - if (cnt->usage <= limit) { - cnt->limit = limit; - ret = 0; - } - spin_unlock_irqrestore(&cnt->lock, flags); - return ret; -} - -static inline int -res_counter_set_soft_limit(struct res_counter *cnt, - unsigned long long soft_limit) -{ - unsigned long flags; - - spin_lock_irqsave(&cnt->lock, flags); - cnt->soft_limit = soft_limit; - spin_unlock_irqrestore(&cnt->lock, flags); - return 0; -} - -#endif diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h index 41a4695fde0..ce6b962ffed 100644 --- a/include/linux/reset-controller.h +++ b/include/linux/reset-controller.h @@ -12,11 +12,13 @@ struct reset_controller_dev; * things to reset the device * @assert: manually assert the reset line, if supported * @deassert: manually deassert the reset line, if supported + * @status: return the status of the reset line, if supported */ struct reset_control_ops { int (*reset)(struct reset_controller_dev *rcdev, unsigned long id); int (*assert)(struct reset_controller_dev *rcdev, unsigned long id); int (*deassert)(struct reset_controller_dev *rcdev, unsigned long id); + int (*status)(struct reset_controller_dev *rcdev, unsigned long id); }; struct module; diff --git a/include/linux/reset.h b/include/linux/reset.h index 349f150ae12..da5602bd77d 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -10,6 +10,7 @@ struct reset_control; int reset_control_reset(struct reset_control *rstc); int reset_control_assert(struct reset_control *rstc); int reset_control_deassert(struct reset_control *rstc); +int reset_control_status(struct reset_control *rstc); struct reset_control *reset_control_get(struct device *dev, const char *id); void reset_control_put(struct reset_control *rstc); @@ -57,6 +58,12 @@ static inline int reset_control_deassert(struct reset_control *rstc) return 0; } +static inline int reset_control_status(struct reset_control *rstc) +{ + WARN_ON(1); + return 0; +} + static inline void reset_control_put(struct reset_control *rstc) { WARN_ON(1); diff --git 
a/include/linux/rhashtable.h b/include/linux/rhashtable.h index fb298e9d6d3..b93fd89b2e5 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -65,7 +65,10 @@ struct rhashtable_params { size_t new_size); bool (*shrink_decision)(const struct rhashtable *ht, size_t new_size); - int (*mutex_is_held)(void); +#ifdef CONFIG_PROVE_LOCKING + int (*mutex_is_held)(void *parent); + void *parent; +#endif }; /** @@ -96,16 +99,16 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len); u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr); -void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node, gfp_t); -bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node, gfp_t); +void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); +bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, - struct rhash_head __rcu **pprev, gfp_t flags); + struct rhash_head __rcu **pprev); bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); -int rhashtable_expand(struct rhashtable *ht, gfp_t flags); -int rhashtable_shrink(struct rhashtable *ht, gfp_t flags); +int rhashtable_expand(struct rhashtable *ht); +int rhashtable_shrink(struct rhashtable *ht); void *rhashtable_lookup(const struct rhashtable *ht, const void *key); void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, diff --git a/include/linux/rtc.h b/include/linux/rtc.h index c2c28975293..6d6be09a2fe 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -19,11 +19,28 @@ extern int rtc_month_days(unsigned int month, unsigned int year); extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year); extern int rtc_valid_tm(struct rtc_time *tm); -extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time); -extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm); +extern time64_t rtc_tm_to_time64(struct rtc_time *tm); +extern void rtc_time64_to_tm(time64_t time, struct rtc_time *tm); ktime_t rtc_tm_to_ktime(struct rtc_time tm); struct rtc_time rtc_ktime_to_tm(ktime_t kt); +/** + * Deprecated. Use rtc_time64_to_tm(). + */ +static inline void rtc_time_to_tm(unsigned long time, struct rtc_time *tm) +{ + rtc_time64_to_tm(time, tm); +} + +/** + * Deprecated. Use rtc_tm_to_time64(). 
+ */ +static inline int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time) +{ + *time = rtc_tm_to_time64(tm); + + return 0; +} #include <linux/device.h> #include <linux/seq_file.h> diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 6cacbce1a06..5db76a32fca 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -17,6 +17,11 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, long expires, u32 error); void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); +struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, + unsigned change, gfp_t flags); +void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, + gfp_t flags); + /* RTNL is used as a global lock for all changes to network configuration */ extern void rtnl_lock(void); @@ -94,12 +99,15 @@ extern int ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, - u16 flags); + u16 vid, + u16 flags); extern int ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr); + const unsigned char *addr, + u16 vid); extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, u16 mode); + struct net_device *dev, u16 mode, + u32 flags, u32 mask); #endif /* __LINUX_RTNETLINK_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 5e344bbe63e..55f5ee7cc3d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -243,6 +243,43 @@ extern char ___assert_task_state[1 - 2*!!( ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ (task->flags & PF_FROZEN) == 0) +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP + +#define __set_task_state(tsk, state_value) \ + do { \ + (tsk)->task_state_change = _THIS_IP_; \ + (tsk)->state = (state_value); \ + } while (0) +#define set_task_state(tsk, state_value) \ + do { \ + (tsk)->task_state_change = _THIS_IP_; \ + set_mb((tsk)->state, (state_value)); \ + } while (0) + +/* + * set_current_state() includes a barrier so that the write of current->state + * is correctly serialised wrt the caller's subsequent test of whether to + * actually sleep: + * + * set_current_state(TASK_UNINTERRUPTIBLE); + * if (do_i_need_to_sleep()) + * schedule(); + * + * If the caller does not need such serialisation then use __set_current_state() + */ +#define __set_current_state(state_value) \ + do { \ + current->task_state_change = _THIS_IP_; \ + current->state = (state_value); \ + } while (0) +#define set_current_state(state_value) \ + do { \ + current->task_state_change = _THIS_IP_; \ + set_mb(current->state, (state_value)); \ + } while (0) + +#else + #define __set_task_state(tsk, state_value) \ do { (tsk)->state = (state_value); } while (0) #define set_task_state(tsk, state_value) \ @@ -259,11 +296,13 @@ extern char ___assert_task_state[1 - 2*!!( * * If the caller does not need such serialisation then use __set_current_state() */ -#define __set_current_state(state_value) \ +#define __set_current_state(state_value) \ do { current->state = (state_value); } while (0) -#define set_current_state(state_value) \ +#define set_current_state(state_value) \ set_mb(current->state, (state_value)) +#endif + /* Task command name length */ #define TASK_COMM_LEN 16 @@ -1278,9 +1317,9 @@ struct task_struct { union rcu_special rcu_read_unlock_special; struct list_head rcu_node_entry; #endif /* #ifdef CONFIG_PREEMPT_RCU */ -#ifdef CONFIG_TREE_PREEMPT_RCU +#ifdef CONFIG_PREEMPT_RCU struct 
rcu_node *rcu_blocked_node; -#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ +#endif /* #ifdef CONFIG_PREEMPT_RCU */ #ifdef CONFIG_TASKS_RCU unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; @@ -1558,28 +1597,23 @@ struct task_struct { struct numa_group *numa_group; /* - * Exponential decaying average of faults on a per-node basis. - * Scheduling placement decisions are made based on the these counts. - * The values remain static for the duration of a PTE scan + * numa_faults is an array split into four regions: + * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer + * in this precise order. + * + * faults_memory: Exponential decaying average of faults on a per-node + * basis. Scheduling placement decisions are made based on these + * counts. The values remain static for the duration of a PTE scan. + * faults_cpu: Track the nodes the process was running on when a NUMA + * hinting fault was incurred. + * faults_memory_buffer and faults_cpu_buffer: Record faults per node + * during the current scan window. When the scan completes, the counts + * in faults_memory and faults_cpu decay and these values are copied. */ - unsigned long *numa_faults_memory; + unsigned long *numa_faults; unsigned long total_numa_faults; /* - * numa_faults_buffer records faults per node during the current - * scan window. When the scan completes, the counts in - * numa_faults_memory decay and these values are copied. - */ - unsigned long *numa_faults_buffer_memory; - - /* - * Track the nodes the process was running on when a NUMA hinting - * fault was incurred. - */ - unsigned long *numa_faults_cpu; - unsigned long *numa_faults_buffer_cpu; - - /* * numa_faults_locality tracks if faults recorded during the last * scan window were remote/local. The task scan period is adapted * based on the locality of the faults with different weights @@ -1661,6 +1695,9 @@ struct task_struct { unsigned int sequential_io; unsigned int sequential_io_avg; #endif +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP + unsigned long task_state_change; +#endif }; /* Future-safe accessor for struct task_struct's cpus_allowed. 
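With CONFIG_DEBUG_ATOMIC_SLEEP, the new set_*_state() variants above also record the call site in the task_state_change field added to task_struct, so the usual open-coded sleep pattern needs no change. A sketch of that pattern, assuming a hypothetical example_ready flag whose writer calls wake_up_process() on the sleeping task:

#include <linux/sched.h>

static bool example_ready;

/* Open-coded sleep: set_current_state() orders the state write against the
 * condition test and, with DEBUG_ATOMIC_SLEEP, stores _THIS_IP_ in
 * current->task_state_change for later diagnostics. */
static void example_wait_until_ready(void)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        while (!example_ready) {
                schedule();
                set_current_state(TASK_UNINTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
}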
*/ @@ -2052,6 +2089,10 @@ static inline void tsk_restore_flags(struct task_struct *task, task->flags |= orig_flags & flags; } +extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, + const struct cpumask *trial); +extern int task_can_attach(struct task_struct *p, + const struct cpumask *cs_cpus_allowed); #ifdef CONFIG_SMP extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); @@ -2760,7 +2801,7 @@ static inline int signal_pending_state(long state, struct task_struct *p) extern int _cond_resched(void); #define cond_resched() ({ \ - __might_sleep(__FILE__, __LINE__, 0); \ + ___might_sleep(__FILE__, __LINE__, 0); \ _cond_resched(); \ }) @@ -2773,14 +2814,14 @@ extern int __cond_resched_lock(spinlock_t *lock); #endif #define cond_resched_lock(lock) ({ \ - __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ + ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ __cond_resched_lock(lock); \ }) extern int __cond_resched_softirq(void); #define cond_resched_softirq() ({ \ - __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ + ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ __cond_resched_softirq(); \ }) diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h new file mode 100644 index 00000000000..9aafe0e24c6 --- /dev/null +++ b/include/linux/seq_buf.h @@ -0,0 +1,136 @@ +#ifndef _LINUX_SEQ_BUF_H +#define _LINUX_SEQ_BUF_H + +#include <linux/fs.h> + +/* + * Trace sequences are used to allow a function to call several other functions + * to create a string of data to use. + */ + +/** + * seq_buf - seq buffer structure + * @buffer: pointer to the buffer + * @size: size of the buffer + * @len: the amount of data inside the buffer + * @readpos: The next position to read in the buffer. + */ +struct seq_buf { + char *buffer; + size_t size; + size_t len; + loff_t readpos; +}; + +static inline void seq_buf_clear(struct seq_buf *s) +{ + s->len = 0; + s->readpos = 0; +} + +static inline void +seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size) +{ + s->buffer = buf; + s->size = size; + seq_buf_clear(s); +} + +/* + * seq_buf have a buffer that might overflow. When this happens + * the len and size are set to be equal. + */ +static inline bool +seq_buf_has_overflowed(struct seq_buf *s) +{ + return s->len > s->size; +} + +static inline void +seq_buf_set_overflow(struct seq_buf *s) +{ + s->len = s->size + 1; +} + +/* + * How much buffer is left on the seq_buf? + */ +static inline unsigned int +seq_buf_buffer_left(struct seq_buf *s) +{ + if (seq_buf_has_overflowed(s)) + return 0; + + return s->size - s->len; +} + +/* How much buffer was written? */ +static inline unsigned int seq_buf_used(struct seq_buf *s) +{ + return min(s->len, s->size); +} + +/** + * seq_buf_get_buf - get buffer to write arbitrary data to + * @s: the seq_buf handle + * @bufp: the beginning of the buffer is stored here + * + * Return the number of bytes available in the buffer, or zero if + * there's no space. + */ +static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp) +{ + WARN_ON(s->len > s->size + 1); + + if (s->len < s->size) { + *bufp = s->buffer + s->len; + return s->size - s->len; + } + + *bufp = NULL; + return 0; +} + +/** + * seq_buf_commit - commit data to the buffer + * @s: the seq_buf handle + * @num: the number of bytes to commit + * + * Commit @num bytes of data written to a buffer previously acquired + * by seq_buf_get. 
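The new seq_buf API is a length-tracking descriptor over a caller-supplied buffer. A minimal usage sketch; the buffer size, the message text and the example_ function are arbitrary choices, not part of the API:

#include <linux/seq_buf.h>
#include <linux/printk.h>

static void example_seq_buf_report(void)
{
        char buf[64];
        struct seq_buf s;

        seq_buf_init(&s, buf, sizeof(buf));
        seq_buf_printf(&s, "threshold=%d ", 42);
        seq_buf_puts(&s, "ok");

        if (seq_buf_has_overflowed(&s))
                return;         /* writes no longer fit; data is truncated */

        pr_info("%.*s\n", (int)seq_buf_used(&s), buf);
}

seq_buf_used() clamps len to size, so it stays safe to print from the buffer even after an overflow.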
To signal an error condition, or that the data + * didn't fit in the available space, pass a negative @num value. + */ +static inline void seq_buf_commit(struct seq_buf *s, int num) +{ + if (num < 0) { + seq_buf_set_overflow(s); + } else { + /* num must be negative on overflow */ + BUG_ON(s->len + num > s->size); + s->len += num; + } +} + +extern __printf(2, 3) +int seq_buf_printf(struct seq_buf *s, const char *fmt, ...); +extern __printf(2, 0) +int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args); +extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s); +extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, + int cnt); +extern int seq_buf_puts(struct seq_buf *s, const char *str); +extern int seq_buf_putc(struct seq_buf *s, unsigned char c); +extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len); +extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, + unsigned int len); +extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc); + +extern int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp, + int nmaskbits); + +#ifdef CONFIG_BINARY_PRINTF +extern int +seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary); +#endif + +#endif /* _LINUX_SEQ_BUF_H */ diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 52e0097f61f..cf6a9daaaf6 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -43,6 +43,21 @@ struct seq_operations { #define SEQ_SKIP 1 /** + * seq_has_overflowed - check if the buffer has overflowed + * @m: the seq_file handle + * + * seq_files have a buffer which may overflow. When this happens a larger + * buffer is reallocated and all the data will be printed again. + * The overflow state is true when m->count == m->size. + * + * Returns true if the buffer received more than it can hold. + */ +static inline bool seq_has_overflowed(struct seq_file *m) +{ + return m->count == m->size; +} + +/** * seq_get_buf - get buffer to write arbitrary data to * @m: the seq_file handle * @bufp: the beginning of the buffer is stored here diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6c8b6f604e7..85ab7d72b54 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -20,6 +20,8 @@ #include <linux/time.h> #include <linux/bug.h> #include <linux/cache.h> +#include <linux/rbtree.h> +#include <linux/socket.h> #include <linux/atomic.h> #include <asm/types.h> @@ -148,6 +150,8 @@ struct net_device; struct scatterlist; struct pipe_inode_info; +struct iov_iter; +struct napi_struct; #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct nf_conntrack { @@ -341,7 +345,6 @@ enum { SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */ SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */ SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */ - SKB_FCLONE_FREE, /* this companion fclone skb is available */ }; enum { @@ -370,8 +373,7 @@ enum { SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, - SKB_GSO_MPLS = 1 << 12, - + SKB_GSO_TUNNEL_REMCSUM = 1 << 12, }; #if BITS_PER_LONG > 32 @@ -440,6 +442,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, * @next: Next buffer in list * @prev: Previous buffer in list * @tstamp: Time we arrived/left + * @rbnode: RB tree node, alternative to next/prev for netem/tcp * @sk: Socket we are owned by * @dev: Device we arrived on/are leaving by * @cb: Control buffer. Free for use by every layer. 
Put private vars here @@ -504,15 +507,19 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, */ struct sk_buff { - /* These two members must be first. */ - struct sk_buff *next; - struct sk_buff *prev; - union { - ktime_t tstamp; - struct skb_mstamp skb_mstamp; + struct { + /* These two members must be first. */ + struct sk_buff *next; + struct sk_buff *prev; + + union { + ktime_t tstamp; + struct skb_mstamp skb_mstamp; + }; + }; + struct rb_node rbnode; /* used in netem & tcp stack */ }; - struct sock *sk; struct net_device *dev; @@ -597,7 +604,8 @@ struct sk_buff { #endif __u8 ipvs_property:1; __u8 inner_protocol_type:1; - /* 4 or 6 bit hole */ + __u8 remcsum_offload:1; + /* 3 or 5 bit hole */ #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ @@ -666,6 +674,7 @@ struct sk_buff { #define SKB_ALLOC_FCLONE 0x01 #define SKB_ALLOC_RX 0x02 +#define SKB_ALLOC_NAPI 0x04 /* Returns true if the skb was allocated from PFMEMALLOC reserves */ static inline bool skb_pfmemalloc(const struct sk_buff *skb) @@ -710,9 +719,6 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) skb->_skb_refdst = (unsigned long)dst; } -void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, - bool force); - /** * skb_dst_set_noref - sets skb dst, hopefully, without taking reference * @skb: buffer @@ -725,24 +731,8 @@ void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, */ static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) { - __skb_dst_set_noref(skb, dst, false); -} - -/** - * skb_dst_set_noref_force - sets skb dst, without taking reference - * @skb: buffer - * @dst: dst entry - * - * Sets skb dst, assuming a reference was not taken on dst. - * No reference is taken and no dst_release will be called. While for - * cached dsts deferred reclaim is a basic feature, for entries that are - * not cached it is caller's job to guarantee that last dst_release for - * provided dst happens when nobody uses it, eg. after a RCU grace period. - */ -static inline void skb_dst_set_noref_force(struct sk_buff *skb, - struct dst_entry *dst) -{ - __skb_dst_set_noref(skb, dst, true); + WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); + skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; } /** @@ -810,7 +800,7 @@ static inline bool skb_fclone_busy(const struct sock *sk, fclones = container_of(skb, struct sk_buff_fclones, skb1); return skb->fclone == SKB_FCLONE_ORIG && - fclones->skb2.fclone == SKB_FCLONE_CLONE && + atomic_read(&fclones->fclone_ref) > 1 && fclones->skb2.sk == sk; } @@ -2176,47 +2166,61 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); } +void *napi_alloc_frag(unsigned int fragsz); +struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, + unsigned int length, gfp_t gfp_mask); +static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, + unsigned int length) +{ + return __napi_alloc_skb(napi, length, GFP_ATOMIC); +} + /** - * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data - * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX - * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used - * @order: size of the allocation + * __dev_alloc_pages - allocate page for network Rx + * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx + * @order: size of the allocation * - * Allocate a new page. 
+ * Allocate a new page. * - * %NULL is returned if there is no free memory. + * %NULL is returned if there is no free memory. */ -static inline struct page *__skb_alloc_pages(gfp_t gfp_mask, - struct sk_buff *skb, - unsigned int order) -{ - struct page *page; - - gfp_mask |= __GFP_COLD; - - if (!(gfp_mask & __GFP_NOMEMALLOC)) - gfp_mask |= __GFP_MEMALLOC; +static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, + unsigned int order) +{ + /* This piece of code contains several assumptions. + * 1. This is for device Rx, therefor a cold page is preferred. + * 2. The expectation is the user wants a compound page. + * 3. If requesting a order 0 page it will not be compound + * due to the check to see if order has a value in prep_new_page + * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to + * code in gfp_to_alloc_flags that should be enforcing this. + */ + gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC; - page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); - if (skb && page && page->pfmemalloc) - skb->pfmemalloc = true; + return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); +} - return page; +static inline struct page *dev_alloc_pages(unsigned int order) +{ + return __dev_alloc_pages(GFP_ATOMIC, order); } /** - * __skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data - * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX - * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used + * __dev_alloc_page - allocate a page for network Rx + * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx * - * Allocate a new page. + * Allocate a new page. * - * %NULL is returned if there is no free memory. + * %NULL is returned if there is no free memory. */ -static inline struct page *__skb_alloc_page(gfp_t gfp_mask, - struct sk_buff *skb) +static inline struct page *__dev_alloc_page(gfp_t gfp_mask) +{ + return __dev_alloc_pages(gfp_mask, 0); +} + +static inline struct page *dev_alloc_page(void) { - return __skb_alloc_pages(gfp_mask, skb, 0); + return __dev_alloc_page(GFP_ATOMIC); } /** @@ -2448,7 +2452,6 @@ static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) * is untouched. Otherwise it is extended. Returns zero on * success. The skb is freed on error. */ - static inline int skb_padto(struct sk_buff *skb, unsigned int len) { unsigned int size = skb->len; @@ -2457,6 +2460,29 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) return skb_pad(skb, len - size); } +/** + * skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. 
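The napi_alloc_skb()/dev_alloc_page() helpers above replace the old __skb_alloc_page()/__skb_alloc_pages() pair for Rx allocations. A sketch of how a driver might build one receive buffer with them; the split into a small header skb plus one page of payload, and all example_ names, are hypothetical:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Hypothetical Rx buffer build: a small skb for headers plus one page for
 * payload. The caller is assumed to DMA the frame in, fix up the lengths,
 * and then hand the skb to napi_gro_receive(). */
static struct sk_buff *example_build_rx_skb(struct napi_struct *napi,
                                            unsigned int hdr_len)
{
        struct sk_buff *skb;
        struct page *page;

        skb = napi_alloc_skb(napi, hdr_len);    /* GFP_ATOMIC internally */
        if (!skb)
                return NULL;

        page = dev_alloc_page();                /* order-0, cold, GFP_ATOMIC */
        if (!page) {
                dev_kfree_skb_any(skb);
                return NULL;
        }

        /* Lengths here assume a full page of payload, purely for illustration. */
        skb_add_rx_frag(skb, 0, page, 0, PAGE_SIZE, PAGE_SIZE);
        return skb;
}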
+ */ +static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} + static inline int skb_add_data(struct sk_buff *skb, char __user *from, int copy) { @@ -2629,18 +2655,18 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err); unsigned int datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait); -int skb_copy_datagram_iovec(const struct sk_buff *from, int offset, - struct iovec *to, int size); -int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen, - struct iovec *iov); -int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, - const struct iovec *from, int from_offset, - int len); -int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm, - int offset, size_t count); -int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset, - const struct iovec *to, int to_offset, - int size); +int skb_copy_datagram_iter(const struct sk_buff *from, int offset, + struct iov_iter *to, int size); +static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset, + struct msghdr *msg, int size) +{ + return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size); +} +int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen, + struct msghdr *msg); +int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, + struct iov_iter *from, int len); +int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); void skb_free_datagram(struct sock *sk, struct sk_buff *skb); void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb); int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); @@ -2661,6 +2687,20 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet); unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); struct sk_buff *skb_vlan_untag(struct sk_buff *skb); +int skb_ensure_writable(struct sk_buff *skb, int write_len); +int skb_vlan_pop(struct sk_buff *skb); +int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); + +static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len) +{ + /* XXX: stripping const */ + return memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len); +} + +static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len) +{ + return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT; +} struct skb_checksum_ops { __wsum (*update)(const void *mem, int len, __wsum wsum); diff --git a/include/linux/slab.h b/include/linux/slab.h index c265bec6a57..8a2457d42fc 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -513,10 +513,6 @@ struct memcg_cache_params { int memcg_update_all_caches(int num_memcgs); -struct seq_file; -int cache_show(struct kmem_cache *s, struct seq_file *m); -void print_slabinfo_header(struct seq_file *m); - /** * kmalloc_array - allocate memory for an array. * @n: number of elements. 
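skb_copy_datagram_msg() above (together with the socket.h change below that embeds an iov_iter in msghdr) lets a protocol's recvmsg path copy straight into the caller's msghdr instead of walking an iovec array. A trimmed sketch, with hypothetical example_ naming and no locking or checksum handling:

#include <linux/skbuff.h>
#include <linux/socket.h>

/* Copy a received datagram into the caller's msghdr, truncating if the
 * supplied buffer is smaller than the packet. */
static int example_recvmsg_copy(struct sk_buff *skb, struct msghdr *msg,
                                size_t len)
{
        size_t copied = skb->len;
        int err;

        if (copied > len) {
                copied = len;
                msg->msg_flags |= MSG_TRUNC;
        }

        err = skb_copy_datagram_msg(skb, 0, msg, copied);
        if (err)
                return err;

        return copied;
}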
diff --git a/include/linux/socket.h b/include/linux/socket.h index bb9b8364007..6e49a14365d 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -47,16 +47,25 @@ struct linger { struct msghdr { void *msg_name; /* ptr to socket address structure */ int msg_namelen; /* size of socket address structure */ - struct iovec *msg_iov; /* scatter/gather array */ - __kernel_size_t msg_iovlen; /* # elements in msg_iov */ + struct iov_iter msg_iter; /* data */ void *msg_control; /* ancillary data */ __kernel_size_t msg_controllen; /* ancillary data buffer length */ unsigned int msg_flags; /* flags on received message */ }; + +struct user_msghdr { + void __user *msg_name; /* ptr to socket address structure */ + int msg_namelen; /* size of socket address structure */ + struct iovec __user *msg_iov; /* scatter/gather array */ + __kernel_size_t msg_iovlen; /* # elements in msg_iov */ + void __user *msg_control; /* ancillary data */ + __kernel_size_t msg_controllen; /* ancillary data buffer length */ + unsigned int msg_flags; /* flags on received message */ +}; /* For recvmmsg/sendmmsg */ struct mmsghdr { - struct msghdr msg_hdr; + struct user_msghdr msg_hdr; unsigned int msg_len; }; @@ -94,6 +103,10 @@ struct cmsghdr { (cmsg)->cmsg_len <= (unsigned long) \ ((mhdr)->msg_controllen - \ ((char *)(cmsg) - (char *)(mhdr)->msg_control))) +#define for_each_cmsghdr(cmsg, msg) \ + for (cmsg = CMSG_FIRSTHDR(msg); \ + cmsg; \ + cmsg = CMSG_NXTHDR(msg, cmsg)) /* * Get the next cmsg header @@ -312,15 +325,14 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata, extern unsigned long iov_pages(const struct iovec *iov, int offset, unsigned long nr_segs); -extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode); extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); struct timespec; /* The __sys_...msg variants allow MSG_CMSG_COMPAT */ -extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); -extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); +extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags); +extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags); extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout); extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 46d188a9947..a6ef2a8e6de 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -1049,4 +1049,10 @@ spi_unregister_device(struct spi_device *spi) extern const struct spi_device_id * spi_get_device_id(const struct spi_device *sdev); +static inline bool +spi_transfer_is_last(struct spi_master *master, struct spi_transfer *xfer) +{ + return list_is_last(&xfer->transfer_list, &master->cur_msg->transfers); +} + #endif /* __LINUX_SPI_H */ diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 8e030075fe7..a7cbb570cc5 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -53,7 +53,7 @@ struct rpc_cred { struct rcu_head cr_rcu; struct rpc_auth * cr_auth; const struct rpc_credops *cr_ops; -#ifdef RPC_DEBUG +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) unsigned long cr_magic; /* 0x0f4aa4f0 */ #endif unsigned long cr_expire; /* when to gc */ diff --git a/include/linux/sunrpc/clnt.h 
b/include/linux/sunrpc/clnt.h index 70736b98c72..d86acc63b25 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -63,6 +63,9 @@ struct rpc_clnt { struct rpc_rtt cl_rtt_default; struct rpc_timeout cl_timeout_default; const struct rpc_program *cl_program; +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) + struct dentry *cl_debugfs; /* debugfs directory */ +#endif }; /* @@ -176,5 +179,6 @@ size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t); +const char *rpc_proc_name(const struct rpc_task *task); #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_CLNT_H */ diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index 9385bd74c86..c57d8ea0716 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h @@ -10,22 +10,10 @@ #include <uapi/linux/sunrpc/debug.h> - -/* - * Enable RPC debugging/profiling. - */ -#ifdef CONFIG_SUNRPC_DEBUG -#define RPC_DEBUG -#endif -#ifdef CONFIG_TRACEPOINTS -#define RPC_TRACEPOINTS -#endif -/* #define RPC_PROFILE */ - /* * Debugging macros etc */ -#ifdef RPC_DEBUG +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) extern unsigned int rpc_debug; extern unsigned int nfs_debug; extern unsigned int nfsd_debug; @@ -36,7 +24,7 @@ extern unsigned int nlm_debug; #define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args) #undef ifdebug -#ifdef RPC_DEBUG +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) # define dfprintk(fac, args...) \ @@ -65,9 +53,55 @@ extern unsigned int nlm_debug; /* * Sysctl interface for RPC debugging */ -#ifdef RPC_DEBUG + +struct rpc_clnt; +struct rpc_xprt; + +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) void rpc_register_sysctl(void); void rpc_unregister_sysctl(void); +int sunrpc_debugfs_init(void); +void sunrpc_debugfs_exit(void); +int rpc_clnt_debugfs_register(struct rpc_clnt *); +void rpc_clnt_debugfs_unregister(struct rpc_clnt *); +int rpc_xprt_debugfs_register(struct rpc_xprt *); +void rpc_xprt_debugfs_unregister(struct rpc_xprt *); +#else +static inline int +sunrpc_debugfs_init(void) +{ + return 0; +} + +static inline void +sunrpc_debugfs_exit(void) +{ + return; +} + +static inline int +rpc_clnt_debugfs_register(struct rpc_clnt *clnt) +{ + return 0; +} + +static inline void +rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt) +{ + return; +} + +static inline int +rpc_xprt_debugfs_register(struct rpc_xprt *xprt) +{ + return 0; +} + +static inline void +rpc_xprt_debugfs_unregister(struct rpc_xprt *xprt) +{ + return; +} #endif #endif /* _LINUX_SUNRPC_DEBUG_H_ */ diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h index 1565bbe86d5..eecb5a71e6c 100644 --- a/include/linux/sunrpc/metrics.h +++ b/include/linux/sunrpc/metrics.h @@ -27,10 +27,13 @@ #include <linux/seq_file.h> #include <linux/ktime.h> +#include <linux/spinlock.h> #define RPC_IOSTATS_VERS "1.0" struct rpc_iostats { + spinlock_t om_lock; + /* * These counters give an idea about how many request * transmissions are required, on average, to complete that diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 1a8959944c5..5f1e6bd4c31 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -79,7 +79,7 @@ struct rpc_task { unsigned short tk_flags; /* misc flags */ unsigned short tk_timeouts; /* maj timeouts */ -#if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS) +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 
|| IS_ENABLED(CONFIG_TRACEPOINTS) unsigned short tk_pid; /* debugging aid */ #endif unsigned char tk_priority : 2,/* Task priority */ @@ -187,7 +187,7 @@ struct rpc_wait_queue { unsigned char nr; /* # tasks remaining for cookie */ unsigned short qlen; /* total # tasks waiting in queue */ struct rpc_timer timer_list; -#if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS) +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) const char * name; #endif }; @@ -237,7 +237,7 @@ void rpc_free(void *); int rpciod_up(void); void rpciod_down(void); int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *); -#ifdef RPC_DEBUG +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct net; void rpc_show_tasks(struct net *); #endif @@ -251,7 +251,7 @@ static inline int rpc_wait_for_completion_task(struct rpc_task *task) return __rpc_wait_for_completion_task(task, NULL); } -#if defined(RPC_DEBUG) || defined (RPC_TRACEPOINTS) +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) static inline const char * rpc_qname(const struct rpc_wait_queue *q) { return ((q && q->name) ? q->name : "unknown"); diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index cf391eef2e6..9d27ac45b90 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -239,6 +239,9 @@ struct rpc_xprt { struct net *xprt_net; const char *servername; const char *address_strings[RPC_DISPLAY_MAX]; +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) + struct dentry *debugfs; /* debugfs directory */ +#endif }; #if defined(CONFIG_SUNRPC_BACKCHANNEL) diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 1ad36cc25b2..7591788e9fb 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -17,6 +17,65 @@ void cleanup_socket_xprt(void); #define RPC_DEF_MIN_RESVPORT (665U) #define RPC_DEF_MAX_RESVPORT (1023U) +struct sock_xprt { + struct rpc_xprt xprt; + + /* + * Network layer + */ + struct socket * sock; + struct sock * inet; + + /* + * State of TCP reply receive + */ + __be32 tcp_fraghdr, + tcp_xid, + tcp_calldir; + + u32 tcp_offset, + tcp_reclen; + + unsigned long tcp_copied, + tcp_flags; + + /* + * Connection of transports + */ + struct delayed_work connect_worker; + struct sockaddr_storage srcaddr; + unsigned short srcport; + + /* + * UDP socket buffer size parameters + */ + size_t rcvsize, + sndsize; + + /* + * Saved socket callback addresses + */ + void (*old_data_ready)(struct sock *); + void (*old_state_change)(struct sock *); + void (*old_write_space)(struct sock *); + void (*old_error_report)(struct sock *); +}; + +/* + * TCP receive state flags + */ +#define TCP_RCV_LAST_FRAG (1UL << 0) +#define TCP_RCV_COPY_FRAGHDR (1UL << 1) +#define TCP_RCV_COPY_XID (1UL << 2) +#define TCP_RCV_COPY_DATA (1UL << 3) +#define TCP_RCV_READ_CALLDIR (1UL << 4) +#define TCP_RCV_COPY_CALLDIR (1UL << 5) + +/* + * TCP RPC flags + */ +#define TCP_RPC_REPLY (1UL << 6) + #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h new file mode 100644 index 00000000000..145306bdc92 --- /dev/null +++ b/include/linux/swap_cgroup.h @@ -0,0 +1,42 @@ +#ifndef __LINUX_SWAP_CGROUP_H +#define __LINUX_SWAP_CGROUP_H + +#include <linux/swap.h> + +#ifdef CONFIG_MEMCG_SWAP + +extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, + unsigned short old, unsigned short new); +extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); +extern unsigned short 
lookup_swap_cgroup_id(swp_entry_t ent); +extern int swap_cgroup_swapon(int type, unsigned long max_pages); +extern void swap_cgroup_swapoff(int type); + +#else + +static inline +unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) +{ + return 0; +} + +static inline +unsigned short lookup_swap_cgroup_id(swp_entry_t ent) +{ + return 0; +} + +static inline int +swap_cgroup_swapon(int type, unsigned long max_pages) +{ + return 0; +} + +static inline void swap_cgroup_swapoff(int type) +{ + return; +} + +#endif /* CONFIG_MEMCG_SWAP */ + +#endif /* __LINUX_SWAP_CGROUP_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index bda9b81357c..c9afdc7a7f8 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -25,7 +25,7 @@ struct linux_dirent64; struct list_head; struct mmap_arg_struct; struct msgbuf; -struct msghdr; +struct user_msghdr; struct mmsghdr; struct msqid_ds; struct new_utsname; @@ -601,13 +601,13 @@ asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *); asmlinkage long sys_send(int, void __user *, size_t, unsigned); asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, struct sockaddr __user *, int); -asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); +asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags); asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, unsigned int vlen, unsigned flags); asmlinkage long sys_recv(int, void __user *, size_t, unsigned); asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *); -asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); +asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags); asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg, unsigned int vlen, unsigned flags, struct timespec __user *timeout); diff --git a/include/linux/syslog.h b/include/linux/syslog.h index 98a3153c0f9..4b7b875a7ce 100644 --- a/include/linux/syslog.h +++ b/include/linux/syslog.h @@ -49,4 +49,13 @@ int do_syslog(int type, char __user *buf, int count, bool from_file); +#ifdef CONFIG_PRINTK +int check_syslog_permissions(int type, bool from_file); +#else +static inline int check_syslog_permissions(int type, bool from_file) +{ + return 0; +} +#endif + #endif /* _LINUX_SYSLOG_H */ diff --git a/include/linux/tcp.h b/include/linux/tcp.h index c2dee7deefa..67309ece077 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -130,7 +130,7 @@ struct tcp_sock { /* inet_connection_sock has to be the first member of tcp_sock */ struct inet_connection_sock inet_conn; u16 tcp_header_len; /* Bytes of tcp header to send */ - u16 xmit_size_goal_segs; /* Goal for segmenting output packets */ + u16 gso_segs; /* Max number of segs per GSO packet */ /* * Header prediction flags @@ -162,7 +162,7 @@ struct tcp_sock { struct { struct sk_buff_head prequeue; struct task_struct *task; - struct iovec *iov; + struct msghdr *msg; int memory; int len; } ucopy; @@ -204,10 +204,10 @@ struct tcp_sock { u16 urg_data; /* Saved octet of OOB data and control flags */ u8 ecn_flags; /* ECN status bits. */ - u8 reordering; /* Packet reordering metric. */ + u8 keepalive_probes; /* num of allowed keep alive probes */ + u32 reordering; /* Packet reordering metric. */ u32 snd_up; /* Urgent pointer */ - u8 keepalive_probes; /* num of allowed keep alive probes */ /* * Options received (usually on last packet, some only on SYN packets). 
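Looking back at the socket.h hunk earlier, the new for_each_cmsghdr() macro replaces the open-coded CMSG_FIRSTHDR()/CMSG_NXTHDR() loop. A sketch of scanning ancillary data with it; the per-type handling is elided and the example_ function is hypothetical:

#include <linux/socket.h>
#include <linux/errno.h>

/* Walk every control message attached to @msg; real code would switch on
 * cmsg->cmsg_level / cmsg->cmsg_type inside the loop. */
static int example_scan_cmsgs(struct msghdr *msg)
{
        struct cmsghdr *cmsg;

        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;
                /* e.g. handle SCM_RIGHTS, SO_TIMESTAMP, ... */
        }
        return 0;
}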
*/ diff --git a/include/linux/time.h b/include/linux/time.h index 8c42cf8d244..203c2ad40d7 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -39,9 +39,20 @@ static inline int timeval_compare(const struct timeval *lhs, const struct timeva return lhs->tv_usec - rhs->tv_usec; } -extern unsigned long mktime(const unsigned int year, const unsigned int mon, - const unsigned int day, const unsigned int hour, - const unsigned int min, const unsigned int sec); +extern time64_t mktime64(const unsigned int year, const unsigned int mon, + const unsigned int day, const unsigned int hour, + const unsigned int min, const unsigned int sec); + +/** + * Deprecated. Use mktime64(). + */ +static inline unsigned long mktime(const unsigned int year, + const unsigned int mon, const unsigned int day, + const unsigned int hour, const unsigned int min, + const unsigned int sec) +{ + return mktime64(year, mon, day, hour, min, sec); +} extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 95640dcd189..05af9a33489 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -42,6 +42,7 @@ struct tk_read_base { * struct timekeeper - Structure holding internal timekeeping values. * @tkr: The readout base structure * @xtime_sec: Current CLOCK_REALTIME time in seconds + * @ktime_sec: Current CLOCK_MONOTONIC time in seconds * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset * @offs_real: Offset clock monotonic -> clock realtime * @offs_boot: Offset clock monotonic -> clock boottime @@ -77,6 +78,7 @@ struct tk_read_base { struct timekeeper { struct tk_read_base tkr; u64 xtime_sec; + unsigned long ktime_sec; struct timespec64 wall_to_monotonic; ktime_t offs_real; ktime_t offs_boot; diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 1caa6b04fdc..9b63d13ba82 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -10,7 +10,7 @@ extern int timekeeping_suspended; * Get and set timeofday */ extern void do_gettimeofday(struct timeval *tv); -extern int do_settimeofday(const struct timespec *tv); +extern int do_settimeofday64(const struct timespec64 *ts); extern int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz); @@ -25,14 +25,24 @@ struct timespec __current_kernel_time(void); /* * timespec based interfaces */ -struct timespec get_monotonic_coarse(void); -extern void getrawmonotonic(struct timespec *ts); +struct timespec64 get_monotonic_coarse64(void); +extern void getrawmonotonic64(struct timespec64 *ts); extern void ktime_get_ts64(struct timespec64 *ts); +extern time64_t ktime_get_seconds(void); +extern time64_t ktime_get_real_seconds(void); extern int __getnstimeofday64(struct timespec64 *tv); extern void getnstimeofday64(struct timespec64 *tv); #if BITS_PER_LONG == 64 +/** + * Deprecated. Use do_settimeofday64(). + */ +static inline int do_settimeofday(const struct timespec *ts) +{ + return do_settimeofday64(ts); +} + static inline int __getnstimeofday(struct timespec *ts) { return __getnstimeofday64(ts); @@ -53,7 +63,27 @@ static inline void ktime_get_real_ts(struct timespec *ts) getnstimeofday64(ts); } +static inline void getrawmonotonic(struct timespec *ts) +{ + getrawmonotonic64(ts); +} + +static inline struct timespec get_monotonic_coarse(void) +{ + return get_monotonic_coarse64(); +} #else +/** + * Deprecated. Use do_settimeofday64(). 
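The time.h and timekeeping.h hunks here move callers to 64-bit time: mktime64() supersedes mktime() and do_settimeofday64() takes a timespec64. A sketch of setting the wall clock from a broken-down date; the date and the example_ function are arbitrary:

#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/timekeeping.h>

/* Hypothetical: set the wall clock to 2015-01-01 00:00:00 UTC using the
 * year-2038-safe helpers. */
static int example_set_wall_clock(void)
{
        struct timespec64 ts = {
                .tv_sec  = mktime64(2015, 1, 1, 0, 0, 0),
                .tv_nsec = 0,
        };

        return do_settimeofday64(&ts);
}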
+ */ +static inline int do_settimeofday(const struct timespec *ts) +{ + struct timespec64 ts64; + + ts64 = timespec_to_timespec64(*ts); + return do_settimeofday64(&ts64); +} + static inline int __getnstimeofday(struct timespec *ts) { struct timespec64 ts64; @@ -86,6 +116,19 @@ static inline void ktime_get_real_ts(struct timespec *ts) getnstimeofday64(&ts64); *ts = timespec64_to_timespec(ts64); } + +static inline void getrawmonotonic(struct timespec *ts) +{ + struct timespec64 ts64; + + getrawmonotonic64(&ts64); + *ts = timespec64_to_timespec(ts64); +} + +static inline struct timespec get_monotonic_coarse(void) +{ + return timespec64_to_timespec(get_monotonic_coarse64()); +} #endif extern void getboottime(struct timespec *ts); @@ -182,7 +225,7 @@ static inline void timekeeping_clocktai(struct timespec *ts) /* * RTC specific */ -extern void timekeeping_inject_sleeptime(struct timespec *delta); +extern void timekeeping_inject_sleeptime64(struct timespec64 *delta); /* * PPS accessor diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index ea6c9dea79e..cfaf5a1d4ba 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h @@ -1,7 +1,7 @@ #ifndef _LINUX_TRACE_SEQ_H #define _LINUX_TRACE_SEQ_H -#include <linux/fs.h> +#include <linux/seq_buf.h> #include <asm/page.h> @@ -12,20 +12,36 @@ struct trace_seq { unsigned char buffer[PAGE_SIZE]; - unsigned int len; - unsigned int readpos; + struct seq_buf seq; int full; }; static inline void trace_seq_init(struct trace_seq *s) { - s->len = 0; - s->readpos = 0; + seq_buf_init(&s->seq, s->buffer, PAGE_SIZE); s->full = 0; } /** + * trace_seq_used - amount of actual data written to buffer + * @s: trace sequence descriptor + * + * Returns the amount of data written to the buffer. + * + * IMPORTANT! + * + * Use this instead of @s->seq.len if you need to pass the amount + * of data from the buffer to another buffer (userspace, or what not). + * The @s->seq.len on overflow is bigger than the buffer size and + * using it can cause access to undefined memory. + */ +static inline int trace_seq_used(struct trace_seq *s) +{ + return seq_buf_used(&s->seq); +} + +/** * trace_seq_buffer_ptr - return pointer to next location in buffer * @s: trace sequence descriptor * @@ -37,7 +53,19 @@ trace_seq_init(struct trace_seq *s) static inline unsigned char * trace_seq_buffer_ptr(struct trace_seq *s) { - return s->buffer + s->len; + return s->buffer + seq_buf_used(&s->seq); +} + +/** + * trace_seq_has_overflowed - return true if the trace_seq took too much + * @s: trace sequence descriptor + * + * Returns true if too much data was added to the trace_seq and it is + * now full and will not take anymore. 
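trace_seq now wraps a seq_buf and its output helpers return void; callers check trace_seq_has_overflowed() instead of accumulating return values. A sketch of a print helper written in that style; the fields and the choice of -E2BIG are hypothetical:

#include <linux/trace_seq.h>
#include <linux/errno.h>

/* Hypothetical print helper: emit a couple of fields, then report whether
 * everything fit in the trace_seq's page-sized buffer. */
static int example_print_event(struct trace_seq *s, int cpu, u64 ts)
{
        trace_seq_printf(s, "cpu=%d ts=%llu ", cpu, (unsigned long long)ts);
        trace_seq_puts(s, "ok\n");

        return trace_seq_has_overflowed(s) ? -E2BIG : 0;
}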
+ */ +static inline bool trace_seq_has_overflowed(struct trace_seq *s) +{ + return s->full || seq_buf_has_overflowed(&s->seq); } /* @@ -45,40 +73,37 @@ trace_seq_buffer_ptr(struct trace_seq *s) */ #ifdef CONFIG_TRACING extern __printf(2, 3) -int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); +void trace_seq_printf(struct trace_seq *s, const char *fmt, ...); extern __printf(2, 0) -int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); -extern int +void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); +extern void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt); -extern int trace_seq_puts(struct trace_seq *s, const char *str); -extern int trace_seq_putc(struct trace_seq *s, unsigned char c); -extern int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len); -extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, +extern void trace_seq_puts(struct trace_seq *s, const char *str); +extern void trace_seq_putc(struct trace_seq *s, unsigned char c); +extern void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len); +extern void trace_seq_putmem_hex(struct trace_seq *s, const void *mem, unsigned int len); extern int trace_seq_path(struct trace_seq *s, const struct path *path); -extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, +extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, int nmaskbits); #else /* CONFIG_TRACING */ -static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) +static inline void trace_seq_printf(struct trace_seq *s, const char *fmt, ...) 
{ - return 0; } -static inline int +static inline void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) { - return 0; } -static inline int +static inline void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, int nmaskbits) { - return 0; } static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) @@ -90,23 +115,19 @@ static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, { return 0; } -static inline int trace_seq_puts(struct trace_seq *s, const char *str) +static inline void trace_seq_puts(struct trace_seq *s, const char *str) { - return 0; } -static inline int trace_seq_putc(struct trace_seq *s, unsigned char c) +static inline void trace_seq_putc(struct trace_seq *s, unsigned char c) { - return 0; } -static inline int +static inline void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) { - return 0; } -static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, +static inline void trace_seq_putmem_hex(struct trace_seq *s, const void *mem, unsigned int len) { - return 0; } static inline int trace_seq_path(struct trace_seq *s, const struct path *path) { diff --git a/include/linux/uio.h b/include/linux/uio.h index 9b1581414cd..a41e252396c 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -31,6 +31,7 @@ struct iov_iter { size_t count; union { const struct iovec *iov; + const struct kvec *kvec; const struct bio_vec *bvec; }; unsigned long nr_segs; @@ -82,10 +83,13 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i); size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); +size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); size_t iov_iter_zero(size_t bytes, struct iov_iter *); unsigned long iov_iter_alignment(const struct iov_iter *i); void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, unsigned long nr_segs, size_t count); +void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *iov, + unsigned long nr_segs, size_t count); ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start); ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, @@ -123,9 +127,10 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) { i->count = count; } +size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); +size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); -int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len); int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, int offset, int len); int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, diff --git a/include/linux/usb.h b/include/linux/usb.h index 447a7e2fc19..f89c24a03bd 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -637,7 +637,7 @@ static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index) #endif /* USB autosuspend and autoresume */ -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM extern void usb_enable_autosuspend(struct usb_device *udev); extern void usb_disable_autosuspend(struct usb_device *udev); diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index cd96a2bc338..668898e29d0 100644 --- 
a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -93,7 +93,7 @@ struct usb_hcd { struct timer_list rh_timer; /* drives root-hub polling */ struct urb *status_urb; /* the current status urb */ -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM struct work_struct wakeup_work; /* for remote wakeup */ #endif @@ -625,16 +625,13 @@ extern int usb_find_interface_driver(struct usb_device *dev, extern void usb_root_hub_lost_power(struct usb_device *rhdev); extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg); extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg); -#endif /* CONFIG_PM */ - -#ifdef CONFIG_PM_RUNTIME extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd); #else static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd) { return; } -#endif /* CONFIG_PM_RUNTIME */ +#endif /* CONFIG_PM */ /*-------------------------------------------------------------------------*/ diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h index a4c9547aae6..f8e76e08ebe 100644 --- a/include/linux/vexpress.h +++ b/include/linux/vexpress.h @@ -15,8 +15,6 @@ #define _LINUX_VEXPRESS_H #include <linux/device.h> -#include <linux/platform_device.h> -#include <linux/reboot.h> #include <linux/regmap.h> #define VEXPRESS_SITE_MB 0 @@ -24,13 +22,6 @@ #define VEXPRESS_SITE_DB2 2 #define VEXPRESS_SITE_MASTER 0xf -#define VEXPRESS_RES_FUNC(_site, _func) \ -{ \ - .start = (_site), \ - .end = (_func), \ - .flags = IORESOURCE_BUS, \ -} - /* Config infrastructure */ void vexpress_config_set_master(u32 site); @@ -58,16 +49,6 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev); /* Platform control */ -unsigned int vexpress_get_mci_cardin(struct device *dev); -u32 vexpress_get_procid(int site); -void *vexpress_get_24mhz_clock_base(void); void vexpress_flags_set(u32 data); -void vexpress_sysreg_early_init(void __iomem *base); -int vexpress_syscfg_device_register(struct platform_device *pdev); - -/* Clocks */ - -void vexpress_clk_init(void __iomem *sp810_base); - #endif diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 65261a7244f..d09e0938fd6 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -75,6 +75,9 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *vq); bool virtqueue_is_broken(struct virtqueue *vq); +void *virtqueue_get_avail(struct virtqueue *vq); +void *virtqueue_get_used(struct virtqueue *vq); + /** * virtio_device - representation of a device using virtio * @index: unique position on the virtio bus @@ -101,11 +104,12 @@ struct virtio_device { const struct virtio_config_ops *config; const struct vringh_config_ops *vringh_config; struct list_head vqs; - /* Note that this is a Linux set_bit-style bitmap. */ - unsigned long features[1]; + u64 features; void *priv; }; +bool virtio_device_is_legacy_only(struct virtio_device_id id); + static inline struct virtio_device *dev_to_virtio(struct device *_dev) { return container_of(_dev, struct virtio_device, dev); @@ -128,6 +132,8 @@ int virtio_device_restore(struct virtio_device *dev); * @id_table: the ids serviced by this driver. * @feature_table: an array of feature numbers supported by this driver. * @feature_table_size: number of entries in the feature table array. + * @feature_table_legacy: same as feature_table but when working in legacy mode. + * @feature_table_size_legacy: number of entries in feature table legacy array. * @probe: the function to call when a device is found. Returns 0 or -errno. * @remove: the function to call when a device is removed. 
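The feature_table_legacy/feature_table_size_legacy members added to virtio_driver let a driver advertise a separate feature list when bound to a legacy (pre-virtio-1.0) device. A sketch of a driver filling both tables; the driver name, the choice of VIRTIO_ID_NET and the specific feature bits are placeholders, and probe/remove are stubs:

#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>

static const struct virtio_device_id example_id_table[] = {
        { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
        { 0 },
};

/* Features offered when the device is modern (virtio 1.0)... */
static const unsigned int example_features[] = {
        VIRTIO_F_VERSION_1,
};

/* ...and when it is a legacy device. */
static const unsigned int example_features_legacy[] = {
        VIRTIO_F_ANY_LAYOUT,
};

static int example_probe(struct virtio_device *vdev)
{
        return 0;       /* stub */
}

static void example_remove(struct virtio_device *vdev)
{
        /* stub */
}

static struct virtio_driver example_virtio_driver = {
        .driver.name                    = "example_virtio",
        .driver.owner                   = THIS_MODULE,
        .id_table                       = example_id_table,
        .feature_table                  = example_features,
        .feature_table_size             = ARRAY_SIZE(example_features),
        .feature_table_legacy           = example_features_legacy,
        .feature_table_size_legacy      = ARRAY_SIZE(example_features_legacy),
        .probe                          = example_probe,
        .remove                         = example_remove,
};
module_virtio_driver(example_virtio_driver);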
* @config_changed: optional function to call when the device configuration @@ -138,6 +144,8 @@ struct virtio_driver { const struct virtio_device_id *id_table; const unsigned int *feature_table; unsigned int feature_table_size; + const unsigned int *feature_table_legacy; + unsigned int feature_table_size_legacy; int (*probe)(struct virtio_device *dev); void (*scan)(struct virtio_device *dev); void (*remove)(struct virtio_device *dev); diff --git a/include/linux/virtio_byteorder.h b/include/linux/virtio_byteorder.h new file mode 100644 index 00000000000..51865d05b26 --- /dev/null +++ b/include/linux/virtio_byteorder.h @@ -0,0 +1,59 @@ +#ifndef _LINUX_VIRTIO_BYTEORDER_H +#define _LINUX_VIRTIO_BYTEORDER_H +#include <linux/types.h> +#include <uapi/linux/virtio_types.h> + +/* + * Low-level memory accessors for handling virtio in modern little endian and in + * compatibility native endian format. + */ + +static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val) +{ + if (little_endian) + return le16_to_cpu((__force __le16)val); + else + return (__force u16)val; +} + +static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val) +{ + if (little_endian) + return (__force __virtio16)cpu_to_le16(val); + else + return (__force __virtio16)val; +} + +static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val) +{ + if (little_endian) + return le32_to_cpu((__force __le32)val); + else + return (__force u32)val; +} + +static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val) +{ + if (little_endian) + return (__force __virtio32)cpu_to_le32(val); + else + return (__force __virtio32)val; +} + +static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val) +{ + if (little_endian) + return le64_to_cpu((__force __le64)val); + else + return (__force u64)val; +} + +static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val) +{ + if (little_endian) + return (__force __virtio64)cpu_to_le64(val); + else + return (__force __virtio64)val; +} + +#endif /* _LINUX_VIRTIO_BYTEORDER */ diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 7f4ef66873e..7979f850e7a 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -4,6 +4,7 @@ #include <linux/err.h> #include <linux/bug.h> #include <linux/virtio.h> +#include <linux/virtio_byteorder.h> #include <uapi/linux/virtio_config.h> /** @@ -46,6 +47,7 @@ * vdev: the virtio_device * This gives the final feature bits for the device: it can change * the dev->feature bits if it wants. + * Returns 0 on success or error status * @bus_name: return the bus name associated with the device * vdev: the virtio_device * This returns a pointer to the bus name a la pci_name from which @@ -66,8 +68,8 @@ struct virtio_config_ops { vq_callback_t *callbacks[], const char *names[]); void (*del_vqs)(struct virtio_device *); - u32 (*get_features)(struct virtio_device *vdev); - void (*finalize_features)(struct virtio_device *vdev); + u64 (*get_features)(struct virtio_device *vdev); + int (*finalize_features)(struct virtio_device *vdev); const char *(*bus_name)(struct virtio_device *vdev); int (*set_vq_affinity)(struct virtqueue *vq, int cpu); }; @@ -77,23 +79,70 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev, unsigned int fbit); /** - * virtio_has_feature - helper to determine if this device has this feature. + * __virtio_test_bit - helper to test feature bits. For use by transports. 
+ * Devices should normally use virtio_has_feature, + * which includes more checks. * @vdev: the device * @fbit: the feature bit */ -static inline bool virtio_has_feature(const struct virtio_device *vdev, +static inline bool __virtio_test_bit(const struct virtio_device *vdev, + unsigned int fbit) +{ + /* Did you forget to fix assumptions on max features? */ + if (__builtin_constant_p(fbit)) + BUILD_BUG_ON(fbit >= 64); + else + BUG_ON(fbit >= 64); + + return vdev->features & BIT_ULL(fbit); +} + +/** + * __virtio_set_bit - helper to set feature bits. For use by transports. + * @vdev: the device + * @fbit: the feature bit + */ +static inline void __virtio_set_bit(struct virtio_device *vdev, + unsigned int fbit) +{ + /* Did you forget to fix assumptions on max features? */ + if (__builtin_constant_p(fbit)) + BUILD_BUG_ON(fbit >= 64); + else + BUG_ON(fbit >= 64); + + vdev->features |= BIT_ULL(fbit); +} + +/** + * __virtio_clear_bit - helper to clear feature bits. For use by transports. + * @vdev: the device + * @fbit: the feature bit + */ +static inline void __virtio_clear_bit(struct virtio_device *vdev, unsigned int fbit) { /* Did you forget to fix assumptions on max features? */ if (__builtin_constant_p(fbit)) - BUILD_BUG_ON(fbit >= 32); + BUILD_BUG_ON(fbit >= 64); else - BUG_ON(fbit >= 32); + BUG_ON(fbit >= 64); + vdev->features &= ~BIT_ULL(fbit); +} + +/** + * virtio_has_feature - helper to determine if this device has this feature. + * @vdev: the device + * @fbit: the feature bit + */ +static inline bool virtio_has_feature(const struct virtio_device *vdev, + unsigned int fbit) +{ if (fbit < VIRTIO_TRANSPORT_F_START) virtio_check_driver_offered_feature(vdev, fbit); - return test_bit(fbit, vdev->features); + return __virtio_test_bit(vdev, fbit); } static inline @@ -152,6 +201,37 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu) return 0; } +/* Memory accessors */ +static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val) +{ + return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); +} + +static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val) +{ + return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); +} + +static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val) +{ + return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); +} + +static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val) +{ + return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); +} + +static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val) +{ + return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); +} + +static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) +{ + return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); +} + /* Config space accessors. 
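With dev->features now a 64-bit word, transports manipulate bits through __virtio_test_bit()/__virtio_set_bit()/__virtio_clear_bit(), while the virtio16/32/64 accessors byteswap only when VIRTIO_F_VERSION_1 was negotiated. Two small sketches; both example_ helpers are hypothetical:

#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* Hypothetical transport-side check: transports may look at raw feature
 * bits; drivers should go through virtio_has_feature() instead. */
static bool example_device_is_modern(struct virtio_device *vdev)
{
        return __virtio_test_bit(vdev, VIRTIO_F_VERSION_1);
}

/* Hypothetical driver helper: convert a 16-bit value read from the device.
 * The accessor byteswaps only for virtio 1.0 devices; legacy devices stay
 * in native endianness. */
static u16 example_to_cpu16(struct virtio_device *vdev, __virtio16 raw)
{
        return virtio16_to_cpu(vdev, raw);
}

virtio_has_feature() additionally verifies that the driver declared the bit in its feature table, which is why the comment above steers device code toward it.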
*/ #define virtio_cread(vdev, structname, member, ptr) \ do { \ @@ -239,12 +319,13 @@ static inline u16 virtio_cread16(struct virtio_device *vdev, { u16 ret; vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return ret; + return virtio16_to_cpu(vdev, (__force __virtio16)ret); } static inline void virtio_cwrite16(struct virtio_device *vdev, unsigned int offset, u16 val) { + val = (__force u16)cpu_to_virtio16(vdev, val); vdev->config->set(vdev, offset, &val, sizeof(val)); } @@ -253,12 +334,13 @@ static inline u32 virtio_cread32(struct virtio_device *vdev, { u32 ret; vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return ret; + return virtio32_to_cpu(vdev, (__force __virtio32)ret); } static inline void virtio_cwrite32(struct virtio_device *vdev, unsigned int offset, u32 val) { + val = (__force u32)cpu_to_virtio32(vdev, val); vdev->config->set(vdev, offset, &val, sizeof(val)); } @@ -267,12 +349,13 @@ static inline u64 virtio_cread64(struct virtio_device *vdev, { u64 ret; vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return ret; + return virtio64_to_cpu(vdev, (__force __virtio64)ret); } static inline void virtio_cwrite64(struct virtio_device *vdev, unsigned int offset, u64 val) { + val = (__force u64)cpu_to_virtio64(vdev, val); vdev->config->set(vdev, offset, &val, sizeof(val)); } diff --git a/include/linux/virtio_scsi.h b/include/linux/virtio_scsi.h deleted file mode 100644 index de429d1f435..00000000000 --- a/include/linux/virtio_scsi.h +++ /dev/null @@ -1,162 +0,0 @@ -/* - * This header is BSD licensed so anyone can use the definitions to implement - * compatible drivers/servers. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
diff --git a/include/linux/virtio_scsi.h b/include/linux/virtio_scsi.h
deleted file mode 100644
index de429d1f435..00000000000
--- a/include/linux/virtio_scsi.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * This header is BSD licensed so anyone can use the definitions to implement
- * compatible drivers/servers.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#ifndef _LINUX_VIRTIO_SCSI_H
-#define _LINUX_VIRTIO_SCSI_H
-
-#define VIRTIO_SCSI_CDB_SIZE 32
-#define VIRTIO_SCSI_SENSE_SIZE 96
-
-/* SCSI command request, followed by data-out */
-struct virtio_scsi_cmd_req {
-	u8 lun[8];		/* Logical Unit Number */
-	u64 tag;		/* Command identifier */
-	u8 task_attr;		/* Task attribute */
-	u8 prio;		/* SAM command priority field */
-	u8 crn;
-	u8 cdb[VIRTIO_SCSI_CDB_SIZE];
-} __packed;
-
-/* SCSI command request, followed by protection information */
-struct virtio_scsi_cmd_req_pi {
-	u8 lun[8];		/* Logical Unit Number */
-	u64 tag;		/* Command identifier */
-	u8 task_attr;		/* Task attribute */
-	u8 prio;		/* SAM command priority field */
-	u8 crn;
-	u32 pi_bytesout;	/* DataOUT PI Number of bytes */
-	u32 pi_bytesin;		/* DataIN PI Number of bytes */
-	u8 cdb[VIRTIO_SCSI_CDB_SIZE];
-} __packed;
-
-/* Response, followed by sense data and data-in */
-struct virtio_scsi_cmd_resp {
-	u32 sense_len;		/* Sense data length */
-	u32 resid;		/* Residual bytes in data buffer */
-	u16 status_qualifier;	/* Status qualifier */
-	u8 status;		/* Command completion status */
-	u8 response;		/* Response values */
-	u8 sense[VIRTIO_SCSI_SENSE_SIZE];
-} __packed;
-
-/* Task Management Request */
-struct virtio_scsi_ctrl_tmf_req {
-	u32 type;
-	u32 subtype;
-	u8 lun[8];
-	u64 tag;
-} __packed;
-
-struct virtio_scsi_ctrl_tmf_resp {
-	u8 response;
-} __packed;
-
-/* Asynchronous notification query/subscription */
-struct virtio_scsi_ctrl_an_req {
-	u32 type;
-	u8 lun[8];
-	u32 event_requested;
-} __packed;
-
-struct virtio_scsi_ctrl_an_resp {
-	u32 event_actual;
-	u8 response;
-} __packed;
-
-struct virtio_scsi_event {
-	u32 event;
-	u8 lun[8];
-	u32 reason;
-} __packed;
-
-struct virtio_scsi_config {
-	u32 num_queues;
-	u32 seg_max;
-	u32 max_sectors;
-	u32 cmd_per_lun;
-	u32 event_info_size;
-	u32 sense_size;
-	u32 cdb_size;
-	u16 max_channel;
-	u16 max_target;
-	u32 max_lun;
-} __packed;
-
-/* Feature Bits */
-#define VIRTIO_SCSI_F_INOUT 0
-#define VIRTIO_SCSI_F_HOTPLUG 1
-#define VIRTIO_SCSI_F_CHANGE 2
-#define VIRTIO_SCSI_F_T10_PI 3
-
-/* Response codes */
-#define VIRTIO_SCSI_S_OK 0
-#define VIRTIO_SCSI_S_OVERRUN 1
-#define VIRTIO_SCSI_S_ABORTED 2
-#define VIRTIO_SCSI_S_BAD_TARGET 3
-#define VIRTIO_SCSI_S_RESET 4
-#define VIRTIO_SCSI_S_BUSY 5
-#define VIRTIO_SCSI_S_TRANSPORT_FAILURE 6
-#define VIRTIO_SCSI_S_TARGET_FAILURE 7
-#define VIRTIO_SCSI_S_NEXUS_FAILURE 8
-#define VIRTIO_SCSI_S_FAILURE 9
-#define VIRTIO_SCSI_S_FUNCTION_SUCCEEDED 10
-#define VIRTIO_SCSI_S_FUNCTION_REJECTED 11
-#define VIRTIO_SCSI_S_INCORRECT_LUN 12
-
-/* Controlq type codes. */
-#define VIRTIO_SCSI_T_TMF 0
-#define VIRTIO_SCSI_T_AN_QUERY 1
-#define VIRTIO_SCSI_T_AN_SUBSCRIBE 2
-
-/* Valid TMF subtypes. */
-#define VIRTIO_SCSI_T_TMF_ABORT_TASK 0
-#define VIRTIO_SCSI_T_TMF_ABORT_TASK_SET 1
-#define VIRTIO_SCSI_T_TMF_CLEAR_ACA 2
-#define VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET 3
-#define VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET 4
-#define VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET 5
-#define VIRTIO_SCSI_T_TMF_QUERY_TASK 6
-#define VIRTIO_SCSI_T_TMF_QUERY_TASK_SET 7
-
-/* Events. */
-#define VIRTIO_SCSI_T_EVENTS_MISSED 0x80000000
-#define VIRTIO_SCSI_T_NO_EVENT 0
-#define VIRTIO_SCSI_T_TRANSPORT_RESET 1
-#define VIRTIO_SCSI_T_ASYNC_NOTIFY 2
-#define VIRTIO_SCSI_T_PARAM_CHANGE 3
-
-/* Reasons of transport reset event */
-#define VIRTIO_SCSI_EVT_RESET_HARD 0
-#define VIRTIO_SCSI_EVT_RESET_RESCAN 1
-#define VIRTIO_SCSI_EVT_RESET_REMOVED 2
-
-#define VIRTIO_SCSI_S_SIMPLE 0
-#define VIRTIO_SCSI_S_ORDERED 1
-#define VIRTIO_SCSI_S_HEAD 2
-#define VIRTIO_SCSI_S_ACA 3
-
-
-#endif /* _LINUX_VIRTIO_SCSI_H */
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h
index 023430e265f..5691f752ce8 100644
--- a/include/linux/vmw_vmci_api.h
+++ b/include/linux/vmw_vmci_api.h
@@ -24,6 +24,7 @@
 #define VMCI_KERNEL_API_VERSION_2 2
 #define VMCI_KERNEL_API_VERSION   VMCI_KERNEL_API_VERSION_2
 
+struct msghdr;
 typedef void (vmci_device_shutdown_fn) (void *device_registration,
 					void *user_data);
 
@@ -75,8 +76,8 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size,
 ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
 			  void *iov, size_t iov_size, int mode);
 ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
-			  void *iov, size_t iov_size, int mode);
-ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, void *iov, size_t iov_size,
+			  struct msghdr *msg, size_t iov_size, int mode);
+ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size,
 			 int mode);
 
 #endif /* !__VMW_VMCI_API_H__ */
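The vmci_qpair_dequev()/vmci_qpair_peekv() prototypes now take a struct msghdr rather than a raw iovec pointer, so the queue-pair code copies into msg->msg_iter. A rough caller-side sketch, assuming the prototypes above; example_recv() and the mode value 0 are hypothetical placeholders:

/* Illustrative only -- example_recv() is hypothetical; it just shows the
 * msghdr-based calling convention of the updated prototype. */
#include <linux/socket.h>
#include <linux/vmw_vmci_api.h>

static ssize_t example_recv(struct vmci_qp *qpair, struct msghdr *msg,
			    size_t len)
{
	/*
	 * A socket recvmsg() path can now pass its msghdr straight through;
	 * the data lands in the buffers described by msg->msg_iter.
	 */
	return vmci_qpair_dequev(qpair, msg, len, 0 /* mode flags, sketch */);
}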
diff --git a/include/linux/wait.h b/include/linux/wait.h
index e4a8eb9312e..2232ed16635 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -13,9 +13,12 @@ typedef struct __wait_queue wait_queue_t;
 typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
 int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
 
+/* __wait_queue::flags */
+#define WQ_FLAG_EXCLUSIVE	0x01
+#define WQ_FLAG_WOKEN		0x02
+
 struct __wait_queue {
 	unsigned int		flags;
-#define WQ_FLAG_EXCLUSIVE	0x01
 	void			*private;
 	wait_queue_func_t	func;
 	struct list_head	task_list;
@@ -258,11 +261,37 @@ __out:	__ret;								\
  */
 #define wait_event(wq, condition)					\
 do {									\
+	might_sleep();							\
 	if (condition)							\
 		break;							\
 	__wait_event(wq, condition);					\
 } while (0)
 
+#define __wait_event_freezable(wq, condition)				\
+	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
+			    schedule(); try_to_freeze())
+
+/**
+ * wait_event_freezable - sleep (or freeze) until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
+ * to system load) until the @condition evaluates to true. The
+ * @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ */
+#define wait_event_freezable(wq, condition)				\
+({									\
+	int __ret = 0;							\
+	might_sleep();							\
+	if (!(condition))						\
+		__ret = __wait_event_freezable(wq, condition);		\
+	__ret;								\
+})
+
 #define __wait_event_timeout(wq, condition, timeout)			\
 	___wait_event(wq, ___wait_cond_timeout(condition),		\
 		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
@@ -290,11 +319,30 @@ do {									\
 #define wait_event_timeout(wq, condition, timeout)			\
 ({									\
 	long __ret = timeout;						\
+	might_sleep();							\
 	if (!___wait_cond_timeout(condition))				\
 		__ret = __wait_event_timeout(wq, condition, timeout);	\
 	__ret;								\
 })
 
+#define __wait_event_freezable_timeout(wq, condition, timeout)		\
+	___wait_event(wq, ___wait_cond_timeout(condition),		\
+		      TASK_INTERRUPTIBLE, 0, timeout,			\
+		      __ret = schedule_timeout(__ret); try_to_freeze())
+
+/*
+ * Like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
+ * increasing load and is freezable.
+ */
+#define wait_event_freezable_timeout(wq, condition, timeout)		\
+({									\
+	long __ret = timeout;						\
+	might_sleep();							\
+	if (!___wait_cond_timeout(condition))				\
+		__ret = __wait_event_freezable_timeout(wq, condition, timeout);\
+	__ret;								\
+})
+
 #define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
 	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
 			    cmd1; schedule(); cmd2)
@@ -315,6 +363,7 @@ do {									\
 #define wait_event_cmd(wq, condition, cmd1, cmd2)			\
 do {									\
+	might_sleep();							\
 	if (condition)							\
 		break;							\
 	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
 } while (0)
@@ -342,6 +391,7 @@ do {									\
 #define wait_event_interruptible(wq, condition)				\
 ({									\
 	int __ret = 0;							\
+	might_sleep();							\
 	if (!(condition))						\
 		__ret = __wait_event_interruptible(wq, condition);	\
 	__ret;								\
@@ -375,6 +425,7 @@ do {									\
 #define wait_event_interruptible_timeout(wq, condition, timeout)	\
 ({									\
 	long __ret = timeout;						\
+	might_sleep();							\
 	if (!___wait_cond_timeout(condition))				\
 		__ret = __wait_event_interruptible_timeout(wq,		\
 						condition, timeout);	\
@@ -425,6 +476,7 @@ do {									\
 #define wait_event_hrtimeout(wq, condition, timeout)			\
 ({									\
 	int __ret = 0;							\
+	might_sleep();							\
 	if (!(condition))						\
 		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
 					       TASK_UNINTERRUPTIBLE);	\
@@ -450,6 +502,7 @@ do {									\
 #define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
 ({									\
 	long __ret = 0;							\
+	might_sleep();							\
 	if (!(condition))						\
 		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
 					       TASK_INTERRUPTIBLE);	\
@@ -463,12 +516,27 @@ do {									\
 #define wait_event_interruptible_exclusive(wq, condition)		\
 ({									\
 	int __ret = 0;							\
+	might_sleep();							\
 	if (!(condition))						\
 		__ret = __wait_event_interruptible_exclusive(wq, condition);\
 	__ret;								\
 })
 
+#define __wait_event_freezable_exclusive(wq, condition)			\
+	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
+			schedule(); try_to_freeze())
+
+#define wait_event_freezable_exclusive(wq, condition)			\
+({									\
+	int __ret = 0;							\
+	might_sleep();							\
+	if (!(condition))						\
+		__ret = __wait_event_freezable_exclusive(wq, condition);\
+	__ret;								\
+})
+
+
 #define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
 ({									\
 	int __ret = 0;							\
@@ -637,6 +705,7 @@ do {									\
 #define wait_event_killable(wq, condition)				\
 ({									\
 	int __ret = 0;							\
+	might_sleep();							\
 	if (!(condition))						\
 		__ret = __wait_event_killable(wq, condition);		\
 	__ret;								\
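A short, hypothetical sketch of the freezable wait macros added above: a kernel thread that uses wait_event_freezable() sleeps TASK_INTERRUPTIBLE (so it does not inflate the load average) and calls try_to_freeze() on each wakeup, so it cooperates with system suspend. The example_* names are made up; the macro is the one defined above.

/* Illustrative only -- example_thread_fn() and the example_* variables are
 * hypothetical; wait_event_freezable() is the macro added above. */
#include <linux/wait.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_work_pending;

static int example_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* Returns non-zero if the sleep was interrupted by a signal. */
		if (wait_event_freezable(example_wq,
					 example_work_pending ||
					 kthread_should_stop()))
			continue;

		example_work_pending = false;
		/* ... do the work, then the producer does wake_up(&example_wq) ... */
	}
	return 0;
}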
@@ -830,6 +899,8 @@ void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int sta
 long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
 void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
+long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
+int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 
@@ -886,6 +957,7 @@ extern int bit_wait_io_timeout(struct wait_bit_key *);
 static inline int
 wait_on_bit(void *word, int bit, unsigned mode)
 {
+	might_sleep();
 	if (!test_bit(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit,
@@ -910,6 +982,7 @@ wait_on_bit(void *word, int bit, unsigned mode)
 static inline int
 wait_on_bit_io(void *word, int bit, unsigned mode)
 {
+	might_sleep();
 	if (!test_bit(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit,
@@ -936,6 +1009,7 @@ wait_on_bit_io(void *word, int bit, unsigned mode)
 static inline int
 wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
 {
+	might_sleep();
 	if (!test_bit(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit(word, bit, action, mode);
@@ -963,6 +1037,7 @@ wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode
 static inline int
 wait_on_bit_lock(void *word, int bit, unsigned mode)
 {
+	might_sleep();
 	if (!test_and_set_bit(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
@@ -986,6 +1061,7 @@ wait_on_bit_lock(void *word, int bit, unsigned mode)
 static inline int
 wait_on_bit_lock_io(void *word, int bit, unsigned mode)
 {
+	might_sleep();
 	if (!test_and_set_bit(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
@@ -1011,6 +1087,7 @@ wait_on_bit_lock_io(void *word, int bit, unsigned mode)
 static inline int
 wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
 {
+	might_sleep();
 	if (!test_and_set_bit(bit, word))
 		return 0;
 	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
@@ -1029,6 +1106,7 @@ wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned
 static inline
 int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
 {
+	might_sleep();
 	if (atomic_read(val) == 0)
 		return 0;
 	return out_of_line_wait_on_atomic_t(val, action, mode);
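For reference, a minimal sketch of how the wait_woken()/woken_wake_function() pair declared above is meant to be used; the example_* names are hypothetical. The WQ_FLAG_WOKEN flag set by woken_wake_function() closes the race between testing the condition and going to sleep: a wake-up that lands in that window is remembered and wait_woken() returns immediately.

/* Illustrative only -- example_wq and example_condition are hypothetical;
 * wait_woken()/woken_wake_function() are the interfaces declared above. */
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_condition;

static long example_wait(long timeout)
{
	/* Use woken_wake_function() instead of the default wake function. */
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&example_wq, &wait);
	while (!example_condition && timeout)
		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
	remove_wait_queue(&example_wq, &wait);

	return timeout;	/* remaining time, 0 on timeout */
}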