Diffstat (limited to 'include/linux')
36 files changed, 683 insertions, 287 deletions
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index eb1c6a47b67..994739da827 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -318,6 +318,7 @@ struct bcma_bus { const struct bcma_host_ops *ops; enum bcma_hosttype hosttype; + bool host_is_pcie2; /* Used for BCMA_HOSTTYPE_PCI only */ union { /* Pointer to the PCI bus (only for BCMA_HOSTTYPE_PCI) */ struct pci_dev *host_pci; diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h index 0333e605ea0..3f809ae372c 100644 --- a/include/linux/bcma/bcma_driver_pci.h +++ b/include/linux/bcma/bcma_driver_pci.h @@ -223,6 +223,7 @@ struct bcma_drv_pci_host { struct bcma_drv_pci { struct bcma_device *core; + u8 early_setup_done:1; u8 setup_done:1; u8 hostmode:1; @@ -237,6 +238,7 @@ struct bcma_drv_pci { #define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val) #define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val) +extern void bcma_core_pci_early_init(struct bcma_drv_pci *pc); extern void bcma_core_pci_init(struct bcma_drv_pci *pc); extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, bool enable); diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h index e64ae7bf80a..ebd5c1fcdea 100644 --- a/include/linux/bcma/bcma_regs.h +++ b/include/linux/bcma/bcma_regs.h @@ -64,6 +64,8 @@ #define BCMA_PCI_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal powerup */ #define BCMA_PCI_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL powerdown */ +#define BCMA_PCIE2_BAR0_WIN2 0x70 + /* SiliconBackplane Address Map. * All regions may not exist on all chips. */ diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h index f24d245f839..1b5fc0c3b1b 100644 --- a/include/linux/bcma/bcma_soc.h +++ b/include/linux/bcma/bcma_soc.h @@ -5,8 +5,6 @@ struct bcma_soc { struct bcma_bus bus; - struct bcma_device core_cc; - struct bcma_device core_mips; }; int __init bcma_host_soc_register(struct bcma_soc *soc); diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index abcafaa20b8..9c78d15d33e 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -18,8 +18,6 @@ #include <asm/div64.h> #include <asm/io.h> -/* clocksource cycle base type */ -typedef u64 cycle_t; struct clocksource; struct module; @@ -28,106 +26,6 @@ struct module; #endif /** - * struct cyclecounter - hardware abstraction for a free running counter - * Provides completely state-free accessors to the underlying hardware. - * Depending on which hardware it reads, the cycle counter may wrap - * around quickly. Locking rules (if necessary) have to be defined - * by the implementor and user of specific instances of this API. - * - * @read: returns the current cycle value - * @mask: bitmask for two's complement - * subtraction of non 64 bit counters, - * see CLOCKSOURCE_MASK() helper macro - * @mult: cycle to nanosecond multiplier - * @shift: cycle to nanosecond divisor (power of two) - */ -struct cyclecounter { - cycle_t (*read)(const struct cyclecounter *cc); - cycle_t mask; - u32 mult; - u32 shift; -}; - -/** - * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds - * Contains the state needed by timecounter_read() to detect - * cycle counter wrap around. Initialize with - * timecounter_init(). Also used to convert cycle counts into the - * corresponding nanosecond counts with timecounter_cyc2time(). 
Users - * of this code are responsible for initializing the underlying - * cycle counter hardware, locking issues and reading the time - * more often than the cycle counter wraps around. The nanosecond - * counter will only wrap around after ~585 years. - * - * @cc: the cycle counter used by this instance - * @cycle_last: most recent cycle counter value seen by - * timecounter_read() - * @nsec: continuously increasing count - */ -struct timecounter { - const struct cyclecounter *cc; - cycle_t cycle_last; - u64 nsec; -}; - -/** - * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds - * @cc: Pointer to cycle counter. - * @cycles: Cycles - * - * XXX - This could use some mult_lxl_ll() asm optimization. Same code - * as in cyc2ns, but with unsigned result. - */ -static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc, - cycle_t cycles) -{ - u64 ret = (u64)cycles; - ret = (ret * cc->mult) >> cc->shift; - return ret; -} - -/** - * timecounter_init - initialize a time counter - * @tc: Pointer to time counter which is to be initialized/reset - * @cc: A cycle counter, ready to be used. - * @start_tstamp: Arbitrary initial time stamp. - * - * After this call the current cycle register (roughly) corresponds to - * the initial time stamp. Every call to timecounter_read() increments - * the time stamp counter by the number of elapsed nanoseconds. - */ -extern void timecounter_init(struct timecounter *tc, - const struct cyclecounter *cc, - u64 start_tstamp); - -/** - * timecounter_read - return nanoseconds elapsed since timecounter_init() - * plus the initial time stamp - * @tc: Pointer to time counter. - * - * In other words, keeps track of time since the same epoch as - * the function which generated the initial time stamp. - */ -extern u64 timecounter_read(struct timecounter *tc); - -/** - * timecounter_cyc2time - convert a cycle counter to same - * time base as values returned by - * timecounter_read() - * @tc: Pointer to time counter. - * @cycle_tstamp: a value returned by tc->cc->read() - * - * Cycle counts that are converted correctly as long as they - * fall into the interval [-1/2 max cycle count, +1/2 max cycle count], - * with "max cycle count" == cs->mask+1. - * - * This allows conversion of cycle counter values which were generated - * in the past. - */ -extern u64 timecounter_cyc2time(struct timecounter *tc, - cycle_t cycle_tstamp); - -/** * struct clocksource - hardware abstraction for a free running counter * Provides mostly state-free accessors to the underlying hardware. * This is the structure used for system time. 
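The cyclecounter/timecounter API removed from clocksource.h here is not dropped: it reappears, extended with fractional-nanosecond state, in the new <linux/timecounter.h> added later in this diff, and the cycle_t typedef moves to <linux/types.h>. A minimal consumer sketch against the relocated API (hypothetical driver code; all demo_* names are illustrative):

	#include <linux/timecounter.h>

	static cycle_t demo_read(const struct cyclecounter *cc)
	{
		/* read the free-running hardware counter here */
		return 0;
	}

	static struct cyclecounter demo_cc = {
		.read  = demo_read,
		.mask  = CYCLECOUNTER_MASK(32),	/* 32-bit counter */
		.mult  = 1 << 10,		/* cycle to nanosecond multiplier */
		.shift = 10,			/* cycle to nanosecond divisor (power of two) */
	};

	static struct timecounter demo_tc;

	static void demo_start(void)
	{
		/* the epoch is arbitrary; later timecounter_read() calls
		 * return nanoseconds elapsed since this point */
		timecounter_init(&demo_tc, &demo_cc, 0);
	}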
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 41c891d05f0..1d869d185a0 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -52,6 +52,10 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count) +struct sk_buff **eth_gro_receive(struct sk_buff **head, + struct sk_buff *skb); +int eth_gro_complete(struct sk_buff *skb, int nhoff); + /* Reserved Ethernet Addresses per IEEE 802.1Q */ static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; diff --git a/include/linux/fec.h b/include/linux/fec.h index bcff455d1d5..1454a503622 100644 --- a/include/linux/fec.h +++ b/include/linux/fec.h @@ -19,6 +19,7 @@ struct fec_platform_data { phy_interface_t phy; unsigned char mac[ETH_ALEN]; + void (*sleep_mode_enable)(int enabled); }; #endif diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 4f4eea8a628..b9c7897dc56 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1017,6 +1017,15 @@ struct ieee80211_mmie { u8 mic[8]; } __packed; +/* Management MIC information element (IEEE 802.11w) for GMAC and CMAC-256 */ +struct ieee80211_mmie_16 { + u8 element_id; + u8 length; + __le16 key_id; + u8 sequence_number[6]; + u8 mic[16]; +} __packed; + struct ieee80211_vendor_ie { u8 element_id; u8 len; @@ -1994,9 +2003,15 @@ enum ieee80211_key_len { WLAN_KEY_LEN_WEP40 = 5, WLAN_KEY_LEN_WEP104 = 13, WLAN_KEY_LEN_CCMP = 16, + WLAN_KEY_LEN_CCMP_256 = 32, WLAN_KEY_LEN_TKIP = 32, WLAN_KEY_LEN_AES_CMAC = 16, WLAN_KEY_LEN_SMS4 = 32, + WLAN_KEY_LEN_GCMP = 16, + WLAN_KEY_LEN_GCMP_256 = 32, + WLAN_KEY_LEN_BIP_CMAC_256 = 32, + WLAN_KEY_LEN_BIP_GMAC_128 = 16, + WLAN_KEY_LEN_BIP_GMAC_256 = 32, }; #define IEEE80211_WEP_IV_LEN 4 @@ -2004,9 +2019,16 @@ enum ieee80211_key_len { #define IEEE80211_CCMP_HDR_LEN 8 #define IEEE80211_CCMP_MIC_LEN 8 #define IEEE80211_CCMP_PN_LEN 6 +#define IEEE80211_CCMP_256_HDR_LEN 8 +#define IEEE80211_CCMP_256_MIC_LEN 16 +#define IEEE80211_CCMP_256_PN_LEN 6 #define IEEE80211_TKIP_IV_LEN 8 #define IEEE80211_TKIP_ICV_LEN 4 #define IEEE80211_CMAC_PN_LEN 6 +#define IEEE80211_GMAC_PN_LEN 6 +#define IEEE80211_GCMP_HDR_LEN 8 +#define IEEE80211_GCMP_MIC_LEN 16 +#define IEEE80211_GCMP_PN_LEN 6 /* Public action codes */ enum ieee80211_pub_actioncode { @@ -2230,6 +2252,11 @@ enum ieee80211_sa_query_action { #define WLAN_CIPHER_SUITE_WEP104 0x000FAC05 #define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06 #define WLAN_CIPHER_SUITE_GCMP 0x000FAC08 +#define WLAN_CIPHER_SUITE_GCMP_256 0x000FAC09 +#define WLAN_CIPHER_SUITE_CCMP_256 0x000FAC0A +#define WLAN_CIPHER_SUITE_BIP_GMAC_128 0x000FAC0B +#define WLAN_CIPHER_SUITE_BIP_GMAC_256 0x000FAC0C +#define WLAN_CIPHER_SUITE_BIP_CMAC_256 0x000FAC0D #define WLAN_CIPHER_SUITE_SMS4 0x00147201 diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 0a8ce762a47..a57bca2ea97 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -50,24 +50,6 @@ extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __use typedef int br_should_route_hook_t(struct sk_buff *skb); extern br_should_route_hook_t __rcu *br_should_route_hook; -#if IS_ENABLED(CONFIG_BRIDGE) -int br_fdb_external_learn_add(struct net_device *dev, - const unsigned char *addr, u16 vid); -int br_fdb_external_learn_del(struct net_device *dev, - const unsigned char 
*addr, u16 vid); -#else -static inline int br_fdb_external_learn_add(struct net_device *dev, - const unsigned char *addr, u16 vid) -{ - return 0; -} -static inline int br_fdb_external_learn_del(struct net_device *dev, - const unsigned char *addr, u16 vid) -{ - return 0; -} -#endif - #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list); diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 960e666c51e..b11b28a30b9 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -78,9 +78,9 @@ static inline bool is_vlan_dev(struct net_device *dev) return dev->priv_flags & IFF_802_1Q_VLAN; } -#define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) -#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) -#define vlan_tx_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) +#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) +#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) +#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) /** * struct vlan_pcpu_stats - VLAN percpu rx/tx stats @@ -376,7 +376,7 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb, static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) { skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, - vlan_tx_tag_get(skb)); + skb_vlan_tag_get(skb)); if (likely(skb)) skb->vlan_tci = 0; return skb; @@ -393,7 +393,7 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) */ static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb) { - if (vlan_tx_tag_present(skb)) + if (skb_vlan_tag_present(skb)) skb = __vlan_hwaccel_push_inside(skb); return skb; } @@ -442,8 +442,8 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, u16 *vlan_tci) { - if (vlan_tx_tag_present(skb)) { - *vlan_tci = vlan_tx_tag_get(skb); + if (skb_vlan_tag_present(skb)) { + *vlan_tci = skb_vlan_tag_get(skb); return 0; } else { *vlan_tci = 0; diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index c694e7baa62..4d5169f5d7d 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -52,6 +52,7 @@ struct ipv6_devconf { __s32 force_tllao; __s32 ndisc_notify; __s32 suppress_frag_ndisc; + __s32 accept_ra_mtu; void *sysctl; }; @@ -124,6 +125,12 @@ struct ipv6_mc_socklist; struct ipv6_ac_socklist; struct ipv6_fl_socklist; +struct inet6_cork { + struct ipv6_txoptions *opt; + u8 hop_limit; + u8 tclass; +}; + /** * struct ipv6_pinfo - ipv6 private area * @@ -216,11 +223,7 @@ struct ipv6_pinfo { struct ipv6_txoptions *opt; struct sk_buff *pktoptions; struct sk_buff *rxpmtu; - struct { - struct ipv6_txoptions *opt; - u8 hop_limit; - u8 tclass; - } cork; + struct inet6_cork cork; }; /* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! */ diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h index 5d10ae364b5..f266661d266 100644 --- a/include/linux/list_nulls.h +++ b/include/linux/list_nulls.h @@ -1,6 +1,9 @@ #ifndef _LINUX_LIST_NULLS_H #define _LINUX_LIST_NULLS_H +#include <linux/poison.h> +#include <linux/const.h> + /* * Special version of lists, where end of list is not a NULL pointer, * but a 'nulls' marker, which can have many different values. 
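A worked example of the nulls encoding described above, using the NULLS_MARKER() helper that the next hunk factors out (the value 2 is illustrative):

	static void demo_nulls(void)
	{
		struct hlist_nulls_head head;

		/* stores (2UL << 1) | 1 == 0x5 in head.first: the low bit
		 * tags the pointer as an end-of-list marker rather than a
		 * real node, and the upper bits carry the per-table value
		 * (2), recoverable by shifting right by one */
		INIT_HLIST_NULLS_HEAD(&head, 2);
	}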
@@ -21,8 +24,9 @@ struct hlist_nulls_head { struct hlist_nulls_node { struct hlist_nulls_node *next, **pprev; }; +#define NULLS_MARKER(value) (1UL | (((long)value) << 1)) #define INIT_HLIST_NULLS_HEAD(ptr, nulls) \ - ((ptr)->first = (struct hlist_nulls_node *) (1UL | (((long)nulls) << 1))) + ((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls)) #define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member) /** diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 64d25941b32..7b6d4e9ff60 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -71,6 +71,7 @@ enum { /*master notify fw on finish for slave's flr*/ MLX4_CMD_INFORM_FLR_DONE = 0x5b, + MLX4_CMD_VIRT_PORT_MAP = 0x5c, MLX4_CMD_GET_OP_REQ = 0x59, /* TPT commands */ @@ -165,9 +166,15 @@ enum { }; enum { - MLX4_CMD_TIME_CLASS_A = 10000, - MLX4_CMD_TIME_CLASS_B = 10000, - MLX4_CMD_TIME_CLASS_C = 10000, + MLX4_CMD_TIME_CLASS_A = 60000, + MLX4_CMD_TIME_CLASS_B = 60000, + MLX4_CMD_TIME_CLASS_C = 60000, +}; + +enum { + /* virtual to physical port mapping opcode modifiers */ + MLX4_GET_PORT_VIRT2PHY = 0x0, + MLX4_SET_PORT_VIRT2PHY = 0x1, }; enum { @@ -279,6 +286,8 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); int mlx4_config_dev_retrieval(struct mlx4_dev *dev, struct mlx4_config_dev_params *params); +void mlx4_cmd_wake_completions(struct mlx4_dev *dev); +void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev); /* * mlx4_get_slave_default_vlan - * return true if VST ( default vlan) @@ -288,5 +297,6 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, u16 *vlan, u8 *qos); #define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8) +#define COMM_CHAN_EVENT_INTERNAL_ERR (1 << 17) #endif /* MLX4_CMD_H */ diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 5f3a9aa7225..e4ebff7e9d0 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -42,7 +42,7 @@ #include <linux/atomic.h> -#include <linux/clocksource.h> +#include <linux/timecounter.h> #define MAX_MSIX_P_PORT 17 #define MAX_MSIX 64 @@ -70,6 +70,7 @@ enum { MLX4_FLAG_SLAVE = 1 << 3, MLX4_FLAG_SRIOV = 1 << 4, MLX4_FLAG_OLD_REG_MAC = 1 << 6, + MLX4_FLAG_BONDED = 1 << 7 }; enum { @@ -200,7 +201,9 @@ enum { MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16, MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17, MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18, - MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19 + MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19, + MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20, + MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21 }; enum { @@ -208,6 +211,10 @@ enum { MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1 }; +enum { + MLX4_VF_CAP_FLAG_RESET = 1 << 0 +}; + /* bit enums for an 8-bit flags field indicating special use * QPs which require special handling in qp_reserve_range. 
* Currently, this only includes QPs used by the ETH interface, @@ -248,9 +255,14 @@ enum { MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10, MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11, + MLX4_BMME_FLAG_PORT_REMAP = 1 << 24, MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28, }; +enum { + MLX4_FLAG_PORT_REMAP = MLX4_BMME_FLAG_PORT_REMAP +}; + enum mlx4_event { MLX4_EVENT_TYPE_COMP = 0x00, MLX4_EVENT_TYPE_PATH_MIG = 0x01, @@ -276,6 +288,7 @@ enum mlx4_event { MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b, MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d, + MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT = 0x3e, MLX4_EVENT_TYPE_NONE = 0xff, }; @@ -285,6 +298,11 @@ enum { }; enum { + MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE = 1, + MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE = 2, +}; + +enum { MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0, }; @@ -411,6 +429,16 @@ enum { MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK = 1 << 4, }; +enum { + MLX4_DEVICE_STATE_UP = 1 << 0, + MLX4_DEVICE_STATE_INTERNAL_ERROR = 1 << 1, +}; + +enum { + MLX4_INTERFACE_STATE_UP = 1 << 0, + MLX4_INTERFACE_STATE_DELETION = 1 << 1, +}; + #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) @@ -535,6 +563,7 @@ struct mlx4_caps { u8 alloc_res_qp_mask; u32 dmfs_high_rate_qpn_base; u32 dmfs_high_rate_qpn_range; + u32 vf_caps; }; struct mlx4_buf_list { @@ -660,6 +689,8 @@ struct mlx4_cq { void (*comp)(struct mlx4_cq *); void *priv; } tasklet_ctx; + int reset_notify_added; + struct list_head reset_notify; }; struct mlx4_qp { @@ -744,8 +775,23 @@ struct mlx4_vf_dev { u8 n_ports; }; -struct mlx4_dev { +struct mlx4_dev_persistent { struct pci_dev *pdev; + struct mlx4_dev *dev; + int nvfs[MLX4_MAX_PORTS + 1]; + int num_vfs; + enum mlx4_port_type curr_port_type[MLX4_MAX_PORTS + 1]; + enum mlx4_port_type curr_port_poss_type[MLX4_MAX_PORTS + 1]; + struct work_struct catas_work; + struct workqueue_struct *catas_wq; + struct mutex device_state_mutex; /* protect HW state */ + u8 state; + struct mutex interface_state_mutex; /* protect SW state */ + u8 interface_state; +}; + +struct mlx4_dev { + struct mlx4_dev_persistent *persist; unsigned long flags; unsigned long num_slaves; struct mlx4_caps caps; @@ -754,13 +800,11 @@ struct mlx4_dev { struct radix_tree_root qp_table_tree; u8 rev_id; char board_id[MLX4_BOARD_ID_LEN]; - int num_vfs; int numa_node; int oper_log_mgm_entry_size; u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; struct mlx4_vf_dev *dev_vfs; - int nvfs[MLX4_MAX_PORTS + 1]; }; struct mlx4_eqe { @@ -832,6 +876,11 @@ struct mlx4_eqe { } __packed tbl_change_info; } params; } __packed port_mgmt_change; + struct { + u8 reserved[3]; + u8 port; + u32 reserved1[5]; + } __packed bad_cable; } event; u8 slave_id; u8 reserved3[2]; @@ -1338,6 +1387,8 @@ int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port); int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port); int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port); +int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis); +int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2); int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port); int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port); int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port, diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index 022055c8fb2..9553a73d204 100644 
--- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h @@ -49,6 +49,10 @@ enum mlx4_dev_event { MLX4_DEV_EVENT_SLAVE_SHUTDOWN, }; +enum { + MLX4_INTFF_BONDING = 1 << 0 +}; + struct mlx4_interface { void * (*add) (struct mlx4_dev *dev); void (*remove)(struct mlx4_dev *dev, void *context); @@ -57,11 +61,26 @@ struct mlx4_interface { void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port); struct list_head list; enum mlx4_protocol protocol; + int flags; }; int mlx4_register_interface(struct mlx4_interface *intf); void mlx4_unregister_interface(struct mlx4_interface *intf); +int mlx4_bond(struct mlx4_dev *dev); +int mlx4_unbond(struct mlx4_dev *dev); +static inline int mlx4_is_bonded(struct mlx4_dev *dev) +{ + return !!(dev->flags & MLX4_FLAG_BONDED); +} + +struct mlx4_port_map { + u8 port1; + u8 port2; +}; + +int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p); + void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port); static inline u64 mlx4_mac_to_u64(u8 *addr) diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 467ccdf94c9..2bbc62aa818 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -96,6 +96,7 @@ enum { MLX4_QP_BIT_RRE = 1 << 15, MLX4_QP_BIT_RWE = 1 << 14, MLX4_QP_BIT_RAE = 1 << 13, + MLX4_QP_BIT_FPP = 1 << 3, MLX4_QP_BIT_RIC = 1 << 4, }; diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 0f01fe06542..99680796371 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -24,13 +24,15 @@ * Vendors and devices. Sort key: vendor first, device next. */ #define SDIO_VENDOR_ID_BROADCOM 0x02d0 -#define SDIO_DEVICE_ID_BROADCOM_43143 43143 +#define SDIO_DEVICE_ID_BROADCOM_43143 0xa887 #define SDIO_DEVICE_ID_BROADCOM_43241 0x4324 #define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 #define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 #define SDIO_DEVICE_ID_BROADCOM_4334 0x4334 +#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c +#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d #define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 -#define SDIO_DEVICE_ID_BROADCOM_43362 43362 +#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 #define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 #define SDIO_VENDOR_ID_INTEL 0x0089 diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 8e30685affe..7d59dc6ab78 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -66,6 +66,7 @@ enum { NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */ NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */ NETIF_F_BUSY_POLL_BIT, /* Busy poll */ + NETIF_F_HW_SWITCH_OFFLOAD_BIT, /* HW switch offload */ /* * Add your fresh new feature above and remember to update @@ -124,6 +125,7 @@ enum { #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) #define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) +#define NETIF_F_HW_SWITCH_OFFLOAD __NETIF_F(HW_SWITCH_OFFLOAD) /* Features valid for ethtool to change */ /* = all defined minus driver/device-class-related */ @@ -159,7 +161,9 @@ enum { */ #define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ NETIF_F_SG | NETIF_F_HIGHDMA | \ - NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED) + NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED | \ + NETIF_F_HW_SWITCH_OFFLOAD) + /* * If one device doesn't support one of these features, then disable it * for all in netdev_increment_features. 
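The mlx4/driver.h hunk above exposes a small port-bonding API (mlx4_bond()/mlx4_unbond(), mlx4_is_bonded() and mlx4_port_map_set()). A hypothetical consumer sketch, remapping both virtual ports onto physical port 1 while the device is bonded:

	static int demo_map_both_to_port1(struct mlx4_dev *dev)
	{
		struct mlx4_port_map v2p = {
			.port1 = 1,	/* virtual port 1 -> physical port 1 */
			.port2 = 1,	/* virtual port 2 -> physical port 1 */
		};

		if (!mlx4_is_bonded(dev))
			return -EOPNOTSUPP;

		return mlx4_port_map_set(dev, &v2p);
	}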
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 52fd8e8694c..d115256ed5a 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -51,6 +51,7 @@ #include <linux/netdev_features.h> #include <linux/neighbour.h> #include <uapi/linux/netdevice.h> +#include <uapi/linux/if_bonding.h> struct netpoll_info; struct device; @@ -643,39 +644,40 @@ struct rps_dev_flow_table { /* * The rps_sock_flow_table contains mappings of flows to the last CPU * on which they were processed by the application (set in recvmsg). + * Each entry is a 32bit value. Upper part is the high order bits + * of flow hash, lower part is cpu number. + * rps_cpu_mask is used to partition the space, depending on number of + * possible cpus : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1 + * For example, if 64 cpus are possible, rps_cpu_mask = 0x3f, + * meaning we use 32-6=26 bits for the hash. */ struct rps_sock_flow_table { - unsigned int mask; - u16 ents[0]; + u32 mask; + + u32 ents[0] ____cacheline_aligned_in_smp; }; -#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \ - ((_num) * sizeof(u16))) +#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num])) #define RPS_NO_CPU 0xffff +extern u32 rps_cpu_mask; +extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; + static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, u32 hash) { if (table && hash) { - unsigned int cpu, index = hash & table->mask; + unsigned int index = hash & table->mask; + u32 val = hash & ~rps_cpu_mask; /* We only give a hint, preemption can change cpu under us */ - cpu = raw_smp_processor_id(); + val |= raw_smp_processor_id(); - if (table->ents[index] != cpu) - table->ents[index] = cpu; + if (table->ents[index] != val) + table->ents[index] = val; } } -static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table, - u32 hash) -{ - if (table && hash) - table->ents[hash & table->mask] = RPS_NO_CPU; -} - -extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; - #ifdef CONFIG_RFS_ACCEL bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, u16 filter_id); @@ -1154,13 +1156,15 @@ struct net_device_ops { int idx); int (*ndo_bridge_setlink)(struct net_device *dev, - struct nlmsghdr *nlh); + struct nlmsghdr *nlh, + u16 flags); int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 filter_mask); int (*ndo_bridge_dellink)(struct net_device *dev, - struct nlmsghdr *nlh); + struct nlmsghdr *nlh, + u16 flags); int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); int (*ndo_get_phys_port_id)(struct net_device *dev, @@ -1514,6 +1518,8 @@ struct net_device { struct list_head napi_list; struct list_head unreg_list; struct list_head close_list; + struct list_head ptype_all; + struct list_head ptype_specific; struct { struct list_head upper; @@ -1969,7 +1975,7 @@ struct offload_callbacks { struct sk_buff *(*gso_segment)(struct sk_buff *skb, netdev_features_t features); struct sk_buff **(*gro_receive)(struct sk_buff **head, - struct sk_buff *skb); + struct sk_buff *skb); int (*gro_complete)(struct sk_buff *skb, int nhoff); }; @@ -1979,10 +1985,21 @@ struct packet_offload { struct list_head list; }; +struct udp_offload; + +struct udp_offload_callbacks { + struct sk_buff **(*gro_receive)(struct sk_buff **head, + struct sk_buff *skb, + struct udp_offload *uoff); + int (*gro_complete)(struct sk_buff *skb, + int nhoff, + struct udp_offload *uoff); +}; + struct 
udp_offload { __be16 port; u8 ipproto; - struct offload_callbacks callbacks; + struct udp_offload_callbacks callbacks; }; /* often modified stats are per cpu, other are shared (netdev->stats) */ @@ -2041,6 +2058,7 @@ struct pcpu_sw_netstats { #define NETDEV_RESEND_IGMP 0x0016 #define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */ #define NETDEV_CHANGEINFODATA 0x0018 +#define NETDEV_BONDING_INFO 0x0019 int register_netdevice_notifier(struct notifier_block *nb); int unregister_netdevice_notifier(struct notifier_block *nb); @@ -2303,6 +2321,21 @@ do { \ compute_pseudo(skb, proto)); \ } while (0) +static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr, + int start, int offset) +{ + __wsum delta; + + BUG_ON(!NAPI_GRO_CB(skb)->csum_valid); + + delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset); + + /* Adjust skb->csum since we changed the packet */ + skb->csum = csum_add(skb->csum, delta); + NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); +} + + static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, @@ -3464,6 +3497,19 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb, struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, netdev_features_t features); +struct netdev_bonding_info { + ifslave slave; + ifbond master; +}; + +struct netdev_notifier_bonding_info { + struct netdev_notifier_info info; /* must be first */ + struct netdev_bonding_info bonding_info; +}; + +void netdev_bonding_info_change(struct net_device *dev, + struct netdev_bonding_info *bonding_info); + static inline struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) { diff --git a/include/linux/phy.h b/include/linux/phy.h index 22af8f8f580..685809835b5 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -327,6 +327,8 @@ struct phy_c45_device_ids { * c45_ids: 802.3-c45 Device Identifers if is_c45. * is_c45: Set to true if this phy uses clause 45 addressing. * is_internal: Set to true if this phy is internal to a MAC. + * has_fixups: Set to true if this phy has fixups/quirks. + * suspended: Set to true if this phy has been suspended successfully. * state: state of the PHY for management purposes * dev_flags: Device-specific flags used by the PHY driver. * addr: Bus address of PHY @@ -364,6 +366,7 @@ struct phy_device { bool is_c45; bool is_internal; bool has_fixups; + bool suspended; enum phy_state state; @@ -565,6 +568,15 @@ struct phy_driver { void (*write_mmd_indirect)(struct phy_device *dev, int ptrad, int devnum, int regnum, u32 val); + /* Get the size and type of the eeprom contained within a plug-in + * module */ + int (*module_info)(struct phy_device *dev, + struct ethtool_modinfo *modinfo); + + /* Get the eeprom information from the plug-in module */ + int (*module_eeprom)(struct phy_device *dev, + struct ethtool_eeprom *ee, u8 *data); + struct device_driver driver; }; #define to_phy_driver(d) container_of(d, struct phy_driver, driver) diff --git a/include/linux/platform_data/irda-sa11x0.h b/include/linux/platform_data/irda-sa11x0.h new file mode 100644 index 00000000000..38f77b5e56c --- /dev/null +++ b/include/linux/platform_data/irda-sa11x0.h @@ -0,0 +1,20 @@ +/* + * arch/arm/include/asm/mach/irda.h + * + * Copyright (C) 2004 Russell King. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ASM_ARM_MACH_IRDA_H +#define __ASM_ARM_MACH_IRDA_H + +struct irda_platform_data { + int (*startup)(struct device *); + void (*shutdown)(struct device *); + int (*set_power)(struct device *, unsigned int state); + void (*set_speed)(struct device *, unsigned int speed); +}; + +#endif diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h index 5087fff96d8..cc2bdafb0c6 100644 --- a/include/linux/platform_data/st21nfca.h +++ b/include/linux/platform_data/st21nfca.h @@ -26,6 +26,8 @@ struct st21nfca_nfc_platform_data { unsigned int gpio_ena; unsigned int irq_polarity; + bool is_ese_present; + bool is_uicc_present; }; #endif /* _ST21NFCA_HCI_H_ */ diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h index c3b432f5b63..b023373d987 100644 --- a/include/linux/platform_data/st21nfcb.h +++ b/include/linux/platform_data/st21nfcb.h @@ -19,8 +19,6 @@ #ifndef _ST21NFCB_NCI_H_ #define _ST21NFCB_NCI_H_ -#include <linux/i2c.h> - #define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci" struct st21nfcb_nfc_platform_data { @@ -28,4 +26,4 @@ struct st21nfcb_nfc_platform_data { unsigned int irq_polarity; }; -#endif /* _ST21NFCA_HCI_H_ */ +#endif /* _ST21NFCB_NCI_H_ */ diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index b93fd89b2e5..58851275fed 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -18,16 +18,45 @@ #ifndef _LINUX_RHASHTABLE_H #define _LINUX_RHASHTABLE_H -#include <linux/rculist.h> +#include <linux/compiler.h> +#include <linux/list_nulls.h> +#include <linux/workqueue.h> +#include <linux/mutex.h> + +/* + * The end of the chain is marked with a special nulls marks which has + * the following format: + * + * +-------+-----------------------------------------------------+-+ + * | Base | Hash |1| + * +-------+-----------------------------------------------------+-+ + * + * Base (4 bits) : Reserved to distinguish between multiple tables. + * Specified via &struct rhashtable_params.nulls_base. + * Hash (27 bits): Full hash (unmasked) of first element added to bucket + * 1 (1 bit) : Nulls marker (always set) + * + * The remaining bits of the next pointer remain unused for now. 
+ */ +#define RHT_BASE_BITS 4 +#define RHT_HASH_BITS 27 +#define RHT_BASE_SHIFT RHT_HASH_BITS struct rhash_head { struct rhash_head __rcu *next; }; -#define INIT_HASH_HEAD(ptr) ((ptr)->next = NULL) - +/** + * struct bucket_table - Table of hash buckets + * @size: Number of hash buckets + * @locks_mask: Mask to apply before accessing locks[] + * @locks: Array of spinlocks protecting individual buckets + * @buckets: size * hash buckets + */ struct bucket_table { size_t size; + unsigned int locks_mask; + spinlock_t *locks; struct rhash_head __rcu *buckets[]; }; @@ -45,11 +74,16 @@ struct rhashtable; * @hash_rnd: Seed to use while hashing * @max_shift: Maximum number of shifts while expanding * @min_shift: Minimum number of shifts while shrinking + * @nulls_base: Base value to generate nulls marker + * @locks_mul: Number of bucket locks to allocate per cpu (default: 128) * @hashfn: Function to hash key * @obj_hashfn: Function to hash object * @grow_decision: If defined, may return true if table should expand * @shrink_decision: If defined, may return true if table should shrink - * @mutex_is_held: Must return true if protecting mutex is held + * + * Note: when implementing the grow and shrink decision function, min/max + * shift must be enforced, otherwise, resizing watermarks they set may be + * useless. */ struct rhashtable_params { size_t nelem_hint; @@ -59,36 +93,95 @@ struct rhashtable_params { u32 hash_rnd; size_t max_shift; size_t min_shift; + u32 nulls_base; + size_t locks_mul; rht_hashfn_t hashfn; rht_obj_hashfn_t obj_hashfn; bool (*grow_decision)(const struct rhashtable *ht, size_t new_size); bool (*shrink_decision)(const struct rhashtable *ht, size_t new_size); -#ifdef CONFIG_PROVE_LOCKING - int (*mutex_is_held)(void *parent); - void *parent; -#endif }; /** * struct rhashtable - Hash table handle * @tbl: Bucket table + * @future_tbl: Table under construction during expansion/shrinking * @nelems: Number of elements in table * @shift: Current size (1 << shift) * @p: Configuration parameters + * @run_work: Deferred worker to expand/shrink asynchronously + * @mutex: Mutex to protect current/future table swapping + * @walkers: List of active walkers + * @being_destroyed: True if table is set up for destruction */ struct rhashtable { struct bucket_table __rcu *tbl; - size_t nelems; - size_t shift; + struct bucket_table __rcu *future_tbl; + atomic_t nelems; + atomic_t shift; struct rhashtable_params p; + struct work_struct run_work; + struct mutex mutex; + struct list_head walkers; + bool being_destroyed; +}; + +/** + * struct rhashtable_walker - Hash table walker + * @list: List entry on list of walkers + * @resize: Resize event occured + */ +struct rhashtable_walker { + struct list_head list; + bool resize; }; +/** + * struct rhashtable_iter - Hash table iterator, fits into netlink cb + * @ht: Table to iterate through + * @p: Current pointer + * @walker: Associated rhashtable walker + * @slot: Current slot + * @skip: Number of entries to skip in slot + */ +struct rhashtable_iter { + struct rhashtable *ht; + struct rhash_head *p; + struct rhashtable_walker *walker; + unsigned int slot; + unsigned int skip; +}; + +static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash) +{ + return NULLS_MARKER(ht->p.nulls_base + hash); +} + +#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \ + ((ptr) = (typeof(ptr)) rht_marker(ht, hash)) + +static inline bool rht_is_a_nulls(const struct rhash_head *ptr) +{ + return ((unsigned long) ptr & 1); +} + +static inline unsigned long 
rht_get_nulls_value(const struct rhash_head *ptr) +{ + return ((unsigned long) ptr) >> 1; +} + #ifdef CONFIG_PROVE_LOCKING -int lockdep_rht_mutex_is_held(const struct rhashtable *ht); +int lockdep_rht_mutex_is_held(struct rhashtable *ht); +int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash); #else -static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht) +static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht) +{ + return 1; +} + +static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, + u32 hash) { return 1; } @@ -96,13 +189,8 @@ static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht) int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); -u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len); -u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr); - void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); -void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, - struct rhash_head __rcu **pprev); bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); @@ -110,11 +198,23 @@ bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); int rhashtable_expand(struct rhashtable *ht); int rhashtable_shrink(struct rhashtable *ht); -void *rhashtable_lookup(const struct rhashtable *ht, const void *key); -void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, +void *rhashtable_lookup(struct rhashtable *ht, const void *key); +void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key, bool (*compare)(void *, void *), void *arg); -void rhashtable_destroy(const struct rhashtable *ht); +bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj); +bool rhashtable_lookup_compare_insert(struct rhashtable *ht, + struct rhash_head *obj, + bool (*compare)(void *, void *), + void *arg); + +int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter); +void rhashtable_walk_exit(struct rhashtable_iter *iter); +int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); +void *rhashtable_walk_next(struct rhashtable_iter *iter); +void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU); + +void rhashtable_destroy(struct rhashtable *ht); #define rht_dereference(p, ht) \ rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht)) @@ -122,92 +222,146 @@ void rhashtable_destroy(const struct rhashtable *ht); #define rht_dereference_rcu(p, ht) \ rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht)) -#define rht_entry(ptr, type, member) container_of(ptr, type, member) -#define rht_entry_safe(ptr, type, member) \ -({ \ - typeof(ptr) __ptr = (ptr); \ - __ptr ? rht_entry(__ptr, type, member) : NULL; \ -}) +#define rht_dereference_bucket(p, tbl, hash) \ + rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash)) + +#define rht_dereference_bucket_rcu(p, tbl, hash) \ + rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash)) -#define rht_next_entry_safe(pos, ht, member) \ -({ \ - pos ? 
rht_entry_safe(rht_dereference((pos)->member.next, ht), \ - typeof(*(pos)), member) : NULL; \ -}) +#define rht_entry(tpos, pos, member) \ + ({ tpos = container_of(pos, typeof(*tpos), member); 1; }) + +/** + * rht_for_each_continue - continue iterating over hash chain + * @pos: the &struct rhash_head to use as a loop cursor. + * @head: the previous &struct rhash_head to continue from + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + */ +#define rht_for_each_continue(pos, head, tbl, hash) \ + for (pos = rht_dereference_bucket(head, tbl, hash); \ + !rht_is_a_nulls(pos); \ + pos = rht_dereference_bucket((pos)->next, tbl, hash)) /** * rht_for_each - iterate over hash chain - * @pos: &struct rhash_head to use as a loop cursor. - * @head: head of the hash chain (struct rhash_head *) - * @ht: pointer to your struct rhashtable + * @pos: the &struct rhash_head to use as a loop cursor. + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index */ -#define rht_for_each(pos, head, ht) \ - for (pos = rht_dereference(head, ht); \ - pos; \ - pos = rht_dereference((pos)->next, ht)) +#define rht_for_each(pos, tbl, hash) \ + rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash) + +/** + * rht_for_each_entry_continue - continue iterating over hash chain + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct rhash_head to use as a loop cursor. + * @head: the previous &struct rhash_head to continue from + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * @member: name of the &struct rhash_head within the hashable struct. + */ +#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \ + for (pos = rht_dereference_bucket(head, tbl, hash); \ + (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ + pos = rht_dereference_bucket((pos)->next, tbl, hash)) /** * rht_for_each_entry - iterate over hash chain of given type - * @pos: type * to use as a loop cursor. - * @head: head of the hash chain (struct rhash_head *) - * @ht: pointer to your struct rhashtable - * @member: name of the rhash_head within the hashable struct. + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct rhash_head to use as a loop cursor. + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * @member: name of the &struct rhash_head within the hashable struct. */ -#define rht_for_each_entry(pos, head, ht, member) \ - for (pos = rht_entry_safe(rht_dereference(head, ht), \ - typeof(*(pos)), member); \ - pos; \ - pos = rht_next_entry_safe(pos, ht, member)) +#define rht_for_each_entry(tpos, pos, tbl, hash, member) \ + rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \ + tbl, hash, member) /** * rht_for_each_entry_safe - safely iterate over hash chain of given type - * @pos: type * to use as a loop cursor. - * @n: type * to use for temporary next object storage - * @head: head of the hash chain (struct rhash_head *) - * @ht: pointer to your struct rhashtable - * @member: name of the rhash_head within the hashable struct. + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct rhash_head to use as a loop cursor. + * @next: the &struct rhash_head to use as next in loop cursor. + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * @member: name of the &struct rhash_head within the hashable struct. * * This hash chain list-traversal primitive allows for the looped code to * remove the loop cursor from the list. 
*/ -#define rht_for_each_entry_safe(pos, n, head, ht, member) \ - for (pos = rht_entry_safe(rht_dereference(head, ht), \ - typeof(*(pos)), member), \ - n = rht_next_entry_safe(pos, ht, member); \ - pos; \ - pos = n, \ - n = rht_next_entry_safe(pos, ht, member)) +#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ + for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \ + next = !rht_is_a_nulls(pos) ? \ + rht_dereference_bucket(pos->next, tbl, hash) : NULL; \ + (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ + pos = next, \ + next = !rht_is_a_nulls(pos) ? \ + rht_dereference_bucket(pos->next, tbl, hash) : NULL) + +/** + * rht_for_each_rcu_continue - continue iterating over rcu hash chain + * @pos: the &struct rhash_head to use as a loop cursor. + * @head: the previous &struct rhash_head to continue from + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * + * This hash chain list-traversal primitive may safely run concurrently with + * the _rcu mutation primitives such as rhashtable_insert() as long as the + * traversal is guarded by rcu_read_lock(). + */ +#define rht_for_each_rcu_continue(pos, head, tbl, hash) \ + for (({barrier(); }), \ + pos = rht_dereference_bucket_rcu(head, tbl, hash); \ + !rht_is_a_nulls(pos); \ + pos = rcu_dereference_raw(pos->next)) /** * rht_for_each_rcu - iterate over rcu hash chain - * @pos: &struct rhash_head to use as a loop cursor. - * @head: head of the hash chain (struct rhash_head *) - * @ht: pointer to your struct rhashtable + * @pos: the &struct rhash_head to use as a loop cursor. + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * + * This hash chain list-traversal primitive may safely run concurrently with + * the _rcu mutation primitives such as rhashtable_insert() as long as the + * traversal is guarded by rcu_read_lock(). + */ +#define rht_for_each_rcu(pos, tbl, hash) \ + rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash) + +/** + * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct rhash_head to use as a loop cursor. + * @head: the previous &struct rhash_head to continue from + * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * @member: name of the &struct rhash_head within the hashable struct. * * This hash chain list-traversal primitive may safely run concurrently with - * the _rcu fkht mutation primitives such as rht_insert() as long as the + * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). */ -#define rht_for_each_rcu(pos, head, ht) \ - for (pos = rht_dereference_rcu(head, ht); \ - pos; \ - pos = rht_dereference_rcu((pos)->next, ht)) +#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \ + for (({barrier(); }), \ + pos = rht_dereference_bucket_rcu(head, tbl, hash); \ + (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ + pos = rht_dereference_bucket_rcu(pos->next, tbl, hash)) /** * rht_for_each_entry_rcu - iterate over rcu hash chain of given type - * @pos: type * to use as a loop cursor. - * @head: head of the hash chain (struct rhash_head *) - * @member: name of the rhash_head within the hashable struct. + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct rhash_head to use as a loop cursor. 
+ * @tbl: the &struct bucket_table + * @hash: the hash value / bucket index + * @member: name of the &struct rhash_head within the hashable struct. * * This hash chain list-traversal primitive may safely run concurrently with - * the _rcu fkht mutation primitives such as rht_insert() as long as the + * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). */ -#define rht_for_each_entry_rcu(pos, head, member) \ - for (pos = rht_entry_safe(rcu_dereference_raw(head), \ - typeof(*(pos)), member); \ - pos; \ - pos = rht_entry_safe(rcu_dereference_raw((pos)->member.next), \ - typeof(*(pos)), member)) +#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ + rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\ + tbl, hash, member) #endif /* _LINUX_RHASHTABLE_H */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 85ab7d72b54..1bb36edb66b 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -626,8 +626,11 @@ struct sk_buff { __u32 hash; __be16 vlan_proto; __u16 vlan_tci; -#ifdef CONFIG_NET_RX_BUSY_POLL - unsigned int napi_id; +#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS) + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; #endif #ifdef CONFIG_NETWORK_SECMARK __u32 secmark; @@ -2484,19 +2487,18 @@ static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) } static inline int skb_add_data(struct sk_buff *skb, - char __user *from, int copy) + struct iov_iter *from, int copy) { const int off = skb->len; if (skb->ip_summed == CHECKSUM_NONE) { - int err = 0; - __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy), - copy, 0, &err); - if (!err) { + __wsum csum = 0; + if (csum_and_copy_from_iter(skb_put(skb, copy), copy, + &csum, from) == copy) { skb->csum = csum_block_add(skb->csum, csum, off); return 0; } - } else if (!copy_from_user(skb_put(skb, copy), from, copy)) + } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy) return 0; __skb_trim(skb, off); @@ -2693,8 +2695,7 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len) { - /* XXX: stripping const */ - return memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len); + return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT; } static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len) @@ -3071,7 +3072,7 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto) #define skb_checksum_validate_zero_check(skb, proto, check, \ compute_pseudo) \ - __skb_checksum_validate_(skb, proto, true, true, check, compute_pseudo) + __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo) #define skb_checksum_simple_validate(skb) \ __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo) @@ -3096,6 +3097,27 @@ do { \ compute_pseudo(skb, proto)); \ } while (0) +/* Update skbuf and packet to reflect the remote checksum offload operation. + * When called, ptr indicates the starting point for skb->csum when + * ip_summed is CHECKSUM_COMPLETE. If we need create checksum complete + * here, skb_postpull_rcsum is done so skb->csum start is ptr. 
+ */ +static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr, + int start, int offset) +{ + __wsum delta; + + if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) { + __skb_checksum_complete(skb); + skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data); + } + + delta = remcsum_adjust(ptr, skb->csum, start, offset); + + /* Adjust skb->csum since we changed the packet */ + skb->csum = csum_add(skb->csum, delta); +} + #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) void nf_conntrack_destroy(struct nf_conntrack *nfct); static inline void nf_conntrack_put(struct nf_conntrack *nfct) diff --git a/include/linux/socket.h b/include/linux/socket.h index 6e49a14365d..5c19cba34dc 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -318,13 +318,6 @@ struct ucred { /* IPX options */ #define IPX_TYPE 1 -extern int csum_partial_copy_fromiovecend(unsigned char *kdata, - struct iovec *iov, - int offset, - unsigned int len, __wsum *csump); -extern unsigned long iov_pages(const struct iovec *iov, int offset, - unsigned long nr_segs); - extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 262ba4ef9a8..3e18379dfa6 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -190,6 +190,8 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) #ifdef CONFIG_DEBUG_LOCK_ALLOC # define raw_spin_lock_nested(lock, subclass) \ _raw_spin_lock_nested(lock, subclass) +# define raw_spin_lock_bh_nested(lock, subclass) \ + _raw_spin_lock_bh_nested(lock, subclass) # define raw_spin_lock_nest_lock(lock, nest_lock) \ do { \ @@ -205,6 +207,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) # define raw_spin_lock_nested(lock, subclass) \ _raw_spin_lock(((void)(subclass), (lock))) # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) +# define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock) #endif #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) @@ -324,6 +327,11 @@ do { \ raw_spin_lock_nested(spinlock_check(lock), subclass); \ } while (0) +#define spin_lock_bh_nested(lock, subclass) \ +do { \ + raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\ +} while (0) + #define spin_lock_nest_lock(lock, nest_lock) \ do { \ raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 42dfab89e74..5344268e6e6 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr); void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) __acquires(lock); +void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass) + __acquires(lock); void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) __acquires(lock); diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h index d0d188861ad..d3afef9d8db 100644 --- a/include/linux/spinlock_api_up.h +++ b/include/linux/spinlock_api_up.h @@ -57,6 +57,7 @@ #define _raw_spin_lock(lock) __LOCK(lock) #define _raw_spin_lock_nested(lock, subclass) __LOCK(lock) +#define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock) #define 
_raw_read_lock(lock) __LOCK(lock) #define _raw_write_lock(lock) __LOCK(lock) #define _raw_spin_lock_bh(lock) __LOCK_BH(lock) diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h index f7b9100686c..c0f707ac192 100644 --- a/include/linux/ssb/ssb_regs.h +++ b/include/linux/ssb/ssb_regs.h @@ -173,6 +173,7 @@ #define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16)) #define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16)) #define SSB_SPROMSIZE_WORDS_R10 230 +#define SSB_SPROMSIZE_WORDS_R11 234 #define SSB_SPROM_BASE1 0x1000 #define SSB_SPROM_BASE31 0x0800 #define SSB_SPROM_REVISION 0x007E diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 67309ece077..1a7adb41164 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -115,6 +115,7 @@ struct tcp_request_sock { u32 rcv_isn; u32 snt_isn; u32 snt_synack; /* synack sent time */ + u32 last_oow_ack_time; /* last SYNACK */ u32 rcv_nxt; /* the ack # by SYNACK. For * FastOpen it's the seq# * after data-in-SYN. @@ -152,6 +153,7 @@ struct tcp_sock { u32 snd_sml; /* Last byte of the most recently transmitted small packet */ u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ + u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */ u32 tsoffset; /* timestamp offset */ @@ -340,6 +342,10 @@ struct tcp_timewait_sock { u32 tw_rcv_wnd; u32 tw_ts_offset; u32 tw_ts_recent; + + /* The time we sent the last out-of-window ACK: */ + u32 tw_last_oow_ack_time; + long tw_ts_recent_stamp; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *tw_md5_key; diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h new file mode 100644 index 00000000000..4382035a75b --- /dev/null +++ b/include/linux/timecounter.h @@ -0,0 +1,139 @@ +/* + * linux/include/linux/timecounter.h + * + * based on code that migrated away from + * linux/include/linux/clocksource.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _LINUX_TIMECOUNTER_H +#define _LINUX_TIMECOUNTER_H + +#include <linux/types.h> + +/* simplify initialization of mask field */ +#define CYCLECOUNTER_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) + +/** + * struct cyclecounter - hardware abstraction for a free running counter + * Provides completely state-free accessors to the underlying hardware. + * Depending on which hardware it reads, the cycle counter may wrap + * around quickly. Locking rules (if necessary) have to be defined + * by the implementor and user of specific instances of this API. 
+ * + * @read: returns the current cycle value + * @mask: bitmask for two's complement + * subtraction of non 64 bit counters, + * see CYCLECOUNTER_MASK() helper macro + * @mult: cycle to nanosecond multiplier + * @shift: cycle to nanosecond divisor (power of two) + */ +struct cyclecounter { + cycle_t (*read)(const struct cyclecounter *cc); + cycle_t mask; + u32 mult; + u32 shift; +}; + +/** + * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds + * Contains the state needed by timecounter_read() to detect + * cycle counter wrap around. Initialize with + * timecounter_init(). Also used to convert cycle counts into the + * corresponding nanosecond counts with timecounter_cyc2time(). Users + * of this code are responsible for initializing the underlying + * cycle counter hardware, locking issues and reading the time + * more often than the cycle counter wraps around. The nanosecond + * counter will only wrap around after ~585 years. + * + * @cc: the cycle counter used by this instance + * @cycle_last: most recent cycle counter value seen by + * timecounter_read() + * @nsec: continuously increasing count + * @mask: bit mask for maintaining the 'frac' field + * @frac: accumulated fractional nanoseconds + */ +struct timecounter { + const struct cyclecounter *cc; + cycle_t cycle_last; + u64 nsec; + u64 mask; + u64 frac; +}; + +/** + * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds + * @cc: Pointer to cycle counter. + * @cycles: Cycles + * @mask: bit mask for maintaining the 'frac' field + * @frac: pointer to storage for the fractional nanoseconds. + */ +static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc, + cycle_t cycles, u64 mask, u64 *frac) +{ + u64 ns = (u64) cycles; + + ns = (ns * cc->mult) + *frac; + *frac = ns & mask; + return ns >> cc->shift; +} + +/** + * timecounter_adjtime - Shifts the time of the clock. + * @delta: Desired change in nanoseconds. + */ +static inline void timecounter_adjtime(struct timecounter *tc, s64 delta) +{ + tc->nsec += delta; +} + +/** + * timecounter_init - initialize a time counter + * @tc: Pointer to time counter which is to be initialized/reset + * @cc: A cycle counter, ready to be used. + * @start_tstamp: Arbitrary initial time stamp. + * + * After this call the current cycle register (roughly) corresponds to + * the initial time stamp. Every call to timecounter_read() increments + * the time stamp counter by the number of elapsed nanoseconds. + */ +extern void timecounter_init(struct timecounter *tc, + const struct cyclecounter *cc, + u64 start_tstamp); + +/** + * timecounter_read - return nanoseconds elapsed since timecounter_init() + * plus the initial time stamp + * @tc: Pointer to time counter. + * + * In other words, keeps track of time since the same epoch as + * the function which generated the initial time stamp. + */ +extern u64 timecounter_read(struct timecounter *tc); + +/** + * timecounter_cyc2time - convert a cycle counter to same + * time base as values returned by + * timecounter_read() + * @tc: Pointer to time counter. + * @cycle_tstamp: a value returned by tc->cc->read() + * + * Cycle counts that are converted correctly as long as they + * fall into the interval [-1/2 max cycle count, +1/2 max cycle count], + * with "max cycle count" == cs->mask+1. + * + * This allows conversion of cycle counter values which were generated + * in the past. 
+ */ +extern u64 timecounter_cyc2time(struct timecounter *tc, + cycle_t cycle_tstamp); + +#endif diff --git a/include/linux/types.h b/include/linux/types.h index a0bb7048687..62323825cff 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -213,5 +213,8 @@ struct callback_head { }; #define rcu_head callback_head +/* clocksource cycle base type */ +typedef u64 cycle_t; + #endif /* __ASSEMBLY__ */ #endif /* _LINUX_TYPES_H */ diff --git a/include/linux/udp.h b/include/linux/udp.h index ee327759322..247cfdcc4b0 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -49,11 +49,7 @@ struct udp_sock { unsigned int corkflag; /* Cork is required */ __u8 encap_type; /* Is this an Encapsulation socket? */ unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */ - no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */ - convert_csum:1;/* On receive, convert checksum - * unnecessary to checksum complete - * if possible. - */ + no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */ /* * Following member retains the information to create a UDP header * when the socket is uncorked. @@ -102,16 +98,6 @@ static inline bool udp_get_no_check6_rx(struct sock *sk) return udp_sk(sk)->no_check6_rx; } -static inline void udp_set_convert_csum(struct sock *sk, bool val) -{ - udp_sk(sk)->convert_csum = val; -} - -static inline bool udp_get_convert_csum(struct sock *sk) -{ - return udp_sk(sk)->convert_csum; -} - #define udp_portaddr_for_each_entry(__sk, node, list) \ hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node) diff --git a/include/linux/uio.h b/include/linux/uio.h index 1c5e453f7ea..3e0cb4ea390 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -135,10 +135,4 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); -int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); -int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, - int offset, int len); -int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, - int offset, int len); - #endif diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h index 5691f752ce8..63df3a2a8ce 100644 --- a/include/linux/vmw_vmci_api.h +++ b/include/linux/vmw_vmci_api.h @@ -74,7 +74,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair, ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size, int mode); ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, - void *iov, size_t iov_size, int mode); + struct msghdr *msg, size_t iov_size, int mode); ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size, int mode); ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size, |