Diffstat (limited to 'include')
-rw-r--r--   include/linux/netdevice.h   36
-rw-r--r--   include/linux/skbuff.h      17
-rw-r--r--   include/net/sctp/structs.h  14
3 files changed, 50 insertions, 17 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 440a02ee6f9..e8eeebd49a9 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -752,6 +752,9 @@ struct netdev_phys_port_id {
 	unsigned char id_len;
 };

+typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
+				       struct sk_buff *skb);
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
@@ -783,7 +786,7 @@ struct netdev_phys_port_id {
  *	Required can not be NULL.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- *                         void *accel_priv);
+ *                         void *accel_priv, select_queue_fallback_t fallback);
  *	Called to decide which queue to when device supports multiple
  *	transmit queues.
  *
@@ -1005,7 +1008,8 @@ struct net_device_ops {
 						   struct net_device *dev);
 	u16			(*ndo_select_queue)(struct net_device *dev,
 						    struct sk_buff *skb,
-						    void *accel_priv);
+						    void *accel_priv,
+						    select_queue_fallback_t fallback);
 	void			(*ndo_change_rx_flags)(struct net_device *dev,
 						       int flags);
 	void			(*ndo_set_rx_mode)(struct net_device *dev);
@@ -1551,7 +1555,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 				    struct sk_buff *skb,
 				    void *accel_priv);
-u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);

 /*
  * Net namespace inlines
@@ -2276,6 +2279,26 @@ static inline void netdev_reset_queue(struct net_device *dev_queue)
 }

 /**
+ * netdev_cap_txqueue - check if selected tx queue exceeds device queues
+ * @dev: network device
+ * @queue_index: given tx queue index
+ *
+ * Returns 0 if given tx queue index >= number of device tx queues,
+ * otherwise returns the originally passed tx queue index.
+ */
+static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
+{
+	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
+		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
+				     dev->name, queue_index,
+				     dev->real_num_tx_queues);
+		return 0;
+	}
+
+	return queue_index;
+}
+
+/**
  *	netif_running - test if up
  *	@dev: network device
  *
@@ -3068,7 +3091,12 @@ void netdev_change_features(struct net_device *dev);
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 					struct net_device *dev);

-netdev_features_t netif_skb_features(struct sk_buff *skb);
+netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
+					 const struct net_device *dev);
+static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
+{
+	return netif_skb_dev_features(skb, skb->dev);
+}

 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f589c9af8cb..3ebbbe7b6d0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2916,5 +2916,22 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb)
 {
 	return !skb->head_frag || skb_cloned(skb);
 }
+
+/**
+ * skb_gso_network_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_network_seglen is used to determine the real size of the
+ * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
+ *
+ * The MAC/L2 header is not accounted for.
+ */
+static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
+{
+	unsigned int hdr_len = skb_transport_header(skb) -
+			       skb_network_header(skb);
+	return hdr_len + skb_gso_transport_seglen(skb);
+}
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index d992ca3145f..6ee76c80489 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1653,17 +1653,6 @@ struct sctp_association {
 	/* This is the last advertised value of rwnd over a SACK chunk. */
 	__u32 a_rwnd;

-	/* Number of bytes by which the rwnd has slopped.  The rwnd is allowed
-	 * to slop over a maximum of the association's frag_point.
-	 */
-	__u32 rwnd_over;
-
-	/* Keeps treack of rwnd pressure.  This happens when we have
-	 * a window, but not recevie buffer (i.e small packets).  This one
-	 * is releases slowly (1 PMTU at a time ).
-	 */
-	__u32 rwnd_press;
-
 	/* This is the sndbuf size in use for the association.
 	 * This corresponds to the sndbuf size for the association,
 	 * as specified in the sk->sndbuf.
@@ -1892,8 +1881,7 @@ void sctp_assoc_update(struct sctp_association *old,
 __u32 sctp_association_get_next_tsn(struct sctp_association *);

 void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *);
-void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
-void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
+void sctp_assoc_rwnd_update(struct sctp_association *, bool);
 void sctp_assoc_set_primary(struct sctp_association *,
 			    struct sctp_transport *);
 void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
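
Not part of the diff above: a minimal sketch of how a driver callback might look against the new four-argument ndo_select_queue, assuming a hypothetical "foo" driver that pins high-priority traffic to queue 0 and defers everything else to the core's default selection via the new select_queue_fallback_t argument. The priority threshold and queue index are invented for illustration; netdev_cap_txqueue() guards the hard-coded index.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical driver callback for the new ndo_select_queue signature.
 * Traffic marked with priority >= 6 is pinned to queue 0 (an assumed
 * high-priority ring); everything else is left to the core's fallback. */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	if (skb->priority >= 6)
		return netdev_cap_txqueue(dev, 0);

	return fallback(dev, skb);
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_select_queue	= foo_select_queue,
	/* remaining ops omitted */
};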
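Likewise not from this commit: a sketch of the kind of per-segment MTU check skb_gso_network_seglen() enables, where forwarding code should not compare a GSO superpacket's aggregate skb->len against the path MTU directly. The helper name foo_exceeds_mtu is hypothetical.

#include <linux/skbuff.h>

/* Hypothetical check: would this skb, once (re)segmented, exceed @mtu at
 * the network layer?  For GSO skbs the aggregate skb->len is misleading;
 * the per-segment length from skb_gso_network_seglen() is what actually
 * ends up on the wire. */
static bool foo_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
		return false;

	return true;
}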