Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r-- | include/linux/netdevice.h | 178
1 file changed, 143 insertions, 35 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f671cd2f133..322b5eae57d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -108,6 +108,14 @@ struct wireless_dev;
 #define MAX_HEADER (LL_MAX_HEADER + 48)
 #endif
 
+struct net_device_subqueue
+{
+	/* Give a control state for each queue.  This struct may contain
+	 * per-queue locks in the future.
+	 */
+	unsigned long	state;
+};
+
 /*
  *	Network device statistics. Akin to the 2.0 ether stats but
  *	with byte counters.
@@ -177,19 +185,25 @@ struct netif_rx_stats
 
 DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
 
+struct dev_addr_list
+{
+	struct dev_addr_list	*next;
+	u8			da_addr[MAX_ADDR_LEN];
+	u8			da_addrlen;
+	u8			da_synced;
+	int			da_users;
+	int			da_gusers;
+};
 
 /*
  *	We tag multicasts with these structures.
  */
-
-struct dev_mc_list
-{
-	struct dev_mc_list	*next;
-	__u8			dmi_addr[MAX_ADDR_LEN];
-	unsigned char		dmi_addrlen;
-	int			dmi_users;
-	int			dmi_gusers;
-};
+
+#define dev_mc_list	dev_addr_list
+#define dmi_addr	da_addr
+#define dmi_addrlen	da_addrlen
+#define dmi_users	da_users
+#define dmi_gusers	da_gusers
 
 struct hh_cache
 {
@@ -248,6 +262,8 @@ enum netdev_state_t
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
 	__LINK_STATE_QDISC_RUNNING,
+	/* Set by the netpoll NAPI code */
+	__LINK_STATE_POLL_LIST_FROZEN,
 };
 
 
@@ -314,9 +330,10 @@ struct net_device
 	/* Net device features */
 	unsigned long		features;
 #define NETIF_F_SG		1	/* Scatter/gather IO. */
-#define NETIF_F_IP_CSUM		2	/* Can checksum only TCP/UDP over IPv4. */
+#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
 #define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopack. */
 #define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
+#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
 #define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
 #define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
 #define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
@@ -325,6 +342,7 @@ struct net_device
 #define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
 #define NETIF_F_GSO		2048	/* Enable software GSO. */
 #define NETIF_F_LLTX		4096	/* LockLess TX */
+#define NETIF_F_MULTI_QUEUE	16384	/* Has multiple TX/RX queues */
 
 	/* Segmentation offload features */
 #define NETIF_F_GSO_SHIFT	16
@@ -338,8 +356,11 @@ struct net_device
 	/* List of features with software fallbacks. */
 #define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
 
+
 #define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
-#define NETIF_F_ALL_CSUM	(NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
+#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
+#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
+#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
 
 	struct net_device	*next_sched;
 
@@ -388,7 +409,10 @@ struct net_device
 	unsigned char		addr_len;	/* hardware address length	*/
 	unsigned short          dev_id;		/* for shared network cards */
 
-	struct dev_mc_list	*mc_list;	/* Multicast mac addresses	*/
+	struct dev_addr_list	*uc_list;	/* Secondary unicast mac addresses */
+	int			uc_count;	/* Number of installed ucasts	*/
+	int			uc_promisc;
+	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
 	int			mc_count;	/* Number of installed mcasts	*/
 	int			promiscuity;
 	int			allmulti;
@@ -493,6 +517,11 @@ struct net_device
 						void *saddr,
 						unsigned len);
 	int			(*rebuild_header)(struct sk_buff *skb);
+#define HAVE_CHANGE_RX_FLAGS
+	void			(*change_rx_flags)(struct net_device *dev,
+						   int flags);
+#define HAVE_SET_RX_MODE
+	void			(*set_rx_mode)(struct net_device *dev);
 #define HAVE_MULTICAST
 	void			(*set_multicast_list)(struct net_device *dev);
 #define HAVE_SET_MAC_ADDR
@@ -535,22 +564,29 @@ struct net_device
 
 	/* bridge stuff */
 	struct net_bridge_port	*br_port;
+	/* macvlan */
+	struct macvlan_port	*macvlan_port;
 
 	/* class/net/name entry */
 	struct device		dev;
 	/* space for optional statistics and wireless sysfs groups */
 	struct attribute_group  *sysfs_groups[3];
+
+	/* rtnetlink link ops */
+	const struct rtnl_link_ops *rtnl_link_ops;
+
+	/* The TX queue control structures */
+	unsigned int		egress_subqueue_count;
+	struct net_device_subqueue	egress_subqueue[0];
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
 
 #define	NETDEV_ALIGN		32
 #define	NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)
 
-static inline void *netdev_priv(struct net_device *dev)
+static inline void *netdev_priv(const struct net_device *dev)
 {
-	return (char *)dev + ((sizeof(struct net_device)
-					+ NETDEV_ALIGN_CONST)
-				& ~NETDEV_ALIGN_CONST);
+	return dev->priv;
 }
 
 #define SET_MODULE_OWNER(dev) do { } while (0)
@@ -702,6 +738,62 @@ static inline int netif_running(const struct net_device *dev)
 	return test_bit(__LINK_STATE_START, &dev->state);
 }
 
+/*
+ * Routines to manage the subqueues on a device.  We only need start
+ * stop, and a check if it's stopped.  All other device management is
+ * done at the overall netdevice level.
+ * Also test the device if we're multiqueue.
+ */
+static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
+{
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+#endif
+}
+
+static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
+{
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+#ifdef CONFIG_NETPOLL_TRAP
+	if (netpoll_trap())
+		return;
+#endif
+	set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+#endif
+}
+
+static inline int netif_subqueue_stopped(const struct net_device *dev,
+					 u16 queue_index)
+{
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	return test_bit(__LINK_STATE_XOFF,
+			&dev->egress_subqueue[queue_index].state);
+#else
+	return 0;
+#endif
+}
+
+static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
+{
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+#ifdef CONFIG_NETPOLL_TRAP
+	if (netpoll_trap())
+		return;
+#endif
+	if (test_and_clear_bit(__LINK_STATE_XOFF,
+			       &dev->egress_subqueue[queue_index].state))
+		__netif_schedule(dev);
+#endif
+}
+
+static inline int netif_is_multiqueue(const struct net_device *dev)
+{
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	return (!!(NETIF_F_MULTI_QUEUE & dev->features));
+#else
+	return 0;
+#endif
+}
 
 /* Use this variant when it is known for sure that it
  * is executing from interrupt context.
@@ -910,6 +1002,17 @@ static inline int netif_rx_reschedule(struct net_device *dev, int undo)
 	return 0;
 }
 
+/* same as netif_rx_complete, except that local_irq_save(flags)
+ * has already been issued
+ */
+static inline void __netif_rx_complete(struct net_device *dev)
+{
+	BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state));
+	list_del(&dev->poll_list);
+	smp_mb__before_clear_bit();
+	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
+}
+
 /* Remove interface from poll list: it must be in the poll list
  * on current cpu. This primitive is called by dev->poll(), when
  * it completes the work. The device cannot be out of poll list at this
@@ -919,11 +1022,16 @@ static inline void netif_rx_complete(struct net_device *dev)
 {
 	unsigned long flags;
 
+#ifdef CONFIG_NETPOLL
+	/* Prevent race with netpoll - yes, this is a kludge.
+	 * But at least it doesn't penalize the non-netpoll
+	 * code path.
+	 */
+	if (test_bit(__LINK_STATE_POLL_LIST_FROZEN, &dev->state))
+		return;
+#endif
+
 	local_irq_save(flags);
-	BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state));
-	list_del(&dev->poll_list);
-	smp_mb__before_clear_bit();
-	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
+	__netif_rx_complete(dev);
 	local_irq_restore(flags);
 }
 
@@ -940,17 +1048,6 @@ static inline void netif_poll_enable(struct net_device *dev)
 	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
 }
 
-/* same as netif_rx_complete, except that local_irq_save(flags)
- * has already been issued
- */
-static inline void __netif_rx_complete(struct net_device *dev)
-{
-	BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state));
-	list_del(&dev->poll_list);
-	smp_mb__before_clear_bit();
-	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
-}
-
 static inline void netif_tx_lock(struct net_device *dev)
 {
 	spin_lock(&dev->_xmit_lock);
@@ -995,15 +1092,26 @@ static inline void netif_tx_disable(struct net_device *dev)
 extern void		ether_setup(struct net_device *dev);
 
 /* Support for loadable net-drivers */
-extern struct net_device *alloc_netdev(int sizeof_priv, const char *name,
-				       void (*setup)(struct net_device *));
+extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
+				       void (*setup)(struct net_device *),
+				       unsigned int queue_count);
+#define alloc_netdev(sizeof_priv, name, setup) \
+	alloc_netdev_mq(sizeof_priv, name, setup, 1)
 extern int		register_netdev(struct net_device *dev);
 extern void		unregister_netdev(struct net_device *dev);
-/* Functions used for multicast support */
-extern void		dev_mc_upload(struct net_device *dev);
+/* Functions used for secondary unicast and multicast support */
+extern void		dev_set_rx_mode(struct net_device *dev);
+extern void		__dev_set_rx_mode(struct net_device *dev);
+extern int		dev_unicast_delete(struct net_device *dev, void *addr, int alen);
+extern int		dev_unicast_add(struct net_device *dev, void *addr, int alen);
 extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
 extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
+extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
+extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
 extern void		dev_mc_discard(struct net_device *dev);
+extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
+extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
+extern void		__dev_addr_discard(struct dev_addr_list **list);
 extern void		dev_set_promiscuity(struct net_device *dev, int inc);
 extern void		dev_set_allmulti(struct net_device *dev, int inc);
 extern void		netdev_state_change(struct net_device *dev);
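For readers following the API change above: a driver opting into the new multiqueue support allocates its net_device through alloc_netdev_mq() and advertises NETIF_F_MULTI_QUEUE. The fragment below is only a hedged sketch against this header; struct my_priv, the "myeth%d" name format and MY_NUM_TX_QUEUES are hypothetical driver-side names, not part of the patch.

#include <linux/netdevice.h>

#define MY_NUM_TX_QUEUES 4		/* hypothetical queue count */

struct my_priv {
	/* driver-private state: one TX ring per subqueue, etc. */
};

static struct net_device *my_alloc_netdev(void)
{
	struct net_device *dev;

	/* Reserves the private area plus MY_NUM_TX_QUEUES
	 * net_device_subqueue entries appended to struct net_device
	 * (the egress_subqueue[0] flexible array added above).
	 */
	dev = alloc_netdev_mq(sizeof(struct my_priv), "myeth%d",
			      ether_setup, MY_NUM_TX_QUEUES);
	if (!dev)
		return NULL;

	/* Tell the stack the TX queues really are independent. */
	dev->features |= NETIF_F_MULTI_QUEUE;

	return dev;
}

Single-queue drivers are unaffected: the old alloc_netdev() becomes a macro that calls alloc_netdev_mq(..., 1).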
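The per-subqueue flow-control helpers (netif_stop_subqueue(), netif_wake_subqueue(), netif_subqueue_stopped()) mirror the classic netif_stop_queue()/netif_wake_queue() pair but act on a single TX ring. Below is a minimal sketch of how a driver's transmit and TX-completion paths might use them; the my_tx_ring type and the my_*() ring helpers are assumptions for illustration, not kernel APIs.

#include <linux/netdevice.h>

struct my_tx_ring;				/* hypothetical per-queue ring */

/* Hypothetical helpers assumed to exist in the driver. */
extern void my_post_to_ring(struct my_tx_ring *ring, struct sk_buff *skb);
extern int my_ring_full(const struct my_tx_ring *ring);
extern void my_reclaim_ring(struct my_tx_ring *ring);
extern struct my_tx_ring *my_get_ring(struct net_device *dev, u16 queue);

static int my_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* A multiqueue-aware qdisc records its choice in skb->queue_mapping. */
	u16 queue = skb->queue_mapping;
	struct my_tx_ring *ring = my_get_ring(dev, queue);

	my_post_to_ring(ring, skb);

	/* Stop only this subqueue when its ring fills up; traffic on
	 * the device's other queues keeps flowing.
	 */
	if (my_ring_full(ring))
		netif_stop_subqueue(dev, queue);

	return NETDEV_TX_OK;
}

static void my_tx_clean(struct net_device *dev, u16 queue)
{
	my_reclaim_ring(my_get_ring(dev, queue));

	/* Restart the subqueue once descriptors are available again. */
	if (netif_subqueue_stopped(dev, queue))
		netif_wake_subqueue(dev, queue);
}

With CONFIG_NETDEVICES_MULTIQUEUE unset the helpers compile away to no-ops (or return 0), so the same driver code builds on single-queue configurations.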
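The new address-list helpers are aimed at stacked devices: dev_mc_sync()/dev_mc_unsync() let an upper device (a VLAN, or the macvlan driver referenced by the macvlan_port field above) mirror its multicast list onto the real lower device, while dev_unicast_add()/dev_unicast_delete() install secondary unicast addresses. A hedged sketch of an upper driver's rx-mode handling follows; my_get_lowerdev() is a made-up accessor.

#include <linux/netdevice.h>

/* Hypothetical: return the real device this virtual device sits on. */
extern struct net_device *my_get_lowerdev(struct net_device *dev);

/* Called via dev->set_multicast_list (or the new set_rx_mode hook)
 * whenever the upper device's address lists change.
 */
static void my_upper_set_rx_mode(struct net_device *dev)
{
	struct net_device *lower = my_get_lowerdev(dev);

	/* Add addresses the lower device is missing and drop the ones
	 * we no longer use; the helper keeps per-address refcounts and
	 * the da_synced flag consistent on both lists.
	 */
	dev_mc_sync(lower, dev);
}

static void my_upper_stop(struct net_device *dev)
{
	/* On close/unregister, remove everything previously synced. */
	dev_mc_unsync(my_get_lowerdev(dev), dev);
}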