author    Trond Myklebust <Trond.Myklebust@netapp.com>    2006-12-07 16:35:17 -0500
committer Trond Myklebust <Trond.Myklebust@netapp.com>    2006-12-07 16:35:17 -0500
commit    21b4e736922f546e0f1aa7b9d6c442f309a2444a (patch)
tree      e1be8645297f8ebe87445251743ebcc52081a20d /net
parent    34161db6b14d984fb9b06c735b7b42f8803f6851 (diff)
parent    68380b581383c028830f79ec2670f4a193854aa6 (diff)
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/ into merge_linus
Diffstat (limited to 'net')
67 files changed, 617 insertions(+), 389 deletions(-)
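Most of the churn in the diff below is a mechanical slab-API conversion: the `kmem_cache_t` typedef becomes `struct kmem_cache`, and the legacy `SLAB_ATOMIC`/`SLAB_KERNEL` allocation flags become the plain `GFP_ATOMIC`/`GFP_KERNEL` gfp flags. A minimal sketch of the pattern the conversion targets, with the `foo_*` cache and object names purely illustrative (not taken from this tree):

```c
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Illustrative object type; the "foo" names are hypothetical. */
struct foo_entry {
	unsigned long	key;
	void		*data;
};

/* was: static kmem_cache_t *foo_cachep __read_mostly; */
static struct kmem_cache *foo_cachep __read_mostly;

static int __init foo_cache_init(void)
{
	/* 2.6.19-era signature: name, size, align, flags, ctor, dtor */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo_entry),
				       0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

static struct foo_entry *foo_alloc_atomic(void)
{
	/* was: kmem_cache_alloc(foo_cachep, SLAB_ATOMIC) */
	return kmem_cache_alloc(foo_cachep, GFP_ATOMIC);
}

static struct foo_entry *foo_alloc_sleeping(void)
{
	/* was: kmem_cache_alloc(foo_cachep, SLAB_KERNEL) */
	return kmem_cache_alloc(foo_cachep, GFP_KERNEL);
}
```

The same substitution accounts for the repeated one-line hunks in net/bridge, net/core, net/dccp, net/decnet, net/ipv4, net/ipv6, net/sctp, net/sunrpc and the other subsystems touched by this merge.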
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index d9f04864d15..8ca448db7a0 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -23,7 +23,7 @@ #include <asm/atomic.h> #include "br_private.h" -static kmem_cache_t *br_fdb_cache __read_mostly; +static struct kmem_cache *br_fdb_cache __read_mostly; static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, const unsigned char *addr); diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index ac47ba2ba02..bd221ad52ea 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -34,6 +34,7 @@ #include <linux/netfilter_ipv6.h> #include <linux/netfilter_arp.h> #include <linux/in_route.h> +#include <linux/inetdevice.h> #include <net/ip.h> #include <net/ipv6.h> @@ -221,10 +222,14 @@ static void __br_dnat_complain(void) * * Otherwise, the packet is considered to be routed and we just * change the destination MAC address so that the packet will - * later be passed up to the IP stack to be routed. + * later be passed up to the IP stack to be routed. For a redirected + * packet, ip_route_input() will give back the localhost as output device, + * which differs from the bridge device. * * Let us now consider the case that ip_route_input() fails: * + * This can be because the destination address is martian, in which case + * the packet will be dropped. * After a "echo '0' > /proc/sys/net/ipv4/ip_forward" ip_route_input() * will fail, while __ip_route_output_key() will return success. The source * address for __ip_route_output_key() is set to zero, so __ip_route_output_key @@ -237,7 +242,8 @@ static void __br_dnat_complain(void) * * --Lennert, 20020411 * --Bart, 20020416 (updated) - * --Bart, 20021007 (updated) */ + * --Bart, 20021007 (updated) + * --Bart, 20062711 (updated) */ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) { if (skb->pkt_type == PACKET_OTHERHOST) { @@ -264,15 +270,15 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb) struct net_device *dev = skb->dev; struct iphdr *iph = skb->nh.iph; struct nf_bridge_info *nf_bridge = skb->nf_bridge; + int err; if (nf_bridge->mask & BRNF_PKT_TYPE) { skb->pkt_type = PACKET_OTHERHOST; nf_bridge->mask ^= BRNF_PKT_TYPE; } nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING; - if (dnat_took_place(skb)) { - if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev)) { + if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) { struct rtable *rt; struct flowi fl = { .nl_u = { @@ -283,19 +289,33 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb) }, .proto = 0, }; + struct in_device *in_dev = in_dev_get(dev); + + /* If err equals -EHOSTUNREACH the error is due to a + * martian destination or due to the fact that + * forwarding is disabled. For most martian packets, + * ip_route_output_key() will fail. It won't fail for 2 types of + * martian destinations: loopback destinations and destination + * 0.0.0.0. In both cases the packet will be dropped because the + * destination is the loopback device and not the bridge. */ + if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev)) + goto free_skb; if (!ip_route_output_key(&rt, &fl)) { /* - Bridged-and-DNAT'ed traffic doesn't - * require ip_forwarding. - * - Deal with redirected traffic. */ - if (((struct dst_entry *)rt)->dev == dev || - rt->rt_type == RTN_LOCAL) { + * require ip_forwarding. 
*/ + if (((struct dst_entry *)rt)->dev == dev) { skb->dst = (struct dst_entry *)rt; goto bridged_dnat; } + /* we are sure that forwarding is disabled, so printing + * this message is no problem. Note that the packet could + * still have a martian destination address, in which case + * the packet could be dropped even if forwarding were enabled */ __br_dnat_complain(); dst_release((struct dst_entry *)rt); } +free_skb: kfree_skb(skb); return 0; } else { diff --git a/net/core/dev.c b/net/core/dev.c index 59d058a3b50..e660cb57e42 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3340,7 +3340,6 @@ void unregister_netdev(struct net_device *dev) EXPORT_SYMBOL(unregister_netdev); -#ifdef CONFIG_HOTPLUG_CPU static int dev_cpu_callback(struct notifier_block *nfb, unsigned long action, void *ocpu) @@ -3384,7 +3383,6 @@ static int dev_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } -#endif /* CONFIG_HOTPLUG_CPU */ #ifdef CONFIG_NET_DMA /** diff --git a/net/core/dst.c b/net/core/dst.c index 1a5e49da0e7..836ec660692 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -125,7 +125,7 @@ void * dst_alloc(struct dst_ops * ops) if (ops->gc()) return NULL; } - dst = kmem_cache_alloc(ops->kmem_cachep, SLAB_ATOMIC); + dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC); if (!dst) return NULL; memset(dst, 0, ops->entry_size); diff --git a/net/core/flow.c b/net/core/flow.c index b16d31ae5e5..d137f971f97 100644 --- a/net/core/flow.c +++ b/net/core/flow.c @@ -44,7 +44,7 @@ static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL }; #define flow_table(cpu) (per_cpu(flow_tables, cpu)) -static kmem_cache_t *flow_cachep __read_mostly; +static struct kmem_cache *flow_cachep __read_mostly; static int flow_lwm, flow_hwm; @@ -211,7 +211,7 @@ void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir, if (flow_count(cpu) > flow_hwm) flow_cache_shrink(cpu); - fle = kmem_cache_alloc(flow_cachep, SLAB_ATOMIC); + fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); if (fle) { fle->next = *head; *head = fle; @@ -340,7 +340,6 @@ static void __devinit flow_cache_cpu_prepare(int cpu) tasklet_init(tasklet, flow_cache_flush_tasklet, 0); } -#ifdef CONFIG_HOTPLUG_CPU static int flow_cache_cpu(struct notifier_block *nfb, unsigned long action, void *hcpu) @@ -349,7 +348,6 @@ static int flow_cache_cpu(struct notifier_block *nfb, __flow_cache_shrink((unsigned long)hcpu, 0); return NOTIFY_OK; } -#endif /* CONFIG_HOTPLUG_CPU */ static int __init flow_cache_init(void) { diff --git a/net/core/neighbour.c b/net/core/neighbour.c index ba509a4a8e9..0ab1987b934 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -251,7 +251,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl) goto out_entries; } - n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC); + n = kmem_cache_alloc(tbl->kmem_cachep, GFP_ATOMIC); if (!n) goto out_entries; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 8e1c385e5ba..de7801d589e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -68,8 +68,8 @@ #include "kmap_skb.h" -static kmem_cache_t *skbuff_head_cache __read_mostly; -static kmem_cache_t *skbuff_fclone_cache __read_mostly; +static struct kmem_cache *skbuff_head_cache __read_mostly; +static struct kmem_cache *skbuff_fclone_cache __read_mostly; /* * Keep out-of-line to prevent kernel bloat. 
@@ -132,6 +132,7 @@ EXPORT_SYMBOL(skb_truesize_bug); * @gfp_mask: allocation mask * @fclone: allocate from fclone cache instead of head cache * and allocate a cloned (child) skb + * @node: numa node to allocate memory on * * Allocate a new &sk_buff. The returned buffer has no headroom and a * tail room of size bytes. The object has a reference count of one. @@ -141,9 +142,9 @@ EXPORT_SYMBOL(skb_truesize_bug); * %GFP_ATOMIC. */ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, - int fclone) + int fclone, int node) { - kmem_cache_t *cache; + struct kmem_cache *cache; struct skb_shared_info *shinfo; struct sk_buff *skb; u8 *data; @@ -151,14 +152,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, cache = fclone ? skbuff_fclone_cache : skbuff_head_cache; /* Get the HEAD */ - skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA); + skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); if (!skb) goto out; /* Get the DATA. Size must match skb_add_mtu(). */ size = SKB_DATA_ALIGN(size); - data = kmalloc_track_caller(size + sizeof(struct skb_shared_info), - gfp_mask); + data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info), + gfp_mask, node); if (!data) goto nodata; @@ -210,7 +211,7 @@ nodata: * Buffers may only be allocated from interrupts using a @gfp_mask of * %GFP_ATOMIC. */ -struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, +struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp, unsigned int size, gfp_t gfp_mask) { @@ -267,9 +268,10 @@ nodata: struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, gfp_t gfp_mask) { + int node = dev->class_dev.dev ? dev_to_node(dev->class_dev.dev) : -1; struct sk_buff *skb; - skb = alloc_skb(length + NET_SKB_PAD, gfp_mask); + skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node); if (likely(skb)) { skb_reserve(skb, NET_SKB_PAD); skb->dev = dev; diff --git a/net/core/sock.c b/net/core/sock.c index 419c7d3289c..0ed5b4f0bc4 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -810,24 +810,11 @@ lenout: */ static void inline sock_lock_init(struct sock *sk) { - spin_lock_init(&sk->sk_lock.slock); - sk->sk_lock.owner = NULL; - init_waitqueue_head(&sk->sk_lock.wq); - /* - * Make sure we are not reinitializing a held lock: - */ - debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock)); - - /* - * Mark both the sk_lock and the sk_lock.slock as a - * per-address-family lock class: - */ - lockdep_set_class_and_name(&sk->sk_lock.slock, - af_family_slock_keys + sk->sk_family, - af_family_slock_key_strings[sk->sk_family]); - lockdep_init_map(&sk->sk_lock.dep_map, - af_family_key_strings[sk->sk_family], - af_family_keys + sk->sk_family, 0); + sock_lock_init_class_and_name(sk, + af_family_slock_key_strings[sk->sk_family], + af_family_slock_keys + sk->sk_family, + af_family_key_strings[sk->sk_family], + af_family_keys + sk->sk_family); } /** @@ -841,7 +828,7 @@ struct sock *sk_alloc(int family, gfp_t priority, struct proto *prot, int zero_it) { struct sock *sk = NULL; - kmem_cache_t *slab = prot->slab; + struct kmem_cache *slab = prot->slab; if (slab != NULL) sk = kmem_cache_alloc(slab, priority); diff --git a/net/core/wireless.c b/net/core/wireless.c index cb1b8728d7e..f69ab7b4408 100644 --- a/net/core/wireless.c +++ b/net/core/wireless.c @@ -2130,7 +2130,7 @@ int iw_handler_set_spy(struct net_device * dev, * The rtnl_lock() make sure we don't race with the other iw_handlers. * This make sure wireless_spy_update() "see" that the spy list * is temporarily disabled. 
*/ - wmb(); + smp_wmb(); /* Are there are addresses to copy? */ if(wrqu->data.length > 0) { @@ -2159,7 +2159,7 @@ int iw_handler_set_spy(struct net_device * dev, } /* Make sure above is updated before re-enabling */ - wmb(); + smp_wmb(); /* Enable addresses */ spydata->spy_number = wrqu->data.length; diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c index bdf1bb7a82c..1f4727ddbdb 100644 --- a/net/dccp/ackvec.c +++ b/net/dccp/ackvec.c @@ -21,8 +21,8 @@ #include <net/sock.h> -static kmem_cache_t *dccp_ackvec_slab; -static kmem_cache_t *dccp_ackvec_record_slab; +static struct kmem_cache *dccp_ackvec_slab; +static struct kmem_cache *dccp_ackvec_record_slab; static struct dccp_ackvec_record *dccp_ackvec_record_new(void) { diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c index ff05e59043c..d8cf92f09e6 100644 --- a/net/dccp/ccid.c +++ b/net/dccp/ccid.c @@ -55,9 +55,9 @@ static inline void ccids_read_unlock(void) #define ccids_read_unlock() do { } while(0) #endif -static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...) +static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...) { - kmem_cache_t *slab; + struct kmem_cache *slab; char slab_name_fmt[32], *slab_name; va_list args; @@ -75,7 +75,7 @@ static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...) return slab; } -static void ccid_kmem_cache_destroy(kmem_cache_t *slab) +static void ccid_kmem_cache_destroy(struct kmem_cache *slab) { if (slab != NULL) { const char *name = kmem_cache_name(slab); diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index c7c29514dce..bcc2d12ae81 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h @@ -27,9 +27,9 @@ struct ccid_operations { unsigned char ccid_id; const char *ccid_name; struct module *ccid_owner; - kmem_cache_t *ccid_hc_rx_slab; + struct kmem_cache *ccid_hc_rx_slab; __u32 ccid_hc_rx_obj_size; - kmem_cache_t *ccid_hc_tx_slab; + struct kmem_cache *ccid_hc_tx_slab; __u32 ccid_hc_tx_obj_size; int (*ccid_hc_rx_init)(struct ccid *ccid, struct sock *sk); int (*ccid_hc_tx_init)(struct ccid *ccid, struct sock *sk); diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index cf8c07b2704..66a27b9688c 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -295,7 +295,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist); if (new_packet == NULL || new_packet->dccphtx_sent) { new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist, - SLAB_ATOMIC); + GFP_ATOMIC); if (unlikely(new_packet == NULL)) { DCCP_WARN("%s, sk=%p, not enough mem to add to history," @@ -889,7 +889,7 @@ static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss) /* new loss event detected */ /* calculate last interval length */ seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss); - entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC); + entry = dccp_li_hist_entry_new(ccid3_li_hist, GFP_ATOMIC); if (entry == NULL) { DCCP_BUG("out of memory - can not allocate entry"); @@ -1011,7 +1011,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) } packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp, - skb, SLAB_ATOMIC); + skb, GFP_ATOMIC); if (unlikely(packet == NULL)) { DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet " "to history, consider it lost!\n", dccp_role(sk), sk); diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c index 48b9b93f8ac..0a0baef16b3 100644 --- 
a/net/dccp/ccids/lib/loss_interval.c +++ b/net/dccp/ccids/lib/loss_interval.c @@ -125,7 +125,7 @@ int dccp_li_hist_interval_new(struct dccp_li_hist *hist, int i; for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) { - entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC); + entry = dccp_li_hist_entry_new(hist, GFP_ATOMIC); if (entry == NULL) { dccp_li_hist_purge(hist, list); DCCP_BUG("loss interval list entry is NULL"); diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h index 0ae85f0340b..eb257014dd7 100644 --- a/net/dccp/ccids/lib/loss_interval.h +++ b/net/dccp/ccids/lib/loss_interval.h @@ -20,7 +20,7 @@ #define DCCP_LI_HIST_IVAL_F_LENGTH 8 struct dccp_li_hist { - kmem_cache_t *dccplih_slab; + struct kmem_cache *dccplih_slab; }; extern struct dccp_li_hist *dccp_li_hist_new(const char *name); diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h index 067cf1c85a3..9a8bcf224aa 100644 --- a/net/dccp/ccids/lib/packet_history.h +++ b/net/dccp/ccids/lib/packet_history.h @@ -68,14 +68,14 @@ struct dccp_rx_hist_entry { }; struct dccp_tx_hist { - kmem_cache_t *dccptxh_slab; + struct kmem_cache *dccptxh_slab; }; extern struct dccp_tx_hist *dccp_tx_hist_new(const char *name); extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist); struct dccp_rx_hist { - kmem_cache_t *dccprxh_slab; + struct kmem_cache *dccprxh_slab; }; extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name); diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c index bdbc3f43166..13b2421991b 100644 --- a/net/decnet/dn_table.c +++ b/net/decnet/dn_table.c @@ -79,7 +79,7 @@ for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_n static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ]; static DEFINE_RWLOCK(dn_fib_tables_lock); -static kmem_cache_t *dn_hash_kmem __read_mostly; +static struct kmem_cache *dn_hash_kmem __read_mostly; static int dn_fib_hash_zombies; static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz) @@ -590,7 +590,7 @@ create: replace: err = -ENOBUFS; - new_f = kmem_cache_alloc(dn_hash_kmem, SLAB_KERNEL); + new_f = kmem_cache_alloc(dn_hash_kmem, GFP_KERNEL); if (new_f == NULL) goto out; diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c index 08386c10295..eec1a1dd91d 100644 --- a/net/ieee80211/softmac/ieee80211softmac_assoc.c +++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c @@ -431,6 +431,17 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev, return 0; } +void +ieee80211softmac_try_reassoc(struct ieee80211softmac_device *mac) +{ + unsigned long flags; + + spin_lock_irqsave(&mac->lock, flags); + mac->associnfo.associating = 1; + schedule_work(&mac->associnfo.work); + spin_unlock_irqrestore(&mac->lock, flags); +} + int ieee80211softmac_handle_disassoc(struct net_device * dev, struct ieee80211_disassoc *disassoc) @@ -449,8 +460,7 @@ ieee80211softmac_handle_disassoc(struct net_device * dev, dprintk(KERN_INFO PFX "got disassoc frame\n"); ieee80211softmac_disassoc(mac); - /* try to reassociate */ - schedule_delayed_work(&mac->associnfo.work, 0); + ieee80211softmac_try_reassoc(mac); return 0; } diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c index 6012705aa4f..8ed3e59b802 100644 --- a/net/ieee80211/softmac/ieee80211softmac_auth.c +++ b/net/ieee80211/softmac/ieee80211softmac_auth.c @@ -337,6 +337,8 @@ ieee80211softmac_deauth_from_net(struct ieee80211softmac_device *mac, 
/* can't transmit data right now... */ netif_carrier_off(mac->dev); spin_unlock_irqrestore(&mac->lock, flags); + + ieee80211softmac_try_reassoc(mac); } /* diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h index c0dbe070e54..4c2bba34d32 100644 --- a/net/ieee80211/softmac/ieee80211softmac_priv.h +++ b/net/ieee80211/softmac/ieee80211softmac_priv.h @@ -239,4 +239,6 @@ void ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, in int ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac, int event, void *event_context, notify_function_ptr fun, void *context, gfp_t gfp_mask); +void ieee80211softmac_try_reassoc(struct ieee80211softmac_device *mac); + #endif /* IEEE80211SOFTMAC_PRIV_H_ */ diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c index 2ffaebd21c5..480d72c7a42 100644 --- a/net/ieee80211/softmac/ieee80211softmac_wx.c +++ b/net/ieee80211/softmac/ieee80211softmac_wx.c @@ -495,7 +495,8 @@ ieee80211softmac_wx_set_mlme(struct net_device *dev, printk(KERN_DEBUG PFX "wx_set_mlme: we should know the net here...\n"); goto out; } - return ieee80211softmac_deauth_req(mac, net, reason); + err = ieee80211softmac_deauth_req(mac, net, reason); + goto out; case IW_MLME_DISASSOC: ieee80211softmac_send_disassoc_req(mac, reason); mac->associnfo.associated = 0; diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c index 107bb6cbb0b..648f47c1c39 100644 --- a/net/ipv4/fib_hash.c +++ b/net/ipv4/fib_hash.c @@ -45,8 +45,8 @@ #include "fib_lookup.h" -static kmem_cache_t *fn_hash_kmem __read_mostly; -static kmem_cache_t *fn_alias_kmem __read_mostly; +static struct kmem_cache *fn_hash_kmem __read_mostly; +static struct kmem_cache *fn_alias_kmem __read_mostly; struct fib_node { struct hlist_node fn_hash; @@ -485,13 +485,13 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg) goto out; err = -ENOBUFS; - new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL); + new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); if (new_fa == NULL) goto out; new_f = NULL; if (!f) { - new_f = kmem_cache_alloc(fn_hash_kmem, SLAB_KERNEL); + new_f = kmem_cache_alloc(fn_hash_kmem, GFP_KERNEL); if (new_f == NULL) goto out_free_new_fa; diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index d17990ec724..cfb249cc0a5 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -172,7 +172,7 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn); static struct tnode *halve(struct trie *t, struct tnode *tn); static void tnode_free(struct tnode *tn); -static kmem_cache_t *fn_alias_kmem __read_mostly; +static struct kmem_cache *fn_alias_kmem __read_mostly; static struct trie *trie_local = NULL, *trie_main = NULL; @@ -1187,7 +1187,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg) u8 state; err = -ENOBUFS; - new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL); + new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); if (new_fa == NULL) goto out; @@ -1232,7 +1232,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg) goto out; err = -ENOBUFS; - new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL); + new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); if (new_fa == NULL) goto out; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 244c4f445c7..8c79c8a4ea5 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -27,11 +27,11 @@ * Allocate and initialize a new local port bind 
bucket. * The bindhash mutex for snum's hash chain must be held here. */ -struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep, +struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, struct inet_bind_hashbucket *head, const unsigned short snum) { - struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, SLAB_ATOMIC); + struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); if (tb != NULL) { tb->port = snum; @@ -45,7 +45,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep, /* * Caller must hold hashbucket lock for this tb with local BH disabled */ -void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb) +void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb) { if (hlist_empty(&tb->owners)) { __hlist_del(&tb->node); diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 8c74f9168b7..9f414e35c48 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -91,7 +91,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat { struct inet_timewait_sock *tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab, - SLAB_ATOMIC); + GFP_ATOMIC); if (tw != NULL) { const struct inet_sock *inet = inet_sk(sk); @@ -178,7 +178,6 @@ void inet_twdr_hangman(unsigned long data) need_timer = 0; if (inet_twdr_do_twkill_work(twdr, twdr->slot)) { twdr->thread_slots |= (1 << twdr->slot); - mb(); schedule_work(&twdr->twkill_work); need_timer = 1; } else { diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index f072f3875af..711eb6d0285 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -73,7 +73,7 @@ /* Exported for inet_getid inline function. */ DEFINE_SPINLOCK(inet_peer_idlock); -static kmem_cache_t *peer_cachep __read_mostly; +static struct kmem_cache *peer_cachep __read_mostly; #define node_height(x) x->avl_height static struct inet_peer peer_fake_node = { diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index efcf45ecc81..ecb5422ea23 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -105,7 +105,7 @@ static DEFINE_SPINLOCK(mfc_unres_lock); In this case data path is free of exclusive locks at all. */ -static kmem_cache_t *mrt_cachep __read_mostly; +static struct kmem_cache *mrt_cachep __read_mostly; static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local); static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert); diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c index 8832eb517d5..8086787a2c5 100644 --- a/net/ipv4/ipvs/ip_vs_conn.c +++ b/net/ipv4/ipvs/ip_vs_conn.c @@ -44,7 +44,7 @@ static struct list_head *ip_vs_conn_tab; /* SLAB cache for IPVS connections */ -static kmem_cache_t *ip_vs_conn_cachep __read_mostly; +static struct kmem_cache *ip_vs_conn_cachep __read_mostly; /* counter for current IPVS connections */ static atomic_t ip_vs_conn_count = ATOMIC_INIT(0); diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 413c2d0a1f3..71b76ade00e 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -375,6 +375,13 @@ static int mark_source_chains(struct xt_table_info *newinfo, && unconditional(&e->arp)) { unsigned int oldpos, size; + if (t->verdict < -NF_MAX_VERDICT - 1) { + duprintf("mark_source_chains: bad " + "negative verdict (%i)\n", + t->verdict); + return 0; + } + /* Return: backtrack through the last * big jump. 
*/ @@ -404,6 +411,14 @@ static int mark_source_chains(struct xt_table_info *newinfo, if (strcmp(t->target.u.user.name, ARPT_STANDARD_TARGET) == 0 && newpos >= 0) { + if (newpos > newinfo->size - + sizeof(struct arpt_entry)) { + duprintf("mark_source_chains: " + "bad verdict (%i)\n", + newpos); + return 0; + } + /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); @@ -426,8 +441,6 @@ static int mark_source_chains(struct xt_table_info *newinfo, static inline int standard_check(const struct arpt_entry_target *t, unsigned int max_offset) { - struct arpt_standard_target *targ = (void *)t; - /* Check standard info. */ if (t->u.target_size != ARPT_ALIGN(sizeof(struct arpt_standard_target))) { @@ -437,18 +450,6 @@ static inline int standard_check(const struct arpt_entry_target *t, return 0; } - if (targ->verdict >= 0 - && targ->verdict > max_offset - sizeof(struct arpt_entry)) { - duprintf("arpt_standard_check: bad verdict (%i)\n", - targ->verdict); - return 0; - } - - if (targ->verdict < -NF_MAX_VERDICT - 1) { - duprintf("arpt_standard_check: bad negative verdict (%i)\n", - targ->verdict); - return 0; - } return 1; } @@ -627,18 +628,20 @@ static int translate_table(const char *name, } } + if (!mark_source_chains(newinfo, valid_hooks, entry0)) { + duprintf("Looping hook\n"); + return -ELOOP; + } + /* Finally, each sanity check must pass */ i = 0; ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size, check_entry, name, size, &i); - if (ret != 0) - goto cleanup; - - ret = -ELOOP; - if (!mark_source_chains(newinfo, valid_hooks, entry0)) { - duprintf("Looping hook\n"); - goto cleanup; + if (ret != 0) { + ARPT_ENTRY_ITERATE(entry0, newinfo->size, + cleanup_entry, &i); + return ret; } /* And one copy for every other CPU */ @@ -647,9 +650,6 @@ static int translate_table(const char *name, memcpy(newinfo->entries[i], entry0, newinfo->size); } - return 0; -cleanup: - ARPT_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i); return ret; } diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c index f4b0e68a16d..8556a4f4f60 100644 --- a/net/ipv4/netfilter/ip_conntrack_core.c +++ b/net/ipv4/netfilter/ip_conntrack_core.c @@ -65,8 +65,8 @@ static LIST_HEAD(helpers); unsigned int ip_conntrack_htable_size __read_mostly = 0; int ip_conntrack_max __read_mostly; struct list_head *ip_conntrack_hash __read_mostly; -static kmem_cache_t *ip_conntrack_cachep __read_mostly; -static kmem_cache_t *ip_conntrack_expect_cachep __read_mostly; +static struct kmem_cache *ip_conntrack_cachep __read_mostly; +static struct kmem_cache *ip_conntrack_expect_cachep __read_mostly; struct ip_conntrack ip_conntrack_untracked; unsigned int ip_ct_log_invalid __read_mostly; static LIST_HEAD(unconfirmed); diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 8a455439b12..0ff2956d35e 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -401,6 +401,13 @@ mark_source_chains(struct xt_table_info *newinfo, && unconditional(&e->ip)) { unsigned int oldpos, size; + if (t->verdict < -NF_MAX_VERDICT - 1) { + duprintf("mark_source_chains: bad " + "negative verdict (%i)\n", + t->verdict); + return 0; + } + /* Return: backtrack through the last big jump. 
*/ do { @@ -438,6 +445,13 @@ mark_source_chains(struct xt_table_info *newinfo, if (strcmp(t->target.u.user.name, IPT_STANDARD_TARGET) == 0 && newpos >= 0) { + if (newpos > newinfo->size - + sizeof(struct ipt_entry)) { + duprintf("mark_source_chains: " + "bad verdict (%i)\n", + newpos); + return 0; + } /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); @@ -470,27 +484,6 @@ cleanup_match(struct ipt_entry_match *m, unsigned int *i) } static inline int -standard_check(const struct ipt_entry_target *t, - unsigned int max_offset) -{ - struct ipt_standard_target *targ = (void *)t; - - /* Check standard info. */ - if (targ->verdict >= 0 - && targ->verdict > max_offset - sizeof(struct ipt_entry)) { - duprintf("ipt_standard_check: bad verdict (%i)\n", - targ->verdict); - return 0; - } - if (targ->verdict < -NF_MAX_VERDICT - 1) { - duprintf("ipt_standard_check: bad negative verdict (%i)\n", - targ->verdict); - return 0; - } - return 1; -} - -static inline int check_match(struct ipt_entry_match *m, const char *name, const struct ipt_ip *ip, @@ -576,12 +569,7 @@ check_entry(struct ipt_entry *e, const char *name, unsigned int size, if (ret) goto err; - if (t->u.kernel.target == &ipt_standard_target) { - if (!standard_check(t, size)) { - ret = -EINVAL; - goto err; - } - } else if (t->u.kernel.target->checkentry + if (t->u.kernel.target->checkentry && !t->u.kernel.target->checkentry(name, e, target, t->data, e->comefrom)) { duprintf("ip_tables: check failed for `%s'.\n", @@ -718,17 +706,19 @@ translate_table(const char *name, } } + if (!mark_source_chains(newinfo, valid_hooks, entry0)) + return -ELOOP; + /* Finally, each sanity check must pass */ i = 0; ret = IPT_ENTRY_ITERATE(entry0, newinfo->size, check_entry, name, size, &i); - if (ret != 0) - goto cleanup; - - ret = -ELOOP; - if (!mark_source_chains(newinfo, valid_hooks, entry0)) - goto cleanup; + if (ret != 0) { + IPT_ENTRY_ITERATE(entry0, newinfo->size, + cleanup_entry, &i); + return ret; + } /* And one copy for every other CPU */ for_each_possible_cpu(i) { @@ -736,9 +726,6 @@ translate_table(const char *name, memcpy(newinfo->entries[i], entry0, newinfo->size); } - return 0; -cleanup: - IPT_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i); return ret; } @@ -1529,25 +1516,8 @@ static inline int compat_copy_match_from_user(struct ipt_entry_match *m, void **dstptr, compat_uint_t *size, const char *name, const struct ipt_ip *ip, unsigned int hookmask) { - struct ipt_entry_match *dm; - struct ipt_match *match; - int ret; - - dm = (struct ipt_entry_match *)*dstptr; - match = m->u.kernel.match; xt_compat_match_from_user(m, dstptr, size); - - ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm), - name, hookmask, ip->proto, - ip->invflags & IPT_INV_PROTO); - if (!ret && m->u.kernel.match->checkentry - && !m->u.kernel.match->checkentry(name, ip, match, dm->data, - hookmask)) { - duprintf("ip_tables: check failed for `%s'.\n", - m->u.kernel.match->name); - ret = -EINVAL; - } - return ret; + return 0; } static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr, @@ -1569,7 +1539,7 @@ static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr, ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size, name, &de->ip, de->comefrom); if (ret) - goto err; + return ret; de->target_offset = e->target_offset - (origsize - *size); t = ipt_get_target(e); target = t->u.kernel.target; @@ -1582,31 +1552,62 @@ static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr, if 
((unsigned char *)de - base < newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } + return ret; +} + +static inline int compat_check_match(struct ipt_entry_match *m, const char *name, + const struct ipt_ip *ip, unsigned int hookmask) +{ + struct ipt_match *match; + int ret; + + match = m->u.kernel.match; + ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m), + name, hookmask, ip->proto, + ip->invflags & IPT_INV_PROTO); + if (!ret && m->u.kernel.match->checkentry + && !m->u.kernel.match->checkentry(name, ip, match, m->data, + hookmask)) { + duprintf("ip_tables: compat: check failed for `%s'.\n", + m->u.kernel.match->name); + ret = -EINVAL; + } + return ret; +} + +static inline int compat_check_target(struct ipt_entry *e, const char *name) +{ + struct ipt_entry_target *t; + struct ipt_target *target; + int ret; - t = ipt_get_target(de); + t = ipt_get_target(e); target = t->u.kernel.target; ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t), name, e->comefrom, e->ip.proto, e->ip.invflags & IPT_INV_PROTO); - if (ret) - goto err; - - ret = -EINVAL; - if (t->u.kernel.target == &ipt_standard_target) { - if (!standard_check(t, *size)) - goto err; - } else if (t->u.kernel.target->checkentry - && !t->u.kernel.target->checkentry(name, de, target, - t->data, de->comefrom)) { + if (!ret && t->u.kernel.target->checkentry + && !t->u.kernel.target->checkentry(name, e, target, + t->data, e->comefrom)) { duprintf("ip_tables: compat: check failed for `%s'.\n", t->u.kernel.target->name); - goto err; + ret = -EINVAL; } - ret = 0; -err: return ret; } +static inline int compat_check_entry(struct ipt_entry *e, const char *name) +{ + int ret; + + ret = IPT_MATCH_ITERATE(e, compat_check_match, name, &e->ip, + e->comefrom); + if (ret) + return ret; + + return compat_check_target(e, name); +} + static int translate_compat_table(const char *name, unsigned int valid_hooks, @@ -1695,6 +1696,11 @@ translate_compat_table(const char *name, if (!mark_source_chains(newinfo, valid_hooks, entry1)) goto free_newinfo; + ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry, + name); + if (ret) + goto free_newinfo; + /* And one copy for every other CPU */ for_each_possible_cpu(i) if (newinfo->entries[i] && newinfo->entries[i] != entry1) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 9f3924c4905..11c167118e8 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1780,7 +1780,7 @@ static inline int __mkroute_input(struct sk_buff *skb, #endif if (in_dev->cnf.no_policy) rth->u.dst.flags |= DST_NOPOLICY; - if (in_dev->cnf.no_xfrm) + if (out_dev->cnf.no_xfrm) rth->u.dst.flags |= DST_NOXFRM; rth->fl.fl4_dst = daddr; rth->rt_dst = daddr; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9304034c0c4..c701f6abbfc 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4235,7 +4235,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, * Change state from SYN-SENT only after copied_seq * is initialized. 
*/ tp->copied_seq = tp->rcv_nxt; - mb(); + smp_mb(); tcp_set_state(sk, TCP_ESTABLISHED); security_inet_conn_established(sk, skb); @@ -4483,7 +4483,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, case TCP_SYN_RECV: if (acceptable) { tp->copied_seq = tp->rcv_nxt; - mb(); + smp_mb(); tcp_set_state(sk, TCP_ESTABLISHED); sk->sk_state_change(sk); diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index d4107bb701b..fb9f69c616f 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -274,6 +274,8 @@ static void xfrm4_dst_destroy(struct dst_entry *dst) if (likely(xdst->u.rt.idev)) in_dev_put(xdst->u.rt.idev); + if (likely(xdst->u.rt.peer)) + inet_putpeer(xdst->u.rt.peer); xfrm_dst_destroy(xdst); } diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 87c8f54872b..e5cd83b2205 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -720,10 +720,8 @@ snmp6_mib_free(void *ptr[2]) { if (ptr == NULL) return; - if (ptr[0]) - free_percpu(ptr[0]); - if (ptr[1]) - free_percpu(ptr[1]); + free_percpu(ptr[0]); + free_percpu(ptr[1]); ptr[0] = ptr[1] = NULL; } diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index bf526115e51..96d8310ae9c 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -50,7 +50,7 @@ struct rt6_statistics rt6_stats; -static kmem_cache_t * fib6_node_kmem __read_mostly; +static struct kmem_cache * fib6_node_kmem __read_mostly; enum fib_walk_state_t { @@ -150,7 +150,7 @@ static __inline__ struct fib6_node * node_alloc(void) { struct fib6_node *fn; - if ((fn = kmem_cache_alloc(fib6_node_kmem, SLAB_ATOMIC)) != NULL) + if ((fn = kmem_cache_alloc(fib6_node_kmem, GFP_ATOMIC)) != NULL) memset(fn, 0, sizeof(struct fib6_node)); return fn; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index e05ecbb1412..e9212c7ff5c 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -624,13 +624,13 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) skb_shinfo(skb)->frag_list = NULL; /* BUILD HEADER */ + *prevhdr = NEXTHDR_FRAGMENT; tmp_hdr = kmemdup(skb->nh.raw, hlen, GFP_ATOMIC); if (!tmp_hdr) { IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS); return -ENOMEM; } - *prevhdr = NEXTHDR_FRAGMENT; __skb_pull(skb, hlen); fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr)); skb->nh.raw = __skb_push(skb, hlen); diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index f63fb86d7c7..4eec4b3988b 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -440,6 +440,13 @@ mark_source_chains(struct xt_table_info *newinfo, && unconditional(&e->ipv6)) { unsigned int oldpos, size; + if (t->verdict < -NF_MAX_VERDICT - 1) { + duprintf("mark_source_chains: bad " + "negative verdict (%i)\n", + t->verdict); + return 0; + } + /* Return: backtrack through the last big jump. */ do { @@ -477,6 +484,13 @@ mark_source_chains(struct xt_table_info *newinfo, if (strcmp(t->target.u.user.name, IP6T_STANDARD_TARGET) == 0 && newpos >= 0) { + if (newpos > newinfo->size - + sizeof(struct ip6t_entry)) { + duprintf("mark_source_chains: " + "bad verdict (%i)\n", + newpos); + return 0; + } /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); @@ -509,27 +523,6 @@ cleanup_match(struct ip6t_entry_match *m, unsigned int *i) } static inline int -standard_check(const struct ip6t_entry_target *t, - unsigned int max_offset) -{ - struct ip6t_standard_target *targ = (void *)t; - - /* Check standard info. 
*/ - if (targ->verdict >= 0 - && targ->verdict > max_offset - sizeof(struct ip6t_entry)) { - duprintf("ip6t_standard_check: bad verdict (%i)\n", - targ->verdict); - return 0; - } - if (targ->verdict < -NF_MAX_VERDICT - 1) { - duprintf("ip6t_standard_check: bad negative verdict (%i)\n", - targ->verdict); - return 0; - } - return 1; -} - -static inline int check_match(struct ip6t_entry_match *m, const char *name, const struct ip6t_ip6 *ipv6, @@ -616,12 +609,7 @@ check_entry(struct ip6t_entry *e, const char *name, unsigned int size, if (ret) goto err; - if (t->u.kernel.target == &ip6t_standard_target) { - if (!standard_check(t, size)) { - ret = -EINVAL; - goto err; - } - } else if (t->u.kernel.target->checkentry + if (t->u.kernel.target->checkentry && !t->u.kernel.target->checkentry(name, e, target, t->data, e->comefrom)) { duprintf("ip_tables: check failed for `%s'.\n", @@ -758,17 +746,19 @@ translate_table(const char *name, } } + if (!mark_source_chains(newinfo, valid_hooks, entry0)) + return -ELOOP; + /* Finally, each sanity check must pass */ i = 0; ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size, check_entry, name, size, &i); - if (ret != 0) - goto cleanup; - - ret = -ELOOP; - if (!mark_source_chains(newinfo, valid_hooks, entry0)) - goto cleanup; + if (ret != 0) { + IP6T_ENTRY_ITERATE(entry0, newinfo->size, + cleanup_entry, &i); + return ret; + } /* And one copy for every other CPU */ for_each_possible_cpu(i) { @@ -777,9 +767,6 @@ translate_table(const char *name, } return 0; -cleanup: - IP6T_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i); - return ret; } /* Gets counters. */ diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index c2e629d6aea..4ae1b19ada5 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -854,7 +854,8 @@ back_from_confirm: } done: dst_release(dst); - release_sock(sk); + if (!inet->hdrincl) + release_sock(sk); out: fl6_sock_release(flowlabel); return err<0?err:len; diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index 01a5c52a2be..12e426b9aac 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c @@ -50,7 +50,7 @@ static u32 xfrm6_tunnel_spi; #define XFRM6_TUNNEL_SPI_MIN 1 #define XFRM6_TUNNEL_SPI_MAX 0xffffffff -static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly; +static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly; #define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256 #define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256 @@ -180,7 +180,7 @@ try_next_2:; spi = 0; goto out; alloc_spi: - x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC); + x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC); if (!x6spi) goto out; diff --git a/net/irda/irttp.c b/net/irda/irttp.c index 252f1101256..03504f3e499 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c @@ -1100,7 +1100,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel, return -ENOMEM; /* Reserve space for MUX_CONTROL and LAP header */ - skb_reserve(tx_skb, TTP_MAX_HEADER); + skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER); } else { tx_skb = userdata; /* @@ -1349,7 +1349,7 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size, return -ENOMEM; /* Reserve space for MUX_CONTROL and LAP header */ - skb_reserve(tx_skb, TTP_MAX_HEADER); + skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER); } else { tx_skb = userdata; /* diff --git a/net/key/af_key.c b/net/key/af_key.c index 0e1dbfbb9b1..5dd5094659a 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -27,6 +27,7 @@ #include <linux/proc_fs.h> #include <linux/init.h> #include <net/xfrm.h> +#include 
<linux/audit.h> #include <net/sock.h> @@ -1420,6 +1421,9 @@ static int pfkey_add(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, else err = xfrm_state_update(x); + xfrm_audit_log(audit_get_loginuid(current->audit_context), 0, + AUDIT_MAC_IPSEC_ADDSA, err ? 0 : 1, NULL, x); + if (err < 0) { x->km.state = XFRM_STATE_DEAD; __xfrm_state_put(x); @@ -1460,8 +1464,12 @@ static int pfkey_delete(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h err = -EPERM; goto out; } - + err = xfrm_state_delete(x); + + xfrm_audit_log(audit_get_loginuid(current->audit_context), 0, + AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x); + if (err < 0) goto out; @@ -1637,12 +1645,15 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hd { unsigned proto; struct km_event c; + struct xfrm_audit audit_info; proto = pfkey_satype2proto(hdr->sadb_msg_satype); if (proto == 0) return -EINVAL; - xfrm_state_flush(proto); + audit_info.loginuid = audit_get_loginuid(current->audit_context); + audit_info.secid = 0; + xfrm_state_flush(proto, &audit_info); c.data.proto = proto; c.seq = hdr->sadb_msg_seq; c.pid = hdr->sadb_msg_pid; @@ -2205,6 +2216,9 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h err = xfrm_policy_insert(pol->sadb_x_policy_dir-1, xp, hdr->sadb_msg_type != SADB_X_SPDUPDATE); + xfrm_audit_log(audit_get_loginuid(current->audit_context), 0, + AUDIT_MAC_IPSEC_ADDSPD, err ? 0 : 1, xp, NULL); + if (err) goto out; @@ -2282,6 +2296,10 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg xp = xfrm_policy_bysel_ctx(XFRM_POLICY_TYPE_MAIN, pol->sadb_x_policy_dir-1, &sel, tmp.security, 1); security_xfrm_policy_free(&tmp); + + xfrm_audit_log(audit_get_loginuid(current->audit_context), 0, + AUDIT_MAC_IPSEC_DELSPD, (xp) ? 1 : 0, xp, NULL); + if (xp == NULL) return -ENOENT; @@ -2416,8 +2434,11 @@ static int key_notify_policy_flush(struct km_event *c) static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) { struct km_event c; + struct xfrm_audit audit_info; - xfrm_policy_flush(XFRM_POLICY_TYPE_MAIN); + audit_info.loginuid = audit_get_loginuid(current->audit_context); + audit_info.secid = 0; + xfrm_policy_flush(XFRM_POLICY_TYPE_MAIN, &audit_info); c.data.type = XFRM_POLICY_TYPE_MAIN; c.event = XFRM_MSG_FLUSHPOLICY; c.pid = hdr->sadb_msg_pid; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index eaa0f8a1adb..9b02ec4012f 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -108,7 +108,7 @@ static struct { size_t size; /* slab cache pointer */ - kmem_cache_t *cachep; + struct kmem_cache *cachep; /* allocated slab cache + modules which uses this slab cache */ int use; @@ -147,7 +147,7 @@ int nf_conntrack_register_cache(u_int32_t features, const char *name, { int ret = 0; char *cache_name; - kmem_cache_t *cachep; + struct kmem_cache *cachep; DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n", features, name, size); @@ -226,7 +226,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_register_cache); /* FIXME: In the current, only nf_conntrack_cleanup() can call this function. 
*/ void nf_conntrack_unregister_cache(u_int32_t features) { - kmem_cache_t *cachep; + struct kmem_cache *cachep; char *name; /* @@ -1093,7 +1093,7 @@ static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size) get_order(sizeof(struct list_head) * size)); } -void nf_conntrack_flush() +void nf_conntrack_flush(void) { nf_ct_iterate_cleanup(kill_all, NULL); } diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 588d3793704..9cbf926cdd1 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -29,7 +29,7 @@ LIST_HEAD(nf_conntrack_expect_list); EXPORT_SYMBOL_GPL(nf_conntrack_expect_list); -kmem_cache_t *nf_conntrack_expect_cachep __read_mostly; +struct kmem_cache *nf_conntrack_expect_cachep __read_mostly; static unsigned int nf_conntrack_expect_next_id; /* nf_conntrack_expect helper functions */ @@ -91,25 +91,28 @@ EXPORT_SYMBOL_GPL(nf_conntrack_expect_find_get); struct nf_conntrack_expect * find_expectation(const struct nf_conntrack_tuple *tuple) { - struct nf_conntrack_expect *i; + struct nf_conntrack_expect *exp; + + exp = __nf_conntrack_expect_find(tuple); + if (!exp) + return NULL; - list_for_each_entry(i, &nf_conntrack_expect_list, list) { /* If master is not in hash table yet (ie. packet hasn't left this machine yet), how can other end know about expected? Hence these are not the droids you are looking for (if master ct never got confirmed, we'd hold a reference to it and weird things would happen to future packets). */ - if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) - && nf_ct_is_confirmed(i->master)) { - if (i->flags & NF_CT_EXPECT_PERMANENT) { - atomic_inc(&i->use); - return i; - } else if (del_timer(&i->timeout)) { - nf_ct_unlink_expect(i); - return i; - } - } + if (!nf_ct_is_confirmed(exp->master)) + return NULL; + + if (exp->flags & NF_CT_EXPECT_PERMANENT) { + atomic_inc(&exp->use); + return exp; + } else if (del_timer(&exp->timeout)) { + nf_ct_unlink_expect(exp); + return exp; } + return NULL; } diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index a98de0b54d6..a5a6e192ac2 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -92,7 +92,7 @@ struct xt_hashlimit_htable { static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */ static DEFINE_MUTEX(hlimit_mutex); /* additional checkentry protection */ static HLIST_HEAD(hashlimit_htables); -static kmem_cache_t *hashlimit_cachep __read_mostly; +static struct kmem_cache *hashlimit_cachep __read_mostly; static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b) { diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index b9b03747c1f..548e4e6e698 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -143,6 +143,13 @@ int genl_register_ops(struct genl_family *family, struct genl_ops *ops) goto errout; } + if (ops->dumpit) + ops->flags |= GENL_CMD_CAP_DUMP; + if (ops->doit) + ops->flags |= GENL_CMD_CAP_DO; + if (ops->policy) + ops->flags |= GENL_CMD_CAP_HASPOL; + genl_lock(); list_add_tail(&ops->ops_list, &family->ops_list); genl_unlock(); @@ -387,7 +394,7 @@ static void genl_rcv(struct sock *sk, int len) static struct genl_family genl_ctrl = { .id = GENL_ID_CTRL, .name = "nlctrl", - .version = 0x1, + .version = 0x2, .maxattr = CTRL_ATTR_MAX, }; @@ -425,15 +432,6 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq, NLA_PUT_U32(skb, CTRL_ATTR_OP_ID, ops->cmd); NLA_PUT_U32(skb, CTRL_ATTR_OP_FLAGS, ops->flags); - 
if (ops->policy) - NLA_PUT_FLAG(skb, CTRL_ATTR_OP_POLICY); - - if (ops->doit) - NLA_PUT_FLAG(skb, CTRL_ATTR_OP_DOIT); - - if (ops->dumpit) - NLA_PUT_FLAG(skb, CTRL_ATTR_OP_DUMPIT); - nla_nest_end(skb, nest); } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 08e68b67bbf..da73e8a8c18 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -660,7 +660,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe sll->sll_ifindex = dev->ifindex; h->tp_status = status; - mb(); + smp_mb(); { struct page *p_start, *p_end; diff --git a/net/rxrpc/krxiod.c b/net/rxrpc/krxiod.c index dada34a77b2..49effd92144 100644 --- a/net/rxrpc/krxiod.c +++ b/net/rxrpc/krxiod.c @@ -13,6 +13,7 @@ #include <linux/completion.h> #include <linux/spinlock.h> #include <linux/init.h> +#include <linux/freezer.h> #include <rxrpc/krxiod.h> #include <rxrpc/transport.h> #include <rxrpc/peer.h> diff --git a/net/rxrpc/krxsecd.c b/net/rxrpc/krxsecd.c index cea4eb5e249..3ab0f77409f 100644 --- a/net/rxrpc/krxsecd.c +++ b/net/rxrpc/krxsecd.c @@ -27,6 +27,7 @@ #include <rxrpc/call.h> #include <linux/udp.h> #include <linux/ip.h> +#include <linux/freezer.h> #include <net/sock.h> #include "internal.h" diff --git a/net/rxrpc/krxtimod.c b/net/rxrpc/krxtimod.c index 3e7466900bd..9a9b6132dba 100644 --- a/net/rxrpc/krxtimod.c +++ b/net/rxrpc/krxtimod.c @@ -13,6 +13,7 @@ #include <linux/init.h> #include <linux/sched.h> #include <linux/completion.h> +#include <linux/freezer.h> #include <rxrpc/rxrpc.h> #include <rxrpc/krxtimod.h> #include <asm/errno.h> diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index f59a2c4aa03..c797d6ada7d 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -101,9 +101,10 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp, struct fw_head *head = (struct fw_head*)tp->root; struct fw_filter *f; int r; - u32 id = skb->mark & head->mask; + u32 id = skb->mark; if (head != NULL) { + id &= head->mask; for (f=head->ht[fw_hash(id)]; f; f=f->next) { if (f->id == id) { *res = f->res; diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 11f3b549f4a..f2ba8615895 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -79,8 +79,8 @@ static struct sctp_pf *sctp_pf_inet_specific; static struct sctp_af *sctp_af_v4_specific; static struct sctp_af *sctp_af_v6_specific; -kmem_cache_t *sctp_chunk_cachep __read_mostly; -kmem_cache_t *sctp_bucket_cachep __read_mostly; +struct kmem_cache *sctp_chunk_cachep __read_mostly; +struct kmem_cache *sctp_bucket_cachep __read_mostly; /* Return the address of the control sock. 
*/ struct sock *sctp_get_ctl_sock(void) diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 04954e5f684..30927d3a597 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -65,7 +65,7 @@ #include <net/sctp/sctp.h> #include <net/sctp/sm.h> -extern kmem_cache_t *sctp_chunk_cachep; +extern struct kmem_cache *sctp_chunk_cachep; SCTP_STATIC struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, @@ -979,7 +979,7 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb, { struct sctp_chunk *retval; - retval = kmem_cache_alloc(sctp_chunk_cachep, SLAB_ATOMIC); + retval = kmem_cache_alloc(sctp_chunk_cachep, GFP_ATOMIC); if (!retval) goto nodata; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 02b27145b27..1e8132b8c4d 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -107,7 +107,7 @@ static void sctp_sock_migrate(struct sock *, struct sock *, struct sctp_association *, sctp_socket_type_t); static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; -extern kmem_cache_t *sctp_bucket_cachep; +extern struct kmem_cache *sctp_bucket_cachep; /* Get the sndbuf space available at the time on the association. */ static inline int sctp_wspace(struct sctp_association *asoc) @@ -4989,7 +4989,7 @@ static struct sctp_bind_bucket *sctp_bucket_create( { struct sctp_bind_bucket *pp; - pp = kmem_cache_alloc(sctp_bucket_cachep, SLAB_ATOMIC); + pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); SCTP_DBG_OBJCNT_INC(bind_bucket); if (pp) { pp->port = snum; diff --git a/net/socket.c b/net/socket.c index e8db54702a6..29ea1de43ec 100644 --- a/net/socket.c +++ b/net/socket.c @@ -230,13 +230,13 @@ int move_addr_to_user(void *kaddr, int klen, void __user *uaddr, #define SOCKFS_MAGIC 0x534F434B -static kmem_cache_t *sock_inode_cachep __read_mostly; +static struct kmem_cache *sock_inode_cachep __read_mostly; static struct inode *sock_alloc_inode(struct super_block *sb) { struct socket_alloc *ei; - ei = kmem_cache_alloc(sock_inode_cachep, SLAB_KERNEL); + ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); if (!ei) return NULL; init_waitqueue_head(&ei->socket.wait); @@ -257,7 +257,7 @@ static void sock_destroy_inode(struct inode *inode) container_of(inode, struct socket_alloc, vfs_inode)); } -static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) +static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) { struct socket_alloc *ei = (struct socket_alloc *)foo; @@ -305,7 +305,14 @@ static struct file_system_type sock_fs_type = { static int sockfs_delete_dentry(struct dentry *dentry) { - return 1; + /* + * At creation time, we pretended this dentry was hashed + * (by clearing DCACHE_UNHASHED bit in d_flags) + * At delete time, we restore the truth : not hashed. + * (so that dput() can proceed correctly) + */ + dentry->d_flags |= DCACHE_UNHASHED; + return 0; } static struct dentry_operations sockfs_dentry_operations = { .d_delete = sockfs_delete_dentry, @@ -353,14 +360,20 @@ static int sock_attach_fd(struct socket *sock, struct file *file) this.len = sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino); this.name = name; - this.hash = SOCK_INODE(sock)->i_ino; + this.hash = 0; file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this); if (unlikely(!file->f_dentry)) return -ENOMEM; file->f_dentry->d_op = &sockfs_dentry_operations; - d_add(file->f_dentry, SOCK_INODE(sock)); + /* + * We dont want to push this dentry into global dentry hash table. 
+ * We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED + * This permits a working /proc/$pid/fd/XXX on sockets + */ + file->f_dentry->d_flags &= ~DCACHE_UNHASHED; + d_instantiate(file->f_dentry, SOCK_INODE(sock)); file->f_vfsmnt = mntget(sock_mnt); file->f_mapping = file->f_dentry->d_inode->i_mapping; diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 49dba5febbb..19703aa9659 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -33,7 +33,7 @@ static int rpc_mount_count; static struct file_system_type rpc_pipe_fs_type; -static kmem_cache_t *rpc_inode_cachep __read_mostly; +static struct kmem_cache *rpc_inode_cachep __read_mostly; #define RPC_UPCALL_TIMEOUT (30*HZ) @@ -143,7 +143,7 @@ static struct inode * rpc_alloc_inode(struct super_block *sb) { struct rpc_inode *rpci; - rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, SLAB_KERNEL); + rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL); if (!rpci) return NULL; return &rpci->vfs_inode; @@ -824,7 +824,7 @@ static struct file_system_type rpc_pipe_fs_type = { }; static void -init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) { struct rpc_inode *rpci = (struct rpc_inode *) foo; diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 18a33d32701..79bc4cdf5d4 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -34,8 +34,8 @@ static int rpc_task_id; #define RPC_BUFFER_MAXSIZE (2048) #define RPC_BUFFER_POOLSIZE (8) #define RPC_TASK_POOLSIZE (8) -static kmem_cache_t *rpc_task_slabp __read_mostly; -static kmem_cache_t *rpc_buffer_slabp __read_mostly; +static struct kmem_cache *rpc_task_slabp __read_mostly; +static struct kmem_cache *rpc_buffer_slabp __read_mostly; static mempool_t *rpc_task_mempool __read_mostly; static mempool_t *rpc_buffer_mempool __read_mostly; diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c index ee9bb1522d5..c7bb5f7f21a 100644 --- a/net/sunrpc/svcauth.c +++ b/net/sunrpc/svcauth.c @@ -119,7 +119,8 @@ EXPORT_SYMBOL(svc_auth_unregister); #define DN_HASHMASK (DN_HASHMAX-1) static struct hlist_head auth_domain_table[DN_HASHMAX]; -static spinlock_t auth_domain_lock = SPIN_LOCK_UNLOCKED; +static spinlock_t auth_domain_lock = + __SPIN_LOCK_UNLOCKED(auth_domain_lock); void auth_domain_put(struct auth_domain *dom) { diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 64ca1f61dd9..99f54fb6d66 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -32,6 +32,7 @@ #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/file.h> +#include <linux/freezer.h> #include <net/sock.h> #include <net/checksum.h> #include <net/ip.h> @@ -84,6 +85,35 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req); */ static int svc_conn_age_period = 6*60; +#ifdef CONFIG_DEBUG_LOCK_ALLOC +static struct lock_class_key svc_key[2]; +static struct lock_class_key svc_slock_key[2]; + +static inline void svc_reclassify_socket(struct socket *sock) +{ + struct sock *sk = sock->sk; + BUG_ON(sk->sk_lock.owner != NULL); + switch (sk->sk_family) { + case AF_INET: + sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD", + &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]); + break; + + case AF_INET6: + sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD", + &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]); + break; + + default: + BUG(); + } +} +#else +static inline void svc_reclassify_socket(struct socket *sock) +{ +} +#endif + /* * 
Queue up an idle server thread. Must have pool->sp_lock held. * Note: this is really a stack rather than a queue, so that we only @@ -1556,6 +1586,8 @@ svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin) if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0) return error; + svc_reclassify_socket(sock); + if (type == SOCK_STREAM) sock->sk->sk_reuse = 1; /* allow address reuse */ error = kernel_bind(sock, (struct sockaddr *) sin, diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 3bb232eb5d9..49cabffd7fd 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1173,6 +1173,35 @@ static int xs_bindresvport(struct sock_xprt *transport, struct socket *sock) return err; } +#ifdef CONFIG_DEBUG_LOCK_ALLOC +static struct lock_class_key xs_key[2]; +static struct lock_class_key xs_slock_key[2]; + +static inline void xs_reclassify_socket(struct socket *sock) +{ + struct sock *sk = sock->sk; + BUG_ON(sk->sk_lock.owner != NULL); + switch (sk->sk_family) { + case AF_INET: + sock_lock_init_class_and_name(sk, "slock-AF_INET-NFS", + &xs_slock_key[0], "sk_lock-AF_INET-NFS", &xs_key[0]); + break; + + case AF_INET6: + sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFS", + &xs_slock_key[1], "sk_lock-AF_INET6-NFS", &xs_key[1]); + break; + + default: + BUG(); + } +} +#else +static inline void xs_reclassify_socket(struct socket *sock) +{ +} +#endif + /** * xs_udp_connect_worker - set up a UDP socket * @work: RPC transport to connect @@ -1197,6 +1226,7 @@ static void xs_udp_connect_worker(struct work_struct *work) dprintk("RPC: can't create UDP transport socket (%d).\n", -err); goto out; } + xs_reclassify_socket(sock); if (xprt->resvport && xs_bindresvport(transport, sock) < 0) { sock_release(sock); @@ -1282,6 +1312,7 @@ static void xs_tcp_connect_worker(struct work_struct *work) dprintk("RPC: can't create TCP transport socket (%d).\n", -err); goto out; } + xs_reclassify_socket(sock); if (xprt->resvport && xs_bindresvport(transport, sock) < 0) { sock_release(sock); diff --git a/net/tipc/handler.c b/net/tipc/handler.c index ae6ddf00a1a..eb80778d6d9 100644 --- a/net/tipc/handler.c +++ b/net/tipc/handler.c @@ -42,7 +42,7 @@ struct queue_item { unsigned long data; }; -static kmem_cache_t *tipc_queue_item_cache; +static struct kmem_cache *tipc_queue_item_cache; static struct list_head signal_queue_head; static DEFINE_SPINLOCK(qitem_lock); static int handler_enabled = 0; diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c index 316211d9f17..769cdd62c1b 100644 --- a/net/wanrouter/wanmain.c +++ b/net/wanrouter/wanmain.c @@ -62,63 +62,6 @@ #define KMEM_SAFETYZONE 8 -/***********FOR DEBUGGING PURPOSES********************************************* -static void * dbg_kmalloc(unsigned int size, int prio, int line) { - int i = 0; - void * v = kmalloc(size+sizeof(unsigned int)+2*KMEM_SAFETYZONE*8,prio); - char * c1 = v; - c1 += sizeof(unsigned int); - *((unsigned int *)v) = size; - - for (i = 0; i < KMEM_SAFETYZONE; i++) { - c1[0] = 'D'; c1[1] = 'E'; c1[2] = 'A'; c1[3] = 'D'; - c1[4] = 'B'; c1[5] = 'E'; c1[6] = 'E'; c1[7] = 'F'; - c1 += 8; - } - c1 += size; - for (i = 0; i < KMEM_SAFETYZONE; i++) { - c1[0] = 'M'; c1[1] = 'U'; c1[2] = 'N'; c1[3] = 'G'; - c1[4] = 'W'; c1[5] = 'A'; c1[6] = 'L'; c1[7] = 'L'; - c1 += 8; - } - v = ((char *)v) + sizeof(unsigned int) + KMEM_SAFETYZONE*8; - printk(KERN_INFO "line %d kmalloc(%d,%d) = %p\n",line,size,prio,v); - return v; -} -static void dbg_kfree(void * v, int line) { - unsigned int * sp = (unsigned int *)(((char 
*)v) - (sizeof(unsigned int) + KMEM_SAFETYZONE*8)); - unsigned int size = *sp; - char * c1 = ((char *)v) - KMEM_SAFETYZONE*8; - int i = 0; - for (i = 0; i < KMEM_SAFETYZONE; i++) { - if ( c1[0] != 'D' || c1[1] != 'E' || c1[2] != 'A' || c1[3] != 'D' - || c1[4] != 'B' || c1[5] != 'E' || c1[6] != 'E' || c1[7] != 'F') { - printk(KERN_INFO "kmalloced block at %p has been corrupted (underrun)!\n",v); - printk(KERN_INFO " %4x: %2x %2x %2x %2x %2x %2x %2x %2x\n", i*8, - c1[0],c1[1],c1[2],c1[3],c1[4],c1[5],c1[6],c1[7] ); - } - c1 += 8; - } - c1 += size; - for (i = 0; i < KMEM_SAFETYZONE; i++) { - if ( c1[0] != 'M' || c1[1] != 'U' || c1[2] != 'N' || c1[3] != 'G' - || c1[4] != 'W' || c1[5] != 'A' || c1[6] != 'L' || c1[7] != 'L' - ) { - printk(KERN_INFO "kmalloced block at %p has been corrupted (overrun):\n",v); - printk(KERN_INFO " %4x: %2x %2x %2x %2x %2x %2x %2x %2x\n", i*8, - c1[0],c1[1],c1[2],c1[3],c1[4],c1[5],c1[6],c1[7] ); - } - c1 += 8; - } - printk(KERN_INFO "line %d kfree(%p)\n",line,v); - v = ((char *)v) - (sizeof(unsigned int) + KMEM_SAFETYZONE*8); - kfree(v); -} - -#define kmalloc(x,y) dbg_kmalloc(x,y,__LINE__) -#define kfree(x) dbg_kfree(x,__LINE__) -*****************************************************************************/ - /* * Function Prototypes */ diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index 5a0dbeb6bbe..6b381fc0383 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c @@ -119,6 +119,23 @@ static struct xfrm_algo_desc aalg_list[] = { .sadb_alg_maxbits = 160 } }, +{ + .name = "xcbc(aes)", + + .uinfo = { + .auth = { + .icv_truncbits = 96, + .icv_fullbits = 128, + } + }, + + .desc = { + .sadb_alg_id = SADB_X_AALG_AES_XCBC_MAC, + .sadb_alg_ivlen = 0, + .sadb_alg_minbits = 128, + .sadb_alg_maxbits = 128 + } +}, }; static struct xfrm_algo_desc ealg_list[] = { diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index e8198a2c785..414f8907038 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -12,7 +12,7 @@ #include <net/ip.h> #include <net/xfrm.h> -static kmem_cache_t *secpath_cachep __read_mostly; +static struct kmem_cache *secpath_cachep __read_mostly; void __secpath_destroy(struct sec_path *sp) { @@ -27,7 +27,7 @@ struct sec_path *secpath_dup(struct sec_path *src) { struct sec_path *sp; - sp = kmem_cache_alloc(secpath_cachep, SLAB_ATOMIC); + sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC); if (!sp) return NULL; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index f6c77bd36fd..bebd40e5a62 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -25,6 +25,7 @@ #include <linux/cache.h> #include <net/xfrm.h> #include <net/ip.h> +#include <linux/audit.h> #include "xfrm_hash.h" @@ -39,7 +40,7 @@ EXPORT_SYMBOL(xfrm_policy_count); static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; -static kmem_cache_t *xfrm_dst_cache __read_mostly; +static struct kmem_cache *xfrm_dst_cache __read_mostly; static struct work_struct xfrm_policy_gc_work; static HLIST_HEAD(xfrm_policy_gc_list); @@ -804,7 +805,7 @@ struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete) } EXPORT_SYMBOL(xfrm_policy_byid); -void xfrm_policy_flush(u8 type) +void xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info) { int dir; @@ -824,6 +825,9 @@ void xfrm_policy_flush(u8 type) hlist_del(&pol->byidx); write_unlock_bh(&xfrm_policy_lock); + xfrm_audit_log(audit_info->loginuid, audit_info->secid, + AUDIT_MAC_IPSEC_DELSPD, 1, pol, NULL); + xfrm_policy_kill(pol); killed++; @@ -842,6 
+846,11 @@ void xfrm_policy_flush(u8 type) hlist_del(&pol->byidx); write_unlock_bh(&xfrm_policy_lock); + xfrm_audit_log(audit_info->loginuid, + audit_info->secid, + AUDIT_MAC_IPSEC_DELSPD, 1, + pol, NULL); + xfrm_policy_kill(pol); killed++; @@ -860,33 +869,12 @@ EXPORT_SYMBOL(xfrm_policy_flush); int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*), void *data) { - struct xfrm_policy *pol; + struct xfrm_policy *pol, *last = NULL; struct hlist_node *entry; - int dir, count, error; + int dir, last_dir = 0, count, error; read_lock_bh(&xfrm_policy_lock); count = 0; - for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) { - struct hlist_head *table = xfrm_policy_bydst[dir].table; - int i; - - hlist_for_each_entry(pol, entry, - &xfrm_policy_inexact[dir], bydst) { - if (pol->type == type) - count++; - } - for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) { - hlist_for_each_entry(pol, entry, table + i, bydst) { - if (pol->type == type) - count++; - } - } - } - - if (count == 0) { - error = -ENOENT; - goto out; - } for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) { struct hlist_head *table = xfrm_policy_bydst[dir].table; @@ -896,21 +884,37 @@ int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*) &xfrm_policy_inexact[dir], bydst) { if (pol->type != type) continue; - error = func(pol, dir % XFRM_POLICY_MAX, --count, data); - if (error) - goto out; + if (last) { + error = func(last, last_dir % XFRM_POLICY_MAX, + count, data); + if (error) + goto out; + } + last = pol; + last_dir = dir; + count++; } for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) { hlist_for_each_entry(pol, entry, table + i, bydst) { if (pol->type != type) continue; - error = func(pol, dir % XFRM_POLICY_MAX, --count, data); - if (error) - goto out; + if (last) { + error = func(last, last_dir % XFRM_POLICY_MAX, + count, data); + if (error) + goto out; + } + last = pol; + last_dir = dir; + count++; } } } - error = 0; + if (count == 0) { + error = -ENOENT; + goto out; + } + error = func(last, last_dir % XFRM_POLICY_MAX, 0, data); out: read_unlock_bh(&xfrm_policy_lock); return error; @@ -1982,6 +1986,117 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first, EXPORT_SYMBOL(xfrm_bundle_ok); +#ifdef CONFIG_AUDITSYSCALL +/* Audit addition and deletion of SAs and ipsec policy */ + +void xfrm_audit_log(uid_t auid, u32 sid, int type, int result, + struct xfrm_policy *xp, struct xfrm_state *x) +{ + + char *secctx; + u32 secctx_len; + struct xfrm_sec_ctx *sctx = NULL; + struct audit_buffer *audit_buf; + int family; + extern int audit_enabled; + + if (audit_enabled == 0) + return; + + audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC, type); + if (audit_buf == NULL) + return; + + switch(type) { + case AUDIT_MAC_IPSEC_ADDSA: + audit_log_format(audit_buf, "SAD add: auid=%u", auid); + break; + case AUDIT_MAC_IPSEC_DELSA: + audit_log_format(audit_buf, "SAD delete: auid=%u", auid); + break; + case AUDIT_MAC_IPSEC_ADDSPD: + audit_log_format(audit_buf, "SPD add: auid=%u", auid); + break; + case AUDIT_MAC_IPSEC_DELSPD: + audit_log_format(audit_buf, "SPD delete: auid=%u", auid); + break; + default: + return; + } + + if (sid != 0 && + security_secid_to_secctx(sid, &secctx, &secctx_len) == 0) + audit_log_format(audit_buf, " subj=%s", secctx); + else + audit_log_task_context(audit_buf); + + if (xp) { + family = xp->selector.family; + if (xp->security) + sctx = xp->security; + } else { + family = x->props.family; + if (x->security) + sctx = x->security; + } + + if (sctx) + 
audit_log_format(audit_buf, + " sec_alg=%u sec_doi=%u sec_obj=%s", + sctx->ctx_alg, sctx->ctx_doi, sctx->ctx_str); + + switch(family) { + case AF_INET: + { + struct in_addr saddr, daddr; + if (xp) { + saddr.s_addr = xp->selector.saddr.a4; + daddr.s_addr = xp->selector.daddr.a4; + } else { + saddr.s_addr = x->props.saddr.a4; + daddr.s_addr = x->id.daddr.a4; + } + audit_log_format(audit_buf, + " src=%u.%u.%u.%u dst=%u.%u.%u.%u", + NIPQUAD(saddr), NIPQUAD(daddr)); + } + break; + case AF_INET6: + { + struct in6_addr saddr6, daddr6; + if (xp) { + memcpy(&saddr6, xp->selector.saddr.a6, + sizeof(struct in6_addr)); + memcpy(&daddr6, xp->selector.daddr.a6, + sizeof(struct in6_addr)); + } else { + memcpy(&saddr6, x->props.saddr.a6, + sizeof(struct in6_addr)); + memcpy(&daddr6, x->id.daddr.a6, + sizeof(struct in6_addr)); + } + audit_log_format(audit_buf, + " src=" NIP6_FMT "dst=" NIP6_FMT, + NIP6(saddr6), NIP6(daddr6)); + } + break; + } + + if (x) + audit_log_format(audit_buf, " spi=%lu(0x%lx) protocol=%s", + (unsigned long)ntohl(x->id.spi), + (unsigned long)ntohl(x->id.spi), + x->id.proto == IPPROTO_AH ? "AH" : + (x->id.proto == IPPROTO_ESP ? + "ESP" : "IPCOMP")); + + audit_log_format(audit_buf, " res=%u", result); + audit_log_end(audit_buf); +} + +EXPORT_SYMBOL(xfrm_audit_log); +#endif /* CONFIG_AUDITSYSCALL */ + int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) { int err = 0; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index da54a64ccfa..fdb08d9f34a 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -20,6 +20,7 @@ #include <linux/module.h> #include <linux/cache.h> #include <asm/uaccess.h> +#include <linux/audit.h> #include "xfrm_hash.h" @@ -238,6 +239,7 @@ static void xfrm_timer_handler(unsigned long data) unsigned long now = (unsigned long)xtime.tv_sec; long next = LONG_MAX; int warn = 0; + int err = 0; spin_lock(&x->lock); if (x->km.state == XFRM_STATE_DEAD) @@ -295,9 +297,14 @@ expired: next = 2; goto resched; } - if (!__xfrm_state_delete(x) && x->id.spi) + + err = __xfrm_state_delete(x); + if (!err && x->id.spi) km_state_expired(x, 1, 0); + xfrm_audit_log(audit_get_loginuid(current->audit_context), 0, + AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x); + out: spin_unlock(&x->lock); } @@ -384,9 +391,10 @@ int xfrm_state_delete(struct xfrm_state *x) } EXPORT_SYMBOL(xfrm_state_delete); -void xfrm_state_flush(u8 proto) +void xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info) { int i; + int err = 0; spin_lock_bh(&xfrm_state_lock); for (i = 0; i <= xfrm_state_hmask; i++) { @@ -399,7 +407,11 @@ restart: xfrm_state_hold(x); spin_unlock_bh(&xfrm_state_lock); - xfrm_state_delete(x); + err = xfrm_state_delete(x); + xfrm_audit_log(audit_info->loginuid, + audit_info->secid, + AUDIT_MAC_IPSEC_DELSA, + err ? 
0 : 1, NULL, x); xfrm_state_put(x); spin_lock_bh(&xfrm_state_lock); @@ -1099,7 +1111,7 @@ int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), void *data) { int i; - struct xfrm_state *x; + struct xfrm_state *x, *last = NULL; struct hlist_node *entry; int count = 0; int err = 0; @@ -1107,24 +1119,22 @@ int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), spin_lock_bh(&xfrm_state_lock); for (i = 0; i <= xfrm_state_hmask; i++) { hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) { - if (xfrm_id_proto_match(x->id.proto, proto)) - count++; + if (!xfrm_id_proto_match(x->id.proto, proto)) + continue; + if (last) { + err = func(last, count, data); + if (err) + goto out; + } + last = x; + count++; } } if (count == 0) { err = -ENOENT; goto out; } - - for (i = 0; i <= xfrm_state_hmask; i++) { - hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) { - if (!xfrm_id_proto_match(x->id.proto, proto)) - continue; - err = func(x, --count, data); - if (err) - goto out; - } - } + err = func(last, 0, data); out: spin_unlock_bh(&xfrm_state_lock); return err; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 311205ffa77..e5372b11fc8 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -31,6 +31,7 @@ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) #include <linux/in6.h> #endif +#include <linux/audit.h> static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type) { @@ -454,6 +455,9 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) else err = xfrm_state_update(x); + xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, + AUDIT_MAC_IPSEC_ADDSA, err ? 0 : 1, NULL, x); + if (err < 0) { x->km.state = XFRM_STATE_DEAD; __xfrm_state_put(x); @@ -523,6 +527,10 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) } err = xfrm_state_delete(x); + + xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, + AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x); + if (err < 0) goto out; @@ -1030,6 +1038,9 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr * a type XFRM_MSG_UPDPOLICY - JHS */ excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY; err = xfrm_policy_insert(p->dir, xp, excl); + xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, + AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL); + if (err) { security_xfrm_policy_free(xp); kfree(xp); @@ -1257,6 +1268,10 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security, delete); security_xfrm_policy_free(&tmp); } + if (delete) + xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, + AUDIT_MAC_IPSEC_DELSPD, (xp) ? 
1 : 0, xp, NULL); + if (xp == NULL) return -ENOENT; @@ -1291,8 +1306,11 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma { struct km_event c; struct xfrm_usersa_flush *p = NLMSG_DATA(nlh); + struct xfrm_audit audit_info; - xfrm_state_flush(p->proto); + audit_info.loginuid = NETLINK_CB(skb).loginuid; + audit_info.secid = NETLINK_CB(skb).sid; + xfrm_state_flush(p->proto, &audit_info); c.data.proto = p->proto; c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; @@ -1442,12 +1460,15 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **x struct km_event c; u8 type = XFRM_POLICY_TYPE_MAIN; int err; + struct xfrm_audit audit_info; err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma); if (err) return err; - xfrm_policy_flush(type); + audit_info.loginuid = NETLINK_CB(skb).loginuid; + audit_info.secid = NETLINK_CB(skb).sid; + xfrm_policy_flush(type, &audit_info); c.data.type = type; c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; @@ -1502,6 +1523,9 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, void * err = 0; if (up->hard) { xfrm_policy_delete(xp, p->dir); + xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, + AUDIT_MAC_IPSEC_DELSPD, 1, xp, NULL); + } else { // reset the timers here? printk("Dont know what to do with soft policy expire\n"); @@ -1533,8 +1557,11 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, void ** goto out; km_state_expired(x, ue->hard, current->pid); - if (ue->hard) + if (ue->hard) { __xfrm_state_delete(x); + xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, + AUDIT_MAC_IPSEC_DELSA, 1, NULL, x); + } out: spin_unlock_bh(&x->lock); xfrm_state_put(x); |
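Editor's note on the walk rework: the xfrm_policy_walk() and xfrm_state_walk() hunks above replace the old two-pass walk (a first pass only to count matching entries, then a second pass invoking the callback with a decrementing index) with a single pass that trails one element behind. Each match is remembered in "last" and the callback is issued for the previous match, so the final match can be passed with position 0 after the loop, and -ENOENT is still returned when nothing matched. The following is a minimal userspace sketch of that deferral pattern, not kernel code; the struct item type, the proto-match predicate, and the -2 stand-in for -ENOENT are illustrative assumptions.

#include <stdio.h>

struct item {
	int proto;
	const char *name;
};

typedef int (*walk_fn)(const struct item *it, int pos, void *data);

/*
 * Walk all items whose proto matches, calling func once per match.
 * Earlier matches are reported with pos = 1, 2, ...; the final match
 * is reported with pos = 0 via the deferred call after the loop,
 * mirroring the reworked xfrm walk logic.
 */
static int walk_items(const struct item *items, int n, int proto,
		      walk_fn func, void *data)
{
	const struct item *last = NULL;
	int i, count = 0, err;

	for (i = 0; i < n; i++) {
		if (items[i].proto != proto)
			continue;
		if (last) {
			err = func(last, count, data);
			if (err)
				return err;
		}
		last = &items[i];
		count++;
	}
	if (count == 0)
		return -2;		/* stand-in for -ENOENT */
	return func(last, 0, data);	/* deferred call for the final match */
}

static int print_item(const struct item *it, int pos, void *data)
{
	(void)data;
	printf("pos=%d proto=%d name=%s\n", pos, it->proto, it->name);
	return 0;
}

int main(void)
{
	static const struct item items[] = {
		{ 1, "a" }, { 2, "b" }, { 1, "c" }, { 1, "d" },
	};

	return walk_items(items, 4, 1, print_item, NULL) < 0;
}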
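Editor's note on the audit records: the new xfrm_audit_log() above assembles one audit record per SA/SPD add or delete, consisting of an operation prefix carrying auid=, an optional subj= security context, the selector or state addresses, an spi=/protocol= pair for SA events, and a trailing res= success flag. Below is a userspace sketch of what such a record looks like for an IPv4 SPD add; the uid, SELinux context, and addresses are invented sample values, and the kernel builds the record through audit_log_format()/audit_log_end() rather than snprintf().

#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	char buf[256];
	char srcbuf[INET_ADDRSTRLEN], dstbuf[INET_ADDRSTRLEN];
	unsigned int auid = 500;				/* sample login uid */
	const char *subj = "system_u:system_r:ipsec_t:s0";	/* sample context */
	struct in_addr src = { .s_addr = htonl(0xc0a80001) };	/* 192.168.0.1 */
	struct in_addr dst = { .s_addr = htonl(0xc0a80002) };	/* 192.168.0.2 */
	int result = 1;						/* 1 = success, 0 = failure */

	inet_ntop(AF_INET, &src, srcbuf, sizeof(srcbuf));
	inet_ntop(AF_INET, &dst, dstbuf, sizeof(dstbuf));

	/* Field order follows the xfrm_audit_log() hunk for an SPD add. */
	snprintf(buf, sizeof(buf),
		 "SPD add: auid=%u subj=%s src=%s dst=%s res=%u",
		 auid, subj, srcbuf, dstbuf, result);
	puts(buf);
	return 0;
}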