Diffstat (limited to 'include/net')
-rw-r--r--   include/net/dst.h                              2
-rw-r--r--   include/net/inet_hashtables.h                  6
-rw-r--r--   include/net/neighbour.h                        2
-rw-r--r--   include/net/netfilter/nf_conntrack_expect.h    2
-rw-r--r--   include/net/request_sock.h                     4
-rw-r--r--   include/net/sock.h                            21
-rw-r--r--   include/net/timewait_sock.h                    2
7 files changed, 29 insertions, 10 deletions
diff --git a/include/net/dst.h b/include/net/dst.h
index e156e38e4ac..62b7e7598e9 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -98,7 +98,7 @@ struct dst_ops
 	int			entry_size;
 
 	atomic_t		entries;
-	kmem_cache_t		*kmem_cachep;
+	struct kmem_cache	*kmem_cachep;
 };
 
 #ifdef __KERNEL__
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index a9eb2eaf094..34cc76e3ddb 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -125,7 +125,7 @@ struct inet_hashinfo {
 	rwlock_t			lhash_lock ____cacheline_aligned;
 	atomic_t			lhash_users;
 	wait_queue_head_t		lhash_wait;
-	kmem_cache_t			*bind_bucket_cachep;
+	struct kmem_cache		*bind_bucket_cachep;
 };
 
 static inline struct inet_ehash_bucket *inet_ehash_bucket(
@@ -136,10 +136,10 @@ static inline struct inet_ehash_bucket *inet_ehash_bucket(
 }
 
 extern struct inet_bind_bucket *
-		inet_bind_bucket_create(kmem_cache_t *cachep,
+		inet_bind_bucket_create(struct kmem_cache *cachep,
 					struct inet_bind_hashbucket *head,
 					const unsigned short snum);
-extern void inet_bind_bucket_destroy(kmem_cache_t *cachep,
+extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
 				     struct inet_bind_bucket *tb);
 
 static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index c8aacbd2e33..23967031ddb 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -160,7 +160,7 @@ struct neigh_table
 	atomic_t		entries;
 	rwlock_t		lock;
 	unsigned long		last_rand;
-	kmem_cache_t		*kmem_cachep;
+	struct kmem_cache	*kmem_cachep;
 	struct neigh_statistics	*stats;
 	struct neighbour	**hash_buckets;
 	unsigned int		hash_mask;
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index cef3136e22a..41bcc9eb420 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -7,7 +7,7 @@
 #include <net/netfilter/nf_conntrack.h>
 
 extern struct list_head nf_conntrack_expect_list;
-extern kmem_cache_t *nf_conntrack_expect_cachep;
+extern struct kmem_cache *nf_conntrack_expect_cachep;
 extern struct file_operations exp_file_ops;
 
 struct nf_conntrack_expect
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index e37baaf2080..7aed02ce2b6 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -29,7 +29,7 @@ struct proto;
 struct request_sock_ops {
 	int		family;
 	int		obj_size;
-	kmem_cache_t	*slab;
+	struct kmem_cache	*slab;
 	int		(*rtx_syn_ack)(struct sock *sk,
 				       struct request_sock *req,
 				       struct dst_entry *dst);
@@ -60,7 +60,7 @@ struct request_sock {
 
 static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
 {
-	struct request_sock *req = kmem_cache_alloc(ops->slab, SLAB_ATOMIC);
+	struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
 
 	if (req != NULL)
 		req->rsk_ops = ops;
diff --git a/include/net/sock.h b/include/net/sock.h
index fe3a33fad03..03684e702d1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -571,7 +571,7 @@ struct proto {
 	int			*sysctl_rmem;
 	int			max_header;
 
-	kmem_cache_t		*slab;
+	struct kmem_cache	*slab;
 	unsigned int		obj_size;
 
 	atomic_t		*orphan_count;
@@ -746,6 +746,25 @@ static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
  */
 #define sock_owned_by_user(sk)	((sk)->sk_lock.owner)
 
+/*
+ * Macro so as to not evaluate some arguments when
+ * lockdep is not enabled.
+ *
+ * Mark both the sk_lock and the sk_lock.slock as a
+ * per-address-family lock class.
+ */
+#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
+do {									\
+	sk->sk_lock.owner = NULL;					\
+	init_waitqueue_head(&sk->sk_lock.wq);				\
+	spin_lock_init(&(sk)->sk_lock.slock);				\
+	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
+			sizeof((sk)->sk_lock));				\
+	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
+			(skey), (sname));				\
+	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
+} while (0)
+
 extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass));
 
 static inline void lock_sock(struct sock *sk)
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index d7a306ea560..1e1ee3253fd 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -15,7 +15,7 @@
 #include <net/sock.h>
 
 struct timewait_sock_ops {
-	kmem_cache_t	*twsk_slab;
+	struct kmem_cache	*twsk_slab;
 	unsigned int	twsk_obj_size;
 	int		(*twsk_unique)(struct sock *sk,
 				       struct sock *sktw, void *twp);
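Most of this diff is a mechanical rename: every kmem_cache_t is spelled out as struct kmem_cache, and the legacy SLAB_ATOMIC flag in reqsk_alloc() becomes GFP_ATOMIC. As a rough sketch (not part of the commit; the foo_* names are invented), a cache owner written against the updated spelling might look like this, assuming the 2.6.19-era kmem_cache_create() signature that still takes separate constructor and destructor arguments:

/*
 * Illustrative sketch only -- not part of this commit.  The foo_* names
 * are invented; the alloc/free pattern mirrors how ops->slab is used in
 * reqsk_alloc() above.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

struct foo_entry {
	unsigned long	key;
	void		*data;
};

/* was: static kmem_cache_t *foo_cachep; */
static struct kmem_cache *foo_cachep;

static struct foo_entry *foo_entry_alloc(void)
{
	/* GFP_ATOMIC replaces the old SLAB_ATOMIC alias removed by this series. */
	return kmem_cache_alloc(foo_cachep, GFP_ATOMIC);
}

static void foo_entry_free(struct foo_entry *e)
{
	kmem_cache_free(foo_cachep, e);
}

static int __init foo_cache_init(void)
{
	/* 2.6.19-era signature: ctor and dtor are still separate arguments. */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo_entry),
				       0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

static void __exit foo_cache_exit(void)
{
	kmem_cache_destroy(foo_cachep);
}

module_init(foo_cache_init);
module_exit(foo_cache_exit);
MODULE_LICENSE("GPL");

Later kernels dropped the destructor argument from kmem_cache_create(), but the struct kmem_cache / GFP_ATOMIC spelling is the one this diff standardizes on.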
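The one functional addition is sock_lock_init_class_and_name(), which lets a caller give both sk_lock and sk_lock.slock a lockdep class and name of its choosing, so that each address family can be tracked as its own lock class. A hedged sketch of how a caller might use it follows; the demo_* tables and helper are illustrative and not part of this commit:

/*
 * Illustrative sketch only -- not part of this commit.  The demo_* keys,
 * strings and helper are invented; real callers keep similar per-family
 * tables of lock_class_key objects and name strings.
 */
#include <linux/lockdep.h>
#include <linux/socket.h>
#include <net/sock.h>

static struct lock_class_key demo_af_keys[AF_MAX];
static struct lock_class_key demo_af_slock_keys[AF_MAX];

static const char *const demo_af_names[AF_MAX] = {
	[AF_INET]  = "sk_lock-AF_INET",
	[AF_INET6] = "sk_lock-AF_INET6",
};
static const char *const demo_af_slock_names[AF_MAX] = {
	[AF_INET]  = "slock-AF_INET",
	[AF_INET6] = "slock-AF_INET6",
};

static void demo_sock_lock_init(struct sock *sk)
{
	/*
	 * sname/skey name the spinlock (sk_lock.slock); name/key name the
	 * socket-level lock's dep_map.  Indexing by sk_family gives every
	 * address family its own lockdep class.
	 */
	sock_lock_init_class_and_name(sk,
			demo_af_slock_names[sk->sk_family],
			&demo_af_slock_keys[sk->sk_family],
			demo_af_names[sk->sk_family],
			&demo_af_keys[sk->sk_family]);
}

Keeping one static lock_class_key per family means lockdep can tell, say, AF_INET socket locks apart from other families instead of collapsing every socket lock into a single class.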