Diffstat (limited to 'include/net/sock.h')
-rw-r--r--  include/net/sock.h | 50
1 file changed, 41 insertions, 9 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index ac286a35303..03684e702d1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -47,6 +47,7 @@
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h> /* struct sk_buff */
+#include <linux/mm.h>
#include <linux/security.h>
#include <linux/filter.h>
@@ -570,7 +571,7 @@ struct proto {
int *sysctl_rmem;
int max_header;
- kmem_cache_t *slab;
+ struct kmem_cache *slab;
unsigned int obj_size;
atomic_t *orphan_count;
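
The hunk above drops the old kmem_cache_t typedef in favour of a plain struct kmem_cache pointer for the per-protocol slab. A minimal sketch of how a protocol's object cache is typically set up at registration time is shown below; the example function name is hypothetical and the exact kmem_cache_create() signature differs between kernel versions, so treat it as illustrative rather than a quote of proto_register().

/*
 * Hedged sketch: creating the per-protocol slab now that proto->slab
 * is a struct kmem_cache *.  Identifiers are illustrative only.
 */
static int example_register_proto(struct proto *prot)
{
	prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (prot->slab == NULL)
		return -ENOBUFS;	/* allocation failure, as proto_register() reports */
	return 0;
}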
@@ -745,7 +746,32 @@ static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
*/
#define sock_owned_by_user(sk) ((sk)->sk_lock.owner)
-extern void FASTCALL(lock_sock(struct sock *sk));
+/*
+ * Macro so as to not evaluate some arguments when
+ * lockdep is not enabled.
+ *
+ * Mark both the sk_lock and the sk_lock.slock as a
+ * per-address-family lock class.
+ */
+#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
+do { \
+ sk->sk_lock.owner = NULL; \
+ init_waitqueue_head(&sk->sk_lock.wq); \
+ spin_lock_init(&(sk)->sk_lock.slock); \
+ debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
+ sizeof((sk)->sk_lock)); \
+ lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
+ (skey), (sname)); \
+ lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
+} while (0)
+
+extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass));
+
+static inline void lock_sock(struct sock *sk)
+{
+ lock_sock_nested(sk, 0);
+}
+
extern void FASTCALL(release_sock(struct sock *sk));
/* BH context may only use the following locking interface. */
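
The new macro exists so each address family can give sk_lock and sk_lock.slock their own lockdep class and name. A hedged sketch of the usual call pattern follows, modelled on the per-family key arrays kept in net/core/sock.c; the identifiers and the "AF_EXAMPLE" strings are illustrative, not taken from this patch.

/*
 * Hedged sketch: per-address-family lockdep classes, indexed by
 * sk->sk_family.  Names and arrays here are illustrative only.
 */
static struct lock_class_key example_af_family_keys[AF_MAX];
static struct lock_class_key example_af_family_slock_keys[AF_MAX];

static void example_sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			"slock-AF_EXAMPLE",
			&example_af_family_slock_keys[sk->sk_family],
			"sk_lock-AF_EXAMPLE",
			&example_af_family_keys[sk->sk_family]);
}

lock_sock_nested() in turn lets a caller that legitimately holds another socket lock annotate the inner acquisition, e.g. lock_sock_nested(sk, SINGLE_DEPTH_NESTING), so lockdep does not report it as recursive locking; plain lock_sock() keeps the default subclass 0.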
@@ -883,18 +909,23 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
}
/**
- * sk_filter_release: Release a socket filter
- * @rcu: rcu_head that contains the sk_filter info to remove
- *
- * Remove a filter from a socket and release its resources.
+ * sk_filter_rcu_free: Free a socket filter
+ * @rcu: rcu_head that contains the sk_filter to free
*/
-
static inline void sk_filter_rcu_free(struct rcu_head *rcu)
{
struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
kfree(fp);
}
+/**
+ * sk_filter_release: Release a socket filter
+ * @sk: socket
+ * @fp: filter to remove
+ *
+ * Remove a filter from a socket and release its resources.
+ */
+
static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
{
unsigned int size = sk_filter_len(fp);
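
The kernel-doc blocks are split so each helper is documented separately: sk_filter_rcu_free() does the deferred kfree(), while sk_filter_release() drops a reference and hands the filter to RCU. A hedged sketch of how the two halves pair up is below; the function name is hypothetical and the accounting details follow the general pattern of this kernel era rather than quoting the rest of the hunk.

/*
 * Hedged sketch: when the last reference drops, queue the filter to
 * RCU; sk_filter_rcu_free() frees it once readers are done.
 */
static inline void example_sk_filter_put(struct sock *sk, struct sk_filter *fp)
{
	atomic_sub(sk_filter_len(fp), &sk->sk_omem_alloc);	/* return the option memory */

	if (atomic_dec_and_test(&fp->refcnt))
		call_rcu_bh(&fp->rcu, sk_filter_rcu_free);
}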
@@ -943,7 +974,8 @@ static inline void sock_put(struct sock *sk)
sk_free(sk);
}
-extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb);
+extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+ const int nested);
/* Detach socket from process context.
* Announce socket dead, detach it from wait queue and inode.
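
The extra "nested" argument to sk_receive_skb() is forwarded to the socket-lock annotation, so a caller that may already hold another socket's lock can tell lockdep the second acquisition is intentional. A hedged usage sketch, with a hypothetical wrapper name:

/*
 * Hedged sketch: ordinary receive paths pass nested == 0; a path that
 * may already hold another sk_lock (e.g. delivery through an
 * encapsulating socket) passes 1 to silence false lockdep reports.
 */
static int example_deliver(struct sock *sk, struct sk_buff *skb, int encapsulated)
{
	return sk_receive_skb(sk, skb, encapsulated ? 1 : 0);
}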
@@ -1077,7 +1109,7 @@ static inline int skb_copy_to_page(struct sock *sk, char __user *from,
{
if (skb->ip_summed == CHECKSUM_NONE) {
int err = 0;
- unsigned int csum = csum_and_copy_from_user(from,
+ __wsum csum = csum_and_copy_from_user(from,
page_address(page) + off,
copy, 0, &err);
if (err)
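
Typing the local variable as __wsum lets sparse check that the partial checksum returned by csum_and_copy_from_user() is only combined through the checksum helpers. A hedged sketch of that folding step, with a hypothetical helper name; the real continuation of skb_copy_to_page() is not quoted here.

/*
 * Hedged sketch: fold the copied chunk's partial checksum back into
 * the skb using the typed helper, which keeps sparse happy.
 */
static inline void example_fold_copied_csum(struct sk_buff *skb, __wsum csum)
{
	skb->csum = csum_block_add(skb->csum, csum, skb->len);
}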