author    Arnaldo Carvalho de Melo <acme@ghostprotocols.net>    2005-08-09 20:08:09 -0700
committer David S. Miller <davem@sunset.davemloft.net>          2005-08-29 15:41:49 -0700
commit    f3f05f7046e7c85b04af390d95a82a27160dd5d0 (patch)
tree      9a4a552c030ea8b2428ceee75311d73a6b339255 /include/net
parent    6e04e02165a7209a71db553b7bc48d68421e5ebf (diff)
[INET]: Generalise the tcp_listen_ lock routines
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net')
-rw-r--r--  include/net/inet_hashtables.h | 48
-rw-r--r--  include/net/tcp.h             | 21
2 files changed, 48 insertions, 21 deletions
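
The patch moves the TCP-only listen-lock helpers into the generic inet_hashtables API, parameterised by a struct inet_hashinfo instead of hard-coding tcp_hashinfo. A minimal caller-side sketch of the before/after pattern (the listening-hash walk shown here is illustrative and not part of this patch):

    /* Before: TCP-specific helpers, tcp_hashinfo implied. */
    tcp_listen_lock();
    sk_for_each(sk, node, &tcp_hashinfo.listening_hash[slot]) {
            /* ... inspect listening sockets ... */
    }
    tcp_listen_unlock();

    /* After: the same pattern works with any inet_hashinfo instance. */
    inet_listen_lock(&tcp_hashinfo);
    sk_for_each(sk, node, &tcp_hashinfo.listening_hash[slot]) {
            /* ... inspect listening sockets ... */
    }
    inet_listen_unlock(&tcp_hashinfo);
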
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index da07411b36d..f5d65121f7b 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -19,10 +19,14 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/tcp.h> /* only for TCP_LISTEN, damn :-( */
#include <linux/types.h>
+#include <linux/wait.h>
#include <net/sock.h>
+#include <asm/atomic.h>
+
/* This is for all connections with a full identity, no wildcards.
* New scheme, half the table is for TIME_WAIT, the other half is
* for the rest. I'll experiment with dynamic table growth later.
@@ -192,4 +196,48 @@ static inline void inet_inherit_port(struct inet_hashinfo *table,
extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk);
+extern void inet_listen_wlock(struct inet_hashinfo *hashinfo);
+
+/*
+ * - We may sleep inside this lock.
+ * - If sleeping is not required (or called from BH),
+ * use plain read_(un)lock(&inet_hashinfo.lhash_lock).
+ */
+static inline void inet_listen_lock(struct inet_hashinfo *hashinfo)
+{
+ /* read_lock synchronizes to candidates to writers */
+ read_lock(&hashinfo->lhash_lock);
+ atomic_inc(&hashinfo->lhash_users);
+ read_unlock(&hashinfo->lhash_lock);
+}
+
+static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
+{
+ if (atomic_dec_and_test(&hashinfo->lhash_users))
+ wake_up(&hashinfo->lhash_wait);
+}
+
+static inline void __inet_hash(struct inet_hashinfo *hashinfo,
+ struct sock *sk, const int listen_possible)
+{
+ struct hlist_head *list;
+ rwlock_t *lock;
+
+ BUG_TRAP(sk_unhashed(sk));
+ if (listen_possible && sk->sk_state == TCP_LISTEN) {
+ list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
+ lock = &hashinfo->lhash_lock;
+ inet_listen_wlock(hashinfo);
+ } else {
+ sk->sk_hashent = inet_sk_ehashfn(sk, hashinfo->ehash_size);
+ list = &hashinfo->ehash[sk->sk_hashent].chain;
+ lock = &hashinfo->ehash[sk->sk_hashent].lock;
+ write_lock(lock);
+ }
+ __sk_add_node(sk, list);
+ sock_prot_inc_use(sk->sk_prot);
+ write_unlock(lock);
+ if (listen_possible && sk->sk_state == TCP_LISTEN)
+ wake_up(&hashinfo->lhash_wait);
+}
#endif /* _INET_HASHTABLES_H */
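
inet_listen_wlock() is only declared above; its definition lives in a .c file. Assuming it keeps the behaviour of the tcp_listen_wlock() it generalises, the writer side takes lhash_lock for writing and then sleeps on lhash_wait until lhash_users has drained to zero. A simplified sketch (locking flavour and details may differ from the real implementation):

    void inet_listen_wlock(struct inet_hashinfo *hashinfo)
    {
            /* Holding lhash_lock for writing stops new readers from
             * entering inet_listen_lock() and bumping lhash_users. */
            write_lock(&hashinfo->lhash_lock);

            if (atomic_read(&hashinfo->lhash_users)) {
                    DEFINE_WAIT(wait);

                    /* Wait out the readers that already hold a reference. */
                    for (;;) {
                            prepare_to_wait_exclusive(&hashinfo->lhash_wait,
                                                      &wait, TASK_UNINTERRUPTIBLE);
                            if (!atomic_read(&hashinfo->lhash_users))
                                    break;
                            write_unlock(&hashinfo->lhash_lock);
                            schedule();
                            write_lock(&hashinfo->lhash_lock);
                    }
                    finish_wait(&hashinfo->lhash_wait, &wait);
            }
            /* Returns with lhash_lock held for writing. */
    }

This split keeps the reader side cheap: inet_listen_lock() only holds lhash_lock long enough to bump lhash_users, so lookups (including BH-context callers using plain read_lock) never sleep, while a writer rehashing a listening socket waits for all outstanding users before touching listening_hash.
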
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 99e47695d4b..bc110cc7022 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1447,27 +1447,6 @@ static __inline__ void tcp_openreq_init(struct request_sock *req,
extern void tcp_enter_memory_pressure(void);
-extern void tcp_listen_wlock(void);
-
-/* - We may sleep inside this lock.
- * - If sleeping is not required (or called from BH),
- * use plain read_(un)lock(&inet_hashinfo.lhash_lock).
- */
-
-static inline void tcp_listen_lock(void)
-{
- /* read_lock synchronizes to candidates to writers */
- read_lock(&tcp_hashinfo.lhash_lock);
- atomic_inc(&tcp_hashinfo.lhash_users);
- read_unlock(&tcp_hashinfo.lhash_lock);
-}
-
-static inline void tcp_listen_unlock(void)
-{
- if (atomic_dec_and_test(&tcp_hashinfo.lhash_users))
- wake_up(&tcp_hashinfo.lhash_wait);
-}
-
static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;