author	Arnaldo Carvalho de Melo <acme@redhat.com>	2007-11-21 22:08:50 +0800
committer	David S. Miller <davem@davemloft.net>	2008-01-28 14:54:40 -0800
commit	ebb53d75657f86587ac8cf3e38ab0c860a8e3d4f
tree	2b1d63cb8ee54b19589d6d13a693147001e642ad
parent	de4d1db369785c29d68915edfee0cb70e8199f4c
[NET] proto: Use pcounters for the inuse field

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--   include/net/sock.h   59
-rw-r--r--   net/core/sock.c      61
2 files changed, 14 insertions, 106 deletions
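
To illustrate what the conversion means for protocol code, here is a minimal
sketch (not part of the patch) of a hypothetical protocol using the interfaces
touched below. The protocol name "example", its hash/unhash callbacks and the
field values are invented for illustration; DEFINE_PROTO_INUSE(),
REF_PROTO_INUSE(), sock_prot_inc_use(), sock_prot_dec_use() and
sock_prot_inuse() are the macros and helpers the patch rewires onto
struct pcounter, so their users need no source changes.

#include <net/sock.h>

/* Per-protocol inuse storage; DEFINE_PROTO_INUSE() now expands to
 * DEFINE_PCOUNTER(example) instead of a hand-rolled per-CPU variable
 * plus example_inuse_add()/example_inuse_getval() helpers. */
DEFINE_PROTO_INUSE(example)

/* Hash/unhash paths keep calling the same helpers, with local bh disabled;
 * they now boil down to pcounter_add(&prot->inuse, +1 / -1). */
static void example_hash(struct sock *sk)
{
	/* ... insert sk into the protocol's lookup table ... */
	sock_prot_inc_use(sk->sk_prot);
}

static void example_unhash(struct sock *sk)
{
	/* ... remove sk from the protocol's lookup table ... */
	sock_prot_dec_use(sk->sk_prot);
}

static struct proto example_prot = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),
	.hash		= example_hash,
	.unhash		= example_unhash,
	/* Now expands to PCOUNTER_MEMBER_INITIALIZER(example, .inuse),
	 * wiring the static per-CPU storage above into prot->inuse. */
	REF_PROTO_INUSE(example)
};

Code that reports socket counts keeps calling sock_prot_inuse(), which now
folds the per-CPU values through pcounter_getval().
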
diff --git a/include/net/sock.h b/include/net/sock.h
index 9c55af8e5f8..e329d05f799 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -47,6 +47,7 @@
#include <linux/module.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
+#include <linux/pcounter.h>
#include <linux/skbuff.h> /* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>
@@ -565,14 +566,9 @@ struct proto {
void (*unhash)(struct sock *sk);
int (*get_port)(struct sock *sk, unsigned short snum);
-#ifdef CONFIG_SMP
/* Keeping track of sockets in use */
- void (*inuse_add)(struct proto *prot, int inc);
- int (*inuse_getval)(const struct proto *prot);
- int *inuse_ptr;
-#else
- int inuse;
-#endif
+ struct pcounter inuse;
+
/* Memory pressure */
void (*enter_memory_pressure)(void);
atomic_t *memory_allocated; /* Current allocated memory. */
@@ -607,35 +603,8 @@ struct proto {
#endif
};
-/*
- * Special macros to let protos use a fast version of inuse{get|add}
- * using a static percpu variable per proto instead of an allocated one,
- * saving one dereference.
- * This might be changed if/when dynamic percpu vars become fast.
- */
-#ifdef CONFIG_SMP
-# define DEFINE_PROTO_INUSE(NAME) \
-static DEFINE_PER_CPU(int, NAME##_inuse); \
-static void NAME##_inuse_add(struct proto *prot, int inc) \
-{ \
- __get_cpu_var(NAME##_inuse) += inc; \
-} \
- \
-static int NAME##_inuse_getval(const struct proto *prot)\
-{ \
- int res = 0, cpu; \
- \
- for_each_possible_cpu(cpu) \
- res += per_cpu(NAME##_inuse, cpu); \
- return res; \
-}
-# define REF_PROTO_INUSE(NAME) \
- .inuse_add = NAME##_inuse_add, \
- .inuse_getval = NAME##_inuse_getval,
-#else
-# define DEFINE_PROTO_INUSE(NAME)
-# define REF_PROTO_INUSE(NAME)
-#endif
+#define DEFINE_PROTO_INUSE(NAME) DEFINE_PCOUNTER(NAME)
+#define REF_PROTO_INUSE(NAME) PCOUNTER_MEMBER_INITIALIZER(NAME, .inuse)
extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);
@@ -668,29 +637,17 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
/* Called with local bh disabled */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
-#ifdef CONFIG_SMP
- prot->inuse_add(prot, 1);
-#else
- prot->inuse++;
-#endif
+ pcounter_add(&prot->inuse, 1);
}
static __inline__ void sock_prot_dec_use(struct proto *prot)
{
-#ifdef CONFIG_SMP
- prot->inuse_add(prot, -1);
-#else
- prot->inuse--;
-#endif
+ pcounter_add(&prot->inuse, -1);
}
static __inline__ int sock_prot_inuse(struct proto *proto)
{
-#ifdef CONFIG_SMP
- return proto->inuse_getval(proto);
-#else
- return proto->inuse;
-#endif
+ return pcounter_getval(&proto->inuse);
}
/* With per-bucket locks this operation is not-atomic, so that
diff --git a/net/core/sock.c b/net/core/sock.c
index c9305a86176..eac7aa0721d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1804,65 +1804,15 @@ EXPORT_SYMBOL(sk_common_release);
static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);
-#ifdef CONFIG_SMP
-/*
- * Define default functions to keep track of inuse sockets per protocol
- * Note that often used protocols use dedicated functions to get a speed increase.
- * (see DEFINE_PROTO_INUSE/REF_PROTO_INUSE)
- */
-static void inuse_add(struct proto *prot, int inc)
-{
- per_cpu_ptr(prot->inuse_ptr, smp_processor_id())[0] += inc;
-}
-
-static int inuse_get(const struct proto *prot)
-{
- int res = 0, cpu;
- for_each_possible_cpu(cpu)
- res += per_cpu_ptr(prot->inuse_ptr, cpu)[0];
- return res;
-}
-
-static int inuse_init(struct proto *prot)
-{
- if (!prot->inuse_getval || !prot->inuse_add) {
- prot->inuse_ptr = alloc_percpu(int);
- if (prot->inuse_ptr == NULL)
- return -ENOBUFS;
-
- prot->inuse_getval = inuse_get;
- prot->inuse_add = inuse_add;
- }
- return 0;
-}
-
-static void inuse_fini(struct proto *prot)
-{
- if (prot->inuse_ptr != NULL) {
- free_percpu(prot->inuse_ptr);
- prot->inuse_ptr = NULL;
- prot->inuse_getval = NULL;
- prot->inuse_add = NULL;
- }
-}
-#else
-static inline int inuse_init(struct proto *prot)
-{
- return 0;
-}
-
-static inline void inuse_fini(struct proto *prot)
-{
-}
-#endif
-
int proto_register(struct proto *prot, int alloc_slab)
{
char *request_sock_slab_name = NULL;
char *timewait_sock_slab_name;
- if (inuse_init(prot))
+ if (pcounter_alloc(&prot->inuse) != 0) {
+ printk(KERN_CRIT "%s: Can't alloc inuse counters!\n", prot->name);
goto out;
+ }
if (alloc_slab) {
prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
@@ -1930,7 +1880,7 @@ out_free_sock_slab:
kmem_cache_destroy(prot->slab);
prot->slab = NULL;
out_free_inuse:
- inuse_fini(prot);
+ pcounter_free(&prot->inuse);
out:
return -ENOBUFS;
}
@@ -1943,7 +1893,8 @@ void proto_unregister(struct proto *prot)
list_del(&prot->node);
write_unlock(&proto_list_lock);
- inuse_fini(prot);
+ pcounter_free(&prot->inuse);
+
if (prot->slab != NULL) {
kmem_cache_destroy(prot->slab);
prot->slab = NULL;
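
On the net/core/sock.c side the patch only changes the counter's lifecycle:
the generic inuse_init()/inuse_fini() fallback (which allocated a per-CPU int
when a protocol supplied no dedicated helpers) is gone, and proto_register()/
proto_unregister() simply call pcounter_alloc()/pcounter_free() on prot->inuse.
From a protocol module's point of view nothing changes; a hedged sketch with
invented init/exit names, reusing example_prot from the sketch above:

#include <linux/module.h>
#include <net/sock.h>

static int __init example_init(void)
{
	int rc;

	/* proto_register() now allocates prot->inuse via pcounter_alloc()
	 * and returns -ENOBUFS (after a KERN_CRIT message) if that fails. */
	rc = proto_register(&example_prot, 1 /* alloc_slab */);
	if (rc)
		return rc;
	/* ... register with the address family, create /proc entries, ... */
	return 0;
}

static void __exit example_exit(void)
{
	/* ... tear down in reverse order ... */
	proto_unregister(&example_prot);	/* releases prot->inuse via pcounter_free() */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");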