author    | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-13 16:46:18 -0700
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-13 16:46:18 -0700
commit    | 16cefa8c3863721fd40445a1b34dea18cd16ccfe
tree      | c8e58ca06e2edfd667d3e6062a642b80cc58e5e7 /net
parent    | 4fbef206daead133085fe33905f5e842d38fb8da
parent    | d8558f99fbc5ef5d4ae76b893784005056450f82
Merge git://git.linux-nfs.org/pub/linux/nfs-2.6
* git://git.linux-nfs.org/pub/linux/nfs-2.6: (122 commits)
sunrpc: drop BKL around wrap and unwrap
NFSv4: Make sure unlock is really an unlock when cancelling a lock
NLM: fix source address of callback to client
SUNRPC client: add interface for binding to a local address
SUNRPC server: record the destination address of a request
SUNRPC: cleanup transport creation argument passing
NFSv4: Make the NFS state model work with the nosharedcache mount option
NFS: Error when mounting the same filesystem with different options
NFS: Add the mount option "nosharecache"
NFS: Add support for mounting NFSv4 file systems with string options
NFS: Add final pieces to support in-kernel mount option parsing
NFS: Introduce generic mount client API
NFS: Add enums and match tables for mount option parsing
NFS: Improve debugging output in NFS in-kernel mount client
NFS: Clean up in-kernel NFS mount
NFS: Remake nfsroot_mount as a permanent part of NFS client
SUNRPC: Add a convenient default for the hostname when calling rpc_create()
SUNRPC: Rename rpcb_getport to be consistent with new rpcb_getport_sync name
SUNRPC: Rename rpcb_getport_external routine
SUNRPC: Allow rpcbind requests to be interrupted by a signal.
...
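
Several of the commits listed above rework how kernel RPC clients are created: rpc_create() gains a source-address argument so NLM callbacks can bind to a specific local address, and when no hostname is supplied a dotted-quad string is synthesized from the destination address (see the clnt.c hunks below). A minimal caller sketch under those assumptions follows; the .program, .version and .authflavor fields do not appear in this diff and are assumed from the pre-existing rpc_create_args layout.

#include <linux/in.h>
#include <linux/sunrpc/clnt.h>

extern struct rpc_program example_program;	/* hypothetical caller-supplied program */

/*
 * Hedged sketch, not code from this merge: create a UDP client
 * bound to a specific source address. .saddress and the NULL
 * .servername default follow the clnt.c hunks below; the other
 * field values are illustrative.
 */
static struct rpc_clnt *example_create_bound_client(struct sockaddr_in *server,
						    struct sockaddr_in *local)
{
	struct rpc_create_args args = {
		.protocol	= IPPROTO_UDP,
		.address	= (struct sockaddr *)server,
		.addrsize	= sizeof(*server),
		.saddress	= (struct sockaddr *)local,	/* new in this merge */
		.servername	= NULL,		/* NULL => "a.b.c.d" synthesized */
		.program	= &example_program,		/* assumption */
		.version	= 3,				/* assumption */
		.authflavor	= RPC_AUTH_UNIX,		/* assumption */
		.flags		= RPC_CLNT_CREATE_AUTOBIND,
	};

	return rpc_create(&args);
}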
Diffstat (limited to 'net')
-rw-r--r-- | net/sunrpc/auth.c                    | 370
-rw-r--r-- | net/sunrpc/auth_gss/auth_gss.c       | 349
-rw-r--r-- | net/sunrpc/auth_gss/gss_krb5_mech.c  |   2
-rw-r--r-- | net/sunrpc/auth_gss/gss_spkm3_mech.c |   2
-rw-r--r-- | net/sunrpc/auth_null.c               |  10
-rw-r--r-- | net/sunrpc/auth_unix.c               |  54
-rw-r--r-- | net/sunrpc/clnt.c                    | 371
-rw-r--r-- | net/sunrpc/rpc_pipe.c                |  80
-rw-r--r-- | net/sunrpc/rpcb_clnt.c               |  65
-rw-r--r-- | net/sunrpc/sched.c                   | 209
-rw-r--r-- | net/sunrpc/sunrpc_syms.c             |   8
-rw-r--r-- | net/sunrpc/svcsock.c                 |  20
-rw-r--r-- | net/sunrpc/xprt.c                    |  19
-rw-r--r-- | net/sunrpc/xprtsock.c                |  81
14 files changed, 980 insertions, 660 deletions
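
The auth.c rewrite that opens the diff below replaces the single global credential-cache lock with RCU-protected hash chains, a per-cache spinlock, and a global LRU of unused credentials. Distilled (not verbatim) from those hunks, the new lookup fast path works like this: readers walk the chain under rcu_read_lock() and take the cache lock only to confirm the entry is still hashed before taking a reference.

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/sunrpc/auth.h>

/*
 * Commented sketch of the reader side of rpcauth_lookup_credcache()
 * as rewritten below; helper and field names mirror the diff, the
 * wrapper function itself is illustrative.
 */
static struct rpc_cred *lookup_fast_path(struct rpc_cred_cache *cache,
					 struct auth_cred *acred,
					 unsigned int nr, int flags)
{
	struct rpc_cred *cred = NULL, *entry;
	struct hlist_node *pos;

	rcu_read_lock();
	hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) {
		if (!entry->cr_ops->crmatch(acred, entry, flags))
			continue;
		spin_lock(&cache->lock);
		if (test_bit(RPCAUTH_CRED_HASHED, &entry->cr_flags) == 0) {
			/* Lost a race with eviction; keep scanning. */
			spin_unlock(&cache->lock);
			continue;
		}
		cred = get_rpccred(entry);	/* reference taken under the lock */
		spin_unlock(&cache->lock);
		break;
	}
	rcu_read_unlock();
	return cred;	/* NULL => slow path allocates and re-checks */
}

If the fast path misses, the slow path allocates a new credential, retakes cache->lock, and re-scans the chain so that a concurrent inserter wins and the duplicate goes onto a free list — the optimistic-insert pattern visible in the same hunk.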
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 9527f2bb174..aa55d0a03e6 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -13,17 +13,22 @@ #include <linux/errno.h> #include <linux/sunrpc/clnt.h> #include <linux/spinlock.h> +#include <linux/smp_lock.h> #ifdef RPC_DEBUG # define RPCDBG_FACILITY RPCDBG_AUTH #endif -static struct rpc_authops * auth_flavors[RPC_AUTH_MAXFLAVOR] = { +static DEFINE_SPINLOCK(rpc_authflavor_lock); +static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = { &authnull_ops, /* AUTH_NULL */ &authunix_ops, /* AUTH_UNIX */ NULL, /* others can be loadable modules */ }; +static LIST_HEAD(cred_unused); +static unsigned long number_cred_unused; + static u32 pseudoflavor_to_flavor(u32 flavor) { if (flavor >= RPC_AUTH_MAXFLAVOR) @@ -32,55 +37,67 @@ pseudoflavor_to_flavor(u32 flavor) { } int -rpcauth_register(struct rpc_authops *ops) +rpcauth_register(const struct rpc_authops *ops) { rpc_authflavor_t flavor; + int ret = -EPERM; if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR) return -EINVAL; - if (auth_flavors[flavor] != NULL) - return -EPERM; /* what else? */ - auth_flavors[flavor] = ops; - return 0; + spin_lock(&rpc_authflavor_lock); + if (auth_flavors[flavor] == NULL) { + auth_flavors[flavor] = ops; + ret = 0; + } + spin_unlock(&rpc_authflavor_lock); + return ret; } int -rpcauth_unregister(struct rpc_authops *ops) +rpcauth_unregister(const struct rpc_authops *ops) { rpc_authflavor_t flavor; + int ret = -EPERM; if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR) return -EINVAL; - if (auth_flavors[flavor] != ops) - return -EPERM; /* what else? */ - auth_flavors[flavor] = NULL; - return 0; + spin_lock(&rpc_authflavor_lock); + if (auth_flavors[flavor] == ops) { + auth_flavors[flavor] = NULL; + ret = 0; + } + spin_unlock(&rpc_authflavor_lock); + return ret; } struct rpc_auth * rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt) { struct rpc_auth *auth; - struct rpc_authops *ops; + const struct rpc_authops *ops; u32 flavor = pseudoflavor_to_flavor(pseudoflavor); auth = ERR_PTR(-EINVAL); if (flavor >= RPC_AUTH_MAXFLAVOR) goto out; - /* FIXME - auth_flavors[] really needs an rw lock, - * and module refcounting. 
*/ #ifdef CONFIG_KMOD if ((ops = auth_flavors[flavor]) == NULL) request_module("rpc-auth-%u", flavor); #endif - if ((ops = auth_flavors[flavor]) == NULL) + spin_lock(&rpc_authflavor_lock); + ops = auth_flavors[flavor]; + if (ops == NULL || !try_module_get(ops->owner)) { + spin_unlock(&rpc_authflavor_lock); goto out; + } + spin_unlock(&rpc_authflavor_lock); auth = ops->create(clnt, pseudoflavor); + module_put(ops->owner); if (IS_ERR(auth)) return auth; if (clnt->cl_auth) - rpcauth_destroy(clnt->cl_auth); + rpcauth_release(clnt->cl_auth); clnt->cl_auth = auth; out: @@ -88,7 +105,7 @@ out: } void -rpcauth_destroy(struct rpc_auth *auth) +rpcauth_release(struct rpc_auth *auth) { if (!atomic_dec_and_test(&auth->au_count)) return; @@ -97,11 +114,31 @@ rpcauth_destroy(struct rpc_auth *auth) static DEFINE_SPINLOCK(rpc_credcache_lock); +static void +rpcauth_unhash_cred_locked(struct rpc_cred *cred) +{ + hlist_del_rcu(&cred->cr_hash); + smp_mb__before_clear_bit(); + clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); +} + +static void +rpcauth_unhash_cred(struct rpc_cred *cred) +{ + spinlock_t *cache_lock; + + cache_lock = &cred->cr_auth->au_credcache->lock; + spin_lock(cache_lock); + if (atomic_read(&cred->cr_count) == 0) + rpcauth_unhash_cred_locked(cred); + spin_unlock(cache_lock); +} + /* * Initialize RPC credential cache */ int -rpcauth_init_credcache(struct rpc_auth *auth, unsigned long expire) +rpcauth_init_credcache(struct rpc_auth *auth) { struct rpc_cred_cache *new; int i; @@ -111,8 +148,7 @@ rpcauth_init_credcache(struct rpc_auth *auth, unsigned long expire) return -ENOMEM; for (i = 0; i < RPC_CREDCACHE_NR; i++) INIT_HLIST_HEAD(&new->hashtable[i]); - new->expire = expire; - new->nextgc = jiffies + (expire >> 1); + spin_lock_init(&new->lock); auth->au_credcache = new; return 0; } @@ -121,13 +157,13 @@ rpcauth_init_credcache(struct rpc_auth *auth, unsigned long expire) * Destroy a list of credentials */ static inline -void rpcauth_destroy_credlist(struct hlist_head *head) +void rpcauth_destroy_credlist(struct list_head *head) { struct rpc_cred *cred; - while (!hlist_empty(head)) { - cred = hlist_entry(head->first, struct rpc_cred, cr_hash); - hlist_del_init(&cred->cr_hash); + while (!list_empty(head)) { + cred = list_entry(head->next, struct rpc_cred, cr_lru); + list_del_init(&cred->cr_lru); put_rpccred(cred); } } @@ -137,58 +173,95 @@ void rpcauth_destroy_credlist(struct hlist_head *head) * that are not referenced. 
*/ void -rpcauth_free_credcache(struct rpc_auth *auth) +rpcauth_clear_credcache(struct rpc_cred_cache *cache) { - struct rpc_cred_cache *cache = auth->au_credcache; - HLIST_HEAD(free); - struct hlist_node *pos, *next; + LIST_HEAD(free); + struct hlist_head *head; struct rpc_cred *cred; int i; spin_lock(&rpc_credcache_lock); + spin_lock(&cache->lock); for (i = 0; i < RPC_CREDCACHE_NR; i++) { - hlist_for_each_safe(pos, next, &cache->hashtable[i]) { - cred = hlist_entry(pos, struct rpc_cred, cr_hash); - __hlist_del(&cred->cr_hash); - hlist_add_head(&cred->cr_hash, &free); + head = &cache->hashtable[i]; + while (!hlist_empty(head)) { + cred = hlist_entry(head->first, struct rpc_cred, cr_hash); + get_rpccred(cred); + if (!list_empty(&cred->cr_lru)) { + list_del(&cred->cr_lru); + number_cred_unused--; + } + list_add_tail(&cred->cr_lru, &free); + rpcauth_unhash_cred_locked(cred); } } + spin_unlock(&cache->lock); spin_unlock(&rpc_credcache_lock); rpcauth_destroy_credlist(&free); } -static void -rpcauth_prune_expired(struct rpc_auth *auth, struct rpc_cred *cred, struct hlist_head *free) +/* + * Destroy the RPC credential cache + */ +void +rpcauth_destroy_credcache(struct rpc_auth *auth) { - if (atomic_read(&cred->cr_count) != 1) - return; - if (time_after(jiffies, cred->cr_expire + auth->au_credcache->expire)) - cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; - if (!(cred->cr_flags & RPCAUTH_CRED_UPTODATE)) { - __hlist_del(&cred->cr_hash); - hlist_add_head(&cred->cr_hash, free); + struct rpc_cred_cache *cache = auth->au_credcache; + + if (cache) { + auth->au_credcache = NULL; + rpcauth_clear_credcache(cache); + kfree(cache); } } /* * Remove stale credentials. Avoid sleeping inside the loop. */ -static void -rpcauth_gc_credcache(struct rpc_auth *auth, struct hlist_head *free) +static int +rpcauth_prune_expired(struct list_head *free, int nr_to_scan) { - struct rpc_cred_cache *cache = auth->au_credcache; - struct hlist_node *pos, *next; - struct rpc_cred *cred; - int i; + spinlock_t *cache_lock; + struct rpc_cred *cred; - dprintk("RPC: gc'ing RPC credentials for auth %p\n", auth); - for (i = 0; i < RPC_CREDCACHE_NR; i++) { - hlist_for_each_safe(pos, next, &cache->hashtable[i]) { - cred = hlist_entry(pos, struct rpc_cred, cr_hash); - rpcauth_prune_expired(auth, cred, free); + while (!list_empty(&cred_unused)) { + cred = list_entry(cred_unused.next, struct rpc_cred, cr_lru); + list_del_init(&cred->cr_lru); + number_cred_unused--; + if (atomic_read(&cred->cr_count) != 0) + continue; + cache_lock = &cred->cr_auth->au_credcache->lock; + spin_lock(cache_lock); + if (atomic_read(&cred->cr_count) == 0) { + get_rpccred(cred); + list_add_tail(&cred->cr_lru, free); + rpcauth_unhash_cred_locked(cred); + nr_to_scan--; } + spin_unlock(cache_lock); + if (nr_to_scan == 0) + break; } - cache->nextgc = jiffies + cache->expire; + return nr_to_scan; +} + +/* + * Run memory cache shrinker. 
+ */ +static int +rpcauth_cache_shrinker(int nr_to_scan, gfp_t gfp_mask) +{ + LIST_HEAD(free); + int res; + + if (list_empty(&cred_unused)) + return 0; + spin_lock(&rpc_credcache_lock); + nr_to_scan = rpcauth_prune_expired(&free, nr_to_scan); + res = (number_cred_unused / 100) * sysctl_vfs_cache_pressure; + spin_unlock(&rpc_credcache_lock); + rpcauth_destroy_credlist(&free); + return res; } /* @@ -198,53 +271,56 @@ struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, int flags) { + LIST_HEAD(free); struct rpc_cred_cache *cache = auth->au_credcache; - HLIST_HEAD(free); - struct hlist_node *pos, *next; - struct rpc_cred *new = NULL, - *cred = NULL; + struct hlist_node *pos; + struct rpc_cred *cred = NULL, + *entry, *new; int nr = 0; if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) nr = acred->uid & RPC_CREDCACHE_MASK; -retry: - spin_lock(&rpc_credcache_lock); - if (time_before(cache->nextgc, jiffies)) - rpcauth_gc_credcache(auth, &free); - hlist_for_each_safe(pos, next, &cache->hashtable[nr]) { - struct rpc_cred *entry; - entry = hlist_entry(pos, struct rpc_cred, cr_hash); - if (entry->cr_ops->crmatch(acred, entry, flags)) { - hlist_del(&entry->cr_hash); - cred = entry; - break; + + rcu_read_lock(); + hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) { + if (!entry->cr_ops->crmatch(acred, entry, flags)) + continue; + spin_lock(&cache->lock); + if (test_bit(RPCAUTH_CRED_HASHED, &entry->cr_flags) == 0) { + spin_unlock(&cache->lock); + continue; } - rpcauth_prune_expired(auth, entry, &free); + cred = get_rpccred(entry); + spin_unlock(&cache->lock); + break; } - if (new) { - if (cred) - hlist_add_head(&new->cr_hash, &free); - else - cred = new; - } - if (cred) { - hlist_add_head(&cred->cr_hash, &cache->hashtable[nr]); - get_rpccred(cred); - } - spin_unlock(&rpc_credcache_lock); + rcu_read_unlock(); - rpcauth_destroy_credlist(&free); + if (cred != NULL) + goto found; - if (!cred) { - new = auth->au_ops->crcreate(auth, acred, flags); - if (!IS_ERR(new)) { -#ifdef RPC_DEBUG - new->cr_magic = RPCAUTH_CRED_MAGIC; -#endif - goto retry; - } else - cred = new; - } else if ((cred->cr_flags & RPCAUTH_CRED_NEW) + new = auth->au_ops->crcreate(auth, acred, flags); + if (IS_ERR(new)) { + cred = new; + goto out; + } + + spin_lock(&cache->lock); + hlist_for_each_entry(entry, pos, &cache->hashtable[nr], cr_hash) { + if (!entry->cr_ops->crmatch(acred, entry, flags)) + continue; + cred = get_rpccred(entry); + break; + } + if (cred == NULL) { + cred = new; + set_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); + hlist_add_head_rcu(&cred->cr_hash, &cache->hashtable[nr]); + } else + list_add_tail(&new->cr_lru, &free); + spin_unlock(&cache->lock); +found: + if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) && cred->cr_ops->cr_init != NULL && !(flags & RPCAUTH_LOOKUP_NEW)) { int res = cred->cr_ops->cr_init(auth, cred); @@ -253,8 +329,9 @@ retry: cred = ERR_PTR(res); } } - - return (struct rpc_cred *) cred; + rpcauth_destroy_credlist(&free); +out: + return cred; } struct rpc_cred * @@ -275,10 +352,27 @@ rpcauth_lookupcred(struct rpc_auth *auth, int flags) return ret; } +void +rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred, + struct rpc_auth *auth, const struct rpc_credops *ops) +{ + INIT_HLIST_NODE(&cred->cr_hash); + INIT_LIST_HEAD(&cred->cr_lru); + atomic_set(&cred->cr_count, 1); + cred->cr_auth = auth; + cred->cr_ops = ops; + cred->cr_expire = jiffies; +#ifdef RPC_DEBUG + cred->cr_magic = RPCAUTH_CRED_MAGIC; +#endif + cred->cr_uid = acred->uid; +} 
+EXPORT_SYMBOL(rpcauth_init_cred); + struct rpc_cred * rpcauth_bindcred(struct rpc_task *task) { - struct rpc_auth *auth = task->tk_auth; + struct rpc_auth *auth = task->tk_client->cl_auth; struct auth_cred acred = { .uid = current->fsuid, .gid = current->fsgid, @@ -288,7 +382,7 @@ rpcauth_bindcred(struct rpc_task *task) int flags = 0; dprintk("RPC: %5u looking up %s cred\n", - task->tk_pid, task->tk_auth->au_ops->au_name); + task->tk_pid, task->tk_client->cl_auth->au_ops->au_name); get_group_info(acred.group_info); if (task->tk_flags & RPC_TASK_ROOTCREDS) flags |= RPCAUTH_LOOKUP_ROOTCREDS; @@ -304,19 +398,42 @@ rpcauth_bindcred(struct rpc_task *task) void rpcauth_holdcred(struct rpc_task *task) { - dprintk("RPC: %5u holding %s cred %p\n", - task->tk_pid, task->tk_auth->au_ops->au_name, - task->tk_msg.rpc_cred); - if (task->tk_msg.rpc_cred) - get_rpccred(task->tk_msg.rpc_cred); + struct rpc_cred *cred = task->tk_msg.rpc_cred; + if (cred != NULL) { + get_rpccred(cred); + dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid, + cred->cr_auth->au_ops->au_name, cred); + } } void put_rpccred(struct rpc_cred *cred) { - cred->cr_expire = jiffies; + /* Fast path for unhashed credentials */ + if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) + goto need_lock; + if (!atomic_dec_and_test(&cred->cr_count)) return; + goto out_destroy; +need_lock: + if (!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock)) + return; + if (!list_empty(&cred->cr_lru)) { + number_cred_unused--; + list_del_init(&cred->cr_lru); + } + if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0) + rpcauth_unhash_cred(cred); + else if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) { + cred->cr_expire = jiffies; + list_add_tail(&cred->cr_lru, &cred_unused); + number_cred_unused++; + spin_unlock(&rpc_credcache_lock); + return; + } + spin_unlock(&rpc_credcache_lock); +out_destroy: cred->cr_ops->crdestroy(cred); } @@ -326,7 +443,7 @@ rpcauth_unbindcred(struct rpc_task *task) struct rpc_cred *cred = task->tk_msg.rpc_cred; dprintk("RPC: %5u releasing %s cred %p\n", - task->tk_pid, task->tk_auth->au_ops->au_name, cred); + task->tk_pid, cred->cr_auth->au_ops->au_name, cred); put_rpccred(cred); task->tk_msg.rpc_cred = NULL; @@ -338,7 +455,7 @@ rpcauth_marshcred(struct rpc_task *task, __be32 *p) struct rpc_cred *cred = task->tk_msg.rpc_cred; dprintk("RPC: %5u marshaling %s cred %p\n", - task->tk_pid, task->tk_auth->au_ops->au_name, cred); + task->tk_pid, cred->cr_auth->au_ops->au_name, cred); return cred->cr_ops->crmarshal(task, p); } @@ -349,7 +466,7 @@ rpcauth_checkverf(struct rpc_task *task, __be32 *p) struct rpc_cred *cred = task->tk_msg.rpc_cred; dprintk("RPC: %5u validating %s cred %p\n", - task->tk_pid, task->tk_auth->au_ops->au_name, cred); + task->tk_pid, cred->cr_auth->au_ops->au_name, cred); return cred->cr_ops->crvalidate(task, p); } @@ -359,13 +476,17 @@ rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, __be32 *data, void *obj) { struct rpc_cred *cred = task->tk_msg.rpc_cred; + int ret; dprintk("RPC: %5u using %s cred %p to wrap rpc data\n", task->tk_pid, cred->cr_ops->cr_name, cred); if (cred->cr_ops->crwrap_req) return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); /* By default, we encode the arguments normally. 
*/ - return encode(rqstp, data, obj); + lock_kernel(); + ret = encode(rqstp, data, obj); + unlock_kernel(); + return ret; } int @@ -373,6 +494,7 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, __be32 *data, void *obj) { struct rpc_cred *cred = task->tk_msg.rpc_cred; + int ret; dprintk("RPC: %5u using %s cred %p to unwrap rpc data\n", task->tk_pid, cred->cr_ops->cr_name, cred); @@ -380,7 +502,10 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, return cred->cr_ops->crunwrap_resp(task, decode, rqstp, data, obj); /* By default, we decode the arguments normally. */ - return decode(rqstp, data, obj); + lock_kernel(); + ret = decode(rqstp, data, obj); + unlock_kernel(); + return ret; } int @@ -390,7 +515,7 @@ rpcauth_refreshcred(struct rpc_task *task) int err; dprintk("RPC: %5u refreshing %s cred %p\n", - task->tk_pid, task->tk_auth->au_ops->au_name, cred); + task->tk_pid, cred->cr_auth->au_ops->au_name, cred); err = cred->cr_ops->crrefresh(task); if (err < 0) @@ -401,17 +526,34 @@ rpcauth_refreshcred(struct rpc_task *task) void rpcauth_invalcred(struct rpc_task *task) { + struct rpc_cred *cred = task->tk_msg.rpc_cred; + dprintk("RPC: %5u invalidating %s cred %p\n", - task->tk_pid, task->tk_auth->au_ops->au_name, task->tk_msg.rpc_cred); - spin_lock(&rpc_credcache_lock); - if (task->tk_msg.rpc_cred) - task->tk_msg.rpc_cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; - spin_unlock(&rpc_credcache_lock); + task->tk_pid, cred->cr_auth->au_ops->au_name, cred); + if (cred) + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); } int rpcauth_uptodatecred(struct rpc_task *task) { - return !(task->tk_msg.rpc_cred) || - (task->tk_msg.rpc_cred->cr_flags & RPCAUTH_CRED_UPTODATE); + struct rpc_cred *cred = task->tk_msg.rpc_cred; + + return cred == NULL || + test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0; +} + + +static struct shrinker *rpc_cred_shrinker; + +void __init rpcauth_init_module(void) +{ + rpc_init_authunix(); + rpc_cred_shrinker = set_shrinker(DEFAULT_SEEKS, rpcauth_cache_shrinker); +} + +void __exit rpcauth_remove_module(void) +{ + if (rpc_cred_shrinker != NULL) + remove_shrinker(rpc_cred_shrinker); } diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 4e4ccc5b6fe..baf4096d52d 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -54,9 +54,10 @@ #include <linux/sunrpc/gss_api.h> #include <asm/uaccess.h> -static struct rpc_authops authgss_ops; +static const struct rpc_authops authgss_ops; -static struct rpc_credops gss_credops; +static const struct rpc_credops gss_credops; +static const struct rpc_credops gss_nullops; #ifdef RPC_DEBUG # define RPCDBG_FACILITY RPCDBG_AUTH @@ -64,7 +65,6 @@ static struct rpc_credops gss_credops; #define NFS_NGROUPS 16 -#define GSS_CRED_EXPIRE (60 * HZ) /* XXX: reasonable? 
*/ #define GSS_CRED_SLACK 1024 /* XXX: unused */ /* length of a krb5 verifier (48), plus data added before arguments when * using integrity (two 4-byte integers): */ @@ -79,19 +79,16 @@ static struct rpc_credops gss_credops; /* dump the buffer in `emacs-hexl' style */ #define isprint(c) ((c > 0x1f) && (c < 0x7f)) -static DEFINE_RWLOCK(gss_ctx_lock); - struct gss_auth { + struct kref kref; struct rpc_auth rpc_auth; struct gss_api_mech *mech; enum rpc_gss_svc service; - struct list_head upcalls; struct rpc_clnt *client; struct dentry *dentry; - spinlock_t lock; }; -static void gss_destroy_ctx(struct gss_cl_ctx *); +static void gss_free_ctx(struct gss_cl_ctx *); static struct rpc_pipe_ops gss_upcall_ops; static inline struct gss_cl_ctx * @@ -105,20 +102,24 @@ static inline void gss_put_ctx(struct gss_cl_ctx *ctx) { if (atomic_dec_and_test(&ctx->count)) - gss_destroy_ctx(ctx); + gss_free_ctx(ctx); } +/* gss_cred_set_ctx: + * called by gss_upcall_callback and gss_create_upcall in order + * to set the gss context. The actual exchange of an old context + * and a new one is protected by the inode->i_lock. + */ static void gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) { struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); struct gss_cl_ctx *old; - write_lock(&gss_ctx_lock); + old = gss_cred->gc_ctx; - gss_cred->gc_ctx = ctx; - cred->cr_flags |= RPCAUTH_CRED_UPTODATE; - cred->cr_flags &= ~RPCAUTH_CRED_NEW; - write_unlock(&gss_ctx_lock); + rcu_assign_pointer(gss_cred->gc_ctx, ctx); + set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); + clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); if (old) gss_put_ctx(old); } @@ -129,10 +130,10 @@ gss_cred_is_uptodate_ctx(struct rpc_cred *cred) struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); int res = 0; - read_lock(&gss_ctx_lock); - if ((cred->cr_flags & RPCAUTH_CRED_UPTODATE) && gss_cred->gc_ctx) + rcu_read_lock(); + if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) && gss_cred->gc_ctx) res = 1; - read_unlock(&gss_ctx_lock); + rcu_read_unlock(); return res; } @@ -171,10 +172,10 @@ gss_cred_get_ctx(struct rpc_cred *cred) struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); struct gss_cl_ctx *ctx = NULL; - read_lock(&gss_ctx_lock); + rcu_read_lock(); if (gss_cred->gc_ctx) ctx = gss_get_ctx(gss_cred->gc_ctx); - read_unlock(&gss_ctx_lock); + rcu_read_unlock(); return ctx; } @@ -269,10 +270,10 @@ gss_release_msg(struct gss_upcall_msg *gss_msg) } static struct gss_upcall_msg * -__gss_find_upcall(struct gss_auth *gss_auth, uid_t uid) +__gss_find_upcall(struct rpc_inode *rpci, uid_t uid) { struct gss_upcall_msg *pos; - list_for_each_entry(pos, &gss_auth->upcalls, list) { + list_for_each_entry(pos, &rpci->in_downcall, list) { if (pos->uid != uid) continue; atomic_inc(&pos->count); @@ -290,24 +291,24 @@ __gss_find_upcall(struct gss_auth *gss_auth, uid_t uid) static inline struct gss_upcall_msg * gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg) { + struct inode *inode = gss_auth->dentry->d_inode; + struct rpc_inode *rpci = RPC_I(inode); struct gss_upcall_msg *old; - spin_lock(&gss_auth->lock); - old = __gss_find_upcall(gss_auth, gss_msg->uid); + spin_lock(&inode->i_lock); + old = __gss_find_upcall(rpci, gss_msg->uid); if (old == NULL) { atomic_inc(&gss_msg->count); - list_add(&gss_msg->list, &gss_auth->upcalls); + list_add(&gss_msg->list, &rpci->in_downcall); } else gss_msg = old; - spin_unlock(&gss_auth->lock); + spin_unlock(&inode->i_lock); return gss_msg; } static 
void __gss_unhash_msg(struct gss_upcall_msg *gss_msg) { - if (list_empty(&gss_msg->list)) - return; list_del_init(&gss_msg->list); rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); wake_up_all(&gss_msg->waitqueue); @@ -318,10 +319,14 @@ static void gss_unhash_msg(struct gss_upcall_msg *gss_msg) { struct gss_auth *gss_auth = gss_msg->auth; + struct inode *inode = gss_auth->dentry->d_inode; - spin_lock(&gss_auth->lock); - __gss_unhash_msg(gss_msg); - spin_unlock(&gss_auth->lock); + if (list_empty(&gss_msg->list)) + return; + spin_lock(&inode->i_lock); + if (!list_empty(&gss_msg->list)) + __gss_unhash_msg(gss_msg); + spin_unlock(&inode->i_lock); } static void @@ -330,16 +335,16 @@ gss_upcall_callback(struct rpc_task *task) struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred, struct gss_cred, gc_base); struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall; + struct inode *inode = gss_msg->auth->dentry->d_inode; - BUG_ON(gss_msg == NULL); + spin_lock(&inode->i_lock); if (gss_msg->ctx) gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_get_ctx(gss_msg->ctx)); else task->tk_status = gss_msg->msg.errno; - spin_lock(&gss_msg->auth->lock); gss_cred->gc_upcall = NULL; rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); - spin_unlock(&gss_msg->auth->lock); + spin_unlock(&inode->i_lock); gss_release_msg(gss_msg); } @@ -386,11 +391,12 @@ static inline int gss_refresh_upcall(struct rpc_task *task) { struct rpc_cred *cred = task->tk_msg.rpc_cred; - struct gss_auth *gss_auth = container_of(task->tk_client->cl_auth, + struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); struct gss_upcall_msg *gss_msg; + struct inode *inode = gss_auth->dentry->d_inode; int err = 0; dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, @@ -400,7 +406,7 @@ gss_refresh_upcall(struct rpc_task *task) err = PTR_ERR(gss_msg); goto out; } - spin_lock(&gss_auth->lock); + spin_lock(&inode->i_lock); if (gss_cred->gc_upcall != NULL) rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL, NULL); else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) { @@ -411,7 +417,7 @@ gss_refresh_upcall(struct rpc_task *task) rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback, NULL); } else err = gss_msg->msg.errno; - spin_unlock(&gss_auth->lock); + spin_unlock(&inode->i_lock); gss_release_msg(gss_msg); out: dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n", @@ -422,6 +428,7 @@ out: static inline int gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) { + struct inode *inode = gss_auth->dentry->d_inode; struct rpc_cred *cred = &gss_cred->gc_base; struct gss_upcall_msg *gss_msg; DEFINE_WAIT(wait); @@ -435,12 +442,11 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) } for (;;) { prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE); - spin_lock(&gss_auth->lock); + spin_lock(&inode->i_lock); if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) { - spin_unlock(&gss_auth->lock); break; } - spin_unlock(&gss_auth->lock); + spin_unlock(&inode->i_lock); if (signalled()) { err = -ERESTARTSYS; goto out_intr; @@ -451,6 +457,7 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) gss_cred_set_ctx(cred, gss_get_ctx(gss_msg->ctx)); else err = gss_msg->msg.errno; + spin_unlock(&inode->i_lock); out_intr: finish_wait(&gss_msg->waitqueue, &wait); gss_release_msg(gss_msg); @@ -489,12 +496,11 @@ gss_pipe_downcall(struct 
file *filp, const char __user *src, size_t mlen) const void *p, *end; void *buf; struct rpc_clnt *clnt; - struct gss_auth *gss_auth; - struct rpc_cred *cred; struct gss_upcall_msg *gss_msg; + struct inode *inode = filp->f_path.dentry->d_inode; struct gss_cl_ctx *ctx; uid_t uid; - int err = -EFBIG; + ssize_t err = -EFBIG; if (mlen > MSG_BUF_MAXSIZE) goto out; @@ -503,7 +509,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) if (!buf) goto out; - clnt = RPC_I(filp->f_path.dentry->d_inode)->private; + clnt = RPC_I(inode)->private; err = -EFAULT; if (copy_from_user(buf, src, mlen)) goto err; @@ -519,43 +525,38 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) ctx = gss_alloc_context(); if (ctx == NULL) goto err; - err = 0; - gss_auth = container_of(clnt->cl_auth, struct gss_auth, rpc_auth); - p = gss_fill_context(p, end, ctx, gss_auth->mech); + + err = -ENOENT; + /* Find a matching upcall */ + spin_lock(&inode->i_lock); + gss_msg = __gss_find_upcall(RPC_I(inode), uid); + if (gss_msg == NULL) { + spin_unlock(&inode->i_lock); + goto err_put_ctx; + } + list_del_init(&gss_msg->list); + spin_unlock(&inode->i_lock); + + p = gss_fill_context(p, end, ctx, gss_msg->auth->mech); if (IS_ERR(p)) { err = PTR_ERR(p); - if (err != -EACCES) - goto err_put_ctx; - } - spin_lock(&gss_auth->lock); - gss_msg = __gss_find_upcall(gss_auth, uid); - if (gss_msg) { - if (err == 0 && gss_msg->ctx == NULL) - gss_msg->ctx = gss_get_ctx(ctx); - gss_msg->msg.errno = err; - __gss_unhash_msg(gss_msg); - spin_unlock(&gss_auth->lock); - gss_release_msg(gss_msg); - } else { - struct auth_cred acred = { .uid = uid }; - spin_unlock(&gss_auth->lock); - cred = rpcauth_lookup_credcache(clnt->cl_auth, &acred, RPCAUTH_LOOKUP_NEW); - if (IS_ERR(cred)) { - err = PTR_ERR(cred); - goto err_put_ctx; - } - gss_cred_set_ctx(cred, gss_get_ctx(ctx)); + gss_msg->msg.errno = (err == -EACCES) ? 
-EACCES : -EAGAIN; + goto err_release_msg; } - gss_put_ctx(ctx); - kfree(buf); - dprintk("RPC: gss_pipe_downcall returning length %Zu\n", mlen); - return mlen; + gss_msg->ctx = gss_get_ctx(ctx); + err = mlen; + +err_release_msg: + spin_lock(&inode->i_lock); + __gss_unhash_msg(gss_msg); + spin_unlock(&inode->i_lock); + gss_release_msg(gss_msg); err_put_ctx: gss_put_ctx(ctx); err: kfree(buf); out: - dprintk("RPC: gss_pipe_downcall returning %d\n", err); + dprintk("RPC: gss_pipe_downcall returning %Zd\n", err); return err; } @@ -563,27 +564,21 @@ static void gss_pipe_release(struct inode *inode) { struct rpc_inode *rpci = RPC_I(inode); - struct rpc_clnt *clnt; - struct rpc_auth *auth; - struct gss_auth *gss_auth; + struct gss_upcall_msg *gss_msg; - clnt = rpci->private; - auth = clnt->cl_auth; - gss_auth = container_of(auth, struct gss_auth, rpc_auth); - spin_lock(&gss_auth->lock); - while (!list_empty(&gss_auth->upcalls)) { - struct gss_upcall_msg *gss_msg; + spin_lock(&inode->i_lock); + while (!list_empty(&rpci->in_downcall)) { - gss_msg = list_entry(gss_auth->upcalls.next, + gss_msg = list_entry(rpci->in_downcall.next, struct gss_upcall_msg, list); gss_msg->msg.errno = -EPIPE; atomic_inc(&gss_msg->count); __gss_unhash_msg(gss_msg); - spin_unlock(&gss_auth->lock); + spin_unlock(&inode->i_lock); gss_release_msg(gss_msg); - spin_lock(&gss_auth->lock); + spin_lock(&inode->i_lock); } - spin_unlock(&gss_auth->lock); + spin_unlock(&inode->i_lock); } static void @@ -637,18 +632,13 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); if (gss_auth->service == 0) goto err_put_mech; - INIT_LIST_HEAD(&gss_auth->upcalls); - spin_lock_init(&gss_auth->lock); auth = &gss_auth->rpc_auth; auth->au_cslack = GSS_CRED_SLACK >> 2; auth->au_rslack = GSS_VERF_SLACK >> 2; auth->au_ops = &authgss_ops; auth->au_flavor = flavor; atomic_set(&auth->au_count, 1); - - err = rpcauth_init_credcache(auth, GSS_CRED_EXPIRE); - if (err) - goto err_put_mech; + kref_init(&gss_auth->kref); gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name, clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN); @@ -657,7 +647,13 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) goto err_put_mech; } + err = rpcauth_init_credcache(auth); + if (err) + goto err_unlink_pipe; + return auth; +err_unlink_pipe: + rpc_unlink(gss_auth->dentry); err_put_mech: gss_mech_put(gss_auth->mech); err_free: @@ -668,6 +664,25 @@ out_dec: } static void +gss_free(struct gss_auth *gss_auth) +{ + rpc_unlink(gss_auth->dentry); + gss_auth->dentry = NULL; + gss_mech_put(gss_auth->mech); + + kfree(gss_auth); + module_put(THIS_MODULE); +} + +static void +gss_free_callback(struct kref *kref) +{ + struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref); + + gss_free(gss_auth); +} + +static void gss_destroy(struct rpc_auth *auth) { struct gss_auth *gss_auth; @@ -675,23 +690,51 @@ gss_destroy(struct rpc_auth *auth) dprintk("RPC: destroying GSS authenticator %p flavor %d\n", auth, auth->au_flavor); + rpcauth_destroy_credcache(auth); + gss_auth = container_of(auth, struct gss_auth, rpc_auth); - rpc_unlink(gss_auth->dentry); - gss_auth->dentry = NULL; - gss_mech_put(gss_auth->mech); + kref_put(&gss_auth->kref, gss_free_callback); +} - rpcauth_free_credcache(auth); - kfree(gss_auth); - module_put(THIS_MODULE); +/* + * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call + * to the server with the GSS control procedure field set to + * 
RPC_GSS_PROC_DESTROY. This should normally cause the server to release + * all RPCSEC_GSS state associated with that context. + */ +static int +gss_destroying_context(struct rpc_cred *cred) +{ + struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); + struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); + struct rpc_task *task; + + if (gss_cred->gc_ctx == NULL || + gss_cred->gc_ctx->gc_proc == RPC_GSS_PROC_DESTROY) + return 0; + + gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY; + cred->cr_ops = &gss_nullops; + + /* Take a reference to ensure the cred will be destroyed either + * by the RPC call or by the put_rpccred() below */ + get_rpccred(cred); + + task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC); + if (!IS_ERR(task)) + rpc_put_task(task); + + put_rpccred(cred); + return 1; } -/* gss_destroy_cred (and gss_destroy_ctx) are used to clean up after failure +/* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure * to create a new cred or context, so they check that things have been * allocated before freeing them. */ static void -gss_destroy_ctx(struct gss_cl_ctx *ctx) +gss_do_free_ctx(struct gss_cl_ctx *ctx) { - dprintk("RPC: gss_destroy_ctx\n"); + dprintk("RPC: gss_free_ctx\n"); if (ctx->gc_gss_ctx) gss_delete_sec_context(&ctx->gc_gss_ctx); @@ -701,15 +744,46 @@ gss_destroy_ctx(struct gss_cl_ctx *ctx) } static void -gss_destroy_cred(struct rpc_cred *rc) +gss_free_ctx_callback(struct rcu_head *head) { - struct gss_cred *cred = container_of(rc, struct gss_cred, gc_base); + struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu); + gss_do_free_ctx(ctx); +} - dprintk("RPC: gss_destroy_cred \n"); +static void +gss_free_ctx(struct gss_cl_ctx *ctx) +{ + call_rcu(&ctx->gc_rcu, gss_free_ctx_callback); +} - if (cred->gc_ctx) - gss_put_ctx(cred->gc_ctx); - kfree(cred); +static void +gss_free_cred(struct gss_cred *gss_cred) +{ + dprintk("RPC: gss_free_cred %p\n", gss_cred); + kfree(gss_cred); +} + +static void +gss_free_cred_callback(struct rcu_head *head) +{ + struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu); + gss_free_cred(gss_cred); +} + +static void +gss_destroy_cred(struct rpc_cred *cred) +{ + struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); + struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth); + struct gss_cl_ctx *ctx = gss_cred->gc_ctx; + + if (gss_destroying_context(cred)) + return; + rcu_assign_pointer(gss_cred->gc_ctx, NULL); + call_rcu(&cred->cr_rcu, gss_free_cred_callback); + if (ctx) + gss_put_ctx(ctx); + kref_put(&gss_auth->kref, gss_free_callback); } /* @@ -734,16 +808,14 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL))) goto out_err; - atomic_set(&cred->gc_count, 1); - cred->gc_uid = acred->uid; + rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops); /* * Note: in order to force a call to call_refresh(), we deliberately * fail to flag the credential as RPCAUTH_CRED_UPTODATE. 
*/ - cred->gc_flags = 0; - cred->gc_base.cr_ops = &gss_credops; - cred->gc_base.cr_flags = RPCAUTH_CRED_NEW; + cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW; cred->gc_service = gss_auth->service; + kref_get(&gss_auth->kref); return &cred->gc_base; out_err: @@ -774,7 +846,7 @@ gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags) * we don't really care if the credential has expired or not, * since the caller should be prepared to reinitialise it. */ - if ((flags & RPCAUTH_LOOKUP_NEW) && (rc->cr_flags & RPCAUTH_CRED_NEW)) + if ((flags & RPCAUTH_LOOKUP_NEW) && test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags)) goto out; /* Don't match with creds that have expired. */ if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry)) @@ -830,7 +902,7 @@ gss_marshal(struct rpc_task *task, __be32 *p) mic.data = (u8 *)(p + 1); maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); if (maj_stat == GSS_S_CONTEXT_EXPIRED) { - cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); } else if (maj_stat != 0) { printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); goto out_put_ctx; @@ -855,6 +927,13 @@ gss_refresh(struct rpc_task *task) return 0; } +/* Dummy refresh routine: used only when destroying the context */ +static int +gss_refresh_null(struct rpc_task *task) +{ + return -EACCES; +} + static __be32 * gss_validate(struct rpc_task *task, __be32 *p) { @@ -883,12 +962,15 @@ gss_validate(struct rpc_task *task, __be32 *p) maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); if (maj_stat == GSS_S_CONTEXT_EXPIRED) - cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; - if (maj_stat) + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); + if (maj_stat) { + dprintk("RPC: %5u gss_validate: gss_verify_mic returned" + "error 0x%08x\n", task->tk_pid, maj_stat); goto out_bad; + } /* We leave it to unwrap to calculate au_rslack. For now we just * calculate the length of the verifier: */ - task->tk_auth->au_verfsize = XDR_QUADLEN(len) + 2; + cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2; gss_put_ctx(ctx); dprintk("RPC: %5u gss_validate: gss_verify_mic succeeded.\n", task->tk_pid); @@ -917,7 +999,9 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; *p++ = htonl(rqstp->rq_seqno); + lock_kernel(); status = encode(rqstp, p, obj); + unlock_kernel(); if (status) return status; @@ -937,7 +1021,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic); status = -EIO; /* XXX? 
*/ if (maj_stat == GSS_S_CONTEXT_EXPIRED) - cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); else if (maj_stat) return status; q = xdr_encode_opaque(p, NULL, mic.len); @@ -1011,7 +1095,9 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; *p++ = htonl(rqstp->rq_seqno); + lock_kernel(); status = encode(rqstp, p, obj); + unlock_kernel(); if (status) return status; @@ -1036,7 +1122,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was * done anyway, so it's safe to put the request on the wire: */ if (maj_stat == GSS_S_CONTEXT_EXPIRED) - cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); else if (maj_stat) return status; @@ -1070,12 +1156,16 @@ gss_wrap_req(struct rpc_task *task, /* The spec seems a little ambiguous here, but I think that not * wrapping context destruction requests makes the most sense. */ + lock_kernel(); status = encode(rqstp, p, obj); + unlock_kernel(); goto out; } switch (gss_cred->gc_service) { case RPC_GSS_SVC_NONE: + lock_kernel(); status = encode(rqstp, p, obj); + unlock_kernel(); break; case RPC_GSS_SVC_INTEGRITY: status = gss_wrap_req_integ(cred, ctx, encode, @@ -1123,7 +1213,7 @@ gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic); if (maj_stat == GSS_S_CONTEXT_EXPIRED) - cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); if (maj_stat != GSS_S_COMPLETE) return status; return 0; @@ -1148,7 +1238,7 @@ gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf); if (maj_stat == GSS_S_CONTEXT_EXPIRED) - cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; + clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); if (maj_stat != GSS_S_COMPLETE) return status; if (ntohl(*(*p)++) != rqstp->rq_seqno) @@ -1188,10 +1278,12 @@ gss_unwrap_resp(struct rpc_task *task, break; } /* take into account extra slack for integrity and privacy cases: */ - task->tk_auth->au_rslack = task->tk_auth->au_verfsize + (p - savedp) + cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp) + (savedlen - head->iov_len); out_decode: + lock_kernel(); status = decode(rqstp, p, obj); + unlock_kernel(); out: gss_put_ctx(ctx); dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, @@ -1199,7 +1291,7 @@ out: return status; } -static struct rpc_authops authgss_ops = { +static const struct rpc_authops authgss_ops = { .owner = THIS_MODULE, .au_flavor = RPC_AUTH_GSS, #ifdef RPC_DEBUG @@ -1211,7 +1303,7 @@ static struct rpc_authops authgss_ops = { .crcreate = gss_create_cred }; -static struct rpc_credops gss_credops = { +static const struct rpc_credops gss_credops = { .cr_name = "AUTH_GSS", .crdestroy = gss_destroy_cred, .cr_init = gss_cred_init, @@ -1223,6 +1315,17 @@ static struct rpc_credops gss_credops = { .crunwrap_resp = gss_unwrap_resp, }; +static const struct rpc_credops gss_nullops = { + .cr_name = "AUTH_GSS", + .crdestroy = gss_destroy_cred, + .crmatch = gss_match, + .crmarshal = gss_marshal, + .crrefresh = gss_refresh_null, + .crvalidate = gss_validate, + .crwrap_req = gss_wrap_req, + .crunwrap_resp = gss_unwrap_resp, +}; + static struct rpc_pipe_ops gss_upcall_ops = { .upcall = gss_pipe_upcall, .downcall = gss_pipe_downcall, diff --git 
a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 7b194321705..71b9daefdff 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -201,7 +201,7 @@ gss_delete_sec_context_kerberos(void *internal_ctx) { kfree(kctx); } -static struct gss_api_ops gss_kerberos_ops = { +static const struct gss_api_ops gss_kerberos_ops = { .gss_import_sec_context = gss_import_sec_context_kerberos, .gss_get_mic = gss_get_mic_kerberos, .gss_verify_mic = gss_verify_mic_kerberos, diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c index 7e15aa68ae6..577d590e755 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c @@ -202,7 +202,7 @@ gss_get_mic_spkm3(struct gss_ctx *ctx, return err; } -static struct gss_api_ops gss_spkm3_ops = { +static const struct gss_api_ops gss_spkm3_ops = { .gss_import_sec_context = gss_import_sec_context_spkm3, .gss_get_mic = gss_get_mic_spkm3, .gss_verify_mic = gss_verify_mic_spkm3, diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c index 3df9fccab2f..537d0e8589d 100644 --- a/net/sunrpc/auth_null.c +++ b/net/sunrpc/auth_null.c @@ -76,7 +76,7 @@ nul_marshal(struct rpc_task *task, __be32 *p) static int nul_refresh(struct rpc_task *task) { - task->tk_msg.rpc_cred->cr_flags |= RPCAUTH_CRED_UPTODATE; + set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_msg.rpc_cred->cr_flags); return 0; } @@ -101,7 +101,7 @@ nul_validate(struct rpc_task *task, __be32 *p) return p; } -struct rpc_authops authnull_ops = { +const struct rpc_authops authnull_ops = { .owner = THIS_MODULE, .au_flavor = RPC_AUTH_NULL, #ifdef RPC_DEBUG @@ -122,7 +122,7 @@ struct rpc_auth null_auth = { }; static -struct rpc_credops null_credops = { +const struct rpc_credops null_credops = { .cr_name = "AUTH_NULL", .crdestroy = nul_destroy_cred, .crmatch = nul_match, @@ -133,9 +133,11 @@ struct rpc_credops null_credops = { static struct rpc_cred null_cred = { + .cr_lru = LIST_HEAD_INIT(null_cred.cr_lru), + .cr_auth = &null_auth, .cr_ops = &null_credops, .cr_count = ATOMIC_INIT(1), - .cr_flags = RPCAUTH_CRED_UPTODATE, + .cr_flags = 1UL << RPCAUTH_CRED_UPTODATE, #ifdef RPC_DEBUG .cr_magic = RPCAUTH_CRED_MAGIC, #endif diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index 4e7733aee36..5ed91e5bcee 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c @@ -20,11 +20,6 @@ struct unx_cred { gid_t uc_gids[NFS_NGROUPS]; }; #define uc_uid uc_base.cr_uid -#define uc_count uc_base.cr_count -#define uc_flags uc_base.cr_flags -#define uc_expire uc_base.cr_expire - -#define UNX_CRED_EXPIRE (60 * HZ) #define UNX_WRITESLACK (21 + (UNX_MAXNODENAME >> 2)) @@ -34,15 +29,14 @@ struct unx_cred { static struct rpc_auth unix_auth; static struct rpc_cred_cache unix_cred_cache; -static struct rpc_credops unix_credops; +static const struct rpc_credops unix_credops; static struct rpc_auth * unx_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor) { dprintk("RPC: creating UNIX authenticator for client %p\n", clnt); - if (atomic_inc_return(&unix_auth.au_count) == 0) - unix_cred_cache.nextgc = jiffies + (unix_cred_cache.expire >> 1); + atomic_inc(&unix_auth.au_count); return &unix_auth; } @@ -50,7 +44,7 @@ static void unx_destroy(struct rpc_auth *auth) { dprintk("RPC: destroying UNIX authenticator %p\n", auth); - rpcauth_free_credcache(auth); + rpcauth_clear_credcache(auth->au_credcache); } /* @@ -74,8 +68,8 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) if (!(cred = 
kmalloc(sizeof(*cred), GFP_KERNEL))) return ERR_PTR(-ENOMEM); - atomic_set(&cred->uc_count, 1); - cred->uc_flags = RPCAUTH_CRED_UPTODATE; + rpcauth_init_cred(&cred->uc_base, acred, auth, &unix_credops); + cred->uc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE; if (flags & RPCAUTH_LOOKUP_ROOTCREDS) { cred->uc_uid = 0; cred->uc_gid = 0; @@ -85,22 +79,34 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) if (groups > NFS_NGROUPS) groups = NFS_NGROUPS; - cred->uc_uid = acred->uid; cred->uc_gid = acred->gid; for (i = 0; i < groups; i++) cred->uc_gids[i] = GROUP_AT(acred->group_info, i); if (i < NFS_NGROUPS) cred->uc_gids[i] = NOGROUP; } - cred->uc_base.cr_ops = &unix_credops; - return (struct rpc_cred *) cred; + return &cred->uc_base; +} + +static void +unx_free_cred(struct unx_cred *unx_cred) +{ + dprintk("RPC: unx_free_cred %p\n", unx_cred); + kfree(unx_cred); +} + +static void +unx_free_cred_callback(struct rcu_head *head) +{ + struct unx_cred *unx_cred = container_of(head, struct unx_cred, uc_base.cr_rcu); + unx_free_cred(unx_cred); } static void unx_destroy_cred(struct rpc_cred *cred) { - kfree(cred); + call_rcu(&cred->cr_rcu, unx_free_cred_callback); } /* @@ -111,7 +117,7 @@ unx_destroy_cred(struct rpc_cred *cred) static int unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags) { - struct unx_cred *cred = (struct unx_cred *) rcred; + struct unx_cred *cred = container_of(rcred, struct unx_cred, uc_base); int i; if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) { @@ -142,7 +148,7 @@ static __be32 * unx_marshal(struct rpc_task *task, __be32 *p) { struct rpc_clnt *clnt = task->tk_client; - struct unx_cred *cred = (struct unx_cred *) task->tk_msg.rpc_cred; + struct unx_cred *cred = container_of(task->tk_msg.rpc_cred, struct unx_cred, uc_base); __be32 *base, *hold; int i; @@ -175,7 +181,7 @@ unx_marshal(struct rpc_task *task, __be32 *p) static int unx_refresh(struct rpc_task *task) { - task->tk_msg.rpc_cred->cr_flags |= RPCAUTH_CRED_UPTODATE; + set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_msg.rpc_cred->cr_flags); return 0; } @@ -198,13 +204,18 @@ unx_validate(struct rpc_task *task, __be32 *p) printk("RPC: giant verf size: %u\n", size); return NULL; } - task->tk_auth->au_rslack = (size >> 2) + 2; + task->tk_msg.rpc_cred->cr_auth->au_rslack = (size >> 2) + 2; p += (size >> 2); return p; } -struct rpc_authops authunix_ops = { +void __init rpc_init_authunix(void) +{ + spin_lock_init(&unix_cred_cache.lock); +} + +const struct rpc_authops authunix_ops = { .owner = THIS_MODULE, .au_flavor = RPC_AUTH_UNIX, #ifdef RPC_DEBUG @@ -218,7 +229,6 @@ struct rpc_authops authunix_ops = { static struct rpc_cred_cache unix_cred_cache = { - .expire = UNX_CRED_EXPIRE, }; static @@ -232,7 +242,7 @@ struct rpc_auth unix_auth = { }; static -struct rpc_credops unix_credops = { +const struct rpc_credops unix_credops = { .cr_name = "AUTH_UNIX", .crdestroy = unx_destroy_cred, .crmatch = unx_match, diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index d8fbee40a19..52429b1ffcc 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -44,6 +44,12 @@ dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \ __FUNCTION__, t->tk_status) +/* + * All RPC clients are linked into this list + */ +static LIST_HEAD(all_clients); +static DEFINE_SPINLOCK(rpc_client_lock); + static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); @@ -66,6 +72,21 @@ static void call_connect_status(struct rpc_task *task); static __be32 * call_header(struct rpc_task *task); static __be32 * call_verify(struct rpc_task *task); +static int 
rpc_ping(struct rpc_clnt *clnt, int flags); + +static void rpc_register_client(struct rpc_clnt *clnt) +{ + spin_lock(&rpc_client_lock); + list_add(&clnt->cl_clients, &all_clients); + spin_unlock(&rpc_client_lock); +} + +static void rpc_unregister_client(struct rpc_clnt *clnt) +{ + spin_lock(&rpc_client_lock); + list_del(&clnt->cl_clients); + spin_unlock(&rpc_client_lock); +} static int rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) @@ -111,6 +132,9 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s dprintk("RPC: creating %s client for %s (xprt %p)\n", program->name, servname, xprt); + err = rpciod_up(); + if (err) + goto out_no_rpciod; err = -EINVAL; if (!xprt) goto out_no_xprt; @@ -121,8 +145,6 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s clnt = kzalloc(sizeof(*clnt), GFP_KERNEL); if (!clnt) goto out_err; - atomic_set(&clnt->cl_users, 0); - atomic_set(&clnt->cl_count, 1); clnt->cl_parent = clnt; clnt->cl_server = clnt->cl_inline_name; @@ -148,6 +170,8 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s if (clnt->cl_metrics == NULL) goto out_no_stats; clnt->cl_program = program; + INIT_LIST_HEAD(&clnt->cl_tasks); + spin_lock_init(&clnt->cl_lock); if (!xprt_bound(clnt->cl_xprt)) clnt->cl_autobind = 1; @@ -155,6 +179,8 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s clnt->cl_rtt = &clnt->cl_rtt_default; rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval); + kref_init(&clnt->cl_kref); + err = rpc_setup_pipedir(clnt, program->pipe_dir_name); if (err < 0) goto out_no_path; @@ -172,6 +198,7 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s if (clnt->cl_nodelen > UNX_MAXNODENAME) clnt->cl_nodelen = UNX_MAXNODENAME; memcpy(clnt->cl_nodename, utsname()->nodename, clnt->cl_nodelen); + rpc_register_client(clnt); return clnt; out_no_auth: @@ -188,6 +215,8 @@ out_no_stats: out_err: xprt_put(xprt); out_no_xprt: + rpciod_down(); +out_no_rpciod: return ERR_PTR(err); } @@ -205,13 +234,32 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) { struct rpc_xprt *xprt; struct rpc_clnt *clnt; + struct rpc_xprtsock_create xprtargs = { + .proto = args->protocol, + .srcaddr = args->saddress, + .dstaddr = args->address, + .addrlen = args->addrsize, + .timeout = args->timeout + }; + char servername[20]; - xprt = xprt_create_transport(args->protocol, args->address, - args->addrsize, args->timeout); + xprt = xprt_create_transport(&xprtargs); if (IS_ERR(xprt)) return (struct rpc_clnt *)xprt; /* + * If the caller chooses not to specify a hostname, whip + * up a string representation of the passed-in address. + */ + if (args->servername == NULL) { + struct sockaddr_in *addr = + (struct sockaddr_in *) &args->address; + snprintf(servername, sizeof(servername), NIPQUAD_FMT, + NIPQUAD(addr->sin_addr.s_addr)); + args->servername = servername; + } + + /* * By default, kernel RPC client connects from a reserved port. 
* CAP_NET_BIND_SERVICE will not be set for unprivileged requesters, * but it is always enabled for rpciod, which handles the connect @@ -245,8 +293,6 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) clnt->cl_intr = 1; if (args->flags & RPC_CLNT_CREATE_AUTOBIND) clnt->cl_autobind = 1; - if (args->flags & RPC_CLNT_CREATE_ONESHOT) - clnt->cl_oneshot = 1; if (args->flags & RPC_CLNT_CREATE_DISCRTRY) clnt->cl_discrtry = 1; @@ -268,24 +314,25 @@ rpc_clone_client(struct rpc_clnt *clnt) new = kmemdup(clnt, sizeof(*new), GFP_KERNEL); if (!new) goto out_no_clnt; - atomic_set(&new->cl_count, 1); - atomic_set(&new->cl_users, 0); + new->cl_parent = clnt; + /* Turn off autobind on clones */ + new->cl_autobind = 0; + INIT_LIST_HEAD(&new->cl_tasks); + spin_lock_init(&new->cl_lock); + rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); new->cl_metrics = rpc_alloc_iostats(clnt); if (new->cl_metrics == NULL) goto out_no_stats; + kref_init(&new->cl_kref); err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); if (err != 0) goto out_no_path; - new->cl_parent = clnt; - atomic_inc(&clnt->cl_count); - new->cl_xprt = xprt_get(clnt->cl_xprt); - /* Turn off autobind on clones */ - new->cl_autobind = 0; - new->cl_oneshot = 0; - new->cl_dead = 0; - rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); if (new->cl_auth) atomic_inc(&new->cl_auth->au_count); + xprt_get(clnt->cl_xprt); + kref_get(&clnt->cl_kref); + rpc_register_client(new); + rpciod_up(); return new; out_no_path: rpc_free_iostats(new->cl_metrics); @@ -298,86 +345,86 @@ out_no_clnt: /* * Properly shut down an RPC client, terminating all outstanding - * requests. Note that we must be certain that cl_oneshot and - * cl_dead are cleared, or else the client would be destroyed - * when the last task releases it. + * requests. 
*/ -int -rpc_shutdown_client(struct rpc_clnt *clnt) +void rpc_shutdown_client(struct rpc_clnt *clnt) { - dprintk("RPC: shutting down %s client for %s, tasks=%d\n", - clnt->cl_protname, clnt->cl_server, - atomic_read(&clnt->cl_users)); - - while (atomic_read(&clnt->cl_users) > 0) { - /* Don't let rpc_release_client destroy us */ - clnt->cl_oneshot = 0; - clnt->cl_dead = 0; + dprintk("RPC: shutting down %s client for %s\n", + clnt->cl_protname, clnt->cl_server); + + while (!list_empty(&clnt->cl_tasks)) { rpc_killall_tasks(clnt); wait_event_timeout(destroy_wait, - !atomic_read(&clnt->cl_users), 1*HZ); - } - - if (atomic_read(&clnt->cl_users) < 0) { - printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n", - clnt, atomic_read(&clnt->cl_users)); -#ifdef RPC_DEBUG - rpc_show_tasks(); -#endif - BUG(); + list_empty(&clnt->cl_tasks), 1*HZ); } - return rpc_destroy_client(clnt); + rpc_release_client(clnt); } /* - * Delete an RPC client + * Free an RPC client */ -int -rpc_destroy_client(struct rpc_clnt *clnt) +static void +rpc_free_client(struct kref *kref) { - if (!atomic_dec_and_test(&clnt->cl_count)) - return 1; - BUG_ON(atomic_read(&clnt->cl_users) != 0); + struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref); dprintk("RPC: destroying %s client for %s\n", clnt->cl_protname, clnt->cl_server); - if (clnt->cl_auth) { - rpcauth_destroy(clnt->cl_auth); - clnt->cl_auth = NULL; - } if (!IS_ERR(clnt->cl_dentry)) { rpc_rmdir(clnt->cl_dentry); rpc_put_mount(); } if (clnt->cl_parent != clnt) { - rpc_destroy_client(clnt->cl_parent); + rpc_release_client(clnt->cl_parent); goto out_free; } if (clnt->cl_server != clnt->cl_inline_name) kfree(clnt->cl_server); out_free: + rpc_unregister_client(clnt); rpc_free_iostats(clnt->cl_metrics); clnt->cl_metrics = NULL; xprt_put(clnt->cl_xprt); + rpciod_down(); kfree(clnt); - return 0; } /* - * Release an RPC client + * Free an RPC client + */ +static void +rpc_free_auth(struct kref *kref) +{ + struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref); + + if (clnt->cl_auth == NULL) { + rpc_free_client(kref); + return; + } + + /* + * Note: RPCSEC_GSS may need to send NULL RPC calls in order to + * release remaining GSS contexts. This mechanism ensures + * that it can do so safely. 
+ */ + kref_init(kref); + rpcauth_release(clnt->cl_auth); + clnt->cl_auth = NULL; + kref_put(kref, rpc_free_client); +} + +/* + * Release reference to the RPC client */ void rpc_release_client(struct rpc_clnt *clnt) { - dprintk("RPC: rpc_release_client(%p, %d)\n", - clnt, atomic_read(&clnt->cl_users)); + dprintk("RPC: rpc_release_client(%p)\n", clnt); - if (!atomic_dec_and_test(&clnt->cl_users)) - return; - wake_up(&destroy_wait); - if (clnt->cl_oneshot || clnt->cl_dead) - rpc_destroy_client(clnt); + if (list_empty(&clnt->cl_tasks)) + wake_up(&destroy_wait); + kref_put(&clnt->cl_kref, rpc_free_auth); } /** @@ -468,82 +515,96 @@ void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset) rpc_restore_sigmask(oldset); } -/* - * New rpc_call implementation +static +struct rpc_task *rpc_do_run_task(struct rpc_clnt *clnt, + struct rpc_message *msg, + int flags, + const struct rpc_call_ops *ops, + void *data) +{ + struct rpc_task *task, *ret; + sigset_t oldset; + + task = rpc_new_task(clnt, flags, ops, data); + if (task == NULL) { + rpc_release_calldata(ops, data); + return ERR_PTR(-ENOMEM); + } + + /* Mask signals on synchronous RPC calls and RPCSEC_GSS upcalls */ + rpc_task_sigmask(task, &oldset); + if (msg != NULL) { + rpc_call_setup(task, msg, 0); + if (task->tk_status != 0) { + ret = ERR_PTR(task->tk_status); + rpc_put_task(task); + goto out; + } + } + atomic_inc(&task->tk_count); + rpc_execute(task); + ret = task; +out: + rpc_restore_sigmask(&oldset); + return ret; +} + +/** + * rpc_call_sync - Perform a synchronous RPC call + * @clnt: pointer to RPC client + * @msg: RPC call parameters + * @flags: RPC call flags */ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) { struct rpc_task *task; - sigset_t oldset; - int status; - - /* If this client is slain all further I/O fails */ - if (clnt->cl_dead) - return -EIO; + int status; BUG_ON(flags & RPC_TASK_ASYNC); - task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL); - if (task == NULL) - return -ENOMEM; - - /* Mask signals on RPC calls _and_ GSS_AUTH upcalls */ - rpc_task_sigmask(task, &oldset); - - /* Set up the call info struct and execute the task */ - rpc_call_setup(task, msg, 0); - if (task->tk_status == 0) { - atomic_inc(&task->tk_count); - rpc_execute(task); - } + task = rpc_do_run_task(clnt, msg, flags, &rpc_default_ops, NULL); + if (IS_ERR(task)) + return PTR_ERR(task); status = task->tk_status; rpc_put_task(task); - rpc_restore_sigmask(&oldset); return status; } -/* - * New rpc_call implementation +/** + * rpc_call_async - Perform an asynchronous RPC call + * @clnt: pointer to RPC client + * @msg: RPC call parameters + * @flags: RPC call flags + * @ops: RPC call ops + * @data: user call data */ int rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags, const struct rpc_call_ops *tk_ops, void *data) { struct rpc_task *task; - sigset_t oldset; - int status; - - /* If this client is slain all further I/O fails */ - status = -EIO; - if (clnt->cl_dead) - goto out_release; - - flags |= RPC_TASK_ASYNC; - - /* Create/initialize a new RPC task */ - status = -ENOMEM; - if (!(task = rpc_new_task(clnt, flags, tk_ops, data))) - goto out_release; - - /* Mask signals on GSS_AUTH upcalls */ - rpc_task_sigmask(task, &oldset); - rpc_call_setup(task, msg, 0); - - /* Set up the call info struct and execute the task */ - status = task->tk_status; - if (status == 0) - rpc_execute(task); - else - rpc_put_task(task); - - rpc_restore_sigmask(&oldset); - return status; -out_release: - 
rpc_release_calldata(tk_ops, data); - return status; + task = rpc_do_run_task(clnt, msg, flags|RPC_TASK_ASYNC, tk_ops, data); + if (IS_ERR(task)) + return PTR_ERR(task); + rpc_put_task(task); + return 0; } +/** + * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it + * @clnt: pointer to RPC client + * @flags: RPC flags + * @ops: RPC call ops + * @data: user call data + */ +struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, + const struct rpc_call_ops *tk_ops, + void *data) +{ + return rpc_do_run_task(clnt, NULL, flags, tk_ops, data); +} +EXPORT_SYMBOL(rpc_run_task); void rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags) @@ -745,7 +806,7 @@ call_reserveresult(struct rpc_task *task) static void call_allocate(struct rpc_task *task) { - unsigned int slack = task->tk_auth->au_cslack; + unsigned int slack = task->tk_msg.rpc_cred->cr_auth->au_cslack; struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = task->tk_xprt; struct rpc_procinfo *proc = task->tk_msg.rpc_proc; @@ -843,10 +904,8 @@ call_encode(struct rpc_task *task) if (encode == NULL) return; - lock_kernel(); task->tk_status = rpcauth_wrap_req(task, encode, req, p, task->tk_msg.rpc_argp); - unlock_kernel(); if (task->tk_status == -ENOMEM) { /* XXX: Is this sane? */ rpc_delay(task, 3*HZ); @@ -1177,10 +1236,8 @@ call_decode(struct rpc_task *task) task->tk_action = rpc_exit_task; if (decode) { - lock_kernel(); task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, task->tk_msg.rpc_resp); - unlock_kernel(); } dprintk("RPC: %5u call_decode result %d\n", task->tk_pid, task->tk_status); @@ -1273,9 +1330,9 @@ call_verify(struct rpc_task *task) * - if it isn't pointer subtraction in the NFS client may give * undefined results */ - printk(KERN_WARNING - "call_verify: XDR representation not a multiple of" - " 4 bytes: 0x%x\n", task->tk_rqstp->rq_rcv_buf.len); + dprintk("RPC: %5u %s: XDR representation not a multiple of" + " 4 bytes: 0x%x\n", task->tk_pid, __FUNCTION__, + task->tk_rqstp->rq_rcv_buf.len); goto out_eio; } if ((len -= 3) < 0) @@ -1283,7 +1340,8 @@ call_verify(struct rpc_task *task) p += 1; /* skip XID */ if ((n = ntohl(*p++)) != RPC_REPLY) { - printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n); + dprintk("RPC: %5u %s: not an RPC reply: %x\n", + task->tk_pid, __FUNCTION__, n); goto out_garbage; } if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { @@ -1334,7 +1392,8 @@ call_verify(struct rpc_task *task) "authentication.\n", task->tk_client->cl_server); break; default: - printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n); + dprintk("RPC: %5u %s: unknown auth error: %x\n", + task->tk_pid, __FUNCTION__, n); error = -EIO; } dprintk("RPC: %5u %s: call rejected %d\n", @@ -1342,7 +1401,8 @@ call_verify(struct rpc_task *task) goto out_err; } if (!(p = rpcauth_checkverf(task, p))) { - printk(KERN_WARNING "call_verify: auth check failed\n"); + dprintk("RPC: %5u %s: auth check failed\n", + task->tk_pid, __FUNCTION__); goto out_garbage; /* bad verifier, retry */ } len = p - (__be32 *)iov->iov_base - 1; @@ -1381,7 +1441,8 @@ call_verify(struct rpc_task *task) task->tk_pid, __FUNCTION__); break; /* retry */ default: - printk(KERN_WARNING "call_verify: server accept status: %x\n", n); + dprintk("RPC: %5u %s: server accept status: %x\n", + task->tk_pid, __FUNCTION__, n); /* Also retry */ } @@ -1395,14 +1456,16 @@ out_garbage: out_retry: return ERR_PTR(-EAGAIN); } - printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__); out_eio: error = -EIO; out_err: 
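/*
 * Sidebar (not part of this patch): a hedged sketch of the consolidated
 * call path above from the caller's side.  rpc_call_sync() and
 * rpc_call_async() now both funnel through rpc_do_run_task(); a caller that
 * needs the task itself uses rpc_run_task(), which hands back a referenced,
 * already-executing task (or an ERR_PTR), so the caller must drop that
 * reference.  With a NULL rpc_message, a real caller would typically set
 * the call up in an .rpc_call_prepare callback; that detail is elided here,
 * and the demo_* names are hypothetical.
 */
#include <linux/err.h>
#include <linux/sunrpc/clnt.h>

static void demo_call_done(struct rpc_task *task, void *calldata)
{
	/* task->tk_status carries the RPC result at this point */
}

static const struct rpc_call_ops demo_call_ops = {
	.rpc_call_done	= demo_call_done,
};

static int demo_start_async_call(struct rpc_clnt *clnt)
{
	struct rpc_task *task;

	task = rpc_run_task(clnt, RPC_TASK_ASYNC, &demo_call_ops, NULL);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);	/* drop the reference rpc_run_task() took */
	return 0;
}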
rpc_exit(task, error); + dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid, + __FUNCTION__, error); return ERR_PTR(error); out_overflow: - printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__); + dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid, + __FUNCTION__); goto out_garbage; } @@ -1421,7 +1484,7 @@ static struct rpc_procinfo rpcproc_null = { .p_decode = rpcproc_decode_null, }; -int rpc_ping(struct rpc_clnt *clnt, int flags) +static int rpc_ping(struct rpc_clnt *clnt, int flags) { struct rpc_message msg = { .rpc_proc = &rpcproc_null, @@ -1432,3 +1495,51 @@ int rpc_ping(struct rpc_clnt *clnt, int flags) put_rpccred(msg.rpc_cred); return err; } + +struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags) +{ + struct rpc_message msg = { + .rpc_proc = &rpcproc_null, + .rpc_cred = cred, + }; + return rpc_do_run_task(clnt, &msg, flags, &rpc_default_ops, NULL); +} +EXPORT_SYMBOL(rpc_call_null); + +#ifdef RPC_DEBUG +void rpc_show_tasks(void) +{ + struct rpc_clnt *clnt; + struct rpc_task *t; + + spin_lock(&rpc_client_lock); + if (list_empty(&all_clients)) + goto out; + printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout " + "-rpcwait -action- ---ops--\n"); + list_for_each_entry(clnt, &all_clients, cl_clients) { + if (list_empty(&clnt->cl_tasks)) + continue; + spin_lock(&clnt->cl_lock); + list_for_each_entry(t, &clnt->cl_tasks, tk_task) { + const char *rpc_waitq = "none"; + + if (RPC_IS_QUEUED(t)) + rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq); + + printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n", + t->tk_pid, + (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1), + t->tk_flags, t->tk_status, + t->tk_client, + (t->tk_client ? t->tk_client->cl_prog : 0), + t->tk_rqstp, t->tk_timeout, + rpc_waitq, + t->tk_action, t->tk_ops); + } + spin_unlock(&clnt->cl_lock); + } +out: + spin_unlock(&rpc_client_lock); +} +#endif diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 5887457dc93..e787b6a43ee 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -344,7 +344,7 @@ rpc_info_open(struct inode *inode, struct file *file) mutex_lock(&inode->i_mutex); clnt = RPC_I(inode)->private; if (clnt) { - atomic_inc(&clnt->cl_users); + kref_get(&clnt->cl_kref); m->private = clnt; } else { single_release(inode, file); @@ -448,6 +448,15 @@ void rpc_put_mount(void) simple_release_fs(&rpc_mount, &rpc_mount_count); } +static int rpc_delete_dentry(struct dentry *dentry) +{ + return 1; +} + +static struct dentry_operations rpc_dentry_operations = { + .d_delete = rpc_delete_dentry, +}; + static int rpc_lookup_parent(char *path, struct nameidata *nd) { @@ -506,7 +515,7 @@ rpc_get_inode(struct super_block *sb, int mode) * FIXME: This probably has races. 
*/ static void -rpc_depopulate(struct dentry *parent) +rpc_depopulate(struct dentry *parent, int start, int eof) { struct inode *dir = parent->d_inode; struct list_head *pos, *next; @@ -518,6 +527,10 @@ repeat: spin_lock(&dcache_lock); list_for_each_safe(pos, next, &parent->d_subdirs) { dentry = list_entry(pos, struct dentry, d_u.d_child); + if (!dentry->d_inode || + dentry->d_inode->i_ino < start || + dentry->d_inode->i_ino >= eof) + continue; spin_lock(&dentry->d_lock); if (!d_unhashed(dentry)) { dget_locked(dentry); @@ -533,11 +546,11 @@ repeat: if (n) { do { dentry = dvec[--n]; - if (dentry->d_inode) { - rpc_close_pipes(dentry->d_inode); + if (S_ISREG(dentry->d_inode->i_mode)) simple_unlink(dir, dentry); - } - inode_dir_notify(dir, DN_DELETE); + else if (S_ISDIR(dentry->d_inode->i_mode)) + simple_rmdir(dir, dentry); + d_delete(dentry); dput(dentry); } while (n); goto repeat; @@ -560,6 +573,7 @@ rpc_populate(struct dentry *parent, dentry = d_alloc_name(parent, files[i].name); if (!dentry) goto out_bad; + dentry->d_op = &rpc_dentry_operations; mode = files[i].mode; inode = rpc_get_inode(dir->i_sb, mode); if (!inode) { @@ -607,21 +621,14 @@ static int __rpc_rmdir(struct inode *dir, struct dentry *dentry) { int error; - - shrink_dcache_parent(dentry); - if (d_unhashed(dentry)) - return 0; - if ((error = simple_rmdir(dir, dentry)) != 0) - return error; - if (!error) { - inode_dir_notify(dir, DN_DELETE); - d_drop(dentry); - } - return 0; + error = simple_rmdir(dir, dentry); + if (!error) + d_delete(dentry); + return error; } static struct dentry * -rpc_lookup_create(struct dentry *parent, const char *name, int len) +rpc_lookup_create(struct dentry *parent, const char *name, int len, int exclusive) { struct inode *dir = parent->d_inode; struct dentry *dentry; @@ -630,7 +637,9 @@ rpc_lookup_create(struct dentry *parent, const char *name, int len) dentry = lookup_one_len(name, parent, len); if (IS_ERR(dentry)) goto out_err; - if (dentry->d_inode) { + if (!dentry->d_inode) + dentry->d_op = &rpc_dentry_operations; + else if (exclusive) { dput(dentry); dentry = ERR_PTR(-EEXIST); goto out_err; @@ -649,7 +658,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd) if ((error = rpc_lookup_parent(path, nd)) != 0) return ERR_PTR(error); - dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len); + dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len, 1); if (IS_ERR(dentry)) rpc_release_path(nd); return dentry; @@ -681,7 +690,7 @@ out: rpc_release_path(&nd); return dentry; err_depopulate: - rpc_depopulate(dentry); + rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF); __rpc_rmdir(dir, dentry); err_dput: dput(dentry); @@ -701,7 +710,7 @@ rpc_rmdir(struct dentry *dentry) parent = dget_parent(dentry); dir = parent->d_inode; mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); - rpc_depopulate(dentry); + rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF); error = __rpc_rmdir(dir, dentry); dput(dentry); mutex_unlock(&dir->i_mutex); @@ -716,10 +725,21 @@ rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pi struct inode *dir, *inode; struct rpc_inode *rpci; - dentry = rpc_lookup_create(parent, name, strlen(name)); + dentry = rpc_lookup_create(parent, name, strlen(name), 0); if (IS_ERR(dentry)) return dentry; dir = parent->d_inode; + if (dentry->d_inode) { + rpci = RPC_I(dentry->d_inode); + if (rpci->private != private || + rpci->ops != ops || + rpci->flags != flags) { + dput (dentry); + dentry = ERR_PTR(-EBUSY); + } + rpci->nkern_readwriters++; + goto out; + } 
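/*
 * Sidebar (not part of this patch): the rpc_mkpipe() logic ending just
 * above lets multiple kernel users share one pipefs pipe.  A second creator
 * whose private/ops/flags match simply bumps nkern_readwriters; a mismatch
 * gets -EBUSY; and rpc_unlink() (further below) only closes and unlinks the
 * pipe once the count falls to zero.  A minimal sketch of that
 * reuse-or-create idiom, with hypothetical demo_* names:
 */
#include <linux/err.h>
#include <linux/slab.h>

struct demo_pipe {
	const void *owner;	/* identity compared on reuse */
	int nusers;
};

static struct demo_pipe *demo_pipe_get(struct demo_pipe *p, const void *owner)
{
	if (p != NULL) {
		if (p->owner != owner)
			return ERR_PTR(-EBUSY);	/* same name, other owner */
		p->nusers++;			/* share the existing pipe */
		return p;
	}
	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return ERR_PTR(-ENOMEM);
	p->owner = owner;
	p->nusers = 1;
	return p;
}

static void demo_pipe_put(struct demo_pipe *p)
{
	if (--p->nusers == 0)
		kfree(p);	/* last user tears the pipe down */
}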
inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR); if (!inode) goto err_dput; @@ -730,6 +750,7 @@ rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pi rpci->private = private; rpci->flags = flags; rpci->ops = ops; + rpci->nkern_readwriters = 1; inode_dir_notify(dir, DN_CREATE); dget(dentry); out: @@ -754,13 +775,11 @@ rpc_unlink(struct dentry *dentry) parent = dget_parent(dentry); dir = parent->d_inode; mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); - if (!d_unhashed(dentry)) { - d_drop(dentry); - if (dentry->d_inode) { - rpc_close_pipes(dentry->d_inode); - error = simple_unlink(dir, dentry); - } - inode_dir_notify(dir, DN_DELETE); + if (--RPC_I(dentry->d_inode)->nkern_readwriters == 0) { + rpc_close_pipes(dentry->d_inode); + error = simple_unlink(dir, dentry); + if (!error) + d_delete(dentry); } dput(dentry); mutex_unlock(&dir->i_mutex); @@ -833,6 +852,7 @@ init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) rpci->nreaders = 0; rpci->nwriters = 0; INIT_LIST_HEAD(&rpci->in_upcall); + INIT_LIST_HEAD(&rpci->in_downcall); INIT_LIST_HEAD(&rpci->pipe); rpci->pipelen = 0; init_waitqueue_head(&rpci->waitq); diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 6c7aa8a1f0c..d1740dbab99 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -12,6 +12,8 @@ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ +#include <linux/module.h> + #include <linux/types.h> #include <linux/socket.h> #include <linux/kernel.h> @@ -184,8 +186,8 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr, .program = &rpcb_program, .version = version, .authflavor = RPC_AUTH_UNIX, - .flags = (RPC_CLNT_CREATE_ONESHOT | - RPC_CLNT_CREATE_NOPING), + .flags = (RPC_CLNT_CREATE_NOPING | + RPC_CLNT_CREATE_INTR), }; ((struct sockaddr_in *)srvaddr)->sin_port = htons(RPCBIND_PORT); @@ -238,6 +240,7 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) error = rpc_call_sync(rpcb_clnt, &msg, 0); + rpc_shutdown_client(rpcb_clnt); if (error < 0) printk(KERN_WARNING "RPC: failed to contact local rpcbind " "server (errno %d).\n", -error); @@ -246,21 +249,20 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) return error; } -#ifdef CONFIG_ROOT_NFS /** - * rpcb_getport_external - obtain the port for an RPC service on a given host + * rpcb_getport_sync - obtain the port for an RPC service on a given host * @sin: address of remote peer * @prog: RPC program number to bind * @vers: RPC version number to bind * @prot: transport protocol to use to make this request * * Called from outside the RPC client in a synchronous task context. + * Uses default timeout parameters specified by underlying transport. * - * For now, this supports only version 2 queries, but is used only by - * mount_clnt for NFS_ROOT. 
+ * XXX: Needs to support IPv6, and rpcbind versions 3 and 4 */ -int rpcb_getport_external(struct sockaddr_in *sin, __u32 prog, - __u32 vers, int prot) +int rpcb_getport_sync(struct sockaddr_in *sin, __u32 prog, + __u32 vers, int prot) { struct rpcbind_args map = { .r_prog = prog, @@ -277,15 +279,16 @@ int rpcb_getport_external(struct sockaddr_in *sin, __u32 prog, char hostname[40]; int status; - dprintk("RPC: rpcb_getport_external(%u.%u.%u.%u, %u, %u, %d)\n", - NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); + dprintk("RPC: %s(" NIPQUAD_FMT ", %u, %u, %d)\n", + __FUNCTION__, NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); - sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr)); + sprintf(hostname, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr)); rpcb_clnt = rpcb_create(hostname, (struct sockaddr *)sin, prot, 2, 0); if (IS_ERR(rpcb_clnt)) return PTR_ERR(rpcb_clnt); status = rpc_call_sync(rpcb_clnt, &msg, 0); + rpc_shutdown_client(rpcb_clnt); if (status >= 0) { if (map.r_port != 0) @@ -294,16 +297,16 @@ int rpcb_getport_external(struct sockaddr_in *sin, __u32 prog, } return status; } -#endif +EXPORT_SYMBOL_GPL(rpcb_getport_sync); /** - * rpcb_getport - obtain the port for a given RPC service on a given host + * rpcb_getport_async - obtain the port for a given RPC service on a given host * @task: task that is waiting for portmapper request * * This one can be called for an ongoing RPC request, and can be used in * an async (rpciod) context. */ -void rpcb_getport(struct rpc_task *task) +void rpcb_getport_async(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; int bind_version; @@ -314,17 +317,17 @@ void rpcb_getport(struct rpc_task *task) struct sockaddr addr; int status; - dprintk("RPC: %5u rpcb_getport(%s, %u, %u, %d)\n", - task->tk_pid, clnt->cl_server, - clnt->cl_prog, clnt->cl_vers, xprt->prot); + dprintk("RPC: %5u %s(%s, %u, %u, %d)\n", + task->tk_pid, __FUNCTION__, + clnt->cl_server, clnt->cl_prog, clnt->cl_vers, xprt->prot); /* Autobind on cloned rpc clients is discouraged */ BUG_ON(clnt->cl_parent != clnt); if (xprt_test_and_set_binding(xprt)) { status = -EACCES; /* tell caller to check again */ - dprintk("RPC: %5u rpcb_getport waiting for another binder\n", - task->tk_pid); + dprintk("RPC: %5u %s: waiting for another binder\n", + task->tk_pid, __FUNCTION__); goto bailout_nowake; } @@ -335,27 +338,28 @@ void rpcb_getport(struct rpc_task *task) /* Someone else may have bound if we slept */ if (xprt_bound(xprt)) { status = 0; - dprintk("RPC: %5u rpcb_getport already bound\n", task->tk_pid); + dprintk("RPC: %5u %s: already bound\n", + task->tk_pid, __FUNCTION__); goto bailout_nofree; } if (rpcb_next_version[xprt->bind_index].rpc_proc == NULL) { xprt->bind_index = 0; status = -EACCES; /* tell caller to try again later */ - dprintk("RPC: %5u rpcb_getport no more getport versions " - "available\n", task->tk_pid); + dprintk("RPC: %5u %s: no more getport versions available\n", + task->tk_pid, __FUNCTION__); goto bailout_nofree; } bind_version = rpcb_next_version[xprt->bind_index].rpc_vers; - dprintk("RPC: %5u rpcb_getport trying rpcbind version %u\n", - task->tk_pid, bind_version); + dprintk("RPC: %5u %s: trying rpcbind version %u\n", + task->tk_pid, __FUNCTION__, bind_version); map = kzalloc(sizeof(struct rpcbind_args), GFP_ATOMIC); if (!map) { status = -ENOMEM; - dprintk("RPC: %5u rpcb_getport no memory available\n", - task->tk_pid); + dprintk("RPC: %5u %s: no memory available\n", + task->tk_pid, __FUNCTION__); goto bailout_nofree; } map->r_prog = clnt->cl_prog; 
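/*
 * Sidebar (not part of this patch): a usage sketch for rpcb_getport_sync()
 * above.  Per the code in this patch it returns the registered port as a
 * positive value, -EACCES when the program is not registered (r_port == 0),
 * or a negative errno if the rpcbind query itself fails.  The NFS program
 * and version numbers below are only illustrative.
 */
#include <linux/in.h>

static int demo_resolve_nfs_port(struct sockaddr_in *server)
{
	int port;

	port = rpcb_getport_sync(server, 100003 /* NFS */, 3, IPPROTO_TCP);
	if (port < 0)
		return port;		/* query failed or unregistered */
	server->sin_port = htons(port);	/* ready for the caller to connect */
	return 0;
}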
@@ -373,16 +377,17 @@ void rpcb_getport(struct rpc_task *task) rpcb_clnt = rpcb_create(clnt->cl_server, &addr, xprt->prot, bind_version, 0); if (IS_ERR(rpcb_clnt)) { status = PTR_ERR(rpcb_clnt); - dprintk("RPC: %5u rpcb_getport rpcb_create failed, error %ld\n", - task->tk_pid, PTR_ERR(rpcb_clnt)); + dprintk("RPC: %5u %s: rpcb_create failed, error %ld\n", + task->tk_pid, __FUNCTION__, PTR_ERR(rpcb_clnt)); goto bailout; } child = rpc_run_task(rpcb_clnt, RPC_TASK_ASYNC, &rpcb_getport_ops, map); + rpc_release_client(rpcb_clnt); if (IS_ERR(child)) { status = -EIO; - dprintk("RPC: %5u rpcb_getport rpc_run_task failed\n", - task->tk_pid); + dprintk("RPC: %5u %s: rpc_run_task failed\n", + task->tk_pid, __FUNCTION__); goto bailout_nofree; } rpc_put_task(child); diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 944d75396fb..2ac43c41c3a 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -25,7 +25,6 @@ #ifdef RPC_DEBUG #define RPCDBG_FACILITY RPCDBG_SCHED #define RPC_TASK_MAGIC_ID 0xf00baa -static int rpc_task_id; #endif /* @@ -40,7 +39,6 @@ static mempool_t *rpc_task_mempool __read_mostly; static mempool_t *rpc_buffer_mempool __read_mostly; static void __rpc_default_timer(struct rpc_task *task); -static void rpciod_killall(void); static void rpc_async_schedule(struct work_struct *); static void rpc_release_task(struct rpc_task *task); @@ -50,23 +48,13 @@ static void rpc_release_task(struct rpc_task *task); static RPC_WAITQ(delay_queue, "delayq"); /* - * All RPC tasks are linked into this list - */ -static LIST_HEAD(all_tasks); - -/* * rpciod-related stuff */ static DEFINE_MUTEX(rpciod_mutex); -static unsigned int rpciod_users; +static atomic_t rpciod_users = ATOMIC_INIT(0); struct workqueue_struct *rpciod_workqueue; /* - * Spinlock for other critical sections of code. - */ -static DEFINE_SPINLOCK(rpc_sched_lock); - -/* * Disable the timer for a given RPC task. Should be called with * queue->lock and bh_disabled in order to avoid races within * rpc_run_timer(). 
@@ -267,18 +255,33 @@ static int rpc_wait_bit_interruptible(void *word) return 0; } +#ifdef RPC_DEBUG +static void rpc_task_set_debuginfo(struct rpc_task *task) +{ + static atomic_t rpc_pid; + + task->tk_magic = RPC_TASK_MAGIC_ID; + task->tk_pid = atomic_inc_return(&rpc_pid); +} +#else +static inline void rpc_task_set_debuginfo(struct rpc_task *task) +{ +} +#endif + static void rpc_set_active(struct rpc_task *task) { + struct rpc_clnt *clnt; if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0) return; - spin_lock(&rpc_sched_lock); -#ifdef RPC_DEBUG - task->tk_magic = RPC_TASK_MAGIC_ID; - task->tk_pid = rpc_task_id++; -#endif + rpc_task_set_debuginfo(task); /* Add to global list of all tasks */ - list_add_tail(&task->tk_task, &all_tasks); - spin_unlock(&rpc_sched_lock); + clnt = task->tk_client; + if (clnt != NULL) { + spin_lock(&clnt->cl_lock); + list_add_tail(&task->tk_task, &clnt->cl_tasks); + spin_unlock(&clnt->cl_lock); + } } /* @@ -818,6 +821,7 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, cons if (tk_ops->rpc_call_prepare != NULL) task->tk_action = rpc_prepare_task; task->tk_calldata = calldata; + INIT_LIST_HEAD(&task->tk_task); /* Initialize retry counters */ task->tk_garb_retry = 2; @@ -830,7 +834,7 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, cons task->tk_workqueue = rpciod_workqueue; if (clnt) { - atomic_inc(&clnt->cl_users); + kref_get(&clnt->cl_kref); if (clnt->cl_softrtry) task->tk_flags |= RPC_TASK_SOFT; if (!clnt->cl_intr) @@ -860,9 +864,7 @@ static void rpc_free_task(struct rcu_head *rcu) } /* - * Create a new task for the specified client. We have to - * clean up after an allocation failure, as the client may - * have specified "oneshot". + * Create a new task for the specified client. */ struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata) { @@ -870,7 +872,7 @@ struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc task = rpc_alloc_task(); if (!task) - goto cleanup; + goto out; rpc_init_task(task, clnt, flags, tk_ops, calldata); @@ -878,16 +880,6 @@ struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc task->tk_flags |= RPC_TASK_DYNAMIC; out: return task; - -cleanup: - /* Check whether to release the client */ - if (clnt) { - printk("rpc_new_task: failed, users=%d, oneshot=%d\n", - atomic_read(&clnt->cl_users), clnt->cl_oneshot); - atomic_inc(&clnt->cl_users); /* pretend we were used ... 
*/ - rpc_release_client(clnt); - } - goto out; } @@ -920,11 +912,13 @@ static void rpc_release_task(struct rpc_task *task) #endif dprintk("RPC: %5u release task\n", task->tk_pid); - /* Remove from global task list */ - spin_lock(&rpc_sched_lock); - list_del(&task->tk_task); - spin_unlock(&rpc_sched_lock); - + if (!list_empty(&task->tk_task)) { + struct rpc_clnt *clnt = task->tk_client; + /* Remove from client task list */ + spin_lock(&clnt->cl_lock); + list_del(&task->tk_task); + spin_unlock(&clnt->cl_lock); + } BUG_ON (RPC_IS_QUEUED(task)); /* Synchronously delete any running timer */ @@ -939,29 +933,6 @@ static void rpc_release_task(struct rpc_task *task) rpc_put_task(task); } -/** - * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it - * @clnt: pointer to RPC client - * @flags: RPC flags - * @ops: RPC call ops - * @data: user call data - */ -struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, - const struct rpc_call_ops *ops, - void *data) -{ - struct rpc_task *task; - task = rpc_new_task(clnt, flags, ops, data); - if (task == NULL) { - rpc_release_calldata(ops, data); - return ERR_PTR(-ENOMEM); - } - atomic_inc(&task->tk_count); - rpc_execute(task); - return task; -} -EXPORT_SYMBOL(rpc_run_task); - /* * Kill all tasks for the given client. * XXX: kill their descendants as well? @@ -969,44 +940,25 @@ EXPORT_SYMBOL(rpc_run_task); void rpc_killall_tasks(struct rpc_clnt *clnt) { struct rpc_task *rovr; - struct list_head *le; - dprintk("RPC: killing all tasks for client %p\n", clnt); + if (list_empty(&clnt->cl_tasks)) + return; + dprintk("RPC: killing all tasks for client %p\n", clnt); /* * Spin lock all_tasks to prevent changes... */ - spin_lock(&rpc_sched_lock); - alltask_for_each(rovr, le, &all_tasks) { + spin_lock(&clnt->cl_lock); + list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) { if (! RPC_IS_ACTIVATED(rovr)) continue; - if (!clnt || rovr->tk_client == clnt) { + if (!(rovr->tk_flags & RPC_TASK_KILLED)) { rovr->tk_flags |= RPC_TASK_KILLED; rpc_exit(rovr, -EIO); rpc_wake_up_task(rovr); } } - spin_unlock(&rpc_sched_lock); -} - -static void rpciod_killall(void) -{ - unsigned long flags; - - while (!list_empty(&all_tasks)) { - clear_thread_flag(TIF_SIGPENDING); - rpc_killall_tasks(NULL); - flush_workqueue(rpciod_workqueue); - if (!list_empty(&all_tasks)) { - dprintk("RPC: rpciod_killall: waiting for tasks " - "to exit\n"); - yield(); - } - } - - spin_lock_irqsave(¤t->sighand->siglock, flags); - recalc_sigpending(); - spin_unlock_irqrestore(¤t->sighand->siglock, flags); + spin_unlock(&clnt->cl_lock); } /* @@ -1018,28 +970,27 @@ rpciod_up(void) struct workqueue_struct *wq; int error = 0; + if (atomic_inc_not_zero(&rpciod_users)) + return 0; + mutex_lock(&rpciod_mutex); - dprintk("RPC: rpciod_up: users %u\n", rpciod_users); - rpciod_users++; - if (rpciod_workqueue) - goto out; - /* - * If there's no pid, we should be the first user. - */ - if (rpciod_users > 1) - printk(KERN_WARNING "rpciod_up: no workqueue, %u users??\n", rpciod_users); + + /* Guard against races with rpciod_down() */ + if (rpciod_workqueue != NULL) + goto out_ok; /* * Create the rpciod thread and wait for it to start. 
*/ + dprintk("RPC: creating workqueue rpciod\n"); error = -ENOMEM; wq = create_workqueue("rpciod"); - if (wq == NULL) { - printk(KERN_WARNING "rpciod_up: create workqueue failed, error=%d\n", error); - rpciod_users--; + if (wq == NULL) goto out; - } + rpciod_workqueue = wq; error = 0; +out_ok: + atomic_inc(&rpciod_users); out: mutex_unlock(&rpciod_mutex); return error; @@ -1048,59 +999,19 @@ out: void rpciod_down(void) { + if (!atomic_dec_and_test(&rpciod_users)) + return; + mutex_lock(&rpciod_mutex); - dprintk("RPC: rpciod_down sema %u\n", rpciod_users); - if (rpciod_users) { - if (--rpciod_users) - goto out; - } else - printk(KERN_WARNING "rpciod_down: no users??\n"); + dprintk("RPC: destroying workqueue rpciod\n"); - if (!rpciod_workqueue) { - dprintk("RPC: rpciod_down: Nothing to do!\n"); - goto out; + if (atomic_read(&rpciod_users) == 0 && rpciod_workqueue != NULL) { + destroy_workqueue(rpciod_workqueue); + rpciod_workqueue = NULL; } - rpciod_killall(); - - destroy_workqueue(rpciod_workqueue); - rpciod_workqueue = NULL; - out: mutex_unlock(&rpciod_mutex); } -#ifdef RPC_DEBUG -void rpc_show_tasks(void) -{ - struct list_head *le; - struct rpc_task *t; - - spin_lock(&rpc_sched_lock); - if (list_empty(&all_tasks)) { - spin_unlock(&rpc_sched_lock); - return; - } - printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout " - "-rpcwait -action- ---ops--\n"); - alltask_for_each(t, le, &all_tasks) { - const char *rpc_waitq = "none"; - - if (RPC_IS_QUEUED(t)) - rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq); - - printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n", - t->tk_pid, - (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1), - t->tk_flags, t->tk_status, - t->tk_client, - (t->tk_client ? t->tk_client->cl_prog : 0), - t->tk_rqstp, t->tk_timeout, - rpc_waitq, - t->tk_action, t->tk_ops); - } - spin_unlock(&rpc_sched_lock); -} -#endif - void rpc_destroy_mempool(void) { diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index 73075dec83c..384c4ad5ab8 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c @@ -28,15 +28,11 @@ EXPORT_SYMBOL(rpc_init_task); EXPORT_SYMBOL(rpc_sleep_on); EXPORT_SYMBOL(rpc_wake_up_next); EXPORT_SYMBOL(rpc_wake_up_task); -EXPORT_SYMBOL(rpciod_down); -EXPORT_SYMBOL(rpciod_up); -EXPORT_SYMBOL(rpc_new_task); EXPORT_SYMBOL(rpc_wake_up_status); /* RPC client functions */ EXPORT_SYMBOL(rpc_clone_client); EXPORT_SYMBOL(rpc_bind_new_program); -EXPORT_SYMBOL(rpc_destroy_client); EXPORT_SYMBOL(rpc_shutdown_client); EXPORT_SYMBOL(rpc_killall_tasks); EXPORT_SYMBOL(rpc_call_sync); @@ -61,7 +57,7 @@ EXPORT_SYMBOL(rpcauth_unregister); EXPORT_SYMBOL(rpcauth_create); EXPORT_SYMBOL(rpcauth_lookupcred); EXPORT_SYMBOL(rpcauth_lookup_credcache); -EXPORT_SYMBOL(rpcauth_free_credcache); +EXPORT_SYMBOL(rpcauth_destroy_credcache); EXPORT_SYMBOL(rpcauth_init_credcache); EXPORT_SYMBOL(put_rpccred); @@ -156,6 +152,7 @@ init_sunrpc(void) cache_register(&ip_map_cache); cache_register(&unix_gid_cache); init_socket_xprt(); + rpcauth_init_module(); out: return err; } @@ -163,6 +160,7 @@ out: static void __exit cleanup_sunrpc(void) { + rpcauth_remove_module(); cleanup_socket_xprt(); unregister_rpc_pipefs(); rpc_destroy_mempool(); diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 5baf48de255..64b9b8c743c 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -644,6 +644,7 @@ svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen) struct msghdr msg = { .msg_flags = MSG_DONTWAIT, }; + struct sockaddr *sin; int len; len = 
kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen, @@ -654,6 +655,19 @@ svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen) memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen); rqstp->rq_addrlen = svsk->sk_remotelen; + /* Destination address in request is needed for binding the + * source address in RPC callbacks later. + */ + sin = (struct sockaddr *)&svsk->sk_local; + switch (sin->sa_family) { + case AF_INET: + rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr; + break; + case AF_INET6: + rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr; + break; + } + dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n", svsk, iov[0].iov_base, iov[0].iov_len, len); @@ -1064,6 +1078,12 @@ svc_tcp_accept(struct svc_sock *svsk) goto failed; memcpy(&newsvsk->sk_remote, sin, slen); newsvsk->sk_remotelen = slen; + err = kernel_getsockname(newsock, sin, &slen); + if (unlikely(err < 0)) { + dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err); + slen = offsetof(struct sockaddr, sa_data); + } + memcpy(&newsvsk->sk_local, sin, slen); svc_sock_received(newsvsk); diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 5b05b73e4c1..c8c2edccad7 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -127,7 +127,7 @@ static void xprt_clear_locked(struct rpc_xprt *xprt) clear_bit(XPRT_LOCKED, &xprt->state); smp_mb__after_clear_bit(); } else - schedule_work(&xprt->task_cleanup); + queue_work(rpciod_workqueue, &xprt->task_cleanup); } /* @@ -515,7 +515,7 @@ xprt_init_autodisconnect(unsigned long data) if (xprt_connecting(xprt)) xprt_release_write(xprt, NULL); else - schedule_work(&xprt->task_cleanup); + queue_work(rpciod_workqueue, &xprt->task_cleanup); return; out_abort: spin_unlock(&xprt->transport_lock); @@ -886,27 +886,24 @@ void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long i /** * xprt_create_transport - create an RPC transport - * @proto: requested transport protocol - * @ap: remote peer address - * @size: length of address - * @to: timeout parameters + * @args: rpc transport creation arguments * */ -struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t size, struct rpc_timeout *to) +struct rpc_xprt *xprt_create_transport(struct rpc_xprtsock_create *args) { struct rpc_xprt *xprt; struct rpc_rqst *req; - switch (proto) { + switch (args->proto) { case IPPROTO_UDP: - xprt = xs_setup_udp(ap, size, to); + xprt = xs_setup_udp(args); break; case IPPROTO_TCP: - xprt = xs_setup_tcp(ap, size, to); + xprt = xs_setup_tcp(args); break; default: printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n", - proto); + args->proto); return ERR_PTR(-EIO); } if (IS_ERR(xprt)) { diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index cc33c5880ab..4ae7eed7f61 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -235,6 +235,7 @@ struct sock_xprt { * Connection of transports */ struct delayed_work connect_worker; + struct sockaddr_storage addr; unsigned short port; /* @@ -653,8 +654,7 @@ static void xs_destroy(struct rpc_xprt *xprt) dprintk("RPC: xs_destroy xprt %p\n", xprt); - cancel_delayed_work(&transport->connect_worker); - flush_scheduled_work(); + cancel_rearming_delayed_work(&transport->connect_worker); xprt_disconnect(xprt); xs_close(xprt); @@ -1001,7 +1001,7 @@ static void xs_tcp_state_change(struct sock *sk) /* Try to schedule an autoclose RPC calls */ set_bit(XPRT_CLOSE_WAIT, &xprt->state); if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) - schedule_work(&xprt->task_cleanup); + 
queue_work(rpciod_workqueue, &xprt->task_cleanup); default: xprt_disconnect(xprt); } @@ -1146,31 +1146,36 @@ static void xs_set_port(struct rpc_xprt *xprt, unsigned short port) sap->sin_port = htons(port); } -static int xs_bindresvport(struct sock_xprt *transport, struct socket *sock) +static int xs_bind(struct sock_xprt *transport, struct socket *sock) { struct sockaddr_in myaddr = { .sin_family = AF_INET, }; + struct sockaddr_in *sa; int err; unsigned short port = transport->port; + if (!transport->xprt.resvport) + port = 0; + sa = (struct sockaddr_in *)&transport->addr; + myaddr.sin_addr = sa->sin_addr; do { myaddr.sin_port = htons(port); err = kernel_bind(sock, (struct sockaddr *) &myaddr, sizeof(myaddr)); + if (!transport->xprt.resvport) + break; if (err == 0) { transport->port = port; - dprintk("RPC: xs_bindresvport bound to port %u\n", - port); - return 0; + break; } if (port <= xprt_min_resvport) port = xprt_max_resvport; else port--; } while (err == -EADDRINUSE && port != transport->port); - - dprintk("RPC: can't bind to reserved port (%d).\n", -err); + dprintk("RPC: xs_bind "NIPQUAD_FMT":%u: %s (%d)\n", + NIPQUAD(myaddr.sin_addr), port, err ? "failed" : "ok", err); return err; } @@ -1229,7 +1234,7 @@ static void xs_udp_connect_worker(struct work_struct *work) } xs_reclassify_socket(sock); - if (xprt->resvport && xs_bindresvport(transport, sock) < 0) { + if (xs_bind(transport, sock)) { sock_release(sock); goto out; } @@ -1316,7 +1321,7 @@ static void xs_tcp_connect_worker(struct work_struct *work) } xs_reclassify_socket(sock); - if (xprt->resvport && xs_bindresvport(transport, sock) < 0) { + if (xs_bind(transport, sock)) { sock_release(sock); goto out; } @@ -1410,18 +1415,16 @@ static void xs_connect(struct rpc_task *task) dprintk("RPC: xs_connect delayed xprt %p for %lu " "seconds\n", xprt, xprt->reestablish_timeout / HZ); - schedule_delayed_work(&transport->connect_worker, - xprt->reestablish_timeout); + queue_delayed_work(rpciod_workqueue, + &transport->connect_worker, + xprt->reestablish_timeout); xprt->reestablish_timeout <<= 1; if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; } else { dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); - schedule_delayed_work(&transport->connect_worker, 0); - - /* flush_scheduled_work can sleep... 
*/ - if (!RPC_IS_ASYNC(task)) - flush_scheduled_work(); + queue_delayed_work(rpciod_workqueue, + &transport->connect_worker, 0); } } @@ -1476,7 +1479,7 @@ static struct rpc_xprt_ops xs_udp_ops = { .set_buffer_size = xs_udp_set_buffer_size, .reserve_xprt = xprt_reserve_xprt_cong, .release_xprt = xprt_release_xprt_cong, - .rpcbind = rpcb_getport, + .rpcbind = rpcb_getport_async, .set_port = xs_set_port, .connect = xs_connect, .buf_alloc = rpc_malloc, @@ -1493,7 +1496,7 @@ static struct rpc_xprt_ops xs_udp_ops = { static struct rpc_xprt_ops xs_tcp_ops = { .reserve_xprt = xprt_reserve_xprt, .release_xprt = xs_tcp_release_xprt, - .rpcbind = rpcb_getport, + .rpcbind = rpcb_getport_async, .set_port = xs_set_port, .connect = xs_connect, .buf_alloc = rpc_malloc, @@ -1505,12 +1508,12 @@ static struct rpc_xprt_ops xs_tcp_ops = { .print_stats = xs_tcp_print_stats, }; -static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, unsigned int slot_table_size) +static struct rpc_xprt *xs_setup_xprt(struct rpc_xprtsock_create *args, unsigned int slot_table_size) { struct rpc_xprt *xprt; struct sock_xprt *new; - if (addrlen > sizeof(xprt->addr)) { + if (args->addrlen > sizeof(xprt->addr)) { dprintk("RPC: xs_setup_xprt: address too large\n"); return ERR_PTR(-EBADF); } @@ -1532,8 +1535,10 @@ static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, uns return ERR_PTR(-ENOMEM); } - memcpy(&xprt->addr, addr, addrlen); - xprt->addrlen = addrlen; + memcpy(&xprt->addr, args->dstaddr, args->addrlen); + xprt->addrlen = args->addrlen; + if (args->srcaddr) + memcpy(&new->addr, args->srcaddr, args->addrlen); new->port = xs_get_random_port(); return xprt; @@ -1541,22 +1546,20 @@ static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, uns /** * xs_setup_udp - Set up transport to use a UDP socket - * @addr: address of remote server - * @addrlen: length of address in bytes - * @to: timeout parameters + * @args: rpc transport creation arguments * */ -struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to) +struct rpc_xprt *xs_setup_udp(struct rpc_xprtsock_create *args) { struct rpc_xprt *xprt; struct sock_xprt *transport; - xprt = xs_setup_xprt(addr, addrlen, xprt_udp_slot_table_entries); + xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries); if (IS_ERR(xprt)) return xprt; transport = container_of(xprt, struct sock_xprt, xprt); - if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0) + if (ntohs(((struct sockaddr_in *)args->dstaddr)->sin_port) != 0) xprt_set_bound(xprt); xprt->prot = IPPROTO_UDP; @@ -1572,8 +1575,8 @@ struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_ xprt->ops = &xs_udp_ops; - if (to) - xprt->timeout = *to; + if (args->timeout) + xprt->timeout = *args->timeout; else xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); @@ -1586,22 +1589,20 @@ struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_ /** * xs_setup_tcp - Set up transport to use a TCP socket - * @addr: address of remote server - * @addrlen: length of address in bytes - * @to: timeout parameters + * @args: rpc transport creation arguments * */ -struct rpc_xprt *xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to) +struct rpc_xprt *xs_setup_tcp(struct rpc_xprtsock_create *args) { struct rpc_xprt *xprt; struct sock_xprt *transport; - xprt = xs_setup_xprt(addr, addrlen, xprt_tcp_slot_table_entries); + xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); if (IS_ERR(xprt)) return 
xprt; transport = container_of(xprt, struct sock_xprt, xprt); - if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0) + if (ntohs(((struct sockaddr_in *)args->dstaddr)->sin_port) != 0) xprt_set_bound(xprt); xprt->prot = IPPROTO_TCP; @@ -1616,8 +1617,8 @@ struct rpc_xprt *xs_setup_tcp(struct rpc_xprtsock_create *args) xprt->ops = &xs_tcp_ops; - if (to) - xprt->timeout = *to; + if (args->timeout) + xprt->timeout = *args->timeout; else xprt_set_timeout(&xprt->timeout, 2, 60 * HZ);
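/*
 * Sidebar (not part of this patch): transport creation after the
 * argument-struct conversion above, assuming only the rpc_xprtsock_create
 * fields this patch actually dereferences (proto, srcaddr, dstaddr,
 * addrlen, timeout).  A non-NULL srcaddr is what allows xs_bind() to pin
 * the socket's source address; NULL preserves the old behaviour.
 */
static struct rpc_xprt *demo_create_tcp_transport(struct sockaddr *peer,
						  size_t peerlen,
						  struct sockaddr *local)
{
	struct rpc_xprtsock_create args = {
		.proto   = IPPROTO_TCP,
		.srcaddr = local,	/* may be NULL: any local address */
		.dstaddr = peer,
		.addrlen = peerlen,
		.timeout = NULL,	/* NULL selects transport defaults */
	};

	return xprt_create_transport(&args);
}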