author     Linus Torvalds <torvalds@linux-foundation.org>  2019-01-03 03:35:23 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-01-03 03:35:23 +0300
commit     e6b92572808467f35fd159d47c45b650de29e722
tree       5fbd2e6279539c4f3eeeca0d6a69779bdbd0d6a4
parent     e45428a436765fcd154d461a2739b5640916dc00
parent     260f71eff493a844531629854c0935fa8de4fa2c
Merge tag 'nfs-for-4.21-1' of git://git.linux-nfs.org/projects/anna/linux-nfs
Pull NFS client updates from Anna Schumaker:
 "Stable bugfixes:
   - xprtrdma: Yet another double DMA-unmap # v4.20

  Features:
   - Allow some /proc/sys/sunrpc entries without CONFIG_SUNRPC_DEBUG
   - Per-xprt rdma receive workqueues
   - Drop support for FMR memory registration
   - Make port= mount option optional for RDMA mounts

  Other bugfixes and cleanups:
   - Remove unused nfs4_xdev_fs_type declaration
   - Fix comments for behavior that has changed
   - Remove generic RPC credentials by switching to 'struct cred'
   - Fix crossing mountpoints with different auth flavors
   - Various xprtrdma fixes from testing and auditing the close code
   - Fixes for disconnect issues when using xprtrdma with krb5
   - Clean up and improve xprtrdma trace points
   - Fix NFS v4.2 async copy reboot recovery"

* tag 'nfs-for-4.21-1' of git://git.linux-nfs.org/projects/anna/linux-nfs: (63 commits)
  sunrpc: convert to DEFINE_SHOW_ATTRIBUTE
  sunrpc: Add xprt after nfs4_test_session_trunk()
  sunrpc: convert unnecessary GFP_ATOMIC to GFP_NOFS
  sunrpc: handle ENOMEM in rpcb_getport_async
  NFS: remove unnecessary test for IS_ERR(cred)
  xprtrdma: Prevent leak of rpcrdma_rep objects
  NFSv4.2 fix async copy reboot recovery
  xprtrdma: Don't leak freed MRs
  xprtrdma: Add documenting comment for rpcrdma_buffer_destroy
  xprtrdma: Replace outdated comment for rpcrdma_ep_post
  xprtrdma: Update comments in frwr_op_send
  SUNRPC: Fix some kernel doc complaints
  SUNRPC: Simplify defining common RPC trace events
  NFS: Fix NFSv4 symbolic trace point output
  xprtrdma: Trace mapping, alloc, and dereg failures
  xprtrdma: Add trace points for calls to transport switch methods
  xprtrdma: Relocate the xprtrdma_mr_map trace points
  xprtrdma: Clean up of xprtrdma chunk trace points
  xprtrdma: Remove unused fields from rpcrdma_ia
  xprtrdma: Cull dprintk() call sites
  ...
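Most of the diff below is a mechanical conversion away from SUNRPC's refcounted struct rpc_cred to the generic VFS struct cred ("Remove generic RPC credentials by switching to 'struct cred'" above). As a rough sketch of the before/after pattern, with the caveat that the nfs_demo_* helpers are invented names for illustration and not functions added by this series:

/*
 * Illustration only: the refcounting pattern the NFS code converts to.
 * nfs_demo_get_cred()/nfs_demo_put_cred() are hypothetical helpers.
 */
#include <linux/cred.h>

static const struct cred *nfs_demo_get_cred(void)
{
	/* Previously: cred = rpc_lookup_cred(); ... put_rpccred(cred);
	 * Now the caller's own credentials are pinned directly.
	 */
	return get_cred(current_cred());
}

static void nfs_demo_put_cred(const struct cred *cred)
{
	/* The include/linux/cred.h change in this pull lets put_cred()
	 * accept NULL, which is why several "if (cred)" checks are
	 * dropped in the hunks below.
	 */
	put_cred(cred);
}

Machine credentials follow the same idea: clp->cl_machine_cred goes away in favour of rpc_machine_cred() plus a cl_principal string, as seen in the fs/nfs/client.c and fs/nfs/nfs4_fs.h hunks.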
-rw-r--r--  fs/lockd/clntproc.c | 6
-rw-r--r--  fs/nfs/blocklayout/blocklayout.c | 2
-rw-r--r--  fs/nfs/client.c | 9
-rw-r--r--  fs/nfs/delegation.c | 28
-rw-r--r--  fs/nfs/delegation.h | 10
-rw-r--r--  fs/nfs/dir.c | 59
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayout.c | 64
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayout.h | 8
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayoutdev.c | 16
-rw-r--r--  fs/nfs/inode.c | 13
-rw-r--r--  fs/nfs/internal.h | 15
-rw-r--r--  fs/nfs/nfs3proc.c | 4
-rw-r--r--  fs/nfs/nfs4_fs.h | 68
-rw-r--r--  fs/nfs/nfs4client.c | 4
-rw-r--r--  fs/nfs/nfs4proc.c | 158
-rw-r--r--  fs/nfs/nfs4renewd.c | 9
-rw-r--r--  fs/nfs/nfs4session.c | 5
-rw-r--r--  fs/nfs/nfs4state.c | 131
-rw-r--r--  fs/nfs/nfs4trace.h | 456
-rw-r--r--  fs/nfs/pagelist.c | 4
-rw-r--r--  fs/nfs/pnfs.c | 14
-rw-r--r--  fs/nfs/pnfs.h | 10
-rw-r--r--  fs/nfs/pnfs_dev.c | 4
-rw-r--r--  fs/nfs/pnfs_nfs.c | 2
-rw-r--r--  fs/nfs/proc.c | 2
-rw-r--r--  fs/nfs/super.c | 13
-rw-r--r--  fs/nfs/unlink.c | 20
-rw-r--r--  fs/nfs/write.c | 24
-rw-r--r--  fs/nfsd/nfs4callback.c | 31
-rw-r--r--  fs/nfsd/state.h | 2
-rw-r--r--  include/linux/cred.h | 26
-rw-r--r--  include/linux/nfs_fs.h | 13
-rw-r--r--  include/linux/nfs_fs_sb.h | 2
-rw-r--r--  include/linux/nfs_xdr.h | 16
-rw-r--r--  include/linux/sunrpc/auth.h | 51
-rw-r--r--  include/linux/sunrpc/clnt.h | 5
-rw-r--r--  include/linux/sunrpc/sched.h | 6
-rw-r--r--  include/trace/events/rpcrdma.h | 218
-rw-r--r--  include/trace/events/sunrpc.h | 172
-rw-r--r--  kernel/cred.c | 58
-rw-r--r--  net/sunrpc/Makefile | 2
-rw-r--r--  net/sunrpc/auth.c | 116
-rw-r--r--  net/sunrpc/auth_generic.c | 293
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 47
-rw-r--r--  net/sunrpc/auth_gss/gss_mech_switch.c | 2
-rw-r--r--  net/sunrpc/auth_null.c | 4
-rw-r--r--  net/sunrpc/auth_unix.c | 110
-rw-r--r--  net/sunrpc/backchannel_rqst.c | 2
-rw-r--r--  net/sunrpc/clnt.c | 29
-rw-r--r--  net/sunrpc/rpc_pipe.c | 19
-rw-r--r--  net/sunrpc/rpcb_clnt.c | 12
-rw-r--r--  net/sunrpc/sched.c | 5
-rw-r--r--  net/sunrpc/xprtmultipath.c | 4
-rw-r--r--  net/sunrpc/xprtrdma/Makefile | 3
-rw-r--r--  net/sunrpc/xprtrdma/backchannel.c | 39
-rw-r--r--  net/sunrpc/xprtrdma/fmr_ops.c | 337
-rw-r--r--  net/sunrpc/xprtrdma/frwr_ops.c | 209
-rw-r--r--  net/sunrpc/xprtrdma/rpc_rdma.c | 78
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 8
-rw-r--r--  net/sunrpc/xprtrdma/transport.c | 91
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c | 255
-rw-r--r--  net/sunrpc/xprtrdma/xprt_rdma.h | 80
-rw-r--r--  net/sunrpc/xprtsock.c | 10
63 files changed, 1518 insertions(+), 1995 deletions(-)
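One less mechanical detail: the NFS access cache in fs/nfs/dir.c used to order its red-black tree by raw rpc_cred pointer value; with plain creds it switches to the new cred_fscmp() helper (added to kernel/cred.c by this series), which orders credentials by their filesystem-relevant fields (fsuid, fsgid, supplementary groups). A minimal lookup written against that comparator could look like the sketch below; struct demo_entry and demo_rb_find() are made-up names, not code from the patch:

/*
 * Illustration only: an rbtree lookup keyed by cred_fscmp(), mirroring
 * what nfs_access_search_rbtree() does after this series.
 */
#include <linux/cred.h>
#include <linux/rbtree.h>

struct demo_entry {
	struct rb_node		rb_node;
	const struct cred	*cred;	/* pinned with get_cred() */
	u32			mask;
};

static struct demo_entry *demo_rb_find(struct rb_root *root,
				       const struct cred *cred)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct demo_entry *e = rb_entry(n, struct demo_entry, rb_node);
		int cmp = cred_fscmp(cred, e->cred);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return e;	/* same fs identity */
	}
	return NULL;
}

Comparing by content rather than by pointer means two tasks with the same filesystem identity share one cache entry even when they hold distinct struct cred objects.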
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 0a67dd4250e9..e8a004097d18 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -256,7 +256,7 @@ static int nlm_wait_on_grace(wait_queue_head_t *queue)
* Generic NLM call
*/
static int
-nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc)
+nlmclnt_call(const struct cred *cred, struct nlm_rqst *req, u32 proc)
{
struct nlm_host *host = req->a_host;
struct rpc_clnt *clnt;
@@ -401,7 +401,7 @@ int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *t
* completion in order to be able to correctly track the lock
* state.
*/
-static int nlmclnt_async_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
+static int nlmclnt_async_call(const struct cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
struct rpc_message msg = {
.rpc_argp = &req->a_args,
@@ -510,7 +510,7 @@ static int do_vfs_lock(struct file_lock *fl)
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
- struct rpc_cred *cred = nfs_file_cred(fl->fl_file);
+ const struct cred *cred = nfs_file_cred(fl->fl_file);
struct nlm_host *host = req->a_host;
struct nlm_res *resp = &req->a_res;
struct nlm_wait *block = NULL;
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index d3781cd983f6..690221747b47 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -584,7 +584,7 @@ static int decode_sector_number(__be32 **rp, sector_t *sp)
static struct nfs4_deviceid_node *
bl_find_get_deviceid(struct nfs_server *server,
- const struct nfs4_deviceid *id, struct rpc_cred *cred,
+ const struct nfs4_deviceid *id, const struct cred *cred,
gfp_t gfp_mask)
{
struct nfs4_deviceid_node *node;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 96d5f8135eb9..fb1cf1a4bda2 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -151,7 +151,6 @@ EXPORT_SYMBOL_GPL(unregister_nfs_version);
struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
{
struct nfs_client *clp;
- struct rpc_cred *cred;
int err = -ENOMEM;
if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL)
@@ -182,9 +181,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
clp->cl_proto = cl_init->proto;
clp->cl_net = get_net(cl_init->net);
- cred = rpc_lookup_machine_cred("*");
- if (!IS_ERR(cred))
- clp->cl_machine_cred = cred;
+ clp->cl_principal = "*";
nfs_fscache_get_client_cookie(clp);
return clp;
@@ -246,9 +243,6 @@ void nfs_free_client(struct nfs_client *clp)
if (!IS_ERR(clp->cl_rpcclient))
rpc_shutdown_client(clp->cl_rpcclient);
- if (clp->cl_machine_cred != NULL)
- put_rpccred(clp->cl_machine_cred);
-
put_net(clp->cl_net);
put_nfs_version(clp->cl_nfs_mod);
kfree(clp->cl_hostname);
@@ -527,6 +521,7 @@ int nfs_create_rpc_client(struct nfs_client *clp,
return PTR_ERR(clnt);
}
+ clnt->cl_principal = clp->cl_principal;
clp->cl_rpcclient = clnt;
return 0;
}
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 6ec2f78c1e19..885363ca8569 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -26,10 +26,8 @@
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
- if (delegation->cred) {
- put_rpccred(delegation->cred);
- delegation->cred = NULL;
- }
+ put_cred(delegation->cred);
+ delegation->cred = NULL;
kfree_rcu(delegation, rcu);
}
@@ -178,13 +176,13 @@ again:
* @pagemod_limit: write delegation "space_limit"
*
*/
-void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
+void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
fmode_t type,
const nfs4_stateid *stateid,
unsigned long pagemod_limit)
{
struct nfs_delegation *delegation;
- struct rpc_cred *oldcred = NULL;
+ const struct cred *oldcred = NULL;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
@@ -195,12 +193,12 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
delegation->type = type;
delegation->pagemod_limit = pagemod_limit;
oldcred = delegation->cred;
- delegation->cred = get_rpccred(cred);
+ delegation->cred = get_cred(cred);
clear_bit(NFS_DELEGATION_NEED_RECLAIM,
&delegation->flags);
spin_unlock(&delegation->lock);
rcu_read_unlock();
- put_rpccred(oldcred);
+ put_cred(oldcred);
trace_nfs4_reclaim_delegation(inode, type);
return;
}
@@ -341,7 +339,7 @@ nfs_update_inplace_delegation(struct nfs_delegation *delegation,
*
* Returns zero on success, or a negative errno value.
*/
-int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred,
+int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
fmode_t type,
const nfs4_stateid *stateid,
unsigned long pagemod_limit)
@@ -360,7 +358,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred,
delegation->type = type;
delegation->pagemod_limit = pagemod_limit;
delegation->change_attr = inode_peek_iversion_raw(inode);
- delegation->cred = get_rpccred(cred);
+ delegation->cred = get_cred(cred);
delegation->inode = inode;
delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
spin_lock_init(&delegation->lock);
@@ -1047,7 +1045,7 @@ void nfs_reap_expired_delegations(struct nfs_client *clp)
struct nfs_delegation *delegation;
struct nfs_server *server;
struct inode *inode;
- struct rpc_cred *cred;
+ const struct cred *cred;
nfs4_stateid stateid;
restart:
@@ -1069,7 +1067,7 @@ restart:
nfs_sb_deactive(server->super);
goto restart;
}
- cred = get_rpccred_rcu(delegation->cred);
+ cred = get_cred_rcu(delegation->cred);
nfs4_stateid_copy(&stateid, &delegation->stateid);
clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
rcu_read_unlock();
@@ -1078,7 +1076,7 @@ restart:
nfs_revoke_delegation(inode, &stateid);
nfs_inode_find_state_and_recover(inode, &stateid);
}
- put_rpccred(cred);
+ put_cred(cred);
if (nfs4_server_rebooted(clp)) {
nfs_inode_mark_test_expired_delegation(server,inode);
iput(inode);
@@ -1173,7 +1171,7 @@ out:
* otherwise "false" is returned.
*/
bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags,
- nfs4_stateid *dst, struct rpc_cred **cred)
+ nfs4_stateid *dst, const struct cred **cred)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_delegation *delegation;
@@ -1187,7 +1185,7 @@ bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags,
nfs4_stateid_copy(dst, &delegation->stateid);
nfs_mark_delegation_referenced(delegation);
if (cred)
- *cred = get_rpccred(delegation->cred);
+ *cred = get_cred(delegation->cred);
}
rcu_read_unlock();
return ret;
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index bb1ef8c37af4..dcbf3394ba0e 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -15,7 +15,7 @@
*/
struct nfs_delegation {
struct list_head super_list;
- struct rpc_cred *cred;
+ const struct cred *cred;
struct inode *inode;
nfs4_stateid stateid;
fmode_t type;
@@ -36,9 +36,9 @@ enum {
NFS_DELEGATION_TEST_EXPIRED,
};
-int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred,
+int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
fmode_t type, const nfs4_stateid *stateid, unsigned long pagemod_limit);
-void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
+void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
fmode_t type, const nfs4_stateid *stateid, unsigned long pagemod_limit);
int nfs4_inode_return_delegation(struct inode *inode);
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
@@ -60,10 +60,10 @@ void nfs_mark_test_expired_all_delegations(struct nfs_client *clp);
void nfs_reap_expired_delegations(struct nfs_client *clp);
/* NFSv4 delegation-related procedures */
-int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync);
+int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync);
int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type);
int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
-bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, struct rpc_cred **cred);
+bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, const struct cred **cred);
bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 71b2e390becf..6bf4471850c8 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -67,7 +67,7 @@ const struct address_space_operations nfs_dir_aops = {
.freepage = nfs_readdir_clear_array,
};
-static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir, struct rpc_cred *cred)
+static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir, const struct cred *cred)
{
struct nfs_inode *nfsi = NFS_I(dir);
struct nfs_open_dir_context *ctx;
@@ -77,7 +77,7 @@ static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir
ctx->attr_gencount = nfsi->attr_gencount;
ctx->dir_cookie = 0;
ctx->dup_cookie = 0;
- ctx->cred = get_rpccred(cred);
+ ctx->cred = get_cred(cred);
spin_lock(&dir->i_lock);
list_add(&ctx->list, &nfsi->open_files);
spin_unlock(&dir->i_lock);
@@ -91,7 +91,7 @@ static void put_nfs_open_dir_context(struct inode *dir, struct nfs_open_dir_cont
spin_lock(&dir->i_lock);
list_del(&ctx->list);
spin_unlock(&dir->i_lock);
- put_rpccred(ctx->cred);
+ put_cred(ctx->cred);
kfree(ctx);
}
@@ -103,23 +103,18 @@ nfs_opendir(struct inode *inode, struct file *filp)
{
int res = 0;
struct nfs_open_dir_context *ctx;
- struct rpc_cred *cred;
dfprintk(FILE, "NFS: open dir(%pD2)\n", filp);
nfs_inc_stats(inode, NFSIOS_VFSOPEN);
- cred = rpc_lookup_cred();
- if (IS_ERR(cred))
- return PTR_ERR(cred);
- ctx = alloc_nfs_open_dir_context(inode, cred);
+ ctx = alloc_nfs_open_dir_context(inode, current_cred());
if (IS_ERR(ctx)) {
res = PTR_ERR(ctx);
goto out;
}
filp->private_data = ctx;
out:
- put_rpccred(cred);
return res;
}
@@ -334,7 +329,7 @@ int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
struct nfs_entry *entry, struct file *file, struct inode *inode)
{
struct nfs_open_dir_context *ctx = file->private_data;
- struct rpc_cred *cred = ctx->cred;
+ const struct cred *cred = ctx->cred;
unsigned long timestamp, gencount;
int error;
@@ -2139,7 +2134,7 @@ MODULE_PARM_DESC(nfs_access_max_cachesize, "NFS access maximum total cache lengt
static void nfs_access_free_entry(struct nfs_access_entry *entry)
{
- put_rpccred(entry->cred);
+ put_cred(entry->cred);
kfree_rcu(entry, rcu_head);
smp_mb__before_atomic();
atomic_long_dec(&nfs_access_nr_entries);
@@ -2265,17 +2260,18 @@ void nfs_access_zap_cache(struct inode *inode)
}
EXPORT_SYMBOL_GPL(nfs_access_zap_cache);
-static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, struct rpc_cred *cred)
+static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, const struct cred *cred)
{
struct rb_node *n = NFS_I(inode)->access_cache.rb_node;
- struct nfs_access_entry *entry;
while (n != NULL) {
- entry = rb_entry(n, struct nfs_access_entry, rb_node);
+ struct nfs_access_entry *entry =
+ rb_entry(n, struct nfs_access_entry, rb_node);
+ int cmp = cred_fscmp(cred, entry->cred);
- if (cred < entry->cred)
+ if (cmp < 0)
n = n->rb_left;
- else if (cred > entry->cred)
+ else if (cmp > 0)
n = n->rb_right;
else
return entry;
@@ -2283,7 +2279,7 @@ static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, st
return NULL;
}
-static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, struct nfs_access_entry *res, bool may_block)
+static int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, bool may_block)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_access_entry *cache;
@@ -2326,7 +2322,7 @@ out_zap:
return -ENOENT;
}
-static int nfs_access_get_cached_rcu(struct inode *inode, struct rpc_cred *cred, struct nfs_access_entry *res)
+static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res)
{
/* Only check the most recently returned cache entry,
* but do it without locking.
@@ -2363,15 +2359,17 @@ static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *
struct rb_node **p = &root_node->rb_node;
struct rb_node *parent = NULL;
struct nfs_access_entry *entry;
+ int cmp;
spin_lock(&inode->i_lock);
while (*p != NULL) {
parent = *p;
entry = rb_entry(parent, struct nfs_access_entry, rb_node);
+ cmp = cred_fscmp(set->cred, entry->cred);
- if (set->cred < entry->cred)
+ if (cmp < 0)
p = &parent->rb_left;
- else if (set->cred > entry->cred)
+ else if (cmp > 0)
p = &parent->rb_right;
else
goto found;
@@ -2395,7 +2393,7 @@ void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
if (cache == NULL)
return;
RB_CLEAR_NODE(&cache->rb_node);
- cache->cred = get_rpccred(set->cred);
+ cache->cred = get_cred(set->cred);
cache->mask = set->mask;
/* The above field assignments must be visible
@@ -2459,7 +2457,7 @@ void nfs_access_set_mask(struct nfs_access_entry *entry, u32 access_result)
}
EXPORT_SYMBOL_GPL(nfs_access_set_mask);
-static int nfs_do_access(struct inode *inode, struct rpc_cred *cred, int mask)
+static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask)
{
struct nfs_access_entry cache;
bool may_block = (mask & MAY_NOT_BLOCK) == 0;
@@ -2523,7 +2521,7 @@ static int nfs_open_permission_mask(int openflags)
return mask;
}
-int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags)
+int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags)
{
return nfs_do_access(inode, cred, nfs_open_permission_mask(openflags));
}
@@ -2548,7 +2546,7 @@ static int nfs_execute_ok(struct inode *inode, int mask)
int nfs_permission(struct inode *inode, int mask)
{
- struct rpc_cred *cred;
+ const struct cred *cred = current_cred();
int res = 0;
nfs_inc_stats(inode, NFSIOS_VFSACCESS);
@@ -2582,20 +2580,11 @@ force_lookup:
/* Always try fast lookups first */
rcu_read_lock();
- cred = rpc_lookup_cred_nonblock();
- if (!IS_ERR(cred))
- res = nfs_do_access(inode, cred, mask|MAY_NOT_BLOCK);
- else
- res = PTR_ERR(cred);
+ res = nfs_do_access(inode, cred, mask|MAY_NOT_BLOCK);
rcu_read_unlock();
if (res == -ECHILD && !(mask & MAY_NOT_BLOCK)) {
/* Fast lookup failed, try the slow way */
- cred = rpc_lookup_cred();
- if (!IS_ERR(cred)) {
- res = nfs_do_access(inode, cred, mask);
- put_rpccred(cred);
- } else
- res = PTR_ERR(cred);
+ res = nfs_do_access(inode, cred, mask);
}
out:
if (!res && (mask & MAY_EXEC))
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 310d7500f665..63abe705f4ca 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -9,6 +9,7 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
+#include <linux/sched/mm.h>
#include <linux/sunrpc/metrics.h>
@@ -27,9 +28,6 @@
#define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
#define FF_LAYOUTRETURN_MAXERR 20
-
-static struct group_info *ff_zero_group;
-
static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
struct nfs_pgio_header *hdr);
static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
@@ -226,16 +224,14 @@ static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
- struct rpc_cred *cred;
+ const struct cred *cred;
ff_layout_remove_mirror(mirror);
kfree(mirror->fh_versions);
cred = rcu_access_pointer(mirror->ro_cred);
- if (cred)
- put_rpccred(cred);
+ put_cred(cred);
cred = rcu_access_pointer(mirror->rw_cred);
- if (cred)
- put_rpccred(cred);
+ put_cred(cred);
nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
kfree(mirror);
}
@@ -413,8 +409,10 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
for (i = 0; i < fls->mirror_array_cnt; i++) {
struct nfs4_ff_layout_mirror *mirror;
- struct auth_cred acred = { .group_info = ff_zero_group };
- struct rpc_cred __rcu *cred;
+ struct cred *kcred;
+ const struct cred *cred;
+ kuid_t uid;
+ kgid_t gid;
u32 ds_count, fh_count, id;
int j;
@@ -482,21 +480,28 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
if (rc)
goto out_err_free;
- acred.uid = make_kuid(&init_user_ns, id);
+ uid = make_kuid(&init_user_ns, id);
/* group */
rc = decode_name(&stream, &id);
if (rc)
goto out_err_free;
- acred.gid = make_kgid(&init_user_ns, id);
+ gid = make_kgid(&init_user_ns, id);
- /* find the cred for it */
- rcu_assign_pointer(cred, rpc_lookup_generic_cred(&acred, 0, gfp_flags));
- if (IS_ERR(cred)) {
- rc = PTR_ERR(cred);
- goto out_err_free;
+ if (gfp_flags & __GFP_FS)
+ kcred = prepare_kernel_cred(NULL);
+ else {
+ unsigned int nofs_flags = memalloc_nofs_save();
+ kcred = prepare_kernel_cred(NULL);
+ memalloc_nofs_restore(nofs_flags);
}
+ rc = -ENOMEM;
+ if (!kcred)
+ goto out_err_free;
+ kcred->fsuid = uid;
+ kcred->fsgid = gid;
+ cred = kcred;
if (lgr->range.iomode == IOMODE_READ)
rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
@@ -519,8 +524,8 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
dprintk("%s: iomode %s uid %u gid %u\n", __func__,
lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
- from_kuid(&init_user_ns, acred.uid),
- from_kgid(&init_user_ns, acred.gid));
+ from_kuid(&init_user_ns, uid),
+ from_kgid(&init_user_ns, gid));
}
p = xdr_inline_decode(&stream, 4);
@@ -1698,7 +1703,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
struct pnfs_layout_segment *lseg = hdr->lseg;
struct nfs4_pnfs_ds *ds;
struct rpc_clnt *ds_clnt;
- struct rpc_cred *ds_cred;
+ const struct cred *ds_cred;
loff_t offset = hdr->args.offset;
u32 idx = hdr->pgio_mirror_idx;
int vers;
@@ -1749,7 +1754,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
vers == 3 ? &ff_layout_read_call_ops_v3 :
&ff_layout_read_call_ops_v4,
0, RPC_TASK_SOFTCONN);
- put_rpccred(ds_cred);
+ put_cred(ds_cred);
return PNFS_ATTEMPTED;
out_failed:
@@ -1765,7 +1770,7 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
struct pnfs_layout_segment *lseg = hdr->lseg;
struct nfs4_pnfs_ds *ds;
struct rpc_clnt *ds_clnt;
- struct rpc_cred *ds_cred;
+ const struct cred *ds_cred;
loff_t offset = hdr->args.offset;
int vers;
struct nfs_fh *fh;
@@ -1814,7 +1819,7 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
vers == 3 ? &ff_layout_write_call_ops_v3 :
&ff_layout_write_call_ops_v4,
sync, RPC_TASK_SOFTCONN);
- put_rpccred(ds_cred);
+ put_cred(ds_cred);
return PNFS_ATTEMPTED;
out_failed:
@@ -1844,7 +1849,7 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
struct pnfs_layout_segment *lseg = data->lseg;
struct nfs4_pnfs_ds *ds;
struct rpc_clnt *ds_clnt;
- struct rpc_cred *ds_cred;
+ const struct cred *ds_cred;
u32 idx;
int vers, ret;
struct nfs_fh *fh;
@@ -1884,7 +1889,7 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
vers == 3 ? &ff_layout_commit_call_ops_v3 :
&ff_layout_commit_call_ops_v4,
how, RPC_TASK_SOFTCONN);
- put_rpccred(ds_cred);
+ put_cred(ds_cred);
return ret;
out_err:
pnfs_generic_prepare_to_resend_writes(data);
@@ -2383,11 +2388,6 @@ static int __init nfs4flexfilelayout_init(void)
{
printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
__func__);
- if (!ff_zero_group) {
- ff_zero_group = groups_alloc(0);
- if (!ff_zero_group)
- return -ENOMEM;
- }
return pnfs_register_layoutdriver(&flexfilelayout_type);
}
@@ -2396,10 +2396,6 @@ static void __exit nfs4flexfilelayout_exit(void)
printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
__func__);
pnfs_unregister_layoutdriver(&flexfilelayout_type);
- if (ff_zero_group) {
- put_group_info(ff_zero_group);
- ff_zero_group = NULL;
- }
}
MODULE_ALIAS("nfs-layouttype4-4");
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
index de50a342d5a5..c2626bad466b 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -81,8 +81,8 @@ struct nfs4_ff_layout_mirror {
u32 fh_versions_cnt;
struct nfs_fh *fh_versions;
nfs4_stateid stateid;
- struct rpc_cred __rcu *ro_cred;
- struct rpc_cred __rcu *rw_cred;
+ const struct cred __rcu *ro_cred;
+ const struct cred __rcu *rw_cred;
refcount_t ref;
spinlock_t lock;
unsigned long flags;
@@ -229,8 +229,8 @@ nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg,
u32 ds_idx,
struct nfs_client *ds_clp,
struct inode *inode);
-struct rpc_cred *ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg,
- u32 ds_idx, struct rpc_cred *mdscred);
+const struct cred *ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg,
+ u32 ds_idx, const struct cred *mdscred);
bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg);
bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg);
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index d23347389626..11766a74216d 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -330,10 +330,10 @@ int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
return 0;
}
-static struct rpc_cred *
+static const struct cred *
ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode)
{
- struct rpc_cred *cred, __rcu **pcred;
+ const struct cred *cred, __rcu **pcred;
if (iomode == IOMODE_READ)
pcred = &mirror->ro_cred;
@@ -346,7 +346,7 @@ ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode)
if (!cred)
break;
- cred = get_rpccred_rcu(cred);
+ cred = get_cred_rcu(cred);
} while(!cred);
rcu_read_unlock();
return cred;
@@ -465,19 +465,19 @@ out:
return ds;
}
-struct rpc_cred *
+const struct cred *
ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg, u32 ds_idx,
- struct rpc_cred *mdscred)
+ const struct cred *mdscred)
{
struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
- struct rpc_cred *cred;
+ const struct cred *cred;
if (mirror && !mirror->mirror_ds->ds_versions[0].tightly_coupled) {
cred = ff_layout_get_mirror_cred(mirror, lseg->pls_range.iomode);
if (!cred)
- cred = get_rpccred(mdscred);
+ cred = get_cred(mdscred);
} else {
- cred = get_rpccred(mdscred);
+ cred = get_cred(mdscred);
}
return cred;
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 5b1eee4952b7..094775ea0781 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -950,18 +950,17 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry,
struct file *filp)
{
struct nfs_open_context *ctx;
- struct rpc_cred *cred = rpc_lookup_cred();
- if (IS_ERR(cred))
- return ERR_CAST(cred);
+ const struct cred *cred = get_current_cred();
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
- put_rpccred(cred);
+ put_cred(cred);
return ERR_PTR(-ENOMEM);
}
nfs_sb_active(dentry->d_sb);
ctx->dentry = dget(dentry);
ctx->cred = cred;
+ ctx->ll_cred = NULL;
ctx->state = NULL;
ctx->mode = f_mode;
ctx->flags = 0;
@@ -997,10 +996,10 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
}
if (inode != NULL)
NFS_PROTO(inode)->close_context(ctx, is_sync);
- if (ctx->cred != NULL)
- put_rpccred(ctx->cred);
+ put_cred(ctx->cred);
dput(ctx->dentry);
nfs_sb_deactive(sb);
+ put_rpccred(ctx->ll_cred);
kfree(ctx->mdsthreshold);
kfree_rcu(ctx, rcu_head);
}
@@ -1042,7 +1041,7 @@ EXPORT_SYMBOL_GPL(nfs_file_set_open_context);
/*
* Given an inode, search for an open context with the desired characteristics
*/
-struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode)
+struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct cred *cred, fmode_t mode)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_open_context *pos, *ctx = NULL;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 8357ff69962f..7f80f036ebd9 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -254,7 +254,7 @@ struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *);
void nfs_pgio_header_free(struct nfs_pgio_header *);
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
- struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
+ const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
const struct rpc_call_ops *call_ops, int how, int flags);
void nfs_free_request(struct nfs_page *req);
struct nfs_pgio_mirror *
@@ -269,7 +269,7 @@ static inline bool nfs_pgio_has_mirroring(struct nfs_pageio_descriptor *desc)
static inline bool nfs_match_open_context(const struct nfs_open_context *ctx1,
const struct nfs_open_context *ctx2)
{
- return ctx1->cred == ctx2->cred && ctx1->state == ctx2->state;
+ return cred_fscmp(ctx1->cred, ctx2->cred) == 0 && ctx1->state == ctx2->state;
}
/* nfs2xdr.c */
@@ -395,7 +395,6 @@ extern const struct super_operations nfs_sops;
extern struct file_system_type nfs_fs_type;
extern struct file_system_type nfs_xdev_fs_type;
#if IS_ENABLED(CONFIG_NFS_V4)
-extern struct file_system_type nfs4_xdev_fs_type;
extern struct file_system_type nfs4_referral_fs_type;
#endif
bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t);
@@ -565,13 +564,13 @@ extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
const struct nfs_client_initdata *);
extern int nfs40_walk_client_list(struct nfs_client *clp,
struct nfs_client **result,
- struct rpc_cred *cred);
+ const struct cred *cred);
extern int nfs41_walk_client_list(struct nfs_client *clp,
struct nfs_client **result,
- struct rpc_cred *cred);
-extern int nfs4_test_session_trunk(struct rpc_clnt *,
- struct rpc_xprt *,
- void *);
+ const struct cred *cred);
+extern void nfs4_test_session_trunk(struct rpc_clnt *clnt,
+ struct rpc_xprt *xprt,
+ void *data);
static inline struct inode *nfs_igrab_and_active(struct inode *inode)
{
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 71bc16225b98..a3ad2d46fd42 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -607,7 +607,7 @@ out:
* readdirplus.
*/
static int
-nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+nfs3_proc_readdir(struct dentry *dentry, const struct cred *cred,
u64 cookie, struct page **pages, unsigned int count, bool plus)
{
struct inode *dir = d_inode(dentry);
@@ -628,7 +628,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
.rpc_proc = &nfs3_procedures[NFS3PROC_READDIR],
.rpc_argp = &arg,
.rpc_resp = &res,
- .rpc_cred = cred
+ .rpc_cred = cred,
};
int status = -ENOMEM;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 1b994b527518..06ac3d9ac7c6 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -62,10 +62,11 @@ struct nfs4_minor_version_ops {
void (*free_lock_state)(struct nfs_server *,
struct nfs4_lock_state *);
int (*test_and_free_expired)(struct nfs_server *,
- nfs4_stateid *, struct rpc_cred *);
+ nfs4_stateid *, const struct cred *);
struct nfs_seqid *
(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
- int (*session_trunk)(struct rpc_clnt *, struct rpc_xprt *, void *);
+ void (*session_trunk)(struct rpc_clnt *clnt,
+ struct rpc_xprt *xprt, void *data);
const struct rpc_call_ops *call_sync_ops;
const struct nfs4_state_recovery_ops *reboot_recovery_ops;
const struct nfs4_state_recovery_ops *nograce_recovery_ops;
@@ -107,7 +108,7 @@ struct nfs4_state_owner {
unsigned long so_expires;
struct rb_node so_server_node;
- struct rpc_cred *so_cred; /* Associated cred */
+ const struct cred *so_cred; /* Associated cred */
spinlock_t so_lock;
atomic_t so_count;
@@ -212,10 +213,10 @@ struct nfs4_state_recovery_ops {
int state_flag_bit;
int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *);
int (*recover_lock)(struct nfs4_state *, struct file_lock *);
- int (*establish_clid)(struct nfs_client *, struct rpc_cred *);
- int (*reclaim_complete)(struct nfs_client *, struct rpc_cred *);
+ int (*establish_clid)(struct nfs_client *, const struct cred *);
+ int (*reclaim_complete)(struct nfs_client *, const struct cred *);
int (*detect_trunking)(struct nfs_client *, struct nfs_client **,
- struct rpc_cred *);
+ const struct cred *);
};
struct nfs4_opendata {
@@ -245,19 +246,19 @@ struct nfs4_opendata {
struct nfs4_add_xprt_data {
struct nfs_client *clp;
- struct rpc_cred *cred;
+ const struct cred *cred;
};
struct nfs4_state_maintenance_ops {
- int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *, unsigned);
- struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *);
- int (*renew_lease)(struct nfs_client *, struct rpc_cred *);
+ int (*sched_state_renewal)(struct nfs_client *, const struct cred *, unsigned);
+ const struct cred * (*get_state_renewal_cred)(struct nfs_client *);
+ int (*renew_lease)(struct nfs_client *, const struct cred *);
};
struct nfs4_mig_recovery_ops {
int (*get_locations)(struct inode *, struct nfs4_fs_locations *,
- struct page *, struct rpc_cred *);
- int (*fsid_present)(struct inode *, struct rpc_cred *);
+ struct page *, const struct cred *);
+ int (*fsid_present)(struct inode *, const struct cred *);
};
extern const struct dentry_operations nfs4_dentry_operations;
@@ -286,21 +287,21 @@ extern int nfs4_call_sync(struct rpc_clnt *, struct nfs_server *,
struct rpc_message *, struct nfs4_sequence_args *,
struct nfs4_sequence_res *, int);
extern void nfs4_init_sequence(struct nfs4_sequence_args *, struct nfs4_sequence_res *, int, int);
-extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
-extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
+extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, const struct cred *, struct nfs4_setclientid_res *);
+extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, const struct cred *);
extern int nfs4_proc_get_rootfh(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *, bool);
-extern int nfs4_proc_bind_conn_to_session(struct nfs_client *, struct rpc_cred *cred);
-extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
+extern int nfs4_proc_bind_conn_to_session(struct nfs_client *, const struct cred *cred);
+extern int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred);
extern int nfs4_destroy_clientid(struct nfs_client *clp);
-extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
-extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
+extern int nfs4_init_clientid(struct nfs_client *, const struct cred *);
+extern int nfs41_init_clientid(struct nfs_client *, const struct cred *);
extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait);
extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
extern int nfs4_proc_fs_locations(struct rpc_clnt *, struct inode *, const struct qstr *,
struct nfs4_fs_locations *, struct page *);
extern int nfs4_proc_get_locations(struct inode *, struct nfs4_fs_locations *,
- struct page *page, struct rpc_cred *);
-extern int nfs4_proc_fsid_present(struct inode *, struct rpc_cred *);
+ struct page *page, const struct cred *);
+extern int nfs4_proc_fsid_present(struct inode *, const struct cred *);
extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *, const struct qstr *,
struct nfs_fh *, struct nfs_fattr *);
extern int nfs4_proc_secinfo(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
@@ -312,8 +313,8 @@ extern int nfs4_set_rw_stateid(nfs4_stateid *stateid,
#if defined(CONFIG_NFS_V4_1)
extern int nfs41_sequence_done(struct rpc_task *, struct nfs4_sequence_res *);
-extern int nfs4_proc_create_session(struct nfs_client *, struct rpc_cred *);
-extern int nfs4_proc_destroy_session(struct nfs4_session *, struct rpc_cred *);
+extern int nfs4_proc_create_session(struct nfs_client *, const struct cred *);
+extern int nfs4_proc_destroy_session(struct nfs4_session *, const struct cred *);
extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
struct nfs_fsinfo *fsinfo);
extern int nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data,
@@ -338,7 +339,6 @@ static inline bool
_nfs4_state_protect(struct nfs_client *clp, unsigned long sp4_mode,
struct rpc_clnt **clntp, struct rpc_message *msg)
{
- struct rpc_cred *newcred = NULL;
rpc_authflavor_t flavor;
if (sp4_mode == NFS_SP4_MACH_CRED_CLEANUP ||
@@ -353,13 +353,7 @@ _nfs4_state_protect(struct nfs_client *clp, unsigned long sp4_mode,
return false;
}
if (test_bit(sp4_mode, &clp->cl_sp4_flags)) {
- spin_lock(&clp->cl_lock);
- if (clp->cl_machine_cred != NULL)
- /* don't call get_rpccred on the machine cred -
- * a reference will be held for life of clp */
- newcred = clp->cl_machine_cred;
- spin_unlock(&clp->cl_lock);
- msg->rpc_cred = newcred;
+ msg->rpc_cred = rpc_machine_cred();
flavor = clp->cl_rpcclient->cl_auth->au_flavor;
WARN_ON_ONCE(flavor != RPC_AUTH_GSS_KRB5I &&
@@ -450,16 +444,16 @@ extern void nfs4_set_lease_period(struct nfs_client *clp,
/* nfs4state.c */
-struct rpc_cred *nfs4_get_clid_cred(struct nfs_client *clp);
-struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
-struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
+const struct cred *nfs4_get_clid_cred(struct nfs_client *clp);
+const struct cred *nfs4_get_machine_cred(struct nfs_client *clp);
+const struct cred *nfs4_get_renew_cred(struct nfs_client *clp);
int nfs4_discover_server_trunking(struct nfs_client *clp,
struct nfs_client **);
int nfs40_discover_server_trunking(struct nfs_client *clp,
- struct nfs_client **, struct rpc_cred *);
+ struct nfs_client **, const struct cred *);
#if defined(CONFIG_NFS_V4_1)
int nfs41_discover_server_trunking(struct nfs_client *clp,
- struct nfs_client **, struct rpc_cred *);
+ struct nfs_client **, const struct cred *);
extern void nfs4_schedule_session_recovery(struct nfs4_session *, int);
extern void nfs41_notify_server(struct nfs_client *);
#else
@@ -468,7 +462,7 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
}
#endif /* CONFIG_NFS_V4_1 */
-extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
+extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, const struct cred *, gfp_t);
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
extern void nfs4_purge_state_owners(struct nfs_server *);
extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
@@ -494,7 +488,7 @@ extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern int nfs4_select_rw_stateid(struct nfs4_state *, fmode_t,
const struct nfs_lock_context *, nfs4_stateid *,
- struct rpc_cred **);
+ const struct cred **);
extern bool nfs4_refresh_open_stateid(nfs4_stateid *dst,
struct nfs4_state *state);
extern bool nfs4_copy_open_stateid(nfs4_stateid *dst,
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 8f53455c4765..2548405da1f7 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -545,7 +545,7 @@ static int nfs4_match_client(struct nfs_client *pos, struct nfs_client *new,
*/
int nfs40_walk_client_list(struct nfs_client *new,
struct nfs_client **result,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
struct nfs_client *pos, *prev = NULL;
@@ -711,7 +711,7 @@ out_err:
*/
int nfs41_walk_client_list(struct nfs_client *new,
struct nfs_client **result,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
struct nfs_client *pos, *prev = NULL;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 0ba2b0fb8ff3..557a5d636183 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -93,19 +93,19 @@ static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinf
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label, struct inode *inode);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label, struct inode *inode);
-static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
+static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
struct nfs_open_context *ctx, struct nfs4_label *ilabel,
struct nfs4_label *olabel);
#ifdef CONFIG_NFS_V4_1
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
- struct rpc_cred *cred,
+ const struct cred *cred,
struct nfs4_slot *slot,
bool is_privileged);
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
- struct rpc_cred *);
+ const struct cred *);
static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
- struct rpc_cred *, bool);
+ const struct cred *, bool);
#endif
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
@@ -361,7 +361,7 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
static void nfs4_test_and_free_stateid(struct nfs_server *server,
nfs4_stateid *stateid,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
@@ -370,7 +370,7 @@ static void nfs4_test_and_free_stateid(struct nfs_server *server,
static void __nfs4_free_revoked_stateid(struct nfs_server *server,
nfs4_stateid *stateid,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
stateid->type = NFS4_REVOKED_STATEID_TYPE;
nfs4_test_and_free_stateid(server, stateid, cred);
@@ -378,7 +378,7 @@ static void __nfs4_free_revoked_stateid(struct nfs_server *server,
static void nfs4_free_revoked_stateid(struct nfs_server *server,
const nfs4_stateid *stateid,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
nfs4_stateid tmp;
@@ -908,7 +908,7 @@ static const struct rpc_call_ops nfs41_call_sync_ops = {
static void
nfs4_sequence_process_interrupted(struct nfs_client *client,
- struct nfs4_slot *slot, struct rpc_cred *cred)
+ struct nfs4_slot *slot, const struct cred *cred)
{
struct rpc_task *task;
@@ -939,7 +939,7 @@ EXPORT_SYMBOL_GPL(nfs4_sequence_done);
static void
nfs4_sequence_process_interrupted(struct nfs_client *client,
- struct nfs4_slot *slot, struct rpc_cred *cred)
+ struct nfs4_slot *slot, const struct cred *cred)
{
WARN_ON_ONCE(1);
slot->interrupted = 0;
@@ -2484,7 +2484,7 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
* Note that in the non-execute case, we want to turn off permission
* checking if we just created a new file (POSIX open() semantics).
*/
-static int nfs4_opendata_access(struct rpc_cred *cred,
+static int nfs4_opendata_access(const struct cred *cred,
struct nfs4_opendata *opendata,
struct nfs4_state *state, fmode_t fmode,
int openflags)
@@ -2651,7 +2651,7 @@ static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
nfs4_stateid *stateid,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
return -NFS4ERR_BAD_STATEID;
}
@@ -2659,7 +2659,7 @@ static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
#if defined(CONFIG_NFS_V4_1)
static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
nfs4_stateid *stateid,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
int status;
@@ -2693,7 +2693,7 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
struct nfs_server *server = NFS_SERVER(state->inode);
nfs4_stateid stateid;
struct nfs_delegation *delegation;
- struct rpc_cred *cred;
+ const struct cred *cred = NULL;
int status;
/* Get the delegation credential for use by test/free_stateid */
@@ -2718,14 +2718,16 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
return;
}
- cred = get_rpccred(delegation->cred);
+ if (delegation->cred)
+ cred = get_cred(delegation->cred);
rcu_read_unlock();
status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
trace_nfs4_test_delegation_stateid(state, NULL, status);
if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
nfs_finish_clear_delegation_stateid(state, &stateid);
- put_rpccred(cred);
+ if (delegation->cred)
+ put_cred(cred);
}
/**
@@ -2748,7 +2750,7 @@ static int nfs41_check_expired_locks(struct nfs4_state *state)
spin_lock(&state->state_lock);
list_for_each_entry(lsp, &state->lock_states, ls_locks) {
if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
- struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
+ const struct cred *cred = lsp->ls_state->owner->so_cred;
refcount_inc(&lsp->ls_count);
spin_unlock(&state->state_lock);
@@ -2792,7 +2794,7 @@ static int nfs41_check_open_stateid(struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(state->inode);
nfs4_stateid *stateid = &state->open_stateid;
- struct rpc_cred *cred = state->owner->so_cred;
+ const struct cred *cred = state->owner->so_cred;
int status;
if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) {
@@ -2950,7 +2952,7 @@ static int _nfs4_do_open(struct inode *dir,
struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_opendata *opendata;
struct dentry *dentry = ctx->dentry;
- struct rpc_cred *cred = ctx->cred;
+ const struct cred *cred = ctx->cred;
struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
@@ -3120,7 +3122,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
static int _nfs4_do_setattr(struct inode *inode,
struct nfs_setattrargs *arg,
struct nfs_setattrres *res,
- struct rpc_cred *cred,
+ const struct cred *cred,
struct nfs_open_context *ctx)
{
struct nfs_server *server = NFS_SERVER(inode);
@@ -3130,7 +3132,7 @@ static int _nfs4_do_setattr(struct inode *inode,
.rpc_resp = res,
.rpc_cred = cred,
};
- struct rpc_cred *delegation_cred = NULL;
+ const struct cred *delegation_cred = NULL;
unsigned long timestamp = jiffies;
bool truncate;
int status;
@@ -3165,14 +3167,14 @@ zero_stateid:
status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1);
- put_rpccred(delegation_cred);
+ put_cred(delegation_cred);
if (status == 0 && ctx != NULL)
renew_lease(server, timestamp);
trace_nfs4_setattr(inode, &arg->stateid, status);
return status;
}
-static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
+static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
struct nfs_open_context *ctx, struct nfs4_label *ilabel,
struct nfs4_label *olabel)
@@ -3973,7 +3975,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
struct iattr *sattr)
{
struct inode *inode = d_inode(dentry);
- struct rpc_cred *cred = NULL;
+ const struct cred *cred = NULL;
struct nfs_open_context *ctx = NULL;
struct nfs4_label *label = NULL;
int status;
@@ -4202,7 +4204,6 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
return -ENOMEM;
args.bitmask = server->cache_consistency_bitmask;
}
-
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
if (!status) {
nfs_access_set_mask(entry, res.access);
@@ -4691,7 +4692,7 @@ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
return err;
}
-static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+static int _nfs4_proc_readdir(struct dentry *dentry, const struct cred *cred,
u64 cookie, struct page **pages, unsigned int count, bool plus)
{
struct inode *dir = d_inode(dentry);
@@ -4729,7 +4730,7 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
return status;
}
-static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+static int nfs4_proc_readdir(struct dentry *dentry, const struct cred *cred,
u64 cookie, struct page **pages, unsigned int count, bool plus)
{
struct nfs4_exception exception = { };
@@ -5257,7 +5258,7 @@ static const struct rpc_call_ops nfs4_renew_ops = {
.rpc_release = nfs4_renew_release,
};
-static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
+static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
@@ -5281,7 +5282,7 @@ static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred,
&nfs4_renew_ops, data);
}
-static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
+static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
@@ -5696,7 +5697,6 @@ nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
{
struct nfs4_label ilabel, *olabel = NULL;
struct nfs_fattr fattr;
- struct rpc_cred *cred;
int status;
if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
@@ -5709,10 +5709,6 @@ nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
ilabel.label = (char *)buf;
ilabel.len = buflen;
- cred = rpc_lookup_cred();
- if (IS_ERR(cred))
- return PTR_ERR(cred);
-
olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
if (IS_ERR(olabel)) {
status = -PTR_ERR(olabel);
@@ -5725,7 +5721,6 @@ nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
nfs4_label_free(olabel);
out:
- put_rpccred(cred);
return status;
}
#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
@@ -5894,13 +5889,13 @@ static const struct rpc_call_ops nfs4_setclientid_ops = {
* @clp: state data structure
* @program: RPC program for NFSv4 callback service
* @port: IP port number for NFS4 callback service
- * @cred: RPC credential to use for this call
+ * @cred: credential to use for this call
* @res: where to place the result
*
* Returns zero, a negative errno, or a negative NFS4ERR status code.
*/
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
- unsigned short port, struct rpc_cred *cred,
+ unsigned short port, const struct cred *cred,
struct nfs4_setclientid_res *res)
{
nfs4_verifier sc_verifier;
@@ -5969,13 +5964,13 @@ out:
* nfs4_proc_setclientid_confirm - Confirm client ID
* @clp: state data structure
* @res: result of a previous SETCLIENTID
- * @cred: RPC credential to use for this call
+ * @cred: credential to use for this call
*
* Returns zero, a negative errno, or a negative NFS4ERR status code.
*/
int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
struct nfs4_setclientid_res *arg,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
@@ -6138,7 +6133,7 @@ static const struct rpc_call_ops nfs4_delegreturn_ops = {
.rpc_release = nfs4_delegreturn_release,
};
-static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
+static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
{
struct nfs4_delegreturndata *data;
struct nfs_server *server = NFS_SERVER(inode);
@@ -6205,7 +6200,7 @@ out:
return status;
}
-int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
+int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_exception exception = { };
@@ -7268,7 +7263,7 @@ int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
*/
static int _nfs40_proc_get_locations(struct inode *inode,
struct nfs4_fs_locations *locations,
- struct page *page, struct rpc_cred *cred)
+ struct page *page, const struct cred *cred)
{
struct nfs_server *server = NFS_SERVER(inode);
struct rpc_clnt *clnt = server->client;
@@ -7325,7 +7320,7 @@ static int _nfs40_proc_get_locations(struct inode *inode,
*/
static int _nfs41_proc_get_locations(struct inode *inode,
struct nfs4_fs_locations *locations,
- struct page *page, struct rpc_cred *cred)
+ struct page *page, const struct cred *cred)
{
struct nfs_server *server = NFS_SERVER(inode);
struct rpc_clnt *clnt = server->client;
@@ -7384,7 +7379,7 @@ static int _nfs41_proc_get_locations(struct inode *inode,
*/
int nfs4_proc_get_locations(struct inode *inode,
struct nfs4_fs_locations *locations,
- struct page *page, struct rpc_cred *cred)
+ struct page *page, const struct cred *cred)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_client *clp = server->nfs_client;
@@ -7415,7 +7410,7 @@ int nfs4_proc_get_locations(struct inode *inode,
* is appended to this compound to identify the client ID which is
* performing recovery.
*/
-static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
+static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
@@ -7461,7 +7456,7 @@ static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
* this operation is identified in the SEQUENCE operation in this
* compound.
*/
-static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
+static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred)
{
struct nfs_server *server = NFS_SERVER(inode);
struct rpc_clnt *clnt = server->client;
@@ -7508,7 +7503,7 @@ static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
* NFS4ERR code if some error occurred on the server, or a
* negative errno if a local failure occurred.
*/
-int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
+int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_client *clp = server->nfs_client;
@@ -7555,7 +7550,7 @@ static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct
.rpc_resp = &res,
};
struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
- struct rpc_cred *cred = NULL;
+ const struct cred *cred = NULL;
if (use_integrity) {
clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
@@ -7572,8 +7567,7 @@ static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct
&res.seq_res, 0);
dprintk("NFS reply secinfo: %d\n", status);
- if (cred)
- put_rpccred(cred);
+ put_cred(cred);
return status;
}
@@ -7654,7 +7648,7 @@ static
int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
struct rpc_xprt *xprt,
struct nfs_client *clp,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
int status;
struct nfs41_bind_conn_to_session_args args = {
@@ -7716,7 +7710,7 @@ int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
struct rpc_bind_conn_calldata {
struct nfs_client *clp;
- struct rpc_cred *cred;
+ const struct cred *cred;
};
static int
@@ -7729,7 +7723,7 @@ nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt,
return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred);
}
-int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
+int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred)
{
struct rpc_bind_conn_calldata data = {
.clp = clp,
@@ -7895,7 +7889,7 @@ static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
* Wrapper for EXCHANGE_ID operation.
*/
static struct rpc_task *
-nfs4_run_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
+nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
u32 sp4_how, struct rpc_xprt *xprt)
{
struct rpc_message msg = {
@@ -7991,7 +7985,7 @@ out:
*
* Wrapper for EXCHANGE_ID operation.
*/
-static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
+static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred,
u32 sp4_how)
{
struct rpc_task *task;
@@ -8058,7 +8052,7 @@ out:
*
* Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
*/
-int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
+int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred)
{
rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
int status;
@@ -8090,7 +8084,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
* @xprt: the rpc_xprt to test
* @data: call data for _nfs4_proc_exchange_id.
*/
-int nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
+void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
void *data)
{
struct nfs4_add_xprt_data *adata = (struct nfs4_add_xprt_data *)data;
@@ -8107,20 +8101,22 @@ int nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
/* Test connection for session trunking. Async exchange_id call */
task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
if (IS_ERR(task))
- return PTR_ERR(task);
+ return;
status = task->tk_status;
if (status == 0)
status = nfs4_detect_session_trunking(adata->clp,
task->tk_msg.rpc_resp, xprt);
+ if (status == 0)
+ rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
+
rpc_put_task(task);
- return status;
}
EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
@@ -8138,7 +8134,7 @@ static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
}
static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
unsigned int loop;
int ret;
@@ -8159,7 +8155,7 @@ static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
int nfs4_destroy_clientid(struct nfs_client *clp)
{
- struct rpc_cred *cred;
+ const struct cred *cred;
int ret = 0;
if (clp->cl_mvops->minor_version < 1)
@@ -8170,8 +8166,7 @@ int nfs4_destroy_clientid(struct nfs_client *clp)
goto out;
cred = nfs4_get_clid_cred(clp);
ret = nfs4_proc_destroy_clientid(clp, cred);
- if (cred)
- put_rpccred(cred);
+ put_cred(cred);
switch (ret) {
case 0:
case -NFS4ERR_STALE_CLIENTID:
@@ -8387,7 +8382,7 @@ static void nfs4_update_session(struct nfs4_session *session,
}
static int _nfs4_proc_create_session(struct nfs_client *clp,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
struct nfs4_session *session = clp->cl_session;
struct nfs41_create_session_args args = {
@@ -8439,7 +8434,7 @@ out:
* It is the responsibility of the caller to verify the session is
* expired before calling this routine.
*/
-int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
+int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred)
{
int status;
unsigned *ptr;
@@ -8470,7 +8465,7 @@ out:
* The caller must serialize access to this routine.
*/
int nfs4_proc_destroy_session(struct nfs4_session *session,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
@@ -8572,7 +8567,7 @@ static const struct rpc_call_ops nfs41_sequence_ops = {
};
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
- struct rpc_cred *cred,
+ const struct cred *cred,
struct nfs4_slot *slot,
bool is_privileged)
{
@@ -8615,7 +8610,7 @@ out_err:
return ret;
}
-static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
+static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
{
struct rpc_task *task;
int ret = 0;
@@ -8631,7 +8626,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr
return ret;
}
-static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
+static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred)
{
struct rpc_task *task;
int ret;
@@ -8727,7 +8722,7 @@ static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
* Issue a global reclaim complete.
*/
static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
struct nfs4_reclaim_complete_data *calldata;
struct rpc_task *task;
@@ -9080,7 +9075,7 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
static int
_nfs4_proc_getdeviceinfo(struct nfs_server *server,
struct pnfs_device *pdev,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
struct nfs4_getdeviceinfo_args args = {
.pdev = pdev,
@@ -9112,7 +9107,7 @@ _nfs4_proc_getdeviceinfo(struct nfs_server *server,
int nfs4_proc_getdeviceinfo(struct nfs_server *server,
struct pnfs_device *pdev,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
struct nfs4_exception exception = { };
int err;
@@ -9169,7 +9164,7 @@ static void nfs4_layoutcommit_release(void *calldata)
pnfs_cleanup_layoutcommit(data);
nfs_post_op_update_inode_force_wcc(data->args.inode,
data->res.fattr);
- put_rpccred(data->cred);
+ put_cred(data->cred);
nfs_iput_and_deactive(data->inode);
kfree(data);
}
@@ -9245,7 +9240,7 @@ _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_resp = &res,
};
struct rpc_clnt *clnt = server->client;
- struct rpc_cred *cred = NULL;
+ const struct cred *cred = NULL;
int status;
if (use_integrity) {
@@ -9259,8 +9254,7 @@ _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
&res.seq_res, 0);
dprintk("<-- %s status=%d\n", __func__, status);
- if (cred)
- put_rpccred(cred);
+ put_cred(cred);
return status;
}
@@ -9373,7 +9367,7 @@ out:
static int _nfs41_test_stateid(struct nfs_server *server,
nfs4_stateid *stateid,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
int status;
struct nfs41_test_stateid_args args = {
@@ -9434,7 +9428,7 @@ static void nfs4_handle_delay_or_session_error(struct nfs_server *server,
*/
static int nfs41_test_stateid(struct nfs_server *server,
nfs4_stateid *stateid,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
struct nfs4_exception exception = { };
int err;
@@ -9496,7 +9490,7 @@ static const struct rpc_call_ops nfs41_free_stateid_ops = {
*/
static int nfs41_free_stateid(struct nfs_server *server,
const nfs4_stateid *stateid,
- struct rpc_cred *cred,
+ const struct cred *cred,
bool privileged)
{
struct rpc_message msg = {
@@ -9537,7 +9531,7 @@ static int nfs41_free_stateid(struct nfs_server *server,
static void
nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
{
- struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
+ const struct cred *cred = lsp->ls_state->owner->so_cred;
nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
nfs4_free_lock_state(server, lsp);
@@ -9608,14 +9602,14 @@ static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
.sched_state_renewal = nfs4_proc_async_renew,
- .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
+ .get_state_renewal_cred = nfs4_get_renew_cred,
.renew_lease = nfs4_proc_renew,
};
#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
.sched_state_renewal = nfs41_proc_async_sequence,
- .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
+ .get_state_renewal_cred = nfs4_get_machine_cred,
.renew_lease = nfs4_proc_sequence,
};
#endif
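
The nfs4proc.c hunks above make the session-trunking probe self-contained: nfs4_test_session_trunk() now returns void and, once nfs4_detect_session_trunking() confirms the candidate transport reaches the same server instance, it attaches the xprt to the client's transport switch itself instead of reporting a status back. A condensed sketch of that flow, reading the +/- fragments above as one piece; the example_ name is illustrative and sp4_how is hard-coded here, whereas the real function derives it from the auth flavour.

/*
 * Sketch only: mirrors the post-patch trunking probe, not part of the diff.
 */
static void example_test_session_trunk(struct rpc_clnt *clnt,
				       struct rpc_xprt *xprt,
				       struct nfs4_add_xprt_data *adata)
{
	struct rpc_task *task;
	int status;

	/* Async EXCHANGE_ID against the candidate transport. */
	task = nfs4_run_exchange_id(adata->clp, adata->cred, SP4_NONE, xprt);
	if (IS_ERR(task))
		return;			/* probe never started; nothing to add */

	status = task->tk_status;
	if (status == 0)
		status = nfs4_detect_session_trunking(adata->clp,
						task->tk_msg.rpc_resp, xprt);
	if (status == 0)
		/* Same server instance: add the transport ourselves. */
		rpc_clnt_xprt_switch_add_xprt(clnt, xprt);

	rpc_put_task(task);
}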
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 1f8c2ae43a8d..6ea431b067dd 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -57,7 +57,7 @@ nfs4_renew_state(struct work_struct *work)
const struct nfs4_state_maintenance_ops *ops;
struct nfs_client *clp =
container_of(work, struct nfs_client, cl_renewd.work);
- struct rpc_cred *cred;
+ const struct cred *cred;
long lease;
unsigned long last, now;
unsigned renew_flags = 0;
@@ -68,7 +68,6 @@ nfs4_renew_state(struct work_struct *work)
if (test_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state))
goto out;
- spin_lock(&clp->cl_lock);
lease = clp->cl_lease_time;
last = clp->cl_last_renewal;
now = jiffies;
@@ -79,8 +78,7 @@ nfs4_renew_state(struct work_struct *work)
renew_flags |= NFS4_RENEW_DELEGATION_CB;
if (renew_flags != 0) {
- cred = ops->get_state_renewal_cred_locked(clp);
- spin_unlock(&clp->cl_lock);
+ cred = ops->get_state_renewal_cred(clp);
if (cred == NULL) {
if (!(renew_flags & NFS4_RENEW_DELEGATION_CB)) {
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
@@ -92,7 +90,7 @@ nfs4_renew_state(struct work_struct *work)
/* Queue an asynchronous RENEW. */
ret = ops->sched_state_renewal(clp, cred, renew_flags);
- put_rpccred(cred);
+ put_cred(cred);
switch (ret) {
default:
goto out_exp;
@@ -104,7 +102,6 @@ nfs4_renew_state(struct work_struct *work)
} else {
dprintk("%s: failed to call renewd. Reason: lease not expired \n",
__func__);
- spin_unlock(&clp->cl_lock);
}
nfs4_schedule_state_renewal(clp);
out_exp:
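
The nfs4renewd.c change is purely about lock ownership: the explicit cl_lock critical section around the credential lookup goes away because the renamed ->get_state_renewal_cred() callback (see the nfs4state.c hunks below) now takes cl_lock internally, and only for the short rb-tree walk that needs it. A minimal sketch of the resulting caller shape, with renewd's flag handling and error paths trimmed; the example_ name is illustrative.

/* Sketch of the simplified renewal path; error handling omitted. */
static void example_schedule_renewal(struct nfs_client *clp,
				     unsigned int renew_flags)
{
	const struct nfs4_state_maintenance_ops *ops =
		clp->cl_mvops->state_renewal_ops;
	const struct cred *cred;

	cred = ops->get_state_renewal_cred(clp);	/* locks cl_lock itself */
	if (cred == NULL)
		return;		/* the real code may mark the lease expired here */

	ops->sched_state_renewal(clp, cred, renew_flags);
	put_cred(cred);					/* NULL-safe, see the cred.h hunk */
}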
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
index 769b85655c4b..a5489d70a724 100644
--- a/fs/nfs/nfs4session.c
+++ b/fs/nfs/nfs4session.c
@@ -573,12 +573,11 @@ static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
void nfs4_destroy_session(struct nfs4_session *session)
{
struct rpc_xprt *xprt;
- struct rpc_cred *cred;
+ const struct cred *cred;
cred = nfs4_get_clid_cred(session->clp);
nfs4_proc_destroy_session(session, cred);
- if (cred)
- put_rpccred(cred);
+ put_cred(cred);
rcu_read_lock();
xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index d8decf2ec48f..02488b50534a 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -87,7 +87,7 @@ const nfs4_stateid current_stateid = {
static DEFINE_MUTEX(nfs_clid_init_mutex);
-int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
+int nfs4_init_clientid(struct nfs_client *clp, const struct cred *cred)
{
struct nfs4_setclientid_res clid = {
.clientid = clp->cl_clientid,
@@ -134,7 +134,7 @@ out:
*/
int nfs40_discover_server_trunking(struct nfs_client *clp,
struct nfs_client **result,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
struct nfs4_setclientid_res clid = {
.clientid = clp->cl_clientid,
@@ -164,32 +164,23 @@ out:
return status;
}
-struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
+const struct cred *nfs4_get_machine_cred(struct nfs_client *clp)
{
- struct rpc_cred *cred = NULL;
-
- if (clp->cl_machine_cred != NULL)
- cred = get_rpccred(clp->cl_machine_cred);
- return cred;
+ return get_cred(rpc_machine_cred());
}
static void nfs4_root_machine_cred(struct nfs_client *clp)
{
- struct rpc_cred *cred, *new;
- new = rpc_lookup_machine_cred(NULL);
- spin_lock(&clp->cl_lock);
- cred = clp->cl_machine_cred;
- clp->cl_machine_cred = new;
- spin_unlock(&clp->cl_lock);
- if (cred != NULL)
- put_rpccred(cred);
+ /* Force root creds instead of machine */
+ clp->cl_principal = NULL;
+ clp->cl_rpcclient->cl_principal = NULL;
}
-static struct rpc_cred *
+static const struct cred *
nfs4_get_renew_cred_server_locked(struct nfs_server *server)
{
- struct rpc_cred *cred = NULL;
+ const struct cred *cred = NULL;
struct nfs4_state_owner *sp;
struct rb_node *pos;
@@ -199,29 +190,30 @@ nfs4_get_renew_cred_server_locked(struct nfs_server *server)
sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
if (list_empty(&sp->so_states))
continue;
- cred = get_rpccred(sp->so_cred);
+ cred = get_cred(sp->so_cred);
break;
}
return cred;
}
/**
- * nfs4_get_renew_cred_locked - Acquire credential for a renew operation
+ * nfs4_get_renew_cred - Acquire credential for a renew operation
* @clp: client state handle
*
* Returns an rpc_cred with reference count bumped, or NULL.
* Caller must hold clp->cl_lock.
*/
-struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
+const struct cred *nfs4_get_renew_cred(struct nfs_client *clp)
{
- struct rpc_cred *cred = NULL;
+ const struct cred *cred = NULL;
struct nfs_server *server;
/* Use machine credentials if available */
- cred = nfs4_get_machine_cred_locked(clp);
+ cred = nfs4_get_machine_cred(clp);
if (cred != NULL)
goto out;
+ spin_lock(&clp->cl_lock);
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
cred = nfs4_get_renew_cred_server_locked(server);
@@ -229,6 +221,7 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
break;
}
rcu_read_unlock();
+ spin_unlock(&clp->cl_lock);
out:
return cred;
@@ -319,7 +312,7 @@ static void nfs41_finish_session_reset(struct nfs_client *clp)
nfs41_setup_state_renewal(clp);
}
-int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
+int nfs41_init_clientid(struct nfs_client *clp, const struct cred *cred)
{
int status;
@@ -354,7 +347,7 @@ out:
*/
int nfs41_discover_server_trunking(struct nfs_client *clp,
struct nfs_client **result,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
int status;
@@ -392,32 +385,32 @@ int nfs41_discover_server_trunking(struct nfs_client *clp,
* nfs4_get_clid_cred - Acquire credential for a setclientid operation
* @clp: client state handle
*
- * Returns an rpc_cred with reference count bumped, or NULL.
+ * Returns a cred with reference count bumped, or NULL.
*/
-struct rpc_cred *nfs4_get_clid_cred(struct nfs_client *clp)
+const struct cred *nfs4_get_clid_cred(struct nfs_client *clp)
{
- struct rpc_cred *cred;
+ const struct cred *cred;
- spin_lock(&clp->cl_lock);
- cred = nfs4_get_machine_cred_locked(clp);
- spin_unlock(&clp->cl_lock);
+ cred = nfs4_get_machine_cred(clp);
return cred;
}
static struct nfs4_state_owner *
-nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred)
+nfs4_find_state_owner_locked(struct nfs_server *server, const struct cred *cred)
{
struct rb_node **p = &server->state_owners.rb_node,
*parent = NULL;
struct nfs4_state_owner *sp;
+ int cmp;
while (*p != NULL) {
parent = *p;
sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
+ cmp = cred_fscmp(cred, sp->so_cred);
- if (cred < sp->so_cred)
+ if (cmp < 0)
p = &parent->rb_left;
- else if (cred > sp->so_cred)
+ else if (cmp > 0)
p = &parent->rb_right;
else {
if (!list_empty(&sp->so_lru))
@@ -436,14 +429,16 @@ nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
struct rb_node **p = &server->state_owners.rb_node,
*parent = NULL;
struct nfs4_state_owner *sp;
+ int cmp;
while (*p != NULL) {
parent = *p;
sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
+ cmp = cred_fscmp(new->so_cred, sp->so_cred);
- if (new->so_cred < sp->so_cred)
+ if (cmp < 0)
p = &parent->rb_left;
- else if (new->so_cred > sp->so_cred)
+ else if (cmp > 0)
p = &parent->rb_right;
else {
if (!list_empty(&sp->so_lru))
@@ -490,7 +485,7 @@ nfs4_destroy_seqid_counter(struct nfs_seqid_counter *sc)
*/
static struct nfs4_state_owner *
nfs4_alloc_state_owner(struct nfs_server *server,
- struct rpc_cred *cred,
+ const struct cred *cred,
gfp_t gfp_flags)
{
struct nfs4_state_owner *sp;
@@ -505,7 +500,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
return NULL;
}
sp->so_server = server;
- sp->so_cred = get_rpccred(cred);
+ sp->so_cred = get_cred(cred);
spin_lock_init(&sp->so_lock);
INIT_LIST_HEAD(&sp->so_states);
nfs4_init_seqid_counter(&sp->so_seqid);
@@ -534,7 +529,7 @@ nfs4_reset_state_owner(struct nfs4_state_owner *sp)
static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
{
nfs4_destroy_seqid_counter(&sp->so_seqid);
- put_rpccred(sp->so_cred);
+ put_cred(sp->so_cred);
ida_simple_remove(&sp->so_server->openowner_id, sp->so_seqid.owner_id);
kfree(sp);
}
@@ -572,7 +567,7 @@ static void nfs4_gc_state_owners(struct nfs_server *server)
* Returns a pointer to an instantiated nfs4_state_owner struct, or NULL.
*/
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
- struct rpc_cred *cred,
+ const struct cred *cred,
gfp_t gfp_flags)
{
struct nfs_client *clp = server->nfs_client;
@@ -1041,7 +1036,7 @@ bool nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
*/
int nfs4_select_rw_stateid(struct nfs4_state *state,
fmode_t fmode, const struct nfs_lock_context *l_ctx,
- nfs4_stateid *dst, struct rpc_cred **cred)
+ nfs4_stateid *dst, const struct cred **cred)
{
int ret;
@@ -1560,7 +1555,7 @@ static void nfs42_complete_copies(struct nfs4_state_owner *sp, struct nfs4_state
spin_lock(&sp->so_server->nfs_client->cl_lock);
list_for_each_entry(copy, &sp->so_server->ss_copies, copies) {
- if (nfs4_stateid_match_other(&state->stateid, &copy->parent_state->stateid))
+ if (!nfs4_stateid_match_other(&state->stateid, &copy->parent_state->stateid))
continue;
copy->flags = 1;
complete(&copy->completion);
@@ -1741,7 +1736,7 @@ static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
static int nfs4_reclaim_complete(struct nfs_client *clp,
const struct nfs4_state_recovery_ops *ops,
- struct rpc_cred *cred)
+ const struct cred *cred)
{
/* Notify the server we're done reclaiming our state */
if (ops->reclaim_complete)
@@ -1792,7 +1787,7 @@ static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
{
const struct nfs4_state_recovery_ops *ops;
- struct rpc_cred *cred;
+ const struct cred *cred;
int err;
if (!nfs4_state_clear_reclaim_reboot(clp))
@@ -1800,7 +1795,7 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
ops = clp->cl_mvops->reboot_recovery_ops;
cred = nfs4_get_clid_cred(clp);
err = nfs4_reclaim_complete(clp, ops, cred);
- put_rpccred(cred);
+ put_cred(cred);
if (err == -NFS4ERR_CONN_NOT_BOUND_TO_SESSION)
set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
}
@@ -1896,7 +1891,7 @@ restart:
static int nfs4_check_lease(struct nfs_client *clp)
{
- struct rpc_cred *cred;
+ const struct cred *cred;
const struct nfs4_state_maintenance_ops *ops =
clp->cl_mvops->state_renewal_ops;
int status;
@@ -1904,9 +1899,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
/* Is the client already known to have an expired lease? */
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
return 0;
- spin_lock(&clp->cl_lock);
- cred = ops->get_state_renewal_cred_locked(clp);
- spin_unlock(&clp->cl_lock);
+ cred = ops->get_state_renewal_cred(clp);
if (cred == NULL) {
cred = nfs4_get_clid_cred(clp);
status = -ENOKEY;
@@ -1914,7 +1907,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
goto out;
}
status = ops->renew_lease(clp, cred);
- put_rpccred(cred);
+ put_cred(cred);
if (status == -ETIMEDOUT) {
set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
return 0;
@@ -1974,7 +1967,7 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
static int nfs4_establish_lease(struct nfs_client *clp)
{
- struct rpc_cred *cred;
+ const struct cred *cred;
const struct nfs4_state_recovery_ops *ops =
clp->cl_mvops->reboot_recovery_ops;
int status;
@@ -1986,7 +1979,7 @@ static int nfs4_establish_lease(struct nfs_client *clp)
if (cred == NULL)
return -ENOENT;
status = ops->establish_clid(clp, cred);
- put_rpccred(cred);
+ put_cred(cred);
if (status != 0)
return status;
pnfs_destroy_all_layouts(clp);
@@ -2033,7 +2026,7 @@ static int nfs4_purge_lease(struct nfs_client *clp)
*
* Returns zero or a negative NFS4ERR status code.
*/
-static int nfs4_try_migration(struct nfs_server *server, struct rpc_cred *cred)
+static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_fs_locations *locations = NULL;
@@ -2103,14 +2096,12 @@ static int nfs4_handle_migration(struct nfs_client *clp)
const struct nfs4_state_maintenance_ops *ops =
clp->cl_mvops->state_renewal_ops;
struct nfs_server *server;
- struct rpc_cred *cred;
+ const struct cred *cred;
dprintk("%s: migration reported on \"%s\"\n", __func__,
clp->cl_hostname);
- spin_lock(&clp->cl_lock);
- cred = ops->get_state_renewal_cred_locked(clp);
- spin_unlock(&clp->cl_lock);
+ cred = ops->get_state_renewal_cred(clp);
if (cred == NULL)
return -NFS4ERR_NOENT;
@@ -2131,13 +2122,13 @@ restart:
rcu_read_unlock();
status = nfs4_try_migration(server, cred);
if (status < 0) {
- put_rpccred(cred);
+ put_cred(cred);
return status;
}
goto restart;
}
rcu_read_unlock();
- put_rpccred(cred);
+ put_cred(cred);
return 0;
}
@@ -2151,14 +2142,12 @@ static int nfs4_handle_lease_moved(struct nfs_client *clp)
const struct nfs4_state_maintenance_ops *ops =
clp->cl_mvops->state_renewal_ops;
struct nfs_server *server;
- struct rpc_cred *cred;
+ const struct cred *cred;
dprintk("%s: lease moved reported on \"%s\"\n", __func__,
clp->cl_hostname);
- spin_lock(&clp->cl_lock);
- cred = ops->get_state_renewal_cred_locked(clp);
- spin_unlock(&clp->cl_lock);
+ cred = ops->get_state_renewal_cred(clp);
if (cred == NULL)
return -NFS4ERR_NOENT;
@@ -2186,7 +2175,7 @@ restart:
rcu_read_unlock();
out:
- put_rpccred(cred);
+ put_cred(cred);
return 0;
}
@@ -2209,7 +2198,7 @@ int nfs4_discover_server_trunking(struct nfs_client *clp,
const struct nfs4_state_recovery_ops *ops =
clp->cl_mvops->reboot_recovery_ops;
struct rpc_clnt *clnt;
- struct rpc_cred *cred;
+ const struct cred *cred;
int i, status;
dprintk("NFS: %s: testing '%s'\n", __func__, clp->cl_hostname);
@@ -2225,7 +2214,7 @@ again:
goto out_unlock;
status = ops->detect_trunking(clp, result, cred);
- put_rpccred(cred);
+ put_cred(cred);
switch (status) {
case 0:
case -EINTR:
@@ -2416,7 +2405,7 @@ out_recovery:
static int nfs4_reset_session(struct nfs_client *clp)
{
- struct rpc_cred *cred;
+ const struct cred *cred;
int status;
if (!nfs4_has_session(clp))
@@ -2454,14 +2443,13 @@ static int nfs4_reset_session(struct nfs_client *clp)
dprintk("%s: session reset was successful for server %s!\n",
__func__, clp->cl_hostname);
out:
- if (cred)
- put_rpccred(cred);
+ put_cred(cred);
return status;
}
static int nfs4_bind_conn_to_session(struct nfs_client *clp)
{
- struct rpc_cred *cred;
+ const struct cred *cred;
int ret;
if (!nfs4_has_session(clp))
@@ -2471,8 +2459,7 @@ static int nfs4_bind_conn_to_session(struct nfs_client *clp)
return ret;
cred = nfs4_get_clid_cred(clp);
ret = nfs4_proc_bind_conn_to_session(clp, cred);
- if (cred)
- put_rpccred(cred);
+ put_cred(cred);
clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
switch (ret) {
case 0:
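
The nfs4state.c rb-tree hunks above stop keying state owners by raw cred pointer and use the new cred_fscmp() helper, which orders credentials by their filesystem identity (fsuid, fsgid and supplementary groups). Two distinct struct cred objects describing the same user therefore collapse onto one nfs4_state_owner, roughly what the old hashed rpc_cred pointers provided for free. A stripped-down sketch of a lookup keyed this way, matching the node layout in the hunks above; the example_ name is illustrative.

/* Sketch: rb-tree lookup keyed by cred_fscmp() instead of pointer value. */
static struct nfs4_state_owner *
example_find_state_owner(struct nfs_server *server, const struct cred *cred)
{
	struct rb_node *n = server->state_owners.rb_node;
	struct nfs4_state_owner *sp;
	int cmp;

	while (n != NULL) {
		sp = rb_entry(n, struct nfs4_state_owner, so_server_node);
		cmp = cred_fscmp(cred, sp->so_cred);
		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return sp;	/* same fsuid/fsgid/groups: reuse owner */
	}
	return NULL;
}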
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index b1483b303e0b..b4557cf685fb 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -10,157 +10,302 @@
#include <linux/tracepoint.h>
+TRACE_DEFINE_ENUM(EPERM);
+TRACE_DEFINE_ENUM(ENOENT);
+TRACE_DEFINE_ENUM(EIO);
+TRACE_DEFINE_ENUM(ENXIO);
+TRACE_DEFINE_ENUM(EACCES);
+TRACE_DEFINE_ENUM(EEXIST);
+TRACE_DEFINE_ENUM(EXDEV);
+TRACE_DEFINE_ENUM(ENOTDIR);
+TRACE_DEFINE_ENUM(EISDIR);
+TRACE_DEFINE_ENUM(EFBIG);
+TRACE_DEFINE_ENUM(ENOSPC);
+TRACE_DEFINE_ENUM(EROFS);
+TRACE_DEFINE_ENUM(EMLINK);
+TRACE_DEFINE_ENUM(ENAMETOOLONG);
+TRACE_DEFINE_ENUM(ENOTEMPTY);
+TRACE_DEFINE_ENUM(EDQUOT);
+TRACE_DEFINE_ENUM(ESTALE);
+TRACE_DEFINE_ENUM(EBADHANDLE);
+TRACE_DEFINE_ENUM(EBADCOOKIE);
+TRACE_DEFINE_ENUM(ENOTSUPP);
+TRACE_DEFINE_ENUM(ETOOSMALL);
+TRACE_DEFINE_ENUM(EREMOTEIO);
+TRACE_DEFINE_ENUM(EBADTYPE);
+TRACE_DEFINE_ENUM(EAGAIN);
+TRACE_DEFINE_ENUM(ELOOP);
+TRACE_DEFINE_ENUM(EOPNOTSUPP);
+TRACE_DEFINE_ENUM(EDEADLK);
+TRACE_DEFINE_ENUM(ENOMEM);
+TRACE_DEFINE_ENUM(EKEYEXPIRED);
+TRACE_DEFINE_ENUM(ETIMEDOUT);
+TRACE_DEFINE_ENUM(ERESTARTSYS);
+TRACE_DEFINE_ENUM(ECONNREFUSED);
+TRACE_DEFINE_ENUM(ECONNRESET);
+TRACE_DEFINE_ENUM(ENETUNREACH);
+TRACE_DEFINE_ENUM(EHOSTUNREACH);
+TRACE_DEFINE_ENUM(EHOSTDOWN);
+TRACE_DEFINE_ENUM(EPIPE);
+TRACE_DEFINE_ENUM(EPFNOSUPPORT);
+TRACE_DEFINE_ENUM(EPROTONOSUPPORT);
+
+TRACE_DEFINE_ENUM(NFS4_OK);
+TRACE_DEFINE_ENUM(NFS4ERR_ACCESS);
+TRACE_DEFINE_ENUM(NFS4ERR_ATTRNOTSUPP);
+TRACE_DEFINE_ENUM(NFS4ERR_ADMIN_REVOKED);
+TRACE_DEFINE_ENUM(NFS4ERR_BACK_CHAN_BUSY);
+TRACE_DEFINE_ENUM(NFS4ERR_BADCHAR);
+TRACE_DEFINE_ENUM(NFS4ERR_BADHANDLE);
+TRACE_DEFINE_ENUM(NFS4ERR_BADIOMODE);
+TRACE_DEFINE_ENUM(NFS4ERR_BADLAYOUT);
+TRACE_DEFINE_ENUM(NFS4ERR_BADLABEL);
+TRACE_DEFINE_ENUM(NFS4ERR_BADNAME);
+TRACE_DEFINE_ENUM(NFS4ERR_BADOWNER);
+TRACE_DEFINE_ENUM(NFS4ERR_BADSESSION);
+TRACE_DEFINE_ENUM(NFS4ERR_BADSLOT);
+TRACE_DEFINE_ENUM(NFS4ERR_BADTYPE);
+TRACE_DEFINE_ENUM(NFS4ERR_BADXDR);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_COOKIE);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_HIGH_SLOT);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_RANGE);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_SEQID);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_SESSION_DIGEST);
+TRACE_DEFINE_ENUM(NFS4ERR_BAD_STATEID);
+TRACE_DEFINE_ENUM(NFS4ERR_CB_PATH_DOWN);
+TRACE_DEFINE_ENUM(NFS4ERR_CLID_INUSE);
+TRACE_DEFINE_ENUM(NFS4ERR_CLIENTID_BUSY);
+TRACE_DEFINE_ENUM(NFS4ERR_COMPLETE_ALREADY);
+TRACE_DEFINE_ENUM(NFS4ERR_CONN_NOT_BOUND_TO_SESSION);
+TRACE_DEFINE_ENUM(NFS4ERR_DEADLOCK);
+TRACE_DEFINE_ENUM(NFS4ERR_DEADSESSION);
+TRACE_DEFINE_ENUM(NFS4ERR_DELAY);
+TRACE_DEFINE_ENUM(NFS4ERR_DELEG_ALREADY_WANTED);
+TRACE_DEFINE_ENUM(NFS4ERR_DELEG_REVOKED);
+TRACE_DEFINE_ENUM(NFS4ERR_DENIED);
+TRACE_DEFINE_ENUM(NFS4ERR_DIRDELEG_UNAVAIL);
+TRACE_DEFINE_ENUM(NFS4ERR_DQUOT);
+TRACE_DEFINE_ENUM(NFS4ERR_ENCR_ALG_UNSUPP);
+TRACE_DEFINE_ENUM(NFS4ERR_EXIST);
+TRACE_DEFINE_ENUM(NFS4ERR_EXPIRED);
+TRACE_DEFINE_ENUM(NFS4ERR_FBIG);
+TRACE_DEFINE_ENUM(NFS4ERR_FHEXPIRED);
+TRACE_DEFINE_ENUM(NFS4ERR_FILE_OPEN);
+TRACE_DEFINE_ENUM(NFS4ERR_GRACE);
+TRACE_DEFINE_ENUM(NFS4ERR_HASH_ALG_UNSUPP);
+TRACE_DEFINE_ENUM(NFS4ERR_INVAL);
+TRACE_DEFINE_ENUM(NFS4ERR_IO);
+TRACE_DEFINE_ENUM(NFS4ERR_ISDIR);
+TRACE_DEFINE_ENUM(NFS4ERR_LAYOUTTRYLATER);
+TRACE_DEFINE_ENUM(NFS4ERR_LAYOUTUNAVAILABLE);
+TRACE_DEFINE_ENUM(NFS4ERR_LEASE_MOVED);
+TRACE_DEFINE_ENUM(NFS4ERR_LOCKED);
+TRACE_DEFINE_ENUM(NFS4ERR_LOCKS_HELD);
+TRACE_DEFINE_ENUM(NFS4ERR_LOCK_RANGE);
+TRACE_DEFINE_ENUM(NFS4ERR_MINOR_VERS_MISMATCH);
+TRACE_DEFINE_ENUM(NFS4ERR_MLINK);
+TRACE_DEFINE_ENUM(NFS4ERR_MOVED);
+TRACE_DEFINE_ENUM(NFS4ERR_NAMETOOLONG);
+TRACE_DEFINE_ENUM(NFS4ERR_NOENT);
+TRACE_DEFINE_ENUM(NFS4ERR_NOFILEHANDLE);
+TRACE_DEFINE_ENUM(NFS4ERR_NOMATCHING_LAYOUT);
+TRACE_DEFINE_ENUM(NFS4ERR_NOSPC);
+TRACE_DEFINE_ENUM(NFS4ERR_NOTDIR);
+TRACE_DEFINE_ENUM(NFS4ERR_NOTEMPTY);
+TRACE_DEFINE_ENUM(NFS4ERR_NOTSUPP);
+TRACE_DEFINE_ENUM(NFS4ERR_NOT_ONLY_OP);
+TRACE_DEFINE_ENUM(NFS4ERR_NOT_SAME);
+TRACE_DEFINE_ENUM(NFS4ERR_NO_GRACE);
+TRACE_DEFINE_ENUM(NFS4ERR_NXIO);
+TRACE_DEFINE_ENUM(NFS4ERR_OLD_STATEID);
+TRACE_DEFINE_ENUM(NFS4ERR_OPENMODE);
+TRACE_DEFINE_ENUM(NFS4ERR_OP_ILLEGAL);
+TRACE_DEFINE_ENUM(NFS4ERR_OP_NOT_IN_SESSION);
+TRACE_DEFINE_ENUM(NFS4ERR_PERM);
+TRACE_DEFINE_ENUM(NFS4ERR_PNFS_IO_HOLE);
+TRACE_DEFINE_ENUM(NFS4ERR_PNFS_NO_LAYOUT);
+TRACE_DEFINE_ENUM(NFS4ERR_RECALLCONFLICT);
+TRACE_DEFINE_ENUM(NFS4ERR_RECLAIM_BAD);
+TRACE_DEFINE_ENUM(NFS4ERR_RECLAIM_CONFLICT);
+TRACE_DEFINE_ENUM(NFS4ERR_REJECT_DELEG);
+TRACE_DEFINE_ENUM(NFS4ERR_REP_TOO_BIG);
+TRACE_DEFINE_ENUM(NFS4ERR_REP_TOO_BIG_TO_CACHE);
+TRACE_DEFINE_ENUM(NFS4ERR_REQ_TOO_BIG);
+TRACE_DEFINE_ENUM(NFS4ERR_RESOURCE);
+TRACE_DEFINE_ENUM(NFS4ERR_RESTOREFH);
+TRACE_DEFINE_ENUM(NFS4ERR_RETRY_UNCACHED_REP);
+TRACE_DEFINE_ENUM(NFS4ERR_RETURNCONFLICT);
+TRACE_DEFINE_ENUM(NFS4ERR_ROFS);
+TRACE_DEFINE_ENUM(NFS4ERR_SAME);
+TRACE_DEFINE_ENUM(NFS4ERR_SHARE_DENIED);
+TRACE_DEFINE_ENUM(NFS4ERR_SEQUENCE_POS);
+TRACE_DEFINE_ENUM(NFS4ERR_SEQ_FALSE_RETRY);
+TRACE_DEFINE_ENUM(NFS4ERR_SEQ_MISORDERED);
+TRACE_DEFINE_ENUM(NFS4ERR_SERVERFAULT);
+TRACE_DEFINE_ENUM(NFS4ERR_STALE);
+TRACE_DEFINE_ENUM(NFS4ERR_STALE_CLIENTID);
+TRACE_DEFINE_ENUM(NFS4ERR_STALE_STATEID);
+TRACE_DEFINE_ENUM(NFS4ERR_SYMLINK);
+TRACE_DEFINE_ENUM(NFS4ERR_TOOSMALL);
+TRACE_DEFINE_ENUM(NFS4ERR_TOO_MANY_OPS);
+TRACE_DEFINE_ENUM(NFS4ERR_UNKNOWN_LAYOUTTYPE);
+TRACE_DEFINE_ENUM(NFS4ERR_UNSAFE_COMPOUND);
+TRACE_DEFINE_ENUM(NFS4ERR_WRONGSEC);
+TRACE_DEFINE_ENUM(NFS4ERR_WRONG_CRED);
+TRACE_DEFINE_ENUM(NFS4ERR_WRONG_TYPE);
+TRACE_DEFINE_ENUM(NFS4ERR_XDEV);
+
#define show_nfsv4_errors(error) \
- __print_symbolic(error, \
+ __print_symbolic(-(error), \
{ NFS4_OK, "OK" }, \
/* Mapped by nfs4_stat_to_errno() */ \
- { -EPERM, "EPERM" }, \
- { -ENOENT, "ENOENT" }, \
- { -EIO, "EIO" }, \
- { -ENXIO, "ENXIO" }, \
- { -EACCES, "EACCES" }, \
- { -EEXIST, "EEXIST" }, \
- { -EXDEV, "EXDEV" }, \
- { -ENOTDIR, "ENOTDIR" }, \
- { -EISDIR, "EISDIR" }, \
- { -EFBIG, "EFBIG" }, \
- { -ENOSPC, "ENOSPC" }, \
- { -EROFS, "EROFS" }, \
- { -EMLINK, "EMLINK" }, \
- { -ENAMETOOLONG, "ENAMETOOLONG" }, \
- { -ENOTEMPTY, "ENOTEMPTY" }, \
- { -EDQUOT, "EDQUOT" }, \
- { -ESTALE, "ESTALE" }, \
- { -EBADHANDLE, "EBADHANDLE" }, \
- { -EBADCOOKIE, "EBADCOOKIE" }, \
- { -ENOTSUPP, "ENOTSUPP" }, \
- { -ETOOSMALL, "ETOOSMALL" }, \
- { -EREMOTEIO, "EREMOTEIO" }, \
- { -EBADTYPE, "EBADTYPE" }, \
- { -EAGAIN, "EAGAIN" }, \
- { -ELOOP, "ELOOP" }, \
- { -EOPNOTSUPP, "EOPNOTSUPP" }, \
- { -EDEADLK, "EDEADLK" }, \
+ { EPERM, "EPERM" }, \
+ { ENOENT, "ENOENT" }, \
+ { EIO, "EIO" }, \
+ { ENXIO, "ENXIO" }, \
+ { EACCES, "EACCES" }, \
+ { EEXIST, "EEXIST" }, \
+ { EXDEV, "EXDEV" }, \
+ { ENOTDIR, "ENOTDIR" }, \
+ { EISDIR, "EISDIR" }, \
+ { EFBIG, "EFBIG" }, \
+ { ENOSPC, "ENOSPC" }, \
+ { EROFS, "EROFS" }, \
+ { EMLINK, "EMLINK" }, \
+ { ENAMETOOLONG, "ENAMETOOLONG" }, \
+ { ENOTEMPTY, "ENOTEMPTY" }, \
+ { EDQUOT, "EDQUOT" }, \
+ { ESTALE, "ESTALE" }, \
+ { EBADHANDLE, "EBADHANDLE" }, \
+ { EBADCOOKIE, "EBADCOOKIE" }, \
+ { ENOTSUPP, "ENOTSUPP" }, \
+ { ETOOSMALL, "ETOOSMALL" }, \
+ { EREMOTEIO, "EREMOTEIO" }, \
+ { EBADTYPE, "EBADTYPE" }, \
+ { EAGAIN, "EAGAIN" }, \
+ { ELOOP, "ELOOP" }, \
+ { EOPNOTSUPP, "EOPNOTSUPP" }, \
+ { EDEADLK, "EDEADLK" }, \
/* RPC errors */ \
- { -ENOMEM, "ENOMEM" }, \
- { -EKEYEXPIRED, "EKEYEXPIRED" }, \
- { -ETIMEDOUT, "ETIMEDOUT" }, \
- { -ERESTARTSYS, "ERESTARTSYS" }, \
- { -ECONNREFUSED, "ECONNREFUSED" }, \
- { -ECONNRESET, "ECONNRESET" }, \
- { -ENETUNREACH, "ENETUNREACH" }, \
- { -EHOSTUNREACH, "EHOSTUNREACH" }, \
- { -EHOSTDOWN, "EHOSTDOWN" }, \
- { -EPIPE, "EPIPE" }, \
- { -EPFNOSUPPORT, "EPFNOSUPPORT" }, \
- { -EPROTONOSUPPORT, "EPROTONOSUPPORT" }, \
+ { ENOMEM, "ENOMEM" }, \
+ { EKEYEXPIRED, "EKEYEXPIRED" }, \
+ { ETIMEDOUT, "ETIMEDOUT" }, \
+ { ERESTARTSYS, "ERESTARTSYS" }, \
+ { ECONNREFUSED, "ECONNREFUSED" }, \
+ { ECONNRESET, "ECONNRESET" }, \
+ { ENETUNREACH, "ENETUNREACH" }, \
+ { EHOSTUNREACH, "EHOSTUNREACH" }, \
+ { EHOSTDOWN, "EHOSTDOWN" }, \
+ { EPIPE, "EPIPE" }, \
+ { EPFNOSUPPORT, "EPFNOSUPPORT" }, \
+ { EPROTONOSUPPORT, "EPROTONOSUPPORT" }, \
/* NFSv4 native errors */ \
- { -NFS4ERR_ACCESS, "ACCESS" }, \
- { -NFS4ERR_ATTRNOTSUPP, "ATTRNOTSUPP" }, \
- { -NFS4ERR_ADMIN_REVOKED, "ADMIN_REVOKED" }, \
- { -NFS4ERR_BACK_CHAN_BUSY, "BACK_CHAN_BUSY" }, \
- { -NFS4ERR_BADCHAR, "BADCHAR" }, \
- { -NFS4ERR_BADHANDLE, "BADHANDLE" }, \
- { -NFS4ERR_BADIOMODE, "BADIOMODE" }, \
- { -NFS4ERR_BADLAYOUT, "BADLAYOUT" }, \
- { -NFS4ERR_BADLABEL, "BADLABEL" }, \
- { -NFS4ERR_BADNAME, "BADNAME" }, \
- { -NFS4ERR_BADOWNER, "BADOWNER" }, \
- { -NFS4ERR_BADSESSION, "BADSESSION" }, \
- { -NFS4ERR_BADSLOT, "BADSLOT" }, \
- { -NFS4ERR_BADTYPE, "BADTYPE" }, \
- { -NFS4ERR_BADXDR, "BADXDR" }, \
- { -NFS4ERR_BAD_COOKIE, "BAD_COOKIE" }, \
- { -NFS4ERR_BAD_HIGH_SLOT, "BAD_HIGH_SLOT" }, \
- { -NFS4ERR_BAD_RANGE, "BAD_RANGE" }, \
- { -NFS4ERR_BAD_SEQID, "BAD_SEQID" }, \
- { -NFS4ERR_BAD_SESSION_DIGEST, "BAD_SESSION_DIGEST" }, \
- { -NFS4ERR_BAD_STATEID, "BAD_STATEID" }, \
- { -NFS4ERR_CB_PATH_DOWN, "CB_PATH_DOWN" }, \
- { -NFS4ERR_CLID_INUSE, "CLID_INUSE" }, \
- { -NFS4ERR_CLIENTID_BUSY, "CLIENTID_BUSY" }, \
- { -NFS4ERR_COMPLETE_ALREADY, "COMPLETE_ALREADY" }, \
- { -NFS4ERR_CONN_NOT_BOUND_TO_SESSION, \
+ { NFS4ERR_ACCESS, "ACCESS" }, \
+ { NFS4ERR_ATTRNOTSUPP, "ATTRNOTSUPP" }, \
+ { NFS4ERR_ADMIN_REVOKED, "ADMIN_REVOKED" }, \
+ { NFS4ERR_BACK_CHAN_BUSY, "BACK_CHAN_BUSY" }, \
+ { NFS4ERR_BADCHAR, "BADCHAR" }, \
+ { NFS4ERR_BADHANDLE, "BADHANDLE" }, \
+ { NFS4ERR_BADIOMODE, "BADIOMODE" }, \
+ { NFS4ERR_BADLAYOUT, "BADLAYOUT" }, \
+ { NFS4ERR_BADLABEL, "BADLABEL" }, \
+ { NFS4ERR_BADNAME, "BADNAME" }, \
+ { NFS4ERR_BADOWNER, "BADOWNER" }, \
+ { NFS4ERR_BADSESSION, "BADSESSION" }, \
+ { NFS4ERR_BADSLOT, "BADSLOT" }, \
+ { NFS4ERR_BADTYPE, "BADTYPE" }, \
+ { NFS4ERR_BADXDR, "BADXDR" }, \
+ { NFS4ERR_BAD_COOKIE, "BAD_COOKIE" }, \
+ { NFS4ERR_BAD_HIGH_SLOT, "BAD_HIGH_SLOT" }, \
+ { NFS4ERR_BAD_RANGE, "BAD_RANGE" }, \
+ { NFS4ERR_BAD_SEQID, "BAD_SEQID" }, \
+ { NFS4ERR_BAD_SESSION_DIGEST, "BAD_SESSION_DIGEST" }, \
+ { NFS4ERR_BAD_STATEID, "BAD_STATEID" }, \
+ { NFS4ERR_CB_PATH_DOWN, "CB_PATH_DOWN" }, \
+ { NFS4ERR_CLID_INUSE, "CLID_INUSE" }, \
+ { NFS4ERR_CLIENTID_BUSY, "CLIENTID_BUSY" }, \
+ { NFS4ERR_COMPLETE_ALREADY, "COMPLETE_ALREADY" }, \
+ { NFS4ERR_CONN_NOT_BOUND_TO_SESSION, \
"CONN_NOT_BOUND_TO_SESSION" }, \
- { -NFS4ERR_DEADLOCK, "DEADLOCK" }, \
- { -NFS4ERR_DEADSESSION, "DEAD_SESSION" }, \
- { -NFS4ERR_DELAY, "DELAY" }, \
- { -NFS4ERR_DELEG_ALREADY_WANTED, \
+ { NFS4ERR_DEADLOCK, "DEADLOCK" }, \
+ { NFS4ERR_DEADSESSION, "DEAD_SESSION" }, \
+ { NFS4ERR_DELAY, "DELAY" }, \
+ { NFS4ERR_DELEG_ALREADY_WANTED, \
"DELEG_ALREADY_WANTED" }, \
- { -NFS4ERR_DELEG_REVOKED, "DELEG_REVOKED" }, \
- { -NFS4ERR_DENIED, "DENIED" }, \
- { -NFS4ERR_DIRDELEG_UNAVAIL, "DIRDELEG_UNAVAIL" }, \
- { -NFS4ERR_DQUOT, "DQUOT" }, \
- { -NFS4ERR_ENCR_ALG_UNSUPP, "ENCR_ALG_UNSUPP" }, \
- { -NFS4ERR_EXIST, "EXIST" }, \
- { -NFS4ERR_EXPIRED, "EXPIRED" }, \
- { -NFS4ERR_FBIG, "FBIG" }, \
- { -NFS4ERR_FHEXPIRED, "FHEXPIRED" }, \
- { -NFS4ERR_FILE_OPEN, "FILE_OPEN" }, \
- { -NFS4ERR_GRACE, "GRACE" }, \
- { -NFS4ERR_HASH_ALG_UNSUPP, "HASH_ALG_UNSUPP" }, \
- { -NFS4ERR_INVAL, "INVAL" }, \
- { -NFS4ERR_IO, "IO" }, \
- { -NFS4ERR_ISDIR, "ISDIR" }, \
- { -NFS4ERR_LAYOUTTRYLATER, "LAYOUTTRYLATER" }, \
- { -NFS4ERR_LAYOUTUNAVAILABLE, "LAYOUTUNAVAILABLE" }, \
- { -NFS4ERR_LEASE_MOVED, "LEASE_MOVED" }, \
- { -NFS4ERR_LOCKED, "LOCKED" }, \
- { -NFS4ERR_LOCKS_HELD, "LOCKS_HELD" }, \
- { -NFS4ERR_LOCK_RANGE, "LOCK_RANGE" }, \
- { -NFS4ERR_MINOR_VERS_MISMATCH, "MINOR_VERS_MISMATCH" }, \
- { -NFS4ERR_MLINK, "MLINK" }, \
- { -NFS4ERR_MOVED, "MOVED" }, \
- { -NFS4ERR_NAMETOOLONG, "NAMETOOLONG" }, \
- { -NFS4ERR_NOENT, "NOENT" }, \
- { -NFS4ERR_NOFILEHANDLE, "NOFILEHANDLE" }, \
- { -NFS4ERR_NOMATCHING_LAYOUT, "NOMATCHING_LAYOUT" }, \
- { -NFS4ERR_NOSPC, "NOSPC" }, \
- { -NFS4ERR_NOTDIR, "NOTDIR" }, \
- { -NFS4ERR_NOTEMPTY, "NOTEMPTY" }, \
- { -NFS4ERR_NOTSUPP, "NOTSUPP" }, \
- { -NFS4ERR_NOT_ONLY_OP, "NOT_ONLY_OP" }, \
- { -NFS4ERR_NOT_SAME, "NOT_SAME" }, \
- { -NFS4ERR_NO_GRACE, "NO_GRACE" }, \
- { -NFS4ERR_NXIO, "NXIO" }, \
- { -NFS4ERR_OLD_STATEID, "OLD_STATEID" }, \
- { -NFS4ERR_OPENMODE, "OPENMODE" }, \
- { -NFS4ERR_OP_ILLEGAL, "OP_ILLEGAL" }, \
- { -NFS4ERR_OP_NOT_IN_SESSION, "OP_NOT_IN_SESSION" }, \
- { -NFS4ERR_PERM, "PERM" }, \
- { -NFS4ERR_PNFS_IO_HOLE, "PNFS_IO_HOLE" }, \
- { -NFS4ERR_PNFS_NO_LAYOUT, "PNFS_NO_LAYOUT" }, \
- { -NFS4ERR_RECALLCONFLICT, "RECALLCONFLICT" }, \
- { -NFS4ERR_RECLAIM_BAD, "RECLAIM_BAD" }, \
- { -NFS4ERR_RECLAIM_CONFLICT, "RECLAIM_CONFLICT" }, \
- { -NFS4ERR_REJECT_DELEG, "REJECT_DELEG" }, \
- { -NFS4ERR_REP_TOO_BIG, "REP_TOO_BIG" }, \
- { -NFS4ERR_REP_TOO_BIG_TO_CACHE, \
+ { NFS4ERR_DELEG_REVOKED, "DELEG_REVOKED" }, \
+ { NFS4ERR_DENIED, "DENIED" }, \
+ { NFS4ERR_DIRDELEG_UNAVAIL, "DIRDELEG_UNAVAIL" }, \
+ { NFS4ERR_DQUOT, "DQUOT" }, \
+ { NFS4ERR_ENCR_ALG_UNSUPP, "ENCR_ALG_UNSUPP" }, \
+ { NFS4ERR_EXIST, "EXIST" }, \
+ { NFS4ERR_EXPIRED, "EXPIRED" }, \
+ { NFS4ERR_FBIG, "FBIG" }, \
+ { NFS4ERR_FHEXPIRED, "FHEXPIRED" }, \
+ { NFS4ERR_FILE_OPEN, "FILE_OPEN" }, \
+ { NFS4ERR_GRACE, "GRACE" }, \
+ { NFS4ERR_HASH_ALG_UNSUPP, "HASH_ALG_UNSUPP" }, \
+ { NFS4ERR_INVAL, "INVAL" }, \
+ { NFS4ERR_IO, "IO" }, \
+ { NFS4ERR_ISDIR, "ISDIR" }, \
+ { NFS4ERR_LAYOUTTRYLATER, "LAYOUTTRYLATER" }, \
+ { NFS4ERR_LAYOUTUNAVAILABLE, "LAYOUTUNAVAILABLE" }, \
+ { NFS4ERR_LEASE_MOVED, "LEASE_MOVED" }, \
+ { NFS4ERR_LOCKED, "LOCKED" }, \
+ { NFS4ERR_LOCKS_HELD, "LOCKS_HELD" }, \
+ { NFS4ERR_LOCK_RANGE, "LOCK_RANGE" }, \
+ { NFS4ERR_MINOR_VERS_MISMATCH, "MINOR_VERS_MISMATCH" }, \
+ { NFS4ERR_MLINK, "MLINK" }, \
+ { NFS4ERR_MOVED, "MOVED" }, \
+ { NFS4ERR_NAMETOOLONG, "NAMETOOLONG" }, \
+ { NFS4ERR_NOENT, "NOENT" }, \
+ { NFS4ERR_NOFILEHANDLE, "NOFILEHANDLE" }, \
+ { NFS4ERR_NOMATCHING_LAYOUT, "NOMATCHING_LAYOUT" }, \
+ { NFS4ERR_NOSPC, "NOSPC" }, \
+ { NFS4ERR_NOTDIR, "NOTDIR" }, \
+ { NFS4ERR_NOTEMPTY, "NOTEMPTY" }, \
+ { NFS4ERR_NOTSUPP, "NOTSUPP" }, \
+ { NFS4ERR_NOT_ONLY_OP, "NOT_ONLY_OP" }, \
+ { NFS4ERR_NOT_SAME, "NOT_SAME" }, \
+ { NFS4ERR_NO_GRACE, "NO_GRACE" }, \
+ { NFS4ERR_NXIO, "NXIO" }, \
+ { NFS4ERR_OLD_STATEID, "OLD_STATEID" }, \
+ { NFS4ERR_OPENMODE, "OPENMODE" }, \
+ { NFS4ERR_OP_ILLEGAL, "OP_ILLEGAL" }, \
+ { NFS4ERR_OP_NOT_IN_SESSION, "OP_NOT_IN_SESSION" }, \
+ { NFS4ERR_PERM, "PERM" }, \
+ { NFS4ERR_PNFS_IO_HOLE, "PNFS_IO_HOLE" }, \
+ { NFS4ERR_PNFS_NO_LAYOUT, "PNFS_NO_LAYOUT" }, \
+ { NFS4ERR_RECALLCONFLICT, "RECALLCONFLICT" }, \
+ { NFS4ERR_RECLAIM_BAD, "RECLAIM_BAD" }, \
+ { NFS4ERR_RECLAIM_CONFLICT, "RECLAIM_CONFLICT" }, \
+ { NFS4ERR_REJECT_DELEG, "REJECT_DELEG" }, \
+ { NFS4ERR_REP_TOO_BIG, "REP_TOO_BIG" }, \
+ { NFS4ERR_REP_TOO_BIG_TO_CACHE, \
"REP_TOO_BIG_TO_CACHE" }, \
- { -NFS4ERR_REQ_TOO_BIG, "REQ_TOO_BIG" }, \
- { -NFS4ERR_RESOURCE, "RESOURCE" }, \
- { -NFS4ERR_RESTOREFH, "RESTOREFH" }, \
- { -NFS4ERR_RETRY_UNCACHED_REP, "RETRY_UNCACHED_REP" }, \
- { -NFS4ERR_RETURNCONFLICT, "RETURNCONFLICT" }, \
- { -NFS4ERR_ROFS, "ROFS" }, \
- { -NFS4ERR_SAME, "SAME" }, \
- { -NFS4ERR_SHARE_DENIED, "SHARE_DENIED" }, \
- { -NFS4ERR_SEQUENCE_POS, "SEQUENCE_POS" }, \
- { -NFS4ERR_SEQ_FALSE_RETRY, "SEQ_FALSE_RETRY" }, \
- { -NFS4ERR_SEQ_MISORDERED, "SEQ_MISORDERED" }, \
- { -NFS4ERR_SERVERFAULT, "SERVERFAULT" }, \
- { -NFS4ERR_STALE, "STALE" }, \
- { -NFS4ERR_STALE_CLIENTID, "STALE_CLIENTID" }, \
- { -NFS4ERR_STALE_STATEID, "STALE_STATEID" }, \
- { -NFS4ERR_SYMLINK, "SYMLINK" }, \
- { -NFS4ERR_TOOSMALL, "TOOSMALL" }, \
- { -NFS4ERR_TOO_MANY_OPS, "TOO_MANY_OPS" }, \
- { -NFS4ERR_UNKNOWN_LAYOUTTYPE, "UNKNOWN_LAYOUTTYPE" }, \
- { -NFS4ERR_UNSAFE_COMPOUND, "UNSAFE_COMPOUND" }, \
- { -NFS4ERR_WRONGSEC, "WRONGSEC" }, \
- { -NFS4ERR_WRONG_CRED, "WRONG_CRED" }, \
- { -NFS4ERR_WRONG_TYPE, "WRONG_TYPE" }, \
- { -NFS4ERR_XDEV, "XDEV" })
+ { NFS4ERR_REQ_TOO_BIG, "REQ_TOO_BIG" }, \
+ { NFS4ERR_RESOURCE, "RESOURCE" }, \
+ { NFS4ERR_RESTOREFH, "RESTOREFH" }, \
+ { NFS4ERR_RETRY_UNCACHED_REP, "RETRY_UNCACHED_REP" }, \
+ { NFS4ERR_RETURNCONFLICT, "RETURNCONFLICT" }, \
+ { NFS4ERR_ROFS, "ROFS" }, \
+ { NFS4ERR_SAME, "SAME" }, \
+ { NFS4ERR_SHARE_DENIED, "SHARE_DENIED" }, \
+ { NFS4ERR_SEQUENCE_POS, "SEQUENCE_POS" }, \
+ { NFS4ERR_SEQ_FALSE_RETRY, "SEQ_FALSE_RETRY" }, \
+ { NFS4ERR_SEQ_MISORDERED, "SEQ_MISORDERED" }, \
+ { NFS4ERR_SERVERFAULT, "SERVERFAULT" }, \
+ { NFS4ERR_STALE, "STALE" }, \
+ { NFS4ERR_STALE_CLIENTID, "STALE_CLIENTID" }, \
+ { NFS4ERR_STALE_STATEID, "STALE_STATEID" }, \
+ { NFS4ERR_SYMLINK, "SYMLINK" }, \
+ { NFS4ERR_TOOSMALL, "TOOSMALL" }, \
+ { NFS4ERR_TOO_MANY_OPS, "TOO_MANY_OPS" }, \
+ { NFS4ERR_UNKNOWN_LAYOUTTYPE, "UNKNOWN_LAYOUTTYPE" }, \
+ { NFS4ERR_UNSAFE_COMPOUND, "UNSAFE_COMPOUND" }, \
+ { NFS4ERR_WRONGSEC, "WRONGSEC" }, \
+ { NFS4ERR_WRONG_CRED, "WRONG_CRED" }, \
+ { NFS4ERR_WRONG_TYPE, "WRONG_TYPE" }, \
+ { NFS4ERR_XDEV, "XDEV" })
#define show_open_flags(flags) \
__print_flags(flags, "|", \
@@ -558,6 +703,13 @@ TRACE_EVENT(nfs4_close,
)
);
+TRACE_DEFINE_ENUM(F_GETLK);
+TRACE_DEFINE_ENUM(F_SETLK);
+TRACE_DEFINE_ENUM(F_SETLKW);
+TRACE_DEFINE_ENUM(F_RDLCK);
+TRACE_DEFINE_ENUM(F_WRLCK);
+TRACE_DEFINE_ENUM(F_UNLCK);
+
#define show_lock_cmd(type) \
__print_symbolic((int)type, \
{ F_GETLK, "GETLK" }, \
@@ -1451,6 +1603,10 @@ DEFINE_NFS4_COMMIT_EVENT(nfs4_commit);
#ifdef CONFIG_NFS_V4_1
DEFINE_NFS4_COMMIT_EVENT(nfs4_pnfs_commit_ds);
+TRACE_DEFINE_ENUM(IOMODE_READ);
+TRACE_DEFINE_ENUM(IOMODE_RW);
+TRACE_DEFINE_ENUM(IOMODE_ANY);
+
#define show_pnfs_iomode(iomode) \
__print_symbolic(iomode, \
{ IOMODE_READ, "READ" }, \
@@ -1528,6 +1684,20 @@ DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_layoutcommit);
DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_layoutreturn);
DEFINE_NFS4_INODE_EVENT(nfs4_layoutreturn_on_close);
+TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_UNKNOWN);
+TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_NO_PNFS);
+TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_RD_ZEROLEN);
+TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_MDSTHRESH);
+TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_NOMEM);
+TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_BULK_RECALL);
+TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_IO_TEST_FAIL);
+TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_FOUND_CACHED);
+TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_RETURN);
+TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_BLOCKED);
+TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_INVALID_OPEN);
+TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_RETRY);
+TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
+
#define show_pnfs_update_layout_reason(reason) \
__print_symbolic(reason, \
{ PNFS_UPDATE_LAYOUT_UNKNOWN, "unknown" }, \
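
The nfs4trace.h rework above does two related things: it registers every errno and NFS4ERR_* constant with TRACE_DEFINE_ENUM(), and it flips show_nfsv4_errors() to negate the status once at the call site so the symbol table can list the plain, positive constants. Both matter to user-space trace parsers: __print_symbolic() embeds these names verbatim in the event's format string, and TRACE_DEFINE_ENUM() is what lets tools such as perf and trace-cmd resolve them to numbers; an expression like -NFS4ERR_DELAY could not be exported that way. A minimal sketch of the pattern, with illustrative names that are not part of the header.

/* Sketch: pairing TRACE_DEFINE_ENUM() with __print_symbolic(). */
TRACE_DEFINE_ENUM(NFS4ERR_DELAY);
TRACE_DEFINE_ENUM(NFS4ERR_GRACE);

#define show_example_errors(error)			\
	__print_symbolic(-(error),			\
		{ NFS4_OK,		"OK" },		\
		{ NFS4ERR_DELAY,	"DELAY" },	\
		{ NFS4ERR_GRACE,	"GRACE" })
/* The kernel stores these statuses negated (-NFS4ERR_*), hence the
 * single -(error) above instead of a table full of negative entries. */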
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 5c4568a0804b..e54d899c1848 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -461,7 +461,7 @@ EXPORT_SYMBOL_GPL(nfs_wait_on_request);
* @prev: previous request in desc, or NULL
* @req: this request
*
- * Returns zero if @req can be coalesced into @desc, otherwise it returns
+ * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
* the size of the request.
*/
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
@@ -587,7 +587,7 @@ static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
}
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
- struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
+ const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
const struct rpc_call_ops *call_ops, int how, int flags)
{
struct rpc_task *task;
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 06cb90e9bc6e..53726da5c010 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -275,7 +275,7 @@ pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
list_del_init(&lo->plh_layouts);
spin_unlock(&clp->cl_lock);
}
- put_rpccred(lo->plh_lc_cred);
+ put_cred(lo->plh_lc_cred);
return ld->free_layout_hdr(lo);
}
@@ -1038,7 +1038,7 @@ pnfs_alloc_init_layoutget_args(struct inode *ino,
lgp->args.ctx = get_nfs_open_context(ctx);
nfs4_stateid_copy(&lgp->args.stateid, stateid);
lgp->gfp_flags = gfp_flags;
- lgp->cred = get_rpccred(ctx->cred);
+ lgp->cred = get_cred(ctx->cred);
return lgp;
}
@@ -1049,7 +1049,7 @@ void pnfs_layoutget_free(struct nfs4_layoutget *lgp)
nfs4_free_pages(lgp->args.layout.pages, max_pages);
if (lgp->args.inode)
pnfs_put_layout_hdr(NFS_I(lgp->args.inode)->layout);
- put_rpccred(lgp->cred);
+ put_cred(lgp->cred);
put_nfs_open_context(lgp->args.ctx);
kfree(lgp);
}
@@ -1324,7 +1324,7 @@ pnfs_commit_and_return_layout(struct inode *inode)
bool pnfs_roc(struct inode *ino,
struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
- const struct rpc_cred *cred)
+ const struct cred *cred)
{
struct nfs_inode *nfsi = NFS_I(ino);
struct nfs_open_context *ctx;
@@ -1583,7 +1583,7 @@ alloc_init_layout_hdr(struct inode *ino,
INIT_LIST_HEAD(&lo->plh_return_segs);
INIT_LIST_HEAD(&lo->plh_bulk_destroy);
lo->plh_inode = ino;
- lo->plh_lc_cred = get_rpccred(ctx->cred);
+ lo->plh_lc_cred = get_cred(ctx->cred);
lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID;
return lo;
}
@@ -2928,7 +2928,7 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
spin_unlock(&inode->i_lock);
data->args.inode = inode;
- data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
+ data->cred = get_cred(nfsi->layout->plh_lc_cred);
nfs_fattr_init(&data->fattr);
data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
data->res.fattr = &data->fattr;
@@ -2941,7 +2941,7 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
if (ld->prepare_layoutcommit) {
status = ld->prepare_layoutcommit(&data->args);
if (status) {
- put_rpccred(data->cred);
+ put_cred(data->cred);
spin_lock(&inode->i_lock);
set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
if (end_pos > nfsi->layout->plh_lwb)
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index e2e9fcd5341d..5e80a07b7bea 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -200,7 +200,7 @@ struct pnfs_layout_hdr {
u32 plh_return_seq;
enum pnfs_iomode plh_return_iomode;
loff_t plh_lwb; /* last write byte for layoutcommit */
- struct rpc_cred *plh_lc_cred; /* layoutcommit cred */
+ const struct cred *plh_lc_cred; /* layoutcommit cred */
struct inode *plh_inode;
};
@@ -230,7 +230,7 @@ extern void pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *);
extern size_t max_response_pages(struct nfs_server *server);
extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
struct pnfs_device *dev,
- struct rpc_cred *cred);
+ const struct cred *cred);
extern struct pnfs_layout_segment* nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout);
extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync);
@@ -280,7 +280,7 @@ int pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
bool pnfs_roc(struct inode *ino,
struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
- const struct rpc_cred *cred);
+ const struct cred *cred);
void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
int ret);
@@ -343,7 +343,7 @@ struct nfs4_deviceid_node {
struct nfs4_deviceid_node *
nfs4_find_get_deviceid(struct nfs_server *server,
- const struct nfs4_deviceid *id, struct rpc_cred *cred,
+ const struct nfs4_deviceid *id, const struct cred *cred,
gfp_t gfp_mask);
void nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *, const struct nfs_client *, const struct nfs4_deviceid *);
void nfs4_init_deviceid_node(struct nfs4_deviceid_node *, struct nfs_server *,
@@ -694,7 +694,7 @@ static inline bool
pnfs_roc(struct inode *ino,
struct nfs4_layoutreturn_args *args,
struct nfs4_layoutreturn_res *res,
- const struct rpc_cred *cred)
+ const struct cred *cred)
{
return false;
}
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index e8a07b3f9aaa..7fb59487ee90 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -94,7 +94,7 @@ _lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
static struct nfs4_deviceid_node *
nfs4_get_device_info(struct nfs_server *server,
const struct nfs4_deviceid *dev_id,
- struct rpc_cred *cred, gfp_t gfp_flags)
+ const struct cred *cred, gfp_t gfp_flags)
{
struct nfs4_deviceid_node *d = NULL;
struct pnfs_device *pdev = NULL;
@@ -184,7 +184,7 @@ __nfs4_find_get_deviceid(struct nfs_server *server,
struct nfs4_deviceid_node *
nfs4_find_get_deviceid(struct nfs_server *server,
- const struct nfs4_deviceid *id, struct rpc_cred *cred,
+ const struct nfs4_deviceid *id, const struct cred *cred,
gfp_t gfp_mask)
{
long hash = nfs4_deviceid_hash(id);
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index d5e4d3cd8c7f..f5ad75fafc3c 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -686,7 +686,7 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
rpc_clnt_setup_test_and_add_xprt,
&rpcdata);
if (xprtdata.cred)
- put_rpccred(xprtdata.cred);
+ put_cred(xprtdata.cred);
} else {
clp = nfs4_set_ds_client(mds_srv,
(struct sockaddr *)&da->da_addr,
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index e0c257bd62b9..5552fa8b6e12 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -490,7 +490,7 @@ nfs_proc_rmdir(struct inode *dir, const struct qstr *name)
* from nfs_readdir by calling the decode_entry function directly.
*/
static int
-nfs_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+nfs_proc_readdir(struct dentry *dentry, const struct cred *cred,
u64 cookie, struct page **pages, unsigned int count, bool plus)
{
struct inode *dir = d_inode(dentry);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ac4b2f005778..7c942462d8c6 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2168,7 +2168,10 @@ static int nfs_validate_text_mount_data(void *options,
if (args->version == 4) {
#if IS_ENABLED(CONFIG_NFS_V4)
- port = NFS_PORT;
+ if (args->nfs_server.protocol == XPRT_TRANSPORT_RDMA)
+ port = NFS_RDMA_PORT;
+ else
+ port = NFS_PORT;
max_namelen = NFS4_MAXNAMLEN;
max_pathlen = NFS4_MAXPATHLEN;
nfs_validate_transport_protocol(args);
@@ -2178,8 +2181,11 @@ static int nfs_validate_text_mount_data(void *options,
#else
goto out_v4_not_compiled;
#endif /* CONFIG_NFS_V4 */
- } else
+ } else {
nfs_set_mount_transport_protocol(args);
+ if (args->nfs_server.protocol == XPRT_TRANSPORT_RDMA)
+ port = NFS_RDMA_PORT;
+ }
nfs_set_port(sap, &args->nfs_server.port, port);
@@ -2409,8 +2415,7 @@ static int nfs_compare_mount_options(const struct super_block *s, const struct n
goto Ebusy;
if (a->acdirmax != b->acdirmax)
goto Ebusy;
- if (b->auth_info.flavor_len > 0 &&
- clnt_a->cl_auth->au_flavor != clnt_b->cl_auth->au_flavor)
+ if (clnt_a->cl_auth->au_flavor != clnt_b->cl_auth->au_flavor)
goto Ebusy;
return 1;
Ebusy:
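
Two independent fixes end the super.c hunks: text-based mounts now pick a default port by transport, so proto=rdma works without an explicit port= option, and the superblock-sharing check always compares auth flavours instead of only when the new mount specified one (the "crossing mountpoints with different auth flavors" fix from the merge description). A small sketch of the NFSv4 default-port rule; the helper name is invented, and the constants come from the hunk above (NFS_PORT is 2049, NFS_RDMA_PORT is the IANA nfsrdma port 20049).

/* Sketch: NFSv4 default port selection when the mount options omit port=. */
static int example_default_port(const struct nfs_parsed_mount_data *args)
{
	if (args->nfs_server.protocol == XPRT_TRANSPORT_RDMA)
		return NFS_RDMA_PORT;	/* proto=rdma: 20049 */
	return NFS_PORT;		/* tcp/udp: 2049 */
}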
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index fd61bf0fce63..79b97b3c4427 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -31,7 +31,7 @@
static void
nfs_free_unlinkdata(struct nfs_unlinkdata *data)
{
- put_rpccred(data->cred);
+ put_cred(data->cred);
kfree(data->args.name.name);
kfree(data);
}
@@ -177,11 +177,7 @@ nfs_async_unlink(struct dentry *dentry, const struct qstr *name)
goto out_free;
data->args.name.len = name->len;
- data->cred = rpc_lookup_cred();
- if (IS_ERR(data->cred)) {
- status = PTR_ERR(data->cred);
- goto out_free_name;
- }
+ data->cred = get_current_cred();
data->res.dir_attr = &data->dir_attr;
init_waitqueue_head(&data->wq);
@@ -202,8 +198,7 @@ nfs_async_unlink(struct dentry *dentry, const struct qstr *name)
return 0;
out_unlock:
spin_unlock(&dentry->d_lock);
- put_rpccred(data->cred);
-out_free_name:
+ put_cred(data->cred);
kfree(data->args.name.name);
out_free:
kfree(data);
@@ -307,7 +302,7 @@ static void nfs_async_rename_release(void *calldata)
iput(data->old_dir);
iput(data->new_dir);
nfs_sb_deactive(sb);
- put_rpccred(data->cred);
+ put_cred(data->cred);
kfree(data);
}
@@ -352,12 +347,7 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
return ERR_PTR(-ENOMEM);
task_setup_data.callback_data = data;
- data->cred = rpc_lookup_cred();
- if (IS_ERR(data->cred)) {
- struct rpc_task *task = ERR_CAST(data->cred);
- kfree(data);
- return task;
- }
+ data->cred = get_current_cred();
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
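
Both sillyrename paths above stop calling rpc_lookup_cred(), which allocated a SUNRPC-level credential and could fail, and instead take a reference on the caller's own struct cred with get_current_cred(), which cannot fail; that is why the out_free_name unwind label and the ERR_CAST dance disappear. A minimal sketch of the simplified allocation, assuming a trimmed-down, hypothetical stand-in for nfs_unlinkdata.

#include <linux/cred.h>
#include <linux/slab.h>
#include <linux/wait.h>

/* Hypothetical, trimmed-down async-unlink record for this sketch. */
struct example_unlinkdata {
	const struct cred *cred;
	wait_queue_head_t wq;
};

static struct example_unlinkdata *example_alloc_unlinkdata(void)
{
	struct example_unlinkdata *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->cred = get_current_cred();	/* never fails, never NULL */
	init_waitqueue_head(&data->wq);
	return data;				/* release with put_cred(data->cred) */
}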
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 4f15665f0ad1..5a0bbf917a32 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1233,9 +1233,12 @@ int
nfs_key_timeout_notify(struct file *filp, struct inode *inode)
{
struct nfs_open_context *ctx = nfs_file_open_context(filp);
- struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;
- return rpcauth_key_timeout_notify(auth, ctx->cred);
+ if (nfs_ctx_key_to_expire(ctx, inode) &&
+ !ctx->ll_cred)
+ /* Already expired! */
+ return -EACCES;
+ return 0;
}
/*
@@ -1244,8 +1247,23 @@ nfs_key_timeout_notify(struct file *filp, struct inode *inode)
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
{
struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;
+ struct rpc_cred *cred = ctx->ll_cred;
+ struct auth_cred acred = {
+ .cred = ctx->cred,
+ };
- return rpcauth_cred_key_to_expire(auth, ctx->cred);
+ if (cred && !cred->cr_ops->crmatch(&acred, cred, 0)) {
+ put_rpccred(cred);
+ ctx->ll_cred = NULL;
+ cred = NULL;
+ }
+ if (!cred)
+ cred = auth->au_ops->lookup_cred(auth, &acred, 0);
+ if (!cred || IS_ERR(cred))
+ return true;
+ ctx->ll_cred = cred;
+ return !!(cred->cr_ops->crkey_timeout &&
+ cred->cr_ops->crkey_timeout(cred));
}
/*
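
The write.c hunks move gss key-expiry tracking out of the generic SUNRPC credential and into the NFS open context: ctx->cred is now a plain kernel cred that knows nothing about key lifetimes, so nfs_ctx_key_to_expire() looks up, and caches in ctx->ll_cred, the low-level RPC credential that does, and asks its crkey_timeout op. A consolidated sketch of that flow, equivalent to the interleaved +/- lines above; the example_ wrapper is illustrative.

/* Sketch: post-patch expiry test, reading the +/- lines above as one piece. */
static bool example_key_to_expire(struct nfs_open_context *ctx,
				  struct rpc_auth *auth)
{
	struct auth_cred acred = { .cred = ctx->cred };
	struct rpc_cred *cred = ctx->ll_cred;

	/* Drop a cached low-level cred that no longer matches ctx->cred. */
	if (cred && !cred->cr_ops->crmatch(&acred, cred, 0)) {
		put_rpccred(cred);
		ctx->ll_cred = NULL;
		cred = NULL;
	}
	if (!cred)
		cred = auth->au_ops->lookup_cred(auth, &acred, 0);
	if (IS_ERR_OR_NULL(cred))
		return true;		/* no usable cred: treat as expiring */
	ctx->ll_cred = cred;
	/* Only creds with a crkey_timeout op (gss) can report expiry. */
	return cred->cr_ops->crkey_timeout && cred->cr_ops->crkey_timeout(cred);
}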
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 25987bcdf96f..c74e4538d0eb 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -844,24 +844,23 @@ static int max_cb_time(struct net *net)
return max(nn->nfsd4_lease/10, (time_t)1) * HZ;
}
-static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
+static const struct cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
{
if (clp->cl_minorversion == 0) {
- char *principal = clp->cl_cred.cr_targ_princ ?
- clp->cl_cred.cr_targ_princ : "nfs";
- struct rpc_cred *cred;
-
- cred = rpc_lookup_machine_cred(principal);
- if (!IS_ERR(cred))
- get_rpccred(cred);
- return cred;
+ client->cl_principal = clp->cl_cred.cr_targ_princ ?
+ clp->cl_cred.cr_targ_princ : "nfs";
+
+ return get_cred(rpc_machine_cred());
} else {
- struct rpc_auth *auth = client->cl_auth;
- struct auth_cred acred = {};
+ struct cred *kcred;
+
+ kcred = prepare_kernel_cred(NULL);
+ if (!kcred)
+ return NULL;
- acred.uid = ses->se_cb_sec.uid;
- acred.gid = ses->se_cb_sec.gid;
- return auth->au_ops->lookup_cred(client->cl_auth, &acred, 0);
+ kcred->uid = ses->se_cb_sec.uid;
+ kcred->gid = ses->se_cb_sec.gid;
+ return kcred;
}
}
@@ -884,7 +883,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
.flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
};
struct rpc_clnt *client;
- struct rpc_cred *cred;
+ const struct cred *cred;
if (clp->cl_minorversion == 0) {
if (!clp->cl_cred.cr_principal &&
@@ -1214,7 +1213,7 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
if (clp->cl_cb_client) {
rpc_shutdown_client(clp->cl_cb_client);
clp->cl_cb_client = NULL;
- put_rpccred(clp->cl_cb_cred);
+ put_cred(clp->cl_cb_cred);
clp->cl_cb_cred = NULL;
}
if (clp->cl_cb_conn.cb_xprt) {
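
On the server side, the callback channel adopts the same cred model: for NFSv4.0 the target principal is parked on the rpc_clnt (cl_principal) and the shared rpc_machine_cred() is reused, while for v4.1+ sessions a kernel cred is built from scratch and stamped with the uid/gid from the session's callback security parms. A minimal sketch of the v4.1 branch, assuming the uid/gid are handed in directly; the helper name is invented.

#include <linux/cred.h>

/* Sketch: building a backchannel cred for an NFSv4.1+ session. */
static const struct cred *example_backchannel_cred(kuid_t uid, kgid_t gid)
{
	struct cred *kcred;

	kcred = prepare_kernel_cred(NULL);	/* fresh copy of init_cred */
	if (!kcred)
		return NULL;

	kcred->uid = uid;	/* from ses->se_cb_sec in the real code */
	kcred->gid = gid;
	return kcred;		/* caller drops it with put_cred() */
}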
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 6aacb325b6a0..396c76755b03 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -327,7 +327,7 @@ struct nfs4_client {
#define NFSD4_CLIENT_CB_FLAG_MASK (1 << NFSD4_CLIENT_CB_UPDATE | \
1 << NFSD4_CLIENT_CB_KILL)
unsigned long cl_flags;
- struct rpc_cred *cl_cb_cred;
+ const struct cred *cl_cb_cred;
struct rpc_clnt *cl_cb_client;
u32 cl_cb_ident;
#define NFSD4_CB_UP 0
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 7eed6101c791..4907c9df86b3 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -169,6 +169,7 @@ extern int change_create_files_as(struct cred *, struct inode *);
extern int set_security_override(struct cred *, u32);
extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
+extern int cred_fscmp(const struct cred *, const struct cred *);
extern void __init cred_init(void);
/*
@@ -236,7 +237,7 @@ static inline struct cred *get_new_cred(struct cred *cred)
* @cred: The credentials to reference
*
* Get a reference on the specified set of credentials. The caller must
- * release the reference.
+ * release the reference. If %NULL is passed, it is returned with no action.
*
* This is used to deal with a committed set of credentials. Although the
* pointer is const, this will temporarily discard the const and increment the
@@ -247,16 +248,29 @@ static inline struct cred *get_new_cred(struct cred *cred)
static inline const struct cred *get_cred(const struct cred *cred)
{
struct cred *nonconst_cred = (struct cred *) cred;
+ if (!cred)
+ return cred;
validate_creds(cred);
return get_new_cred(nonconst_cred);
}
+static inline const struct cred *get_cred_rcu(const struct cred *cred)
+{
+ struct cred *nonconst_cred = (struct cred *) cred;
+ if (!cred)
+ return NULL;
+ if (!atomic_inc_not_zero(&nonconst_cred->usage))
+ return NULL;
+ validate_creds(cred);
+ return cred;
+}
+
/**
* put_cred - Release a reference to a set of credentials
* @cred: The credentials to release
*
* Release a reference to a set of credentials, deleting them when the last ref
- * is released.
+ * is released. If %NULL is passed, nothing is done.
*
* This takes a const pointer to a set of credentials because the credentials
* on task_struct are attached by const pointers to prevent accidental
@@ -266,9 +280,11 @@ static inline void put_cred(const struct cred *_cred)
{
struct cred *cred = (struct cred *) _cred;
- validate_creds(cred);
- if (atomic_dec_and_test(&(cred)->usage))
- __put_cred(cred);
+ if (cred) {
+ validate_creds(cred);
+ if (atomic_dec_and_test(&(cred)->usage))
+ __put_cred(cred);
+ }
}
/**
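
The cred.h hunk is what lets every "if (cred) put_rpccred(cred);" pair in the filesystem hunks above collapse to a bare put_cred(): both get_cred() and put_cred() now treat NULL as a no-op, and the new get_cred_rcu() takes a reference only while the refcount is still non-zero, for lookups done under rcu_read_lock(). A small sketch of both behaviours; the holder struct and names are hypothetical.

#include <linux/cred.h>
#include <linux/rcupdate.h>

/* Hypothetical container, only for this sketch. */
struct example_holder {
	const struct cred __rcu *cred;
};

static const struct cred *example_get_cred(struct example_holder *h)
{
	const struct cred *cred;

	rcu_read_lock();
	/* NULL, or a cred whose refcount was still live when we looked. */
	cred = get_cred_rcu(rcu_dereference(h->cred));
	rcu_read_unlock();
	return cred;
}

static void example_put_cred(const struct cred *cred)
{
	put_cred(cred);		/* no NULL check needed any more */
}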
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 6e0417c02279..40e30376130b 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -51,7 +51,7 @@
struct nfs_access_entry {
struct rb_node rb_node;
struct list_head lru;
- struct rpc_cred * cred;
+ const struct cred * cred;
__u32 mask;
struct rcu_head rcu_head;
};
@@ -70,7 +70,8 @@ struct nfs_open_context {
struct nfs_lock_context lock_context;
fl_owner_t flock_owner;
struct dentry *dentry;
- struct rpc_cred *cred;
+ const struct cred *cred;
+ struct rpc_cred *ll_cred; /* low-level cred - use to check for expiry */
struct nfs4_state *state;
fmode_t mode;
@@ -88,7 +89,7 @@ struct nfs_open_context {
struct nfs_open_dir_context {
struct list_head list;
- struct rpc_cred *cred;
+ const struct cred *cred;
unsigned long attr_gencount;
__u64 dir_cookie;
__u64 dup_cookie;
@@ -390,7 +391,7 @@ extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
struct nfs4_label *label);
extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
extern void put_nfs_open_context(struct nfs_open_context *ctx);
-extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode);
+extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct cred *cred, fmode_t mode);
extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode, struct file *filp);
extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx);
extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx);
@@ -461,7 +462,7 @@ static inline struct nfs_open_context *nfs_file_open_context(struct file *filp)
return filp->private_data;
}
-static inline struct rpc_cred *nfs_file_cred(struct file *file)
+static inline const struct cred *nfs_file_cred(struct file *file)
{
if (file != NULL) {
struct nfs_open_context *ctx =
@@ -490,7 +491,7 @@ extern const struct dentry_operations nfs_dentry_operations;
extern void nfs_force_lookup_revalidate(struct inode *dir);
extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh,
struct nfs_fattr *fattr, struct nfs4_label *label);
-extern int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags);
+extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags);
extern void nfs_access_zap_cache(struct inode *inode);
/*
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 0fc0b9135d46..6aa8cc83c3b6 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -58,7 +58,7 @@ struct nfs_client {
struct nfs_subversion * cl_nfs_mod; /* pointer to nfs version module */
u32 cl_minorversion;/* NFSv4 minorversion */
- struct rpc_cred *cl_machine_cred;
+ const char * cl_principal; /* used for machine cred */
#if IS_ENABLED(CONFIG_NFS_V4)
struct list_head cl_ds_clients; /* auth flavor data servers */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 0e016252cfc6..441a93ebcac0 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -270,7 +270,7 @@ struct nfs4_layoutget_res {
struct nfs4_layoutget {
struct nfs4_layoutget_args args;
struct nfs4_layoutget_res res;
- struct rpc_cred *cred;
+ const struct cred *cred;
gfp_t gfp_flags;
};
@@ -309,7 +309,7 @@ struct nfs4_layoutcommit_data {
struct rpc_task task;
struct nfs_fattr fattr;
struct list_head lseg_list;
- struct rpc_cred *cred;
+ const struct cred *cred;
struct inode *inode;
struct nfs4_layoutcommit_args args;
struct nfs4_layoutcommit_res res;
@@ -334,7 +334,7 @@ struct nfs4_layoutreturn_res {
struct nfs4_layoutreturn {
struct nfs4_layoutreturn_args args;
struct nfs4_layoutreturn_res res;
- struct rpc_cred *cred;
+ const struct cred *cred;
struct nfs_client *clp;
struct inode *inode;
int rpc_status;
@@ -1469,7 +1469,7 @@ enum {
struct nfs_io_completion;
struct nfs_pgio_header {
struct inode *inode;
- struct rpc_cred *cred;
+ const struct cred *cred;
struct list_head pages;
struct nfs_page *req;
struct nfs_writeverf verf; /* Used for writes */
@@ -1529,7 +1529,7 @@ struct nfs_commit_info {
struct nfs_commit_data {
struct rpc_task task;
struct inode *inode;
- struct rpc_cred *cred;
+ const struct cred *cred;
struct nfs_fattr fattr;
struct nfs_writeverf verf;
struct list_head pages; /* Coalesced requests we wish to flush */
@@ -1560,7 +1560,7 @@ struct nfs_unlinkdata {
struct nfs_removeres res;
struct dentry *dentry;
wait_queue_head_t wq;
- struct rpc_cred *cred;
+ const struct cred *cred;
struct nfs_fattr dir_attr;
long timeout;
};
@@ -1568,7 +1568,7 @@ struct nfs_unlinkdata {
struct nfs_renamedata {
struct nfs_renameargs args;
struct nfs_renameres res;
- struct rpc_cred *cred;
+ const struct cred *cred;
struct inode *old_dir;
struct dentry *old_dentry;
struct nfs_fattr old_fattr;
@@ -1634,7 +1634,7 @@ struct nfs_rpc_ops {
unsigned int, struct iattr *);
int (*mkdir) (struct inode *, struct dentry *, struct iattr *);
int (*rmdir) (struct inode *, const struct qstr *);
- int (*readdir) (struct dentry *, struct rpc_cred *,
+ int (*readdir) (struct dentry *, const struct cred *,
u64, struct page **, unsigned int, bool);
int (*mknod) (struct inode *, struct dentry *, struct iattr *,
dev_t);
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index c4db9424b63b..eed3cb16ccf1 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -37,21 +37,9 @@
struct rpcsec_gss_info;
-/* auth_cred ac_flags bits */
-enum {
- RPC_CRED_KEY_EXPIRE_SOON = 1, /* underlying cred key will expire soon */
- RPC_CRED_NOTIFY_TIMEOUT = 2, /* nofity generic cred when underlying
- key will expire soon */
-};
-
-/* Work around the lack of a VFS credential */
struct auth_cred {
- kuid_t uid;
- kgid_t gid;
- struct group_info *group_info;
- const char *principal;
- unsigned long ac_flags;
- unsigned char machine_cred : 1;
+ const struct cred *cred;
+ const char *principal; /* If present, this is a machine credential */
};
/*
@@ -68,8 +56,7 @@ struct rpc_cred {
unsigned long cr_expire; /* when to gc */
unsigned long cr_flags; /* various flags */
refcount_t cr_count; /* ref count */
-
- kuid_t cr_uid;
+ const struct cred *cr_cred;
/* per-flavor data */
};
@@ -78,8 +65,7 @@ struct rpc_cred {
#define RPCAUTH_CRED_HASHED 2
#define RPCAUTH_CRED_NEGATIVE 3
-/* rpc_auth au_flags */
-#define RPCAUTH_AUTH_NO_CRKEY_TIMEOUT 0x0001 /* underlying cred has no key timeout */
+const struct cred *rpc_machine_cred(void);
/*
* Client authentication handle
@@ -116,7 +102,6 @@ struct rpc_auth_create_args {
/* Flags for rpcauth_lookupcred() */
#define RPCAUTH_LOOKUP_NEW 0x01 /* Accept an uninitialised cred */
-#define RPCAUTH_LOOKUP_RCU 0x02 /* lock-less lookup */
/*
* Client authentication ops
@@ -146,7 +131,6 @@ struct rpc_credops {
void (*crdestroy)(struct rpc_cred *);
int (*crmatch)(struct auth_cred *, struct rpc_cred *, int);
- struct rpc_cred * (*crbind)(struct rpc_task *, struct rpc_cred *, int);
__be32 * (*crmarshal)(struct rpc_task *, __be32 *);
int (*crrefresh)(struct rpc_task *);
__be32 * (*crvalidate)(struct rpc_task *, __be32 *);
@@ -155,7 +139,6 @@ struct rpc_credops {
int (*crunwrap_resp)(struct rpc_task *, kxdrdproc_t,
void *, __be32 *, void *);
int (*crkey_timeout)(struct rpc_cred *);
- bool (*crkey_to_expire)(struct rpc_cred *);
char * (*crstringify_acceptor)(struct rpc_cred *);
bool (*crneed_reencode)(struct rpc_task *);
};
@@ -164,16 +147,10 @@ extern const struct rpc_authops authunix_ops;
extern const struct rpc_authops authnull_ops;
int __init rpc_init_authunix(void);
-int __init rpc_init_generic_auth(void);
int __init rpcauth_init_module(void);
void rpcauth_remove_module(void);
-void rpc_destroy_generic_auth(void);
void rpc_destroy_authunix(void);
-struct rpc_cred * rpc_lookup_cred(void);
-struct rpc_cred * rpc_lookup_cred_nonblock(void);
-struct rpc_cred * rpc_lookup_generic_cred(struct auth_cred *, int, gfp_t);
-struct rpc_cred * rpc_lookup_machine_cred(const char *service_name);
int rpcauth_register(const struct rpc_authops *);
int rpcauth_unregister(const struct rpc_authops *);
struct rpc_auth * rpcauth_create(const struct rpc_auth_create_args *,
@@ -187,7 +164,6 @@ int rpcauth_list_flavors(rpc_authflavor_t *, int);
struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int, gfp_t);
void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *);
struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int);
-struct rpc_cred * rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int);
void put_rpccred(struct rpc_cred *);
__be32 * rpcauth_marshcred(struct rpc_task *, __be32 *);
__be32 * rpcauth_checkverf(struct rpc_task *, __be32 *);
@@ -200,9 +176,6 @@ int rpcauth_uptodatecred(struct rpc_task *);
int rpcauth_init_credcache(struct rpc_auth *);
void rpcauth_destroy_credcache(struct rpc_auth *);
void rpcauth_clear_credcache(struct rpc_cred_cache *);
-int rpcauth_key_timeout_notify(struct rpc_auth *,
- struct rpc_cred *);
-bool rpcauth_cred_key_to_expire(struct rpc_auth *, struct rpc_cred *);
char * rpcauth_stringify_acceptor(struct rpc_cred *);
static inline
@@ -213,21 +186,5 @@ struct rpc_cred *get_rpccred(struct rpc_cred *cred)
return NULL;
}
-/**
- * get_rpccred_rcu - get a reference to a cred using rcu-protected pointer
- * @cred: cred of which to take a reference
- *
- * In some cases, we may have a pointer to a credential to which we
- * want to take a reference, but don't already have one. Because these
- * objects are freed using RCU, we can access the cr_count while its
- * on its way to destruction and only take a reference if it's not already
- * zero.
- */
-static inline struct rpc_cred *
-get_rpccred_rcu(struct rpc_cred *cred)
-{
- return get_rpccred(cred);
-}
-
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_AUTH_H */
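The slimmed-down auth_cred above now carries a VFS credential plus an optional machine principal. A sketch of a flavor lookup under that shape (hypothetical helper, mirroring the rpcauth_lookupcred() change later in this patch):

#include <linux/cred.h>
#include <linux/sunrpc/auth.h>

/* Sketch: look up a flavor-specific rpc_cred for the current task. */
static struct rpc_cred *lookup_current_cred(struct rpc_auth *auth, int flags)
{
	struct auth_cred acred = {
		.cred = current_cred(),	/* uid/gid/groups all come from here */
		/* .principal left NULL: not a machine credential */
	};

	return auth->au_ops->lookup_cred(auth, &acred, flags);
}
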
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 73d5c4a870fa..1c441714d569 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -66,6 +66,7 @@ struct rpc_clnt {
struct rpc_rtt cl_rtt_default;
struct rpc_timeout cl_timeout_default;
const struct rpc_program *cl_program;
+ const char * cl_principal; /* use for machine cred */
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct dentry *cl_debugfs; /* debugfs directory */
#endif
@@ -127,8 +128,8 @@ struct rpc_create_args {
};
struct rpc_add_xprt_test {
- int (*add_xprt_test)(struct rpc_clnt *,
- struct rpc_xprt *,
+ void (*add_xprt_test)(struct rpc_clnt *clnt,
+ struct rpc_xprt *xprt,
void *calldata);
void *data;
};
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 7b540c066594..219aa3910a0c 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -26,7 +26,7 @@ struct rpc_message {
const struct rpc_procinfo *rpc_proc; /* Procedure information */
void * rpc_argp; /* Arguments */
void * rpc_resp; /* Result */
- struct rpc_cred * rpc_cred; /* Credentials */
+ const struct cred * rpc_cred; /* Credentials */
};
struct rpc_call_ops;
@@ -71,6 +71,7 @@ struct rpc_task {
struct rpc_clnt * tk_client; /* RPC client */
struct rpc_xprt * tk_xprt; /* Transport */
+ struct rpc_cred * tk_op_cred; /* cred being operated on */
struct rpc_rqst * tk_rqstp; /* RPC request */
@@ -105,6 +106,7 @@ struct rpc_task_setup {
struct rpc_task *task;
struct rpc_clnt *rpc_client;
struct rpc_xprt *rpc_xprt;
+ struct rpc_cred *rpc_op_cred; /* credential being operated on */
const struct rpc_message *rpc_message;
const struct rpc_call_ops *callback_ops;
void *callback_data;
@@ -118,6 +120,7 @@ struct rpc_task_setup {
*/
#define RPC_TASK_ASYNC 0x0001 /* is an async task */
#define RPC_TASK_SWAPPER 0x0002 /* is swapping in/out */
+#define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */
#define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */
#define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */
#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
@@ -131,7 +134,6 @@ struct rpc_task_setup {
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
-#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS)
#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
#define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT))
#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN)
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index b093058f78aa..399b1aedc927 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -10,6 +10,7 @@
#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RPCRDMA_H
+#include <linux/scatterlist.h>
#include <linux/tracepoint.h>
#include <trace/events/rdma.h>
@@ -97,7 +98,6 @@ DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
- __field(const void *, mr)
__field(unsigned int, pos)
__field(int, nents)
__field(u32, handle)
@@ -109,7 +109,6 @@ DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
- __entry->mr = mr;
__entry->pos = pos;
__entry->nents = mr->mr_nents;
__entry->handle = mr->mr_handle;
@@ -118,8 +117,8 @@ DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
__entry->nsegs = nsegs;
),
- TP_printk("task:%u@%u mr=%p pos=%u %u@0x%016llx:0x%08x (%s)",
- __entry->task_id, __entry->client_id, __entry->mr,
+ TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
+ __entry->task_id, __entry->client_id,
__entry->pos, __entry->length,
(unsigned long long)__entry->offset, __entry->handle,
__entry->nents < __entry->nsegs ? "more" : "last"
@@ -127,7 +126,7 @@ DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
);
#define DEFINE_RDCH_EVENT(name) \
- DEFINE_EVENT(xprtrdma_rdch_event, name, \
+ DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
TP_PROTO( \
const struct rpc_task *task, \
unsigned int pos, \
@@ -148,7 +147,6 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
- __field(const void *, mr)
__field(int, nents)
__field(u32, handle)
__field(u32, length)
@@ -159,7 +157,6 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
- __entry->mr = mr;
__entry->nents = mr->mr_nents;
__entry->handle = mr->mr_handle;
__entry->length = mr->mr_length;
@@ -167,8 +164,8 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
__entry->nsegs = nsegs;
),
- TP_printk("task:%u@%u mr=%p %u@0x%016llx:0x%08x (%s)",
- __entry->task_id, __entry->client_id, __entry->mr,
+ TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
+ __entry->task_id, __entry->client_id,
__entry->length, (unsigned long long)__entry->offset,
__entry->handle,
__entry->nents < __entry->nsegs ? "more" : "last"
@@ -176,7 +173,7 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
);
#define DEFINE_WRCH_EVENT(name) \
- DEFINE_EVENT(xprtrdma_wrch_event, name, \
+ DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
TP_PROTO( \
const struct rpc_task *task, \
struct rpcrdma_mr *mr, \
@@ -234,6 +231,18 @@ DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
), \
TP_ARGS(wc, frwr))
+TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
+TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
+TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
+TRACE_DEFINE_ENUM(DMA_NONE);
+
+#define xprtrdma_show_direction(x) \
+ __print_symbolic(x, \
+ { DMA_BIDIRECTIONAL, "BIDIR" }, \
+ { DMA_TO_DEVICE, "TO_DEVICE" }, \
+ { DMA_FROM_DEVICE, "FROM_DEVICE" }, \
+ { DMA_NONE, "NONE" })
+
DECLARE_EVENT_CLASS(xprtrdma_mr,
TP_PROTO(
const struct rpcrdma_mr *mr
@@ -246,6 +255,7 @@ DECLARE_EVENT_CLASS(xprtrdma_mr,
__field(u32, handle)
__field(u32, length)
__field(u64, offset)
+ __field(u32, dir)
),
TP_fast_assign(
@@ -253,12 +263,13 @@ DECLARE_EVENT_CLASS(xprtrdma_mr,
__entry->handle = mr->mr_handle;
__entry->length = mr->mr_length;
__entry->offset = mr->mr_offset;
+ __entry->dir = mr->mr_dir;
),
- TP_printk("mr=%p %u@0x%016llx:0x%08x",
+ TP_printk("mr=%p %u@0x%016llx:0x%08x (%s)",
__entry->mr, __entry->length,
- (unsigned long long)__entry->offset,
- __entry->handle
+ (unsigned long long)__entry->offset, __entry->handle,
+ xprtrdma_show_direction(__entry->dir)
)
);
@@ -371,11 +382,13 @@ TRACE_EVENT(xprtrdma_disconnect,
DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
DEFINE_RXPRT_EVENT(xprtrdma_create);
-DEFINE_RXPRT_EVENT(xprtrdma_destroy);
+DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
DEFINE_RXPRT_EVENT(xprtrdma_remove);
DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
-DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc);
+DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
+DEFINE_RXPRT_EVENT(xprtrdma_op_close);
+DEFINE_RXPRT_EVENT(xprtrdma_op_connect);
TRACE_EVENT(xprtrdma_qp_event,
TP_PROTO(
@@ -437,9 +450,9 @@ TRACE_EVENT(xprtrdma_createmrs,
DEFINE_RXPRT_EVENT(xprtrdma_nomrs);
-DEFINE_RDCH_EVENT(xprtrdma_read_chunk);
-DEFINE_WRCH_EVENT(xprtrdma_write_chunk);
-DEFINE_WRCH_EVENT(xprtrdma_reply_chunk);
+DEFINE_RDCH_EVENT(read);
+DEFINE_WRCH_EVENT(write);
+DEFINE_WRCH_EVENT(reply);
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_readch);
@@ -570,7 +583,7 @@ TRACE_EVENT(xprtrdma_post_recvs,
__entry->r_xprt = r_xprt;
__entry->count = count;
__entry->status = status;
- __entry->posted = r_xprt->rx_buf.rb_posted_receives;
+ __entry->posted = r_xprt->rx_ep.rep_receive_count;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
@@ -651,12 +664,147 @@ DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
+TRACE_EVENT(xprtrdma_frwr_alloc,
+ TP_PROTO(
+ const struct rpcrdma_mr *mr,
+ int rc
+ ),
+
+ TP_ARGS(mr, rc),
+
+ TP_STRUCT__entry(
+ __field(const void *, mr)
+ __field(int, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->mr = mr;
+ __entry->rc = rc;
+ ),
+
+ TP_printk("mr=%p: rc=%d",
+ __entry->mr, __entry->rc
+ )
+);
+
+TRACE_EVENT(xprtrdma_frwr_dereg,
+ TP_PROTO(
+ const struct rpcrdma_mr *mr,
+ int rc
+ ),
+
+ TP_ARGS(mr, rc),
+
+ TP_STRUCT__entry(
+ __field(const void *, mr)
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ __field(u32, dir)
+ __field(int, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->mr = mr;
+ __entry->handle = mr->mr_handle;
+ __entry->length = mr->mr_length;
+ __entry->offset = mr->mr_offset;
+ __entry->dir = mr->mr_dir;
+ __entry->rc = rc;
+ ),
+
+ TP_printk("mr=%p %u@0x%016llx:0x%08x (%s): rc=%d",
+ __entry->mr, __entry->length,
+ (unsigned long long)__entry->offset, __entry->handle,
+ xprtrdma_show_direction(__entry->dir),
+ __entry->rc
+ )
+);
+
+TRACE_EVENT(xprtrdma_frwr_sgerr,
+ TP_PROTO(
+ const struct rpcrdma_mr *mr,
+ int sg_nents
+ ),
+
+ TP_ARGS(mr, sg_nents),
+
+ TP_STRUCT__entry(
+ __field(const void *, mr)
+ __field(u64, addr)
+ __field(u32, dir)
+ __field(int, nents)
+ ),
+
+ TP_fast_assign(
+ __entry->mr = mr;
+ __entry->addr = mr->mr_sg->dma_address;
+ __entry->dir = mr->mr_dir;
+ __entry->nents = sg_nents;
+ ),
+
+ TP_printk("mr=%p dma addr=0x%llx (%s) sg_nents=%d",
+ __entry->mr, __entry->addr,
+ xprtrdma_show_direction(__entry->dir),
+ __entry->nents
+ )
+);
+
+TRACE_EVENT(xprtrdma_frwr_maperr,
+ TP_PROTO(
+ const struct rpcrdma_mr *mr,
+ int num_mapped
+ ),
+
+ TP_ARGS(mr, num_mapped),
+
+ TP_STRUCT__entry(
+ __field(const void *, mr)
+ __field(u64, addr)
+ __field(u32, dir)
+ __field(int, num_mapped)
+ __field(int, nents)
+ ),
+
+ TP_fast_assign(
+ __entry->mr = mr;
+ __entry->addr = mr->mr_sg->dma_address;
+ __entry->dir = mr->mr_dir;
+ __entry->num_mapped = num_mapped;
+ __entry->nents = mr->mr_nents;
+ ),
+
+ TP_printk("mr=%p dma addr=0x%llx (%s) nents=%d of %d",
+ __entry->mr, __entry->addr,
+ xprtrdma_show_direction(__entry->dir),
+ __entry->num_mapped, __entry->nents
+ )
+);
+
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(map);
DEFINE_MR_EVENT(unmap);
DEFINE_MR_EVENT(remoteinv);
DEFINE_MR_EVENT(recycle);
+TRACE_EVENT(xprtrdma_dma_maperr,
+ TP_PROTO(
+ u64 addr
+ ),
+
+ TP_ARGS(addr),
+
+ TP_STRUCT__entry(
+ __field(u64, addr)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = addr;
+ ),
+
+ TP_printk("dma addr=0x%llx\n", __entry->addr)
+);
+
/**
** Reply events
**/
@@ -824,7 +972,7 @@ TRACE_EVENT(xprtrdma_decode_seg,
** Allocation/release of rpcrdma_reqs and rpcrdma_reps
**/
-TRACE_EVENT(xprtrdma_allocate,
+TRACE_EVENT(xprtrdma_op_allocate,
TP_PROTO(
const struct rpc_task *task,
const struct rpcrdma_req *req
@@ -854,7 +1002,7 @@ TRACE_EVENT(xprtrdma_allocate,
)
);
-TRACE_EVENT(xprtrdma_rpc_done,
+TRACE_EVENT(xprtrdma_op_free,
TP_PROTO(
const struct rpc_task *task,
const struct rpcrdma_req *req
@@ -917,6 +1065,34 @@ TRACE_EVENT(xprtrdma_cb_setup,
DEFINE_CB_EVENT(xprtrdma_cb_call);
DEFINE_CB_EVENT(xprtrdma_cb_reply);
+TRACE_EVENT(xprtrdma_leaked_rep,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ const struct rpcrdma_rep *rep
+ ),
+
+ TP_ARGS(rqst, rep),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(const void *, rep)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->rep = rep;
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x rep=%p",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->rep
+ )
+);
+
/**
** Server-side RPC/RDMA events
**/
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 8617f4fd6b70..0d5d0d91f861 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -16,40 +16,6 @@
DECLARE_EVENT_CLASS(rpc_task_status,
- TP_PROTO(struct rpc_task *task),
-
- TP_ARGS(task),
-
- TP_STRUCT__entry(
- __field(unsigned int, task_id)
- __field(unsigned int, client_id)
- __field(int, status)
- ),
-
- TP_fast_assign(
- __entry->task_id = task->tk_pid;
- __entry->client_id = task->tk_client->cl_clid;
- __entry->status = task->tk_status;
- ),
-
- TP_printk("task:%u@%u status=%d",
- __entry->task_id, __entry->client_id,
- __entry->status)
-);
-
-DEFINE_EVENT(rpc_task_status, rpc_call_status,
- TP_PROTO(struct rpc_task *task),
-
- TP_ARGS(task)
-);
-
-DEFINE_EVENT(rpc_task_status, rpc_bind_status,
- TP_PROTO(struct rpc_task *task),
-
- TP_ARGS(task)
-);
-
-TRACE_EVENT(rpc_connect_status,
TP_PROTO(const struct rpc_task *task),
TP_ARGS(task),
@@ -70,6 +36,16 @@ TRACE_EVENT(rpc_connect_status,
__entry->task_id, __entry->client_id,
__entry->status)
);
+#define DEFINE_RPC_STATUS_EVENT(name) \
+ DEFINE_EVENT(rpc_task_status, rpc_##name##_status, \
+ TP_PROTO( \
+ const struct rpc_task *task \
+ ), \
+ TP_ARGS(task))
+
+DEFINE_RPC_STATUS_EVENT(call);
+DEFINE_RPC_STATUS_EVENT(bind);
+DEFINE_RPC_STATUS_EVENT(connect);
TRACE_EVENT(rpc_request,
TP_PROTO(const struct rpc_task *task),
@@ -134,30 +110,17 @@ DECLARE_EVENT_CLASS(rpc_task_running,
__entry->action
)
);
+#define DEFINE_RPC_RUNNING_EVENT(name) \
+ DEFINE_EVENT(rpc_task_running, rpc_task_##name, \
+ TP_PROTO( \
+ const struct rpc_task *task, \
+ const void *action \
+ ), \
+ TP_ARGS(task, action))
-DEFINE_EVENT(rpc_task_running, rpc_task_begin,
-
- TP_PROTO(const struct rpc_task *task, const void *action),
-
- TP_ARGS(task, action)
-
-);
-
-DEFINE_EVENT(rpc_task_running, rpc_task_run_action,
-
- TP_PROTO(const struct rpc_task *task, const void *action),
-
- TP_ARGS(task, action)
-
-);
-
-DEFINE_EVENT(rpc_task_running, rpc_task_complete,
-
- TP_PROTO(const struct rpc_task *task, const void *action),
-
- TP_ARGS(task, action)
-
-);
+DEFINE_RPC_RUNNING_EVENT(begin);
+DEFINE_RPC_RUNNING_EVENT(run_action);
+DEFINE_RPC_RUNNING_EVENT(complete);
DECLARE_EVENT_CLASS(rpc_task_queued,
@@ -195,22 +158,16 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
__get_str(q_name)
)
);
+#define DEFINE_RPC_QUEUED_EVENT(name) \
+ DEFINE_EVENT(rpc_task_queued, rpc_task_##name, \
+ TP_PROTO( \
+ const struct rpc_task *task, \
+ const struct rpc_wait_queue *q \
+ ), \
+ TP_ARGS(task, q))
-DEFINE_EVENT(rpc_task_queued, rpc_task_sleep,
-
- TP_PROTO(const struct rpc_task *task, const struct rpc_wait_queue *q),
-
- TP_ARGS(task, q)
-
-);
-
-DEFINE_EVENT(rpc_task_queued, rpc_task_wakeup,
-
- TP_PROTO(const struct rpc_task *task, const struct rpc_wait_queue *q),
-
- TP_ARGS(task, q)
-
-);
+DEFINE_RPC_QUEUED_EVENT(sleep);
+DEFINE_RPC_QUEUED_EVENT(wakeup);
TRACE_EVENT(rpc_stats_latency,
@@ -410,7 +367,11 @@ DEFINE_RPC_SOCKET_EVENT(rpc_socket_close);
DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown);
DECLARE_EVENT_CLASS(rpc_xprt_event,
- TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
+ TP_PROTO(
+ const struct rpc_xprt *xprt,
+ __be32 xid,
+ int status
+ ),
TP_ARGS(xprt, xid, status),
@@ -432,22 +393,19 @@ DECLARE_EVENT_CLASS(rpc_xprt_event,
__get_str(port), __entry->xid,
__entry->status)
);
+#define DEFINE_RPC_XPRT_EVENT(name) \
+ DEFINE_EVENT(rpc_xprt_event, xprt_##name, \
+ TP_PROTO( \
+ const struct rpc_xprt *xprt, \
+ __be32 xid, \
+ int status \
+ ), \
+ TP_ARGS(xprt, xid, status))
-DEFINE_EVENT(rpc_xprt_event, xprt_timer,
- TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
- TP_ARGS(xprt, xid, status));
-
-DEFINE_EVENT(rpc_xprt_event, xprt_lookup_rqst,
- TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
- TP_ARGS(xprt, xid, status));
-
-DEFINE_EVENT(rpc_xprt_event, xprt_transmit,
- TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
- TP_ARGS(xprt, xid, status));
-
-DEFINE_EVENT(rpc_xprt_event, xprt_complete_rqst,
- TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
- TP_ARGS(xprt, xid, status));
+DEFINE_RPC_XPRT_EVENT(timer);
+DEFINE_RPC_XPRT_EVENT(lookup_rqst);
+DEFINE_RPC_XPRT_EVENT(transmit);
+DEFINE_RPC_XPRT_EVENT(complete_rqst);
TRACE_EVENT(xprt_ping,
TP_PROTO(const struct rpc_xprt *xprt, int status),
@@ -589,7 +547,9 @@ TRACE_EVENT(svc_process,
DECLARE_EVENT_CLASS(svc_rqst_event,
- TP_PROTO(struct svc_rqst *rqst),
+ TP_PROTO(
+ const struct svc_rqst *rqst
+ ),
TP_ARGS(rqst),
@@ -609,14 +569,15 @@ DECLARE_EVENT_CLASS(svc_rqst_event,
__get_str(addr), __entry->xid,
show_rqstp_flags(__entry->flags))
);
+#define DEFINE_SVC_RQST_EVENT(name) \
+ DEFINE_EVENT(svc_rqst_event, svc_##name, \
+ TP_PROTO( \
+ const struct svc_rqst *rqst \
+ ), \
+ TP_ARGS(rqst))
-DEFINE_EVENT(svc_rqst_event, svc_defer,
- TP_PROTO(struct svc_rqst *rqst),
- TP_ARGS(rqst));
-
-DEFINE_EVENT(svc_rqst_event, svc_drop,
- TP_PROTO(struct svc_rqst *rqst),
- TP_ARGS(rqst));
+DEFINE_SVC_RQST_EVENT(defer);
+DEFINE_SVC_RQST_EVENT(drop);
DECLARE_EVENT_CLASS(svc_rqst_status,
@@ -803,7 +764,9 @@ TRACE_EVENT(svc_stats_latency,
);
DECLARE_EVENT_CLASS(svc_deferred_event,
- TP_PROTO(struct svc_deferred_req *dr),
+ TP_PROTO(
+ const struct svc_deferred_req *dr
+ ),
TP_ARGS(dr),
@@ -820,13 +783,16 @@ DECLARE_EVENT_CLASS(svc_deferred_event,
TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid)
);
+#define DEFINE_SVC_DEFERRED_EVENT(name) \
+ DEFINE_EVENT(svc_deferred_event, svc_##name##_deferred, \
+ TP_PROTO( \
+ const struct svc_deferred_req *dr \
+ ), \
+ TP_ARGS(dr))
+
+DEFINE_SVC_DEFERRED_EVENT(drop);
+DEFINE_SVC_DEFERRED_EVENT(revisit);
-DEFINE_EVENT(svc_deferred_event, svc_drop_deferred,
- TP_PROTO(struct svc_deferred_req *dr),
- TP_ARGS(dr));
-DEFINE_EVENT(svc_deferred_event, svc_revisit_deferred,
- TP_PROTO(struct svc_deferred_req *dr),
- TP_ARGS(dr));
#endif /* _TRACE_SUNRPC_H */
#include <trace/define_trace.h>
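The DEFINE_RPC_*_EVENT wrappers introduced above are pure shorthand; under the preprocessor, an invocation such as DEFINE_RPC_STATUS_EVENT(call) expands, roughly, to the same DEFINE_EVENT() form this header spelled out by hand before:

/* DEFINE_RPC_STATUS_EVENT(call) expands, roughly, to: */
DEFINE_EVENT(rpc_task_status, rpc_call_status,
	TP_PROTO(
		const struct rpc_task *task
	),
	TP_ARGS(task));
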
diff --git a/kernel/cred.c b/kernel/cred.c
index ecf03657e71c..21f4a97085b4 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -19,6 +19,7 @@
#include <linux/security.h>
#include <linux/binfmts.h>
#include <linux/cn_proc.h>
+#include <linux/uidgid.h>
#if 0
#define kdebug(FMT, ...) \
@@ -194,11 +195,12 @@ const struct cred *get_task_cred(struct task_struct *task)
do {
cred = __task_cred((task));
BUG_ON(!cred);
- } while (!atomic_inc_not_zero(&((struct cred *)cred)->usage));
+ } while (!get_cred_rcu(cred));
rcu_read_unlock();
return cred;
}
+EXPORT_SYMBOL(get_task_cred);
/*
* Allocate blank credentials, such that the credentials can be filled in at a
@@ -564,6 +566,60 @@ void revert_creds(const struct cred *old)
}
EXPORT_SYMBOL(revert_creds);
+/**
+ * cred_fscmp - Compare two credentials with respect to filesystem access.
+ * @a: The first credential
+ * @b: The second credential
+ *
+ * cred_fscmp() will return zero if both credentials have the same
+ * fsuid, fsgid, and supplementary groups. That is, if they will both
+ * provide the same access to files based on mode/uid/gid.
+ * If the credentials are different, then either -1 or 1 will
+ * be returned depending on whether @a comes before or after @b
+ * respectively in an arbitrary, but stable, ordering of credentials.
+ *
+ * Return: -1, 0, or 1 depending on comparison
+ */
+int cred_fscmp(const struct cred *a, const struct cred *b)
+{
+ struct group_info *ga, *gb;
+ int g;
+
+ if (a == b)
+ return 0;
+ if (uid_lt(a->fsuid, b->fsuid))
+ return -1;
+ if (uid_gt(a->fsuid, b->fsuid))
+ return 1;
+
+ if (gid_lt(a->fsgid, b->fsgid))
+ return -1;
+ if (gid_gt(a->fsgid, b->fsgid))
+ return 1;
+
+ ga = a->group_info;
+ gb = b->group_info;
+ if (ga == gb)
+ return 0;
+ if (ga == NULL)
+ return -1;
+ if (gb == NULL)
+ return 1;
+ if (ga->ngroups < gb->ngroups)
+ return -1;
+ if (ga->ngroups > gb->ngroups)
+ return 1;
+
+ for (g = 0; g < ga->ngroups; g++) {
+ if (gid_lt(ga->gid[g], gb->gid[g]))
+ return -1;
+ if (gid_gt(ga->gid[g], gb->gid[g]))
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(cred_fscmp);
+
/*
* initialise the credentials stuff
*/
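As a sketch of how cred_fscmp() is meant to be consumed (hypothetical helper, not from this series; the NFS access cache keys its rb-tree on a const struct cred * in much the same way), the stable three-way ordering lets credentials act as a search key:

#include <linux/cred.h>
#include <linux/rbtree.h>

struct cred_keyed_entry {
	struct rb_node		rb_node;
	const struct cred	*cred;
};

/* Walk an rbtree ordered by cred_fscmp(); return the entry whose
 * credential grants the same filesystem access as @cred, if any. */
static struct cred_keyed_entry *
cred_tree_lookup(struct rb_root *root, const struct cred *cred)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct cred_keyed_entry *e =
			rb_entry(n, struct cred_keyed_entry, rb_node);
		int cmp = cred_fscmp(cred, e->cred);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return e;	/* same fsuid, fsgid and groups */
	}
	return NULL;
}
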
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile
index 090658c3da12..9488600451e8 100644
--- a/net/sunrpc/Makefile
+++ b/net/sunrpc/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_SUNRPC_GSS) += auth_gss/
obj-$(CONFIG_SUNRPC_XPRT_RDMA) += xprtrdma/
sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
- auth.o auth_null.o auth_unix.o auth_generic.o \
+ auth.o auth_null.o auth_unix.o \
svc.o svcsock.o svcauth.o svcauth_unix.o \
addr.o rpcb_clnt.o timer.o xdr.o \
sunrpc_syms.o cache.o rpc_pipe.o \
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index ad8ead738981..1ff9768f5456 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -39,6 +39,20 @@ static const struct rpc_authops __rcu *auth_flavors[RPC_AUTH_MAXFLAVOR] = {
static LIST_HEAD(cred_unused);
static unsigned long number_cred_unused;
+static struct cred machine_cred = {
+ .usage = ATOMIC_INIT(1),
+};
+
+/*
+ * Return the machine_cred pointer to be used whenever
+ * a generic machine credential is needed.
+ */
+const struct cred *rpc_machine_cred(void)
+{
+ return &machine_cred;
+}
+EXPORT_SYMBOL_GPL(rpc_machine_cred);
+
#define MAX_HASHTABLE_BITS (14)
static int param_set_hashtbl_sz(const char *val, const struct kernel_param *kp)
{
@@ -346,29 +360,6 @@ out_nocache:
}
EXPORT_SYMBOL_GPL(rpcauth_init_credcache);
-/*
- * Setup a credential key lifetime timeout notification
- */
-int
-rpcauth_key_timeout_notify(struct rpc_auth *auth, struct rpc_cred *cred)
-{
- if (!cred->cr_auth->au_ops->key_timeout)
- return 0;
- return cred->cr_auth->au_ops->key_timeout(auth, cred);
-}
-EXPORT_SYMBOL_GPL(rpcauth_key_timeout_notify);
-
-bool
-rpcauth_cred_key_to_expire(struct rpc_auth *auth, struct rpc_cred *cred)
-{
- if (auth->au_flags & RPCAUTH_AUTH_NO_CRKEY_TIMEOUT)
- return false;
- if (!cred->cr_ops->crkey_to_expire)
- return false;
- return cred->cr_ops->crkey_to_expire(cred);
-}
-EXPORT_SYMBOL_GPL(rpcauth_cred_key_to_expire);
-
char *
rpcauth_stringify_acceptor(struct rpc_cred *cred)
{
@@ -587,13 +578,6 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
hlist_for_each_entry_rcu(entry, &cache->hashtable[nr], cr_hash) {
if (!entry->cr_ops->crmatch(acred, entry, flags))
continue;
- if (flags & RPCAUTH_LOOKUP_RCU) {
- if (test_bit(RPCAUTH_CRED_NEW, &entry->cr_flags) ||
- refcount_read(&entry->cr_count) == 0)
- continue;
- cred = entry;
- break;
- }
cred = get_rpccred(entry);
if (cred)
break;
@@ -603,9 +587,6 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
if (cred != NULL)
goto found;
- if (flags & RPCAUTH_LOOKUP_RCU)
- return ERR_PTR(-ECHILD);
-
new = auth->au_ops->crcreate(auth, acred, flags, gfp);
if (IS_ERR(new)) {
cred = new;
@@ -656,9 +637,7 @@ rpcauth_lookupcred(struct rpc_auth *auth, int flags)
auth->au_ops->au_name);
memset(&acred, 0, sizeof(acred));
- acred.uid = cred->fsuid;
- acred.gid = cred->fsgid;
- acred.group_info = cred->group_info;
+ acred.cred = cred;
ret = auth->au_ops->lookup_cred(auth, &acred, flags);
return ret;
}
@@ -672,31 +651,41 @@ rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
INIT_LIST_HEAD(&cred->cr_lru);
refcount_set(&cred->cr_count, 1);
cred->cr_auth = auth;
+ cred->cr_flags = 0;
cred->cr_ops = ops;
cred->cr_expire = jiffies;
- cred->cr_uid = acred->uid;
+ cred->cr_cred = get_cred(acred->cred);
}
EXPORT_SYMBOL_GPL(rpcauth_init_cred);
-struct rpc_cred *
-rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags)
+static struct rpc_cred *
+rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags)
{
- dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid,
- cred->cr_auth->au_ops->au_name, cred);
- return get_rpccred(cred);
+ struct rpc_auth *auth = task->tk_client->cl_auth;
+ struct auth_cred acred = {
+ .cred = get_task_cred(&init_task),
+ };
+ struct rpc_cred *ret;
+
+ dprintk("RPC: %5u looking up %s cred\n",
+ task->tk_pid, task->tk_client->cl_auth->au_ops->au_name);
+ ret = auth->au_ops->lookup_cred(auth, &acred, lookupflags);
+ put_cred(acred.cred);
+ return ret;
}
-EXPORT_SYMBOL_GPL(rpcauth_generic_bind_cred);
static struct rpc_cred *
-rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags)
+rpcauth_bind_machine_cred(struct rpc_task *task, int lookupflags)
{
struct rpc_auth *auth = task->tk_client->cl_auth;
struct auth_cred acred = {
- .uid = GLOBAL_ROOT_UID,
- .gid = GLOBAL_ROOT_GID,
+ .principal = task->tk_client->cl_principal,
+ .cred = init_task.cred,
};
- dprintk("RPC: %5u looking up %s cred\n",
+ if (!acred.principal)
+ return NULL;
+ dprintk("RPC: %5u looking up %s machine cred\n",
task->tk_pid, task->tk_client->cl_auth->au_ops->au_name);
return auth->au_ops->lookup_cred(auth, &acred, lookupflags);
}
@@ -712,18 +701,33 @@ rpcauth_bind_new_cred(struct rpc_task *task, int lookupflags)
}
static int
-rpcauth_bindcred(struct rpc_task *task, struct rpc_cred *cred, int flags)
+rpcauth_bindcred(struct rpc_task *task, const struct cred *cred, int flags)
{
struct rpc_rqst *req = task->tk_rqstp;
- struct rpc_cred *new;
+ struct rpc_cred *new = NULL;
int lookupflags = 0;
+ struct rpc_auth *auth = task->tk_client->cl_auth;
+ struct auth_cred acred = {
+ .cred = cred,
+ };
if (flags & RPC_TASK_ASYNC)
lookupflags |= RPCAUTH_LOOKUP_NEW;
- if (cred != NULL)
- new = cred->cr_ops->crbind(task, cred, lookupflags);
- else if (flags & RPC_TASK_ROOTCREDS)
+ if (task->tk_op_cred)
+ /* Task must use exactly this rpc_cred */
+ new = get_rpccred(task->tk_op_cred);
+ else if (cred != NULL && cred != &machine_cred)
+ new = auth->au_ops->lookup_cred(auth, &acred, lookupflags);
+ else if (cred == &machine_cred)
+ new = rpcauth_bind_machine_cred(task, lookupflags);
+
+ /* If machine cred couldn't be bound, try a root cred */
+ if (new)
+ ;
+ else if (cred == &machine_cred || (flags & RPC_TASK_ROOTCREDS))
new = rpcauth_bind_root_cred(task, lookupflags);
+ else if (flags & RPC_TASK_NULLCREDS)
+ new = authnull_ops.lookup_cred(NULL, NULL, 0);
else
new = rpcauth_bind_new_cred(task, lookupflags);
if (IS_ERR(new))
@@ -901,15 +905,10 @@ int __init rpcauth_init_module(void)
err = rpc_init_authunix();
if (err < 0)
goto out1;
- err = rpc_init_generic_auth();
- if (err < 0)
- goto out2;
err = register_shrinker(&rpc_cred_shrinker);
if (err < 0)
- goto out3;
+ goto out2;
return 0;
-out3:
- rpc_destroy_generic_auth();
out2:
rpc_destroy_authunix();
out1:
@@ -919,6 +918,5 @@ out1:
void rpcauth_remove_module(void)
{
rpc_destroy_authunix();
- rpc_destroy_generic_auth();
unregister_shrinker(&rpc_cred_shrinker);
}
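With the generic flavour removed, callers that want machine credentials now pass the shared sentinel from rpc_machine_cred(); rpcauth_bindcred() above recognises it by pointer, binds via the client's cl_principal, and falls back to a root credential. A sketch of the assumed caller pattern (not verbatim from this series):

#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/clnt.h>

/* Issue @proc with the machine credential; the sentinel cred is shared
 * and never freed, so no explicit get/put pairing is needed here. */
static int call_with_machine_cred(struct rpc_clnt *clnt,
				  const struct rpc_procinfo *proc,
				  void *argp, void *resp)
{
	struct rpc_message msg = {
		.rpc_proc = proc,
		.rpc_argp = argp,
		.rpc_resp = resp,
		.rpc_cred = rpc_machine_cred(),
	};

	return rpc_call_sync(clnt, &msg, 0);
}
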
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
deleted file mode 100644
index ab4a3be1542a..000000000000
--- a/net/sunrpc/auth_generic.c
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Generic RPC credential
- *
- * Copyright (C) 2008, Trond Myklebust <Trond.Myklebust@netapp.com>
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/sunrpc/auth.h>
-#include <linux/sunrpc/clnt.h>
-#include <linux/sunrpc/debug.h>
-#include <linux/sunrpc/sched.h>
-
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-# define RPCDBG_FACILITY RPCDBG_AUTH
-#endif
-
-#define RPC_MACHINE_CRED_USERID GLOBAL_ROOT_UID
-#define RPC_MACHINE_CRED_GROUPID GLOBAL_ROOT_GID
-
-struct generic_cred {
- struct rpc_cred gc_base;
- struct auth_cred acred;
-};
-
-static struct rpc_auth generic_auth;
-static const struct rpc_credops generic_credops;
-
-/*
- * Public call interface
- */
-struct rpc_cred *rpc_lookup_cred(void)
-{
- return rpcauth_lookupcred(&generic_auth, 0);
-}
-EXPORT_SYMBOL_GPL(rpc_lookup_cred);
-
-struct rpc_cred *
-rpc_lookup_generic_cred(struct auth_cred *acred, int flags, gfp_t gfp)
-{
- return rpcauth_lookup_credcache(&generic_auth, acred, flags, gfp);
-}
-EXPORT_SYMBOL_GPL(rpc_lookup_generic_cred);
-
-struct rpc_cred *rpc_lookup_cred_nonblock(void)
-{
- return rpcauth_lookupcred(&generic_auth, RPCAUTH_LOOKUP_RCU);
-}
-EXPORT_SYMBOL_GPL(rpc_lookup_cred_nonblock);
-
-/*
- * Public call interface for looking up machine creds.
- */
-struct rpc_cred *rpc_lookup_machine_cred(const char *service_name)
-{
- struct auth_cred acred = {
- .uid = RPC_MACHINE_CRED_USERID,
- .gid = RPC_MACHINE_CRED_GROUPID,
- .principal = service_name,
- .machine_cred = 1,
- };
-
- dprintk("RPC: looking up machine cred for service %s\n",
- service_name);
- return generic_auth.au_ops->lookup_cred(&generic_auth, &acred, 0);
-}
-EXPORT_SYMBOL_GPL(rpc_lookup_machine_cred);
-
-static struct rpc_cred *generic_bind_cred(struct rpc_task *task,
- struct rpc_cred *cred, int lookupflags)
-{
- struct rpc_auth *auth = task->tk_client->cl_auth;
- struct auth_cred *acred = &container_of(cred, struct generic_cred, gc_base)->acred;
-
- return auth->au_ops->lookup_cred(auth, acred, lookupflags);
-}
-
-static int
-generic_hash_cred(struct auth_cred *acred, unsigned int hashbits)
-{
- return hash_64(from_kgid(&init_user_ns, acred->gid) |
- ((u64)from_kuid(&init_user_ns, acred->uid) <<
- (sizeof(gid_t) * 8)), hashbits);
-}
-
-/*
- * Lookup generic creds for current process
- */
-static struct rpc_cred *
-generic_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
-{
- return rpcauth_lookup_credcache(&generic_auth, acred, flags, GFP_KERNEL);
-}
-
-static struct rpc_cred *
-generic_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t gfp)
-{
- struct generic_cred *gcred;
-
- gcred = kmalloc(sizeof(*gcred), gfp);
- if (gcred == NULL)
- return ERR_PTR(-ENOMEM);
-
- rpcauth_init_cred(&gcred->gc_base, acred, &generic_auth, &generic_credops);
- gcred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
-
- gcred->acred.uid = acred->uid;
- gcred->acred.gid = acred->gid;
- gcred->acred.group_info = acred->group_info;
- gcred->acred.ac_flags = 0;
- if (gcred->acred.group_info != NULL)
- get_group_info(gcred->acred.group_info);
- gcred->acred.machine_cred = acred->machine_cred;
- gcred->acred.principal = acred->principal;
-
- dprintk("RPC: allocated %s cred %p for uid %d gid %d\n",
- gcred->acred.machine_cred ? "machine" : "generic",
- gcred,
- from_kuid(&init_user_ns, acred->uid),
- from_kgid(&init_user_ns, acred->gid));
- return &gcred->gc_base;
-}
-
-static void
-generic_free_cred(struct rpc_cred *cred)
-{
- struct generic_cred *gcred = container_of(cred, struct generic_cred, gc_base);
-
- dprintk("RPC: generic_free_cred %p\n", gcred);
- if (gcred->acred.group_info != NULL)
- put_group_info(gcred->acred.group_info);
- kfree(gcred);
-}
-
-static void
-generic_free_cred_callback(struct rcu_head *head)
-{
- struct rpc_cred *cred = container_of(head, struct rpc_cred, cr_rcu);
- generic_free_cred(cred);
-}
-
-static void
-generic_destroy_cred(struct rpc_cred *cred)
-{
- call_rcu(&cred->cr_rcu, generic_free_cred_callback);
-}
-
-static int
-machine_cred_match(struct auth_cred *acred, struct generic_cred *gcred, int flags)
-{
- if (!gcred->acred.machine_cred ||
- gcred->acred.principal != acred->principal ||
- !uid_eq(gcred->acred.uid, acred->uid) ||
- !gid_eq(gcred->acred.gid, acred->gid))
- return 0;
- return 1;
-}
-
-/*
- * Match credentials against current process creds.
- */
-static int
-generic_match(struct auth_cred *acred, struct rpc_cred *cred, int flags)
-{
- struct generic_cred *gcred = container_of(cred, struct generic_cred, gc_base);
- int i;
-
- if (acred->machine_cred)
- return machine_cred_match(acred, gcred, flags);
-
- if (!uid_eq(gcred->acred.uid, acred->uid) ||
- !gid_eq(gcred->acred.gid, acred->gid) ||
- gcred->acred.machine_cred != 0)
- goto out_nomatch;
-
- /* Optimisation in the case where pointers are identical... */
- if (gcred->acred.group_info == acred->group_info)
- goto out_match;
-
- /* Slow path... */
- if (gcred->acred.group_info->ngroups != acred->group_info->ngroups)
- goto out_nomatch;
- for (i = 0; i < gcred->acred.group_info->ngroups; i++) {
- if (!gid_eq(gcred->acred.group_info->gid[i],
- acred->group_info->gid[i]))
- goto out_nomatch;
- }
-out_match:
- return 1;
-out_nomatch:
- return 0;
-}
-
-int __init rpc_init_generic_auth(void)
-{
- return rpcauth_init_credcache(&generic_auth);
-}
-
-void rpc_destroy_generic_auth(void)
-{
- rpcauth_destroy_credcache(&generic_auth);
-}
-
-/*
- * Test the the current time (now) against the underlying credential key expiry
- * minus a timeout and setup notification.
- *
- * The normal case:
- * If 'now' is before the key expiry minus RPC_KEY_EXPIRE_TIMEO, set
- * the RPC_CRED_NOTIFY_TIMEOUT flag to setup the underlying credential
- * rpc_credops crmatch routine to notify this generic cred when it's key
- * expiration is within RPC_KEY_EXPIRE_TIMEO, and return 0.
- *
- * The error case:
- * If the underlying cred lookup fails, return -EACCES.
- *
- * The 'almost' error case:
- * If 'now' is within key expiry minus RPC_KEY_EXPIRE_TIMEO, but not within
- * key expiry minus RPC_KEY_EXPIRE_FAIL, set the RPC_CRED_EXPIRE_SOON bit
- * on the acred ac_flags and return 0.
- */
-static int
-generic_key_timeout(struct rpc_auth *auth, struct rpc_cred *cred)
-{
- struct auth_cred *acred = &container_of(cred, struct generic_cred,
- gc_base)->acred;
- struct rpc_cred *tcred;
- int ret = 0;
-
-
- /* Fast track for non crkey_timeout (no key) underlying credentials */
- if (auth->au_flags & RPCAUTH_AUTH_NO_CRKEY_TIMEOUT)
- return 0;
-
- /* Fast track for the normal case */
- if (test_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags))
- return 0;
-
- /* lookup_cred either returns a valid referenced rpc_cred, or PTR_ERR */
- tcred = auth->au_ops->lookup_cred(auth, acred, 0);
- if (IS_ERR(tcred))
- return -EACCES;
-
- /* Test for the almost error case */
- ret = tcred->cr_ops->crkey_timeout(tcred);
- if (ret != 0) {
- set_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
- ret = 0;
- } else {
- /* In case underlying cred key has been reset */
- if (test_and_clear_bit(RPC_CRED_KEY_EXPIRE_SOON,
- &acred->ac_flags))
- dprintk("RPC: UID %d Credential key reset\n",
- from_kuid(&init_user_ns, tcred->cr_uid));
- /* set up fasttrack for the normal case */
- set_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags);
- }
-
- put_rpccred(tcred);
- return ret;
-}
-
-static const struct rpc_authops generic_auth_ops = {
- .owner = THIS_MODULE,
- .au_name = "Generic",
- .hash_cred = generic_hash_cred,
- .lookup_cred = generic_lookup_cred,
- .crcreate = generic_create_cred,
- .key_timeout = generic_key_timeout,
-};
-
-static struct rpc_auth generic_auth = {
- .au_ops = &generic_auth_ops,
- .au_count = REFCOUNT_INIT(1),
-};
-
-static bool generic_key_to_expire(struct rpc_cred *cred)
-{
- struct auth_cred *acred = &container_of(cred, struct generic_cred,
- gc_base)->acred;
- return test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
-}
-
-static const struct rpc_credops generic_credops = {
- .cr_name = "Generic cred",
- .crdestroy = generic_destroy_cred,
- .crbind = generic_bind_cred,
- .crmatch = generic_match,
- .crkey_to_expire = generic_key_to_expire,
-};
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index ba765473d1f0..dc86713b32b6 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -565,7 +565,7 @@ gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
struct gss_cred *gss_cred = container_of(cred,
struct gss_cred, gc_base);
struct gss_upcall_msg *gss_new, *gss_msg;
- kuid_t uid = cred->cr_uid;
+ kuid_t uid = cred->cr_cred->fsuid;
gss_new = gss_alloc_msg(gss_auth, uid, gss_cred->gc_principal);
if (IS_ERR(gss_new))
@@ -604,7 +604,7 @@ gss_refresh_upcall(struct rpc_task *task)
int err = 0;
dprintk("RPC: %5u %s for uid %u\n",
- task->tk_pid, __func__, from_kuid(&init_user_ns, cred->cr_uid));
+ task->tk_pid, __func__, from_kuid(&init_user_ns, cred->cr_cred->fsuid));
gss_msg = gss_setup_upcall(gss_auth, cred);
if (PTR_ERR(gss_msg) == -EAGAIN) {
/* XXX: warning on the first, under the assumption we
@@ -637,7 +637,7 @@ gss_refresh_upcall(struct rpc_task *task)
out:
dprintk("RPC: %5u %s for uid %u result %d\n",
task->tk_pid, __func__,
- from_kuid(&init_user_ns, cred->cr_uid), err);
+ from_kuid(&init_user_ns, cred->cr_cred->fsuid), err);
return err;
}
@@ -653,7 +653,7 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
int err;
dprintk("RPC: %s for uid %u\n",
- __func__, from_kuid(&init_user_ns, cred->cr_uid));
+ __func__, from_kuid(&init_user_ns, cred->cr_cred->fsuid));
retry:
err = 0;
/* if gssd is down, just skip upcalling altogether */
@@ -701,7 +701,7 @@ out_intr:
gss_release_msg(gss_msg);
out:
dprintk("RPC: %s for uid %u result %d\n",
- __func__, from_kuid(&init_user_ns, cred->cr_uid), err);
+ __func__, from_kuid(&init_user_ns, cred->cr_cred->fsuid), err);
return err;
}
@@ -1248,7 +1248,7 @@ gss_dup_cred(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
new = kzalloc(sizeof(*gss_cred), GFP_NOIO);
if (new) {
struct auth_cred acred = {
- .uid = gss_cred->gc_base.cr_uid,
+ .cred = gss_cred->gc_base.cr_cred,
};
struct gss_cl_ctx *ctx =
rcu_dereference_protected(gss_cred->gc_ctx, 1);
@@ -1343,6 +1343,7 @@ gss_destroy_nullcred(struct rpc_cred *cred)
struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
+ put_cred(cred->cr_cred);
call_rcu(&cred->cr_rcu, gss_free_cred_callback);
if (ctx)
gss_put_ctx(ctx);
@@ -1361,7 +1362,7 @@ gss_destroy_cred(struct rpc_cred *cred)
static int
gss_hash_cred(struct auth_cred *acred, unsigned int hashbits)
{
- return hash_64(from_kuid(&init_user_ns, acred->uid), hashbits);
+ return hash_64(from_kuid(&init_user_ns, acred->cred->fsuid), hashbits);
}
/*
@@ -1381,7 +1382,7 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t
int err = -ENOMEM;
dprintk("RPC: %s for uid %d, flavor %d\n",
- __func__, from_kuid(&init_user_ns, acred->uid),
+ __func__, from_kuid(&init_user_ns, acred->cred->fsuid),
auth->au_flavor);
if (!(cred = kzalloc(sizeof(*cred), gfp)))
@@ -1394,9 +1395,7 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t
*/
cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
cred->gc_service = gss_auth->service;
- cred->gc_principal = NULL;
- if (acred->machine_cred)
- cred->gc_principal = acred->principal;
+ cred->gc_principal = acred->principal;
kref_get(&gss_auth->kref);
return &cred->gc_base;
@@ -1518,23 +1517,10 @@ out:
if (gss_cred->gc_principal == NULL)
return 0;
ret = strcmp(acred->principal, gss_cred->gc_principal) == 0;
- goto check_expire;
- }
- if (gss_cred->gc_principal != NULL)
- return 0;
- ret = uid_eq(rc->cr_uid, acred->uid);
-
-check_expire:
- if (ret == 0)
- return ret;
-
- /* Notify acred users of GSS context expiration timeout */
- if (test_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags) &&
- (gss_key_timeout(rc) != 0)) {
- /* test will now be done from generic cred */
- test_and_clear_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags);
- /* tell NFS layer that key will expire soon */
- set_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
+ } else {
+ if (gss_cred->gc_principal != NULL)
+ return 0;
+ ret = uid_eq(rc->cr_cred->fsuid, acred->cred->fsuid);
}
return ret;
}
@@ -1607,9 +1593,8 @@ static int gss_renew_cred(struct rpc_task *task)
gc_base);
struct rpc_auth *auth = oldcred->cr_auth;
struct auth_cred acred = {
- .uid = oldcred->cr_uid,
+ .cred = oldcred->cr_cred,
.principal = gss_cred->gc_principal,
- .machine_cred = (gss_cred->gc_principal != NULL ? 1 : 0),
};
struct rpc_cred *new;
@@ -2110,7 +2095,6 @@ static const struct rpc_credops gss_credops = {
.cr_name = "AUTH_GSS",
.crdestroy = gss_destroy_cred,
.cr_init = gss_cred_init,
- .crbind = rpcauth_generic_bind_cred,
.crmatch = gss_match,
.crmarshal = gss_marshal,
.crrefresh = gss_refresh,
@@ -2125,7 +2109,6 @@ static const struct rpc_credops gss_credops = {
static const struct rpc_credops gss_nullops = {
.cr_name = "AUTH_GSS",
.crdestroy = gss_destroy_nullcred,
- .crbind = rpcauth_generic_bind_cred,
.crmatch = gss_match,
.crmarshal = gss_marshal,
.crrefresh = gss_refresh_null,
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 16ac0f4cb7d8..379318dff534 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -244,7 +244,7 @@ gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
/**
* gss_mech_list_pseudoflavors - Discover registered GSS pseudoflavors
- * @array: array to fill in
+ * @array_ptr: array to fill in
* @size: size of "array"
*
* Returns the number of array items filled in, or a negative errno.
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c
index 2694a1bc026b..d0ceac57c06e 100644
--- a/net/sunrpc/auth_null.c
+++ b/net/sunrpc/auth_null.c
@@ -36,8 +36,6 @@ nul_destroy(struct rpc_auth *auth)
static struct rpc_cred *
nul_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
- if (flags & RPCAUTH_LOOKUP_RCU)
- return &null_cred;
return get_rpccred(&null_cred);
}
@@ -116,7 +114,6 @@ static
struct rpc_auth null_auth = {
.au_cslack = NUL_CALLSLACK,
.au_rslack = NUL_REPLYSLACK,
- .au_flags = RPCAUTH_AUTH_NO_CRKEY_TIMEOUT,
.au_ops = &authnull_ops,
.au_flavor = RPC_AUTH_NULL,
.au_count = REFCOUNT_INIT(1),
@@ -126,7 +123,6 @@ static
const struct rpc_credops null_credops = {
.cr_name = "AUTH_NULL",
.crdestroy = nul_destroy_cred,
- .crbind = rpcauth_generic_bind_cred,
.crmatch = nul_match,
.crmarshal = nul_marshal,
.crrefresh = nul_refresh,
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 4c1c7e56288f..387f6b3ffbea 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -11,16 +11,11 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/module.h>
+#include <linux/mempool.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/user_namespace.h>
-struct unx_cred {
- struct rpc_cred uc_base;
- kgid_t uc_gid;
- kgid_t uc_gids[UNX_NGROUPS];
-};
-#define uc_uid uc_base.cr_uid
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
@@ -28,6 +23,7 @@ struct unx_cred {
static struct rpc_auth unix_auth;
static const struct rpc_credops unix_credops;
+static mempool_t *unix_pool;
static struct rpc_auth *
unx_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
@@ -42,15 +38,6 @@ static void
unx_destroy(struct rpc_auth *auth)
{
dprintk("RPC: destroying UNIX authenticator %p\n", auth);
- rpcauth_clear_credcache(auth->au_credcache);
-}
-
-static int
-unx_hash_cred(struct auth_cred *acred, unsigned int hashbits)
-{
- return hash_64(from_kgid(&init_user_ns, acred->gid) |
- ((u64)from_kuid(&init_user_ns, acred->uid) <<
- (sizeof(gid_t) * 8)), hashbits);
}
/*
@@ -59,52 +46,24 @@ unx_hash_cred(struct auth_cred *acred, unsigned int hashbits)
static struct rpc_cred *
unx_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
- return rpcauth_lookup_credcache(auth, acred, flags, GFP_NOFS);
-}
-
-static struct rpc_cred *
-unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t gfp)
-{
- struct unx_cred *cred;
- unsigned int groups = 0;
- unsigned int i;
+ struct rpc_cred *ret = mempool_alloc(unix_pool, GFP_NOFS);
dprintk("RPC: allocating UNIX cred for uid %d gid %d\n",
- from_kuid(&init_user_ns, acred->uid),
- from_kgid(&init_user_ns, acred->gid));
-
- if (!(cred = kmalloc(sizeof(*cred), gfp)))
- return ERR_PTR(-ENOMEM);
+ from_kuid(&init_user_ns, acred->cred->fsuid),
+ from_kgid(&init_user_ns, acred->cred->fsgid));
- rpcauth_init_cred(&cred->uc_base, acred, auth, &unix_credops);
- cred->uc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
-
- if (acred->group_info != NULL)
- groups = acred->group_info->ngroups;
- if (groups > UNX_NGROUPS)
- groups = UNX_NGROUPS;
-
- cred->uc_gid = acred->gid;
- for (i = 0; i < groups; i++)
- cred->uc_gids[i] = acred->group_info->gid[i];
- if (i < UNX_NGROUPS)
- cred->uc_gids[i] = INVALID_GID;
-
- return &cred->uc_base;
-}
-
-static void
-unx_free_cred(struct unx_cred *unx_cred)
-{
- dprintk("RPC: unx_free_cred %p\n", unx_cred);
- kfree(unx_cred);
+ rpcauth_init_cred(ret, acred, auth, &unix_credops);
+ ret->cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
+ return ret;
}
static void
unx_free_cred_callback(struct rcu_head *head)
{
- struct unx_cred *unx_cred = container_of(head, struct unx_cred, uc_base.cr_rcu);
- unx_free_cred(unx_cred);
+ struct rpc_cred *rpc_cred = container_of(head, struct rpc_cred, cr_rcu);
+ dprintk("RPC: unx_free_cred %p\n", rpc_cred);
+ put_cred(rpc_cred->cr_cred);
+ mempool_free(rpc_cred, unix_pool);
}
static void
@@ -114,30 +73,32 @@ unx_destroy_cred(struct rpc_cred *cred)
}
/*
- * Match credentials against current process creds.
- * The root_override argument takes care of cases where the caller may
- * request root creds (e.g. for NFS swapping).
+ * Match credentials against the current auth_cred.
*/
static int
-unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags)
+unx_match(struct auth_cred *acred, struct rpc_cred *cred, int flags)
{
- struct unx_cred *cred = container_of(rcred, struct unx_cred, uc_base);
unsigned int groups = 0;
unsigned int i;
+ if (cred->cr_cred == acred->cred)
+ return 1;
- if (!uid_eq(cred->uc_uid, acred->uid) || !gid_eq(cred->uc_gid, acred->gid))
+ if (!uid_eq(cred->cr_cred->fsuid, acred->cred->fsuid) || !gid_eq(cred->cr_cred->fsgid, acred->cred->fsgid))
return 0;
- if (acred->group_info != NULL)
- groups = acred->group_info->ngroups;
+ if (acred->cred && acred->cred->group_info != NULL)
+ groups = acred->cred->group_info->ngroups;
if (groups > UNX_NGROUPS)
groups = UNX_NGROUPS;
+ if (cred->cr_cred->group_info == NULL)
+ return groups == 0;
+ if (groups != cred->cr_cred->group_info->ngroups)
+ return 0;
+
for (i = 0; i < groups ; i++)
- if (!gid_eq(cred->uc_gids[i], acred->group_info->gid[i]))
+ if (!gid_eq(cred->cr_cred->group_info->gid[i], acred->cred->group_info->gid[i]))
return 0;
- if (groups < UNX_NGROUPS && gid_valid(cred->uc_gids[groups]))
- return 0;
return 1;
}
@@ -149,9 +110,10 @@ static __be32 *
unx_marshal(struct rpc_task *task, __be32 *p)
{
struct rpc_clnt *clnt = task->tk_client;
- struct unx_cred *cred = container_of(task->tk_rqstp->rq_cred, struct unx_cred, uc_base);
+ struct rpc_cred *cred = task->tk_rqstp->rq_cred;
__be32 *base, *hold;
int i;
+ struct group_info *gi = cred->cr_cred->group_info;
*p++ = htonl(RPC_AUTH_UNIX);
base = p++;
@@ -162,11 +124,12 @@ unx_marshal(struct rpc_task *task, __be32 *p)
*/
p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen);
- *p++ = htonl((u32) from_kuid(&init_user_ns, cred->uc_uid));
- *p++ = htonl((u32) from_kgid(&init_user_ns, cred->uc_gid));
+ *p++ = htonl((u32) from_kuid(&init_user_ns, cred->cr_cred->fsuid));
+ *p++ = htonl((u32) from_kgid(&init_user_ns, cred->cr_cred->fsgid));
hold = p++;
- for (i = 0; i < UNX_NGROUPS && gid_valid(cred->uc_gids[i]); i++)
- *p++ = htonl((u32) from_kgid(&init_user_ns, cred->uc_gids[i]));
+ if (gi)
+ for (i = 0; i < UNX_NGROUPS && i < gi->ngroups; i++)
+ *p++ = htonl((u32) from_kgid(&init_user_ns, gi->gid[i]));
*hold = htonl(p - hold - 1); /* gid array length */
*base = htonl((p - base - 1) << 2); /* cred length */
@@ -213,12 +176,13 @@ unx_validate(struct rpc_task *task, __be32 *p)
int __init rpc_init_authunix(void)
{
- return rpcauth_init_credcache(&unix_auth);
+ unix_pool = mempool_create_kmalloc_pool(16, sizeof(struct rpc_cred));
+ return unix_pool ? 0 : -ENOMEM;
}
void rpc_destroy_authunix(void)
{
- rpcauth_destroy_credcache(&unix_auth);
+ mempool_destroy(unix_pool);
}
const struct rpc_authops authunix_ops = {
@@ -227,16 +191,13 @@ const struct rpc_authops authunix_ops = {
.au_name = "UNIX",
.create = unx_create,
.destroy = unx_destroy,
- .hash_cred = unx_hash_cred,
.lookup_cred = unx_lookup_cred,
- .crcreate = unx_create_cred,
};
static
struct rpc_auth unix_auth = {
.au_cslack = UNX_CALLSLACK,
.au_rslack = NUL_REPLYSLACK,
- .au_flags = RPCAUTH_AUTH_NO_CRKEY_TIMEOUT,
.au_ops = &authunix_ops,
.au_flavor = RPC_AUTH_UNIX,
.au_count = REFCOUNT_INIT(1),
@@ -246,7 +207,6 @@ static
const struct rpc_credops unix_credops = {
.cr_name = "AUTH_UNIX",
.crdestroy = unx_destroy_cred,
- .crbind = rpcauth_generic_bind_cred,
.crmatch = unx_match,
.crmarshal = unx_marshal,
.crrefresh = unx_refresh,
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index fa5ba6ed3197..ec451b8114b0 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -197,7 +197,7 @@ out_free:
/**
* xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
* @xprt: the transport holding the preallocated structures
- * @max_reqs the maximum number of preallocated structures to destroy
+ * @max_reqs: the maximum number of preallocated structures to destroy
*
* Since these structures may have been allocated by multiple calls
* to xprt_setup_backchannel, we only destroy up to the maximum number
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 24cbddc44c88..71d9599b5816 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -627,6 +627,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
new->cl_noretranstimeo = clnt->cl_noretranstimeo;
new->cl_discrtry = clnt->cl_discrtry;
new->cl_chatty = clnt->cl_chatty;
+ new->cl_principal = clnt->cl_principal;
return new;
out_err:
@@ -1029,7 +1030,7 @@ rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
task->tk_msg.rpc_argp = msg->rpc_argp;
task->tk_msg.rpc_resp = msg->rpc_resp;
if (msg->rpc_cred != NULL)
- task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred);
+ task->tk_msg.rpc_cred = get_cred(msg->rpc_cred);
}
}
@@ -2521,9 +2522,8 @@ static int rpc_ping(struct rpc_clnt *clnt)
.rpc_proc = &rpcproc_null,
};
int err;
- msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
- err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
- put_rpccred(msg.rpc_cred);
+ err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN |
+ RPC_TASK_NULLCREDS);
return err;
}
@@ -2534,15 +2534,15 @@ struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt,
{
struct rpc_message msg = {
.rpc_proc = &rpcproc_null,
- .rpc_cred = cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = clnt,
.rpc_xprt = xprt,
.rpc_message = &msg,
+ .rpc_op_cred = cred,
.callback_ops = (ops != NULL) ? ops : &rpc_default_ops,
.callback_data = data,
- .flags = flags,
+ .flags = flags | RPC_TASK_NULLCREDS,
};
return rpc_run_task(&task_setup_data);
@@ -2593,7 +2593,6 @@ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
void *dummy)
{
struct rpc_cb_add_xprt_calldata *data;
- struct rpc_cred *cred;
struct rpc_task *task;
data = kmalloc(sizeof(*data), GFP_NOFS);
@@ -2602,11 +2601,9 @@ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
data->xps = xprt_switch_get(xps);
data->xprt = xprt_get(xprt);
- cred = authnull_ops.lookup_cred(NULL, NULL, 0);
- task = rpc_call_null_helper(clnt, xprt, cred,
- RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC,
+ task = rpc_call_null_helper(clnt, xprt, NULL,
+ RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC|RPC_TASK_NULLCREDS,
&rpc_cb_add_xprt_call_ops, data);
- put_rpccred(cred);
if (IS_ERR(task))
return PTR_ERR(task);
rpc_put_task(task);
@@ -2637,7 +2634,6 @@ int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt,
struct rpc_xprt *xprt,
void *data)
{
- struct rpc_cred *cred;
struct rpc_task *task;
struct rpc_add_xprt_test *xtest = (struct rpc_add_xprt_test *)data;
int status = -EADDRINUSE;
@@ -2649,11 +2645,9 @@ int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt,
goto out_err;
/* Test the connection */
- cred = authnull_ops.lookup_cred(NULL, NULL, 0);
- task = rpc_call_null_helper(clnt, xprt, cred,
- RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
+ task = rpc_call_null_helper(clnt, xprt, NULL,
+ RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS,
NULL, NULL);
- put_rpccred(cred);
if (IS_ERR(task)) {
status = PTR_ERR(task);
goto out_err;
@@ -2667,6 +2661,9 @@ int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt,
/* rpc_xprt_switch and rpc_xprt are dereferenced by add_xprt_test() */
xtest->add_xprt_test(clnt, xprt, xtest->data);
+ xprt_put(xprt);
+ xprt_switch_put(xps);
+
/* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */
return 1;
out_err:
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 4fda18d47e2c..69663681bf9d 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1266,7 +1266,7 @@ static const struct rpc_pipe_ops gssd_dummy_pipe_ops = {
* that this file will be there and have a certain format.
*/
static int
-rpc_show_dummy_info(struct seq_file *m, void *v)
+rpc_dummy_info_show(struct seq_file *m, void *v)
{
seq_printf(m, "RPC server: %s\n", utsname()->nodename);
seq_printf(m, "service: foo (1) version 0\n");
@@ -1275,25 +1275,12 @@ rpc_show_dummy_info(struct seq_file *m, void *v)
seq_printf(m, "port: 0\n");
return 0;
}
-
-static int
-rpc_dummy_info_open(struct inode *inode, struct file *file)
-{
- return single_open(file, rpc_show_dummy_info, NULL);
-}
-
-static const struct file_operations rpc_dummy_info_operations = {
- .owner = THIS_MODULE,
- .open = rpc_dummy_info_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(rpc_dummy_info);
static const struct rpc_filelist gssd_dummy_info_file[] = {
[0] = {
.name = "info",
- .i_fop = &rpc_dummy_info_operations,
+ .i_fop = &rpc_dummy_info_fops,
.mode = S_IFREG | 0400,
},
};
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index c7872bc13860..41a971ac1c63 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -752,7 +752,7 @@ void rpcb_getport_async(struct rpc_task *task)
goto bailout_nofree;
}
- map = kzalloc(sizeof(struct rpcbind_args), GFP_ATOMIC);
+ map = kzalloc(sizeof(struct rpcbind_args), GFP_NOFS);
if (!map) {
status = -ENOMEM;
dprintk("RPC: %5u %s: no memory available\n",
@@ -770,7 +770,13 @@ void rpcb_getport_async(struct rpc_task *task)
case RPCBVERS_4:
case RPCBVERS_3:
map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
- map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
+ map->r_addr = rpc_sockaddr2uaddr(sap, GFP_NOFS);
+ if (!map->r_addr) {
+ status = -ENOMEM;
+ dprintk("RPC: %5u %s: no memory available\n",
+ task->tk_pid, __func__);
+ goto bailout_free_args;
+ }
map->r_owner = "";
break;
case RPCBVERS_2:
@@ -793,6 +799,8 @@ void rpcb_getport_async(struct rpc_task *task)
rpc_put_task(child);
return;
+bailout_free_args:
+ kfree(map);
bailout_release_client:
rpc_release_client(rpcb_clnt);
bailout_nofree:
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 57ca5bead1cb..adc3c40cc733 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -997,6 +997,8 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
task->tk_xprt = xprt_get(task_setup_data->rpc_xprt);
+ task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);
+
if (task->tk_ops->rpc_call_prepare != NULL)
task->tk_action = rpc_prepare_task;
@@ -1054,6 +1056,7 @@ static void rpc_free_task(struct rpc_task *task)
{
unsigned short tk_flags = task->tk_flags;
+ put_rpccred(task->tk_op_cred);
rpc_release_calldata(task->tk_ops, task->tk_calldata);
if (tk_flags & RPC_TASK_DYNAMIC) {
@@ -1071,7 +1074,7 @@ static void rpc_release_resources_task(struct rpc_task *task)
{
xprt_release(task);
if (task->tk_msg.rpc_cred) {
- put_rpccred(task->tk_msg.rpc_cred);
+ put_cred(task->tk_msg.rpc_cred);
task->tk_msg.rpc_cred = NULL;
}
rpc_task_release_client(task);
diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
index e2d64c7138c3..8394124126f8 100644
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -383,7 +383,7 @@ void xprt_iter_init_listall(struct rpc_xprt_iter *xpi,
/**
* xprt_iter_xchg_switch - Atomically swap out the rpc_xprt_switch
* @xpi: pointer to rpc_xprt_iter
- * @xps: pointer to a new rpc_xprt_switch or NULL
+ * @newswitch: pointer to a new rpc_xprt_switch or NULL
*
* Swaps out the existing xpi->xpi_xpswitch with a new value.
*/
@@ -401,7 +401,7 @@ struct rpc_xprt_switch *xprt_iter_xchg_switch(struct rpc_xprt_iter *xpi,
/**
* xprt_iter_destroy - Destroys the xprt iterator
- * @xpi pointer to rpc_xprt_iter
+ * @xpi: pointer to rpc_xprt_iter
*/
void xprt_iter_destroy(struct rpc_xprt_iter *xpi)
{
diff --git a/net/sunrpc/xprtrdma/Makefile b/net/sunrpc/xprtrdma/Makefile
index 8bf19e142b6b..8ed0377d7a18 100644
--- a/net/sunrpc/xprtrdma/Makefile
+++ b/net/sunrpc/xprtrdma/Makefile
@@ -1,8 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SUNRPC_XPRT_RDMA) += rpcrdma.o
-rpcrdma-y := transport.o rpc_rdma.o verbs.o \
- fmr_ops.o frwr_ops.o \
+rpcrdma-y := transport.o rpc_rdma.o verbs.o frwr_ops.o \
svc_rdma.o svc_rdma_backchannel.o svc_rdma_transport.o \
svc_rdma_sendto.o svc_rdma_recvfrom.o svc_rdma_rw.o \
module.o
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index edba0d35776b..0de9b3e63770 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -5,7 +5,6 @@
* Support for backward direction RPCs on RPC/RDMA.
*/
-#include <linux/module.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
@@ -20,29 +19,16 @@
#undef RPCRDMA_BACKCHANNEL_DEBUG
-static void rpcrdma_bc_free_rqst(struct rpcrdma_xprt *r_xprt,
- struct rpc_rqst *rqst)
-{
- struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
- struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
-
- spin_lock(&buf->rb_reqslock);
- list_del(&req->rl_all);
- spin_unlock(&buf->rb_reqslock);
-
- rpcrdma_destroy_req(req);
-}
-
static int rpcrdma_bc_setup_reqs(struct rpcrdma_xprt *r_xprt,
unsigned int count)
{
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
+ struct rpcrdma_req *req;
struct rpc_rqst *rqst;
unsigned int i;
for (i = 0; i < (count << 1); i++) {
struct rpcrdma_regbuf *rb;
- struct rpcrdma_req *req;
size_t size;
req = rpcrdma_create_req(r_xprt);
@@ -68,7 +54,7 @@ static int rpcrdma_bc_setup_reqs(struct rpcrdma_xprt *r_xprt,
return 0;
out_fail:
- rpcrdma_bc_free_rqst(r_xprt, rqst);
+ rpcrdma_req_destroy(req);
return -ENOMEM;
}
@@ -101,7 +87,6 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
goto out_free;
r_xprt->rx_buf.rb_bc_srv_max_requests = reqs;
- request_module("svcrdma");
trace_xprtrdma_cb_setup(r_xprt, reqs);
return 0;
@@ -173,21 +158,21 @@ static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
*/
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
- struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
+ struct rpc_xprt *xprt = rqst->rq_xprt;
+ struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
int rc;
- if (!xprt_connected(rqst->rq_xprt))
- goto drop_connection;
+ if (!xprt_connected(xprt))
+ return -ENOTCONN;
- if (!xprt_request_get_cong(rqst->rq_xprt, rqst))
+ if (!xprt_request_get_cong(xprt, rqst))
return -EBADSLT;
rc = rpcrdma_bc_marshal_reply(rqst);
if (rc < 0)
goto failed_marshal;
- rpcrdma_post_recvs(r_xprt, true);
if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
goto drop_connection;
return 0;
@@ -196,7 +181,7 @@ failed_marshal:
if (rc != -ENOTCONN)
return rc;
drop_connection:
- xprt_disconnect_done(rqst->rq_xprt);
+ xprt_rdma_close(xprt);
return -ENOTCONN;
}
@@ -207,7 +192,6 @@ drop_connection:
*/
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
- struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
struct rpc_rqst *rqst, *tmp;
spin_lock(&xprt->bc_pa_lock);
@@ -215,7 +199,7 @@ void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
list_del(&rqst->rq_bc_pa_list);
spin_unlock(&xprt->bc_pa_lock);
- rpcrdma_bc_free_rqst(r_xprt, rqst);
+ rpcrdma_req_destroy(rpcr_to_rdmar(rqst));
spin_lock(&xprt->bc_pa_lock);
}
@@ -231,9 +215,6 @@ void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
struct rpc_xprt *xprt = rqst->rq_xprt;
- dprintk("RPC: %s: freeing rqst %p (req %p)\n",
- __func__, rqst, req);
-
rpcrdma_recv_buffer_put(req->rl_reply);
req->rl_reply = NULL;
@@ -319,7 +300,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
out_overflow:
pr_warn("RPC/RDMA backchannel overflow\n");
- xprt_disconnect_done(xprt);
+ xprt_force_disconnect(xprt);
/* This receive buffer gets reposted automatically
* when the connection is re-established.
*/
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
deleted file mode 100644
index fd8fea59fe92..000000000000
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ /dev/null
@@ -1,337 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2015, 2017 Oracle. All rights reserved.
- * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
- */
-
-/* Lightweight memory registration using Fast Memory Regions (FMR).
- * Referred to sometimes as MTHCAFMR mode.
- *
- * FMR uses synchronous memory registration and deregistration.
- * FMR registration is known to be fast, but FMR deregistration
- * can take tens of usecs to complete.
- */
-
-/* Normal operation
- *
- * A Memory Region is prepared for RDMA READ or WRITE using the
- * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
- * finished, the Memory Region is unmapped using the ib_unmap_fmr
- * verb (fmr_op_unmap).
- */
-
-#include <linux/sunrpc/svc_rdma.h>
-
-#include "xprt_rdma.h"
-#include <trace/events/rpcrdma.h>
-
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-# define RPCDBG_FACILITY RPCDBG_TRANS
-#endif
-
-/* Maximum scatter/gather per FMR */
-#define RPCRDMA_MAX_FMR_SGES (64)
-
-/* Access mode of externally registered pages */
-enum {
- RPCRDMA_FMR_ACCESS_FLAGS = IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ,
-};
-
-bool
-fmr_is_supported(struct rpcrdma_ia *ia)
-{
- if (!ia->ri_device->ops.alloc_fmr) {
- pr_info("rpcrdma: 'fmr' mode is not supported by device %s\n",
- ia->ri_device->name);
- return false;
- }
- return true;
-}
-
-static void
-__fmr_unmap(struct rpcrdma_mr *mr)
-{
- LIST_HEAD(l);
- int rc;
-
- list_add(&mr->fmr.fm_mr->list, &l);
- rc = ib_unmap_fmr(&l);
- list_del(&mr->fmr.fm_mr->list);
- if (rc)
- pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
- mr, rc);
-}
-
-/* Release an MR.
- */
-static void
-fmr_op_release_mr(struct rpcrdma_mr *mr)
-{
- int rc;
-
- kfree(mr->fmr.fm_physaddrs);
- kfree(mr->mr_sg);
-
- /* In case this one was left mapped, try to unmap it
- * to prevent dealloc_fmr from failing with EBUSY
- */
- __fmr_unmap(mr);
-
- rc = ib_dealloc_fmr(mr->fmr.fm_mr);
- if (rc)
- pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
- mr, rc);
-
- kfree(mr);
-}
-
-/* MRs are dynamically allocated, so simply clean up and release the MR.
- * A replacement MR will subsequently be allocated on demand.
- */
-static void
-fmr_mr_recycle_worker(struct work_struct *work)
-{
- struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
- struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
-
- trace_xprtrdma_mr_recycle(mr);
-
- trace_xprtrdma_mr_unmap(mr);
- ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
- mr->mr_sg, mr->mr_nents, mr->mr_dir);
-
- spin_lock(&r_xprt->rx_buf.rb_mrlock);
- list_del(&mr->mr_all);
- r_xprt->rx_stats.mrs_recycled++;
- spin_unlock(&r_xprt->rx_buf.rb_mrlock);
- fmr_op_release_mr(mr);
-}
-
-static int
-fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
-{
- static struct ib_fmr_attr fmr_attr = {
- .max_pages = RPCRDMA_MAX_FMR_SGES,
- .max_maps = 1,
- .page_shift = PAGE_SHIFT
- };
-
- mr->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
- sizeof(u64), GFP_KERNEL);
- if (!mr->fmr.fm_physaddrs)
- goto out_free;
-
- mr->mr_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
- sizeof(*mr->mr_sg), GFP_KERNEL);
- if (!mr->mr_sg)
- goto out_free;
-
- sg_init_table(mr->mr_sg, RPCRDMA_MAX_FMR_SGES);
-
- mr->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
- &fmr_attr);
- if (IS_ERR(mr->fmr.fm_mr))
- goto out_fmr_err;
-
- INIT_LIST_HEAD(&mr->mr_list);
- INIT_WORK(&mr->mr_recycle, fmr_mr_recycle_worker);
- return 0;
-
-out_fmr_err:
- dprintk("RPC: %s: ib_alloc_fmr returned %ld\n", __func__,
- PTR_ERR(mr->fmr.fm_mr));
-
-out_free:
- kfree(mr->mr_sg);
- kfree(mr->fmr.fm_physaddrs);
- return -ENOMEM;
-}
-
-/* On success, sets:
- * ep->rep_attr.cap.max_send_wr
- * ep->rep_attr.cap.max_recv_wr
- * cdata->max_requests
- * ia->ri_max_segs
- */
-static int
-fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
- struct rpcrdma_create_data_internal *cdata)
-{
- int max_qp_wr;
-
- max_qp_wr = ia->ri_device->attrs.max_qp_wr;
- max_qp_wr -= RPCRDMA_BACKWARD_WRS;
- max_qp_wr -= 1;
- if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
- return -ENOMEM;
- if (cdata->max_requests > max_qp_wr)
- cdata->max_requests = max_qp_wr;
- ep->rep_attr.cap.max_send_wr = cdata->max_requests;
- ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
- ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
- ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
- ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
- ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
-
- ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
- RPCRDMA_MAX_FMR_SGES);
- ia->ri_max_segs += 2; /* segments for head and tail buffers */
- return 0;
-}
-
-/* FMR mode conveys up to 64 pages of payload per chunk segment.
- */
-static size_t
-fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
-{
- return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
- RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
-}
-
-/* Use the ib_map_phys_fmr() verb to register a memory region
- * for remote access via RDMA READ or RDMA WRITE.
- */
-static struct rpcrdma_mr_seg *
-fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
- int nsegs, bool writing, struct rpcrdma_mr **out)
-{
- struct rpcrdma_mr_seg *seg1 = seg;
- int len, pageoff, i, rc;
- struct rpcrdma_mr *mr;
- u64 *dma_pages;
-
- mr = rpcrdma_mr_get(r_xprt);
- if (!mr)
- return ERR_PTR(-EAGAIN);
-
- pageoff = offset_in_page(seg1->mr_offset);
- seg1->mr_offset -= pageoff; /* start of page */
- seg1->mr_len += pageoff;
- len = -pageoff;
- if (nsegs > RPCRDMA_MAX_FMR_SGES)
- nsegs = RPCRDMA_MAX_FMR_SGES;
- for (i = 0; i < nsegs;) {
- if (seg->mr_page)
- sg_set_page(&mr->mr_sg[i],
- seg->mr_page,
- seg->mr_len,
- offset_in_page(seg->mr_offset));
- else
- sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
- seg->mr_len);
- len += seg->mr_len;
- ++seg;
- ++i;
- /* Check for holes */
- if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
- offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
- break;
- }
- mr->mr_dir = rpcrdma_data_dir(writing);
-
- mr->mr_nents = ib_dma_map_sg(r_xprt->rx_ia.ri_device,
- mr->mr_sg, i, mr->mr_dir);
- if (!mr->mr_nents)
- goto out_dmamap_err;
- trace_xprtrdma_mr_map(mr);
-
- for (i = 0, dma_pages = mr->fmr.fm_physaddrs; i < mr->mr_nents; i++)
- dma_pages[i] = sg_dma_address(&mr->mr_sg[i]);
- rc = ib_map_phys_fmr(mr->fmr.fm_mr, dma_pages, mr->mr_nents,
- dma_pages[0]);
- if (rc)
- goto out_maperr;
-
- mr->mr_handle = mr->fmr.fm_mr->rkey;
- mr->mr_length = len;
- mr->mr_offset = dma_pages[0] + pageoff;
-
- *out = mr;
- return seg;
-
-out_dmamap_err:
- pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
- mr->mr_sg, i);
- rpcrdma_mr_put(mr);
- return ERR_PTR(-EIO);
-
-out_maperr:
- pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
- len, (unsigned long long)dma_pages[0],
- pageoff, mr->mr_nents, rc);
- rpcrdma_mr_unmap_and_put(mr);
- return ERR_PTR(-EIO);
-}
-
-/* Post Send WR containing the RPC Call message.
- */
-static int
-fmr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
-{
- return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, NULL);
-}
-
-/* Invalidate all memory regions that were registered for "req".
- *
- * Sleeps until it is safe for the host CPU to access the
- * previously mapped memory regions.
- *
- * Caller ensures that @mrs is not empty before the call. This
- * function empties the list.
- */
-static void
-fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
-{
- struct rpcrdma_mr *mr;
- LIST_HEAD(unmap_list);
- int rc;
-
- /* ORDER: Invalidate all of the req's MRs first
- *
- * ib_unmap_fmr() is slow, so use a single call instead
- * of one call per mapped FMR.
- */
- list_for_each_entry(mr, mrs, mr_list) {
- dprintk("RPC: %s: unmapping fmr %p\n",
- __func__, &mr->fmr);
- trace_xprtrdma_mr_localinv(mr);
- list_add_tail(&mr->fmr.fm_mr->list, &unmap_list);
- }
- r_xprt->rx_stats.local_inv_needed++;
- rc = ib_unmap_fmr(&unmap_list);
- if (rc)
- goto out_release;
-
- /* ORDER: Now DMA unmap all of the req's MRs, and return
- * them to the free MW list.
- */
- while (!list_empty(mrs)) {
- mr = rpcrdma_mr_pop(mrs);
- list_del(&mr->fmr.fm_mr->list);
- rpcrdma_mr_unmap_and_put(mr);
- }
-
- return;
-
-out_release:
- pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);
-
- while (!list_empty(mrs)) {
- mr = rpcrdma_mr_pop(mrs);
- list_del(&mr->fmr.fm_mr->list);
- rpcrdma_mr_recycle(mr);
- }
-}
-
-const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
- .ro_map = fmr_op_map,
- .ro_send = fmr_op_send,
- .ro_unmap_sync = fmr_op_unmap_sync,
- .ro_open = fmr_op_open,
- .ro_maxpages = fmr_op_maxpages,
- .ro_init_mr = fmr_op_init_mr,
- .ro_release_mr = fmr_op_release_mr,
- .ro_displayname = "fmr",
- .ro_send_w_inv_ok = 0,
-};
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index fc6378cc0c1c..6a561056b538 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -15,21 +15,21 @@
/* Normal operation
*
* A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
- * Work Request (frwr_op_map). When the RDMA operation is finished, this
+ * Work Request (frwr_map). When the RDMA operation is finished, this
* Memory Region is invalidated using a LOCAL_INV Work Request
- * (frwr_op_unmap_sync).
+ * (frwr_unmap_sync).
*
* Typically these Work Requests are not signaled, and neither are RDMA
* SEND Work Requests (with the exception of signaling occasionally to
* prevent provider work queue overflows). This greatly reduces HCA
* interrupt workload.
*
- * As an optimization, frwr_op_unmap marks MRs INVALID before the
+ * As an optimization, frwr_unmap marks MRs INVALID before the
* LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
* rb_mrs immediately so that no work (like managing a linked list
* under a spinlock) is needed in the completion upcall.
*
- * But this means that frwr_op_map() can occasionally encounter an MR
+ * But this means that frwr_map() can occasionally encounter an MR
* that is INVALID but the LOCAL_INV WR has not completed. Work Queue
* ordering prevents a subsequent FAST_REG WR from executing against
* that MR while it is still being invalidated.
@@ -57,14 +57,14 @@
* FLUSHED_LI: The MR was being invalidated when the QP entered ERROR
* state, and the pending WR was flushed.
*
- * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
+ * When frwr_map encounters FLUSHED and VALID MRs, they are recovered
* with ib_dereg_mr and then are re-initialized. Because MR recovery
* allocates fresh resources, it is deferred to a workqueue, and the
* recovered MRs are placed back on the rb_mrs list when recovery is
- * complete. frwr_op_map allocates another MR for the current RPC while
+ * complete. frwr_map allocates another MR for the current RPC while
* the broken MR is reset.
*
- * To ensure that frwr_op_map doesn't encounter an MR that is marked
+ * To ensure that frwr_map doesn't encounter an MR that is marked
* INVALID but that is about to be flushed due to a previous transport
* disconnect, the transport connect worker attempts to drain all
* pending send queue WRs before the transport is reconnected.
@@ -80,8 +80,13 @@
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif
-bool
-frwr_is_supported(struct rpcrdma_ia *ia)
+/**
+ * frwr_is_supported - Check if device supports FRWR
+ * @ia: interface adapter to check
+ *
+ * Returns true if device supports FRWR, otherwise false
+ */
+bool frwr_is_supported(struct rpcrdma_ia *ia)
{
struct ib_device_attr *attrs = &ia->ri_device->attrs;
@@ -97,15 +102,18 @@ out_not_supported:
return false;
}
-static void
-frwr_op_release_mr(struct rpcrdma_mr *mr)
+/**
+ * frwr_release_mr - Destroy one MR
+ * @mr: MR allocated by frwr_init_mr
+ *
+ */
+void frwr_release_mr(struct rpcrdma_mr *mr)
{
int rc;
rc = ib_dereg_mr(mr->frwr.fr_mr);
if (rc)
- pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
- mr, rc);
+ trace_xprtrdma_frwr_dereg(mr, rc);
kfree(mr->mr_sg);
kfree(mr);
}
@@ -117,60 +125,78 @@ static void
frwr_mr_recycle_worker(struct work_struct *work)
{
struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
- enum rpcrdma_frwr_state state = mr->frwr.fr_state;
struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
trace_xprtrdma_mr_recycle(mr);
- if (state != FRWR_FLUSHED_LI) {
+ if (mr->mr_dir != DMA_NONE) {
trace_xprtrdma_mr_unmap(mr);
ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
mr->mr_sg, mr->mr_nents, mr->mr_dir);
+ mr->mr_dir = DMA_NONE;
}
spin_lock(&r_xprt->rx_buf.rb_mrlock);
list_del(&mr->mr_all);
r_xprt->rx_stats.mrs_recycled++;
spin_unlock(&r_xprt->rx_buf.rb_mrlock);
- frwr_op_release_mr(mr);
+
+ frwr_release_mr(mr);
}
-static int
-frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
+/**
+ * frwr_init_mr - Initialize one MR
+ * @ia: interface adapter
+ * @mr: generic MR to prepare for FRWR
+ *
+ * Returns zero if successful. Otherwise a negative errno
+ * is returned.
+ */
+int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
unsigned int depth = ia->ri_max_frwr_depth;
- struct rpcrdma_frwr *frwr = &mr->frwr;
+ struct scatterlist *sg;
+ struct ib_mr *frmr;
int rc;
- frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
- if (IS_ERR(frwr->fr_mr))
+ frmr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
+ if (IS_ERR(frmr))
goto out_mr_err;
- mr->mr_sg = kcalloc(depth, sizeof(*mr->mr_sg), GFP_KERNEL);
- if (!mr->mr_sg)
+ sg = kcalloc(depth, sizeof(*sg), GFP_KERNEL);
+ if (!sg)
goto out_list_err;
+ mr->frwr.fr_mr = frmr;
+ mr->frwr.fr_state = FRWR_IS_INVALID;
+ mr->mr_dir = DMA_NONE;
INIT_LIST_HEAD(&mr->mr_list);
INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker);
- sg_init_table(mr->mr_sg, depth);
- init_completion(&frwr->fr_linv_done);
+ init_completion(&mr->frwr.fr_linv_done);
+
+ sg_init_table(sg, depth);
+ mr->mr_sg = sg;
return 0;
out_mr_err:
- rc = PTR_ERR(frwr->fr_mr);
- dprintk("RPC: %s: ib_alloc_mr status %i\n",
- __func__, rc);
+ rc = PTR_ERR(frmr);
+ trace_xprtrdma_frwr_alloc(mr, rc);
return rc;
out_list_err:
- rc = -ENOMEM;
dprintk("RPC: %s: sg allocation failure\n",
__func__);
- ib_dereg_mr(frwr->fr_mr);
- return rc;
+ ib_dereg_mr(frmr);
+ return -ENOMEM;
}
-/* On success, sets:
+/**
+ * frwr_open - Prepare an endpoint for use with FRWR
+ * @ia: interface adapter this endpoint will use
+ * @ep: endpoint to prepare
+ * @cdata: transport parameters
+ *
+ * On success, sets:
* ep->rep_attr.cap.max_send_wr
* ep->rep_attr.cap.max_recv_wr
* cdata->max_requests
@@ -179,10 +205,11 @@ out_list_err:
* And these FRWR-related fields:
* ia->ri_max_frwr_depth
* ia->ri_mrtype
+ *
+ * On failure, a negative errno is returned.
*/
-static int
-frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
- struct rpcrdma_create_data_internal *cdata)
+int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
+ struct rpcrdma_create_data_internal *cdata)
{
struct ib_device_attr *attrs = &ia->ri_device->attrs;
int max_qp_wr, depth, delta;
@@ -191,10 +218,17 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;
- ia->ri_max_frwr_depth =
- min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
- attrs->max_fast_reg_page_list_len);
- dprintk("RPC: %s: device's max FR page list len = %u\n",
+ /* Quirk: Some devices advertise a large max_fast_reg_page_list_len
+ * capability, but perform optimally when the MRs are not larger
+ * than a page.
+ */
+ if (attrs->max_sge_rd > 1)
+ ia->ri_max_frwr_depth = attrs->max_sge_rd;
+ else
+ ia->ri_max_frwr_depth = attrs->max_fast_reg_page_list_len;
+ if (ia->ri_max_frwr_depth > RPCRDMA_MAX_DATA_SEGS)
+ ia->ri_max_frwr_depth = RPCRDMA_MAX_DATA_SEGS;
+ dprintk("RPC: %s: max FR page list depth = %u\n",
__func__, ia->ri_max_frwr_depth);
/* Add room for frwr register and invalidate WRs.
@@ -242,20 +276,28 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
ia->ri_max_frwr_depth);
- ia->ri_max_segs += 2; /* segments for head and tail buffers */
+ /* Reply chunks require segments for head and tail buffers */
+ ia->ri_max_segs += 2;
+ if (ia->ri_max_segs > RPCRDMA_MAX_HDR_SEGS)
+ ia->ri_max_segs = RPCRDMA_MAX_HDR_SEGS;
return 0;
}
-/* FRWR mode conveys a list of pages per chunk segment. The
+/**
+ * frwr_maxpages - Compute size of largest payload
+ * @r_xprt: transport
+ *
+ * Returns maximum size of an RPC message, in pages.
+ *
+ * FRWR mode conveys a list of pages per chunk segment. The
* maximum length of that list is the FRWR page list depth.
*/
-static size_t
-frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
+size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
- RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frwr_depth);
+ (ia->ri_max_segs - 2) * ia->ri_max_frwr_depth);
}
static void
@@ -332,12 +374,25 @@ frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
trace_xprtrdma_wc_li_wake(wc, frwr);
}
-/* Post a REG_MR Work Request to register a memory region
+/**
+ * frwr_map - Register a memory region
+ * @r_xprt: controlling transport
+ * @seg: memory region co-ordinates
+ * @nsegs: number of segments remaining
+ * @writing: true when RDMA Write will be used
+ * @xid: XID of RPC using the registered memory
+ * @out: initialized MR
+ *
+ * Prepare a REG_MR Work Request to register a memory region
* for remote access via RDMA READ or RDMA WRITE.
+ *
+ * Returns the next segment or a negative errno pointer.
+ * On success, the prepared MR is planted in @out.
*/
-static struct rpcrdma_mr_seg *
-frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
- int nsegs, bool writing, struct rpcrdma_mr **out)
+struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
+ struct rpcrdma_mr_seg *seg,
+ int nsegs, bool writing, u32 xid,
+ struct rpcrdma_mr **out)
{
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
@@ -384,13 +439,14 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir);
if (!mr->mr_nents)
goto out_dmamap_err;
- trace_xprtrdma_mr_map(mr);
ibmr = frwr->fr_mr;
n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
if (unlikely(n != mr->mr_nents))
goto out_mapmr_err;
+ ibmr->iova &= 0x00000000ffffffff;
+ ibmr->iova |= ((u64)cpu_to_be32(xid)) << 32;
key = (u8)(ibmr->rkey & 0x000000FF);
ib_update_fast_reg_key(ibmr, ++key);
@@ -404,32 +460,35 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
mr->mr_handle = ibmr->rkey;
mr->mr_length = ibmr->length;
mr->mr_offset = ibmr->iova;
+ trace_xprtrdma_mr_map(mr);
*out = mr;
return seg;
out_dmamap_err:
- pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
- mr->mr_sg, i);
frwr->fr_state = FRWR_IS_INVALID;
+ trace_xprtrdma_frwr_sgerr(mr, i);
rpcrdma_mr_put(mr);
return ERR_PTR(-EIO);
out_mapmr_err:
- pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
- frwr->fr_mr, n, mr->mr_nents);
+ trace_xprtrdma_frwr_maperr(mr, n);
rpcrdma_mr_recycle(mr);
return ERR_PTR(-EIO);
}
-/* Post Send WR containing the RPC Call message.
+/**
+ * frwr_send - post Send WR containing the RPC Call message
+ * @ia: interface adapter
+ * @req: Prepared RPC Call
*
- * For FRMR, chain any FastReg WRs to the Send WR. Only a
+ * For FRWR, chain any FastReg WRs to the Send WR. Only a
* single ib_post_send call is needed to register memory
* and then post the Send WR.
+ *
+ * Returns the result of ib_post_send.
*/
-static int
-frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
+int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
struct ib_send_wr *post_wr;
struct rpcrdma_mr *mr;
@@ -451,15 +510,18 @@ frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
}
/* If ib_post_send fails, the next ->send_request for
- * @req will queue these MWs for recovery.
+ * @req will queue these MRs for recovery.
*/
return ib_post_send(ia->ri_id->qp, post_wr, NULL);
}
-/* Handle a remotely invalidated mr on the @mrs list
+/**
+ * frwr_reminv - handle a remotely invalidated mr on the @mrs list
+ * @rep: Received reply
+ * @mrs: list of MRs to check
+ *
*/
-static void
-frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
+void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
struct rpcrdma_mr *mr;
@@ -473,7 +535,10 @@ frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
}
}
-/* Invalidate all memory regions that were registered for "req".
+/**
+ * frwr_unmap_sync - invalidate memory regions that were registered for @req
+ * @r_xprt: controlling transport
+ * @mrs: list of MRs to process
*
* Sleeps until it is safe for the host CPU to access the
* previously mapped memory regions.
@@ -481,8 +546,7 @@ frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
* Caller ensures that @mrs is not empty before the call. This
* function empties the list.
*/
-static void
-frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
+void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
struct ib_send_wr *first, **prev, *last;
const struct ib_send_wr *bad_wr;
@@ -561,20 +625,7 @@ out_release:
mr = container_of(frwr, struct rpcrdma_mr, frwr);
bad_wr = bad_wr->next;
- list_del(&mr->mr_list);
- frwr_op_release_mr(mr);
+ list_del_init(&mr->mr_list);
+ rpcrdma_mr_recycle(mr);
}
}
-
-const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
- .ro_map = frwr_op_map,
- .ro_send = frwr_op_send,
- .ro_reminv = frwr_op_reminv,
- .ro_unmap_sync = frwr_op_unmap_sync,
- .ro_open = frwr_op_open,
- .ro_maxpages = frwr_op_maxpages,
- .ro_init_mr = frwr_op_init_mr,
- .ro_release_mr = frwr_op_release_mr,
- .ro_displayname = "frwr",
- .ro_send_w_inv_ok = RPCRDMA_CMP_F_SND_W_INV_OK,
-};
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 9f53e0240035..d18614e02b4e 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -218,11 +218,12 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
page_base = offset_in_page(xdrbuf->page_base);
while (len) {
- if (unlikely(!*ppages)) {
- /* XXX: Certain upper layer operations do
- * not provide receive buffer pages.
- */
- *ppages = alloc_page(GFP_ATOMIC);
+ /* ACL likes to be lazy in allocating pages - ACLs
+ * are small by default but can get huge.
+ */
+ if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) {
+ if (!*ppages)
+ *ppages = alloc_page(GFP_ATOMIC);
if (!*ppages)
return -ENOBUFS;
}
@@ -356,8 +357,7 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
return nsegs;
do {
- seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
- false, &mr);
+ seg = frwr_map(r_xprt, seg, nsegs, false, rqst->rq_xid, &mr);
if (IS_ERR(seg))
return PTR_ERR(seg);
rpcrdma_mr_push(mr, &req->rl_registered);
@@ -365,7 +365,7 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
if (encode_read_segment(xdr, mr, pos) < 0)
return -EMSGSIZE;
- trace_xprtrdma_read_chunk(rqst->rq_task, pos, mr, nsegs);
+ trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
r_xprt->rx_stats.read_chunk_count++;
nsegs -= mr->mr_nents;
} while (nsegs);
@@ -414,8 +414,7 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
nchunks = 0;
do {
- seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
- true, &mr);
+ seg = frwr_map(r_xprt, seg, nsegs, true, rqst->rq_xid, &mr);
if (IS_ERR(seg))
return PTR_ERR(seg);
rpcrdma_mr_push(mr, &req->rl_registered);
@@ -423,7 +422,7 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
if (encode_rdma_segment(xdr, mr) < 0)
return -EMSGSIZE;
- trace_xprtrdma_write_chunk(rqst->rq_task, mr, nsegs);
+ trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
r_xprt->rx_stats.write_chunk_count++;
r_xprt->rx_stats.total_rdma_request += mr->mr_length;
nchunks++;
@@ -472,8 +471,7 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
nchunks = 0;
do {
- seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
- true, &mr);
+ seg = frwr_map(r_xprt, seg, nsegs, true, rqst->rq_xid, &mr);
if (IS_ERR(seg))
return PTR_ERR(seg);
rpcrdma_mr_push(mr, &req->rl_registered);
@@ -481,7 +479,7 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
if (encode_rdma_segment(xdr, mr) < 0)
return -EMSGSIZE;
- trace_xprtrdma_reply_chunk(rqst->rq_task, mr, nsegs);
+ trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
r_xprt->rx_stats.reply_chunk_count++;
r_xprt->rx_stats.total_rdma_request += mr->mr_length;
nchunks++;
@@ -667,7 +665,7 @@ out_mapping_overflow:
out_mapping_err:
rpcrdma_unmap_sendctx(sc);
- pr_err("rpcrdma: Send mapping error\n");
+ trace_xprtrdma_dma_maperr(sge[sge_no].addr);
return false;
}
@@ -1188,17 +1186,20 @@ rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
p = xdr_inline_decode(xdr, 2 * sizeof(*p));
if (!p)
break;
- dprintk("RPC: %5u: %s: server reports version error (%u-%u)\n",
- rqst->rq_task->tk_pid, __func__,
- be32_to_cpup(p), be32_to_cpu(*(p + 1)));
+ dprintk("RPC: %s: server reports "
+ "version error (%u-%u), xid %08x\n", __func__,
+ be32_to_cpup(p), be32_to_cpu(*(p + 1)),
+ be32_to_cpu(rep->rr_xid));
break;
case err_chunk:
- dprintk("RPC: %5u: %s: server reports header decoding error\n",
- rqst->rq_task->tk_pid, __func__);
+ dprintk("RPC: %s: server reports "
+ "header decoding error, xid %08x\n", __func__,
+ be32_to_cpu(rep->rr_xid));
break;
default:
- dprintk("RPC: %5u: %s: server reports unrecognized error %d\n",
- rqst->rq_task->tk_pid, __func__, be32_to_cpup(p));
+ dprintk("RPC: %s: server reports "
+ "unrecognized error %d, xid %08x\n", __func__,
+ be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
}
r_xprt->rx_stats.bad_reply_count++;
@@ -1248,7 +1249,6 @@ out:
out_badheader:
trace_xprtrdma_reply_hdr(rep);
r_xprt->rx_stats.bad_reply_count++;
- status = -EIO;
goto out;
}
@@ -1262,8 +1262,7 @@ void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
* RPC has relinquished all its Send Queue entries.
*/
if (!list_empty(&req->rl_registered))
- r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt,
- &req->rl_registered);
+ frwr_unmap_sync(r_xprt, &req->rl_registered);
/* Ensure that any DMA mapped pages associated with
* the Send of the RPC Call have been unmapped before
@@ -1292,7 +1291,7 @@ void rpcrdma_deferred_completion(struct work_struct *work)
trace_xprtrdma_defer_cmp(rep);
if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
- r_xprt->rx_ia.ri_ops->ro_reminv(rep, &req->rl_registered);
+ frwr_reminv(rep, &req->rl_registered);
rpcrdma_release_rqst(r_xprt, req);
rpcrdma_complete_rqst(rep);
}
@@ -1312,11 +1311,6 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
u32 credits;
__be32 *p;
- --buf->rb_posted_receives;
-
- if (rep->rr_hdrbuf.head[0].iov_len == 0)
- goto out_badstatus;
-
/* Fixed transport header fields */
xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
rep->rr_hdrbuf.head[0].iov_base);
@@ -1356,36 +1350,30 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
}
req = rpcr_to_rdmar(rqst);
+ if (req->rl_reply) {
+ trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
+ rpcrdma_recv_buffer_put(req->rl_reply);
+ }
req->rl_reply = rep;
rep->rr_rqst = rqst;
clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
-
- rpcrdma_post_recvs(r_xprt, false);
- queue_work(rpcrdma_receive_wq, &rep->rr_work);
+ queue_work(buf->rb_completion_wq, &rep->rr_work);
return;
out_badversion:
trace_xprtrdma_reply_vers(rep);
- goto repost;
+ goto out;
-/* The RPC transaction has already been terminated, or the header
- * is corrupt.
- */
out_norqst:
spin_unlock(&xprt->queue_lock);
trace_xprtrdma_reply_rqst(rep);
- goto repost;
+ goto out;
out_shortreply:
trace_xprtrdma_reply_short(rep);
-/* If no pending RPC transaction was matched, post a replacement
- * receive buffer before returning.
- */
-repost:
- rpcrdma_post_recvs(r_xprt, false);
-out_badstatus:
+out:
rpcrdma_recv_buffer_put(rep);
}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index f3c147d70286..b908f2ca08fd 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -200,11 +200,10 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
svc_rdma_send_ctxt_put(rdma, ctxt);
goto drop_connection;
}
- return rc;
+ return 0;
drop_connection:
dprintk("svcrdma: failed to send bc call\n");
- xprt_disconnect_done(xprt);
return -ENOTCONN;
}
@@ -225,8 +224,11 @@ xprt_rdma_bc_send_request(struct rpc_rqst *rqst)
ret = -ENOTCONN;
rdma = container_of(sxprt, struct svcxprt_rdma, sc_xprt);
- if (!test_bit(XPT_DEAD, &sxprt->xpt_flags))
+ if (!test_bit(XPT_DEAD, &sxprt->xpt_flags)) {
ret = rpcrdma_bc_send_request(rdma, rqst);
+ if (ret == -ENOTCONN)
+ svc_close_xprt(sxprt);
+ }
mutex_unlock(&sxprt->xpt_mutex);
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 9141068693fa..fbc171ebfe91 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -268,7 +268,7 @@ xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
{
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
- trace_xprtrdma_inject_dsc(r_xprt);
+ trace_xprtrdma_op_inject_dsc(r_xprt);
rdma_disconnect(r_xprt->rx_ia.ri_id);
}
@@ -284,7 +284,7 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
{
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
- trace_xprtrdma_destroy(r_xprt);
+ trace_xprtrdma_op_destroy(r_xprt);
cancel_delayed_work_sync(&r_xprt->rx_connect_worker);
@@ -318,17 +318,12 @@ xprt_setup_rdma(struct xprt_create *args)
struct sockaddr *sap;
int rc;
- if (args->addrlen > sizeof(xprt->addr)) {
- dprintk("RPC: %s: address too large\n", __func__);
+ if (args->addrlen > sizeof(xprt->addr))
return ERR_PTR(-EBADF);
- }
xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 0, 0);
- if (xprt == NULL) {
- dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n",
- __func__);
+ if (!xprt)
return ERR_PTR(-ENOMEM);
- }
/* 60 second timeout, no retries */
xprt->timeout = &xprt_rdma_default_timeout;
@@ -399,7 +394,7 @@ xprt_setup_rdma(struct xprt_create *args)
INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
xprt_rdma_connect_worker);
- xprt->max_payload = new_xprt->rx_ia.ri_ops->ro_maxpages(new_xprt);
+ xprt->max_payload = frwr_maxpages(new_xprt);
if (xprt->max_payload == 0)
goto out4;
xprt->max_payload <<= PAGE_SHIFT;
@@ -423,7 +418,7 @@ out3:
out2:
rpcrdma_ia_close(&new_xprt->rx_ia);
out1:
- trace_xprtrdma_destroy(new_xprt);
+ trace_xprtrdma_op_destroy(new_xprt);
xprt_rdma_free_addresses(xprt);
xprt_free(xprt);
return ERR_PTR(rc);
@@ -433,29 +428,33 @@ out1:
* xprt_rdma_close - close a transport connection
* @xprt: transport context
*
- * Called during transport shutdown, reconnect, or device removal.
+ * Called during autoclose or device removal.
+ *
* Caller holds @xprt's send lock to prevent activity on this
* transport while the connection is torn down.
*/
-static void
-xprt_rdma_close(struct rpc_xprt *xprt)
+void xprt_rdma_close(struct rpc_xprt *xprt)
{
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
struct rpcrdma_ep *ep = &r_xprt->rx_ep;
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- dprintk("RPC: %s: closing xprt %p\n", __func__, xprt);
+ might_sleep();
+
+ trace_xprtrdma_op_close(r_xprt);
+
+ /* Prevent marshaling and sending of new requests */
+ xprt_clear_connected(xprt);
if (test_and_clear_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags)) {
- xprt_clear_connected(xprt);
rpcrdma_ia_remove(ia);
- return;
+ goto out;
}
+
if (ep->rep_connected == -ENODEV)
return;
if (ep->rep_connected > 0)
xprt->reestablish_timeout = 0;
- xprt_disconnect_done(xprt);
rpcrdma_ep_disconnect(ep, ia);
/* Prepare @xprt for the next connection by reinitializing
@@ -463,6 +462,10 @@ xprt_rdma_close(struct rpc_xprt *xprt)
*/
r_xprt->rx_buf.rb_credits = 1;
xprt->cwnd = RPC_CWNDSHIFT;
+
+out:
+ ++xprt->connect_cookie;
+ xprt_disconnect_done(xprt);
}
/**
@@ -525,6 +528,7 @@ xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
+ trace_xprtrdma_op_connect(r_xprt);
if (r_xprt->rx_ep.rep_connected != 0) {
/* Reconnect */
schedule_delayed_work(&r_xprt->rx_connect_worker,
@@ -659,11 +663,11 @@ xprt_rdma_allocate(struct rpc_task *task)
rqst->rq_buffer = req->rl_sendbuf->rg_base;
rqst->rq_rbuffer = req->rl_recvbuf->rg_base;
- trace_xprtrdma_allocate(task, req);
+ trace_xprtrdma_op_allocate(task, req);
return 0;
out_fail:
- trace_xprtrdma_allocate(task, NULL);
+ trace_xprtrdma_op_allocate(task, NULL);
return -ENOMEM;
}
@@ -682,7 +686,7 @@ xprt_rdma_free(struct rpc_task *task)
if (test_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags))
rpcrdma_release_rqst(r_xprt, req);
- trace_xprtrdma_rpc_done(task, req);
+ trace_xprtrdma_op_free(task, req);
}
/**
@@ -696,8 +700,10 @@ xprt_rdma_free(struct rpc_task *task)
* %-ENOTCONN if the caller should reconnect and call again
* %-EAGAIN if the caller should call again
* %-ENOBUFS if the caller should call again after a delay
- * %-EIO if a permanent error occurred and the request was not
- * sent. Do not try to send this message again.
+ * %-EMSGSIZE if encoding ran out of buffer space. The request
+ * was not sent. Do not try to send this message again.
+ * %-EIO if an I/O error occurred. The request was not sent.
+ * Do not try to send this message again.
*/
static int
xprt_rdma_send_request(struct rpc_rqst *rqst)
@@ -713,7 +719,7 @@ xprt_rdma_send_request(struct rpc_rqst *rqst)
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
if (!xprt_connected(xprt))
- goto drop_connection;
+ return -ENOTCONN;
if (!xprt_request_get_cong(xprt, rqst))
return -EBADSLT;
@@ -745,8 +751,8 @@ failed_marshal:
if (rc != -ENOTCONN)
return rc;
drop_connection:
- xprt_disconnect_done(xprt);
- return -ENOTCONN; /* implies disconnect */
+ xprt_rdma_close(xprt);
+ return -ENOTCONN;
}
void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
@@ -843,58 +849,31 @@ static struct xprt_class xprt_rdma = {
void xprt_rdma_cleanup(void)
{
- int rc;
-
- dprintk("RPCRDMA Module Removed, deregister RPC RDMA transport\n");
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
if (sunrpc_table_header) {
unregister_sysctl_table(sunrpc_table_header);
sunrpc_table_header = NULL;
}
#endif
- rc = xprt_unregister_transport(&xprt_rdma);
- if (rc)
- dprintk("RPC: %s: xprt_unregister returned %i\n",
- __func__, rc);
-
- rpcrdma_destroy_wq();
- rc = xprt_unregister_transport(&xprt_rdma_bc);
- if (rc)
- dprintk("RPC: %s: xprt_unregister(bc) returned %i\n",
- __func__, rc);
+ xprt_unregister_transport(&xprt_rdma);
+ xprt_unregister_transport(&xprt_rdma_bc);
}
int xprt_rdma_init(void)
{
int rc;
- rc = rpcrdma_alloc_wq();
- if (rc)
- return rc;
-
rc = xprt_register_transport(&xprt_rdma);
- if (rc) {
- rpcrdma_destroy_wq();
+ if (rc)
return rc;
- }
rc = xprt_register_transport(&xprt_rdma_bc);
if (rc) {
xprt_unregister_transport(&xprt_rdma);
- rpcrdma_destroy_wq();
return rc;
}
- dprintk("RPCRDMA Module Init, register RPC RDMA transport\n");
-
- dprintk("Defaults:\n");
- dprintk("\tSlots %d\n"
- "\tMaxInlineRead %d\n\tMaxInlineWrite %d\n",
- xprt_rdma_slot_table_entries,
- xprt_rdma_max_inline_read, xprt_rdma_max_inline_write);
- dprintk("\tPadding 0\n\tMemreg %d\n", xprt_rdma_memreg_strategy);
-
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
if (!sunrpc_table_header)
sunrpc_table_header = register_sysctl_table(sunrpc_table);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 3ddba94c939f..7749a2bf6887 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -78,53 +78,25 @@ static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static int rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt, bool temp);
static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb);
+static void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
-struct workqueue_struct *rpcrdma_receive_wq __read_mostly;
-
-int
-rpcrdma_alloc_wq(void)
-{
- struct workqueue_struct *recv_wq;
-
- recv_wq = alloc_workqueue("xprtrdma_receive",
- WQ_MEM_RECLAIM | WQ_HIGHPRI,
- 0);
- if (!recv_wq)
- return -ENOMEM;
-
- rpcrdma_receive_wq = recv_wq;
- return 0;
-}
-
-void
-rpcrdma_destroy_wq(void)
-{
- struct workqueue_struct *wq;
-
- if (rpcrdma_receive_wq) {
- wq = rpcrdma_receive_wq;
- rpcrdma_receive_wq = NULL;
- destroy_workqueue(wq);
- }
-}
-
-/**
- * rpcrdma_disconnect_worker - Force a disconnect
- * @work: endpoint to be disconnected
- *
- * Provider callbacks can possibly run in an IRQ context. This function
- * is invoked in a worker thread to guarantee that disconnect wake-up
- * calls are always done in process context.
+/* Wait for outstanding transport work to finish.
*/
-static void
-rpcrdma_disconnect_worker(struct work_struct *work)
+static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{
- struct rpcrdma_ep *ep = container_of(work, struct rpcrdma_ep,
- rep_disconnect_worker.work);
- struct rpcrdma_xprt *r_xprt =
- container_of(ep, struct rpcrdma_xprt, rx_ep);
+ struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- xprt_force_disconnect(&r_xprt->rx_xprt);
+ /* Flush Receives, then wait for deferred Reply work
+ * to complete.
+ */
+ ib_drain_qp(ia->ri_id->qp);
+ drain_workqueue(buf->rb_completion_wq);
+
+ /* Deferred Reply processing might have scheduled
+ * local invalidations.
+ */
+ ib_drain_sq(ia->ri_id->qp);
}
/**
@@ -143,15 +115,6 @@ rpcrdma_qp_event_handler(struct ib_event *event, void *context)
rx_ep);
trace_xprtrdma_qp_event(r_xprt, event);
- pr_err("rpcrdma: %s on device %s connected to %s:%s\n",
- ib_event_msg(event->event), event->device->name,
- rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
-
- if (ep->rep_connected == 1) {
- ep->rep_connected = -EIO;
- schedule_delayed_work(&ep->rep_disconnect_worker, 0);
- wake_up_all(&ep->rep_connect_wait);
- }
}
/**
@@ -189,11 +152,13 @@ rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
struct ib_cqe *cqe = wc->wr_cqe;
struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
rr_cqe);
+ struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
- /* WARNING: Only wr_id and status are reliable at this point */
+ /* WARNING: Only wr_cqe and status are reliable at this point */
trace_xprtrdma_wc_receive(wc);
+ --r_xprt->rx_ep.rep_receive_count;
if (wc->status != IB_WC_SUCCESS)
- goto out_fail;
+ goto out_flushed;
/* status == SUCCESS means all fields in wc are trustworthy */
rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
@@ -204,17 +169,16 @@ rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
rdmab_addr(rep->rr_rdmabuf),
wc->byte_len, DMA_FROM_DEVICE);
-out_schedule:
+ rpcrdma_post_recvs(r_xprt, false);
rpcrdma_reply_handler(rep);
return;
-out_fail:
+out_flushed:
if (wc->status != IB_WC_WR_FLUSH_ERR)
pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
ib_wc_status_msg(wc->status),
wc->status, wc->vendor_err);
- rpcrdma_set_xdrlen(&rep->rr_hdrbuf, 0);
- goto out_schedule;
+ rpcrdma_recv_buffer_put(rep);
}
static void
@@ -316,7 +280,6 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
ep->rep_connected = -EAGAIN;
goto disconnected;
case RDMA_CM_EVENT_DISCONNECTED:
- ++xprt->connect_cookie;
ep->rep_connected = -ECONNABORTED;
disconnected:
xprt_force_disconnect(xprt);
@@ -326,10 +289,9 @@ disconnected:
break;
}
- dprintk("RPC: %s: %s:%s on %s/%s: %s\n", __func__,
+ dprintk("RPC: %s: %s:%s on %s/frwr: %s\n", __func__,
rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
- ia->ri_device->name, ia->ri_ops->ro_displayname,
- rdma_event_msg(event->event));
+ ia->ri_device->name, rdma_event_msg(event->event));
return 0;
}
@@ -347,22 +309,15 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
xprt, RDMA_PS_TCP, IB_QPT_RC);
- if (IS_ERR(id)) {
- rc = PTR_ERR(id);
- dprintk("RPC: %s: rdma_create_id() failed %i\n",
- __func__, rc);
+ if (IS_ERR(id))
return id;
- }
ia->ri_async_rc = -ETIMEDOUT;
rc = rdma_resolve_addr(id, NULL,
(struct sockaddr *)&xprt->rx_xprt.addr,
RDMA_RESOLVE_TIMEOUT);
- if (rc) {
- dprintk("RPC: %s: rdma_resolve_addr() failed %i\n",
- __func__, rc);
+ if (rc)
goto out;
- }
rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
if (rc < 0) {
trace_xprtrdma_conn_tout(xprt);
@@ -375,11 +330,8 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
ia->ri_async_rc = -ETIMEDOUT;
rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
- if (rc) {
- dprintk("RPC: %s: rdma_resolve_route() failed %i\n",
- __func__, rc);
+ if (rc)
goto out;
- }
rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
if (rc < 0) {
trace_xprtrdma_conn_tout(xprt);
@@ -429,16 +381,8 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
switch (xprt_rdma_memreg_strategy) {
case RPCRDMA_FRWR:
- if (frwr_is_supported(ia)) {
- ia->ri_ops = &rpcrdma_frwr_memreg_ops;
- break;
- }
- /*FALLTHROUGH*/
- case RPCRDMA_MTHCAFMR:
- if (fmr_is_supported(ia)) {
- ia->ri_ops = &rpcrdma_fmr_memreg_ops;
+ if (frwr_is_supported(ia))
break;
- }
/*FALLTHROUGH*/
default:
pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
@@ -481,7 +425,7 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
* connection is already gone.
*/
if (ia->ri_id->qp) {
- ib_drain_qp(ia->ri_id->qp);
+ rpcrdma_xprt_drain(r_xprt);
rdma_destroy_qp(ia->ri_id);
ia->ri_id->qp = NULL;
}
@@ -552,7 +496,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
}
ia->ri_max_send_sges = max_sge;
- rc = ia->ri_ops->ro_open(ia, ep, cdata);
+ rc = frwr_open(ia, ep, cdata);
if (rc)
return rc;
@@ -579,16 +523,13 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
cdata->max_requests >> 2);
ep->rep_send_count = ep->rep_send_batch;
init_waitqueue_head(&ep->rep_connect_wait);
- INIT_DELAYED_WORK(&ep->rep_disconnect_worker,
- rpcrdma_disconnect_worker);
+ ep->rep_receive_count = 0;
sendcq = ib_alloc_cq(ia->ri_device, NULL,
ep->rep_attr.cap.max_send_wr + 1,
1, IB_POLL_WORKQUEUE);
if (IS_ERR(sendcq)) {
rc = PTR_ERR(sendcq);
- dprintk("RPC: %s: failed to create send CQ: %i\n",
- __func__, rc);
goto out1;
}
@@ -597,8 +538,6 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
0, IB_POLL_WORKQUEUE);
if (IS_ERR(recvcq)) {
rc = PTR_ERR(recvcq);
- dprintk("RPC: %s: failed to create recv CQ: %i\n",
- __func__, rc);
goto out2;
}
@@ -611,7 +550,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
/* Prepare RDMA-CM private message */
pmsg->cp_magic = rpcrdma_cmp_magic;
pmsg->cp_version = RPCRDMA_CMP_VERSION;
- pmsg->cp_flags |= ia->ri_ops->ro_send_w_inv_ok;
+ pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK;
pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize);
pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize);
ep->rep_remote_cma.private_data = pmsg;
@@ -653,8 +592,6 @@ out1:
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
- cancel_delayed_work_sync(&ep->rep_disconnect_worker);
-
if (ia->ri_id && ia->ri_id->qp) {
rpcrdma_ep_disconnect(ep, ia);
rdma_destroy_qp(ia->ri_id);
@@ -740,11 +677,8 @@ rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
}
err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
- if (err) {
- dprintk("RPC: %s: rdma_create_qp returned %d\n",
- __func__, err);
+ if (err)
goto out_destroy;
- }
/* Atomically replace the transport's ID and QP. */
rc = 0;
@@ -775,8 +709,6 @@ retry:
dprintk("RPC: %s: connecting...\n", __func__);
rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
if (rc) {
- dprintk("RPC: %s: rdma_create_qp failed %i\n",
- __func__, rc);
rc = -ENETUNREACH;
goto out_noupdate;
}
@@ -798,11 +730,8 @@ retry:
rpcrdma_post_recvs(r_xprt, true);
rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
- if (rc) {
- dprintk("RPC: %s: rdma_connect() failed with %i\n",
- __func__, rc);
+ if (rc)
goto out;
- }
wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
if (ep->rep_connected <= 0) {
@@ -822,8 +751,10 @@ out_noupdate:
return rc;
}
-/*
- * rpcrdma_ep_disconnect
+/**
+ * rpcrdma_ep_disconnect - Disconnect underlying transport
+ * @ep: endpoint to disconnect
+ * @ia: associated interface adapter
*
* This is separate from destroy to facilitate the ability
* to reconnect without recreating the endpoint.
@@ -834,19 +765,20 @@ out_noupdate:
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
+ struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
+ rx_ep);
int rc;
+ /* returns without wait if ID is not connected */
rc = rdma_disconnect(ia->ri_id);
if (!rc)
- /* returns without wait if not connected */
wait_event_interruptible(ep->rep_connect_wait,
ep->rep_connected != 1);
else
ep->rep_connected = rc;
- trace_xprtrdma_disconnect(container_of(ep, struct rpcrdma_xprt,
- rx_ep), rc);
+ trace_xprtrdma_disconnect(r_xprt, rc);
- ib_drain_qp(ia->ri_id->qp);
+ rpcrdma_xprt_drain(r_xprt);
}
/* Fixed-size circular FIFO queue. This implementation is wait-free and
@@ -1034,7 +966,7 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
if (!mr)
break;
- rc = ia->ri_ops->ro_init_mr(ia, mr);
+ rc = frwr_init_mr(ia, mr);
if (rc) {
kfree(mr);
break;
@@ -1089,9 +1021,9 @@ rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
req->rl_buffer = buffer;
INIT_LIST_HEAD(&req->rl_registered);
- spin_lock(&buffer->rb_reqslock);
+ spin_lock(&buffer->rb_lock);
list_add(&req->rl_all, &buffer->rb_allreqs);
- spin_unlock(&buffer->rb_reqslock);
+ spin_unlock(&buffer->rb_lock);
return req;
}
@@ -1134,8 +1066,6 @@ rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt, bool temp)
out_free:
kfree(rep);
out:
- dprintk("RPC: %s: reply buffer %d alloc failed\n",
- __func__, rc);
return rc;
}
@@ -1159,7 +1089,6 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
INIT_LIST_HEAD(&buf->rb_send_bufs);
INIT_LIST_HEAD(&buf->rb_allreqs);
- spin_lock_init(&buf->rb_reqslock);
for (i = 0; i < buf->rb_max_requests; i++) {
struct rpcrdma_req *req;
@@ -1174,13 +1103,19 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
}
buf->rb_credits = 1;
- buf->rb_posted_receives = 0;
INIT_LIST_HEAD(&buf->rb_recv_bufs);
rc = rpcrdma_sendctxs_create(r_xprt);
if (rc)
goto out;
+ buf->rb_completion_wq = alloc_workqueue("rpcrdma-%s",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI,
+ 0,
+ r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]);
+ if (!buf->rb_completion_wq)
+ goto out;
+
return 0;
out:
rpcrdma_buffer_destroy(buf);
@@ -1194,9 +1129,18 @@ rpcrdma_destroy_rep(struct rpcrdma_rep *rep)
kfree(rep);
}
+/**
+ * rpcrdma_req_destroy - Destroy an rpcrdma_req object
+ * @req: unused object to be destroyed
+ *
+ * This function assumes that the caller prevents concurrent device
+ * unload and transport tear-down.
+ */
void
-rpcrdma_destroy_req(struct rpcrdma_req *req)
+rpcrdma_req_destroy(struct rpcrdma_req *req)
{
+ list_del(&req->rl_all);
+
rpcrdma_free_regbuf(req->rl_recvbuf);
rpcrdma_free_regbuf(req->rl_sendbuf);
rpcrdma_free_regbuf(req->rl_rdmabuf);
@@ -1208,7 +1152,6 @@ rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
{
struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
rx_buf);
- struct rpcrdma_ia *ia = rdmab_to_ia(buf);
struct rpcrdma_mr *mr;
unsigned int count;
@@ -1224,7 +1167,7 @@ rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
if (!list_empty(&mr->mr_list))
list_del(&mr->mr_list);
- ia->ri_ops->ro_release_mr(mr);
+ frwr_release_mr(mr);
count++;
spin_lock(&buf->rb_mrlock);
}
@@ -1234,11 +1177,24 @@ rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
dprintk("RPC: %s: released %u MRs\n", __func__, count);
}
+/**
+ * rpcrdma_buffer_destroy - Release all hw resources
+ * @buf: root control block for resources
+ *
+ * ORDERING: relies on a prior ib_drain_qp :
+ * - No more Send or Receive completions can occur
+ * - All MRs, reps, and reqs are returned to their free lists
+ */
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
cancel_delayed_work_sync(&buf->rb_refresh_worker);
+ if (buf->rb_completion_wq) {
+ destroy_workqueue(buf->rb_completion_wq);
+ buf->rb_completion_wq = NULL;
+ }
+
rpcrdma_sendctxs_destroy(buf);
while (!list_empty(&buf->rb_recv_bufs)) {
@@ -1250,19 +1206,14 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
rpcrdma_destroy_rep(rep);
}
- spin_lock(&buf->rb_reqslock);
- while (!list_empty(&buf->rb_allreqs)) {
+ while (!list_empty(&buf->rb_send_bufs)) {
struct rpcrdma_req *req;
- req = list_first_entry(&buf->rb_allreqs,
- struct rpcrdma_req, rl_all);
- list_del(&req->rl_all);
-
- spin_unlock(&buf->rb_reqslock);
- rpcrdma_destroy_req(req);
- spin_lock(&buf->rb_reqslock);
+ req = list_first_entry(&buf->rb_send_bufs,
+ struct rpcrdma_req, rl_list);
+ list_del(&req->rl_list);
+ rpcrdma_req_destroy(req);
}
- spin_unlock(&buf->rb_reqslock);
rpcrdma_mrs_destroy(buf);
}
@@ -1329,9 +1280,12 @@ rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
{
struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
- trace_xprtrdma_mr_unmap(mr);
- ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
- mr->mr_sg, mr->mr_nents, mr->mr_dir);
+ if (mr->mr_dir != DMA_NONE) {
+ trace_xprtrdma_mr_unmap(mr);
+ ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
+ mr->mr_sg, mr->mr_nents, mr->mr_dir);
+ mr->mr_dir = DMA_NONE;
+ }
__rpcrdma_mr_put(&r_xprt->rx_buf, mr);
}
@@ -1410,7 +1364,7 @@ rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
*
* xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
* receiving the payload of RDMA RECV operations. During Long Calls
- * or Replies they may be registered externally via ro_map.
+ * or Replies they may be registered externally via frwr_map.
*/
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction,
@@ -1446,8 +1400,10 @@ __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
(void *)rb->rg_base,
rdmab_length(rb),
rb->rg_direction);
- if (ib_dma_mapping_error(device, rdmab_addr(rb)))
+ if (ib_dma_mapping_error(device, rdmab_addr(rb))) {
+ trace_xprtrdma_dma_maperr(rdmab_addr(rb));
return false;
+ }
rb->rg_device = device;
rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
@@ -1479,10 +1435,14 @@ rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb)
kfree(rb);
}
-/*
- * Prepost any receive buffer, then post send.
+/**
+ * rpcrdma_ep_post - Post WRs to a transport's Send Queue
+ * @ia: transport's device information
+ * @ep: transport's RDMA endpoint information
+ * @req: rpcrdma_req containing the Send WR to post
*
- * Receive buffer is donated to hardware, reclaimed upon recv completion.
+ * Returns 0 if the post was successful, otherwise -ENOTCONN
+ * is returned.
*/
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
@@ -1501,32 +1461,27 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
--ep->rep_send_count;
}
- rc = ia->ri_ops->ro_send(ia, req);
+ rc = frwr_send(ia, req);
trace_xprtrdma_post_send(req, rc);
if (rc)
return -ENOTCONN;
return 0;
}
-/**
- * rpcrdma_post_recvs - Maybe post some Receive buffers
- * @r_xprt: controlling transport
- * @temp: when true, allocate temp rpcrdma_rep objects
- *
- */
-void
+static void
rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+ struct rpcrdma_ep *ep = &r_xprt->rx_ep;
struct ib_recv_wr *wr, *bad_wr;
int needed, count, rc;
rc = 0;
count = 0;
needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
- if (buf->rb_posted_receives > needed)
+ if (ep->rep_receive_count > needed)
goto out;
- needed -= buf->rb_posted_receives;
+ needed -= ep->rep_receive_count;
count = 0;
wr = NULL;
@@ -1574,7 +1529,7 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
--count;
}
}
- buf->rb_posted_receives += count;
+ ep->rep_receive_count += count;
out:
trace_xprtrdma_post_recvs(r_xprt, count, rc);
}
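
The verbs.c hunks above collapse the old memory-registration ops table: with FMR support gone, the ia->ri_ops indirection is replaced by direct calls to the FRWR helpers (frwr_open, frwr_init_mr, frwr_send, frwr_release_mr), Receive accounting moves from rpcrdma_buffer to the endpoint's rep_receive_count, and several dprintk() call sites are culled in favor of trace points. As a minimal stand-alone sketch of the devirtualization pattern only (the widget/frob names are illustrative userspace C, not from this patch):

/* Sketch only: collapsing an ops-table indirection into direct calls
 * once a single backend remains. widget_* and frob_* are made-up names.
 */
#include <stdio.h>

struct widget { int id; };

/* "Before": dispatch through an ops table, as ia->ri_ops once did. */
struct widget_ops {
	int (*init)(struct widget *w);
	void (*release)(struct widget *w);
};

static int frob_init(struct widget *w)
{
	printf("init widget %d\n", w->id);
	return 0;
}

static void frob_release(struct widget *w)
{
	printf("release widget %d\n", w->id);
}

static const struct widget_ops frob_ops = {
	.init = frob_init,
	.release = frob_release,
};

int main(void)
{
	struct widget w = { .id = 1 };

	/* Before: indirect calls through the last remaining backend. */
	if (frob_ops.init(&w) == 0)
		frob_ops.release(&w);

	/* After: the table is gone and callers use the helpers directly. */
	if (frob_init(&w) == 0)
		frob_release(&w);
	return 0;
}

The kernel change has the same shape: only one registration mode survives, so the function-pointer hop buys nothing and direct calls are both clearer and cheaper.
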
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 9218dbebedce..5a18472f2c9c 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -66,7 +66,6 @@
* Interface Adapter -- one per transport instance
*/
struct rpcrdma_ia {
- const struct rpcrdma_memreg_ops *ri_ops;
struct ib_device *ri_device;
struct rdma_cm_id *ri_id;
struct ib_pd *ri_pd;
@@ -81,8 +80,6 @@ struct rpcrdma_ia {
bool ri_implicit_roundup;
enum ib_mr_type ri_mrtype;
unsigned long ri_flags;
- struct ib_qp_attr ri_qp_attr;
- struct ib_qp_init_attr ri_qp_init_attr;
};
enum {
@@ -101,7 +98,7 @@ struct rpcrdma_ep {
wait_queue_head_t rep_connect_wait;
struct rpcrdma_connect_private rep_cm_private;
struct rdma_conn_param rep_remote_cma;
- struct delayed_work rep_disconnect_worker;
+ int rep_receive_count;
};
/* Pre-allocate extra Work Requests for handling backward receives
@@ -262,20 +259,12 @@ struct rpcrdma_frwr {
};
};
-struct rpcrdma_fmr {
- struct ib_fmr *fm_mr;
- u64 *fm_physaddrs;
-};
-
struct rpcrdma_mr {
struct list_head mr_list;
struct scatterlist *mr_sg;
int mr_nents;
enum dma_data_direction mr_dir;
- union {
- struct rpcrdma_fmr fmr;
- struct rpcrdma_frwr frwr;
- };
+ struct rpcrdma_frwr frwr;
struct rpcrdma_xprt *mr_xprt;
u32 mr_handle;
u32 mr_length;
@@ -401,20 +390,18 @@ struct rpcrdma_buffer {
spinlock_t rb_lock; /* protect buf lists */
struct list_head rb_send_bufs;
struct list_head rb_recv_bufs;
+ struct list_head rb_allreqs;
+
unsigned long rb_flags;
u32 rb_max_requests;
u32 rb_credits; /* most recent credit grant */
- int rb_posted_receives;
u32 rb_bc_srv_max_requests;
- spinlock_t rb_reqslock; /* protect rb_allreqs */
- struct list_head rb_allreqs;
-
u32 rb_bc_max_requests;
+ struct workqueue_struct *rb_completion_wq;
struct delayed_work rb_refresh_worker;
};
-#define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)
/* rb_flags */
enum {
@@ -465,35 +452,6 @@ struct rpcrdma_stats {
};
/*
- * Per-registration mode operations
- */
-struct rpcrdma_xprt;
-struct rpcrdma_memreg_ops {
- struct rpcrdma_mr_seg *
- (*ro_map)(struct rpcrdma_xprt *,
- struct rpcrdma_mr_seg *, int, bool,
- struct rpcrdma_mr **);
- int (*ro_send)(struct rpcrdma_ia *ia,
- struct rpcrdma_req *req);
- void (*ro_reminv)(struct rpcrdma_rep *rep,
- struct list_head *mrs);
- void (*ro_unmap_sync)(struct rpcrdma_xprt *,
- struct list_head *);
- int (*ro_open)(struct rpcrdma_ia *,
- struct rpcrdma_ep *,
- struct rpcrdma_create_data_internal *);
- size_t (*ro_maxpages)(struct rpcrdma_xprt *);
- int (*ro_init_mr)(struct rpcrdma_ia *,
- struct rpcrdma_mr *);
- void (*ro_release_mr)(struct rpcrdma_mr *mr);
- const char *ro_displayname;
- const int ro_send_w_inv_ok;
-};
-
-extern const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops;
-extern const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops;
-
-/*
* RPCRDMA transport -- encapsulates the structures above for
* integration with RPC.
*
@@ -544,10 +502,6 @@ extern unsigned int xprt_rdma_memreg_strategy;
int rpcrdma_ia_open(struct rpcrdma_xprt *xprt);
void rpcrdma_ia_remove(struct rpcrdma_ia *ia);
void rpcrdma_ia_close(struct rpcrdma_ia *);
-bool frwr_is_supported(struct rpcrdma_ia *);
-bool fmr_is_supported(struct rpcrdma_ia *);
-
-extern struct workqueue_struct *rpcrdma_receive_wq;
/*
* Endpoint calls - xprtrdma/verbs.c
@@ -560,13 +514,12 @@ void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
struct rpcrdma_req *);
-void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
/*
* Buffer calls - xprtrdma/verbs.c
*/
struct rpcrdma_req *rpcrdma_create_req(struct rpcrdma_xprt *);
-void rpcrdma_destroy_req(struct rpcrdma_req *);
+void rpcrdma_req_destroy(struct rpcrdma_req *req);
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf);
@@ -604,9 +557,6 @@ rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
return __rpcrdma_dma_map_regbuf(ia, rb);
}
-int rpcrdma_alloc_wq(void);
-void rpcrdma_destroy_wq(void);
-
/*
* Wrappers for chunk registration, shared by read/write chunk code.
*/
@@ -617,6 +567,23 @@ rpcrdma_data_dir(bool writing)
return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
+/* Memory registration calls xprtrdma/frwr_ops.c
+ */
+bool frwr_is_supported(struct rpcrdma_ia *);
+int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
+ struct rpcrdma_create_data_internal *cdata);
+int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr);
+void frwr_release_mr(struct rpcrdma_mr *mr);
+size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt);
+struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
+ struct rpcrdma_mr_seg *seg,
+ int nsegs, bool writing, u32 xid,
+ struct rpcrdma_mr **mr);
+int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req);
+void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs);
+void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt,
+ struct list_head *mrs);
+
/*
* RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
*/
@@ -653,6 +620,7 @@ static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)
extern unsigned int xprt_rdma_max_inline_read;
void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
+void xprt_rdma_close(struct rpc_xprt *xprt);
void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
int xprt_rdma_init(void);
void xprt_rdma_cleanup(void);
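
The xprt_rdma.h hunks mirror that refactor: struct rpcrdma_ia drops ri_ops and the cached QP attributes, struct rpcrdma_ep trades the delayed disconnect worker for rep_receive_count, and struct rpcrdma_buffer moves rb_allreqs under rb_lock, drops rb_posted_receives, and gains a per-transport rb_completion_wq. Below is a hedged sketch of that per-object workqueue lifecycle, written as a toy module; the demo_* names and the "eth0" string are made up and not part of the patch.

/* Sketch, not part of the patch: allocate a per-object workqueue at
 * setup time and tear it down with the object, mirroring how
 * rb_completion_wq is created in rpcrdma_buffer_create() and released
 * in rpcrdma_buffer_destroy().
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_buf {
	struct workqueue_struct *wq;
	struct work_struct work;
};

static void demo_work_fn(struct work_struct *work)
{
	pr_info("completion work ran\n");
}

static struct demo_buf *demo_buf_create(const char *name)
{
	struct demo_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return NULL;
	/* One workqueue per object, named after its owner. */
	buf->wq = alloc_workqueue("demo-%s", WQ_MEM_RECLAIM | WQ_HIGHPRI,
				  0, name);
	if (!buf->wq) {
		kfree(buf);
		return NULL;
	}
	INIT_WORK(&buf->work, demo_work_fn);
	return buf;
}

static void demo_buf_destroy(struct demo_buf *buf)
{
	/* destroy_workqueue() drains pending work before freeing the queue. */
	destroy_workqueue(buf->wq);
	kfree(buf);
}

static struct demo_buf *demo;

static int __init demo_init(void)
{
	demo = demo_buf_create("eth0");
	if (!demo)
		return -ENOMEM;
	queue_work(demo->wq, &demo->work);
	return 0;
}

static void __exit demo_exit(void)
{
	demo_buf_destroy(demo);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because destroy_workqueue() drains the queue first, tearing it down before freeing the owning structure, as rpcrdma_buffer_destroy() does, guarantees no completion work runs against freed memory.
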
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 44467caf3cd8..13559e6a460b 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -68,8 +68,6 @@ static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-
#define XS_TCP_LINGER_TO (15U * HZ)
static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
@@ -159,8 +157,6 @@ static struct ctl_table sunrpc_table[] = {
{ },
};
-#endif
-
/*
* Wait duration for a reply from the RPC portmapper.
*/
@@ -1589,6 +1585,7 @@ static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t
/**
* xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
+ * @xprt: controlling transport
* @task: task that timed out
*
* Adjust the congestion window after a retransmit timeout has occurred.
@@ -2246,6 +2243,7 @@ out:
/**
* xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
+ * @work: queued work item
*
* Invoked by a work queue tasklet.
*/
@@ -3095,10 +3093,8 @@ static struct xprt_class xs_bc_tcp_transport = {
*/
int init_socket_xprt(void)
{
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
if (!sunrpc_table_header)
sunrpc_table_header = register_sysctl_table(sunrpc_table);
-#endif
xprt_register_transport(&xs_local_transport);
xprt_register_transport(&xs_udp_transport);
@@ -3114,12 +3110,10 @@ int init_socket_xprt(void)
*/
void cleanup_socket_xprt(void)
{
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
if (sunrpc_table_header) {
unregister_sysctl_table(sunrpc_table_header);
sunrpc_table_header = NULL;
}
-#endif
xprt_unregister_transport(&xs_local_transport);
xprt_unregister_transport(&xs_udp_transport);