author		Trond Myklebust <trond.myklebust@hammerspace.com>	2022-03-15 05:02:22 +0300
committer	Trond Myklebust <trond.myklebust@hammerspace.com>	2022-03-22 22:52:55 +0300
commit		33e5c765bc1ea5e06ea7603637f14d727e6fcdf3 (patch)
tree		03fce3288dbf7e4ab704cd96ccc88a6b6613f6db /net/sunrpc/sched.c
parent		d0afde5fc6fb13531e2434fc4b6a65f131671f68 (diff)
NFS: Fix memory allocation in rpc_malloc()
When in a low memory situation, we do want rpciod to kick off direct
reclaim in the case where that helps; however, we don't want it looping
forever in mempool_alloc(). So first try allocating from the slab using
GFP_KERNEL | __GFP_NORETRY, and then fall back to a GFP_NOWAIT
allocation from the mempool. Ditto for rpc_alloc_task().

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
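The strategy generalizes beyond rpc_malloc(): attempt a slab allocation
with a mask that permits direct reclaim but forbids indefinite retries,
and only if that fails take a non-sleeping allocation from the emergency
mempool. A minimal sketch of the idea follows; alloc_with_fallback() and
its cache/pool parameters are illustrative names, not symbols from this
patch:

/*
 * Illustrative sketch of the strategy described above; not the
 * sched.c code itself. kmem_cache_alloc() may enter direct reclaim
 * because of GFP_KERNEL, but __GFP_NORETRY makes it return NULL
 * rather than loop, and only async callers then dip into the
 * reserved mempool with a non-sleeping GFP_NOWAIT request.
 */
static void *alloc_with_fallback(struct kmem_cache *cachep,
				 mempool_t *pool, bool is_async)
{
	void *obj = kmem_cache_alloc(cachep,
				     GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);

	if (obj || !is_async)
		return obj;
	return mempool_alloc(pool, GFP_NOWAIT);
}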
Diffstat (limited to 'net/sunrpc/sched.c')
-rw-r--r--	net/sunrpc/sched.c	21
1 file changed, 14 insertions, 7 deletions
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 7c8f87ebdbc0..d59a033820be 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -57,6 +57,13 @@ struct workqueue_struct *rpciod_workqueue __read_mostly;
 struct workqueue_struct *xprtiod_workqueue __read_mostly;
 EXPORT_SYMBOL_GPL(xprtiod_workqueue);
 
+gfp_t rpc_task_gfp_mask(void)
+{
+	if (current->flags & PF_WQ_WORKER)
+		return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
+	return GFP_KERNEL;
+}
+
 unsigned long
 rpc_task_timeout(const struct rpc_task *task)
 {
@@ -1030,15 +1037,15 @@ int rpc_malloc(struct rpc_task *task)
 	struct rpc_rqst *rqst = task->tk_rqstp;
 	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
 	struct rpc_buffer *buf;
-	gfp_t gfp = GFP_KERNEL;
-
-	if (RPC_IS_ASYNC(task))
-		gfp = GFP_NOWAIT | __GFP_NOWARN;
+	gfp_t gfp = rpc_task_gfp_mask();
 
 	size += sizeof(struct rpc_buffer);
-	if (size <= RPC_BUFFER_MAXSIZE)
-		buf = mempool_alloc(rpc_buffer_mempool, gfp);
-	else
+	if (size <= RPC_BUFFER_MAXSIZE) {
+		buf = kmem_cache_alloc(rpc_buffer_slabp, gfp);
+		/* Reach for the mempool if dynamic allocation fails */
+		if (!buf && RPC_IS_ASYNC(task))
+			buf = mempool_alloc(rpc_buffer_mempool, GFP_NOWAIT);
+	} else
 		buf = kmalloc(size, gfp);
 	if (!buf)
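The commit message's "Ditto for rpc_alloc_task()" refers to applying the
same two-step allocation to task structs; that hunk is not part of this
diffstat. As a hedged sketch only, assuming the rpc_task_slabp slab and
rpc_task_mempool pool declared in sched.c, the analogous change would
look roughly like:

/*
 * Sketch only: the same slab-first, mempool-fallback strategy applied
 * to task allocation. The exact upstream hunk may differ.
 */
static struct rpc_task *rpc_alloc_task(void)
{
	struct rpc_task *task;

	/* Opportunistic slab allocation; may reclaim, never loops. */
	task = kmem_cache_alloc(rpc_task_slabp, rpc_task_gfp_mask());
	if (task)
		return task;
	/* Non-blocking fallback to the emergency pool. */
	return mempool_alloc(rpc_task_mempool, GFP_NOWAIT);
}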