author		Trond Myklebust <trond.myklebust@hammerspace.com>	2022-01-29 12:49:44 -0500
committer	Trond Myklebust <trond.myklebust@hammerspace.com>	2022-02-25 18:50:12 -0500
commit		0adc87940618648b3dcccc819c20068bd6b4ec93 (patch)
tree		51d9bad823a02d5a1f89468a3d8162edc808ff56 /net/sunrpc/sched.c
parent		4fb547be355d4af349681ba4c3bab81d99f4f774 (diff)
SUNRPC: Convert GFP_NOFS to GFP_KERNEL
The sections which should not re-enter the filesystem are already
protected with memalloc_nofs_save/restore calls, so it is better to use
GFP_KERNEL in these calls to allow better performance for synchronous
RPC calls.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Diffstat (limited to 'net/sunrpc/sched.c')
-rw-r--r--	net/sunrpc/sched.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index e2c835482791..52769b883c0a 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -1021,7 +1021,7 @@ int rpc_malloc(struct rpc_task *task)
 	struct rpc_rqst *rqst = task->tk_rqstp;
 	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
 	struct rpc_buffer *buf;
-	gfp_t gfp = GFP_NOFS;
+	gfp_t gfp = GFP_KERNEL;
 
 	if (RPC_IS_SWAPPER(task))
 		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
@@ -1095,7 +1095,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
 static struct rpc_task *
 rpc_alloc_task(void)
 {
-	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
+	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_KERNEL);
 }
 
 /*
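
For context, the commit message refers to the scoped-NOFS mechanism (memalloc_nofs_save/restore), which makes the switch to GFP_KERNEL safe. The sketch below is illustrative only and is not part of this commit; the helper nfs_example_alloc() is hypothetical. It shows the general kernel pattern: inside a memalloc_nofs_save() scope, a GFP_KERNEL allocation is treated as GFP_NOFS by the page allocator, while callers outside such a scope get full GFP_KERNEL reclaim behaviour.

/*
 * Illustrative sketch, not from this commit: the scoped-NOFS pattern the
 * commit message relies on. nfs_example_alloc() is a hypothetical helper.
 */
#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *nfs_example_alloc(size_t size)
{
	unsigned int nofs_flags;
	void *p;

	/* Enter a section that must not re-enter the filesystem. */
	nofs_flags = memalloc_nofs_save();

	/*
	 * With PF_MEMALLOC_NOFS set on the task, this GFP_KERNEL
	 * allocation behaves like GFP_NOFS. Outside such a scope it may
	 * reclaim through the filesystem, which is what gives synchronous
	 * RPC callers the better performance the commit message mentions.
	 */
	p = kmalloc(size, GFP_KERNEL);

	memalloc_nofs_restore(nofs_flags);
	return p;
}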