author		Jens Axboe <axboe@kernel.dk>	2024-03-18 16:13:01 -0600
committer	Jens Axboe <axboe@kernel.dk>	2024-04-15 08:10:25 -0600
commit		a9165b83c1937eeed1f0c731468216d6371d647f (patch)
tree		b7444f11aca3a27908c5b1660344efe44cbc190e /io_uring/opdef.c
parent		d80f940701302e84d1398ecb103083468b566a69 (diff)
io_uring/rw: always setup io_async_rw for read/write requests
Read/write requests try to put everything on the stack, and then alloc and copy if a retry is needed. This necessitates a bunch of nasty code that deals with intermediate state.

Get rid of this, and have the prep side set up everything that is needed upfront, which greatly simplifies the opcode handlers. This includes adding an alloc cache for io_async_rw, to make it cheap to handle.

In terms of cost, this should be basically free and transparent. For the worst case of {READ,WRITE}_FIXED, which didn't need it before, performance is unaffected in the normal peak workload that is being used to test that. Still runs at 122M IOPS.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
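The change described above centers on one pattern: the async read/write state is always attached at prep time and recycled through an alloc cache, instead of being allocated and copied into only when a retry forces it off the stack. The fragment below is a minimal, self-contained user-space sketch of that pattern only; the names (rw_cache, async_rw, prep_rw, cache_get/cache_put) are hypothetical and the bodies are simplified, so this is not the kernel's io_async_rw cache or its actual API.

/*
 * Minimal user-space sketch of "always attach async rw state at prep time,
 * recycled through a small cache". Illustrative names only; not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

#define CACHE_MAX 32

struct async_rw {
	void *iov;		/* stand-in for per-request iterator state */
	size_t len;
	struct async_rw *next;	/* free-list link while cached */
};

struct rw_cache {
	struct async_rw *free_list;
	int nr_free;
};

/* Pop a recycled entry if one is available, otherwise allocate fresh. */
static struct async_rw *cache_get(struct rw_cache *cache)
{
	struct async_rw *rw = cache->free_list;

	if (rw) {
		cache->free_list = rw->next;
		cache->nr_free--;
		return rw;
	}
	return calloc(1, sizeof(*rw));
}

/* Return an entry to the cache, freeing it only once the cache is full. */
static void cache_put(struct rw_cache *cache, struct async_rw *rw)
{
	if (cache->nr_free < CACHE_MAX) {
		rw->next = cache->free_list;
		cache->free_list = rw;
		cache->nr_free++;
	} else {
		free(rw);
	}
}

struct request {
	struct async_rw *async_data;
};

/*
 * Prep-time setup: the request always gets its async state here, so the
 * issue path never has to alloc-and-copy intermediate state on retry.
 */
static int prep_rw(struct request *req, struct rw_cache *cache)
{
	req->async_data = cache_get(cache);
	return req->async_data ? 0 : -1;
}

int main(void)
{
	struct rw_cache cache = { 0 };
	struct request req = { 0 };

	if (prep_rw(&req, &cache) == 0) {
		printf("async state ready at prep time\n");
		cache_put(&cache, req.async_data);
	}
	return 0;
}

A free list like this keeps the common path at a pointer pop/push, which is the sense in which the commit message can treat the per-request async state as basically free and transparent.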
Diffstat (limited to 'io_uring/opdef.c')
-rw-r--r--	io_uring/opdef.c	15
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index dd4a1e1425e1..fcae75a08f2c 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -67,7 +67,7 @@ const struct io_issue_def io_issue_defs[] = {
.iopoll = 1,
.iopoll_queue = 1,
.vectored = 1,
- .prep = io_prep_rwv,
+ .prep = io_prep_readv,
.issue = io_read,
},
[IORING_OP_WRITEV] = {
@@ -81,7 +81,7 @@ const struct io_issue_def io_issue_defs[] = {
.iopoll = 1,
.iopoll_queue = 1,
.vectored = 1,
- .prep = io_prep_rwv,
+ .prep = io_prep_writev,
.issue = io_write,
},
[IORING_OP_FSYNC] = {
@@ -99,7 +99,7 @@ const struct io_issue_def io_issue_defs[] = {
.ioprio = 1,
.iopoll = 1,
.iopoll_queue = 1,
- .prep = io_prep_rw_fixed,
+ .prep = io_prep_read_fixed,
.issue = io_read,
},
[IORING_OP_WRITE_FIXED] = {
@@ -112,7 +112,7 @@ const struct io_issue_def io_issue_defs[] = {
.ioprio = 1,
.iopoll = 1,
.iopoll_queue = 1,
- .prep = io_prep_rw_fixed,
+ .prep = io_prep_write_fixed,
.issue = io_write,
},
[IORING_OP_POLL_ADD] = {
@@ -239,7 +239,7 @@ const struct io_issue_def io_issue_defs[] = {
.ioprio = 1,
.iopoll = 1,
.iopoll_queue = 1,
- .prep = io_prep_rw,
+ .prep = io_prep_read,
.issue = io_read,
},
[IORING_OP_WRITE] = {
@@ -252,7 +252,7 @@ const struct io_issue_def io_issue_defs[] = {
.ioprio = 1,
.iopoll = 1,
.iopoll_queue = 1,
- .prep = io_prep_rw,
+ .prep = io_prep_write,
.issue = io_write,
},
[IORING_OP_FADVISE] = {
@@ -490,14 +490,12 @@ const struct io_cold_def io_cold_defs[] = {
[IORING_OP_READV] = {
.async_size = sizeof(struct io_async_rw),
.name = "READV",
- .prep_async = io_readv_prep_async,
.cleanup = io_readv_writev_cleanup,
.fail = io_rw_fail,
},
[IORING_OP_WRITEV] = {
.async_size = sizeof(struct io_async_rw),
.name = "WRITEV",
- .prep_async = io_writev_prep_async,
.cleanup = io_readv_writev_cleanup,
.fail = io_rw_fail,
},
@@ -699,6 +697,7 @@ const struct io_cold_def io_cold_defs[] = {
#endif
},
[IORING_OP_READ_MULTISHOT] = {
+ .async_size = sizeof(struct io_async_rw),
.name = "READ_MULTISHOT",
},
[IORING_OP_WAITID] = {