author	Pavel Begunkov <asml.silence@gmail.com>	2024-07-26 15:24:30 +0100
committer	Jens Axboe <axboe@kernel.dk>	2024-07-26 08:31:59 -0600
commit	342b2e395d5f34c9f111a818556e617939f83a8c (patch)
tree	4e09d19146f6e741dd98faeb071514a376085ff4 /io_uring/napi.c
parent	0db4618e8fabfcc404af4dda23799bba726785a5 (diff)
io_uring/napi: use ktime in busy polling
It's more natural to use ktime/ns instead of keeping around usec, especially since we're comparing it against user-provided timers, so convert the napi busy poll internal handling to ktime. It's also nicer since the type (ktime_t vs unsigned long) now tells the unit of measure.

Keep everything as ktime, which we convert to/from microseconds for IORING_[UN]REGISTER_NAPI. The net/ busy polling code works with usec, however it's not real usec: a shift by 10 is used to derive it from nsecs, see busy_loop_current_time(), so it's easy to get the truncated nsec back and recover better precision.

Note: we can further improve this later by removing the truncation and maybe convincing net/ to use ktime/ns instead.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/95e7ec8d095069a3ed5d40a4bc6f8b586698bc7e.1722003776.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
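[Editor's illustration] A minimal userspace sketch (not kernel code; the helper names here are made up for the example) of the shift-based approximation the message refers to: busy_loop_current_time() derives its "usec" clock as nsecs >> 10, so shifting left by 10 reverses it with at most 1023 ns of truncation, which is exactly what the patch's new net_to_ktime() helper relies on.

	#include <stdint.h>
	#include <stdio.h>

	/* Mimics busy_loop_current_time(): a cheap "usec" clock that is
	 * really nsecs >> 10 (divide by 1024, not 1000). */
	static uint64_t approx_usec(uint64_t ns)
	{
		return ns >> 10;
	}

	/* Mimics the patch's net_to_ktime(): reverse the shift, recovering
	 * nsec with at most 1023 ns of truncation error. */
	static uint64_t net_to_ns(uint64_t t)
	{
		return t << 10;
	}

	int main(void)
	{
		uint64_t now_ns = 1234567891ULL;	/* arbitrary timestamp */
		uint64_t back = net_to_ns(approx_usec(now_ns));

		printf("truncation: %llu ns (always < 1024)\n",
		       (unsigned long long)(now_ns - back));
		return 0;
	}

A true ns -> us -> ns round trip through division by 1000 would lose up to 999 ns too, but the shift variant is cheaper and is trivially reversible, which is why the patch reverses it rather than converting units.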
Diffstat (limited to 'io_uring/napi.c')
-rw-r--r--  io_uring/napi.c  48
1 file changed, 27 insertions(+), 21 deletions(-)
diff --git a/io_uring/napi.c b/io_uring/napi.c
index 327e5f3a8abe..6bdb267e9c33 100644
--- a/io_uring/napi.c
+++ b/io_uring/napi.c
@@ -33,6 +33,12 @@ static struct io_napi_entry *io_napi_hash_find(struct hlist_head *hash_list,
return NULL;
}
+static inline ktime_t net_to_ktime(unsigned long t)
+{
+ /* napi approximating usecs, reverse busy_loop_current_time */
+ return ns_to_ktime(t << 10);
+}
+
void __io_napi_add(struct io_ring_ctx *ctx, struct socket *sock)
{
struct hlist_head *hash_list;
@@ -102,14 +108,14 @@ static inline void io_napi_remove_stale(struct io_ring_ctx *ctx, bool is_stale)
__io_napi_remove_stale(ctx);
}
-static inline bool io_napi_busy_loop_timeout(unsigned long start_time,
- unsigned long bp_usec)
+static inline bool io_napi_busy_loop_timeout(ktime_t start_time,
+ ktime_t bp)
{
- if (bp_usec) {
- unsigned long end_time = start_time + bp_usec;
- unsigned long now = busy_loop_current_time();
+ if (bp) {
+ ktime_t end_time = ktime_add(start_time, bp);
+ ktime_t now = net_to_ktime(busy_loop_current_time());
- return time_after(now, end_time);
+ return ktime_after(now, end_time);
}
return true;
@@ -124,7 +130,8 @@ static bool io_napi_busy_loop_should_end(void *data,
return true;
if (io_should_wake(iowq) || io_has_work(iowq->ctx))
return true;
- if (io_napi_busy_loop_timeout(start_time, iowq->napi_busy_poll_to))
+ if (io_napi_busy_loop_timeout(net_to_ktime(start_time),
+ iowq->napi_busy_poll_dt))
return true;
return false;
@@ -181,10 +188,12 @@ static void io_napi_blocking_busy_loop(struct io_ring_ctx *ctx,
*/
void io_napi_init(struct io_ring_ctx *ctx)
{
+ u64 sys_dt = READ_ONCE(sysctl_net_busy_poll) * NSEC_PER_USEC;
+
INIT_LIST_HEAD(&ctx->napi_list);
spin_lock_init(&ctx->napi_lock);
ctx->napi_prefer_busy_poll = false;
- ctx->napi_busy_poll_to = READ_ONCE(sysctl_net_busy_poll);
+ ctx->napi_busy_poll_dt = ns_to_ktime(sys_dt);
}
/*
@@ -217,7 +226,7 @@ void io_napi_free(struct io_ring_ctx *ctx)
int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
{
const struct io_uring_napi curr = {
- .busy_poll_to = ctx->napi_busy_poll_to,
+ .busy_poll_to = ktime_to_us(ctx->napi_busy_poll_dt),
.prefer_busy_poll = ctx->napi_prefer_busy_poll
};
struct io_uring_napi napi;
@@ -232,7 +241,7 @@ int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
if (copy_to_user(arg, &curr, sizeof(curr)))
return -EFAULT;
- WRITE_ONCE(ctx->napi_busy_poll_to, napi.busy_poll_to);
+ WRITE_ONCE(ctx->napi_busy_poll_dt, napi.busy_poll_to * NSEC_PER_USEC);
WRITE_ONCE(ctx->napi_prefer_busy_poll, !!napi.prefer_busy_poll);
WRITE_ONCE(ctx->napi_enabled, true);
return 0;
@@ -249,14 +258,14 @@ int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
{
const struct io_uring_napi curr = {
- .busy_poll_to = ctx->napi_busy_poll_to,
+ .busy_poll_to = ktime_to_us(ctx->napi_busy_poll_dt),
.prefer_busy_poll = ctx->napi_prefer_busy_poll
};
if (arg && copy_to_user(arg, &curr, sizeof(curr)))
return -EFAULT;
- WRITE_ONCE(ctx->napi_busy_poll_to, 0);
+ WRITE_ONCE(ctx->napi_busy_poll_dt, 0);
WRITE_ONCE(ctx->napi_prefer_busy_poll, false);
WRITE_ONCE(ctx->napi_enabled, false);
return 0;
@@ -275,23 +284,20 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
struct timespec64 *ts)
{
- unsigned int poll_to = READ_ONCE(ctx->napi_busy_poll_to);
+ ktime_t poll_dt = READ_ONCE(ctx->napi_busy_poll_dt);
if (ts) {
struct timespec64 poll_to_ts;
- poll_to_ts = ns_to_timespec64(1000 * (s64)poll_to);
+ poll_to_ts = ns_to_timespec64(ktime_to_ns(poll_dt));
if (timespec64_compare(ts, &poll_to_ts) < 0) {
s64 poll_to_ns = timespec64_to_ns(ts);
- if (poll_to_ns > 0) {
- u64 val = poll_to_ns + 999;
- do_div(val, 1000);
- poll_to = val;
- }
+ if (poll_to_ns > 0)
+ poll_dt = ns_to_ktime(poll_to_ns);
}
}
- iowq->napi_busy_poll_to = poll_to;
+ iowq->napi_busy_poll_dt = poll_dt;
}
/*
@@ -320,7 +326,7 @@ int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
LIST_HEAD(napi_list);
bool is_stale = false;
- if (!READ_ONCE(ctx->napi_busy_poll_to))
+ if (!READ_ONCE(ctx->napi_busy_poll_dt))
return 0;
if (list_empty_careful(&ctx->napi_list))
return 0;
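[Editor's illustration] A hedged sketch of the userspace side, for context. It assumes liburing's io_uring_register_napi()/io_uring_unregister_napi() helpers (available since liburing 2.6) and the struct io_uring_napi layout current at this kernel version. As the patch notes, busy_poll_to stays in microseconds at the uapi boundary; only the kernel-internal representation changed to ktime.

	#include <liburing.h>
	#include <stdio.h>

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_napi napi = {
			.busy_poll_to     = 50,	/* usec at the uapi boundary */
			.prefer_busy_poll = 1,
		};
		int ret;

		ret = io_uring_queue_init(8, &ring, 0);
		if (ret < 0)
			return 1;

		/* IORING_REGISTER_NAPI: the kernel now converts
		 * busy_poll_to from usec to ktime internally. */
		ret = io_uring_register_napi(&ring, &napi);
		if (ret < 0)
			fprintf(stderr, "register_napi: %d\n", ret);

		/* IORING_UNREGISTER_NAPI copies the previous settings
		 * back out, again reported in usec. */
		io_uring_unregister_napi(&ring, &napi);
		io_uring_queue_exit(&ring);
		return 0;
	}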