author     Jason A. Donenfeld <Jason@zx2c4.com>    2022-10-09 20:44:02 -0600
committer  Jason A. Donenfeld <Jason@zx2c4.com>    2022-11-18 02:18:02 +0100
commit     e8a533cbeb79809206f8724e89961e0079508c3c (patch)
tree       b81da4151f67029174482ab2fdbee7dc8c98c931 /net/core/pktgen.c
parent     d247aabd391c3b2fa4f26874ed9136a7a142fcfd (diff)
treewide: use get_random_u32_inclusive() when possible
These cases were converted with this Coccinelle script:
@@
expression H;
expression L;
@@
- (get_random_u32_below(H) + L)
+ get_random_u32_inclusive(L, H + L - 1)
@@
expression H;
expression L;
expression E;
@@
get_random_u32_inclusive(L,
H
- + E
- - E
)
@@
expression H;
expression L;
expression E;
@@
get_random_u32_inclusive(L,
H
- - E
- + E
)
@@
expression H;
expression L;
expression E;
expression F;
@@
get_random_u32_inclusive(L,
H
- - E
+ F
- + E
)
@@
expression H;
expression L;
expression E;
expression F;
@@
get_random_u32_inclusive(L,
H
- + E
+ F
- - E
)
The results were then cleaned up by hand, with several automatic conversions
rejected where they did not make sense contextually.
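For reference, the first rule above relies on get_random_u32_below(ceil) returning a value in [0, ceil) while get_random_u32_inclusive(floor, ceil) returns a value in [floor, ceil]. The small userspace C sketch below illustrates why the two spellings draw from the same range; the helpers here are stand-ins that only mimic the documented semantics of the kernel APIs (they are not the kernel implementations, and the modulo bias of rand() is ignored):

/*
 * Illustration only: userspace stand-ins that mimic the shape of the
 * kernel helpers; not the kernel implementations.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* stand-in for get_random_u32_below(): value in [0, ceil) */
static uint32_t get_random_u32_below(uint32_t ceil)
{
	return (uint32_t)rand() % ceil;	/* modulo bias ignored for the demo */
}

/* stand-in for get_random_u32_inclusive(): value in [floor, ceil] */
static uint32_t get_random_u32_inclusive(uint32_t floor, uint32_t ceil)
{
	return floor + get_random_u32_below(ceil - floor + 1);
}

int main(void)
{
	uint32_t lo = 1000, hi = 1010;

	srand((unsigned int)time(NULL));

	/* old spelling: offset added outside the call, "+ 1" to include hi */
	uint32_t old_style = get_random_u32_below(hi - lo + 1) + lo;

	/* new spelling: both bounds stated directly, upper bound inclusive */
	uint32_t new_style = get_random_u32_inclusive(lo, hi);

	printf("old: %u  new: %u  (both drawn from [%u, %u])\n",
	       old_style, new_style, lo, hi);
	return 0;
}

With lo = 1000 and hi = 1010, both spellings produce values in [1000, 1010]; the new helper simply states the bounds directly instead of encoding them as an offset plus a length.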
Reviewed-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> # for infiniband
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Diffstat (limited to 'net/core/pktgen.c')
-rw-r--r--  net/core/pktgen.c  25
1 file changed, 10 insertions(+), 15 deletions(-)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 95da2ddc1c20..760238196db1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2380,9 +2380,8 @@ static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
 	else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
 		__u16 t;
 		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
-			t = get_random_u32_below(pkt_dev->queue_map_max -
-						 pkt_dev->queue_map_min + 1) +
-			    pkt_dev->queue_map_min;
+			t = get_random_u32_inclusive(pkt_dev->queue_map_min,
+						     pkt_dev->queue_map_max);
 		} else {
 			t = pkt_dev->cur_queue_map + 1;
 			if (t > pkt_dev->queue_map_max)
@@ -2478,9 +2477,8 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)

 	if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) {
 		if (pkt_dev->flags & F_UDPSRC_RND)
-			pkt_dev->cur_udp_src = get_random_u32_below(
-				pkt_dev->udp_src_max - pkt_dev->udp_src_min) +
-				pkt_dev->udp_src_min;
+			pkt_dev->cur_udp_src = get_random_u32_inclusive(pkt_dev->udp_src_min,
+									pkt_dev->udp_src_max - 1);

 		else {
 			pkt_dev->cur_udp_src++;
@@ -2491,9 +2489,8 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)

 	if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) {
 		if (pkt_dev->flags & F_UDPDST_RND) {
-			pkt_dev->cur_udp_dst = get_random_u32_below(
-				pkt_dev->udp_dst_max - pkt_dev->udp_dst_min) +
-				pkt_dev->udp_dst_min;
+			pkt_dev->cur_udp_dst = get_random_u32_inclusive(pkt_dev->udp_dst_min,
+									pkt_dev->udp_dst_max - 1);
 		} else {
 			pkt_dev->cur_udp_dst++;
 			if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max)
@@ -2508,7 +2505,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 		if (imn < imx) {
 			__u32 t;
 			if (pkt_dev->flags & F_IPSRC_RND)
-				t = get_random_u32_below(imx - imn) + imn;
+				t = get_random_u32_inclusive(imn, imx - 1);
 			else {
 				t = ntohl(pkt_dev->cur_saddr);
 				t++;
@@ -2530,8 +2527,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 			if (pkt_dev->flags & F_IPDST_RND) {

 				do {
-					t = get_random_u32_below(imx - imn) +
-					    imn;
+					t = get_random_u32_inclusive(imn, imx - 1);
 					s = htonl(t);
 				} while (ipv4_is_loopback(s) ||
 					 ipv4_is_multicast(s) ||
@@ -2578,9 +2574,8 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 	if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) {
 		__u32 t;
 		if (pkt_dev->flags & F_TXSIZE_RND) {
-			t = get_random_u32_below(pkt_dev->max_pkt_size -
-						 pkt_dev->min_pkt_size) +
-			    pkt_dev->min_pkt_size;
+			t = get_random_u32_inclusive(pkt_dev->min_pkt_size,
+						     pkt_dev->max_pkt_size - 1);
 		} else {
 			t = pkt_dev->cur_pkt_size + 1;
 			if (t > pkt_dev->max_pkt_size)
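One detail worth noting in the hunks above: two flavors of the old idiom were converted. Where the old call already contained a "+ 1" (the queue_map case), the original range was inclusive of the maximum, so the new call keeps the maximum as-is; elsewhere the old upper bound was exclusive, so the new call passes max - 1 to preserve behavior. Simplified from the hunks above, with the pkt_dev-> prefixes dropped for brevity:

/* old range already inclusive ("+ 1" inside the call): keep the max */
t = get_random_u32_below(queue_map_max - queue_map_min + 1) + queue_map_min;
t = get_random_u32_inclusive(queue_map_min, queue_map_max);

/* old upper bound exclusive (no "+ 1"): new call uses max - 1 */
cur_udp_src = get_random_u32_below(udp_src_max - udp_src_min) + udp_src_min;
cur_udp_src = get_random_u32_inclusive(udp_src_min, udp_src_max - 1);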