author     Jakub Kicinski <kuba@kernel.org>  2023-10-12 16:17:46 -0700
committer  Jakub Kicinski <kuba@kernel.org>  2023-10-12 17:07:34 -0700
commit     0e6bb5b7f4c8e6665e76bdafce37ad4a8daf83c5 (patch)
tree       6f71fc5628f01bcf9d2dc6eceb3000ee4e73c79a /kernel
parent     2f0968a030f2a5dd4897a0151c8395bf5babe5b0 (diff)
parent     e8c127b0576660da9195504fe8393fe9da3de9ce (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR.

No conflicts.

Adjacent changes:

kernel/bpf/verifier.c
  829955981c55 ("bpf: Fix verifier log for async callback return values")
  a923819fb2c5 ("bpf: Treat first argument as return value for bpf_throw")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/mprog.c               | 10
-rw-r--r--  kernel/bpf/syscall.c             | 21
-rw-r--r--  kernel/bpf/tcx.c                 |  8
-rw-r--r--  kernel/bpf/verifier.c            |  6
-rw-r--r--  kernel/power/snapshot.c          |  4
-rw-r--r--  kernel/printk/printk.c           |  8
-rw-r--r--  kernel/sched/cpufreq_schedutil.c |  3
-rw-r--r--  kernel/sched/fair.c              | 16
8 files changed, 49 insertions(+), 27 deletions(-)
diff --git a/kernel/bpf/mprog.c b/kernel/bpf/mprog.c
index 007d98c799e2..1394168062e8 100644
--- a/kernel/bpf/mprog.c
+++ b/kernel/bpf/mprog.c
@@ -401,14 +401,16 @@ int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
struct bpf_mprog_cp *cp;
struct bpf_prog *prog;
const u32 flags = 0;
+ u32 id, count = 0;
+ u64 revision = 1;
int i, ret = 0;
- u32 id, count;
- u64 revision;
if (attr->query.query_flags || attr->query.attach_flags)
return -EINVAL;
- revision = bpf_mprog_revision(entry);
- count = bpf_mprog_total(entry);
+ if (entry) {
+ revision = bpf_mprog_revision(entry);
+ count = bpf_mprog_total(entry);
+ }
if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
return -EFAULT;
if (copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
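The mprog.c hunk makes bpf_mprog_query() tolerate a NULL entry: querying an attach point with nothing attached now reports count 0 and revision 1 (the value a fresh bpf_mprog bundle starts from) instead of failing, so userspace can pass expected_revision even on its very first attach. A minimal standalone sketch of the pattern, with hypothetical struct and field names:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct bpf_mprog_entry's bookkeeping. */
struct mprog_state {
	uint64_t revision;
	uint32_t count;
};

/* Default to "empty, revision 1" and only override when an entry
 * exists, mirroring the fixed bpf_mprog_query() flow. */
static void mprog_query(const struct mprog_state *entry,
			uint64_t *revision, uint32_t *count)
{
	*revision = 1;
	*count = 0;
	if (entry) {
		*revision = entry->revision;
		*count = entry->count;
	}
}

int main(void)
{
	uint64_t rev;
	uint32_t cnt;

	mprog_query(NULL, &rev, &cnt);	/* nothing attached: no error */
	printf("revision=%llu count=%u\n", (unsigned long long)rev, cnt);
	return 0;
}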
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 85c1d908f70f..fc088fd4874f 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -3800,7 +3800,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
{
enum bpf_prog_type ptype;
struct bpf_prog *prog;
- u32 mask;
int ret;
if (CHECK_ATTR(BPF_PROG_ATTACH))
@@ -3809,10 +3808,16 @@ static int bpf_prog_attach(const union bpf_attr *attr)
ptype = attach_type_to_prog_type(attr->attach_type);
if (ptype == BPF_PROG_TYPE_UNSPEC)
return -EINVAL;
- mask = bpf_mprog_supported(ptype) ?
- BPF_F_ATTACH_MASK_MPROG : BPF_F_ATTACH_MASK_BASE;
- if (attr->attach_flags & ~mask)
- return -EINVAL;
+ if (bpf_mprog_supported(ptype)) {
+ if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
+ return -EINVAL;
+ } else {
+ if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE)
+ return -EINVAL;
+ if (attr->relative_fd ||
+ attr->expected_revision)
+ return -EINVAL;
+ }
prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
if (IS_ERR(prog))
@@ -3882,6 +3887,10 @@ static int bpf_prog_detach(const union bpf_attr *attr)
if (IS_ERR(prog))
return PTR_ERR(prog);
}
+ } else if (attr->attach_flags ||
+ attr->relative_fd ||
+ attr->expected_revision) {
+ return -EINVAL;
}
switch (ptype) {
@@ -3917,7 +3926,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
return ret;
}
-#define BPF_PROG_QUERY_LAST_FIELD query.link_attach_flags
+#define BPF_PROG_QUERY_LAST_FIELD query.revision
static int bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr)
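The syscall.c hunks tighten uapi validation: for attach types without mprog support, bpf_prog_attach() and bpf_prog_detach() now refuse nonzero relative_fd/expected_revision (and stray attach_flags on detach), keeping those fields reserved for a future extension. A standalone model of the check, with ATTACH_MASK_BASE and the flattened struct as stand-ins for the real bpf_attr fields:

#include <errno.h>
#include <stdint.h>

/* Hypothetical flattened view of the attach-relevant bpf_attr fields. */
struct attach_attr {
	uint32_t attach_flags;
	uint32_t relative_fd;
	uint64_t expected_revision;
};

#define ATTACH_MASK_BASE 0x7	/* stand-in for BPF_F_ATTACH_MASK_BASE */

/* For non-mprog attach types, every mprog-only field must be zero so
 * it can later be given a meaning without breaking old binaries. */
static int check_non_mprog_attach(const struct attach_attr *a)
{
	if (a->attach_flags & ~ATTACH_MASK_BASE)
		return -EINVAL;
	if (a->relative_fd || a->expected_revision)
		return -EINVAL;
	return 0;
}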
diff --git a/kernel/bpf/tcx.c b/kernel/bpf/tcx.c
index 13f0b5dc8262..1338a13a8b64 100644
--- a/kernel/bpf/tcx.c
+++ b/kernel/bpf/tcx.c
@@ -123,7 +123,6 @@ int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
{
bool ingress = attr->query.attach_type == BPF_TCX_INGRESS;
struct net *net = current->nsproxy->net_ns;
- struct bpf_mprog_entry *entry;
struct net_device *dev;
int ret;
@@ -133,12 +132,7 @@ int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
ret = -ENODEV;
goto out;
}
- entry = tcx_entry_fetch(dev, ingress);
- if (!entry) {
- ret = -ENOENT;
- goto out;
- }
- ret = bpf_mprog_query(attr, uattr, entry);
+ ret = bpf_mprog_query(attr, uattr, tcx_entry_fetch(dev, ingress));
out:
rtnl_unlock();
return ret;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c8dae2fde115..e59d6c2c6bf4 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -14727,7 +14727,7 @@ static int check_return_code(struct bpf_verifier_env *env, int regno)
struct tnum enforce_attach_type_range = tnum_unknown;
const struct bpf_prog *prog = env->prog;
struct bpf_reg_state *reg;
- struct tnum range = tnum_range(0, 1);
+ struct tnum range = tnum_range(0, 1), const_0 = tnum_const(0);
enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
int err;
struct bpf_func_state *frame = env->cur_state->frame[0];
@@ -14775,8 +14775,8 @@ static int check_return_code(struct bpf_verifier_env *env, int regno)
return -EINVAL;
}
- if (!tnum_in(tnum_const(0), reg->var_off)) {
- verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
+ if (!tnum_in(const_0, reg->var_off)) {
+ verbose_invalid_scalar(env, reg, &const_0, "async callback", "R0");
return -EINVAL;
}
return 0;
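In verifier.c, the R0 check for async callbacks already demanded the constant 0; the fix only makes the error message report the expected value as const 0 rather than the range [0, 1]. A simplified, standalone version of the containment test (tnum_const()/tnum_in() condensed from kernel/bpf/tnum.c):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Tracked number: value holds the known bits, mask marks unknown bits. */
struct tnum {
	uint64_t value;
	uint64_t mask;
};

static struct tnum tnum_const(uint64_t v)
{
	return (struct tnum){ .value = v, .mask = 0 };
}

/* tnum_in(a, b): can every concrete value of b be represented by a? */
static bool tnum_in(struct tnum a, struct tnum b)
{
	if (b.mask & ~a.mask)
		return false;
	b.value &= ~a.mask;
	return a.value == b.value;
}

int main(void)
{
	/* Async callbacks must return exactly 0: R0 = 1 is rejected, and
	 * the fixed log now prints "const 0" as the expected value. */
	printf("R0=0: %d\n", tnum_in(tnum_const(0), tnum_const(0))); /* 1 */
	printf("R0=1: %d\n", tnum_in(tnum_const(0), tnum_const(1))); /* 0 */
	return 0;
}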
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 87e9f7e2bdc0..0f12e0a97e43 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -2647,7 +2647,7 @@ static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm,
memory_bm_free(bm, PG_UNSAFE_KEEP);
/* Make a copy of zero_bm so it can be created in safe pages */
- error = memory_bm_create(&tmp, GFP_ATOMIC, PG_ANY);
+ error = memory_bm_create(&tmp, GFP_ATOMIC, PG_SAFE);
if (error)
goto Free;
@@ -2660,7 +2660,7 @@ static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm,
goto Free;
duplicate_memory_bitmap(zero_bm, &tmp);
- memory_bm_free(&tmp, PG_UNSAFE_KEEP);
+ memory_bm_free(&tmp, PG_UNSAFE_CLEAR);
/* At this point zero_bm is in safe pages and it can be used for restoring. */
if (nr_highmem > 0) {
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 7e0b4dd02398..0b3af1529778 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -3740,12 +3740,18 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
seq = prb_next_seq(prb);
+ /* Flush the consoles so that records up to @seq are printed. */
+ console_lock();
+ console_unlock();
+
for (;;) {
diff = 0;
/*
* Hold the console_lock to guarantee safe access to
- * console->seq.
+ * console->seq. Releasing console_lock flushes more
+ * records in case @seq is still not printed on all
+ * usable consoles.
*/
console_lock();
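The printk.c hunk relies on a console locking convention: whoever holds the console lock owns printing, and console_unlock() drains all pending records before returning, so a bare lock/unlock pair acts as a flush. A toy single-threaded model of that side effect (locking and the real printk ringbuffer elided):

#include <stdio.h>

/* Toy ring of pending records standing in for the printk ringbuffer. */
static const char *pending[8];
static unsigned int head, tail;

static void console_lock(void)
{
	/* mutual exclusion elided in this single-threaded model */
}

/* The unlock path must print everything queued before giving up the
 * lock -- this is what makes an empty lock/unlock pair a flush. */
static void console_unlock(void)
{
	while (head != tail)
		puts(pending[head++ % 8]);
}

int main(void)
{
	pending[tail++ % 8] = "record 1";
	pending[tail++ % 8] = "record 2";

	console_lock();
	console_unlock();	/* flushes both records */
	return 0;
}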
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 4492608b7d7f..458d359f5991 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -350,7 +350,8 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
* Except when the rq is capped by uclamp_max.
*/
if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
- sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
+ sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq &&
+ !sg_policy->need_freq_update) {
next_f = sg_policy->next_freq;
/* Restore cached freq as next_freq has changed */
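The schedutil hunk stops the busy-CPU shortcut from restoring a cached frequency once need_freq_update is set, since a pending limits change can make that cached value violate the new policy range. A standalone model of the decision (field names borrowed from sugov, the surrounding structure hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct policy {
	unsigned int next_freq;		/* last requested frequency */
	bool need_freq_update;		/* limits changed since then */
};

static unsigned int pick_freq(struct policy *p, unsigned int next_f,
			      bool cpu_busy)
{
	/* Keep a busy CPU at its old, higher frequency -- but only if no
	 * limits update made that cached frequency potentially stale. */
	if (cpu_busy && next_f < p->next_freq && !p->need_freq_update)
		next_f = p->next_freq;
	return next_f;
}

int main(void)
{
	struct policy p = { .next_freq = 2000, .need_freq_update = true };

	printf("%u\n", pick_freq(&p, 1000, true));	/* 1000: honor update */
	p.need_freq_update = false;
	printf("%u\n", pick_freq(&p, 1000, true));	/* 2000: stay busy */
	return 0;
}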
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cb225921bbca..ef7490c4b8b4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -664,6 +664,10 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
cfs_rq->avg_vruntime -= cfs_rq->avg_load * delta;
}
+/*
+ * Specifically: avg_runtime() + 0 must result in entity_eligible() := true
+ * For this to be so, the result of this function must have a left bias.
+ */
u64 avg_vruntime(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
@@ -677,8 +681,12 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
load += weight;
}
- if (load)
+ if (load) {
+ /* sign flips effective floor / ceil */
+ if (avg < 0)
+ avg -= (load - 1);
avg = div_s64(avg, load);
+ }
return cfs_rq->min_vruntime + avg;
}
@@ -4919,10 +4927,12 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
- u64 vslice = calc_delta_fair(se->slice, se);
- u64 vruntime = avg_vruntime(cfs_rq);
+ u64 vslice, vruntime = avg_vruntime(cfs_rq);
s64 lag = 0;
+ se->slice = sysctl_sched_base_slice;
+ vslice = calc_delta_fair(se->slice, se);
+
/*
* Due to how V is constructed as the weighted average of entities,
* adding tasks with positive lag, or removing tasks with negative lag