Diffstat (limited to 'kernel')
-rw-r--r--   kernel/bpf/syscall.c   | 25
-rw-r--r--   kernel/cgroup.c        |  2
-rw-r--r--   kernel/events/core.c   | 15
-rw-r--r--   kernel/sched/core.c    |  5
4 files changed, 33 insertions, 14 deletions
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 088ac0b1b106..536edc2be307 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -150,7 +150,7 @@ static int map_lookup_elem(union bpf_attr *attr)
 	int ufd = attr->map_fd;
 	struct fd f = fdget(ufd);
 	struct bpf_map *map;
-	void *key, *value;
+	void *key, *value, *ptr;
 	int err;
 
 	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
@@ -169,20 +169,29 @@ static int map_lookup_elem(union bpf_attr *attr)
 	if (copy_from_user(key, ukey, map->key_size) != 0)
 		goto free_key;
 
-	err = -ENOENT;
-	rcu_read_lock();
-	value = map->ops->map_lookup_elem(map, key);
+	err = -ENOMEM;
+	value = kmalloc(map->value_size, GFP_USER);
 	if (!value)
-		goto err_unlock;
+		goto free_key;
+
+	rcu_read_lock();
+	ptr = map->ops->map_lookup_elem(map, key);
+	if (ptr)
+		memcpy(value, ptr, map->value_size);
+	rcu_read_unlock();
+
+	err = -ENOENT;
+	if (!ptr)
+		goto free_value;
 
 	err = -EFAULT;
 	if (copy_to_user(uvalue, value, map->value_size) != 0)
-		goto err_unlock;
+		goto free_value;
 
 	err = 0;
 
-err_unlock:
-	rcu_read_unlock();
+free_value:
+	kfree(value);
 free_key:
 	kfree(key);
 err_put:
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index bb263d0caab3..04cfe8ace520 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1909,7 +1909,7 @@ static void cgroup_kill_sb(struct super_block *sb)
 	 *
 	 * And don't kill the default root.
 	 */
-	if (css_has_online_children(&root->cgrp.self) ||
+	if (!list_empty(&root->cgrp.self.children) ||
 	    root == &cgrp_dfl_root)
 		cgroup_put(&root->cgrp);
 	else
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 882f835a0d85..19efcf13375a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6776,7 +6776,6 @@ skip_type:
 		__perf_event_init_context(&cpuctx->ctx);
 		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
 		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
-		cpuctx->ctx.type = cpu_context;
 		cpuctx->ctx.pmu = pmu;
 
 		__perf_cpu_hrtimer_init(cpuctx, cpu);
@@ -7420,7 +7419,19 @@ SYSCALL_DEFINE5(perf_event_open,
 	 * task or CPU context:
 	 */
	if (move_group) {
-		if (group_leader->ctx->type != ctx->type)
+		/*
+		 * Make sure we're both on the same task, or both
+		 * per-cpu events.
+		 */
+		if (group_leader->ctx->task != ctx->task)
+			goto err_context;
+
+		/*
+		 * Make sure we're both events for the same CPU;
+		 * grouping events for different CPUs is broken; since
+		 * you can never concurrently schedule them anyhow.
+		 */
+		if (group_leader->cpu != event->cpu)
 			goto err_context;
 	} else {
 		if (group_leader->ctx != ctx)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c0accc00566e..e628cb11b560 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7292,13 +7292,12 @@ void __might_sleep(const char *file, int line, int preempt_offset)
 	 * since we will exit with TASK_RUNNING make sure we enter with it,
 	 * otherwise we will destroy state.
 	 */
-	if (WARN_ONCE(current->state != TASK_RUNNING,
+	WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
 			"do not call blocking ops when !TASK_RUNNING; "
 			"state=%lx set at [<%p>] %pS\n",
 			current->state,
 			(void *)current->task_state_change,
-			(void *)current->task_state_change))
-		__set_current_state(TASK_RUNNING);
+			(void *)current->task_state_change);
 
 	___might_sleep(file, line, preempt_offset);
 }
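
The kernel/bpf/syscall.c hunk above makes map_lookup_elem() snapshot the value into a kmalloc()'d buffer while rcu_read_lock() is held and call copy_to_user(), which may fault and sleep, only after rcu_read_unlock(). A minimal userspace sketch of driving that BPF_MAP_LOOKUP_ELEM path is shown below; it assumes a kernel and libc new enough to expose the bpf(2) syscall, BPF_MAP_TYPE_HASH and the BPF_ANY update flag, and the sys_bpf() wrapper plus the map parameters are illustrative only, not part of the commit.

/* Sketch: create a hash map, insert one element, read it back. */
#include <linux/bpf.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_bpf
#define __NR_bpf 321		/* x86-64 syscall number; adjust per arch */
#endif

/* Hypothetical helper: no glibc wrapper existed, so use the raw syscall. */
static int sys_bpf(int cmd, union bpf_attr *attr)
{
	return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

int main(void)
{
	union bpf_attr attr;
	uint32_t key = 1, value = 42, out = 0;
	int map_fd;

	/* Create a small hash map with 4-byte keys and 4-byte values. */
	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_HASH;
	attr.key_size = sizeof(key);
	attr.value_size = sizeof(value);
	attr.max_entries = 16;
	map_fd = sys_bpf(BPF_MAP_CREATE, &attr);
	if (map_fd < 0) {
		perror("BPF_MAP_CREATE");
		return 1;
	}

	/* Insert key -> value. */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (uint64_t)(unsigned long)&key;
	attr.value = (uint64_t)(unsigned long)&value;
	attr.flags = BPF_ANY;
	if (sys_bpf(BPF_MAP_UPDATE_ELEM, &attr)) {
		perror("BPF_MAP_UPDATE_ELEM");
		return 1;
	}

	/*
	 * Look the element back up.  With the change above, the kernel
	 * copies the value out of the RCU-protected map element into a
	 * private buffer before copy_to_user() touches the (possibly
	 * faulting) user pointer passed in attr.value.
	 */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (uint64_t)(unsigned long)&key;
	attr.value = (uint64_t)(unsigned long)&out;
	if (sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr)) {
		perror("BPF_MAP_LOOKUP_ELEM");
		return 1;
	}

	printf("key %u -> value %u\n", key, out);
	close(map_fd);
	return 0;
}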