Diffstat (limited to 'kernel'): 123 files changed, 4957 insertions, 2325 deletions
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index 60f1bfc3c7b2..ce77f0265660 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -1,12 +1,23 @@ # SPDX-License-Identifier: GPL-2.0-only +config PREEMPT_NONE_BUILD + bool + +config PREEMPT_VOLUNTARY_BUILD + bool + +config PREEMPT_BUILD + bool + select PREEMPTION + select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK + choice prompt "Preemption Model" - default PREEMPT_NONE_BEHAVIOUR + default PREEMPT_NONE -config PREEMPT_NONE_BEHAVIOUR +config PREEMPT_NONE bool "No Forced Preemption (Server)" - select PREEMPT_NONE if !PREEMPT_DYNAMIC + select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC help This is the traditional Linux preemption model, geared towards throughput. It will still provide good latencies most of the @@ -18,10 +29,10 @@ config PREEMPT_NONE_BEHAVIOUR raw processing power of the kernel, irrespective of scheduling latencies. -config PREEMPT_VOLUNTARY_BEHAVIOUR +config PREEMPT_VOLUNTARY bool "Voluntary Kernel Preemption (Desktop)" depends on !ARCH_NO_PREEMPT - select PREEMPT_VOLUNTARY if !PREEMPT_DYNAMIC + select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC help This option reduces the latency of the kernel by adding more "explicit preemption points" to the kernel code. These new @@ -37,10 +48,10 @@ config PREEMPT_VOLUNTARY_BEHAVIOUR Select this if you are building a kernel for a desktop system. -config PREEMPT_BEHAVIOUR +config PREEMPT bool "Preemptible Kernel (Low-Latency Desktop)" depends on !ARCH_NO_PREEMPT - select PREEMPT + select PREEMPT_BUILD help This option reduces the latency of the kernel by making all kernel code (that is not executing in a critical section) @@ -58,7 +69,7 @@ config PREEMPT_BEHAVIOUR config PREEMPT_RT bool "Fully Preemptible Kernel (Real-Time)" - depends on EXPERT && ARCH_SUPPORTS_RT && !PREEMPT_DYNAMIC + depends on EXPERT && ARCH_SUPPORTS_RT select PREEMPTION help This option turns the kernel into a real-time kernel by replacing @@ -75,17 +86,6 @@ config PREEMPT_RT endchoice -config PREEMPT_NONE - bool - -config PREEMPT_VOLUNTARY - bool - -config PREEMPT - bool - select PREEMPTION - select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK - config PREEMPT_COUNT bool @@ -95,8 +95,8 @@ config PREEMPTION config PREEMPT_DYNAMIC bool "Preemption behaviour defined on boot" - depends on HAVE_PREEMPT_DYNAMIC - select PREEMPT + depends on HAVE_PREEMPT_DYNAMIC && !PREEMPT_RT + select PREEMPT_BUILD default y help This option allows to define the preemption model on the kernel diff --git a/kernel/Makefile b/kernel/Makefile index 3f6ab5d5041b..186c49582f45 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -85,7 +85,6 @@ obj-$(CONFIG_PID_NS) += pid_namespace.o obj-$(CONFIG_IKCONFIG) += configs.o obj-$(CONFIG_IKHEADERS) += kheaders.o obj-$(CONFIG_SMP) += stop_machine.o -obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o obj-$(CONFIG_AUDIT) += audit.o auditfilter.o obj-$(CONFIG_AUDITSYSCALL) += auditsc.o audit_watch.o audit_fsnotify.o audit_tree.o obj-$(CONFIG_GCOV_KERNEL) += gcov/ diff --git a/kernel/audit.h b/kernel/audit.h index d6a2c899a8db..c4498090a5bd 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -14,6 +14,7 @@ #include <linux/skbuff.h> #include <uapi/linux/mqueue.h> #include <linux/tty.h> +#include <uapi/linux/openat2.h> // struct open_how /* AUDIT_NAMES is the number of slots we reserve in the audit_context * for saving names from getname(). If we get more names we will allocate @@ -100,10 +101,15 @@ struct audit_proctitle { /* The per-task audit context. 
*/ struct audit_context { int dummy; /* must be the first element */ - int in_syscall; /* 1 if task is in a syscall */ + enum { + AUDIT_CTX_UNUSED, /* audit_context is currently unused */ + AUDIT_CTX_SYSCALL, /* in use by syscall */ + AUDIT_CTX_URING, /* in use by io_uring */ + } context; enum audit_state state, current_state; unsigned int serial; /* serial number for record */ int major; /* syscall number */ + int uring_op; /* uring operation */ struct timespec64 ctime; /* time of syscall entry */ unsigned long argv[4]; /* syscall arguments */ long return_code;/* syscall return code */ @@ -188,6 +194,7 @@ struct audit_context { int fd; int flags; } mmap; + struct open_how openat2; struct { int argc; } execve; diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c index 60739d5e3373..02348b48447c 100644 --- a/kernel/audit_fsnotify.c +++ b/kernel/audit_fsnotify.c @@ -160,8 +160,7 @@ static int audit_mark_handle_event(struct fsnotify_mark *inode_mark, u32 mask, audit_mark = container_of(inode_mark, struct audit_fsnotify_mark, mark); - if (WARN_ON_ONCE(inode_mark->group != audit_fsnotify_group) || - WARN_ON_ONCE(!inode)) + if (WARN_ON_ONCE(inode_mark->group != audit_fsnotify_group)) return 0; if (mask & (FS_CREATE|FS_MOVED_TO|FS_DELETE|FS_MOVED_FROM)) { diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 2cd7b5694422..72324afcffef 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -30,7 +30,7 @@ struct audit_chunk { int count; atomic_long_t refs; struct rcu_head head; - struct node { + struct audit_node { struct list_head list; struct audit_tree *owner; unsigned index; /* index; upper bit indicates 'will prune' */ @@ -269,7 +269,7 @@ bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree) /* tagging and untagging inodes with trees */ -static struct audit_chunk *find_chunk(struct node *p) +static struct audit_chunk *find_chunk(struct audit_node *p) { int index = p->index & ~(1U<<31); p -= index; @@ -322,7 +322,7 @@ static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old) list_replace_rcu(&old->hash, &new->hash); } -static void remove_chunk_node(struct audit_chunk *chunk, struct node *p) +static void remove_chunk_node(struct audit_chunk *chunk, struct audit_node *p) { struct audit_tree *owner = p->owner; @@ -459,7 +459,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) { struct fsnotify_mark *mark; struct audit_chunk *chunk, *old; - struct node *p; + struct audit_node *p; int n; mutex_lock(&audit_tree_group->mark_mutex); @@ -570,11 +570,11 @@ static void prune_tree_chunks(struct audit_tree *victim, bool tagged) { spin_lock(&hash_lock); while (!list_empty(&victim->chunks)) { - struct node *p; + struct audit_node *p; struct audit_chunk *chunk; struct fsnotify_mark *mark; - p = list_first_entry(&victim->chunks, struct node, list); + p = list_first_entry(&victim->chunks, struct audit_node, list); /* have we run out of marked? 
*/ if (tagged && !(p->index & (1U<<31))) break; @@ -616,7 +616,7 @@ static void trim_marked(struct audit_tree *tree) } /* reorder */ for (p = tree->chunks.next; p != &tree->chunks; p = q) { - struct node *node = list_entry(p, struct node, list); + struct audit_node *node = list_entry(p, struct audit_node, list); q = p->next; if (node->index & (1U<<31)) { list_del_init(p); @@ -684,7 +684,7 @@ void audit_trim_trees(void) struct audit_tree *tree; struct path path; struct vfsmount *root_mnt; - struct node *node; + struct audit_node *node; int err; tree = container_of(cursor.next, struct audit_tree, list); @@ -726,7 +726,8 @@ int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op) { if (pathname[0] != '/' || - rule->listnr != AUDIT_FILTER_EXIT || + (rule->listnr != AUDIT_FILTER_EXIT && + rule->listnr != AUDIT_FILTER_URING_EXIT) || op != Audit_equal || rule->inode_f || rule->watch || rule->tree) return -EINVAL; @@ -839,7 +840,7 @@ int audit_add_tree_rule(struct audit_krule *rule) drop_collected_mounts(mnt); if (!err) { - struct node *node; + struct audit_node *node; spin_lock(&hash_lock); list_for_each_entry(node, &tree->chunks, list) node->index &= ~(1U<<31); @@ -938,7 +939,7 @@ int audit_tag_tree(char *old, char *new) mutex_unlock(&audit_filter_mutex); if (!failed) { - struct node *node; + struct audit_node *node; spin_lock(&hash_lock); list_for_each_entry(node, &tree->chunks, list) node->index &= ~(1U<<31); diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 2acf7ca49154..713b256be944 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -183,7 +183,8 @@ int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op) return -EOPNOTSUPP; if (path[0] != '/' || path[len-1] == '/' || - krule->listnr != AUDIT_FILTER_EXIT || + (krule->listnr != AUDIT_FILTER_EXIT && + krule->listnr != AUDIT_FILTER_URING_EXIT) || op != Audit_equal || krule->inode_f || krule->watch || krule->tree) return -EINVAL; @@ -472,8 +473,7 @@ static int audit_watch_handle_event(struct fsnotify_mark *inode_mark, u32 mask, parent = container_of(inode_mark, struct audit_parent, mark); - if (WARN_ON_ONCE(inode_mark->group != audit_watch_group) || - WARN_ON_ONCE(!inode)) + if (WARN_ON_ONCE(inode_mark->group != audit_watch_group)) return 0; if (mask & (FS_CREATE|FS_MOVED_TO) && inode) diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index db2c6b59dfc3..d75acb014ccd 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -44,7 +44,8 @@ struct list_head audit_filter_list[AUDIT_NR_FILTERS] = { LIST_HEAD_INIT(audit_filter_list[4]), LIST_HEAD_INIT(audit_filter_list[5]), LIST_HEAD_INIT(audit_filter_list[6]), -#if AUDIT_NR_FILTERS != 7 + LIST_HEAD_INIT(audit_filter_list[7]), +#if AUDIT_NR_FILTERS != 8 #error Fix audit_filter_list initialiser #endif }; @@ -56,6 +57,7 @@ static struct list_head audit_rules_list[AUDIT_NR_FILTERS] = { LIST_HEAD_INIT(audit_rules_list[4]), LIST_HEAD_INIT(audit_rules_list[5]), LIST_HEAD_INIT(audit_rules_list[6]), + LIST_HEAD_INIT(audit_rules_list[7]), }; DEFINE_MUTEX(audit_filter_mutex); @@ -151,7 +153,8 @@ char *audit_unpack_string(void **bufp, size_t *remain, size_t len) static inline int audit_to_inode(struct audit_krule *krule, struct audit_field *f) { - if (krule->listnr != AUDIT_FILTER_EXIT || + if ((krule->listnr != AUDIT_FILTER_EXIT && + krule->listnr != AUDIT_FILTER_URING_EXIT) || krule->inode_f || krule->watch || krule->tree || (f->op != Audit_equal && f->op != Audit_not_equal)) return -EINVAL; @@ -248,6 +251,7 @@ static inline struct 
audit_entry *audit_to_entry_common(struct audit_rule_data * pr_err("AUDIT_FILTER_ENTRY is deprecated\n"); goto exit_err; case AUDIT_FILTER_EXIT: + case AUDIT_FILTER_URING_EXIT: case AUDIT_FILTER_TASK: #endif case AUDIT_FILTER_USER: @@ -332,6 +336,10 @@ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f) if (entry->rule.listnr != AUDIT_FILTER_FS) return -EINVAL; break; + case AUDIT_PERM: + if (entry->rule.listnr == AUDIT_FILTER_URING_EXIT) + return -EINVAL; + break; } switch (entry->rule.listnr) { @@ -980,7 +988,8 @@ static inline int audit_add_rule(struct audit_entry *entry) } entry->rule.prio = ~0ULL; - if (entry->rule.listnr == AUDIT_FILTER_EXIT) { + if (entry->rule.listnr == AUDIT_FILTER_EXIT || + entry->rule.listnr == AUDIT_FILTER_URING_EXIT) { if (entry->rule.flags & AUDIT_FILTER_PREPEND) entry->rule.prio = ++prio_high; else diff --git a/kernel/auditsc.c b/kernel/auditsc.c index b1cb1dbf7417..b517947bfa48 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* auditsc.c -- System-call auditing support * Handles all system-call specific auditing features. * @@ -6,20 +7,6 @@ * Copyright (C) 2005, 2006 IBM Corporation * All Rights Reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * Written by Rickard E. (Rik) Faith <faith@redhat.com> * * Many of the ideas implemented here are from Stephen C. 
Tweedie, @@ -76,6 +63,7 @@ #include <linux/fsnotify_backend.h> #include <uapi/linux/limits.h> #include <uapi/linux/netfilter/nf_tables.h> +#include <uapi/linux/openat2.h> // struct open_how #include "audit.h" @@ -166,7 +154,7 @@ static int audit_match_perm(struct audit_context *ctx, int mask) n = ctx->major; switch (audit_classify_syscall(ctx->arch, n)) { - case 0: /* native */ + case AUDITSC_NATIVE: if ((mask & AUDIT_PERM_WRITE) && audit_match_class(AUDIT_CLASS_WRITE, n)) return 1; @@ -177,7 +165,7 @@ static int audit_match_perm(struct audit_context *ctx, int mask) audit_match_class(AUDIT_CLASS_CHATTR, n)) return 1; return 0; - case 1: /* 32bit on biarch */ + case AUDITSC_COMPAT: /* 32bit on biarch */ if ((mask & AUDIT_PERM_WRITE) && audit_match_class(AUDIT_CLASS_WRITE_32, n)) return 1; @@ -188,14 +176,16 @@ static int audit_match_perm(struct audit_context *ctx, int mask) audit_match_class(AUDIT_CLASS_CHATTR_32, n)) return 1; return 0; - case 2: /* open */ + case AUDITSC_OPEN: return mask & ACC_MODE(ctx->argv[1]); - case 3: /* openat */ + case AUDITSC_OPENAT: return mask & ACC_MODE(ctx->argv[2]); - case 4: /* socketcall */ + case AUDITSC_SOCKETCALL: return ((mask & AUDIT_PERM_WRITE) && ctx->argv[0] == SYS_BIND); - case 5: /* execve */ + case AUDITSC_EXECVE: return mask & AUDIT_PERM_EXEC; + case AUDITSC_OPENAT2: + return mask & ACC_MODE((u32)((struct open_how *)ctx->argv[2])->flags); default: return 0; } @@ -480,6 +470,9 @@ static int audit_filter_rules(struct task_struct *tsk, u32 sid; unsigned int sessionid; + if (ctx && rule->prio <= ctx->prio) + return 0; + cred = rcu_dereference_check(tsk->cred, tsk == current || task_creation); for (i = 0; i < rule->field_count; i++) { @@ -747,8 +740,6 @@ static int audit_filter_rules(struct task_struct *tsk, } if (ctx) { - if (rule->prio <= ctx->prio) - return 0; if (rule->filterkey) { kfree(ctx->filterkey); ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC); @@ -805,6 +796,34 @@ static int audit_in_mask(const struct audit_krule *rule, unsigned long val) return rule->mask[word] & bit; } +/** + * audit_filter_uring - apply filters to an io_uring operation + * @tsk: associated task + * @ctx: audit context + */ +static void audit_filter_uring(struct task_struct *tsk, + struct audit_context *ctx) +{ + struct audit_entry *e; + enum audit_state state; + + if (auditd_test_task(tsk)) + return; + + rcu_read_lock(); + list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_URING_EXIT], + list) { + if (audit_in_mask(&e->rule, ctx->uring_op) && + audit_filter_rules(tsk, &e->rule, ctx, NULL, &state, + false)) { + rcu_read_unlock(); + ctx->current_state = state; + return; + } + } + rcu_read_unlock(); +} + /* At syscall exit time, this filter is called if the audit_state is * not low enough that auditing cannot take place, but is also not * high enough that we already know we have to write an audit record @@ -915,10 +934,81 @@ static inline void audit_free_aux(struct audit_context *context) context->aux = aux->next; kfree(aux); } + context->aux = NULL; while ((aux = context->aux_pids)) { context->aux_pids = aux->next; kfree(aux); } + context->aux_pids = NULL; +} + +/** + * audit_reset_context - reset a audit_context structure + * @ctx: the audit_context to reset + * + * All fields in the audit_context will be reset to an initial state, all + * references held by fields will be dropped, and private memory will be + * released. 
When this function returns the audit_context will be suitable + * for reuse, so long as the passed context is not NULL or a dummy context. + */ +static void audit_reset_context(struct audit_context *ctx) +{ + if (!ctx) + return; + + /* if ctx is non-null, reset the "ctx->state" regardless */ + ctx->context = AUDIT_CTX_UNUSED; + if (ctx->dummy) + return; + + /* + * NOTE: It shouldn't matter in what order we release the fields, so + * release them in the order in which they appear in the struct; + * this gives us some hope of quickly making sure we are + * resetting the audit_context properly. + * + * Other things worth mentioning: + * - we don't reset "dummy" + * - we don't reset "state", we do reset "current_state" + * - we preserve "filterkey" if "state" is AUDIT_STATE_RECORD + * - much of this is likely overkill, but play it safe for now + * - we really need to work on improving the audit_context struct + */ + + ctx->current_state = ctx->state; + ctx->serial = 0; + ctx->major = 0; + ctx->uring_op = 0; + ctx->ctime = (struct timespec64){ .tv_sec = 0, .tv_nsec = 0 }; + memset(ctx->argv, 0, sizeof(ctx->argv)); + ctx->return_code = 0; + ctx->prio = (ctx->state == AUDIT_STATE_RECORD ? ~0ULL : 0); + ctx->return_valid = AUDITSC_INVALID; + audit_free_names(ctx); + if (ctx->state != AUDIT_STATE_RECORD) { + kfree(ctx->filterkey); + ctx->filterkey = NULL; + } + audit_free_aux(ctx); + kfree(ctx->sockaddr); + ctx->sockaddr = NULL; + ctx->sockaddr_len = 0; + ctx->pid = ctx->ppid = 0; + ctx->uid = ctx->euid = ctx->suid = ctx->fsuid = KUIDT_INIT(0); + ctx->gid = ctx->egid = ctx->sgid = ctx->fsgid = KGIDT_INIT(0); + ctx->personality = 0; + ctx->arch = 0; + ctx->target_pid = 0; + ctx->target_auid = ctx->target_uid = KUIDT_INIT(0); + ctx->target_sessionid = 0; + ctx->target_sid = 0; + ctx->target_comm[0] = '\0'; + unroll_tree_refs(ctx, NULL, 0); + WARN_ON(!list_empty(&ctx->killed_trees)); + ctx->type = 0; + audit_free_module(ctx); + ctx->fds[0] = -1; + audit_proctitle_free(ctx); } static inline struct audit_context *audit_alloc_context(enum audit_state state) @@ -928,6 +1018,7 @@ static inline struct audit_context *audit_alloc_context(enum audit_state state) context = kzalloc(sizeof(*context), GFP_KERNEL); if (!context) return NULL; + context->context = AUDIT_CTX_UNUSED; context->state = state; context->prio = state == AUDIT_STATE_RECORD ? ~0ULL : 0; INIT_LIST_HEAD(&context->killed_trees); @@ -953,7 +1044,7 @@ int audit_alloc(struct task_struct *tsk) char *key = NULL; if (likely(!audit_ever_enabled)) - return 0; /* Return if not auditing. */ + return 0; state = audit_filter_task(tsk, &key); if (state == AUDIT_STATE_DISABLED) { @@ -973,16 +1064,37 @@ int audit_alloc(struct task_struct *tsk) return 0; } +/** + * audit_alloc_kernel - allocate an audit_context for a kernel task + * @tsk: the kernel task + * + * Similar to the audit_alloc() function, but intended for kernel private + * threads. Returns zero on success, negative values on failure. + */ +int audit_alloc_kernel(struct task_struct *tsk) +{ + /* + * At the moment we are just going to call into audit_alloc() to + * simplify the code, but there two things to keep in mind with this + * approach: + * + * 1. Filtering internal kernel tasks is a bit laughable in almost all + * cases, but there is at least one case where there is a benefit: + * the '-a task,never' case allows the admin to effectively disable + * task auditing at runtime. + * + * 2. 
The {set,clear}_task_syscall_work() ops likely have zero effect + * on these internal kernel tasks, but they probably don't hurt either. + */ + return audit_alloc(tsk); +} + static inline void audit_free_context(struct audit_context *context) { - audit_free_module(context); - audit_free_names(context); - unroll_tree_refs(context, NULL, 0); + /* resetting is extra work, but it is likely just noise */ + audit_reset_context(context); free_tree_refs(context); - audit_free_aux(context); kfree(context->filterkey); - kfree(context->sockaddr); - audit_proctitle_free(context); kfree(context); } @@ -1316,6 +1428,12 @@ static void show_special(struct audit_context *context, int *call_panic) audit_log_format(ab, "fd=%d flags=0x%x", context->mmap.fd, context->mmap.flags); break; + case AUDIT_OPENAT2: + audit_log_format(ab, "oflag=0%llo mode=0%llo resolve=0x%llx", + context->openat2.flags, + context->openat2.mode, + context->openat2.resolve); + break; case AUDIT_EXECVE: audit_log_execve_info(context, &ab); break; @@ -1479,6 +1597,44 @@ out: audit_log_end(ab); } +/** + * audit_log_uring - generate a AUDIT_URINGOP record + * @ctx: the audit context + */ +static void audit_log_uring(struct audit_context *ctx) +{ + struct audit_buffer *ab; + const struct cred *cred; + + ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_URINGOP); + if (!ab) + return; + cred = current_cred(); + audit_log_format(ab, "uring_op=%d", ctx->uring_op); + if (ctx->return_valid != AUDITSC_INVALID) + audit_log_format(ab, " success=%s exit=%ld", + (ctx->return_valid == AUDITSC_SUCCESS ? + "yes" : "no"), + ctx->return_code); + audit_log_format(ab, + " items=%d" + " ppid=%d pid=%d uid=%u gid=%u euid=%u suid=%u" + " fsuid=%u egid=%u sgid=%u fsgid=%u", + ctx->name_count, + task_ppid_nr(current), task_tgid_nr(current), + from_kuid(&init_user_ns, cred->uid), + from_kgid(&init_user_ns, cred->gid), + from_kuid(&init_user_ns, cred->euid), + from_kuid(&init_user_ns, cred->suid), + from_kuid(&init_user_ns, cred->fsuid), + from_kgid(&init_user_ns, cred->egid), + from_kgid(&init_user_ns, cred->sgid), + from_kgid(&init_user_ns, cred->fsgid)); + audit_log_task_context(ab); + audit_log_key(ab, ctx->filterkey); + audit_log_end(ab); +} + static void audit_log_exit(void) { int i, call_panic = 0; @@ -1489,29 +1645,38 @@ static void audit_log_exit(void) context->personality = current->personality; - ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL); - if (!ab) - return; /* audit_panic has been called */ - audit_log_format(ab, "arch=%x syscall=%d", - context->arch, context->major); - if (context->personality != PER_LINUX) - audit_log_format(ab, " per=%lx", context->personality); - if (context->return_valid != AUDITSC_INVALID) - audit_log_format(ab, " success=%s exit=%ld", - (context->return_valid==AUDITSC_SUCCESS)?"yes":"no", - context->return_code); - - audit_log_format(ab, - " a0=%lx a1=%lx a2=%lx a3=%lx items=%d", - context->argv[0], - context->argv[1], - context->argv[2], - context->argv[3], - context->name_count); - - audit_log_task_info(ab); - audit_log_key(ab, context->filterkey); - audit_log_end(ab); + switch (context->context) { + case AUDIT_CTX_SYSCALL: + ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL); + if (!ab) + return; + audit_log_format(ab, "arch=%x syscall=%d", + context->arch, context->major); + if (context->personality != PER_LINUX) + audit_log_format(ab, " per=%lx", context->personality); + if (context->return_valid != AUDITSC_INVALID) + audit_log_format(ab, " success=%s exit=%ld", + (context->return_valid == AUDITSC_SUCCESS ? 
+ "yes" : "no"), + context->return_code); + audit_log_format(ab, + " a0=%lx a1=%lx a2=%lx a3=%lx items=%d", + context->argv[0], + context->argv[1], + context->argv[2], + context->argv[3], + context->name_count); + audit_log_task_info(ab); + audit_log_key(ab, context->filterkey); + audit_log_end(ab); + break; + case AUDIT_CTX_URING: + audit_log_uring(context); + break; + default: + BUG(); + break; + } for (aux = context->aux; aux; aux = aux->next) { @@ -1602,21 +1767,22 @@ static void audit_log_exit(void) audit_log_name(context, n, NULL, i++, &call_panic); } - audit_log_proctitle(); + if (context->context == AUDIT_CTX_SYSCALL) + audit_log_proctitle(); /* Send end of event record to help user space know we are finished */ ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE); if (ab) audit_log_end(ab); if (call_panic) - audit_panic("error converting sid to string"); + audit_panic("error in audit_log_exit()"); } /** * __audit_free - free a per-task audit context * @tsk: task whose audit context block to free * - * Called from copy_process and do_exit + * Called from copy_process, do_exit, and the io_uring code */ void __audit_free(struct task_struct *tsk) { @@ -1625,6 +1791,7 @@ void __audit_free(struct task_struct *tsk) if (!context) return; + /* this may generate CONFIG_CHANGE records */ if (!list_empty(&context->killed_trees)) audit_kill_trees(context); @@ -1633,14 +1800,21 @@ void __audit_free(struct task_struct *tsk) * random task_struct that doesn't doesn't have any meaningful data we * need to log via audit_log_exit(). */ - if (tsk == current && !context->dummy && context->in_syscall) { + if (tsk == current && !context->dummy) { context->return_valid = AUDITSC_INVALID; context->return_code = 0; - - audit_filter_syscall(tsk, context); - audit_filter_inodes(tsk, context); - if (context->current_state == AUDIT_STATE_RECORD) - audit_log_exit(); + if (context->context == AUDIT_CTX_SYSCALL) { + audit_filter_syscall(tsk, context); + audit_filter_inodes(tsk, context); + if (context->current_state == AUDIT_STATE_RECORD) + audit_log_exit(); + } else if (context->context == AUDIT_CTX_URING) { + /* TODO: verify this case is real and valid */ + audit_filter_uring(tsk, context); + audit_filter_inodes(tsk, context); + if (context->current_state == AUDIT_STATE_RECORD) + audit_log_uring(context); + } } audit_set_context(tsk, NULL); @@ -1648,6 +1822,131 @@ void __audit_free(struct task_struct *tsk) } /** + * audit_return_fixup - fixup the return codes in the audit_context + * @ctx: the audit_context + * @success: true/false value to indicate if the operation succeeded or not + * @code: operation return code + * + * We need to fixup the return code in the audit logs if the actual return + * codes are later going to be fixed by the arch specific signal handlers. + */ +static void audit_return_fixup(struct audit_context *ctx, + int success, long code) +{ + /* + * This is actually a test for: + * (rc == ERESTARTSYS ) || (rc == ERESTARTNOINTR) || + * (rc == ERESTARTNOHAND) || (rc == ERESTART_RESTARTBLOCK) + * + * but is faster than a bunch of || + */ + if (unlikely(code <= -ERESTARTSYS) && + (code >= -ERESTART_RESTARTBLOCK) && + (code != -ENOIOCTLCMD)) + ctx->return_code = -EINTR; + else + ctx->return_code = code; + ctx->return_valid = (success ? AUDITSC_SUCCESS : AUDITSC_FAILURE); +} + +/** + * __audit_uring_entry - prepare the kernel task's audit context for io_uring + * @op: the io_uring opcode + * + * This is similar to audit_syscall_entry() but is intended for use by io_uring + * operations. 
This function should only ever be called from + * audit_uring_entry() as we rely on the audit context checking present in that + * function. + */ +void __audit_uring_entry(u8 op) +{ + struct audit_context *ctx = audit_context(); + + if (ctx->state == AUDIT_STATE_DISABLED) + return; + + /* + * NOTE: It's possible that we can be called from the process' context + * before it returns to userspace, and before audit_syscall_exit() + * is called. In this case there is not much to do, just record + * the io_uring details and return. + */ + ctx->uring_op = op; + if (ctx->context == AUDIT_CTX_SYSCALL) + return; + + ctx->dummy = !audit_n_rules; + if (!ctx->dummy && ctx->state == AUDIT_STATE_BUILD) + ctx->prio = 0; + + ctx->context = AUDIT_CTX_URING; + ctx->current_state = ctx->state; + ktime_get_coarse_real_ts64(&ctx->ctime); +} + +/** + * __audit_uring_exit - wrap up the kernel task's audit context after io_uring + * @success: true/false value to indicate if the operation succeeded or not + * @code: operation return code + * + * This is similar to audit_syscall_exit() but is intended for use by io_uring + * operations. This function should only ever be called from + * audit_uring_exit() as we rely on the audit context checking present in that + * function. + */ +void __audit_uring_exit(int success, long code) +{ + struct audit_context *ctx = audit_context(); + + if (ctx->context == AUDIT_CTX_SYSCALL) { + /* + * NOTE: See the note in __audit_uring_entry() about the case + * where we may be called from process context before we + * return to userspace via audit_syscall_exit(). In this + * case we simply emit a URINGOP record and bail, the + * normal syscall exit handling will take care of + * everything else. + * It is also worth mentioning that when we are called, + * the current process creds may differ from the creds + * used during the normal syscall processing; keep that + * in mind if/when we move the record generation code. + */ + + /* + * We need to filter on the syscall info here to decide if we + * should emit a URINGOP record. I know it seems odd but this + * solves the problem where users have a filter to block *all* + * syscall records in the "exit" filter; we want to preserve + * the behavior here. 
+ */ + audit_filter_syscall(current, ctx); + if (ctx->current_state != AUDIT_STATE_RECORD) + audit_filter_uring(current, ctx); + audit_filter_inodes(current, ctx); + if (ctx->current_state != AUDIT_STATE_RECORD) + return; + + audit_log_uring(ctx); + return; + } + + /* this may generate CONFIG_CHANGE records */ + if (!list_empty(&ctx->killed_trees)) + audit_kill_trees(ctx); + + /* run through both filters to ensure we set the filterkey properly */ + audit_filter_uring(current, ctx); + audit_filter_inodes(current, ctx); + if (ctx->current_state != AUDIT_STATE_RECORD) + goto out; + audit_return_fixup(ctx, success, code); + audit_log_exit(); + +out: + audit_reset_context(ctx); +} + +/** * __audit_syscall_entry - fill in an audit record at syscall entry * @major: major syscall type (function) * @a1: additional syscall register 1 @@ -1672,7 +1971,12 @@ void __audit_syscall_entry(int major, unsigned long a1, unsigned long a2, if (!audit_enabled || !context) return; - BUG_ON(context->in_syscall || context->name_count); + WARN_ON(context->context != AUDIT_CTX_UNUSED); + WARN_ON(context->name_count); + if (context->context != AUDIT_CTX_UNUSED || context->name_count) { + audit_panic("unrecoverable error in audit_syscall_entry()"); + return; + } state = context->state; if (state == AUDIT_STATE_DISABLED) @@ -1691,10 +1995,8 @@ void __audit_syscall_entry(int major, unsigned long a1, unsigned long a2, context->argv[1] = a2; context->argv[2] = a3; context->argv[3] = a4; - context->serial = 0; - context->in_syscall = 1; + context->context = AUDIT_CTX_SYSCALL; context->current_state = state; - context->ppid = 0; ktime_get_coarse_real_ts64(&context->ctime); } @@ -1711,63 +2013,27 @@ void __audit_syscall_entry(int major, unsigned long a1, unsigned long a2, */ void __audit_syscall_exit(int success, long return_code) { - struct audit_context *context; + struct audit_context *context = audit_context(); - context = audit_context(); - if (!context) - return; + if (!context || context->dummy || + context->context != AUDIT_CTX_SYSCALL) + goto out; + /* this may generate CONFIG_CHANGE records */ if (!list_empty(&context->killed_trees)) audit_kill_trees(context); - if (!context->dummy && context->in_syscall) { - if (success) - context->return_valid = AUDITSC_SUCCESS; - else - context->return_valid = AUDITSC_FAILURE; + /* run through both filters to ensure we set the filterkey properly */ + audit_filter_syscall(current, context); + audit_filter_inodes(current, context); + if (context->current_state < AUDIT_STATE_RECORD) + goto out; - /* - * we need to fix up the return code in the audit logs if the - * actual return codes are later going to be fixed up by the - * arch specific signal handlers - * - * This is actually a test for: - * (rc == ERESTARTSYS ) || (rc == ERESTARTNOINTR) || - * (rc == ERESTARTNOHAND) || (rc == ERESTART_RESTARTBLOCK) - * - * but is faster than a bunch of || - */ - if (unlikely(return_code <= -ERESTARTSYS) && - (return_code >= -ERESTART_RESTARTBLOCK) && - (return_code != -ENOIOCTLCMD)) - context->return_code = -EINTR; - else - context->return_code = return_code; - - audit_filter_syscall(current, context); - audit_filter_inodes(current, context); - if (context->current_state == AUDIT_STATE_RECORD) - audit_log_exit(); - } - - context->in_syscall = 0; - context->prio = context->state == AUDIT_STATE_RECORD ? 
~0ULL : 0; - - audit_free_module(context); - audit_free_names(context); - unroll_tree_refs(context, NULL, 0); - audit_free_aux(context); - context->aux = NULL; - context->aux_pids = NULL; - context->target_pid = 0; - context->target_sid = 0; - context->sockaddr_len = 0; - context->type = 0; - context->fds[0] = -1; - if (context->state != AUDIT_STATE_RECORD) { - kfree(context->filterkey); - context->filterkey = NULL; - } + audit_return_fixup(context, success, return_code); + audit_log_exit(); + +out: + audit_reset_context(context); } static inline void handle_one(const struct inode *inode) @@ -1919,7 +2185,7 @@ void __audit_getname(struct filename *name) struct audit_context *context = audit_context(); struct audit_names *n; - if (!context->in_syscall) + if (context->context == AUDIT_CTX_UNUSED) return; n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); @@ -1991,7 +2257,7 @@ void __audit_inode(struct filename *name, const struct dentry *dentry, struct list_head *list = &audit_filter_list[AUDIT_FILTER_FS]; int i; - if (!context->in_syscall) + if (context->context == AUDIT_CTX_UNUSED) return; rcu_read_lock(); @@ -2109,7 +2375,7 @@ void __audit_inode_child(struct inode *parent, struct list_head *list = &audit_filter_list[AUDIT_FILTER_FS]; int i; - if (!context->in_syscall) + if (context->context == AUDIT_CTX_UNUSED) return; rcu_read_lock(); @@ -2208,7 +2474,7 @@ EXPORT_SYMBOL_GPL(__audit_inode_child); int auditsc_get_stamp(struct audit_context *ctx, struct timespec64 *t, unsigned int *serial) { - if (!ctx->in_syscall) + if (ctx->context == AUDIT_CTX_UNUSED) return 0; if (!ctx->serial) ctx->serial = audit_serial(); @@ -2546,6 +2812,16 @@ void __audit_mmap_fd(int fd, int flags) context->type = AUDIT_MMAP; } +void __audit_openat2_how(struct open_how *how) +{ + struct audit_context *context = audit_context(); + + context->openat2.flags = how->flags; + context->openat2.mode = how->mode; + context->openat2.resolve = how->resolve; + context->type = AUDIT_OPENAT2; +} + void __audit_log_kern_module(char *name) { struct audit_context *context = audit_context(); @@ -2706,8 +2982,7 @@ void audit_seccomp_actions_logged(const char *names, const char *old_names, struct list_head *audit_killed_trees(void) { struct audit_context *ctx = audit_context(); - - if (likely(!ctx || !ctx->in_syscall)) + if (likely(!ctx || ctx->context == AUDIT_CTX_UNUSED)) return NULL; return &ctx->killed_trees; } diff --git a/kernel/bpf/Kconfig b/kernel/bpf/Kconfig index a82d6de86522..d24d518ddd63 100644 --- a/kernel/bpf/Kconfig +++ b/kernel/bpf/Kconfig @@ -64,6 +64,7 @@ config BPF_JIT_DEFAULT_ON config BPF_UNPRIV_DEFAULT_OFF bool "Disable unprivileged BPF by default" + default y depends on BPF_SYSCALL help Disables unprivileged BPF by default by setting the corresponding @@ -72,6 +73,12 @@ config BPF_UNPRIV_DEFAULT_OFF disable it by setting it to 1 (from which no other transition to 0 is possible anymore). + Unprivileged BPF could be used to exploit certain potential + speculative execution side-channel vulnerabilities on unmitigated + affected hardware. + + If you are unsure how to answer this question, answer Y. 
+ source "kernel/bpf/preload/Kconfig" config BPF_LSM diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index 7f33098ca63f..cf6ca339f3cd 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -7,7 +7,7 @@ endif CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy) obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o -obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o +obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o bloom_filter.o obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o obj-$(CONFIG_BPF_SYSCALL) += bpf_local_storage.o bpf_task_storage.o obj-${CONFIG_BPF_LSM} += bpf_inode_storage.o diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 447def540544..c7a5be3bf8be 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -645,7 +645,7 @@ static const struct bpf_iter_seq_info iter_seq_info = { .seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info), }; -static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn, +static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn, void *callback_ctx, u64 flags) { u32 i, key, num_elems = 0; @@ -668,9 +668,8 @@ static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn, val = array->value + array->elem_size * i; num_elems++; key = i; - ret = BPF_CAST_CALL(callback_fn)((u64)(long)map, - (u64)(long)&key, (u64)(long)val, - (u64)(long)callback_ctx, 0); + ret = callback_fn((u64)(long)map, (u64)(long)&key, + (u64)(long)val, (u64)(long)callback_ctx, 0); /* return value: 0 - continue, 1 - stop and return */ if (ret) break; diff --git a/kernel/bpf/bloom_filter.c b/kernel/bpf/bloom_filter.c new file mode 100644 index 000000000000..277a05e9c984 --- /dev/null +++ b/kernel/bpf/bloom_filter.c @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Facebook */ + +#include <linux/bitmap.h> +#include <linux/bpf.h> +#include <linux/btf.h> +#include <linux/err.h> +#include <linux/jhash.h> +#include <linux/random.h> + +#define BLOOM_CREATE_FLAG_MASK \ + (BPF_F_NUMA_NODE | BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK) + +struct bpf_bloom_filter { + struct bpf_map map; + u32 bitset_mask; + u32 hash_seed; + /* If the size of the values in the bloom filter is u32 aligned, + * then it is more performant to use jhash2 as the underlying hash + * function, else we use jhash. This tracks the number of u32s + * in an u32-aligned value size. If the value size is not u32 aligned, + * this will be 0. 
+ */ + u32 aligned_u32_count; + u32 nr_hash_funcs; + unsigned long bitset[]; +}; + +static u32 hash(struct bpf_bloom_filter *bloom, void *value, + u32 value_size, u32 index) +{ + u32 h; + + if (bloom->aligned_u32_count) + h = jhash2(value, bloom->aligned_u32_count, + bloom->hash_seed + index); + else + h = jhash(value, value_size, bloom->hash_seed + index); + + return h & bloom->bitset_mask; +} + +static int bloom_map_peek_elem(struct bpf_map *map, void *value) +{ + struct bpf_bloom_filter *bloom = + container_of(map, struct bpf_bloom_filter, map); + u32 i, h; + + for (i = 0; i < bloom->nr_hash_funcs; i++) { + h = hash(bloom, value, map->value_size, i); + if (!test_bit(h, bloom->bitset)) + return -ENOENT; + } + + return 0; +} + +static int bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags) +{ + struct bpf_bloom_filter *bloom = + container_of(map, struct bpf_bloom_filter, map); + u32 i, h; + + if (flags != BPF_ANY) + return -EINVAL; + + for (i = 0; i < bloom->nr_hash_funcs; i++) { + h = hash(bloom, value, map->value_size, i); + set_bit(h, bloom->bitset); + } + + return 0; +} + +static int bloom_map_pop_elem(struct bpf_map *map, void *value) +{ + return -EOPNOTSUPP; +} + +static int bloom_map_delete_elem(struct bpf_map *map, void *value) +{ + return -EOPNOTSUPP; +} + +static struct bpf_map *bloom_map_alloc(union bpf_attr *attr) +{ + u32 bitset_bytes, bitset_mask, nr_hash_funcs, nr_bits; + int numa_node = bpf_map_attr_numa_node(attr); + struct bpf_bloom_filter *bloom; + + if (!bpf_capable()) + return ERR_PTR(-EPERM); + + if (attr->key_size != 0 || attr->value_size == 0 || + attr->max_entries == 0 || + attr->map_flags & ~BLOOM_CREATE_FLAG_MASK || + !bpf_map_flags_access_ok(attr->map_flags) || + /* The lower 4 bits of map_extra (0xF) specify the number + * of hash functions + */ + (attr->map_extra & ~0xF)) + return ERR_PTR(-EINVAL); + + nr_hash_funcs = attr->map_extra; + if (nr_hash_funcs == 0) + /* Default to using 5 hash functions if unspecified */ + nr_hash_funcs = 5; + + /* For the bloom filter, the optimal bit array size that minimizes the + * false positive probability is n * k / ln(2) where n is the number of + * expected entries in the bloom filter and k is the number of hash + * functions. We use 7 / 5 to approximate 1 / ln(2). + * + * We round this up to the nearest power of two to enable more efficient + * hashing using bitmasks. The bitmask will be the bit array size - 1. + * + * If this overflows a u32, the bit array size will have 2^32 (4 + * GB) bits. 
+ */ + if (check_mul_overflow(attr->max_entries, nr_hash_funcs, &nr_bits) || + check_mul_overflow(nr_bits / 5, (u32)7, &nr_bits) || + nr_bits > (1UL << 31)) { + /* The bit array size is 2^32 bits but to avoid overflowing the + * u32, we use U32_MAX, which will round up to the equivalent + * number of bytes + */ + bitset_bytes = BITS_TO_BYTES(U32_MAX); + bitset_mask = U32_MAX; + } else { + if (nr_bits <= BITS_PER_LONG) + nr_bits = BITS_PER_LONG; + else + nr_bits = roundup_pow_of_two(nr_bits); + bitset_bytes = BITS_TO_BYTES(nr_bits); + bitset_mask = nr_bits - 1; + } + + bitset_bytes = roundup(bitset_bytes, sizeof(unsigned long)); + bloom = bpf_map_area_alloc(sizeof(*bloom) + bitset_bytes, numa_node); + + if (!bloom) + return ERR_PTR(-ENOMEM); + + bpf_map_init_from_attr(&bloom->map, attr); + + bloom->nr_hash_funcs = nr_hash_funcs; + bloom->bitset_mask = bitset_mask; + + /* Check whether the value size is u32-aligned */ + if ((attr->value_size & (sizeof(u32) - 1)) == 0) + bloom->aligned_u32_count = + attr->value_size / sizeof(u32); + + if (!(attr->map_flags & BPF_F_ZERO_SEED)) + bloom->hash_seed = get_random_int(); + + return &bloom->map; +} + +static void bloom_map_free(struct bpf_map *map) +{ + struct bpf_bloom_filter *bloom = + container_of(map, struct bpf_bloom_filter, map); + + bpf_map_area_free(bloom); +} + +static void *bloom_map_lookup_elem(struct bpf_map *map, void *key) +{ + /* The eBPF program should use map_peek_elem instead */ + return ERR_PTR(-EINVAL); +} + +static int bloom_map_update_elem(struct bpf_map *map, void *key, + void *value, u64 flags) +{ + /* The eBPF program should use map_push_elem instead */ + return -EINVAL; +} + +static int bloom_map_check_btf(const struct bpf_map *map, + const struct btf *btf, + const struct btf_type *key_type, + const struct btf_type *value_type) +{ + /* Bloom filter maps are keyless */ + return btf_type_is_void(key_type) ? 0 : -EINVAL; +} + +static int bpf_bloom_map_btf_id; +const struct bpf_map_ops bloom_filter_map_ops = { + .map_meta_equal = bpf_map_meta_equal, + .map_alloc = bloom_map_alloc, + .map_free = bloom_map_free, + .map_push_elem = bloom_map_push_elem, + .map_peek_elem = bloom_map_peek_elem, + .map_pop_elem = bloom_map_pop_elem, + .map_lookup_elem = bloom_map_lookup_elem, + .map_update_elem = bloom_map_update_elem, + .map_delete_elem = bloom_map_delete_elem, + .map_check_btf = bloom_map_check_btf, + .map_btf_name = "bpf_bloom_filter", + .map_btf_id = &bpf_bloom_map_btf_id, +}; diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 9abcc33f02cf..8ecfe4752769 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -93,6 +93,9 @@ const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = { }; const struct bpf_prog_ops bpf_struct_ops_prog_ops = { +#ifdef CONFIG_NET + .test_run = bpf_struct_ops_test_run, +#endif }; static const struct btf_type *module_type; @@ -312,6 +315,20 @@ static int check_zero_holes(const struct btf_type *t, void *data) return 0; } +int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs, + struct bpf_prog *prog, + const struct btf_func_model *model, + void *image, void *image_end) +{ + u32 flags; + + tprogs[BPF_TRAMP_FENTRY].progs[0] = prog; + tprogs[BPF_TRAMP_FENTRY].nr_progs = 1; + flags = model->ret_size > 0 ? 
BPF_TRAMP_F_RET_FENTRY_RET : 0; + return arch_prepare_bpf_trampoline(NULL, image, image_end, + model, flags, tprogs, NULL); +} + static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, void *value, u64 flags) { @@ -323,7 +340,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, struct bpf_tramp_progs *tprogs = NULL; void *udata, *kdata; int prog_fd, err = 0; - void *image; + void *image, *image_end; u32 i; if (flags) @@ -363,12 +380,12 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, udata = &uvalue->data; kdata = &kvalue->data; image = st_map->image; + image_end = st_map->image + PAGE_SIZE; for_each_member(i, t, member) { const struct btf_type *mtype, *ptype; struct bpf_prog *prog; u32 moff; - u32 flags; moff = btf_member_bit_offset(t, member) / 8; ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL); @@ -430,14 +447,9 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, goto reset_unlock; } - tprogs[BPF_TRAMP_FENTRY].progs[0] = prog; - tprogs[BPF_TRAMP_FENTRY].nr_progs = 1; - flags = st_ops->func_models[i].ret_size > 0 ? - BPF_TRAMP_F_RET_FENTRY_RET : 0; - err = arch_prepare_bpf_trampoline(NULL, image, - st_map->image + PAGE_SIZE, - &st_ops->func_models[i], - flags, tprogs, NULL); + err = bpf_struct_ops_prepare_trampoline(tprogs, prog, + &st_ops->func_models[i], + image, image_end); if (err < 0) goto reset_unlock; diff --git a/kernel/bpf/bpf_struct_ops_types.h b/kernel/bpf/bpf_struct_ops_types.h index 066d83ea1c99..5678a9ddf817 100644 --- a/kernel/bpf/bpf_struct_ops_types.h +++ b/kernel/bpf/bpf_struct_ops_types.h @@ -2,6 +2,9 @@ /* internal file - do not include directly */ #ifdef CONFIG_BPF_JIT +#ifdef CONFIG_NET +BPF_STRUCT_OPS_TYPE(bpf_dummy_ops) +#endif #ifdef CONFIG_INET #include <net/tcp.h> BPF_STRUCT_OPS_TYPE(tcp_congestion_ops) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index dfe61df4f974..dbc3ad07e21b 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -281,6 +281,7 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = { [BTF_KIND_VAR] = "VAR", [BTF_KIND_DATASEC] = "DATASEC", [BTF_KIND_FLOAT] = "FLOAT", + [BTF_KIND_DECL_TAG] = "DECL_TAG", }; const char *btf_type_str(const struct btf_type *t) @@ -459,6 +460,17 @@ static bool btf_type_is_datasec(const struct btf_type *t) return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC; } +static bool btf_type_is_decl_tag(const struct btf_type *t) +{ + return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG; +} + +static bool btf_type_is_decl_tag_target(const struct btf_type *t) +{ + return btf_type_is_func(t) || btf_type_is_struct(t) || + btf_type_is_var(t) || btf_type_is_typedef(t); +} + u32 btf_nr_types(const struct btf *btf) { u32 total = 0; @@ -537,6 +549,7 @@ const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf, static bool btf_type_is_resolve_source_only(const struct btf_type *t) { return btf_type_is_var(t) || + btf_type_is_decl_tag(t) || btf_type_is_datasec(t); } @@ -563,6 +576,7 @@ static bool btf_type_needs_resolve(const struct btf_type *t) btf_type_is_struct(t) || btf_type_is_array(t) || btf_type_is_var(t) || + btf_type_is_decl_tag(t) || btf_type_is_datasec(t); } @@ -616,6 +630,11 @@ static const struct btf_var *btf_type_var(const struct btf_type *t) return (const struct btf_var *)(t + 1); } +static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t) +{ + return (const struct btf_decl_tag *)(t + 1); +} + static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t) { 
return kind_ops[BTF_INFO_KIND(t->info)]; @@ -3801,6 +3820,110 @@ static const struct btf_kind_operations float_ops = { .show = btf_df_show, }; +static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env, + const struct btf_type *t, + u32 meta_left) +{ + const struct btf_decl_tag *tag; + u32 meta_needed = sizeof(*tag); + s32 component_idx; + const char *value; + + if (meta_left < meta_needed) { + btf_verifier_log_basic(env, t, + "meta_left:%u meta_needed:%u", + meta_left, meta_needed); + return -EINVAL; + } + + value = btf_name_by_offset(env->btf, t->name_off); + if (!value || !value[0]) { + btf_verifier_log_type(env, t, "Invalid value"); + return -EINVAL; + } + + if (btf_type_vlen(t)) { + btf_verifier_log_type(env, t, "vlen != 0"); + return -EINVAL; + } + + if (btf_type_kflag(t)) { + btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); + return -EINVAL; + } + + component_idx = btf_type_decl_tag(t)->component_idx; + if (component_idx < -1) { + btf_verifier_log_type(env, t, "Invalid component_idx"); + return -EINVAL; + } + + btf_verifier_log_type(env, t, NULL); + + return meta_needed; +} + +static int btf_decl_tag_resolve(struct btf_verifier_env *env, + const struct resolve_vertex *v) +{ + const struct btf_type *next_type; + const struct btf_type *t = v->t; + u32 next_type_id = t->type; + struct btf *btf = env->btf; + s32 component_idx; + u32 vlen; + + next_type = btf_type_by_id(btf, next_type_id); + if (!next_type || !btf_type_is_decl_tag_target(next_type)) { + btf_verifier_log_type(env, v->t, "Invalid type_id"); + return -EINVAL; + } + + if (!env_type_is_resolve_sink(env, next_type) && + !env_type_is_resolved(env, next_type_id)) + return env_stack_push(env, next_type, next_type_id); + + component_idx = btf_type_decl_tag(t)->component_idx; + if (component_idx != -1) { + if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) { + btf_verifier_log_type(env, v->t, "Invalid component_idx"); + return -EINVAL; + } + + if (btf_type_is_struct(next_type)) { + vlen = btf_type_vlen(next_type); + } else { + /* next_type should be a function */ + next_type = btf_type_by_id(btf, next_type->type); + vlen = btf_type_vlen(next_type); + } + + if ((u32)component_idx >= vlen) { + btf_verifier_log_type(env, v->t, "Invalid component_idx"); + return -EINVAL; + } + } + + env_stack_pop_resolved(env, next_type_id, 0); + + return 0; +} + +static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t) +{ + btf_verifier_log(env, "type=%u component_idx=%d", t->type, + btf_type_decl_tag(t)->component_idx); +} + +static const struct btf_kind_operations decl_tag_ops = { + .check_meta = btf_decl_tag_check_meta, + .resolve = btf_decl_tag_resolve, + .check_member = btf_df_check_member, + .check_kflag_member = btf_df_check_kflag_member, + .log_details = btf_decl_tag_log, + .show = btf_df_show, +}; + static int btf_func_proto_check(struct btf_verifier_env *env, const struct btf_type *t) { @@ -3935,6 +4058,7 @@ static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = { [BTF_KIND_VAR] = &var_ops, [BTF_KIND_DATASEC] = &datasec_ops, [BTF_KIND_FLOAT] = &float_ops, + [BTF_KIND_DECL_TAG] = &decl_tag_ops, }; static s32 btf_check_meta(struct btf_verifier_env *env, @@ -4019,6 +4143,10 @@ static bool btf_resolve_valid(struct btf_verifier_env *env, return !btf_resolved_type_id(btf, type_id) && !btf_resolved_type_size(btf, type_id); + if (btf_type_is_decl_tag(t)) + return btf_resolved_type_id(btf, type_id) && + !btf_resolved_type_size(btf, type_id); + if (btf_type_is_modifier(t) 
|| btf_type_is_ptr(t) || btf_type_is_var(t)) { t = btf_type_id_resolve(btf, &type_id); @@ -6215,3 +6343,58 @@ const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = { }; BTF_ID_LIST_GLOBAL_SINGLE(btf_task_struct_ids, struct, task_struct) + +/* BTF ID set registration API for modules */ + +struct kfunc_btf_id_list { + struct list_head list; + struct mutex mutex; +}; + +#ifdef CONFIG_DEBUG_INFO_BTF_MODULES + +void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l, + struct kfunc_btf_id_set *s) +{ + mutex_lock(&l->mutex); + list_add(&s->list, &l->list); + mutex_unlock(&l->mutex); +} +EXPORT_SYMBOL_GPL(register_kfunc_btf_id_set); + +void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l, + struct kfunc_btf_id_set *s) +{ + mutex_lock(&l->mutex); + list_del_init(&s->list); + mutex_unlock(&l->mutex); +} +EXPORT_SYMBOL_GPL(unregister_kfunc_btf_id_set); + +bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id, + struct module *owner) +{ + struct kfunc_btf_id_set *s; + + if (!owner) + return false; + mutex_lock(&klist->mutex); + list_for_each_entry(s, &klist->list, list) { + if (s->owner == owner && btf_id_set_contains(s->set, kfunc_id)) { + mutex_unlock(&klist->mutex); + return true; + } + } + mutex_unlock(&klist->mutex); + return false; +} + +#endif + +#define DEFINE_KFUNC_BTF_ID_LIST(name) \ + struct kfunc_btf_id_list name = { LIST_HEAD_INIT(name.list), \ + __MUTEX_INITIALIZER(name.mutex) }; \ + EXPORT_SYMBOL_GPL(name) + +DEFINE_KFUNC_BTF_ID_LIST(bpf_tcp_ca_kfunc_list); +DEFINE_KFUNC_BTF_ID_LIST(prog_test_kfunc_list); diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 03145d45e3d5..2ca643af9a54 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -430,10 +430,10 @@ static struct bpf_prog_list *find_attach_entry(struct list_head *progs, * Exactly one of @prog or @link can be non-null. * Must be called with cgroup_mutex held. */ -int __cgroup_bpf_attach(struct cgroup *cgrp, - struct bpf_prog *prog, struct bpf_prog *replace_prog, - struct bpf_cgroup_link *link, - enum bpf_attach_type type, u32 flags) +static int __cgroup_bpf_attach(struct cgroup *cgrp, + struct bpf_prog *prog, struct bpf_prog *replace_prog, + struct bpf_cgroup_link *link, + enum bpf_attach_type type, u32 flags) { u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)); struct bpf_prog *old_prog = NULL; @@ -523,6 +523,20 @@ cleanup: return err; } +static int cgroup_bpf_attach(struct cgroup *cgrp, + struct bpf_prog *prog, struct bpf_prog *replace_prog, + struct bpf_cgroup_link *link, + enum bpf_attach_type type, + u32 flags) +{ + int ret; + + mutex_lock(&cgroup_mutex); + ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags); + mutex_unlock(&cgroup_mutex); + return ret; +} + /* Swap updated BPF program for given link in effective program arrays across * all descendant cgroups. This function is guaranteed to succeed. */ @@ -672,14 +686,14 @@ static struct bpf_prog_list *find_detach_entry(struct list_head *progs, * propagate the change to descendants * @cgrp: The cgroup which descendants to traverse * @prog: A program to detach or NULL - * @prog: A link to detach or NULL + * @link: A link to detach or NULL * @type: Type of detach operation * * At most one of @prog or @link can be non-NULL. * Must be called with cgroup_mutex held. 
*/ -int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, - struct bpf_cgroup_link *link, enum bpf_attach_type type) +static int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, + struct bpf_cgroup_link *link, enum bpf_attach_type type) { enum cgroup_bpf_attach_type atype; struct bpf_prog *old_prog; @@ -730,9 +744,20 @@ cleanup: return err; } +static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, + enum bpf_attach_type type) +{ + int ret; + + mutex_lock(&cgroup_mutex); + ret = __cgroup_bpf_detach(cgrp, prog, NULL, type); + mutex_unlock(&cgroup_mutex); + return ret; +} + /* Must be called with cgroup_mutex held to avoid races. */ -int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, - union bpf_attr __user *uattr) +static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, + union bpf_attr __user *uattr) { __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); enum bpf_attach_type type = attr->query.attach_type; @@ -789,6 +814,17 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, return ret; } +static int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + int ret; + + mutex_lock(&cgroup_mutex); + ret = __cgroup_bpf_query(cgrp, attr, uattr); + mutex_unlock(&cgroup_mutex); + return ret; +} + int cgroup_bpf_prog_attach(const union bpf_attr *attr, enum bpf_prog_type ptype, struct bpf_prog *prog) { diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 6e3ae90ad107..2405e39d800f 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -32,6 +32,7 @@ #include <linux/perf_event.h> #include <linux/extable.h> #include <linux/log2.h> +#include <linux/bpf_verifier.h> #include <asm/barrier.h> #include <asm/unaligned.h> @@ -389,6 +390,13 @@ static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old, i = end_new; insn = prog->insnsi + end_old; } + if (bpf_pseudo_func(insn)) { + ret = bpf_adj_delta_to_imm(insn, pos, end_old, + end_new, i, probe_pass); + if (ret) + return ret; + continue; + } code = insn->code; if ((BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) || @@ -2263,6 +2271,9 @@ static void bpf_prog_free_deferred(struct work_struct *work) int i; aux = container_of(work, struct bpf_prog_aux, work); +#ifdef CONFIG_BPF_SYSCALL + bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab); +#endif bpf_free_used_maps(aux); bpf_free_used_btfs(aux); if (bpf_prog_is_dev_bound(aux)) @@ -2365,6 +2376,11 @@ const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void) return NULL; } +const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void) +{ + return NULL; +} + u64 __weak bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 32471ba02708..d29af9988f37 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -668,7 +668,7 @@ static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem, (void *(*)(struct bpf_map *map, void *key))NULL)); - *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem)); + *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem); *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1); *insn++ = BPF_ALU64_IMM(BPF_ADD, ret, offsetof(struct htab_elem, key) + @@ -709,7 +709,7 @@ static int htab_lru_map_gen_lookup(struct bpf_map *map, BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem, (void 
*(*)(struct bpf_map *map, void *key))NULL)); - *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem)); + *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem); *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4); *insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret, offsetof(struct htab_elem, lru_node) + @@ -2049,7 +2049,7 @@ static const struct bpf_iter_seq_info iter_seq_info = { .seq_priv_size = sizeof(struct bpf_iter_seq_hash_map_info), }; -static int bpf_for_each_hash_elem(struct bpf_map *map, void *callback_fn, +static int bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn, void *callback_ctx, u64 flags) { struct bpf_htab *htab = container_of(map, struct bpf_htab, map); @@ -2089,9 +2089,8 @@ static int bpf_for_each_hash_elem(struct bpf_map *map, void *callback_fn, val = elem->key + roundup_key_size; } num_elems++; - ret = BPF_CAST_CALL(callback_fn)((u64)(long)map, - (u64)(long)key, (u64)(long)val, - (u64)(long)callback_ctx, 0); + ret = callback_fn((u64)(long)map, (u64)(long)key, + (u64)(long)val, (u64)(long)callback_ctx, 0); /* return value: 0 - continue, 1 - stop and return */ if (ret) { rcu_read_unlock(); @@ -2397,7 +2396,7 @@ static int htab_of_map_gen_lookup(struct bpf_map *map, BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem, (void *(*)(struct bpf_map *map, void *key))NULL)); - *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem)); + *insn++ = BPF_EMIT_CALL(__htab_map_lookup_elem); *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2); *insn++ = BPF_ALU64_IMM(BPF_ADD, ret, offsetof(struct htab_elem, key) + diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 9aabf84afd4b..1ffd469c217f 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -979,15 +979,13 @@ out: return err; } -#define MAX_SNPRINTF_VARARGS 12 - BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt, const void *, data, u32, data_len) { int err, num_args; u32 *bin_args; - if (data_len % 8 || data_len > MAX_SNPRINTF_VARARGS * 8 || + if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 || (data_len && !data)) return -EINVAL; num_args = data_len / 8; @@ -1058,7 +1056,7 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer) struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer); struct bpf_map *map = t->map; void *value = t->value; - void *callback_fn; + bpf_callback_t callback_fn; void *key; u32 idx; @@ -1083,8 +1081,7 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer) key = value - round_up(map->key_size, 8); } - BPF_CAST_CALL(callback_fn)((u64)(long)map, (u64)(long)key, - (u64)(long)value, 0, 0); + callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0); /* The verifier checked that return value is zero. 
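The BPF_CAST_CALL() removals in these hashtab and helpers hunks, and the BPF_CALL_IMM() conversions in the core and verifier hunks further down, lean on two small definitions that live in header changes outside this kernel/ diffstat. Roughly, and paraphrased rather than quoted:

/* a properly typed callback, so calls such as callback_fn(...) above need no cast */
typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);

/* turns a kernel address into the relative immediate stored in a BPF call insn */
#define BPF_CALL_IMM(x)	((void *)(x) - (void *)__bpf_call_base)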
*/ this_cpu_write(hrtimer_running, NULL); @@ -1437,6 +1434,8 @@ bpf_base_func_proto(enum bpf_func_id func_id) return &bpf_snprintf_proto; case BPF_FUNC_task_pt_regs: return &bpf_task_pt_regs_proto; + case BPF_FUNC_trace_vprintk: + return bpf_get_trace_vprintk_proto(); default: return NULL; } diff --git a/kernel/bpf/preload/.gitignore b/kernel/bpf/preload/.gitignore index 856a4c5ad0dd..9452322902a5 100644 --- a/kernel/bpf/preload/.gitignore +++ b/kernel/bpf/preload/.gitignore @@ -1,4 +1,2 @@ -/FEATURE-DUMP.libbpf -/bpf_helper_defs.h -/feature +/libbpf /bpf_preload_umd diff --git a/kernel/bpf/preload/Makefile b/kernel/bpf/preload/Makefile index 1951332dd15f..1400ac58178e 100644 --- a/kernel/bpf/preload/Makefile +++ b/kernel/bpf/preload/Makefile @@ -1,21 +1,35 @@ # SPDX-License-Identifier: GPL-2.0 LIBBPF_SRCS = $(srctree)/tools/lib/bpf/ -LIBBPF_A = $(obj)/libbpf.a -LIBBPF_OUT = $(abspath $(obj)) +LIBBPF_OUT = $(abspath $(obj))/libbpf +LIBBPF_A = $(LIBBPF_OUT)/libbpf.a +LIBBPF_DESTDIR = $(LIBBPF_OUT) +LIBBPF_INCLUDE = $(LIBBPF_DESTDIR)/include # Although not in use by libbpf's Makefile, set $(O) so that the "dummy" test # in tools/scripts/Makefile.include always succeeds when building the kernel # with $(O) pointing to a relative path, as in "make O=build bindeb-pkg". -$(LIBBPF_A): - $(Q)$(MAKE) -C $(LIBBPF_SRCS) O=$(LIBBPF_OUT)/ OUTPUT=$(LIBBPF_OUT)/ $(LIBBPF_OUT)/libbpf.a +$(LIBBPF_A): | $(LIBBPF_OUT) + $(Q)$(MAKE) -C $(LIBBPF_SRCS) O=$(LIBBPF_OUT)/ OUTPUT=$(LIBBPF_OUT)/ \ + DESTDIR=$(LIBBPF_DESTDIR) prefix= \ + $(LIBBPF_OUT)/libbpf.a install_headers + +libbpf_hdrs: $(LIBBPF_A) + +.PHONY: libbpf_hdrs + +$(LIBBPF_OUT): + $(call msg,MKDIR,$@) + $(Q)mkdir -p $@ userccflags += -I $(srctree)/tools/include/ -I $(srctree)/tools/include/uapi \ - -I $(srctree)/tools/lib/ -Wno-unused-result + -I $(LIBBPF_INCLUDE) -Wno-unused-result userprogs := bpf_preload_umd -clean-files := $(userprogs) bpf_helper_defs.h FEATURE-DUMP.libbpf staticobjs/ feature/ +clean-files := libbpf/ + +$(obj)/iterators/iterators.o: | libbpf_hdrs bpf_preload_umd-objs := iterators/iterators.o bpf_preload_umd-userldlibs := $(LIBBPF_A) -lelf -lz diff --git a/kernel/bpf/preload/iterators/Makefile b/kernel/bpf/preload/iterators/Makefile index 28fa8c1440f4..b8bd60511227 100644 --- a/kernel/bpf/preload/iterators/Makefile +++ b/kernel/bpf/preload/iterators/Makefile @@ -1,18 +1,26 @@ # SPDX-License-Identifier: GPL-2.0 OUTPUT := .output +abs_out := $(abspath $(OUTPUT)) + CLANG ?= clang LLC ?= llc LLVM_STRIP ?= llvm-strip + +TOOLS_PATH := $(abspath ../../../../tools) +BPFTOOL_SRC := $(TOOLS_PATH)/bpf/bpftool +BPFTOOL_OUTPUT := $(abs_out)/bpftool DEFAULT_BPFTOOL := $(OUTPUT)/sbin/bpftool BPFTOOL ?= $(DEFAULT_BPFTOOL) -LIBBPF_SRC := $(abspath ../../../../tools/lib/bpf) -BPFOBJ := $(OUTPUT)/libbpf.a -BPF_INCLUDE := $(OUTPUT) -INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../../../tools/lib) \ - -I$(abspath ../../../../tools/include/uapi) + +LIBBPF_SRC := $(TOOLS_PATH)/lib/bpf +LIBBPF_OUTPUT := $(abs_out)/libbpf +LIBBPF_DESTDIR := $(LIBBPF_OUTPUT) +LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)/include +BPFOBJ := $(LIBBPF_OUTPUT)/libbpf.a + +INCLUDES := -I$(OUTPUT) -I$(LIBBPF_INCLUDE) -I$(TOOLS_PATH)/include/uapi CFLAGS := -g -Wall -abs_out := $(abspath $(OUTPUT)) ifeq ($(V),1) Q = msg = @@ -44,14 +52,18 @@ $(OUTPUT)/iterators.bpf.o: iterators.bpf.c $(BPFOBJ) | $(OUTPUT) -c $(filter %.c,$^) -o $@ && \ $(LLVM_STRIP) -g $@ -$(OUTPUT): +$(OUTPUT) $(LIBBPF_OUTPUT) $(BPFTOOL_OUTPUT): $(call msg,MKDIR,$@) - $(Q)mkdir -p $(OUTPUT) + $(Q)mkdir -p $@ 
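The BPF_FUNC_trace_vprintk case added to bpf_base_func_proto() in the helpers.c hunk above is what makes the new helper reachable from programs. A BPF-program-side sketch, assuming a libbpf toolchain new enough to declare bpf_trace_vprintk(); the section name and values are illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_nanosleep")
int vprintk_demo(void *ctx)
{
	static const char fmt[] = "a=%llu b=%llu c=%llu d=%llu\n";
	__u64 args[] = { 1, 2, 3, 4 };

	/* unlike bpf_trace_printk(), the arguments travel in a u64 array,
	 * so more than three of them are allowed
	 */
	bpf_trace_vprintk(fmt, sizeof(fmt), args, sizeof(args));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";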
-$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT) +$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUTPUT) $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) \ - OUTPUT=$(abspath $(dir $@))/ $(abspath $@) + OUTPUT=$(abspath $(dir $@))/ prefix= \ + DESTDIR=$(LIBBPF_DESTDIR) $(abspath $@) install_headers -$(DEFAULT_BPFTOOL): - $(Q)$(MAKE) $(submake_extras) -C ../../../../tools/bpf/bpftool \ - prefix= OUTPUT=$(abs_out)/ DESTDIR=$(abs_out) install +$(DEFAULT_BPFTOOL): $(BPFOBJ) | $(BPFTOOL_OUTPUT) + $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOL_SRC) \ + OUTPUT=$(BPFTOOL_OUTPUT)/ \ + LIBBPF_OUTPUT=$(LIBBPF_OUTPUT)/ \ + LIBBPF_DESTDIR=$(LIBBPF_DESTDIR)/ \ + prefix= DESTDIR=$(abs_out)/ install-bin diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 1cad6979a0d0..50f96ea4452a 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -199,7 +199,8 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key, err = bpf_fd_reuseport_array_update_elem(map, key, value, flags); } else if (map->map_type == BPF_MAP_TYPE_QUEUE || - map->map_type == BPF_MAP_TYPE_STACK) { + map->map_type == BPF_MAP_TYPE_STACK || + map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { err = map->ops->map_push_elem(map, value, flags); } else { rcu_read_lock(); @@ -238,7 +239,8 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value, } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { err = bpf_fd_reuseport_array_lookup_elem(map, key, value); } else if (map->map_type == BPF_MAP_TYPE_QUEUE || - map->map_type == BPF_MAP_TYPE_STACK) { + map->map_type == BPF_MAP_TYPE_STACK || + map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { err = map->ops->map_peek_elem(map, value); } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { /* struct_ops map requires directly updating "value" */ @@ -348,6 +350,7 @@ void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr) map->max_entries = attr->max_entries; map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags); map->numa_node = bpf_map_attr_numa_node(attr); + map->map_extra = attr->map_extra; } static int bpf_map_alloc_id(struct bpf_map *map) @@ -555,6 +558,7 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) "value_size:\t%u\n" "max_entries:\t%u\n" "map_flags:\t%#x\n" + "map_extra:\t%#llx\n" "memlock:\t%lu\n" "map_id:\t%u\n" "frozen:\t%u\n", @@ -563,6 +567,7 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) map->value_size, map->max_entries, map->map_flags, + (unsigned long long)map->map_extra, bpf_map_memory_footprint(map), map->id, READ_ONCE(map->frozen)); @@ -812,7 +817,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf, return ret; } -#define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id +#define BPF_MAP_CREATE_LAST_FIELD map_extra /* called via syscall */ static int map_create(union bpf_attr *attr) { @@ -833,6 +838,10 @@ static int map_create(union bpf_attr *attr) return -EINVAL; } + if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER && + attr->map_extra != 0) + return -EINVAL; + f_flags = bpf_get_file_flag(attr->map_flags); if (f_flags < 0) return f_flags; @@ -1082,6 +1091,14 @@ static int map_lookup_elem(union bpf_attr *attr) if (!value) goto free_key; + if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { + if (copy_from_user(value, uvalue, value_size)) + err = -EFAULT; + else + err = bpf_map_copy_value(map, key, value, attr->flags); + goto free_value; + } + err = bpf_map_copy_value(map, 
key, value, attr->flags); if (err) goto free_value; @@ -1807,8 +1824,14 @@ static int bpf_prog_release(struct inode *inode, struct file *filp) return 0; } +struct bpf_prog_kstats { + u64 nsecs; + u64 cnt; + u64 misses; +}; + static void bpf_prog_get_stats(const struct bpf_prog *prog, - struct bpf_prog_stats *stats) + struct bpf_prog_kstats *stats) { u64 nsecs = 0, cnt = 0, misses = 0; int cpu; @@ -1821,9 +1844,9 @@ static void bpf_prog_get_stats(const struct bpf_prog *prog, st = per_cpu_ptr(prog->stats, cpu); do { start = u64_stats_fetch_begin_irq(&st->syncp); - tnsecs = st->nsecs; - tcnt = st->cnt; - tmisses = st->misses; + tnsecs = u64_stats_read(&st->nsecs); + tcnt = u64_stats_read(&st->cnt); + tmisses = u64_stats_read(&st->misses); } while (u64_stats_fetch_retry_irq(&st->syncp, start)); nsecs += tnsecs; cnt += tcnt; @@ -1839,7 +1862,7 @@ static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) { const struct bpf_prog *prog = filp->private_data; char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; - struct bpf_prog_stats stats; + struct bpf_prog_kstats stats; bpf_prog_get_stats(prog, &stats); bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); @@ -1851,7 +1874,8 @@ static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) "prog_id:\t%u\n" "run_time_ns:\t%llu\n" "run_cnt:\t%llu\n" - "recursion_misses:\t%llu\n", + "recursion_misses:\t%llu\n" + "verified_insns:\t%u\n", prog->type, prog->jited, prog_tag, @@ -1859,7 +1883,8 @@ static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) prog->aux->id, stats.nsecs, stats.cnt, - stats.misses); + stats.misses, + prog->aux->verified_insns); } #endif @@ -3578,7 +3603,7 @@ static int bpf_prog_get_info_by_fd(struct file *file, struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); struct bpf_prog_info info; u32 info_len = attr->info.info_len; - struct bpf_prog_stats stats; + struct bpf_prog_kstats stats; char __user *uinsns; u32 ulen; int err; @@ -3628,6 +3653,8 @@ static int bpf_prog_get_info_by_fd(struct file *file, info.run_cnt = stats.cnt; info.recursion_misses = stats.misses; + info.verified_insns = prog->aux->verified_insns; + if (!bpf_capable()) { info.jited_prog_len = 0; info.xlated_prog_len = 0; @@ -3874,6 +3901,7 @@ static int bpf_map_get_info_by_fd(struct file *file, info.value_size = map->value_size; info.max_entries = map->max_entries; info.map_flags = map->map_flags; + info.map_extra = map->map_extra; memcpy(info.name, map->name, sizeof(map->name)); if (map->btf) { @@ -4756,6 +4784,31 @@ static const struct bpf_func_proto bpf_sys_close_proto = { .arg1_type = ARG_ANYTHING, }; +BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res) +{ + if (flags) + return -EINVAL; + + if (name_sz <= 1 || name[name_sz - 1]) + return -EINVAL; + + if (!bpf_dump_raw_ok(current_cred())) + return -EPERM; + + *res = kallsyms_lookup_name(name); + return *res ? 
0 : -ENOENT; +} + +const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = { + .func = bpf_kallsyms_lookup_name, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_CONST_SIZE, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_PTR_TO_LONG, +}; + static const struct bpf_func_proto * syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@ -4766,6 +4819,8 @@ syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_btf_find_by_name_kind_proto; case BPF_FUNC_sys_close: return &bpf_sys_close_proto; + case BPF_FUNC_kallsyms_lookup_name: + return &bpf_kallsyms_lookup_name_proto; default: return tracing_prog_func_proto(func_id, prog); } diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index fe1e857324e6..e98de5e73ba5 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -10,6 +10,7 @@ #include <linux/rcupdate_trace.h> #include <linux/rcupdate_wait.h> #include <linux/module.h> +#include <linux/static_call.h> /* dummy _ops. The verifier will operate on target program's ops. */ const struct bpf_verifier_ops bpf_extension_verifier_ops = { @@ -526,7 +527,7 @@ out: } #define NO_START_TIME 1 -static u64 notrace bpf_prog_start_time(void) +static __always_inline u64 notrace bpf_prog_start_time(void) { u64 start = NO_START_TIME; @@ -544,7 +545,7 @@ static void notrace inc_misses_counter(struct bpf_prog *prog) stats = this_cpu_ptr(prog->stats); u64_stats_update_begin(&stats->syncp); - stats->misses++; + u64_stats_inc(&stats->misses); u64_stats_update_end(&stats->syncp); } @@ -585,11 +586,13 @@ static void notrace update_prog_stats(struct bpf_prog *prog, * Hence check that 'start' is valid. */ start > NO_START_TIME) { + unsigned long flags; + stats = this_cpu_ptr(prog->stats); - u64_stats_update_begin(&stats->syncp); - stats->cnt++; - stats->nsecs += sched_clock() - start; - u64_stats_update_end(&stats->syncp); + flags = u64_stats_update_begin_irqsave(&stats->syncp); + u64_stats_inc(&stats->cnt); + u64_stats_add(&stats->nsecs, sched_clock() - start); + u64_stats_update_end_irqrestore(&stats->syncp, flags); } } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index de006552be8a..890b3ec375a3 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -240,12 +240,6 @@ static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn) insn->src_reg == BPF_PSEUDO_KFUNC_CALL; } -static bool bpf_pseudo_func(const struct bpf_insn *insn) -{ - return insn->code == (BPF_LD | BPF_IMM | BPF_DW) && - insn->src_reg == BPF_PSEUDO_FUNC; -} - struct bpf_call_arg_meta { struct bpf_map *map_ptr; bool raw_mode; @@ -612,6 +606,20 @@ static const char *kernel_type_name(const struct btf* btf, u32 id) return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off); } +/* The reg state of a pointer or a bounded scalar was saved when + * it was spilled to the stack. 
+ */ +static bool is_spilled_reg(const struct bpf_stack_state *stack) +{ + return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; +} + +static void scrub_spilled_slot(u8 *stype) +{ + if (*stype != STACK_INVALID) + *stype = STACK_MISC; +} + static void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state) { @@ -717,7 +725,7 @@ static void print_verifier_state(struct bpf_verifier_env *env, continue; verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); print_liveness(env, state->stack[i].spilled_ptr.live); - if (state->stack[i].slot_type[0] == STACK_SPILL) { + if (is_spilled_reg(&state->stack[i])) { reg = &state->stack[i].spilled_ptr; t = reg->type; verbose(env, "=%s", reg_type_str[t]); @@ -1406,12 +1414,12 @@ static void __reg_combine_32_into_64(struct bpf_reg_state *reg) static bool __reg64_bound_s32(s64 a) { - return a > S32_MIN && a < S32_MAX; + return a >= S32_MIN && a <= S32_MAX; } static bool __reg64_bound_u32(u64 a) { - return a > U32_MIN && a < U32_MAX; + return a >= U32_MIN && a <= U32_MAX; } static void __reg_combine_64_into_32(struct bpf_reg_state *reg) @@ -1626,52 +1634,168 @@ static int add_subprog(struct bpf_verifier_env *env, int off) return env->subprog_cnt - 1; } +#define MAX_KFUNC_DESCS 256 +#define MAX_KFUNC_BTFS 256 + struct bpf_kfunc_desc { struct btf_func_model func_model; u32 func_id; s32 imm; + u16 offset; +}; + +struct bpf_kfunc_btf { + struct btf *btf; + struct module *module; + u16 offset; }; -#define MAX_KFUNC_DESCS 256 struct bpf_kfunc_desc_tab { struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS]; u32 nr_descs; }; -static int kfunc_desc_cmp_by_id(const void *a, const void *b) +struct bpf_kfunc_btf_tab { + struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS]; + u32 nr_descs; +}; + +static int kfunc_desc_cmp_by_id_off(const void *a, const void *b) { const struct bpf_kfunc_desc *d0 = a; const struct bpf_kfunc_desc *d1 = b; /* func_id is not greater than BTF_MAX_TYPE */ - return d0->func_id - d1->func_id; + return d0->func_id - d1->func_id ?: d0->offset - d1->offset; +} + +static int kfunc_btf_cmp_by_off(const void *a, const void *b) +{ + const struct bpf_kfunc_btf *d0 = a; + const struct bpf_kfunc_btf *d1 = b; + + return d0->offset - d1->offset; } static const struct bpf_kfunc_desc * -find_kfunc_desc(const struct bpf_prog *prog, u32 func_id) +find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset) { struct bpf_kfunc_desc desc = { .func_id = func_id, + .offset = offset, }; struct bpf_kfunc_desc_tab *tab; tab = prog->aux->kfunc_tab; return bsearch(&desc, tab->descs, tab->nr_descs, - sizeof(tab->descs[0]), kfunc_desc_cmp_by_id); + sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off); } -static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id) +static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env, + s16 offset, struct module **btf_modp) +{ + struct bpf_kfunc_btf kf_btf = { .offset = offset }; + struct bpf_kfunc_btf_tab *tab; + struct bpf_kfunc_btf *b; + struct module *mod; + struct btf *btf; + int btf_fd; + + tab = env->prog->aux->kfunc_btf_tab; + b = bsearch(&kf_btf, tab->descs, tab->nr_descs, + sizeof(tab->descs[0]), kfunc_btf_cmp_by_off); + if (!b) { + if (tab->nr_descs == MAX_KFUNC_BTFS) { + verbose(env, "too many different module BTFs\n"); + return ERR_PTR(-E2BIG); + } + + if (bpfptr_is_null(env->fd_array)) { + verbose(env, "kfunc offset > 0 without fd_array is invalid\n"); + return ERR_PTR(-EPROTO); + } + + if (copy_from_bpfptr_offset(&btf_fd, env->fd_array, + offset * sizeof(btf_fd), + sizeof(btf_fd))) + 
return ERR_PTR(-EFAULT); + + btf = btf_get_by_fd(btf_fd); + if (IS_ERR(btf)) { + verbose(env, "invalid module BTF fd specified\n"); + return btf; + } + + if (!btf_is_module(btf)) { + verbose(env, "BTF fd for kfunc is not a module BTF\n"); + btf_put(btf); + return ERR_PTR(-EINVAL); + } + + mod = btf_try_get_module(btf); + if (!mod) { + btf_put(btf); + return ERR_PTR(-ENXIO); + } + + b = &tab->descs[tab->nr_descs++]; + b->btf = btf; + b->module = mod; + b->offset = offset; + + sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), + kfunc_btf_cmp_by_off, NULL); + } + if (btf_modp) + *btf_modp = b->module; + return b->btf; +} + +void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab) +{ + if (!tab) + return; + + while (tab->nr_descs--) { + module_put(tab->descs[tab->nr_descs].module); + btf_put(tab->descs[tab->nr_descs].btf); + } + kfree(tab); +} + +static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, + u32 func_id, s16 offset, + struct module **btf_modp) +{ + if (offset) { + if (offset < 0) { + /* In the future, this can be allowed to increase limit + * of fd index into fd_array, interpreted as u16. + */ + verbose(env, "negative offset disallowed for kernel module function call\n"); + return ERR_PTR(-EINVAL); + } + + return __find_kfunc_desc_btf(env, offset, btf_modp); + } + return btf_vmlinux ?: ERR_PTR(-ENOENT); +} + +static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) { const struct btf_type *func, *func_proto; + struct bpf_kfunc_btf_tab *btf_tab; struct bpf_kfunc_desc_tab *tab; struct bpf_prog_aux *prog_aux; struct bpf_kfunc_desc *desc; const char *func_name; + struct btf *desc_btf; unsigned long addr; int err; prog_aux = env->prog->aux; tab = prog_aux->kfunc_tab; + btf_tab = prog_aux->kfunc_btf_tab; if (!tab) { if (!btf_vmlinux) { verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n"); @@ -1699,7 +1823,29 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id) prog_aux->kfunc_tab = tab; } - if (find_kfunc_desc(env->prog, func_id)) + /* func_id == 0 is always invalid, but instead of returning an error, be + * conservative and wait until the code elimination pass before returning + * error, so that invalid calls that get pruned out can be in BPF programs + * loaded from userspace. It is also required that offset be untouched + * for such calls. 
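__find_kfunc_desc_btf() above is fed from the program-load side: the kfunc call instruction keeps the function's BTF type id in imm and, for a module kfunc, an index into the fd_array passed at BPF_PROG_LOAD time in off, and that slot must hold an fd for the module's BTF. A loader-side sketch under those assumptions; the insn macros are the convenience ones from tools/include/linux/filter.h, and the prog type, ids and fds are placeholders:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>
#include <linux/filter.h>	/* BPF_RAW_INSN() etc., tools/ copy assumed */

static int load_with_module_kfunc(int module_btf_fd, __u32 kfunc_btf_id)
{
	int fd_array[2] = { -1, module_btf_fd };	/* slot 1 is used below */
	struct bpf_insn insns[] = {
		/* imm = BTF id of the kfunc, off = index into fd_array */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL,
			     1, kfunc_btf_id),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	union bpf_attr attr = {
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.insns     = (__u64)(unsigned long)insns,
		.insn_cnt  = 3,
		.license   = (__u64)(unsigned long)"GPL",
		.fd_array  = (__u64)(unsigned long)fd_array,
	};

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}

An off of 0 keeps the existing behaviour of resolving the id against vmlinux BTF, which is why the verifier rejects a non-zero off when no fd_array was supplied.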
+ */ + if (!func_id && !offset) + return 0; + + if (!btf_tab && offset) { + btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL); + if (!btf_tab) + return -ENOMEM; + prog_aux->kfunc_btf_tab = btf_tab; + } + + desc_btf = find_kfunc_desc_btf(env, func_id, offset, NULL); + if (IS_ERR(desc_btf)) { + verbose(env, "failed to find BTF for kernel function\n"); + return PTR_ERR(desc_btf); + } + + if (find_kfunc_desc(env->prog, func_id, offset)) return 0; if (tab->nr_descs == MAX_KFUNC_DESCS) { @@ -1707,20 +1853,20 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id) return -E2BIG; } - func = btf_type_by_id(btf_vmlinux, func_id); + func = btf_type_by_id(desc_btf, func_id); if (!func || !btf_type_is_func(func)) { verbose(env, "kernel btf_id %u is not a function\n", func_id); return -EINVAL; } - func_proto = btf_type_by_id(btf_vmlinux, func->type); + func_proto = btf_type_by_id(desc_btf, func->type); if (!func_proto || !btf_type_is_func_proto(func_proto)) { verbose(env, "kernel function btf_id %u does not have a valid func_proto\n", func_id); return -EINVAL; } - func_name = btf_name_by_offset(btf_vmlinux, func->name_off); + func_name = btf_name_by_offset(desc_btf, func->name_off); addr = kallsyms_lookup_name(func_name); if (!addr) { verbose(env, "cannot find address for kernel function %s\n", @@ -1730,13 +1876,14 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id) desc = &tab->descs[tab->nr_descs++]; desc->func_id = func_id; - desc->imm = BPF_CAST_CALL(addr) - __bpf_call_base; - err = btf_distill_func_proto(&env->log, btf_vmlinux, + desc->imm = BPF_CALL_IMM(addr); + desc->offset = offset; + err = btf_distill_func_proto(&env->log, desc_btf, func_proto, func_name, &desc->func_model); if (!err) sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), - kfunc_desc_cmp_by_id, NULL); + kfunc_desc_cmp_by_id_off, NULL); return err; } @@ -1807,16 +1954,10 @@ static int add_subprog_and_kfunc(struct bpf_verifier_env *env) return -EPERM; } - if (bpf_pseudo_func(insn)) { + if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn)) ret = add_subprog(env, i + insn->imm + 1); - if (ret >= 0) - /* remember subprog */ - insn[1].imm = ret; - } else if (bpf_pseudo_call(insn)) { - ret = add_subprog(env, i + insn->imm + 1); - } else { - ret = add_kfunc_call(env, insn->imm); - } + else + ret = add_kfunc_call(env, insn->imm, insn->off); if (ret < 0) return ret; @@ -2152,12 +2293,17 @@ static int get_prev_insn_idx(struct bpf_verifier_state *st, int i, static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn) { const struct btf_type *func; + struct btf *desc_btf; if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL) return NULL; - func = btf_type_by_id(btf_vmlinux, insn->imm); - return btf_name_by_offset(btf_vmlinux, func->name_off); + desc_btf = find_kfunc_desc_btf(data, insn->imm, insn->off, NULL); + if (IS_ERR(desc_btf)) + return "<error>"; + + func = btf_type_by_id(desc_btf, insn->imm); + return btf_name_by_offset(desc_btf, func->name_off); } /* For given verifier state backtrack_insn() is called from the last insn to @@ -2373,7 +2519,7 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env, reg->precise = true; } for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { - if (func->stack[j].slot_type[0] != STACK_SPILL) + if (!is_spilled_reg(&func->stack[j])) continue; reg = &func->stack[j].spilled_ptr; if (reg->type != SCALAR_VALUE) @@ -2415,7 +2561,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, } while (spi >= 0) { - if 
(func->stack[spi].slot_type[0] != STACK_SPILL) { + if (!is_spilled_reg(&func->stack[spi])) { stack_mask = 0; break; } @@ -2514,7 +2660,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, return 0; } - if (func->stack[i].slot_type[0] != STACK_SPILL) { + if (!is_spilled_reg(&func->stack[i])) { stack_mask &= ~(1ull << i); continue; } @@ -2626,15 +2772,21 @@ static bool __is_pointer_value(bool allow_ptr_leaks, } static void save_register_state(struct bpf_func_state *state, - int spi, struct bpf_reg_state *reg) + int spi, struct bpf_reg_state *reg, + int size) { int i; state->stack[spi].spilled_ptr = *reg; - state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; + if (size == BPF_REG_SIZE) + state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; + + for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) + state->stack[spi].slot_type[i - 1] = STACK_SPILL; - for (i = 0; i < BPF_REG_SIZE; i++) - state->stack[spi].slot_type[i] = STACK_SPILL; + /* size < 8 bytes spill */ + for (; i; i--) + scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]); } /* check_stack_{read,write}_fixed_off functions track spill/fill of registers, @@ -2681,7 +2833,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, env->insn_aux_data[insn_idx].sanitize_stack_spill = true; } - if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) && + if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && !register_is_null(reg) && env->bpf_capable) { if (dst_reg != BPF_REG_FP) { /* The backtracking logic can only recognize explicit @@ -2694,7 +2846,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, if (err) return err; } - save_register_state(state, spi, reg); + save_register_state(state, spi, reg, size); } else if (reg && is_spillable_regtype(reg->type)) { /* register containing pointer is being spilled into stack */ if (size != BPF_REG_SIZE) { @@ -2706,16 +2858,16 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); return -EINVAL; } - save_register_state(state, spi, reg); + save_register_state(state, spi, reg, size); } else { u8 type = STACK_MISC; /* regular write of data into stack destroys any spilled ptr */ state->stack[spi].spilled_ptr.type = NOT_INIT; /* Mark slots as STACK_MISC if they belonged to spilled ptr. 
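With save_register_state() now recording spills narrower than a full register, a pattern like the following keeps its bounds across the spill/fill instead of degrading to STACK_MISC and an unknown scalar. Sketched with the kernel's insn macros, purely for illustration; note the store must start at a slot-aligned offset for the spill to be tracked:

struct bpf_insn small_spill_fill[] = {
	BPF_MOV64_IMM(BPF_REG_1, 42),
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -8),	/* 4-byte spill, slot aligned */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),	/* fill restores the bounded scalar */
	BPF_EXIT_INSN(),
};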
*/ - if (state->stack[spi].slot_type[0] == STACK_SPILL) + if (is_spilled_reg(&state->stack[spi])) for (i = 0; i < BPF_REG_SIZE; i++) - state->stack[spi].slot_type[i] = STACK_MISC; + scrub_spilled_slot(&state->stack[spi].slot_type[i]); /* only mark the slot as written if all 8 bytes were written * otherwise read propagation may incorrectly stop too soon @@ -2918,31 +3070,52 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env, struct bpf_func_state *state = vstate->frame[vstate->curframe]; int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; struct bpf_reg_state *reg; - u8 *stype; + u8 *stype, type; stype = reg_state->stack[spi].slot_type; reg = ®_state->stack[spi].spilled_ptr; - if (stype[0] == STACK_SPILL) { - if (size != BPF_REG_SIZE) { + if (is_spilled_reg(®_state->stack[spi])) { + u8 spill_size = 1; + + for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--) + spill_size++; + + if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) { if (reg->type != SCALAR_VALUE) { verbose_linfo(env, env->insn_idx, "; "); verbose(env, "invalid size of register fill\n"); return -EACCES; } - if (dst_regno >= 0) { + + mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); + if (dst_regno < 0) + return 0; + + if (!(off % BPF_REG_SIZE) && size == spill_size) { + /* The earlier check_reg_arg() has decided the + * subreg_def for this insn. Save it first. + */ + s32 subreg_def = state->regs[dst_regno].subreg_def; + + state->regs[dst_regno] = *reg; + state->regs[dst_regno].subreg_def = subreg_def; + } else { + for (i = 0; i < size; i++) { + type = stype[(slot - i) % BPF_REG_SIZE]; + if (type == STACK_SPILL) + continue; + if (type == STACK_MISC) + continue; + verbose(env, "invalid read from stack off %d+%d size %d\n", + off, i, size); + return -EACCES; + } mark_reg_unknown(env, state->regs, dst_regno); - state->regs[dst_regno].live |= REG_LIVE_WRITTEN; } - mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); + state->regs[dst_regno].live |= REG_LIVE_WRITTEN; return 0; } - for (i = 1; i < BPF_REG_SIZE; i++) { - if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { - verbose(env, "corrupted spill memory\n"); - return -EACCES; - } - } if (dst_regno >= 0) { /* restore register state from stack */ @@ -2965,8 +3138,6 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env, } mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); } else { - u8 type; - for (i = 0; i < size; i++) { type = stype[(slot - i) % BPF_REG_SIZE]; if (type == STACK_MISC) @@ -4514,17 +4685,17 @@ static int check_stack_range_initialized( goto mark; } - if (state->stack[spi].slot_type[0] == STACK_SPILL && + if (is_spilled_reg(&state->stack[spi]) && state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID) goto mark; - if (state->stack[spi].slot_type[0] == STACK_SPILL && + if (is_spilled_reg(&state->stack[spi]) && (state->stack[spi].spilled_ptr.type == SCALAR_VALUE || env->allow_ptr_leaks)) { if (clobber) { __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); for (j = 0; j < BPF_REG_SIZE; j++) - state->stack[spi].slot_type[j] = STACK_MISC; + scrub_spilled_slot(&state->stack[spi].slot_type[j]); } goto mark; } @@ -4813,7 +4984,10 @@ static int resolve_map_arg_type(struct bpf_verifier_env *env, return -EINVAL; } break; - + case BPF_MAP_TYPE_BLOOM_FILTER: + if (meta->func_id == BPF_FUNC_map_peek_elem) + *arg_type = ARG_PTR_TO_MAP_VALUE; + break; default: break; } @@ -5388,6 +5562,11 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, func_id != BPF_FUNC_task_storage_delete) goto error; 
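The BPF_MAP_TYPE_BLOOM_FILTER handling added here and in the next hunk limits the map to bpf_map_push_elem() and bpf_map_peek_elem(). A BPF-program-side sketch of what that looks like, with the caveat that the map_extra semantics (number of hash functions) come from the bloom filter implementation itself, which is outside the files shown, and that __uint(map_extra, ...) needs a matching libbpf:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_BLOOM_FILTER);
	__uint(value_size, sizeof(__u64));
	__uint(max_entries, 1000);
	__uint(map_extra, 3);	/* assumed: number of hash functions */
} bloom SEC(".maps");

SEC("tc")
int bloom_demo(struct __sk_buff *skb)
{
	__u64 val = 42;

	bpf_map_push_elem(&bloom, &val, BPF_ANY);	/* add */
	if (bpf_map_peek_elem(&bloom, &val) == 0)	/* query, may false-positive */
		return 1;
	return 0;
}

char LICENSE[] SEC("license") = "GPL";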
break; + case BPF_MAP_TYPE_BLOOM_FILTER: + if (func_id != BPF_FUNC_map_peek_elem && + func_id != BPF_FUNC_map_push_elem) + goto error; + break; default: break; } @@ -5455,13 +5634,18 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, map->map_type != BPF_MAP_TYPE_SOCKHASH) goto error; break; - case BPF_FUNC_map_peek_elem: case BPF_FUNC_map_pop_elem: - case BPF_FUNC_map_push_elem: if (map->map_type != BPF_MAP_TYPE_QUEUE && map->map_type != BPF_MAP_TYPE_STACK) goto error; break; + case BPF_FUNC_map_peek_elem: + case BPF_FUNC_map_push_elem: + if (map->map_type != BPF_MAP_TYPE_QUEUE && + map->map_type != BPF_MAP_TYPE_STACK && + map->map_type != BPF_MAP_TYPE_BLOOM_FILTER) + goto error; + break; case BPF_FUNC_sk_storage_get: case BPF_FUNC_sk_storage_delete: if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) @@ -6485,23 +6669,33 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn) struct bpf_reg_state *regs = cur_regs(env); const char *func_name, *ptr_type_name; u32 i, nargs, func_id, ptr_type_id; + struct module *btf_mod = NULL; const struct btf_param *args; + struct btf *desc_btf; int err; + /* skip for now, but return error when we find this in fixup_kfunc_call */ + if (!insn->imm) + return 0; + + desc_btf = find_kfunc_desc_btf(env, insn->imm, insn->off, &btf_mod); + if (IS_ERR(desc_btf)) + return PTR_ERR(desc_btf); + func_id = insn->imm; - func = btf_type_by_id(btf_vmlinux, func_id); - func_name = btf_name_by_offset(btf_vmlinux, func->name_off); - func_proto = btf_type_by_id(btf_vmlinux, func->type); + func = btf_type_by_id(desc_btf, func_id); + func_name = btf_name_by_offset(desc_btf, func->name_off); + func_proto = btf_type_by_id(desc_btf, func->type); if (!env->ops->check_kfunc_call || - !env->ops->check_kfunc_call(func_id)) { + !env->ops->check_kfunc_call(func_id, btf_mod)) { verbose(env, "calling kernel function %s is not allowed\n", func_name); return -EACCES; } /* Check the arguments */ - err = btf_check_kfunc_arg_match(env, btf_vmlinux, func_id, regs); + err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs); if (err) return err; @@ -6509,15 +6703,15 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn) mark_reg_not_init(env, regs, caller_saved[i]); /* Check return type */ - t = btf_type_skip_modifiers(btf_vmlinux, func_proto->type, NULL); + t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL); if (btf_type_is_scalar(t)) { mark_reg_unknown(env, regs, BPF_REG_0); mark_btf_func_reg_size(env, BPF_REG_0, t->size); } else if (btf_type_is_ptr(t)) { - ptr_type = btf_type_skip_modifiers(btf_vmlinux, t->type, + ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id); if (!btf_type_is_struct(ptr_type)) { - ptr_type_name = btf_name_by_offset(btf_vmlinux, + ptr_type_name = btf_name_by_offset(desc_btf, ptr_type->name_off); verbose(env, "kernel function %s returns pointer type %s %s is not supported\n", func_name, btf_type_str(ptr_type), @@ -6525,7 +6719,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn) return -EINVAL; } mark_reg_known_zero(env, regs, BPF_REG_0); - regs[BPF_REG_0].btf = btf_vmlinux; + regs[BPF_REG_0].btf = desc_btf; regs[BPF_REG_0].type = PTR_TO_BTF_ID; regs[BPF_REG_0].btf_id = ptr_type_id; mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *)); @@ -6536,7 +6730,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn) for (i = 0; i < nargs; i++) { u32 regno = i + 1; - t = btf_type_skip_modifiers(btf_vmlinux, 
args[i].type, NULL); + t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL); if (btf_type_is_ptr(t)) mark_btf_func_reg_size(env, regno, sizeof(void *)); else @@ -9181,7 +9375,8 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) if (insn->src_reg == BPF_PSEUDO_FUNC) { struct bpf_prog_aux *aux = env->prog->aux; - u32 subprogno = insn[1].imm; + u32 subprogno = find_subprog(env, + env->insn_idx + insn->imm + 1); if (!aux->func_info) { verbose(env, "missing btf func_info\n"); @@ -10356,9 +10551,9 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, * return false to continue verification of this path */ return false; - if (i % BPF_REG_SIZE) + if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) continue; - if (old->stack[spi].slot_type[0] != STACK_SPILL) + if (!is_spilled_reg(&old->stack[spi])) continue; if (!regsafe(env, &old->stack[spi].spilled_ptr, &cur->stack[spi].spilled_ptr, idmap)) @@ -10565,7 +10760,7 @@ static int propagate_precision(struct bpf_verifier_env *env, } for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { - if (state->stack[i].slot_type[0] != STACK_SPILL) + if (!is_spilled_reg(&state->stack[i])) continue; state_reg = &state->stack[i].spilled_ptr; if (state_reg->type != SCALAR_VALUE || @@ -11076,7 +11271,8 @@ static int do_check(struct bpf_verifier_env *env) env->jmps_processed++; if (opcode == BPF_CALL) { if (BPF_SRC(insn->code) != BPF_K || - insn->off != 0 || + (insn->src_reg != BPF_PSEUDO_KFUNC_CALL + && insn->off != 0) || (insn->src_reg != BPF_REG_0 && insn->src_reg != BPF_PSEUDO_CALL && insn->src_reg != BPF_PSEUDO_KFUNC_CALL) || @@ -12350,14 +12546,9 @@ static int jit_subprogs(struct bpf_verifier_env *env) return 0; for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { - if (bpf_pseudo_func(insn)) { - env->insn_aux_data[i].call_imm = insn->imm; - /* subprog is encoded in insn[1].imm */ + if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn)) continue; - } - if (!bpf_pseudo_call(insn)) - continue; /* Upon error here we cannot fall back to interpreter but * need a hard reject of the program. Thus -EFAULT is * propagated in any case. @@ -12378,6 +12569,12 @@ static int jit_subprogs(struct bpf_verifier_env *env) env->insn_aux_data[i].call_imm = insn->imm; /* point imm to __bpf_call_base+1 from JITs point of view */ insn->imm = 1; + if (bpf_pseudo_func(insn)) + /* jit (e.g. x86_64) may emit fewer instructions + * if it learns a u32 imm is the same as a u64 imm. + * Force a non zero here. 
+ */ + insn[1].imm = 1; } err = bpf_prog_alloc_jited_linfo(prog); @@ -12432,6 +12629,7 @@ static int jit_subprogs(struct bpf_verifier_env *env) func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; func[i]->jit_requested = 1; func[i]->aux->kfunc_tab = prog->aux->kfunc_tab; + func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab; func[i]->aux->linfo = prog->aux->linfo; func[i]->aux->nr_linfo = prog->aux->nr_linfo; func[i]->aux->jited_linfo = prog->aux->jited_linfo; @@ -12461,7 +12659,7 @@ static int jit_subprogs(struct bpf_verifier_env *env) insn = func[i]->insnsi; for (j = 0; j < func[i]->len; j++, insn++) { if (bpf_pseudo_func(insn)) { - subprog = insn[1].imm; + subprog = insn->off; insn[0].imm = (u32)(long)func[subprog]->bpf_func; insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32; continue; @@ -12469,8 +12667,7 @@ static int jit_subprogs(struct bpf_verifier_env *env) if (!bpf_pseudo_call(insn)) continue; subprog = insn->off; - insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) - - __bpf_call_base; + insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func); } /* we use the aux data to keep a list of the start addresses @@ -12513,7 +12710,8 @@ static int jit_subprogs(struct bpf_verifier_env *env) for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (bpf_pseudo_func(insn)) { insn[0].imm = env->insn_aux_data[i].call_imm; - insn[1].imm = find_subprog(env, i + insn[0].imm + 1); + insn[1].imm = insn->off; + insn->off = 0; continue; } if (!bpf_pseudo_call(insn)) @@ -12618,10 +12816,15 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, { const struct bpf_kfunc_desc *desc; + if (!insn->imm) { + verbose(env, "invalid kernel function call not eliminated in verifier pass\n"); + return -EINVAL; + } + /* insn->imm has the btf func_id. Replace it with * an address (relative to __bpf_base_call). 
*/ - desc = find_kfunc_desc(env->prog, insn->imm); + desc = find_kfunc_desc(env->prog, insn->imm, insn->off); if (!desc) { verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n", insn->imm); @@ -12902,7 +13105,8 @@ static int do_misc_fixups(struct bpf_verifier_env *env) insn->imm == BPF_FUNC_map_push_elem || insn->imm == BPF_FUNC_map_pop_elem || insn->imm == BPF_FUNC_map_peek_elem || - insn->imm == BPF_FUNC_redirect_map)) { + insn->imm == BPF_FUNC_redirect_map || + insn->imm == BPF_FUNC_for_each_map_elem)) { aux = &env->insn_aux_data[i + delta]; if (bpf_map_ptr_poisoned(aux)) goto patch_call_imm; @@ -12946,36 +13150,37 @@ static int do_misc_fixups(struct bpf_verifier_env *env) (int (*)(struct bpf_map *map, void *value))NULL)); BUILD_BUG_ON(!__same_type(ops->map_redirect, (int (*)(struct bpf_map *map, u32 ifindex, u64 flags))NULL)); + BUILD_BUG_ON(!__same_type(ops->map_for_each_callback, + (int (*)(struct bpf_map *map, + bpf_callback_t callback_fn, + void *callback_ctx, + u64 flags))NULL)); patch_map_ops_generic: switch (insn->imm) { case BPF_FUNC_map_lookup_elem: - insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - - __bpf_call_base; + insn->imm = BPF_CALL_IMM(ops->map_lookup_elem); continue; case BPF_FUNC_map_update_elem: - insn->imm = BPF_CAST_CALL(ops->map_update_elem) - - __bpf_call_base; + insn->imm = BPF_CALL_IMM(ops->map_update_elem); continue; case BPF_FUNC_map_delete_elem: - insn->imm = BPF_CAST_CALL(ops->map_delete_elem) - - __bpf_call_base; + insn->imm = BPF_CALL_IMM(ops->map_delete_elem); continue; case BPF_FUNC_map_push_elem: - insn->imm = BPF_CAST_CALL(ops->map_push_elem) - - __bpf_call_base; + insn->imm = BPF_CALL_IMM(ops->map_push_elem); continue; case BPF_FUNC_map_pop_elem: - insn->imm = BPF_CAST_CALL(ops->map_pop_elem) - - __bpf_call_base; + insn->imm = BPF_CALL_IMM(ops->map_pop_elem); continue; case BPF_FUNC_map_peek_elem: - insn->imm = BPF_CAST_CALL(ops->map_peek_elem) - - __bpf_call_base; + insn->imm = BPF_CALL_IMM(ops->map_peek_elem); continue; case BPF_FUNC_redirect_map: - insn->imm = BPF_CAST_CALL(ops->map_redirect) - - __bpf_call_base; + insn->imm = BPF_CALL_IMM(ops->map_redirect); + continue; + case BPF_FUNC_for_each_map_elem: + insn->imm = BPF_CALL_IMM(ops->map_for_each_callback); continue; } @@ -13826,6 +14031,7 @@ skip_full_check: env->verification_time = ktime_get_ns() - start_time; print_verification_stats(env); + env->prog->aux->verified_insns = env->insn_processed; if (log->level && bpf_verifier_log_full(log)) ret = -ENOSPC; diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 35b920328344..81c9e0685948 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -63,9 +63,6 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) for_each_root(root) { struct cgroup *from_cgrp; - if (root == &cgrp_dfl_root) - continue; - spin_lock_irq(&css_set_lock); from_cgrp = task_cgroup_from_root(from, root); spin_unlock_irq(&css_set_lock); @@ -662,11 +659,9 @@ int proc_cgroupstats_show(struct seq_file *m, void *v) seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n"); /* - * ideally we don't want subsystems moving around while we do this. - * cgroup_mutex is also necessary to guarantee an atomic snapshot of - * subsys/hierarchy state. + * Grab the subsystems state racily. No need to add avenue to + * cgroup_mutex contention. 
*/ - mutex_lock(&cgroup_mutex); for_each_subsys(ss, i) seq_printf(m, "%s\t%d\t%d\t%d\n", @@ -674,7 +669,6 @@ int proc_cgroupstats_show(struct seq_file *m, void *v) atomic_read(&ss->root->nr_cgrps), cgroup_ssid_enabled(i)); - mutex_unlock(&cgroup_mutex); return 0; } @@ -701,8 +695,6 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) kernfs_type(kn) != KERNFS_DIR) return -EINVAL; - mutex_lock(&cgroup_mutex); - /* * We aren't being called from kernfs and there's no guarantee on * @kn->priv's validity. For this and css_tryget_online_from_dir(), @@ -710,9 +702,8 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) */ rcu_read_lock(); cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv); - if (!cgrp || cgroup_is_dead(cgrp)) { + if (!cgrp || !cgroup_tryget(cgrp)) { rcu_read_unlock(); - mutex_unlock(&cgroup_mutex); return -ENOENT; } rcu_read_unlock(); @@ -740,7 +731,7 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) } css_task_iter_end(&it); - mutex_unlock(&cgroup_mutex); + cgroup_put(cgrp); return 0; } diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index ea08f01d0111..919194de39c8 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -1740,6 +1740,7 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask) struct cgroup *dcgrp = &dst_root->cgrp; struct cgroup_subsys *ss; int ssid, i, ret; + u16 dfl_disable_ss_mask = 0; lockdep_assert_held(&cgroup_mutex); @@ -1756,8 +1757,28 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask) /* can't move between two non-dummy roots either */ if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root) return -EBUSY; + + /* + * Collect ssid's that need to be disabled from default + * hierarchy. + */ + if (ss->root == &cgrp_dfl_root) + dfl_disable_ss_mask |= 1 << ssid; + } while_each_subsys_mask(); + if (dfl_disable_ss_mask) { + struct cgroup *scgrp = &cgrp_dfl_root.cgrp; + + /* + * Controllers from default hierarchy that need to be rebound + * are all disabled together in one go. 
+ */ + cgrp_dfl_root.subsys_mask &= ~dfl_disable_ss_mask; + WARN_ON(cgroup_apply_control(scgrp)); + cgroup_finalize_control(scgrp, 0); + } + do_each_subsys_mask(ss, ssid, ss_mask) { struct cgroup_root *src_root = ss->root; struct cgroup *scgrp = &src_root->cgrp; @@ -1766,10 +1787,12 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask) WARN_ON(!css || cgroup_css(dcgrp, ss)); - /* disable from the source */ - src_root->subsys_mask &= ~(1 << ssid); - WARN_ON(cgroup_apply_control(scgrp)); - cgroup_finalize_control(scgrp, 0); + if (src_root != &cgrp_dfl_root) { + /* disable from the source */ + src_root->subsys_mask &= ~(1 << ssid); + WARN_ON(cgroup_apply_control(scgrp)); + cgroup_finalize_control(scgrp, 0); + } /* rebind */ RCU_INIT_POINTER(scgrp->subsys[ssid], NULL); @@ -5911,17 +5934,20 @@ struct cgroup *cgroup_get_from_id(u64 id) struct kernfs_node *kn; struct cgroup *cgrp = NULL; - mutex_lock(&cgroup_mutex); kn = kernfs_find_and_get_node_by_id(cgrp_dfl_root.kf_root, id); if (!kn) - goto out_unlock; + goto out; - cgrp = kn->priv; - if (cgroup_is_dead(cgrp) || !cgroup_tryget(cgrp)) + rcu_read_lock(); + + cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv); + if (cgrp && !cgroup_tryget(cgrp)) cgrp = NULL; + + rcu_read_unlock(); + kernfs_put(kn); -out_unlock: - mutex_unlock(&cgroup_mutex); +out: return cgrp; } EXPORT_SYMBOL_GPL(cgroup_get_from_id); @@ -6474,30 +6500,34 @@ struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss) * * Find the cgroup at @path on the default hierarchy, increment its * reference count and return it. Returns pointer to the found cgroup on - * success, ERR_PTR(-ENOENT) if @path doesn't exist and ERR_PTR(-ENOTDIR) - * if @path points to a non-directory. + * success, ERR_PTR(-ENOENT) if @path doesn't exist or if the cgroup has already + * been released and ERR_PTR(-ENOTDIR) if @path points to a non-directory. 
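Since the path lookup below now takes only an RCU read lock plus a tryget, the calling convention is unchanged; a caller still pairs the lookup with cgroup_put(), as in this sketch (the path is illustrative):

static int example_use(void)
{
	struct cgroup *cgrp;

	cgrp = cgroup_get_from_path("/my/workload");	/* no cgroup_mutex taken */
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);
	/* ... use cgrp ... */
	cgroup_put(cgrp);
	return 0;
}

The only behavioural difference spelled out in the updated comment is that a cgroup which has already been released now reports -ENOENT rather than being found under the mutex.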
*/ struct cgroup *cgroup_get_from_path(const char *path) { struct kernfs_node *kn; - struct cgroup *cgrp; - - mutex_lock(&cgroup_mutex); + struct cgroup *cgrp = ERR_PTR(-ENOENT); kn = kernfs_walk_and_get(cgrp_dfl_root.cgrp.kn, path); - if (kn) { - if (kernfs_type(kn) == KERNFS_DIR) { - cgrp = kn->priv; - cgroup_get_live(cgrp); - } else { - cgrp = ERR_PTR(-ENOTDIR); - } - kernfs_put(kn); - } else { - cgrp = ERR_PTR(-ENOENT); + if (!kn) + goto out; + + if (kernfs_type(kn) != KERNFS_DIR) { + cgrp = ERR_PTR(-ENOTDIR); + goto out_kernfs; } - mutex_unlock(&cgroup_mutex); + rcu_read_lock(); + + cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv); + if (!cgrp || !cgroup_tryget(cgrp)) + cgrp = ERR_PTR(-ENOENT); + + rcu_read_unlock(); + +out_kernfs: + kernfs_put(kn); +out: return cgrp; } EXPORT_SYMBOL_GPL(cgroup_get_from_path); @@ -6625,44 +6655,6 @@ void cgroup_sk_free(struct sock_cgroup_data *skcd) #endif /* CONFIG_SOCK_CGROUP_DATA */ -#ifdef CONFIG_CGROUP_BPF -int cgroup_bpf_attach(struct cgroup *cgrp, - struct bpf_prog *prog, struct bpf_prog *replace_prog, - struct bpf_cgroup_link *link, - enum bpf_attach_type type, - u32 flags) -{ - int ret; - - mutex_lock(&cgroup_mutex); - ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags); - mutex_unlock(&cgroup_mutex); - return ret; -} - -int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, - enum bpf_attach_type type) -{ - int ret; - - mutex_lock(&cgroup_mutex); - ret = __cgroup_bpf_detach(cgrp, prog, NULL, type); - mutex_unlock(&cgroup_mutex); - return ret; -} - -int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, - union bpf_attr __user *uattr) -{ - int ret; - - mutex_lock(&cgroup_mutex); - ret = __cgroup_bpf_query(cgrp, attr, uattr); - mutex_unlock(&cgroup_mutex); - return ret; -} -#endif /* CONFIG_CGROUP_BPF */ - #ifdef CONFIG_SYSFS static ssize_t show_delegatable_files(struct cftype *files, char *buf, ssize_t size, const char *prefix) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 2a9695ccb65f..d0e163a02099 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -69,6 +69,13 @@ DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key); DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key); +/* + * There could be abnormal cpuset configurations for cpu or memory + * node binding, add this key to provide a quick low-cost judgement + * of the situation. + */ +DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key); + /* See "Frequency meter" comments, below. 
*/ struct fmeter { @@ -372,6 +379,17 @@ static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn); static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq); +static inline void check_insane_mems_config(nodemask_t *nodes) +{ + if (!cpusets_insane_config() && + movable_only_nodes(nodes)) { + static_branch_enable(&cpusets_insane_config_key); + pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n" + "Cpuset allocations might fail even with a lot of memory available.\n", + nodemask_pr_args(nodes)); + } +} + /* * Cgroup v2 behavior is used on the "cpus" and "mems" control files when * on default hierarchy or when the cpuset_v2_mode flag is set by mounting @@ -1870,6 +1888,8 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, if (retval < 0) goto done; + check_insane_mems_config(&trialcs->mems_allowed); + spin_lock_irq(&callback_lock); cs->mems_allowed = trialcs->mems_allowed; spin_unlock_irq(&callback_lock); @@ -3173,6 +3193,9 @@ update_tasks: cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); mems_updated = !nodes_equal(new_mems, cs->effective_mems); + if (mems_updated) + check_insane_mems_config(&new_mems); + if (is_in_v2_mode()) hotplug_update_tasks(cs, &new_cpus, &new_mems, cpus_updated, mems_updated); diff --git a/kernel/cgroup/misc.c b/kernel/cgroup/misc.c index ec02d963cad1..fe3e8a0eb7ed 100644 --- a/kernel/cgroup/misc.c +++ b/kernel/cgroup/misc.c @@ -157,13 +157,6 @@ int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg, new_usage = atomic_long_add_return(amount, &res->usage); if (new_usage > READ_ONCE(res->max) || new_usage > READ_ONCE(misc_res_capacity[type])) { - if (!res->failed) { - pr_info("cgroup: charge rejected by the misc controller for %s resource in ", - misc_res_name[type]); - pr_cont_cgroup_path(i->css.cgroup); - pr_cont("\n"); - res->failed = true; - } ret = -EBUSY; goto err_charge; } @@ -171,6 +164,11 @@ int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg, return 0; err_charge: + for (j = i; j; j = parent_misc(j)) { + atomic_long_inc(&j->res[type].events); + cgroup_file_notify(&j->events_file); + } + for (j = cg; j != i; j = parent_misc(j)) misc_cg_cancel_charge(type, j, amount); misc_cg_cancel_charge(type, i, amount); @@ -335,6 +333,19 @@ static int misc_cg_capacity_show(struct seq_file *sf, void *v) return 0; } +static int misc_events_show(struct seq_file *sf, void *v) +{ + struct misc_cg *cg = css_misc(seq_css(sf)); + unsigned long events, i; + + for (i = 0; i < MISC_CG_RES_TYPES; i++) { + events = atomic_long_read(&cg->res[i].events); + if (READ_ONCE(misc_res_capacity[i]) || events) + seq_printf(sf, "%s.max %lu\n", misc_res_name[i], events); + } + return 0; +} + /* Misc cgroup interface files */ static struct cftype misc_cg_files[] = { { @@ -353,6 +364,12 @@ static struct cftype misc_cg_files[] = { .seq_show = misc_cg_capacity_show, .flags = CFTYPE_ONLY_ON_ROOT, }, + { + .name = "events", + .flags = CFTYPE_NOT_ON_ROOT, + .file_offset = offsetof(struct misc_cg, events_file), + .seq_show = misc_events_show, + }, {} }; diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index b264ab5652ba..1486768f2318 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -433,8 +433,6 @@ static void root_cgroup_cputime(struct task_cputime *cputime) cputime->sum_exec_runtime += user; cputime->sum_exec_runtime += sys; cputime->sum_exec_runtime += cpustat[CPUTIME_STEAL]; - cputime->sum_exec_runtime += cpustat[CPUTIME_GUEST]; - cputime->sum_exec_runtime += 
cpustat[CPUTIME_GUEST_NICE]; } } diff --git a/kernel/cred.c b/kernel/cred.c index 1ae0b4948a5a..473d17c431f3 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -676,15 +676,14 @@ int set_cred_ucounts(struct cred *new) * This optimization is needed because alloc_ucounts() uses locks * for table lookups. */ - if (old_ucounts && old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid)) + if (old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid)) return 0; if (!(new_ucounts = alloc_ucounts(new->user_ns, new->euid))) return -EAGAIN; new->ucounts = new_ucounts; - if (old_ucounts) - put_ucounts(old_ucounts); + put_ucounts(old_ucounts); return 0; } diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c index 1f9f0e47aeda..10b454554ab0 100644 --- a/kernel/debug/kdb/kdb_bt.c +++ b/kernel/debug/kdb/kdb_bt.c @@ -46,7 +46,7 @@ static void kdb_show_stack(struct task_struct *p, void *addr) * btp <pid> Kernel stack for <pid> * btt <address-expression> Kernel stack for task structure at * <address-expression> - * bta [DRSTCZEUIMA] All useful processes, optionally + * bta [<state_chars>|A] All useful processes, optionally filtered by state * btc [<cpu>] The current process on one cpu, * default is all cpus @@ -74,7 +74,7 @@ */ static int -kdb_bt1(struct task_struct *p, unsigned long mask, bool btaprompt) +kdb_bt1(struct task_struct *p, const char *mask, bool btaprompt) { char ch; @@ -120,7 +120,7 @@ kdb_bt_cpu(unsigned long cpu) return; } - kdb_bt1(kdb_tsk, ~0UL, false); + kdb_bt1(kdb_tsk, "A", false); } int @@ -138,8 +138,8 @@ kdb_bt(int argc, const char **argv) if (strcmp(argv[0], "bta") == 0) { struct task_struct *g, *p; unsigned long cpu; - unsigned long mask = kdb_task_state_string(argc ? argv[1] : - NULL); + const char *mask = argc ? 
argv[1] : kdbgetenv("PS"); + if (argc == 0) kdb_ps_suppressed(); /* Run the active tasks first */ @@ -167,7 +167,7 @@ kdb_bt(int argc, const char **argv) return diag; p = find_task_by_pid_ns(pid, &init_pid_ns); if (p) - return kdb_bt1(p, ~0UL, false); + return kdb_bt1(p, "A", false); kdb_printf("No process with pid == %ld found\n", pid); return 0; } else if (strcmp(argv[0], "btt") == 0) { @@ -176,7 +176,7 @@ kdb_bt(int argc, const char **argv) diag = kdbgetularg((char *)argv[1], &addr); if (diag) return diag; - return kdb_bt1((struct task_struct *)addr, ~0UL, false); + return kdb_bt1((struct task_struct *)addr, "A", false); } else if (strcmp(argv[0], "btc") == 0) { unsigned long cpu = ~0; if (argc > 1) @@ -212,7 +212,7 @@ kdb_bt(int argc, const char **argv) kdb_show_stack(kdb_current_task, (void *)addr); return 0; } else { - return kdb_bt1(kdb_current_task, ~0UL, false); + return kdb_bt1(kdb_current_task, "A", false); } } diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index fa6deda894a1..0852a537dad4 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -2203,8 +2203,8 @@ static void kdb_cpu_status(void) state = 'D'; /* cpu is online but unresponsive */ } else { state = ' '; /* cpu is responding to kdb */ - if (kdb_task_state_char(KDB_TSK(i)) == 'I') - state = 'I'; /* idle task */ + if (kdb_task_state_char(KDB_TSK(i)) == '-') + state = '-'; /* idle task */ } if (state != prev_state) { if (prev_state != '?') { @@ -2271,37 +2271,30 @@ static int kdb_cpu(int argc, const char **argv) void kdb_ps_suppressed(void) { int idle = 0, daemon = 0; - unsigned long mask_I = kdb_task_state_string("I"), - mask_M = kdb_task_state_string("M"); unsigned long cpu; const struct task_struct *p, *g; for_each_online_cpu(cpu) { p = kdb_curr_task(cpu); - if (kdb_task_state(p, mask_I)) + if (kdb_task_state(p, "-")) ++idle; } for_each_process_thread(g, p) { - if (kdb_task_state(p, mask_M)) + if (kdb_task_state(p, "ims")) ++daemon; } if (idle || daemon) { if (idle) - kdb_printf("%d idle process%s (state I)%s\n", + kdb_printf("%d idle process%s (state -)%s\n", idle, idle == 1 ? "" : "es", daemon ? " and " : ""); if (daemon) - kdb_printf("%d sleeping system daemon (state M) " + kdb_printf("%d sleeping system daemon (state [ims]) " "process%s", daemon, daemon == 1 ? "" : "es"); kdb_printf(" suppressed,\nuse 'ps A' to see all.\n"); } } -/* - * kdb_ps - This function implements the 'ps' command which shows a - * list of the active processes. - * ps [DRSTCZEUIMA] All processes, optionally filtered by state - */ void kdb_ps1(const struct task_struct *p) { int cpu; @@ -2330,17 +2323,25 @@ void kdb_ps1(const struct task_struct *p) } } +/* + * kdb_ps - This function implements the 'ps' command which shows a + * list of the active processes. + * + * ps [<state_chars>] Show processes, optionally selecting only those whose + * state character is found in <state_chars>. + */ static int kdb_ps(int argc, const char **argv) { struct task_struct *g, *p; - unsigned long mask, cpu; + const char *mask; + unsigned long cpu; if (argc == 0) kdb_ps_suppressed(); kdb_printf("%-*s Pid Parent [*] cpu State %-*s Command\n", (int)(2*sizeof(void *))+2, "Task Addr", (int)(2*sizeof(void *))+2, "Thread"); - mask = kdb_task_state_string(argc ? argv[1] : NULL); + mask = argc ? 
argv[1] : kdbgetenv("PS"); /* Run the active tasks first */ for_each_online_cpu(cpu) { if (KDB_FLAG(CMD_INTERRUPT)) @@ -2742,8 +2743,8 @@ static kdbtab_t maintab[] = { }, { .name = "bta", .func = kdb_bt, - .usage = "[D|R|S|T|C|Z|E|U|I|M|A]", - .help = "Backtrace all processes matching state flag", + .usage = "[<state_chars>|A]", + .help = "Backtrace all processes whose state matches", .flags = KDB_ENABLE_INSPECT, }, { .name = "btc", @@ -2797,7 +2798,7 @@ static kdbtab_t maintab[] = { }, { .name = "ps", .func = kdb_ps, - .usage = "[<flags>|A]", + .usage = "[<state_chars>|A]", .help = "Display active task list", .flags = KDB_ENABLE_INSPECT, }, diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h index 629590084a0d..0d2f9feea0a4 100644 --- a/kernel/debug/kdb/kdb_private.h +++ b/kernel/debug/kdb/kdb_private.h @@ -190,10 +190,8 @@ extern char kdb_grep_string[]; extern int kdb_grep_leading; extern int kdb_grep_trailing; extern char *kdb_cmds[]; -extern unsigned long kdb_task_state_string(const char *); extern char kdb_task_state_char (const struct task_struct *); -extern unsigned long kdb_task_state(const struct task_struct *p, - unsigned long mask); +extern bool kdb_task_state(const struct task_struct *p, const char *mask); extern void kdb_ps_suppressed(void); extern void kdb_ps1(const struct task_struct *p); extern void kdb_send_sig(struct task_struct *p, int sig); diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c index 7507d9a8dc6a..df2bface866e 100644 --- a/kernel/debug/kdb/kdb_support.c +++ b/kernel/debug/kdb/kdb_support.c @@ -24,6 +24,7 @@ #include <linux/uaccess.h> #include <linux/kdb.h> #include <linux/slab.h> +#include <linux/ctype.h> #include "kdb_private.h" /* @@ -473,82 +474,7 @@ int kdb_putword(unsigned long addr, unsigned long word, size_t size) return diag; } -/* - * kdb_task_state_string - Convert a string containing any of the - * letters DRSTCZEUIMA to a mask for the process state field and - * return the value. If no argument is supplied, return the mask - * that corresponds to environment variable PS, DRSTCZEU by - * default. - * Inputs: - * s String to convert - * Returns: - * Mask for process state. - * Notes: - * The mask folds data from several sources into a single long value, so - * be careful not to overlap the bits. TASK_* bits are in the LSB, - * special cases like UNRUNNABLE are in the MSB. As of 2.6.10-rc1 there - * is no overlap between TASK_* and EXIT_* but that may not always be - * true, so EXIT_* bits are shifted left 16 bits before being stored in - * the mask. 
- */ - -/* unrunnable is < 0 */ -#define UNRUNNABLE (1UL << (8*sizeof(unsigned long) - 1)) -#define RUNNING (1UL << (8*sizeof(unsigned long) - 2)) -#define IDLE (1UL << (8*sizeof(unsigned long) - 3)) -#define DAEMON (1UL << (8*sizeof(unsigned long) - 4)) -unsigned long kdb_task_state_string(const char *s) -{ - long res = 0; - if (!s) { - s = kdbgetenv("PS"); - if (!s) - s = "DRSTCZEU"; /* default value for ps */ - } - while (*s) { - switch (*s) { - case 'D': - res |= TASK_UNINTERRUPTIBLE; - break; - case 'R': - res |= RUNNING; - break; - case 'S': - res |= TASK_INTERRUPTIBLE; - break; - case 'T': - res |= TASK_STOPPED; - break; - case 'C': - res |= TASK_TRACED; - break; - case 'Z': - res |= EXIT_ZOMBIE << 16; - break; - case 'E': - res |= EXIT_DEAD << 16; - break; - case 'U': - res |= UNRUNNABLE; - break; - case 'I': - res |= IDLE; - break; - case 'M': - res |= DAEMON; - break; - case 'A': - res = ~0UL; - break; - default: - kdb_func_printf("unknown flag '%c' ignored\n", *s); - break; - } - ++s; - } - return res; -} /* * kdb_task_state_char - Return the character that represents the task state. @@ -559,7 +485,6 @@ unsigned long kdb_task_state_string(const char *s) */ char kdb_task_state_char (const struct task_struct *p) { - unsigned int p_state; unsigned long tmp; char state; int cpu; @@ -568,25 +493,18 @@ char kdb_task_state_char (const struct task_struct *p) copy_from_kernel_nofault(&tmp, (char *)p, sizeof(unsigned long))) return 'E'; - cpu = kdb_process_cpu(p); - p_state = READ_ONCE(p->__state); - state = (p_state == 0) ? 'R' : - (p_state < 0) ? 'U' : - (p_state & TASK_UNINTERRUPTIBLE) ? 'D' : - (p_state & TASK_STOPPED) ? 'T' : - (p_state & TASK_TRACED) ? 'C' : - (p->exit_state & EXIT_ZOMBIE) ? 'Z' : - (p->exit_state & EXIT_DEAD) ? 'E' : - (p_state & TASK_INTERRUPTIBLE) ? 'S' : '?'; + state = task_state_to_char((struct task_struct *) p); + if (is_idle_task(p)) { /* Idle task. Is it really idle, apart from the kdb * interrupt? */ + cpu = kdb_process_cpu(p); if (!kdb_task_has_cpu(p) || kgdb_info[cpu].irq_depth == 1) { if (cpu != kdb_initial_cpu) - state = 'I'; /* idle task */ + state = '-'; /* idle task */ } - } else if (!p->mm && state == 'S') { - state = 'M'; /* sleeping system daemon */ + } else if (!p->mm && strchr("IMS", state)) { + state = tolower(state); /* sleeping system daemon */ } return state; } @@ -596,14 +514,28 @@ char kdb_task_state_char (const struct task_struct *p) * given by the mask. * Inputs: * p struct task for the process - * mask mask from kdb_task_state_string to select processes + * mask set of characters used to select processes; both NULL + * and the empty string mean adopt a default filter, which + * is to suppress sleeping system daemons and the idle tasks * Returns: * True if the process matches at least one criteria defined by the mask. */ -unsigned long kdb_task_state(const struct task_struct *p, unsigned long mask) +bool kdb_task_state(const struct task_struct *p, const char *mask) { - char state[] = { kdb_task_state_char(p), '\0' }; - return (mask & kdb_task_state_string(state)) != 0; + char state = kdb_task_state_char(p); + + /* If there is no mask, then we will filter code that runs when the + * scheduler is idling and any system daemons that are currently + * sleeping. 
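The kdb rework in the hunks above and below drops the old state bitmask in favour of plain strings of state characters. The matching rule that the new kdb_task_state() body (just below) ends up with can be mirrored in a few lines of standard C; the helper name here is invented for illustration:

#include <stdbool.h>
#include <string.h>

/* Empty mask: hide idle tasks ('-') and sleeping system daemons
 * ('i'/'m'/'s').  'A' is a wildcard.  Anything else is a set of
 * state characters to accept. */
bool state_matches(char state, const char *mask)
{
        if (!mask || mask[0] == '\0')
                return !strchr("-ims", state);
        if (strchr(mask, 'A'))
                return true;
        return strchr(mask, state) != NULL;
}

For example, state_matches('-', "") is false (idle tasks stay suppressed by default), while state_matches('-', "A") is true, which is why 'ps A' still shows everything.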
+ */ + if (!mask || mask[0] == '\0') + return !strchr("-ims", state); + + /* A is a special case that matches all states */ + if (strchr(mask, 'A')) + return true; + + return strchr(mask, state); } /* Maintain a small stack of kdb_flags to allow recursion without disturbing diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c index 25fc85a7aebe..375fb3c9538d 100644 --- a/kernel/dma/coherent.c +++ b/kernel/dma/coherent.c @@ -40,7 +40,6 @@ static struct dma_coherent_mem *dma_init_coherent_memory(phys_addr_t phys_addr, { struct dma_coherent_mem *dma_mem; int pages = size >> PAGE_SHIFT; - int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); void *mem_base; if (!size) @@ -53,7 +52,7 @@ static struct dma_coherent_mem *dma_init_coherent_memory(phys_addr_t phys_addr, dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); if (!dma_mem) goto out_unmap_membase; - dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); + dma_mem->bitmap = bitmap_zalloc(pages, GFP_KERNEL); if (!dma_mem->bitmap) goto out_free_dma_mem; @@ -81,7 +80,7 @@ static void dma_release_coherent_memory(struct dma_coherent_mem *mem) return; memunmap(mem->virt_base); - kfree(mem->bitmap); + bitmap_free(mem->bitmap); kfree(mem); } diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 8349a9f2c345..9478eccd1c8e 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -296,10 +296,6 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, if (WARN_ON_ONCE(!dev->dma_mask)) return DMA_MAPPING_ERROR; - /* Don't allow RAM to be mapped */ - if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr)))) - return DMA_MAPPING_ERROR; - if (dma_map_direct(dev, ops)) addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs); else if (ops->map_resource) diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index c4ca040fdb05..8e840fbbed7c 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -247,7 +247,7 @@ swiotlb_init(int verbose) return; fail_free_mem: - memblock_free_early(__pa(tlb), bytes); + memblock_free(tlb, bytes); fail: pr_warn("Cannot allocate buffer"); } @@ -459,7 +459,7 @@ static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index) * allocate a buffer from that IO TLB pool. 
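The swiotlb_find_slots() change that follows threads a new alloc_align_mask argument into the slot search; the only arithmetic it adds is an extra term that the existing stride is raised to via max(). A self-contained sketch of that term, assuming the upstream 2 KiB slot size (IO_TLB_SHIFT == 11); the helper name is made up:

#define IO_TLB_SHIFT    11      /* 2 KiB IO-TLB slots */

/* Minimum distance, in slots, between candidate mappings that can
 * honour the requested alignment mask. */
unsigned int align_mask_to_stride(unsigned int alloc_align_mask)
{
        return (alloc_align_mask >> IO_TLB_SHIFT) + 1;
}

/* align_mask_to_stride(0)      == 1   no extra constraint
 * align_mask_to_stride(0xfff)  == 2   4 KiB alignment
 * align_mask_to_stride(0xffff) == 32  64 KiB alignment */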
*/ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr, - size_t alloc_size) + size_t alloc_size, unsigned int alloc_align_mask) { struct io_tlb_mem *mem = dev->dma_io_tlb_mem; unsigned long boundary_mask = dma_get_seg_boundary(dev); @@ -483,6 +483,7 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr, stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1; if (alloc_size >= PAGE_SIZE) stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT)); + stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1); spin_lock_irqsave(&mem->lock, flags); if (unlikely(nslots > mem->nslabs - mem->used)) @@ -541,7 +542,8 @@ found: phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, size_t mapping_size, size_t alloc_size, - enum dma_data_direction dir, unsigned long attrs) + unsigned int alloc_align_mask, enum dma_data_direction dir, + unsigned long attrs) { struct io_tlb_mem *mem = dev->dma_io_tlb_mem; unsigned int offset = swiotlb_align_offset(dev, orig_addr); @@ -561,7 +563,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, return (phys_addr_t)DMA_MAPPING_ERROR; } - index = swiotlb_find_slots(dev, orig_addr, alloc_size + offset); + index = swiotlb_find_slots(dev, orig_addr, + alloc_size + offset, alloc_align_mask); if (index == -1) { if (!(attrs & DMA_ATTR_NO_WARN)) dev_warn_ratelimited(dev, @@ -675,7 +678,7 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size, trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size, swiotlb_force); - swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, dir, + swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir, attrs); if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR) return DMA_MAPPING_ERROR; @@ -759,7 +762,7 @@ struct page *swiotlb_alloc(struct device *dev, size_t size) if (!mem) return NULL; - index = swiotlb_find_slots(dev, 0, size); + index = swiotlb_find_slots(dev, 0, size, 0); if (index == -1) return NULL; diff --git a/kernel/entry/syscall_user_dispatch.c b/kernel/entry/syscall_user_dispatch.c index c240302f56e2..4508201847d2 100644 --- a/kernel/entry/syscall_user_dispatch.c +++ b/kernel/entry/syscall_user_dispatch.c @@ -47,14 +47,18 @@ bool syscall_user_dispatch(struct pt_regs *regs) * access_ok() is performed once, at prctl time, when * the selector is loaded by userspace. */ - if (unlikely(__get_user(state, sd->selector))) - do_exit(SIGSEGV); + if (unlikely(__get_user(state, sd->selector))) { + force_fatal_sig(SIGSEGV); + return true; + } if (likely(state == SYSCALL_DISPATCH_FILTER_ALLOW)) return false; - if (state != SYSCALL_DISPATCH_FILTER_BLOCK) - do_exit(SIGSYS); + if (state != SYSCALL_DISPATCH_FILTER_BLOCK) { + force_fatal_sig(SIGSYS); + return true; + } } sd->on_dispatch = true; diff --git a/kernel/events/core.c b/kernel/events/core.c index a89b01b7e59a..523106a506ee 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7154,7 +7154,6 @@ void perf_output_sample(struct perf_output_handle *handle, static u64 perf_virt_to_phys(u64 virt) { u64 phys_addr = 0; - struct page *p = NULL; if (!virt) return 0; @@ -7173,14 +7172,15 @@ static u64 perf_virt_to_phys(u64 virt) * If failed, leave phys_addr as 0. 
*/ if (current->mm != NULL) { + struct page *p; + pagefault_disable(); - if (get_user_page_fast_only(virt, 0, &p)) + if (get_user_page_fast_only(virt, 0, &p)) { phys_addr = page_to_phys(p) + virt % PAGE_SIZE; + put_page(p); + } pagefault_enable(); } - - if (p) - put_page(p); } return phys_addr; @@ -13491,3 +13491,5 @@ struct cgroup_subsys perf_event_cgrp_subsys = { .threaded = true, }; #endif /* CONFIG_CGROUP_PERF */ + +DEFINE_STATIC_CALL_RET0(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t); diff --git a/kernel/events/internal.h b/kernel/events/internal.h index 228801e20788..082832738c8f 100644 --- a/kernel/events/internal.h +++ b/kernel/events/internal.h @@ -205,12 +205,7 @@ DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user) static inline int get_recursion_context(int *recursion) { - unsigned int pc = preempt_count(); - unsigned char rctx = 0; - - rctx += !!(pc & (NMI_MASK)); - rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK)); - rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)); + unsigned char rctx = interrupt_context_level(); if (recursion[rctx]) return -1; diff --git a/kernel/exit.c b/kernel/exit.c index 50f1692c732d..f702a6a63686 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -340,6 +340,46 @@ kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent) } } +static void coredump_task_exit(struct task_struct *tsk) +{ + struct core_state *core_state; + + /* + * Serialize with any possible pending coredump. + * We must hold siglock around checking core_state + * and setting PF_POSTCOREDUMP. The core-inducing thread + * will increment ->nr_threads for each thread in the + * group without PF_POSTCOREDUMP set. + */ + spin_lock_irq(&tsk->sighand->siglock); + tsk->flags |= PF_POSTCOREDUMP; + core_state = tsk->signal->core_state; + spin_unlock_irq(&tsk->sighand->siglock); + if (core_state) { + struct core_thread self; + + self.task = current; + if (self.task->flags & PF_SIGNALED) + self.next = xchg(&core_state->dumper.next, &self); + else + self.task = NULL; + /* + * Implies mb(), the result of xchg() must be visible + * to core_state->dumper. + */ + if (atomic_dec_and_test(&core_state->nr_threads)) + complete(&core_state->startup); + + for (;;) { + set_current_state(TASK_UNINTERRUPTIBLE); + if (!self.task) /* see coredump_finish() */ + break; + freezable_schedule(); + } + __set_current_state(TASK_RUNNING); + } +} + #ifdef CONFIG_MEMCG /* * A task is exiting. If it owned this mm, find a new owner for the mm. @@ -435,47 +475,12 @@ assign_new_owner: static void exit_mm(void) { struct mm_struct *mm = current->mm; - struct core_state *core_state; exit_mm_release(current, mm); if (!mm) return; sync_mm_rss(mm); - /* - * Serialize with any possible pending coredump. - * We must hold mmap_lock around checking core_state - * and clearing tsk->mm. The core-inducing thread - * will increment ->nr_threads for each thread in the - * group with ->mm != NULL. - */ mmap_read_lock(mm); - core_state = mm->core_state; - if (core_state) { - struct core_thread self; - - mmap_read_unlock(mm); - - self.task = current; - if (self.task->flags & PF_SIGNALED) - self.next = xchg(&core_state->dumper.next, &self); - else - self.task = NULL; - /* - * Implies mb(), the result of xchg() must be visible - * to core_state->dumper. 
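The get_recursion_context() hunk above replaces the open-coded preempt_count classification with the new interrupt_context_level() helper. The counting trick is easy to miss in the diff; a user-space mirror (the flag values here are illustrative, not the kernel's preempt_count layout) shows how it maps task, softirq, hardirq and NMI context to 0..3:

#include <stdio.h>

#define SOFTIRQ_CTX     (1u << 0)
#define HARDIRQ_CTX     (1u << 1)
#define NMI_CTX         (1u << 2)

static unsigned int context_level(unsigned int ctx)
{
        unsigned int level = 0;

        level += !!(ctx & NMI_CTX);
        level += !!(ctx & (NMI_CTX | HARDIRQ_CTX));
        level += !!(ctx & (NMI_CTX | HARDIRQ_CTX | SOFTIRQ_CTX));
        return level;
}

int main(void)
{
        /* prints "0 1 2 3": task, softirq, hardirq, NMI */
        printf("%u %u %u %u\n", context_level(0), context_level(SOFTIRQ_CTX),
               context_level(HARDIRQ_CTX), context_level(NMI_CTX));
        return 0;
}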
- */ - if (atomic_dec_and_test(&core_state->nr_threads)) - complete(&core_state->startup); - - for (;;) { - set_current_state(TASK_UNINTERRUPTIBLE); - if (!self.task) /* see coredump_finish() */ - break; - freezable_schedule(); - } - __set_current_state(TASK_RUNNING); - mmap_read_lock(mm); - } mmgrab(mm); BUG_ON(mm != current->active_mm); /* more a memory barrier than a real lock */ @@ -763,6 +768,7 @@ void __noreturn do_exit(long code) profile_task_exit(tsk); kcov_task_exit(tsk); + coredump_task_exit(tsk); ptrace_event(PTRACE_EVENT_EXIT, code); validate_creds_for_do_exit(tsk); diff --git a/kernel/extable.c b/kernel/extable.c index b0ea5eb0c3b4..b6f330f0fe74 100644 --- a/kernel/extable.c +++ b/kernel/extable.c @@ -62,40 +62,13 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr) return e; } -int init_kernel_text(unsigned long addr) -{ - if (addr >= (unsigned long)_sinittext && - addr < (unsigned long)_einittext) - return 1; - return 0; -} - int notrace core_kernel_text(unsigned long addr) { - if (addr >= (unsigned long)_stext && - addr < (unsigned long)_etext) + if (is_kernel_text(addr)) return 1; - if (system_state < SYSTEM_RUNNING && - init_kernel_text(addr)) - return 1; - return 0; -} - -/** - * core_kernel_data - tell if addr points to kernel data - * @addr: address to test - * - * Returns true if @addr passed in is from the core kernel data - * section. - * - * Note: On some archs it may return true for core RODATA, and false - * for others. But will always be true for core RW data. - */ -int core_kernel_data(unsigned long addr) -{ - if (addr >= (unsigned long)_sdata && - addr < (unsigned long)_edata) + if (system_state < SYSTEM_FREEING_INITMEM && + is_kernel_inittext(addr)) return 1; return 0; } @@ -112,7 +85,7 @@ int __kernel_text_address(unsigned long addr) * Since we are after the module-symbols check, there's * no danger of address overlap: */ - if (init_kernel_text(addr)) + if (is_kernel_inittext(addr)) return 1; return 0; } diff --git a/kernel/fork.c b/kernel/fork.c index 8269ae2e5d7c..3244cc56b697 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1043,7 +1043,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, seqcount_init(&mm->write_protect_seq); mmap_init_lock(mm); INIT_LIST_HEAD(&mm->mmlist); - mm->core_state = NULL; mm_pgtables_bytes_init(mm); mm->map_count = 0; mm->locked_vm = 0; @@ -1391,8 +1390,7 @@ static void mm_release(struct task_struct *tsk, struct mm_struct *mm) * purposes. */ if (tsk->clear_child_tid) { - if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) && - atomic_read(&mm->mm_users) > 1) { + if (atomic_read(&mm->mm_users) > 1) { /* * We don't check the error code - if userspace has * not set up a proper pointer then tough luck. 
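The extable.c hunk above folds the open-coded section checks into is_kernel_text()/is_kernel_inittext(). Conceptually these are half-open range tests against linker-provided section bounds; a runnable sketch with fake addresses (the real kernel reads _stext/_etext and _sinittext/_einittext from the linker script):

#include <stdbool.h>
#include <stdio.h>

static const unsigned long stext = 0x1000, etext = 0x9000;
static const unsigned long sinittext = 0xa000, einittext = 0xb000;

static bool in_kernel_text(unsigned long addr)
{
        return addr >= stext && addr < etext;
}

static bool in_kernel_inittext(unsigned long addr)
{
        return addr >= sinittext && addr < einittext;
}

int main(void)
{
        /* prints "1 1 0" */
        printf("%d %d %d\n", in_kernel_text(0x2000),
               in_kernel_inittext(0xa800), in_kernel_text(0xa800));
        return 0;
}

core_kernel_text() additionally accepts init-text addresses only while system_state is still below SYSTEM_FREEING_INITMEM, since init sections are discarded once boot finishes.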
@@ -3027,7 +3025,7 @@ int unshare_fd(unsigned long unshare_flags, unsigned int max_fds, int ksys_unshare(unsigned long unshare_flags) { struct fs_struct *fs, *new_fs = NULL; - struct files_struct *fd, *new_fd = NULL; + struct files_struct *new_fd = NULL; struct cred *new_cred = NULL; struct nsproxy *new_nsproxy = NULL; int do_sysvsem = 0; @@ -3114,11 +3112,8 @@ int ksys_unshare(unsigned long unshare_flags) spin_unlock(&fs->lock); } - if (new_fd) { - fd = current->files; - current->files = new_fd; - new_fd = fd; - } + if (new_fd) + swap(current->files, new_fd); task_unlock(current); diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 4d8fc65cf38f..bf38c546aa25 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -744,9 +744,8 @@ static int irq_domain_translate(struct irq_domain *d, return 0; } -static void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args, - unsigned int count, - struct irq_fwspec *fwspec) +void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args, + unsigned int count, struct irq_fwspec *fwspec) { int i; @@ -756,6 +755,7 @@ static void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args, for (i = 0; i < count; i++) fwspec->param[i] = args[i]; } +EXPORT_SYMBOL_GPL(of_phandle_args_to_fwspec); unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec) { @@ -1502,6 +1502,7 @@ out_free_desc: irq_free_descs(virq, nr_irqs); return ret; } +EXPORT_SYMBOL_GPL(__irq_domain_alloc_irqs); /* The irq_data was moved, fix the revmap to refer to the new location */ static void irq_domain_fix_revmap(struct irq_data *d) diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index 6a5ecee6e567..7f350ae59c5f 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -529,10 +529,10 @@ static bool msi_check_reservation_mode(struct irq_domain *domain, /* * Checking the first MSI descriptor is sufficient. MSIX supports - * masking and MSI does so when the maskbit is set. + * masking and MSI does so when the can_mask attribute is set. */ desc = first_msi_entry(dev); - return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit; + return desc->msi_attrib.is_msix || desc->msi_attrib.can_mask; } int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 0ba87982d017..3011bc33a5ba 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -164,26 +164,46 @@ static unsigned long kallsyms_sym_address(int idx) return kallsyms_relative_base - 1 - kallsyms_offsets[idx]; } -#if defined(CONFIG_CFI_CLANG) && defined(CONFIG_LTO_CLANG_THIN) -/* - * LLVM appends a hash to static function names when ThinLTO and CFI are - * both enabled, i.e. foo() becomes foo$707af9a22804d33c81801f27dcfe489b. - * This causes confusion and potentially breaks user space tools, so we - * strip the suffix from expanded symbol names. - */ -static inline bool cleanup_symbol_name(char *s) +static bool cleanup_symbol_name(char *s) { char *res; + if (!IS_ENABLED(CONFIG_LTO_CLANG)) + return false; + + /* + * LLVM appends various suffixes for local functions and variables that + * must be promoted to global scope as part of LTO. This can break + * hooking of static functions with kprobes. '.' is not a valid + * character in an identifier in C. 
Suffixes observed: + * - foo.llvm.[0-9a-f]+ + * - foo.[0-9a-f]+ + * - foo.[0-9a-f]+.cfi_jt + */ + res = strchr(s, '.'); + if (res) { + *res = '\0'; + return true; + } + + if (!IS_ENABLED(CONFIG_CFI_CLANG) || + !IS_ENABLED(CONFIG_LTO_CLANG_THIN) || + CONFIG_CLANG_VERSION >= 130000) + return false; + + /* + * Prior to LLVM 13, the following suffixes were observed when thinLTO + * and CFI are both enabled: + * - foo$[0-9]+ + */ res = strrchr(s, '$'); - if (res) + if (res) { *res = '\0'; + return true; + } - return res != NULL; + return false; } -#else -static inline bool cleanup_symbol_name(char *s) { return false; } -#endif /* Lookup the address for this symbol. Returns 0 if not found. */ unsigned long kallsyms_lookup_name(const char *name) diff --git a/kernel/kcov.c b/kernel/kcov.c index 80bfe71bbe13..36ca640c4f8e 100644 --- a/kernel/kcov.c +++ b/kernel/kcov.c @@ -88,6 +88,7 @@ static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas); struct kcov_percpu_data { void *irq_area; + local_lock_t lock; unsigned int saved_mode; unsigned int saved_size; @@ -96,7 +97,9 @@ struct kcov_percpu_data { int saved_sequence; }; -static DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data); +static DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data) = { + .lock = INIT_LOCAL_LOCK(lock), +}; /* Must be called with kcov_remote_lock locked. */ static struct kcov_remote *kcov_remote_find(u64 handle) @@ -824,7 +827,7 @@ void kcov_remote_start(u64 handle) if (!in_task() && !in_serving_softirq()) return; - local_irq_save(flags); + local_lock_irqsave(&kcov_percpu_data.lock, flags); /* * Check that kcov_remote_start() is not called twice in background @@ -832,7 +835,7 @@ void kcov_remote_start(u64 handle) */ mode = READ_ONCE(t->kcov_mode); if (WARN_ON(in_task() && kcov_mode_enabled(mode))) { - local_irq_restore(flags); + local_unlock_irqrestore(&kcov_percpu_data.lock, flags); return; } /* @@ -841,14 +844,15 @@ void kcov_remote_start(u64 handle) * happened while collecting coverage from a background thread. */ if (WARN_ON(in_serving_softirq() && t->kcov_softirq)) { - local_irq_restore(flags); + local_unlock_irqrestore(&kcov_percpu_data.lock, flags); return; } spin_lock(&kcov_remote_lock); remote = kcov_remote_find(handle); if (!remote) { - spin_unlock_irqrestore(&kcov_remote_lock, flags); + spin_unlock(&kcov_remote_lock); + local_unlock_irqrestore(&kcov_percpu_data.lock, flags); return; } kcov_debug("handle = %llx, context: %s\n", handle, @@ -869,19 +873,19 @@ void kcov_remote_start(u64 handle) size = CONFIG_KCOV_IRQ_AREA_SIZE; area = this_cpu_ptr(&kcov_percpu_data)->irq_area; } - spin_unlock_irqrestore(&kcov_remote_lock, flags); + spin_unlock(&kcov_remote_lock); /* Can only happen when in_task(). */ if (!area) { + local_unlock_irqrestore(&kcov_percpu_data.lock, flags); area = vmalloc(size * sizeof(unsigned long)); if (!area) { kcov_put(kcov); return; } + local_lock_irqsave(&kcov_percpu_data.lock, flags); } - local_irq_save(flags); - /* Reset coverage size. 
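The reworked cleanup_symbol_name() above now truncates at the first '.' for any CONFIG_LTO_CLANG build, and only falls back to the old '$' handling for ThinLTO plus CFI with Clang older than 13. A user-space mirror of the '.'-stripping part (helper name invented):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool strip_lto_suffix(char *s)
{
        char *dot = strchr(s, '.');     /* '.' cannot occur in a C identifier */

        if (!dot)
                return false;
        *dot = '\0';
        return true;
}

int main(void)
{
        char a[] = "foo.llvm.1234abcd", b[] = "foo.cfi_jt", c[] = "foo";

        strip_lto_suffix(a);            /* -> "foo" */
        strip_lto_suffix(b);            /* -> "foo" */
        strip_lto_suffix(c);            /* unchanged, returns false */
        printf("%s %s %s\n", a, b, c);
        return 0;
}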
*/ *(u64 *)area = 0; @@ -891,7 +895,7 @@ void kcov_remote_start(u64 handle) } kcov_start(t, kcov, size, area, mode, sequence); - local_irq_restore(flags); + local_unlock_irqrestore(&kcov_percpu_data.lock, flags); } EXPORT_SYMBOL(kcov_remote_start); @@ -965,12 +969,12 @@ void kcov_remote_stop(void) if (!in_task() && !in_serving_softirq()) return; - local_irq_save(flags); + local_lock_irqsave(&kcov_percpu_data.lock, flags); mode = READ_ONCE(t->kcov_mode); barrier(); if (!kcov_mode_enabled(mode)) { - local_irq_restore(flags); + local_unlock_irqrestore(&kcov_percpu_data.lock, flags); return; } /* @@ -978,12 +982,12 @@ void kcov_remote_stop(void) * actually found the remote handle and started collecting coverage. */ if (in_serving_softirq() && !t->kcov_softirq) { - local_irq_restore(flags); + local_unlock_irqrestore(&kcov_percpu_data.lock, flags); return; } /* Make sure that kcov_softirq is only set when in softirq. */ if (WARN_ON(!in_serving_softirq() && t->kcov_softirq)) { - local_irq_restore(flags); + local_unlock_irqrestore(&kcov_percpu_data.lock, flags); return; } @@ -1013,7 +1017,7 @@ void kcov_remote_stop(void) spin_unlock(&kcov_remote_lock); } - local_irq_restore(flags); + local_unlock_irqrestore(&kcov_percpu_data.lock, flags); /* Get in kcov_remote_start(). */ kcov_put(kcov); @@ -1034,8 +1038,8 @@ static int __init kcov_init(void) int cpu; for_each_possible_cpu(cpu) { - void *area = vmalloc(CONFIG_KCOV_IRQ_AREA_SIZE * - sizeof(unsigned long)); + void *area = vmalloc_node(CONFIG_KCOV_IRQ_AREA_SIZE * + sizeof(unsigned long), cpu_to_node(cpu)); if (!area) return -ENOMEM; per_cpu_ptr(&kcov_percpu_data, cpu)->irq_area = area; diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c index 76e67d1e02d4..4b84c8e7884b 100644 --- a/kernel/kcsan/core.c +++ b/kernel/kcsan/core.c @@ -202,6 +202,9 @@ static __always_inline struct kcsan_ctx *get_ctx(void) return in_task() ? ¤t->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx); } +static __always_inline void +check_access(const volatile void *ptr, size_t size, int type, unsigned long ip); + /* Check scoped accesses; never inline because this is a slow-path! */ static noinline void kcsan_check_scoped_accesses(void) { @@ -210,14 +213,16 @@ static noinline void kcsan_check_scoped_accesses(void) struct kcsan_scoped_access *scoped_access; ctx->scoped_accesses.prev = NULL; /* Avoid recursion. */ - list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) - __kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type); + list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) { + check_access(scoped_access->ptr, scoped_access->size, + scoped_access->type, scoped_access->ip); + } ctx->scoped_accesses.prev = prev_save; } /* Rules for generic atomic accesses. Called from fast-path. */ static __always_inline bool -is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx) +is_atomic(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type) { if (type & KCSAN_ACCESS_ATOMIC) return true; @@ -254,7 +259,7 @@ is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx } static __always_inline bool -should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx) +should_watch(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type) { /* * Never set up watchpoints when memory operations are atomic. 
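The kcov changes above replace bare local_irq_save()/local_irq_restore() around the per-CPU coverage state with a local_lock_t embedded in that state, which both documents what is being protected and gives PREEMPT_RT a real lock to substitute. A condensed kernel-style sketch of the pattern (structure and function names invented, not compilable on its own):

struct demo_pcpu {
        local_lock_t    lock;
        void            *irq_area;
};

static DEFINE_PER_CPU(struct demo_pcpu, demo_pcpu) = {
        .lock = INIT_LOCAL_LOCK(lock),
};

static void demo_touch_area(void)
{
        unsigned long flags;

        local_lock_irqsave(&demo_pcpu.lock, flags);
        /* ... operate on this_cpu_ptr(&demo_pcpu)->irq_area ... */
        local_unlock_irqrestore(&demo_pcpu.lock, flags);
}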
@@ -263,7 +268,7 @@ should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx * * should not count towards skipped instructions, and (2) to actually * decrement kcsan_atomic_next for consecutive instruction stream. */ - if (is_atomic(ptr, size, type, ctx)) + if (is_atomic(ctx, ptr, size, type)) return false; if (this_cpu_dec_return(kcsan_skip) >= 0) @@ -350,6 +355,7 @@ void kcsan_restore_irqtrace(struct task_struct *task) static noinline void kcsan_found_watchpoint(const volatile void *ptr, size_t size, int type, + unsigned long ip, atomic_long_t *watchpoint, long encoded_watchpoint) { @@ -396,7 +402,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, if (consumed) { kcsan_save_irqtrace(current); - kcsan_report_set_info(ptr, size, type, watchpoint - watchpoints); + kcsan_report_set_info(ptr, size, type, ip, watchpoint - watchpoints); kcsan_restore_irqtrace(current); } else { /* @@ -416,7 +422,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr, } static noinline void -kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) +kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned long ip) { const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0; const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0; @@ -568,8 +574,8 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE) atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]); - kcsan_report_known_origin(ptr, size, type, value_change, - watchpoint - watchpoints, + kcsan_report_known_origin(ptr, size, type, ip, + value_change, watchpoint - watchpoints, old, new, access_mask); } else if (value_change == KCSAN_VALUE_CHANGE_TRUE) { /* Inferring a race, since the value should not have changed. */ @@ -578,8 +584,10 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type) if (is_assert) atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]); - if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert) - kcsan_report_unknown_origin(ptr, size, type, old, new, access_mask); + if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert) { + kcsan_report_unknown_origin(ptr, size, type, ip, + old, new, access_mask); + } } /* @@ -596,8 +604,8 @@ out: user_access_restore(ua_flags); } -static __always_inline void check_access(const volatile void *ptr, size_t size, - int type) +static __always_inline void +check_access(const volatile void *ptr, size_t size, int type, unsigned long ip) { const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0; atomic_long_t *watchpoint; @@ -625,13 +633,12 @@ static __always_inline void check_access(const volatile void *ptr, size_t size, */ if (unlikely(watchpoint != NULL)) - kcsan_found_watchpoint(ptr, size, type, watchpoint, - encoded_watchpoint); + kcsan_found_watchpoint(ptr, size, type, ip, watchpoint, encoded_watchpoint); else { struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. 
*/ - if (unlikely(should_watch(ptr, size, type, ctx))) - kcsan_setup_watchpoint(ptr, size, type); + if (unlikely(should_watch(ctx, ptr, size, type))) + kcsan_setup_watchpoint(ptr, size, type, ip); else if (unlikely(ctx->scoped_accesses.prev)) kcsan_check_scoped_accesses(); } @@ -757,7 +764,7 @@ kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type, { struct kcsan_ctx *ctx = get_ctx(); - __kcsan_check_access(ptr, size, type); + check_access(ptr, size, type, _RET_IP_); ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */ @@ -765,6 +772,7 @@ kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type, sa->ptr = ptr; sa->size = size; sa->type = type; + sa->ip = _RET_IP_; if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */ INIT_LIST_HEAD(&ctx->scoped_accesses); @@ -796,13 +804,13 @@ void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) ctx->disable_count--; - __kcsan_check_access(sa->ptr, sa->size, sa->type); + check_access(sa->ptr, sa->size, sa->type, sa->ip); } EXPORT_SYMBOL(kcsan_end_scoped_access); void __kcsan_check_access(const volatile void *ptr, size_t size, int type) { - check_access(ptr, size, type); + check_access(ptr, size, type, _RET_IP_); } EXPORT_SYMBOL(__kcsan_check_access); @@ -823,7 +831,7 @@ EXPORT_SYMBOL(__kcsan_check_access); void __tsan_read##size(void *ptr); \ void __tsan_read##size(void *ptr) \ { \ - check_access(ptr, size, 0); \ + check_access(ptr, size, 0, _RET_IP_); \ } \ EXPORT_SYMBOL(__tsan_read##size); \ void __tsan_unaligned_read##size(void *ptr) \ @@ -832,7 +840,7 @@ EXPORT_SYMBOL(__kcsan_check_access); void __tsan_write##size(void *ptr); \ void __tsan_write##size(void *ptr) \ { \ - check_access(ptr, size, KCSAN_ACCESS_WRITE); \ + check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_); \ } \ EXPORT_SYMBOL(__tsan_write##size); \ void __tsan_unaligned_write##size(void *ptr) \ @@ -842,7 +850,8 @@ EXPORT_SYMBOL(__kcsan_check_access); void __tsan_read_write##size(void *ptr) \ { \ check_access(ptr, size, \ - KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE); \ + KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE, \ + _RET_IP_); \ } \ EXPORT_SYMBOL(__tsan_read_write##size); \ void __tsan_unaligned_read_write##size(void *ptr) \ @@ -858,14 +867,14 @@ DEFINE_TSAN_READ_WRITE(16); void __tsan_read_range(void *ptr, size_t size); void __tsan_read_range(void *ptr, size_t size) { - check_access(ptr, size, 0); + check_access(ptr, size, 0, _RET_IP_); } EXPORT_SYMBOL(__tsan_read_range); void __tsan_write_range(void *ptr, size_t size); void __tsan_write_range(void *ptr, size_t size) { - check_access(ptr, size, KCSAN_ACCESS_WRITE); + check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_); } EXPORT_SYMBOL(__tsan_write_range); @@ -886,7 +895,8 @@ EXPORT_SYMBOL(__tsan_write_range); IS_ALIGNED((unsigned long)ptr, size); \ if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \ return; \ - check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0); \ + check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0, \ + _RET_IP_); \ } \ EXPORT_SYMBOL(__tsan_volatile_read##size); \ void __tsan_unaligned_volatile_read##size(void *ptr) \ @@ -901,7 +911,8 @@ EXPORT_SYMBOL(__tsan_write_range); return; \ check_access(ptr, size, \ KCSAN_ACCESS_WRITE | \ - (is_atomic ? KCSAN_ACCESS_ATOMIC : 0)); \ + (is_atomic ? 
KCSAN_ACCESS_ATOMIC : 0), \ + _RET_IP_); \ } \ EXPORT_SYMBOL(__tsan_volatile_write##size); \ void __tsan_unaligned_volatile_write##size(void *ptr) \ @@ -955,7 +966,7 @@ EXPORT_SYMBOL(__tsan_init); u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \ { \ if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ - check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC); \ + check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC, _RET_IP_); \ } \ return __atomic_load_n(ptr, memorder); \ } \ @@ -965,7 +976,7 @@ EXPORT_SYMBOL(__tsan_init); { \ if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ check_access(ptr, bits / BITS_PER_BYTE, \ - KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \ + KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC, _RET_IP_); \ } \ __atomic_store_n(ptr, v, memorder); \ } \ @@ -978,7 +989,7 @@ EXPORT_SYMBOL(__tsan_init); if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ check_access(ptr, bits / BITS_PER_BYTE, \ KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \ - KCSAN_ACCESS_ATOMIC); \ + KCSAN_ACCESS_ATOMIC, _RET_IP_); \ } \ return __atomic_##op##suffix(ptr, v, memorder); \ } \ @@ -1010,7 +1021,7 @@ EXPORT_SYMBOL(__tsan_init); if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ check_access(ptr, bits / BITS_PER_BYTE, \ KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \ - KCSAN_ACCESS_ATOMIC); \ + KCSAN_ACCESS_ATOMIC, _RET_IP_); \ } \ return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \ } \ @@ -1025,7 +1036,7 @@ EXPORT_SYMBOL(__tsan_init); if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \ check_access(ptr, bits / BITS_PER_BYTE, \ KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \ - KCSAN_ACCESS_ATOMIC); \ + KCSAN_ACCESS_ATOMIC, _RET_IP_); \ } \ __atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo); \ return exp; \ diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h index f36e25c497ed..ae33c2a7f07e 100644 --- a/kernel/kcsan/kcsan.h +++ b/kernel/kcsan/kcsan.h @@ -121,7 +121,7 @@ enum kcsan_value_change { * to be consumed by the reporting thread. No report is printed yet. */ void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type, - int watchpoint_idx); + unsigned long ip, int watchpoint_idx); /* * The calling thread observed that the watchpoint it set up was hit and @@ -129,14 +129,14 @@ void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_typ * thread. */ void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type, - enum kcsan_value_change value_change, int watchpoint_idx, - u64 old, u64 new, u64 mask); + unsigned long ip, enum kcsan_value_change value_change, + int watchpoint_idx, u64 old, u64 new, u64 mask); /* * No other thread was observed to race with the access, but the data value * before and after the stall differs. Reports a race of "unknown origin". 
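The KCSAN changes above thread an 'unsigned long ip', captured with _RET_IP_ at the public entry points, all the way down into the report machinery so that scoped accesses can be attributed to the call site that set them up. A user-space analogy of that plumbing (all names invented; __builtin_return_address(0) is what the kernel's _RET_IP_ macro expands to):

#include <stdio.h>

static void report(const void *ptr, unsigned long ip)
{
        printf("access to %p attributed to caller %#lx\n", ptr, ip);
}

static void do_check(const void *ptr, unsigned long ip)
{
        report(ptr, ip);        /* ip is passed through untouched */
}

void public_check(const void *ptr)
{
        do_check(ptr, (unsigned long)__builtin_return_address(0));
}

int main(void)
{
        int x = 0;

        public_check(&x);       /* the report points back at this call site */
        return 0;
}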
*/ void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type, - u64 old, u64 new, u64 mask); + unsigned long ip, u64 old, u64 new, u64 mask); #endif /* _KERNEL_KCSAN_KCSAN_H */ diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c index dc55fd5a36fc..660729238588 100644 --- a/kernel/kcsan/kcsan_test.c +++ b/kernel/kcsan/kcsan_test.c @@ -29,6 +29,11 @@ #include <linux/types.h> #include <trace/events/printk.h> +#define KCSAN_TEST_REQUIRES(test, cond) do { \ + if (!(cond)) \ + kunit_skip((test), "Test requires: " #cond); \ +} while (0) + #ifdef CONFIG_CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE #define __KCSAN_ACCESS_RW(alt) (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE) #else @@ -205,10 +210,12 @@ static bool report_matches(const struct expect_report *r) "read-write" : "write") : "read"); + const bool is_atomic = (ty & KCSAN_ACCESS_ATOMIC); + const bool is_scoped = (ty & KCSAN_ACCESS_SCOPED); const char *const access_type_aux = - (ty & KCSAN_ACCESS_ATOMIC) ? - " (marked)" : - ((ty & KCSAN_ACCESS_SCOPED) ? " (scoped)" : ""); + (is_atomic && is_scoped) ? " (marked, scoped)" + : (is_atomic ? " (marked)" + : (is_scoped ? " (scoped)" : "")); if (i == 1) { /* Access 2 */ @@ -333,7 +340,10 @@ static noinline void test_kernel_assert_bits_nochange(void) ASSERT_EXCLUSIVE_BITS(test_var, ~TEST_CHANGE_BITS); } -/* To check that scoped assertions do trigger anywhere in scope. */ +/* + * Scoped assertions do trigger anywhere in scope. However, the report should + * still only point at the start of the scope. + */ static noinline void test_enter_scope(void) { int x = 0; @@ -488,17 +498,24 @@ static void test_concurrent_races(struct kunit *test) __no_kcsan static void test_novalue_change(struct kunit *test) { - const struct expect_report expect = { + const struct expect_report expect_rw = { .access = { { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, { test_kernel_read, &test_var, sizeof(test_var), 0 }, }, }; + const struct expect_report expect_ww = { + .access = { + { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, + { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, + }, + }; bool match_expect = false; + test_kernel_write_nochange(); /* Reset value. */ begin_test_checks(test_kernel_write_nochange, test_kernel_read); do { - match_expect = report_matches(&expect); + match_expect = report_matches(&expect_rw) || report_matches(&expect_ww); } while (!end_test_checks(match_expect)); if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY)) KUNIT_EXPECT_FALSE(test, match_expect); @@ -513,17 +530,24 @@ static void test_novalue_change(struct kunit *test) __no_kcsan static void test_novalue_change_exception(struct kunit *test) { - const struct expect_report expect = { + const struct expect_report expect_rw = { .access = { { test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, { test_kernel_read, &test_var, sizeof(test_var), 0 }, }, }; + const struct expect_report expect_ww = { + .access = { + { test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, + { test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, + }, + }; bool match_expect = false; + test_kernel_write_nochange_rcu(); /* Reset value. 
*/ begin_test_checks(test_kernel_write_nochange_rcu, test_kernel_read); do { - match_expect = report_matches(&expect); + match_expect = report_matches(&expect_rw) || report_matches(&expect_ww); } while (!end_test_checks(match_expect)); KUNIT_EXPECT_TRUE(test, match_expect); } @@ -642,8 +666,7 @@ static void test_read_plain_atomic_write(struct kunit *test) }; bool match_expect = false; - if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) - return; + KCSAN_TEST_REQUIRES(test, !IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)); begin_test_checks(test_kernel_read, test_kernel_write_atomic); do { @@ -665,8 +688,7 @@ static void test_read_plain_atomic_rmw(struct kunit *test) }; bool match_expect = false; - if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) - return; + KCSAN_TEST_REQUIRES(test, !IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)); begin_test_checks(test_kernel_read, test_kernel_atomic_rmw); do { @@ -828,22 +850,22 @@ static void test_assert_exclusive_writer_scoped(struct kunit *test) { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, }, }; - const struct expect_report expect_anywhere = { + const struct expect_report expect_inscope = { .access = { { test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED }, { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, }, }; bool match_expect_start = false; - bool match_expect_anywhere = false; + bool match_expect_inscope = false; begin_test_checks(test_kernel_assert_writer_scoped, test_kernel_write_nochange); do { match_expect_start |= report_matches(&expect_start); - match_expect_anywhere |= report_matches(&expect_anywhere); - } while (!end_test_checks(match_expect_start && match_expect_anywhere)); + match_expect_inscope |= report_matches(&expect_inscope); + } while (!end_test_checks(match_expect_inscope)); KUNIT_EXPECT_TRUE(test, match_expect_start); - KUNIT_EXPECT_TRUE(test, match_expect_anywhere); + KUNIT_EXPECT_FALSE(test, match_expect_inscope); } __no_kcsan @@ -872,9 +894,9 @@ static void test_assert_exclusive_access_scoped(struct kunit *test) do { match_expect_start |= report_matches(&expect_start1) || report_matches(&expect_start2); match_expect_inscope |= report_matches(&expect_inscope); - } while (!end_test_checks(match_expect_start && match_expect_inscope)); + } while (!end_test_checks(match_expect_inscope)); KUNIT_EXPECT_TRUE(test, match_expect_start); - KUNIT_EXPECT_TRUE(test, match_expect_inscope); + KUNIT_EXPECT_FALSE(test, match_expect_inscope); } /* @@ -1224,7 +1246,7 @@ static void kcsan_test_exit(void) tracepoint_synchronize_unregister(); } -late_initcall(kcsan_test_init); +late_initcall_sync(kcsan_test_init); module_exit(kcsan_test_exit); MODULE_LICENSE("GPL v2"); diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c index 21137929d428..fc15077991c4 100644 --- a/kernel/kcsan/report.c +++ b/kernel/kcsan/report.c @@ -8,6 +8,7 @@ #include <linux/debug_locks.h> #include <linux/delay.h> #include <linux/jiffies.h> +#include <linux/kallsyms.h> #include <linux/kernel.h> #include <linux/lockdep.h> #include <linux/preempt.h> @@ -31,6 +32,7 @@ struct access_info { int access_type; int task_pid; int cpu_id; + unsigned long ip; }; /* @@ -245,6 +247,10 @@ static const char *get_access_type(int type) return "write (scoped)"; case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC: return "write (marked, scoped)"; + case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE: + return "read-write (scoped)"; + case KCSAN_ACCESS_SCOPED | 
KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC: + return "read-write (marked, scoped)"; default: BUG(); } @@ -300,6 +306,48 @@ static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries return skip; } +/* + * Skips to the first entry that matches the function of @ip, and then replaces + * that entry with @ip, returning the entries to skip. + */ +static int +replace_stack_entry(unsigned long stack_entries[], int num_entries, unsigned long ip) +{ + unsigned long symbolsize, offset; + unsigned long target_func; + int skip; + + if (kallsyms_lookup_size_offset(ip, &symbolsize, &offset)) + target_func = ip - offset; + else + goto fallback; + + for (skip = 0; skip < num_entries; ++skip) { + unsigned long func = stack_entries[skip]; + + if (!kallsyms_lookup_size_offset(func, &symbolsize, &offset)) + goto fallback; + func -= offset; + + if (func == target_func) { + stack_entries[skip] = ip; + return skip; + } + } + +fallback: + /* Should not happen; the resulting stack trace is likely misleading. */ + WARN_ONCE(1, "Cannot find frame for %pS in stack trace", (void *)ip); + return get_stack_skipnr(stack_entries, num_entries); +} + +static int +sanitize_stack_entries(unsigned long stack_entries[], int num_entries, unsigned long ip) +{ + return ip ? replace_stack_entry(stack_entries, num_entries, ip) : + get_stack_skipnr(stack_entries, num_entries); +} + /* Compares symbolized strings of addr1 and addr2. */ static int sym_strcmp(void *addr1, void *addr2) { @@ -327,12 +375,12 @@ static void print_verbose_info(struct task_struct *task) static void print_report(enum kcsan_value_change value_change, const struct access_info *ai, - const struct other_info *other_info, + struct other_info *other_info, u64 old, u64 new, u64 mask) { unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 }; int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1); - int skipnr = get_stack_skipnr(stack_entries, num_stack_entries); + int skipnr = sanitize_stack_entries(stack_entries, num_stack_entries, ai->ip); unsigned long this_frame = stack_entries[skipnr]; unsigned long other_frame = 0; int other_skipnr = 0; /* silence uninit warnings */ @@ -344,8 +392,9 @@ static void print_report(enum kcsan_value_change value_change, return; if (other_info) { - other_skipnr = get_stack_skipnr(other_info->stack_entries, - other_info->num_stack_entries); + other_skipnr = sanitize_stack_entries(other_info->stack_entries, + other_info->num_stack_entries, + other_info->ai.ip); other_frame = other_info->stack_entries[other_skipnr]; /* @value_change is only known for the other thread */ @@ -576,21 +625,23 @@ discard: } static struct access_info prepare_access_info(const volatile void *ptr, size_t size, - int access_type) + int access_type, unsigned long ip) { return (struct access_info) { .ptr = ptr, .size = size, .access_type = access_type, .task_pid = in_task() ? task_pid_nr(current) : -1, - .cpu_id = raw_smp_processor_id() + .cpu_id = raw_smp_processor_id(), + /* Only replace stack entry with @ip if scoped access. */ + .ip = (access_type & KCSAN_ACCESS_SCOPED) ? 
ip : 0, }; } void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type, - int watchpoint_idx) + unsigned long ip, int watchpoint_idx) { - const struct access_info ai = prepare_access_info(ptr, size, access_type); + const struct access_info ai = prepare_access_info(ptr, size, access_type, ip); unsigned long flags; kcsan_disable_current(); @@ -603,10 +654,10 @@ void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_typ } void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type, - enum kcsan_value_change value_change, int watchpoint_idx, - u64 old, u64 new, u64 mask) + unsigned long ip, enum kcsan_value_change value_change, + int watchpoint_idx, u64 old, u64 new, u64 mask) { - const struct access_info ai = prepare_access_info(ptr, size, access_type); + const struct access_info ai = prepare_access_info(ptr, size, access_type, ip); struct other_info *other_info = &other_infos[watchpoint_idx]; unsigned long flags = 0; @@ -637,9 +688,9 @@ out: } void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type, - u64 old, u64 new, u64 mask) + unsigned long ip, u64 old, u64 new, u64 mask) { - const struct access_info ai = prepare_access_info(ptr, size, access_type); + const struct access_info ai = prepare_access_info(ptr, size, access_type, ip); unsigned long flags; kcsan_disable_current(); diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c index 7f29cb0f5e63..b4295a3892b7 100644 --- a/kernel/kcsan/selftest.c +++ b/kernel/kcsan/selftest.c @@ -18,7 +18,7 @@ #define ITERS_PER_TEST 2000 /* Test requirements. */ -static bool test_requires(void) +static bool __init test_requires(void) { /* random should be initialized for the below tests */ return prandom_u32() + prandom_u32() != 0; @@ -28,14 +28,18 @@ static bool test_requires(void) * Test watchpoint encode and decode: check that encoding some access's info, * and then subsequent decode preserves the access's info. */ -static bool test_encode_decode(void) +static bool __init test_encode_decode(void) { int i; for (i = 0; i < ITERS_PER_TEST; ++i) { size_t size = prandom_u32_max(MAX_ENCODABLE_SIZE) + 1; bool is_write = !!prandom_u32_max(2); + unsigned long verif_masked_addr; + long encoded_watchpoint; + bool verif_is_write; unsigned long addr; + size_t verif_size; prandom_bytes(&addr, sizeof(addr)); if (addr < PAGE_SIZE) @@ -44,53 +48,37 @@ static bool test_encode_decode(void) if (WARN_ON(!check_encodable(addr, size))) return false; - /* Encode and decode */ - { - const long encoded_watchpoint = - encode_watchpoint(addr, size, is_write); - unsigned long verif_masked_addr; - size_t verif_size; - bool verif_is_write; - - /* Check special watchpoints */ - if (WARN_ON(decode_watchpoint( - INVALID_WATCHPOINT, &verif_masked_addr, - &verif_size, &verif_is_write))) - return false; - if (WARN_ON(decode_watchpoint( - CONSUMED_WATCHPOINT, &verif_masked_addr, - &verif_size, &verif_is_write))) - return false; - - /* Check decoding watchpoint returns same data */ - if (WARN_ON(!decode_watchpoint( - encoded_watchpoint, &verif_masked_addr, - &verif_size, &verif_is_write))) - return false; - if (WARN_ON(verif_masked_addr != - (addr & WATCHPOINT_ADDR_MASK))) - goto fail; - if (WARN_ON(verif_size != size)) - goto fail; - if (WARN_ON(is_write != verif_is_write)) - goto fail; - - continue; -fail: - pr_err("%s fail: %s %zu bytes @ %lx -> encoded: %lx -> %s %zu bytes @ %lx\n", - __func__, is_write ? "write" : "read", size, - addr, encoded_watchpoint, - verif_is_write ? 
"write" : "read", verif_size, - verif_masked_addr); + encoded_watchpoint = encode_watchpoint(addr, size, is_write); + + /* Check special watchpoints */ + if (WARN_ON(decode_watchpoint(INVALID_WATCHPOINT, &verif_masked_addr, &verif_size, &verif_is_write))) return false; - } + if (WARN_ON(decode_watchpoint(CONSUMED_WATCHPOINT, &verif_masked_addr, &verif_size, &verif_is_write))) + return false; + + /* Check decoding watchpoint returns same data */ + if (WARN_ON(!decode_watchpoint(encoded_watchpoint, &verif_masked_addr, &verif_size, &verif_is_write))) + return false; + if (WARN_ON(verif_masked_addr != (addr & WATCHPOINT_ADDR_MASK))) + goto fail; + if (WARN_ON(verif_size != size)) + goto fail; + if (WARN_ON(is_write != verif_is_write)) + goto fail; + + continue; +fail: + pr_err("%s fail: %s %zu bytes @ %lx -> encoded: %lx -> %s %zu bytes @ %lx\n", + __func__, is_write ? "write" : "read", size, addr, encoded_watchpoint, + verif_is_write ? "write" : "read", verif_size, verif_masked_addr); + return false; } return true; } /* Test access matching function. */ -static bool test_matching_access(void) +static bool __init test_matching_access(void) { if (WARN_ON(!matching_access(10, 1, 10, 1))) return false; diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index 33400ff051a8..8347fc158d2b 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -556,6 +556,11 @@ static int kexec_walk_memblock(struct kexec_buf *kbuf, if (kbuf->image->type == KEXEC_TYPE_CRASH) return func(&crashk_res, kbuf); + /* + * Using MEMBLOCK_NONE will properly skip MEMBLOCK_DRIVER_MANAGED. See + * IORESOURCE_SYSRAM_DRIVER_MANAGED handling in + * locate_mem_hole_callback(). + */ if (kbuf->top_down) { for_each_free_mem_range_reverse(i, NUMA_NO_NODE, MEMBLOCK_NONE, &mstart, &mend, NULL) { diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 9a38e7581a5c..e9db0c810554 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* * Kernel Probes (KProbes) - * kernel/kprobes.c * * Copyright (C) IBM Corporation, 2002, 2004 * @@ -18,6 +17,9 @@ * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi * <prasanna@in.ibm.com> added function-return probes. */ + +#define pr_fmt(fmt) "kprobes: " fmt + #include <linux/kprobes.h> #include <linux/hash.h> #include <linux/init.h> @@ -49,18 +51,18 @@ static int kprobes_initialized; /* kprobe_table can be accessed by - * - Normal hlist traversal and RCU add/del under kprobe_mutex is held. + * - Normal hlist traversal and RCU add/del under 'kprobe_mutex' is held. * Or * - RCU hlist traversal under disabling preempt (breakpoint handlers) */ static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; -/* NOTE: change this value only with kprobe_mutex held */ +/* NOTE: change this value only with 'kprobe_mutex' held */ static bool kprobes_all_disarmed; -/* This protects kprobe_table and optimizing_list */ +/* This protects 'kprobe_table' and 'optimizing_list' */ static DEFINE_MUTEX(kprobe_mutex); -static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; +static DEFINE_PER_CPU(struct kprobe *, kprobe_instance); kprobe_opcode_t * __weak kprobe_lookup_name(const char *name, unsigned int __unused) @@ -68,12 +70,15 @@ kprobe_opcode_t * __weak kprobe_lookup_name(const char *name, return ((kprobe_opcode_t *)(kallsyms_lookup_name(name))); } -/* Blacklist -- list of struct kprobe_blacklist_entry */ +/* + * Blacklist -- list of 'struct kprobe_blacklist_entry' to store info where + * kprobes can not probe. 
+ */ static LIST_HEAD(kprobe_blacklist); #ifdef __ARCH_WANT_KPROBES_INSN_SLOT /* - * kprobe->ainsn.insn points to the copy of the instruction to be + * 'kprobe::ainsn.insn' points to the copy of the instruction to be * single-stepped. x86_64, POWER4 and above have no-exec support and * stepping on the instruction on a vmalloced/kmalloced/data page * is a recipe for disaster @@ -104,6 +109,12 @@ enum kprobe_slot_state { void __weak *alloc_insn_page(void) { + /* + * Use module_alloc() so this page is within +/- 2GB of where the + * kernel image and loaded module images reside. This is required + * for most of the architectures. + * (e.g. x86-64 needs this to handle the %rip-relative fixups.) + */ return module_alloc(PAGE_SIZE); } @@ -139,6 +150,7 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) list_for_each_entry_rcu(kip, &c->pages, list) { if (kip->nused < slots_per_page(c)) { int i; + for (i = 0; i < slots_per_page(c); i++) { if (kip->slot_used[i] == SLOT_CLEAN) { kip->slot_used[i] = SLOT_USED; @@ -164,11 +176,6 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) if (!kip) goto out; - /* - * Use module_alloc so this page is within +/- 2GB of where the - * kernel image and loaded module images reside. This is required - * so x86_64 can correctly handle the %rip-relative fixups. - */ kip->insns = c->alloc(); if (!kip->insns) { kfree(kip); @@ -191,8 +198,8 @@ out: return slot; } -/* Return 1 if all garbages are collected, otherwise 0. */ -static int collect_one_slot(struct kprobe_insn_page *kip, int idx) +/* Return true if all garbages are collected, otherwise false. */ +static bool collect_one_slot(struct kprobe_insn_page *kip, int idx) { kip->slot_used[idx] = SLOT_CLEAN; kip->nused--; @@ -216,9 +223,9 @@ static int collect_one_slot(struct kprobe_insn_page *kip, int idx) kip->cache->free(kip->insns); kfree(kip); } - return 1; + return true; } - return 0; + return false; } static int collect_garbage_slots(struct kprobe_insn_cache *c) @@ -230,6 +237,7 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c) list_for_each_entry_safe(kip, next, &c->pages, list) { int i; + if (kip->ngarbage == 0) continue; kip->ngarbage = 0; /* we will collect all garbages */ @@ -310,7 +318,7 @@ int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum, list_for_each_entry_rcu(kip, &c->pages, list) { if ((*symnum)--) continue; - strlcpy(sym, c->sym, KSYM_NAME_LEN); + strscpy(sym, c->sym, KSYM_NAME_LEN); *type = 't'; *value = (unsigned long)kip->insns; ret = 0; @@ -358,9 +366,9 @@ static inline void reset_kprobe_instance(void) /* * This routine is called either: - * - under the kprobe_mutex - during kprobe_[un]register() - * OR - * - with preemption disabled - from arch/xxx/kernel/kprobes.c + * - under the 'kprobe_mutex' - during kprobe_[un]register(). + * OR + * - with preemption disabled - from architecture specific code. 
*/ struct kprobe *get_kprobe(void *addr) { @@ -380,22 +388,20 @@ NOKPROBE_SYMBOL(get_kprobe); static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs); -/* Return true if the kprobe is an aggregator */ -static inline int kprobe_aggrprobe(struct kprobe *p) +/* Return true if 'p' is an aggregator */ +static inline bool kprobe_aggrprobe(struct kprobe *p) { return p->pre_handler == aggr_pre_handler; } -/* Return true(!0) if the kprobe is unused */ -static inline int kprobe_unused(struct kprobe *p) +/* Return true if 'p' is unused */ +static inline bool kprobe_unused(struct kprobe *p) { return kprobe_aggrprobe(p) && kprobe_disabled(p) && list_empty(&p->list); } -/* - * Keep all fields in the kprobe consistent - */ +/* Keep all fields in the kprobe consistent. */ static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p) { memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t)); @@ -403,11 +409,11 @@ static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p) } #ifdef CONFIG_OPTPROBES -/* NOTE: change this value only with kprobe_mutex held */ +/* NOTE: This is protected by 'kprobe_mutex'. */ static bool kprobes_allow_optimization; /* - * Call all pre_handler on the list, but ignores its return value. + * Call all 'kprobe::pre_handler' on the list, but ignores its return value. * This must be called from arch-dep optimized caller. */ void opt_pre_handler(struct kprobe *p, struct pt_regs *regs) @@ -435,7 +441,7 @@ static void free_aggr_kprobe(struct kprobe *p) kfree(op); } -/* Return true(!0) if the kprobe is ready for optimization. */ +/* Return true if the kprobe is ready for optimization. */ static inline int kprobe_optready(struct kprobe *p) { struct optimized_kprobe *op; @@ -448,8 +454,8 @@ static inline int kprobe_optready(struct kprobe *p) return 0; } -/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */ -static inline int kprobe_disarmed(struct kprobe *p) +/* Return true if the kprobe is disarmed. Note: p must be on hash list */ +static inline bool kprobe_disarmed(struct kprobe *p) { struct optimized_kprobe *op; @@ -462,32 +468,32 @@ static inline int kprobe_disarmed(struct kprobe *p) return kprobe_disabled(p) && list_empty(&op->list); } -/* Return true(!0) if the probe is queued on (un)optimizing lists */ -static int kprobe_queued(struct kprobe *p) +/* Return true if the probe is queued on (un)optimizing lists */ +static bool kprobe_queued(struct kprobe *p) { struct optimized_kprobe *op; if (kprobe_aggrprobe(p)) { op = container_of(p, struct optimized_kprobe, kp); if (!list_empty(&op->list)) - return 1; + return true; } - return 0; + return false; } /* * Return an optimized kprobe whose optimizing code replaces - * instructions including addr (exclude breakpoint). + * instructions including 'addr' (exclude breakpoint). */ -static struct kprobe *get_optimized_kprobe(unsigned long addr) +static struct kprobe *get_optimized_kprobe(kprobe_opcode_t *addr) { int i; struct kprobe *p = NULL; struct optimized_kprobe *op; /* Don't check i == 0, since that is a breakpoint case. 
*/ - for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++) - p = get_kprobe((void *)(addr - i)); + for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH / sizeof(kprobe_opcode_t); i++) + p = get_kprobe(addr - i); if (p && kprobe_optready(p)) { op = container_of(p, struct optimized_kprobe, kp); @@ -498,7 +504,7 @@ static struct kprobe *get_optimized_kprobe(unsigned long addr) return NULL; } -/* Optimization staging list, protected by kprobe_mutex */ +/* Optimization staging list, protected by 'kprobe_mutex' */ static LIST_HEAD(optimizing_list); static LIST_HEAD(unoptimizing_list); static LIST_HEAD(freeing_list); @@ -509,20 +515,20 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); /* * Optimize (replace a breakpoint with a jump) kprobes listed on - * optimizing_list. + * 'optimizing_list'. */ static void do_optimize_kprobes(void) { lockdep_assert_held(&text_mutex); /* - * The optimization/unoptimization refers online_cpus via - * stop_machine() and cpu-hotplug modifies online_cpus. - * And same time, text_mutex will be held in cpu-hotplug and here. - * This combination can cause a deadlock (cpu-hotplug try to lock - * text_mutex but stop_machine can not be done because online_cpus - * has been changed) - * To avoid this deadlock, caller must have locked cpu hotplug - * for preventing cpu-hotplug outside of text_mutex locking. + * The optimization/unoptimization refers 'online_cpus' via + * stop_machine() and cpu-hotplug modifies the 'online_cpus'. + * And same time, 'text_mutex' will be held in cpu-hotplug and here. + * This combination can cause a deadlock (cpu-hotplug tries to lock + * 'text_mutex' but stop_machine() can not be done because + * the 'online_cpus' has been changed) + * To avoid this deadlock, caller must have locked cpu-hotplug + * for preventing cpu-hotplug outside of 'text_mutex' locking. */ lockdep_assert_cpus_held(); @@ -536,7 +542,7 @@ static void do_optimize_kprobes(void) /* * Unoptimize (replace a jump with a breakpoint and remove the breakpoint - * if need) kprobes listed on unoptimizing_list. + * if need) kprobes listed on 'unoptimizing_list'. */ static void do_unoptimize_kprobes(void) { @@ -551,7 +557,7 @@ static void do_unoptimize_kprobes(void) return; arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list); - /* Loop free_list for disarming */ + /* Loop on 'freeing_list' for disarming */ list_for_each_entry_safe(op, tmp, &freeing_list, list) { /* Switching from detour code to origin */ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; @@ -562,7 +568,7 @@ static void do_unoptimize_kprobes(void) /* * Remove unused probes from hash list. After waiting * for synchronization, these probes are reclaimed. - * (reclaiming is done by do_free_cleaned_kprobes.) + * (reclaiming is done by do_free_cleaned_kprobes().) 
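The deadlock comment in do_optimize_kprobes() boils down to a fixed lock order: CPU hotplug is pinned before 'text_mutex' is taken. A minimal sketch of that ordering (needs <linux/cpu.h> and <linux/memory.h>; the function name is invented):

static void example_text_patch_section(void)
{
	cpus_read_lock();		/* pin CPU hotplug first ... */
	mutex_lock(&text_mutex);	/* ... then serialize text patching */

	/* ... stop_machine()-based patching would happen here ... */

	mutex_unlock(&text_mutex);
	cpus_read_unlock();
}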
*/ hlist_del_rcu(&op->kp.hlist); } else @@ -570,7 +576,7 @@ static void do_unoptimize_kprobes(void) } } -/* Reclaim all kprobes on the free_list */ +/* Reclaim all kprobes on the 'freeing_list' */ static void do_free_cleaned_kprobes(void) { struct optimized_kprobe *op, *tmp; @@ -642,9 +648,9 @@ void wait_for_kprobe_optimizer(void) while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) { mutex_unlock(&kprobe_mutex); - /* this will also make optimizing_work execute immmediately */ + /* This will also make 'optimizing_work' execute immmediately */ flush_delayed_work(&optimizing_work); - /* @optimizing_work might not have been queued yet, relax */ + /* 'optimizing_work' might not have been queued yet, relax */ cpu_relax(); mutex_lock(&kprobe_mutex); @@ -675,7 +681,7 @@ static void optimize_kprobe(struct kprobe *p) (kprobe_disabled(p) || kprobes_all_disarmed)) return; - /* kprobes with post_handler can not be optimized */ + /* kprobes with 'post_handler' can not be optimized */ if (p->post_handler) return; @@ -695,7 +701,10 @@ static void optimize_kprobe(struct kprobe *p) } op->kp.flags |= KPROBE_FLAG_OPTIMIZED; - /* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */ + /* + * On the 'unoptimizing_list' and 'optimizing_list', + * 'op' must have OPTIMIZED flag + */ if (WARN_ON_ONCE(!list_empty(&op->list))) return; @@ -765,7 +774,7 @@ static int reuse_unused_kprobe(struct kprobe *ap) WARN_ON_ONCE(list_empty(&op->list)); /* Enable the probe again */ ap->flags &= ~KPROBE_FLAG_DISABLED; - /* Optimize it again (remove from op->list) */ + /* Optimize it again. (remove from 'op->list') */ if (!kprobe_optready(ap)) return -EINVAL; @@ -815,7 +824,7 @@ static void prepare_optimized_kprobe(struct kprobe *p) __prepare_optimized_kprobe(op, p); } -/* Allocate new optimized_kprobe and try to prepare optimized instructions */ +/* Allocate new optimized_kprobe and try to prepare optimized instructions. */ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) { struct optimized_kprobe *op; @@ -834,19 +843,19 @@ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p); /* - * Prepare an optimized_kprobe and optimize it - * NOTE: p must be a normal registered kprobe + * Prepare an optimized_kprobe and optimize it. + * NOTE: 'p' must be a normal registered kprobe. */ static void try_to_optimize_kprobe(struct kprobe *p) { struct kprobe *ap; struct optimized_kprobe *op; - /* Impossible to optimize ftrace-based kprobe */ + /* Impossible to optimize ftrace-based kprobe. */ if (kprobe_ftrace(p)) return; - /* For preparing optimization, jump_label_text_reserved() is called */ + /* For preparing optimization, jump_label_text_reserved() is called. */ cpus_read_lock(); jump_label_lock(); mutex_lock(&text_mutex); @@ -857,14 +866,14 @@ static void try_to_optimize_kprobe(struct kprobe *p) op = container_of(ap, struct optimized_kprobe, kp); if (!arch_prepared_optinsn(&op->optinsn)) { - /* If failed to setup optimizing, fallback to kprobe */ + /* If failed to setup optimizing, fallback to kprobe. */ arch_remove_optimized_kprobe(op); kfree(op); goto out; } init_aggr_kprobe(ap, p); - optimize_kprobe(ap); /* This just kicks optimizer thread */ + optimize_kprobe(ap); /* This just kicks optimizer thread. 
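As the check in optimize_kprobe() above implies, only probes without a post_handler are candidates for jump optimization. A hedged illustration of the two cases ('kernel_clone' again an arbitrary target); whether a registered probe actually got optimized shows up as [OPTIMIZED] in /sys/kernel/debug/kprobes/list:

static int example_pre(struct kprobe *p, struct pt_regs *regs) { return 0; }
static void example_post(struct kprobe *p, struct pt_regs *regs, unsigned long flags) { }

/* Eligible for jump optimization (subject to the other checks above). */
static struct kprobe example_kp_fast = {
	.symbol_name	= "kernel_clone",
	.pre_handler	= example_pre,
};

/* The post_handler pins this probe to the breakpoint-based slow path. */
static struct kprobe example_kp_slow = {
	.symbol_name	= "kernel_clone",
	.pre_handler	= example_pre,
	.post_handler	= example_post,
};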
*/ out: mutex_unlock(&text_mutex); @@ -879,7 +888,7 @@ static void optimize_all_kprobes(void) unsigned int i; mutex_lock(&kprobe_mutex); - /* If optimization is already allowed, just return */ + /* If optimization is already allowed, just return. */ if (kprobes_allow_optimization) goto out; @@ -892,7 +901,7 @@ static void optimize_all_kprobes(void) optimize_kprobe(p); } cpus_read_unlock(); - printk(KERN_INFO "Kprobes globally optimized\n"); + pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n"); out: mutex_unlock(&kprobe_mutex); } @@ -905,7 +914,7 @@ static void unoptimize_all_kprobes(void) unsigned int i; mutex_lock(&kprobe_mutex); - /* If optimization is already prohibited, just return */ + /* If optimization is already prohibited, just return. */ if (!kprobes_allow_optimization) { mutex_unlock(&kprobe_mutex); return; @@ -923,9 +932,9 @@ static void unoptimize_all_kprobes(void) cpus_read_unlock(); mutex_unlock(&kprobe_mutex); - /* Wait for unoptimizing completion */ + /* Wait for unoptimizing completion. */ wait_for_kprobe_optimizer(); - printk(KERN_INFO "Kprobes globally unoptimized\n"); + pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n"); } static DEFINE_MUTEX(kprobe_sysctl_mutex); @@ -950,13 +959,15 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write, } #endif /* CONFIG_SYSCTL */ -/* Put a breakpoint for a probe. Must be called with text_mutex locked */ +/* Put a breakpoint for a probe. */ static void __arm_kprobe(struct kprobe *p) { struct kprobe *_p; - /* Check collision with other optimized kprobes */ - _p = get_optimized_kprobe((unsigned long)p->addr); + lockdep_assert_held(&text_mutex); + + /* Find the overlapping optimized kprobes. */ + _p = get_optimized_kprobe(p->addr); if (unlikely(_p)) /* Fallback to unoptimized kprobe */ unoptimize_kprobe(_p, true); @@ -965,22 +976,29 @@ static void __arm_kprobe(struct kprobe *p) optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */ } -/* Remove the breakpoint of a probe. Must be called with text_mutex locked */ +/* Remove the breakpoint of a probe. */ static void __disarm_kprobe(struct kprobe *p, bool reopt) { struct kprobe *_p; + lockdep_assert_held(&text_mutex); + /* Try to unoptimize */ unoptimize_kprobe(p, kprobes_all_disarmed); if (!kprobe_queued(p)) { arch_disarm_kprobe(p); - /* If another kprobe was blocked, optimize it. */ - _p = get_optimized_kprobe((unsigned long)p->addr); + /* If another kprobe was blocked, re-optimize it. */ + _p = get_optimized_kprobe(p->addr); if (unlikely(_p) && reopt) optimize_kprobe(_p); } - /* TODO: reoptimize others after unoptimized this probe */ + /* + * TODO: Since unoptimization and real disarming will be done by + * the worker thread, we can not check whether another probe are + * unoptimized because of this probe here. It should be re-optimized + * by the worker thread. + */ } #else /* !CONFIG_OPTPROBES */ @@ -1003,7 +1021,7 @@ static int reuse_unused_kprobe(struct kprobe *ap) * unregistered. * Thus there should be no chance to reuse unused kprobe. 
*/ - printk(KERN_ERR "Error: There should be no unused kprobe here.\n"); + WARN_ON_ONCE(1); return -EINVAL; } @@ -1033,34 +1051,21 @@ static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = { static int kprobe_ipmodify_enabled; static int kprobe_ftrace_enabled; -/* Must ensure p->addr is really on ftrace */ -static int prepare_kprobe(struct kprobe *p) -{ - if (!kprobe_ftrace(p)) - return arch_prepare_kprobe(p); - - return arch_prepare_kprobe_ftrace(p); -} - -/* Caller must lock kprobe_mutex */ static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops, int *cnt) { int ret = 0; + lockdep_assert_held(&kprobe_mutex); + ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0); - if (ret) { - pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n", - p->addr, ret); + if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret)) return ret; - } if (*cnt == 0) { ret = register_ftrace_function(ops); - if (ret) { - pr_debug("Failed to init kprobe-ftrace (%d)\n", ret); + if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret)) goto err_ftrace; - } } (*cnt)++; @@ -1084,22 +1089,23 @@ static int arm_kprobe_ftrace(struct kprobe *p) ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled); } -/* Caller must lock kprobe_mutex */ static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops, int *cnt) { int ret = 0; + lockdep_assert_held(&kprobe_mutex); + if (*cnt == 1) { ret = unregister_ftrace_function(ops); - if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret)) + if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (error %d)\n", ret)) return ret; } (*cnt)--; ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0); - WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n", + WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (error %d)\n", p->addr, ret); return ret; } @@ -1113,11 +1119,6 @@ static int disarm_kprobe_ftrace(struct kprobe *p) ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled); } #else /* !CONFIG_KPROBES_ON_FTRACE */ -static inline int prepare_kprobe(struct kprobe *p) -{ - return arch_prepare_kprobe(p); -} - static inline int arm_kprobe_ftrace(struct kprobe *p) { return -ENODEV; @@ -1129,7 +1130,15 @@ static inline int disarm_kprobe_ftrace(struct kprobe *p) } #endif -/* Arm a kprobe with text_mutex */ +static int prepare_kprobe(struct kprobe *p) +{ + /* Must ensure p->addr is really on ftrace */ + if (kprobe_ftrace(p)) + return arch_prepare_kprobe_ftrace(p); + + return arch_prepare_kprobe(p); +} + static int arm_kprobe(struct kprobe *kp) { if (unlikely(kprobe_ftrace(kp))) @@ -1144,7 +1153,6 @@ static int arm_kprobe(struct kprobe *kp) return 0; } -/* Disarm a kprobe with text_mutex */ static int disarm_kprobe(struct kprobe *kp, bool reopt) { if (unlikely(kprobe_ftrace(kp))) @@ -1194,17 +1202,17 @@ static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, } NOKPROBE_SYMBOL(aggr_post_handler); -/* Walks the list and increments nmissed count for multiprobe case */ +/* Walks the list and increments 'nmissed' if 'p' has child probes. 
*/ void kprobes_inc_nmissed_count(struct kprobe *p) { struct kprobe *kp; + if (!kprobe_aggrprobe(p)) { p->nmissed++; } else { list_for_each_entry_rcu(kp, &p->list, list) kp->nmissed++; } - return; } NOKPROBE_SYMBOL(kprobes_inc_nmissed_count); @@ -1222,9 +1230,9 @@ static void recycle_rp_inst(struct kretprobe_instance *ri) { struct kretprobe *rp = get_kretprobe(ri); - if (likely(rp)) { + if (likely(rp)) freelist_add(&ri->freelist, &rp->freelist); - } else + else call_rcu(&ri->rcu, free_rp_inst_rcu); } NOKPROBE_SYMBOL(recycle_rp_inst); @@ -1251,9 +1259,9 @@ void kprobe_busy_end(void) /* * This function is called from delayed_put_task_struct() when a task is - * dead and cleaned up to recycle any function-return probe instances - * associated with this task. These left over instances represent probed - * functions that have been called but will never return. + * dead and cleaned up to recycle any kretprobe instances associated with + * this task. These left over instances represent probed functions that + * have been called but will never return. */ void kprobe_flush_task(struct task_struct *tk) { @@ -1299,7 +1307,7 @@ static inline void free_rp_inst(struct kretprobe *rp) } } -/* Add the new probe to ap->list */ +/* Add the new probe to 'ap->list'. */ static int add_new_kprobe(struct kprobe *ap, struct kprobe *p) { if (p->post_handler) @@ -1313,12 +1321,12 @@ static int add_new_kprobe(struct kprobe *ap, struct kprobe *p) } /* - * Fill in the required fields of the "manager kprobe". Replace the - * earlier kprobe in the hlist with the manager kprobe + * Fill in the required fields of the aggregator kprobe. Replace the + * earlier kprobe in the hlist with the aggregator kprobe. */ static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) { - /* Copy p's insn slot to ap */ + /* Copy the insn slot of 'p' to 'ap'. */ copy_kprobe(p, ap); flush_insn_slot(ap); ap->addr = p->addr; @@ -1336,8 +1344,7 @@ static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) } /* - * This is the second or subsequent kprobe at the address - handle - * the intricacies + * This registers the second or subsequent kprobe at the same address. */ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p) { @@ -1351,7 +1358,7 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p) mutex_lock(&text_mutex); if (!kprobe_aggrprobe(orig_p)) { - /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */ + /* If 'orig_p' is not an 'aggr_kprobe', create new one. */ ap = alloc_aggr_kprobe(orig_p); if (!ap) { ret = -ENOMEM; @@ -1376,8 +1383,8 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p) if (ret) /* * Even if fail to allocate new slot, don't need to - * free aggr_probe. It will be used next time, or - * freed by unregister_kprobe. + * free the 'ap'. It will be used next time, or + * freed by unregister_kprobe(). */ goto out; @@ -1392,7 +1399,7 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p) | KPROBE_FLAG_DISABLED; } - /* Copy ap's insn slot to p */ + /* Copy the insn slot of 'p' to 'ap'. */ copy_kprobe(ap, p); ret = add_new_kprobe(ap, p); @@ -1418,7 +1425,7 @@ out: bool __weak arch_within_kprobe_blacklist(unsigned long addr) { - /* The __kprobes marked functions and entry code must not be probed */ + /* The '__kprobes' functions and entry code must not be probed. 
*/ return addr >= (unsigned long)__kprobes_text_start && addr < (unsigned long)__kprobes_text_end; } @@ -1430,8 +1437,8 @@ static bool __within_kprobe_blacklist(unsigned long addr) if (arch_within_kprobe_blacklist(addr)) return true; /* - * If there exists a kprobe_blacklist, verify and - * fail any probe registration in the prohibited area + * If 'kprobe_blacklist' is defined, check the address and + * reject any probe registration in the prohibited area. */ list_for_each_entry(ent, &kprobe_blacklist, list) { if (addr >= ent->start_addr && addr < ent->end_addr) @@ -1461,7 +1468,7 @@ bool within_kprobe_blacklist(unsigned long addr) } /* - * If we have a symbol_name argument, look it up and add the offset field + * If 'symbol_name' is specified, look it up and add the 'offset' * to it. This way, we can specify a relative address to a symbol. * This returns encoded errors if it fails to look up symbol or invalid * combination of parameters. @@ -1491,7 +1498,10 @@ static kprobe_opcode_t *kprobe_addr(struct kprobe *p) return _kprobe_addr(p->addr, p->symbol_name, p->offset); } -/* Check passed kprobe is valid and return kprobe in kprobe_table. */ +/* + * Check the 'p' is valid and return the aggregator kprobe + * at the same address. + */ static struct kprobe *__get_valid_kprobe(struct kprobe *p) { struct kprobe *ap, *list_p; @@ -1529,7 +1539,7 @@ static inline int warn_kprobe_rereg(struct kprobe *p) return ret; } -int __weak arch_check_ftrace_location(struct kprobe *p) +static int check_ftrace_location(struct kprobe *p) { unsigned long ftrace_addr; @@ -1552,7 +1562,7 @@ static int check_kprobe_address_safe(struct kprobe *p, { int ret; - ret = arch_check_ftrace_location(p); + ret = check_ftrace_location(p); if (ret) return ret; jump_label_lock(); @@ -1568,7 +1578,7 @@ static int check_kprobe_address_safe(struct kprobe *p, goto out; } - /* Check if are we probing a module */ + /* Check if 'p' is probing a module. */ *probed_mod = __module_text_address((unsigned long) p->addr); if (*probed_mod) { /* @@ -1581,7 +1591,7 @@ static int check_kprobe_address_safe(struct kprobe *p, } /* - * If the module freed .init.text, we couldn't insert + * If the module freed '.init.text', we couldn't insert * kprobes in there. */ if (within_module_init((unsigned long)p->addr, *probed_mod) && @@ -1628,7 +1638,7 @@ int register_kprobe(struct kprobe *p) old_p = get_kprobe(p->addr); if (old_p) { - /* Since this may unoptimize old_p, locking text_mutex. */ + /* Since this may unoptimize 'old_p', locking 'text_mutex'. */ ret = register_aggr_kprobe(old_p, p); goto out; } @@ -1667,8 +1677,8 @@ out: } EXPORT_SYMBOL_GPL(register_kprobe); -/* Check if all probes on the aggrprobe are disabled */ -static int aggr_kprobe_disabled(struct kprobe *ap) +/* Check if all probes on the 'ap' are disabled. */ +static bool aggr_kprobe_disabled(struct kprobe *ap) { struct kprobe *kp; @@ -1677,20 +1687,21 @@ static int aggr_kprobe_disabled(struct kprobe *ap) list_for_each_entry(kp, &ap->list, list) if (!kprobe_disabled(kp)) /* - * There is an active probe on the list. - * We can't disable this ap. + * Since there is an active probe on the list, + * we can't disable this 'ap'. 
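Code that must never be probed opts in to this blacklist in one of two ways; the annotations below are the stock kernel macros, the function bodies are placeholders:

/* Option 1: place the function in the protected .kprobes.text section. */
static int __kprobes example_breakpoint_helper(void)
{
	return 0;
}

/* Option 2: keep normal placement but record it in the blacklist table. */
static int example_nmi_helper(void)
{
	return 0;
}
NOKPROBE_SYMBOL(example_nmi_helper);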
*/ - return 0; + return false; - return 1; + return true; } -/* Disable one kprobe: Make sure called under kprobe_mutex is locked */ static struct kprobe *__disable_kprobe(struct kprobe *p) { struct kprobe *orig_p; int ret; + lockdep_assert_held(&kprobe_mutex); + /* Get an original kprobe for return */ orig_p = __get_valid_kprobe(p); if (unlikely(orig_p == NULL)) @@ -1704,7 +1715,7 @@ static struct kprobe *__disable_kprobe(struct kprobe *p) /* Try to disarm and disable this/parent probe */ if (p == orig_p || aggr_kprobe_disabled(orig_p)) { /* - * If kprobes_all_disarmed is set, orig_p + * If 'kprobes_all_disarmed' is set, 'orig_p' * should have already been disarmed, so * skip unneed disarming process. */ @@ -1850,53 +1861,105 @@ static struct notifier_block kprobe_exceptions_nb = { .priority = 0x7fffffff /* we need to be notified first */ }; -unsigned long __weak arch_deref_entry_point(void *entry) +#ifdef CONFIG_KRETPROBES + +/* This assumes the 'tsk' is the current task or the is not running. */ +static kprobe_opcode_t *__kretprobe_find_ret_addr(struct task_struct *tsk, + struct llist_node **cur) { - return (unsigned long)entry; + struct kretprobe_instance *ri = NULL; + struct llist_node *node = *cur; + + if (!node) + node = tsk->kretprobe_instances.first; + else + node = node->next; + + while (node) { + ri = container_of(node, struct kretprobe_instance, llist); + if (ri->ret_addr != kretprobe_trampoline_addr()) { + *cur = node; + return ri->ret_addr; + } + node = node->next; + } + return NULL; } +NOKPROBE_SYMBOL(__kretprobe_find_ret_addr); -#ifdef CONFIG_KRETPROBES +/** + * kretprobe_find_ret_addr -- Find correct return address modified by kretprobe + * @tsk: Target task + * @fp: A frame pointer + * @cur: a storage of the loop cursor llist_node pointer for next call + * + * Find the correct return address modified by a kretprobe on @tsk in unsigned + * long type. If it finds the return address, this returns that address value, + * or this returns 0. + * The @tsk must be 'current' or a task which is not running. @fp is a hint + * to get the currect return address - which is compared with the + * kretprobe_instance::fp field. The @cur is a loop cursor for searching the + * kretprobe return addresses on the @tsk. The '*@cur' should be NULL at the + * first call, but '@cur' itself must NOT NULL. + */ +unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp, + struct llist_node **cur) +{ + struct kretprobe_instance *ri = NULL; + kprobe_opcode_t *ret; + + if (WARN_ON_ONCE(!cur)) + return 0; + + do { + ret = __kretprobe_find_ret_addr(tsk, cur); + if (!ret) + break; + ri = container_of(*cur, struct kretprobe_instance, llist); + } while (ri->fp != fp); + + return (unsigned long)ret; +} +NOKPROBE_SYMBOL(kretprobe_find_ret_addr); + +void __weak arch_kretprobe_fixup_return(struct pt_regs *regs, + kprobe_opcode_t *correct_ret_addr) +{ + /* + * Do nothing by default. Please fill this to update the fake return + * address on the stack with the correct one on each arch if possible. + */ +} unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, - void *trampoline_address, void *frame_pointer) { kprobe_opcode_t *correct_ret_addr = NULL; struct kretprobe_instance *ri = NULL; - struct llist_node *first, *node; + struct llist_node *first, *node = NULL; struct kretprobe *rp; - /* Find all nodes for this frame. 
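A hedged sketch of how a stack unwinder might consume kretprobe_find_ret_addr(), assuming the is_kretprobe_trampoline() helper from the same series; 'kr_cur' starts as NULL and is carried across frames of the same task:

static unsigned long example_translate_ret_addr(struct task_struct *task,
						unsigned long addr, void *fp,
						struct llist_node **kr_cur)
{
	/* Replace the trampoline address with the real return address. */
	if (is_kretprobe_trampoline(addr))
		return kretprobe_find_ret_addr(task, fp, kr_cur);

	return addr;
}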
*/ - first = node = current->kretprobe_instances.first; - while (node) { - ri = container_of(node, struct kretprobe_instance, llist); - - BUG_ON(ri->fp != frame_pointer); - - if (ri->ret_addr != trampoline_address) { - correct_ret_addr = ri->ret_addr; - /* - * This is the real return address. Any other - * instances associated with this task are for - * other calls deeper on the call stack - */ - goto found; - } - - node = node->next; + /* Find correct address and all nodes for this frame. */ + correct_ret_addr = __kretprobe_find_ret_addr(current, &node); + if (!correct_ret_addr) { + pr_err("kretprobe: Return address not found, not execute handler. Maybe there is a bug in the kernel.\n"); + BUG_ON(1); } - pr_err("Oops! Kretprobe fails to find correct return address.\n"); - BUG_ON(1); -found: - /* Unlink all nodes for this frame. */ - current->kretprobe_instances.first = node->next; - node->next = NULL; + /* + * Set the return address as the instruction pointer, because if the + * user handler calls stack_trace_save_regs() with this 'regs', + * the stack trace will start from the instruction pointer. + */ + instruction_pointer_set(regs, (unsigned long)correct_ret_addr); - /* Run them.. */ + /* Run the user handler of the nodes. */ + first = current->kretprobe_instances.first; while (first) { ri = container_of(first, struct kretprobe_instance, llist); - first = first->next; + + if (WARN_ON_ONCE(ri->fp != frame_pointer)) + break; rp = get_kretprobe(ri); if (rp && rp->handler) { @@ -1907,6 +1970,23 @@ found: rp->handler(ri, regs); __this_cpu_write(current_kprobe, prev); } + if (first == node) + break; + + first = first->next; + } + + arch_kretprobe_fixup_return(regs, correct_ret_addr); + + /* Unlink all nodes for this frame. */ + first = current->kretprobe_instances.first; + current->kretprobe_instances.first = node->next; + node->next = NULL; + + /* Recycle free instances. */ + while (first) { + ri = container_of(first, struct kretprobe_instance, llist); + first = first->next; recycle_rp_inst(ri); } @@ -1991,7 +2071,7 @@ int register_kretprobe(struct kretprobe *rp) if (ret) return ret; - /* If only rp->kp.addr is specified, check reregistering kprobes */ + /* If only 'rp->kp.addr' is specified, check reregistering kprobes */ if (rp->kp.addr && warn_kprobe_rereg(&rp->kp)) return -EINVAL; @@ -2096,13 +2176,13 @@ EXPORT_SYMBOL_GPL(unregister_kretprobes); #else /* CONFIG_KRETPROBES */ int register_kretprobe(struct kretprobe *rp) { - return -ENOSYS; + return -EOPNOTSUPP; } EXPORT_SYMBOL_GPL(register_kretprobe); int register_kretprobes(struct kretprobe **rps, int num) { - return -ENOSYS; + return -EOPNOTSUPP; } EXPORT_SYMBOL_GPL(register_kretprobes); @@ -2151,7 +2231,7 @@ static void kill_kprobe(struct kprobe *p) /* * The module is going away. We should disarm the kprobe which * is using ftrace, because ftrace framework is still available at - * MODULE_STATE_GOING notification. + * 'MODULE_STATE_GOING' notification. */ if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed) disarm_kprobe_ftrace(p); @@ -2214,8 +2294,7 @@ EXPORT_SYMBOL_GPL(enable_kprobe); /* Caller must NOT call this in usual path. 
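The consumer side of this trampoline machinery is the usual kretprobe pattern, roughly as in samples/kprobes/kretprobe_example.c ('kernel_clone' is an arbitrary target):

static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	pr_info("probed function returned %lu\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe example_rp = {
	.kp.symbol_name	= "kernel_clone",	/* arbitrary example target */
	.handler	= example_ret_handler,
	.maxactive	= 20,	/* instances to pre-allocate for concurrency */
};

/* register_kretprobe(&example_rp) on load, unregister_kretprobe() on unload. */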
This is only for critical case */ void dump_kprobe(struct kprobe *kp) { - pr_err("Dumping kprobe:\n"); - pr_err("Name: %s\nOffset: %x\nAddress: %pS\n", + pr_err("Dump kprobe:\n.symbol_name = %s, .offset = %x, .addr = %pS\n", kp->symbol_name, kp->offset, kp->addr); } NOKPROBE_SYMBOL(dump_kprobe); @@ -2317,7 +2396,7 @@ static int __init populate_kprobe_blacklist(unsigned long *start, int ret; for (iter = start; iter < end; iter++) { - entry = arch_deref_entry_point((void *)*iter); + entry = (unsigned long)dereference_symbol_descriptor((void *)*iter); ret = kprobe_add_ksym_blacklist(entry); if (ret == -EINVAL) continue; @@ -2325,13 +2404,13 @@ static int __init populate_kprobe_blacklist(unsigned long *start, return ret; } - /* Symbols in __kprobes_text are blacklisted */ + /* Symbols in '__kprobes_text' are blacklisted */ ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start, (unsigned long)__kprobes_text_end); if (ret) return ret; - /* Symbols in noinstr section are blacklisted */ + /* Symbols in 'noinstr' section are blacklisted */ ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start, (unsigned long)__noinstr_text_end); @@ -2403,9 +2482,9 @@ static int kprobes_module_callback(struct notifier_block *nb, return NOTIFY_DONE; /* - * When MODULE_STATE_GOING was notified, both of module .text and - * .init.text sections would be freed. When MODULE_STATE_LIVE was - * notified, only .init.text section would be freed. We need to + * When 'MODULE_STATE_GOING' was notified, both of module '.text' and + * '.init.text' sections would be freed. When 'MODULE_STATE_LIVE' was + * notified, only '.init.text' section would be freed. We need to * disable kprobes which have been inserted in the sections. */ mutex_lock(&kprobe_mutex); @@ -2422,9 +2501,9 @@ static int kprobes_module_callback(struct notifier_block *nb, * * Note, this will also move any optimized probes * that are pending to be removed from their - * corresponding lists to the freeing_list and + * corresponding lists to the 'freeing_list' and * will not be touched by the delayed - * kprobe_optimizer work handler. + * kprobe_optimizer() work handler. */ kill_kprobe(p); } @@ -2440,10 +2519,6 @@ static struct notifier_block kprobe_module_nb = { .priority = 0 }; -/* Markers of _kprobe_blacklist section */ -extern unsigned long __start_kprobe_blacklist[]; -extern unsigned long __stop_kprobe_blacklist[]; - void kprobe_free_init_mem(void) { void *start = (void *)(&__init_begin); @@ -2454,7 +2529,7 @@ void kprobe_free_init_mem(void) mutex_lock(&kprobe_mutex); - /* Kill all kprobes on initmem */ + /* Kill all kprobes on initmem because the target code has been freed. */ for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; hlist_for_each_entry(p, head, hlist) { @@ -2477,10 +2552,8 @@ static int __init init_kprobes(void) err = populate_kprobe_blacklist(__start_kprobe_blacklist, __stop_kprobe_blacklist); - if (err) { - pr_err("kprobes: failed to populate blacklist: %d\n", err); - pr_err("Please take care of using kprobes.\n"); - } + if (err) + pr_err("Failed to populate blacklist (error %d), kprobes not restricted, be careful using them!\n", err); if (kretprobe_blacklist_size) { /* lookup the function address from its name */ @@ -2488,7 +2561,7 @@ static int __init init_kprobes(void) kretprobe_blacklist[i].addr = kprobe_lookup_name(kretprobe_blacklist[i].name, 0); if (!kretprobe_blacklist[i].addr) - printk("kretprobe: lookup failed: %s\n", + pr_err("Failed to lookup symbol '%s' for kretprobe blacklist. 
Maybe the target function is removed or renamed.\n", kretprobe_blacklist[i].name); } } @@ -2497,7 +2570,7 @@ static int __init init_kprobes(void) kprobes_all_disarmed = false; #if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT) - /* Init kprobe_optinsn_slots for allocation */ + /* Init 'kprobe_optinsn_slots' for allocation */ kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE; #endif @@ -2508,9 +2581,6 @@ static int __init init_kprobes(void) err = register_module_notifier(&kprobe_module_nb); kprobes_initialized = (err == 0); - - if (!err) - init_test_probes(); return err; } early_initcall(init_kprobes); @@ -2631,7 +2701,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v) list_entry(v, struct kprobe_blacklist_entry, list); /* - * If /proc/kallsyms is not showing kernel address, we won't + * If '/proc/kallsyms' is not showing kernel address, we won't * show them here either. */ if (!kallsyms_show_value(m->file->f_cred)) @@ -2692,7 +2762,7 @@ static int arm_all_kprobes(void) } if (errors) - pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n", + pr_warn("Kprobes globally enabled, but failed to enable %d out of %d probes. Please check which kprobes are kept disabled via debugfs.\n", errors, total); else pr_info("Kprobes globally enabled\n"); @@ -2735,7 +2805,7 @@ static int disarm_all_kprobes(void) } if (errors) - pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n", + pr_warn("Kprobes globally disabled, but failed to disable %d out of %d probes. Please check which kprobes are kept enabled via debugfs.\n", errors, total); else pr_info("Kprobes globally disabled\n"); @@ -2770,30 +2840,14 @@ static ssize_t read_enabled_file_bool(struct file *file, static ssize_t write_enabled_file_bool(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { - char buf[32]; - size_t buf_size; - int ret = 0; - - buf_size = min(count, (sizeof(buf)-1)); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; + bool enable; + int ret; - buf[buf_size] = '\0'; - switch (buf[0]) { - case 'y': - case 'Y': - case '1': - ret = arm_all_kprobes(); - break; - case 'n': - case 'N': - case '0': - ret = disarm_all_kprobes(); - break; - default: - return -EINVAL; - } + ret = kstrtobool_from_user(user_buf, count, &enable); + if (ret) + return ret; + ret = enable ? arm_all_kprobes() : disarm_all_kprobes(); if (ret) return ret; @@ -2809,13 +2863,12 @@ static const struct file_operations fops_kp = { static int __init debugfs_kprobe_init(void) { struct dentry *dir; - unsigned int value = 1; dir = debugfs_create_dir("kprobes", NULL); debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops); - debugfs_create_file("enabled", 0600, dir, &value, &fops_kp); + debugfs_create_file("enabled", 0600, dir, NULL, &fops_kp); debugfs_create_file("blacklist", 0400, dir, NULL, &kprobe_blacklist_fops); diff --git a/kernel/kthread.c b/kernel/kthread.c index 4a4d7092a2d8..7113003fab63 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -433,7 +433,7 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data), * If thread is going to be bound on a particular cpu, give its node * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE. * When woken, the thread will run @threadfn() with @data as its - * argument. @threadfn() can either call do_exit() directly if it is a + * argument. 
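The kstrtobool_from_user() pattern adopted for the 'enabled' file generalizes to any boolean debugfs knob. A small sketch; example_enable()/example_disable() are hypothetical callbacks:

static ssize_t example_write_bool(struct file *file, const char __user *ubuf,
				  size_t count, loff_t *ppos)
{
	bool enable;
	int ret;

	/* Accepts y/n, 1/0, on/off, in either case. */
	ret = kstrtobool_from_user(ubuf, count, &enable);
	if (ret)
		return ret;

	ret = enable ? example_enable() : example_disable();
	return ret ? ret : count;
}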
@threadfn() can either return directly if it is a * standalone thread for which no one will call kthread_stop(), or * return when 'kthread_should_stop()' is true (which means * kthread_stop() has been called). The return value should be zero diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c index e8029aea67f1..fe316c021d73 100644 --- a/kernel/livepatch/patch.c +++ b/kernel/livepatch/patch.c @@ -49,14 +49,15 @@ static void notrace klp_ftrace_handler(unsigned long ip, ops = container_of(fops, struct klp_ops, fops); + /* + * The ftrace_test_recursion_trylock() will disable preemption, + * which is required for the variant of synchronize_rcu() that is + * used to allow patching functions where RCU is not watching. + * See klp_synchronize_transition() for more details. + */ bit = ftrace_test_recursion_trylock(ip, parent_ip); if (WARN_ON_ONCE(bit < 0)) return; - /* - * A variant of synchronize_rcu() is used to allow patching functions - * where RCU is not watching, see klp_synchronize_transition(). - */ - preempt_disable_notrace(); func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, stack_node); @@ -120,7 +121,6 @@ static void notrace klp_ftrace_handler(unsigned long ip, klp_arch_set_pc(fregs, (unsigned long)func->new_func); unlock: - preempt_enable_notrace(); ftrace_test_recursion_unlock(bit); } diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 7096384dc60f..2270ec68f10a 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -788,6 +788,21 @@ static int very_verbose(struct lock_class *class) * Is this the address of a static object: */ #ifdef __KERNEL__ +/* + * Check if an address is part of freed initmem. After initmem is freed, + * memory can be allocated from it, and such allocations would then have + * addresses within the range [_stext, _end]. + */ +#ifndef arch_is_kernel_initmem_freed +static int arch_is_kernel_initmem_freed(unsigned long addr) +{ + if (system_state < SYSTEM_FREEING_INITMEM) + return 0; + + return init_section_contains((void *)addr, 1); +} +#endif + static int static_obj(const void *obj) { unsigned long start = (unsigned long) &_stext, @@ -803,9 +818,6 @@ static int static_obj(const void *obj) if ((addr >= start) && (addr < end)) return 1; - if (arch_is_kernel_data(addr)) - return 1; - /* * in-kernel percpu var? */ diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index 7c5a4a087cc7..397ac13d2ef7 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -1022,23 +1022,23 @@ static int __init lock_torture_init(void) if (onoff_interval > 0) { firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ, NULL); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } if (shuffle_interval > 0) { firsterr = torture_shuffle_init(shuffle_interval); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } if (shutdown_secs > 0) { firsterr = torture_shutdown_init(shutdown_secs, lock_torture_cleanup); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } if (stutter > 0) { firsterr = torture_stutter_init(stutter, stutter); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } @@ -1082,7 +1082,7 @@ static int __init lock_torture_init(void) /* Create writer. */ firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i], writer_tasks[i]); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; create_reader: @@ -1091,13 +1091,13 @@ static int __init lock_torture_init(void) /* Create reader. 
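The kthread.c comment fix above reflects the current convention: a threadfn should simply return (rather than call do_exit()) and let kthread_stop() collect that value. A minimal sketch:

static struct task_struct *example_task;

static int example_thread_fn(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);	/* periodic work here */

	return 0;	/* just return; kthread_stop() picks this value up */
}

static int example_start(void)
{
	example_task = kthread_run(example_thread_fn, NULL, "example");
	return PTR_ERR_OR_ZERO(example_task);
}

static void example_stop(void)
{
	kthread_stop(example_task);	/* sets should_stop and waits for the return */
}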
*/ firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j], reader_tasks[j]); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } if (stat_interval > 0) { firsterr = torture_create_kthread(lock_torture_stats, NULL, stats_task); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } torture_init_end(); diff --git a/kernel/module.c b/kernel/module.c index 5c26a76e800b..84a9141a5e15 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2942,7 +2942,11 @@ static int module_sig_check(struct load_info *info, int flags) static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr) { +#if defined(CONFIG_64BIT) + unsigned long long secend; +#else unsigned long secend; +#endif /* * Check for both overflow and offset/size being @@ -2967,14 +2971,29 @@ static int elf_validity_check(struct load_info *info) Elf_Shdr *shdr, *strhdr; int err; - if (info->len < sizeof(*(info->hdr))) - return -ENOEXEC; + if (info->len < sizeof(*(info->hdr))) { + pr_err("Invalid ELF header len %lu\n", info->len); + goto no_exec; + } - if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0 - || info->hdr->e_type != ET_REL - || !elf_check_arch(info->hdr) - || info->hdr->e_shentsize != sizeof(Elf_Shdr)) - return -ENOEXEC; + if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) { + pr_err("Invalid ELF header magic: != %s\n", ELFMAG); + goto no_exec; + } + if (info->hdr->e_type != ET_REL) { + pr_err("Invalid ELF header type: %u != %u\n", + info->hdr->e_type, ET_REL); + goto no_exec; + } + if (!elf_check_arch(info->hdr)) { + pr_err("Invalid architecture in ELF header: %u\n", + info->hdr->e_machine); + goto no_exec; + } + if (info->hdr->e_shentsize != sizeof(Elf_Shdr)) { + pr_err("Invalid ELF section header size\n"); + goto no_exec; + } /* * e_shnum is 16 bits, and sizeof(Elf_Shdr) is @@ -2983,8 +3002,10 @@ static int elf_validity_check(struct load_info *info) */ if (info->hdr->e_shoff >= info->len || (info->hdr->e_shnum * sizeof(Elf_Shdr) > - info->len - info->hdr->e_shoff)) - return -ENOEXEC; + info->len - info->hdr->e_shoff)) { + pr_err("Invalid ELF section header overflow\n"); + goto no_exec; + } info->sechdrs = (void *)info->hdr + info->hdr->e_shoff; @@ -2992,13 +3013,19 @@ static int elf_validity_check(struct load_info *info) * Verify if the section name table index is valid. */ if (info->hdr->e_shstrndx == SHN_UNDEF - || info->hdr->e_shstrndx >= info->hdr->e_shnum) - return -ENOEXEC; + || info->hdr->e_shstrndx >= info->hdr->e_shnum) { + pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n", + info->hdr->e_shstrndx, info->hdr->e_shstrndx, + info->hdr->e_shnum); + goto no_exec; + } strhdr = &info->sechdrs[info->hdr->e_shstrndx]; err = validate_section_offset(info, strhdr); - if (err < 0) + if (err < 0) { + pr_err("Invalid ELF section hdr(type %u)\n", strhdr->sh_type); return err; + } /* * The section name table must be NUL-terminated, as required @@ -3006,8 +3033,10 @@ static int elf_validity_check(struct load_info *info) * strings in the section safe. 
*/ info->secstrings = (void *)info->hdr + strhdr->sh_offset; - if (info->secstrings[strhdr->sh_size - 1] != '\0') - return -ENOEXEC; + if (info->secstrings[strhdr->sh_size - 1] != '\0') { + pr_err("ELF Spec violation: section name table isn't null terminated\n"); + goto no_exec; + } /* * The code assumes that section 0 has a length of zero and @@ -3015,8 +3044,11 @@ static int elf_validity_check(struct load_info *info) */ if (info->sechdrs[0].sh_type != SHT_NULL || info->sechdrs[0].sh_size != 0 - || info->sechdrs[0].sh_addr != 0) - return -ENOEXEC; + || info->sechdrs[0].sh_addr != 0) { + pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n", + info->sechdrs[0].sh_type); + goto no_exec; + } for (i = 1; i < info->hdr->e_shnum; i++) { shdr = &info->sechdrs[i]; @@ -3026,8 +3058,12 @@ static int elf_validity_check(struct load_info *info) continue; case SHT_SYMTAB: if (shdr->sh_link == SHN_UNDEF - || shdr->sh_link >= info->hdr->e_shnum) - return -ENOEXEC; + || shdr->sh_link >= info->hdr->e_shnum) { + pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d)\n", + shdr->sh_link, shdr->sh_link, + info->hdr->e_shnum); + goto no_exec; + } fallthrough; default: err = validate_section_offset(info, shdr); @@ -3049,6 +3085,9 @@ static int elf_validity_check(struct load_info *info) } return 0; + +no_exec: + return -ENOEXEC; } #define COPY_CHUNK_SIZE (16*PAGE_SIZE) @@ -3940,10 +3979,8 @@ static int load_module(struct load_info *info, const char __user *uargs, * sections. */ err = elf_validity_check(info); - if (err) { - pr_err("Module has invalid ELF structures\n"); + if (err) goto free_copy; - } /* * Everything checks out, so set up the section info diff --git a/kernel/pid.c b/kernel/pid.c index efe87db44683..2fc0a16ec77b 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -540,6 +540,42 @@ struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags) } /** + * pidfd_get_task() - Get the task associated with a pidfd + * + * @pidfd: pidfd for which to get the task + * @flags: flags associated with this pidfd + * + * Return the task associated with @pidfd. The function takes a reference on + * the returned task. The caller is responsible for releasing that reference. + * + * Currently, the process identified by @pidfd is always a thread-group leader. + * This restriction currently exists for all aspects of pidfds including pidfd + * creation (CLONE_PIDFD cannot be used with CLONE_THREAD) and pidfd polling + * (only supports thread group leaders). + * + * Return: On success, the task_struct associated with the pidfd. + * On error, a negative errno number will be returned. + */ +struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags) +{ + unsigned int f_flags; + struct pid *pid; + struct task_struct *task; + + pid = pidfd_get_pid(pidfd, &f_flags); + if (IS_ERR(pid)) + return ERR_CAST(pid); + + task = get_pid_task(pid, PIDTYPE_TGID); + put_pid(pid); + if (!task) + return ERR_PTR(-ESRCH); + + *flags = f_flags; + return task; +} + +/** * pidfd_create() - Create a new pid file descriptor. * * @pid: struct pid that the pidfd will reference diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index a332ccd829e2..0153b0ca7b23 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -2,7 +2,7 @@ /* * Energy Model of devices * - * Copyright (c) 2018-2020, Arm ltd. + * Copyright (c) 2018-2021, Arm ltd. * Written by: Quentin Perret, Arm ltd. * Improvements provided by: Lukasz Luba, Arm ltd. 
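A hedged sketch of a caller of the new pidfd_get_task() helper; the surrounding syscall plumbing is omitted and the function name is invented:

static int example_act_on_pidfd(int pidfd)
{
	unsigned int f_flags;
	struct task_struct *task;

	task = pidfd_get_task(pidfd, &f_flags);
	if (IS_ERR(task))
		return PTR_ERR(task);

	/* ... operate on the thread-group leader ... */

	put_task_struct(task);	/* drop the reference pidfd_get_task() took */
	return 0;
}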
*/ @@ -10,6 +10,7 @@ #define pr_fmt(fmt) "energy_model: " fmt #include <linux/cpu.h> +#include <linux/cpufreq.h> #include <linux/cpumask.h> #include <linux/debugfs.h> #include <linux/energy_model.h> @@ -42,6 +43,7 @@ static void em_debug_create_ps(struct em_perf_state *ps, struct dentry *pd) debugfs_create_ulong("frequency", 0444, d, &ps->frequency); debugfs_create_ulong("power", 0444, d, &ps->power); debugfs_create_ulong("cost", 0444, d, &ps->cost); + debugfs_create_ulong("inefficient", 0444, d, &ps->flags); } static int em_debug_cpus_show(struct seq_file *s, void *unused) @@ -55,7 +57,8 @@ DEFINE_SHOW_ATTRIBUTE(em_debug_cpus); static int em_debug_units_show(struct seq_file *s, void *unused) { struct em_perf_domain *pd = s->private; - char *units = pd->milliwatts ? "milliWatts" : "bogoWatts"; + char *units = (pd->flags & EM_PERF_DOMAIN_MILLIWATTS) ? + "milliWatts" : "bogoWatts"; seq_printf(s, "%s\n", units); @@ -63,6 +66,17 @@ static int em_debug_units_show(struct seq_file *s, void *unused) } DEFINE_SHOW_ATTRIBUTE(em_debug_units); +static int em_debug_skip_inefficiencies_show(struct seq_file *s, void *unused) +{ + struct em_perf_domain *pd = s->private; + int enabled = (pd->flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES) ? 1 : 0; + + seq_printf(s, "%d\n", enabled); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(em_debug_skip_inefficiencies); + static void em_debug_create_pd(struct device *dev) { struct dentry *d; @@ -76,6 +90,8 @@ static void em_debug_create_pd(struct device *dev) &em_debug_cpus_fops); debugfs_create_file("units", 0444, d, dev->em_pd, &em_debug_units_fops); + debugfs_create_file("skip-inefficiencies", 0444, d, dev->em_pd, + &em_debug_skip_inefficiencies_fops); /* Create a sub-directory for each performance state */ for (i = 0; i < dev->em_pd->nr_perf_states; i++) @@ -107,8 +123,7 @@ static void em_debug_remove_pd(struct device *dev) {} static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd, int nr_states, struct em_data_callback *cb) { - unsigned long opp_eff, prev_opp_eff = ULONG_MAX; - unsigned long power, freq, prev_freq = 0; + unsigned long power, freq, prev_freq = 0, prev_cost = ULONG_MAX; struct em_perf_state *table; int i, ret; u64 fmax; @@ -153,27 +168,22 @@ static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd, table[i].power = power; table[i].frequency = prev_freq = freq; - - /* - * The hertz/watts efficiency ratio should decrease as the - * frequency grows on sane platforms. But this isn't always - * true in practice so warn the user if a higher OPP is more - * power efficient than a lower one. - */ - opp_eff = freq / power; - if (opp_eff >= prev_opp_eff) - dev_dbg(dev, "EM: hertz/watts ratio non-monotonically decreasing: em_perf_state %d >= em_perf_state%d\n", - i, i - 1); - prev_opp_eff = opp_eff; } /* Compute the cost of each performance state. 
*/ fmax = (u64) table[nr_states - 1].frequency; - for (i = 0; i < nr_states; i++) { + for (i = nr_states - 1; i >= 0; i--) { unsigned long power_res = em_scale_power(table[i].power); table[i].cost = div64_u64(fmax * power_res, table[i].frequency); + if (table[i].cost >= prev_cost) { + table[i].flags = EM_PERF_STATE_INEFFICIENT; + dev_dbg(dev, "EM: OPP:%lu is inefficient\n", + table[i].frequency); + } else { + prev_cost = table[i].cost; + } } pd->table = table; @@ -222,6 +232,43 @@ static int em_create_pd(struct device *dev, int nr_states, return 0; } +static void em_cpufreq_update_efficiencies(struct device *dev) +{ + struct em_perf_domain *pd = dev->em_pd; + struct em_perf_state *table; + struct cpufreq_policy *policy; + int found = 0; + int i; + + if (!_is_cpu_device(dev) || !pd) + return; + + policy = cpufreq_cpu_get(cpumask_first(em_span_cpus(pd))); + if (!policy) { + dev_warn(dev, "EM: Access to CPUFreq policy failed"); + return; + } + + table = pd->table; + + for (i = 0; i < pd->nr_perf_states; i++) { + if (!(table[i].flags & EM_PERF_STATE_INEFFICIENT)) + continue; + + if (!cpufreq_table_set_inefficient(policy, table[i].frequency)) + found++; + } + + if (!found) + return; + + /* + * Efficiencies have been installed in CPUFreq, inefficient frequencies + * will be skipped. The EM can do the same. + */ + pd->flags |= EM_PERF_DOMAIN_SKIP_INEFFICIENCIES; +} + /** * em_pd_get() - Return the performance domain for a device * @dev : Device to find the performance domain for @@ -335,7 +382,10 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, if (ret) goto unlock; - dev->em_pd->milliwatts = milliwatts; + if (milliwatts) + dev->em_pd->flags |= EM_PERF_DOMAIN_MILLIWATTS; + + em_cpufreq_update_efficiencies(dev); em_debug_create_pd(dev); dev_info(dev, "EM: created perf domain\n"); diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 559acef3fddb..9ed9b744876c 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -300,7 +300,7 @@ static int create_image(int platform_mode) if (error || hibernation_test(TEST_PLATFORM)) goto Platform_finish; - error = suspend_disable_secondary_cpus(); + error = pm_sleep_disable_secondary_cpus(); if (error || hibernation_test(TEST_CPUS)) goto Enable_cpus; @@ -342,7 +342,7 @@ static int create_image(int platform_mode) local_irq_enable(); Enable_cpus: - suspend_enable_secondary_cpus(); + pm_sleep_enable_secondary_cpus(); /* Allow architectures to do nosmt-specific post-resume dances */ if (!in_suspend) @@ -466,6 +466,8 @@ static int resume_target_kernel(bool platform_mode) if (error) goto Cleanup; + cpuidle_pause(); + error = hibernate_resume_nonboot_cpu_disable(); if (error) goto Enable_cpus; @@ -509,7 +511,7 @@ static int resume_target_kernel(bool platform_mode) local_irq_enable(); Enable_cpus: - suspend_enable_secondary_cpus(); + pm_sleep_enable_secondary_cpus(); Cleanup: platform_restore_cleanup(platform_mode); @@ -587,7 +589,7 @@ int hibernation_platform_enter(void) if (error) goto Platform_finish; - error = suspend_disable_secondary_cpus(); + error = pm_sleep_disable_secondary_cpus(); if (error) goto Enable_cpus; @@ -609,7 +611,7 @@ int hibernation_platform_enter(void) local_irq_enable(); Enable_cpus: - suspend_enable_secondary_cpus(); + pm_sleep_enable_secondary_cpus(); Platform_finish: hibernation_ops->finish(); diff --git a/kernel/power/power.h b/kernel/power/power.h index 778bf431ec02..326f8d032eb5 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -4,6 +4,8 @@ #include 
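A worked example of the inefficiency marking above, with invented numbers (cost = fmax * power / freq, walking from the highest state downward and comparing against the cost of the last efficient state):

/*
 *   freq (kHz)   power   cost = fmax * power / freq
 *   3000000      450     450              <- fmax, efficient by definition
 *   2000000      300     450  >= 450      <- flagged EM_PERF_STATE_INEFFICIENT
 *   1000000      100     300  <  450      <- efficient, becomes the new reference
 *
 * Running the middle OPP costs as much energy per unit of work as the top
 * one, so with EM_PERF_DOMAIN_SKIP_INEFFICIENCIES set it is simply skipped.
 */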
<linux/utsname.h> #include <linux/freezer.h> #include <linux/compiler.h> +#include <linux/cpu.h> +#include <linux/cpuidle.h> struct swsusp_info { struct new_utsname uts; @@ -310,3 +312,15 @@ extern int pm_wake_lock(const char *buf); extern int pm_wake_unlock(const char *buf); #endif /* !CONFIG_PM_WAKELOCKS */ + +static inline int pm_sleep_disable_secondary_cpus(void) +{ + cpuidle_pause(); + return suspend_disable_secondary_cpus(); +} + +static inline void pm_sleep_enable_secondary_cpus(void) +{ + suspend_enable_secondary_cpus(); + cpuidle_resume(); +} diff --git a/kernel/power/process.c b/kernel/power/process.c index 37401c99b7d7..b7e7798637b8 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -94,7 +94,7 @@ static int try_to_freeze_tasks(bool user_only) todo - wq_busy, wq_busy); if (wq_busy) - show_workqueue_state(); + show_all_workqueues(); if (!wakeup || pm_debug_messages_on) { read_lock(&tasklist_lock); diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index eb75f394a059..80cc1f0f502b 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -97,7 +97,6 @@ static void s2idle_enter(void) raw_spin_unlock_irq(&s2idle_lock); cpus_read_lock(); - cpuidle_resume(); /* Push all the CPUs into the idle loop. */ wake_up_all_idle_cpus(); @@ -105,7 +104,6 @@ static void s2idle_enter(void) swait_event_exclusive(s2idle_wait_head, s2idle_state == S2IDLE_STATE_WAKE); - cpuidle_pause(); cpus_read_unlock(); raw_spin_lock_irq(&s2idle_lock); @@ -162,11 +160,13 @@ EXPORT_SYMBOL_GPL(s2idle_wake); static bool valid_state(suspend_state_t state) { /* - * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low level - * support and need to be valid to the low level - * implementation, no valid callback implies that none are valid. + * The PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states require low-level + * support and need to be valid to the low-level implementation. + * + * No ->valid() or ->enter() callback implies that none are valid. 
*/ - return suspend_ops && suspend_ops->valid && suspend_ops->valid(state); + return suspend_ops && suspend_ops->valid && suspend_ops->valid(state) && + suspend_ops->enter; } void __init pm_states_init(void) @@ -238,7 +238,7 @@ EXPORT_SYMBOL_GPL(suspend_valid_only_mem); static bool sleep_state_supported(suspend_state_t state) { - return state == PM_SUSPEND_TO_IDLE || (suspend_ops && suspend_ops->enter); + return state == PM_SUSPEND_TO_IDLE || valid_state(state); } static int platform_suspend_prepare(suspend_state_t state) @@ -422,7 +422,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) goto Platform_wake; } - error = suspend_disable_secondary_cpus(); + error = pm_sleep_disable_secondary_cpus(); if (error || suspend_test(TEST_CPUS)) goto Enable_cpus; @@ -452,7 +452,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) BUG_ON(irqs_disabled()); Enable_cpus: - suspend_enable_secondary_cpus(); + pm_sleep_enable_secondary_cpus(); Platform_wake: platform_resume_noirq(state); diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 3cb89baebc79..ff326c2cb77b 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -299,7 +299,7 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr, return error; } -static blk_status_t hib_wait_io(struct hib_bio_batch *hb) +static int hib_wait_io(struct hib_bio_batch *hb) { /* * We are relying on the behavior of blk_plug that a thread with @@ -705,22 +705,19 @@ static int save_image_lzo(struct swap_map_handle *handle, goto out_clean; } - data = vmalloc(array_size(nr_threads, sizeof(*data))); + data = vzalloc(array_size(nr_threads, sizeof(*data))); if (!data) { pr_err("Failed to allocate LZO data\n"); ret = -ENOMEM; goto out_clean; } - for (thr = 0; thr < nr_threads; thr++) - memset(&data[thr], 0, offsetof(struct cmp_data, go)); - crc = kmalloc(sizeof(*crc), GFP_KERNEL); + crc = kzalloc(sizeof(*crc), GFP_KERNEL); if (!crc) { pr_err("Failed to allocate crc\n"); ret = -ENOMEM; goto out_clean; } - memset(crc, 0, offsetof(struct crc_data, go)); /* * Start the compression threads. 
@@ -1198,22 +1195,19 @@ static int load_image_lzo(struct swap_map_handle *handle, goto out_clean; } - data = vmalloc(array_size(nr_threads, sizeof(*data))); + data = vzalloc(array_size(nr_threads, sizeof(*data))); if (!data) { pr_err("Failed to allocate LZO data\n"); ret = -ENOMEM; goto out_clean; } - for (thr = 0; thr < nr_threads; thr++) - memset(&data[thr], 0, offsetof(struct dec_data, go)); - crc = kmalloc(sizeof(*crc), GFP_KERNEL); + crc = kzalloc(sizeof(*crc), GFP_KERNEL); if (!crc) { pr_err("Failed to allocate crc\n"); ret = -ENOMEM; goto out_clean; } - memset(crc, 0, offsetof(struct crc_data, go)); clean_pages_on_decompress = true; @@ -1521,9 +1515,10 @@ end: int swsusp_check(void) { int error; + void *holder; hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, - FMODE_READ, NULL); + FMODE_READ | FMODE_EXCL, &holder); if (!IS_ERR(hib_resume_bdev)) { set_blocksize(hib_resume_bdev, PAGE_SIZE); clear_page(swsusp_header); @@ -1545,7 +1540,7 @@ int swsusp_check(void) put: if (error) - blkdev_put(hib_resume_bdev, FMODE_READ); + blkdev_put(hib_resume_bdev, FMODE_READ | FMODE_EXCL); else pr_debug("Image signature found, resuming\n"); } else { diff --git a/kernel/printk/index.c b/kernel/printk/index.c index d3709408debe..c85be186a783 100644 --- a/kernel/printk/index.c +++ b/kernel/printk/index.c @@ -26,10 +26,9 @@ static struct pi_entry *pi_get_entry(const struct module *mod, loff_t pos) if (mod) { entries = mod->printk_index_start; nr_entries = mod->printk_index_size; - } + } else #endif - - if (!mod) { + { /* vmlinux, comes from linker symbols */ entries = __start_printk_index; nr_entries = __stop_printk_index - __start_printk_index; diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index a8d0a58deebc..013bfd6dcc34 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -847,7 +847,7 @@ static int devkmsg_open(struct inode *inode, struct file *file) return err; } - user = kmalloc(sizeof(struct devkmsg_user), GFP_KERNEL); + user = kvmalloc(sizeof(struct devkmsg_user), GFP_KERNEL); if (!user) return -ENOMEM; @@ -875,7 +875,7 @@ static int devkmsg_release(struct inode *inode, struct file *file) ratelimit_state_exit(&user->rs); mutex_destroy(&user->lock); - kfree(user); + kvfree(user); return 0; } @@ -1166,9 +1166,9 @@ void __init setup_log_buf(int early) return; err_free_descs: - memblock_free_ptr(new_descs, new_descs_size); + memblock_free(new_descs, new_descs_size); err_free_log_buf: - memblock_free_ptr(new_log_buf, new_log_buf_len); + memblock_free(new_log_buf, new_log_buf_len); } static bool __read_mostly ignore_loglevel; @@ -2066,6 +2066,7 @@ u16 printk_parse_prefix(const char *text, int *level, return prefix_len; } +__printf(5, 0) static u16 printk_sprint(char *text, u16 size, int facility, enum printk_info_flags *flags, const char *fmt, va_list args) diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c index 2cc34a22a506..228f143bf935 100644 --- a/kernel/rcu/rcuscale.c +++ b/kernel/rcu/rcuscale.c @@ -758,7 +758,7 @@ kfree_scale_init(void) init_waitqueue_head(&shutdown_wq); firsterr = torture_create_kthread(kfree_scale_shutdown, NULL, shutdown_task); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; schedule_timeout_uninterruptible(1); } @@ -775,7 +775,7 @@ kfree_scale_init(void) for (i = 0; i < kfree_nrealthreads; i++) { firsterr = torture_create_kthread(kfree_scale_thread, (void *)i, kfree_reader_tasks[i]); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } @@ -838,7 +838,7 @@ rcu_scale_init(void) 
init_waitqueue_head(&shutdown_wq); firsterr = torture_create_kthread(rcu_scale_shutdown, NULL, shutdown_task); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; schedule_timeout_uninterruptible(1); } @@ -852,7 +852,7 @@ rcu_scale_init(void) for (i = 0; i < nrealreaders; i++) { firsterr = torture_create_kthread(rcu_scale_reader, (void *)i, reader_tasks[i]); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } while (atomic_read(&n_rcu_scale_reader_started) < nrealreaders) @@ -879,7 +879,7 @@ rcu_scale_init(void) } firsterr = torture_create_kthread(rcu_scale_writer, (void *)i, writer_tasks[i]); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } torture_init_end(); diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index ab4215266ebe..8b410d982990 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1432,28 +1432,34 @@ static void rcutorture_one_extend(int *readstate, int newstate, /* First, put new protection in place to avoid critical-section gap. */ if (statesnew & RCUTORTURE_RDR_BH) local_bh_disable(); + if (statesnew & RCUTORTURE_RDR_RBH) + rcu_read_lock_bh(); if (statesnew & RCUTORTURE_RDR_IRQ) local_irq_disable(); if (statesnew & RCUTORTURE_RDR_PREEMPT) preempt_disable(); - if (statesnew & RCUTORTURE_RDR_RBH) - rcu_read_lock_bh(); if (statesnew & RCUTORTURE_RDR_SCHED) rcu_read_lock_sched(); if (statesnew & RCUTORTURE_RDR_RCU) idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT; - /* Next, remove old protection, irq first due to bh conflict. */ + /* + * Next, remove old protection, in decreasing order of strength + * to avoid unlock paths that aren't safe in the stronger + * context. Namely: BH can not be enabled with disabled interrupts. + * Additionally PREEMPT_RT requires that BH is enabled in preemptible + * context. + */ if (statesold & RCUTORTURE_RDR_IRQ) local_irq_enable(); - if (statesold & RCUTORTURE_RDR_BH) - local_bh_enable(); if (statesold & RCUTORTURE_RDR_PREEMPT) preempt_enable(); - if (statesold & RCUTORTURE_RDR_RBH) - rcu_read_unlock_bh(); if (statesold & RCUTORTURE_RDR_SCHED) rcu_read_unlock_sched(); + if (statesold & RCUTORTURE_RDR_BH) + local_bh_enable(); + if (statesold & RCUTORTURE_RDR_RBH) + rcu_read_unlock_bh(); if (statesold & RCUTORTURE_RDR_RCU) { bool lockit = !statesnew && !(torture_random(trsp) & 0xffff); @@ -1496,6 +1502,9 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) int mask = rcutorture_extend_mask_max(); unsigned long randmask1 = torture_random(trsp) >> 8; unsigned long randmask2 = randmask1 >> 3; + unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; + unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ; + unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT); /* Mostly only one bit (need preemption!), sometimes lots of bits. */ @@ -1503,11 +1512,26 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) mask = mask & randmask2; else mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); - /* Can't enable bh w/irq disabled. */ - if ((mask & RCUTORTURE_RDR_IRQ) && - ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) || - (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH)))) - mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; + + /* + * Can't enable bh w/irq disabled. 
+ */ + if (mask & RCUTORTURE_RDR_IRQ) + mask |= oldmask & bhs; + + /* + * Ideally these sequences would be detected in debug builds + * (regardless of RT), but until then don't stop testing + * them on non-RT. + */ + if (IS_ENABLED(CONFIG_PREEMPT_RT)) { + /* Can't modify BH in atomic context */ + if (oldmask & preempts_irq) + mask &= ~bhs; + if ((oldmask | mask) & preempts_irq) + mask |= oldmask & bhs; + } + return mask ?: RCUTORTURE_RDR_RCU; } @@ -2449,7 +2473,7 @@ static int __init rcu_torture_fwd_prog_init(void) } if (stall_cpu > 0) { VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); - if (IS_MODULE(CONFIG_RCU_TORTURE_TESTS)) + if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) return -EINVAL; /* In module, can fail back to user. */ WARN_ON(1); /* Make sure rcutorture notices conflict. */ return 0; @@ -2741,7 +2765,7 @@ static int rcu_torture_read_exit(void *unused) static int rcu_torture_read_exit_init(void) { if (read_exit_burst <= 0) - return -EINVAL; + return 0; init_waitqueue_head(&read_exit_wq); read_exit_child_stop = false; read_exit_child_stopped = false; @@ -2819,7 +2843,7 @@ rcu_torture_cleanup(void) rcutorture_seq_diff(gp_seq, start_gp_seq)); torture_stop_kthread(rcu_torture_stats, stats_task); torture_stop_kthread(rcu_torture_fqs, fqs_task); - if (rcu_torture_can_boost()) + if (rcu_torture_can_boost() && rcutor_hp >= 0) cpuhp_remove_state(rcutor_hp); /* @@ -3037,7 +3061,7 @@ rcu_torture_init(void) rcu_torture_write_types(); firsterr = torture_create_kthread(rcu_torture_writer, NULL, writer_task); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; if (nfakewriters > 0) { fakewriter_tasks = kcalloc(nfakewriters, @@ -3052,7 +3076,7 @@ rcu_torture_init(void) for (i = 0; i < nfakewriters; i++) { firsterr = torture_create_kthread(rcu_torture_fakewriter, NULL, fakewriter_tasks[i]); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), @@ -3068,7 +3092,7 @@ rcu_torture_init(void) rcu_torture_reader_mbchk[i].rtc_chkrdr = -1; firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, reader_tasks[i]); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } nrealnocbers = nocbs_nthreads; @@ -3088,18 +3112,18 @@ rcu_torture_init(void) } for (i = 0; i < nrealnocbers; i++) { firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } if (stat_interval > 0) { firsterr = torture_create_kthread(rcu_torture_stats, NULL, stats_task); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } if (test_no_idle_hz && shuffle_interval > 0) { firsterr = torture_shuffle_init(shuffle_interval * HZ); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } if (stutter < 0) @@ -3109,7 +3133,7 @@ rcu_torture_init(void) t = cur_ops->stall_dur ? 
cur_ops->stall_dur() : stutter * HZ; firsterr = torture_stutter_init(stutter * HZ, t); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } if (fqs_duration < 0) @@ -3118,7 +3142,7 @@ rcu_torture_init(void) /* Create the fqs thread */ firsterr = torture_create_kthread(rcu_torture_fqs, NULL, fqs_task); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } if (test_boost_interval < 1) @@ -3132,9 +3156,9 @@ rcu_torture_init(void) firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", rcutorture_booster_init, rcutorture_booster_cleanup); - if (firsterr < 0) - goto unwind; rcutor_hp = firsterr; + if (torture_init_error(firsterr)) + goto unwind; // Testing RCU priority boosting requires rcutorture do // some serious abuse. Counter this by running ksoftirqd @@ -3153,23 +3177,23 @@ rcu_torture_init(void) } shutdown_jiffies = jiffies + shutdown_secs * HZ; firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, rcutorture_sync); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; firsterr = rcu_torture_stall_init(); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; firsterr = rcu_torture_fwd_prog_init(); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; firsterr = rcu_torture_barrier_init(); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; firsterr = rcu_torture_read_exit_init(); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; if (object_debug) rcu_test_debug_objects(); diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c index 66dc14cf5687..1631ef8a138d 100644 --- a/kernel/rcu/refscale.c +++ b/kernel/rcu/refscale.c @@ -824,7 +824,7 @@ ref_scale_init(void) init_waitqueue_head(&shutdown_wq); firsterr = torture_create_kthread(ref_scale_shutdown, NULL, shutdown_task); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; schedule_timeout_uninterruptible(1); } @@ -851,7 +851,7 @@ ref_scale_init(void) for (i = 0; i < nreaders; i++) { firsterr = torture_create_kthread(ref_scale_reader, (void *)i, reader_tasks[i].task); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; init_waitqueue_head(&(reader_tasks[i].wq)); @@ -860,7 +860,7 @@ ref_scale_init(void) // Main Task init_waitqueue_head(&main_wq); firsterr = torture_create_kthread(main_func, NULL, main_task); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; torture_init_end(); diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 171bc848e8e3..7da3c81c3f59 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -197,6 +197,7 @@ static int __noreturn rcu_tasks_kthread(void *arg) * This loop is terminated by the system going down. ;-) */ for (;;) { + set_tasks_gp_state(rtp, RTGS_WAIT_CBS); /* Pick up any new callbacks. */ raw_spin_lock_irqsave(&rtp->cbs_lock, flags); @@ -236,8 +237,6 @@ static int __noreturn rcu_tasks_kthread(void *arg) } /* Paranoid sleep to keep this from entering a tight loop */ schedule_timeout_idle(rtp->gp_sleep); - - set_tasks_gp_state(rtp, RTGS_WAIT_CBS); } } @@ -369,7 +368,7 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) //////////////////////////////////////////////////////////////////////// // // Simple variant of RCU whose quiescent states are voluntary context -// switch, cond_resched_rcu_qs(), user-space execution, and idle. +// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle. 
// As such, grace periods can take one good long time. There are no // read-side primitives similar to rcu_read_lock() and rcu_read_unlock() // because this implementation is intended to get the system into a safe @@ -540,7 +539,7 @@ DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks"); * period elapses, in other words after all currently executing RCU * read-side critical sections have completed. call_rcu_tasks() assumes * that the read-side critical sections end at a voluntary context - * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle, + * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle, * or transition to usermode execution. As such, there are no read-side * primitives analogous to rcu_read_lock() and rcu_read_unlock() because * this primitive is intended to determine that all tasks have passed @@ -678,11 +677,11 @@ DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude, * period elapses, in other words after all currently executing RCU * read-side critical sections have completed. call_rcu_tasks_rude() * assumes that the read-side critical sections end at context switch, - * cond_resched_rcu_qs(), or transition to usermode execution. As such, - * there are no read-side primitives analogous to rcu_read_lock() and - * rcu_read_unlock() because this primitive is intended to determine - * that all tasks have passed through a safe state, not so much for - * data-structure synchronization. + * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as + * usermode execution is schedulable). As such, there are no read-side + * primitives analogous to rcu_read_lock() and rcu_read_unlock() because + * this primitive is intended to determine that all tasks have passed + * through a safe state, not so much for data-structure synchronization. * * See the description of call_rcu() for more detailed information on * memory ordering guarantees. @@ -700,8 +699,8 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks_rude); * grace period has elapsed, in other words after all currently * executing rcu-tasks read-side critical sections have elapsed. These * read-side critical sections are delimited by calls to schedule(), - * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory, - * anyway) cond_resched(). + * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable + * context), and (in theory, anyway) cond_resched(). * * This is a very specialized primitive, intended only for a few uses in * tracing and other situations requiring manipulation of function preambles @@ -758,7 +757,7 @@ EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread); // 2. Protects code in the idle loop, exception entry/exit, and // CPU-hotplug code paths, similar to the capabilities of SRCU. // -// 3. Avoids expensive read-side instruction, having overhead similar +// 3. Avoids expensive read-side instructions, having overhead similar // to that of Preemptible RCU. // // There are of course downsides. The grace-period code can send IPIs to @@ -848,7 +847,7 @@ static void rcu_read_unlock_iw(struct irq_work *iwp) static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw); /* If we are the last reader, wake up the grace-period kthread. 
*/ -void rcu_read_unlock_trace_special(struct task_struct *t, int nesting) +void rcu_read_unlock_trace_special(struct task_struct *t) { int nq = READ_ONCE(t->trc_reader_special.b.need_qs); @@ -858,7 +857,7 @@ void rcu_read_unlock_trace_special(struct task_struct *t, int nesting) // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers. if (nq) WRITE_ONCE(t->trc_reader_special.b.need_qs, false); - WRITE_ONCE(t->trc_reader_nesting, nesting); + WRITE_ONCE(t->trc_reader_nesting, 0); if (nq && atomic_dec_and_test(&trc_n_readers_need_end)) irq_work_queue(&rcu_tasks_trace_iw); } @@ -890,32 +889,24 @@ static void trc_read_check_handler(void *t_in) // If the task is no longer running on this CPU, leave. if (unlikely(texp != t)) { - if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end))) - wake_up(&trc_wait); goto reset_ipi; // Already on holdout list, so will check later. } // If the task is not in a read-side critical section, and // if this is the last reader, awaken the grace-period kthread. if (likely(!READ_ONCE(t->trc_reader_nesting))) { - if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end))) - wake_up(&trc_wait); - // Mark as checked after decrement to avoid false - // positives on the above WARN_ON_ONCE(). WRITE_ONCE(t->trc_reader_checked, true); goto reset_ipi; } // If we are racing with an rcu_read_unlock_trace(), try again later. - if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0)) { - if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end))) - wake_up(&trc_wait); + if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0)) goto reset_ipi; - } WRITE_ONCE(t->trc_reader_checked, true); // Get here if the task is in a read-side critical section. Set // its state so that it will awaken the grace-period kthread upon // exit from that critical section. + atomic_inc(&trc_n_readers_need_end); // One more to wait on. WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)); WRITE_ONCE(t->trc_reader_special.b.need_qs, true); @@ -931,7 +922,7 @@ reset_ipi: static int trc_inspect_reader(struct task_struct *t, void *arg) { int cpu = task_cpu(t); - bool in_qs = false; + int nesting; bool ofl = cpu_is_offline(cpu); if (task_curr(t)) { @@ -951,18 +942,18 @@ static int trc_inspect_reader(struct task_struct *t, void *arg) n_heavy_reader_updates++; if (ofl) n_heavy_reader_ofl_updates++; - in_qs = true; + nesting = 0; } else { // The task is not running, so C-language access is safe. - in_qs = likely(!t->trc_reader_nesting); + nesting = t->trc_reader_nesting; } - // Mark as checked so that the grace-period kthread will - // remove it from the holdout list. - t->trc_reader_checked = true; - - if (in_qs) - return 0; // Already in quiescent state, done!!! + // If not exiting a read-side critical section, mark as checked + // so that the grace-period kthread will remove it from the + // holdout list. + t->trc_reader_checked = nesting >= 0; + if (nesting <= 0) + return nesting ? -EINVAL : 0; // If in QS, done, otherwise try again later. // The task is in a read-side critical section, so set up its // state so that it will awaken the grace-period kthread upon exit @@ -1000,7 +991,7 @@ static void trc_wait_for_one_reader(struct task_struct *t, // If this task is not yet on the holdout list, then we are in // an RCU read-side critical section. Otherwise, the invocation of - // rcu_add_holdout() that added it to the list did the necessary + // trc_add_holdout() that added it to the list did the necessary // get_task_struct(). 
Either way, the task cannot be freed out // from under this code. @@ -1015,21 +1006,17 @@ static void trc_wait_for_one_reader(struct task_struct *t, if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) return; - atomic_inc(&trc_n_readers_need_end); per_cpu(trc_ipi_to_cpu, cpu) = true; t->trc_ipi_to_cpu = cpu; rcu_tasks_trace.n_ipis++; - if (smp_call_function_single(cpu, - trc_read_check_handler, t, 0)) { + if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) { // Just in case there is some other reason for // failure than the target CPU being offline. + WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n", + __func__, cpu); rcu_tasks_trace.n_ipis_fails++; per_cpu(trc_ipi_to_cpu, cpu) = false; - t->trc_ipi_to_cpu = cpu; - if (atomic_dec_and_test(&trc_n_readers_need_end)) { - WARN_ON_ONCE(1); - wake_up(&trc_wait); - } + t->trc_ipi_to_cpu = -1; } } } @@ -1099,9 +1086,9 @@ static void show_stalled_task_trace(struct task_struct *t, bool *firstreport) cpu = task_cpu(t); pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n", t->pid, - ".I"[READ_ONCE(t->trc_ipi_to_cpu) > 0], + ".I"[READ_ONCE(t->trc_ipi_to_cpu) >= 0], ".i"[is_idle_task(t)], - ".N"[cpu > 0 && tick_nohz_full_cpu(cpu)], + ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)], READ_ONCE(t->trc_reader_nesting), " N"[!!READ_ONCE(t->trc_reader_special.b.need_qs)], cpu); @@ -1144,20 +1131,34 @@ static void check_all_holdout_tasks_trace(struct list_head *hop, cpus_read_unlock(); if (needreport) { - if (firstreport) + if (*firstreport) pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n"); show_stalled_ipi_trace(); } } +static void rcu_tasks_trace_empty_fn(void *unused) +{ +} + /* Wait for grace period to complete and provide ordering. */ static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp) { + int cpu; bool firstreport; struct task_struct *g, *t; LIST_HEAD(holdouts); long ret; + // Wait for any lingering IPI handlers to complete. Note that + // if a CPU has gone offline or transitioned to userspace in the + // meantime, all IPI handlers should have been drained beforehand. + // Yes, this assumes that CPUs process IPIs in order. If that ever + // changes, there will need to be a recheck and/or timed wait. + for_each_online_cpu(cpu) + if (smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))) + smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1); + // Remove the safety count. smp_mb__before_atomic(); // Order vs. earlier atomics atomic_dec(&trc_n_readers_need_end); @@ -1200,7 +1201,7 @@ static void exit_tasks_rcu_finish_trace(struct task_struct *t) WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); WRITE_ONCE(t->trc_reader_nesting, 0); if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs))) - rcu_read_unlock_trace_special(t, 0); + rcu_read_unlock_trace_special(t); } /** @@ -1208,15 +1209,11 @@ static void exit_tasks_rcu_finish_trace(struct task_struct *t) * @rhp: structure to be used for queueing the RCU updates. * @func: actual callback function to be invoked after the grace period * - * The callback function will be invoked some time after a full grace - * period elapses, in other words after all currently executing RCU - * read-side critical sections have completed. call_rcu_tasks_trace() - * assumes that the read-side critical sections end at context switch, - * cond_resched_rcu_qs(), or transition to usermode execution. 
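[Illustrative aside, not part of the patch] In the check_all_holdout_tasks_trace() hunk above, "if (firstreport)" tested the pointer itself, which is always non-NULL, so the stall header would be printed on every pass; the fix tests the flag it points to. A small sketch of the print-the-header-once pattern that the bool *firstreport argument implements (names are hypothetical):

#include <linux/printk.h>
#include <linux/types.h>

/* Callers pass the address of a bool initialized to true for each scan. */
static void report_stalled_item(int id, bool *firstreport)
{
	if (*firstreport) {			/* not "if (firstreport)" */
		pr_err("INFO: stalled items detected:\n");
		*firstreport = false;		/* later reports skip the header */
	}
	pr_err("\titem %d is stalled\n", id);
}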
As such, - * there are no read-side primitives analogous to rcu_read_lock() and - * rcu_read_unlock() because this primitive is intended to determine - * that all tasks have passed through a safe state, not so much for - * data-structure synchronization. + * The callback function will be invoked some time after a trace rcu-tasks + * grace period elapses, in other words after all currently executing + * trace rcu-tasks read-side critical sections have completed. These + * read-side critical sections are delimited by calls to rcu_read_lock_trace() + * and rcu_read_unlock_trace(). * * See the description of call_rcu() for more detailed information on * memory ordering guarantees. @@ -1232,7 +1229,7 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks_trace); * * Control will return to the caller some time after a trace rcu-tasks * grace period has elapsed, in other words after all currently executing - * rcu-tasks read-side critical sections have elapsed. These read-side + * trace rcu-tasks read-side critical sections have elapsed. These read-side * critical sections are delimited by calls to rcu_read_lock_trace() * and rcu_read_unlock_trace(). * diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index bce848e50512..ef8d36f580fc 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -327,7 +327,7 @@ static void rcu_dynticks_eqs_online(void) */ static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void) { - return !(atomic_read(this_cpu_ptr(&rcu_data.dynticks)) & 0x1); + return !(arch_atomic_read(this_cpu_ptr(&rcu_data.dynticks)) & 0x1); } /* @@ -1219,8 +1219,6 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp) static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) { unsigned long jtsq; - bool *rnhqp; - bool *ruqp; struct rcu_node *rnp = rdp->mynode; /* @@ -1285,17 +1283,15 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) * is set way high. */ jtsq = READ_ONCE(jiffies_to_sched_qs); - ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu); - rnhqp = per_cpu_ptr(&rcu_data.rcu_need_heavy_qs, rdp->cpu); - if (!READ_ONCE(*rnhqp) && + if (!READ_ONCE(rdp->rcu_need_heavy_qs) && (time_after(jiffies, rcu_state.gp_start + jtsq * 2) || time_after(jiffies, rcu_state.jiffies_resched) || rcu_state.cbovld)) { - WRITE_ONCE(*rnhqp, true); + WRITE_ONCE(rdp->rcu_need_heavy_qs, true); /* Store rcu_need_heavy_qs before rcu_urgent_qs. */ - smp_store_release(ruqp, true); + smp_store_release(&rdp->rcu_urgent_qs, true); } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) { - WRITE_ONCE(*ruqp, true); + WRITE_ONCE(rdp->rcu_urgent_qs, true); } /* @@ -1309,7 +1305,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) if (tick_nohz_full_cpu(rdp->cpu) && (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) || rcu_state.cbovld)) { - WRITE_ONCE(*ruqp, true); + WRITE_ONCE(rdp->rcu_urgent_qs, true); resched_cpu(rdp->cpu); WRITE_ONCE(rdp->last_fqs_resched, jiffies); } @@ -1779,6 +1775,8 @@ static noinline_for_stack bool rcu_gp_init(void) */ WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF); rcu_for_each_leaf_node(rnp) { + // Wait for CPU-hotplug operations that might have + // started before this grace period did. smp_mb(); // Pair with barriers used when updating ->ofl_seq to odd values. 
firstseq = READ_ONCE(rnp->ofl_seq); if (firstseq & 0x1) @@ -1907,7 +1905,7 @@ static void rcu_gp_fqs(bool first_time) struct rcu_node *rnp = rcu_get_root(); WRITE_ONCE(rcu_state.gp_activity, jiffies); - rcu_state.n_force_qs++; + WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1); if (first_time) { /* Collect dyntick-idle snapshots. */ force_qs_rnp(dyntick_save_progress_counter); @@ -2358,7 +2356,7 @@ rcu_check_quiescent_state(struct rcu_data *rdp) int rcutree_dying_cpu(unsigned int cpu) { bool blkd; - struct rcu_data *rdp = this_cpu_ptr(&rcu_data); + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); struct rcu_node *rnp = rdp->mynode; if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) @@ -2550,7 +2548,7 @@ static void rcu_do_batch(struct rcu_data *rdp) /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ if (count == 0 && rdp->qlen_last_fqs_check != 0) { rdp->qlen_last_fqs_check = 0; - rdp->n_force_qs_snap = rcu_state.n_force_qs; + rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); } else if (count < rdp->qlen_last_fqs_check - qhimark) rdp->qlen_last_fqs_check = count; @@ -2898,10 +2896,10 @@ static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, } else { /* Give the grace period a kick. */ rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; - if (rcu_state.n_force_qs == rdp->n_force_qs_snap && + if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap && rcu_segcblist_first_pend_cb(&rdp->cblist) != head) rcu_force_quiescent_state(); - rdp->n_force_qs_snap = rcu_state.n_force_qs; + rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); } } @@ -4128,10 +4126,9 @@ int rcutree_prepare_cpu(unsigned int cpu) /* Set up local state, ensuring consistent view of global state. */ raw_spin_lock_irqsave_rcu_node(rnp, flags); rdp->qlen_last_fqs_check = 0; - rdp->n_force_qs_snap = rcu_state.n_force_qs; + rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); rdp->blimit = blimit; rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */ - rcu_dynticks_eqs_online(); raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ /* @@ -4251,6 +4248,7 @@ void rcu_cpu_starting(unsigned int cpu) mask = rdp->grpmask; WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1); WARN_ON_ONCE(!(rnp->ofl_seq & 0x1)); + rcu_dynticks_eqs_online(); smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier(). raw_spin_lock_irqsave_rcu_node(rnp, flags); WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask); @@ -4296,9 +4294,7 @@ void rcu_report_dead(unsigned int cpu) do_nocb_deferred_wakeup(rdp); /* QS for any half-done expedited grace period. */ - preempt_disable(); - rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); - preempt_enable(); + rcu_report_exp_rdp(rdp); rcu_preempt_deferred_qs(current); /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 2796084ef85a..f3947c49eee7 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -512,7 +512,6 @@ static void synchronize_rcu_expedited_wait(void) j = READ_ONCE(jiffies_till_first_fqs); if (synchronize_rcu_expedited_wait_once(j + HZ)) return; - WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)); } for (;;) { @@ -760,7 +759,7 @@ static void sync_sched_exp_online_cleanup(int cpu) my_cpu = get_cpu(); /* Quiescent state either not needed or already requested, leave. 
*/ if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) || - __this_cpu_read(rcu_data.cpu_no_qs.b.exp)) { + rdp->cpu_no_qs.b.exp) { put_cpu(); return; } diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index 8fdf44f8523f..368ef7b9af4f 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -549,7 +549,6 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, rcu_nocb_unlock_irqrestore(rdp, flags); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); } - return; } /* @@ -767,6 +766,7 @@ static int rcu_nocb_gp_kthread(void *arg) static inline bool nocb_cb_can_run(struct rcu_data *rdp) { u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB; + return rcu_segcblist_test_flags(&rdp->cblist, flags); } diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index d070059163d7..5199559fbbf0 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -814,8 +814,7 @@ void rcu_read_unlock_strict(void) { struct rcu_data *rdp; - if (!IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) || - irqs_disabled() || preempt_count() || !rcu_state.gp_kthread) + if (irqs_disabled() || preempt_count() || !rcu_state.gp_kthread) return; rdp = this_cpu_ptr(&rcu_data); rcu_report_qs_rdp(rdp); @@ -1480,7 +1479,7 @@ static void rcu_bind_gp_kthread(void) } /* Record the current task on dyntick-idle entry. */ -static void noinstr rcu_dynticks_task_enter(void) +static __always_inline void rcu_dynticks_task_enter(void) { #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id()); @@ -1488,7 +1487,7 @@ static void noinstr rcu_dynticks_task_enter(void) } /* Record no current task on dyntick-idle exit. */ -static void noinstr rcu_dynticks_task_exit(void) +static __always_inline void rcu_dynticks_task_exit(void) { #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) WRITE_ONCE(current->rcu_tasks_idle_cpu, -1); @@ -1496,7 +1495,7 @@ static void noinstr rcu_dynticks_task_exit(void) } /* Turn on heavyweight RCU tasks trace readers on idle/user entry. */ -static void rcu_dynticks_task_trace_enter(void) +static __always_inline void rcu_dynticks_task_trace_enter(void) { #ifdef CONFIG_TASKS_TRACE_RCU if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) @@ -1505,7 +1504,7 @@ static void rcu_dynticks_task_trace_enter(void) } /* Turn off heavyweight RCU tasks trace readers on idle/user exit. */ -static void rcu_dynticks_task_trace_exit(void) +static __always_inline void rcu_dynticks_task_trace_exit(void) { #ifdef CONFIG_TASKS_TRACE_RCU if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 690b0cec7459..156892c22bb5 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -54,11 +54,11 @@ #define MODULE_PARAM_PREFIX "rcupdate." 
#ifndef CONFIG_TINY_RCU -module_param(rcu_expedited, int, 0); -module_param(rcu_normal, int, 0); +module_param(rcu_expedited, int, 0444); +module_param(rcu_normal, int, 0444); static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT); -#ifndef CONFIG_PREEMPT_RT -module_param(rcu_normal_after_boot, int, 0); +#if !defined(CONFIG_PREEMPT_RT) || defined(CONFIG_NO_HZ_FULL) +module_param(rcu_normal_after_boot, int, 0444); #endif #endif /* #ifndef CONFIG_TINY_RCU */ diff --git a/kernel/reboot.c b/kernel/reboot.c index f7440c0c7e43..6bcc5d6a6572 100644 --- a/kernel/reboot.c +++ b/kernel/reboot.c @@ -33,6 +33,7 @@ EXPORT_SYMBOL(cad_pid); #define DEFAULT_REBOOT_MODE #endif enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE; +EXPORT_SYMBOL_GPL(reboot_mode); enum reboot_mode panic_reboot_mode = REBOOT_UNDEFINED; /* @@ -359,7 +360,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, case LINUX_REBOOT_CMD_HALT: kernel_halt(); do_exit(0); - panic("cannot halt"); case LINUX_REBOOT_CMD_POWER_OFF: kernel_power_off(); diff --git a/kernel/resource.c b/kernel/resource.c index ca9f5198a01f..5ad3eba619ba 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -73,6 +73,18 @@ static struct resource *next_resource(struct resource *p) return p->sibling; } +static struct resource *next_resource_skip_children(struct resource *p) +{ + while (!p->sibling && p->parent) + p = p->parent; + return p->sibling; +} + +#define for_each_resource(_root, _p, _skip_children) \ + for ((_p) = (_root)->child; (_p); \ + (_p) = (_skip_children) ? next_resource_skip_children(_p) : \ + next_resource(_p)) + static void *r_next(struct seq_file *m, void *v, loff_t *pos) { struct resource *p = v; @@ -1707,37 +1719,49 @@ static int strict_iomem_checks; #endif /* - * check if an address is reserved in the iomem resource tree - * returns true if reserved, false if not reserved. + * Check if an address is exclusive to the kernel and must not be mapped to + * user space, for example, via /dev/mem. + * + * Returns true if exclusive to the kernel, otherwise returns false. */ bool iomem_is_exclusive(u64 addr) { - struct resource *p = &iomem_resource; - bool err = false; - loff_t l; + const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM | + IORESOURCE_EXCLUSIVE; + bool skip_children = false, err = false; int size = PAGE_SIZE; - - if (!strict_iomem_checks) - return false; + struct resource *p; addr = addr & PAGE_MASK; read_lock(&resource_lock); - for (p = p->child; p ; p = r_next(NULL, p, &l)) { - /* - * We can probably skip the resources without - * IORESOURCE_IO attribute? - */ + for_each_resource(&iomem_resource, p, skip_children) { if (p->start >= addr + size) break; - if (p->end < addr) + if (p->end < addr) { + skip_children = true; continue; + } + skip_children = false; + + /* + * IORESOURCE_SYSTEM_RAM resources are exclusive if + * IORESOURCE_EXCLUSIVE is set, even if they + * are not busy and even if "iomem=relaxed" is set. The + * responsible driver dynamically adds/removes system RAM within + * such an area and uncontrolled access is dangerous. + */ + if ((p->flags & exclusive_system_ram) == exclusive_system_ram) { + err = true; + break; + } + /* * A resource is exclusive if IORESOURCE_EXCLUSIVE is set * or CONFIG_IO_STRICT_DEVMEM is enabled and the * resource is busy. 
*/ - if ((p->flags & IORESOURCE_BUSY) == 0) + if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY)) continue; if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM) || p->flags & IORESOURCE_EXCLUSIVE) { diff --git a/kernel/scftorture.c b/kernel/scftorture.c index 64a08288b1a6..5d42f44e3e1a 100644 --- a/kernel/scftorture.c +++ b/kernel/scftorture.c @@ -341,6 +341,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra cpu = torture_random(trsp) % nr_cpu_ids; scfp->n_resched++; resched_cpu(cpu); + this_cpu_inc(scf_invoked_count); } break; case SCF_PRIM_SINGLE: @@ -553,18 +554,18 @@ static int __init scf_torture_init(void) scftorture_print_module_parms("Start of test"); - if (weight_resched == -1 && - weight_single == -1 && weight_single_rpc == -1 && weight_single_wait == -1 && - weight_many == -1 && weight_many_wait == -1 && - weight_all == -1 && weight_all_wait == -1) { - weight_resched1 = 2 * nr_cpu_ids; - weight_single1 = 2 * nr_cpu_ids; - weight_single_rpc1 = 2 * nr_cpu_ids; - weight_single_wait1 = 2 * nr_cpu_ids; - weight_many1 = 2; - weight_many_wait1 = 2; - weight_all1 = 1; - weight_all_wait1 = 1; + if (weight_resched <= 0 && + weight_single <= 0 && weight_single_rpc <= 0 && weight_single_wait <= 0 && + weight_many <= 0 && weight_many_wait <= 0 && + weight_all <= 0 && weight_all_wait <= 0) { + weight_resched1 = weight_resched == 0 ? 0 : 2 * nr_cpu_ids; + weight_single1 = weight_single == 0 ? 0 : 2 * nr_cpu_ids; + weight_single_rpc1 = weight_single_rpc == 0 ? 0 : 2 * nr_cpu_ids; + weight_single_wait1 = weight_single_wait == 0 ? 0 : 2 * nr_cpu_ids; + weight_many1 = weight_many == 0 ? 0 : 2; + weight_many_wait1 = weight_many_wait == 0 ? 0 : 2; + weight_all1 = weight_all == 0 ? 0 : 1; + weight_all_wait1 = weight_all_wait == 0 ? 
0 : 1; } else { if (weight_resched == -1) weight_resched1 = 0; @@ -583,8 +584,8 @@ static int __init scf_torture_init(void) if (weight_all_wait == -1) weight_all_wait1 = 0; } - if (weight_single1 == 0 && weight_single_rpc1 == 0 && weight_single_wait1 == 0 && - weight_many1 == 0 && weight_many_wait1 == 0 && + if (weight_resched1 == 0 && weight_single1 == 0 && weight_single_rpc1 == 0 && + weight_single_wait1 == 0 && weight_many1 == 0 && weight_many_wait1 == 0 && weight_all1 == 0 && weight_all_wait1 == 0) { VERBOSE_SCFTORTOUT_ERRSTRING("all zero weights makes no sense"); firsterr = -EINVAL; @@ -605,17 +606,17 @@ static int __init scf_torture_init(void) if (onoff_interval > 0) { firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, NULL); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } if (shutdown_secs > 0) { firsterr = torture_shutdown_init(shutdown_secs, scf_torture_cleanup); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } if (stutter > 0) { firsterr = torture_stutter_init(stutter, stutter); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } @@ -636,12 +637,12 @@ static int __init scf_torture_init(void) scf_stats_p[i].cpu = i; firsterr = torture_create_kthread(scftorture_invoker, (void *)&scf_stats_p[i], scf_stats_p[i].task); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } if (stat_interval > 0) { firsterr = torture_create_kthread(scf_torture_stats, NULL, scf_torture_stats_task); - if (firsterr) + if (torture_init_error(firsterr)) goto unwind; } @@ -651,6 +652,10 @@ static int __init scf_torture_init(void) unwind: torture_init_end(); scf_torture_cleanup(); + if (shutdown_secs) { + WARN_ON(!IS_MODULE(CONFIG_SCF_TORTURE_TEST)); + kernel_power_off(); + } return firsterr; } diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c index 2067080bb235..8629b37d118e 100644 --- a/kernel/sched/autogroup.c +++ b/kernel/sched/autogroup.c @@ -31,7 +31,7 @@ static inline void autogroup_destroy(struct kref *kref) ag->tg->rt_se = NULL; ag->tg->rt_rq = NULL; #endif - sched_offline_group(ag->tg); + sched_release_group(ag->tg); sched_destroy_group(ag->tg); } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 523fd602ea90..3c9b0fda64ac 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3726,6 +3726,9 @@ out: bool cpus_share_cache(int this_cpu, int that_cpu) { + if (this_cpu == that_cpu) + return true; + return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); } @@ -6625,13 +6628,13 @@ __setup("preempt=", setup_preempt_mode); static void __init preempt_dynamic_init(void) { if (preempt_dynamic_mode == preempt_dynamic_undefined) { - if (IS_ENABLED(CONFIG_PREEMPT_NONE_BEHAVIOUR)) { + if (IS_ENABLED(CONFIG_PREEMPT_NONE)) { sched_dynamic_update(preempt_dynamic_none); - } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY_BEHAVIOUR)) { + } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) { sched_dynamic_update(preempt_dynamic_voluntary); } else { /* Default static call setting, nothing to do */ - WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_BEHAVIOUR)); + WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)); preempt_dynamic_mode = preempt_dynamic_full; pr_info("Dynamic Preempt: full\n"); } @@ -9716,6 +9719,22 @@ static void sched_free_group(struct task_group *tg) kmem_cache_free(task_group_cache, tg); } +static void sched_free_group_rcu(struct rcu_head *rcu) +{ + sched_free_group(container_of(rcu, struct task_group, rcu)); +} + +static void sched_unregister_group(struct task_group *tg) +{ + 
unregister_fair_sched_group(tg); + unregister_rt_sched_group(tg); + /* + * We have to wait for yet another RCU grace period to expire, as + * print_cfs_stats() might run concurrently. + */ + call_rcu(&tg->rcu, sched_free_group_rcu); +} + /* allocate runqueue etc for a new task group */ struct task_group *sched_create_group(struct task_group *parent) { @@ -9759,25 +9778,35 @@ void sched_online_group(struct task_group *tg, struct task_group *parent) } /* rcu callback to free various structures associated with a task group */ -static void sched_free_group_rcu(struct rcu_head *rhp) +static void sched_unregister_group_rcu(struct rcu_head *rhp) { /* Now it should be safe to free those cfs_rqs: */ - sched_free_group(container_of(rhp, struct task_group, rcu)); + sched_unregister_group(container_of(rhp, struct task_group, rcu)); } void sched_destroy_group(struct task_group *tg) { /* Wait for possible concurrent references to cfs_rqs complete: */ - call_rcu(&tg->rcu, sched_free_group_rcu); + call_rcu(&tg->rcu, sched_unregister_group_rcu); } -void sched_offline_group(struct task_group *tg) +void sched_release_group(struct task_group *tg) { unsigned long flags; - /* End participation in shares distribution: */ - unregister_fair_sched_group(tg); - + /* + * Unlink first, to avoid walk_tg_tree_from() from finding us (via + * sched_cfs_period_timer()). + * + * For this to be effective, we have to wait for all pending users of + * this task group to leave their RCU critical section to ensure no new + * user will see our dying task group any more. Specifically ensure + * that tg_unthrottle_up() won't add decayed cfs_rq's to it. + * + * We therefore defer calling unregister_fair_sched_group() to + * sched_unregister_group() which is guarantied to get called only after the + * current RCU grace period has expired. + */ spin_lock_irqsave(&task_group_lock, flags); list_del_rcu(&tg->list); list_del_rcu(&tg->siblings); @@ -9896,7 +9925,7 @@ static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) { struct task_group *tg = css_tg(css); - sched_offline_group(tg); + sched_release_group(tg); } static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) @@ -9906,7 +9935,7 @@ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) /* * Relies on the RCU grace period between css_released() and this. 
*/ - sched_free_group(tg); + sched_unregister_group(tg); } /* diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c index 48ac72696012..517f72b008f5 100644 --- a/kernel/sched/core_sched.c +++ b/kernel/sched/core_sched.c @@ -135,6 +135,10 @@ int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type, if (!static_branch_likely(&sched_smt_present)) return -ENODEV; + BUILD_BUG_ON(PR_SCHED_CORE_SCOPE_THREAD != PIDTYPE_PID); + BUILD_BUG_ON(PR_SCHED_CORE_SCOPE_THREAD_GROUP != PIDTYPE_TGID); + BUILD_BUG_ON(PR_SCHED_CORE_SCOPE_PROCESS_GROUP != PIDTYPE_PGID); + if (type > PIDTYPE_PGID || cmd >= PR_SCHED_CORE_MAX || pid < 0 || (cmd != PR_SCHED_CORE_GET && uaddr)) return -EINVAL; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 13950beb01a2..6e476f6d9435 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -11456,8 +11456,6 @@ void free_fair_sched_group(struct task_group *tg) { int i; - destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); - for_each_possible_cpu(i) { if (tg->cfs_rq) kfree(tg->cfs_rq[i]); @@ -11534,6 +11532,8 @@ void unregister_fair_sched_group(struct task_group *tg) struct rq *rq; int cpu; + destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); + for_each_possible_cpu(cpu) { if (tg->se[cpu]) remove_entity_load_avg(tg->se[cpu]); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index bb945f8faeca..b48baaba2fc2 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -137,13 +137,17 @@ static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) return rt_rq->rq; } -void free_rt_sched_group(struct task_group *tg) +void unregister_rt_sched_group(struct task_group *tg) { - int i; - if (tg->rt_se) destroy_rt_bandwidth(&tg->rt_bandwidth); +} + +void free_rt_sched_group(struct task_group *tg) +{ + int i; + for_each_possible_cpu(i) { if (tg->rt_rq) kfree(tg->rt_rq[i]); @@ -250,6 +254,8 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) return &rq->rt; } +void unregister_rt_sched_group(struct task_group *tg) { } + void free_rt_sched_group(struct task_group *tg) { } int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 56ff343d4119..0e66749486e7 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -488,6 +488,7 @@ extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b); extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b); extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq); +extern void unregister_rt_sched_group(struct task_group *tg); extern void free_rt_sched_group(struct task_group *tg); extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent); extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, @@ -503,7 +504,7 @@ extern struct task_group *sched_create_group(struct task_group *parent); extern void sched_online_group(struct task_group *tg, struct task_group *parent); extern void sched_destroy_group(struct task_group *tg); -extern void sched_offline_group(struct task_group *tg); +extern void sched_release_group(struct task_group *tg); extern void sched_move_task(struct task_struct *tsk); @@ -1912,11 +1913,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) * per-task data have been completed by this moment. 
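[Illustrative aside, not part of the patch] The sched_unregister_group()/sched_free_group_rcu() changes above are an instance of the usual RCU deferred-free idiom: unlink the object first, then let call_rcu() run the actual free only after a grace period, recovering the object from its embedded rcu_head with container_of(). A generic sketch, with a hypothetical struct thing standing in for task_group:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical object with an embedded rcu_head, mirroring task_group::rcu. */
struct thing {
	int payload;
	struct rcu_head rcu;
};

static void thing_free_rcu(struct rcu_head *rcu)
{
	/* Recover the enclosing object from its embedded rcu_head and free it. */
	kfree(container_of(rcu, struct thing, rcu));
}

static void thing_release(struct thing *t)
{
	/*
	 * RCU readers may still find the object through rcu-protected lists,
	 * so defer the kfree() until a grace period has elapsed.
	 */
	call_rcu(&t->rcu, thing_free_rcu);
}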
*/ smp_wmb(); -#ifdef CONFIG_THREAD_INFO_IN_TASK - WRITE_ONCE(p->cpu, cpu); -#else WRITE_ONCE(task_thread_info(p)->cpu, cpu); -#endif p->wake_cpu = cpu; #endif } diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 30169c7685b6..d201a7052a29 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1492,7 +1492,6 @@ static int sched_domains_curr_level; int sched_max_numa_distance; static int *sched_domains_numa_distance; static struct cpumask ***sched_domains_numa_masks; -int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; static unsigned long __read_mostly *sched_numa_onlined_nodes; #endif diff --git a/kernel/scs.c b/kernel/scs.c index e2a71fc82fa0..579841be8864 100644 --- a/kernel/scs.c +++ b/kernel/scs.c @@ -78,6 +78,7 @@ void scs_free(void *s) if (this_cpu_cmpxchg(scs_cache[i], 0, s) == NULL) return; + kasan_unpoison_vmalloc(s, SCS_SIZE); vfree_atomic(s); } diff --git a/kernel/signal.c b/kernel/signal.c index e99aff33ff14..7c4b7ae714d4 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1323,6 +1323,7 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool blocked = sigismember(&t->blocked, sig); if (blocked || ignored || sigdfl) { action->sa.sa_handler = SIG_DFL; + action->sa.sa_flags |= SA_IMMUTABLE; if (blocked) { sigdelset(&t->blocked, sig); recalc_sigpending_and_wake(t); @@ -1649,6 +1650,19 @@ void force_sig(int sig) } EXPORT_SYMBOL(force_sig); +void force_fatal_sig(int sig) +{ + struct kernel_siginfo info; + + clear_siginfo(&info); + info.si_signo = sig; + info.si_errno = 0; + info.si_code = SI_KERNEL; + info.si_pid = 0; + info.si_uid = 0; + force_sig_info_to_task(&info, current, true); +} + /* * When things go south during signal handling, we * will force a SIGSEGV. And if the signal that caused @@ -1657,15 +1671,10 @@ EXPORT_SYMBOL(force_sig); */ void force_sigsegv(int sig) { - struct task_struct *p = current; - - if (sig == SIGSEGV) { - unsigned long flags; - spin_lock_irqsave(&p->sighand->siglock, flags); - p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; - spin_unlock_irqrestore(&p->sighand->siglock, flags); - } - force_sig(SIGSEGV); + if (sig == SIGSEGV) + force_fatal_sig(SIGSEGV); + else + force_sig(SIGSEGV); } int force_sig_fault_to_task(int sig, int code, void __user *addr @@ -2145,40 +2154,6 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, spin_unlock_irqrestore(&sighand->siglock, flags); } -static inline bool may_ptrace_stop(void) -{ - if (!likely(current->ptrace)) - return false; - /* - * Are we in the middle of do_coredump? - * If so and our tracer is also part of the coredump stopping - * is a deadlock situation, and pointless because our tracer - * is dead so don't allow us to stop. - * If SIGKILL was already sent before the caller unlocked - * ->siglock we must see ->core_state != NULL. Otherwise it - * is safe to enter schedule(). - * - * This is almost outdated, a task with the pending SIGKILL can't - * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported - * after SIGKILL was already dequeued. - */ - if (unlikely(current->mm->core_state) && - unlikely(current->mm == current->parent->mm)) - return false; - - return true; -} - -/* - * Return non-zero if there is a SIGKILL that should be waking us up. - * Called with the siglock held. 
- */ -static bool sigkill_pending(struct task_struct *tsk) -{ - return sigismember(&tsk->pending.signal, SIGKILL) || - sigismember(&tsk->signal->shared_pending.signal, SIGKILL); -} - /* * This must be called with current->sighand->siglock held. * @@ -2196,7 +2171,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t { bool gstop_done = false; - if (arch_ptrace_stop_needed(exit_code, info)) { + if (arch_ptrace_stop_needed()) { /* * The arch code has something special to do before a * ptrace stop. This is allowed to block, e.g. for faults @@ -2204,17 +2179,16 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t * calling arch_ptrace_stop, so we must release it now. * To preserve proper semantics, we must do this before * any signal bookkeeping like checking group_stop_count. - * Meanwhile, a SIGKILL could come in before we retake the - * siglock. That must prevent us from sleeping in TASK_TRACED. - * So after regaining the lock, we must check for SIGKILL. */ spin_unlock_irq(¤t->sighand->siglock); - arch_ptrace_stop(exit_code, info); + arch_ptrace_stop(); spin_lock_irq(¤t->sighand->siglock); - if (sigkill_pending(current)) - return; } + /* + * schedule() will not sleep if there is a pending signal that + * can awaken the task. + */ set_special_state(TASK_TRACED); /* @@ -2260,7 +2234,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t spin_unlock_irq(¤t->sighand->siglock); read_lock(&tasklist_lock); - if (may_ptrace_stop()) { + if (likely(current->ptrace)) { /* * Notify parents of the stop. * @@ -2739,7 +2713,8 @@ relock: if (!signr) break; /* will return 0 */ - if (unlikely(current->ptrace) && signr != SIGKILL) { + if (unlikely(current->ptrace) && (signr != SIGKILL) && + !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) { signr = ptrace_signal(signr, &ksig->info); if (!signr) continue; @@ -4089,6 +4064,10 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) k = &p->sighand->action[sig-1]; spin_lock_irq(&p->sighand->siglock); + if (k->sa.sa_flags & SA_IMMUTABLE) { + spin_unlock_irq(&p->sighand->siglock); + return -EINVAL; + } if (oact) *oact = *k; diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c index 9f8117c7cfdd..9c625257023d 100644 --- a/kernel/stacktrace.c +++ b/kernel/stacktrace.c @@ -13,6 +13,7 @@ #include <linux/export.h> #include <linux/kallsyms.h> #include <linux/stacktrace.h> +#include <linux/interrupt.h> /** * stack_trace_print - Print the entries in the stack trace @@ -373,3 +374,32 @@ unsigned int stack_trace_save_user(unsigned long *store, unsigned int size) #endif /* CONFIG_USER_STACKTRACE_SUPPORT */ #endif /* !CONFIG_ARCH_STACKWALK */ + +static inline bool in_irqentry_text(unsigned long ptr) +{ + return (ptr >= (unsigned long)&__irqentry_text_start && + ptr < (unsigned long)&__irqentry_text_end) || + (ptr >= (unsigned long)&__softirqentry_text_start && + ptr < (unsigned long)&__softirqentry_text_end); +} + +/** + * filter_irq_stacks - Find first IRQ stack entry in trace + * @entries: Pointer to stack trace array + * @nr_entries: Number of entries in the storage array + * + * Return: Number of trace entries until IRQ stack starts. + */ +unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries) +{ + unsigned int i; + + for (i = 0; i < nr_entries; i++) { + if (in_irqentry_text(entries[i])) { + /* Include the irqentry function into the stack. 
*/ + return i + 1; + } + } + return nr_entries; +} +EXPORT_SYMBOL_GPL(filter_irq_stacks); diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c deleted file mode 100644 index 76c997fdbc9d..000000000000 --- a/kernel/test_kprobes.c +++ /dev/null @@ -1,313 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * test_kprobes.c - simple sanity test for *probes - * - * Copyright IBM Corp. 2008 - */ - -#define pr_fmt(fmt) "Kprobe smoke test: " fmt - -#include <linux/kernel.h> -#include <linux/kprobes.h> -#include <linux/random.h> - -#define div_factor 3 - -static u32 rand1, preh_val, posth_val; -static int errors, handler_errors, num_tests; -static u32 (*target)(u32 value); -static u32 (*target2)(u32 value); - -static noinline u32 kprobe_target(u32 value) -{ - return (value / div_factor); -} - -static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs) -{ - if (preemptible()) { - handler_errors++; - pr_err("pre-handler is preemptible\n"); - } - preh_val = (rand1 / div_factor); - return 0; -} - -static void kp_post_handler(struct kprobe *p, struct pt_regs *regs, - unsigned long flags) -{ - if (preemptible()) { - handler_errors++; - pr_err("post-handler is preemptible\n"); - } - if (preh_val != (rand1 / div_factor)) { - handler_errors++; - pr_err("incorrect value in post_handler\n"); - } - posth_val = preh_val + div_factor; -} - -static struct kprobe kp = { - .symbol_name = "kprobe_target", - .pre_handler = kp_pre_handler, - .post_handler = kp_post_handler -}; - -static int test_kprobe(void) -{ - int ret; - - ret = register_kprobe(&kp); - if (ret < 0) { - pr_err("register_kprobe returned %d\n", ret); - return ret; - } - - ret = target(rand1); - unregister_kprobe(&kp); - - if (preh_val == 0) { - pr_err("kprobe pre_handler not called\n"); - handler_errors++; - } - - if (posth_val == 0) { - pr_err("kprobe post_handler not called\n"); - handler_errors++; - } - - return 0; -} - -static noinline u32 kprobe_target2(u32 value) -{ - return (value / div_factor) + 1; -} - -static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs) -{ - preh_val = (rand1 / div_factor) + 1; - return 0; -} - -static void kp_post_handler2(struct kprobe *p, struct pt_regs *regs, - unsigned long flags) -{ - if (preh_val != (rand1 / div_factor) + 1) { - handler_errors++; - pr_err("incorrect value in post_handler2\n"); - } - posth_val = preh_val + div_factor; -} - -static struct kprobe kp2 = { - .symbol_name = "kprobe_target2", - .pre_handler = kp_pre_handler2, - .post_handler = kp_post_handler2 -}; - -static int test_kprobes(void) -{ - int ret; - struct kprobe *kps[2] = {&kp, &kp2}; - - /* addr and flags should be cleard for reusing kprobe. 
*/ - kp.addr = NULL; - kp.flags = 0; - ret = register_kprobes(kps, 2); - if (ret < 0) { - pr_err("register_kprobes returned %d\n", ret); - return ret; - } - - preh_val = 0; - posth_val = 0; - ret = target(rand1); - - if (preh_val == 0) { - pr_err("kprobe pre_handler not called\n"); - handler_errors++; - } - - if (posth_val == 0) { - pr_err("kprobe post_handler not called\n"); - handler_errors++; - } - - preh_val = 0; - posth_val = 0; - ret = target2(rand1); - - if (preh_val == 0) { - pr_err("kprobe pre_handler2 not called\n"); - handler_errors++; - } - - if (posth_val == 0) { - pr_err("kprobe post_handler2 not called\n"); - handler_errors++; - } - - unregister_kprobes(kps, 2); - return 0; - -} - -#ifdef CONFIG_KRETPROBES -static u32 krph_val; - -static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs) -{ - if (preemptible()) { - handler_errors++; - pr_err("kretprobe entry handler is preemptible\n"); - } - krph_val = (rand1 / div_factor); - return 0; -} - -static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs) -{ - unsigned long ret = regs_return_value(regs); - - if (preemptible()) { - handler_errors++; - pr_err("kretprobe return handler is preemptible\n"); - } - if (ret != (rand1 / div_factor)) { - handler_errors++; - pr_err("incorrect value in kretprobe handler\n"); - } - if (krph_val == 0) { - handler_errors++; - pr_err("call to kretprobe entry handler failed\n"); - } - - krph_val = rand1; - return 0; -} - -static struct kretprobe rp = { - .handler = return_handler, - .entry_handler = entry_handler, - .kp.symbol_name = "kprobe_target" -}; - -static int test_kretprobe(void) -{ - int ret; - - ret = register_kretprobe(&rp); - if (ret < 0) { - pr_err("register_kretprobe returned %d\n", ret); - return ret; - } - - ret = target(rand1); - unregister_kretprobe(&rp); - if (krph_val != rand1) { - pr_err("kretprobe handler not called\n"); - handler_errors++; - } - - return 0; -} - -static int return_handler2(struct kretprobe_instance *ri, struct pt_regs *regs) -{ - unsigned long ret = regs_return_value(regs); - - if (ret != (rand1 / div_factor) + 1) { - handler_errors++; - pr_err("incorrect value in kretprobe handler2\n"); - } - if (krph_val == 0) { - handler_errors++; - pr_err("call to kretprobe entry handler failed\n"); - } - - krph_val = rand1; - return 0; -} - -static struct kretprobe rp2 = { - .handler = return_handler2, - .entry_handler = entry_handler, - .kp.symbol_name = "kprobe_target2" -}; - -static int test_kretprobes(void) -{ - int ret; - struct kretprobe *rps[2] = {&rp, &rp2}; - - /* addr and flags should be cleard for reusing kprobe. 
*/ - rp.kp.addr = NULL; - rp.kp.flags = 0; - ret = register_kretprobes(rps, 2); - if (ret < 0) { - pr_err("register_kretprobe returned %d\n", ret); - return ret; - } - - krph_val = 0; - ret = target(rand1); - if (krph_val != rand1) { - pr_err("kretprobe handler not called\n"); - handler_errors++; - } - - krph_val = 0; - ret = target2(rand1); - if (krph_val != rand1) { - pr_err("kretprobe handler2 not called\n"); - handler_errors++; - } - unregister_kretprobes(rps, 2); - return 0; -} -#endif /* CONFIG_KRETPROBES */ - -int init_test_probes(void) -{ - int ret; - - target = kprobe_target; - target2 = kprobe_target2; - - do { - rand1 = prandom_u32(); - } while (rand1 <= div_factor); - - pr_info("started\n"); - num_tests++; - ret = test_kprobe(); - if (ret < 0) - errors++; - - num_tests++; - ret = test_kprobes(); - if (ret < 0) - errors++; - -#ifdef CONFIG_KRETPROBES - num_tests++; - ret = test_kretprobe(); - if (ret < 0) - errors++; - - num_tests++; - ret = test_kretprobes(); - if (ret < 0) - errors++; -#endif /* CONFIG_KRETPROBES */ - - if (errors) - pr_err("BUG: %d out of %d tests failed\n", errors, num_tests); - else if (handler_errors) - pr_err("BUG: %d error(s) running handlers\n", handler_errors); - else - pr_info("passed successfully\n"); - - return 0; -} diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 6de5d4d63165..bedc5caceec7 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -47,6 +47,7 @@ obj-$(CONFIG_TRACING) += trace_output.o obj-$(CONFIG_TRACING) += trace_seq.o obj-$(CONFIG_TRACING) += trace_stat.o obj-$(CONFIG_TRACING) += trace_printk.o +obj-$(CONFIG_TRACING) += pid_list.o obj-$(CONFIG_TRACING_MAP) += tracing_map.o obj-$(CONFIG_PREEMPTIRQ_DELAY_TEST) += preemptirq_delay_test.o obj-$(CONFIG_SYNTH_EVENT_GEN_TEST) += synth_event_gen_test.o diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 8e2eb950aa82..7396488793ff 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -398,7 +398,7 @@ static const struct bpf_func_proto bpf_trace_printk_proto = { .arg2_type = ARG_CONST_SIZE, }; -const struct bpf_func_proto *bpf_get_trace_printk_proto(void) +static void __set_printk_clr_event(void) { /* * This program might be calling bpf_trace_printk, @@ -410,11 +410,57 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void) */ if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1)) pr_warn_ratelimited("could not enable bpf_trace_printk events"); +} +const struct bpf_func_proto *bpf_get_trace_printk_proto(void) +{ + __set_printk_clr_event(); return &bpf_trace_printk_proto; } -#define MAX_SEQ_PRINTF_VARARGS 12 +BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data, + u32, data_len) +{ + static char buf[BPF_TRACE_PRINTK_SIZE]; + unsigned long flags; + int ret, num_args; + u32 *bin_args; + + if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 || + (data_len && !data)) + return -EINVAL; + num_args = data_len / 8; + + ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args); + if (ret < 0) + return ret; + + raw_spin_lock_irqsave(&trace_printk_lock, flags); + ret = bstr_printf(buf, sizeof(buf), fmt, bin_args); + + trace_bpf_trace_printk(buf); + raw_spin_unlock_irqrestore(&trace_printk_lock, flags); + + bpf_bprintf_cleanup(); + + return ret; +} + +static const struct bpf_func_proto bpf_trace_vprintk_proto = { + .func = bpf_trace_vprintk, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_CONST_SIZE, + .arg3_type = ARG_PTR_TO_MEM_OR_NULL, + 
.arg4_type = ARG_CONST_SIZE_OR_ZERO, +}; + +const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void) +{ + __set_printk_clr_event(); + return &bpf_trace_vprintk_proto; +} BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, const void *, data, u32, data_len) @@ -422,7 +468,7 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, int err, num_args; u32 *bin_args; - if (data_len & 7 || data_len > MAX_SEQ_PRINTF_VARARGS * 8 || + if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 || (data_len && !data)) return -EINVAL; num_args = data_len / 8; @@ -1017,6 +1063,34 @@ static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = { .arg1_type = ARG_PTR_TO_CTX, }; +BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags) +{ +#ifndef CONFIG_X86 + return -ENOENT; +#else + static const u32 br_entry_size = sizeof(struct perf_branch_entry); + u32 entry_cnt = size / br_entry_size; + + entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt); + + if (unlikely(flags)) + return -EINVAL; + + if (!entry_cnt) + return -ENOENT; + + return entry_cnt * br_entry_size; +#endif +} + +static const struct bpf_func_proto bpf_get_branch_snapshot_proto = { + .func = bpf_get_branch_snapshot, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, +}; + static const struct bpf_func_proto * bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { @@ -1132,6 +1206,10 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_snprintf_proto; case BPF_FUNC_get_func_ip: return &bpf_get_func_ip_proto_tracing; + case BPF_FUNC_get_branch_snapshot: + return &bpf_get_branch_snapshot_proto; + case BPF_FUNC_trace_vprintk: + return bpf_get_trace_vprintk_proto(); default: return bpf_base_func_proto(func_id); } @@ -1530,6 +1608,8 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_skc_to_tcp_request_sock_proto; case BPF_FUNC_skc_to_udp6_sock: return &bpf_skc_to_udp6_sock_proto; + case BPF_FUNC_skc_to_unix_sock: + return &bpf_skc_to_unix_sock_proto; case BPF_FUNC_sk_storage_get: return &bpf_sk_storage_get_tracing_proto; case BPF_FUNC_sk_storage_delete: @@ -1566,13 +1646,7 @@ static bool raw_tp_prog_is_valid_access(int off, int size, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { - if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) - return false; - if (type != BPF_READ) - return false; - if (off % size != 0) - return false; - return true; + return bpf_tracing_ctx_access(off, size, type); } static bool tracing_prog_is_valid_access(int off, int size, @@ -1580,13 +1654,7 @@ static bool tracing_prog_is_valid_access(int off, int size, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { - if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) - return false; - if (type != BPF_READ) - return false; - if (off % size != 0) - return false; - return btf_ctx_access(off, size, type, prog, info); + return bpf_tracing_btf_ctx_access(off, size, type, prog, info); } int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog, diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c index b8a0d1d564fb..22061d38fc00 100644 --- a/kernel/trace/fgraph.c +++ b/kernel/trace/fgraph.c @@ -115,6 +115,7 @@ int function_graph_enter(unsigned long ret, unsigned long func, { struct ftrace_graph_ent trace; +#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS /* * Skip graph tracing if 
the return location is served by direct trampoline, * since call sequence and return addresses are unpredictable anyway. @@ -124,6 +125,7 @@ int function_graph_enter(unsigned long ret, unsigned long func, if (ftrace_direct_func_count && ftrace_find_rec_direct(ret - MCOUNT_INSN_SIZE)) return -EBUSY; +#endif trace.func = func; trace.depth = ++current->curr_ret_depth; @@ -333,10 +335,10 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, #endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */ static struct ftrace_ops graph_ops = { - .func = ftrace_stub, + .func = ftrace_graph_func, .flags = FTRACE_OPS_FL_INITIALIZED | FTRACE_OPS_FL_PID | - FTRACE_OPS_FL_STUB, + FTRACE_OPS_GRAPH_STUB, #ifdef FTRACE_GRAPH_TRAMP_ADDR .trampoline = FTRACE_GRAPH_TRAMP_ADDR, /* trampoline_size is only needed for dynamically allocated tramps */ diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index feebf57c6458..30bc880c3849 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -119,14 +119,9 @@ struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end; ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; struct ftrace_ops global_ops; -#if ARCH_SUPPORTS_FTRACE_OPS -static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, - struct ftrace_ops *op, struct ftrace_regs *fregs); -#else -/* See comment below, where ftrace_ops_list_func is defined */ -static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip); -#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops) -#endif +/* Defined by vmlinux.lds.h see the commment above arch_ftrace_ops_list_func for details */ +void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct ftrace_regs *fregs); static inline void ftrace_ops_init(struct ftrace_ops *ops) { @@ -323,7 +318,7 @@ int __register_ftrace_function(struct ftrace_ops *ops) if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT)) return -EBUSY; - if (!core_kernel_data((unsigned long)ops)) + if (!is_kernel_core_data((unsigned long)ops)) ops->flags |= FTRACE_OPS_FL_DYNAMIC; add_ftrace_ops(&ftrace_ops_list, ops); @@ -581,7 +576,7 @@ static void ftrace_profile_reset(struct ftrace_profile_stat *stat) FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head)); } -int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) +static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) { struct ftrace_profile_page *pg; int functions; @@ -988,8 +983,9 @@ static __init void ftrace_profile_tracefs(struct dentry *d_tracer) } } - entry = tracefs_create_file("function_profile_enabled", 0644, - d_tracer, NULL, &ftrace_profile_fops); + entry = tracefs_create_file("function_profile_enabled", + TRACE_MODE_WRITE, d_tracer, NULL, + &ftrace_profile_fops); if (!entry) pr_warn("Could not create tracefs 'function_profile_enabled' entry\n"); } @@ -2394,6 +2390,39 @@ unsigned long ftrace_find_rec_direct(unsigned long ip) return entry->direct; } +static struct ftrace_func_entry* +ftrace_add_rec_direct(unsigned long ip, unsigned long addr, + struct ftrace_hash **free_hash) +{ + struct ftrace_func_entry *entry; + + if (ftrace_hash_empty(direct_functions) || + direct_functions->count > 2 * (1 << direct_functions->size_bits)) { + struct ftrace_hash *new_hash; + int size = ftrace_hash_empty(direct_functions) ? 
0 : + direct_functions->count + 1; + + if (size < 32) + size = 32; + + new_hash = dup_hash(direct_functions, size); + if (!new_hash) + return NULL; + + *free_hash = direct_functions; + direct_functions = new_hash; + } + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return NULL; + + entry->ip = ip; + entry->direct = addr; + __add_hash_entry(direct_functions, entry); + return entry; +} + static void call_direct_funcs(unsigned long ip, unsigned long pip, struct ftrace_ops *ops, struct ftrace_regs *fregs) { @@ -5110,39 +5139,16 @@ int register_ftrace_direct(unsigned long ip, unsigned long addr) } ret = -ENOMEM; - if (ftrace_hash_empty(direct_functions) || - direct_functions->count > 2 * (1 << direct_functions->size_bits)) { - struct ftrace_hash *new_hash; - int size = ftrace_hash_empty(direct_functions) ? 0 : - direct_functions->count + 1; - - if (size < 32) - size = 32; - - new_hash = dup_hash(direct_functions, size); - if (!new_hash) - goto out_unlock; - - free_hash = direct_functions; - direct_functions = new_hash; - } - - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - goto out_unlock; - direct = ftrace_find_direct_func(addr); if (!direct) { direct = ftrace_alloc_direct_func(addr); - if (!direct) { - kfree(entry); + if (!direct) goto out_unlock; - } } - entry->ip = ip; - entry->direct = addr; - __add_hash_entry(direct_functions, entry); + entry = ftrace_add_rec_direct(ip, addr, &free_hash); + if (!entry) + goto out_unlock; ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0); if (ret) @@ -5395,6 +5401,217 @@ int modify_ftrace_direct(unsigned long ip, return ret; } EXPORT_SYMBOL_GPL(modify_ftrace_direct); + +#define MULTI_FLAGS (FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_DIRECT | \ + FTRACE_OPS_FL_SAVE_REGS) + +static int check_direct_multi(struct ftrace_ops *ops) +{ + if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) + return -EINVAL; + if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS) + return -EINVAL; + return 0; +} + +static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr) +{ + struct ftrace_func_entry *entry, *del; + int size, i; + + size = 1 << hash->size_bits; + for (i = 0; i < size; i++) { + hlist_for_each_entry(entry, &hash->buckets[i], hlist) { + del = __ftrace_lookup_ip(direct_functions, entry->ip); + if (del && del->direct == addr) { + remove_hash_entry(direct_functions, del); + kfree(del); + } + } + } +} + +/** + * register_ftrace_direct_multi - Call a custom trampoline directly + * for multiple functions registered in @ops + * @ops: The address of the struct ftrace_ops object + * @addr: The address of the trampoline to call at @ops functions + * + * This is used to connect a direct calls to @addr from the nop locations + * of the functions registered in @ops (with by ftrace_set_filter_ip + * function). + * + * The location that it calls (@addr) must be able to handle a direct call, + * and save the parameters of the function being traced, and restore them + * (or inject new ones if needed), before returning. + * + * Returns: + * 0 on success + * -EINVAL - The @ops object was already registered with this call or + * when there are no functions in @ops object. + * -EBUSY - Another direct function is already attached (there can be only one) + * -ENODEV - @ip does not point to a ftrace nop location (or not supported) + * -ENOMEM - There was an allocation failure. 
+ */ +int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr) +{ + struct ftrace_hash *hash, *free_hash = NULL; + struct ftrace_func_entry *entry, *new; + int err = -EBUSY, size, i; + + if (ops->func || ops->trampoline) + return -EINVAL; + if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) + return -EINVAL; + if (ops->flags & FTRACE_OPS_FL_ENABLED) + return -EINVAL; + + hash = ops->func_hash->filter_hash; + if (ftrace_hash_empty(hash)) + return -EINVAL; + + mutex_lock(&direct_mutex); + + /* Make sure requested entries are not already registered.. */ + size = 1 << hash->size_bits; + for (i = 0; i < size; i++) { + hlist_for_each_entry(entry, &hash->buckets[i], hlist) { + if (ftrace_find_rec_direct(entry->ip)) + goto out_unlock; + } + } + + /* ... and insert them to direct_functions hash. */ + err = -ENOMEM; + for (i = 0; i < size; i++) { + hlist_for_each_entry(entry, &hash->buckets[i], hlist) { + new = ftrace_add_rec_direct(entry->ip, addr, &free_hash); + if (!new) + goto out_remove; + entry->direct = addr; + } + } + + ops->func = call_direct_funcs; + ops->flags = MULTI_FLAGS; + ops->trampoline = FTRACE_REGS_ADDR; + + err = register_ftrace_function(ops); + + out_remove: + if (err) + remove_direct_functions_hash(hash, addr); + + out_unlock: + mutex_unlock(&direct_mutex); + + if (free_hash) { + synchronize_rcu_tasks(); + free_ftrace_hash(free_hash); + } + return err; +} +EXPORT_SYMBOL_GPL(register_ftrace_direct_multi); + +/** + * unregister_ftrace_direct_multi - Remove calls to custom trampoline + * previously registered by register_ftrace_direct_multi for @ops object. + * @ops: The address of the struct ftrace_ops object + * + * This is used to remove a direct calls to @addr from the nop locations + * of the functions registered in @ops (with by ftrace_set_filter_ip + * function). + * + * Returns: + * 0 on success + * -EINVAL - The @ops object was not properly registered. + */ +int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr) +{ + struct ftrace_hash *hash = ops->func_hash->filter_hash; + int err; + + if (check_direct_multi(ops)) + return -EINVAL; + if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) + return -EINVAL; + + mutex_lock(&direct_mutex); + err = unregister_ftrace_function(ops); + remove_direct_functions_hash(hash, addr); + mutex_unlock(&direct_mutex); + return err; +} +EXPORT_SYMBOL_GPL(unregister_ftrace_direct_multi); + +/** + * modify_ftrace_direct_multi - Modify an existing direct 'multi' call + * to call something else + * @ops: The address of the struct ftrace_ops object + * @addr: The address of the new trampoline to call at @ops functions + * + * This is used to unregister currently registered direct caller and + * register new one @addr on functions registered in @ops object. + * + * Note there's window between ftrace_shutdown and ftrace_startup calls + * where there will be no callbacks called. + * + * Returns: zero on success. Non zero on error, which includes: + * -EINVAL - The @ops object was not properly registered. 
+ */ +int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr) +{ + struct ftrace_hash *hash; + struct ftrace_func_entry *entry, *iter; + static struct ftrace_ops tmp_ops = { + .func = ftrace_stub, + .flags = FTRACE_OPS_FL_STUB, + }; + int i, size; + int err; + + if (check_direct_multi(ops)) + return -EINVAL; + if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) + return -EINVAL; + + mutex_lock(&direct_mutex); + + /* Enable the tmp_ops to have the same functions as the direct ops */ + ftrace_ops_init(&tmp_ops); + tmp_ops.func_hash = ops->func_hash; + + err = register_ftrace_function(&tmp_ops); + if (err) + goto out_direct; + + /* + * Now the ftrace_ops_list_func() is called to do the direct callers. + * We can safely change the direct functions attached to each entry. + */ + mutex_lock(&ftrace_lock); + + hash = ops->func_hash->filter_hash; + size = 1 << hash->size_bits; + for (i = 0; i < size; i++) { + hlist_for_each_entry(iter, &hash->buckets[i], hlist) { + entry = __ftrace_lookup_ip(direct_functions, iter->ip); + if (!entry) + continue; + entry->direct = addr; + } + } + + mutex_unlock(&ftrace_lock); + + /* Removing the tmp_ops will add the updated direct callers to the functions */ + unregister_ftrace_function(&tmp_ops); + + out_direct: + mutex_unlock(&direct_mutex); + return err; +} +EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi); #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ /** @@ -6109,10 +6326,10 @@ void ftrace_create_filter_files(struct ftrace_ops *ops, struct dentry *parent) { - trace_create_file("set_ftrace_filter", 0644, parent, + trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent, ops, &ftrace_filter_fops); - trace_create_file("set_ftrace_notrace", 0644, parent, + trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent, ops, &ftrace_notrace_fops); } @@ -6139,19 +6356,19 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops) static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { - trace_create_file("available_filter_functions", 0444, + trace_create_file("available_filter_functions", TRACE_MODE_READ, d_tracer, NULL, &ftrace_avail_fops); - trace_create_file("enabled_functions", 0444, + trace_create_file("enabled_functions", TRACE_MODE_READ, d_tracer, NULL, &ftrace_enabled_fops); ftrace_create_filter_files(&global_ops, d_tracer); #ifdef CONFIG_FUNCTION_GRAPH_TRACER - trace_create_file("set_graph_function", 0644, d_tracer, + trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer, NULL, &ftrace_graph_fops); - trace_create_file("set_graph_notrace", 0644, d_tracer, + trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer, NULL, &ftrace_graph_notrace_fops); #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ @@ -6846,6 +7063,11 @@ void __init ftrace_free_init_mem(void) ftrace_free_mem(NULL, start, end); } +int __init __weak ftrace_dyn_arch_init(void) +{ + return 0; +} + void __init ftrace_init(void) { extern unsigned long __start_mcount_loc[]; @@ -6977,16 +7199,15 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op; int bit; + /* + * The ftrace_test_and_set_recursion() will disable preemption, + * which is required since some of the ops may be dynamically + * allocated, they must be freed after a synchronize_rcu(). + */ bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START); if (bit < 0) return; - /* - * Some of the ops may be dynamically allocated, - * they must be freed after a synchronize_rcu(). 
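A minimal usage sketch of the new *_ftrace_direct_multi interface, in the style of the in-tree ftrace-direct samples; the trampoline symbol my_tramp and the choice of wake_up_process as the attach point are illustrative assumptions (the trampoline itself must be written in architecture-specific assembly and must save and restore the traced function's arguments):

#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sched.h>

extern void my_tramp(void);		/* assumed arch-level trampoline */

static struct ftrace_ops direct;	/* func and trampoline intentionally left unset */

static int __init direct_multi_init(void)
{
	int ret;

	/* Fill direct.func_hash with the functions whose nops should be redirected */
	ret = ftrace_set_filter_ip(&direct, (unsigned long)wake_up_process, 0, 0);
	if (ret)
		return ret;

	/* Patch every filtered location to call my_tramp directly */
	return register_ftrace_direct_multi(&direct, (unsigned long)my_tramp);
}

static void __exit direct_multi_exit(void)
{
	unregister_ftrace_direct_multi(&direct, (unsigned long)my_tramp);
}

module_init(direct_multi_init);
module_exit(direct_multi_exit);
MODULE_LICENSE("GPL");

Once attached this way, modify_ftrace_direct_multi() can later retarget all of the filtered locations to a different trampoline in one call.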
- */ - preempt_disable_notrace(); - do_for_each_ftrace_op(op, ftrace_ops_list) { /* Stub functions don't need to be called nor tested */ if (op->flags & FTRACE_OPS_FL_STUB) @@ -7010,7 +7231,6 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, } } while_for_each_ftrace_op(op); out: - preempt_enable_notrace(); trace_clear_recursion(bit); } @@ -7026,21 +7246,23 @@ out: * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. * An architecture can pass partial regs with ftrace_ops and still * set the ARCH_SUPPORTS_FTRACE_OPS. + * + * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be + * arch_ftrace_ops_list_func. */ #if ARCH_SUPPORTS_FTRACE_OPS -static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, - struct ftrace_ops *op, struct ftrace_regs *fregs) +void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct ftrace_regs *fregs) { __ftrace_ops_list_func(ip, parent_ip, NULL, fregs); } -NOKPROBE_SYMBOL(ftrace_ops_list_func); #else -static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) +void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) { __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); } -NOKPROBE_SYMBOL(ftrace_ops_no_ops); #endif +NOKPROBE_SYMBOL(arch_ftrace_ops_list_func); /* * If there's only one function registered but it does not support @@ -7056,12 +7278,9 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, if (bit < 0) return; - preempt_disable_notrace(); - if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) op->func(ip, parent_ip, op, fregs); - preempt_enable_notrace(); trace_clear_recursion(bit); } NOKPROBE_SYMBOL(ftrace_ops_assist_func); @@ -7184,10 +7403,10 @@ static void clear_ftrace_pids(struct trace_array *tr, int type) synchronize_rcu(); if ((type & TRACE_PIDS) && pid_list) - trace_free_pid_list(pid_list); + trace_pid_list_free(pid_list); if ((type & TRACE_NO_PIDS) && no_pid_list) - trace_free_pid_list(no_pid_list); + trace_pid_list_free(no_pid_list); } void ftrace_clear_pids(struct trace_array *tr) @@ -7428,7 +7647,7 @@ pid_write(struct file *filp, const char __user *ubuf, if (filtered_pids) { synchronize_rcu(); - trace_free_pid_list(filtered_pids); + trace_pid_list_free(filtered_pids); } else if (pid_list && !other_pids) { /* Register a probe to set whether to ignore the tracing of a task */ register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); @@ -7494,10 +7713,10 @@ static const struct file_operations ftrace_no_pid_fops = { void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer) { - trace_create_file("set_ftrace_pid", 0644, d_tracer, + trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer, tr, &ftrace_pid_fops); - trace_create_file("set_ftrace_notrace_pid", 0644, d_tracer, - tr, &ftrace_no_pid_fops); + trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE, + d_tracer, tr, &ftrace_no_pid_fops); } void __init ftrace_init_tracefs_toplevel(struct trace_array *tr, diff --git a/kernel/trace/pid_list.c b/kernel/trace/pid_list.c new file mode 100644 index 000000000000..a2ef1d18126a --- /dev/null +++ b/kernel/trace/pid_list.c @@ -0,0 +1,495 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 VMware Inc, Steven Rostedt <rostedt@goodmis.org> + */ +#include <linux/spinlock.h> +#include <linux/irq_work.h> +#include <linux/slab.h> +#include "trace.h" + +/* See pid_list.h for details */ + +static inline union lower_chunk 
*get_lower_chunk(struct trace_pid_list *pid_list) +{ + union lower_chunk *chunk; + + lockdep_assert_held(&pid_list->lock); + + if (!pid_list->lower_list) + return NULL; + + chunk = pid_list->lower_list; + pid_list->lower_list = chunk->next; + pid_list->free_lower_chunks--; + WARN_ON_ONCE(pid_list->free_lower_chunks < 0); + chunk->next = NULL; + /* + * If a refill needs to happen, it can not happen here + * as the scheduler run queue locks are held. + */ + if (pid_list->free_lower_chunks <= CHUNK_REALLOC) + irq_work_queue(&pid_list->refill_irqwork); + + return chunk; +} + +static inline union upper_chunk *get_upper_chunk(struct trace_pid_list *pid_list) +{ + union upper_chunk *chunk; + + lockdep_assert_held(&pid_list->lock); + + if (!pid_list->upper_list) + return NULL; + + chunk = pid_list->upper_list; + pid_list->upper_list = chunk->next; + pid_list->free_upper_chunks--; + WARN_ON_ONCE(pid_list->free_upper_chunks < 0); + chunk->next = NULL; + /* + * If a refill needs to happen, it can not happen here + * as the scheduler run queue locks are held. + */ + if (pid_list->free_upper_chunks <= CHUNK_REALLOC) + irq_work_queue(&pid_list->refill_irqwork); + + return chunk; +} + +static inline void put_lower_chunk(struct trace_pid_list *pid_list, + union lower_chunk *chunk) +{ + lockdep_assert_held(&pid_list->lock); + + chunk->next = pid_list->lower_list; + pid_list->lower_list = chunk; + pid_list->free_lower_chunks++; +} + +static inline void put_upper_chunk(struct trace_pid_list *pid_list, + union upper_chunk *chunk) +{ + lockdep_assert_held(&pid_list->lock); + + chunk->next = pid_list->upper_list; + pid_list->upper_list = chunk; + pid_list->free_upper_chunks++; +} + +static inline bool upper_empty(union upper_chunk *chunk) +{ + /* + * If chunk->data has no lower chunks, it will be the same + * as a zeroed bitmask. Use find_first_bit() to test it + * and if it doesn't find any bits set, then the array + * is empty. + */ + int bit = find_first_bit((unsigned long *)chunk->data, + sizeof(chunk->data) * 8); + return bit >= sizeof(chunk->data) * 8; +} + +static inline int pid_split(unsigned int pid, unsigned int *upper1, + unsigned int *upper2, unsigned int *lower) +{ + /* MAX_PID should cover all pids */ + BUILD_BUG_ON(MAX_PID < PID_MAX_LIMIT); + + /* In case a bad pid is passed in, then fail */ + if (unlikely(pid >= MAX_PID)) + return -1; + + *upper1 = (pid >> UPPER1_SHIFT) & UPPER_MASK; + *upper2 = (pid >> UPPER2_SHIFT) & UPPER_MASK; + *lower = pid & LOWER_MASK; + + return 0; +} + +static inline unsigned int pid_join(unsigned int upper1, + unsigned int upper2, unsigned int lower) +{ + return ((upper1 & UPPER_MASK) << UPPER1_SHIFT) | + ((upper2 & UPPER_MASK) << UPPER2_SHIFT) | + (lower & LOWER_MASK); +} + +/** + * trace_pid_list_is_set - test if the pid is set in the list + * @pid_list: The pid list to test + * @pid: The pid to see if set in the list. + * + * Tests if @pid is set in the @pid_list. This is usually called + * from the scheduler when a task is scheduled. Its pid is checked + * if it should be traced or not. + * + * Return true if the pid is in the list, false otherwise. 
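The chunk helpers above only hand out pre-allocated cache entries under pid_list->lock and defer refilling to irq_work, so the paths reached with scheduler locks held never allocate. A minimal sketch of how a caller on that hot path might consult the list (the should_trace() wrapper and its "NULL list means trace everything" convention are assumptions for illustration):

static bool should_trace(struct trace_pid_list *pid_list,
			 struct task_struct *task)
{
	/* No filter list installed: trace every task */
	if (!pid_list)
		return true;

	return trace_pid_list_is_set(pid_list, task->pid);
}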
+ */ +bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid) +{ + union upper_chunk *upper_chunk; + union lower_chunk *lower_chunk; + unsigned long flags; + unsigned int upper1; + unsigned int upper2; + unsigned int lower; + bool ret = false; + + if (!pid_list) + return false; + + if (pid_split(pid, &upper1, &upper2, &lower) < 0) + return false; + + raw_spin_lock_irqsave(&pid_list->lock, flags); + upper_chunk = pid_list->upper[upper1]; + if (upper_chunk) { + lower_chunk = upper_chunk->data[upper2]; + if (lower_chunk) + ret = test_bit(lower, lower_chunk->data); + } + raw_spin_unlock_irqrestore(&pid_list->lock, flags); + + return ret; +} + +/** + * trace_pid_list_set - add a pid to the list + * @pid_list: The pid list to add the @pid to. + * @pid: The pid to add. + * + * Adds @pid to @pid_list. This is usually done explicitly by a user + * adding a task to be traced, or indirectly by the fork function + * when children should be traced and a task's pid is in the list. + * + * Return 0 on success, negative otherwise. + */ +int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid) +{ + union upper_chunk *upper_chunk; + union lower_chunk *lower_chunk; + unsigned long flags; + unsigned int upper1; + unsigned int upper2; + unsigned int lower; + int ret; + + if (!pid_list) + return -ENODEV; + + if (pid_split(pid, &upper1, &upper2, &lower) < 0) + return -EINVAL; + + raw_spin_lock_irqsave(&pid_list->lock, flags); + upper_chunk = pid_list->upper[upper1]; + if (!upper_chunk) { + upper_chunk = get_upper_chunk(pid_list); + if (!upper_chunk) { + ret = -ENOMEM; + goto out; + } + pid_list->upper[upper1] = upper_chunk; + } + lower_chunk = upper_chunk->data[upper2]; + if (!lower_chunk) { + lower_chunk = get_lower_chunk(pid_list); + if (!lower_chunk) { + ret = -ENOMEM; + goto out; + } + upper_chunk->data[upper2] = lower_chunk; + } + set_bit(lower, lower_chunk->data); + ret = 0; + out: + raw_spin_unlock_irqrestore(&pid_list->lock, flags); + return ret; +} + +/** + * trace_pid_list_clear - remove a pid from the list + * @pid_list: The pid list to remove the @pid from. + * @pid: The pid to remove. + * + * Removes @pid from @pid_list. This is usually done explicitly by a user + * removing tasks from tracing, or indirectly by the exit function + * when a task that is set to be traced exits. + * + * Return 0 on success, negative otherwise. + */ +int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid) +{ + union upper_chunk *upper_chunk; + union lower_chunk *lower_chunk; + unsigned long flags; + unsigned int upper1; + unsigned int upper2; + unsigned int lower; + + if (!pid_list) + return -ENODEV; + + if (pid_split(pid, &upper1, &upper2, &lower) < 0) + return -EINVAL; + + raw_spin_lock_irqsave(&pid_list->lock, flags); + upper_chunk = pid_list->upper[upper1]; + if (!upper_chunk) + goto out; + + lower_chunk = upper_chunk->data[upper2]; + if (!lower_chunk) + goto out; + + clear_bit(lower, lower_chunk->data); + + /* if there's no more bits set, add it to the free list */ + if (find_first_bit(lower_chunk->data, LOWER_MAX) >= LOWER_MAX) { + put_lower_chunk(pid_list, lower_chunk); + upper_chunk->data[upper2] = NULL; + if (upper_empty(upper_chunk)) { + put_upper_chunk(pid_list, upper_chunk); + pid_list->upper[upper1] = NULL; + } + } + out: + raw_spin_unlock_irqrestore(&pid_list->lock, flags); + return 0; +} + +/** + * trace_pid_list_next - return the next pid in the list + * @pid_list: The pid list to examine. 
+ * @pid: The pid to start from + * @next: The pointer to place the pid that is set starting from @pid. + * + * Looks for the next consecutive pid that is in @pid_list starting + * at the pid specified by @pid. If one is set (including @pid), then + * that pid is placed into @next. + * + * Return 0 when a pid is found, -1 if there are no more pids included. + */ +int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid, + unsigned int *next) +{ + union upper_chunk *upper_chunk; + union lower_chunk *lower_chunk; + unsigned long flags; + unsigned int upper1; + unsigned int upper2; + unsigned int lower; + + if (!pid_list) + return -ENODEV; + + if (pid_split(pid, &upper1, &upper2, &lower) < 0) + return -EINVAL; + + raw_spin_lock_irqsave(&pid_list->lock, flags); + for (; upper1 <= UPPER_MASK; upper1++, upper2 = 0) { + upper_chunk = pid_list->upper[upper1]; + + if (!upper_chunk) + continue; + + for (; upper2 <= UPPER_MASK; upper2++, lower = 0) { + lower_chunk = upper_chunk->data[upper2]; + if (!lower_chunk) + continue; + + lower = find_next_bit(lower_chunk->data, LOWER_MAX, + lower); + if (lower < LOWER_MAX) + goto found; + } + } + + found: + raw_spin_unlock_irqrestore(&pid_list->lock, flags); + if (upper1 > UPPER_MASK) + return -1; + + *next = pid_join(upper1, upper2, lower); + return 0; +} + +/** + * trace_pid_list_first - return the first pid in the list + * @pid_list: The pid list to examine. + * @pid: The pointer to place the pid first found pid that is set. + * + * Looks for the first pid that is set in @pid_list, and places it + * into @pid if found. + * + * Return 0 when a pid is found, -1 if there are no pids set. + */ +int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid) +{ + return trace_pid_list_next(pid_list, 0, pid); +} + +static void pid_list_refill_irq(struct irq_work *iwork) +{ + struct trace_pid_list *pid_list = container_of(iwork, struct trace_pid_list, + refill_irqwork); + union upper_chunk *upper = NULL; + union lower_chunk *lower = NULL; + union upper_chunk **upper_next = &upper; + union lower_chunk **lower_next = &lower; + int upper_count; + int lower_count; + int ucnt = 0; + int lcnt = 0; + + again: + raw_spin_lock(&pid_list->lock); + upper_count = CHUNK_ALLOC - pid_list->free_upper_chunks; + lower_count = CHUNK_ALLOC - pid_list->free_lower_chunks; + raw_spin_unlock(&pid_list->lock); + + if (upper_count <= 0 && lower_count <= 0) + return; + + while (upper_count-- > 0) { + union upper_chunk *chunk; + + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) + break; + *upper_next = chunk; + upper_next = &chunk->next; + ucnt++; + } + + while (lower_count-- > 0) { + union lower_chunk *chunk; + + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) + break; + *lower_next = chunk; + lower_next = &chunk->next; + lcnt++; + } + + raw_spin_lock(&pid_list->lock); + if (upper) { + *upper_next = pid_list->upper_list; + pid_list->upper_list = upper; + pid_list->free_upper_chunks += ucnt; + } + if (lower) { + *lower_next = pid_list->lower_list; + pid_list->lower_list = lower; + pid_list->free_lower_chunks += lcnt; + } + raw_spin_unlock(&pid_list->lock); + + /* + * On success of allocating all the chunks, both counters + * will be less than zero. If they are not, then an allocation + * failed, and we should not try again. + */ + if (upper_count >= 0 || lower_count >= 0) + return; + /* + * When the locks were released, free chunks could have + * been used and allocation needs to be done again. Might as + * well allocate it now. 
+ */ + goto again; +} + +/** + * trace_pid_list_alloc - create a new pid_list + * + * Allocates a new pid_list to store pids into. + * + * Returns the pid_list on success, NULL otherwise. + */ +struct trace_pid_list *trace_pid_list_alloc(void) +{ + struct trace_pid_list *pid_list; + int i; + + /* According to linux/thread.h, pids can be no bigger that 30 bits */ + WARN_ON_ONCE(pid_max > (1 << 30)); + + pid_list = kzalloc(sizeof(*pid_list), GFP_KERNEL); + if (!pid_list) + return NULL; + + init_irq_work(&pid_list->refill_irqwork, pid_list_refill_irq); + + raw_spin_lock_init(&pid_list->lock); + + for (i = 0; i < CHUNK_ALLOC; i++) { + union upper_chunk *chunk; + + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) + break; + chunk->next = pid_list->upper_list; + pid_list->upper_list = chunk; + pid_list->free_upper_chunks++; + } + + for (i = 0; i < CHUNK_ALLOC; i++) { + union lower_chunk *chunk; + + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) + break; + chunk->next = pid_list->lower_list; + pid_list->lower_list = chunk; + pid_list->free_lower_chunks++; + } + + return pid_list; +} + +/** + * trace_pid_list_free - Frees an allocated pid_list. + * + * Frees the memory for a pid_list that was allocated. + */ +void trace_pid_list_free(struct trace_pid_list *pid_list) +{ + union upper_chunk *upper; + union lower_chunk *lower; + int i, j; + + if (!pid_list) + return; + + irq_work_sync(&pid_list->refill_irqwork); + + while (pid_list->lower_list) { + union lower_chunk *chunk; + + chunk = pid_list->lower_list; + pid_list->lower_list = pid_list->lower_list->next; + kfree(chunk); + } + + while (pid_list->upper_list) { + union upper_chunk *chunk; + + chunk = pid_list->upper_list; + pid_list->upper_list = pid_list->upper_list->next; + kfree(chunk); + } + + for (i = 0; i < UPPER1_SIZE; i++) { + upper = pid_list->upper[i]; + if (upper) { + for (j = 0; j < UPPER2_SIZE; j++) { + lower = upper->data[j]; + kfree(lower); + } + kfree(upper); + } + } + kfree(pid_list); +} diff --git a/kernel/trace/pid_list.h b/kernel/trace/pid_list.h new file mode 100644 index 000000000000..62e73f1ac85f --- /dev/null +++ b/kernel/trace/pid_list.h @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Do not include this file directly. */ + +#ifndef _TRACE_INTERNAL_PID_LIST_H +#define _TRACE_INTERNAL_PID_LIST_H + +/* + * In order to keep track of what pids to trace, a tree is created much + * like page tables are used. This creates a sparse bit map, where + * the tree is filled in when needed. A PID is at most 30 bits (see + * linux/thread.h), and is broken up into 3 sections based on the bit map + * of the bits. The 8 MSB is the "upper1" section. The next 8 MSB is the + * "upper2" section and the 14 LSB is the "lower" section. + * + * A trace_pid_list structure holds the "upper1" section, in an + * array of 256 pointers (1 or 2K in size) to "upper_chunk" unions, where + * each has an array of 256 pointers (1 or 2K in size) to the "lower_chunk" + * structures, where each has an array of size 2K bytes representing a bitmask + * of the 14 LSB of the PID (256 * 8 = 2048) + * + * When a trace_pid_list is allocated, it includes the 256 pointer array + * of the upper1 unions. Then a "cache" of upper and lower is allocated + * where these will be assigned as needed. + * + * When a bit is set in the pid_list bitmask, the pid to use has + * the 8 MSB masked, and this is used to index the array in the + * pid_list to find the next upper union. 
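With the 8/8/14 split described here, locating a pid costs only shifts and masks. A short worked example, assuming the shift and mask values that follow from 8-bit upper sections and a 14-bit lower section (the corresponding UPPER1_SHIFT, UPPER2_SHIFT and mask macros are defined a little further down in this header):

	/* pid = 74565 (0x12345) */
	upper1 = (74565 >> 22) & 0xff;	/* 0    -> index into pid_list->upper[]   */
	upper2 = (74565 >> 14) & 0xff;	/* 4    -> index into upper_chunk->data[] */
	lower  =  74565 & 0x3fff;	/* 9029 -> bit within lower_chunk->data   */
	/* pid_join(0, 4, 9029) == (0 << 22) | (4 << 14) | 9029 == 74565 */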
If the element is NULL, + * then one is retrieved from the upper_list cache. If none is + * available, then -ENOMEM is returned. + * + * The next 8 MSB is used to index into the "upper2" section. If this + * element is NULL, then it is retrieved from the lower_list cache. + * Again, if one is not available -ENOMEM is returned. + * + * Finally the 14 LSB of the PID is used to set the bit in the 16384 + * bitmask (made up of 2K bytes). + * + * When the second upper section or the lower section has their last + * bit cleared, they are added back to the free list to be reused + * when needed. + */ + +#define UPPER_BITS 8 +#define UPPER_MAX (1 << UPPER_BITS) +#define UPPER1_SIZE (1 << UPPER_BITS) +#define UPPER2_SIZE (1 << UPPER_BITS) + +#define LOWER_BITS 14 +#define LOWER_MAX (1 << LOWER_BITS) +#define LOWER_SIZE (LOWER_MAX / BITS_PER_LONG) + +#define UPPER1_SHIFT (LOWER_BITS + UPPER_BITS) +#define UPPER2_SHIFT LOWER_BITS +#define LOWER_MASK (LOWER_MAX - 1) + +#define UPPER_MASK (UPPER_MAX - 1) + +/* According to linux/thread.h pids can not be bigger than or equal to 1 << 30 */ +#define MAX_PID (1 << 30) + +/* Just keep 6 chunks of both upper and lower in the cache on alloc */ +#define CHUNK_ALLOC 6 + +/* Have 2 chunks free, trigger a refill of the cache */ +#define CHUNK_REALLOC 2 + +union lower_chunk { + union lower_chunk *next; + unsigned long data[LOWER_SIZE]; // 2K in size +}; + +union upper_chunk { + union upper_chunk *next; + union lower_chunk *data[UPPER2_SIZE]; // 1 or 2K in size +}; + +struct trace_pid_list { + raw_spinlock_t lock; + struct irq_work refill_irqwork; + union upper_chunk *upper[UPPER1_SIZE]; // 1 or 2K in size + union upper_chunk *upper_list; + union lower_chunk *lower_list; + int free_upper_chunks; + int free_lower_chunks; +}; + +#endif /* _TRACE_INTERNAL_PID_LIST_H */ diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index c5a3fbf19617..2699e9e562b1 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -3167,14 +3167,9 @@ static __always_inline int trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) { unsigned int val = cpu_buffer->current_context; - unsigned long pc = preempt_count(); - int bit; + int bit = interrupt_context_level(); - if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) - bit = RB_CTX_NORMAL; - else - bit = pc & NMI_MASK ? RB_CTX_NMI : - pc & HARDIRQ_MASK ? 
RB_CTX_IRQ : RB_CTX_SOFTIRQ; + bit = RB_CTX_NORMAL - bit; if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { /* @@ -5233,6 +5228,9 @@ void ring_buffer_reset(struct trace_buffer *buffer) struct ring_buffer_per_cpu *cpu_buffer; int cpu; + /* prevent another thread from changing buffer sizes */ + mutex_lock(&buffer->mutex); + for_each_buffer_cpu(buffer, cpu) { cpu_buffer = buffer->buffers[cpu]; @@ -5251,6 +5249,8 @@ void ring_buffer_reset(struct trace_buffer *buffer) atomic_dec(&cpu_buffer->record_disabled); atomic_dec(&cpu_buffer->resize_disabled); } + + mutex_unlock(&buffer->mutex); } EXPORT_SYMBOL_GPL(ring_buffer_reset); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index bc677cd64224..f9139dc1262c 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -512,12 +512,6 @@ int call_filter_check_discard(struct trace_event_call *call, void *rec, return 0; } -void trace_free_pid_list(struct trace_pid_list *pid_list) -{ - vfree(pid_list->pids); - kfree(pid_list); -} - /** * trace_find_filtered_pid - check if a pid exists in a filtered_pid list * @filtered_pids: The list of pids to check @@ -528,14 +522,7 @@ void trace_free_pid_list(struct trace_pid_list *pid_list) bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid) { - /* - * If pid_max changed after filtered_pids was created, we - * by default ignore all pids greater than the previous pid_max. - */ - if (search_pid >= filtered_pids->pid_max) - return false; - - return test_bit(search_pid, filtered_pids->pids); + return trace_pid_list_is_set(filtered_pids, search_pid); } /** @@ -592,15 +579,11 @@ void trace_filter_add_remove_task(struct trace_pid_list *pid_list, return; } - /* Sorry, but we don't support pid_max changing after setting */ - if (task->pid >= pid_list->pid_max) - return; - /* "self" is set for forks, and NULL for exits */ if (self) - set_bit(task->pid, pid_list->pids); + trace_pid_list_set(pid_list, task->pid); else - clear_bit(task->pid, pid_list->pids); + trace_pid_list_clear(pid_list, task->pid); } /** @@ -617,18 +600,19 @@ void trace_filter_add_remove_task(struct trace_pid_list *pid_list, */ void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos) { - unsigned long pid = (unsigned long)v; + long pid = (unsigned long)v; + unsigned int next; (*pos)++; /* pid already is +1 of the actual previous bit */ - pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid); + if (trace_pid_list_next(pid_list, pid, &next) < 0) + return NULL; - /* Return pid + 1 to allow zero to be represented */ - if (pid < pid_list->pid_max) - return (void *)(pid + 1); + pid = next; - return NULL; + /* Return pid + 1 to allow zero to be represented */ + return (void *)(pid + 1); } /** @@ -645,12 +629,14 @@ void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos) void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos) { unsigned long pid; + unsigned int first; loff_t l = 0; - pid = find_first_bit(pid_list->pids, pid_list->pid_max); - if (pid >= pid_list->pid_max) + if (trace_pid_list_first(pid_list, &first) < 0) return NULL; + pid = first; + /* Return pid + 1 so that zero can be the exit value */ for (pid++; pid && l < *pos; pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l)) @@ -686,7 +672,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids, unsigned long val; int nr_pids = 0; ssize_t read = 0; - ssize_t ret = 0; + ssize_t ret; loff_t pos; pid_t pid; @@ -699,34 +685,23 @@ int trace_pid_write(struct trace_pid_list *filtered_pids, * 
the user. If the operation fails, then the current list is * not modified. */ - pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL); + pid_list = trace_pid_list_alloc(); if (!pid_list) { trace_parser_put(&parser); return -ENOMEM; } - pid_list->pid_max = READ_ONCE(pid_max); - - /* Only truncating will shrink pid_max */ - if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max) - pid_list->pid_max = filtered_pids->pid_max; - - pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3); - if (!pid_list->pids) { - trace_parser_put(&parser); - kfree(pid_list); - return -ENOMEM; - } - if (filtered_pids) { /* copy the current bits to the new max */ - for_each_set_bit(pid, filtered_pids->pids, - filtered_pids->pid_max) { - set_bit(pid, pid_list->pids); + ret = trace_pid_list_first(filtered_pids, &pid); + while (!ret) { + trace_pid_list_set(pid_list, pid); + ret = trace_pid_list_next(filtered_pids, pid + 1, &pid); nr_pids++; } } + ret = 0; while (cnt > 0) { pos = 0; @@ -742,12 +717,13 @@ int trace_pid_write(struct trace_pid_list *filtered_pids, ret = -EINVAL; if (kstrtoul(parser.buffer, 0, &val)) break; - if (val >= pid_list->pid_max) - break; pid = (pid_t)val; - set_bit(pid, pid_list->pids); + if (trace_pid_list_set(pid_list, pid) < 0) { + ret = -1; + break; + } nr_pids++; trace_parser_clear(&parser); @@ -756,13 +732,13 @@ int trace_pid_write(struct trace_pid_list *filtered_pids, trace_parser_put(&parser); if (ret < 0) { - trace_free_pid_list(pid_list); + trace_pid_list_free(pid_list); return ret; } if (!nr_pids) { /* Cleared the list of pids */ - trace_free_pid_list(pid_list); + trace_pid_list_free(pid_list); read = ret; pid_list = NULL; } @@ -1714,7 +1690,8 @@ static void trace_create_maxlat_file(struct trace_array *tr, { INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn); init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq); - tr->d_max_latency = trace_create_file("tracing_max_latency", 0644, + tr->d_max_latency = trace_create_file("tracing_max_latency", + TRACE_MODE_WRITE, d_tracer, &tr->max_latency, &tracing_max_lat_fops); } @@ -1748,8 +1725,8 @@ void latency_fsnotify(struct trace_array *tr) || defined(CONFIG_OSNOISE_TRACER) #define trace_create_maxlat_file(tr, d_tracer) \ - trace_create_file("tracing_max_latency", 0644, d_tracer, \ - &tr->max_latency, &tracing_max_lat_fops) + trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \ + d_tracer, &tr->max_latency, &tracing_max_lat_fops) #else #define trace_create_maxlat_file(tr, d_tracer) do { } while (0) @@ -5628,6 +5605,7 @@ static const char readme_msg[] = #ifdef CONFIG_HIST_TRIGGERS " hist trigger\t- If set, event hits are aggregated into a hash table\n" "\t Format: hist:keys=<field1[,field2,...]>\n" + "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n" "\t [:values=<field1[,field2,...]>]\n" "\t [:sort=<field1[,field2,...]>]\n" "\t [:size=#entries]\n" @@ -5639,6 +5617,16 @@ static const char readme_msg[] = "\t common_timestamp - to record current timestamp\n" "\t common_cpu - to record the CPU the event happened on\n" "\n" + "\t A hist trigger variable can be:\n" + "\t - a reference to a field e.g. x=current_timestamp,\n" + "\t - a reference to another variable e.g. y=$x,\n" + "\t - a numeric literal: e.g. ms_per_sec=1000,\n" + "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n" + "\n" + "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n" + "\t multiplication(*) and division(/) operators. 
An operand can be either a\n" + "\t variable reference, field or numeric literal.\n" + "\n" "\t When a matching event is hit, an entry is added to a hash\n" "\t table using the key(s) and value(s) named, and the value of a\n" "\t sum called 'hitcount' is incremented. Keys and values\n" @@ -6077,7 +6065,7 @@ trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, static void trace_create_eval_file(struct dentry *d_tracer) { - trace_create_file("eval_map", 0444, d_tracer, + trace_create_file("eval_map", TRACE_MODE_READ, d_tracer, NULL, &tracing_eval_map_fops); } @@ -8590,27 +8578,27 @@ tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) } /* per cpu trace_pipe */ - trace_create_cpu_file("trace_pipe", 0444, d_cpu, + trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu, tr, cpu, &tracing_pipe_fops); /* per cpu trace */ - trace_create_cpu_file("trace", 0644, d_cpu, + trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu, tr, cpu, &tracing_fops); - trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu, + trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu, tr, cpu, &tracing_buffers_fops); - trace_create_cpu_file("stats", 0444, d_cpu, + trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu, tr, cpu, &tracing_stats_fops); - trace_create_cpu_file("buffer_size_kb", 0444, d_cpu, + trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu, tr, cpu, &tracing_entries_fops); #ifdef CONFIG_TRACER_SNAPSHOT - trace_create_cpu_file("snapshot", 0644, d_cpu, + trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu, tr, cpu, &snapshot_fops); - trace_create_cpu_file("snapshot_raw", 0444, d_cpu, + trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu, tr, cpu, &snapshot_raw_fops); #endif } @@ -8816,8 +8804,8 @@ create_trace_option_file(struct trace_array *tr, topt->opt = opt; topt->tr = tr; - topt->entry = trace_create_file(opt->name, 0644, t_options, topt, - &trace_options_fops); + topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE, + t_options, topt, &trace_options_fops); } @@ -8892,7 +8880,7 @@ create_trace_option_core_file(struct trace_array *tr, if (!t_options) return NULL; - return trace_create_file(option, 0644, t_options, + return trace_create_file(option, TRACE_MODE_WRITE, t_options, (void *)&tr->trace_flags_index[index], &trace_options_core_fops); } @@ -9417,28 +9405,28 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) struct trace_event_file *file; int cpu; - trace_create_file("available_tracers", 0444, d_tracer, + trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer, tr, &show_traces_fops); - trace_create_file("current_tracer", 0644, d_tracer, + trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer, tr, &set_tracer_fops); - trace_create_file("tracing_cpumask", 0644, d_tracer, + trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer, tr, &tracing_cpumask_fops); - trace_create_file("trace_options", 0644, d_tracer, + trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer, tr, &tracing_iter_fops); - trace_create_file("trace", 0644, d_tracer, + trace_create_file("trace", TRACE_MODE_WRITE, d_tracer, tr, &tracing_fops); - trace_create_file("trace_pipe", 0444, d_tracer, + trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer, tr, &tracing_pipe_fops); - trace_create_file("buffer_size_kb", 0644, d_tracer, + trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer, tr, &tracing_entries_fops); - trace_create_file("buffer_total_size_kb", 0444, d_tracer, + 
trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer, tr, &tracing_total_entries_fops); trace_create_file("free_buffer", 0200, d_tracer, @@ -9449,25 +9437,25 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) file = __find_event_file(tr, "ftrace", "print"); if (file && file->dir) - trace_create_file("trigger", 0644, file->dir, file, - &event_trigger_fops); + trace_create_file("trigger", TRACE_MODE_WRITE, file->dir, + file, &event_trigger_fops); tr->trace_marker_file = file; trace_create_file("trace_marker_raw", 0220, d_tracer, tr, &tracing_mark_raw_fops); - trace_create_file("trace_clock", 0644, d_tracer, tr, + trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr, &trace_clock_fops); - trace_create_file("tracing_on", 0644, d_tracer, + trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer, tr, &rb_simple_fops); - trace_create_file("timestamp_mode", 0444, d_tracer, tr, + trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr, &trace_time_stamp_mode_fops); tr->buffer_percent = 50; - trace_create_file("buffer_percent", 0444, d_tracer, + trace_create_file("buffer_percent", TRACE_MODE_READ, d_tracer, tr, &buffer_percent_fops); create_trace_options_dir(tr); @@ -9478,11 +9466,11 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) MEM_FAIL(1, "Could not allocate function filter files"); #ifdef CONFIG_TRACER_SNAPSHOT - trace_create_file("snapshot", 0644, d_tracer, + trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer, tr, &snapshot_fops); #endif - trace_create_file("error_log", 0644, d_tracer, + trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer, tr, &tracing_err_log_fops); for_each_tracing_cpu(cpu) @@ -9675,19 +9663,19 @@ static __init int tracer_init_tracefs(void) init_tracer_tracefs(&global_trace, NULL); ftrace_init_tracefs_toplevel(&global_trace, NULL); - trace_create_file("tracing_thresh", 0644, NULL, + trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL, &global_trace, &tracing_thresh_fops); - trace_create_file("README", 0444, NULL, + trace_create_file("README", TRACE_MODE_READ, NULL, NULL, &tracing_readme_fops); - trace_create_file("saved_cmdlines", 0444, NULL, + trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL, NULL, &tracing_saved_cmdlines_fops); - trace_create_file("saved_cmdlines_size", 0644, NULL, + trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL, NULL, &tracing_saved_cmdlines_size_fops); - trace_create_file("saved_tgids", 0444, NULL, + trace_create_file("saved_tgids", TRACE_MODE_READ, NULL, NULL, &tracing_saved_tgids_fops); trace_eval_init(); @@ -9699,7 +9687,7 @@ static __init int tracer_init_tracefs(void) #endif #ifdef CONFIG_DYNAMIC_FTRACE - trace_create_file("dyn_ftrace_total_info", 0444, NULL, + trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL, NULL, &tracing_dyn_info_fops); #endif diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index b7c0f8e160fb..6b60ab9475ed 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -22,11 +22,16 @@ #include <linux/ctype.h> #include <linux/once_lite.h> +#include "pid_list.h" + #ifdef CONFIG_FTRACE_SYSCALLS #include <asm/unistd.h> /* For NR_SYSCALLS */ #include <asm/syscall.h> /* some archs define it here */ #endif +#define TRACE_MODE_WRITE 0640 +#define TRACE_MODE_READ 0440 + enum trace_type { __TRACE_FIRST_TYPE = 0, @@ -188,10 +193,14 @@ struct trace_options { struct trace_option_dentry *topts; }; -struct trace_pid_list { - int pid_max; - unsigned long *pids; -}; +struct trace_pid_list 
*trace_pid_list_alloc(void); +void trace_pid_list_free(struct trace_pid_list *pid_list); +bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid); +int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid); +int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid); +int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid); +int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid, + unsigned int *next); enum { TRACE_PIDS = BIT(0), @@ -881,7 +890,7 @@ static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace) * is set, and called by an interrupt handler, we still * want to trace it. */ - if (in_irq()) + if (in_hardirq()) trace_recursion_set(TRACE_IRQ_BIT); else trace_recursion_clear(TRACE_IRQ_BIT); diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 8d252f63cd78..0580287d7a0d 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -430,6 +430,8 @@ trace_boot_init_histograms(struct trace_event_file *file, /* All digit started node should be instances. */ if (trace_boot_compose_hist_cmd(node, buf, size) == 0) { tmp = kstrdup(buf, GFP_KERNEL); + if (!tmp) + return; if (trigger_process_regex(file, buf) < 0) pr_err("Failed to apply hist trigger: %s\n", tmp); kfree(tmp); @@ -439,6 +441,8 @@ trace_boot_init_histograms(struct trace_event_file *file, if (xbc_node_find_subkey(hnode, "keys")) { if (trace_boot_compose_hist_cmd(hnode, buf, size) == 0) { tmp = kstrdup(buf, GFP_KERNEL); + if (!tmp) + return; if (trigger_process_regex(file, buf) < 0) pr_err("Failed to apply hist trigger: %s\n", tmp); kfree(tmp); diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c index 1110112e55bd..e34e8182ee4b 100644 --- a/kernel/trace/trace_dynevent.c +++ b/kernel/trace/trace_dynevent.c @@ -262,7 +262,7 @@ static __init int init_dynamic_event(void) if (ret) return 0; - entry = tracefs_create_file("dynamic_events", 0644, NULL, + entry = tracefs_create_file("dynamic_events", TRACE_MODE_WRITE, NULL, NULL, &dynamic_events_ops); /* Event list interface */ diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 6aed10e2f7ce..a114549720d6 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -400,7 +400,8 @@ void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp) BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long)); if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, - "perf buffer not large enough")) + "perf buffer not large enough, wanted %d, have %d", + size, PERF_MAX_TRACE_SIZE)) return NULL; *rctxp = rctx = perf_swevent_get_recursion_context(); @@ -441,13 +442,13 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip, if (!rcu_is_watching()) return; - if ((unsigned long)ops->private != smp_processor_id()) - return; - bit = ftrace_test_recursion_trylock(ip, parent_ip); if (bit < 0) return; + if ((unsigned long)ops->private != smp_processor_id()) + goto out; + event = container_of(ops, struct perf_event, ftrace_ops); /* diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 830b3b9940f4..4021b9a79f93 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -885,10 +885,10 @@ static void __ftrace_clear_event_pids(struct trace_array *tr, int type) tracepoint_synchronize_unregister(); if ((type & TRACE_PIDS) && pid_list) - trace_free_pid_list(pid_list); + trace_pid_list_free(pid_list); if ((type & TRACE_NO_PIDS) && no_pid_list) - 
trace_free_pid_list(no_pid_list); + trace_pid_list_free(no_pid_list); } static void ftrace_clear_event_pids(struct trace_array *tr, int type) @@ -1967,7 +1967,7 @@ event_pid_write(struct file *filp, const char __user *ubuf, if (filtered_pids) { tracepoint_synchronize_unregister(); - trace_free_pid_list(filtered_pids); + trace_pid_list_free(filtered_pids); } else if (pid_list && !other_pids) { register_pid_events(tr); } @@ -2312,7 +2312,8 @@ event_subsystem_dir(struct trace_array *tr, const char *name, /* the ftrace system is special, do not create enable or filter files */ if (strcmp(name, "ftrace") != 0) { - entry = tracefs_create_file("filter", 0644, dir->entry, dir, + entry = tracefs_create_file("filter", TRACE_MODE_WRITE, + dir->entry, dir, &ftrace_subsystem_filter_fops); if (!entry) { kfree(system->filter); @@ -2320,7 +2321,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name, pr_warn("Could not create tracefs '%s/filter' entry\n", name); } - trace_create_file("enable", 0644, dir->entry, dir, + trace_create_file("enable", TRACE_MODE_WRITE, dir->entry, dir, &ftrace_system_enable_fops); } @@ -2402,12 +2403,12 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file) } if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) - trace_create_file("enable", 0644, file->dir, file, + trace_create_file("enable", TRACE_MODE_WRITE, file->dir, file, &ftrace_enable_fops); #ifdef CONFIG_PERF_EVENTS if (call->event.type && call->class->reg) - trace_create_file("id", 0444, file->dir, + trace_create_file("id", TRACE_MODE_READ, file->dir, (void *)(long)call->event.type, &ftrace_event_id_fops); #endif @@ -2423,22 +2424,22 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file) * triggers or filters. */ if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) { - trace_create_file("filter", 0644, file->dir, file, - &ftrace_event_filter_fops); + trace_create_file("filter", TRACE_MODE_WRITE, file->dir, + file, &ftrace_event_filter_fops); - trace_create_file("trigger", 0644, file->dir, file, - &event_trigger_fops); + trace_create_file("trigger", TRACE_MODE_WRITE, file->dir, + file, &event_trigger_fops); } #ifdef CONFIG_HIST_TRIGGERS - trace_create_file("hist", 0444, file->dir, file, + trace_create_file("hist", TRACE_MODE_READ, file->dir, file, &event_hist_fops); #endif #ifdef CONFIG_HIST_TRIGGERS_DEBUG - trace_create_file("hist_debug", 0444, file->dir, file, + trace_create_file("hist_debug", TRACE_MODE_READ, file->dir, file, &event_hist_debug_fops); #endif - trace_create_file("format", 0444, file->dir, call, + trace_create_file("format", TRACE_MODE_READ, file->dir, call, &ftrace_event_format_fops); #ifdef CONFIG_TRACE_EVENT_INJECT @@ -3433,7 +3434,7 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr) struct dentry *d_events; struct dentry *entry; - entry = tracefs_create_file("set_event", 0644, parent, + entry = tracefs_create_file("set_event", TRACE_MODE_WRITE, parent, tr, &ftrace_set_event_fops); if (!entry) { pr_warn("Could not create tracefs 'set_event' entry\n"); @@ -3446,7 +3447,7 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr) return -ENOMEM; } - entry = trace_create_file("enable", 0644, d_events, + entry = trace_create_file("enable", TRACE_MODE_WRITE, d_events, tr, &ftrace_tr_enable_fops); if (!entry) { pr_warn("Could not create tracefs 'enable' entry\n"); @@ -3455,24 +3456,25 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr) /* There are not as crucial, just 
warn if they are not created */ - entry = tracefs_create_file("set_event_pid", 0644, parent, + entry = tracefs_create_file("set_event_pid", TRACE_MODE_WRITE, parent, tr, &ftrace_set_event_pid_fops); if (!entry) pr_warn("Could not create tracefs 'set_event_pid' entry\n"); - entry = tracefs_create_file("set_event_notrace_pid", 0644, parent, - tr, &ftrace_set_event_notrace_pid_fops); + entry = tracefs_create_file("set_event_notrace_pid", + TRACE_MODE_WRITE, parent, tr, + &ftrace_set_event_notrace_pid_fops); if (!entry) pr_warn("Could not create tracefs 'set_event_notrace_pid' entry\n"); /* ring buffer internal formats */ - entry = trace_create_file("header_page", 0444, d_events, + entry = trace_create_file("header_page", TRACE_MODE_READ, d_events, ring_buffer_print_page_header, &ftrace_show_header_fops); if (!entry) pr_warn("Could not create tracefs 'header_page' entry\n"); - entry = trace_create_file("header_event", 0444, d_events, + entry = trace_create_file("header_event", TRACE_MODE_READ, d_events, ring_buffer_print_entry_header, &ftrace_show_header_fops); if (!entry) @@ -3689,8 +3691,8 @@ __init int event_trace_init(void) if (!tr) return -ENODEV; - entry = tracefs_create_file("available_events", 0444, NULL, - tr, &ftrace_avail_fops); + entry = tracefs_create_file("available_events", TRACE_MODE_READ, + NULL, tr, &ftrace_avail_fops); if (!entry) pr_warn("Could not create tracefs 'available_events' entry\n"); diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index f01e442716e2..8a10046c775f 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -66,7 +66,10 @@ C(EMPTY_SORT_FIELD, "Empty sort field"), \ C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \ C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), \ - C(INVALID_STR_OPERAND, "String type can not be an operand in expression"), + C(INVALID_STR_OPERAND, "String type can not be an operand in expression"), \ + C(EXPECT_NUMBER, "Expecting numeric literal"), \ + C(UNARY_MINUS_SUBEXPR, "Unary minus not supported in sub-expressions"), \ + C(DIVISION_BY_ZERO, "Division by zero"), #undef C #define C(a, b) HIST_ERR_##a @@ -89,12 +92,16 @@ typedef u64 (*hist_field_fn_t) (struct hist_field *field, #define HIST_FIELD_OPERANDS_MAX 2 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX) #define HIST_ACTIONS_MAX 8 +#define HIST_CONST_DIGITS_MAX 21 +#define HIST_DIV_SHIFT 20 /* For optimizing division by constants */ enum field_op_id { FIELD_OP_NONE, FIELD_OP_PLUS, FIELD_OP_MINUS, FIELD_OP_UNARY_MINUS, + FIELD_OP_DIV, + FIELD_OP_MULT, }; /* @@ -152,6 +159,11 @@ struct hist_field { bool read_once; unsigned int var_str_idx; + + /* Numeric literals are represented as u64 */ + u64 constant; + /* Used to optimize division by constants */ + u64 div_multiplier; }; static u64 hist_field_none(struct hist_field *field, @@ -163,6 +175,15 @@ static u64 hist_field_none(struct hist_field *field, return 0; } +static u64 hist_field_const(struct hist_field *field, + struct tracing_map_elt *elt, + struct trace_buffer *buffer, + struct ring_buffer_event *rbe, + void *event) +{ + return field->constant; +} + static u64 hist_field_counter(struct hist_field *field, struct tracing_map_elt *elt, struct trace_buffer *buffer, @@ -271,6 +292,106 @@ static u64 hist_field_minus(struct hist_field *hist_field, return val1 - val2; } +static u64 hist_field_div(struct hist_field *hist_field, + struct tracing_map_elt *elt, + struct trace_buffer *buffer, + struct ring_buffer_event *rbe, + 
void *event) +{ + struct hist_field *operand1 = hist_field->operands[0]; + struct hist_field *operand2 = hist_field->operands[1]; + + u64 val1 = operand1->fn(operand1, elt, buffer, rbe, event); + u64 val2 = operand2->fn(operand2, elt, buffer, rbe, event); + + /* Return -1 for the undefined case */ + if (!val2) + return -1; + + /* Use shift if the divisor is a power of 2 */ + if (!(val2 & (val2 - 1))) + return val1 >> __ffs64(val2); + + return div64_u64(val1, val2); +} + +static u64 div_by_power_of_two(struct hist_field *hist_field, + struct tracing_map_elt *elt, + struct trace_buffer *buffer, + struct ring_buffer_event *rbe, + void *event) +{ + struct hist_field *operand1 = hist_field->operands[0]; + struct hist_field *operand2 = hist_field->operands[1]; + + u64 val1 = operand1->fn(operand1, elt, buffer, rbe, event); + + return val1 >> __ffs64(operand2->constant); +} + +static u64 div_by_not_power_of_two(struct hist_field *hist_field, + struct tracing_map_elt *elt, + struct trace_buffer *buffer, + struct ring_buffer_event *rbe, + void *event) +{ + struct hist_field *operand1 = hist_field->operands[0]; + struct hist_field *operand2 = hist_field->operands[1]; + + u64 val1 = operand1->fn(operand1, elt, buffer, rbe, event); + + return div64_u64(val1, operand2->constant); +} + +static u64 div_by_mult_and_shift(struct hist_field *hist_field, + struct tracing_map_elt *elt, + struct trace_buffer *buffer, + struct ring_buffer_event *rbe, + void *event) +{ + struct hist_field *operand1 = hist_field->operands[0]; + struct hist_field *operand2 = hist_field->operands[1]; + + u64 val1 = operand1->fn(operand1, elt, buffer, rbe, event); + + /* + * If the divisor is a constant, do a multiplication and shift instead. + * + * Choose Z = some power of 2. If Y <= Z, then: + * X / Y = (X * (Z / Y)) / Z + * + * (Z / Y) is a constant (mult) which is calculated at parse time, so: + * X / Y = (X * mult) / Z + * + * The division by Z can be replaced by a shift since Z is a power of 2: + * X / Y = (X * mult) >> HIST_DIV_SHIFT + * + * As long, as X < Z the results will not be off by more than 1. + */ + if (val1 < (1 << HIST_DIV_SHIFT)) { + u64 mult = operand2->div_multiplier; + + return (val1 * mult + ((1 << HIST_DIV_SHIFT) - 1)) >> HIST_DIV_SHIFT; + } + + return div64_u64(val1, operand2->constant); +} + +static u64 hist_field_mult(struct hist_field *hist_field, + struct tracing_map_elt *elt, + struct trace_buffer *buffer, + struct ring_buffer_event *rbe, + void *event) +{ + struct hist_field *operand1 = hist_field->operands[0]; + struct hist_field *operand2 = hist_field->operands[1]; + + u64 val1 = operand1->fn(operand1, elt, buffer, rbe, event); + u64 val2 = operand2->fn(operand2, elt, buffer, rbe, event); + + return val1 * val2; +} + static u64 hist_field_unary_minus(struct hist_field *hist_field, struct tracing_map_elt *elt, struct trace_buffer *buffer, @@ -341,6 +462,7 @@ enum hist_field_flags { HIST_FIELD_FL_CPU = 1 << 15, HIST_FIELD_FL_ALIAS = 1 << 16, HIST_FIELD_FL_BUCKET = 1 << 17, + HIST_FIELD_FL_CONST = 1 << 18, }; struct var_defs { @@ -517,6 +639,25 @@ struct snapshot_context { void *key; }; +/* + * Returns the specific division function to use if the divisor + * is constant. This avoids extra branches when the trigger is hit. 
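
The multiply-and-shift trick documented in div_by_mult_and_shift() above can be checked in isolation. Below is a minimal userspace sketch, not kernel code: it precomputes mult = 2^HIST_DIV_SHIFT / divisor once (the "parse time" step) and confirms that the approximation never deviates from a real division by more than 1 while the dividend stays below 2^HIST_DIV_SHIFT. The divisor list and the test stride are arbitrary illustration values.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DIV_SHIFT 20    /* stands in for HIST_DIV_SHIFT */

int main(void)
{
    /* arbitrary divisors below 2^DIV_SHIFT; larger ones fall back to real division */
    uint64_t divisors[] = { 3, 7, 10, 1000, 999983 };
    size_t i;

    for (i = 0; i < sizeof(divisors) / sizeof(divisors[0]); i++) {
        uint64_t d = divisors[i];
        /* the "parse time" step: mult = Z / Y with Z = 2^DIV_SHIFT */
        uint64_t mult = (1ULL << DIV_SHIFT) / d;
        uint64_t worst = 0;
        uint64_t x;

        for (x = 0; x < (1ULL << DIV_SHIFT); x += 17) {
            uint64_t exact = x / d;
            /* the "trigger time" step: X / Y ~= (X * mult + Z - 1) >> DIV_SHIFT */
            uint64_t approx = (x * mult + (1ULL << DIV_SHIFT) - 1) >> DIV_SHIFT;
            uint64_t err = approx > exact ? approx - exact : exact - approx;

            if (err > worst)
                worst = err;
        }
        printf("divisor %llu: worst error %llu\n",
               (unsigned long long)d, (unsigned long long)worst);
        assert(worst <= 1);    /* matches the "off by at most 1" claim above */
    }
    return 0;
}

Power-of-two divisors never take this path at all: as in hist_field_div() and div_by_power_of_two() above, they reduce to a plain right shift.
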
+ */ +static hist_field_fn_t hist_field_get_div_fn(struct hist_field *divisor) +{ + u64 div = divisor->constant; + + if (!(div & (div - 1))) + return div_by_power_of_two; + + /* If the divisor is too large, do a regular division */ + if (div > (1 << HIST_DIV_SHIFT)) + return div_by_not_power_of_two; + + divisor->div_multiplier = div64_u64((u64)(1 << HIST_DIV_SHIFT), div); + return div_by_mult_and_shift; +} + static void track_data_free(struct track_data *track_data) { struct hist_elt_data *elt_data; @@ -1516,6 +1657,12 @@ static void expr_field_str(struct hist_field *field, char *expr) { if (field->flags & HIST_FIELD_FL_VAR_REF) strcat(expr, "$"); + else if (field->flags & HIST_FIELD_FL_CONST) { + char str[HIST_CONST_DIGITS_MAX]; + + snprintf(str, HIST_CONST_DIGITS_MAX, "%llu", field->constant); + strcat(expr, str); + } strcat(expr, hist_field_name(field, 0)); @@ -1571,6 +1718,12 @@ static char *expr_str(struct hist_field *field, unsigned int level) case FIELD_OP_PLUS: strcat(expr, "+"); break; + case FIELD_OP_DIV: + strcat(expr, "/"); + break; + case FIELD_OP_MULT: + strcat(expr, "*"); + break; default: kfree(expr); return NULL; @@ -1581,34 +1734,92 @@ static char *expr_str(struct hist_field *field, unsigned int level) return expr; } -static int contains_operator(char *str) +/* + * If field_op != FIELD_OP_NONE, *sep points to the root operator + * of the expression tree to be evaluated. + */ +static int contains_operator(char *str, char **sep) { enum field_op_id field_op = FIELD_OP_NONE; - char *op; + char *minus_op, *plus_op, *div_op, *mult_op; + - op = strpbrk(str, "+-"); - if (!op) - return FIELD_OP_NONE; + /* + * Report the last occurrence of the operators first, so that the + * expression is evaluated left to right. This is important since + * subtraction and division are not associative. + * + * e.g + * 64/8/4/2 is 1, i.e 64/8/4/2 = ((64/8)/4)/2 + * 14-7-5-2 is 0, i.e 14-7-5-2 = ((14-7)-5)-2 + */ - switch (*op) { - case '-': + /* + * First, find lower precedence addition and subtraction + * since the expression will be evaluated recursively. + */ + minus_op = strrchr(str, '-'); + if (minus_op) { /* - * Unfortunately, the modifier ".sym-offset" - * can confuse things. + * Unary minus is not supported in sub-expressions. If + * present, it is always the next root operator. */ - if (op - str >= 4 && !strncmp(op - 4, ".sym-offset", 11)) - return FIELD_OP_NONE; - - if (*str == '-') + if (minus_op == str) { field_op = FIELD_OP_UNARY_MINUS; - else - field_op = FIELD_OP_MINUS; - break; - case '+': - field_op = FIELD_OP_PLUS; - break; - default: - break; + goto out; + } + + field_op = FIELD_OP_MINUS; + } + + plus_op = strrchr(str, '+'); + if (plus_op || minus_op) { + /* + * For operators of the same precedence use to rightmost as the + * root, so that the expression is evaluated left to right. + */ + if (plus_op > minus_op) + field_op = FIELD_OP_PLUS; + goto out; + } + + /* + * Multiplication and division have higher precedence than addition and + * subtraction. + */ + div_op = strrchr(str, '/'); + if (div_op) + field_op = FIELD_OP_DIV; + + mult_op = strrchr(str, '*'); + /* + * For operators of the same precedence use to rightmost as the + * root, so that the expression is evaluated left to right. 
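
The strategy contains_operator() describes above — take the rightmost occurrence of the lowest-precedence operator as the root of the expression tree — is what makes parse_expr() evaluate left to right, which matters because '-' and '/' are not associative. A compressed stand-in of that strategy, assuming only plain integer literals (no hist fields, no unary minus, no hist-specific error handling), is:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long eval(char *str)
{
    char *minus = strrchr(str, '-');
    char *plus = strrchr(str, '+');
    char *division = strrchr(str, '/');
    char *mult = strrchr(str, '*');
    char *sep, op;
    long lhs, rhs;

    /*
     * Pick the rightmost occurrence of the lowest-precedence operator as
     * the root, so that evaluation proceeds left to right.  The pointer
     * comparisons mirror the kernel code: both pointers refer into str
     * whenever they are non-NULL.
     */
    sep = plus > minus ? plus : minus;
    if (!sep)
        sep = mult > division ? mult : division;
    if (!sep)
        return atol(str);    /* atom: a numeric literal */

    op = *sep;
    *sep = '\0';             /* split the string at the root operator */
    lhs = eval(str);         /* the left side may itself be an expression */
    rhs = eval(sep + 1);

    switch (op) {
    case '+': return lhs + rhs;
    case '-': return lhs - rhs;
    case '*': return lhs * rhs;
    case '/': return rhs ? lhs / rhs : -1;    /* -1 for the undefined case */
    }
    return 0;
}

int main(void)
{
    char a[] = "64/8/4/2", b[] = "14-7-5-2", c[] = "2+3*4";

    assert(eval(a) == 1);     /* ((64/8)/4)/2 */
    assert(eval(b) == 0);     /* ((14-7)-5)-2 */
    assert(eval(c) == 14);    /* '*' binds tighter than '+' */
    printf("expression checks passed\n");
    return 0;
}

Splitting at the leftmost '/' instead would turn 64/8/4/2 into 64/(8/(4/2)) = 16, which is why the comments above insist on the rightmost occurrence.
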
+ */ + if (mult_op > div_op) + field_op = FIELD_OP_MULT; + +out: + if (sep) { + switch (field_op) { + case FIELD_OP_UNARY_MINUS: + case FIELD_OP_MINUS: + *sep = minus_op; + break; + case FIELD_OP_PLUS: + *sep = plus_op; + break; + case FIELD_OP_DIV: + *sep = div_op; + break; + case FIELD_OP_MULT: + *sep = mult_op; + break; + case FIELD_OP_NONE: + default: + *sep = NULL; + break; + } } return field_op; @@ -1689,6 +1900,15 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, goto out; } + if (flags & HIST_FIELD_FL_CONST) { + hist_field->fn = hist_field_const; + hist_field->size = sizeof(u64); + hist_field->type = kstrdup("u64", GFP_KERNEL); + if (!hist_field->type) + goto free; + goto out; + } + if (flags & HIST_FIELD_FL_STACKTRACE) { hist_field->fn = hist_field_none; goto out; @@ -1733,9 +1953,10 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, if (!hist_field->type) goto free; - if (field->filter_type == FILTER_STATIC_STRING) + if (field->filter_type == FILTER_STATIC_STRING) { hist_field->fn = hist_field_string; - else if (field->filter_type == FILTER_DYN_STRING) + hist_field->size = field->size; + } else if (field->filter_type == FILTER_DYN_STRING) hist_field->fn = hist_field_dynstring; else hist_field->fn = hist_field_pstring; @@ -1925,7 +2146,7 @@ static char *field_name_from_var(struct hist_trigger_data *hist_data, if (strcmp(var_name, name) == 0) { field = hist_data->attrs->var_defs.expr[i]; - if (contains_operator(field) || is_var_ref(field)) + if (contains_operator(field, NULL) || is_var_ref(field)) continue; return field; } @@ -2002,7 +2223,11 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, *flags |= HIST_FIELD_FL_HEX; else if (strcmp(modifier, "sym") == 0) *flags |= HIST_FIELD_FL_SYM; - else if (strcmp(modifier, "sym-offset") == 0) + /* + * 'sym-offset' occurrences in the trigger string are modified + * to 'symXoffset' to simplify arithmetic expression parsing. 
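
The 'symXoffset' comment above pairs with the event_hist_trigger_func() hunk later in this diff, which rewrites the trigger string before parsing so that the '-' inside the ".sym-offset" modifier cannot be mistaken for a subtraction. A minimal sketch of that in-place rewrite, using a made-up trigger fragment:

#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    /* invented trigger fragment; only the modifier rewrite is of interest */
    char trigger[] = "keys=ip.sym-offset:vals=bytes/1024";
    char *start;

    /* an operator scan would mistake the '-' in ".sym-offset" for a subtraction */
    assert(strrchr(trigger, '-') != NULL);

    /* rewrite every ".sym-offset" to ".symXoffset" in place */
    start = strstr(trigger, ".sym-offset");
    while (start) {
        start[4] = 'X';
        start = strstr(start + 11, ".sym-offset");
    }

    /* after the rewrite, '-' can only mean minus */
    assert(strrchr(trigger, '-') == NULL);
    printf("%s\n", trigger);
    return 0;
}
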
+ */ + else if (strcmp(modifier, "symXoffset") == 0) *flags |= HIST_FIELD_FL_SYM_OFFSET; else if ((strcmp(modifier, "execname") == 0) && (strcmp(field_name, "common_pid") == 0)) @@ -2090,6 +2315,29 @@ static struct hist_field *create_alias(struct hist_trigger_data *hist_data, return alias; } +static struct hist_field *parse_const(struct hist_trigger_data *hist_data, + char *str, char *var_name, + unsigned long *flags) +{ + struct trace_array *tr = hist_data->event_file->tr; + struct hist_field *field = NULL; + u64 constant; + + if (kstrtoull(str, 0, &constant)) { + hist_err(tr, HIST_ERR_EXPECT_NUMBER, errpos(str)); + return NULL; + } + + *flags |= HIST_FIELD_FL_CONST; + field = create_hist_field(hist_data, NULL, *flags, var_name); + if (!field) + return NULL; + + field->constant = constant; + + return field; +} + static struct hist_field *parse_atom(struct hist_trigger_data *hist_data, struct trace_event_file *file, char *str, unsigned long *flags, char *var_name) @@ -2100,6 +2348,15 @@ static struct hist_field *parse_atom(struct hist_trigger_data *hist_data, unsigned long buckets = 0; int ret = 0; + if (isdigit(str[0])) { + hist_field = parse_const(hist_data, str, var_name, flags); + if (!hist_field) { + ret = -EINVAL; + goto out; + } + return hist_field; + } + s = strchr(str, '.'); if (s) { s = strchr(++s, '.'); @@ -2156,21 +2413,24 @@ static struct hist_field *parse_atom(struct hist_trigger_data *hist_data, static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, struct trace_event_file *file, char *str, unsigned long flags, - char *var_name, unsigned int level); + char *var_name, unsigned int *n_subexprs); static struct hist_field *parse_unary(struct hist_trigger_data *hist_data, struct trace_event_file *file, char *str, unsigned long flags, - char *var_name, unsigned int level) + char *var_name, unsigned int *n_subexprs) { struct hist_field *operand1, *expr = NULL; unsigned long operand_flags; int ret = 0; char *s; + /* Unary minus operator, increment n_subexprs */ + ++*n_subexprs; + /* we support only -(xxx) i.e. explicit parens required */ - if (level > 3) { + if (*n_subexprs > 3) { hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); ret = -EINVAL; goto free; @@ -2187,8 +2447,16 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data, } s = strrchr(str, ')'); - if (s) + if (s) { + /* unary minus not supported in sub-expressions */ + if (*(s+1) != '\0') { + hist_err(file->tr, HIST_ERR_UNARY_MINUS_SUBEXPR, + errpos(str)); + ret = -EINVAL; + goto free; + } *s = '\0'; + } else { ret = -EINVAL; /* no closing ')' */ goto free; @@ -2202,7 +2470,7 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data, } operand_flags = 0; - operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level); + operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, n_subexprs); if (IS_ERR(operand1)) { ret = PTR_ERR(operand1); goto free; @@ -2233,9 +2501,15 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data, return ERR_PTR(ret); } +/* + * If the operands are var refs, return pointers the + * variable(s) referenced in var1 and var2, else NULL. 
+ */ static int check_expr_operands(struct trace_array *tr, struct hist_field *operand1, - struct hist_field *operand2) + struct hist_field *operand2, + struct hist_field **var1, + struct hist_field **var2) { unsigned long operand1_flags = operand1->flags; unsigned long operand2_flags = operand2->flags; @@ -2248,6 +2522,7 @@ static int check_expr_operands(struct trace_array *tr, if (!var) return -EINVAL; operand1_flags = var->flags; + *var1 = var; } if ((operand2_flags & HIST_FIELD_FL_VAR_REF) || @@ -2258,6 +2533,7 @@ static int check_expr_operands(struct trace_array *tr, if (!var) return -EINVAL; operand2_flags = var->flags; + *var2 = var; } if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) != @@ -2272,44 +2548,47 @@ static int check_expr_operands(struct trace_array *tr, static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, struct trace_event_file *file, char *str, unsigned long flags, - char *var_name, unsigned int level) + char *var_name, unsigned int *n_subexprs) { struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL; - unsigned long operand_flags; + struct hist_field *var1 = NULL, *var2 = NULL; + unsigned long operand_flags, operand2_flags; int field_op, ret = -EINVAL; char *sep, *operand1_str; + hist_field_fn_t op_fn; + bool combine_consts; - if (level > 3) { + if (*n_subexprs > 3) { hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); return ERR_PTR(-EINVAL); } - field_op = contains_operator(str); + field_op = contains_operator(str, &sep); if (field_op == FIELD_OP_NONE) return parse_atom(hist_data, file, str, &flags, var_name); if (field_op == FIELD_OP_UNARY_MINUS) - return parse_unary(hist_data, file, str, flags, var_name, ++level); + return parse_unary(hist_data, file, str, flags, var_name, n_subexprs); - switch (field_op) { - case FIELD_OP_MINUS: - sep = "-"; - break; - case FIELD_OP_PLUS: - sep = "+"; - break; - default: + /* Binary operator found, increment n_subexprs */ + ++*n_subexprs; + + /* Split the expression string at the root operator */ + if (!sep) goto free; - } + *sep = '\0'; + operand1_str = str; + str = sep+1; - operand1_str = strsep(&str, sep); - if (!operand1_str || !str) + /* Binary operator requires both operands */ + if (*operand1_str == '\0' || *str == '\0') goto free; operand_flags = 0; - operand1 = parse_atom(hist_data, file, operand1_str, - &operand_flags, NULL); + + /* LHS of string is an expression e.g. a+b in a+b+c */ + operand1 = parse_expr(hist_data, file, operand1_str, operand_flags, NULL, n_subexprs); if (IS_ERR(operand1)) { ret = PTR_ERR(operand1); operand1 = NULL; @@ -2321,9 +2600,9 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, goto free; } - /* rest of string could be another expression e.g. b+c in a+b+c */ + /* RHS of string is another expression e.g. 
c in a+b+c */ operand_flags = 0; - operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level); + operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, n_subexprs); if (IS_ERR(operand2)) { ret = PTR_ERR(operand2); operand2 = NULL; @@ -2335,11 +2614,38 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, goto free; } - ret = check_expr_operands(file->tr, operand1, operand2); + switch (field_op) { + case FIELD_OP_MINUS: + op_fn = hist_field_minus; + break; + case FIELD_OP_PLUS: + op_fn = hist_field_plus; + break; + case FIELD_OP_DIV: + op_fn = hist_field_div; + break; + case FIELD_OP_MULT: + op_fn = hist_field_mult; + break; + default: + ret = -EINVAL; + goto free; + } + + ret = check_expr_operands(file->tr, operand1, operand2, &var1, &var2); if (ret) goto free; - flags |= HIST_FIELD_FL_EXPR; + operand_flags = var1 ? var1->flags : operand1->flags; + operand2_flags = var2 ? var2->flags : operand2->flags; + + /* + * If both operands are constant, the expression can be + * collapsed to a single constant. + */ + combine_consts = operand_flags & operand2_flags & HIST_FIELD_FL_CONST; + + flags |= combine_consts ? HIST_FIELD_FL_CONST : HIST_FIELD_FL_EXPR; flags |= operand1->flags & (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); @@ -2356,31 +2662,61 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, expr->operands[0] = operand1; expr->operands[1] = operand2; - /* The operand sizes should be the same, so just pick one */ - expr->size = operand1->size; + if (field_op == FIELD_OP_DIV && + operand2_flags & HIST_FIELD_FL_CONST) { + u64 divisor = var2 ? var2->constant : operand2->constant; - expr->operator = field_op; - expr->name = expr_str(expr, 0); - expr->type = kstrdup_const(operand1->type, GFP_KERNEL); - if (!expr->type) { - ret = -ENOMEM; - goto free; + if (!divisor) { + hist_err(file->tr, HIST_ERR_DIVISION_BY_ZERO, errpos(str)); + ret = -EDOM; + goto free; + } + + /* + * Copy the divisor here so we don't have to look it up + * later if this is a var ref + */ + operand2->constant = divisor; + op_fn = hist_field_get_div_fn(operand2); } - switch (field_op) { - case FIELD_OP_MINUS: - expr->fn = hist_field_minus; - break; - case FIELD_OP_PLUS: - expr->fn = hist_field_plus; - break; - default: - ret = -EINVAL; - goto free; + if (combine_consts) { + if (var1) + expr->operands[0] = var1; + if (var2) + expr->operands[1] = var2; + + expr->constant = op_fn(expr, NULL, NULL, NULL, NULL); + + expr->operands[0] = NULL; + expr->operands[1] = NULL; + + /* + * var refs won't be destroyed immediately + * See: destroy_hist_field() + */ + destroy_hist_field(operand2, 0); + destroy_hist_field(operand1, 0); + + expr->name = expr_str(expr, 0); + } else { + expr->fn = op_fn; + + /* The operand sizes should be the same, so just pick one */ + expr->size = operand1->size; + + expr->operator = field_op; + expr->type = kstrdup_const(operand1->type, GFP_KERNEL); + if (!expr->type) { + ret = -ENOMEM; + goto free; + } + + expr->name = expr_str(expr, 0); } return expr; - free: +free: destroy_hist_field(operand1, 0); destroy_hist_field(operand2, 0); destroy_hist_field(expr, 0); @@ -2691,7 +3027,7 @@ static inline void __update_field_vars(struct tracing_map_elt *elt, char *str = elt_data->field_var_str[j++]; char *val_str = (char *)(uintptr_t)var_val; - strscpy(str, val_str, STR_VAR_LEN_MAX); + strscpy(str, val_str, val->size); var_val = (u64)(uintptr_t)str; } tracing_map_set_var(elt, var_idx, var_val); @@ -3751,9 +4087,9 @@ static int 
__create_val_field(struct hist_trigger_data *hist_data, unsigned long flags) { struct hist_field *hist_field; - int ret = 0; + int ret = 0, n_subexprs = 0; - hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0); + hist_field = parse_expr(hist_data, file, field_str, flags, var_name, &n_subexprs); if (IS_ERR(hist_field)) { ret = PTR_ERR(hist_field); goto out; @@ -3894,7 +4230,7 @@ static int create_key_field(struct hist_trigger_data *hist_data, struct hist_field *hist_field = NULL; unsigned long flags = 0; unsigned int key_size; - int ret = 0; + int ret = 0, n_subexprs = 0; if (WARN_ON(key_idx >= HIST_FIELDS_MAX)) return -EINVAL; @@ -3907,7 +4243,7 @@ static int create_key_field(struct hist_trigger_data *hist_data, hist_field = create_hist_field(hist_data, NULL, flags, NULL); } else { hist_field = parse_expr(hist_data, file, field_str, flags, - NULL, 0); + NULL, &n_subexprs); if (IS_ERR(hist_field)) { ret = PTR_ERR(hist_field); goto out; @@ -4586,7 +4922,7 @@ static void hist_trigger_elt_update(struct hist_trigger_data *hist_data, str = elt_data->field_var_str[idx]; val_str = (char *)(uintptr_t)hist_val; - strscpy(str, val_str, STR_VAR_LEN_MAX); + strscpy(str, val_str, hist_field->size); hist_val = (u64)(uintptr_t)str; } @@ -4706,7 +5042,6 @@ static void hist_trigger_stacktrace_print(struct seq_file *m, unsigned long *stacktrace_entries, unsigned int max_entries) { - char str[KSYM_SYMBOL_LEN]; unsigned int spaces = 8; unsigned int i; @@ -4715,8 +5050,7 @@ static void hist_trigger_stacktrace_print(struct seq_file *m, return; seq_printf(m, "%*c", 1 + spaces, ' '); - sprint_symbol(str, stacktrace_entries[i]); - seq_printf(m, "%s\n", str); + seq_printf(m, "%pS\n", (void*)stacktrace_entries[i]); } } @@ -4726,7 +5060,6 @@ static void hist_trigger_print_key(struct seq_file *m, struct tracing_map_elt *elt) { struct hist_field *key_field; - char str[KSYM_SYMBOL_LEN]; bool multiline = false; const char *field_name; unsigned int i; @@ -4747,14 +5080,12 @@ static void hist_trigger_print_key(struct seq_file *m, seq_printf(m, "%s: %llx", field_name, uval); } else if (key_field->flags & HIST_FIELD_FL_SYM) { uval = *(u64 *)(key + key_field->offset); - sprint_symbol_no_offset(str, uval); - seq_printf(m, "%s: [%llx] %-45s", field_name, - uval, str); + seq_printf(m, "%s: [%llx] %-45ps", field_name, + uval, (void *)(uintptr_t)uval); } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) { uval = *(u64 *)(key + key_field->offset); - sprint_symbol(str, uval); - seq_printf(m, "%s: [%llx] %-55s", field_name, - uval, str); + seq_printf(m, "%s: [%llx] %-55pS", field_name, + uval, (void *)(uintptr_t)uval); } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) { struct hist_elt_data *elt_data = elt->private_data; char *comm; @@ -4950,6 +5281,8 @@ static void hist_field_debug_show_flags(struct seq_file *m, if (flags & HIST_FIELD_FL_ALIAS) seq_puts(m, " HIST_FIELD_FL_ALIAS\n"); + else if (flags & HIST_FIELD_FL_CONST) + seq_puts(m, " HIST_FIELD_FL_CONST\n"); } static int hist_field_debug_show(struct seq_file *m, @@ -4971,6 +5304,9 @@ static int hist_field_debug_show(struct seq_file *m, field->var.idx); } + if (field->flags & HIST_FIELD_FL_CONST) + seq_printf(m, " constant: %llu\n", field->constant); + if (field->flags & HIST_FIELD_FL_ALIAS) seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n", field->var_ref_idx); @@ -5213,6 +5549,8 @@ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field) if (hist_field->flags & HIST_FIELD_FL_CPU) seq_puts(m, "common_cpu"); + else 
if (hist_field->flags & HIST_FIELD_FL_CONST) + seq_printf(m, "%llu", hist_field->constant); else if (field_name) { if (hist_field->flags & HIST_FIELD_FL_VAR_REF || hist_field->flags & HIST_FIELD_FL_ALIAS) @@ -5795,7 +6133,7 @@ static int event_hist_trigger_func(struct event_command *cmd_ops, struct synth_event *se; const char *se_name; bool remove = false; - char *trigger, *p; + char *trigger, *p, *start; int ret = 0; lockdep_assert_held(&event_mutex); @@ -5843,6 +6181,16 @@ static int event_hist_trigger_func(struct event_command *cmd_ops, trigger = strstrip(trigger); } + /* + * To simplify arithmetic expression parsing, replace occurrences of + * '.sym-offset' modifier with '.symXoffset' + */ + start = strstr(trigger, ".sym-offset"); + while (start) { + *(start + 4) = 'X'; + start = strstr(start + 11, ".sym-offset"); + } + attrs = parse_hist_trigger_attrs(file->tr, trigger); if (IS_ERR(attrs)) return PTR_ERR(attrs); diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index d54094b7a9d7..22db3ce95e74 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -2227,8 +2227,8 @@ static __init int trace_events_synth_init(void) if (err) goto err; - entry = tracefs_create_file("synthetic_events", 0644, NULL, - NULL, &synth_events_fops); + entry = tracefs_create_file("synthetic_events", TRACE_MODE_WRITE, + NULL, NULL, &synth_events_fops); if (!entry) { err = -ENODEV; goto err; diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 1f0e63f5d1f9..9f1bfbe105e8 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -186,7 +186,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip, return; trace_ctx = tracing_gen_ctx(); - preempt_disable_notrace(); cpu = smp_processor_id(); data = per_cpu_ptr(tr->array_buffer.data, cpu); @@ -194,7 +193,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip, trace_function(tr, ip, parent_ip, trace_ctx); ftrace_test_recursion_unlock(bit); - preempt_enable_notrace(); } #ifdef CONFIG_UNWINDER_ORC @@ -298,8 +296,6 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, if (bit < 0) return; - preempt_disable_notrace(); - cpu = smp_processor_id(); data = per_cpu_ptr(tr->array_buffer.data, cpu); if (atomic_read(&data->disabled)) @@ -324,7 +320,6 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, out: ftrace_test_recursion_unlock(bit); - preempt_enable_notrace(); } static void diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 0de6837722da..203204cadf92 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -120,7 +120,7 @@ static inline int ftrace_graph_ignore_irqs(void) if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT)) return 0; - return in_irq(); + return in_hardirq(); } int trace_graph_entry(struct ftrace_graph_ent *trace) @@ -1340,7 +1340,7 @@ static __init int init_graph_tracefs(void) if (ret) return 0; - trace_create_file("max_graph_depth", 0644, NULL, + trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL, NULL, &graph_depth_fops); return 0; diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c index 1b83d75eb103..56bb7b890578 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c @@ -79,8 +79,8 @@ struct hwlat_kthread_data { int nmi_cpu; }; -struct hwlat_kthread_data hwlat_single_cpu_data; -DEFINE_PER_CPU(struct hwlat_kthread_data, 
hwlat_per_cpu_data); +static struct hwlat_kthread_data hwlat_single_cpu_data; +static DEFINE_PER_CPU(struct hwlat_kthread_data, hwlat_per_cpu_data); /* Tells NMIs to call back to the hwlat tracer to record timestamps */ bool trace_hwlat_callback_enabled; @@ -782,21 +782,21 @@ static int init_tracefs(void) if (!top_dir) return -ENOMEM; - hwlat_sample_window = tracefs_create_file("window", 0640, + hwlat_sample_window = tracefs_create_file("window", TRACE_MODE_WRITE, top_dir, &hwlat_window, &trace_min_max_fops); if (!hwlat_sample_window) goto err; - hwlat_sample_width = tracefs_create_file("width", 0644, + hwlat_sample_width = tracefs_create_file("width", TRACE_MODE_WRITE, top_dir, &hwlat_width, &trace_min_max_fops); if (!hwlat_sample_width) goto err; - hwlat_thread_mode = trace_create_file("mode", 0644, + hwlat_thread_mode = trace_create_file("mode", TRACE_MODE_WRITE, top_dir, NULL, &thread_mode_fops); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 3a64ba4bbad6..33272a7b6912 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -97,7 +97,7 @@ static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk) { - return !!(kprobe_gone(&tk->rp.kp)); + return kprobe_gone(&tk->rp.kp); } static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk, @@ -1925,16 +1925,16 @@ static __init int init_kprobe_trace(void) if (ret) return 0; - entry = tracefs_create_file("kprobe_events", 0644, NULL, - NULL, &kprobe_events_ops); + entry = tracefs_create_file("kprobe_events", TRACE_MODE_WRITE, + NULL, NULL, &kprobe_events_ops); /* Event list interface */ if (!entry) pr_warn("Could not create tracefs 'kprobe_events' entry\n"); /* Profile interface */ - entry = tracefs_create_file("kprobe_profile", 0444, NULL, - NULL, &kprobe_profile_ops); + entry = tracefs_create_file("kprobe_profile", TRACE_MODE_READ, + NULL, NULL, &kprobe_profile_ops); if (!entry) pr_warn("Could not create tracefs 'kprobe_profile' entry\n"); diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c index ce053619f289..7520d43aed55 100644 --- a/kernel/trace/trace_osnoise.c +++ b/kernel/trace/trace_osnoise.c @@ -38,8 +38,6 @@ #define CREATE_TRACE_POINTS #include <trace/events/osnoise.h> -static struct trace_array *osnoise_trace; - /* * Default values. */ @@ -51,6 +49,100 @@ static struct trace_array *osnoise_trace; #define DEFAULT_TIMERLAT_PRIO 95 /* FIFO 95 */ /* + * trace_array of the enabled osnoise/timerlat instances. + */ +struct osnoise_instance { + struct list_head list; + struct trace_array *tr; +}; + +static struct list_head osnoise_instances; + +static bool osnoise_has_registered_instances(void) +{ + return !!list_first_or_null_rcu(&osnoise_instances, + struct osnoise_instance, + list); +} + +/* + * osnoise_instance_registered - check if a tr is already registered + */ +static int osnoise_instance_registered(struct trace_array *tr) +{ + struct osnoise_instance *inst; + int found = 0; + + rcu_read_lock(); + list_for_each_entry_rcu(inst, &osnoise_instances, list) { + if (inst->tr == tr) + found = 1; + } + rcu_read_unlock(); + + return found; +} + +/* + * osnoise_register_instance - register a new trace instance + * + * Register a trace_array *tr in the list of instances running + * osnoise/timerlat tracers. 
+ */ +static int osnoise_register_instance(struct trace_array *tr) +{ + struct osnoise_instance *inst; + + /* + * register/unregister serialization is provided by trace's + * trace_types_lock. + */ + lockdep_assert_held(&trace_types_lock); + + inst = kmalloc(sizeof(*inst), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + INIT_LIST_HEAD_RCU(&inst->list); + inst->tr = tr; + list_add_tail_rcu(&inst->list, &osnoise_instances); + + return 0; +} + +/* + * osnoise_unregister_instance - unregister a registered trace instance + * + * Remove the trace_array *tr from the list of instances running + * osnoise/timerlat tracers. + */ +static void osnoise_unregister_instance(struct trace_array *tr) +{ + struct osnoise_instance *inst; + int found = 0; + + /* + * register/unregister serialization is provided by trace's + * trace_types_lock. + */ + lockdep_assert_held(&trace_types_lock); + + list_for_each_entry_rcu(inst, &osnoise_instances, list) { + if (inst->tr == tr) { + list_del_rcu(&inst->list); + found = 1; + break; + } + } + + if (!found) + return; + + synchronize_rcu(); + kfree(inst); +} + +/* * NMI runtime info. */ struct osn_nmi { @@ -248,10 +340,56 @@ static struct osnoise_data { #endif }; -/* - * Boolean variable used to inform that the tracer is currently sampling. - */ -static bool osnoise_busy; +#ifdef CONFIG_TIMERLAT_TRACER +static inline bool timerlat_enabled(void) +{ + return osnoise_data.timerlat_tracer; +} + +static inline int timerlat_softirq_exit(struct osnoise_variables *osn_var) +{ + struct timerlat_variables *tlat_var = this_cpu_tmr_var(); + /* + * If the timerlat is enabled, but the irq handler did + * not run yet enabling timerlat_tracer, do not trace. + */ + if (!tlat_var->tracing_thread) { + osn_var->softirq.arrival_time = 0; + osn_var->softirq.delta_start = 0; + return 0; + } + return 1; +} + +static inline int timerlat_thread_exit(struct osnoise_variables *osn_var) +{ + struct timerlat_variables *tlat_var = this_cpu_tmr_var(); + /* + * If the timerlat is enabled, but the irq handler did + * not run yet enabling timerlat_tracer, do not trace. 
+ */ + if (!tlat_var->tracing_thread) { + osn_var->thread.delta_start = 0; + osn_var->thread.arrival_time = 0; + return 0; + } + return 1; +} +#else /* CONFIG_TIMERLAT_TRACER */ +static inline bool timerlat_enabled(void) +{ + return false; +} + +static inline int timerlat_softirq_exit(struct osnoise_variables *osn_var) +{ + return 1; +} +static inline int timerlat_thread_exit(struct osnoise_variables *osn_var) +{ + return 1; +} +#endif #ifdef CONFIG_PREEMPT_RT /* @@ -294,19 +432,19 @@ static void print_osnoise_headers(struct seq_file *s) seq_puts(s, "# _-----=> irqs-off\n"); seq_puts(s, "# / _----=> need-resched\n"); seq_puts(s, "# | / _---=> hardirq/softirq\n"); - seq_puts(s, "# || / _--=> preempt-depth "); - seq_puts(s, " MAX\n"); - - seq_puts(s, "# || / "); + seq_puts(s, "# || / _--=> preempt-depth\n"); + seq_puts(s, "# ||| / _-=> migrate-disable "); + seq_puts(s, " MAX\n"); + seq_puts(s, "# |||| / delay "); seq_puts(s, " SINGLE Interference counters:\n"); - seq_puts(s, "# |||| RUNTIME "); + seq_puts(s, "# ||||| RUNTIME "); seq_puts(s, " NOISE %% OF CPU NOISE +-----------------------------+\n"); - seq_puts(s, "# TASK-PID CPU# |||| TIMESTAMP IN US "); + seq_puts(s, "# TASK-PID CPU# ||||| TIMESTAMP IN US "); seq_puts(s, " IN US AVAILABLE IN US HW NMI IRQ SIRQ THREAD\n"); - seq_puts(s, "# | | | |||| | | "); + seq_puts(s, "# | | | ||||| | | "); seq_puts(s, " | | | | | | | |\n"); } #endif /* CONFIG_PREEMPT_RT */ @@ -315,19 +453,24 @@ static void print_osnoise_headers(struct seq_file *s) * osnoise_taint - report an osnoise error. */ #define osnoise_taint(msg) ({ \ - struct trace_array *tr = osnoise_trace; \ + struct osnoise_instance *inst; \ + struct trace_buffer *buffer; \ \ - trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_, msg); \ + rcu_read_lock(); \ + list_for_each_entry_rcu(inst, &osnoise_instances, list) { \ + buffer = inst->tr->array_buffer.buffer; \ + trace_array_printk_buf(buffer, _THIS_IP_, msg); \ + } \ + rcu_read_unlock(); \ osnoise_data.tainted = true; \ }) /* * Record an osnoise_sample into the tracer buffer. */ -static void trace_osnoise_sample(struct osnoise_sample *sample) +static void +__trace_osnoise_sample(struct osnoise_sample *sample, struct trace_buffer *buffer) { - struct trace_array *tr = osnoise_trace; - struct trace_buffer *buffer = tr->array_buffer.buffer; struct trace_event_call *call = &event_osnoise; struct ring_buffer_event *event; struct osnoise_entry *entry; @@ -350,6 +493,22 @@ static void trace_osnoise_sample(struct osnoise_sample *sample) trace_buffer_unlock_commit_nostack(buffer, event); } +/* + * Record an osnoise_sample on all osnoise instances. + */ +static void trace_osnoise_sample(struct osnoise_sample *sample) +{ + struct osnoise_instance *inst; + struct trace_buffer *buffer; + + rcu_read_lock(); + list_for_each_entry_rcu(inst, &osnoise_instances, list) { + buffer = inst->tr->array_buffer.buffer; + __trace_osnoise_sample(sample, buffer); + } + rcu_read_unlock(); +} + #ifdef CONFIG_TIMERLAT_TRACER /* * Print the timerlat header info. 
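
The timerlat_enabled()/timerlat_softirq_exit()/timerlat_thread_exit() helpers above exist so that the call sites (see the trace_softirq_exit_callback() and thread_exit() hunks later in this diff) lose their open-coded #ifdef CONFIG_TIMERLAT_TRACER blocks. A generic sketch of that pattern follows; HAVE_TIMERLAT, timerlat_exit_work() and softirq_exit_callback() are made-up names for illustration, not kernel symbols.

#include <stdio.h>

/* #define HAVE_TIMERLAT */    /* stand-in for CONFIG_TIMERLAT_TRACER */

#ifdef HAVE_TIMERLAT
static int timerlat_enabled(void)   { return 1; }
static int timerlat_exit_work(void) { printf("timerlat bookkeeping\n"); return 1; }
#else
static int timerlat_enabled(void)   { return 0; }
static int timerlat_exit_work(void) { return 1; }
#endif

static void softirq_exit_callback(void)
{
    /* the call site stays free of #ifdef; the stubs fold away when off */
    if (timerlat_enabled())
        if (!timerlat_exit_work())
            return;

    printf("record softirq noise\n");
}

int main(void)
{
    softirq_exit_callback();
    return 0;
}

When the config option is off, the compiler sees if (0) and discards the whole branch, so the refactor keeps the fast path identical while removing the preprocessor noise from the hot functions.
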
@@ -378,23 +537,20 @@ static void print_timerlat_headers(struct seq_file *s) seq_puts(s, "# / _----=> need-resched\n"); seq_puts(s, "# | / _---=> hardirq/softirq\n"); seq_puts(s, "# || / _--=> preempt-depth\n"); - seq_puts(s, "# || /\n"); - seq_puts(s, "# |||| ACTIVATION\n"); - seq_puts(s, "# TASK-PID CPU# |||| TIMESTAMP ID "); - seq_puts(s, " CONTEXT LATENCY\n"); - seq_puts(s, "# | | | |||| | | "); + seq_puts(s, "# ||| / _-=> migrate-disable\n"); + seq_puts(s, "# |||| / delay\n"); + seq_puts(s, "# ||||| ACTIVATION\n"); + seq_puts(s, "# TASK-PID CPU# ||||| TIMESTAMP ID "); + seq_puts(s, " CONTEXT LATENCY\n"); + seq_puts(s, "# | | | ||||| | | "); seq_puts(s, " | |\n"); } #endif /* CONFIG_PREEMPT_RT */ -/* - * Record an timerlat_sample into the tracer buffer. - */ -static void trace_timerlat_sample(struct timerlat_sample *sample) +static void +__trace_timerlat_sample(struct timerlat_sample *sample, struct trace_buffer *buffer) { - struct trace_array *tr = osnoise_trace; struct trace_event_call *call = &event_osnoise; - struct trace_buffer *buffer = tr->array_buffer.buffer; struct ring_buffer_event *event; struct timerlat_entry *entry; @@ -411,6 +567,22 @@ static void trace_timerlat_sample(struct timerlat_sample *sample) trace_buffer_unlock_commit_nostack(buffer, event); } +/* + * Record an timerlat_sample into the tracer buffer. + */ +static void trace_timerlat_sample(struct timerlat_sample *sample) +{ + struct osnoise_instance *inst; + struct trace_buffer *buffer; + + rcu_read_lock(); + list_for_each_entry_rcu(inst, &osnoise_instances, list) { + buffer = inst->tr->array_buffer.buffer; + __trace_timerlat_sample(sample, buffer); + } + rcu_read_unlock(); +} + #ifdef CONFIG_STACKTRACE #define MAX_CALLS 256 @@ -450,29 +622,18 @@ static void timerlat_save_stack(int skip) return; } -/* - * timerlat_dump_stack - dump a stack trace previously saved - * - * Dump a saved stack trace into the trace buffer. - */ -static void timerlat_dump_stack(void) + +static void +__timerlat_dump_stack(struct trace_buffer *buffer, struct trace_stack *fstack, unsigned int size) { struct trace_event_call *call = &event_osnoise; - struct trace_array *tr = osnoise_trace; - struct trace_buffer *buffer = tr->array_buffer.buffer; struct ring_buffer_event *event; - struct trace_stack *fstack; struct stack_entry *entry; - unsigned int size; - - preempt_disable_notrace(); - fstack = this_cpu_ptr(&trace_stack); - size = fstack->stack_size; event = trace_buffer_lock_reserve(buffer, TRACE_STACK, sizeof(*entry) + size, tracing_gen_ctx()); if (!event) - goto out; + return; entry = ring_buffer_event_data(event); @@ -481,12 +642,39 @@ static void timerlat_dump_stack(void) if (!call_filter_check_discard(call, entry, buffer, event)) trace_buffer_unlock_commit_nostack(buffer, event); +} -out: +/* + * timerlat_dump_stack - dump a stack trace previously saved + */ +static void timerlat_dump_stack(u64 latency) +{ + struct osnoise_instance *inst; + struct trace_buffer *buffer; + struct trace_stack *fstack; + unsigned int size; + + /* + * trace only if latency > print_stack config, if enabled. 
+ */ + if (!osnoise_data.print_stack || osnoise_data.print_stack > latency) + return; + + preempt_disable_notrace(); + fstack = this_cpu_ptr(&trace_stack); + size = fstack->stack_size; + + rcu_read_lock(); + list_for_each_entry_rcu(inst, &osnoise_instances, list) { + buffer = inst->tr->array_buffer.buffer; + __timerlat_dump_stack(buffer, fstack, size); + + } + rcu_read_unlock(); preempt_enable_notrace(); } -#else -#define timerlat_dump_stack() do {} while (0) +#else /* CONFIG_STACKTRACE */ +#define timerlat_dump_stack(u64 latency) do {} while (0) #define timerlat_save_stack(a) do {} while (0) #endif /* CONFIG_STACKTRACE */ #endif /* CONFIG_TIMERLAT_TRACER */ @@ -866,21 +1054,9 @@ static void trace_softirq_exit_callback(void *data, unsigned int vec_nr) if (!osn_var->sampling) return; -#ifdef CONFIG_TIMERLAT_TRACER - /* - * If the timerlat is enabled, but the irq handler did - * not run yet enabling timerlat_tracer, do not trace. - */ - if (unlikely(osnoise_data.timerlat_tracer)) { - struct timerlat_variables *tlat_var; - tlat_var = this_cpu_tmr_var(); - if (!tlat_var->tracing_thread) { - osn_var->softirq.arrival_time = 0; - osn_var->softirq.delta_start = 0; + if (unlikely(timerlat_enabled())) + if (!timerlat_softirq_exit(osn_var)) return; - } - } -#endif duration = get_int_safe_duration(osn_var, &osn_var->softirq.delta_start); trace_softirq_noise(vec_nr, osn_var->softirq.arrival_time, duration); @@ -974,17 +1150,9 @@ thread_exit(struct osnoise_variables *osn_var, struct task_struct *t) if (!osn_var->sampling) return; -#ifdef CONFIG_TIMERLAT_TRACER - if (osnoise_data.timerlat_tracer) { - struct timerlat_variables *tlat_var; - tlat_var = this_cpu_tmr_var(); - if (!tlat_var->tracing_thread) { - osn_var->thread.delta_start = 0; - osn_var->thread.arrival_time = 0; + if (unlikely(timerlat_enabled())) + if (!timerlat_thread_exit(osn_var)) return; - } - } -#endif duration = get_int_safe_duration(osn_var, &osn_var->thread.delta_start); @@ -1077,12 +1245,37 @@ diff_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample * */ static __always_inline void osnoise_stop_tracing(void) { - struct trace_array *tr = osnoise_trace; + struct osnoise_instance *inst; + struct trace_array *tr; + + rcu_read_lock(); + list_for_each_entry_rcu(inst, &osnoise_instances, list) { + tr = inst->tr; + trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_, + "stop tracing hit on cpu %d\n", smp_processor_id()); + + tracer_tracing_off(tr); + } + rcu_read_unlock(); +} - trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_, - "stop tracing hit on cpu %d\n", smp_processor_id()); +/* + * notify_new_max_latency - Notify a new max latency via fsnotify interface. 
+ */ +static void notify_new_max_latency(u64 latency) +{ + struct osnoise_instance *inst; + struct trace_array *tr; - tracer_tracing_off(tr); + rcu_read_lock(); + list_for_each_entry_rcu(inst, &osnoise_instances, list) { + tr = inst->tr; + if (tr->max_latency < latency) { + tr->max_latency = latency; + latency_fsnotify(tr); + } + } + rcu_read_unlock(); } /* @@ -1096,7 +1289,6 @@ static __always_inline void osnoise_stop_tracing(void) static int run_osnoise(void) { struct osnoise_variables *osn_var = this_cpu_osn_var(); - struct trace_array *tr = osnoise_trace; u64 start, sample, last_sample; u64 last_int_count, int_count; s64 noise = 0, max_noise = 0; @@ -1231,11 +1423,7 @@ static int run_osnoise(void) trace_osnoise_sample(&s); - /* Keep a running maximum ever recorded osnoise "latency" */ - if (max_noise > tr->max_latency) { - tr->max_latency = max_noise; - latency_fsnotify(tr); - } + notify_new_max_latency(max_noise); if (osnoise_data.stop_tracing_total) if (s.noise > osnoise_data.stop_tracing_total) @@ -1293,7 +1481,6 @@ static int osnoise_main(void *data) static enum hrtimer_restart timerlat_irq(struct hrtimer *timer) { struct osnoise_variables *osn_var = this_cpu_osn_var(); - struct trace_array *tr = osnoise_trace; struct timerlat_variables *tlat; struct timerlat_sample s; u64 now; @@ -1332,9 +1519,11 @@ static enum hrtimer_restart timerlat_irq(struct hrtimer *timer) * running, the thread needs to receive the softirq delta_start. The * reason being is that the softirq will be the last to be unfolded, * resseting the thread delay to zero. + * + * The PREEMPT_RT is a special case, though. As softirqs run as threads + * on RT, moving the thread is enough. */ -#ifndef CONFIG_PREEMPT_RT - if (osn_var->softirq.delta_start) { + if (!IS_ENABLED(CONFIG_PREEMPT_RT) && osn_var->softirq.delta_start) { copy_int_safe_time(osn_var, &osn_var->thread.delta_start, &osn_var->softirq.delta_start); @@ -1344,13 +1533,6 @@ static enum hrtimer_restart timerlat_irq(struct hrtimer *timer) copy_int_safe_time(osn_var, &osn_var->thread.delta_start, &osn_var->irq.delta_start); } -#else /* CONFIG_PREEMPT_RT */ - /* - * The sofirqs run as threads on RT, so there is not need - * to keep track of it. - */ - copy_int_safe_time(osn_var, &osn_var->thread.delta_start, &osn_var->irq.delta_start); -#endif /* CONFIG_PREEMPT_RT */ /* * Compute the current time with the expected time. 
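
notify_new_max_latency() above replaces the single osnoise_trace max-latency update with a walk over every registered instance. A userspace mock of that shape — a plain array instead of the RCU-protected list, printf() in place of latency_fsnotify(), and invented instance names:

#include <stdio.h>

/* toy stand-ins for the registered osnoise/timerlat instances */
struct instance {
    const char *name;
    unsigned long long max_latency;
};

static struct instance instances[] = {
    { "trace",     0 },
    { "timerlat0", 0 },
};

/* every registered instance keeps its own running maximum */
static void notify_new_max_latency(unsigned long long latency)
{
    size_t i;

    for (i = 0; i < sizeof(instances) / sizeof(instances[0]); i++) {
        if (instances[i].max_latency < latency) {
            instances[i].max_latency = latency;
            /* printf() in place of latency_fsnotify() */
            printf("%s: new max latency %llu\n",
                   instances[i].name, latency);
        }
    }
}

int main(void)
{
    notify_new_max_latency(10);
    notify_new_max_latency(7);      /* below both maxima: nothing to report */
    instances[1].max_latency = 50;  /* instances can diverge over time */
    notify_new_max_latency(25);     /* only the first instance is updated */
    return 0;
}
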
@@ -1364,11 +1546,7 @@ static enum hrtimer_restart timerlat_irq(struct hrtimer *timer) trace_timerlat_sample(&s); - /* Keep a running maximum ever recorded os noise "latency" */ - if (diff > tr->max_latency) { - tr->max_latency = diff; - latency_fsnotify(tr); - } + notify_new_max_latency(diff); if (osnoise_data.stop_tracing) if (time_to_us(diff) >= osnoise_data.stop_tracing) @@ -1456,11 +1634,7 @@ static int timerlat_main(void *data) trace_timerlat_sample(&s); -#ifdef CONFIG_STACKTRACE - if (osnoise_data.print_stack) - if (osnoise_data.print_stack <= time_to_us(diff)) - timerlat_dump_stack(); -#endif /* CONFIG_STACKTRACE */ + timerlat_dump_stack(time_to_us(diff)); tlat->tracing_thread = false; if (osnoise_data.stop_tracing_total) @@ -1473,6 +1647,11 @@ static int timerlat_main(void *data) hrtimer_cancel(&tlat->timer); return 0; } +#else /* CONFIG_TIMERLAT_TRACER */ +static int timerlat_main(void *data) +{ + return 0; +} #endif /* CONFIG_TIMERLAT_TRACER */ /* @@ -1515,16 +1694,13 @@ static int start_kthread(unsigned int cpu) void *main = osnoise_main; char comm[24]; -#ifdef CONFIG_TIMERLAT_TRACER - if (osnoise_data.timerlat_tracer) { + if (timerlat_enabled()) { snprintf(comm, 24, "timerlat/%d", cpu); main = timerlat_main; } else { snprintf(comm, 24, "osnoise/%d", cpu); } -#else - snprintf(comm, 24, "osnoise/%d", cpu); -#endif + kthread = kthread_create_on_cpu(main, NULL, cpu, comm); if (IS_ERR(kthread)) { @@ -1545,7 +1721,7 @@ static int start_kthread(unsigned int cpu) * This starts the kernel thread that will look for osnoise on many * cpus. */ -static int start_per_cpu_kthreads(struct trace_array *tr) +static int start_per_cpu_kthreads(void) { struct cpumask *current_mask = &save_cpumask; int retval = 0; @@ -1553,13 +1729,9 @@ static int start_per_cpu_kthreads(struct trace_array *tr) cpus_read_lock(); /* - * Run only on CPUs in which trace and osnoise are allowed to run. - */ - cpumask_and(current_mask, tr->tracing_cpumask, &osnoise_cpumask); - /* - * And the CPU is online. + * Run only on online CPUs in which osnoise is allowed to run. */ - cpumask_and(current_mask, cpu_online_mask, current_mask); + cpumask_and(current_mask, cpu_online_mask, &osnoise_cpumask); for_each_possible_cpu(cpu) per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL; @@ -1580,13 +1752,11 @@ static int start_per_cpu_kthreads(struct trace_array *tr) #ifdef CONFIG_HOTPLUG_CPU static void osnoise_hotplug_workfn(struct work_struct *dummy) { - struct trace_array *tr = osnoise_trace; unsigned int cpu = smp_processor_id(); - mutex_lock(&trace_types_lock); - if (!osnoise_busy) + if (!osnoise_has_registered_instances()) goto out_unlock_trace; mutex_lock(&interface_lock); @@ -1595,9 +1765,6 @@ static void osnoise_hotplug_workfn(struct work_struct *dummy) if (!cpumask_test_cpu(cpu, &osnoise_cpumask)) goto out_unlock; - if (!cpumask_test_cpu(cpu, tr->tracing_cpumask)) - goto out_unlock; - start_kthread(cpu); out_unlock: @@ -1686,9 +1853,6 @@ out_unlock: return count; } -static void osnoise_tracer_start(struct trace_array *tr); -static void osnoise_tracer_stop(struct trace_array *tr); - /* * osnoise_cpus_write - Write function for "cpus" entry * @filp: The active open file structure @@ -1700,19 +1864,15 @@ static void osnoise_tracer_stop(struct trace_array *tr); * interface to the osnoise trace. By default, it lists all CPUs, * in this way, allowing osnoise threads to run on any online CPU * of the system. It serves to restrict the execution of osnoise to the - * set of CPUs writing via this interface. 
Note that osnoise also - * respects the "tracing_cpumask." Hence, osnoise threads will run only - * on the set of CPUs allowed here AND on "tracing_cpumask." Why not - * have just "tracing_cpumask?" Because the user might be interested - * in tracing what is running on other CPUs. For instance, one might - * run osnoise in one HT CPU while observing what is running on the - * sibling HT CPU. + * set of CPUs writing via this interface. Why not use "tracing_cpumask"? + * Because the user might be interested in tracing what is running on + * other CPUs. For instance, one might run osnoise in one HT CPU + * while observing what is running on the sibling HT CPU. */ static ssize_t osnoise_cpus_write(struct file *filp, const char __user *ubuf, size_t count, loff_t *ppos) { - struct trace_array *tr = osnoise_trace; cpumask_var_t osnoise_cpumask_new; int running, err; char buf[256]; @@ -1731,13 +1891,12 @@ osnoise_cpus_write(struct file *filp, const char __user *ubuf, size_t count, goto err_free; /* - * trace_types_lock is taken to avoid concurrency on start/stop - * and osnoise_busy. + * trace_types_lock is taken to avoid concurrency on start/stop. */ mutex_lock(&trace_types_lock); - running = osnoise_busy; + running = osnoise_has_registered_instances(); if (running) - osnoise_tracer_stop(tr); + stop_per_cpu_kthreads(); mutex_lock(&interface_lock); /* @@ -1751,7 +1910,7 @@ osnoise_cpus_write(struct file *filp, const char __user *ubuf, size_t count, mutex_unlock(&interface_lock); if (running) - osnoise_tracer_start(tr); + start_per_cpu_kthreads(); mutex_unlock(&trace_types_lock); free_cpumask_var(osnoise_cpumask_new); @@ -1835,6 +1994,47 @@ static const struct file_operations cpus_fops = { .llseek = generic_file_llseek, }; +#ifdef CONFIG_TIMERLAT_TRACER +#ifdef CONFIG_STACKTRACE +static int init_timerlat_stack_tracefs(struct dentry *top_dir) +{ + struct dentry *tmp; + + tmp = tracefs_create_file("print_stack", TRACE_MODE_WRITE, top_dir, + &osnoise_print_stack, &trace_min_max_fops); + if (!tmp) + return -ENOMEM; + + return 0; +} +#else /* CONFIG_STACKTRACE */ +static int init_timerlat_stack_tracefs(struct dentry *top_dir) +{ + return 0; +} +#endif /* CONFIG_STACKTRACE */ + +/* + * init_timerlat_tracefs - A function to initialize the timerlat interface files + */ +static int init_timerlat_tracefs(struct dentry *top_dir) +{ + struct dentry *tmp; + + tmp = tracefs_create_file("timerlat_period_us", TRACE_MODE_WRITE, top_dir, + &timerlat_period, &trace_min_max_fops); + if (!tmp) + return -ENOMEM; + + return init_timerlat_stack_tracefs(top_dir); +} +#else /* CONFIG_TIMERLAT_TRACER */ +static int init_timerlat_tracefs(struct dentry *top_dir) +{ + return 0; +} +#endif /* CONFIG_TIMERLAT_TRACER */ + /* * init_tracefs - A function to initialize the tracefs interface files * @@ -1856,42 +2056,33 @@ static int init_tracefs(void) if (!top_dir) return 0; - tmp = tracefs_create_file("period_us", 0640, top_dir, + tmp = tracefs_create_file("period_us", TRACE_MODE_WRITE, top_dir, &osnoise_period, &trace_min_max_fops); if (!tmp) goto err; - tmp = tracefs_create_file("runtime_us", 0644, top_dir, + tmp = tracefs_create_file("runtime_us", TRACE_MODE_WRITE, top_dir, &osnoise_runtime, &trace_min_max_fops); if (!tmp) goto err; - tmp = tracefs_create_file("stop_tracing_us", 0640, top_dir, + tmp = tracefs_create_file("stop_tracing_us", TRACE_MODE_WRITE, top_dir, &osnoise_stop_tracing_in, &trace_min_max_fops); if (!tmp) goto err; - tmp = tracefs_create_file("stop_tracing_total_us", 0640, top_dir, + tmp = 
tracefs_create_file("stop_tracing_total_us", TRACE_MODE_WRITE, top_dir, &osnoise_stop_tracing_total, &trace_min_max_fops); if (!tmp) goto err; - tmp = trace_create_file("cpus", 0644, top_dir, NULL, &cpus_fops); + tmp = trace_create_file("cpus", TRACE_MODE_WRITE, top_dir, NULL, &cpus_fops); if (!tmp) goto err; -#ifdef CONFIG_TIMERLAT_TRACER -#ifdef CONFIG_STACKTRACE - tmp = tracefs_create_file("print_stack", 0640, top_dir, - &osnoise_print_stack, &trace_min_max_fops); - if (!tmp) - goto err; -#endif - tmp = tracefs_create_file("timerlat_period_us", 0640, top_dir, - &timerlat_period, &trace_min_max_fops); - if (!tmp) + ret = init_timerlat_tracefs(top_dir); + if (ret) goto err; -#endif return 0; @@ -1932,74 +2123,110 @@ out_unhook_irq: return -EINVAL; } -static int __osnoise_tracer_start(struct trace_array *tr) +/* + * osnoise_workload_start - start the workload and hook to events + */ +static int osnoise_workload_start(void) { int retval; + /* + * Instances need to be registered after calling workload + * start. Hence, if there is already an instance, the + * workload was already registered. Otherwise, this + * code is on the way to register the first instance, + * and the workload will start. + */ + if (osnoise_has_registered_instances()) + return 0; + osn_var_reset_all(); retval = osnoise_hook_events(); if (retval) return retval; + /* - * Make sure NMIs see reseted values. + * Make sure that ftrace_nmi_enter/exit() see reset values + * before enabling trace_osnoise_callback_enabled. */ barrier(); trace_osnoise_callback_enabled = true; - retval = start_per_cpu_kthreads(tr); + retval = start_per_cpu_kthreads(); if (retval) { unhook_irq_events(); return retval; } - osnoise_busy = true; - return 0; } +/* + * osnoise_workload_stop - stop the workload and unhook the events + */ +static void osnoise_workload_stop(void) +{ + /* + * Instances need to be unregistered before calling + * stop. Hence, if there is a registered instance, more + * than one instance is running, and the workload will not + * yet stop. Otherwise, this code is on the way to disable + * the last instance, and the workload can stop. + */ + if (osnoise_has_registered_instances()) + return; + + trace_osnoise_callback_enabled = false; + /* + * Make sure that ftrace_nmi_enter/exit() see + * trace_osnoise_callback_enabled as false before continuing. + */ + barrier(); + + stop_per_cpu_kthreads(); + + unhook_irq_events(); + unhook_softirq_events(); + unhook_thread_events(); +} + static void osnoise_tracer_start(struct trace_array *tr) { int retval; - if (osnoise_busy) + /* + * If the instance is already registered, there is no need to + * register it again. + */ + if (osnoise_instance_registered(tr)) return; - retval = __osnoise_tracer_start(tr); + retval = osnoise_workload_start(); if (retval) pr_err(BANNER "Error starting osnoise tracer\n"); + osnoise_register_instance(tr); } static void osnoise_tracer_stop(struct trace_array *tr) { - if (!osnoise_busy) - return; - - trace_osnoise_callback_enabled = false; - barrier(); - - stop_per_cpu_kthreads(); - - unhook_irq_events(); - unhook_softirq_events(); - unhook_thread_events(); - - osnoise_busy = false; + osnoise_unregister_instance(tr); + osnoise_workload_stop(); } static int osnoise_tracer_init(struct trace_array *tr) { - - /* Only allow one instance to enable this */ - if (osnoise_busy) + /* + * Only allow osnoise tracer if timerlat tracer is not running + * already. 
+ */ + if (timerlat_enabled()) return -EBUSY; - osnoise_trace = tr; tr->max_latency = 0; osnoise_tracer_start(tr); - return 0; } @@ -2023,45 +2250,55 @@ static void timerlat_tracer_start(struct trace_array *tr) { int retval; - if (osnoise_busy) + /* + * If the instance is already registered, there is no need to + * register it again. + */ + if (osnoise_instance_registered(tr)) return; - osnoise_data.timerlat_tracer = 1; - - retval = __osnoise_tracer_start(tr); + retval = osnoise_workload_start(); if (retval) - goto out_err; + pr_err(BANNER "Error starting timerlat tracer\n"); + + osnoise_register_instance(tr); return; -out_err: - pr_err(BANNER "Error starting timerlat tracer\n"); } static void timerlat_tracer_stop(struct trace_array *tr) { int cpu; - if (!osnoise_busy) - return; - - for_each_online_cpu(cpu) - per_cpu(per_cpu_osnoise_var, cpu).sampling = 0; + osnoise_unregister_instance(tr); - osnoise_tracer_stop(tr); + /* + * Instruct the threads to stop only if this is the last instance. + */ + if (!osnoise_has_registered_instances()) { + for_each_online_cpu(cpu) + per_cpu(per_cpu_osnoise_var, cpu).sampling = 0; + } - osnoise_data.timerlat_tracer = 0; + osnoise_workload_stop(); } static int timerlat_tracer_init(struct trace_array *tr) { - /* Only allow one instance to enable this */ - if (osnoise_busy) + /* + * Only allow timerlat tracer if osnoise tracer is not running already. + */ + if (osnoise_has_registered_instances() && !osnoise_data.timerlat_tracer) return -EBUSY; - osnoise_trace = tr; + /* + * If this is the first instance, set timerlat_tracer to block + * osnoise tracer start. + */ + if (!osnoise_has_registered_instances()) + osnoise_data.timerlat_tracer = 1; tr->max_latency = 0; - timerlat_tracer_start(tr); return 0; @@ -2070,6 +2307,13 @@ static int timerlat_tracer_init(struct trace_array *tr) static void timerlat_tracer_reset(struct trace_array *tr) { timerlat_tracer_stop(tr); + + /* + * If this is the last instance, reset timerlat_tracer allowing + * osnoise to be started. + */ + if (!osnoise_has_registered_instances()) + osnoise_data.timerlat_tracer = 0; } static struct tracer timerlat_tracer __read_mostly = { @@ -2081,6 +2325,16 @@ static struct tracer timerlat_tracer __read_mostly = { .print_header = print_timerlat_headers, .allow_instances = true, }; + +__init static int init_timerlat_tracer(void) +{ + return register_tracer(&timerlat_tracer); +} +#else /* CONFIG_TIMERLAT_TRACER */ +__init static int init_timerlat_tracer(void) +{ + return 0; +} #endif /* CONFIG_TIMERLAT_TRACER */ __init static int init_osnoise_tracer(void) @@ -2097,15 +2351,16 @@ __init static int init_osnoise_tracer(void) return ret; } -#ifdef CONFIG_TIMERLAT_TRACER - ret = register_tracer(&timerlat_tracer); + ret = init_timerlat_tracer(); if (ret) { - pr_err(BANNER "Error registering timerlat\n"); + pr_err(BANNER "Error registering timerlat!\n"); return ret; } -#endif + osnoise_init_hotplug_support(); + INIT_LIST_HEAD_RCU(&osnoise_instances); + init_tracefs(); return 0; diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index c2ca40e8595b..3547e7176ff7 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -8,6 +8,7 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/ftrace.h> +#include <linux/kprobes.h> #include <linux/sched/clock.h> #include <linux/sched/mm.h> @@ -346,22 +347,12 @@ int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...) 
} EXPORT_SYMBOL_GPL(trace_output_call); -#ifdef CONFIG_KRETPROBES -static inline const char *kretprobed(const char *name) +static inline const char *kretprobed(const char *name, unsigned long addr) { - static const char tramp_name[] = "kretprobe_trampoline"; - int size = sizeof(tramp_name); - - if (strncmp(tramp_name, name, size) == 0) + if (is_kretprobe_trampoline(addr)) return "[unknown/kretprobe'd]"; return name; } -#else -static inline const char *kretprobed(const char *name) -{ - return name; -} -#endif /* CONFIG_KRETPROBES */ void trace_seq_print_sym(struct trace_seq *s, unsigned long address, bool offset) @@ -374,7 +365,7 @@ trace_seq_print_sym(struct trace_seq *s, unsigned long address, bool offset) sprint_symbol(str, address); else kallsyms_lookup(address, NULL, NULL, NULL, str); - name = kretprobed(str); + name = kretprobed(str, address); if (name && strlen(name)) { trace_seq_puts(s, name); diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index 4b320fe7df70..29f6e95439b6 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c @@ -384,7 +384,7 @@ static __init int init_trace_printk_function_export(void) if (ret) return 0; - trace_create_file("printk_formats", 0444, NULL, + trace_create_file("printk_formats", TRACE_MODE_READ, NULL, NULL, &ftrace_formats_fops); return 0; diff --git a/kernel/trace/trace_recursion_record.c b/kernel/trace/trace_recursion_record.c index b2edac1fe156..4d4b78c8ca25 100644 --- a/kernel/trace/trace_recursion_record.c +++ b/kernel/trace/trace_recursion_record.c @@ -226,8 +226,8 @@ __init static int create_recursed_functions(void) { struct dentry *dentry; - dentry = trace_create_file("recursed_functions", 0644, NULL, NULL, - &recursed_functions_fops); + dentry = trace_create_file("recursed_functions", TRACE_MODE_WRITE, + NULL, NULL, &recursed_functions_fops); if (!dentry) pr_warn("WARNING: Failed to create recursed_functions\n"); return 0; diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index adf7ef194005..afd937a46496 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -287,6 +287,40 @@ static int trace_selftest_ops(struct trace_array *tr, int cnt) if (trace_selftest_test_probe3_cnt != 4) goto out_free; + /* Remove trace function from probe 3 */ + func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME); + len1 = strlen(func1_name); + + ftrace_set_filter(&test_probe3, func1_name, len1, 0); + + DYN_FTRACE_TEST_NAME(); + + print_counts(); + + if (trace_selftest_test_probe1_cnt != 3) + goto out_free; + if (trace_selftest_test_probe2_cnt != 2) + goto out_free; + if (trace_selftest_test_probe3_cnt != 4) + goto out_free; + if (cnt > 1) { + if (trace_selftest_test_global_cnt == 0) + goto out_free; + } + if (trace_selftest_test_dyn_cnt == 0) + goto out_free; + + DYN_FTRACE_TEST_NAME2(); + + print_counts(); + + if (trace_selftest_test_probe1_cnt != 3) + goto out_free; + if (trace_selftest_test_probe2_cnt != 3) + goto out_free; + if (trace_selftest_test_probe3_cnt != 5) + goto out_free; + ret = 0; out_free: unregister_ftrace_function(dyn_ops); @@ -750,6 +784,12 @@ static struct fgraph_ops fgraph_ops __initdata = { .retfunc = &trace_graph_return, }; +#if defined(CONFIG_DYNAMIC_FTRACE) && \ + defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) +#define TEST_DIRECT_TRAMP +noinline __noclone static void trace_direct_tramp(void) { } +#endif + /* * Pretty much the same than for the function tracer from which the selftest * has been borrowed. 
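
Most hunks in this section replace literal 0444/0644 tracefs modes with TRACE_MODE_READ/TRACE_MODE_WRITE. The values in the sketch below are an assumption for illustration: the series appears to define them with the "other" permission bits cleared (0440/0640). The point of the conversion is that the permission policy then lives in one place instead of in dozens of scattered octal literals.

#include <stdio.h>

/* assumed definitions, mirroring what the series appears to use */
#define TRACE_MODE_WRITE 0640    /* replaces literal 0644 (and 0640) call sites */
#define TRACE_MODE_READ  0440    /* replaces literal 0444 call sites */

int main(void)
{
    printf("read-only tracefs files: %04o -> %04o\n", 0444, TRACE_MODE_READ);
    printf("writable tracefs files:  %04o -> %04o\n", 0644, TRACE_MODE_WRITE);
    return 0;
}
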
@@ -760,6 +800,7 @@ trace_selftest_startup_function_graph(struct tracer *trace, { int ret; unsigned long count; + char *func_name __maybe_unused; #ifdef CONFIG_DYNAMIC_FTRACE if (ftrace_filter_param) { @@ -808,8 +849,57 @@ trace_selftest_startup_function_graph(struct tracer *trace, goto out; } - /* Don't test dynamic tracing, the function tracer already did */ +#ifdef TEST_DIRECT_TRAMP + tracing_reset_online_cpus(&tr->array_buffer); + set_graph_array(tr); + + /* + * Some archs *cough*PowerPC*cough* add characters to the + * start of the function names. We simply put a '*' to + * accommodate them. + */ + func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); + ftrace_set_global_filter(func_name, strlen(func_name), 1); + + /* + * Register direct function together with graph tracer + * and make sure we get graph trace. + */ + ret = register_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME, + (unsigned long) trace_direct_tramp); + if (ret) + goto out; + + ret = register_ftrace_graph(&fgraph_ops); + if (ret) { + warn_failed_init_tracer(trace, ret); + goto out; + } + + DYN_FTRACE_TEST_NAME(); + + count = 0; + tracing_stop(); + /* check the trace buffer */ + ret = trace_test_buffer(&tr->array_buffer, &count); + + unregister_ftrace_graph(&fgraph_ops); + + ret = unregister_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME, + (unsigned long) trace_direct_tramp); + if (ret) + goto out; + + tracing_start(); + + if (!ret && !count) { + ret = -1; + goto out; + } +#endif + + /* Don't test dynamic tracing, the function tracer already did */ out: /* Stop it if we failed */ if (ret) diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 63c285042051..5a48dba912ea 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -559,14 +559,14 @@ static __init int stack_trace_init(void) if (ret) return 0; - trace_create_file("stack_max_size", 0644, NULL, + trace_create_file("stack_max_size", TRACE_MODE_WRITE, NULL, &stack_trace_max_size, &stack_max_size_fops); - trace_create_file("stack_trace", 0444, NULL, + trace_create_file("stack_trace", TRACE_MODE_READ, NULL, NULL, &stack_trace_fops); #ifdef CONFIG_DYNAMIC_FTRACE - trace_create_file("stack_trace_filter", 0644, NULL, + trace_create_file("stack_trace_filter", TRACE_MODE_WRITE, NULL, &trace_ops, &stack_trace_filter_fops); #endif diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c index 8d141c3825a9..bb247beec447 100644 --- a/kernel/trace/trace_stat.c +++ b/kernel/trace/trace_stat.c @@ -297,9 +297,9 @@ static int init_stat_file(struct stat_session *session) if (!stat_dir && (ret = tracing_stat_init())) return ret; - session->file = tracefs_create_file(session->ts->name, 0644, - stat_dir, - session, &tracing_stat_fops); + session->file = tracefs_create_file(session->ts->name, TRACE_MODE_WRITE, + stat_dir, session, + &tracing_stat_fops); if (!session->file) return -ENOMEM; return 0; diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 225ce569bf8f..0a5c0db3137e 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -1655,10 +1655,10 @@ static __init int init_uprobe_trace(void) if (ret) return 0; - trace_create_file("uprobe_events", 0644, NULL, + trace_create_file("uprobe_events", TRACE_MODE_WRITE, NULL, NULL, &uprobe_events_ops); /* Profile interface */ - trace_create_file("uprobe_profile", 0444, NULL, + trace_create_file("uprobe_profile", TRACE_MODE_READ, NULL, NULL, &uprobe_profile_ops); return 0; } diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c 
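Before the tracing_map.c diff starts below: the 0444/0644 to TRACE_MODE_READ/TRACE_MODE_WRITE conversions running through trace_printk.c, trace_recursion_record.c, trace_stack.c, trace_stat.c and trace_uprobe.c replace hard-coded octal modes with named constants, so the permissions of every tracefs file can be tightened in one place. A sketch of the idea; the real definitions live in kernel/trace/trace.h, and the 0440/0640 values shown (dropping the world-accessible bits) are assumed here rather than quoted from this series:

/* kernel/trace/trace.h, sketched: one knob for all tracefs file modes */
#define TRACE_MODE_READ		0440	/* assumed: owner + group may read */
#define TRACE_MODE_WRITE	0640	/* assumed: owner rw, group read   */

/* call sites then stop open-coding 0444/0644, as in the hunks above: */
static __init void create_stack_files(void)
{
	trace_create_file("stack_trace", TRACE_MODE_READ, NULL,
			  NULL, &stack_trace_fops);
}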
index d6bddb157ef2..39bb56d2dcbe 100644 --- a/kernel/trace/tracing_map.c +++ b/kernel/trace/tracing_map.c @@ -834,29 +834,35 @@ int tracing_map_init(struct tracing_map *map) return err; } -static int cmp_entries_dup(const struct tracing_map_sort_entry **a, - const struct tracing_map_sort_entry **b) +static int cmp_entries_dup(const void *A, const void *B) { + const struct tracing_map_sort_entry *a, *b; int ret = 0; - if (memcmp((*a)->key, (*b)->key, (*a)->elt->map->key_size)) + a = *(const struct tracing_map_sort_entry **)A; + b = *(const struct tracing_map_sort_entry **)B; + + if (memcmp(a->key, b->key, a->elt->map->key_size)) ret = 1; return ret; } -static int cmp_entries_sum(const struct tracing_map_sort_entry **a, - const struct tracing_map_sort_entry **b) +static int cmp_entries_sum(const void *A, const void *B) { const struct tracing_map_elt *elt_a, *elt_b; + const struct tracing_map_sort_entry *a, *b; struct tracing_map_sort_key *sort_key; struct tracing_map_field *field; tracing_map_cmp_fn_t cmp_fn; void *val_a, *val_b; int ret = 0; - elt_a = (*a)->elt; - elt_b = (*b)->elt; + a = *(const struct tracing_map_sort_entry **)A; + b = *(const struct tracing_map_sort_entry **)B; + + elt_a = a->elt; + elt_b = b->elt; sort_key = &elt_a->map->sort_key; @@ -873,18 +879,21 @@ static int cmp_entries_sum(const struct tracing_map_sort_entry **a, return ret; } -static int cmp_entries_key(const struct tracing_map_sort_entry **a, - const struct tracing_map_sort_entry **b) +static int cmp_entries_key(const void *A, const void *B) { const struct tracing_map_elt *elt_a, *elt_b; + const struct tracing_map_sort_entry *a, *b; struct tracing_map_sort_key *sort_key; struct tracing_map_field *field; tracing_map_cmp_fn_t cmp_fn; void *val_a, *val_b; int ret = 0; - elt_a = (*a)->elt; - elt_b = (*b)->elt; + a = *(const struct tracing_map_sort_entry **)A; + b = *(const struct tracing_map_sort_entry **)B; + + elt_a = a->elt; + elt_b = b->elt; sort_key = &elt_a->map->sort_key; @@ -989,10 +998,8 @@ static void sort_secondary(struct tracing_map *map, struct tracing_map_sort_key *primary_key, struct tracing_map_sort_key *secondary_key) { - int (*primary_fn)(const struct tracing_map_sort_entry **, - const struct tracing_map_sort_entry **); - int (*secondary_fn)(const struct tracing_map_sort_entry **, - const struct tracing_map_sort_entry **); + int (*primary_fn)(const void *, const void *); + int (*secondary_fn)(const void *, const void *); unsigned i, start = 0, n_sub = 1; if (is_key(map, primary_key->field_idx)) @@ -1061,8 +1068,7 @@ int tracing_map_sort_entries(struct tracing_map *map, unsigned int n_sort_keys, struct tracing_map_sort_entry ***sort_entries) { - int (*cmp_entries_fn)(const struct tracing_map_sort_entry **, - const struct tracing_map_sort_entry **); + int (*cmp_entries_fn)(const void *, const void *); struct tracing_map_sort_entry *sort_entry, **entries; int i, n_entries, ret; diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 257ffb993ea2..f00de83d0246 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -137,7 +137,7 @@ static void __acct_update_integrals(struct task_struct *tsk, * the rest of the math is done in xacct_add_tsk. 
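Stepping back to the tracing_map.c hunks above (the tsacct.c hunk completes just below): these comparators are handed to the kernel's sort(), which, like qsort(), wants int (*)(const void *, const void *). Calling them through a cast to the old double-pointer prototype is an indirect call through a mismatched function type, undefined behaviour that Control-Flow Integrity builds reject, so the casts now happen inside the comparator bodies instead. A self-contained userspace analogue with invented names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sort_entry {
	const char *key;
};

/* Matches qsort()'s expected prototype; the element type is recovered by
 * casting the void pointers inside, not by casting the function itself. */
static int cmp_entries(const void *A, const void *B)
{
	const struct sort_entry *a = *(const struct sort_entry **)A;
	const struct sort_entry *b = *(const struct sort_entry **)B;

	return strcmp(a->key, b->key);
}

int main(void)
{
	struct sort_entry e1 = { .key = "timerlat" };
	struct sort_entry e2 = { .key = "osnoise" };
	struct sort_entry *entries[] = { &e1, &e2 };

	qsort(entries, 2, sizeof(entries[0]), cmp_entries);
	printf("%s %s\n", entries[0]->key, entries[1]->key);
	return 0;
}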
*/ tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm) >> 10; - tsk->acct_vm_mem1 += delta * tsk->mm->total_vm >> 10; + tsk->acct_vm_mem1 += delta * READ_ONCE(tsk->mm->total_vm) >> 10; } /** diff --git a/kernel/ucount.c b/kernel/ucount.c index eb03f3c68375..4f5613dac227 100644 --- a/kernel/ucount.c +++ b/kernel/ucount.c @@ -150,9 +150,15 @@ static void hlist_add_ucounts(struct ucounts *ucounts) spin_unlock_irq(&ucounts_lock); } +static inline bool get_ucounts_or_wrap(struct ucounts *ucounts) +{ + /* Returns true on a successful get, false if the count wraps. */ + return !atomic_add_negative(1, &ucounts->count); +} + struct ucounts *get_ucounts(struct ucounts *ucounts) { - if (ucounts && atomic_add_negative(1, &ucounts->count)) { + if (!get_ucounts_or_wrap(ucounts)) { put_ucounts(ucounts); ucounts = NULL; } @@ -163,7 +169,7 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid) { struct hlist_head *hashent = ucounts_hashentry(ns, uid); struct ucounts *ucounts, *new; - long overflow; + bool wrapped; spin_lock_irq(&ucounts_lock); ucounts = find_ucounts(ns, uid, hashent); @@ -188,9 +194,9 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid) return new; } } - overflow = atomic_add_negative(1, &ucounts->count); + wrapped = !get_ucounts_or_wrap(ucounts); spin_unlock_irq(&ucounts_lock); - if (overflow) { + if (wrapped) { put_ucounts(ucounts); return NULL; } @@ -276,7 +282,7 @@ bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v) struct ucounts *iter; long new = -1; /* Silence compiler warning */ for (iter = ucounts; iter; iter = iter->ns->ucounts) { - long dec = atomic_long_add_return(-v, &iter->ucount[type]); + long dec = atomic_long_sub_return(v, &iter->ucount[type]); WARN_ON_ONCE(dec < 0); if (iter == ucounts) new = dec; @@ -289,7 +295,7 @@ static void do_dec_rlimit_put_ucounts(struct ucounts *ucounts, { struct ucounts *iter, *next; for (iter = ucounts; iter != last; iter = next) { - long dec = atomic_long_add_return(-1, &iter->ucount[type]); + long dec = atomic_long_sub_return(1, &iter->ucount[type]); WARN_ON_ONCE(dec < 0); next = iter->ns->ucounts; if (dec == 0) @@ -326,7 +332,7 @@ long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type) } return ret; dec_unwind: - dec = atomic_long_add_return(-1, &iter->ucount[type]); + dec = atomic_long_sub_return(1, &iter->ucount[type]); WARN_ON_ONCE(dec < 0); unwind: do_dec_rlimit_put_ucounts(ucounts, iter, type); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 1b3eb1e9531f..613917bbc4e7 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -375,6 +375,7 @@ EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); static int worker_thread(void *__worker); static void workqueue_sysfs_unregister(struct workqueue_struct *wq); static void show_pwq(struct pool_workqueue *pwq); +static void show_one_worker_pool(struct worker_pool *pool); #define CREATE_TRACE_POINTS #include <trace/events/workqueue.h> @@ -1350,7 +1351,7 @@ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, struct worker_pool *pool = pwq->pool; /* record the work call stack in order to print it in KASAN reports */ - kasan_record_aux_stack(work); + kasan_record_aux_stack_noalloc(work); /* we own @work, set data and link */ set_work_pwq(work, pwq, extra_flags); @@ -4447,7 +4448,7 @@ void destroy_workqueue(struct workqueue_struct *wq) raw_spin_unlock_irq(&pwq->pool->lock); mutex_unlock(&wq->mutex); mutex_unlock(&wq_pool_mutex); - show_workqueue_state(); + show_one_workqueue(wq); 
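Before the workqueue.c changes continue: in the ucount.c hunks above, get_ucounts_or_wrap() gives both "get" paths a single way to detect a reference count that has overflowed into the sign bit (atomic_add_negative() reports a negative result after the increment), and the atomic_long_add_return(-v, ...) calls become atomic_long_sub_return(v, ...), which is behaviourally identical but reads as the decrement it is. A small userspace C11 sketch of the wrap check, names invented:

#include <stdatomic.h>
#include <stdbool.h>

struct counted {
	atomic_long count;	/* reference count shared between threads */
};

/*
 * Mirrors !atomic_add_negative(1, &count) in the kernel: take a
 * reference and report failure if the new value went negative, i.e.
 * the counter wrapped.  The caller is expected to drop the reference
 * it just took and bail out, exactly as get_ucounts() does above.
 */
static bool get_or_wrap(struct counted *c)
{
	long new = atomic_fetch_add(&c->count, 1) + 1;

	return new >= 0;
}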
return; } raw_spin_unlock_irq(&pwq->pool->lock); @@ -4797,97 +4798,116 @@ static void show_pwq(struct pool_workqueue *pwq) } /** - * show_workqueue_state - dump workqueue state - * - * Called from a sysrq handler or try_to_freeze_tasks() and prints out - * all busy workqueues and pools. + * show_one_workqueue - dump state of specified workqueue + * @wq: workqueue whose state will be printed */ -void show_workqueue_state(void) +void show_one_workqueue(struct workqueue_struct *wq) { - struct workqueue_struct *wq; - struct worker_pool *pool; + struct pool_workqueue *pwq; + bool idle = true; unsigned long flags; - int pi; - - rcu_read_lock(); - - pr_info("Showing busy workqueues and worker pools:\n"); - - list_for_each_entry_rcu(wq, &workqueues, list) { - struct pool_workqueue *pwq; - bool idle = true; - for_each_pwq(pwq, wq) { - if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { - idle = false; - break; - } + for_each_pwq(pwq, wq) { + if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { + idle = false; + break; } - if (idle) - continue; + } + if (idle) /* Nothing to print for idle workqueue */ + return; - pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); + pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); - for_each_pwq(pwq, wq) { - raw_spin_lock_irqsave(&pwq->pool->lock, flags); - if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { - /* - * Defer printing to avoid deadlocks in console - * drivers that queue work while holding locks - * also taken in their write paths. - */ - printk_deferred_enter(); - show_pwq(pwq); - printk_deferred_exit(); - } - raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); + for_each_pwq(pwq, wq) { + raw_spin_lock_irqsave(&pwq->pool->lock, flags); + if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { /* - * We could be printing a lot from atomic context, e.g. - * sysrq-t -> show_workqueue_state(). Avoid triggering - * hard lockup. + * Defer printing to avoid deadlocks in console + * drivers that queue work while holding locks + * also taken in their write paths. */ - touch_nmi_watchdog(); - } - } - - for_each_pool(pool, pi) { - struct worker *worker; - bool first = true; - - raw_spin_lock_irqsave(&pool->lock, flags); - if (pool->nr_workers == pool->nr_idle) - goto next_pool; - /* - * Defer printing to avoid deadlocks in console drivers that - * queue work while holding locks also taken in their write - * paths. - */ - printk_deferred_enter(); - pr_info("pool %d:", pool->id); - pr_cont_pool_info(pool); - pr_cont(" hung=%us workers=%d", - jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000, - pool->nr_workers); - if (pool->manager) - pr_cont(" manager: %d", - task_pid_nr(pool->manager->task)); - list_for_each_entry(worker, &pool->idle_list, entry) { - pr_cont(" %s%d", first ? "idle: " : "", - task_pid_nr(worker->task)); - first = false; + printk_deferred_enter(); + show_pwq(pwq); + printk_deferred_exit(); } - pr_cont("\n"); - printk_deferred_exit(); - next_pool: - raw_spin_unlock_irqrestore(&pool->lock, flags); + raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); /* * We could be printing a lot from atomic context, e.g. - * sysrq-t -> show_workqueue_state(). Avoid triggering + * sysrq-t -> show_all_workqueues(). Avoid triggering * hard lockup. 
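The show_one_workqueue()/show_one_worker_pool() split above (the comment being added here closes just below) keeps two defensive habits from the old show_workqueue_state(): console output is bracketed by printk_deferred_enter()/printk_deferred_exit() while the pool lock is held, since console drivers may queue work and take those same locks in their write paths, and touch_nmi_watchdog() is called after each object because a full dump from atomic context (e.g. sysrq-t) can run long enough to trip the hard-lockup detector. Condensed to its shape, with the real per-pool reporting elided:

/* Sketch only: locking and ordering follow the hunks above; the body is
 * reduced to the deferred-printk / watchdog-petting pattern. */
static void dump_one_pool(struct worker_pool *pool)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pool->lock, flags);

	printk_deferred_enter();	/* no console drivers under pool->lock */
	pr_info("pool %d: workers=%d\n", pool->id, pool->nr_workers);
	printk_deferred_exit();

	raw_spin_unlock_irqrestore(&pool->lock, flags);

	touch_nmi_watchdog();		/* dumping many pools can take a while */
}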
*/ touch_nmi_watchdog(); } +} + +/** + * show_one_worker_pool - dump state of specified worker pool + * @pool: worker pool whose state will be printed + */ +static void show_one_worker_pool(struct worker_pool *pool) +{ + struct worker *worker; + bool first = true; + unsigned long flags; + + raw_spin_lock_irqsave(&pool->lock, flags); + if (pool->nr_workers == pool->nr_idle) + goto next_pool; + /* + * Defer printing to avoid deadlocks in console drivers that + * queue work while holding locks also taken in their write + * paths. + */ + printk_deferred_enter(); + pr_info("pool %d:", pool->id); + pr_cont_pool_info(pool); + pr_cont(" hung=%us workers=%d", + jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000, + pool->nr_workers); + if (pool->manager) + pr_cont(" manager: %d", + task_pid_nr(pool->manager->task)); + list_for_each_entry(worker, &pool->idle_list, entry) { + pr_cont(" %s%d", first ? "idle: " : "", + task_pid_nr(worker->task)); + first = false; + } + pr_cont("\n"); + printk_deferred_exit(); +next_pool: + raw_spin_unlock_irqrestore(&pool->lock, flags); + /* + * We could be printing a lot from atomic context, e.g. + * sysrq-t -> show_all_workqueues(). Avoid triggering + * hard lockup. + */ + touch_nmi_watchdog(); + +} + +/** + * show_all_workqueues - dump workqueue state + * + * Called from a sysrq handler or try_to_freeze_tasks() and prints out + * all busy workqueues and pools. + */ +void show_all_workqueues(void) +{ + struct workqueue_struct *wq; + struct worker_pool *pool; + int pi; + + rcu_read_lock(); + + pr_info("Showing busy workqueues and worker pools:\n"); + + list_for_each_entry_rcu(wq, &workqueues, list) + show_one_workqueue(wq); + + for_each_pool(pool, pi) + show_one_worker_pool(pool); + rcu_read_unlock(); } @@ -5384,9 +5404,6 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask) int ret = -EINVAL; cpumask_var_t saved_cpumask; - if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) - return -ENOMEM; - /* * Not excluding isolated cpus on purpose. * If the user wishes to include them, we allow that. @@ -5394,6 +5411,15 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask) cpumask_and(cpumask, cpumask, cpu_possible_mask); if (!cpumask_empty(cpumask)) { apply_wqattrs_lock(); + if (cpumask_equal(cpumask, wq_unbound_cpumask)) { + ret = 0; + goto out_unlock; + } + + if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) { + ret = -ENOMEM; + goto out_unlock; + } /* save the old wq_unbound_cpumask. */ cpumask_copy(saved_cpumask, wq_unbound_cpumask); @@ -5406,10 +5432,11 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask) if (ret < 0) cpumask_copy(wq_unbound_cpumask, saved_cpumask); + free_cpumask_var(saved_cpumask); +out_unlock: apply_wqattrs_unlock(); } - free_cpumask_var(saved_cpumask); return ret; } @@ -5869,7 +5896,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused) rcu_read_unlock(); if (lockup_detected) - show_workqueue_state(); + show_all_workqueues(); wq_watchdog_reset_touched(); mod_timer(&wq_watchdog_timer, jiffies + thresh); |
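Finally, the workqueue_set_unbound_cpumask() rework above does three related things: it returns success immediately when the requested mask already equals wq_unbound_cpumask, it allocates the temporary saved_cpumask only once it is actually needed (and while apply_wqattrs_lock() is held), and it funnels every exit through one out_unlock label so the lock is always released and the temporary is freed only when it was allocated. A reduced sketch of that control flow; apply_new_mask() is a hypothetical stand-in for the real attribute-application step:

static int set_unbound_cpumask(cpumask_var_t cpumask)
{
	cpumask_var_t saved;
	int ret = -EINVAL;

	cpumask_and(cpumask, cpumask, cpu_possible_mask);
	if (cpumask_empty(cpumask))
		return ret;

	apply_wqattrs_lock();

	if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
		ret = 0;				/* nothing to change */
		goto out_unlock;
	}

	if (!zalloc_cpumask_var(&saved, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	cpumask_copy(saved, wq_unbound_cpumask);	/* keep a rollback copy */
	cpumask_copy(wq_unbound_cpumask, cpumask);

	ret = apply_new_mask();				/* hypothetical helper */
	if (ret < 0)
		cpumask_copy(wq_unbound_cpumask, saved);	/* roll back */

	free_cpumask_var(saved);
out_unlock:
	apply_wqattrs_unlock();
	return ret;
}

The watchdog path right above likewise switches to the new show_all_workqueues() name.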