Diffstat (limited to 'kernel/module')
-rw-r--r--  kernel/module/internal.h     |  11
-rw-r--r--  kernel/module/kallsyms.c     |  73
-rw-r--r--  kernel/module/main.c         | 199
-rw-r--r--  kernel/module/strict_rwx.c   |   9
-rw-r--r--  kernel/module/tracking.c     |   2
-rw-r--r--  kernel/module/tree_lookup.c  |   8
-rw-r--r--  kernel/module/version.c      |  14
7 files changed, 116 insertions(+), 200 deletions(-)
diff --git a/kernel/module/internal.h b/kernel/module/internal.h
index d09b46ef032f..626cf8668a7e 100644
--- a/kernel/module/internal.h
+++ b/kernel/module/internal.h
@@ -124,17 +124,6 @@ char *module_next_tag_pair(char *string, unsigned long *secsize);
#define for_each_modinfo_entry(entry, info, name) \
for (entry = get_modinfo(info, name); entry; entry = get_next_modinfo(info, name, entry))
-static inline void module_assert_mutex_or_preempt(void)
-{
-#ifdef CONFIG_LOCKDEP
- if (unlikely(!debug_locks))
- return;
-
- WARN_ON_ONCE(!rcu_read_lock_sched_held() &&
- !lockdep_is_held(&module_mutex));
-#endif
-}
-
static inline unsigned long kernel_symbol_value(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
diff --git a/kernel/module/kallsyms.c b/kernel/module/kallsyms.c
index bf65e0c3c86f..00a60796327c 100644
--- a/kernel/module/kallsyms.c
+++ b/kernel/module/kallsyms.c
@@ -177,19 +177,15 @@ void add_kallsyms(struct module *mod, const struct load_info *info)
unsigned long strtab_size;
void *data_base = mod->mem[MOD_DATA].base;
void *init_data_base = mod->mem[MOD_INIT_DATA].base;
+ struct mod_kallsyms *kallsyms;
- /* Set up to point into init section. */
- mod->kallsyms = (void __rcu *)init_data_base +
- info->mod_kallsyms_init_off;
+ kallsyms = init_data_base + info->mod_kallsyms_init_off;
- rcu_read_lock();
- /* The following is safe since this pointer cannot change */
- rcu_dereference(mod->kallsyms)->symtab = (void *)symsec->sh_addr;
- rcu_dereference(mod->kallsyms)->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
+ kallsyms->symtab = (void *)symsec->sh_addr;
+ kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
/* Make sure we get permanent strtab: don't use info->strtab. */
- rcu_dereference(mod->kallsyms)->strtab =
- (void *)info->sechdrs[info->index.str].sh_addr;
- rcu_dereference(mod->kallsyms)->typetab = init_data_base + info->init_typeoffs;
+ kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
+ kallsyms->typetab = init_data_base + info->init_typeoffs;
/*
* Now populate the cut down core kallsyms for after init
@@ -199,20 +195,19 @@ void add_kallsyms(struct module *mod, const struct load_info *info)
mod->core_kallsyms.strtab = s = data_base + info->stroffs;
mod->core_kallsyms.typetab = data_base + info->core_typeoffs;
strtab_size = info->core_typeoffs - info->stroffs;
- src = rcu_dereference(mod->kallsyms)->symtab;
- for (ndst = i = 0; i < rcu_dereference(mod->kallsyms)->num_symtab; i++) {
- rcu_dereference(mod->kallsyms)->typetab[i] = elf_type(src + i, info);
+ src = kallsyms->symtab;
+ for (ndst = i = 0; i < kallsyms->num_symtab; i++) {
+ kallsyms->typetab[i] = elf_type(src + i, info);
if (i == 0 || is_livepatch_module(mod) ||
is_core_symbol(src + i, info->sechdrs, info->hdr->e_shnum,
info->index.pcpu)) {
ssize_t ret;
mod->core_kallsyms.typetab[ndst] =
- rcu_dereference(mod->kallsyms)->typetab[i];
+ kallsyms->typetab[i];
dst[ndst] = src[i];
dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
- ret = strscpy(s,
- &rcu_dereference(mod->kallsyms)->strtab[src[i].st_name],
+ ret = strscpy(s, &kallsyms->strtab[src[i].st_name],
strtab_size);
if (ret < 0)
break;
@@ -220,7 +215,9 @@ void add_kallsyms(struct module *mod, const struct load_info *info)
strtab_size -= ret + 1;
}
}
- rcu_read_unlock();
+
+ /* Set up to point into init section. */
+ rcu_assign_pointer(mod->kallsyms, kallsyms);
mod->core_kallsyms.num_symtab = ndst;
}
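
The rewrite of add_kallsyms() above is the canonical RCU publish pattern: the structure is fully initialized through a plain pointer, and only then published with rcu_assign_pointer(), whose ordering guarantees that a reader doing rcu_dereference() never sees a half-initialized object. A minimal sketch of the pattern, with hypothetical names:

	struct foo {
		int val;
	};

	static struct foo __rcu *foo_ptr;

	static void publish(struct foo *f)
	{
		f->val = 42;			/* initialize in private... */
		rcu_assign_pointer(foo_ptr, f);	/* ...then publish: this orders
						 * the store to ->val before
						 * the store to foo_ptr */
	}

	static int read_val(void)
	{
		struct foo *f;
		int ret = -1;

		rcu_read_lock();
		f = rcu_dereference(foo_ptr);	/* paired with the publish */
		if (f)
			ret = f->val;
		rcu_read_unlock();
		return ret;
	}
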
@@ -260,7 +257,7 @@ static const char *find_kallsyms_symbol(struct module *mod,
{
unsigned int i, best = 0;
unsigned long nextval, bestval;
- struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
+ struct mod_kallsyms *kallsyms = rcu_dereference(mod->kallsyms);
struct module_memory *mod_mem;
/* At worse, next value is at end of module */
@@ -319,7 +316,7 @@ void * __weak dereference_module_function_descriptor(struct module *mod,
/*
* For kallsyms to ask for address resolution. NULL means not found. Careful
- * not to lock to avoid deadlock on oopses, simply disable preemption.
+ * not to lock to avoid deadlock on oopses; RCU is enough.
*/
int module_address_lookup(unsigned long addr,
unsigned long *size,
@@ -332,7 +329,7 @@ int module_address_lookup(unsigned long addr,
int ret = 0;
struct module *mod;
- preempt_disable();
+ guard(rcu)();
mod = __module_address(addr);
if (mod) {
if (modname)
@@ -350,8 +347,6 @@ int module_address_lookup(unsigned long addr,
if (sym)
ret = strscpy(namebuf, sym, KSYM_NAME_LEN);
}
- preempt_enable();
-
return ret;
}
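
The guard(rcu)() conversions throughout this patch rely on the scope-based guards from <linux/cleanup.h>: the guard takes rcu_read_lock() where it is declared and runs rcu_read_unlock() automatically when the enclosing scope is left, on every return path, which is why the explicit unlock calls above can simply disappear. A minimal sketch of the idiom (hypothetical function and helper):

	#include <linux/cleanup.h>
	#include <linux/rcupdate.h>

	static int lookup_example(unsigned long addr)
	{
		guard(rcu)();		/* rcu_read_lock() here */

		if (!addr)
			return -EINVAL;	/* rcu_read_unlock() runs automatically */

		return do_lookup(addr);	/* hypothetical helper; the unlock
					 * runs on this path as well */
	}
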
@@ -359,7 +354,7 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
{
struct module *mod;
- preempt_disable();
+ guard(rcu)();
list_for_each_entry_rcu(mod, &modules, list) {
if (mod->state == MODULE_STATE_UNFORMED)
continue;
@@ -371,12 +366,10 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
goto out;
strscpy(symname, sym, KSYM_NAME_LEN);
- preempt_enable();
return 0;
}
}
out:
- preempt_enable();
return -ERANGE;
}
@@ -385,13 +378,13 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
{
struct module *mod;
- preempt_disable();
+ guard(rcu)();
list_for_each_entry_rcu(mod, &modules, list) {
struct mod_kallsyms *kallsyms;
if (mod->state == MODULE_STATE_UNFORMED)
continue;
- kallsyms = rcu_dereference_sched(mod->kallsyms);
+ kallsyms = rcu_dereference(mod->kallsyms);
if (symnum < kallsyms->num_symtab) {
const Elf_Sym *sym = &kallsyms->symtab[symnum];
@@ -400,12 +393,10 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
strscpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN);
strscpy(module_name, mod->name, MODULE_NAME_LEN);
*exported = is_exported(name, *value, mod);
- preempt_enable();
return 0;
}
symnum -= kallsyms->num_symtab;
}
- preempt_enable();
return -ERANGE;
}
@@ -413,7 +404,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
static unsigned long __find_kallsyms_symbol_value(struct module *mod, const char *name)
{
unsigned int i;
- struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
+ struct mod_kallsyms *kallsyms = rcu_dereference(mod->kallsyms);
for (i = 0; i < kallsyms->num_symtab; i++) {
const Elf_Sym *sym = &kallsyms->symtab[i];
@@ -453,23 +444,15 @@ static unsigned long __module_kallsyms_lookup_name(const char *name)
/* Look for this name: can be of form module:name. */
unsigned long module_kallsyms_lookup_name(const char *name)
{
- unsigned long ret;
-
/* Don't lock: we're in enough trouble already. */
- preempt_disable();
- ret = __module_kallsyms_lookup_name(name);
- preempt_enable();
- return ret;
+ guard(rcu)();
+ return __module_kallsyms_lookup_name(name);
}
unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name)
{
- unsigned long ret;
-
- preempt_disable();
- ret = __find_kallsyms_symbol_value(mod, name);
- preempt_enable();
- return ret;
+ guard(rcu)();
+ return __find_kallsyms_symbol_value(mod, name);
}
int module_kallsyms_on_each_symbol(const char *modname,
@@ -490,10 +473,8 @@ int module_kallsyms_on_each_symbol(const char *modname,
if (modname && strcmp(modname, mod->name))
continue;
- /* Use rcu_dereference_sched() to remain compliant with the sparse tool */
- preempt_disable();
- kallsyms = rcu_dereference_sched(mod->kallsyms);
- preempt_enable();
+ kallsyms = rcu_dereference_check(mod->kallsyms,
+ lockdep_is_held(&module_mutex));
for (i = 0; i < kallsyms->num_symtab; i++) {
const Elf_Sym *sym = &kallsyms->symtab[i];
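
module_kallsyms_on_each_symbol() is only called with module_mutex held, so rather than wrapping the dereference in a throwaway preempt-disabled section just to placate sparse, the final hunk above tells lockdep about the real protection via rcu_dereference_check(). The general shape of that annotation, with hypothetical names:

	struct cfg;

	static DEFINE_MUTEX(cfg_mutex);
	static struct cfg __rcu *cfg_ptr;

	static void walk_cfg_locked(void)
	{
		struct cfg *c;

		lockdep_assert_held(&cfg_mutex);
		/* Legal either inside an RCU read section or with cfg_mutex
		 * held; lockdep flags any caller that holds neither. */
		c = rcu_dereference_check(cfg_ptr, lockdep_is_held(&cfg_mutex));
		/* ... walk c ... */
	}
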
diff --git a/kernel/module/main.c b/kernel/module/main.c
index 1fb9ad289a6f..a2859dc3eea6 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -67,7 +67,7 @@
/*
* Mutex protects:
- * 1) List of modules (also safely readable with preempt_disable),
+ * 1) List of modules (also safely readable within an RCU read section),
* 2) module_use links,
* 3) mod_tree.addr_min/mod_tree.addr_max.
* (delete and add uses RCU list operations).
@@ -331,7 +331,7 @@ static bool find_exported_symbol_in_section(const struct symsearch *syms,
/*
* Find an exported symbol and return it, along with, (optional) crc and
- * (optional) module which owns it. Needs preempt disabled or module_mutex.
+ * (optional) module which owns it. Needs RCU or module_mutex.
*/
bool find_symbol(struct find_symbol_arg *fsa)
{
@@ -345,8 +345,6 @@ bool find_symbol(struct find_symbol_arg *fsa)
struct module *mod;
unsigned int i;
- module_assert_mutex_or_preempt();
-
for (i = 0; i < ARRAY_SIZE(arr); i++)
if (find_exported_symbol_in_section(&arr[i], NULL, fsa))
return true;
@@ -374,16 +372,14 @@ bool find_symbol(struct find_symbol_arg *fsa)
}
/*
- * Search for module by name: must hold module_mutex (or preempt disabled
- * for read-only access).
+ * Search for module by name: must hold module_mutex (or RCU for read-only
+ * access).
*/
struct module *find_module_all(const char *name, size_t len,
bool even_unformed)
{
struct module *mod;
- module_assert_mutex_or_preempt();
-
list_for_each_entry_rcu(mod, &modules, list,
lockdep_is_held(&module_mutex)) {
if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
@@ -454,8 +450,7 @@ bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
struct module *mod;
unsigned int cpu;
- preempt_disable();
-
+ guard(rcu)();
list_for_each_entry_rcu(mod, &modules, list) {
if (mod->state == MODULE_STATE_UNFORMED)
continue;
@@ -472,13 +467,10 @@ bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
per_cpu_ptr(mod->percpu,
get_boot_cpu_id());
}
- preempt_enable();
return true;
}
}
}
-
- preempt_enable();
return false;
}
@@ -795,8 +787,8 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
async_synchronize_full();
/* Store the name and taints of the last unloaded module for diagnostic purposes */
- strscpy(last_unloaded_module.name, mod->name, sizeof(last_unloaded_module.name));
- strscpy(last_unloaded_module.taints, module_flags(mod, buf, false), sizeof(last_unloaded_module.taints));
+ strscpy(last_unloaded_module.name, mod->name);
+ strscpy(last_unloaded_module.taints, module_flags(mod, buf, false));
free_module(mod);
/* someone could wait for the module in add_unformed_module() */
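
The two-argument strscpy() calls above are not a different function: strscpy() is a macro, and the size argument may be dropped when the destination is a fixed-size array, in which case sizeof() of the destination is used at compile time. The two forms side by side, with a hypothetical buffer:

	char name[MODULE_NAME_LEN];

	strscpy(name, src, sizeof(name));	/* explicit size */
	strscpy(name, src);			/* size inferred from the array;
						 * refuses to build if 'name'
						 * decays to a plain pointer */
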
@@ -814,10 +806,9 @@ void __symbol_put(const char *symbol)
.gplok = true,
};
- preempt_disable();
+ guard(rcu)();
BUG_ON(!find_symbol(&fsa));
module_put(fsa.owner);
- preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);
@@ -832,13 +823,12 @@ void symbol_put_addr(void *addr)
/*
* Even though we hold a reference on the module; we still need to
- * disable preemption in order to safely traverse the data structure.
+ * be in an RCU read section in order to safely traverse the data structure.
*/
- preempt_disable();
+ guard(rcu)();
modaddr = __module_text_address(a);
BUG_ON(!modaddr);
module_put(modaddr);
- preempt_enable();
}
EXPORT_SYMBOL_GPL(symbol_put_addr);
@@ -1189,7 +1179,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
getname:
/* We must make copy under the lock if we failed to get ref. */
- strncpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN);
+ strscpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN);
unlock:
mutex_unlock(&module_mutex);
return fsa.sym;
@@ -1221,18 +1211,6 @@ void __weak module_arch_freeing_init(struct module *mod)
{
}
-void *__module_writable_address(struct module *mod, void *loc)
-{
- for_class_mod_mem_type(type, text) {
- struct module_memory *mem = &mod->mem[type];
-
- if (loc >= mem->base && loc < mem->base + mem->size)
- return loc + (mem->rw_copy - mem->base);
- }
-
- return loc;
-}
-
static int module_memory_alloc(struct module *mod, enum mod_mem_type type)
{
unsigned int size = PAGE_ALIGN(mod->mem[type].size);
@@ -1250,21 +1228,15 @@ static int module_memory_alloc(struct module *mod, enum mod_mem_type type)
if (!ptr)
return -ENOMEM;
- mod->mem[type].base = ptr;
-
if (execmem_is_rox(execmem_type)) {
- ptr = vzalloc(size);
+ int err = execmem_make_temp_rw(ptr, size);
- if (!ptr) {
- execmem_free(mod->mem[type].base);
+ if (err) {
+ execmem_free(ptr);
return -ENOMEM;
}
- mod->mem[type].rw_copy = ptr;
mod->mem[type].is_rox = true;
- } else {
- mod->mem[type].rw_copy = mod->mem[type].base;
- memset(mod->mem[type].base, 0, size);
}
/*
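
The reworked module_memory_alloc() above drops the separate writable vzalloc() copy that ROX memory used to get; ranges handed out from the ROX cache are instead flipped writable in place for the duration of loading with execmem_make_temp_rw(). A condensed sketch of just that flow, as a hypothetical wrapper around the execmem calls visible in this hunk:

	static void *alloc_module_memory(enum execmem_type type, size_t size)
	{
		void *p = execmem_alloc(type, size);

		if (!p)
			return NULL;

		/* ROX-cached ranges start out read-only+executable; make
		 * them temporarily writable so sections can be copied in. */
		if (execmem_is_rox(type) && execmem_make_temp_rw(p, size)) {
			execmem_free(p);
			return NULL;
		}

		memset(p, 0, size);
		return p;
	}
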
@@ -1278,18 +1250,29 @@ static int module_memory_alloc(struct module *mod, enum mod_mem_type type)
* *do* eventually get freed, but let's just keep things simple
* and avoid *any* false positives.
*/
- kmemleak_not_leak(ptr);
+ if (!mod->mem[type].is_rox)
+ kmemleak_not_leak(ptr);
+
+ memset(ptr, 0, size);
+ mod->mem[type].base = ptr;
return 0;
}
+static void module_memory_restore_rox(struct module *mod)
+{
+ for_class_mod_mem_type(type, text) {
+ struct module_memory *mem = &mod->mem[type];
+
+ if (mem->is_rox)
+ execmem_restore_rox(mem->base, mem->size);
+ }
+}
+
static void module_memory_free(struct module *mod, enum mod_mem_type type)
{
struct module_memory *mem = &mod->mem[type];
- if (mem->is_rox)
- vfree(mem->rw_copy);
-
execmem_free(mem->base);
}
@@ -1348,7 +1331,7 @@ static void free_module(struct module *mod)
mod_tree_remove(mod);
/* Remove this module from bug list, this uses list_del_rcu */
module_bug_cleanup(mod);
- /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
+ /* Wait for RCU synchronizing before releasing mod->list and buglist. */
synchronize_rcu();
if (try_add_tainted_module(mod))
pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n",
@@ -1371,21 +1354,18 @@ void *__symbol_get(const char *symbol)
.warn = true,
};
- preempt_disable();
- if (!find_symbol(&fsa))
- goto fail;
- if (fsa.license != GPL_ONLY) {
- pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n",
- symbol);
- goto fail;
+ scoped_guard(rcu) {
+ if (!find_symbol(&fsa))
+ return NULL;
+ if (fsa.license != GPL_ONLY) {
+ pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n",
+ symbol);
+ return NULL;
+ }
+ if (strong_try_module_get(fsa.owner))
+ return NULL;
}
- if (strong_try_module_get(fsa.owner))
- goto fail;
- preempt_enable();
return (void *)kernel_symbol_value(fsa.sym);
-fail:
- preempt_enable();
- return NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);
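
Unlike guard(), which covers the remainder of the function, the scoped_guard() used in __symbol_get() above bounds the RCU read section to the braced block: returning NULL from inside the block runs rcu_read_unlock() first, and execution that falls out of the block has already left the read section, holding only the reference taken inside it. The idiom in isolation, with hypothetical names:

	static atomic_t obj_ref;
	static void *the_obj;

	static void *try_get_obj(void)
	{
		scoped_guard(rcu) {
			if (!atomic_inc_not_zero(&obj_ref))
				return NULL;	/* unlock runs before return */
		}
		/* The RCU read section has ended; the reference taken
		 * inside the block keeps the object alive from here on. */
		return the_obj;
	}
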
@@ -2642,7 +2622,6 @@ static int move_module(struct module *mod, struct load_info *info)
for_each_mod_mem_type(type) {
if (!mod->mem[type].size) {
mod->mem[type].base = NULL;
- mod->mem[type].rw_copy = NULL;
continue;
}
@@ -2659,7 +2638,6 @@ static int move_module(struct module *mod, struct load_info *info)
void *dest;
Elf_Shdr *shdr = &info->sechdrs[i];
const char *sname;
- unsigned long addr;
if (!(shdr->sh_flags & SHF_ALLOC))
continue;
@@ -2680,14 +2658,12 @@ static int move_module(struct module *mod, struct load_info *info)
ret = PTR_ERR(dest);
goto out_err;
}
- addr = (unsigned long)dest;
codetag_section_found = true;
} else {
enum mod_mem_type type = shdr->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT;
unsigned long offset = shdr->sh_entsize & SH_ENTSIZE_OFFSET_MASK;
- addr = (unsigned long)mod->mem[type].base + offset;
- dest = mod->mem[type].rw_copy + offset;
+ dest = mod->mem[type].base + offset;
}
if (shdr->sh_type != SHT_NOBITS) {
@@ -2710,13 +2686,14 @@ static int move_module(struct module *mod, struct load_info *info)
* users of info can keep taking advantage and using the newly
* minted official memory area.
*/
- shdr->sh_addr = addr;
+ shdr->sh_addr = (unsigned long)dest;
pr_debug("\t0x%lx 0x%.8lx %s\n", (long)shdr->sh_addr,
(long)shdr->sh_size, info->secstrings + shdr->sh_name);
}
return 0;
out_err:
+ module_memory_restore_rox(mod);
for (t--; t >= 0; t--)
module_memory_free(mod, t);
if (codetag_section_found)
@@ -2863,17 +2840,8 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
return 0;
}
-int __weak module_post_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
static int post_relocation(struct module *mod, const struct load_info *info)
{
- int ret;
-
/* Sort exception table now relocations are done. */
sort_extable(mod->extable, mod->extable + mod->num_exentries);
@@ -2885,24 +2853,7 @@ static int post_relocation(struct module *mod, const struct load_info *info)
add_kallsyms(mod, info);
/* Arch-specific module finalizing. */
- ret = module_finalize(info->hdr, info->sechdrs, mod);
- if (ret)
- return ret;
-
- for_each_mod_mem_type(type) {
- struct module_memory *mem = &mod->mem[type];
-
- if (mem->is_rox) {
- if (!execmem_update_copy(mem->base, mem->rw_copy,
- mem->size))
- return -ENOMEM;
-
- vfree(mem->rw_copy);
- mem->rw_copy = NULL;
- }
- }
-
- return module_post_finalize(info->hdr, info->sechdrs, mod);
+ return module_finalize(info->hdr, info->sechdrs, mod);
}
/* Call module constructors. */
@@ -3049,7 +3000,7 @@ static noinline int do_init_module(struct module *mod)
#endif
/*
* We want to free module_init, but be aware that kallsyms may be
- * walking this with preempt disabled. In all the failure paths, we
+ * walking this within an RCU read section. In all the failure paths, we
* call synchronize_rcu(), but we don't want to slow down the success
* path. execmem_free() cannot be called in an interrupt, so do the
* work and call synchronize_rcu() in a work queue.
@@ -3499,6 +3450,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
mod->mem[type].size);
}
+ module_memory_restore_rox(mod);
module_deallocate(mod, info);
free_copy:
/*
@@ -3715,28 +3667,23 @@ out:
/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_extables(unsigned long addr)
{
- const struct exception_table_entry *e = NULL;
struct module *mod;
- preempt_disable();
+ guard(rcu)();
mod = __module_address(addr);
if (!mod)
- goto out;
+ return NULL;
if (!mod->num_exentries)
- goto out;
-
- e = search_extable(mod->extable,
- mod->num_exentries,
- addr);
-out:
- preempt_enable();
-
+ return NULL;
/*
- * Now, if we found one, we are running inside it now, hence
- * we cannot unload the module, hence no refcnt needed.
+	 * The address passed here belongs to a module that is currently
+	 * executing (we are running inside it), so its module::refcnt is
+	 * already >0 and the module cannot be removed at this stage. All
+	 * other users need to invoke this function within an RCU read
+	 * section.
*/
- return e;
+ return search_extable(mod->extable, mod->num_exentries, addr);
}
/**
@@ -3748,20 +3695,15 @@ out:
*/
bool is_module_address(unsigned long addr)
{
- bool ret;
-
- preempt_disable();
- ret = __module_address(addr) != NULL;
- preempt_enable();
-
- return ret;
+ guard(rcu)();
+ return __module_address(addr) != NULL;
}
/**
* __module_address() - get the module which contains an address.
* @addr: the address.
*
- * Must be called with preempt disabled or module mutex held so that
+ * Must be called within an RCU read section or with module_mutex held so that
* module doesn't get freed during this.
*/
struct module *__module_address(unsigned long addr)
@@ -3779,8 +3721,6 @@ struct module *__module_address(unsigned long addr)
return NULL;
lookup:
- module_assert_mutex_or_preempt();
-
mod = mod_find(addr, &mod_tree);
if (mod) {
BUG_ON(!within_module(addr, mod));
@@ -3800,20 +3740,28 @@ lookup:
*/
bool is_module_text_address(unsigned long addr)
{
- bool ret;
+ guard(rcu)();
+ return __module_text_address(addr) != NULL;
+}
- preempt_disable();
- ret = __module_text_address(addr) != NULL;
- preempt_enable();
+void module_for_each_mod(int (*func)(struct module *mod, void *data), void *data)
+{
+ struct module *mod;
- return ret;
+ guard(rcu)();
+ list_for_each_entry_rcu(mod, &modules, list) {
+ if (mod->state == MODULE_STATE_UNFORMED)
+ continue;
+ if (func(mod, data))
+ break;
+ }
}
/**
* __module_text_address() - get the module whose code contains an address.
* @addr: the address.
*
- * Must be called with preempt disabled or module mutex held so that
+ * Must be called within an RCU read section or with module_mutex held so that
* module doesn't get freed during this.
*/
struct module *__module_text_address(unsigned long addr)
@@ -3836,7 +3784,7 @@ void print_modules(void)
printk(KERN_DEFAULT "Modules linked in:");
/* Most callers should already have preempt disabled, but make sure */
- preempt_disable();
+ guard(rcu)();
list_for_each_entry_rcu(mod, &modules, list) {
if (mod->state == MODULE_STATE_UNFORMED)
continue;
@@ -3844,7 +3792,6 @@ void print_modules(void)
}
print_unloaded_tainted_modules();
- preempt_enable();
if (last_unloaded_module.name[0])
pr_cont(" [last unloaded: %s%s]", last_unloaded_module.name,
last_unloaded_module.taints);
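
The newly added module_for_each_mod() takes the RCU read section internally, skips unformed modules, and stops as soon as the callback returns nonzero. A sketch of a caller, with a hypothetical callback that counts loaded modules:

	static int count_one(struct module *mod, void *data)
	{
		(*(int *)data)++;
		return 0;	/* nonzero would end the walk early */
	}

	static int count_modules(void)
	{
		int n = 0;

		module_for_each_mod(count_one, &n);
		return n;
	}
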
diff --git a/kernel/module/strict_rwx.c b/kernel/module/strict_rwx.c
index 74834ba15615..03f4142cfbf4 100644
--- a/kernel/module/strict_rwx.c
+++ b/kernel/module/strict_rwx.c
@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
+#include <linux/execmem.h>
#include "internal.h"
static int module_set_memory(const struct module *mod, enum mod_mem_type type,
@@ -32,12 +33,12 @@ static int module_set_memory(const struct module *mod, enum mod_mem_type type,
int module_enable_text_rox(const struct module *mod)
{
for_class_mod_mem_type(type, text) {
+ const struct module_memory *mem = &mod->mem[type];
int ret;
- if (mod->mem[type].is_rox)
- continue;
-
- if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+ if (mem->is_rox)
+ ret = execmem_restore_rox(mem->base, mem->size);
+ else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
ret = module_set_memory(mod, type, set_memory_rox);
else
ret = module_set_memory(mod, type, set_memory_x);
diff --git a/kernel/module/tracking.c b/kernel/module/tracking.c
index 16742d1c630c..4fefec5b683c 100644
--- a/kernel/module/tracking.c
+++ b/kernel/module/tracking.c
@@ -21,8 +21,6 @@ int try_add_tainted_module(struct module *mod)
{
struct mod_unload_taint *mod_taint;
- module_assert_mutex_or_preempt();
-
if (!mod->taints)
goto out;
diff --git a/kernel/module/tree_lookup.c b/kernel/module/tree_lookup.c
index 277197977d43..d3204c5c74eb 100644
--- a/kernel/module/tree_lookup.c
+++ b/kernel/module/tree_lookup.c
@@ -12,11 +12,11 @@
/*
* Use a latched RB-tree for __module_address(); this allows us to use
- * RCU-sched lookups of the address from any context.
+ * RCU lookups of the address from any context.
*
- * This is conditional on PERF_EVENTS || TRACING because those can really hit
- * __module_address() hard by doing a lot of stack unwinding; potentially from
- * NMI context.
+ * This is conditional on PERF_EVENTS || TRACING || CFI_CLANG because those can
+ * really hit __module_address() hard by doing a lot of stack unwinding;
+ * potentially from NMI context.
*/
static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
diff --git a/kernel/module/version.c b/kernel/module/version.c
index 3718a8868321..2beefeba82d9 100644
--- a/kernel/module/version.c
+++ b/kernel/module/version.c
@@ -79,17 +79,17 @@ int check_modstruct_version(const struct load_info *info,
.name = "module_layout",
.gplok = true,
};
+ bool have_symbol;
/*
* Since this should be found in kernel (which can't be removed), no
- * locking is necessary -- use preempt_disable() to placate lockdep.
+ * locking is necessary. Regardless, use an RCU read section to keep
+ * lockdep happy.
*/
- preempt_disable();
- if (!find_symbol(&fsa)) {
- preempt_enable();
- BUG();
- }
- preempt_enable();
+ scoped_guard(rcu)
+ have_symbol = find_symbol(&fsa);
+ BUG_ON(!have_symbol);
+
return check_version(info, "module_layout", mod, fsa.crc);
}