module: Don't ignore errors from set_memory_XX()

set_memory_ro(), set_memory_nx(), set_memory_x() and other helpers
can fail and return an error. In that case the memory might not be
protected as expected and the module loading has to be aborted to
avoid security issues.

Check the return value of all calls to set_memory_XX() and handle
any error.
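
For illustration only (not part of this patch, and using the placeholder
names 'base' and 'npages'), the pattern callers are expected to follow is
roughly:

	#include <linux/set_memory.h>

	/* Sketch: propagate a set_memory_XX() failure instead of ignoring it. */
	static int protect_region(void *base, int npages)
	{
		int err;

		err = set_memory_ro((unsigned long)base, npages);
		if (err)
			return err;	/* abort: the pages may still be writable */

		return set_memory_nx((unsigned long)base, npages);
	}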

Add a check to avoid calling set_memory_XX() on NULL pointers, as some
architectures may not like it, although numpages is always 0 in that
case. This also avoids a useless call to set_vm_flush_reset_perms().
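
As a rough sketch of that guard (mirroring the module_set_memory() hunk in
kernel/module/strict_rwx.c below):

	/* An unused region has base == NULL and size == 0, so numpages would
	 * be 0 anyway, but don't hand a NULL address to the arch code. */
	if (!mod_mem->base)
		return 0;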

Link: https://github.com/KSPP/linux/issues/7
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
commit d1909c0221
parent 157285397f
Author:    Christophe Leroy <christophe.leroy@csgroup.eu>
Date:      2024-02-16 09:14:27 +01:00
Committer: Luis Chamberlain <mcgrof@kernel.org>

3 changed files with 56 additions and 23 deletions

diff --git a/kernel/module/internal.h b/kernel/module/internal.h

@@ -322,9 +322,9 @@ static inline struct module *mod_find(unsigned long addr, struct mod_tree_root *
 }
 #endif /* CONFIG_MODULES_TREE_LOOKUP */
-void module_enable_rodata_ro(const struct module *mod, bool after_init);
-void module_enable_data_nx(const struct module *mod);
-void module_enable_text_rox(const struct module *mod);
+int module_enable_rodata_ro(const struct module *mod, bool after_init);
+int module_enable_data_nx(const struct module *mod);
+int module_enable_text_rox(const struct module *mod);
 int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 				char *secstrings, struct module *mod);

diff --git a/kernel/module/main.c b/kernel/module/main.c

@@ -2571,7 +2571,9 @@ static noinline int do_init_module(struct module *mod)
 	/* Switch to core kallsyms now init is done: kallsyms may be walking! */
 	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
 #endif
-	module_enable_rodata_ro(mod, true);
+	ret = module_enable_rodata_ro(mod, true);
+	if (ret)
+		goto fail_mutex_unlock;
 	mod_tree_remove_init(mod);
 	module_arch_freeing_init(mod);
 	for_class_mod_mem_type(type, init) {
@@ -2609,6 +2611,8 @@ static noinline int do_init_module(struct module *mod)
 	return 0;
+fail_mutex_unlock:
+	mutex_unlock(&module_mutex);
 fail_free_freeinit:
 	kfree(freeinit);
 fail:
@@ -2736,9 +2740,15 @@ static int complete_formation(struct module *mod, struct load_info *info)
 	module_bug_finalize(info->hdr, info->sechdrs, mod);
 	module_cfi_finalize(info->hdr, info->sechdrs, mod);
-	module_enable_rodata_ro(mod, false);
-	module_enable_data_nx(mod);
-	module_enable_text_rox(mod);
+	err = module_enable_rodata_ro(mod, false);
+	if (err)
+		goto out_strict_rwx;
+	err = module_enable_data_nx(mod);
+	if (err)
+		goto out_strict_rwx;
+	err = module_enable_text_rox(mod);
+	if (err)
+		goto out_strict_rwx;
 	/*
 	 * Mark state as coming so strong_try_module_get() ignores us,
@@ -2749,6 +2759,8 @@ static int complete_formation(struct module *mod, struct load_info *info)
 	return 0;
+out_strict_rwx:
+	module_bug_cleanup(mod);
 out:
 	mutex_unlock(&module_mutex);
 	return err;

diff --git a/kernel/module/strict_rwx.c b/kernel/module/strict_rwx.c

@@ -11,13 +11,16 @@
 #include <linux/set_memory.h>
 #include "internal.h"
-static void module_set_memory(const struct module *mod, enum mod_mem_type type,
-			      int (*set_memory)(unsigned long start, int num_pages))
+static int module_set_memory(const struct module *mod, enum mod_mem_type type,
+			     int (*set_memory)(unsigned long start, int num_pages))
 {
 	const struct module_memory *mod_mem = &mod->mem[type];
+	if (!mod_mem->base)
+		return 0;
 	set_vm_flush_reset_perms(mod_mem->base);
-	set_memory((unsigned long)mod_mem->base, mod_mem->size >> PAGE_SHIFT);
+	return set_memory((unsigned long)mod_mem->base, mod_mem->size >> PAGE_SHIFT);
 }
 /*
@@ -26,35 +29,53 @@ static void module_set_memory(const struct module *mod, enum mod_mem_type type,
  * CONFIG_STRICT_MODULE_RWX because they are needed regardless of whether we
  * are strict.
  */
-void module_enable_text_rox(const struct module *mod)
+int module_enable_text_rox(const struct module *mod)
 {
 	for_class_mod_mem_type(type, text) {
+		int ret;
 		if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
-			module_set_memory(mod, type, set_memory_rox);
+			ret = module_set_memory(mod, type, set_memory_rox);
 		else
-			module_set_memory(mod, type, set_memory_x);
+			ret = module_set_memory(mod, type, set_memory_x);
+		if (ret)
+			return ret;
 	}
+	return 0;
 }
-void module_enable_rodata_ro(const struct module *mod, bool after_init)
+int module_enable_rodata_ro(const struct module *mod, bool after_init)
 {
-	if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX) || !rodata_enabled)
-		return;
+	int ret;
-	module_set_memory(mod, MOD_RODATA, set_memory_ro);
-	module_set_memory(mod, MOD_INIT_RODATA, set_memory_ro);
+	if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX) || !rodata_enabled)
+		return 0;
+	ret = module_set_memory(mod, MOD_RODATA, set_memory_ro);
+	if (ret)
+		return ret;
+	ret = module_set_memory(mod, MOD_INIT_RODATA, set_memory_ro);
+	if (ret)
+		return ret;
 	if (after_init)
-		module_set_memory(mod, MOD_RO_AFTER_INIT, set_memory_ro);
+		return module_set_memory(mod, MOD_RO_AFTER_INIT, set_memory_ro);
+	return 0;
 }
-void module_enable_data_nx(const struct module *mod)
+int module_enable_data_nx(const struct module *mod)
 {
 	if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
-		return;
+		return 0;
-	for_class_mod_mem_type(type, data)
-		module_set_memory(mod, type, set_memory_nx);
+	for_class_mod_mem_type(type, data) {
+		int ret = module_set_memory(mod, type, set_memory_nx);
+		if (ret)
+			return ret;
+	}
+	return 0;
 }
 int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,