Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "7 patches.

  Subsystems affected by this patch series: mm (kasan, mm/slub,
  mm/madvise, and memcg), and lib"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  lib: use PFN_PHYS() in devmem_is_allowed()
  mm/memcg: fix incorrect flushing of lruvec data in obj_stock
  mm/madvise: report SIGBUS as -EFAULT for MADV_POPULATE_(READ|WRITE)
  mm: slub: fix slub_debug disabling for list of slabs
  slub: fix kmalloc_pagealloc_invalid_free unit test
  kasan, slub: reset tag when printing address
  kasan, kmemleak: reset tags when scanning block
Linus Torvalds 2021-08-13 15:05:23 -10:00
commit dfa377c35d
6 changed files with 30 additions and 20 deletions

lib/devmem_is_allowed.c

@@ -19,7 +19,7 @@
  */
 int devmem_is_allowed(unsigned long pfn)
 {
-	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
+	if (iomem_is_exclusive(PFN_PHYS(pfn)))
 		return 0;
 	if (!page_is_ram(pfn))
 		return 1;
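
The PFN_PHYS() conversion matters on 32-bit kernels whose phys_addr_t is 64 bits wide (e.g. ARM LPAE): the old expression shifted the PFN as an unsigned long, so physical addresses above 4 GiB were truncated before iomem_is_exclusive() ever saw them, while PFN_PHYS() widens the value to phys_addr_t before shifting. A minimal userspace sketch of the difference (the fixed-width typedefs and the PAGE_SHIFT value are illustrative stand-ins, not the kernel's types):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t ulong32;      /* stands in for a 32-bit unsigned long */
    typedef uint64_t phys_addr_t;  /* stands in for a 64-bit phys_addr_t */

    #define PAGE_SHIFT 12
    #define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT)  /* widen first, then shift */

    int main(void)
    {
        ulong32 pfn = 0x100000;  /* first page frame above the 4 GiB boundary */

        /* old expression: the shift happens in 32 bits, the high bits are lost */
        phys_addr_t truncated = (phys_addr_t)(pfn << PAGE_SHIFT);
        /* new expression: the value is widened to 64 bits before the shift */
        phys_addr_t widened = PFN_PHYS(pfn);

        printf("pfn << PAGE_SHIFT = %#llx, PFN_PHYS(pfn) = %#llx\n",
               (unsigned long long)truncated, (unsigned long long)widened);
        return 0;
    }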

mm/gup.c

@@ -1558,9 +1558,12 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
 		gup_flags |= FOLL_WRITE;
 
 	/*
-	 * See check_vma_flags(): Will return -EFAULT on incompatible mappings
-	 * or with insufficient permissions.
+	 * We want to report -EINVAL instead of -EFAULT for any permission
+	 * problems or incompatible mappings.
 	 */
+	if (check_vma_flags(vma, gup_flags))
+		return -EINVAL;
+
 	return __get_user_pages(mm, start, nr_pages, gup_flags,
 				NULL, NULL, locked);
 }

mm/kmemleak.c

@@ -290,7 +290,7 @@ static void hex_dump_object(struct seq_file *seq,
 	warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len);
 	kasan_disable_current();
 	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
-			     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
+			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
 	kasan_enable_current();
 }
@@ -1171,7 +1171,7 @@ static bool update_checksum(struct kmemleak_object *object)
 	kasan_disable_current();
 	kcsan_disable_current();
-	object->checksum = crc32(0, (void *)object->pointer, object->size);
+	object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
 	kasan_enable_current();
 	kcsan_enable_current();
@@ -1246,7 +1246,7 @@ static void scan_block(void *_start, void *_end,
 			break;
 
 		kasan_disable_current();
-		pointer = *ptr;
+		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
 		kasan_enable_current();
 
 		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
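
All three kmemleak hunks have the same shape: kmemleak reads object memory directly (for the hex dump, the CRC32 checksum, and the pointer scan), and with software or hardware tag-based KASAN the pointer it uses may carry a tag that does not match the memory it points at, so the tag is reset before the access. On arm64 this roughly amounts to putting the default kernel tag (0xff) back into the pointer's top byte; a rough userspace illustration (the constants mirror arm64's top-byte scheme but are otherwise illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define TAG_SHIFT  56
    #define TAG_KERNEL 0xffULL  /* default/match-all kernel pointer tag on arm64 */

    /* Illustrative stand-in for kasan_reset_tag(): restore the default tag in
     * the top byte so the access is not checked against the memory's tag. */
    static uint64_t reset_tag(uint64_t tagged_ptr)
    {
        return tagged_ptr | (TAG_KERNEL << TAG_SHIFT);
    }

    int main(void)
    {
        uint64_t tagged = 0x3effff800010a2c0ULL;  /* hypothetical pointer with tag 0x3e */

        printf("tagged   = %#llx\n", (unsigned long long)tagged);
        printf("untagged = %#llx\n", (unsigned long long)reset_tag(tagged));
        return 0;
    }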

mm/madvise.c

@@ -862,10 +862,12 @@ static long madvise_populate(struct vm_area_struct *vma,
 			switch (pages) {
 			case -EINTR:
 				return -EINTR;
-			case -EFAULT: /* Incompatible mappings / permissions. */
+			case -EINVAL: /* Incompatible mappings / permissions. */
 				return -EINVAL;
 			case -EHWPOISON:
 				return -EHWPOISON;
+			case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
+				return -EFAULT;
 			default:
 				pr_warn_once("%s: unhandled return value: %ld\n",
 					     __func__, pages);
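
Together with the mm/gup.c hunk above, this lets a MADV_POPULATE_(READ|WRITE) caller tell a mapping that can never be populated (-EINVAL, e.g. insufficient permissions or an incompatible VMA) apart from a population attempt that hit SIGBUS/SIGSEGV in the backing store (-EFAULT). A small userspace sketch of the caller-visible contract; MADV_POPULATE_READ needs a 5.14+ kernel, and the fallback define uses the value from the asm-generic uapi header in case older libc headers lack it:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    #ifndef MADV_POPULATE_READ
    #define MADV_POPULATE_READ 22
    #endif

    int main(void)
    {
        size_t len = 4096;
        /* A PROT_NONE mapping cannot be populated at all: that is a permission
         * problem, so the kernel reports EINVAL.  A mapping whose backing store
         * raises SIGBUS while faulting (e.g. beyond EOF of a truncated file
         * mapping) would instead report EFAULT after this fix. */
        void *p = mmap(NULL, len, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
            return 1;

        if (madvise(p, len, MADV_POPULATE_READ))
            printf("MADV_POPULATE_READ: %s\n", strerror(errno));

        munmap(p, len);
        return 0;
    }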

mm/memcontrol.c

@@ -3106,13 +3106,15 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
 		stock->cached_pgdat = pgdat;
 	} else if (stock->cached_pgdat != pgdat) {
 		/* Flush the existing cached vmstat data */
+		struct pglist_data *oldpg = stock->cached_pgdat;
+
 		if (stock->nr_slab_reclaimable_b) {
-			mod_objcg_mlstate(objcg, pgdat, NR_SLAB_RECLAIMABLE_B,
+			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
 					  stock->nr_slab_reclaimable_b);
 			stock->nr_slab_reclaimable_b = 0;
 		}
 		if (stock->nr_slab_unreclaimable_b) {
-			mod_objcg_mlstate(objcg, pgdat, NR_SLAB_UNRECLAIMABLE_B,
+			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
 					  stock->nr_slab_unreclaimable_b);
 			stock->nr_slab_unreclaimable_b = 0;
 		}
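
The bug here is an old/new mix-up: the bytes cached in the per-CPU obj_stock belong to the node recorded in stock->cached_pgdat, so when a caller comes in for a different node the pending deltas have to be flushed against that old node, not against the pgdat that was just passed in. A compact userspace model of the pattern (names and types are illustrative, not the kernel's):

    #include <stdio.h>

    struct stock {
        int  cached_node;    /* node the pending bytes were accumulated for */
        long pending_bytes;
    };

    static void flush_to_node(int node, long bytes)
    {
        printf("flushing %ld bytes to node %d\n", bytes, node);
    }

    static void account(struct stock *stock, int node, long bytes)
    {
        if (stock->cached_node != node) {
            int oldnode = stock->cached_node;  /* the fix: flush against the old node */

            if (stock->pending_bytes)
                flush_to_node(oldnode, stock->pending_bytes);
            stock->pending_bytes = 0;
            stock->cached_node = node;
        }
        stock->pending_bytes += bytes;
    }

    int main(void)
    {
        struct stock s = { .cached_node = 0, .pending_bytes = 0 };

        account(&s, 0, 64);
        account(&s, 1, 32);  /* the 64 pending bytes must be credited to node 0 */
        flush_to_node(s.cached_node, s.pending_bytes);
        return 0;
    }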

mm/slub.c

@@ -576,8 +576,8 @@ static void print_section(char *level, char *text, u8 *addr,
 			       unsigned int length)
 {
 	metadata_access_enable();
-	print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS,
-			16, 1, addr, length, 1);
+	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
+			16, 1, kasan_reset_tag((void *)addr), length, 1);
 	metadata_access_disable();
 }
@@ -1400,12 +1400,13 @@ parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
 static int __init setup_slub_debug(char *str)
 {
 	slab_flags_t flags;
+	slab_flags_t global_flags;
 	char *saved_str;
 	char *slab_list;
 	bool global_slub_debug_changed = false;
 	bool slab_list_specified = false;
 
-	slub_debug = DEBUG_DEFAULT_FLAGS;
+	global_flags = DEBUG_DEFAULT_FLAGS;
 	if (*str++ != '=' || !*str)
 		/*
 		 * No options specified. Switch on full debugging.
@@ -1417,7 +1418,7 @@ static int __init setup_slub_debug(char *str)
 		str = parse_slub_debug_flags(str, &flags, &slab_list, true);
 		if (!slab_list) {
-			slub_debug = flags;
+			global_flags = flags;
 			global_slub_debug_changed = true;
 		} else {
 			slab_list_specified = true;
@@ -1426,16 +1427,18 @@ static int __init setup_slub_debug(char *str)
 	/*
 	 * For backwards compatibility, a single list of flags with list of
-	 * slabs means debugging is only enabled for those slabs, so the global
-	 * slub_debug should be 0. We can extended that to multiple lists as
+	 * slabs means debugging is only changed for those slabs, so the global
+	 * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
+	 * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as
 	 * long as there is no option specifying flags without a slab list.
 	 */
 	if (slab_list_specified) {
 		if (!global_slub_debug_changed)
-			slub_debug = 0;
+			global_flags = slub_debug;
 		slub_debug_string = saved_str;
 	}
 
 out:
+	slub_debug = global_flags;
 	if (slub_debug != 0 || slub_debug_string)
 		static_branch_enable(&slub_debug_enabled);
 	else
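
The user-visible effect of the setup_slub_debug() rework: a boot parameter that only names specific caches (for example slub_debug=-,dentry to switch debugging off for just that one cache; the cache name is only an example) no longer clobbers the global flags, which now keep their compile-time default until the single assignment at out:. A simplified userspace model of the resulting flag resolution (illustrative, not the kernel's parser):

    #include <stdbool.h>
    #include <stdio.h>

    #define DEBUG_DEFAULT_FLAGS 0x7  /* stand-in bit pattern */

    /* value before parsing: 0, or DEBUG_DEFAULT_FLAGS with CONFIG_SLUB_DEBUG_ON=y */
    static unsigned int slub_debug = DEBUG_DEFAULT_FLAGS;

    static void parse_boot_param(bool global_spec_given, unsigned int global_spec,
                                 bool slab_list_specified)
    {
        unsigned int global_flags = DEBUG_DEFAULT_FLAGS;

        if (global_spec_given)
            global_flags = global_spec;
        else if (slab_list_specified)
            /* the fix: keep whatever the global already was
             * (before the fix this path forced the global flags to 0) */
            global_flags = slub_debug;

        slub_debug = global_flags;  /* the real global is written only once, here */
    }

    int main(void)
    {
        parse_boot_param(false, 0, true);  /* e.g. "slub_debug=-,dentry": per-slab spec only */
        printf("global slub_debug after parsing: %#x\n", slub_debug);
        return 0;
    }
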
@@ -3236,12 +3239,12 @@ struct detached_freelist {
 	struct kmem_cache *s;
 };
 
-static inline void free_nonslab_page(struct page *page)
+static inline void free_nonslab_page(struct page *page, void *object)
 {
 	unsigned int order = compound_order(page);
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
-	kfree_hook(page_address(page));
+	kfree_hook(object);
 	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
 	__free_pages(page, order);
 }
@@ -3282,7 +3285,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	if (!s) {
 		/* Handle kalloc'ed objects */
 		if (unlikely(!PageSlab(page))) {
-			free_nonslab_page(page);
+			free_nonslab_page(page, object);
 			p[size] = NULL; /* mark object processed */
 			return size;
 		}
@@ -4258,7 +4261,7 @@ void kfree(const void *x)
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
-		free_nonslab_page(page);
+		free_nonslab_page(page, object);
 		return;
 	}
 	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
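
The last three hunks hand the caller's pointer down to free_nonslab_page(), so kfree_hook() sees exactly what was passed to kfree() instead of page_address(page). With KASAN enabled that is what allows the kmalloc_pagealloc_invalid_free test to catch a free done at an offset into a large, page-allocator-backed kmalloc() allocation again. A userspace model of why the hook must see the caller's pointer (the check is a loose analogy for what the KASAN hook verifies, not its real implementation):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL

    /* Loose stand-in for the sanity check behind the KASAN free hook: only the
     * pointer originally handed out by the allocator may be freed. */
    static void check_large_free(const char *which, const void *base, const void *ptr)
    {
        if (ptr != base)
            printf("%s: invalid free caught (ptr != start of allocation)\n", which);
        else
            printf("%s: free looks valid\n", which);
    }

    int main(void)
    {
        void *base = aligned_alloc(PAGE_SIZE, 2 * PAGE_SIZE);  /* page-backed, like a big kmalloc() */
        void *caller_ptr;

        if (!base)
            return 1;
        caller_ptr = (char *)base + 1;  /* a buggy caller frees at an offset */

        /* old hook argument, page_address(page): the offset is lost, nothing is reported */
        check_large_free("page base ", base, base);
        /* new hook argument, the pointer kfree() was given: the bad free is reported */
        check_large_free("caller ptr", base, caller_ptr);

        free(base);
        return 0;
    }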