slab fixes for 6.6-rc4

-----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCAAdFiEEe7vIQRWZI0iWSE3xu+CwddJFiJoFAmUWfrsACgkQu+CwddJF
 iJo6+QgAnn3klZX5wOfH93tdlOz2TNy8QVSmNuITDKThLJg9r8YkQJdp6NYHR0Rc
 vrbZ2pMqF/LQ/LW49uZahQwVi7811psfU3PqbSC3CRtUYq0RUMu5PaeItvRp4S5n
 2zYiWVSNGfSmG4jQm2L2nMjDRK8m3oLKwuxKejv3UQLDZ5U1Fh36k75lZK1PERmu
 +cBQATtncj4N1rF0eY8mif3ctqqkVqz79t/nU/FCBx0+v3s4wTzYB1y8l5FEH2cM
 iU4A4jsZe147DxHadUQF2ahnj6oaOacgtg846WN5P73BjiRhdrJaTS8HSeAS/RIo
 e/PpbLzOFp4Rz+2u1Me7nFK64qFjyw==
 =+WB7
 -----END PGP SIGNATURE-----

Merge tag 'slab-fixes-for-6.6-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab fixes from Vlastimil Babka:

 - stable fix to prevent list corruption when destroying caches with
   leftover objects (Rafael Aquini)

 - fix for a gotcha in kmalloc_size_roundup() when it is called with a
   too-high size, discovered recently when a networking call site had to
   be fixed for a different issue (David Laight)

* tag 'slab-fixes-for-6.6-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  slab: kmalloc_size_roundup() must not return 0 for non-zero size
  mm/slab_common: fix slab_caches list corruption after kmem_cache_destroy()
commit 1c84724ccb
Author: Linus Torvalds
Date:   2023-09-29 12:10:12 -07:00

diff --git a/mm/slab_common.c b/mm/slab_common.c
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -479,7 +479,7 @@ void slab_kmem_cache_release(struct kmem_cache *s)
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-	int refcnt;
+	int err = -EBUSY;
 	bool rcu_set;
 
 	if (unlikely(!s) || !kasan_check_byte(s))
@@ -490,17 +490,17 @@ void kmem_cache_destroy(struct kmem_cache *s)
 
 	rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
 
-	refcnt = --s->refcount;
-	if (refcnt)
+	s->refcount--;
+	if (s->refcount)
 		goto out_unlock;
 
-	WARN(shutdown_cache(s),
-	     "%s %s: Slab cache still has objects when called from %pS",
+	err = shutdown_cache(s);
+	WARN(err, "%s %s: Slab cache still has objects when called from %pS",
 	     __func__, s->name, (void *)_RET_IP_);
 out_unlock:
 	mutex_unlock(&slab_mutex);
 	cpus_read_unlock();
-	if (!refcnt && !rcu_set)
+	if (!err && !rcu_set)
 		kmem_cache_release(s);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
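
The point of this hunk is easy to lose in the noise: kmem_cache_release()
must only run when shutdown_cache() actually succeeded, because a cache
that still has objects stays linked on the slab_caches list, and freeing
it in that state leaves a dangling pointer in the list. Below is a minimal
userspace sketch of that pattern; all names here (cache, shutdown_cache,
cache_destroy) are illustrative stand-ins, not the kernel's real
structures or API:

#include <stdio.h>
#include <stdlib.h>

struct cache {
	struct cache *next;
	const char *name;
	int objects;		/* leftover objects make shutdown fail */
	int refcount;
};

static struct cache *cache_list;

/* Fails, leaving the cache on the list, if objects are still live. */
static int shutdown_cache(struct cache *s)
{
	struct cache **pp;

	if (s->objects)
		return -1;			/* -EBUSY in the kernel */
	for (pp = &cache_list; *pp; pp = &(*pp)->next) {
		if (*pp == s) {
			*pp = s->next;		/* unlink */
			return 0;
		}
	}
	return -1;
}

static void cache_destroy(struct cache *s)
{
	int err = -1;	/* mirrors "int err = -EBUSY;" in the fix */

	if (--s->refcount)
		return;
	err = shutdown_cache(s);
	if (err)
		fprintf(stderr, "%s: cache still has objects\n", s->name);
	/*
	 * Gate the free on err, not on the refcount alone: freeing s
	 * while it is still linked would leave a dangling pointer in
	 * cache_list -- the corruption the patch prevents.
	 */
	if (!err)
		free(s);
}

int main(void)
{
	struct cache *s = calloc(1, sizeof(*s));

	s->name = "demo";
	s->objects = 1;		/* simulate a leaked object */
	s->refcount = 1;
	s->next = cache_list;
	cache_list = s;

	cache_destroy(s);	/* shutdown fails; s stays on the list */
	printf("list head still valid: %s\n", cache_list->name);
	return 0;
}

Running it shows the destroy path declining to free the still-populated
cache, so the list head remains valid afterwards.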
@@ -745,24 +745,24 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
 
 size_t kmalloc_size_roundup(size_t size)
 {
-	struct kmem_cache *c;
-
-	/* Short-circuit the 0 size case. */
-	if (unlikely(size == 0))
-		return 0;
-	/* Short-circuit saturated "too-large" case. */
-	if (unlikely(size == SIZE_MAX))
-		return SIZE_MAX;
-	/* Above the smaller buckets, size is a multiple of page size. */
-	if (size > KMALLOC_MAX_CACHE_SIZE)
-		return PAGE_SIZE << get_order(size);
-
-	/*
-	 * The flags don't matter since size_index is common to all.
-	 * Neither does the caller for just getting ->object_size.
-	 */
-	c = kmalloc_slab(size, GFP_KERNEL, 0);
-	return c ? c->object_size : 0;
+	if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
+		/*
+		 * The flags don't matter since size_index is common to all.
+		 * Neither does the caller for just getting ->object_size.
+		 */
+		return kmalloc_slab(size, GFP_KERNEL, 0)->object_size;
+	}
+
+	/* Above the smaller buckets, size is a multiple of page size. */
+	if (size && size <= KMALLOC_MAX_SIZE)
+		return PAGE_SIZE << get_order(size);
+
+	/*
+	 * Return 'size' for 0 - kmalloc() returns ZERO_SIZE_PTR
+	 * and very large size - kmalloc() may fail.
+	 */
+	return size;
 }
 EXPORT_SYMBOL(kmalloc_size_roundup);
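
This hunk closes an arithmetic gotcha: for a size above the kmalloc
buckets, the old PAGE_SIZE << get_order(size) path can shift the page size
past the top bit of size_t, wrapping the result to 0, so a caller rounding
up a huge (but non-zero) size got 0 back. The sketch below makes the
wraparound visible; PAGE_SHIFT, PAGE_SIZE and this get_order() are
simplified userspace stand-ins for the kernel versions, chosen only to
make the arithmetic obvious:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  ((size_t)1 << PAGE_SHIFT)

/* Smallest order such that (PAGE_SIZE << order) >= size. */
static int get_order(size_t size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		size >>= 1;
		order++;
	}
	return order;
}

int main(void)
{
	size_t huge = SIZE_MAX - 100;

	/*
	 * Old behaviour for size > KMALLOC_MAX_CACHE_SIZE: on a 64-bit
	 * size_t the shift wraps around to 0 for a near-SIZE_MAX
	 * request, so the "rounded up" size is 0 bytes.
	 */
	printf("old: roundup(%zu) = %zu\n", huge,
	       PAGE_SIZE << get_order(huge));

	/* New behaviour: hand back size unchanged; a later kmalloc()
	 * of such a size simply fails, which is the safe outcome. */
	printf("new: roundup(%zu) = %zu\n", huge, huge);
	return 0;
}

With the fix, an out-of-range size comes back unchanged, so the eventual
kmalloc() fails cleanly instead of being silently turned into a zero-byte
request.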