mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-11-01 17:08:10 +00:00

commit a44e84a9b7
When manipulating xattr blocks, we can deadlock infinitely looping
inside ext4_xattr_block_set() where we constantly keep finding xattr
block for reuse in mbcache but we are unable to reuse it because its
reference count is too big. This happens because cache entry for the
xattr block is marked as reusable (e_reusable set) although its
reference count is too big. When this inconsistency happens, this
inconsistent state is kept indefinitely and so ext4_xattr_block_set()
keeps retrying indefinitely.

The inconsistent state is caused by non-atomic update of e_reusable
bit. e_reusable is part of a bitfield and e_reusable update can race
with update of e_referenced bit in the same bitfield resulting in loss
of one of the updates. Fix the problem by using atomic bitops instead.

This bug has been around for many years, but it became *much* easier
to hit after commit 65f8b80053 ("ext4: fix race when reusing xattr
blocks").

Cc: stable@vger.kernel.org
Fixes: 6048c64b26 ("mbcache: add reusable flag to cache entries")
Fixes: 65f8b80053 ("ext4: fix race when reusing xattr blocks")
Reported-and-tested-by: Jeremi Piotrowski <jpiotrowski@linux.microsoft.com>
Reported-by: Thilo Fromm <t-lo@linux.microsoft.com>
Link: https://lore.kernel.org/r/c77bf00f-4618-7149-56f1-b8d1664b9d07@linux.microsoft.com/
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Andreas Dilger <adilger@dilger.ca>
Link: https://lore.kernel.org/r/20221123193950.16758-1-jack@suse.cz
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
72 lines
2.1 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MBCACHE_H
#define _LINUX_MBCACHE_H

#include <linux/hash.h>
#include <linux/list_bl.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/fs.h>

struct mb_cache;

/* Cache entry flags */
enum {
	MBE_REFERENCED_B = 0,
	MBE_REUSABLE_B
};

struct mb_cache_entry {
	/* List of entries in cache - protected by cache->c_list_lock */
	struct list_head	e_list;
	/*
	 * Hash table list - protected by hash chain bitlock. The entry is
	 * guaranteed to be hashed while e_refcnt > 0.
	 */
	struct hlist_bl_node	e_hash_list;
	/*
	 * Entry refcount. Once it reaches zero, entry is unhashed and freed.
	 * While refcount > 0, the entry is guaranteed to stay in the hash and
	 * e.g. mb_cache_entry_try_delete() will fail.
	 */
	atomic_t		e_refcnt;
	/* Key in hash - stable during lifetime of the entry */
	u32			e_key;
	unsigned long		e_flags;
	/* User provided value - stable during lifetime of the entry */
	u64			e_value;
};

struct mb_cache *mb_cache_create(int bucket_bits);
void mb_cache_destroy(struct mb_cache *cache);

int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  u64 value, bool reusable);
void __mb_cache_entry_free(struct mb_cache *cache,
			   struct mb_cache_entry *entry);
void mb_cache_entry_wait_unused(struct mb_cache_entry *entry);
static inline void mb_cache_entry_put(struct mb_cache *cache,
				      struct mb_cache_entry *entry)
{
	unsigned int cnt = atomic_dec_return(&entry->e_refcnt);

	if (cnt > 0) {
		if (cnt <= 2)
			wake_up_var(&entry->e_refcnt);
		return;
	}
	__mb_cache_entry_free(cache, entry);
}

struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
						    u32 key, u64 value);
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  u64 value);
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key);
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry);
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry);

#endif /* _LINUX_MBCACHE_H */
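To put the declarations above in context, here is a hedged sketch of how a filesystem caller might drive this API to deduplicate xattr blocks, loosely modeled on the ext4 usage. The helper names, the bucket count, and the 'matches' verification hook are illustrative assumptions, not code taken from any in-tree filesystem.

/*
 * Illustrative sketch only: cache on-disk xattr blocks keyed by a hash
 * of their contents so identical blocks can be shared. Assumes the
 * usual mbcache contract: entries returned by the find/get helpers
 * carry a reference that must be dropped with mb_cache_entry_put(),
 * and mb_cache_entry_find_next() releases the reference on the entry
 * it was handed.
 */
#include <linux/mbcache.h>

static struct mb_cache *xattr_block_cache;

static int xattr_block_cache_init(void)
{
	/* 2^10 hash buckets; mb_cache_create() returns NULL on failure. */
	xattr_block_cache = mb_cache_create(10);
	return xattr_block_cache ? 0 : -ENOMEM;
}

/* Remember an on-disk block under the hash of its contents. */
static int xattr_block_cache_insert(u32 hash, u64 block_nr, bool reusable)
{
	int err = mb_cache_entry_create(xattr_block_cache, GFP_NOFS, hash,
					block_nr, reusable);

	/* -EBUSY means this block is already cached; not an error here. */
	return err == -EBUSY ? 0 : err;
}

/*
 * Walk cached candidates with a matching hash; 'matches' is an assumed
 * helper that reads the block and checks it really is identical.
 * Returns the block number to reuse, or 0 if no candidate qualifies.
 */
static u64 xattr_block_cache_lookup(u32 hash, bool (*matches)(u64 block_nr))
{
	struct mb_cache_entry *ce;

	for (ce = mb_cache_entry_find_first(xattr_block_cache, hash); ce;
	     ce = mb_cache_entry_find_next(xattr_block_cache, ce)) {
		if (matches(ce->e_value)) {
			u64 block_nr = ce->e_value;

			/* Mark the entry recently used, then drop our ref. */
			mb_cache_entry_touch(xattr_block_cache, ce);
			mb_cache_entry_put(xattr_block_cache, ce);
			return block_nr;
		}
	}
	return 0;
}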