fscache fixes

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEqG5UsNXhtOCrfGQP+7dXa6fLC2sFAmJW6J0ACgkQ+7dXa6fL
 C2sXZw//Z5if0BzqI0SjZ7JeozZ4ADgAKYrT0WvYRg7TPfJBThZsa9NEaQfueXBp
 0DlZpM/ToNQXMYV/jRlDjzooPstKt45IdS4wMewTtXZAXDuZNDygOIRTY5S3zKHY
 wW4ZuZ/3AtNejY8YAnxFtykcbVfxMzYkQ+cgX79fd/+ZLgpYImtk4k3fS53pyRhv
 Q6bT7zxIOIGzaPVda/lu4t1vdTXNTNI2b/9lYOPEXMC6OFmHQ94dnqsyW6AKUD6K
 oYldKUFIFcv5CeiH8sTXFrkckNqgkUln5OXdkFLUjRPHE8Yyq57ZyX7ilp4yFdfS
 HE8VpDaDfk11hVpFLngkPu5RJKsRbad5CQ+Jl8+xiEn0fbuR4q7LFvqSVoykh/x5
 hh5veuSc/KvfpRIijNlCff4gry9gnAhHlLbt8xFbpNWPaYFTVdkkuDtoB8Kv/KoV
 HmxyNyQEW2eL7i7JXfSI4EjNPI85RVhVXmccbJJoKHQRvSVCZuWBlR7NUkykzuXg
 CQbnO/SeqilpOpbFvhTUXdOXNQEHbtcKp9yO9hd/f67E7gNKiDOewOmItMt4HfFx
 pjKv48tLR1cnZtL8pNf7nSMKuGe0bofDu46rnra0Z4mcz4d4y9mkIrdoIVT/heae
 E3Ut/1GJ0MkETl+nYXdV9IxTMHWIVGyzt7HyJTziXj03dfVquqA=
 =fUQK
 -----END PGP SIGNATURE-----

Merge tag 'fscache-fixes-20220413' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

Pull fscache fixes from David Howells:
 "Here's a collection of fscache and cachefiles fixes and misc small
  cleanups. The two main fixes are:

   - Add a missing unmark of the inode in-use mark in an error path.

   - Fix a KASAN slab-out-of-bounds error when setting the xattr on a
     cachefiles volume due to the wrong length being given to memcpy().

  In addition, there's the removal of an unused parameter, removal of an
  unused Kconfig option, conditionalising a bit of procfs-related stuff
  and some doc fixes"

* tag 'fscache-fixes-20220413' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs:
  fscache: remove FSCACHE_OLD_API Kconfig option
  fscache: Use wrapper fscache_set_cache_state() directly when relinquishing
  fscache: Move fscache_cookies_seq_ops specific code under CONFIG_PROC_FS
  fscache: Remove the cookie parameter from fscache_clear_page_bits()
  docs: filesystems: caching/backend-api.rst: fix an object withdrawn API
  docs: filesystems: caching/backend-api.rst: correct two relinquish APIs use
  cachefiles: Fix KASAN slab-out-of-bounds in cachefiles_set_volume_xattr
  cachefiles: unmark inode in use in error path
Linus Torvalds 2022-04-14 10:51:20 -07:00
commit ec9c57a732
11 changed files with 52 additions and 39 deletions


@@ -73,7 +73,7 @@ busy.
 If successful, the cache backend can then start setting up the cache. In the
 event that the initialisation fails, the cache backend should call::
-void fscache_relinquish_cookie(struct fscache_cache *cache);
+void fscache_relinquish_cache(struct fscache_cache *cache);
 to reset and discard the cookie.
@@ -110,9 +110,9 @@ to withdraw them, calling::
 on the cookie that each object belongs to. This schedules the specified cookie
 for withdrawal. This gets offloaded to a workqueue. The cache backend can
-test for completion by calling::
+wait for completion by calling::
-bool fscache_are_objects_withdrawn(struct fscache_cookie *cache);
+void fscache_wait_for_objects(struct fscache_cache *cache);
 Once all the cookies are withdrawn, a cache backend can withdraw all the
 volumes, calling::
@@ -125,7 +125,7 @@ outstanding accesses on the volume to complete before returning.
 When the the cache is completely withdrawn, fscache should be notified by
 calling::
-void fscache_cache_relinquish(struct fscache_cache *cache);
+void fscache_relinquish_cache(struct fscache_cache *cache);
 to clear fields in the cookie and discard the caller's ref on it.
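
The documentation hunks above describe the backend bring-up and withdrawal
flow. As a rough illustration only, the sketch below shows how a hypothetical
backend might use the corrected calls: the mycache_* names, the storage-setup
helper and the ops table are invented for this example and are not part of the
kernel tree; the fscache_* calls are used as the 5.18 backend API documents
them.

    /*
     * Hypothetical cache backend (names invented): bring the cache online
     * and fall back to fscache_relinquish_cache() if initialisation fails,
     * as the corrected documentation above describes.
     */
    static int mycache_bring_online(struct mycache *mc)
    {
        struct fscache_cache *cache;
        int ret;

        cache = fscache_acquire_cache(mc->tag);
        if (IS_ERR(cache))
            return PTR_ERR(cache);

        ret = mycache_setup_storage(mc);    /* backend-specific setup */
        if (ret == 0)
            ret = fscache_add_cache(cache, &mycache_cache_ops, mc);
        if (ret < 0) {
            /* Reset and discard the cache object on failure. */
            fscache_relinquish_cache(cache);
            return ret;
        }

        mc->cache = cache;
        return 0;
    }

    /*
     * Tear-down: after withdrawal has been scheduled for every cookie,
     * wait (rather than poll) for the withdrawals to finish, then hand
     * the cache object back to fscache.
     */
    static void mycache_take_offline(struct mycache *mc)
    {
        fscache_wait_for_objects(mc->cache);
        /* ... withdraw the volumes here ... */
        fscache_relinquish_cache(mc->cache);
        mc->cache = NULL;
    }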


@@ -404,22 +404,21 @@ schedule a write of that region::
 And if an error occurs before that point is reached, the marks can be removed
 by calling::
-void fscache_clear_page_bits(struct fscache_cookie *cookie,
-struct address_space *mapping,
+void fscache_clear_page_bits(struct address_space *mapping,
 loff_t start, size_t len,
 bool caching)
-In both of these functions, the cookie representing the cache object to be
-written to and a pointer to the mapping to which the source pages are attached
-are passed in; start and len indicate the size of the region that's going to be
-written (it doesn't have to align to page boundaries necessarily, but it does
-have to align to DIO boundaries on the backing filesystem). The caching
-parameter indicates if caching should be skipped, and if false, the functions
-do nothing.
+In these functions, a pointer to the mapping to which the source pages are
+attached is passed in and start and len indicate the size of the region that's
+going to be written (it doesn't have to align to page boundaries necessarily,
+but it does have to align to DIO boundaries on the backing filesystem). The
+caching parameter indicates if caching should be skipped, and if false, the
+functions do nothing.
-The write function takes some additional parameters: i_size indicates the size
-of the netfs file and term_func indicates an optional completion function, to
-which term_func_priv will be passed, along with the error or amount written.
+The write function takes some additional parameters: the cookie representing
+the cache object to be written to, i_size indicates the size of the netfs file
+and term_func indicates an optional completion function, to which
+term_func_priv will be passed, along with the error or amount written.
 Note that the write function will always run asynchronously and will unmark all
 the pages upon completion before calling term_func.
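
For the calling-convention change documented above, a minimal sketch of a
caller may help; it is modelled on the afs hunk that follows, and every
mynetfs_* name is invented for illustration (fscache_write_to_cache() keeps
the cookie, fscache_clear_page_bits() no longer takes it).

    /* Invented helper names; only the two fscache calls are real API. */
    static void mynetfs_write_done(void *priv, ssize_t transferred_or_error,
                                   bool was_async)
    {
        /* e.g. drop a "writeback to cache in flight" ref on the inode */
    }

    static void mynetfs_write_region_to_cache(struct fscache_cookie *cookie,
                                              struct address_space *mapping,
                                              loff_t start, size_t len,
                                              loff_t i_size, bool caching)
    {
        if (start >= i_size) {
            /* Region lies beyond the EOF: just clear the PG_fscache marks.
             * Note that no cookie is passed any more. */
            fscache_clear_page_bits(mapping, start, len, caching);
            return;
        }

        /* The write itself still needs the cookie. */
        fscache_write_to_cache(cookie, mapping, start, len, i_size,
                               mynetfs_write_done, NULL, caching);
    }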


@@ -616,8 +616,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
 _debug("write discard %x @%llx [%llx]", len, start, i_size);
 /* The dirty region was entirely beyond the EOF. */
-fscache_clear_page_bits(afs_vnode_cache(vnode),
-mapping, start, len, caching);
+fscache_clear_page_bits(mapping, start, len, caching);
 afs_pages_written_back(vnode, start, len);
 ret = 0;
 }


@@ -57,6 +57,16 @@ static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
 trace_cachefiles_mark_inactive(object, inode);
 }
+static void cachefiles_do_unmark_inode_in_use(struct cachefiles_object *object,
+struct dentry *dentry)
+{
+struct inode *inode = d_backing_inode(dentry);
+inode_lock(inode);
+__cachefiles_unmark_inode_in_use(object, dentry);
+inode_unlock(inode);
+}
 /*
 * Unmark a backing inode and tell cachefilesd that there's something that can
 * be culled.
@@ -68,9 +78,7 @@ void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
 struct inode *inode = file_inode(file);
 if (inode) {
-inode_lock(inode);
-__cachefiles_unmark_inode_in_use(object, file->f_path.dentry);
-inode_unlock(inode);
+cachefiles_do_unmark_inode_in_use(object, file->f_path.dentry);
 if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
 atomic_long_add(inode->i_blocks, &cache->b_released);
@@ -484,7 +492,7 @@ struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
 object, d_backing_inode(path.dentry), ret,
 cachefiles_trace_trunc_error);
 file = ERR_PTR(ret);
-goto out_dput;
+goto out_unuse;
 }
 }
@@ -494,15 +502,20 @@ struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
 trace_cachefiles_vfs_error(object, d_backing_inode(path.dentry),
 PTR_ERR(file),
 cachefiles_trace_open_error);
-goto out_dput;
+goto out_unuse;
 }
 if (unlikely(!file->f_op->read_iter) ||
 unlikely(!file->f_op->write_iter)) {
 fput(file);
 pr_notice("Cache does not support read_iter and write_iter\n");
 file = ERR_PTR(-EINVAL);
+goto out_unuse;
 }
+goto out_dput;
+out_unuse:
+cachefiles_do_unmark_inode_in_use(object, path.dentry);
 out_dput:
 dput(path.dentry);
 out:
@@ -590,14 +603,16 @@ static bool cachefiles_open_file(struct cachefiles_object *object,
 check_failed:
 fscache_cookie_lookup_negative(object->cookie);
 cachefiles_unmark_inode_in_use(object, file);
-if (ret == -ESTALE) {
-fput(file);
-dput(dentry);
+fput(file);
+dput(dentry);
+if (ret == -ESTALE)
 return cachefiles_create_file(object);
-}
+return false;
 error_fput:
 fput(file);
 error:
+cachefiles_do_unmark_inode_in_use(object, dentry);
 dput(dentry);
 return false;
 }
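
These hunks are the first of the two main fixes from the pull description:
the error paths now drop the inode in-use mark they took earlier. As a
generic illustration of the pattern being restored, and nothing more, the
shape is roughly as follows; every name in this sketch is invented and it is
not the cachefiles code itself.

    /* Invented example of the mark/unmark pairing the fix above restores. */
    static struct file *open_backing_file(struct my_object *object,
                                          struct dentry *dentry)
    {
        struct file *file;

        if (!mark_inode_in_use(object, dentry))
            return ERR_PTR(-EBUSY);

        file = open_the_file(object, dentry);
        if (IS_ERR(file))
            goto out_unmark;

        if (!file_is_usable(file)) {
            fput(file);
            file = ERR_PTR(-EINVAL);
            goto out_unmark;
        }
        return file;            /* success: the mark stays until close */

    out_unmark:
        /* Without this, the backing inode stays flagged as in use and can
         * never be culled or reused - the leak the patch above fixes. */
        unmark_inode_in_use(object, dentry);
        return file;
    }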


@@ -203,7 +203,7 @@ bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
 if (!buf)
 return false;
 buf->reserved = cpu_to_be32(0);
-memcpy(buf->data, p, len);
+memcpy(buf->data, p, volume->vcookie->coherency_len);
 ret = cachefiles_inject_write_error();
 if (ret == 0)
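
Reading this hunk together with the pull description, the slab-out-of-bounds
appears to come from reusing a length that already includes the xattr header
as the memcpy() source length, so the copy reads past the end of the
coherency buffer. A small userspace re-creation of that bug class follows;
the names are hypothetical and this is not the kernel code.

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct vol_xattr {            /* stand-in for the on-disk xattr header */
        uint32_t reserved;
        uint8_t  data[];
    };

    static struct vol_xattr *pack_xattr(const void *coherency,
                                        size_t coherency_len)
    {
        size_t len = coherency_len + sizeof(struct vol_xattr);
        struct vol_xattr *buf = malloc(len);

        if (!buf)
            return NULL;
        buf->reserved = 0;
        /* Copying 'len' bytes here would read sizeof(*buf) bytes past the
         * end of 'coherency' - the out-of-bounds access KASAN reported.
         * Copy only the payload length, as the fix above does. */
        memcpy(buf->data, coherency, coherency_len);
        return buf;
    }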


@@ -38,6 +38,3 @@ config FSCACHE_DEBUG
 enabled by setting bits in /sys/modules/fscache/parameter/debug.
 See Documentation/filesystems/caching/fscache.rst for more information.
-config FSCACHE_OLD_API
-bool


@@ -214,7 +214,7 @@ void fscache_relinquish_cache(struct fscache_cache *cache)
 cache->ops = NULL;
 cache->cache_priv = NULL;
-smp_store_release(&cache->state, FSCACHE_CACHE_IS_NOT_PRESENT);
+fscache_set_cache_state(cache, FSCACHE_CACHE_IS_NOT_PRESENT);
 fscache_put_cache(cache, where);
 }
 EXPORT_SYMBOL(fscache_relinquish_cache);


@@ -30,7 +30,7 @@ static DEFINE_SPINLOCK(fscache_cookie_lru_lock);
 DEFINE_TIMER(fscache_cookie_lru_timer, fscache_cookie_lru_timed_out);
 static DECLARE_WORK(fscache_cookie_lru_work, fscache_cookie_lru_worker);
 static const char fscache_cookie_states[FSCACHE_COOKIE_STATE__NR] = "-LCAIFUWRD";
-unsigned int fscache_lru_cookie_timeout = 10 * HZ;
+static unsigned int fscache_lru_cookie_timeout = 10 * HZ;
 void fscache_print_cookie(struct fscache_cookie *cookie, char prefix)
 {
@@ -1069,6 +1069,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
 }
 EXPORT_SYMBOL(__fscache_invalidate);
+#ifdef CONFIG_PROC_FS
 /*
 * Generate a list of extant cookies in /proc/fs/fscache/cookies
 */
@@ -1145,3 +1146,4 @@ const struct seq_operations fscache_cookies_seq_ops = {
 .stop = fscache_cookies_seq_stop,
 .show = fscache_cookies_seq_show,
 };
+#endif


@@ -56,7 +56,9 @@ static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
 * cookie.c
 */
 extern struct kmem_cache *fscache_cookie_jar;
+#ifdef CONFIG_PROC_FS
 extern const struct seq_operations fscache_cookies_seq_ops;
+#endif
 extern struct timer_list fscache_cookie_lru_timer;
 extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
@@ -137,7 +139,9 @@ int fscache_stats_show(struct seq_file *m, void *v);
 /*
 * volume.c
 */
+#ifdef CONFIG_PROC_FS
 extern const struct seq_operations fscache_volumes_seq_ops;
+#endif
 struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
 enum fscache_volume_trace where);


@@ -235,8 +235,7 @@ static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
 {
 struct fscache_write_request *wreq = priv;
-fscache_clear_page_bits(fscache_cres_cookie(&wreq->cache_resources),
-wreq->mapping, wreq->start, wreq->len,
+fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
 wreq->set_bits);
 if (wreq->term_func)
@@ -296,7 +295,7 @@ abandon_end:
 abandon_free:
 kfree(wreq);
 abandon:
-fscache_clear_page_bits(cookie, mapping, start, len, cond);
+fscache_clear_page_bits(mapping, start, len, cond);
 if (term_func)
 term_func(term_func_priv, ret, false);
 }


@@ -573,7 +573,6 @@ int fscache_write(struct netfs_cache_resources *cres,
 /**
 * fscache_clear_page_bits - Clear the PG_fscache bits from a set of pages
-* @cookie: The cookie representing the cache object
 * @mapping: The netfs inode to use as the source
 * @start: The start position in @mapping
 * @len: The amount of data to unlock
@@ -582,8 +581,7 @@ int fscache_write(struct netfs_cache_resources *cres,
 * Clear the PG_fscache flag from a sequence of pages and wake up anyone who's
 * waiting.
 */
-static inline void fscache_clear_page_bits(struct fscache_cookie *cookie,
-struct address_space *mapping,
+static inline void fscache_clear_page_bits(struct address_space *mapping,
 loff_t start, size_t len,
 bool caching)
 {