vfs-6.10-rc4.fixes

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCZmb/TwAKCRCRxhvAZXjc
 om/QAQCaXBrLRBLS0lvpmGfyFhBC+N+hNLEw5bzA0Dkm39nCUQEAwaMCWYzzSLMK
 SfRAqKYkfQKSSHIGOq63ThZWkuFIMgQ=
 =Z5xj
 -----END PGP SIGNATURE-----

Merge tag 'vfs-6.10-rc4.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull vfs fixes from Christian Brauner:
 "Misc:
   - Restore debugfs behavior of ignoring unknown mount options
   - Fix kernel doc for netfs_wait_for_outstanding_io()
   - Fix struct statx comment after new addition for this cycle
   - Fix a check in find_next_fd()

  iomap:
   - Fix data zeroing behavior when an extent spans the block that
     contains i_size
   - Restore i_size increasing in iomap_write_end() for now to avoid
     stale data exposure on xfs with a realtime device

  Cachefiles:
   - Remove unneeded fdtable.h include
   - Improve trace output for cachefiles_obj_{get,put}_ondemand_fd()
   - Remove requests from the request list to prevent accessing already
     freed requests
   - Fix UAF when issuing restore command while the daemon is still
     alive by adding an additional reference count to requests
   - Fix UAF by grabbing a reference during xarray lookup with xa_lock()
     held
   - Simplify error handling in cachefiles_ondemand_daemon_read()
   - Add consistency checks for read and open requests to avoid crashes
   - Add a spinlock to protect ondemand_id variable which is used to
     determine whether an anonymous cachefiles fd has already been
     closed
   - Make on-demand reads killable, allowing a broken cachefiles daemon
     to be handled better
   - Flush all requests after the cache has been marked dead via
     CACHEFILES_DEAD to avoid hung tasks
   - Ensure that closed requests are marked as such to avoid reusing
     them with a reopen request
   - Defer fd_install() until after copy_to_user() has succeeded and
     thereby get rid of having to use close_fd() (the general pattern is
     sketched after this list)
   - Ensure that anonymous cachefiles on-demand fds are reused while
     they are valid to avoid pinning already freed cookies"

* tag 'vfs-6.10-rc4.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  iomap: Fix iomap_adjust_read_range for plen calculation
  iomap: keep on increasing i_size in iomap_write_end()
  cachefiles: remove unneeded include of <linux/fdtable.h>
  fs/file: fix the check in find_next_fd()
  cachefiles: make on-demand read killable
  cachefiles: flush all requests after setting CACHEFILES_DEAD
  cachefiles: Set object to close if ondemand_id < 0 in copen
  cachefiles: defer exposing anon_fd until after copy_to_user() succeeds
  cachefiles: never get a new anonymous fd if ondemand_id is valid
  cachefiles: add spin_lock for cachefiles_ondemand_info
  cachefiles: add consistency check for copen/cread
  cachefiles: remove err_put_fd label in cachefiles_ondemand_daemon_read()
  cachefiles: fix slab-use-after-free in cachefiles_ondemand_daemon_read()
  cachefiles: fix slab-use-after-free in cachefiles_ondemand_get_fd()
  cachefiles: remove requests from xarray during flushing requests
  cachefiles: add output string to cachefiles_obj_[get|put]_ondemand_fd
  statx: Update offset commentary for struct statx
  netfs: fix kernel doc for netfs_wait_for_outstanding_io()
  debugfs: continue to ignore unknown mount options
Committed by Linus Torvalds on 2024-06-11 12:04:21 -07:00, commit 2ef5971ff3
9 changed files with 217 additions and 95 deletions

fs/cachefiles/daemon.c

@@ -133,7 +133,7 @@ static int cachefiles_daemon_open(struct inode *inode, struct file *file)
return 0;
}
static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
void cachefiles_flush_reqs(struct cachefiles_cache *cache)
{
struct xarray *xa = &cache->reqs;
struct cachefiles_req *req;
@@ -159,6 +159,7 @@ static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
xa_for_each(xa, index, req) {
req->error = -EIO;
complete(&req->done);
__xa_erase(xa, index);
}
xa_unlock(xa);

fs/cachefiles/internal.h

@@ -55,6 +55,7 @@ struct cachefiles_ondemand_info {
int ondemand_id;
enum cachefiles_object_state state;
struct cachefiles_object *object;
spinlock_t lock;
};
/*
@@ -138,6 +139,7 @@ static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
struct cachefiles_req {
struct cachefiles_object *object;
struct completion done;
refcount_t ref;
int error;
struct cachefiles_msg msg;
};
@@ -186,6 +188,7 @@ extern int cachefiles_has_space(struct cachefiles_cache *cache,
* daemon.c
*/
extern const struct file_operations cachefiles_daemon_fops;
extern void cachefiles_flush_reqs(struct cachefiles_cache *cache);
extern void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache);
extern void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache);
@@ -424,6 +427,8 @@ do { \
pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__); \
fscache_io_error((___cache)->cache); \
set_bit(CACHEFILES_DEAD, &(___cache)->flags); \
if (cachefiles_in_ondemand_mode(___cache)) \
cachefiles_flush_reqs(___cache); \
} while (0)
#define cachefiles_io_error_obj(object, FMT, ...) \

fs/cachefiles/ondemand.c

@@ -1,22 +1,42 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/fdtable.h>
#include <linux/anon_inodes.h>
#include <linux/uio.h>
#include "internal.h"
struct ondemand_anon_file {
struct file *file;
int fd;
};
static inline void cachefiles_req_put(struct cachefiles_req *req)
{
if (refcount_dec_and_test(&req->ref))
kfree(req);
}
static int cachefiles_ondemand_fd_release(struct inode *inode,
struct file *file)
{
struct cachefiles_object *object = file->private_data;
struct cachefiles_cache *cache = object->volume->cache;
struct cachefiles_ondemand_info *info = object->ondemand;
int object_id = info->ondemand_id;
struct cachefiles_cache *cache;
struct cachefiles_ondemand_info *info;
int object_id;
struct cachefiles_req *req;
XA_STATE(xas, &cache->reqs, 0);
XA_STATE(xas, NULL, 0);
if (!object)
return 0;
info = object->ondemand;
cache = object->volume->cache;
xas.xa = &cache->reqs;
xa_lock(&cache->reqs);
spin_lock(&info->lock);
object_id = info->ondemand_id;
info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
cachefiles_ondemand_set_object_close(object);
spin_unlock(&info->lock);
/* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */
xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
@@ -76,12 +96,12 @@ static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
}
static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
unsigned long id)
{
struct cachefiles_object *object = filp->private_data;
struct cachefiles_cache *cache = object->volume->cache;
struct cachefiles_req *req;
unsigned long id;
XA_STATE(xas, &cache->reqs, id);
if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
return -EINVAL;
@@ -89,10 +109,15 @@ static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
return -EOPNOTSUPP;
id = arg;
req = xa_erase(&cache->reqs, id);
if (!req)
xa_lock(&cache->reqs);
req = xas_load(&xas);
if (!req || req->msg.opcode != CACHEFILES_OP_READ ||
req->object != object) {
xa_unlock(&cache->reqs);
return -EINVAL;
}
xas_store(&xas, NULL);
xa_unlock(&cache->reqs);
trace_cachefiles_ondemand_cread(object, id);
complete(&req->done);
@@ -116,10 +141,12 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
{
struct cachefiles_req *req;
struct fscache_cookie *cookie;
struct cachefiles_ondemand_info *info;
char *pid, *psize;
unsigned long id;
long size;
int ret;
XA_STATE(xas, &cache->reqs, 0);
if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
return -EOPNOTSUPP;
@@ -143,10 +170,18 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
if (ret)
return ret;
req = xa_erase(&cache->reqs, id);
if (!req)
xa_lock(&cache->reqs);
xas.xa_index = id;
req = xas_load(&xas);
if (!req || req->msg.opcode != CACHEFILES_OP_OPEN ||
!req->object->ondemand->ondemand_id) {
xa_unlock(&cache->reqs);
return -EINVAL;
}
xas_store(&xas, NULL);
xa_unlock(&cache->reqs);
info = req->object->ondemand;
/* fail OPEN request if copen format is invalid */
ret = kstrtol(psize, 0, &size);
if (ret) {
@@ -166,6 +201,32 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
goto out;
}
spin_lock(&info->lock);
/*
* The anonymous fd was closed before copen ? Fail the request.
*
* t1 | t2
* ---------------------------------------------------------
* cachefiles_ondemand_copen
* req = xa_erase(&cache->reqs, id)
* // Anon fd is maliciously closed.
* cachefiles_ondemand_fd_release
* xa_lock(&cache->reqs)
* cachefiles_ondemand_set_object_close(object)
* xa_unlock(&cache->reqs)
* cachefiles_ondemand_set_object_open
* // No one will ever close it again.
* cachefiles_ondemand_daemon_read
* cachefiles_ondemand_select_req
*
* Get a read req but its fd is already closed. The daemon can't
* issue a cread ioctl with an closed fd, then hung.
*/
if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED) {
spin_unlock(&info->lock);
req->error = -EBADFD;
goto out;
}
cookie = req->object->cookie;
cookie->object_size = size;
if (size)
@@ -175,9 +236,15 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
trace_cachefiles_ondemand_copen(req->object, id, size);
cachefiles_ondemand_set_object_open(req->object);
spin_unlock(&info->lock);
wake_up_all(&cache->daemon_pollwq);
out:
spin_lock(&info->lock);
/* Need to set object close to avoid reopen status continuing */
if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED)
cachefiles_ondemand_set_object_close(req->object);
spin_unlock(&info->lock);
complete(&req->done);
return ret;
}
@@ -205,14 +272,14 @@ int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args)
return 0;
}
static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
static int cachefiles_ondemand_get_fd(struct cachefiles_req *req,
struct ondemand_anon_file *anon_file)
{
struct cachefiles_object *object;
struct cachefiles_cache *cache;
struct cachefiles_open *load;
struct file *file;
u32 object_id;
int ret, fd;
int ret;
object = cachefiles_grab_object(req->object,
cachefiles_obj_get_ondemand_fd);
@@ -224,35 +291,53 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
if (ret < 0)
goto err;
fd = get_unused_fd_flags(O_WRONLY);
if (fd < 0) {
ret = fd;
anon_file->fd = get_unused_fd_flags(O_WRONLY);
if (anon_file->fd < 0) {
ret = anon_file->fd;
goto err_free_id;
}
file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops,
object, O_WRONLY);
if (IS_ERR(file)) {
ret = PTR_ERR(file);
anon_file->file = anon_inode_getfile("[cachefiles]",
&cachefiles_ondemand_fd_fops, object, O_WRONLY);
if (IS_ERR(anon_file->file)) {
ret = PTR_ERR(anon_file->file);
goto err_put_fd;
}
file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
fd_install(fd, file);
spin_lock(&object->ondemand->lock);
if (object->ondemand->ondemand_id > 0) {
spin_unlock(&object->ondemand->lock);
/* Pair with check in cachefiles_ondemand_fd_release(). */
anon_file->file->private_data = NULL;
ret = -EEXIST;
goto err_put_file;
}
anon_file->file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
load = (void *)req->msg.data;
load->fd = fd;
load->fd = anon_file->fd;
object->ondemand->ondemand_id = object_id;
spin_unlock(&object->ondemand->lock);
cachefiles_get_unbind_pincount(cache);
trace_cachefiles_ondemand_open(object, &req->msg, load);
return 0;
err_put_file:
fput(anon_file->file);
anon_file->file = NULL;
err_put_fd:
put_unused_fd(fd);
put_unused_fd(anon_file->fd);
anon_file->fd = ret;
err_free_id:
xa_erase(&cache->ondemand_ids, object_id);
err:
spin_lock(&object->ondemand->lock);
/* Avoid marking an opened object as closed. */
if (object->ondemand->ondemand_id <= 0)
cachefiles_ondemand_set_object_close(object);
spin_unlock(&object->ondemand->lock);
cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
return ret;
}
@@ -294,14 +379,28 @@ static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xa
return NULL;
}
static inline bool cachefiles_ondemand_finish_req(struct cachefiles_req *req,
struct xa_state *xas, int err)
{
if (unlikely(!xas || !req))
return false;
if (xa_cmpxchg(xas->xa, xas->xa_index, req, NULL, 0) != req)
return false;
req->error = err;
complete(&req->done);
return true;
}
ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
char __user *_buffer, size_t buflen)
{
struct cachefiles_req *req;
struct cachefiles_msg *msg;
unsigned long id = 0;
size_t n;
int ret = 0;
struct ondemand_anon_file anon_file;
XA_STATE(xas, &cache->reqs, cache->req_id_next);
xa_lock(&cache->reqs);
@@ -330,42 +429,37 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
cache->req_id_next = xas.xa_index + 1;
refcount_inc(&req->ref);
cachefiles_grab_object(req->object, cachefiles_obj_get_read_req);
xa_unlock(&cache->reqs);
id = xas.xa_index;
if (msg->opcode == CACHEFILES_OP_OPEN) {
ret = cachefiles_ondemand_get_fd(req);
if (ret) {
cachefiles_ondemand_set_object_close(req->object);
goto error;
}
ret = cachefiles_ondemand_get_fd(req, &anon_file);
if (ret)
goto out;
}
msg->msg_id = id;
msg->msg_id = xas.xa_index;
msg->object_id = req->object->ondemand->ondemand_id;
if (copy_to_user(_buffer, msg, n) != 0) {
if (copy_to_user(_buffer, msg, n) != 0)
ret = -EFAULT;
goto err_put_fd;
if (msg->opcode == CACHEFILES_OP_OPEN) {
if (ret < 0) {
fput(anon_file.file);
put_unused_fd(anon_file.fd);
goto out;
}
fd_install(anon_file.fd, anon_file.file);
}
/* CLOSE request has no reply */
if (msg->opcode == CACHEFILES_OP_CLOSE) {
xa_erase(&cache->reqs, id);
complete(&req->done);
}
return n;
err_put_fd:
if (msg->opcode == CACHEFILES_OP_OPEN)
close_fd(((struct cachefiles_open *)msg->data)->fd);
error:
xa_erase(&cache->reqs, id);
req->error = ret;
complete(&req->done);
return ret;
out:
cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
/* Remove error request and CLOSE request has no reply */
if (ret || msg->opcode == CACHEFILES_OP_CLOSE)
cachefiles_ondemand_finish_req(req, &xas, ret);
cachefiles_req_put(req);
return ret ? ret : n;
}
typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);
@@ -395,6 +489,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
goto out;
}
refcount_set(&req->ref, 1);
req->object = object;
init_completion(&req->done);
req->msg.opcode = opcode;
@@ -454,9 +549,19 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
goto out;
wake_up_all(&cache->daemon_pollwq);
wait_for_completion(&req->done);
ret = req->error;
kfree(req);
wait:
ret = wait_for_completion_killable(&req->done);
if (!ret) {
ret = req->error;
} else {
ret = -EINTR;
if (!cachefiles_ondemand_finish_req(req, &xas, ret)) {
/* Someone will complete it soon. */
cpu_relax();
goto wait;
}
}
cachefiles_req_put(req);
return ret;
out:
/* Reset the object to close state in error handling path.
@@ -578,6 +683,7 @@ int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
return -ENOMEM;
object->ondemand->object = object;
spin_lock_init(&object->ondemand->lock);
INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
return 0;
}

fs/debugfs/inode.c

@@ -107,8 +107,16 @@ static int debugfs_parse_param(struct fs_context *fc, struct fs_parameter *param
int opt;
opt = fs_parse(fc, debugfs_param_specs, param, &result);
if (opt < 0)
if (opt < 0) {
/*
* We might like to report bad mount options here; but
* traditionally debugfs has ignored all mount options
*/
if (opt == -ENOPARAM)
return 0;
return opt;
}
switch (opt) {
case Opt_uid:

fs/file.c

@@ -486,12 +486,12 @@ struct files_struct init_files = {
static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
unsigned int maxfd = fdt->max_fds;
unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
unsigned int maxbit = maxfd / BITS_PER_LONG;
unsigned int bitbit = start / BITS_PER_LONG;
bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
if (bitbit > maxfd)
if (bitbit >= maxfd)
return maxfd;
if (bitbit > start)
start = bitbit;

fs/iomap/buffered-io.c

@@ -241,6 +241,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
unsigned block_size = (1 << block_bits);
size_t poff = offset_in_folio(folio, *pos);
size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
size_t orig_plen = plen;
unsigned first = poff >> block_bits;
unsigned last = (poff + plen - 1) >> block_bits;
@@ -277,7 +278,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
* handle both halves separately so that we properly zero data in the
* page cache for blocks that are entirely outside of i_size.
*/
if (orig_pos <= isize && orig_pos + length > isize) {
if (orig_pos <= isize && orig_pos + orig_plen > isize) {
unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
if (first <= end && last > end)
@@ -877,22 +878,37 @@ static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
size_t copied, struct folio *folio)
{
const struct iomap *srcmap = iomap_iter_srcmap(iter);
loff_t old_size = iter->inode->i_size;
size_t written;
if (srcmap->type == IOMAP_INLINE) {
iomap_write_end_inline(iter, folio, pos, copied);
return true;
}
if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
size_t bh_written;
bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
written = copied;
} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
written = block_write_end(NULL, iter->inode->i_mapping, pos,
len, copied, &folio->page, NULL);
WARN_ON_ONCE(bh_written != copied && bh_written != 0);
return bh_written == copied;
WARN_ON_ONCE(written != copied && written != 0);
} else {
written = __iomap_write_end(iter->inode, pos, len, copied,
folio) ? copied : 0;
}
return __iomap_write_end(iter->inode, pos, len, copied, folio);
/*
* Update the in-memory inode size after copying the data into the page
* cache. It's up to the file system to write the updated size to disk,
* preferably after I/O completion so that no stale data is exposed.
* Only once that's done can we unlock and release the folio.
*/
if (pos + written > old_size) {
i_size_write(iter->inode, pos + written);
iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
}
__iomap_put_folio(iter, pos, written, folio);
if (old_size < pos)
pagecache_isize_extended(iter->inode, old_size, pos);
return written == copied;
}
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
@@ -907,7 +923,6 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
do {
struct folio *folio;
loff_t old_size;
size_t offset; /* Offset into folio */
size_t bytes; /* Bytes to write to folio */
size_t copied; /* Bytes copied from user */
@@ -959,23 +974,6 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
written = iomap_write_end(iter, pos, bytes, copied, folio) ?
copied : 0;
/*
* Update the in-memory inode size after copying the data into
* the page cache. It's up to the file system to write the
* updated size to disk, preferably after I/O completion so that
* no stale data is exposed. Only once that's done can we
* unlock and release the folio.
*/
old_size = iter->inode->i_size;
if (pos + written > old_size) {
i_size_write(iter->inode, pos + written);
iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
}
__iomap_put_folio(iter, pos, written, folio);
if (old_size < pos)
pagecache_isize_extended(iter->inode, old_size, pos);
cond_resched();
if (unlikely(written == 0)) {
/*
@@ -1346,7 +1344,6 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
bytes = folio_size(folio) - offset;
ret = iomap_write_end(iter, pos, bytes, bytes, folio);
__iomap_put_folio(iter, pos, bytes, folio);
if (WARN_ON_ONCE(!ret))
return -EIO;
@@ -1412,7 +1409,6 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
folio_mark_accessed(folio);
ret = iomap_write_end(iter, pos, bytes, bytes, folio);
__iomap_put_folio(iter, pos, bytes, folio);
if (WARN_ON_ONCE(!ret))
return -EIO;

include/linux/netfs.h

@@ -521,7 +521,7 @@ static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx)
/**
* netfs_wait_for_outstanding_io - Wait for outstanding I/O to complete
* @ctx: The netfs inode to wait on
* @inode: The netfs inode to wait on
*
* Wait for outstanding I/O requests of any type to complete. This is intended
* to be called from inode eviction routines. This makes sure that any

include/trace/events/cachefiles.h

@@ -33,6 +33,8 @@ enum cachefiles_obj_ref_trace {
cachefiles_obj_see_withdrawal,
cachefiles_obj_get_ondemand_fd,
cachefiles_obj_put_ondemand_fd,
cachefiles_obj_get_read_req,
cachefiles_obj_put_read_req,
};
enum fscache_why_object_killed {
@@ -127,7 +129,11 @@ enum cachefiles_error_trace {
EM(cachefiles_obj_see_lookup_cookie, "SEE lookup_cookie") \
EM(cachefiles_obj_see_lookup_failed, "SEE lookup_failed") \
EM(cachefiles_obj_see_withdraw_cookie, "SEE withdraw_cookie") \
E_(cachefiles_obj_see_withdrawal, "SEE withdrawal")
EM(cachefiles_obj_see_withdrawal, "SEE withdrawal") \
EM(cachefiles_obj_get_ondemand_fd, "GET ondemand_fd") \
EM(cachefiles_obj_put_ondemand_fd, "PUT ondemand_fd") \
EM(cachefiles_obj_get_read_req, "GET read_req") \
E_(cachefiles_obj_put_read_req, "PUT read_req")
#define cachefiles_coherency_traces \
EM(cachefiles_coherency_check_aux, "BAD aux ") \

include/uapi/linux/statx.h

@@ -126,8 +126,8 @@ struct statx {
__u64 stx_mnt_id;
__u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */
__u32 stx_dio_offset_align; /* File offset alignment for direct I/O */
__u64 stx_subvol; /* Subvolume identifier */
/* 0xa0 */
__u64 stx_subvol; /* Subvolume identifier */
__u64 __spare3[11]; /* Spare space for future expansion */
/* 0x100 */
};