vfs-6.10-rc7.fixes

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCZoJSmAAKCRCRxhvAZXjc
 ot3tAQCUjJh7jZvmmkUV0pF51JI1jEumk8d8vPORGsm1A6oMawEA+tyiWYkcIU3t
 JUFGZSDce5MuJEI/frDPb98CW2dLkQA=
 =fVtx
 -----END PGP SIGNATURE-----

Merge tag 'vfs-6.10-rc7.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull vfs fixes from Christian Brauner:
 "Misc:

   - Don't misleadingly warn during filesystem thaw operations.

     It's possible for a block device that was frozen before it was
     mounted to cause a failing thaw operation if someone concurrently
     tried to mount it while that thaw operation was issued and the
     device had already been temporarily claimed for the mount (the
     mount will of course be aborted because the device is frozen).

  netfs:

   - Fix io_uring-based write-through. Make sure that the total request
     length is correctly set.

   - Fix partial writes to folio tail.

   - Remove some xarray helpers that were intended for bounce buffers,
     which got deferred to a later patch series.

   - Make netfs_page_mkwrite() check whether folio->mapping is valid
     after acquiring the folio lock.

   - Make netfs_page_mkwrite() flush conflicting data instead of waiting.

  fsnotify:

   - Ensure that fsnotify creation events are generated before fsnotify
     open events when a file is created via ->atomic_open(). The
     ordering was broken before.

   - Ensure that no fsnotify events are generated for O_PATH file
     descriptors. While no fsnotify open events were generated, fsnotify
     close events were. Make it consistent and don't produce any"

* tag 'vfs-6.10-rc7.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  netfs: Fix netfs_page_mkwrite() to flush conflicting data, not wait
  netfs: Fix netfs_page_mkwrite() to check folio->mapping is valid
  netfs: Delete some xarray-wangling functions that aren't used
  netfs: Fix early issue of write op on partial write to folio tail
  netfs: Fix io_uring based write-through
  vfs: generate FS_CREATE before FS_OPEN when ->atomic_open used.
  fsnotify: Do not generate events for O_PATH file descriptors
  fs: don't misleadingly warn during thaw operations
commit 9b458a2600
Author: Linus Torvalds
Date:   2024-07-01 09:22:08 -07:00

 9 files changed, 52 insertions(+), 106 deletions(-)

diff --git a/fs/namei.c b/fs/namei.c

@@ -3572,8 +3572,12 @@ static const char *open_last_lookups(struct nameidata *nd,
 	else
 		inode_lock_shared(dir->d_inode);
 	dentry = lookup_open(nd, file, op, got_write);
-	if (!IS_ERR(dentry) && (file->f_mode & FMODE_CREATED))
-		fsnotify_create(dir->d_inode, dentry);
+	if (!IS_ERR(dentry)) {
+		if (file->f_mode & FMODE_CREATED)
+			fsnotify_create(dir->d_inode, dentry);
+		if (file->f_mode & FMODE_OPENED)
+			fsnotify_open(file);
+	}
 	if (open_flag & O_CREAT)
 		inode_unlock(dir->d_inode);
 	else
@@ -3700,6 +3704,8 @@ int vfs_tmpfile(struct mnt_idmap *idmap,
 	mode = vfs_prepare_mode(idmap, dir, mode, mode, mode);
 	error = dir->i_op->tmpfile(idmap, dir, file, mode);
 	dput(child);
+	if (file->f_mode & FMODE_OPENED)
+		fsnotify_open(file);
 	if (error)
 		return error;
 	/* Don't check for other permissions, the inode was just created */
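
The user-visible effect of the reordering can be checked from userspace
with inotify. A minimal sketch (hypothetical test program, not part of
the series; the watched directory /tmp/watched and the file name are
made up): after the fix, IN_CREATE is reported before IN_OPEN even on
filesystems that implement ->atomic_open().

/* inotify-order.c: print the order of create/open events. */
#include <sys/inotify.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int ifd = inotify_init();

	inotify_add_watch(ifd, "/tmp/watched", IN_CREATE | IN_OPEN);

	/* Both events are queued by the time open() returns. */
	int fd = open("/tmp/watched/newfile", O_CREAT | O_WRONLY, 0644);
	close(fd);

	ssize_t len = read(ifd, buf, sizeof(buf));
	for (char *p = buf; p < buf + len; ) {
		struct inotify_event *ev = (struct inotify_event *)p;

		printf("%s: mask=%#x\n", ev->len ? ev->name : "(dir)", ev->mask);
		p += sizeof(*ev) + ev->len;
	}
	close(ifd);
	return 0;
}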

diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c

@@ -523,6 +523,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
 	struct netfs_group *group;
 	struct folio *folio = page_folio(vmf->page);
 	struct file *file = vmf->vma->vm_file;
+	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = file_inode(file);
 	struct netfs_inode *ictx = netfs_inode(inode);
 	vm_fault_t ret = VM_FAULT_RETRY;
@@ -534,6 +535,11 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
 	if (folio_lock_killable(folio) < 0)
 		goto out;
+	if (folio->mapping != mapping) {
+		folio_unlock(folio);
+		ret = VM_FAULT_NOPAGE;
+		goto out;
+	}
 	if (folio_wait_writeback_killable(folio)) {
 		ret = VM_FAULT_LOCKED;
@@ -549,7 +555,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
 	group = netfs_folio_group(folio);
 	if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) {
 		folio_unlock(folio);
-		err = filemap_fdatawait_range(inode->i_mapping,
+		err = filemap_fdatawrite_range(mapping,
 					      folio_pos(folio),
 					      folio_pos(folio) + folio_size(folio));
 		switch (err) {
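
The mapping recheck added above is the usual ->page_mkwrite()
revalidation idiom: between the fault and taking the folio lock, the
folio may be truncated or invalidated, after which folio->mapping no
longer matches the file's mapping and the fault must simply be retried.
A condensed, illustrative kernel-style sketch of the idiom (not the
actual netfs code, which additionally handles writeback waits and
fscache groups as in the hunks above):

#include <linux/mm.h>
#include <linux/pagemap.h>

static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;

	folio_lock(folio);
	if (folio->mapping != mapping) {
		/* Lost a race with truncate/invalidate: retry the fault. */
		folio_unlock(folio);
		return VM_FAULT_NOPAGE;
	}

	folio_mark_dirty(folio);
	/* Returning VM_FAULT_LOCKED hands the still-locked folio back. */
	return VM_FAULT_LOCKED;
}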

diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c

@@ -92,8 +92,9 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
 	__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
 	if (async)
 		wreq->iocb = iocb;
+	wreq->len = iov_iter_count(&wreq->io_iter);
 	wreq->cleanup = netfs_cleanup_dio_write;
-	ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), iov_iter_count(&wreq->io_iter));
+	ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len);
 	if (ret < 0) {
 		_debug("begin = %zd", ret);
 		goto out;
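
The length fix above matters for asynchronous submission, e.g. via
io_uring: the completion side reports the outcome from the request
itself, so the total length has to be recorded in wreq->len up front. A
hedged userspace sketch of the triggering I/O pattern, assuming
liburing and a netfs-backed mount at the made-up path /mnt/netfs
(whether a given write takes the write-through or unbuffered route
depends on the filesystem and its mount options):

/* uring-write.c: one async write submitted through io_uring. */
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	void *buf;

	/* Aligned buffer so the same test also works with O_DIRECT. */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 'x', 4096);

	int fd = open("/mnt/netfs/testfile", O_WRONLY | O_CREAT | O_SYNC, 0644);
	if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	io_uring_prep_write(sqe, fd, buf, 4096, 0);
	io_uring_submit(&ring);

	io_uring_wait_cqe(&ring, &cqe);
	/* With the fix, res reports the full 4096 bytes written. */
	printf("write result: %d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}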

diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h

@@ -63,15 +63,6 @@ static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
 /*
  * misc.c
  */
-#define NETFS_FLAG_PUT_MARK		BIT(0)
-#define NETFS_FLAG_PAGECACHE_MARK	BIT(1)
-int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
-			    struct folio *folio, unsigned int flags,
-			    gfp_t gfp_mask);
-int netfs_add_folios_to_buffer(struct xarray *buffer,
-			       struct address_space *mapping,
-			       pgoff_t index, pgoff_t to, gfp_t gfp_mask);
-void netfs_clear_buffer(struct xarray *buffer);
 
 /*
  * objects.c

diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c

@@ -8,87 +8,6 @@
 #include <linux/swap.h>
 #include "internal.h"
 
-/*
- * Attach a folio to the buffer and maybe set marks on it to say that we need
- * to put the folio later and twiddle the pagecache flags.
- */
-int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
-			    struct folio *folio, unsigned int flags,
-			    gfp_t gfp_mask)
-{
-	XA_STATE_ORDER(xas, xa, index, folio_order(folio));
-
-retry:
-	xas_lock(&xas);
-	for (;;) {
-		xas_store(&xas, folio);
-		if (!xas_error(&xas))
-			break;
-		xas_unlock(&xas);
-		if (!xas_nomem(&xas, gfp_mask))
-			return xas_error(&xas);
-		goto retry;
-	}
-
-	if (flags & NETFS_FLAG_PUT_MARK)
-		xas_set_mark(&xas, NETFS_BUF_PUT_MARK);
-	if (flags & NETFS_FLAG_PAGECACHE_MARK)
-		xas_set_mark(&xas, NETFS_BUF_PAGECACHE_MARK);
-	xas_unlock(&xas);
-	return xas_error(&xas);
-}
-
-/*
- * Create the specified range of folios in the buffer attached to the read
- * request. The folios are marked with NETFS_BUF_PUT_MARK so that we know that
- * these need freeing later.
- */
-int netfs_add_folios_to_buffer(struct xarray *buffer,
-			       struct address_space *mapping,
-			       pgoff_t index, pgoff_t to, gfp_t gfp_mask)
-{
-	struct folio *folio;
-	int ret;
-
-	if (to + 1 == index) /* Page range is inclusive */
-		return 0;
-
-	do {
-		/* TODO: Figure out what order folio can be allocated here */
-		folio = filemap_alloc_folio(readahead_gfp_mask(mapping), 0);
-		if (!folio)
-			return -ENOMEM;
-		folio->index = index;
-		ret = netfs_xa_store_and_mark(buffer, index, folio,
-					      NETFS_FLAG_PUT_MARK, gfp_mask);
-		if (ret < 0) {
-			folio_put(folio);
-			return ret;
-		}
-
-		index += folio_nr_pages(folio);
-	} while (index <= to && index != 0);
-
-	return 0;
-}
-
-/*
- * Clear an xarray buffer, putting a ref on the folios that have
- * NETFS_BUF_PUT_MARK set.
- */
-void netfs_clear_buffer(struct xarray *buffer)
-{
-	struct folio *folio;
-	XA_STATE(xas, buffer, 0);
-
-	rcu_read_lock();
-	xas_for_each_marked(&xas, folio, ULONG_MAX, NETFS_BUF_PUT_MARK) {
-		folio_put(folio);
-	}
-	rcu_read_unlock();
-	xa_destroy(buffer);
-}
-
 /**
  * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
  * @mapping: The mapping the folio belongs to.

diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c

@@ -483,7 +483,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
 	if (!debug)
 		kdebug("R=%x: No submit", wreq->debug_id);
 
-	if (flen < fsize)
+	if (foff + flen < fsize)
 		for (int s = 0; s < NR_IO_STREAMS; s++)
 			netfs_issue_write(wreq, &wreq->io_streams[s]);
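
As a worked example of the corrected condition: with a 4 KiB folio
(fsize == 4096), a 1 KiB write at offset 3072 within the folio gives
foff + flen == 4096, i.e. it does reach the folio tail and must not
trigger an early issue of the write op, whereas the old flen < fsize
test (1024 < 4096) would have issued it early anyway.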

diff --git a/fs/open.c b/fs/open.c

@@ -1004,11 +1004,6 @@ static int do_dentry_open(struct file *f,
 		}
 	}
 
-	/*
-	 * Once we return a file with FMODE_OPENED, __fput() will call
-	 * fsnotify_close(), so we need fsnotify_open() here for symmetry.
-	 */
-	fsnotify_open(f);
 	return 0;
 
 cleanup_all:
@@ -1085,8 +1080,19 @@ EXPORT_SYMBOL(file_path);
  */
 int vfs_open(const struct path *path, struct file *file)
 {
+	int ret;
+
 	file->f_path = *path;
-	return do_dentry_open(file, NULL);
+	ret = do_dentry_open(file, NULL);
+	if (!ret) {
+		/*
+		 * Once we return a file with FMODE_OPENED, __fput() will call
+		 * fsnotify_close(), so we need fsnotify_open() here for
+		 * symmetry.
+		 */
+		fsnotify_open(file);
+	}
+	return ret;
 }
 
 struct file *dentry_open(const struct path *path, int flags,
@@ -1177,8 +1183,10 @@ struct file *kernel_file_open(const struct path *path, int flags,
 	error = do_dentry_open(f, NULL);
 	if (error) {
 		fput(f);
-		f = ERR_PTR(error);
+		return ERR_PTR(error);
 	}
+
+	fsnotify_open(f);
 	return f;
 }
 EXPORT_SYMBOL_GPL(kernel_file_open);

diff --git a/fs/super.c b/fs/super.c

@@ -1502,8 +1502,17 @@ static int fs_bdev_thaw(struct block_device *bdev)
 	lockdep_assert_held(&bdev->bd_fsfreeze_mutex);
 
+	/*
+	 * The block device may have been frozen before it was claimed by a
+	 * filesystem. Concurrently another process might try to mount that
+	 * frozen block device and has temporarily claimed the block device for
+	 * that purpose causing a concurrent fs_bdev_thaw() to end up here. The
+	 * mounter is already about to abort mounting because they still saw an
+	 * elevated bdev->bd_fsfreeze_count so get_bdev_super() will return
+	 * NULL in that case.
+	 */
 	sb = get_bdev_super(bdev);
-	if (WARN_ON_ONCE(!sb))
+	if (!sb)
 		return -EINVAL;
 
 	if (sb->s_op->thaw_super)
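
To make the race concrete, one possible interleaving (illustrative, in
terms of the in-kernel freeze API): (1) bdev_freeze() freezes a not yet
mounted block device, leaving bdev->bd_fsfreeze_count elevated; (2) a
mount of that device begins and temporarily claims it; (3) a concurrent
bdev_thaw() sees the claim and ends up in fs_bdev_thaw(); (4)
get_bdev_super() returns NULL because no born superblock exists yet,
while the mounter aborts once it notices the elevated freeze count.
Failing with -EINVAL is the correct outcome here; only the
WARN_ON_ONCE() was misleading.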

diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h

@@ -112,7 +112,13 @@ static inline int fsnotify_file(struct file *file, __u32 mask)
 {
 	const struct path *path;
 
-	if (file->f_mode & FMODE_NONOTIFY)
+	/*
+	 * FMODE_NONOTIFY are fds generated by fanotify itself which should not
+	 * generate new events. We also don't want to generate events for
+	 * FMODE_PATH fds (involves open & close events) as they are just
+	 * handle creation / destruction events and not "real" file events.
+	 */
+	if (file->f_mode & (FMODE_NONOTIFY | FMODE_PATH))
 		return 0;
 
 	path = &file->f_path;
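
The change is easy to observe with fanotify (needs CAP_SYS_ADMIN). A
minimal sketch, assuming a pre-existing test file at the made-up path
/tmp/testfile: after the fix, neither FAN_OPEN nor FAN_CLOSE is
generated for the O_PATH descriptor, where previously the close event
leaked through.

/* opath-events.c: look for events from an O_PATH open/close pair. */
#define _GNU_SOURCE
#include <sys/fanotify.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int nfd = fanotify_init(FAN_CLASS_NOTIF | FAN_NONBLOCK, O_RDONLY);

	fanotify_mark(nfd, FAN_MARK_ADD, FAN_OPEN | FAN_CLOSE, AT_FDCWD,
		      "/tmp/testfile");

	int fd = open("/tmp/testfile", O_PATH);	/* no FAN_OPEN expected */
	close(fd);				/* used to emit FAN_CLOSE_NOWRITE */

	ssize_t len = read(nfd, buf, sizeof(buf));
	printf("read %zd bytes of events (expect -1/EAGAIN after the fix)\n",
	       len);
	return 0;
}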