v6.6-vfs.super

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCZOXpbgAKCRCRxhvAZXjc
 oi8PAQCtXelGZHmTcmevsO8p4Qz7hFpkonZ/TnxKf+RdnlNgPgD+NWi+LoRBpaAj
 xk4z8SqJaTTP4WXrG5JZ6o7EQkUL8gE=
 =2e9I
 -----END PGP SIGNATURE-----

Merge tag 'v6.6-vfs.super' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull superblock updates from Christian Brauner:
 "This contains the super rework that was ready for this cycle. The
  first part changes the order of how we open block devices and allocate
  superblocks, contains various cleanups, simplifications, and a new
  mechanism to wait on superblock state changes.

  This unblocks work to ultimately limit the number of writers to a
  block device. Jan has already scheduled follow-up work that will be
  ready for v6.7 and allows us to restrict the number of writers to a
  given block device. That series builds on this work right here.

  The second part contains filesystem freezing updates.

  Overview:

  The generic superblock changes are roughly organized as follows
  (ignoring additional minor cleanups):

   (1) Removal of the bd_super member from struct block_device.

       This was a very odd back pointer to struct super_block with
       unclear rules. For all relevant places we have other means to get
       the same information so just get rid of this.

   (2) Simplify rules for superblock cleanup.

       Roughly, everything that is allocated during fs_context
       initialization and that's stored in fs_context->s_fs_info needs
       to be cleaned up by the fs_context->free() implementation before
       the superblock allocation function has been called successfully.

       After sget_fc() has returned, fs_context->s_fs_info has been
       transferred to sb->s_fs_info, at which point sb->kill_sb() is
       fully responsible for cleanup. Adhering to these rules means that
       cleanup of sb->s_fs_info in fill_super() is to be avoided as it's
       brittle and inconsistent.

       Cleanup shouldn't be duplicated between sb->put_super() and
       sb->kill_sb(), as sb->put_super() is only called if sb->s_root
       has been set, i.e. when the filesystem has been successfully
       born (SB_BORN). That complexity should be avoided.

       This also means that block devices are to be closed in
       sb->kill_sb() instead of sb->put_super(). More details in the
       lower section.

   (3) Make it possible to lookup or create a superblock before opening
       block devices

       There's a subtle dependency on (2) as some filesystems did rely
       on fill_super() to be called in order to correctly clean up
       sb->s_fs_info. All these filesystems have been fixed.

   (4) Switch most filesystems to follow the same logic as the generic
       mount code now does as outlined in (3).

   (5) Use the superblock as the holder of the block device. We can now
       easily go back from block device to owning superblock.

   (6) Export and extend the generic fs_holder_ops and use them as
       holder ops everywhere and remove the filesystem specific holder
       ops.

   (7) Call from the block layer up into the filesystem layer when the
       block device is removed, allowing to shut down the filesystem
       without risk of deadlocks.

   (8) Get rid of get_super().

       We can now easily go back from the block device to owning
       superblock and can call up from the block layer into the
       filesystem layer when the device is removed. So no need to wade
       through all registered superblocks to find the owning superblock
       anymore"

Link: https://lore.kernel.org/lkml/20230824-prall-intakt-95dbffdee4a0@brauner/

* tag 'v6.6-vfs.super' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs: (47 commits)
  super: use higher-level helper for {freeze,thaw}
  super: wait until we passed kill super
  super: wait for nascent superblocks
  super: make locking naming consistent
  super: use locking helpers
  fs: simplify invalidate_inodes
  fs: remove get_super
  block: call into the file system for ioctl BLKFLSBUF
  block: call into the file system for bdev_mark_dead
  block: consolidate __invalidate_device and fsync_bdev
  block: drop the "busy inodes on changed media" log message
  dasd: also call __invalidate_device when setting the device offline
  amiflop: don't call fsync_bdev in FDFMTBEG
  floppy: call disk_force_media_change when changing the format
  block: simplify the disk_force_media_change interface
  nbd: call blk_mark_disk_dead in nbd_clear_sock_ioctl
  xfs: use fs_holder_ops for the log and RT devices
  xfs: drop s_umount over opening the log and RT devices
  ext4: use fs_holder_ops for the log device
  ext4: drop s_umount over opening the log device
  ...
This commit is contained in:
Linus Torvalds 2023-08-28 11:04:18 -07:00
commit 511fb5bafe
40 changed files with 1049 additions and 635 deletions

View File

@ -260,9 +260,11 @@ filesystem. The following members are defined:
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
int (*freeze_super) (struct super_block *);
int (*freeze_super) (struct super_block *sb,
enum freeze_holder who);
int (*freeze_fs) (struct super_block *);
int (*thaw_super) (struct super_block *);
int (*thaw_super) (struct super_block *sb,
enum freeze_holder who);
int (*unfreeze_fs) (struct super_block *);
int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);

View File

@ -206,23 +206,6 @@ int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
}
EXPORT_SYMBOL(sync_blockdev_range);
/*
* Write out and wait upon all dirty data associated with this
* device. Filesystem data as well as the underlying block
* device. Takes the superblock lock.
*/
int fsync_bdev(struct block_device *bdev)
{
struct super_block *sb = get_super(bdev);
if (sb) {
int res = sync_filesystem(sb);
drop_super(sb);
return res;
}
return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);
/**
* freeze_bdev - lock a filesystem and force it into a consistent state
* @bdev: blockdevice to lock
@ -248,9 +231,9 @@ int freeze_bdev(struct block_device *bdev)
if (!sb)
goto sync;
if (sb->s_op->freeze_super)
error = sb->s_op->freeze_super(sb);
error = sb->s_op->freeze_super(sb, FREEZE_HOLDER_USERSPACE);
else
error = freeze_super(sb);
error = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
deactivate_super(sb);
if (error) {
@ -291,9 +274,9 @@ int thaw_bdev(struct block_device *bdev)
goto out;
if (sb->s_op->thaw_super)
error = sb->s_op->thaw_super(sb);
error = sb->s_op->thaw_super(sb, FREEZE_HOLDER_USERSPACE);
else
error = thaw_super(sb);
error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
if (error)
bdev->bd_fsfreeze_count++;
else
@ -960,26 +943,38 @@ out_path_put:
}
EXPORT_SYMBOL(lookup_bdev);
int __invalidate_device(struct block_device *bdev, bool kill_dirty)
/**
* bdev_mark_dead - mark a block device as dead
* @bdev: block device to operate on
* @surprise: indicate a surprise removal
*
* Tell the file system that this device or media is dead. If @surprise is set
* to %true the device or media is already gone, if not we are preparing for an
* orderly removal.
*
* This calls into the file system, which then typically syncs out all dirty data
* and writes back inodes and then invalidates any cached data in the inodes on
* the file system. In addition we also invalidate the block device mapping.
*/
void bdev_mark_dead(struct block_device *bdev, bool surprise)
{
struct super_block *sb = get_super(bdev);
int res = 0;
mutex_lock(&bdev->bd_holder_lock);
if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead)
bdev->bd_holder_ops->mark_dead(bdev, surprise);
else
sync_blockdev(bdev);
mutex_unlock(&bdev->bd_holder_lock);
if (sb) {
/*
* no need to lock the super, get_super holds the
* read mutex so the filesystem cannot go away
* under us (->put_super runs with the write lock
* hold).
*/
shrink_dcache_sb(sb);
res = invalidate_inodes(sb, kill_dirty);
drop_super(sb);
}
invalidate_bdev(bdev);
return res;
}
EXPORT_SYMBOL(__invalidate_device);
#ifdef CONFIG_DASD_MODULE
/*
* Drivers should not use this directly, but the DASD driver has historically
* had a shutdown to offline mode that doesn't actually remove the gendisk
* that otherwise looks a lot like a safe device removal.
*/
EXPORT_SYMBOL_GPL(bdev_mark_dead);
#endif
void sync_bdevs(bool wait)
{

View File

@ -281,9 +281,7 @@ bool disk_check_media_change(struct gendisk *disk)
if (!(events & DISK_EVENT_MEDIA_CHANGE))
return false;
if (__invalidate_device(disk->part0, true))
pr_warn("VFS: busy inodes on changed media %s\n",
disk->disk_name);
bdev_mark_dead(disk->part0, true);
set_bit(GD_NEED_PART_SCAN, &disk->state);
return true;
}
@ -294,25 +292,16 @@ EXPORT_SYMBOL(disk_check_media_change);
* @disk: the disk which will raise the event
* @events: the events to raise
*
* Generate uevents for the disk. If DISK_EVENT_MEDIA_CHANGE is present,
* attempt to free all dentries and inodes and invalidates all block
* Should be called when the media changes for @disk. Generates a uevent
* and attempts to free all dentries and inodes and invalidates all block
* device page cache entries in that case.
*
* Returns %true if DISK_EVENT_MEDIA_CHANGE was raised, or %false if not.
*/
bool disk_force_media_change(struct gendisk *disk, unsigned int events)
void disk_force_media_change(struct gendisk *disk)
{
disk_event_uevent(disk, events);
if (!(events & DISK_EVENT_MEDIA_CHANGE))
return false;
disk_event_uevent(disk, DISK_EVENT_MEDIA_CHANGE);
inc_diskseq(disk);
if (__invalidate_device(disk->part0, true))
pr_warn("VFS: busy inodes on changed media %s\n",
disk->disk_name);
bdev_mark_dead(disk->part0, true);
set_bit(GD_NEED_PART_SCAN, &disk->state);
return true;
}
EXPORT_SYMBOL_GPL(disk_force_media_change);

View File

@ -554,7 +554,7 @@ out_exit_elevator:
}
EXPORT_SYMBOL(device_add_disk);
static void blk_report_disk_dead(struct gendisk *disk)
static void blk_report_disk_dead(struct gendisk *disk, bool surprise)
{
struct block_device *bdev;
unsigned long idx;
@ -565,10 +565,7 @@ static void blk_report_disk_dead(struct gendisk *disk)
continue;
rcu_read_unlock();
mutex_lock(&bdev->bd_holder_lock);
if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead)
bdev->bd_holder_ops->mark_dead(bdev);
mutex_unlock(&bdev->bd_holder_lock);
bdev_mark_dead(bdev, surprise);
put_device(&bdev->bd_device);
rcu_read_lock();
@ -576,14 +573,7 @@ static void blk_report_disk_dead(struct gendisk *disk)
rcu_read_unlock();
}
/**
* blk_mark_disk_dead - mark a disk as dead
* @disk: disk to mark as dead
*
* Mark as disk as dead (e.g. surprise removed) and don't accept any new I/O
* to this disk.
*/
void blk_mark_disk_dead(struct gendisk *disk)
static void __blk_mark_disk_dead(struct gendisk *disk)
{
/*
* Fail any new I/O.
@ -603,8 +593,19 @@ void blk_mark_disk_dead(struct gendisk *disk)
* Prevent new I/O from crossing bio_queue_enter().
*/
blk_queue_start_drain(disk->queue);
}
blk_report_disk_dead(disk);
/**
* blk_mark_disk_dead - mark a disk as dead
* @disk: disk to mark as dead
*
* Mark the disk as dead (e.g. surprise removed) and don't accept any new I/O
* to this disk.
*/
void blk_mark_disk_dead(struct gendisk *disk)
{
__blk_mark_disk_dead(disk);
blk_report_disk_dead(disk, true);
}
EXPORT_SYMBOL_GPL(blk_mark_disk_dead);
@ -641,18 +642,20 @@ void del_gendisk(struct gendisk *disk)
disk_del_events(disk);
/*
* Prevent new openers by unlinked the bdev inode, and write out
* dirty data before marking the disk dead and stopping all I/O.
* Prevent new openers by unlinking the bdev inode.
*/
mutex_lock(&disk->open_mutex);
xa_for_each(&disk->part_tbl, idx, part) {
xa_for_each(&disk->part_tbl, idx, part)
remove_inode_hash(part->bd_inode);
fsync_bdev(part);
__invalidate_device(part, true);
}
mutex_unlock(&disk->open_mutex);
blk_mark_disk_dead(disk);
/*
* Tell the file system to write back all dirty data and shut down if
* it hasn't been notified earlier.
*/
if (!test_bit(GD_DEAD, &disk->state))
blk_report_disk_dead(disk, false);
__blk_mark_disk_dead(disk);
/*
* Drop all partitions now that the disk is marked dead.

View File

@ -364,7 +364,14 @@ static int blkdev_flushbuf(struct block_device *bdev, unsigned cmd,
{
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
fsync_bdev(bdev);
mutex_lock(&bdev->bd_holder_lock);
if (bdev->bd_holder_ops && bdev->bd_holder_ops->sync)
bdev->bd_holder_ops->sync(bdev);
else
sync_blockdev(bdev);
mutex_unlock(&bdev->bd_holder_lock);
invalidate_bdev(bdev);
return 0;
}

View File

@ -281,10 +281,7 @@ static void delete_partition(struct block_device *part)
* looked up any more even when openers still hold references.
*/
remove_inode_hash(part->bd_inode);
fsync_bdev(part);
__invalidate_device(part, true);
bdev_mark_dead(part, false);
drop_partition(part);
}

View File

@ -1547,7 +1547,6 @@ static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
rel_fdc();
return -EBUSY;
}
fsync_bdev(bdev);
if (fd_motor_on(drive) == 0) {
rel_fdc();
return -ENODEV;

View File

@ -3255,7 +3255,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
if (!disk || ITYPE(drive_state[cnt].fd_device) != type)
continue;
__invalidate_device(disk->part0, true);
disk_force_media_change(disk);
}
mutex_unlock(&open_lock);
} else {

View File

@ -603,7 +603,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
goto out_err;
/* and ... switch */
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
disk_force_media_change(lo->lo_disk);
blk_mq_freeze_queue(lo->lo_queue);
mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
lo->lo_backing_file = file;
@ -1067,7 +1067,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
disk_force_media_change(lo->lo_disk);
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
@ -1171,7 +1171,7 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
if (!release)
blk_mq_unfreeze_queue(lo->lo_queue);
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
disk_force_media_change(lo->lo_disk);
if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
int err;

View File

@ -1434,12 +1434,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd)
return ret;
}
static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
struct block_device *bdev)
static void nbd_clear_sock_ioctl(struct nbd_device *nbd)
{
blk_mark_disk_dead(nbd->disk);
nbd_clear_sock(nbd);
__invalidate_device(bdev, true);
nbd_bdev_reset(nbd);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
nbd_config_put(nbd);
@ -1465,7 +1463,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
case NBD_DISCONNECT:
return nbd_disconnect(nbd);
case NBD_CLEAR_SOCK:
nbd_clear_sock_ioctl(nbd, bdev);
nbd_clear_sock_ioctl(nbd);
return 0;
case NBD_SET_SOCK:
return nbd_add_socket(nbd, arg, false);

View File

@ -3636,11 +3636,8 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* so sync bdev first and then wait for our queues to become
* empty
*/
if (device->block) {
rc = fsync_bdev(device->block->bdev);
if (rc != 0)
goto interrupted;
}
if (device->block)
bdev_mark_dead(device->block->bdev, false);
dasd_schedule_device_bh(device);
rc = wait_event_interruptible(shutdown_waitq,
_wait_for_empty_queues(device));

View File

@ -1226,19 +1226,14 @@ EXPORT_SYMBOL(mark_buffer_dirty);
void mark_buffer_write_io_error(struct buffer_head *bh)
{
struct super_block *sb;
set_buffer_write_io_error(bh);
/* FIXME: do we need to set this in both places? */
if (bh->b_folio && bh->b_folio->mapping)
mapping_set_error(bh->b_folio->mapping, -EIO);
if (bh->b_assoc_map)
if (bh->b_assoc_map) {
mapping_set_error(bh->b_assoc_map, -EIO);
rcu_read_lock();
sb = READ_ONCE(bh->b_bdev->bd_super);
if (sb)
errseq_set(&sb->s_wb_err, -EIO);
rcu_read_unlock();
errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
}
}
EXPORT_SYMBOL(mark_buffer_write_io_error);

View File

@ -486,12 +486,16 @@ static void cramfs_kill_sb(struct super_block *sb)
{
struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
generic_shutdown_super(sb);
if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sb->s_mtd) {
if (sbi && sbi->mtd_point_size)
mtd_unpoint(sb->s_mtd, 0, sbi->mtd_point_size);
kill_mtd_super(sb);
put_mtd_device(sb->s_mtd);
sb->s_mtd = NULL;
} else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV) && sb->s_bdev) {
kill_block_super(sb);
sync_blockdev(sb->s_bdev);
blkdev_put(sb->s_bdev, sb);
}
kfree(sbi);
}

View File

@ -273,8 +273,6 @@ struct exfat_sb_info {
spinlock_t inode_hash_lock;
struct hlist_head inode_hashtable[EXFAT_HASH_SIZE];
struct rcu_head rcu;
};
#define EXFAT_CACHE_VALID 0

View File

@ -31,16 +31,6 @@ static void exfat_free_iocharset(struct exfat_sb_info *sbi)
kfree(sbi->options.iocharset);
}
static void exfat_delayed_free(struct rcu_head *p)
{
struct exfat_sb_info *sbi = container_of(p, struct exfat_sb_info, rcu);
unload_nls(sbi->nls_io);
exfat_free_iocharset(sbi);
exfat_free_upcase_table(sbi);
kfree(sbi);
}
static void exfat_put_super(struct super_block *sb)
{
struct exfat_sb_info *sbi = EXFAT_SB(sb);
@ -50,7 +40,8 @@ static void exfat_put_super(struct super_block *sb)
brelse(sbi->boot_bh);
mutex_unlock(&sbi->s_lock);
call_rcu(&sbi->rcu, exfat_delayed_free);
unload_nls(sbi->nls_io);
exfat_free_upcase_table(sbi);
}
static int exfat_sync_fs(struct super_block *sb, int wait)
@ -709,9 +700,6 @@ free_table:
check_nls_io:
unload_nls(sbi->nls_io);
exfat_free_iocharset(sbi);
sb->s_fs_info = NULL;
kfree(sbi);
return err;
}
@ -720,14 +708,18 @@ static int exfat_get_tree(struct fs_context *fc)
return get_tree_bdev(fc, exfat_fill_super);
}
static void exfat_free_sbi(struct exfat_sb_info *sbi)
{
exfat_free_iocharset(sbi);
kfree(sbi);
}
static void exfat_free(struct fs_context *fc)
{
struct exfat_sb_info *sbi = fc->s_fs_info;
if (sbi) {
exfat_free_iocharset(sbi);
kfree(sbi);
}
if (sbi)
exfat_free_sbi(sbi);
}
static int exfat_reconfigure(struct fs_context *fc)
@ -772,12 +764,21 @@ static int exfat_init_fs_context(struct fs_context *fc)
return 0;
}
static void exfat_kill_sb(struct super_block *sb)
{
struct exfat_sb_info *sbi = sb->s_fs_info;
kill_block_super(sb);
if (sbi)
exfat_free_sbi(sbi);
}
static struct file_system_type exfat_fs_type = {
.owner = THIS_MODULE,
.name = "exfat",
.init_fs_context = exfat_init_fs_context,
.parameters = exfat_parameters,
.kill_sb = kill_block_super,
.kill_sb = exfat_kill_sb,
.fs_flags = FS_REQUIRES_DEV,
};

View File

@ -234,8 +234,7 @@ int __ext4_journal_get_write_access(const char *where, unsigned int line,
might_sleep();
if (bh->b_bdev->bd_super)
ext4_check_bdev_write_error(bh->b_bdev->bd_super);
ext4_check_bdev_write_error(sb);
if (ext4_handle_valid(handle)) {
err = jbd2_journal_get_write_access(handle, bh);

View File

@ -93,6 +93,7 @@ static int ext4_get_tree(struct fs_context *fc);
static int ext4_reconfigure(struct fs_context *fc);
static void ext4_fc_free(struct fs_context *fc);
static int ext4_init_fs_context(struct fs_context *fc);
static void ext4_kill_sb(struct super_block *sb);
static const struct fs_parameter_spec ext4_param_specs[];
/*
@ -135,12 +136,12 @@ static struct file_system_type ext2_fs_type = {
.name = "ext2",
.init_fs_context = ext4_init_fs_context,
.parameters = ext4_param_specs,
.kill_sb = kill_block_super,
.kill_sb = ext4_kill_sb,
.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#define IS_EXT2_SB(sb) ((sb)->s_type == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif
@ -151,12 +152,12 @@ static struct file_system_type ext3_fs_type = {
.name = "ext3",
.init_fs_context = ext4_init_fs_context,
.parameters = ext4_param_specs,
.kill_sb = kill_block_super,
.kill_sb = ext4_kill_sb,
.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
#define IS_EXT3_SB(sb) ((sb)->s_type == &ext3_fs_type)
static inline void __ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
@ -1096,15 +1097,6 @@ void ext4_update_dynamic_rev(struct super_block *sb)
*/
}
static void ext4_bdev_mark_dead(struct block_device *bdev)
{
ext4_force_shutdown(bdev->bd_holder, EXT4_GOING_FLAGS_NOLOGFLUSH);
}
static const struct blk_holder_ops ext4_holder_ops = {
.mark_dead = ext4_bdev_mark_dead,
};
/*
* Open the external journal device
*/
@ -1113,7 +1105,7 @@ static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
struct block_device *bdev;
bdev = blkdev_get_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE, sb,
&ext4_holder_ops);
&fs_holder_ops);
if (IS_ERR(bdev))
goto fail;
return bdev;
@ -1125,25 +1117,6 @@ fail:
return NULL;
}
/*
* Release the journal device
*/
static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
{
struct block_device *bdev;
bdev = sbi->s_journal_bdev;
if (bdev) {
/*
* Invalidate the journal device's buffers. We don't want them
* floating about in memory - the physical journal device may
* hotswapped, and it breaks the `ro-after' testing code.
*/
invalidate_bdev(bdev);
blkdev_put(bdev, sbi->s_sb);
sbi->s_journal_bdev = NULL;
}
}
static inline struct inode *orphan_list_entry(struct list_head *l)
{
return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
@ -1339,8 +1312,13 @@ static void ext4_put_super(struct super_block *sb)
sync_blockdev(sb->s_bdev);
invalidate_bdev(sb->s_bdev);
if (sbi->s_journal_bdev) {
/*
* Invalidate the journal device's buffers. We don't want them
* floating about in memory - the physical journal device may
* hotswapped, and it breaks the `ro-after' testing code.
*/
sync_blockdev(sbi->s_journal_bdev);
ext4_blkdev_remove(sbi);
invalidate_bdev(sbi->s_journal_bdev);
}
ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
@ -5572,7 +5550,6 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
spin_lock_init(&sbi->s_bdev_wb_lock);
errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
&sbi->s_bdev_wb_err);
sb->s_bdev->bd_super = sb;
EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
ext4_orphan_cleanup(sb, es);
EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
@ -5664,9 +5641,11 @@ failed_mount:
kfree(get_qf_name(sb, sbi, i));
#endif
fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
/* ext4_blkdev_remove() calls kill_bdev(), release bh before it. */
brelse(sbi->s_sbh);
ext4_blkdev_remove(sbi);
if (sbi->s_journal_bdev) {
invalidate_bdev(sbi->s_journal_bdev);
blkdev_put(sbi->s_journal_bdev, sb);
}
out_fail:
invalidate_bdev(sb->s_bdev);
sb->s_fs_info = NULL;
@ -5854,7 +5833,10 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
return NULL;
/* see get_tree_bdev why this is needed and safe */
up_write(&sb->s_umount);
bdev = ext4_blkdev_get(j_dev, sb);
down_write(&sb->s_umount);
if (bdev == NULL)
return NULL;
@ -7273,12 +7255,23 @@ static inline int ext3_feature_set_ok(struct super_block *sb)
return 1;
}
static void ext4_kill_sb(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct block_device *journal_bdev = sbi ? sbi->s_journal_bdev : NULL;
kill_block_super(sb);
if (journal_bdev)
blkdev_put(journal_bdev, sb);
}
static struct file_system_type ext4_fs_type = {
.owner = THIS_MODULE,
.name = "ext4",
.init_fs_context = ext4_init_fs_context,
.parameters = ext4_param_specs,
.kill_sb = kill_block_super,
.kill_sb = ext4_kill_sb,
.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME,
};
MODULE_ALIAS_FS("ext4");

View File

@ -2181,12 +2181,14 @@ out_drop_write:
if (err)
return err;
err = freeze_super(sbi->sb);
err = freeze_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
if (err)
return err;
if (f2fs_readonly(sbi->sb)) {
thaw_super(sbi->sb);
err = thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
if (err)
return err;
return -EROFS;
}
@ -2240,6 +2242,6 @@ recover_out:
out_err:
f2fs_up_write(&sbi->cp_global_sem);
f2fs_up_write(&sbi->gc_lock);
thaw_super(sbi->sb);
thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
return err;
}

View File

@ -1561,7 +1561,7 @@ static void destroy_device_list(struct f2fs_sb_info *sbi)
int i;
for (i = 0; i < sbi->s_ndevs; i++) {
blkdev_put(FDEV(i).bdev, sbi->sb->s_type);
blkdev_put(FDEV(i).bdev, sbi->sb);
#ifdef CONFIG_BLK_DEV_ZONED
kvfree(FDEV(i).blkz_seq);
#endif
@ -4198,7 +4198,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
/* Single zoned block device mount */
FDEV(0).bdev =
blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev, mode,
sbi->sb->s_type, NULL);
sbi->sb, NULL);
} else {
/* Multi-device mount */
memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
@ -4217,8 +4217,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
sbi->log_blocks_per_seg) - 1;
}
FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path, mode,
sbi->sb->s_type,
NULL);
sbi->sb, NULL);
}
if (IS_ERR(FDEV(i).bdev))
return PTR_ERR(FDEV(i).bdev);

View File

@ -1953,9 +1953,9 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
struct inode *inode = wb_inode(wb->b_io.prev);
struct super_block *sb = inode->i_sb;
if (!trylock_super(sb)) {
if (!super_trylock_shared(sb)) {
/*
* trylock_super() may fail consistently due to
* super_trylock_shared() may fail consistently due to
* s_umount being grabbed by someone else. Don't use
* requeue_io() to avoid busy retrying the inode/sb.
*/

View File

@ -689,7 +689,7 @@ static int gfs2_freeze_locally(struct gfs2_sbd *sdp)
struct super_block *sb = sdp->sd_vfs;
int error;
error = freeze_super(sb);
error = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
if (error)
return error;
@ -697,7 +697,9 @@ static int gfs2_freeze_locally(struct gfs2_sbd *sdp)
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
GFS2_LFC_FREEZE_GO_SYNC);
if (gfs2_withdrawn(sdp)) {
thaw_super(sb);
error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
if (error)
return error;
return -EIO;
}
}
@ -712,7 +714,7 @@ static int gfs2_do_thaw(struct gfs2_sbd *sdp)
error = gfs2_freeze_lock_shared(sdp);
if (error)
goto fail;
error = thaw_super(sb);
error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
if (!error)
return 0;
@ -761,7 +763,7 @@ out:
*
*/
static int gfs2_freeze_super(struct super_block *sb)
static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
int error;
@ -816,7 +818,7 @@ out:
*
*/
static int gfs2_thaw_super(struct super_block *sb)
static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
int error;

View File

@ -168,10 +168,10 @@ static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
switch (n) {
case 0:
error = thaw_super(sdp->sd_vfs);
error = thaw_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE);
break;
case 1:
error = freeze_super(sdp->sd_vfs);
error = freeze_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE);
break;
default:
return -EINVAL;

View File

@ -751,16 +751,11 @@ EXPORT_SYMBOL_GPL(evict_inodes);
/**
* invalidate_inodes - attempt to free all inodes on a superblock
* @sb: superblock to operate on
* @kill_dirty: flag to guide handling of dirty inodes
*
* Attempts to free all inodes for a given superblock. If there were any
* busy inodes return a non-zero value, else zero.
* If @kill_dirty is set, discard dirty inodes too, otherwise treat
* them as busy.
* Attempts to free all inodes (including dirty inodes) for a given superblock.
*/
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
void invalidate_inodes(struct super_block *sb)
{
int busy = 0;
struct inode *inode, *next;
LIST_HEAD(dispose);
@ -772,14 +767,8 @@ again:
spin_unlock(&inode->i_lock);
continue;
}
if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
spin_unlock(&inode->i_lock);
busy = 1;
continue;
}
if (atomic_read(&inode->i_count)) {
spin_unlock(&inode->i_lock);
busy = 1;
continue;
}
@ -797,8 +786,6 @@ again:
spin_unlock(&sb->s_inode_list_lock);
dispose_list(&dispose);
return busy;
}
/*

View File

@ -115,7 +115,7 @@ static inline void put_file_access(struct file *file)
* super.c
*/
extern int reconfigure_super(struct fs_context *);
extern bool trylock_super(struct super_block *sb);
extern bool super_trylock_shared(struct super_block *sb);
struct super_block *user_get_super(dev_t, bool excl);
void put_super(struct super_block *sb);
extern bool mount_capable(struct fs_context *);
@ -201,7 +201,7 @@ void lock_two_inodes(struct inode *inode1, struct inode *inode2,
* fs-writeback.c
*/
extern long get_nr_dirty_inodes(void);
extern int invalidate_inodes(struct super_block *, bool);
void invalidate_inodes(struct super_block *sb);
/*
* dcache.c

View File

@ -397,8 +397,8 @@ static int ioctl_fsfreeze(struct file *filp)
/* Freeze */
if (sb->s_op->freeze_super)
return sb->s_op->freeze_super(sb);
return freeze_super(sb);
return sb->s_op->freeze_super(sb, FREEZE_HOLDER_USERSPACE);
return freeze_super(sb, FREEZE_HOLDER_USERSPACE);
}
static int ioctl_fsthaw(struct file *filp)
@ -410,8 +410,8 @@ static int ioctl_fsthaw(struct file *filp)
/* Thaw */
if (sb->s_op->thaw_super)
return sb->s_op->thaw_super(sb);
return thaw_super(sb);
return sb->s_op->thaw_super(sb, FREEZE_HOLDER_USERSPACE);
return thaw_super(sb, FREEZE_HOLDER_USERSPACE);
}
static int ioctl_file_dedupe_range(struct file *file,

View File

@ -35,6 +35,7 @@
#include <linux/writeback.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include "nilfs.h"
#include "export.h"
#include "mdt.h"
@ -1216,7 +1217,6 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
}
struct nilfs_super_data {
struct block_device *bdev;
__u64 cno;
int flags;
};
@ -1283,64 +1283,49 @@ static int nilfs_identify(char *data, struct nilfs_super_data *sd)
static int nilfs_set_bdev_super(struct super_block *s, void *data)
{
s->s_bdev = data;
s->s_dev = s->s_bdev->bd_dev;
s->s_dev = *(dev_t *)data;
return 0;
}
static int nilfs_test_bdev_super(struct super_block *s, void *data)
{
return (void *)s->s_bdev == data;
return !(s->s_iflags & SB_I_RETIRED) && s->s_dev == *(dev_t *)data;
}
static struct dentry *
nilfs_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data)
{
struct nilfs_super_data sd;
struct nilfs_super_data sd = { .flags = flags };
struct super_block *s;
struct dentry *root_dentry;
int err, s_new = false;
dev_t dev;
int err;
sd.bdev = blkdev_get_by_path(dev_name, sb_open_mode(flags), fs_type,
NULL);
if (IS_ERR(sd.bdev))
return ERR_CAST(sd.bdev);
if (nilfs_identify(data, &sd))
return ERR_PTR(-EINVAL);
sd.cno = 0;
sd.flags = flags;
if (nilfs_identify((char *)data, &sd)) {
err = -EINVAL;
goto failed;
}
err = lookup_bdev(dev_name, &dev);
if (err)
return ERR_PTR(err);
/*
* once the super is inserted into the list by sget, s_umount
* will protect the lockfs code from trying to start a snapshot
* while we are mounting
*/
mutex_lock(&sd.bdev->bd_fsfreeze_mutex);
if (sd.bdev->bd_fsfreeze_count > 0) {
mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
err = -EBUSY;
goto failed;
}
s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, flags,
sd.bdev);
mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
if (IS_ERR(s)) {
err = PTR_ERR(s);
goto failed;
}
&dev);
if (IS_ERR(s))
return ERR_CAST(s);
if (!s->s_root) {
s_new = true;
/* New superblock instance created */
snprintf(s->s_id, sizeof(s->s_id), "%pg", sd.bdev);
sb_set_blocksize(s, block_size(sd.bdev));
err = nilfs_fill_super(s, data, flags & SB_SILENT ? 1 : 0);
/*
* We drop s_umount here because we need to open the bdev and
* bdev->open_mutex ranks above s_umount (blkdev_put() ->
* __invalidate_device()). It is safe because we have active sb
* reference and SB_BORN is not set yet.
*/
up_write(&s->s_umount);
err = setup_bdev_super(s, flags, NULL);
down_write(&s->s_umount);
if (!err)
err = nilfs_fill_super(s, data,
flags & SB_SILENT ? 1 : 0);
if (err)
goto failed_super;
@ -1366,24 +1351,18 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
}
if (sd.cno) {
struct dentry *root_dentry;
err = nilfs_attach_snapshot(s, sd.cno, &root_dentry);
if (err)
goto failed_super;
} else {
root_dentry = dget(s->s_root);
return root_dentry;
}
if (!s_new)
blkdev_put(sd.bdev, fs_type);
return root_dentry;
return dget(s->s_root);
failed_super:
deactivate_locked_super(s);
failed:
if (!s_new)
blkdev_put(sd.bdev, fs_type);
return ERR_PTR(err);
}

View File

@ -569,9 +569,9 @@ static void init_once(void *foo)
}
/*
* put_ntfs - Noinline to reduce binary size.
* Noinline to reduce binary size.
*/
static noinline void put_ntfs(struct ntfs_sb_info *sbi)
static noinline void ntfs3_free_sbi(struct ntfs_sb_info *sbi)
{
kfree(sbi->new_rec);
kvfree(ntfs_put_shared(sbi->upcase));
@ -625,12 +625,6 @@ static void ntfs_put_super(struct super_block *sb)
/* Mark rw ntfs as clear, if possible. */
ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
put_mount_options(sbi->options);
put_ntfs(sbi);
sb->s_fs_info = NULL;
sync_blockdev(sb->s_bdev);
}
static int ntfs_statfs(struct dentry *dentry, struct kstatfs *buf)
@ -1564,15 +1558,7 @@ load_root:
put_inode_out:
iput(inode);
out:
/*
* Free resources here.
* ntfs_fs_free will be called with fc->s_fs_info = NULL
*/
put_mount_options(sbi->options);
put_ntfs(sbi);
sb->s_fs_info = NULL;
kfree(boot2);
return err;
}
@ -1659,7 +1645,7 @@ static void ntfs_fs_free(struct fs_context *fc)
struct ntfs_sb_info *sbi = fc->s_fs_info;
if (sbi)
put_ntfs(sbi);
ntfs3_free_sbi(sbi);
if (opts)
put_mount_options(opts);
@ -1728,13 +1714,24 @@ free_opts:
return -ENOMEM;
}
static void ntfs3_kill_sb(struct super_block *sb)
{
struct ntfs_sb_info *sbi = sb->s_fs_info;
kill_block_super(sb);
if (sbi->options)
put_mount_options(sbi->options);
ntfs3_free_sbi(sbi);
}
// clang-format off
static struct file_system_type ntfs_fs_type = {
.owner = THIS_MODULE,
.name = "ntfs3",
.init_fs_context = ntfs_init_fs_context,
.parameters = ntfs_fs_parameters,
.kill_sb = kill_block_super,
.kill_sb = ntfs3_kill_sb,
.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
// clang-format on

View File

@ -557,7 +557,7 @@ static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
(unsigned long)bh,
(unsigned long long)bh->b_blocknr);
ocfs2_error(bh->b_bdev->bd_super,
ocfs2_error(bh->b_assoc_map->host->i_sb,
"JBD2 has aborted our journal, ocfs2 cannot continue\n");
}
@ -780,14 +780,14 @@ void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
mlog_errno(status);
if (!is_handle_aborted(handle)) {
journal_t *journal = handle->h_transaction->t_journal;
struct super_block *sb = bh->b_bdev->bd_super;
mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed. "
"Aborting transaction and journal.\n");
handle->h_err = status;
jbd2_journal_abort_handle(handle);
jbd2_journal_abort(journal, status);
ocfs2_abort(sb, "Journal already aborted.\n");
ocfs2_abort(bh->b_assoc_map->host->i_sb,
"Journal already aborted.\n");
}
}
}

View File

@ -582,16 +582,18 @@ static int romfs_init_fs_context(struct fs_context *fc)
*/
static void romfs_kill_sb(struct super_block *sb)
{
generic_shutdown_super(sb);
#ifdef CONFIG_ROMFS_ON_MTD
if (sb->s_mtd) {
kill_mtd_super(sb);
return;
put_mtd_device(sb->s_mtd);
sb->s_mtd = NULL;
}
#endif
#ifdef CONFIG_ROMFS_ON_BLOCK
if (sb->s_bdev) {
kill_block_super(sb);
return;
sync_blockdev(sb->s_bdev);
blkdev_put(sb->s_bdev, sb);
}
#endif
}

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2019-2023 Oracle. All Rights Reserved.
* Author: Darrick J. Wong <djwong@kernel.org>
@ -8,6 +8,8 @@
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
@ -16,6 +18,7 @@
#include "xfs_ag.h"
#include "xfs_rtalloc.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
@ -53,6 +56,7 @@ struct xchk_fscounters {
uint64_t frextents;
unsigned long long icount_min;
unsigned long long icount_max;
bool frozen;
};
/*
@ -123,6 +127,82 @@ xchk_fscount_warmup(
return error;
}
static inline int
xchk_fsfreeze(
struct xfs_scrub *sc)
{
int error;
error = freeze_super(sc->mp->m_super, FREEZE_HOLDER_KERNEL);
trace_xchk_fsfreeze(sc, error);
return error;
}
static inline int
xchk_fsthaw(
struct xfs_scrub *sc)
{
int error;
/* This should always succeed, we have a kernel freeze */
error = thaw_super(sc->mp->m_super, FREEZE_HOLDER_KERNEL);
trace_xchk_fsthaw(sc, error);
return error;
}
/*
* We couldn't stabilize the filesystem long enough to sample all the variables
* that comprise the summary counters and compare them to the percpu counters.
* We need to disable all writer threads, which means taking the first two
* freeze levels to put userspace to sleep, and the third freeze level to
* prevent background threads from starting new transactions. Take one level
* more to prevent other callers from unfreezing the filesystem while we run.
*/
STATIC int
xchk_fscounters_freeze(
struct xfs_scrub *sc)
{
struct xchk_fscounters *fsc = sc->buf;
int error = 0;
if (sc->flags & XCHK_HAVE_FREEZE_PROT) {
sc->flags &= ~XCHK_HAVE_FREEZE_PROT;
mnt_drop_write_file(sc->file);
}
/* Try to grab a kernel freeze. */
while ((error = xchk_fsfreeze(sc)) == -EBUSY) {
if (xchk_should_terminate(sc, &error))
return error;
delay(HZ / 10);
}
if (error)
return error;
fsc->frozen = true;
return 0;
}
/* Thaw the filesystem after checking or repairing fscounters. */
STATIC void
xchk_fscounters_cleanup(
void *buf)
{
struct xchk_fscounters *fsc = buf;
struct xfs_scrub *sc = fsc->sc;
int error;
if (!fsc->frozen)
return;
error = xchk_fsthaw(sc);
if (error)
xfs_emerg(sc->mp, "still frozen after scrub, err=%d", error);
else
fsc->frozen = false;
}
int
xchk_setup_fscounters(
struct xfs_scrub *sc)
@ -140,6 +220,7 @@ xchk_setup_fscounters(
sc->buf = kzalloc(sizeof(struct xchk_fscounters), XCHK_GFP_FLAGS);
if (!sc->buf)
return -ENOMEM;
sc->buf_cleanup = xchk_fscounters_cleanup;
fsc = sc->buf;
fsc->sc = sc;
@ -150,7 +231,18 @@ xchk_setup_fscounters(
if (error)
return error;
return xchk_trans_alloc(sc, 0);
/*
* Pause all writer activity in the filesystem while we're scrubbing to
* reduce the likelihood of background perturbations to the counters
* throwing off our calculations.
*/
if (sc->flags & XCHK_TRY_HARDER) {
error = xchk_fscounters_freeze(sc);
if (error)
return error;
}
return xfs_trans_alloc_empty(sc->mp, &sc->tp);
}
/*
@ -290,8 +382,7 @@ retry:
if (fsc->ifree > fsc->icount) {
if (tries--)
goto retry;
xchk_set_incomplete(sc);
return 0;
return -EDEADLOCK;
}
return 0;
@ -367,6 +458,8 @@ xchk_fscount_count_frextents(
* Otherwise, we /might/ have a problem. If the change in the summations is
* more than we want to tolerate, the filesystem is probably busy and we should
* just send back INCOMPLETE and see if userspace will try again.
*
* If we're repairing then we require an exact match.
*/
static inline bool
xchk_fscount_within_range(
@ -396,21 +489,7 @@ xchk_fscount_within_range(
if (expected >= min_value && expected <= max_value)
return true;
/*
* If the difference between the two summations is too large, the fs
* might just be busy and so we'll mark the scrub incomplete. Return
* true here so that we don't mark the counter corrupt.
*
* XXX: In the future when userspace can grant scrub permission to
* quiesce the filesystem to solve the outsized variance problem, this
* check should be moved up and the return code changed to signal to
* userspace that we need quiesce permission.
*/
if (max_value - min_value >= XCHK_FSCOUNT_MIN_VARIANCE) {
xchk_set_incomplete(sc);
return true;
}
/* Everything else is bad. */
return false;
}
@ -422,6 +501,7 @@ xchk_fscounters(
struct xfs_mount *mp = sc->mp;
struct xchk_fscounters *fsc = sc->buf;
int64_t icount, ifree, fdblocks, frextents;
bool try_again = false;
int error;
/* Snapshot the percpu counters. */
@ -431,9 +511,26 @@ xchk_fscounters(
frextents = percpu_counter_sum(&mp->m_frextents);
/* No negative values, please! */
if (icount < 0 || ifree < 0 || fdblocks < 0 || frextents < 0)
if (icount < 0 || ifree < 0)
xchk_set_corrupt(sc);
/*
* If the filesystem is not frozen, the counter summation calls above
* can race with xfs_mod_freecounter, which subtracts a requested space
* reservation from the counter and undoes the subtraction if that made
* the counter go negative. Therefore, it's possible to see negative
* values here, and we should only flag that as a corruption if we
* froze the fs. This is much more likely to happen with frextents
* since there are no reserved pools.
*/
if (fdblocks < 0 || frextents < 0) {
if (!fsc->frozen)
return -EDEADLOCK;
xchk_set_corrupt(sc);
return 0;
}
/* See if icount is obviously wrong. */
if (icount < fsc->icount_min || icount > fsc->icount_max)
xchk_set_corrupt(sc);
@ -446,12 +543,6 @@ xchk_fscounters(
if (frextents > mp->m_sb.sb_rextents)
xchk_set_corrupt(sc);
/*
* XXX: We can't quiesce percpu counter updates, so exit early.
* This can be re-enabled when we gain exclusive freeze functionality.
*/
return 0;
/*
* If ifree exceeds icount by more than the minimum variance then
* something's probably wrong with the counters.
@ -463,8 +554,6 @@ xchk_fscounters(
error = xchk_fscount_aggregate_agcounts(sc, fsc);
if (!xchk_process_error(sc, 0, XFS_SB_BLOCK(mp), &error))
return error;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
return 0;
/* Count the free extents counter for rt volumes. */
error = xchk_fscount_count_frextents(sc, fsc);
@ -473,20 +562,45 @@ xchk_fscounters(
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
return 0;
/* Compare the in-core counters with whatever we counted. */
if (!xchk_fscount_within_range(sc, icount, &mp->m_icount, fsc->icount))
xchk_set_corrupt(sc);
/*
* Compare the in-core counters with whatever we counted. If the fs is
* frozen, we treat the discrepancy as a corruption because the freeze
* should have stabilized the counter values. Otherwise, we need
* userspace to call us back having granted us freeze permission.
*/
if (!xchk_fscount_within_range(sc, icount, &mp->m_icount,
fsc->icount)) {
if (fsc->frozen)
xchk_set_corrupt(sc);
else
try_again = true;
}
if (!xchk_fscount_within_range(sc, ifree, &mp->m_ifree, fsc->ifree))
xchk_set_corrupt(sc);
if (!xchk_fscount_within_range(sc, ifree, &mp->m_ifree, fsc->ifree)) {
if (fsc->frozen)
xchk_set_corrupt(sc);
else
try_again = true;
}
if (!xchk_fscount_within_range(sc, fdblocks, &mp->m_fdblocks,
fsc->fdblocks))
xchk_set_corrupt(sc);
fsc->fdblocks)) {
if (fsc->frozen)
xchk_set_corrupt(sc);
else
try_again = true;
}
if (!xchk_fscount_within_range(sc, frextents, &mp->m_frextents,
fsc->frextents))
xchk_set_corrupt(sc);
fsc->frextents)) {
if (fsc->frozen)
xchk_set_corrupt(sc);
else
try_again = true;
}
if (try_again)
return -EDEADLOCK;
return 0;
}

View File

@ -184,8 +184,10 @@ xchk_teardown(
xchk_irele(sc, sc->ip);
sc->ip = NULL;
}
if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
if (sc->flags & XCHK_HAVE_FREEZE_PROT) {
sc->flags &= ~XCHK_HAVE_FREEZE_PROT;
mnt_drop_write_file(sc->file);
}
if (sc->buf) {
if (sc->buf_cleanup)
sc->buf_cleanup(sc->buf);
@ -505,6 +507,8 @@ retry_op:
error = mnt_want_write_file(sc->file);
if (error)
goto out_sc;
sc->flags |= XCHK_HAVE_FREEZE_PROT;
}
/* Set up for the operation. */

View File

@ -106,6 +106,7 @@ struct xfs_scrub {
/* XCHK state flags grow up from zero, XREP state flags grown down from 2^31 */
#define XCHK_TRY_HARDER (1U << 0) /* can't get resources, try again */
#define XCHK_HAVE_FREEZE_PROT (1U << 1) /* do we have freeze protection? */
#define XCHK_FSGATES_DRAIN (1U << 2) /* defer ops draining enabled */
#define XCHK_NEED_DRAIN (1U << 3) /* scrub needs to drain defer ops */
#define XREP_ALREADY_FIXED (1U << 31) /* checking our repair work */

View File

@ -98,6 +98,7 @@ TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_FSCOUNTERS);
#define XFS_SCRUB_STATE_STRINGS \
{ XCHK_TRY_HARDER, "try_harder" }, \
{ XCHK_HAVE_FREEZE_PROT, "nofreeze" }, \
{ XCHK_FSGATES_DRAIN, "fsgates_drain" }, \
{ XCHK_NEED_DRAIN, "need_drain" }, \
{ XREP_ALREADY_FIXED, "already_fixed" }
@ -693,6 +694,31 @@ TRACE_EVENT(xchk_fscounters_within_range,
__entry->old_value)
)
DECLARE_EVENT_CLASS(xchk_fsfreeze_class,
TP_PROTO(struct xfs_scrub *sc, int error),
TP_ARGS(sc, error),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(unsigned int, type)
__field(int, error)
),
TP_fast_assign(
__entry->dev = sc->mp->m_super->s_dev;
__entry->type = sc->sm->sm_type;
__entry->error = error;
),
TP_printk("dev %d:%d type %s error %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
__entry->error)
);
#define DEFINE_XCHK_FSFREEZE_EVENT(name) \
DEFINE_EVENT(xchk_fsfreeze_class, name, \
TP_PROTO(struct xfs_scrub *sc, int error), \
TP_ARGS(sc, error))
DEFINE_XCHK_FSFREEZE_EVENT(xchk_fsfreeze);
DEFINE_XCHK_FSFREEZE_EVENT(xchk_fsthaw);
TRACE_EVENT(xchk_refcount_incorrect,
TP_PROTO(struct xfs_perag *pag, const struct xfs_refcount_irec *irec,
xfs_nlink_t seen),

View File

@ -1938,14 +1938,17 @@ void
xfs_free_buftarg(
struct xfs_buftarg *btp)
{
struct block_device *bdev = btp->bt_bdev;
unregister_shrinker(&btp->bt_shrinker);
ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
percpu_counter_destroy(&btp->bt_io_count);
list_lru_destroy(&btp->bt_lru);
blkdev_issue_flush(btp->bt_bdev);
invalidate_bdev(btp->bt_bdev);
fs_put_dax(btp->bt_daxdev, btp->bt_mount);
/* the main block device is closed by kill_block_super */
if (bdev != btp->bt_mount->m_super->s_bdev)
blkdev_put(bdev, btp->bt_mount->m_super);
kmem_free(btp);
}

View File

@ -377,17 +377,6 @@ disable_dax:
return 0;
}
static void
xfs_bdev_mark_dead(
struct block_device *bdev)
{
xfs_force_shutdown(bdev->bd_holder, SHUTDOWN_DEVICE_REMOVED);
}
static const struct blk_holder_ops xfs_holder_ops = {
.mark_dead = xfs_bdev_mark_dead,
};
STATIC int
xfs_blkdev_get(
xfs_mount_t *mp,
@ -396,8 +385,8 @@ xfs_blkdev_get(
{
int error = 0;
*bdevp = blkdev_get_by_path(name, BLK_OPEN_READ | BLK_OPEN_WRITE, mp,
&xfs_holder_ops);
*bdevp = blkdev_get_by_path(name, BLK_OPEN_READ | BLK_OPEN_WRITE,
mp->m_super, &fs_holder_ops);
if (IS_ERR(*bdevp)) {
error = PTR_ERR(*bdevp);
xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
@ -407,31 +396,45 @@ xfs_blkdev_get(
}
STATIC void
xfs_blkdev_put(
struct xfs_mount *mp,
struct block_device *bdev)
{
if (bdev)
blkdev_put(bdev, mp);
}
STATIC void
xfs_close_devices(
xfs_shutdown_devices(
struct xfs_mount *mp)
{
/*
* Udev is triggered whenever anyone closes a block device or unmounts
* a file systemm on a block device.
* The default udev rules invoke blkid to read the fs super and create
* symlinks to the bdev under /dev/disk. For this, it uses buffered
* reads through the page cache.
*
* xfs_db also uses buffered reads to examine metadata. There is no
* coordination between xfs_db and udev, which means that they can run
* concurrently. Note there is no coordination between the kernel and
* blkid either.
*
* On a system with 64k pages, the page cache can cache the superblock
* and the root inode (and hence the root directory) with the same 64k
* page. If udev spawns blkid after the mkfs and the system is busy
* enough that it is still running when xfs_db starts up, they'll both
* read from the same page in the pagecache.
*
* The unmount writes updated inode metadata to disk directly. The XFS
* buffer cache does not use the bdev pagecache, so it needs to
* invalidate that pagecache on unmount. If the above scenario occurs,
* the pagecache no longer reflects what's on disk, xfs_db reads the
* stale metadata, and fails to find /a. Most of the time this succeeds
* because closing a bdev invalidates the page cache, but when processes
* race, everyone loses.
*/
if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
xfs_free_buftarg(mp->m_logdev_targp);
xfs_blkdev_put(mp, logdev);
blkdev_issue_flush(mp->m_logdev_targp->bt_bdev);
invalidate_bdev(mp->m_logdev_targp->bt_bdev);
}
if (mp->m_rtdev_targp) {
struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
xfs_free_buftarg(mp->m_rtdev_targp);
xfs_blkdev_put(mp, rtdev);
blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
invalidate_bdev(mp->m_rtdev_targp->bt_bdev);
}
xfs_free_buftarg(mp->m_ddev_targp);
blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
invalidate_bdev(mp->m_ddev_targp->bt_bdev);
}
/*
@ -448,17 +451,24 @@ STATIC int
xfs_open_devices(
struct xfs_mount *mp)
{
struct block_device *ddev = mp->m_super->s_bdev;
struct super_block *sb = mp->m_super;
struct block_device *ddev = sb->s_bdev;
struct block_device *logdev = NULL, *rtdev = NULL;
int error;
/*
* blkdev_put() can't be called under s_umount, see the comment
* in get_tree_bdev() for more details
*/
up_write(&sb->s_umount);
/*
* Open real time and log devices - order is important.
*/
if (mp->m_logname) {
error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
if (error)
return error;
goto out_relock;
}
if (mp->m_rtname) {
@ -496,7 +506,10 @@ xfs_open_devices(
mp->m_logdev_targp = mp->m_ddev_targp;
}
return 0;
error = 0;
out_relock:
down_write(&sb->s_umount);
return error;
out_free_rtdev_targ:
if (mp->m_rtdev_targp)
@ -504,11 +517,12 @@ xfs_open_devices(
out_free_ddev_targ:
xfs_free_buftarg(mp->m_ddev_targp);
out_close_rtdev:
xfs_blkdev_put(mp, rtdev);
if (rtdev)
blkdev_put(rtdev, sb);
out_close_logdev:
if (logdev && logdev != ddev)
xfs_blkdev_put(mp, logdev);
return error;
blkdev_put(logdev, sb);
goto out_relock;
}
/*
@ -758,6 +772,17 @@ static void
xfs_mount_free(
struct xfs_mount *mp)
{
/*
* Free the buftargs here because blkdev_put needs to be called outside
* of sb->s_umount, which is held around the call to ->put_super.
*/
if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
xfs_free_buftarg(mp->m_logdev_targp);
if (mp->m_rtdev_targp)
xfs_free_buftarg(mp->m_rtdev_targp);
if (mp->m_ddev_targp)
xfs_free_buftarg(mp->m_ddev_targp);
kfree(mp->m_rtname);
kfree(mp->m_logname);
kmem_free(mp);
@ -1133,10 +1158,6 @@ xfs_fs_put_super(
{
struct xfs_mount *mp = XFS_M(sb);
/* if ->fill_super failed, we have no mount to tear down */
if (!sb->s_fs_info)
return;
xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid);
xfs_filestream_unmount(mp);
xfs_unmountfs(mp);
@ -1147,10 +1168,7 @@ xfs_fs_put_super(
xfs_inodegc_free_percpu(mp);
xfs_destroy_percpu_counters(mp);
xfs_destroy_mount_workqueues(mp);
xfs_close_devices(mp);
sb->s_fs_info = NULL;
xfs_mount_free(mp);
xfs_shutdown_devices(mp);
}
static long
@ -1492,7 +1510,7 @@ xfs_fs_fill_super(
error = xfs_fs_validate_params(mp);
if (error)
goto out_free_names;
return error;
sb_min_blocksize(sb, BBSIZE);
sb->s_xattr = xfs_xattr_handlers;
@ -1519,11 +1537,11 @@ xfs_fs_fill_super(
error = xfs_open_devices(mp);
if (error)
goto out_free_names;
return error;
error = xfs_init_mount_workqueues(mp);
if (error)
goto out_close_devices;
goto out_shutdown_devices;
error = xfs_init_percpu_counters(mp);
if (error)
@ -1737,11 +1755,8 @@ xfs_fs_fill_super(
xfs_destroy_percpu_counters(mp);
out_destroy_workqueues:
xfs_destroy_mount_workqueues(mp);
out_close_devices:
xfs_close_devices(mp);
out_free_names:
sb->s_fs_info = NULL;
xfs_mount_free(mp);
out_shutdown_devices:
xfs_shutdown_devices(mp);
return error;
out_unmount:
@ -1934,7 +1949,8 @@ xfs_fs_reconfigure(
return 0;
}
static void xfs_fs_free(
static void
xfs_fs_free(
struct fs_context *fc)
{
struct xfs_mount *mp = fc->s_fs_info;
@ -2003,12 +2019,20 @@ static int xfs_init_fs_context(
return 0;
}
static void
xfs_kill_sb(
struct super_block *sb)
{
kill_block_super(sb);
xfs_mount_free(XFS_M(sb));
}
static struct file_system_type xfs_fs_type = {
.owner = THIS_MODULE,
.name = "xfs",
.init_fs_context = xfs_init_fs_context,
.parameters = xfs_fs_parameters,
.kill_sb = kill_block_super,
.kill_sb = xfs_kill_sb,
.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME,
};
MODULE_ALIAS_FS("xfs");

View File

@ -52,7 +52,6 @@ struct block_device {
atomic_t bd_openers;
spinlock_t bd_size_lock; /* for bd_inode->i_size updates */
struct inode * bd_inode; /* will die */
struct super_block * bd_super;
void * bd_claiming;
void * bd_holder;
const struct blk_holder_ops *bd_holder_ops;

View File

@ -750,7 +750,8 @@ static inline int bdev_read_only(struct block_device *bdev)
}
bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
bool disk_force_media_change(struct gendisk *disk, unsigned int events);
void disk_force_media_change(struct gendisk *disk);
void bdev_mark_dead(struct block_device *bdev, bool surprise);
void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);
@ -809,7 +810,6 @@ int __register_blkdev(unsigned int major, const char *name,
void unregister_blkdev(unsigned int major, const char *name);
bool disk_check_media_change(struct gendisk *disk);
int __invalidate_device(struct block_device *bdev, bool kill_dirty);
void set_capacity(struct gendisk *disk, sector_t size);
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
@ -1460,9 +1460,16 @@ void blkdev_show(struct seq_file *seqf, off_t offset);
#endif
struct blk_holder_ops {
void (*mark_dead)(struct block_device *bdev);
void (*mark_dead)(struct block_device *bdev, bool surprise);
/*
* Sync the file system mounted on the block device.
*/
void (*sync)(struct block_device *bdev);
};
extern const struct blk_holder_ops fs_holder_ops;
/*
* Return the correct open flags for blkdev_get_by_* for super block flags
* as stored in sb->s_flags.
@ -1521,8 +1528,6 @@ static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
}
#endif /* CONFIG_BLOCK */
int fsync_bdev(struct block_device *bdev);
int freeze_bdev(struct block_device *bdev);
int thaw_bdev(struct block_device *bdev);

View File

@ -1095,6 +1095,8 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_LAZYTIME BIT(25) /* Update the on-disk [acm]times lazily */
/* These sb flags are internal to the kernel */
#define SB_DEAD BIT(21)
#define SB_DYING BIT(24)
#define SB_SUBMOUNT BIT(26)
#define SB_FORCE BIT(27)
#define SB_NOSEC BIT(28)
@ -1147,7 +1149,8 @@ enum {
#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
struct sb_writers {
int frozen; /* Is sb frozen? */
unsigned short frozen; /* Is sb frozen? */
unsigned short freeze_holders; /* Who froze fs? */
struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
};
@ -1982,6 +1985,10 @@ extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
struct file *dst_file, loff_t dst_pos,
loff_t len, unsigned int remap_flags);
enum freeze_holder {
FREEZE_HOLDER_KERNEL = (1U << 0),
FREEZE_HOLDER_USERSPACE = (1U << 1),
};
struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
@ -1994,9 +2001,9 @@ struct super_operations {
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
int (*freeze_super) (struct super_block *);
int (*freeze_super) (struct super_block *, enum freeze_holder who);
int (*freeze_fs) (struct super_block *);
int (*thaw_super) (struct super_block *);
int (*thaw_super) (struct super_block *, enum freeze_holder who);
int (*unfreeze_fs) (struct super_block *);
int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
@ -2382,8 +2389,8 @@ extern int unregister_filesystem(struct file_system_type *);
extern int vfs_statfs(const struct path *, struct kstatfs *);
extern int user_statfs(const char __user *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
int freeze_super(struct super_block *super, enum freeze_holder who);
int thaw_super(struct super_block *super, enum freeze_holder who);
extern __printf(2, 3)
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
extern int super_setup_bdi(struct super_block *sb);
@ -3055,7 +3062,6 @@ extern int vfs_readlink(struct dentry *, char __user *, int);
extern struct file_system_type *get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
extern struct super_block *get_super(struct block_device *);
extern struct super_block *get_active_super(struct block_device *bdev);
extern void drop_super(struct super_block *sb);
extern void drop_super_exclusive(struct super_block *sb);

View File

@ -156,6 +156,8 @@ extern int get_tree_keyed(struct fs_context *fc,
struct fs_context *fc),
void *key);
int setup_bdev_super(struct super_block *sb, int sb_flags,
struct fs_context *fc);
extern int get_tree_bdev(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc));