Pull block devices as files from Christian Brauner:

This opens block devices as files. Instead of introducing a separate
indirection into bdev_open_by_*() via struct bdev_handle, we can just
make bdev_file_open_by_*() return a struct file. Opening and closing a
block device from setup_bdev_super() and in all other places just
becomes equivalent to opening and closing a file.
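
For illustration only, a minimal sketch of an open/close cycle with the
new helpers; the path and the holder below are placeholders, not taken
from the series:

    static int example_open_release(void *my_holder)
    {
            struct file *bdev_file;

            bdev_file = bdev_file_open_by_path("/dev/vda1",
                            BLK_OPEN_READ | BLK_OPEN_WRITE,
                            my_holder, NULL);
            if (IS_ERR(bdev_file))
                    return PTR_ERR(bdev_file);

            /* ... use the block device ... */

            fput(bdev_file);        /* closing it is closing a file */
            return 0;
    }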

This has held up in xfstests and in blktests so far and it seems stable
and clean. The equivalence of opening and closing block devices to
regular files is a win in and of itself imho. Added to that is the
ability to do away with struct bdev_handle completely and make various
low-level helpers private to the block layer.

All places where we currently stash a struct bdev_handle now just stash a
file and use an accessor such as file_bdev(), akin to I_BDEV(), to get to
the block device.
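
As a sketch, deriving the block device on demand from the stashed file
(the printed fields are only examples):

    static void example_stash_and_use(struct file *bdev_file)
    {
            /* file_bdev() is the file-based counterpart of I_BDEV() */
            struct block_device *bdev = file_bdev(bdev_file);

            pr_info("backing device %pg, %llu sectors\n",
                    bdev, (unsigned long long)bdev_nr_sectors(bdev));
    }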

It's now also possible to use file->f_mapping as a replacement for
bdev->bd_inode->i_mapping, and file->f_inode or file->f_mapping->host as
an alternative to bdev->bd_inode, allowing us to significantly reduce or
even fully remove bdev->bd_inode in follow-up patches.
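
Purely as a sketch, the file-based equivalents of the bd_inode-based
accessors mentioned above:

    static void example_mapping(struct file *bdev_file)
    {
            /* replaces bdev->bd_inode->i_mapping */
            struct address_space *mapping = bdev_file->f_mapping;
            /* replaces bdev->bd_inode */
            struct inode *bd_inode = bdev_file->f_mapping->host;

            invalidate_mapping_pages(mapping, 0, -1);
            pr_debug("bdev inode %lu\n", bd_inode->i_ino);
    }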

In addition, we could get rid of sb->s_bdev and various other places
that stash the block device directly and instead stash the block device
file. Again, this is follow-up work if we want this.

* series 'Open block devices as files' of https://lore.kernel.org/r/20240123-vfs-bdev-file-v2-0-adbd023e19cc@kernel.org: (35 commits)
  file: add alloc_file_pseudo_noaccount()
  file: prepare for new helper
  init: flush async file closing
  block: remove bdev_handle completely
  block: don't rely on BLK_OPEN_RESTRICT_WRITES when yielding write access
  bdev: remove bdev pointer from struct bdev_handle
  bdev: make struct bdev_handle private to the block layer
  bdev: make bdev_{release, open_by_dev}() private to block layer
  bdev: remove bdev_open_by_path()
  reiserfs: port block device access to file
  ocfs2: port block device access to file
  nfs: port block device access to files
  jfs: port block device access to file
  f2fs: port block device access to files
  ext4: port block device access to file
  erofs: port device access to file
  btrfs: port device access to file
  bcachefs: port block device access to file
  target: port block device access to file
  s390: port block device access to file
  nvme: port block device access to file
  block2mtd: port device access to files
  bcache: port block device access to files
  ...

Signed-off-by: Christian Brauner <brauner@kernel.org>

commit 40ebc18b99
Christian Brauner, 2024-02-05 12:05:16 +01:00
72 changed files with 811 additions and 702 deletions

@ -49,6 +49,12 @@ struct block_device *I_BDEV(struct inode *inode)
}
EXPORT_SYMBOL(I_BDEV);
struct block_device *file_bdev(struct file *bdev_file)
{
return I_BDEV(bdev_file->f_mapping->host);
}
EXPORT_SYMBOL(file_bdev);
static void bdev_write_inode(struct block_device *bdev)
{
struct inode *inode = bdev->bd_inode;
@ -368,12 +374,12 @@ static struct file_system_type bd_type = {
};
struct super_block *blockdev_superblock __ro_after_init;
struct vfsmount *blockdev_mnt __ro_after_init;
EXPORT_SYMBOL_GPL(blockdev_superblock);
void __init bdev_cache_init(void)
{
int err;
static struct vfsmount *bd_mnt __ro_after_init;
bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
@ -382,10 +388,10 @@ void __init bdev_cache_init(void)
err = register_filesystem(&bd_type);
if (err)
panic("Cannot register bdev pseudo-fs");
bd_mnt = kern_mount(&bd_type);
if (IS_ERR(bd_mnt))
blockdev_mnt = kern_mount(&bd_type);
if (IS_ERR(blockdev_mnt))
panic("Cannot create bdev pseudo-fs");
blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */
blockdev_superblock = blockdev_mnt->mnt_sb; /* For writeback */
}
struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
@ -696,6 +702,31 @@ out_blkdev_put:
return ret;
}
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder)
{
int ret;
ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
MAJOR(dev), MINOR(dev),
((mode & BLK_OPEN_READ) ? DEVCG_ACC_READ : 0) |
((mode & BLK_OPEN_WRITE) ? DEVCG_ACC_WRITE : 0));
if (ret)
return ret;
/* Blocking writes requires exclusive opener */
if (mode & BLK_OPEN_RESTRICT_WRITES && !holder)
return -EINVAL;
/*
* We're using error pointers to indicate to ->release() when we
* failed to open that block device. Also this doesn't make sense.
*/
if (WARN_ON_ONCE(IS_ERR(holder)))
return -EINVAL;
return 0;
}
static void blkdev_put_part(struct block_device *part)
{
struct block_device *whole = bdev_whole(part);
@ -775,83 +806,55 @@ static void bdev_claim_write_access(struct block_device *bdev, blk_mode_t mode)
bdev->bd_writers++;
}
static void bdev_yield_write_access(struct block_device *bdev, blk_mode_t mode)
static void bdev_yield_write_access(struct file *bdev_file)
{
struct block_device *bdev;
if (bdev_allow_write_mounted)
return;
bdev = file_bdev(bdev_file);
/* Yield exclusive or shared write access. */
if (mode & BLK_OPEN_RESTRICT_WRITES)
bdev_unblock_writes(bdev);
else if (mode & BLK_OPEN_WRITE)
bdev->bd_writers--;
if (bdev_file->f_mode & FMODE_WRITE) {
if (bdev_writes_blocked(bdev))
bdev_unblock_writes(bdev);
else
bdev->bd_writers--;
}
}
/**
* bdev_open_by_dev - open a block device by device number
* @dev: device number of block device to open
* bdev_open - open a block device
* @bdev: block device to open
* @mode: open mode (BLK_OPEN_*)
* @holder: exclusive holder identifier
* @hops: holder operations
* @bdev_file: file for the block device
*
* Open the block device described by device number @dev. If @holder is not
* %NULL, the block device is opened with exclusive access. Exclusive opens may
* nest for the same @holder.
*
* Use this interface ONLY if you really do not have anything better - i.e. when
* you are behind a truly sucky interface and all you are given is a device
* number. Everything else should use bdev_open_by_path().
* Open the block device. If @holder is not %NULL, the block device is opened
* with exclusive access. Exclusive opens may nest for the same @holder.
*
* CONTEXT:
* Might sleep.
*
* RETURNS:
* Handle with a reference to the block_device on success, ERR_PTR(-errno) on
* failure.
* zero on success, -errno on failure.
*/
struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
const struct blk_holder_ops *hops)
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
const struct blk_holder_ops *hops, struct file *bdev_file)
{
struct bdev_handle *handle = kmalloc(sizeof(struct bdev_handle),
GFP_KERNEL);
struct block_device *bdev;
bool unblock_events = true;
struct gendisk *disk;
struct gendisk *disk = bdev->bd_disk;
int ret;
if (!handle)
return ERR_PTR(-ENOMEM);
ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
MAJOR(dev), MINOR(dev),
((mode & BLK_OPEN_READ) ? DEVCG_ACC_READ : 0) |
((mode & BLK_OPEN_WRITE) ? DEVCG_ACC_WRITE : 0));
if (ret)
goto free_handle;
/* Blocking writes requires exclusive opener */
if (mode & BLK_OPEN_RESTRICT_WRITES && !holder) {
ret = -EINVAL;
goto free_handle;
}
bdev = blkdev_get_no_open(dev);
if (!bdev) {
ret = -ENXIO;
goto free_handle;
}
disk = bdev->bd_disk;
if (holder) {
mode |= BLK_OPEN_EXCL;
ret = bd_prepare_to_claim(bdev, holder, hops);
if (ret)
goto put_blkdev;
return ret;
} else {
if (WARN_ON_ONCE(mode & BLK_OPEN_EXCL)) {
ret = -EIO;
goto put_blkdev;
}
if (WARN_ON_ONCE(mode & BLK_OPEN_EXCL))
return -EIO;
}
disk_block_events(disk);
@ -892,10 +895,16 @@ struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
if (unblock_events)
disk_unblock_events(disk);
handle->bdev = bdev;
handle->holder = holder;
handle->mode = mode;
return handle;
bdev_file->f_flags |= O_LARGEFILE;
bdev_file->f_mode |= FMODE_BUF_RASYNC | FMODE_CAN_ODIRECT;
if (bdev_nowait(bdev))
bdev_file->f_mode |= FMODE_NOWAIT;
bdev_file->f_mapping = bdev->bd_inode->i_mapping;
bdev_file->f_wb_err = filemap_sample_wb_err(bdev_file->f_mapping);
bdev_file->private_data = holder;
return 0;
put_module:
module_put(disk->fops->owner);
abort_claiming:
@ -903,36 +912,80 @@ abort_claiming:
bd_abort_claiming(bdev, holder);
mutex_unlock(&disk->open_mutex);
disk_unblock_events(disk);
put_blkdev:
blkdev_put_no_open(bdev);
free_handle:
kfree(handle);
return ERR_PTR(ret);
return ret;
}
EXPORT_SYMBOL(bdev_open_by_dev);
/**
* bdev_open_by_path - open a block device by name
* @path: path to the block device to open
* @mode: open mode (BLK_OPEN_*)
* @holder: exclusive holder identifier
* @hops: holder operations
/*
* If BLK_OPEN_WRITE_IOCTL is set then this is a historical quirk
* associated with the floppy driver where it has allowed ioctls if the
* file was opened for writing, but does not allow reads or writes.
* Make sure that this quirk is reflected in @f_flags.
*
* Open the block device described by the device file at @path. If @holder is
* not %NULL, the block device is opened with exclusive access. Exclusive opens
* may nest for the same @holder.
*
* CONTEXT:
* Might sleep.
*
* RETURNS:
* Handle with a reference to the block_device on success, ERR_PTR(-errno) on
* failure.
* It can also happen if a block device is opened as O_RDWR | O_WRONLY.
*/
struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
void *holder, const struct blk_holder_ops *hops)
static unsigned blk_to_file_flags(blk_mode_t mode)
{
struct bdev_handle *handle;
unsigned int flags = 0;
if ((mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) ==
(BLK_OPEN_READ | BLK_OPEN_WRITE))
flags |= O_RDWR;
else if (mode & BLK_OPEN_WRITE_IOCTL)
flags |= O_RDWR | O_WRONLY;
else if (mode & BLK_OPEN_WRITE)
flags |= O_WRONLY;
else if (mode & BLK_OPEN_READ)
flags |= O_RDONLY; /* homeopathic, because O_RDONLY is 0 */
else
WARN_ON_ONCE(true);
if (mode & BLK_OPEN_NDELAY)
flags |= O_NDELAY;
return flags;
}
struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
const struct blk_holder_ops *hops)
{
struct file *bdev_file;
struct block_device *bdev;
unsigned int flags;
int ret;
ret = bdev_permission(dev, mode, holder);
if (ret)
return ERR_PTR(ret);
bdev = blkdev_get_no_open(dev);
if (!bdev)
return ERR_PTR(-ENXIO);
flags = blk_to_file_flags(mode);
bdev_file = alloc_file_pseudo_noaccount(bdev->bd_inode,
blockdev_mnt, "", flags | O_LARGEFILE, &def_blk_fops);
if (IS_ERR(bdev_file)) {
blkdev_put_no_open(bdev);
return bdev_file;
}
ihold(bdev->bd_inode);
ret = bdev_open(bdev, mode, holder, hops, bdev_file);
if (ret) {
/* We failed to open the block device. Let ->release() know. */
bdev_file->private_data = ERR_PTR(ret);
fput(bdev_file);
return ERR_PTR(ret);
}
return bdev_file;
}
EXPORT_SYMBOL(bdev_file_open_by_dev);
struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
void *holder,
const struct blk_holder_ops *hops)
{
struct file *file;
dev_t dev;
int error;
@ -940,22 +993,28 @@ struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
if (error)
return ERR_PTR(error);
handle = bdev_open_by_dev(dev, mode, holder, hops);
if (!IS_ERR(handle) && (mode & BLK_OPEN_WRITE) &&
bdev_read_only(handle->bdev)) {
bdev_release(handle);
return ERR_PTR(-EACCES);
file = bdev_file_open_by_dev(dev, mode, holder, hops);
if (!IS_ERR(file) && (mode & BLK_OPEN_WRITE)) {
if (bdev_read_only(file_bdev(file))) {
fput(file);
file = ERR_PTR(-EACCES);
}
}
return handle;
return file;
}
EXPORT_SYMBOL(bdev_open_by_path);
EXPORT_SYMBOL(bdev_file_open_by_path);
void bdev_release(struct bdev_handle *handle)
void bdev_release(struct file *bdev_file)
{
struct block_device *bdev = handle->bdev;
struct block_device *bdev = file_bdev(bdev_file);
void *holder = bdev_file->private_data;
struct gendisk *disk = bdev->bd_disk;
/* We failed to open that block device. */
if (IS_ERR(holder))
goto put_no_open;
/*
* Sync early if it looks like we're the last one. If someone else
* opens the block device between now and the decrement of bd_openers
@ -967,10 +1026,10 @@ void bdev_release(struct bdev_handle *handle)
sync_blockdev(bdev);
mutex_lock(&disk->open_mutex);
bdev_yield_write_access(bdev, handle->mode);
bdev_yield_write_access(bdev_file);
if (handle->holder)
bd_end_claim(bdev, handle->holder);
if (holder)
bd_end_claim(bdev, holder);
/*
* Trigger event checking and tell drivers to flush MEDIA_CHANGE
@ -986,10 +1045,9 @@ void bdev_release(struct bdev_handle *handle)
mutex_unlock(&disk->open_mutex);
module_put(disk->fops->owner);
put_no_open:
blkdev_put_no_open(bdev);
kfree(handle);
}
EXPORT_SYMBOL(bdev_release);
/**
* lookup_bdev() - Look up a struct block_device by name.


@ -516,4 +516,8 @@ static inline int req_ref_read(struct request *req)
return atomic_read(&req->ref);
}
void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);
#endif /* BLK_INTERNAL_H */


@ -569,18 +569,17 @@ static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
blk_mode_t file_to_blk_mode(struct file *file)
{
blk_mode_t mode = 0;
struct bdev_handle *handle = file->private_data;
if (file->f_mode & FMODE_READ)
mode |= BLK_OPEN_READ;
if (file->f_mode & FMODE_WRITE)
mode |= BLK_OPEN_WRITE;
/*
* do_dentry_open() clears O_EXCL from f_flags, use handle->mode to
* determine whether the open was exclusive for already open files.
* do_dentry_open() clears O_EXCL from f_flags, use file->private_data
* to determine whether the open was exclusive for already open files.
*/
if (handle)
mode |= handle->mode & BLK_OPEN_EXCL;
if (file->private_data)
mode |= BLK_OPEN_EXCL;
else if (file->f_flags & O_EXCL)
mode |= BLK_OPEN_EXCL;
if (file->f_flags & O_NDELAY)
@ -599,36 +598,31 @@ blk_mode_t file_to_blk_mode(struct file *file)
static int blkdev_open(struct inode *inode, struct file *filp)
{
struct bdev_handle *handle;
struct block_device *bdev;
blk_mode_t mode;
/*
* Preserve backwards compatibility and allow large file access
* even if userspace doesn't ask for it explicitly. Some mkfs
* binary needs it. We might want to drop this workaround
* during an unstable branch.
*/
filp->f_flags |= O_LARGEFILE;
filp->f_mode |= FMODE_BUF_RASYNC | FMODE_CAN_ODIRECT;
int ret;
mode = file_to_blk_mode(filp);
handle = bdev_open_by_dev(inode->i_rdev, mode,
mode & BLK_OPEN_EXCL ? filp : NULL, NULL);
if (IS_ERR(handle))
return PTR_ERR(handle);
/* Use the file as the holder. */
if (mode & BLK_OPEN_EXCL)
filp->private_data = filp;
ret = bdev_permission(inode->i_rdev, mode, filp->private_data);
if (ret)
return ret;
if (bdev_nowait(handle->bdev))
filp->f_mode |= FMODE_NOWAIT;
bdev = blkdev_get_no_open(inode->i_rdev);
if (!bdev)
return -ENXIO;
filp->f_mapping = handle->bdev->bd_inode->i_mapping;
filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
filp->private_data = handle;
return 0;
ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
if (ret)
blkdev_put_no_open(bdev);
return ret;
}
static int blkdev_release(struct inode *inode, struct file *filp)
{
bdev_release(filp->private_data);
bdev_release(filp);
return 0;
}


@ -342,7 +342,7 @@ EXPORT_SYMBOL_GPL(disk_uevent);
int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode)
{
struct bdev_handle *handle;
struct file *file;
int ret = 0;
if (disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN))
@ -366,12 +366,12 @@ int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode)
}
set_bit(GD_NEED_PART_SCAN, &disk->state);
handle = bdev_open_by_dev(disk_devt(disk), mode & ~BLK_OPEN_EXCL, NULL,
NULL);
if (IS_ERR(handle))
ret = PTR_ERR(handle);
file = bdev_file_open_by_dev(disk_devt(disk), mode & ~BLK_OPEN_EXCL,
NULL, NULL);
if (IS_ERR(file))
ret = PTR_ERR(file);
else
bdev_release(handle);
fput(file);
/*
* If blkdev_get_by_dev() failed early, GD_NEED_PART_SCAN is still set,


@ -471,7 +471,7 @@ static int blkdev_bszset(struct block_device *bdev, blk_mode_t mode,
int __user *argp)
{
int ret, n;
struct bdev_handle *handle;
struct file *file;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@ -483,12 +483,11 @@ static int blkdev_bszset(struct block_device *bdev, blk_mode_t mode,
if (mode & BLK_OPEN_EXCL)
return set_blocksize(bdev, n);
handle = bdev_open_by_dev(bdev->bd_dev, mode, &bdev, NULL);
if (IS_ERR(handle))
file = bdev_file_open_by_dev(bdev->bd_dev, mode, &bdev, NULL);
if (IS_ERR(file))
return -EBUSY;
ret = set_blocksize(bdev, n);
bdev_release(handle);
fput(file);
return ret;
}


@ -524,9 +524,9 @@ struct drbd_md {
struct drbd_backing_dev {
struct block_device *backing_bdev;
struct bdev_handle *backing_bdev_handle;
struct file *backing_bdev_file;
struct block_device *md_bdev;
struct bdev_handle *md_bdev_handle;
struct file *f_md_bdev;
struct drbd_md md;
struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
sector_t known_size; /* last known size of that backing device */


@ -1635,45 +1635,45 @@ success:
return 0;
}
static struct bdev_handle *open_backing_dev(struct drbd_device *device,
static struct file *open_backing_dev(struct drbd_device *device,
const char *bdev_path, void *claim_ptr, bool do_bd_link)
{
struct bdev_handle *handle;
struct file *file;
int err = 0;
handle = bdev_open_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE,
claim_ptr, NULL);
if (IS_ERR(handle)) {
file = bdev_file_open_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE,
claim_ptr, NULL);
if (IS_ERR(file)) {
drbd_err(device, "open(\"%s\") failed with %ld\n",
bdev_path, PTR_ERR(handle));
return handle;
bdev_path, PTR_ERR(file));
return file;
}
if (!do_bd_link)
return handle;
return file;
err = bd_link_disk_holder(handle->bdev, device->vdisk);
err = bd_link_disk_holder(file_bdev(file), device->vdisk);
if (err) {
bdev_release(handle);
fput(file);
drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
bdev_path, err);
handle = ERR_PTR(err);
file = ERR_PTR(err);
}
return handle;
return file;
}
static int open_backing_devices(struct drbd_device *device,
struct disk_conf *new_disk_conf,
struct drbd_backing_dev *nbc)
{
struct bdev_handle *handle;
struct file *file;
handle = open_backing_dev(device, new_disk_conf->backing_dev, device,
file = open_backing_dev(device, new_disk_conf->backing_dev, device,
true);
if (IS_ERR(handle))
if (IS_ERR(file))
return ERR_OPEN_DISK;
nbc->backing_bdev = handle->bdev;
nbc->backing_bdev_handle = handle;
nbc->backing_bdev = file_bdev(file);
nbc->backing_bdev_file = file;
/*
* meta_dev_idx >= 0: external fixed size, possibly multiple
@ -1683,7 +1683,7 @@ static int open_backing_devices(struct drbd_device *device,
* should check it for you already; but if you don't, or
* someone fooled it, we need to double check here)
*/
handle = open_backing_dev(device, new_disk_conf->meta_dev,
file = open_backing_dev(device, new_disk_conf->meta_dev,
/* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
* if potentially shared with other drbd minors */
(new_disk_conf->meta_dev_idx < 0) ? (void*)device : (void*)drbd_m_holder,
@ -1691,21 +1691,21 @@ static int open_backing_devices(struct drbd_device *device,
* as would happen with internal metadata. */
(new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
if (IS_ERR(handle))
if (IS_ERR(file))
return ERR_OPEN_MD_DISK;
nbc->md_bdev = handle->bdev;
nbc->md_bdev_handle = handle;
nbc->md_bdev = file_bdev(file);
nbc->f_md_bdev = file;
return NO_ERROR;
}
static void close_backing_dev(struct drbd_device *device,
struct bdev_handle *handle, bool do_bd_unlink)
struct file *bdev_file, bool do_bd_unlink)
{
if (!handle)
if (!bdev_file)
return;
if (do_bd_unlink)
bd_unlink_disk_holder(handle->bdev, device->vdisk);
bdev_release(handle);
bd_unlink_disk_holder(file_bdev(bdev_file), device->vdisk);
fput(bdev_file);
}
void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
@ -1713,9 +1713,9 @@ void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *
if (ldev == NULL)
return;
close_backing_dev(device, ldev->md_bdev_handle,
close_backing_dev(device, ldev->f_md_bdev,
ldev->md_bdev != ldev->backing_bdev);
close_backing_dev(device, ldev->backing_bdev_handle, true);
close_backing_dev(device, ldev->backing_bdev_file, true);
kfree(ldev->disk_conf);
kfree(ldev);
@ -2131,9 +2131,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
fail:
conn_reconfig_done(connection);
if (nbc) {
close_backing_dev(device, nbc->md_bdev_handle,
close_backing_dev(device, nbc->f_md_bdev,
nbc->md_bdev != nbc->backing_bdev);
close_backing_dev(device, nbc->backing_bdev_handle, true);
close_backing_dev(device, nbc->backing_bdev_file, true);
kfree(nbc);
}
kfree(new_disk_conf);


@ -340,8 +340,8 @@ static ssize_t device_map_show(const struct class *c, const struct class_attribu
n += sysfs_emit_at(data, n, "%s %u:%u %u:%u\n",
pd->disk->disk_name,
MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
MAJOR(pd->bdev_handle->bdev->bd_dev),
MINOR(pd->bdev_handle->bdev->bd_dev));
MAJOR(file_bdev(pd->bdev_file)->bd_dev),
MINOR(file_bdev(pd->bdev_file)->bd_dev));
}
mutex_unlock(&ctl_mutex);
return n;
@ -438,7 +438,7 @@ static int pkt_seq_show(struct seq_file *m, void *p)
int states[PACKET_NUM_STATES];
seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name,
pd->bdev_handle->bdev);
file_bdev(pd->bdev_file));
seq_printf(m, "\nSettings:\n");
seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
@ -715,7 +715,7 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
*/
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
struct request_queue *q = bdev_get_queue(pd->bdev_handle->bdev);
struct request_queue *q = bdev_get_queue(file_bdev(pd->bdev_file));
struct scsi_cmnd *scmd;
struct request *rq;
int ret = 0;
@ -1048,7 +1048,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
continue;
bio = pkt->r_bios[f];
bio_init(bio, pd->bdev_handle->bdev, bio->bi_inline_vecs, 1,
bio_init(bio, file_bdev(pd->bdev_file), bio->bi_inline_vecs, 1,
REQ_OP_READ);
bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
bio->bi_end_io = pkt_end_io_read;
@ -1264,7 +1264,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
struct device *ddev = disk_to_dev(pd->disk);
int f;
bio_init(pkt->w_bio, pd->bdev_handle->bdev, pkt->w_bio->bi_inline_vecs,
bio_init(pkt->w_bio, file_bdev(pd->bdev_file), pkt->w_bio->bi_inline_vecs,
pkt->frames, REQ_OP_WRITE);
pkt->w_bio->bi_iter.bi_sector = pkt->sector;
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
@ -2162,20 +2162,20 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
int ret;
long lba;
struct request_queue *q;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
/*
* We need to re-open the cdrom device without O_NONBLOCK to be able
* to read/write from/to it. It is already opened in O_NONBLOCK mode
* so open should not fail.
*/
bdev_handle = bdev_open_by_dev(pd->bdev_handle->bdev->bd_dev,
bdev_file = bdev_file_open_by_dev(file_bdev(pd->bdev_file)->bd_dev,
BLK_OPEN_READ, pd, NULL);
if (IS_ERR(bdev_handle)) {
ret = PTR_ERR(bdev_handle);
if (IS_ERR(bdev_file)) {
ret = PTR_ERR(bdev_file);
goto out;
}
pd->open_bdev_handle = bdev_handle;
pd->f_open_bdev = bdev_file;
ret = pkt_get_last_written(pd, &lba);
if (ret) {
@ -2184,9 +2184,9 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
}
set_capacity(pd->disk, lba << 2);
set_capacity_and_notify(pd->bdev_handle->bdev->bd_disk, lba << 2);
set_capacity_and_notify(file_bdev(pd->bdev_file)->bd_disk, lba << 2);
q = bdev_get_queue(pd->bdev_handle->bdev);
q = bdev_get_queue(file_bdev(pd->bdev_file));
if (write) {
ret = pkt_open_write(pd);
if (ret)
@ -2218,7 +2218,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
return 0;
out_putdev:
bdev_release(bdev_handle);
fput(bdev_file);
out:
return ret;
}
@ -2237,8 +2237,8 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
pkt_lock_door(pd, 0);
pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
bdev_release(pd->open_bdev_handle);
pd->open_bdev_handle = NULL;
fput(pd->f_open_bdev);
pd->f_open_bdev = NULL;
pkt_shrink_pktlist(pd);
}
@ -2326,7 +2326,7 @@ static void pkt_end_io_read_cloned(struct bio *bio)
static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
struct bio *cloned_bio = bio_alloc_clone(pd->bdev_handle->bdev, bio,
struct bio *cloned_bio = bio_alloc_clone(file_bdev(pd->bdev_file), bio,
GFP_NOIO, &pkt_bio_set);
struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
@ -2497,7 +2497,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
struct device *ddev = disk_to_dev(pd->disk);
int i;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
struct scsi_device *sdev;
if (pd->pkt_dev == dev) {
@ -2508,9 +2508,9 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
struct pktcdvd_device *pd2 = pkt_devs[i];
if (!pd2)
continue;
if (pd2->bdev_handle->bdev->bd_dev == dev) {
if (file_bdev(pd2->bdev_file)->bd_dev == dev) {
dev_err(ddev, "%pg already setup\n",
pd2->bdev_handle->bdev);
file_bdev(pd2->bdev_file));
return -EBUSY;
}
if (pd2->pkt_dev == dev) {
@ -2519,13 +2519,13 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
}
}
bdev_handle = bdev_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY,
bdev_file = bdev_file_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY,
NULL, NULL);
if (IS_ERR(bdev_handle))
return PTR_ERR(bdev_handle);
sdev = scsi_device_from_queue(bdev_handle->bdev->bd_disk->queue);
if (IS_ERR(bdev_file))
return PTR_ERR(bdev_file);
sdev = scsi_device_from_queue(file_bdev(bdev_file)->bd_disk->queue);
if (!sdev) {
bdev_release(bdev_handle);
fput(bdev_file);
return -EINVAL;
}
put_device(&sdev->sdev_gendev);
@ -2533,8 +2533,8 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
pd->bdev_handle = bdev_handle;
set_blocksize(bdev_handle->bdev, CD_FRAMESIZE);
pd->bdev_file = bdev_file;
set_blocksize(file_bdev(bdev_file), CD_FRAMESIZE);
pkt_init_queue(pd);
@ -2546,11 +2546,11 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
}
proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd);
dev_notice(ddev, "writer mapped to %pg\n", bdev_handle->bdev);
dev_notice(ddev, "writer mapped to %pg\n", file_bdev(bdev_file));
return 0;
out_mem:
bdev_release(bdev_handle);
fput(bdev_file);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
return -ENOMEM;
@ -2605,9 +2605,9 @@ static unsigned int pkt_check_events(struct gendisk *disk,
if (!pd)
return 0;
if (!pd->bdev_handle)
if (!pd->bdev_file)
return 0;
attached_disk = pd->bdev_handle->bdev->bd_disk;
attached_disk = file_bdev(pd->bdev_file)->bd_disk;
if (!attached_disk || !attached_disk->fops->check_events)
return 0;
return attached_disk->fops->check_events(attached_disk, clearing);
@ -2692,7 +2692,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
goto out_mem2;
/* inherit events of the host device */
disk->events = pd->bdev_handle->bdev->bd_disk->events;
disk->events = file_bdev(pd->bdev_file)->bd_disk->events;
ret = add_disk(disk);
if (ret)
@ -2757,7 +2757,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
pkt_debugfs_dev_remove(pd);
pkt_sysfs_dev_remove(pd);
bdev_release(pd->bdev_handle);
fput(pd->bdev_file);
remove_proc_entry(pd->disk->disk_name, pkt_proc);
dev_notice(ddev, "writer unmapped\n");
@ -2784,7 +2784,7 @@ static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
if (pd) {
ctrl_cmd->dev = new_encode_dev(pd->bdev_handle->bdev->bd_dev);
ctrl_cmd->dev = new_encode_dev(file_bdev(pd->bdev_file)->bd_dev);
ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
} else {
ctrl_cmd->dev = 0;


@ -145,7 +145,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
priv->sess_dev = sess_dev;
priv->id = id;
bio = bio_alloc(sess_dev->bdev_handle->bdev, 1,
bio = bio_alloc(file_bdev(sess_dev->bdev_file), 1,
rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
if (bio_add_page(bio, virt_to_page(data), datalen,
offset_in_page(data)) != datalen) {
@ -219,7 +219,7 @@ void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id)
rnbd_put_sess_dev(sess_dev);
wait_for_completion(&dc); /* wait for inflights to drop to zero */
bdev_release(sess_dev->bdev_handle);
fput(sess_dev->bdev_file);
mutex_lock(&sess_dev->dev->lock);
list_del(&sess_dev->dev_list);
if (!sess_dev->readonly)
@ -534,7 +534,7 @@ rnbd_srv_get_or_create_srv_dev(struct block_device *bdev,
static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
struct rnbd_srv_sess_dev *sess_dev)
{
struct block_device *bdev = sess_dev->bdev_handle->bdev;
struct block_device *bdev = file_bdev(sess_dev->bdev_file);
rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
rsp->device_id = cpu_to_le32(sess_dev->device_id);
@ -560,7 +560,7 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
static struct rnbd_srv_sess_dev *
rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
const struct rnbd_msg_open *open_msg,
struct bdev_handle *handle, bool readonly,
struct file *bdev_file, bool readonly,
struct rnbd_srv_dev *srv_dev)
{
struct rnbd_srv_sess_dev *sdev = rnbd_sess_dev_alloc(srv_sess);
@ -572,7 +572,7 @@ rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
strscpy(sdev->pathname, open_msg->dev_name, sizeof(sdev->pathname));
sdev->bdev_handle = handle;
sdev->bdev_file = bdev_file;
sdev->sess = srv_sess;
sdev->dev = srv_dev;
sdev->readonly = readonly;
@ -678,7 +678,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
struct rnbd_srv_dev *srv_dev;
struct rnbd_srv_sess_dev *srv_sess_dev;
const struct rnbd_msg_open *open_msg = msg;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
blk_mode_t open_flags = BLK_OPEN_READ;
char *full_path;
struct rnbd_msg_open_rsp *rsp = data;
@ -716,15 +716,15 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
goto reject;
}
bdev_handle = bdev_open_by_path(full_path, open_flags, NULL, NULL);
if (IS_ERR(bdev_handle)) {
ret = PTR_ERR(bdev_handle);
bdev_file = bdev_file_open_by_path(full_path, open_flags, NULL, NULL);
if (IS_ERR(bdev_file)) {
ret = PTR_ERR(bdev_file);
pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %pe\n",
full_path, srv_sess->sessname, bdev_handle);
full_path, srv_sess->sessname, bdev_file);
goto free_path;
}
srv_dev = rnbd_srv_get_or_create_srv_dev(bdev_handle->bdev, srv_sess,
srv_dev = rnbd_srv_get_or_create_srv_dev(file_bdev(bdev_file), srv_sess,
open_msg->access_mode);
if (IS_ERR(srv_dev)) {
pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %pe\n",
@ -734,7 +734,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
}
srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg,
bdev_handle,
bdev_file,
open_msg->access_mode == RNBD_ACCESS_RO,
srv_dev);
if (IS_ERR(srv_sess_dev)) {
@ -750,7 +750,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
*/
mutex_lock(&srv_dev->lock);
if (!srv_dev->dev_kobj.state_in_sysfs) {
ret = rnbd_srv_create_dev_sysfs(srv_dev, bdev_handle->bdev);
ret = rnbd_srv_create_dev_sysfs(srv_dev, file_bdev(bdev_file));
if (ret) {
mutex_unlock(&srv_dev->lock);
rnbd_srv_err(srv_sess_dev,
@ -793,7 +793,7 @@ srv_dev_put:
}
rnbd_put_srv_dev(srv_dev);
blkdev_put:
bdev_release(bdev_handle);
fput(bdev_file);
free_path:
kfree(full_path);
reject:


@ -46,7 +46,7 @@ struct rnbd_srv_dev {
struct rnbd_srv_sess_dev {
/* Entry inside rnbd_srv_dev struct */
struct list_head dev_list;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
struct rnbd_srv_session *sess;
struct rnbd_srv_dev *dev;
struct kobject kobj;


@ -465,7 +465,7 @@ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
}
req->dev = vbd->pdevice;
req->bdev = vbd->bdev_handle->bdev;
req->bdev = file_bdev(vbd->bdev_file);
rc = 0;
out:
@ -969,7 +969,7 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring,
int err = 0;
int status = BLKIF_RSP_OKAY;
struct xen_blkif *blkif = ring->blkif;
struct block_device *bdev = blkif->vbd.bdev_handle->bdev;
struct block_device *bdev = file_bdev(blkif->vbd.bdev_file);
struct phys_req preq;
xen_blkif_get(blkif);


@ -221,7 +221,7 @@ struct xen_vbd {
unsigned char type;
/* phys device that this vbd maps to. */
u32 pdevice;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
/* Cached size parameter. */
sector_t size;
unsigned int flush_support:1;
@ -360,7 +360,7 @@ struct pending_req {
};
#define vbd_sz(_v) bdev_nr_sectors((_v)->bdev_handle->bdev)
#define vbd_sz(_v) bdev_nr_sectors(file_bdev((_v)->bdev_file))
#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b) \


@ -81,7 +81,7 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
int i;
/* Not ready to connect? */
if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev_handle)
if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev_file)
return;
/* Already connected? */
@ -99,13 +99,12 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
return;
}
err = sync_blockdev(blkif->vbd.bdev_handle->bdev);
err = sync_blockdev(file_bdev(blkif->vbd.bdev_file));
if (err) {
xenbus_dev_error(blkif->be->dev, err, "block flush");
return;
}
invalidate_inode_pages2(
blkif->vbd.bdev_handle->bdev->bd_inode->i_mapping);
invalidate_inode_pages2(blkif->vbd.bdev_file->f_mapping);
for (i = 0; i < blkif->nr_rings; i++) {
ring = &blkif->rings[i];
@ -473,9 +472,9 @@ static void xenvbd_sysfs_delif(struct xenbus_device *dev)
static void xen_vbd_free(struct xen_vbd *vbd)
{
if (vbd->bdev_handle)
bdev_release(vbd->bdev_handle);
vbd->bdev_handle = NULL;
if (vbd->bdev_file)
fput(vbd->bdev_file);
vbd->bdev_file = NULL;
}
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
@ -483,7 +482,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
int cdrom)
{
struct xen_vbd *vbd;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
vbd = &blkif->vbd;
vbd->handle = handle;
@ -492,17 +491,17 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
vbd->pdevice = MKDEV(major, minor);
bdev_handle = bdev_open_by_dev(vbd->pdevice, vbd->readonly ?
bdev_file = bdev_file_open_by_dev(vbd->pdevice, vbd->readonly ?
BLK_OPEN_READ : BLK_OPEN_WRITE, NULL, NULL);
if (IS_ERR(bdev_handle)) {
if (IS_ERR(bdev_file)) {
pr_warn("xen_vbd_create: device %08x could not be opened\n",
vbd->pdevice);
return -ENOENT;
}
vbd->bdev_handle = bdev_handle;
if (vbd->bdev_handle->bdev->bd_disk == NULL) {
vbd->bdev_file = bdev_file;
if (file_bdev(vbd->bdev_file)->bd_disk == NULL) {
pr_warn("xen_vbd_create: device %08x doesn't exist\n",
vbd->pdevice);
xen_vbd_free(vbd);
@ -510,14 +509,14 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
}
vbd->size = vbd_sz(vbd);
if (cdrom || disk_to_cdi(vbd->bdev_handle->bdev->bd_disk))
if (cdrom || disk_to_cdi(file_bdev(vbd->bdev_file)->bd_disk))
vbd->type |= VDISK_CDROM;
if (vbd->bdev_handle->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
if (file_bdev(vbd->bdev_file)->bd_disk->flags & GENHD_FL_REMOVABLE)
vbd->type |= VDISK_REMOVABLE;
if (bdev_write_cache(bdev_handle->bdev))
if (bdev_write_cache(file_bdev(bdev_file)))
vbd->flush_support = true;
if (bdev_max_secure_erase_sectors(bdev_handle->bdev))
if (bdev_max_secure_erase_sectors(file_bdev(bdev_file)))
vbd->discard_secure = true;
pr_debug("Successful creation of handle=%04x (dom=%u)\n",
@ -570,7 +569,7 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
struct xen_blkif *blkif = be->blkif;
int err;
int state = 0;
struct block_device *bdev = be->blkif->vbd.bdev_handle->bdev;
struct block_device *bdev = file_bdev(be->blkif->vbd.bdev_file);
if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
return;
@ -932,7 +931,7 @@ again:
}
err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
(unsigned long)bdev_logical_block_size(
be->blkif->vbd.bdev_handle->bdev));
file_bdev(be->blkif->vbd.bdev_file)));
if (err) {
xenbus_dev_fatal(dev, err, "writing %s/sector-size",
dev->nodename);
@ -940,7 +939,7 @@ again:
}
err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
bdev_physical_block_size(
be->blkif->vbd.bdev_handle->bdev));
file_bdev(be->blkif->vbd.bdev_file)));
if (err)
xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
dev->nodename);


@ -426,11 +426,11 @@ static void reset_bdev(struct zram *zram)
if (!zram->backing_dev)
return;
bdev_release(zram->bdev_handle);
fput(zram->bdev_file);
/* hope filp_close flush all of IO */
filp_close(zram->backing_dev, NULL);
zram->backing_dev = NULL;
zram->bdev_handle = NULL;
zram->bdev_file = NULL;
zram->disk->fops = &zram_devops;
kvfree(zram->bitmap);
zram->bitmap = NULL;
@ -476,7 +476,7 @@ static ssize_t backing_dev_store(struct device *dev,
struct address_space *mapping;
unsigned int bitmap_sz;
unsigned long nr_pages, *bitmap = NULL;
struct bdev_handle *bdev_handle = NULL;
struct file *bdev_file = NULL;
int err;
struct zram *zram = dev_to_zram(dev);
@ -513,11 +513,11 @@ static ssize_t backing_dev_store(struct device *dev,
goto out;
}
bdev_handle = bdev_open_by_dev(inode->i_rdev,
bdev_file = bdev_file_open_by_dev(inode->i_rdev,
BLK_OPEN_READ | BLK_OPEN_WRITE, zram, NULL);
if (IS_ERR(bdev_handle)) {
err = PTR_ERR(bdev_handle);
bdev_handle = NULL;
if (IS_ERR(bdev_file)) {
err = PTR_ERR(bdev_file);
bdev_file = NULL;
goto out;
}
@ -531,7 +531,7 @@ static ssize_t backing_dev_store(struct device *dev,
reset_bdev(zram);
zram->bdev_handle = bdev_handle;
zram->bdev_file = bdev_file;
zram->backing_dev = backing_dev;
zram->bitmap = bitmap;
zram->nr_pages = nr_pages;
@ -544,8 +544,8 @@ static ssize_t backing_dev_store(struct device *dev,
out:
kvfree(bitmap);
if (bdev_handle)
bdev_release(bdev_handle);
if (bdev_file)
fput(bdev_file);
if (backing_dev)
filp_close(backing_dev, NULL);
@ -587,7 +587,7 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
{
struct bio *bio;
bio = bio_alloc(zram->bdev_handle->bdev, 1, parent->bi_opf, GFP_NOIO);
bio = bio_alloc(file_bdev(zram->bdev_file), 1, parent->bi_opf, GFP_NOIO);
bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
__bio_add_page(bio, page, PAGE_SIZE, 0);
bio_chain(bio, parent);
@ -703,7 +703,7 @@ static ssize_t writeback_store(struct device *dev,
continue;
}
bio_init(&bio, zram->bdev_handle->bdev, &bio_vec, 1,
bio_init(&bio, file_bdev(zram->bdev_file), &bio_vec, 1,
REQ_OP_WRITE | REQ_SYNC);
bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
__bio_add_page(&bio, page, PAGE_SIZE, 0);
@ -785,7 +785,7 @@ static void zram_sync_read(struct work_struct *work)
struct bio_vec bv;
struct bio bio;
bio_init(&bio, zw->zram->bdev_handle->bdev, &bv, 1, REQ_OP_READ);
bio_init(&bio, file_bdev(zw->zram->bdev_file), &bv, 1, REQ_OP_READ);
bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
__bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
zw->error = submit_bio_wait(&bio);


@ -132,7 +132,7 @@ struct zram {
spinlock_t wb_limit_lock;
bool wb_limit_enable;
u64 bd_wb_limit;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
unsigned long *bitmap;
unsigned long nr_pages;
#endif


@ -300,7 +300,7 @@ struct cached_dev {
struct list_head list;
struct bcache_device disk;
struct block_device *bdev;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
struct cache_sb sb;
struct cache_sb_disk *sb_disk;
@ -423,7 +423,7 @@ struct cache {
struct kobject kobj;
struct block_device *bdev;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
struct task_struct *alloc_thread;


@ -1369,8 +1369,8 @@ static CLOSURE_CALLBACK(cached_dev_free)
if (dc->sb_disk)
put_page(virt_to_page(dc->sb_disk));
if (dc->bdev_handle)
bdev_release(dc->bdev_handle);
if (dc->bdev_file)
fput(dc->bdev_file);
wake_up(&unregister_wait);
@ -1445,7 +1445,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
/* Cached device - bcache superblock */
static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
struct bdev_handle *bdev_handle,
struct file *bdev_file,
struct cached_dev *dc)
{
const char *err = "cannot allocate memory";
@ -1453,8 +1453,8 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
int ret = -ENOMEM;
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
dc->bdev_handle = bdev_handle;
dc->bdev = bdev_handle->bdev;
dc->bdev_file = bdev_file;
dc->bdev = file_bdev(bdev_file);
dc->sb_disk = sb_disk;
if (cached_dev_init(dc, sb->block_size << 9))
@ -2218,8 +2218,8 @@ void bch_cache_release(struct kobject *kobj)
if (ca->sb_disk)
put_page(virt_to_page(ca->sb_disk));
if (ca->bdev_handle)
bdev_release(ca->bdev_handle);
if (ca->bdev_file)
fput(ca->bdev_file);
kfree(ca);
module_put(THIS_MODULE);
@ -2339,18 +2339,18 @@ err_free:
}
static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
struct bdev_handle *bdev_handle,
struct file *bdev_file,
struct cache *ca)
{
const char *err = NULL; /* must be set for any error case */
int ret = 0;
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev_handle = bdev_handle;
ca->bdev = bdev_handle->bdev;
ca->bdev_file = bdev_file;
ca->bdev = file_bdev(bdev_file);
ca->sb_disk = sb_disk;
if (bdev_max_discard_sectors((bdev_handle->bdev)))
if (bdev_max_discard_sectors(file_bdev(bdev_file)))
ca->discard = CACHE_DISCARD(&ca->sb);
ret = cache_alloc(ca);
@ -2361,20 +2361,20 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
err = "cache_alloc(): cache device is too small";
else
err = "cache_alloc(): unknown error";
pr_notice("error %pg: %s\n", bdev_handle->bdev, err);
pr_notice("error %pg: %s\n", file_bdev(bdev_file), err);
/*
* If we failed here, it means ca->kobj is not initialized yet,
* kobject_put() won't be called and there is no chance to
* call bdev_release() to bdev in bch_cache_release(). So
* we explicitly call bdev_release() here.
* call fput() to bdev in bch_cache_release(). So
* we explicitly call fput() on the block device here.
*/
bdev_release(bdev_handle);
fput(bdev_file);
return ret;
}
if (kobject_add(&ca->kobj, bdev_kobj(bdev_handle->bdev), "bcache")) {
if (kobject_add(&ca->kobj, bdev_kobj(file_bdev(bdev_file)), "bcache")) {
pr_notice("error %pg: error calling kobject_add\n",
bdev_handle->bdev);
file_bdev(bdev_file));
ret = -ENOMEM;
goto out;
}
@ -2388,7 +2388,7 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
goto out;
}
pr_info("registered cache device %pg\n", ca->bdev_handle->bdev);
pr_info("registered cache device %pg\n", file_bdev(ca->bdev_file));
out:
kobject_put(&ca->kobj);
@ -2446,7 +2446,7 @@ struct async_reg_args {
char *path;
struct cache_sb *sb;
struct cache_sb_disk *sb_disk;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
void *holder;
};
@ -2457,7 +2457,7 @@ static void register_bdev_worker(struct work_struct *work)
container_of(work, struct async_reg_args, reg_work.work);
mutex_lock(&bch_register_lock);
if (register_bdev(args->sb, args->sb_disk, args->bdev_handle,
if (register_bdev(args->sb, args->sb_disk, args->bdev_file,
args->holder) < 0)
fail = true;
mutex_unlock(&bch_register_lock);
@ -2478,7 +2478,7 @@ static void register_cache_worker(struct work_struct *work)
container_of(work, struct async_reg_args, reg_work.work);
/* blkdev_put() will be called in bch_cache_release() */
if (register_cache(args->sb, args->sb_disk, args->bdev_handle,
if (register_cache(args->sb, args->sb_disk, args->bdev_file,
args->holder))
fail = true;
@ -2516,7 +2516,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
char *path = NULL;
struct cache_sb *sb;
struct cache_sb_disk *sb_disk;
struct bdev_handle *bdev_handle, *bdev_handle2;
struct file *bdev_file, *bdev_file2;
void *holder = NULL;
ssize_t ret;
bool async_registration = false;
@ -2549,15 +2549,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
ret = -EINVAL;
err = "failed to open device";
bdev_handle = bdev_open_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
if (IS_ERR(bdev_handle))
bdev_file = bdev_file_open_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
if (IS_ERR(bdev_file))
goto out_free_sb;
err = "failed to set blocksize";
if (set_blocksize(bdev_handle->bdev, 4096))
if (set_blocksize(file_bdev(bdev_file), 4096))
goto out_blkdev_put;
err = read_super(sb, bdev_handle->bdev, &sb_disk);
err = read_super(sb, file_bdev(bdev_file), &sb_disk);
if (err)
goto out_blkdev_put;
@ -2569,13 +2569,13 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
}
/* Now reopen in exclusive mode with proper holder */
bdev_handle2 = bdev_open_by_dev(bdev_handle->bdev->bd_dev,
bdev_file2 = bdev_file_open_by_dev(file_bdev(bdev_file)->bd_dev,
BLK_OPEN_READ | BLK_OPEN_WRITE, holder, NULL);
bdev_release(bdev_handle);
bdev_handle = bdev_handle2;
if (IS_ERR(bdev_handle)) {
ret = PTR_ERR(bdev_handle);
bdev_handle = NULL;
fput(bdev_file);
bdev_file = bdev_file2;
if (IS_ERR(bdev_file)) {
ret = PTR_ERR(bdev_file);
bdev_file = NULL;
if (ret == -EBUSY) {
dev_t dev;
@ -2610,7 +2610,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
args->path = path;
args->sb = sb;
args->sb_disk = sb_disk;
args->bdev_handle = bdev_handle;
args->bdev_file = bdev_file;
args->holder = holder;
register_device_async(args);
/* No wait and returns to user space */
@ -2619,14 +2619,14 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (SB_IS_BDEV(sb)) {
mutex_lock(&bch_register_lock);
ret = register_bdev(sb, sb_disk, bdev_handle, holder);
ret = register_bdev(sb, sb_disk, bdev_file, holder);
mutex_unlock(&bch_register_lock);
/* blkdev_put() will be called in cached_dev_free() */
if (ret < 0)
goto out_free_sb;
} else {
/* blkdev_put() will be called in bch_cache_release() */
ret = register_cache(sb, sb_disk, bdev_handle, holder);
ret = register_cache(sb, sb_disk, bdev_file, holder);
if (ret)
goto out_free_sb;
}
@ -2642,8 +2642,8 @@ out_free_holder:
out_put_sb_page:
put_page(virt_to_page(sb_disk));
out_blkdev_put:
if (bdev_handle)
bdev_release(bdev_handle);
if (bdev_file)
fput(bdev_file);
out_free_sb:
kfree(sb);
out_free_path:


@ -726,7 +726,8 @@ static struct table_device *open_table_device(struct mapped_device *md,
dev_t dev, blk_mode_t mode)
{
struct table_device *td;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
struct block_device *bdev;
u64 part_off;
int r;
@ -735,34 +736,36 @@ static struct table_device *open_table_device(struct mapped_device *md,
return ERR_PTR(-ENOMEM);
refcount_set(&td->count, 1);
bdev_handle = bdev_open_by_dev(dev, mode, _dm_claim_ptr, NULL);
if (IS_ERR(bdev_handle)) {
r = PTR_ERR(bdev_handle);
bdev_file = bdev_file_open_by_dev(dev, mode, _dm_claim_ptr, NULL);
if (IS_ERR(bdev_file)) {
r = PTR_ERR(bdev_file);
goto out_free_td;
}
bdev = file_bdev(bdev_file);
/*
* We can be called before the dm disk is added. In that case we can't
* register the holder relation here. It will be done once add_disk was
* called.
*/
if (md->disk->slave_dir) {
r = bd_link_disk_holder(bdev_handle->bdev, md->disk);
r = bd_link_disk_holder(bdev, md->disk);
if (r)
goto out_blkdev_put;
}
td->dm_dev.mode = mode;
td->dm_dev.bdev = bdev_handle->bdev;
td->dm_dev.bdev_handle = bdev_handle;
td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev, &part_off,
td->dm_dev.bdev = bdev;
td->dm_dev.bdev_file = bdev_file;
td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off,
NULL, NULL);
format_dev_t(td->dm_dev.name, dev);
list_add(&td->list, &md->table_devices);
return td;
out_blkdev_put:
bdev_release(bdev_handle);
fput(bdev_file);
out_free_td:
kfree(td);
return ERR_PTR(r);
@ -775,7 +778,7 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
{
if (md->disk->slave_dir)
bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
bdev_release(td->dm_dev.bdev_handle);
fput(td->dm_dev.bdev_file);
put_dax(td->dm_dev.dax_dev);
list_del(&td->list);
kfree(td);


@ -2578,7 +2578,7 @@ static void export_rdev(struct md_rdev *rdev, struct mddev *mddev)
if (test_bit(AutoDetected, &rdev->flags))
md_autodetect_dev(rdev->bdev->bd_dev);
#endif
bdev_release(rdev->bdev_handle);
fput(rdev->bdev_file);
rdev->bdev = NULL;
kobject_put(&rdev->kobj);
}
@ -3773,16 +3773,16 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
if (err)
goto out_clear_rdev;
rdev->bdev_handle = bdev_open_by_dev(newdev,
rdev->bdev_file = bdev_file_open_by_dev(newdev,
BLK_OPEN_READ | BLK_OPEN_WRITE,
super_format == -2 ? &claim_rdev : rdev, NULL);
if (IS_ERR(rdev->bdev_handle)) {
if (IS_ERR(rdev->bdev_file)) {
pr_warn("md: could not open device unknown-block(%u,%u).\n",
MAJOR(newdev), MINOR(newdev));
err = PTR_ERR(rdev->bdev_handle);
err = PTR_ERR(rdev->bdev_file);
goto out_clear_rdev;
}
rdev->bdev = rdev->bdev_handle->bdev;
rdev->bdev = file_bdev(rdev->bdev_file);
kobject_init(&rdev->kobj, &rdev_ktype);
@ -3813,7 +3813,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
return rdev;
out_blkdev_put:
bdev_release(rdev->bdev_handle);
fput(rdev->bdev_file);
out_clear_rdev:
md_rdev_clear(rdev);
out_free_rdev:


@ -59,7 +59,7 @@ struct md_rdev {
*/
struct block_device *meta_bdev;
struct block_device *bdev; /* block device handle */
struct bdev_handle *bdev_handle; /* Handle from open for bdev */
struct file *bdev_file; /* Handle from open for bdev */
struct page *sb_page, *bb_page;
int sb_loaded;


@ -37,7 +37,7 @@
/* Info for the block device */
struct block2mtd_dev {
struct list_head list;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
struct mtd_info mtd;
struct mutex write_mutex;
};
@ -55,8 +55,7 @@ static struct page *page_read(struct address_space *mapping, pgoff_t index)
/* erase a specified part of the device */
static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
struct address_space *mapping =
dev->bdev_handle->bdev->bd_inode->i_mapping;
struct address_space *mapping = dev->bdev_file->f_mapping;
struct page *page;
pgoff_t index = to >> PAGE_SHIFT; // page index
int pages = len >> PAGE_SHIFT;
@ -106,8 +105,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct block2mtd_dev *dev = mtd->priv;
struct address_space *mapping =
dev->bdev_handle->bdev->bd_inode->i_mapping;
struct address_space *mapping = dev->bdev_file->f_mapping;
struct page *page;
pgoff_t index = from >> PAGE_SHIFT;
int offset = from & (PAGE_SIZE-1);
@ -142,8 +140,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
loff_t to, size_t len, size_t *retlen)
{
struct page *page;
struct address_space *mapping =
dev->bdev_handle->bdev->bd_inode->i_mapping;
struct address_space *mapping = dev->bdev_file->f_mapping;
pgoff_t index = to >> PAGE_SHIFT; // page index
int offset = to & ~PAGE_MASK; // page offset
int cpylen;
@ -198,7 +195,7 @@ static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
static void block2mtd_sync(struct mtd_info *mtd)
{
struct block2mtd_dev *dev = mtd->priv;
sync_blockdev(dev->bdev_handle->bdev);
sync_blockdev(file_bdev(dev->bdev_file));
return;
}
@ -210,10 +207,9 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
kfree(dev->mtd.name);
if (dev->bdev_handle) {
invalidate_mapping_pages(
dev->bdev_handle->bdev->bd_inode->i_mapping, 0, -1);
bdev_release(dev->bdev_handle);
if (dev->bdev_file) {
invalidate_mapping_pages(dev->bdev_file->f_mapping, 0, -1);
fput(dev->bdev_file);
}
kfree(dev);
@ -223,10 +219,10 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
* This function is marked __ref because it calls the __init marked
* early_lookup_bdev when called from the early boot code.
*/
static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname,
static struct file __ref *mdtblock_early_get_bdev(const char *devname,
blk_mode_t mode, int timeout, struct block2mtd_dev *dev)
{
struct bdev_handle *bdev_handle = ERR_PTR(-ENODEV);
struct file *bdev_file = ERR_PTR(-ENODEV);
#ifndef MODULE
int i;
@ -234,7 +230,7 @@ static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname,
* We can't use early_lookup_bdev from a running system.
*/
if (system_state >= SYSTEM_RUNNING)
return bdev_handle;
return bdev_file;
/*
* We might not have the root device mounted at this point.
@ -253,20 +249,20 @@ static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname,
wait_for_device_probe();
if (!early_lookup_bdev(devname, &devt)) {
bdev_handle = bdev_open_by_dev(devt, mode, dev, NULL);
if (!IS_ERR(bdev_handle))
bdev_file = bdev_file_open_by_dev(devt, mode, dev, NULL);
if (!IS_ERR(bdev_file))
break;
}
}
#endif
return bdev_handle;
return bdev_file;
}
static struct block2mtd_dev *add_device(char *devname, int erase_size,
char *label, int timeout)
{
const blk_mode_t mode = BLK_OPEN_READ | BLK_OPEN_WRITE;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
struct block_device *bdev;
struct block2mtd_dev *dev;
char *name;
@ -279,16 +275,16 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
return NULL;
/* Get a handle on the device */
bdev_handle = bdev_open_by_path(devname, mode, dev, NULL);
if (IS_ERR(bdev_handle))
bdev_handle = mdtblock_early_get_bdev(devname, mode, timeout,
bdev_file = bdev_file_open_by_path(devname, mode, dev, NULL);
if (IS_ERR(bdev_file))
bdev_file = mdtblock_early_get_bdev(devname, mode, timeout,
dev);
if (IS_ERR(bdev_handle)) {
if (IS_ERR(bdev_file)) {
pr_err("error: cannot open device %s\n", devname);
goto err_free_block2mtd;
}
dev->bdev_handle = bdev_handle;
bdev = bdev_handle->bdev;
dev->bdev_file = bdev_file;
bdev = file_bdev(bdev_file);
if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
pr_err("attempting to use an MTD device as a block device\n");


@ -50,10 +50,10 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
if (ns->bdev_handle) {
bdev_release(ns->bdev_handle);
if (ns->bdev_file) {
fput(ns->bdev_file);
ns->bdev = NULL;
ns->bdev_handle = NULL;
ns->bdev_file = NULL;
}
}
@ -85,18 +85,18 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
if (ns->buffered_io)
return -ENOTBLK;
ns->bdev_handle = bdev_open_by_path(ns->device_path,
ns->bdev_file = bdev_file_open_by_path(ns->device_path,
BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL);
if (IS_ERR(ns->bdev_handle)) {
ret = PTR_ERR(ns->bdev_handle);
if (IS_ERR(ns->bdev_file)) {
ret = PTR_ERR(ns->bdev_file);
if (ret != -ENOTBLK) {
pr_err("failed to open block device %s: (%d)\n",
ns->device_path, ret);
}
ns->bdev_handle = NULL;
ns->bdev_file = NULL;
return ret;
}
ns->bdev = ns->bdev_handle->bdev;
ns->bdev = file_bdev(ns->bdev_file);
ns->size = bdev_nr_bytes(ns->bdev);
ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));


@ -58,7 +58,7 @@
struct nvmet_ns {
struct percpu_ref ref;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
struct block_device *bdev;
struct file *file;
bool readonly;


@ -412,7 +412,7 @@ dasd_state_ready_to_online(struct dasd_device * device)
KOBJ_CHANGE);
return 0;
}
disk_uevent(device->block->bdev_handle->bdev->bd_disk,
disk_uevent(file_bdev(device->block->bdev_file)->bd_disk,
KOBJ_CHANGE);
}
return 0;
@ -433,7 +433,7 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
device->state = DASD_STATE_READY;
if (device->block && !(device->features & DASD_FEATURE_USERAW))
disk_uevent(device->block->bdev_handle->bdev->bd_disk,
disk_uevent(file_bdev(device->block->bdev_file)->bd_disk,
KOBJ_CHANGE);
return 0;
}
@ -3588,7 +3588,7 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* in the other openers.
*/
if (device->block) {
max_count = device->block->bdev_handle ? 0 : -1;
max_count = device->block->bdev_file ? 0 : -1;
open_count = atomic_read(&device->block->open_count);
if (open_count > max_count) {
if (open_count > 0)
@ -3634,8 +3634,8 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* so sync bdev first and then wait for our queues to become
* empty
*/
if (device->block && device->block->bdev_handle)
bdev_mark_dead(device->block->bdev_handle->bdev, false);
if (device->block && device->block->bdev_file)
bdev_mark_dead(file_bdev(device->block->bdev_file), false);
dasd_schedule_device_bh(device);
rc = wait_event_interruptible(shutdown_waitq,
_wait_for_empty_queues(device));


@ -127,15 +127,15 @@ void dasd_gendisk_free(struct dasd_block *block)
*/
int dasd_scan_partitions(struct dasd_block *block)
{
struct bdev_handle *bdev_handle;
struct file *bdev_file;
int rc;
bdev_handle = bdev_open_by_dev(disk_devt(block->gdp), BLK_OPEN_READ,
bdev_file = bdev_file_open_by_dev(disk_devt(block->gdp), BLK_OPEN_READ,
NULL, NULL);
if (IS_ERR(bdev_handle)) {
if (IS_ERR(bdev_file)) {
DBF_DEV_EVENT(DBF_ERR, block->base,
"scan partitions error, blkdev_get returned %ld",
PTR_ERR(bdev_handle));
PTR_ERR(bdev_file));
return -ENODEV;
}
@ -147,15 +147,15 @@ int dasd_scan_partitions(struct dasd_block *block)
"scan partitions error, rc %d", rc);
/*
* Since the matching bdev_release() call to the
* bdev_open_by_path() in this function is not called before
* Since the matching fput() call to the
* bdev_file_open_by_path() in this function is not called before
* dasd_destroy_partitions the offline open_count limit needs to be
* increased from 0 to 1. This is done by setting device->bdev_handle
* increased from 0 to 1. This is done by setting device->bdev_file
* (see dasd_generic_set_offline). As long as the partition detection
* is running no offline should be allowed. That is why the assignment
* to block->bdev_handle is done AFTER the BLKRRPART ioctl.
* to block->bdev_file is done AFTER the BLKRRPART ioctl.
*/
block->bdev_handle = bdev_handle;
block->bdev_file = bdev_file;
return 0;
}
@ -165,21 +165,21 @@ int dasd_scan_partitions(struct dasd_block *block)
*/
void dasd_destroy_partitions(struct dasd_block *block)
{
struct bdev_handle *bdev_handle;
struct file *bdev_file;
/*
* Get the bdev_handle pointer from the device structure and clear
* device->bdev_handle to lower the offline open_count limit again.
* Get the bdev_file pointer from the device structure and clear
* device->bdev_file to lower the offline open_count limit again.
*/
bdev_handle = block->bdev_handle;
block->bdev_handle = NULL;
bdev_file = block->bdev_file;
block->bdev_file = NULL;
mutex_lock(&bdev_handle->bdev->bd_disk->open_mutex);
bdev_disk_changed(bdev_handle->bdev->bd_disk, true);
mutex_unlock(&bdev_handle->bdev->bd_disk->open_mutex);
mutex_lock(&file_bdev(bdev_file)->bd_disk->open_mutex);
bdev_disk_changed(file_bdev(bdev_file)->bd_disk, true);
mutex_unlock(&file_bdev(bdev_file)->bd_disk->open_mutex);
/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
bdev_release(bdev_handle);
fput(bdev_file);
}
int dasd_gendisk_init(void)
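The dasd hunks replace every bdev_handle->bdev dereference with file_bdev() on the stashed file, for example to reach the gendisk for uevents or partition rescans. A small illustrative sketch follows; example_open_disk() is made up, while bdev_file_open_by_dev(), disk_devt(), file_bdev() and disk_uevent() are the calls used above.

#include <linux/blkdev.h>
#include <linux/kobject.h>
#include <linux/err.h>

static struct file *example_open_disk(struct gendisk *gdp)
{
        struct file *bdev_file;

        bdev_file = bdev_file_open_by_dev(disk_devt(gdp), BLK_OPEN_READ,
                                          NULL, NULL);
        if (IS_ERR(bdev_file))
                return bdev_file;

        /* The gendisk stays reachable through the file. */
        disk_uevent(file_bdev(bdev_file)->bd_disk, KOBJ_CHANGE);
        return bdev_file;       /* the caller stashes it and later calls fput() */
}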


@ -650,7 +650,7 @@ struct dasd_block {
struct gendisk *gdp;
spinlock_t request_queue_lock;
struct blk_mq_tag_set tag_set;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
atomic_t open_count;
unsigned long blocks; /* size of volume in blocks */


@ -537,7 +537,7 @@ static int __dasd_ioctl_information(struct dasd_block *block,
* This must be hidden from user-space.
*/
dasd_info->open_count = atomic_read(&block->open_count);
if (!block->bdev_handle)
if (!block->bdev_file)
dasd_info->open_count++;
/*


@ -91,7 +91,7 @@ static int iblock_configure_device(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct request_queue *q;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
struct block_device *bd;
struct blk_integrity *bi;
blk_mode_t mode = BLK_OPEN_READ;
@ -117,14 +117,14 @@ static int iblock_configure_device(struct se_device *dev)
else
dev->dev_flags |= DF_READ_ONLY;
bdev_handle = bdev_open_by_path(ib_dev->ibd_udev_path, mode, ib_dev,
bdev_file = bdev_file_open_by_path(ib_dev->ibd_udev_path, mode, ib_dev,
NULL);
if (IS_ERR(bdev_handle)) {
ret = PTR_ERR(bdev_handle);
if (IS_ERR(bdev_file)) {
ret = PTR_ERR(bdev_file);
goto out_free_bioset;
}
ib_dev->ibd_bdev_handle = bdev_handle;
ib_dev->ibd_bd = bd = bdev_handle->bdev;
ib_dev->ibd_bdev_file = bdev_file;
ib_dev->ibd_bd = bd = file_bdev(bdev_file);
q = bdev_get_queue(bd);
@ -180,7 +180,7 @@ static int iblock_configure_device(struct se_device *dev)
return 0;
out_blkdev_put:
bdev_release(ib_dev->ibd_bdev_handle);
fput(ib_dev->ibd_bdev_file);
out_free_bioset:
bioset_exit(&ib_dev->ibd_bio_set);
out:
@ -205,8 +205,8 @@ static void iblock_destroy_device(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
if (ib_dev->ibd_bdev_handle)
bdev_release(ib_dev->ibd_bdev_handle);
if (ib_dev->ibd_bdev_file)
fput(ib_dev->ibd_bdev_file);
bioset_exit(&ib_dev->ibd_bio_set);
}


@ -32,7 +32,7 @@ struct iblock_dev {
u32 ibd_flags;
struct bio_set ibd_bio_set;
struct block_device *ibd_bd;
struct bdev_handle *ibd_bdev_handle;
struct file *ibd_bdev_file;
bool ibd_readonly;
struct iblock_dev_plug *ibd_plug;
} ____cacheline_aligned;


@ -352,7 +352,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct Scsi_Host *sh = sd->host;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
int ret;
if (scsi_device_get(sd)) {
@ -366,18 +366,18 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
* Claim exclusive struct block_device access to struct scsi_device
* for TYPE_DISK and TYPE_ZBC using supplied udev_path
*/
bdev_handle = bdev_open_by_path(dev->udev_path,
bdev_file = bdev_file_open_by_path(dev->udev_path,
BLK_OPEN_WRITE | BLK_OPEN_READ, pdv, NULL);
if (IS_ERR(bdev_handle)) {
if (IS_ERR(bdev_file)) {
pr_err("pSCSI: bdev_open_by_path() failed\n");
scsi_device_put(sd);
return PTR_ERR(bdev_handle);
return PTR_ERR(bdev_file);
}
pdv->pdv_bdev_handle = bdev_handle;
pdv->pdv_bdev_file = bdev_file;
ret = pscsi_add_device_to_list(dev, sd);
if (ret) {
bdev_release(bdev_handle);
fput(bdev_file);
scsi_device_put(sd);
return ret;
}
@ -564,9 +564,9 @@ static void pscsi_destroy_device(struct se_device *dev)
* from pscsi_create_type_disk()
*/
if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) &&
pdv->pdv_bdev_handle) {
bdev_release(pdv->pdv_bdev_handle);
pdv->pdv_bdev_handle = NULL;
pdv->pdv_bdev_file) {
fput(pdv->pdv_bdev_file);
pdv->pdv_bdev_file = NULL;
}
/*
* For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
@ -994,8 +994,8 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
if (pdv->pdv_bdev_handle)
return bdev_nr_sectors(pdv->pdv_bdev_handle->bdev);
if (pdv->pdv_bdev_file)
return bdev_nr_sectors(file_bdev(pdv->pdv_bdev_file));
return 0;
}


@ -37,7 +37,7 @@ struct pscsi_dev_virt {
int pdv_channel_id;
int pdv_target_id;
int pdv_lun_id;
struct bdev_handle *pdv_bdev_handle;
struct file *pdv_bdev_file;
struct scsi_device *pdv_sd;
struct Scsi_Host *pdv_lld_host;
} ____cacheline_aligned;


@ -142,8 +142,8 @@ void bch2_sb_field_delete(struct bch_sb_handle *sb,
void bch2_free_super(struct bch_sb_handle *sb)
{
kfree(sb->bio);
if (!IS_ERR_OR_NULL(sb->bdev_handle))
bdev_release(sb->bdev_handle);
if (!IS_ERR_OR_NULL(sb->s_bdev_file))
fput(sb->s_bdev_file);
kfree(sb->holder);
kfree(sb->sb_name);
@ -704,22 +704,22 @@ retry:
if (!opt_get(*opts, nochanges))
sb->mode |= BLK_OPEN_WRITE;
sb->bdev_handle = bdev_open_by_path(path, sb->mode, sb->holder, &bch2_sb_handle_bdev_ops);
if (IS_ERR(sb->bdev_handle) &&
PTR_ERR(sb->bdev_handle) == -EACCES &&
sb->s_bdev_file = bdev_file_open_by_path(path, sb->mode, sb->holder, &bch2_sb_handle_bdev_ops);
if (IS_ERR(sb->s_bdev_file) &&
PTR_ERR(sb->s_bdev_file) == -EACCES &&
opt_get(*opts, read_only)) {
sb->mode &= ~BLK_OPEN_WRITE;
sb->bdev_handle = bdev_open_by_path(path, sb->mode, sb->holder, &bch2_sb_handle_bdev_ops);
if (!IS_ERR(sb->bdev_handle))
sb->s_bdev_file = bdev_file_open_by_path(path, sb->mode, sb->holder, &bch2_sb_handle_bdev_ops);
if (!IS_ERR(sb->s_bdev_file))
opt_set(*opts, nochanges, true);
}
if (IS_ERR(sb->bdev_handle)) {
ret = PTR_ERR(sb->bdev_handle);
if (IS_ERR(sb->s_bdev_file)) {
ret = PTR_ERR(sb->s_bdev_file);
goto out;
}
sb->bdev = sb->bdev_handle->bdev;
sb->bdev = file_bdev(sb->s_bdev_file);
ret = bch2_sb_realloc(sb, 0);
if (ret) {
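The bch2_read_super() hunk keeps bcachefs' read-only fallback: when the writable open fails with -EACCES, the device is reopened without BLK_OPEN_WRITE and nochanges is set. A condensed sketch of just that retry, with the surrounding option handling replaced by a bool; only the two open calls come from the hunk above.

#include <linux/blkdev.h>
#include <linux/err.h>

static struct file *example_open_rw_or_ro(const char *path, void *holder,
                                          const struct blk_holder_ops *hops,
                                          bool *read_only)
{
        struct file *bdev_file;

        bdev_file = bdev_file_open_by_path(path,
                        BLK_OPEN_READ | BLK_OPEN_WRITE, holder, hops);
        if (IS_ERR(bdev_file) && PTR_ERR(bdev_file) == -EACCES) {
                /* Retry read-only, mirroring the nochanges fallback above. */
                bdev_file = bdev_file_open_by_path(path, BLK_OPEN_READ,
                                                   holder, hops);
                if (!IS_ERR(bdev_file))
                        *read_only = true;
        }
        return bdev_file;
}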


@ -4,7 +4,7 @@
struct bch_sb_handle {
struct bch_sb *sb;
struct bdev_handle *bdev_handle;
struct file *s_bdev_file;
struct block_device *bdev;
char *sb_name;
struct bio *bio;


@ -246,7 +246,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
{
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
struct block_device *bdev;
u64 devid = BTRFS_DEV_REPLACE_DEVID;
int ret = 0;
@ -257,13 +257,13 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
return -EINVAL;
}
bdev_handle = bdev_open_by_path(device_path, BLK_OPEN_WRITE,
bdev_file = bdev_file_open_by_path(device_path, BLK_OPEN_WRITE,
fs_info->bdev_holder, NULL);
if (IS_ERR(bdev_handle)) {
if (IS_ERR(bdev_file)) {
btrfs_err(fs_info, "target device %s is invalid!", device_path);
return PTR_ERR(bdev_handle);
return PTR_ERR(bdev_file);
}
bdev = bdev_handle->bdev;
bdev = file_bdev(bdev_file);
if (!btrfs_check_device_zone_type(fs_info, bdev)) {
btrfs_err(fs_info,
@ -314,7 +314,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
device->commit_bytes_used = device->bytes_used;
device->fs_info = fs_info;
device->bdev = bdev;
device->bdev_handle = bdev_handle;
device->bdev_file = bdev_file;
set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
set_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
device->dev_stats_valid = 1;
@ -335,7 +335,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
return 0;
error:
bdev_release(bdev_handle);
fput(bdev_file);
return ret;
}


@ -2691,7 +2691,7 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_vol_args_v2 *vol_args;
struct bdev_handle *bdev_handle = NULL;
struct file *bdev_file = NULL;
int ret;
bool cancel = false;
@ -2728,7 +2728,7 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
goto err_drop;
/* Exclusive operation is now claimed */
ret = btrfs_rm_device(fs_info, &args, &bdev_handle);
ret = btrfs_rm_device(fs_info, &args, &bdev_file);
btrfs_exclop_finish(fs_info);
@ -2742,8 +2742,8 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
}
err_drop:
mnt_drop_write_file(file);
if (bdev_handle)
bdev_release(bdev_handle);
if (bdev_file)
fput(bdev_file);
out:
btrfs_put_dev_args_from_path(&args);
kfree(vol_args);
@ -2756,7 +2756,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_vol_args *vol_args;
struct bdev_handle *bdev_handle = NULL;
struct file *bdev_file = NULL;
int ret;
bool cancel = false;
@ -2783,15 +2783,15 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE,
cancel);
if (ret == 0) {
ret = btrfs_rm_device(fs_info, &args, &bdev_handle);
ret = btrfs_rm_device(fs_info, &args, &bdev_file);
if (!ret)
btrfs_info(fs_info, "disk deleted %s", vol_args->name);
btrfs_exclop_finish(fs_info);
}
mnt_drop_write_file(file);
if (bdev_handle)
bdev_release(bdev_handle);
if (bdev_file)
fput(bdev_file);
out:
btrfs_put_dev_args_from_path(&args);
kfree(vol_args);


@ -468,39 +468,39 @@ static noinline struct btrfs_fs_devices *find_fsid(
static int
btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder,
int flush, struct bdev_handle **bdev_handle,
int flush, struct file **bdev_file,
struct btrfs_super_block **disk_super)
{
struct block_device *bdev;
int ret;
*bdev_handle = bdev_open_by_path(device_path, flags, holder, NULL);
*bdev_file = bdev_file_open_by_path(device_path, flags, holder, NULL);
if (IS_ERR(*bdev_handle)) {
ret = PTR_ERR(*bdev_handle);
if (IS_ERR(*bdev_file)) {
ret = PTR_ERR(*bdev_file);
goto error;
}
bdev = (*bdev_handle)->bdev;
bdev = file_bdev(*bdev_file);
if (flush)
sync_blockdev(bdev);
ret = set_blocksize(bdev, BTRFS_BDEV_BLOCKSIZE);
if (ret) {
bdev_release(*bdev_handle);
fput(*bdev_file);
goto error;
}
invalidate_bdev(bdev);
*disk_super = btrfs_read_dev_super(bdev);
if (IS_ERR(*disk_super)) {
ret = PTR_ERR(*disk_super);
bdev_release(*bdev_handle);
fput(*bdev_file);
goto error;
}
return 0;
error:
*bdev_handle = NULL;
*bdev_file = NULL;
return ret;
}
@ -643,7 +643,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
struct btrfs_device *device, blk_mode_t flags,
void *holder)
{
struct bdev_handle *bdev_handle;
struct file *bdev_file;
struct btrfs_super_block *disk_super;
u64 devid;
int ret;
@ -654,7 +654,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
return -EINVAL;
ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
&bdev_handle, &disk_super);
&bdev_file, &disk_super);
if (ret)
return ret;
@ -678,20 +678,20 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
fs_devices->seeding = true;
} else {
if (bdev_read_only(bdev_handle->bdev))
if (bdev_read_only(file_bdev(bdev_file)))
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
else
set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
}
if (!bdev_nonrot(bdev_handle->bdev))
if (!bdev_nonrot(file_bdev(bdev_file)))
fs_devices->rotating = true;
if (bdev_max_discard_sectors(bdev_handle->bdev))
if (bdev_max_discard_sectors(file_bdev(bdev_file)))
fs_devices->discardable = true;
device->bdev_handle = bdev_handle;
device->bdev = bdev_handle->bdev;
device->bdev_file = bdev_file;
device->bdev = file_bdev(bdev_file);
clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
fs_devices->open_devices++;
@ -706,7 +706,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
error_free_page:
btrfs_release_disk_super(disk_super);
bdev_release(bdev_handle);
fput(bdev_file);
return -EINVAL;
}
@ -1015,10 +1015,10 @@ static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
if (device->devid == BTRFS_DEV_REPLACE_DEVID)
continue;
if (device->bdev_handle) {
bdev_release(device->bdev_handle);
if (device->bdev_file) {
fput(device->bdev_file);
device->bdev = NULL;
device->bdev_handle = NULL;
device->bdev_file = NULL;
fs_devices->open_devices--;
}
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
@ -1063,7 +1063,7 @@ static void btrfs_close_bdev(struct btrfs_device *device)
invalidate_bdev(device->bdev);
}
bdev_release(device->bdev_handle);
fput(device->bdev_file);
}
static void btrfs_close_one_device(struct btrfs_device *device)
@ -1316,7 +1316,7 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
struct btrfs_super_block *disk_super;
bool new_device_added = false;
struct btrfs_device *device = NULL;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
u64 bytenr, bytenr_orig;
int ret;
@ -1339,18 +1339,18 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
* values temporarily, as the device paths of the fsid are the only
* required information for assembling the volume.
*/
bdev_handle = bdev_open_by_path(path, flags, NULL, NULL);
if (IS_ERR(bdev_handle))
return ERR_CAST(bdev_handle);
bdev_file = bdev_file_open_by_path(path, flags, NULL, NULL);
if (IS_ERR(bdev_file))
return ERR_CAST(bdev_file);
bytenr_orig = btrfs_sb_offset(0);
ret = btrfs_sb_log_location_bdev(bdev_handle->bdev, 0, READ, &bytenr);
ret = btrfs_sb_log_location_bdev(file_bdev(bdev_file), 0, READ, &bytenr);
if (ret) {
device = ERR_PTR(ret);
goto error_bdev_put;
}
disk_super = btrfs_read_disk_super(bdev_handle->bdev, bytenr,
disk_super = btrfs_read_disk_super(file_bdev(bdev_file), bytenr,
bytenr_orig);
if (IS_ERR(disk_super)) {
device = ERR_CAST(disk_super);
@ -1381,7 +1381,7 @@ free_disk_super:
btrfs_release_disk_super(disk_super);
error_bdev_put:
bdev_release(bdev_handle);
fput(bdev_file);
return device;
}
@ -2057,7 +2057,7 @@ void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
struct btrfs_dev_lookup_args *args,
struct bdev_handle **bdev_handle)
struct file **bdev_file)
{
struct btrfs_trans_handle *trans;
struct btrfs_device *device;
@ -2166,7 +2166,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
btrfs_assign_next_active_device(device, NULL);
if (device->bdev_handle) {
if (device->bdev_file) {
cur_devices->open_devices--;
/* remove sysfs entry */
btrfs_sysfs_remove_device(device);
@ -2182,9 +2182,9 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
* free the device.
*
* We cannot call btrfs_close_bdev() here because we're holding the sb
* write lock, and bdev_release() will pull in the ->open_mutex on
* the block device and it's dependencies. Instead just flush the
* device and let the caller do the final bdev_release.
* write lock, and fput() on the block device will pull in the
* ->open_mutex on the block device and it's dependencies. Instead
* just flush the device and let the caller do the final bdev_release.
*/
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
btrfs_scratch_superblocks(fs_info, device->bdev,
@ -2195,7 +2195,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
}
}
*bdev_handle = device->bdev_handle;
*bdev_file = device->bdev_file;
synchronize_rcu();
btrfs_free_device(device);
@ -2332,7 +2332,7 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
const char *path)
{
struct btrfs_super_block *disk_super;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
int ret;
if (!path || !path[0])
@ -2350,7 +2350,7 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
}
ret = btrfs_get_bdev_and_sb(path, BLK_OPEN_READ, NULL, 0,
&bdev_handle, &disk_super);
&bdev_file, &disk_super);
if (ret) {
btrfs_put_dev_args_from_path(args);
return ret;
@ -2363,7 +2363,7 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
else
memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
btrfs_release_disk_super(disk_super);
bdev_release(bdev_handle);
fput(bdev_file);
return 0;
}
@ -2583,7 +2583,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
struct btrfs_root *root = fs_info->dev_root;
struct btrfs_trans_handle *trans;
struct btrfs_device *device;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
struct super_block *sb = fs_info->sb;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_fs_devices *seed_devices = NULL;
@ -2596,12 +2596,12 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
if (sb_rdonly(sb) && !fs_devices->seeding)
return -EROFS;
bdev_handle = bdev_open_by_path(device_path, BLK_OPEN_WRITE,
bdev_file = bdev_file_open_by_path(device_path, BLK_OPEN_WRITE,
fs_info->bdev_holder, NULL);
if (IS_ERR(bdev_handle))
return PTR_ERR(bdev_handle);
if (IS_ERR(bdev_file))
return PTR_ERR(bdev_file);
if (!btrfs_check_device_zone_type(fs_info, bdev_handle->bdev)) {
if (!btrfs_check_device_zone_type(fs_info, file_bdev(bdev_file))) {
ret = -EINVAL;
goto error;
}
@ -2613,11 +2613,11 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
locked = true;
}
sync_blockdev(bdev_handle->bdev);
sync_blockdev(file_bdev(bdev_file));
rcu_read_lock();
list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
if (device->bdev == bdev_handle->bdev) {
if (device->bdev == file_bdev(bdev_file)) {
ret = -EEXIST;
rcu_read_unlock();
goto error;
@ -2633,8 +2633,8 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
}
device->fs_info = fs_info;
device->bdev_handle = bdev_handle;
device->bdev = bdev_handle->bdev;
device->bdev_file = bdev_file;
device->bdev = file_bdev(bdev_file);
ret = lookup_bdev(device_path, &device->devt);
if (ret)
goto error_free_device;
@ -2817,7 +2817,7 @@ error_free_zone:
error_free_device:
btrfs_free_device(device);
error:
bdev_release(bdev_handle);
fput(bdev_file);
if (locked) {
mutex_unlock(&uuid_mutex);
up_write(&sb->s_umount);
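btrfs_get_bdev_and_sb() above is the template for open, prepare, probe, or unwind: every failure after a successful bdev_file_open_by_path() now ends in fput(). A reduced sketch of that shape; the probe step is a placeholder and 4096 stands in for BTRFS_BDEV_BLOCKSIZE, while sync_blockdev(), set_blocksize() and invalidate_bdev() are the helpers used in the hunk.

#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/err.h>

static int example_open_and_probe(const char *path, blk_mode_t flags,
                                  void *holder, struct file **bdev_filep)
{
        struct file *bdev_file;
        struct block_device *bdev;
        int ret;

        bdev_file = bdev_file_open_by_path(path, flags, holder, NULL);
        if (IS_ERR(bdev_file)) {
                *bdev_filep = NULL;
                return PTR_ERR(bdev_file);
        }
        bdev = file_bdev(bdev_file);

        sync_blockdev(bdev);
        ret = set_blocksize(bdev, 4096);
        if (ret)
                goto err_put;
        invalidate_bdev(bdev);

        /* ... read and verify an on-disk superblock here ... */

        *bdev_filep = bdev_file;
        return 0;

err_put:
        fput(bdev_file);
        *bdev_filep = NULL;
        return ret;
}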


@ -90,7 +90,7 @@ struct btrfs_device {
u64 generation;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
struct block_device *bdev;
struct btrfs_zoned_device_info *zone_info;
@ -661,7 +661,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args);
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
struct btrfs_dev_lookup_args *args,
struct bdev_handle **bdev_handle);
struct file **bdev_file);
void __exit btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,


@ -495,7 +495,7 @@ static void cramfs_kill_sb(struct super_block *sb)
sb->s_mtd = NULL;
} else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV) && sb->s_bdev) {
sync_blockdev(sb->s_bdev);
bdev_release(sb->s_bdev_handle);
fput(sb->s_bdev_file);
}
kfree(sbi);
}


@ -220,7 +220,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
up_read(&devs->rwsem);
return 0;
}
map->m_bdev = dif->bdev_handle ? dif->bdev_handle->bdev : NULL;
map->m_bdev = dif->bdev_file ? file_bdev(dif->bdev_file) : NULL;
map->m_daxdev = dif->dax_dev;
map->m_dax_part_off = dif->dax_part_off;
map->m_fscache = dif->fscache;
@ -238,8 +238,8 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
if (map->m_pa >= startoff &&
map->m_pa < startoff + length) {
map->m_pa -= startoff;
map->m_bdev = dif->bdev_handle ?
dif->bdev_handle->bdev : NULL;
map->m_bdev = dif->bdev_file ?
file_bdev(dif->bdev_file) : NULL;
map->m_daxdev = dif->dax_dev;
map->m_dax_part_off = dif->dax_part_off;
map->m_fscache = dif->fscache;


@ -49,7 +49,7 @@ typedef u32 erofs_blk_t;
struct erofs_device_info {
char *path;
struct erofs_fscache *fscache;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
struct dax_device *dax_dev;
u64 dax_part_off;


@ -177,7 +177,7 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_fscache *fscache;
struct erofs_deviceslot *dis;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
void *ptr;
ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP);
@ -201,12 +201,12 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
return PTR_ERR(fscache);
dif->fscache = fscache;
} else if (!sbi->devs->flatdev) {
bdev_handle = bdev_open_by_path(dif->path, BLK_OPEN_READ,
bdev_file = bdev_file_open_by_path(dif->path, BLK_OPEN_READ,
sb->s_type, NULL);
if (IS_ERR(bdev_handle))
return PTR_ERR(bdev_handle);
dif->bdev_handle = bdev_handle;
dif->dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev,
if (IS_ERR(bdev_file))
return PTR_ERR(bdev_file);
dif->bdev_file = bdev_file;
dif->dax_dev = fs_dax_get_by_bdev(file_bdev(bdev_file),
&dif->dax_part_off, NULL, NULL);
}
@ -754,8 +754,8 @@ static int erofs_release_device_info(int id, void *ptr, void *data)
struct erofs_device_info *dif = ptr;
fs_put_dax(dif->dax_dev, NULL);
if (dif->bdev_handle)
bdev_release(dif->bdev_handle);
if (dif->bdev_file)
fput(dif->bdev_file);
erofs_fscache_unregister_cookie(dif->fscache);
dif->fscache = NULL;
kfree(dif->path);


@ -1548,7 +1548,7 @@ struct ext4_sb_info {
unsigned long s_commit_interval;
u32 s_max_batch_time;
u32 s_min_batch_time;
struct bdev_handle *s_journal_bdev_handle;
struct file *s_journal_bdev_file;
#ifdef CONFIG_QUOTA
/* Names of quota files with journalled quota */
char __rcu *s_qf_names[EXT4_MAXQUOTAS];


@ -576,9 +576,9 @@ static bool ext4_getfsmap_is_valid_device(struct super_block *sb,
if (fm->fmr_device == 0 || fm->fmr_device == UINT_MAX ||
fm->fmr_device == new_encode_dev(sb->s_bdev->bd_dev))
return true;
if (EXT4_SB(sb)->s_journal_bdev_handle &&
if (EXT4_SB(sb)->s_journal_bdev_file &&
fm->fmr_device ==
new_encode_dev(EXT4_SB(sb)->s_journal_bdev_handle->bdev->bd_dev))
new_encode_dev(file_bdev(EXT4_SB(sb)->s_journal_bdev_file)->bd_dev))
return true;
return false;
}
@ -648,9 +648,9 @@ int ext4_getfsmap(struct super_block *sb, struct ext4_fsmap_head *head,
memset(handlers, 0, sizeof(handlers));
handlers[0].gfd_dev = new_encode_dev(sb->s_bdev->bd_dev);
handlers[0].gfd_fn = ext4_getfsmap_datadev;
if (EXT4_SB(sb)->s_journal_bdev_handle) {
if (EXT4_SB(sb)->s_journal_bdev_file) {
handlers[1].gfd_dev = new_encode_dev(
EXT4_SB(sb)->s_journal_bdev_handle->bdev->bd_dev);
file_bdev(EXT4_SB(sb)->s_journal_bdev_file)->bd_dev);
handlers[1].gfd_fn = ext4_getfsmap_logdev;
}


@ -1359,14 +1359,14 @@ static void ext4_put_super(struct super_block *sb)
sync_blockdev(sb->s_bdev);
invalidate_bdev(sb->s_bdev);
if (sbi->s_journal_bdev_handle) {
if (sbi->s_journal_bdev_file) {
/*
* Invalidate the journal device's buffers. We don't want them
* floating about in memory - the physical journal device may
* hotswapped, and it breaks the `ro-after' testing code.
*/
sync_blockdev(sbi->s_journal_bdev_handle->bdev);
invalidate_bdev(sbi->s_journal_bdev_handle->bdev);
sync_blockdev(file_bdev(sbi->s_journal_bdev_file));
invalidate_bdev(file_bdev(sbi->s_journal_bdev_file));
}
ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
@ -4233,7 +4233,7 @@ int ext4_calculate_overhead(struct super_block *sb)
* Add the internal journal blocks whether the journal has been
* loaded or not
*/
if (sbi->s_journal && !sbi->s_journal_bdev_handle)
if (sbi->s_journal && !sbi->s_journal_bdev_file)
overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len);
else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
/* j_inum for internal journal is non-zero */
@ -5670,9 +5670,9 @@ failed_mount:
#endif
fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
brelse(sbi->s_sbh);
if (sbi->s_journal_bdev_handle) {
invalidate_bdev(sbi->s_journal_bdev_handle->bdev);
bdev_release(sbi->s_journal_bdev_handle);
if (sbi->s_journal_bdev_file) {
invalidate_bdev(file_bdev(sbi->s_journal_bdev_file));
fput(sbi->s_journal_bdev_file);
}
out_fail:
invalidate_bdev(sb->s_bdev);
@ -5842,30 +5842,30 @@ static journal_t *ext4_open_inode_journal(struct super_block *sb,
return journal;
}
static struct bdev_handle *ext4_get_journal_blkdev(struct super_block *sb,
static struct file *ext4_get_journal_blkdev(struct super_block *sb,
dev_t j_dev, ext4_fsblk_t *j_start,
ext4_fsblk_t *j_len)
{
struct buffer_head *bh;
struct block_device *bdev;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
int hblock, blocksize;
ext4_fsblk_t sb_block;
unsigned long offset;
struct ext4_super_block *es;
int errno;
bdev_handle = bdev_open_by_dev(j_dev,
bdev_file = bdev_file_open_by_dev(j_dev,
BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
sb, &fs_holder_ops);
if (IS_ERR(bdev_handle)) {
if (IS_ERR(bdev_file)) {
ext4_msg(sb, KERN_ERR,
"failed to open journal device unknown-block(%u,%u) %ld",
MAJOR(j_dev), MINOR(j_dev), PTR_ERR(bdev_handle));
return bdev_handle;
MAJOR(j_dev), MINOR(j_dev), PTR_ERR(bdev_file));
return bdev_file;
}
bdev = bdev_handle->bdev;
bdev = file_bdev(bdev_file);
blocksize = sb->s_blocksize;
hblock = bdev_logical_block_size(bdev);
if (blocksize < hblock) {
@ -5912,12 +5912,12 @@ static struct bdev_handle *ext4_get_journal_blkdev(struct super_block *sb,
*j_start = sb_block + 1;
*j_len = ext4_blocks_count(es);
brelse(bh);
return bdev_handle;
return bdev_file;
out_bh:
brelse(bh);
out_bdev:
bdev_release(bdev_handle);
fput(bdev_file);
return ERR_PTR(errno);
}
@ -5927,14 +5927,14 @@ static journal_t *ext4_open_dev_journal(struct super_block *sb,
journal_t *journal;
ext4_fsblk_t j_start;
ext4_fsblk_t j_len;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
int errno = 0;
bdev_handle = ext4_get_journal_blkdev(sb, j_dev, &j_start, &j_len);
if (IS_ERR(bdev_handle))
return ERR_CAST(bdev_handle);
bdev_file = ext4_get_journal_blkdev(sb, j_dev, &j_start, &j_len);
if (IS_ERR(bdev_file))
return ERR_CAST(bdev_file);
journal = jbd2_journal_init_dev(bdev_handle->bdev, sb->s_bdev, j_start,
journal = jbd2_journal_init_dev(file_bdev(bdev_file), sb->s_bdev, j_start,
j_len, sb->s_blocksize);
if (IS_ERR(journal)) {
ext4_msg(sb, KERN_ERR, "failed to create device journal");
@ -5949,14 +5949,14 @@ static journal_t *ext4_open_dev_journal(struct super_block *sb,
goto out_journal;
}
journal->j_private = sb;
EXT4_SB(sb)->s_journal_bdev_handle = bdev_handle;
EXT4_SB(sb)->s_journal_bdev_file = bdev_file;
ext4_init_journal_params(sb, journal);
return journal;
out_journal:
jbd2_journal_destroy(journal);
out_bdev:
bdev_release(bdev_handle);
fput(bdev_file);
return ERR_PTR(errno);
}
@ -7314,12 +7314,12 @@ static inline int ext3_feature_set_ok(struct super_block *sb)
static void ext4_kill_sb(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct bdev_handle *handle = sbi ? sbi->s_journal_bdev_handle : NULL;
struct file *bdev_file = sbi ? sbi->s_journal_bdev_file : NULL;
kill_block_super(sb);
if (handle)
bdev_release(handle);
if (bdev_file)
fput(bdev_file);
}
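The ext4 journal-device handling keeps the holder semantics: the superblock is passed as holder together with fs_holder_ops, BLK_OPEN_RESTRICT_WRITES is retained, and, as ext4_kill_sb() shows, the journal file is only fput() after kill_block_super() has torn down the main device. A sketch of that pairing; apart from the open, put and kill_block_super() calls, the names are illustrative.

#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/fs.h>

static struct file *example_open_journal(struct super_block *sb, dev_t j_dev)
{
        return bdev_file_open_by_dev(j_dev,
                        BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
                        sb, &fs_holder_ops);
}

static void example_kill_sb(struct super_block *sb, struct file *journal_bdev_file)
{
        kill_block_super(sb);
        /* Drop the external journal only after the main bdev is released. */
        if (journal_bdev_file)
                fput(journal_bdev_file);
}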
static struct file_system_type ext4_fs_type = {


@ -1239,7 +1239,7 @@ struct f2fs_bio_info {
#define FDEV(i) (sbi->devs[i])
#define RDEV(i) (raw_super->devs[i])
struct f2fs_dev_info {
struct bdev_handle *bdev_handle;
struct file *bdev_file;
struct block_device *bdev;
char path[MAX_PATH_LEN];
unsigned int total_segments;


@ -1605,7 +1605,7 @@ static void destroy_device_list(struct f2fs_sb_info *sbi)
for (i = 0; i < sbi->s_ndevs; i++) {
if (i > 0)
bdev_release(FDEV(i).bdev_handle);
fput(FDEV(i).bdev_file);
#ifdef CONFIG_BLK_DEV_ZONED
kvfree(FDEV(i).blkz_seq);
#endif
@ -4247,7 +4247,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
for (i = 0; i < max_devices; i++) {
if (i == 0)
FDEV(0).bdev_handle = sbi->sb->s_bdev_handle;
FDEV(0).bdev_file = sbi->sb->s_bdev_file;
else if (!RDEV(i).path[0])
break;
@ -4267,14 +4267,14 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
FDEV(i).end_blk = FDEV(i).start_blk +
(FDEV(i).total_segments <<
sbi->log_blocks_per_seg) - 1;
FDEV(i).bdev_handle = bdev_open_by_path(
FDEV(i).bdev_file = bdev_file_open_by_path(
FDEV(i).path, mode, sbi->sb, NULL);
}
}
if (IS_ERR(FDEV(i).bdev_handle))
return PTR_ERR(FDEV(i).bdev_handle);
if (IS_ERR(FDEV(i).bdev_file))
return PTR_ERR(FDEV(i).bdev_file);
FDEV(i).bdev = FDEV(i).bdev_handle->bdev;
FDEV(i).bdev = file_bdev(FDEV(i).bdev_file);
/* to release errored devices */
sbi->s_ndevs = i + 1;


@ -276,21 +276,15 @@ struct file *alloc_empty_backing_file(int flags, const struct cred *cred)
}
/**
* alloc_file - allocate and initialize a 'struct file'
* file_init_path - initialize a 'struct file' based on path
*
* @file: the file to set up
* @path: the (dentry, vfsmount) pair for the new file
* @flags: O_... flags with which the new file will be opened
* @fop: the 'struct file_operations' for the new file
*/
static struct file *alloc_file(const struct path *path, int flags,
const struct file_operations *fop)
static void file_init_path(struct file *file, const struct path *path,
const struct file_operations *fop)
{
struct file *file;
file = alloc_empty_file(flags, current_cred());
if (IS_ERR(file))
return file;
file->f_path = *path;
file->f_inode = path->dentry->d_inode;
file->f_mapping = path->dentry->d_inode->i_mapping;
@ -309,22 +303,51 @@ static struct file *alloc_file(const struct path *path, int flags,
file->f_op = fop;
if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
i_readcount_inc(path->dentry->d_inode);
}
/**
* alloc_file - allocate and initialize a 'struct file'
*
* @path: the (dentry, vfsmount) pair for the new file
* @flags: O_... flags with which the new file will be opened
* @fop: the 'struct file_operations' for the new file
*/
static struct file *alloc_file(const struct path *path, int flags,
const struct file_operations *fop)
{
struct file *file;
file = alloc_empty_file(flags, current_cred());
if (!IS_ERR(file))
file_init_path(file, path, fop);
return file;
}
struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
const char *name, int flags,
const struct file_operations *fops)
static inline int alloc_path_pseudo(const char *name, struct inode *inode,
struct vfsmount *mnt, struct path *path)
{
struct qstr this = QSTR_INIT(name, strlen(name));
path->dentry = d_alloc_pseudo(mnt->mnt_sb, &this);
if (!path->dentry)
return -ENOMEM;
path->mnt = mntget(mnt);
d_instantiate(path->dentry, inode);
return 0;
}
struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
const char *name, int flags,
const struct file_operations *fops)
{
int ret;
struct path path;
struct file *file;
path.dentry = d_alloc_pseudo(mnt->mnt_sb, &this);
if (!path.dentry)
return ERR_PTR(-ENOMEM);
path.mnt = mntget(mnt);
d_instantiate(path.dentry, inode);
ret = alloc_path_pseudo(name, inode, mnt, &path);
if (ret)
return ERR_PTR(ret);
file = alloc_file(&path, flags, fops);
if (IS_ERR(file)) {
ihold(inode);
@ -334,6 +357,30 @@ struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
}
EXPORT_SYMBOL(alloc_file_pseudo);
struct file *alloc_file_pseudo_noaccount(struct inode *inode,
struct vfsmount *mnt, const char *name,
int flags,
const struct file_operations *fops)
{
int ret;
struct path path;
struct file *file;
ret = alloc_path_pseudo(name, inode, mnt, &path);
if (ret)
return ERR_PTR(ret);
file = alloc_empty_file_noaccount(flags, current_cred());
if (IS_ERR(file)) {
ihold(inode);
path_put(&path);
return file;
}
file_init_path(file, &path, fops);
return file;
}
EXPORT_SYMBOL_GPL(alloc_file_pseudo_noaccount);
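alloc_file_pseudo_noaccount() mirrors alloc_file_pseudo() but allocates the file with alloc_empty_file_noaccount(), so it is not charged against the file table, presumably what kernel-internal block device opens want. A hedged usage sketch; @mnt, @inode and @fops are whatever the caller already has, and nothing beyond the allocator itself comes from this commit.

#include <linux/file.h>
#include <linux/fs.h>

static struct file *example_internal_file(struct vfsmount *mnt,
                                          struct inode *inode,
                                          const struct file_operations *fops)
{
        struct file *file;

        file = alloc_file_pseudo_noaccount(inode, mnt, "[example]",
                                           O_RDWR | O_LARGEFILE, fops);
        /* On success the file takes over the caller's reference to @inode. */
        return file;
}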
struct file *alloc_file_clone(struct file *base, int flags,
const struct file_operations *fops)
{


@ -1058,7 +1058,7 @@ void jfs_syncpt(struct jfs_log *log, int hard_sync)
int lmLogOpen(struct super_block *sb)
{
int rc;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
struct jfs_log *log;
struct jfs_sb_info *sbi = JFS_SBI(sb);
@ -1070,7 +1070,7 @@ int lmLogOpen(struct super_block *sb)
mutex_lock(&jfs_log_mutex);
list_for_each_entry(log, &jfs_external_logs, journal_list) {
if (log->bdev_handle->bdev->bd_dev == sbi->logdev) {
if (file_bdev(log->bdev_file)->bd_dev == sbi->logdev) {
if (!uuid_equal(&log->uuid, &sbi->loguuid)) {
jfs_warn("wrong uuid on JFS journal");
mutex_unlock(&jfs_log_mutex);
@ -1100,14 +1100,14 @@ int lmLogOpen(struct super_block *sb)
* file systems to log may have n-to-1 relationship;
*/
bdev_handle = bdev_open_by_dev(sbi->logdev,
bdev_file = bdev_file_open_by_dev(sbi->logdev,
BLK_OPEN_READ | BLK_OPEN_WRITE, log, NULL);
if (IS_ERR(bdev_handle)) {
rc = PTR_ERR(bdev_handle);
if (IS_ERR(bdev_file)) {
rc = PTR_ERR(bdev_file);
goto free;
}
log->bdev_handle = bdev_handle;
log->bdev_file = bdev_file;
uuid_copy(&log->uuid, &sbi->loguuid);
/*
@ -1141,7 +1141,7 @@ journal_found:
lbmLogShutdown(log);
close: /* close external log device */
bdev_release(bdev_handle);
fput(bdev_file);
free: /* free log descriptor */
mutex_unlock(&jfs_log_mutex);
@ -1162,7 +1162,7 @@ static int open_inline_log(struct super_block *sb)
init_waitqueue_head(&log->syncwait);
set_bit(log_INLINELOG, &log->flag);
log->bdev_handle = sb->s_bdev_handle;
log->bdev_file = sb->s_bdev_file;
log->base = addressPXD(&JFS_SBI(sb)->logpxd);
log->size = lengthPXD(&JFS_SBI(sb)->logpxd) >>
(L2LOGPSIZE - sb->s_blocksize_bits);
@ -1436,7 +1436,7 @@ int lmLogClose(struct super_block *sb)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct jfs_log *log = sbi->log;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
int rc = 0;
jfs_info("lmLogClose: log:0x%p", log);
@ -1482,10 +1482,10 @@ int lmLogClose(struct super_block *sb)
* external log as separate logical volume
*/
list_del(&log->journal_list);
bdev_handle = log->bdev_handle;
bdev_file = log->bdev_file;
rc = lmLogShutdown(log);
bdev_release(bdev_handle);
fput(bdev_file);
kfree(log);
@ -1972,7 +1972,7 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
bp->l_flag |= lbmREAD;
bio = bio_alloc(log->bdev_handle->bdev, 1, REQ_OP_READ, GFP_NOFS);
bio = bio_alloc(file_bdev(log->bdev_file), 1, REQ_OP_READ, GFP_NOFS);
bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
__bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
@ -2115,7 +2115,7 @@ static void lbmStartIO(struct lbuf * bp)
jfs_info("lbmStartIO");
if (!log->no_integrity)
bdev = log->bdev_handle->bdev;
bdev = file_bdev(log->bdev_file);
bio = bio_alloc(bdev, 1, REQ_OP_WRITE | REQ_SYNC,
GFP_NOFS);


@ -356,7 +356,7 @@ struct jfs_log {
* before writing syncpt.
*/
struct list_head journal_list; /* Global list */
struct bdev_handle *bdev_handle; /* 4: log lv pointer */
struct file *bdev_file; /* 4: log lv pointer */
int serial; /* 4: log mount serial number */
s64 base; /* @8: log extent address (inline log ) */


@ -431,7 +431,7 @@ int updateSuper(struct super_block *sb, uint state)
if (state == FM_MOUNT) {
/* record log's dev_t and mount serial number */
j_sb->s_logdev = cpu_to_le32(
new_encode_dev(sbi->log->bdev_handle->bdev->bd_dev));
new_encode_dev(file_bdev(sbi->log->bdev_file)->bd_dev));
j_sb->s_logserial = cpu_to_le32(sbi->log->serial);
} else if (state == FM_CLEAN) {
/*


@ -108,7 +108,7 @@ struct pnfs_block_dev {
struct pnfs_block_dev *children;
u64 chunk_size;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
u64 disk_offset;
u64 pr_key;


@ -25,17 +25,17 @@ bl_free_device(struct pnfs_block_dev *dev)
} else {
if (dev->pr_registered) {
const struct pr_ops *ops =
dev->bdev_handle->bdev->bd_disk->fops->pr_ops;
file_bdev(dev->bdev_file)->bd_disk->fops->pr_ops;
int error;
error = ops->pr_register(dev->bdev_handle->bdev,
error = ops->pr_register(file_bdev(dev->bdev_file),
dev->pr_key, 0, false);
if (error)
pr_err("failed to unregister PR key.\n");
}
if (dev->bdev_handle)
bdev_release(dev->bdev_handle);
if (dev->bdev_file)
fput(dev->bdev_file);
}
}
@ -169,7 +169,7 @@ static bool bl_map_simple(struct pnfs_block_dev *dev, u64 offset,
map->start = dev->start;
map->len = dev->len;
map->disk_offset = dev->disk_offset;
map->bdev = dev->bdev_handle->bdev;
map->bdev = file_bdev(dev->bdev_file);
return true;
}
@ -236,26 +236,26 @@ bl_parse_simple(struct nfs_server *server, struct pnfs_block_dev *d,
struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
struct pnfs_block_volume *v = &volumes[idx];
struct bdev_handle *bdev_handle;
struct file *bdev_file;
dev_t dev;
dev = bl_resolve_deviceid(server, v, gfp_mask);
if (!dev)
return -EIO;
bdev_handle = bdev_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
bdev_file = bdev_file_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
NULL, NULL);
if (IS_ERR(bdev_handle)) {
if (IS_ERR(bdev_file)) {
printk(KERN_WARNING "pNFS: failed to open device %d:%d (%ld)\n",
MAJOR(dev), MINOR(dev), PTR_ERR(bdev_handle));
return PTR_ERR(bdev_handle);
MAJOR(dev), MINOR(dev), PTR_ERR(bdev_file));
return PTR_ERR(bdev_file);
}
d->bdev_handle = bdev_handle;
d->len = bdev_nr_bytes(bdev_handle->bdev);
d->bdev_file = bdev_file;
d->len = bdev_nr_bytes(file_bdev(bdev_file));
d->map = bl_map_simple;
printk(KERN_INFO "pNFS: using block device %s\n",
bdev_handle->bdev->bd_disk->disk_name);
file_bdev(bdev_file)->bd_disk->disk_name);
return 0;
}
@ -300,10 +300,10 @@ bl_validate_designator(struct pnfs_block_volume *v)
}
}
static struct bdev_handle *
static struct file *
bl_open_path(struct pnfs_block_volume *v, const char *prefix)
{
struct bdev_handle *bdev_handle;
struct file *bdev_file;
const char *devname;
devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/%s%*phN",
@ -311,15 +311,15 @@ bl_open_path(struct pnfs_block_volume *v, const char *prefix)
if (!devname)
return ERR_PTR(-ENOMEM);
bdev_handle = bdev_open_by_path(devname, BLK_OPEN_READ | BLK_OPEN_WRITE,
bdev_file = bdev_file_open_by_path(devname, BLK_OPEN_READ | BLK_OPEN_WRITE,
NULL, NULL);
if (IS_ERR(bdev_handle)) {
if (IS_ERR(bdev_file)) {
pr_warn("pNFS: failed to open device %s (%ld)\n",
devname, PTR_ERR(bdev_handle));
devname, PTR_ERR(bdev_file));
}
kfree(devname);
return bdev_handle;
return bdev_file;
}
static int
@ -327,7 +327,7 @@ bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
struct pnfs_block_volume *v = &volumes[idx];
struct bdev_handle *bdev_handle;
struct file *bdev_file;
const struct pr_ops *ops;
int error;
@ -340,14 +340,14 @@ bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
* On other distributions like Debian, the default SCSI by-id path will
* point to the dm-multipath device if one exists.
*/
bdev_handle = bl_open_path(v, "dm-uuid-mpath-0x");
if (IS_ERR(bdev_handle))
bdev_handle = bl_open_path(v, "wwn-0x");
if (IS_ERR(bdev_handle))
return PTR_ERR(bdev_handle);
d->bdev_handle = bdev_handle;
bdev_file = bl_open_path(v, "dm-uuid-mpath-0x");
if (IS_ERR(bdev_file))
bdev_file = bl_open_path(v, "wwn-0x");
if (IS_ERR(bdev_file))
return PTR_ERR(bdev_file);
d->bdev_file = bdev_file;
d->len = bdev_nr_bytes(d->bdev_handle->bdev);
d->len = bdev_nr_bytes(file_bdev(d->bdev_file));
d->map = bl_map_simple;
d->pr_key = v->scsi.pr_key;
@ -355,20 +355,20 @@ bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
return -ENODEV;
pr_info("pNFS: using block device %s (reservation key 0x%llx)\n",
d->bdev_handle->bdev->bd_disk->disk_name, d->pr_key);
file_bdev(d->bdev_file)->bd_disk->disk_name, d->pr_key);
ops = d->bdev_handle->bdev->bd_disk->fops->pr_ops;
ops = file_bdev(d->bdev_file)->bd_disk->fops->pr_ops;
if (!ops) {
pr_err("pNFS: block device %s does not support reservations.",
d->bdev_handle->bdev->bd_disk->disk_name);
file_bdev(d->bdev_file)->bd_disk->disk_name);
error = -EINVAL;
goto out_blkdev_put;
}
error = ops->pr_register(d->bdev_handle->bdev, 0, d->pr_key, true);
error = ops->pr_register(file_bdev(d->bdev_file), 0, d->pr_key, true);
if (error) {
pr_err("pNFS: failed to register key for block device %s.",
d->bdev_handle->bdev->bd_disk->disk_name);
file_bdev(d->bdev_file)->bd_disk->disk_name);
goto out_blkdev_put;
}
@ -376,7 +376,7 @@ bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
return 0;
out_blkdev_put:
bdev_release(d->bdev_handle);
fput(d->bdev_file);
return error;
}


@ -213,7 +213,7 @@ struct o2hb_region {
unsigned int hr_num_pages;
struct page **hr_slot_data;
struct bdev_handle *hr_bdev_handle;
struct file *hr_bdev_file;
struct o2hb_disk_slot *hr_slots;
/* live node map of this region */
@ -263,7 +263,7 @@ struct o2hb_region {
static inline struct block_device *reg_bdev(struct o2hb_region *reg)
{
return reg->hr_bdev_handle ? reg->hr_bdev_handle->bdev : NULL;
return reg->hr_bdev_file ? file_bdev(reg->hr_bdev_file) : NULL;
}
struct o2hb_bio_wait_ctxt {
@ -1509,8 +1509,8 @@ static void o2hb_region_release(struct config_item *item)
kfree(reg->hr_slot_data);
}
if (reg->hr_bdev_handle)
bdev_release(reg->hr_bdev_handle);
if (reg->hr_bdev_file)
fput(reg->hr_bdev_file);
kfree(reg->hr_slots);
@ -1569,7 +1569,7 @@ static ssize_t o2hb_region_block_bytes_store(struct config_item *item,
unsigned long block_bytes;
unsigned int block_bits;
if (reg->hr_bdev_handle)
if (reg->hr_bdev_file)
return -EINVAL;
status = o2hb_read_block_input(reg, page, &block_bytes,
@ -1598,7 +1598,7 @@ static ssize_t o2hb_region_start_block_store(struct config_item *item,
char *p = (char *)page;
ssize_t ret;
if (reg->hr_bdev_handle)
if (reg->hr_bdev_file)
return -EINVAL;
ret = kstrtoull(p, 0, &tmp);
@ -1623,7 +1623,7 @@ static ssize_t o2hb_region_blocks_store(struct config_item *item,
unsigned long tmp;
char *p = (char *)page;
if (reg->hr_bdev_handle)
if (reg->hr_bdev_file)
return -EINVAL;
tmp = simple_strtoul(p, &p, 0);
@ -1642,7 +1642,7 @@ static ssize_t o2hb_region_dev_show(struct config_item *item, char *page)
{
unsigned int ret = 0;
if (to_o2hb_region(item)->hr_bdev_handle)
if (to_o2hb_region(item)->hr_bdev_file)
ret = sprintf(page, "%pg\n", reg_bdev(to_o2hb_region(item)));
return ret;
@ -1753,7 +1753,7 @@ out:
}
/*
* this is acting as commit; we set up all of hr_bdev_handle and hr_task or
* this is acting as commit; we set up all of hr_bdev_file and hr_task or
* nothing
*/
static ssize_t o2hb_region_dev_store(struct config_item *item,
@ -1769,7 +1769,7 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
ssize_t ret = -EINVAL;
int live_threshold;
if (reg->hr_bdev_handle)
if (reg->hr_bdev_file)
goto out;
/* We can't heartbeat without having had our node number
@ -1795,11 +1795,11 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
if (!S_ISBLK(f.file->f_mapping->host->i_mode))
goto out2;
reg->hr_bdev_handle = bdev_open_by_dev(f.file->f_mapping->host->i_rdev,
reg->hr_bdev_file = bdev_file_open_by_dev(f.file->f_mapping->host->i_rdev,
BLK_OPEN_WRITE | BLK_OPEN_READ, NULL, NULL);
if (IS_ERR(reg->hr_bdev_handle)) {
ret = PTR_ERR(reg->hr_bdev_handle);
reg->hr_bdev_handle = NULL;
if (IS_ERR(reg->hr_bdev_file)) {
ret = PTR_ERR(reg->hr_bdev_file);
reg->hr_bdev_file = NULL;
goto out2;
}
@ -1903,8 +1903,8 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
out3:
if (ret < 0) {
bdev_release(reg->hr_bdev_handle);
reg->hr_bdev_handle = NULL;
fput(reg->hr_bdev_file);
reg->hr_bdev_file = NULL;
}
out2:
fdput(f);
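o2hb_region_dev_store() above converts a user-supplied descriptor into the region's own exclusive open: it checks that the fd's f_mapping->host is a block device inode and then reopens it by dev_t. A trimmed sketch of that conversion; the error handling is simplified, while fdget()/fdput(), S_ISBLK() and bdev_file_open_by_dev() are the interfaces used in the hunk.

#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/err.h>

static struct file *example_bdev_file_from_fd(int fd)
{
        struct file *bdev_file = ERR_PTR(-EINVAL);
        struct inode *inode;
        struct fd f;

        f = fdget(fd);
        if (!f.file)
                return ERR_PTR(-EBADF);

        inode = f.file->f_mapping->host;
        if (S_ISBLK(inode->i_mode))
                bdev_file = bdev_file_open_by_dev(inode->i_rdev,
                                BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL);
        fdput(f);
        return bdev_file;
}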


@ -2386,7 +2386,7 @@ static int journal_read(struct super_block *sb)
cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb);
reiserfs_info(sb, "checking transaction log (%pg)\n",
journal->j_bdev_handle->bdev);
file_bdev(journal->j_bdev_file));
start = ktime_get_seconds();
/*
@ -2447,7 +2447,7 @@ static int journal_read(struct super_block *sb)
* device and journal device to be the same
*/
d_bh =
reiserfs_breada(journal->j_bdev_handle->bdev, cur_dblock,
reiserfs_breada(file_bdev(journal->j_bdev_file), cur_dblock,
sb->s_blocksize,
SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
SB_ONDISK_JOURNAL_SIZE(sb));
@ -2588,9 +2588,9 @@ static void journal_list_init(struct super_block *sb)
static void release_journal_dev(struct reiserfs_journal *journal)
{
if (journal->j_bdev_handle) {
bdev_release(journal->j_bdev_handle);
journal->j_bdev_handle = NULL;
if (journal->j_bdev_file) {
fput(journal->j_bdev_file);
journal->j_bdev_file = NULL;
}
}
@ -2605,7 +2605,7 @@ static int journal_init_dev(struct super_block *super,
result = 0;
journal->j_bdev_handle = NULL;
journal->j_bdev_file = NULL;
jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;
@ -2616,37 +2616,37 @@ static int journal_init_dev(struct super_block *super,
if ((!jdev_name || !jdev_name[0])) {
if (jdev == super->s_dev)
holder = NULL;
journal->j_bdev_handle = bdev_open_by_dev(jdev, blkdev_mode,
journal->j_bdev_file = bdev_file_open_by_dev(jdev, blkdev_mode,
holder, NULL);
if (IS_ERR(journal->j_bdev_handle)) {
result = PTR_ERR(journal->j_bdev_handle);
journal->j_bdev_handle = NULL;
if (IS_ERR(journal->j_bdev_file)) {
result = PTR_ERR(journal->j_bdev_file);
journal->j_bdev_file = NULL;
reiserfs_warning(super, "sh-458",
"cannot init journal device unknown-block(%u,%u): %i",
MAJOR(jdev), MINOR(jdev), result);
return result;
} else if (jdev != super->s_dev)
set_blocksize(journal->j_bdev_handle->bdev,
set_blocksize(file_bdev(journal->j_bdev_file),
super->s_blocksize);
return 0;
}
journal->j_bdev_handle = bdev_open_by_path(jdev_name, blkdev_mode,
journal->j_bdev_file = bdev_file_open_by_path(jdev_name, blkdev_mode,
holder, NULL);
if (IS_ERR(journal->j_bdev_handle)) {
result = PTR_ERR(journal->j_bdev_handle);
journal->j_bdev_handle = NULL;
if (IS_ERR(journal->j_bdev_file)) {
result = PTR_ERR(journal->j_bdev_file);
journal->j_bdev_file = NULL;
reiserfs_warning(super, "sh-457",
"journal_init_dev: Cannot open '%s': %i",
jdev_name, result);
return result;
}
set_blocksize(journal->j_bdev_handle->bdev, super->s_blocksize);
set_blocksize(file_bdev(journal->j_bdev_file), super->s_blocksize);
reiserfs_info(super,
"journal_init_dev: journal device: %pg\n",
journal->j_bdev_handle->bdev);
file_bdev(journal->j_bdev_file));
return 0;
}
@ -2804,7 +2804,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
"journal header magic %x (device %pg) does "
"not match to magic found in super block %x",
jh->jh_journal.jp_journal_magic,
journal->j_bdev_handle->bdev,
file_bdev(journal->j_bdev_file),
sb_jp_journal_magic(rs));
brelse(bhjh);
goto free_and_return;
@ -2828,7 +2828,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
reiserfs_info(sb, "journal params: device %pg, size %u, "
"journal first block %u, max trans len %u, max batch %u, "
"max commit age %u, max trans age %u\n",
journal->j_bdev_handle->bdev,
file_bdev(journal->j_bdev_file),
SB_ONDISK_JOURNAL_SIZE(sb),
SB_ONDISK_JOURNAL_1st_BLOCK(sb),
journal->j_trans_max,


@ -354,7 +354,7 @@ static int show_journal(struct seq_file *m, void *unused)
"prepare: \t%12lu\n"
"prepare_retry: \t%12lu\n",
DJP(jp_journal_1st_block),
SB_JOURNAL(sb)->j_bdev_handle->bdev,
file_bdev(SB_JOURNAL(sb)->j_bdev_file),
DJP(jp_journal_dev),
DJP(jp_journal_size),
DJP(jp_journal_trans_max),


@ -299,7 +299,7 @@ struct reiserfs_journal {
/* oldest journal block. start here for traverse */
struct reiserfs_journal_cnode *j_first;
struct bdev_handle *j_bdev_handle;
struct file *j_bdev_file;
/* first block on s_dev of reserved area journal */
int j_1st_reserved_block;
@ -2810,10 +2810,10 @@ struct reiserfs_journal_header {
/* We need these to make journal.c code more readable */
#define journal_find_get_block(s, block) __find_get_block(\
SB_JOURNAL(s)->j_bdev_handle->bdev, block, s->s_blocksize)
#define journal_getblk(s, block) __getblk(SB_JOURNAL(s)->j_bdev_handle->bdev,\
file_bdev(SB_JOURNAL(s)->j_bdev_file), block, s->s_blocksize)
#define journal_getblk(s, block) __getblk(file_bdev(SB_JOURNAL(s)->j_bdev_file),\
block, s->s_blocksize)
#define journal_bread(s, block) __bread(SB_JOURNAL(s)->j_bdev_handle->bdev,\
#define journal_bread(s, block) __bread(file_bdev(SB_JOURNAL(s)->j_bdev_file),\
block, s->s_blocksize)
enum reiserfs_bh_state_bits {


@ -594,7 +594,7 @@ static void romfs_kill_sb(struct super_block *sb)
#ifdef CONFIG_ROMFS_ON_BLOCK
if (sb->s_bdev) {
sync_blockdev(sb->s_bdev);
bdev_release(sb->s_bdev_handle);
fput(sb->s_bdev_file);
}
#endif
}


@ -1532,16 +1532,16 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
struct fs_context *fc)
{
blk_mode_t mode = sb_open_mode(sb_flags);
struct bdev_handle *bdev_handle;
struct file *bdev_file;
struct block_device *bdev;
bdev_handle = bdev_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
if (IS_ERR(bdev_handle)) {
bdev_file = bdev_file_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
if (IS_ERR(bdev_file)) {
if (fc)
errorf(fc, "%s: Can't open blockdev", fc->source);
return PTR_ERR(bdev_handle);
return PTR_ERR(bdev_file);
}
bdev = bdev_handle->bdev;
bdev = file_bdev(bdev_file);
/*
* This really should be in blkdev_get_by_dev, but right now can't due
@ -1549,7 +1549,7 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
* writable from userspace even for a read-only block device.
*/
if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
bdev_release(bdev_handle);
fput(bdev_file);
return -EACCES;
}
@ -1560,11 +1560,11 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
if (atomic_read(&bdev->bd_fsfreeze_count) > 0) {
if (fc)
warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
bdev_release(bdev_handle);
fput(bdev_file);
return -EBUSY;
}
spin_lock(&sb_lock);
sb->s_bdev_handle = bdev_handle;
sb->s_bdev_file = bdev_file;
sb->s_bdev = bdev;
sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
if (bdev_stable_writes(bdev))
@ -1680,7 +1680,7 @@ void kill_block_super(struct super_block *sb)
generic_shutdown_super(sb);
if (bdev) {
sync_blockdev(bdev);
bdev_release(sb->s_bdev_handle);
fput(sb->s_bdev_file);
}
}


@ -1951,7 +1951,7 @@ xfs_free_buftarg(
fs_put_dax(btp->bt_daxdev, btp->bt_mount);
/* the main block device is closed by kill_block_super */
if (btp->bt_bdev != btp->bt_mount->m_super->s_bdev)
bdev_release(btp->bt_bdev_handle);
fput(btp->bt_bdev_file);
kmem_free(btp);
}
@ -1994,7 +1994,7 @@ xfs_setsize_buftarg_early(
struct xfs_buftarg *
xfs_alloc_buftarg(
struct xfs_mount *mp,
struct bdev_handle *bdev_handle)
struct file *bdev_file)
{
xfs_buftarg_t *btp;
const struct dax_holder_operations *ops = NULL;
@ -2005,9 +2005,9 @@ xfs_alloc_buftarg(
btp = kmem_zalloc(sizeof(*btp), KM_NOFS);
btp->bt_mount = mp;
btp->bt_bdev_handle = bdev_handle;
btp->bt_dev = bdev_handle->bdev->bd_dev;
btp->bt_bdev = bdev_handle->bdev;
btp->bt_bdev_file = bdev_file;
btp->bt_bdev = file_bdev(bdev_file);
btp->bt_dev = btp->bt_bdev->bd_dev;
btp->bt_daxdev = fs_dax_get_by_bdev(btp->bt_bdev, &btp->bt_dax_part_off,
mp, ops);


@ -98,7 +98,7 @@ typedef unsigned int xfs_buf_flags_t;
*/
typedef struct xfs_buftarg {
dev_t bt_dev;
struct bdev_handle *bt_bdev_handle;
struct file *bt_bdev_file;
struct block_device *bt_bdev;
struct dax_device *bt_daxdev;
u64 bt_dax_part_off;
@ -366,7 +366,7 @@ xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
* Handling of buftargs.
*/
struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *mp,
struct bdev_handle *bdev_handle);
struct file *bdev_file);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);


@ -362,16 +362,16 @@ STATIC int
xfs_blkdev_get(
xfs_mount_t *mp,
const char *name,
struct bdev_handle **handlep)
struct file **bdev_filep)
{
int error = 0;
*handlep = bdev_open_by_path(name,
*bdev_filep = bdev_file_open_by_path(name,
BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
mp->m_super, &fs_holder_ops);
if (IS_ERR(*handlep)) {
error = PTR_ERR(*handlep);
*handlep = NULL;
if (IS_ERR(*bdev_filep)) {
error = PTR_ERR(*bdev_filep);
*bdev_filep = NULL;
xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
}
@ -436,26 +436,26 @@ xfs_open_devices(
{
struct super_block *sb = mp->m_super;
struct block_device *ddev = sb->s_bdev;
struct bdev_handle *logdev_handle = NULL, *rtdev_handle = NULL;
struct file *logdev_file = NULL, *rtdev_file = NULL;
int error;
/*
* Open real time and log devices - order is important.
*/
if (mp->m_logname) {
error = xfs_blkdev_get(mp, mp->m_logname, &logdev_handle);
error = xfs_blkdev_get(mp, mp->m_logname, &logdev_file);
if (error)
return error;
}
if (mp->m_rtname) {
error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev_handle);
error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev_file);
if (error)
goto out_close_logdev;
if (rtdev_handle->bdev == ddev ||
(logdev_handle &&
rtdev_handle->bdev == logdev_handle->bdev)) {
if (file_bdev(rtdev_file) == ddev ||
(logdev_file &&
file_bdev(rtdev_file) == file_bdev(logdev_file))) {
xfs_warn(mp,
"Cannot mount filesystem with identical rtdev and ddev/logdev.");
error = -EINVAL;
@ -467,25 +467,25 @@ xfs_open_devices(
* Setup xfs_mount buffer target pointers
*/
error = -ENOMEM;
mp->m_ddev_targp = xfs_alloc_buftarg(mp, sb->s_bdev_handle);
mp->m_ddev_targp = xfs_alloc_buftarg(mp, sb->s_bdev_file);
if (!mp->m_ddev_targp)
goto out_close_rtdev;
if (rtdev_handle) {
mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev_handle);
if (rtdev_file) {
mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev_file);
if (!mp->m_rtdev_targp)
goto out_free_ddev_targ;
}
if (logdev_handle && logdev_handle->bdev != ddev) {
mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev_handle);
if (logdev_file && file_bdev(logdev_file) != ddev) {
mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev_file);
if (!mp->m_logdev_targp)
goto out_free_rtdev_targ;
} else {
mp->m_logdev_targp = mp->m_ddev_targp;
/* Handle won't be used, drop it */
if (logdev_handle)
bdev_release(logdev_handle);
if (logdev_file)
fput(logdev_file);
}
return 0;
@ -496,11 +496,11 @@ xfs_open_devices(
out_free_ddev_targ:
xfs_free_buftarg(mp->m_ddev_targp);
out_close_rtdev:
if (rtdev_handle)
bdev_release(rtdev_handle);
if (rtdev_file)
fput(rtdev_file);
out_close_logdev:
if (logdev_handle)
bdev_release(logdev_handle);
if (logdev_file)
fput(logdev_file);
return error;
}


@ -24,6 +24,7 @@
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>
#include <linux/file.h>
struct module;
struct request_queue;
@ -1474,26 +1475,20 @@ extern const struct blk_holder_ops fs_holder_ops;
(BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \
(((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
struct bdev_handle {
struct block_device *bdev;
void *holder;
blk_mode_t mode;
};
struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
const struct blk_holder_ops *hops);
struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
void *holder, const struct blk_holder_ops *hops);
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);
void bdev_release(struct bdev_handle *handle);
/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
void blkdev_put_no_open(struct block_device *bdev);
struct block_device *I_BDEV(struct inode *inode);
struct block_device *file_bdev(struct file *bdev_file);
#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
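
With struct bdev_handle gone, the dev_t-based variant follows the same lifecycle: the open returns a struct file, file_bdev() recovers the block_device, and fput() drops the reference where bdev_release() used to. A minimal sketch, assuming a hypothetical caller (the function name and printout are illustrative only):

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/printk.h>

static int example_probe_bdev(dev_t devt)
{
	struct file *bdev_file;
	struct block_device *bdev;

	/* Non-exclusive read-only open; no holder or holder ops needed. */
	bdev_file = bdev_file_open_by_dev(devt, BLK_OPEN_READ, NULL, NULL);
	if (IS_ERR(bdev_file))
		return PTR_ERR(bdev_file);

	/* Reach the raw block_device through the file, akin to I_BDEV(). */
	bdev = file_bdev(bdev_file);
	pr_info("%pg: %llu sectors\n", bdev,
		(unsigned long long)bdev_nr_sectors(bdev));

	/* Closing the device is now just closing the file. */
	fput(bdev_file);
	return 0;
}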

View File

@ -165,7 +165,7 @@ void dm_error(const char *message);
struct dm_dev {
struct block_device *bdev;
struct bdev_handle *bdev_handle;
struct file *bdev_file;
struct dax_device *dax_dev;
blk_mode_t mode;
char name[16];

View File

@ -24,6 +24,8 @@ struct inode;
struct path;
extern struct file *alloc_file_pseudo(struct inode *, struct vfsmount *,
const char *, int flags, const struct file_operations *);
extern struct file *alloc_file_pseudo_noaccount(struct inode *, struct vfsmount *,
const char *, int flags, const struct file_operations *);
extern struct file *alloc_file_clone(struct file *, int flags,
const struct file_operations *);

View File

@ -1228,8 +1228,8 @@ struct super_block {
#endif
struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
struct bdev_handle *s_bdev_handle;
struct block_device *s_bdev; /* can go away once we use an accessor for @s_bdev_file */
struct file *s_bdev_file;
struct backing_dev_info *s_bdi;
struct mtd_info *s_mtd;
struct hlist_node s_instances;
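
The comment added next to s_bdev spells out the follow-up plan: once filesystems reach the block device only through an accessor for @s_bdev_file, the raw s_bdev pointer can be retired. Purely as an illustration of that accessor idea (this helper is hypothetical and not part of the series):

static inline struct block_device *sb_bdev(struct super_block *sb)
{
	/* Hypothetical accessor: derive the bdev from the stashed bdev file. */
	return file_bdev(sb->s_bdev_file);
}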

View File

@ -154,9 +154,9 @@ struct packet_stacked_data
struct pktcdvd_device
{
struct bdev_handle *bdev_handle; /* dev attached */
struct file *bdev_file; /* dev attached */
/* handle acquired for bdev during pkt_open_dev() */
struct bdev_handle *open_bdev_handle;
struct file *f_open_bdev;
dev_t pkt_dev; /* our dev */
struct packet_settings settings;
struct packet_stats stats;

View File

@ -298,7 +298,7 @@ struct swap_info_struct {
unsigned int __percpu *cluster_next_cpu; /*percpu index for next allocation */
struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
struct rb_root swap_extent_root;/* root of the swap extent rbtree */
struct bdev_handle *bdev_handle;/* open handle of the bdev */
struct file *bdev_file; /* open handle of the bdev */
struct block_device *bdev; /* swap device or bdev of swap file */
struct file *swap_file; /* seldom referenced */
unsigned int old_block_size; /* seldom referenced */

View File

@ -208,6 +208,9 @@ retry:
goto out;
case -EACCES:
case -EINVAL:
#ifdef CONFIG_BLOCK
init_flush_fput();
#endif
continue;
}
/*

View File

@ -9,6 +9,8 @@
#include <linux/major.h>
#include <linux/root_dev.h>
#include <linux/init_syscalls.h>
#include <linux/task_work.h>
#include <linux/file.h>
void mount_root_generic(char *name, char *pretty_name, int flags);
void mount_root(char *root_device_name);
@ -41,3 +43,10 @@ static inline bool initrd_load(char *root_device_name)
}
#endif
/* Ensure that async file closing finished to prevent spurious errors. */
static inline void init_flush_fput(void)
{
flush_delayed_fput();
task_work_run();
}

View File

@ -16,9 +16,10 @@
#include <linux/mm.h>
#include <linux/namei.h>
#include <linux/init_syscalls.h>
#include <linux/task_work.h>
#include <linux/umh.h>
#include "do_mounts.h"
static __initdata bool csum_present;
static __initdata u32 io_csum;
@ -736,8 +737,7 @@ done:
initrd_start = 0;
initrd_end = 0;
flush_delayed_fput();
task_work_run();
init_flush_fput();
}
static ASYNC_DOMAIN_EXCLUSIVE(initramfs_domain);

View File

@ -222,7 +222,7 @@ int swsusp_swap_in_use(void)
*/
static unsigned short root_swap = 0xffff;
static struct bdev_handle *hib_resume_bdev_handle;
static struct file *hib_resume_bdev_file;
struct hib_bio_batch {
atomic_t count;
@ -276,7 +276,7 @@ static int hib_submit_io(blk_opf_t opf, pgoff_t page_off, void *addr,
struct bio *bio;
int error = 0;
bio = bio_alloc(hib_resume_bdev_handle->bdev, 1, opf,
bio = bio_alloc(file_bdev(hib_resume_bdev_file), 1, opf,
GFP_NOIO | __GFP_HIGH);
bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
@ -357,14 +357,14 @@ static int swsusp_swap_check(void)
return res;
root_swap = res;
hib_resume_bdev_handle = bdev_open_by_dev(swsusp_resume_device,
hib_resume_bdev_file = bdev_file_open_by_dev(swsusp_resume_device,
BLK_OPEN_WRITE, NULL, NULL);
if (IS_ERR(hib_resume_bdev_handle))
return PTR_ERR(hib_resume_bdev_handle);
if (IS_ERR(hib_resume_bdev_file))
return PTR_ERR(hib_resume_bdev_file);
res = set_blocksize(hib_resume_bdev_handle->bdev, PAGE_SIZE);
res = set_blocksize(file_bdev(hib_resume_bdev_file), PAGE_SIZE);
if (res < 0)
bdev_release(hib_resume_bdev_handle);
fput(hib_resume_bdev_file);
return res;
}
@ -1523,10 +1523,10 @@ int swsusp_check(bool exclusive)
void *holder = exclusive ? &swsusp_holder : NULL;
int error;
hib_resume_bdev_handle = bdev_open_by_dev(swsusp_resume_device,
hib_resume_bdev_file = bdev_file_open_by_dev(swsusp_resume_device,
BLK_OPEN_READ, holder, NULL);
if (!IS_ERR(hib_resume_bdev_handle)) {
set_blocksize(hib_resume_bdev_handle->bdev, PAGE_SIZE);
if (!IS_ERR(hib_resume_bdev_file)) {
set_blocksize(file_bdev(hib_resume_bdev_file), PAGE_SIZE);
clear_page(swsusp_header);
error = hib_submit_io(REQ_OP_READ, swsusp_resume_block,
swsusp_header, NULL);
@ -1551,11 +1551,11 @@ int swsusp_check(bool exclusive)
put:
if (error)
bdev_release(hib_resume_bdev_handle);
fput(hib_resume_bdev_file);
else
pr_debug("Image signature found, resuming\n");
} else {
error = PTR_ERR(hib_resume_bdev_handle);
error = PTR_ERR(hib_resume_bdev_file);
}
if (error)
@ -1570,12 +1570,12 @@ put:
void swsusp_close(void)
{
if (IS_ERR(hib_resume_bdev_handle)) {
if (IS_ERR(hib_resume_bdev_file)) {
pr_debug("Image device not initialised\n");
return;
}
bdev_release(hib_resume_bdev_handle);
fput(hib_resume_bdev_file);
}
/**

View File

@ -2532,10 +2532,10 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
exit_swap_address_space(p->type);
inode = mapping->host;
if (p->bdev_handle) {
if (p->bdev_file) {
set_blocksize(p->bdev, old_block_size);
bdev_release(p->bdev_handle);
p->bdev_handle = NULL;
fput(p->bdev_file);
p->bdev_file = NULL;
}
inode_lock(inode);
@ -2765,14 +2765,14 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
int error;
if (S_ISBLK(inode->i_mode)) {
p->bdev_handle = bdev_open_by_dev(inode->i_rdev,
p->bdev_file = bdev_file_open_by_dev(inode->i_rdev,
BLK_OPEN_READ | BLK_OPEN_WRITE, p, NULL);
if (IS_ERR(p->bdev_handle)) {
error = PTR_ERR(p->bdev_handle);
p->bdev_handle = NULL;
if (IS_ERR(p->bdev_file)) {
error = PTR_ERR(p->bdev_file);
p->bdev_file = NULL;
return error;
}
p->bdev = p->bdev_handle->bdev;
p->bdev = file_bdev(p->bdev_file);
p->old_block_size = block_size(p->bdev);
error = set_blocksize(p->bdev, PAGE_SIZE);
if (error < 0)
@ -3208,10 +3208,10 @@ bad_swap:
p->percpu_cluster = NULL;
free_percpu(p->cluster_next_cpu);
p->cluster_next_cpu = NULL;
if (p->bdev_handle) {
if (p->bdev_file) {
set_blocksize(p->bdev, p->old_block_size);
bdev_release(p->bdev_handle);
p->bdev_handle = NULL;
fput(p->bdev_file);
p->bdev_file = NULL;
}
inode = NULL;
destroy_swap_extents(p);