Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Commit 549c729771
Extend some inode methods with an additional user namespace argument. A filesystem that is aware of idmapped mounts will receive the user namespace the mount has been marked with. This can be used for additional permission checking and also to enable filesystems to translate between uids and gids if they need to. We have implemented all relevant helpers in earlier patches.

As requested we simply extend the existing inode method instead of introducing new ones. This is a little more code churn but it's mostly mechanical and doesn't leave us with additional inode methods.

Link: https://lore.kernel.org/r/20210121131959.646623-25-christian.brauner@ubuntu.com
Cc: Christoph Hellwig <hch@lst.de>
Cc: David Howells <dhowells@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: linux-fsdevel@vger.kernel.org
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
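A minimal sketch of what the extended method looks like from a filesystem's point of view, using only helpers this file already relies on (setattr_prepare() and setattr_copy(), which take the user namespace as their first argument). The function name example_setattr is hypothetical and not part of jfs; jfs itself keeps passing &init_user_ns, as jfs_setattr() in the file below shows, because it does not advertise idmapped-mount support. An idmap-aware filesystem would instead thread the received mnt_userns through the helpers:

/*
 * Hypothetical example, not part of jfs: a ->setattr implementation for a
 * filesystem that opts in to idmapped mounts.  The mount's user namespace
 * is passed on to the generic helpers so ownership and permission checks
 * are translated through it rather than through init_user_ns.
 */
static int example_setattr(struct user_namespace *mnt_userns,
			   struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int rc;

	/* validate the request against ids as seen through mnt_userns */
	rc = setattr_prepare(mnt_userns, dentry, iattr);
	if (rc)
		return rc;

	/* copy the (possibly translated) attributes into the in-core inode */
	setattr_copy(mnt_userns, inode, iattr);
	mark_inode_dirty(inode);
	return 0;
}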
153 lines
3.7 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) International Business Machines Corp., 2000-2002
 * Portions Copyright (C) Christoph Hellwig, 2001-2002
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/posix_acl.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_dmap.h"
#include "jfs_txnmgr.h"
#include "jfs_xattr.h"
#include "jfs_acl.h"
#include "jfs_debug.h"

int jfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int rc = 0;

	rc = file_write_and_wait_range(file, start, end);
	if (rc)
		return rc;

	inode_lock(inode);
	if (!(inode->i_state & I_DIRTY_ALL) ||
	    (datasync && !(inode->i_state & I_DIRTY_DATASYNC))) {
		/* Make sure committed changes hit the disk */
		jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1);
		inode_unlock(inode);
		return rc;
	}

	rc |= jfs_commit_inode(inode, 1);
	inode_unlock(inode);

	return rc ? -EIO : 0;
}

static int jfs_open(struct inode *inode, struct file *file)
{
	int rc;

	if ((rc = dquot_file_open(inode, file)))
		return rc;

	/*
	 * We attempt to allow only one "active" file open per aggregate
	 * group.  Otherwise, appending to files in parallel can cause
	 * fragmentation within the files.
	 *
	 * If the file is empty, it was probably just created and going
	 * to be written to.  If it has a size, we'll hold off until the
	 * file is actually grown.
	 */
	if (S_ISREG(inode->i_mode) && file->f_mode & FMODE_WRITE &&
	    (inode->i_size == 0)) {
		struct jfs_inode_info *ji = JFS_IP(inode);
		spin_lock_irq(&ji->ag_lock);
		if (ji->active_ag == -1) {
			struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb);
			ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb);
			atomic_inc(&jfs_sb->bmap->db_active[ji->active_ag]);
		}
		spin_unlock_irq(&ji->ag_lock);
	}

	return 0;
}

static int jfs_release(struct inode *inode, struct file *file)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}
	spin_unlock_irq(&ji->ag_lock);

	return 0;
}

int jfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int rc;

	rc = setattr_prepare(&init_user_ns, dentry, iattr);
	if (rc)
		return rc;

	if (is_quota_modification(inode, iattr)) {
		rc = dquot_initialize(inode);
		if (rc)
			return rc;
	}
	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
		rc = dquot_transfer(inode, iattr);
		if (rc)
			return rc;
	}

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);

		rc = inode_newsize_ok(inode, iattr->ia_size);
		if (rc)
			return rc;

		truncate_setsize(inode, iattr->ia_size);
		jfs_truncate(inode);
	}

	setattr_copy(&init_user_ns, inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE)
		rc = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
	return rc;
}

const struct inode_operations jfs_file_inode_operations = {
	.listxattr	= jfs_listxattr,
	.setattr	= jfs_setattr,
#ifdef CONFIG_JFS_POSIX_ACL
	.get_acl	= jfs_get_acl,
	.set_acl	= jfs_set_acl,
#endif
};

const struct file_operations jfs_file_operations = {
	.open		= jfs_open,
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fsync		= jfs_fsync,
	.release	= jfs_release,
	.unlocked_ioctl	= jfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= jfs_compat_ioctl,
#endif
};