-----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCAAdFiEEq1nRK9aeMoq1VSgcnJ2qBz9kQNkFAmXx5kwACgkQnJ2qBz9k
 QNmZowf/UlGJ1rmQFFhoodn3SyK48tQjOZ23Ygx6v9FZiLMuQ3b1k0kWKmwM4lZb
 mtRriCm+lPO9Yp/Sflz+jn8S51b/2bcTXiPV4w2Y4ZIun41wwggV7rWPnTCHhu94
 rGEPu/SNSBdpxWGv43BKHSDl4XolsGbyusQKBbKZtftnrpIf0y2OnyEXSV91Vnlh
 KM/XxzacBD4/3r4KCljyEkORWlIIn2+gdZf58sKtxLKvnfCIxjB+BF1e0gOWgmNQ
 e/pVnzbAHO3wuavRlwnrtA+ekBYQiJq7T61yyYI8zpeSoLHmwvPoKSsZP+q4BTvV
 yrcVCbGp3uZlXHD93U3BOfdqS0xBmg==
 =84Q4
 -----END PGP SIGNATURE-----

Merge tag 'fs_for_v6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull ext2, isofs, udf, and quota updates from Jan Kara:
 "A lot of material this time:

   - removal of a lot of GFP_NOFS usage from ext2, udf, and quota (either
     it was legacy or it was replaced with the scoped memalloc_nofs_*()
     API; a sketch of that pattern follows this summary)

   - removal of BUG_ONs in quota code

   - conversion of UDF to the new mount API

   - tightening quota on disk format verification

   - fix some potentially unsafe use of RCU pointers in quota code and
     annotate everything properly to make sparse happy

   - a few other small quota, ext2, udf, and isofs fixes"
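
The GFP_NOFS removals in this series all follow the same scoped-allocation
pattern, sketched minimally below. The helper and lock names here are made
up for illustration and are not taken from any file in the series: the
region that must not recurse into filesystem reclaim is bracketed with
memalloc_nofs_save()/memalloc_nofs_restore(), and the allocations inside it
can then use plain GFP_KERNEL.

#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
#include <linux/errno.h>

/* Hypothetical helper, standing in for functions like v2_write_file_info(). */
static int example_write_under_dqio_sem(struct rw_semaphore *dqio_sem, size_t len)
{
	unsigned int memalloc;
	char *buf;
	int ret = 0;

	down_write(dqio_sem);
	/* Every allocation from here to the restore behaves as GFP_NOFS. */
	memalloc = memalloc_nofs_save();

	buf = kmalloc(len, GFP_KERNEL);		/* effectively GFP_NOFS */
	if (!buf)
		ret = -ENOMEM;
	kfree(buf);

	memalloc_nofs_restore(memalloc);
	up_write(dqio_sem);
	return ret;
}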

* tag 'fs_for_v6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs: (26 commits)
  udf: remove SLAB_MEM_SPREAD flag usage
  quota: remove SLAB_MEM_SPREAD flag usage
  isofs: remove SLAB_MEM_SPREAD flag usage
  ext2: remove SLAB_MEM_SPREAD flag usage
  ext2: mark as deprecated
  udf: convert to new mount API
  udf: convert novrs to an option flag
  MAINTAINERS: add missing git address for ext2 entry
  quota: Detect loops in quota tree
  quota: Properly annotate i_dquot arrays with __rcu
  quota: Fix rcu annotations of inode dquot pointers
  isofs: handle CDs with bad root inode but good Joliet root directory
  udf: Avoid invalid LVID used on mount
  quota: Fix potential NULL pointer dereference
  quota: Drop GFP_NOFS instances under dquot->dq_lock and dqio_sem
  quota: Set nofs allocation context when acquiring dqio_sem
  ext2: Remove GFP_NOFS use in ext2_xattr_cache_insert()
  ext2: Drop GFP_NOFS use in ext2_get_blocks()
  ext2: Drop GFP_NOFS allocation from ext2_init_block_alloc_info()
  udf: Remove GFP_NOFS allocation in udf_expand_file_adinicb()
  ...
Linus Torvalds 2024-03-13 14:30:58 -07:00
commit e5e038b7ae
32 changed files with 607 additions and 422 deletions


@ -8021,6 +8021,7 @@ M: Jan Kara <jack@suse.com>
L: linux-ext4@vger.kernel.org
S: Maintained
F: Documentation/filesystems/ext2.rst
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs.git
F: fs/ext2/
F: include/linux/ext2*


@ -1,16 +1,23 @@
# SPDX-License-Identifier: GPL-2.0-only
config EXT2_FS
tristate "Second extended fs support"
tristate "Second extended fs support (DEPRECATED)"
select BUFFER_HEAD
select FS_IOMAP
select LEGACY_DIRECT_IO
help
Ext2 is a standard Linux file system for hard disks.
To compile this file system support as a module, choose M here: the
module will be called ext2.
This filesystem driver is deprecated because it does not properly
support inode time stamps beyond 03:14:07 UTC on 19 January 2038.
If unsure, say Y.
Ext2 users are advised to use ext4 driver to access their filesystem.
The driver is fully compatible, supports filesystems without journal
or extents, and also supports larger time stamps if the filesystem
is created with at least 256 byte inodes.
This code is kept as a simple reference for filesystem developers.
If unsure, say N.
config EXT2_FS_XATTR
bool "Ext2 extended attributes"


@ -412,7 +412,7 @@ void ext2_init_block_alloc_info(struct inode *inode)
struct ext2_block_alloc_info *block_i;
struct super_block *sb = inode->i_sb;
block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
block_i = kmalloc(sizeof(*block_i), GFP_KERNEL);
if (block_i) {
struct ext2_reserve_window_node *rsv = &block_i->rsv_window_node;


@ -674,7 +674,7 @@ struct ext2_inode_info {
struct inode vfs_inode;
struct list_head i_orphan; /* unlinked but open inodes */
#ifdef CONFIG_QUOTA
struct dquot *i_dquot[MAXQUOTAS];
struct dquot __rcu *i_dquot[MAXQUOTAS];
#endif
};


@ -754,7 +754,7 @@ static int ext2_get_blocks(struct inode *inode,
*/
err = sb_issue_zeroout(inode->i_sb,
le32_to_cpu(chain[depth-1].key), count,
GFP_NOFS);
GFP_KERNEL);
if (err) {
mutex_unlock(&ei->truncate_mutex);
goto cleanup;


@ -319,7 +319,7 @@ static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data, siz
static ssize_t ext2_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off);
static int ext2_quota_on(struct super_block *sb, int type, int format_id,
const struct path *path);
static struct dquot **ext2_get_dquots(struct inode *inode)
static struct dquot __rcu **ext2_get_dquots(struct inode *inode)
{
return EXT2_I(inode)->i_dquot;
}


@ -874,7 +874,7 @@ ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
int error;
error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr,
error = mb_cache_entry_create(cache, GFP_KERNEL, hash, bh->b_blocknr,
true);
if (error) {
if (error == -EBUSY) {


@ -1158,7 +1158,7 @@ struct ext4_inode_info {
tid_t i_datasync_tid;
#ifdef CONFIG_QUOTA
struct dquot *i_dquot[MAXQUOTAS];
struct dquot __rcu *i_dquot[MAXQUOTAS];
#endif
/* Precomputed uuid+inum+igen checksum for seeding inode checksums */


@ -1599,7 +1599,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
unsigned int flags);
static struct dquot **ext4_get_dquots(struct inode *inode)
static struct dquot __rcu **ext4_get_dquots(struct inode *inode)
{
return EXT4_I(inode)->i_dquot;
}


@ -830,7 +830,7 @@ struct f2fs_inode_info {
spinlock_t i_size_lock; /* protect last_disk_size */
#ifdef CONFIG_QUOTA
struct dquot *i_dquot[MAXQUOTAS];
struct dquot __rcu *i_dquot[MAXQUOTAS];
/* quota space reservation, managed internally by quota code */
qsize_t i_reserved_quota;


@ -2768,7 +2768,7 @@ int f2fs_dquot_initialize(struct inode *inode)
return dquot_initialize(inode);
}
static struct dquot **f2fs_get_dquots(struct inode *inode)
static struct dquot __rcu **f2fs_get_dquots(struct inode *inode)
{
return F2FS_I(inode)->i_dquot;
}


@ -908,8 +908,22 @@ root_found:
* we then decide whether to use the Joliet descriptor.
*/
inode = isofs_iget(s, sbi->s_firstdatazone, 0);
if (IS_ERR(inode))
goto out_no_root;
/*
* Fix for broken CDs with a corrupt root inode but a correct Joliet
* root directory.
*/
if (IS_ERR(inode)) {
if (joliet_level && sbi->s_firstdatazone != first_data_zone) {
printk(KERN_NOTICE
"ISOFS: root inode is unusable. "
"Disabling Rock Ridge and switching to Joliet.");
sbi->s_rock = 0;
inode = NULL;
} else {
goto out_no_root;
}
}
/*
* Fix for broken CDs with Rock Ridge and empty ISO root directory but


@ -92,7 +92,7 @@ struct jfs_inode_info {
} link;
} u;
#ifdef CONFIG_QUOTA
struct dquot *i_dquot[MAXQUOTAS];
struct dquot __rcu *i_dquot[MAXQUOTAS];
#endif
u32 dev; /* will die when we get wide dev_t */
struct inode vfs_inode;


@ -824,7 +824,7 @@ out:
return len - towrite;
}
static struct dquot **jfs_get_dquots(struct inode *inode)
static struct dquot __rcu **jfs_get_dquots(struct inode *inode)
{
return JFS_IP(inode)->i_dquot;
}


@ -65,7 +65,7 @@ struct ocfs2_inode_info
tid_t i_sync_tid;
tid_t i_datasync_tid;
struct dquot *i_dquot[MAXQUOTAS];
struct dquot __rcu *i_dquot[MAXQUOTAS];
};
/*


@ -447,14 +447,17 @@ int ocfs2_global_write_info(struct super_block *sb, int type)
int err;
struct quota_info *dqopt = sb_dqopt(sb);
struct ocfs2_mem_dqinfo *info = dqopt->info[type].dqi_priv;
unsigned int memalloc;
down_write(&dqopt->dqio_sem);
memalloc = memalloc_nofs_save();
err = ocfs2_qinfo_lock(info, 1);
if (err < 0)
goto out_sem;
err = __ocfs2_global_write_info(sb, type);
ocfs2_qinfo_unlock(info, 1);
out_sem:
memalloc_nofs_restore(memalloc);
up_write(&dqopt->dqio_sem);
return err;
}
@ -601,6 +604,7 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
struct ocfs2_super *osb = OCFS2_SB(sb);
int status = 0;
unsigned int memalloc;
trace_ocfs2_sync_dquot_helper(from_kqid(&init_user_ns, dquot->dq_id),
dquot->dq_id.type,
@ -618,6 +622,7 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
goto out_ilock;
}
down_write(&sb_dqopt(sb)->dqio_sem);
memalloc = memalloc_nofs_save();
status = ocfs2_sync_dquot(dquot);
if (status < 0)
mlog_errno(status);
@ -625,6 +630,7 @@ static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
status = ocfs2_local_write_dquot(dquot);
if (status < 0)
mlog_errno(status);
memalloc_nofs_restore(memalloc);
up_write(&sb_dqopt(sb)->dqio_sem);
ocfs2_commit_trans(osb, handle);
out_ilock:
@ -662,6 +668,7 @@ static int ocfs2_write_dquot(struct dquot *dquot)
handle_t *handle;
struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
int status = 0;
unsigned int memalloc;
trace_ocfs2_write_dquot(from_kqid(&init_user_ns, dquot->dq_id),
dquot->dq_id.type);
@ -673,7 +680,9 @@ static int ocfs2_write_dquot(struct dquot *dquot)
goto out;
}
down_write(&sb_dqopt(dquot->dq_sb)->dqio_sem);
memalloc = memalloc_nofs_save();
status = ocfs2_local_write_dquot(dquot);
memalloc_nofs_restore(memalloc);
up_write(&sb_dqopt(dquot->dq_sb)->dqio_sem);
ocfs2_commit_trans(osb, handle);
out:
@ -920,6 +929,7 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
handle_t *handle;
struct ocfs2_super *osb = OCFS2_SB(sb);
unsigned int memalloc;
trace_ocfs2_mark_dquot_dirty(from_kqid(&init_user_ns, dquot->dq_id),
type);
@ -946,6 +956,7 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
goto out_ilock;
}
down_write(&sb_dqopt(sb)->dqio_sem);
memalloc = memalloc_nofs_save();
status = ocfs2_sync_dquot(dquot);
if (status < 0) {
mlog_errno(status);
@ -954,6 +965,7 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
/* Now write updated local dquot structure */
status = ocfs2_local_write_dquot(dquot);
out_dlock:
memalloc_nofs_restore(memalloc);
up_write(&sb_dqopt(sb)->dqio_sem);
ocfs2_commit_trans(osb, handle);
out_ilock:


@ -470,6 +470,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
int bit, chunk;
struct ocfs2_recovery_chunk *rchunk, *next;
qsize_t spacechange, inodechange;
unsigned int memalloc;
trace_ocfs2_recover_local_quota_file((unsigned long)lqinode->i_ino, type);
@ -521,6 +522,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
goto out_drop_lock;
}
down_write(&sb_dqopt(sb)->dqio_sem);
memalloc = memalloc_nofs_save();
spin_lock(&dquot->dq_dqb_lock);
/* Add usage from quota entry into quota changes
* of our node. Auxiliary variables are important
@ -553,6 +555,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
unlock_buffer(qbh);
ocfs2_journal_dirty(handle, qbh);
out_commit:
memalloc_nofs_restore(memalloc);
up_write(&sb_dqopt(sb)->dqio_sem);
ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_drop_lock:


@ -122,7 +122,7 @@ static int ocfs2_susp_quotas(struct ocfs2_super *osb, int unsuspend);
static int ocfs2_enable_quotas(struct ocfs2_super *osb);
static void ocfs2_disable_quotas(struct ocfs2_super *osb);
static struct dquot **ocfs2_get_dquots(struct inode *inode)
static struct dquot __rcu **ocfs2_get_dquots(struct inode *inode)
{
return OCFS2_I(inode)->i_dquot;
}


@ -399,15 +399,17 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
/* Dirtify all the dquots - this can block when journalling */
static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots)
{
int ret, err, cnt;
struct dquot *dquot;
ret = err = 0;
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (dquot[cnt])
dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
if (dquot)
/* Even in case of error we have to continue */
ret = mark_dquot_dirty(dquot[cnt]);
ret = mark_dquot_dirty(dquot);
if (!err)
err = ret;
}
@ -875,10 +877,7 @@ void dqput(struct dquot *dquot)
}
/* Need to release dquot? */
#ifdef CONFIG_QUOTA_DEBUG
/* sanity check */
BUG_ON(!list_empty(&dquot->dq_free));
#endif
WARN_ON_ONCE(!list_empty(&dquot->dq_free));
put_releasing_dquots(dquot);
atomic_dec(&dquot->dq_count);
spin_unlock(&dq_list_lock);
@ -987,9 +986,8 @@ we_slept:
* smp_mb__before_atomic() in dquot_acquire().
*/
smp_rmb();
#ifdef CONFIG_QUOTA_DEBUG
BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
#endif
/* Has somebody invalidated entry under us? */
WARN_ON_ONCE(hlist_unhashed(&dquot->dq_hash));
out:
if (empty)
do_destroy_dquot(empty);
@ -998,14 +996,14 @@ out:
}
EXPORT_SYMBOL(dqget);
static inline struct dquot **i_dquot(struct inode *inode)
static inline struct dquot __rcu **i_dquot(struct inode *inode)
{
return inode->i_sb->s_op->get_dquots(inode);
}
static int dqinit_needed(struct inode *inode, int type)
{
struct dquot * const *dquots;
struct dquot __rcu * const *dquots;
int cnt;
if (IS_NOQUOTA(inode))
@ -1095,14 +1093,16 @@ static void remove_dquot_ref(struct super_block *sb, int type)
*/
spin_lock(&dq_data_lock);
if (!IS_NOQUOTA(inode)) {
struct dquot **dquots = i_dquot(inode);
struct dquot *dquot = dquots[type];
struct dquot __rcu **dquots = i_dquot(inode);
struct dquot *dquot = srcu_dereference_check(
dquots[type], &dquot_srcu,
lockdep_is_held(&dq_data_lock));
#ifdef CONFIG_QUOTA_DEBUG
if (unlikely(inode_get_rsv_space(inode) > 0))
reserved = 1;
#endif
dquots[type] = NULL;
rcu_assign_pointer(dquots[type], NULL);
if (dquot)
dqput(dquot);
}
@ -1455,7 +1455,8 @@ static int inode_quota_active(const struct inode *inode)
static int __dquot_initialize(struct inode *inode, int type)
{
int cnt, init_needed = 0;
struct dquot **dquots, *got[MAXQUOTAS] = {};
struct dquot __rcu **dquots;
struct dquot *got[MAXQUOTAS] = {};
struct super_block *sb = inode->i_sb;
qsize_t rsv;
int ret = 0;
@ -1530,7 +1531,7 @@ static int __dquot_initialize(struct inode *inode, int type)
if (!got[cnt])
continue;
if (!dquots[cnt]) {
dquots[cnt] = got[cnt];
rcu_assign_pointer(dquots[cnt], got[cnt]);
got[cnt] = NULL;
/*
* Make quota reservation system happy if someone
@ -1538,12 +1539,16 @@ static int __dquot_initialize(struct inode *inode, int type)
*/
rsv = inode_get_rsv_space(inode);
if (unlikely(rsv)) {
struct dquot *dquot = srcu_dereference_check(
dquots[cnt], &dquot_srcu,
lockdep_is_held(&dq_data_lock));
spin_lock(&inode->i_lock);
/* Get reservation again under proper lock */
rsv = __inode_get_rsv_space(inode);
spin_lock(&dquots[cnt]->dq_dqb_lock);
dquots[cnt]->dq_dqb.dqb_rsvspace += rsv;
spin_unlock(&dquots[cnt]->dq_dqb_lock);
spin_lock(&dquot->dq_dqb_lock);
dquot->dq_dqb.dqb_rsvspace += rsv;
spin_unlock(&dquot->dq_dqb_lock);
spin_unlock(&inode->i_lock);
}
}
@ -1565,7 +1570,7 @@ EXPORT_SYMBOL(dquot_initialize);
bool dquot_initialize_needed(struct inode *inode)
{
struct dquot **dquots;
struct dquot __rcu **dquots;
int i;
if (!inode_quota_active(inode))
@ -1590,13 +1595,14 @@ EXPORT_SYMBOL(dquot_initialize_needed);
static void __dquot_drop(struct inode *inode)
{
int cnt;
struct dquot **dquots = i_dquot(inode);
struct dquot __rcu **dquots = i_dquot(inode);
struct dquot *put[MAXQUOTAS];
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
put[cnt] = dquots[cnt];
dquots[cnt] = NULL;
put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu,
lockdep_is_held(&dq_data_lock));
rcu_assign_pointer(dquots[cnt], NULL);
}
spin_unlock(&dq_data_lock);
dqput_all(put);
@ -1604,7 +1610,7 @@ static void __dquot_drop(struct inode *inode)
void dquot_drop(struct inode *inode)
{
struct dquot * const *dquots;
struct dquot __rcu * const *dquots;
int cnt;
if (IS_NOQUOTA(inode))
@ -1677,7 +1683,8 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
int cnt, ret = 0, index;
struct dquot_warn warn[MAXQUOTAS];
int reserve = flags & DQUOT_SPACE_RESERVE;
struct dquot **dquots;
struct dquot __rcu **dquots;
struct dquot *dquot;
if (!inode_quota_active(inode)) {
if (reserve) {
@ -1697,27 +1704,26 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
index = srcu_read_lock(&dquot_srcu);
spin_lock(&inode->i_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!dquots[cnt])
dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
if (!dquot)
continue;
if (reserve) {
ret = dquot_add_space(dquots[cnt], 0, number, flags,
&warn[cnt]);
ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]);
} else {
ret = dquot_add_space(dquots[cnt], number, 0, flags,
&warn[cnt]);
ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]);
}
if (ret) {
/* Back out changes we already did */
for (cnt--; cnt >= 0; cnt--) {
if (!dquots[cnt])
dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
if (!dquot)
continue;
spin_lock(&dquots[cnt]->dq_dqb_lock);
spin_lock(&dquot->dq_dqb_lock);
if (reserve)
dquot_free_reserved_space(dquots[cnt],
number);
dquot_free_reserved_space(dquot, number);
else
dquot_decr_space(dquots[cnt], number);
spin_unlock(&dquots[cnt]->dq_dqb_lock);
dquot_decr_space(dquot, number);
spin_unlock(&dquot->dq_dqb_lock);
}
spin_unlock(&inode->i_lock);
goto out_flush_warn;
@ -1747,7 +1753,8 @@ int dquot_alloc_inode(struct inode *inode)
{
int cnt, ret = 0, index;
struct dquot_warn warn[MAXQUOTAS];
struct dquot * const *dquots;
struct dquot __rcu * const *dquots;
struct dquot *dquot;
if (!inode_quota_active(inode))
return 0;
@ -1758,17 +1765,19 @@ int dquot_alloc_inode(struct inode *inode)
index = srcu_read_lock(&dquot_srcu);
spin_lock(&inode->i_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!dquots[cnt])
dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
if (!dquot)
continue;
ret = dquot_add_inodes(dquots[cnt], 1, &warn[cnt]);
ret = dquot_add_inodes(dquot, 1, &warn[cnt]);
if (ret) {
for (cnt--; cnt >= 0; cnt--) {
if (!dquots[cnt])
dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
if (!dquot)
continue;
/* Back out changes we already did */
spin_lock(&dquots[cnt]->dq_dqb_lock);
dquot_decr_inodes(dquots[cnt], 1);
spin_unlock(&dquots[cnt]->dq_dqb_lock);
spin_lock(&dquot->dq_dqb_lock);
dquot_decr_inodes(dquot, 1);
spin_unlock(&dquot->dq_dqb_lock);
}
goto warn_put_all;
}
@ -1789,7 +1798,8 @@ EXPORT_SYMBOL(dquot_alloc_inode);
*/
void dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
struct dquot **dquots;
struct dquot __rcu **dquots;
struct dquot *dquot;
int cnt, index;
if (!inode_quota_active(inode)) {
@ -1805,9 +1815,8 @@ void dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
spin_lock(&inode->i_lock);
/* Claim reserved quotas to allocated quotas */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (dquots[cnt]) {
struct dquot *dquot = dquots[cnt];
dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
if (dquot) {
spin_lock(&dquot->dq_dqb_lock);
if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
number = dquot->dq_dqb.dqb_rsvspace;
@ -1831,7 +1840,8 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty);
*/
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
{
struct dquot **dquots;
struct dquot __rcu **dquots;
struct dquot *dquot;
int cnt, index;
if (!inode_quota_active(inode)) {
@ -1847,9 +1857,8 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
spin_lock(&inode->i_lock);
/* Claim reserved quotas to allocated quotas */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (dquots[cnt]) {
struct dquot *dquot = dquots[cnt];
dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
if (dquot) {
spin_lock(&dquot->dq_dqb_lock);
if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
number = dquot->dq_dqb.dqb_curspace;
@ -1875,7 +1884,8 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
{
unsigned int cnt;
struct dquot_warn warn[MAXQUOTAS];
struct dquot **dquots;
struct dquot __rcu **dquots;
struct dquot *dquot;
int reserve = flags & DQUOT_SPACE_RESERVE, index;
if (!inode_quota_active(inode)) {
@ -1896,17 +1906,18 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
int wtype;
warn[cnt].w_type = QUOTA_NL_NOWARN;
if (!dquots[cnt])
dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
if (!dquot)
continue;
spin_lock(&dquots[cnt]->dq_dqb_lock);
wtype = info_bdq_free(dquots[cnt], number);
spin_lock(&dquot->dq_dqb_lock);
wtype = info_bdq_free(dquot, number);
if (wtype != QUOTA_NL_NOWARN)
prepare_warning(&warn[cnt], dquots[cnt], wtype);
prepare_warning(&warn[cnt], dquot, wtype);
if (reserve)
dquot_free_reserved_space(dquots[cnt], number);
dquot_free_reserved_space(dquot, number);
else
dquot_decr_space(dquots[cnt], number);
spin_unlock(&dquots[cnt]->dq_dqb_lock);
dquot_decr_space(dquot, number);
spin_unlock(&dquot->dq_dqb_lock);
}
if (reserve)
*inode_reserved_space(inode) -= number;
@ -1930,7 +1941,8 @@ void dquot_free_inode(struct inode *inode)
{
unsigned int cnt;
struct dquot_warn warn[MAXQUOTAS];
struct dquot * const *dquots;
struct dquot __rcu * const *dquots;
struct dquot *dquot;
int index;
if (!inode_quota_active(inode))
@ -1941,16 +1953,16 @@ void dquot_free_inode(struct inode *inode)
spin_lock(&inode->i_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
int wtype;
warn[cnt].w_type = QUOTA_NL_NOWARN;
if (!dquots[cnt])
dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
if (!dquot)
continue;
spin_lock(&dquots[cnt]->dq_dqb_lock);
wtype = info_idq_free(dquots[cnt], 1);
spin_lock(&dquot->dq_dqb_lock);
wtype = info_idq_free(dquot, 1);
if (wtype != QUOTA_NL_NOWARN)
prepare_warning(&warn[cnt], dquots[cnt], wtype);
dquot_decr_inodes(dquots[cnt], 1);
spin_unlock(&dquots[cnt]->dq_dqb_lock);
prepare_warning(&warn[cnt], dquot, wtype);
dquot_decr_inodes(dquot, 1);
spin_unlock(&dquot->dq_dqb_lock);
}
spin_unlock(&inode->i_lock);
mark_all_dquot_dirty(dquots);
@ -1976,8 +1988,9 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
qsize_t cur_space;
qsize_t rsv_space = 0;
qsize_t inode_usage = 1;
struct dquot __rcu **dquots;
struct dquot *transfer_from[MAXQUOTAS] = {};
int cnt, ret = 0;
int cnt, index, ret = 0;
char is_valid[MAXQUOTAS] = {};
struct dquot_warn warn_to[MAXQUOTAS];
struct dquot_warn warn_from_inodes[MAXQUOTAS];
@ -2008,6 +2021,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
}
cur_space = __inode_get_bytes(inode);
rsv_space = __inode_get_rsv_space(inode);
dquots = i_dquot(inode);
/*
* Build the transfer_from list, check limits, and update usage in
* the target structures.
@ -2022,7 +2036,8 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
if (!sb_has_quota_active(inode->i_sb, cnt))
continue;
is_valid[cnt] = 1;
transfer_from[cnt] = i_dquot(inode)[cnt];
transfer_from[cnt] = srcu_dereference_check(dquots[cnt],
&dquot_srcu, lockdep_is_held(&dq_data_lock));
ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
&warn_to[cnt]);
if (ret)
@ -2061,13 +2076,21 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
rsv_space);
spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
}
i_dquot(inode)[cnt] = transfer_to[cnt];
rcu_assign_pointer(dquots[cnt], transfer_to[cnt]);
}
spin_unlock(&inode->i_lock);
spin_unlock(&dq_data_lock);
mark_all_dquot_dirty(transfer_from);
mark_all_dquot_dirty(transfer_to);
/*
* These arrays are local and we hold dquot references so we don't need
* the srcu protection but still take dquot_srcu to avoid warning in
* mark_all_dquot_dirty().
*/
index = srcu_read_lock(&dquot_srcu);
mark_all_dquot_dirty((struct dquot __rcu **)transfer_from);
mark_all_dquot_dirty((struct dquot __rcu **)transfer_to);
srcu_read_unlock(&dquot_srcu, index);
flush_warnings(warn_to);
flush_warnings(warn_from_inodes);
flush_warnings(warn_from_space);
@ -2388,7 +2411,8 @@ int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
lockdep_assert_held_write(&sb->s_umount);
/* Just unsuspend quotas? */
BUG_ON(flags & DQUOT_SUSPENDED);
if (WARN_ON_ONCE(flags & DQUOT_SUSPENDED))
return -EINVAL;
if (!fmt)
return -ESRCH;
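
The quota core changes above all apply one annotation scheme: the per-inode
i_dquot[] slots are now struct dquot __rcu pointers, readers fetch them
under dquot_srcu with srcu_dereference(), and writers publish or clear them
with rcu_assign_pointer(). The condensed sketch below is illustrative only;
example_walk_dquots() is a made-up function, and because dquot_srcu and
i_dquot() are private to the quota core it would only build there.

/* Reader side: pin the SRCU domain, then dereference each slot. */
static void example_walk_dquots(struct inode *inode)
{
	struct dquot __rcu **dquots = i_dquot(inode);
	struct dquot *dquot;
	int cnt, index;

	index = srcu_read_lock(&dquot_srcu);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		/* dquot stays valid until srcu_read_unlock() below */
	}
	srcu_read_unlock(&dquot_srcu, index);
}

The read side is safe because dquot freeing is deferred behind an SRCU grace
period; writers that need the old value (for example to dqput() it) fetch it
under dq_data_lock with srcu_dereference_check() before overwriting the
slot, as __dquot_drop() does above.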


@ -21,6 +21,12 @@ MODULE_AUTHOR("Jan Kara");
MODULE_DESCRIPTION("Quota trie support");
MODULE_LICENSE("GPL");
/*
* Maximum quota tree depth we support. Only to limit recursion when working
* with the tree.
*/
#define MAX_QTREE_DEPTH 6
#define __QUOTA_QT_PARANOIA
static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
@ -108,7 +114,7 @@ static int check_dquot_block_header(struct qtree_mem_dqinfo *info,
/* Remove empty block from list and return it */
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
int ret, blk;
@ -160,7 +166,7 @@ static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
uint blk)
{
char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
uint nextblk = le32_to_cpu(dh->dqdh_next_free);
uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
@ -207,7 +213,7 @@ out_buf:
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
uint blk)
{
char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
int err;
@ -255,7 +261,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
{
uint blk, i;
struct qt_disk_dqdbheader *dh;
char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
char *ddquot;
*err = 0;
@ -327,27 +333,36 @@ out_buf:
/* Insert reference to structure into the trie */
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
uint *treeblk, int depth)
uint *blks, int depth)
{
char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
int ret = 0, newson = 0, newact = 0;
__le32 *ref;
uint newblk;
int i;
if (!buf)
return -ENOMEM;
if (!*treeblk) {
if (!blks[depth]) {
ret = get_free_dqblk(info);
if (ret < 0)
goto out_buf;
*treeblk = ret;
for (i = 0; i < depth; i++)
if (ret == blks[i]) {
quota_error(dquot->dq_sb,
"Free block already used in tree: block %u",
ret);
ret = -EIO;
goto out_buf;
}
blks[depth] = ret;
memset(buf, 0, info->dqi_usable_bs);
newact = 1;
} else {
ret = read_blk(info, *treeblk, buf);
ret = read_blk(info, blks[depth], buf);
if (ret < 0) {
quota_error(dquot->dq_sb, "Can't read tree quota "
"block %u", *treeblk);
"block %u", blks[depth]);
goto out_buf;
}
}
@ -357,8 +372,20 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
info->dqi_blocks - 1);
if (ret)
goto out_buf;
if (!newblk)
if (!newblk) {
newson = 1;
} else {
for (i = 0; i <= depth; i++)
if (newblk == blks[i]) {
quota_error(dquot->dq_sb,
"Cycle in quota tree detected: block %u index %u",
blks[depth],
get_index(info, dquot->dq_id, depth));
ret = -EIO;
goto out_buf;
}
}
blks[depth + 1] = newblk;
if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
if (newblk) {
@ -370,16 +397,16 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
goto out_buf;
}
#endif
newblk = find_free_dqentry(info, dquot, &ret);
blks[depth + 1] = find_free_dqentry(info, dquot, &ret);
} else {
ret = do_insert_tree(info, dquot, &newblk, depth+1);
ret = do_insert_tree(info, dquot, blks, depth + 1);
}
if (newson && ret >= 0) {
ref[get_index(info, dquot->dq_id, depth)] =
cpu_to_le32(newblk);
ret = write_blk(info, *treeblk, buf);
cpu_to_le32(blks[depth + 1]);
ret = write_blk(info, blks[depth], buf);
} else if (newact && ret < 0) {
put_free_dqblk(info, buf, *treeblk);
put_free_dqblk(info, buf, blks[depth]);
}
out_buf:
kfree(buf);
@ -390,7 +417,7 @@ out_buf:
static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
struct dquot *dquot)
{
int tmp = QT_TREEOFF;
uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF };
#ifdef __QUOTA_QT_PARANOIA
if (info->dqi_blocks <= QT_TREEOFF) {
@ -398,7 +425,11 @@ static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
return -EIO;
}
#endif
return do_insert_tree(info, dquot, &tmp, 0);
if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) {
quota_error(dquot->dq_sb, "Quota tree depth too big!");
return -EIO;
}
return do_insert_tree(info, dquot, blks, 0);
}
/*
@ -410,7 +441,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
int type = dquot->dq_id.type;
struct super_block *sb = dquot->dq_sb;
ssize_t ret;
char *ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS);
char *ddquot = kmalloc(info->dqi_entry_size, GFP_KERNEL);
if (!ddquot)
return -ENOMEM;
@ -449,7 +480,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
uint blk)
{
struct qt_disk_dqdbheader *dh;
char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
int ret = 0;
if (!buf)
@ -511,19 +542,20 @@ out_buf:
/* Remove reference to dquot from tree */
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
uint *blk, int depth)
uint *blks, int depth)
{
char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
int ret = 0;
uint newblk;
__le32 *ref = (__le32 *)buf;
int i;
if (!buf)
return -ENOMEM;
ret = read_blk(info, *blk, buf);
ret = read_blk(info, blks[depth], buf);
if (ret < 0) {
quota_error(dquot->dq_sb, "Can't read quota data block %u",
*blk);
blks[depth]);
goto out_buf;
}
newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
@ -532,29 +564,38 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
if (ret)
goto out_buf;
for (i = 0; i <= depth; i++)
if (newblk == blks[i]) {
quota_error(dquot->dq_sb,
"Cycle in quota tree detected: block %u index %u",
blks[depth],
get_index(info, dquot->dq_id, depth));
ret = -EIO;
goto out_buf;
}
if (depth == info->dqi_qtree_depth - 1) {
ret = free_dqentry(info, dquot, newblk);
newblk = 0;
blks[depth + 1] = 0;
} else {
ret = remove_tree(info, dquot, &newblk, depth+1);
blks[depth + 1] = newblk;
ret = remove_tree(info, dquot, blks, depth + 1);
}
if (ret >= 0 && !newblk) {
int i;
if (ret >= 0 && !blks[depth + 1]) {
ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
/* Block got empty? */
for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
;
/* Don't put the root block into the free block list */
if (i == (info->dqi_usable_bs >> 2)
&& *blk != QT_TREEOFF) {
put_free_dqblk(info, buf, *blk);
*blk = 0;
&& blks[depth] != QT_TREEOFF) {
put_free_dqblk(info, buf, blks[depth]);
blks[depth] = 0;
} else {
ret = write_blk(info, *blk, buf);
ret = write_blk(info, blks[depth], buf);
if (ret < 0)
quota_error(dquot->dq_sb,
"Can't write quota tree block %u",
*blk);
blks[depth]);
}
}
out_buf:
@ -565,11 +606,15 @@ out_buf:
/* Delete dquot from tree */
int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
uint tmp = QT_TREEOFF;
uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF };
if (!dquot->dq_off) /* Even not allocated? */
return 0;
return remove_tree(info, dquot, &tmp, 0);
if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) {
quota_error(dquot->dq_sb, "Quota tree depth too big!");
return -EIO;
}
return remove_tree(info, dquot, blks, 0);
}
EXPORT_SYMBOL(qtree_delete_dquot);
@ -577,7 +622,7 @@ EXPORT_SYMBOL(qtree_delete_dquot);
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
struct dquot *dquot, uint blk)
{
char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
loff_t ret = 0;
int i;
char *ddquot;
@ -613,18 +658,20 @@ out_buf:
/* Find entry for given id in the tree */
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
struct dquot *dquot, uint blk, int depth)
struct dquot *dquot, uint *blks, int depth)
{
char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
loff_t ret = 0;
__le32 *ref = (__le32 *)buf;
uint blk;
int i;
if (!buf)
return -ENOMEM;
ret = read_blk(info, blk, buf);
ret = read_blk(info, blks[depth], buf);
if (ret < 0) {
quota_error(dquot->dq_sb, "Can't read quota tree block %u",
blk);
blks[depth]);
goto out_buf;
}
ret = 0;
@ -636,8 +683,19 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
if (ret)
goto out_buf;
/* Check for cycles in the tree */
for (i = 0; i <= depth; i++)
if (blk == blks[i]) {
quota_error(dquot->dq_sb,
"Cycle in quota tree detected: block %u index %u",
blks[depth],
get_index(info, dquot->dq_id, depth));
ret = -EIO;
goto out_buf;
}
blks[depth + 1] = blk;
if (depth < info->dqi_qtree_depth - 1)
ret = find_tree_dqentry(info, dquot, blk, depth+1);
ret = find_tree_dqentry(info, dquot, blks, depth + 1);
else
ret = find_block_dqentry(info, dquot, blk);
out_buf:
@ -649,7 +707,13 @@ out_buf:
static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
struct dquot *dquot)
{
return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF };
if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) {
quota_error(dquot->dq_sb, "Quota tree depth too big!");
return -EIO;
}
return find_tree_dqentry(info, dquot, blks, 0);
}
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
@ -684,7 +748,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
}
dquot->dq_off = offset;
}
ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS);
ddquot = kmalloc(info->dqi_entry_size, GFP_KERNEL);
if (!ddquot)
return -ENOMEM;
ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
@ -728,7 +792,7 @@ EXPORT_SYMBOL(qtree_release_dquot);
static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
unsigned int blk, int depth)
{
char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL);
__le32 *ref = (__le32 *)buf;
ssize_t ret;
unsigned int epb = info->dqi_usable_bs >> 2;
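
The quota tree changes above share one loop-detection invariant: each walk
records the block visited at every depth in a blks[] array sized by the new
MAX_QTREE_DEPTH limit, and refuses to descend into a child block that is
already on that path. A minimal illustration follows; example_check_path()
is a hypothetical helper, not a function added by the patch.

#include <linux/errno.h>

/* Return -EIO if newblk already occurs on the path blks[0..depth]. */
static int example_check_path(const unsigned int *blks, int depth,
			      unsigned int newblk)
{
	int i;

	for (i = 0; i <= depth; i++)
		if (newblk == blks[i])
			return -EIO;	/* cycle in the on-disk quota tree */
	return 0;
}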


@ -160,9 +160,11 @@ static int v1_read_file_info(struct super_block *sb, int type)
{
struct quota_info *dqopt = sb_dqopt(sb);
struct v1_disk_dqblk dqblk;
unsigned int memalloc;
int ret;
down_read(&dqopt->dqio_sem);
memalloc = memalloc_nofs_save();
ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
sizeof(struct v1_disk_dqblk), v1_dqoff(0));
if (ret != sizeof(struct v1_disk_dqblk)) {
@ -179,6 +181,7 @@ static int v1_read_file_info(struct super_block *sb, int type)
dqopt->info[type].dqi_bgrace =
dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
out:
memalloc_nofs_restore(memalloc);
up_read(&dqopt->dqio_sem);
return ret;
}
@ -187,9 +190,11 @@ static int v1_write_file_info(struct super_block *sb, int type)
{
struct quota_info *dqopt = sb_dqopt(sb);
struct v1_disk_dqblk dqblk;
unsigned int memalloc;
int ret;
down_write(&dqopt->dqio_sem);
memalloc = memalloc_nofs_save();
ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
sizeof(struct v1_disk_dqblk), v1_dqoff(0));
if (ret != sizeof(struct v1_disk_dqblk)) {
@ -209,6 +214,7 @@ static int v1_write_file_info(struct super_block *sb, int type)
else if (ret >= 0)
ret = -EIO;
out:
memalloc_nofs_restore(memalloc);
up_write(&dqopt->dqio_sem);
return ret;
}


@ -96,9 +96,11 @@ static int v2_read_file_info(struct super_block *sb, int type)
struct qtree_mem_dqinfo *qinfo;
ssize_t size;
unsigned int version;
unsigned int memalloc;
int ret;
down_read(&dqopt->dqio_sem);
memalloc = memalloc_nofs_save();
ret = v2_read_header(sb, type, &dqhead);
if (ret < 0)
goto out;
@ -119,7 +121,7 @@ static int v2_read_file_info(struct super_block *sb, int type)
ret = -EIO;
goto out;
}
info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_NOFS);
info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_KERNEL);
if (!info->dqi_priv) {
ret = -ENOMEM;
goto out;
@ -166,14 +168,17 @@ static int v2_read_file_info(struct super_block *sb, int type)
i_size_read(sb_dqopt(sb)->files[type]));
goto out_free;
}
if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) {
quota_error(sb, "Free block number too big (%u >= %u).",
qinfo->dqi_free_blk, qinfo->dqi_blocks);
if (qinfo->dqi_free_blk && (qinfo->dqi_free_blk <= QT_TREEOFF ||
qinfo->dqi_free_blk >= qinfo->dqi_blocks)) {
quota_error(sb, "Free block number %u out of range (%u, %u).",
qinfo->dqi_free_blk, QT_TREEOFF, qinfo->dqi_blocks);
goto out_free;
}
if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) {
quota_error(sb, "Block with free entry too big (%u >= %u).",
qinfo->dqi_free_entry, qinfo->dqi_blocks);
if (qinfo->dqi_free_entry && (qinfo->dqi_free_entry <= QT_TREEOFF ||
qinfo->dqi_free_entry >= qinfo->dqi_blocks)) {
quota_error(sb, "Block with free entry %u out of range (%u, %u).",
qinfo->dqi_free_entry, QT_TREEOFF,
qinfo->dqi_blocks);
goto out_free;
}
ret = 0;
@ -183,6 +188,7 @@ out_free:
info->dqi_priv = NULL;
}
out:
memalloc_nofs_restore(memalloc);
up_read(&dqopt->dqio_sem);
return ret;
}
@ -195,8 +201,10 @@ static int v2_write_file_info(struct super_block *sb, int type)
struct mem_dqinfo *info = &dqopt->info[type];
struct qtree_mem_dqinfo *qinfo = info->dqi_priv;
ssize_t size;
unsigned int memalloc;
down_write(&dqopt->dqio_sem);
memalloc = memalloc_nofs_save();
spin_lock(&dq_data_lock);
info->dqi_flags &= ~DQF_INFO_DIRTY;
dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
@ -209,6 +217,7 @@ static int v2_write_file_info(struct super_block *sb, int type)
dinfo.dqi_free_entry = cpu_to_le32(qinfo->dqi_free_entry);
size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
memalloc_nofs_restore(memalloc);
up_write(&dqopt->dqio_sem);
if (size != sizeof(struct v2_disk_dqinfo)) {
quota_error(sb, "Can't write info structure");
@ -328,11 +337,14 @@ static int v2_read_dquot(struct dquot *dquot)
{
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
int ret;
unsigned int memalloc;
down_read(&dqopt->dqio_sem);
memalloc = memalloc_nofs_save();
ret = qtree_read_dquot(
sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv,
dquot);
memalloc_nofs_restore(memalloc);
up_read(&dqopt->dqio_sem);
return ret;
}
@ -342,6 +354,7 @@ static int v2_write_dquot(struct dquot *dquot)
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
int ret;
bool alloc = false;
unsigned int memalloc;
/*
* If space for dquot is already allocated, we don't need any
@ -355,9 +368,11 @@ static int v2_write_dquot(struct dquot *dquot)
} else {
down_read(&dqopt->dqio_sem);
}
memalloc = memalloc_nofs_save();
ret = qtree_write_dquot(
sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv,
dquot);
memalloc_nofs_restore(memalloc);
if (alloc)
up_write(&dqopt->dqio_sem);
else
@ -368,10 +383,13 @@ static int v2_write_dquot(struct dquot *dquot)
static int v2_release_dquot(struct dquot *dquot)
{
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
unsigned int memalloc;
int ret;
down_write(&dqopt->dqio_sem);
memalloc = memalloc_nofs_save();
ret = qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot);
memalloc_nofs_restore(memalloc);
up_write(&dqopt->dqio_sem);
return ret;
@ -386,10 +404,13 @@ static int v2_free_file_info(struct super_block *sb, int type)
static int v2_get_next_id(struct super_block *sb, struct kqid *qid)
{
struct quota_info *dqopt = sb_dqopt(sb);
unsigned int memalloc;
int ret;
down_read(&dqopt->dqio_sem);
memalloc = memalloc_nofs_save();
ret = qtree_get_next_id(sb_dqinfo(sb, qid->type)->dqi_priv, qid);
memalloc_nofs_restore(memalloc);
up_read(&dqopt->dqio_sem);
return ret;
}


@ -97,7 +97,7 @@ struct reiserfs_inode_info {
struct rw_semaphore i_xattr_sem;
#endif
#ifdef CONFIG_QUOTA
struct dquot *i_dquot[MAXQUOTAS];
struct dquot __rcu *i_dquot[MAXQUOTAS];
#endif
struct inode vfs_inode;


@ -801,7 +801,7 @@ static ssize_t reiserfs_quota_write(struct super_block *, int, const char *,
static ssize_t reiserfs_quota_read(struct super_block *, int, char *, size_t,
loff_t);
static struct dquot **reiserfs_get_dquots(struct inode *inode)
static struct dquot __rcu **reiserfs_get_dquots(struct inode *inode)
{
return REISERFS_I(inode)->i_dquot;
}


@ -67,7 +67,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
pos_valid = true;
}
fname = kmalloc(UDF_NAME_LEN, GFP_NOFS);
fname = kmalloc(UDF_NAME_LEN, GFP_KERNEL);
if (!fname) {
ret = -ENOMEM;
goto out;


@ -357,7 +357,7 @@ int udf_expand_file_adinicb(struct inode *inode)
return 0;
}
page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
page = find_or_create_page(inode->i_mapping, 0, GFP_KERNEL);
if (!page)
return -ENOMEM;


@ -59,7 +59,7 @@ static int udf_fiiter_find_entry(struct inode *dir, const struct qstr *child,
child->name[0] == '.' && child->name[1] == '.';
int ret;
fname = kmalloc(UDF_NAME_LEN, GFP_NOFS);
fname = kmalloc(UDF_NAME_LEN, GFP_KERNEL);
if (!fname)
return -ENOMEM;
@ -566,7 +566,7 @@ out:
static int udf_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symname)
{
struct inode *inode = udf_new_inode(dir, S_IFLNK | 0777);
struct inode *inode;
struct pathComponent *pc;
const char *compstart;
struct extent_position epos = {};
@ -579,17 +579,20 @@ static int udf_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct udf_inode_info *iinfo;
struct super_block *sb = dir->i_sb;
if (IS_ERR(inode))
return PTR_ERR(inode);
name = kmalloc(UDF_NAME_LEN_CS0, GFP_KERNEL);
if (!name) {
err = -ENOMEM;
goto out;
}
inode = udf_new_inode(dir, S_IFLNK | 0777);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out;
}
iinfo = UDF_I(inode);
down_write(&iinfo->i_data_sem);
name = kmalloc(UDF_NAME_LEN_CS0, GFP_NOFS);
if (!name) {
err = -ENOMEM;
goto out_no_entry;
}
inode->i_data.a_ops = &udf_symlink_aops;
inode->i_op = &udf_symlink_inode_operations;
inode_nohighmem(inode);


@ -40,20 +40,20 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/stat.h>
#include <linux/cdrom.h>
#include <linux/nls.h>
#include <linux/vfs.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/bitmap.h>
#include <linux/crc-itu-t.h>
#include <linux/log2.h>
#include <asm/byteorder.h>
#include <linux/iversion.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include "udf_sb.h"
#include "udf_i.h"
@ -91,16 +91,20 @@ enum { UDF_MAX_LINKS = 0xffff };
#define UDF_MAX_FILESIZE (1ULL << 42)
/* These are the "meat" - everything else is stuffing */
static int udf_fill_super(struct super_block *, void *, int);
static int udf_fill_super(struct super_block *sb, struct fs_context *fc);
static void udf_put_super(struct super_block *);
static int udf_sync_fs(struct super_block *, int);
static int udf_remount_fs(struct super_block *, int *, char *);
static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad);
static void udf_open_lvid(struct super_block *);
static void udf_close_lvid(struct super_block *);
static unsigned int udf_count_free(struct super_block *);
static int udf_statfs(struct dentry *, struct kstatfs *);
static int udf_show_options(struct seq_file *, struct dentry *);
static int udf_init_fs_context(struct fs_context *fc);
static int udf_parse_param(struct fs_context *fc, struct fs_parameter *param);
static int udf_reconfigure(struct fs_context *fc);
static void udf_free_fc(struct fs_context *fc);
static const struct fs_parameter_spec udf_param_spec[];
struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
{
@ -119,18 +123,25 @@ struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
}
/* UDF filesystem type */
static struct dentry *udf_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
static int udf_get_tree(struct fs_context *fc)
{
return mount_bdev(fs_type, flags, dev_name, data, udf_fill_super);
return get_tree_bdev(fc, udf_fill_super);
}
static const struct fs_context_operations udf_context_ops = {
.parse_param = udf_parse_param,
.get_tree = udf_get_tree,
.reconfigure = udf_reconfigure,
.free = udf_free_fc,
};
static struct file_system_type udf_fstype = {
.owner = THIS_MODULE,
.name = "udf",
.mount = udf_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
.init_fs_context = udf_init_fs_context,
.parameters = udf_param_spec,
};
MODULE_ALIAS_FS("udf");
@ -203,12 +214,10 @@ static const struct super_operations udf_sb_ops = {
.put_super = udf_put_super,
.sync_fs = udf_sync_fs,
.statfs = udf_statfs,
.remount_fs = udf_remount_fs,
.show_options = udf_show_options,
};
struct udf_options {
unsigned char novrs;
unsigned int blocksize;
unsigned int session;
unsigned int lastblock;
@ -222,6 +231,65 @@ struct udf_options {
struct nls_table *nls_map;
};
/*
* UDF has historically preserved prior mount options across
* a remount, so copy those here if remounting, otherwise set
* initial mount defaults.
*/
static void udf_init_options(struct fs_context *fc, struct udf_options *uopt)
{
if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
struct super_block *sb = fc->root->d_sb;
struct udf_sb_info *sbi = UDF_SB(sb);
uopt->flags = sbi->s_flags;
uopt->uid = sbi->s_uid;
uopt->gid = sbi->s_gid;
uopt->umask = sbi->s_umask;
uopt->fmode = sbi->s_fmode;
uopt->dmode = sbi->s_dmode;
uopt->nls_map = NULL;
} else {
uopt->flags = (1 << UDF_FLAG_USE_AD_IN_ICB) |
(1 << UDF_FLAG_STRICT);
/*
* By default we'll use overflow[ug]id when UDF
* inode [ug]id == -1
*/
uopt->uid = make_kuid(current_user_ns(), overflowuid);
uopt->gid = make_kgid(current_user_ns(), overflowgid);
uopt->umask = 0;
uopt->fmode = UDF_INVALID_MODE;
uopt->dmode = UDF_INVALID_MODE;
uopt->nls_map = NULL;
uopt->session = 0xFFFFFFFF;
}
}
static int udf_init_fs_context(struct fs_context *fc)
{
struct udf_options *uopt;
uopt = kzalloc(sizeof(*uopt), GFP_KERNEL);
if (!uopt)
return -ENOMEM;
udf_init_options(fc, uopt);
fc->fs_private = uopt;
fc->ops = &udf_context_ops;
return 0;
}
static void udf_free_fc(struct fs_context *fc)
{
struct udf_options *uopt = fc->fs_private;
unload_nls(uopt->nls_map);
kfree(fc->fs_private);
}
static int __init init_udf_fs(void)
{
int err;
@ -357,7 +425,7 @@ static int udf_show_options(struct seq_file *seq, struct dentry *root)
}
/*
* udf_parse_options
* udf_parse_param
*
* PURPOSE
* Parse mount options.
@ -400,12 +468,12 @@ static int udf_show_options(struct seq_file *seq, struct dentry *root)
* yield highly unpredictable results.
*
* PRE-CONDITIONS
* options Pointer to mount options string.
* uopts Pointer to mount options variable.
* fc fs_context with pointer to mount options variable.
* param Pointer to fs_parameter being parsed.
*
* POST-CONDITIONS
* <return> 1 Mount options parsed okay.
* <return> 0 Error parsing mount options.
* <return> 0 Mount options parsed okay.
* <return> errno Error parsing mount options.
*
* HISTORY
* July 1, 1997 - Andrew E. Mileski
@ -417,229 +485,193 @@ enum {
Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad,
Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock,
Opt_anchor, Opt_volume, Opt_partition, Opt_fileset,
Opt_rootdir, Opt_utf8, Opt_iocharset,
Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore,
Opt_fmode, Opt_dmode
Opt_rootdir, Opt_utf8, Opt_iocharset, Opt_err, Opt_fmode, Opt_dmode
};
static const match_table_t tokens = {
{Opt_novrs, "novrs"},
{Opt_nostrict, "nostrict"},
{Opt_bs, "bs=%u"},
{Opt_unhide, "unhide"},
{Opt_undelete, "undelete"},
{Opt_noadinicb, "noadinicb"},
{Opt_adinicb, "adinicb"},
{Opt_shortad, "shortad"},
{Opt_longad, "longad"},
{Opt_uforget, "uid=forget"},
{Opt_uignore, "uid=ignore"},
{Opt_gforget, "gid=forget"},
{Opt_gignore, "gid=ignore"},
{Opt_gid, "gid=%u"},
{Opt_uid, "uid=%u"},
{Opt_umask, "umask=%o"},
{Opt_session, "session=%u"},
{Opt_lastblock, "lastblock=%u"},
{Opt_anchor, "anchor=%u"},
{Opt_volume, "volume=%u"},
{Opt_partition, "partition=%u"},
{Opt_fileset, "fileset=%u"},
{Opt_rootdir, "rootdir=%u"},
{Opt_utf8, "utf8"},
{Opt_iocharset, "iocharset=%s"},
{Opt_fmode, "mode=%o"},
{Opt_dmode, "dmode=%o"},
{Opt_err, NULL}
};
static const struct fs_parameter_spec udf_param_spec[] = {
fsparam_flag ("novrs", Opt_novrs),
fsparam_flag ("nostrict", Opt_nostrict),
fsparam_u32 ("bs", Opt_bs),
fsparam_flag ("unhide", Opt_unhide),
fsparam_flag ("undelete", Opt_undelete),
fsparam_flag_no ("adinicb", Opt_adinicb),
fsparam_flag ("shortad", Opt_shortad),
fsparam_flag ("longad", Opt_longad),
fsparam_string ("gid", Opt_gid),
fsparam_string ("uid", Opt_uid),
fsparam_u32 ("umask", Opt_umask),
fsparam_u32 ("session", Opt_session),
fsparam_u32 ("lastblock", Opt_lastblock),
fsparam_u32 ("anchor", Opt_anchor),
fsparam_u32 ("volume", Opt_volume),
fsparam_u32 ("partition", Opt_partition),
fsparam_u32 ("fileset", Opt_fileset),
fsparam_u32 ("rootdir", Opt_rootdir),
fsparam_flag ("utf8", Opt_utf8),
fsparam_string ("iocharset", Opt_iocharset),
fsparam_u32 ("mode", Opt_fmode),
fsparam_u32 ("dmode", Opt_dmode),
{}
};
static int udf_parse_options(char *options, struct udf_options *uopt,
bool remount)
static int udf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
char *p;
int option;
unsigned int uv;
unsigned int n;
struct udf_options *uopt = fc->fs_private;
struct fs_parse_result result;
int token;
bool remount = (fc->purpose & FS_CONTEXT_FOR_RECONFIGURE);
uopt->novrs = 0;
uopt->session = 0xFFFFFFFF;
uopt->lastblock = 0;
uopt->anchor = 0;
token = fs_parse(fc, udf_param_spec, param, &result);
if (token < 0)
return token;
if (!options)
return 1;
while ((p = strsep(&options, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
int token;
unsigned n;
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_novrs:
uopt->novrs = 1;
break;
case Opt_bs:
if (match_int(&args[0], &option))
return 0;
n = option;
if (n != 512 && n != 1024 && n != 2048 && n != 4096)
return 0;
uopt->blocksize = n;
uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET);
break;
case Opt_unhide:
uopt->flags |= (1 << UDF_FLAG_UNHIDE);
break;
case Opt_undelete:
uopt->flags |= (1 << UDF_FLAG_UNDELETE);
break;
case Opt_noadinicb:
switch (token) {
case Opt_novrs:
uopt->flags |= (1 << UDF_FLAG_NOVRS);
break;
case Opt_bs:
n = result.uint_32;
if (n != 512 && n != 1024 && n != 2048 && n != 4096)
return -EINVAL;
uopt->blocksize = n;
uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET);
break;
case Opt_unhide:
uopt->flags |= (1 << UDF_FLAG_UNHIDE);
break;
case Opt_undelete:
uopt->flags |= (1 << UDF_FLAG_UNDELETE);
break;
case Opt_adinicb:
if (result.negated)
uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
break;
case Opt_adinicb:
else
uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
break;
case Opt_shortad:
uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
break;
case Opt_longad:
uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
break;
case Opt_gid:
if (match_uint(args, &uv))
return 0;
uopt->gid = make_kgid(current_user_ns(), uv);
if (!gid_valid(uopt->gid))
return 0;
break;
case Opt_shortad:
uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
break;
case Opt_longad:
uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
break;
case Opt_gid:
if (kstrtoint(param->string, 10, &uv) == 0) {
kgid_t gid = make_kgid(current_user_ns(), uv);
if (!gid_valid(gid))
return -EINVAL;
uopt->gid = gid;
uopt->flags |= (1 << UDF_FLAG_GID_SET);
break;
case Opt_uid:
if (match_uint(args, &uv))
return 0;
uopt->uid = make_kuid(current_user_ns(), uv);
if (!uid_valid(uopt->uid))
return 0;
uopt->flags |= (1 << UDF_FLAG_UID_SET);
break;
case Opt_umask:
if (match_octal(args, &option))
return 0;
uopt->umask = option;
break;
case Opt_nostrict:
uopt->flags &= ~(1 << UDF_FLAG_STRICT);
break;
case Opt_session:
if (match_int(args, &option))
return 0;
uopt->session = option;
if (!remount)
uopt->flags |= (1 << UDF_FLAG_SESSION_SET);
break;
case Opt_lastblock:
if (match_int(args, &option))
return 0;
uopt->lastblock = option;
if (!remount)
uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET);
break;
case Opt_anchor:
if (match_int(args, &option))
return 0;
uopt->anchor = option;
break;
case Opt_volume:
case Opt_partition:
case Opt_fileset:
case Opt_rootdir:
/* Ignored (never implemented properly) */
break;
case Opt_utf8:
if (!remount) {
unload_nls(uopt->nls_map);
uopt->nls_map = NULL;
}
break;
case Opt_iocharset:
if (!remount) {
unload_nls(uopt->nls_map);
uopt->nls_map = NULL;
}
/* When nls_map is not loaded then UTF-8 is used */
if (!remount && strcmp(args[0].from, "utf8") != 0) {
uopt->nls_map = load_nls(args[0].from);
if (!uopt->nls_map) {
pr_err("iocharset %s not found\n",
args[0].from);
return 0;
}
}
break;
case Opt_uforget:
uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
break;
case Opt_uignore:
case Opt_gignore:
/* These options are superseeded by uid=<number> */
break;
case Opt_gforget:
} else if (!strcmp(param->string, "forget")) {
uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
break;
case Opt_fmode:
if (match_octal(args, &option))
return 0;
uopt->fmode = option & 0777;
break;
case Opt_dmode:
if (match_octal(args, &option))
return 0;
uopt->dmode = option & 0777;
break;
default:
pr_err("bad mount option \"%s\" or missing value\n", p);
return 0;
} else if (!strcmp(param->string, "ignore")) {
/* this option is superseded by gid=<number> */
;
} else {
return -EINVAL;
}
break;
case Opt_uid:
if (kstrtoint(param->string, 10, &uv) == 0) {
kuid_t uid = make_kuid(current_user_ns(), uv);
if (!uid_valid(uid))
return -EINVAL;
uopt->uid = uid;
uopt->flags |= (1 << UDF_FLAG_UID_SET);
} else if (!strcmp(param->string, "forget")) {
uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
} else if (!strcmp(param->string, "ignore")) {
/* this option is superseded by uid=<number> */
;
} else {
return -EINVAL;
}
break;
case Opt_umask:
uopt->umask = result.uint_32;
break;
case Opt_nostrict:
uopt->flags &= ~(1 << UDF_FLAG_STRICT);
break;
case Opt_session:
uopt->session = result.uint_32;
if (!remount)
uopt->flags |= (1 << UDF_FLAG_SESSION_SET);
break;
case Opt_lastblock:
uopt->lastblock = result.uint_32;
if (!remount)
uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET);
break;
case Opt_anchor:
uopt->anchor = result.uint_32;
break;
case Opt_volume:
case Opt_partition:
case Opt_fileset:
case Opt_rootdir:
/* Ignored (never implemented properly) */
break;
case Opt_utf8:
if (!remount) {
unload_nls(uopt->nls_map);
uopt->nls_map = NULL;
}
break;
case Opt_iocharset:
if (!remount) {
unload_nls(uopt->nls_map);
uopt->nls_map = NULL;
}
/* When nls_map is not loaded then UTF-8 is used */
if (!remount && strcmp(param->string, "utf8") != 0) {
uopt->nls_map = load_nls(param->string);
if (!uopt->nls_map) {
errorf(fc, "iocharset %s not found",
param->string);
return -EINVAL;
}
}
break;
case Opt_fmode:
uopt->fmode = result.uint_32 & 0777;
break;
case Opt_dmode:
uopt->dmode = result.uint_32 & 0777;
break;
default:
return -EINVAL;
}
return 1;
return 0;
}
static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
static int udf_reconfigure(struct fs_context *fc)
{
struct udf_options uopt;
struct udf_options *uopt = fc->fs_private;
struct super_block *sb = fc->root->d_sb;
struct udf_sb_info *sbi = UDF_SB(sb);
int readonly = fc->sb_flags & SB_RDONLY;
int error = 0;
if (!(*flags & SB_RDONLY) && UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
if (!readonly && UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
return -EACCES;
sync_filesystem(sb);
uopt.flags = sbi->s_flags;
uopt.uid = sbi->s_uid;
uopt.gid = sbi->s_gid;
uopt.umask = sbi->s_umask;
uopt.fmode = sbi->s_fmode;
uopt.dmode = sbi->s_dmode;
uopt.nls_map = NULL;
if (!udf_parse_options(options, &uopt, true))
return -EINVAL;
write_lock(&sbi->s_cred_lock);
sbi->s_flags = uopt.flags;
sbi->s_uid = uopt.uid;
sbi->s_gid = uopt.gid;
sbi->s_umask = uopt.umask;
sbi->s_fmode = uopt.fmode;
sbi->s_dmode = uopt.dmode;
sbi->s_flags = uopt->flags;
sbi->s_uid = uopt->uid;
sbi->s_gid = uopt->gid;
sbi->s_umask = uopt->umask;
sbi->s_fmode = uopt->fmode;
sbi->s_dmode = uopt->dmode;
write_unlock(&sbi->s_cred_lock);
if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
if (readonly == sb_rdonly(sb))
goto out_unlock;
if (*flags & SB_RDONLY)
if (readonly)
udf_close_lvid(sb);
else
udf_open_lvid(sb);
@ -863,7 +895,7 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
int ret;
struct timestamp *ts;
outstr = kmalloc(128, GFP_NOFS);
outstr = kmalloc(128, GFP_KERNEL);
if (!outstr)
return -ENOMEM;
@ -1538,6 +1570,20 @@ out_bh:
return ret;
}
static bool udf_lvid_valid(struct super_block *sb,
struct logicalVolIntegrityDesc *lvid)
{
u32 parts, impuselen;
parts = le32_to_cpu(lvid->numOfPartitions);
impuselen = le32_to_cpu(lvid->lengthOfImpUse);
if (parts >= sb->s_blocksize || impuselen >= sb->s_blocksize ||
sizeof(struct logicalVolIntegrityDesc) + impuselen +
2 * parts * sizeof(u32) > sb->s_blocksize)
return false;
return true;
}
/*
* Find the prevailing Logical Volume Integrity Descriptor.
*/
@ -1548,7 +1594,6 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
struct udf_sb_info *sbi = UDF_SB(sb);
struct logicalVolIntegrityDesc *lvid;
int indirections = 0;
u32 parts, impuselen;
while (++indirections <= UDF_MAX_LVID_NESTING) {
final_bh = NULL;
@ -1570,32 +1615,27 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
if (!final_bh)
return;
brelse(sbi->s_lvid_bh);
sbi->s_lvid_bh = final_bh;
lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data;
if (udf_lvid_valid(sb, lvid)) {
brelse(sbi->s_lvid_bh);
sbi->s_lvid_bh = final_bh;
} else {
udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), "
"ignoring.\n",
le32_to_cpu(lvid->numOfPartitions),
le32_to_cpu(lvid->lengthOfImpUse));
}
if (lvid->nextIntegrityExt.extLength == 0)
goto check;
return;
loc = leea_to_cpu(lvid->nextIntegrityExt);
}
udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n",
UDF_MAX_LVID_NESTING);
out_err:
brelse(sbi->s_lvid_bh);
sbi->s_lvid_bh = NULL;
return;
check:
parts = le32_to_cpu(lvid->numOfPartitions);
impuselen = le32_to_cpu(lvid->lengthOfImpUse);
if (parts >= sb->s_blocksize || impuselen >= sb->s_blocksize ||
sizeof(struct logicalVolIntegrityDesc) + impuselen +
2 * parts * sizeof(u32) > sb->s_blocksize) {
udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), "
"ignoring.\n", parts, impuselen);
goto out_err;
}
}
/*
@ -1945,7 +1985,7 @@ static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
return -EINVAL;
}
sbi->s_last_block = uopt->lastblock;
if (!uopt->novrs) {
if (!UDF_QUERY_FLAG(sb, UDF_FLAG_NOVRS)) {
/* Check that it is NSR02 compliant */
nsr = udf_check_vsd(sb);
if (!nsr) {
@ -2083,23 +2123,15 @@ u64 lvid_get_unique_id(struct super_block *sb)
return ret;
}
static int udf_fill_super(struct super_block *sb, void *options, int silent)
static int udf_fill_super(struct super_block *sb, struct fs_context *fc)
{
int ret = -EINVAL;
struct inode *inode = NULL;
struct udf_options uopt;
struct udf_options *uopt = fc->fs_private;
struct kernel_lb_addr rootdir, fileset;
struct udf_sb_info *sbi;
bool lvid_open = false;
uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
/* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */
uopt.uid = make_kuid(current_user_ns(), overflowuid);
uopt.gid = make_kgid(current_user_ns(), overflowgid);
uopt.umask = 0;
uopt.fmode = UDF_INVALID_MODE;
uopt.dmode = UDF_INVALID_MODE;
uopt.nls_map = NULL;
int silent = fc->sb_flags & SB_SILENT;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
@ -2109,25 +2141,23 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
mutex_init(&sbi->s_alloc_mutex);
if (!udf_parse_options((char *)options, &uopt, false))
goto parse_options_failure;
fileset.logicalBlockNum = 0xFFFFFFFF;
fileset.partitionReferenceNum = 0xFFFF;
sbi->s_flags = uopt.flags;
sbi->s_uid = uopt.uid;
sbi->s_gid = uopt.gid;
sbi->s_umask = uopt.umask;
sbi->s_fmode = uopt.fmode;
sbi->s_dmode = uopt.dmode;
sbi->s_nls_map = uopt.nls_map;
sbi->s_flags = uopt->flags;
sbi->s_uid = uopt->uid;
sbi->s_gid = uopt->gid;
sbi->s_umask = uopt->umask;
sbi->s_fmode = uopt->fmode;
sbi->s_dmode = uopt->dmode;
sbi->s_nls_map = uopt->nls_map;
uopt->nls_map = NULL;
rwlock_init(&sbi->s_cred_lock);
if (uopt.session == 0xFFFFFFFF)
if (uopt->session == 0xFFFFFFFF)
sbi->s_session = udf_get_last_session(sb);
else
sbi->s_session = uopt.session;
sbi->s_session = uopt->session;
udf_debug("Multi-session=%d\n", sbi->s_session);
@ -2138,16 +2168,16 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
sb->s_magic = UDF_SUPER_MAGIC;
sb->s_time_gran = 1000;
if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
ret = udf_load_vrs(sb, &uopt, silent, &fileset);
if (uopt->flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
ret = udf_load_vrs(sb, uopt, silent, &fileset);
} else {
uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
while (uopt.blocksize <= 4096) {
ret = udf_load_vrs(sb, &uopt, silent, &fileset);
uopt->blocksize = bdev_logical_block_size(sb->s_bdev);
while (uopt->blocksize <= 4096) {
ret = udf_load_vrs(sb, uopt, silent, &fileset);
if (ret < 0) {
if (!silent && ret != -EACCES) {
pr_notice("Scanning with blocksize %u failed\n",
uopt.blocksize);
uopt->blocksize);
}
brelse(sbi->s_lvid_bh);
sbi->s_lvid_bh = NULL;
@ -2160,7 +2190,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
} else
break;
uopt.blocksize <<= 1;
uopt->blocksize <<= 1;
}
}
if (ret < 0) {
@ -2265,8 +2295,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
error_out:
iput(sbi->s_vat_inode);
parse_options_failure:
unload_nls(uopt.nls_map);
unload_nls(uopt->nls_map);
if (lvid_open)
udf_close_lvid(sb);
brelse(sbi->s_lvid_bh);
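
For readers unfamiliar with the mount API that UDF is converted to above,
this is the bare-bones shape of such a conversion. Every example_* name is
a placeholder (the real udf_* hooks are in the diff): the filesystem
registers init_fs_context() instead of a .mount callback, option parsing
moves to a per-parameter parse_param() hook driven by an fs_parameter_spec
table, and the superblock is built through get_tree_bdev().

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/errno.h>

static int example_fill_super(struct super_block *sb, struct fs_context *fc)
{
	return -EINVAL;		/* a real filesystem sets up @sb here */
}

static const struct fs_parameter_spec example_param_spec[] = {
	fsparam_flag("novrs", 0),	/* option number 0, for illustration */
	{}
};

static int example_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct fs_parse_result result;
	int opt = fs_parse(fc, example_param_spec, param, &result);

	if (opt < 0)
		return opt;
	/* record the parsed option in fc->fs_private here */
	return 0;
}

static int example_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, example_fill_super);
}

static const struct fs_context_operations example_context_ops = {
	.parse_param	= example_parse_param,
	.get_tree	= example_get_tree,
};

static int example_init_fs_context(struct fs_context *fc)
{
	fc->ops = &example_context_ops;
	return 0;
}

static struct file_system_type example_fstype = {
	.owner		= THIS_MODULE,
	.name		= "example",
	.init_fs_context = example_init_fs_context,
	.parameters	= example_param_spec,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};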


@ -23,6 +23,7 @@
#define UDF_FLAG_STRICT 5
#define UDF_FLAG_UNDELETE 6
#define UDF_FLAG_UNHIDE 7
#define UDF_FLAG_NOVRS 8
#define UDF_FLAG_UID_FORGET 11 /* save -1 for uid to disk */
#define UDF_FLAG_GID_FORGET 12
#define UDF_FLAG_UID_SET 13


@ -2179,7 +2179,7 @@ struct super_operations {
#ifdef CONFIG_QUOTA
ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
struct dquot **(*get_dquots)(struct inode *);
struct dquot __rcu **(*get_dquots)(struct inode *);
#endif
long (*nr_cached_objects)(struct super_block *,
struct shrink_control *);


@ -37,7 +37,7 @@ struct shmem_inode_info {
unsigned int fsflags; /* for FS_IOC_[SG]ETFLAGS */
atomic_t stop_eviction; /* hold when working on inode */
#ifdef CONFIG_TMPFS_QUOTA
struct dquot *i_dquot[MAXQUOTAS];
struct dquot __rcu *i_dquot[MAXQUOTAS];
#endif
struct inode vfs_inode;
};


@ -317,7 +317,7 @@ static void shmem_disable_quotas(struct super_block *sb)
dquot_quota_off(sb, type);
}
static struct dquot **shmem_get_dquots(struct inode *inode)
static struct dquot __rcu **shmem_get_dquots(struct inode *inode)
{
return SHMEM_I(inode)->i_dquot;
}