Merge rsync://oss.sgi.com/git/xfs-2.6

Linus Torvalds 2005-06-21 19:51:18 -07:00
commit 2a5a68b840
73 changed files with 959 additions and 1109 deletions

View file

@@ -61,12 +61,13 @@
* File wide globals
*/
STATIC kmem_cache_t *pagebuf_cache;
STATIC kmem_cache_t *pagebuf_zone;
STATIC kmem_shaker_t pagebuf_shake;
STATIC int pagebuf_daemon_wakeup(int, unsigned int);
STATIC int xfsbufd_wakeup(int, unsigned int);
STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
STATIC struct workqueue_struct *pagebuf_logio_workqueue;
STATIC struct workqueue_struct *pagebuf_dataio_workqueue;
STATIC struct workqueue_struct *xfslogd_workqueue;
STATIC struct workqueue_struct *xfsdatad_workqueue;
/*
* Pagebuf debugging
@@ -123,9 +124,9 @@ ktrace_t *pagebuf_trace_buf;
#define pagebuf_allocate(flags) \
kmem_zone_alloc(pagebuf_cache, pb_to_km(flags))
kmem_zone_alloc(pagebuf_zone, pb_to_km(flags))
#define pagebuf_deallocate(pb) \
kmem_zone_free(pagebuf_cache, (pb));
kmem_zone_free(pagebuf_zone, (pb));
/*
* Page Region interfaces.
@@ -425,7 +426,7 @@ _pagebuf_lookup_pages(
__FUNCTION__, gfp_mask);
XFS_STATS_INC(pb_page_retries);
pagebuf_daemon_wakeup(0, gfp_mask);
xfsbufd_wakeup(0, gfp_mask);
blk_congestion_wait(WRITE, HZ/50);
goto retry;
}
@@ -1136,8 +1137,8 @@ pagebuf_iodone(
if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) {
if (schedule) {
INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb);
queue_work(dataio ? pagebuf_dataio_workqueue :
pagebuf_logio_workqueue, &pb->pb_iodone_work);
queue_work(dataio ? xfsdatad_workqueue :
xfslogd_workqueue, &pb->pb_iodone_work);
} else {
pagebuf_iodone_work(pb);
}
@@ -1562,16 +1563,6 @@ xfs_free_buftarg(
kmem_free(btp, sizeof(*btp));
}
void
xfs_incore_relse(
xfs_buftarg_t *btp,
int delwri_only,
int wait)
{
invalidate_bdev(btp->pbr_bdev, 1);
truncate_inode_pages(btp->pbr_mapping, 0LL);
}
STATIC int
xfs_setsize_buftarg_flags(
xfs_buftarg_t *btp,
@@ -1742,27 +1733,27 @@ pagebuf_runall_queues(
}
/* Defines for pagebuf daemon */
STATIC DECLARE_COMPLETION(pagebuf_daemon_done);
STATIC struct task_struct *pagebuf_daemon_task;
STATIC int pagebuf_daemon_active;
STATIC int force_flush;
STATIC int force_sleep;
STATIC DECLARE_COMPLETION(xfsbufd_done);
STATIC struct task_struct *xfsbufd_task;
STATIC int xfsbufd_active;
STATIC int xfsbufd_force_flush;
STATIC int xfsbufd_force_sleep;
STATIC int
pagebuf_daemon_wakeup(
xfsbufd_wakeup(
int priority,
unsigned int mask)
{
if (force_sleep)
if (xfsbufd_force_sleep)
return 0;
force_flush = 1;
xfsbufd_force_flush = 1;
barrier();
wake_up_process(pagebuf_daemon_task);
wake_up_process(xfsbufd_task);
return 0;
}
STATIC int
pagebuf_daemon(
xfsbufd(
void *data)
{
struct list_head tmp;
@@ -1774,17 +1765,17 @@ pagebuf_daemon(
daemonize("xfsbufd");
current->flags |= PF_MEMALLOC;
pagebuf_daemon_task = current;
pagebuf_daemon_active = 1;
xfsbufd_task = current;
xfsbufd_active = 1;
barrier();
INIT_LIST_HEAD(&tmp);
do {
if (unlikely(current->flags & PF_FREEZE)) {
force_sleep = 1;
xfsbufd_force_sleep = 1;
refrigerator(PF_FREEZE);
} else {
force_sleep = 0;
xfsbufd_force_sleep = 0;
}
set_current_state(TASK_INTERRUPTIBLE);
@@ -1797,7 +1788,7 @@ pagebuf_daemon(
ASSERT(pb->pb_flags & PBF_DELWRI);
if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) {
if (!force_flush &&
if (!xfsbufd_force_flush &&
time_before(jiffies,
pb->pb_queuetime + age)) {
pagebuf_unlock(pb);
@@ -1824,10 +1815,10 @@ pagebuf_daemon(
if (as_list_len > 0)
purge_addresses();
force_flush = 0;
} while (pagebuf_daemon_active);
xfsbufd_force_flush = 0;
} while (xfsbufd_active);
complete_and_exit(&pagebuf_daemon_done, 0);
complete_and_exit(&xfsbufd_done, 0);
}
/*
@@ -1844,8 +1835,8 @@ xfs_flush_buftarg(
xfs_buf_t *pb, *n;
int pincount = 0;
pagebuf_runall_queues(pagebuf_dataio_workqueue);
pagebuf_runall_queues(pagebuf_logio_workqueue);
pagebuf_runall_queues(xfsdatad_workqueue);
pagebuf_runall_queues(xfslogd_workqueue);
INIT_LIST_HEAD(&tmp);
spin_lock(&pbd_delwrite_lock);
@@ -1898,43 +1889,43 @@ xfs_flush_buftarg(
}
STATIC int
pagebuf_daemon_start(void)
xfs_buf_daemons_start(void)
{
int rval;
int error = -ENOMEM;
pagebuf_logio_workqueue = create_workqueue("xfslogd");
if (!pagebuf_logio_workqueue)
return -ENOMEM;
xfslogd_workqueue = create_workqueue("xfslogd");
if (!xfslogd_workqueue)
goto out;
pagebuf_dataio_workqueue = create_workqueue("xfsdatad");
if (!pagebuf_dataio_workqueue) {
destroy_workqueue(pagebuf_logio_workqueue);
return -ENOMEM;
}
xfsdatad_workqueue = create_workqueue("xfsdatad");
if (!xfsdatad_workqueue)
goto out_destroy_xfslogd_workqueue;
rval = kernel_thread(pagebuf_daemon, NULL, CLONE_FS|CLONE_FILES);
if (rval < 0) {
destroy_workqueue(pagebuf_logio_workqueue);
destroy_workqueue(pagebuf_dataio_workqueue);
}
error = kernel_thread(xfsbufd, NULL, CLONE_FS|CLONE_FILES);
if (error < 0)
goto out_destroy_xfsdatad_workqueue;
return 0;
return rval;
out_destroy_xfsdatad_workqueue:
destroy_workqueue(xfsdatad_workqueue);
out_destroy_xfslogd_workqueue:
destroy_workqueue(xfslogd_workqueue);
out:
return error;
}
/*
* pagebuf_daemon_stop
*
* Note: do not mark as __exit, it is called from pagebuf_terminate.
*/
STATIC void
pagebuf_daemon_stop(void)
xfs_buf_daemons_stop(void)
{
pagebuf_daemon_active = 0;
xfsbufd_active = 0;
barrier();
wait_for_completion(&pagebuf_daemon_done);
wait_for_completion(&xfsbufd_done);
destroy_workqueue(pagebuf_logio_workqueue);
destroy_workqueue(pagebuf_dataio_workqueue);
destroy_workqueue(xfslogd_workqueue);
destroy_workqueue(xfsdatad_workqueue);
}
/*
@@ -1944,27 +1935,37 @@ pagebuf_daemon_stop(void)
int __init
pagebuf_init(void)
{
pagebuf_cache = kmem_cache_create("xfs_buf_t", sizeof(xfs_buf_t), 0,
SLAB_HWCACHE_ALIGN, NULL, NULL);
if (pagebuf_cache == NULL) {
printk("XFS: couldn't init xfs_buf_t cache\n");
pagebuf_terminate();
return -ENOMEM;
}
int error = -ENOMEM;
pagebuf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
if (!pagebuf_zone)
goto out;
#ifdef PAGEBUF_TRACE
pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP);
#endif
pagebuf_daemon_start();
error = xfs_buf_daemons_start();
if (error)
goto out_free_buf_zone;
pagebuf_shake = kmem_shake_register(pagebuf_daemon_wakeup);
if (pagebuf_shake == NULL) {
pagebuf_terminate();
return -ENOMEM;
pagebuf_shake = kmem_shake_register(xfsbufd_wakeup);
if (!pagebuf_shake) {
error = -ENOMEM;
goto out_stop_daemons;
}
return 0;
out_stop_daemons:
xfs_buf_daemons_stop();
out_free_buf_zone:
#ifdef PAGEBUF_TRACE
ktrace_free(pagebuf_trace_buf);
#endif
kmem_zone_destroy(pagebuf_zone);
out:
return error;
}
@@ -1976,12 +1977,12 @@ pagebuf_init(void)
void
pagebuf_terminate(void)
{
pagebuf_daemon_stop();
xfs_buf_daemons_stop();
#ifdef PAGEBUF_TRACE
ktrace_free(pagebuf_trace_buf);
#endif
kmem_zone_destroy(pagebuf_cache);
kmem_zone_destroy(pagebuf_zone);
kmem_shake_deregister(pagebuf_shake);
}
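The reworked startup path above is the kernel's usual goto-unwind idiom: each acquisition gets a matching cleanup label, failures jump to the label that releases everything acquired so far, and the labels run in reverse acquisition order. A minimal user-space sketch of the pattern, not part of this commit; the names are hypothetical and stand in for the zones, workqueues and threads of xfs_buf_daemons_start():

    #include <stdlib.h>

    /* Sketch of the goto-unwind error-handling idiom, not XFS code. */
    static int init_three_resources(void **a, void **b, void **c)
    {
        int error = -1;         /* stands in for -ENOMEM */

        *a = malloc(16);
        if (!*a)
            goto out;
        *b = malloc(16);
        if (!*b)
            goto out_free_a;
        *c = malloc(16);
        if (!*c)
            goto out_free_b;
        return 0;               /* success: nothing to unwind */

    out_free_b:                 /* unwind strictly in reverse order */
        free(*b);
    out_free_a:
        free(*a);
    out:
        return error;
    }

Each new resource adds exactly one label, so no failure path duplicates cleanup code; that is what turns the early-return cleanup of pagebuf_daemon_start() into the out_destroy_* labels above.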

View file

@@ -576,7 +576,6 @@ extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int);
extern void xfs_free_buftarg(xfs_buftarg_t *, int);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
extern void xfs_incore_relse(xfs_buftarg_t *, int, int);
extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
#define xfs_getsize_buftarg(buftarg) \

View file

@@ -57,7 +57,9 @@
#include <linux/smp_lock.h>
static struct vm_operations_struct linvfs_file_vm_ops;
#ifdef CONFIG_XFS_DMAPI
static struct vm_operations_struct linvfs_dmapi_file_vm_ops;
#endif
STATIC inline ssize_t
__linvfs_read(
@@ -388,6 +390,14 @@ linvfs_readdir(
return -error;
}
#ifdef CONFIG_XFS_DMAPI
STATIC void
linvfs_mmap_close(
struct vm_area_struct *vma)
{
xfs_dm_mm_put(vma);
}
#endif /* CONFIG_XFS_DMAPI */
STATIC int
linvfs_file_mmap(
@@ -399,16 +409,19 @@ linvfs_file_mmap(
vattr_t va = { .va_mask = XFS_AT_UPDATIME };
int error;
vma->vm_ops = &linvfs_file_vm_ops;
if (vp->v_vfsp->vfs_flag & VFS_DMI) {
xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
error = -XFS_SEND_MMAP(mp, vma, 0);
if (error)
return error;
#ifdef CONFIG_XFS_DMAPI
vma->vm_ops = &linvfs_dmapi_file_vm_ops;
#endif
}
vma->vm_ops = &linvfs_file_vm_ops;
VOP_SETATTR(vp, &va, XFS_AT_UPDATIME, NULL, error);
if (!error)
vn_revalidate(vp); /* update Linux inode flags */
@@ -609,7 +622,15 @@ struct file_operations linvfs_dir_operations = {
static struct vm_operations_struct linvfs_file_vm_ops = {
.nopage = filemap_nopage,
.populate = filemap_populate,
};
#ifdef CONFIG_XFS_DMAPI
static struct vm_operations_struct linvfs_dmapi_file_vm_ops = {
.close = linvfs_mmap_close,
.nopage = filemap_nopage,
.populate = filemap_populate,
#ifdef HAVE_VMOP_MPROTECT
.mprotect = linvfs_mprotect,
#endif
};
#endif /* CONFIG_XFS_DMAPI */

View file

@@ -1174,7 +1174,8 @@ xfs_ioc_xattr(
switch (cmd) {
case XFS_IOC_FSGETXATTR: {
va.va_mask = XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS;
va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \
XFS_AT_NEXTENTS | XFS_AT_PROJID;
VOP_GETATTR(vp, &va, 0, NULL, error);
if (error)
return -error;
@@ -1182,6 +1183,7 @@ xfs_ioc_xattr(
fa.fsx_xflags = va.va_xflags;
fa.fsx_extsize = va.va_extsize;
fa.fsx_nextents = va.va_nextents;
fa.fsx_projid = va.va_projid;
if (copy_to_user(arg, &fa, sizeof(fa)))
return -XFS_ERROR(EFAULT);
@@ -1196,9 +1198,10 @@ xfs_ioc_xattr(
if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
attr_flags |= ATTR_NONBLOCK;
va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE;
va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID;
va.va_xflags = fa.fsx_xflags;
va.va_extsize = fa.fsx_extsize;
va.va_projid = fa.fsx_projid;
VOP_SETATTR(vp, &va, attr_flags, NULL, error);
if (!error)
@@ -1207,7 +1210,8 @@ xfs_ioc_xattr(
}
case XFS_IOC_FSGETXATTRA: {
va.va_mask = XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_ANEXTENTS;
va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \
XFS_AT_ANEXTENTS | XFS_AT_PROJID;
VOP_GETATTR(vp, &va, 0, NULL, error);
if (error)
return -error;
@@ -1215,6 +1219,7 @@ xfs_ioc_xattr(
fa.fsx_xflags = va.va_xflags;
fa.fsx_extsize = va.va_extsize;
fa.fsx_nextents = va.va_anextents;
fa.fsx_projid = va.va_projid;
if (copy_to_user(arg, &fa, sizeof(fa)))
return -XFS_ERROR(EFAULT);

View file

@@ -230,8 +230,10 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
* field (see the QCMD macro in quota.h). These macros help keep the
* code portable - they are not visible from the syscall interface.
*/
#define Q_XSETGQLIM XQM_CMD(0x8) /* set groups disk limits */
#define Q_XGETGQUOTA XQM_CMD(0x9) /* get groups disk limits */
#define Q_XSETGQLIM XQM_CMD(8) /* set groups disk limits */
#define Q_XGETGQUOTA XQM_CMD(9) /* get groups disk limits */
#define Q_XSETPQLIM XQM_CMD(10) /* set projects disk limits */
#define Q_XGETPQUOTA XQM_CMD(11) /* get projects disk limits */
/* IRIX uses a dynamic sizing algorithm (ndquot = 200 + numprocs*2) */
/* we may well need to fine-tune this if it ever becomes an issue. */
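For context on the renumbering above: the Q_X* values are built with XQM_CMD() and combined with a quota type by QCMD() when the syscall is issued, so each command/type pair packs into one integer. A sketch assuming the conventional definitions from the standard quota headers (the values here are illustrative, not authoritative):

    /* Assumed shapes, following the standard Linux quota headers. */
    #define SUBCMDSHIFT     8
    #define SUBCMDMASK      0x00ff
    #define QCMD(cmd, type) (((cmd) << SUBCMDSHIFT) | ((type) & SUBCMDMASK))
    #define XQM_CMD(x)      (('X' << 8) + (x))  /* 'X' tags XFS commands */

    #define Q_XGETPQUOTA    XQM_CMD(11)         /* as defined above */
    #define PRJQUOTA        2                   /* assumed project quota type */

    /* A caller would then issue, e.g.:
     *      quotactl(QCMD(Q_XGETPQUOTA, PRJQUOTA), special, prid, addr);
     */

Moving from 0x8/0x9 to plain 8/9 keeps the new 10 and 11 entries in one consistent base instead of mixing hex and decimal.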

View file

@@ -209,30 +209,6 @@ xfs_iozero(
return (-status);
}
/*
* xfs_inval_cached_pages
*
* This routine is responsible for keeping direct I/O and buffered I/O
* somewhat coherent. From here we make sure that we're at least
* temporarily holding the inode I/O lock exclusively and then call
* the page cache to flush and invalidate any cached pages. If there
* are no cached pages this routine will be very quick.
*/
void
xfs_inval_cached_pages(
vnode_t *vp,
xfs_iocore_t *io,
xfs_off_t offset,
int write,
int relock)
{
if (VN_CACHED(vp)) {
xfs_inval_cached_trace(io, offset, -1, ctooff(offtoct(offset)), -1);
VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(offset)), -1, FI_REMAPF_LOCKED);
}
}
ssize_t /* bytes read, or (-) error */
xfs_read(
bhv_desc_t *bdp,
@@ -304,10 +280,11 @@ xfs_read(
if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
!(ioflags & IO_INVIS)) {
vrwlock_t locktype = VRWLOCK_READ;
int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
BHV_TO_VNODE(bdp), *offset, size,
FILP_DELAY_FLAG(file), &locktype);
dmflags, &locktype);
if (ret) {
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
goto unlock_isem;
@@ -867,11 +844,15 @@ xfs_write(
!(ioflags & IO_INVIS)) {
xfs_rwunlock(bdp, locktype);
if (need_isem)
up(&inode->i_sem);
error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
0, 0, 0); /* Delay flag intentionally unused */
if (error)
goto out_unlock_isem;
goto out_nounlocks;
if (need_isem)
down(&inode->i_sem);
xfs_rwlock(bdp, locktype);
pos = xip->i_d.di_size;
ret = 0;
@@ -986,6 +967,7 @@ xfs_write(
out_unlock_isem:
if (need_isem)
up(&inode->i_sem);
out_nounlocks:
return -error;
}
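The ENOSPC hunk above has to drop both locks before calling out to the DMAPI event handler, which can block indefinitely, and must reacquire them in the original order before retrying; the new out_nounlocks label exists because the error leg returns with no locks held. A user-space sketch of that drop/callout/retake shape, with hypothetical names standing in for the XFS locks:

    #include <pthread.h>

    struct write_ctx {
        pthread_mutex_t iolock;  /* stands in for the XFS rwlock */
        pthread_mutex_t isem;    /* stands in for inode->i_sem */
        int need_isem;
    };

    /* Returns 0 when the caller may retry the write; on callout failure
     * it returns nonzero with no locks held (the out_nounlocks case). */
    static int wait_for_space(struct write_ctx *c, int (*send_event)(void))
    {
        pthread_mutex_unlock(&c->iolock);
        if (c->need_isem)
            pthread_mutex_unlock(&c->isem);

        if (send_event())
            return 1;            /* error: nothing left to drop */

        if (c->need_isem)
            pthread_mutex_lock(&c->isem);   /* retake in original order */
        pthread_mutex_lock(&c->iolock);
        return 0;
    }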

View file

@@ -94,8 +94,6 @@ extern int xfs_bdstrat_cb(struct xfs_buf *);
extern int xfs_zero_eof(struct vnode *, struct xfs_iocore *, xfs_off_t,
xfs_fsize_t, xfs_fsize_t);
extern void xfs_inval_cached_pages(struct vnode *, struct xfs_iocore *,
xfs_off_t, int, int);
extern ssize_t xfs_read(struct bhv_desc *, struct kiocb *,
const struct iovec *, unsigned int,
loff_t *, int, struct cred *);

View file

@@ -590,8 +590,10 @@ linvfs_sync_super(
int error;
int flags = SYNC_FSDATA;
if (wait)
flags |= SYNC_WAIT;
if (unlikely(sb->s_frozen == SB_FREEZE_WRITE))
flags = SYNC_QUIESCE;
else
flags = SYNC_FSDATA | (wait ? SYNC_WAIT : 0);
VFS_SYNC(vfsp, flags, NULL, error);
sb->s_dirt = 0;
@@ -701,7 +703,8 @@ linvfs_getxquota(
struct vfs *vfsp = LINVFS_GET_VFS(sb);
int error, getmode;
getmode = (type == GRPQUOTA) ? Q_XGETGQUOTA : Q_XGETQUOTA;
getmode = (type == USRQUOTA) ? Q_XGETQUOTA :
((type == GRPQUOTA) ? Q_XGETGQUOTA : Q_XGETPQUOTA);
VFS_QUOTACTL(vfsp, getmode, id, (caddr_t)fdq, error);
return -error;
}
@@ -716,7 +719,8 @@ linvfs_setxquota(
struct vfs *vfsp = LINVFS_GET_VFS(sb);
int error, setmode;
setmode = (type == GRPQUOTA) ? Q_XSETGQLIM : Q_XSETQLIM;
setmode = (type == USRQUOTA) ? Q_XSETQLIM :
((type == GRPQUOTA) ? Q_XSETGQLIM : Q_XSETPQLIM);
VFS_QUOTACTL(vfsp, setmode, id, (caddr_t)fdq, error);
return -error;
}
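The get/set paths above now map the three VFS quota types onto the matching XFS commands; the nested ternary keeps the mapping a single expression. Written out as a switch, the same mapping reads as follows (stand-in constants, just to show the shape; the real Q_X* values are the XQM_CMD() ones defined earlier):

    enum { USRQUOTA, GRPQUOTA, PRJQUOTA };  /* VFS quota type values */
    enum { Q_GETQ, Q_GETGQ, Q_GETPQ };      /* stand-ins for Q_XGET*QUOTA */

    /* Pick the XFS getquota command for a VFS quota type. */
    static int getmode_for_type(int type)
    {
        switch (type) {
        case USRQUOTA:  return Q_GETQ;
        case GRPQUOTA:  return Q_GETGQ;
        default:        return Q_GETPQ;     /* PRJQUOTA */
        }
    }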

View file

@@ -107,6 +107,7 @@ typedef enum {
#define SYNC_FSDATA 0x0020 /* flush fs data (e.g. superblocks) */
#define SYNC_REFCACHE 0x0040 /* prune some of the nfs ref cache */
#define SYNC_REMOUNT 0x0080 /* remount readonly, no dummy LRs */
#define SYNC_QUIESCE 0x0100 /* quiesce filesystem for a snapshot */
typedef int (*vfs_mount_t)(bhv_desc_t *,
struct xfs_mount_args *, struct cred *);

View file

@@ -411,13 +411,13 @@ vn_remove(
/* 0 */ (void *)(__psint_t)(vk), \
/* 1 */ (void *)(s), \
/* 2 */ (void *)(__psint_t) line, \
/* 3 */ (void *)(vn_count(vp)), \
/* 3 */ (void *)(__psint_t)(vn_count(vp)), \
/* 4 */ (void *)(ra), \
/* 5 */ (void *)(__psunsigned_t)(vp)->v_flag, \
/* 6 */ (void *)(__psint_t)current_cpu(), \
/* 7 */ (void *)(__psint_t)current_pid(), \
/* 8 */ (void *)__return_address, \
/* 9 */ 0, 0, 0, 0, 0, 0, 0)
/* 9 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL)
/*
* Vnode tracing code.

View file

@@ -426,7 +426,7 @@ typedef struct vattr {
u_long va_extsize; /* file extent size */
u_long va_nextents; /* number of extents in file */
u_long va_anextents; /* number of attr extents in file */
int va_projid; /* project id */
prid_t va_projid; /* project id */
} vattr_t;
/*

View file

@@ -101,7 +101,7 @@ int xfs_dqerror_mod = 33;
* is the d_id field. The idea is to fill in the entire q_core
* when we read in the on disk dquot.
*/
xfs_dquot_t *
STATIC xfs_dquot_t *
xfs_qm_dqinit(
xfs_mount_t *mp,
xfs_dqid_t id,
@@ -286,7 +286,9 @@ xfs_qm_adjust_dqlimits(
* We also return 0 as the values of the timers in Q_GETQUOTA calls, when
* enforcement's off.
* In contrast, warnings are a little different in that they don't
* 'automatically' get started when limits get exceeded.
* 'automatically' get started when limits get exceeded. They do
* get reset to zero, however, when we find the count to be under
* the soft limit (they are only ever set non-zero via userspace).
*/
void
xfs_qm_adjust_dqtimers(
@@ -315,6 +317,8 @@ xfs_qm_adjust_dqtimers(
INT_GET(d->d_blk_hardlimit, ARCH_CONVERT)))) {
INT_SET(d->d_btimer, ARCH_CONVERT,
get_seconds() + XFS_QI_BTIMELIMIT(mp));
} else {
d->d_bwarns = 0;
}
} else {
if ((!d->d_blk_softlimit ||
@@ -336,6 +340,8 @@ xfs_qm_adjust_dqtimers(
INT_GET(d->d_ino_hardlimit, ARCH_CONVERT)))) {
INT_SET(d->d_itimer, ARCH_CONVERT,
get_seconds() + XFS_QI_ITIMELIMIT(mp));
} else {
d->d_iwarns = 0;
}
} else {
if ((!d->d_ino_softlimit ||
@@ -357,6 +363,8 @@ xfs_qm_adjust_dqtimers(
INT_GET(d->d_rtb_hardlimit, ARCH_CONVERT)))) {
INT_SET(d->d_rtbtimer, ARCH_CONVERT,
get_seconds() + XFS_QI_RTBTIMELIMIT(mp));
} else {
d->d_rtbwarns = 0;
}
} else {
if ((!d->d_rtb_softlimit ||
@@ -370,68 +378,6 @@ xfs_qm_adjust_dqtimers(
}
}
/*
* Increment or reset warnings of a given dquot.
*/
int
xfs_qm_dqwarn(
xfs_disk_dquot_t *d,
uint flags)
{
int warned;
/*
* root's limits are not real limits.
*/
if (!d->d_id)
return (0);
warned = 0;
if (INT_GET(d->d_blk_softlimit, ARCH_CONVERT) &&
(INT_GET(d->d_bcount, ARCH_CONVERT) >=
INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) {
if (flags & XFS_QMOPT_DOWARN) {
INT_MOD(d->d_bwarns, ARCH_CONVERT, +1);
warned++;
}
} else {
if (!d->d_blk_softlimit ||
(INT_GET(d->d_bcount, ARCH_CONVERT) <
INT_GET(d->d_blk_softlimit, ARCH_CONVERT))) {
d->d_bwarns = 0;
}
}
if (INT_GET(d->d_ino_softlimit, ARCH_CONVERT) > 0 &&
(INT_GET(d->d_icount, ARCH_CONVERT) >=
INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) {
if (flags & XFS_QMOPT_DOWARN) {
INT_MOD(d->d_iwarns, ARCH_CONVERT, +1);
warned++;
}
} else {
if (!d->d_ino_softlimit ||
(INT_GET(d->d_icount, ARCH_CONVERT) <
INT_GET(d->d_ino_softlimit, ARCH_CONVERT))) {
d->d_iwarns = 0;
}
}
#ifdef QUOTADEBUG
if (INT_GET(d->d_iwarns, ARCH_CONVERT))
cmn_err(CE_DEBUG,
"--------@@Inode warnings running : %Lu >= %Lu",
INT_GET(d->d_icount, ARCH_CONVERT),
INT_GET(d->d_ino_softlimit, ARCH_CONVERT));
if (INT_GET(d->d_bwarns, ARCH_CONVERT))
cmn_err(CE_DEBUG,
"--------@@Blks warnings running : %Lu >= %Lu",
INT_GET(d->d_bcount, ARCH_CONVERT),
INT_GET(d->d_blk_softlimit, ARCH_CONVERT));
#endif
return (warned);
}
/*
* initialize a buffer full of dquots and log the whole thing
*/
@@ -461,9 +407,9 @@ xfs_qm_init_dquot_blk(
for (i = 0; i < XFS_QM_DQPERBLK(mp); i++, d++, curid++)
xfs_qm_dqinit_core(curid, type, d);
xfs_trans_dquot_buf(tp, bp,
type & XFS_DQ_USER ?
XFS_BLI_UDQUOT_BUF :
XFS_BLI_GDQUOT_BUF);
(type & XFS_DQ_USER ? XFS_BLI_UDQUOT_BUF :
((type & XFS_DQ_PROJ) ? XFS_BLI_PDQUOT_BUF :
XFS_BLI_GDQUOT_BUF)));
xfs_trans_log_buf(tp, bp, 0, BBTOB(XFS_QI_DQCHUNKLEN(mp)) - 1);
}
@@ -544,8 +490,7 @@ xfs_qm_dqalloc(
* the entire thing.
*/
xfs_qm_init_dquot_blk(tp, mp, INT_GET(dqp->q_core.d_id, ARCH_CONVERT),
dqp->dq_flags & (XFS_DQ_USER|XFS_DQ_GROUP),
bp);
dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
if ((error = xfs_bmap_finish(&tp, &flist, firstblock, &committed))) {
goto error1;
@@ -675,8 +620,7 @@ xfs_qm_dqtobp(
/*
* A simple sanity check in case we got a corrupted dquot...
*/
if (xfs_qm_dqcheck(ddq, id,
dqp->dq_flags & (XFS_DQ_USER|XFS_DQ_GROUP),
if (xfs_qm_dqcheck(ddq, id, dqp->dq_flags & XFS_DQ_ALLTYPES,
flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN),
"dqtobp")) {
if (!(flags & XFS_QMOPT_DQREPAIR)) {
@@ -953,8 +897,8 @@ int
xfs_qm_dqget(
xfs_mount_t *mp,
xfs_inode_t *ip, /* locked inode (optional) */
xfs_dqid_t id, /* gid or uid, depending on type */
uint type, /* UDQUOT or GDQUOT */
xfs_dqid_t id, /* uid/projid/gid depending on type */
uint type, /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
uint flags, /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
xfs_dquot_t **O_dqpp) /* OUT : locked incore dquot */
{
@@ -965,6 +909,7 @@ xfs_qm_dqget(
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
(! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
(! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
return (ESRCH);
}
@@ -983,7 +928,9 @@ xfs_qm_dqget(
again:
#ifdef DEBUG
ASSERT(type == XFS_DQ_USER || type == XFS_DQ_GROUP);
ASSERT(type == XFS_DQ_USER ||
type == XFS_DQ_PROJ ||
type == XFS_DQ_GROUP);
if (ip) {
ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
if (type == XFS_DQ_USER)
@@ -1306,8 +1253,8 @@ xfs_qm_dqflush(
return (error);
}
if (xfs_qm_dqcheck(&dqp->q_core, INT_GET(ddqp->d_id, ARCH_CONVERT), 0, XFS_QMOPT_DOWARN,
"dqflush (incore copy)")) {
if (xfs_qm_dqcheck(&dqp->q_core, INT_GET(ddqp->d_id, ARCH_CONVERT),
0, XFS_QMOPT_DOWARN, "dqflush (incore copy)")) {
xfs_force_shutdown(dqp->q_mount, XFS_CORRUPT_INCORE);
return XFS_ERROR(EIO);
}
@@ -1459,7 +1406,8 @@ xfs_dqlock2(
{
if (d1 && d2) {
ASSERT(d1 != d2);
if (INT_GET(d1->q_core.d_id, ARCH_CONVERT) > INT_GET(d2->q_core.d_id, ARCH_CONVERT)) {
if (INT_GET(d1->q_core.d_id, ARCH_CONVERT) >
INT_GET(d2->q_core.d_id, ARCH_CONVERT)) {
xfs_dqlock(d2);
xfs_dqlock(d1);
} else {
@@ -1582,8 +1530,7 @@ xfs_qm_dqprint(xfs_dquot_t *dqp)
cmn_err(CE_DEBUG, "-----------KERNEL DQUOT----------------");
cmn_err(CE_DEBUG, "---- dquotID = %d",
(int)INT_GET(dqp->q_core.d_id, ARCH_CONVERT));
cmn_err(CE_DEBUG, "---- type = %s",
XFS_QM_ISUDQ(dqp) ? "USR" : "GRP");
cmn_err(CE_DEBUG, "---- type = %s", DQFLAGTO_TYPESTR(dqp));
cmn_err(CE_DEBUG, "---- fs = 0x%p", dqp->q_mount);
cmn_err(CE_DEBUG, "---- blkno = 0x%x", (int) dqp->q_blkno);
cmn_err(CE_DEBUG, "---- boffset = 0x%x", (int) dqp->q_bufoffset);
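The xfs_dqlock2() hunk above is the standard defence against ABBA deadlock: whenever two dquots must be held at once, they are locked in ascending d_id order, so any two threads locking the same pair agree on the order. The reformatting there does not change that logic; a self-contained user-space sketch of the idea:

    #include <pthread.h>

    struct dq {
        unsigned int id;
        pthread_mutex_t lock;
    };

    /* Lock two dquots in ascending-id order; with a global order, no
     * pair of threads can each hold one lock while waiting for the other. */
    static void dqlock2(struct dq *d1, struct dq *d2)
    {
        if (d1->id > d2->id) {
            pthread_mutex_lock(&d2->lock);
            pthread_mutex_lock(&d1->lock);
        } else {
            pthread_mutex_lock(&d1->lock);
            pthread_mutex_lock(&d2->lock);
        }
    }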

View file

@@ -114,25 +114,18 @@ typedef struct xfs_dquot {
#define XFS_DQHOLD(dqp) ((dqp)->q_nrefs++)
/*
* Quota Accounting flags
* Quota Accounting/Enforcement flags
*/
#define XFS_ALL_QUOTA_ACCT (XFS_UQUOTA_ACCT | XFS_GQUOTA_ACCT)
#define XFS_ALL_QUOTA_ENFD (XFS_UQUOTA_ENFD | XFS_GQUOTA_ENFD)
#define XFS_ALL_QUOTA_CHKD (XFS_UQUOTA_CHKD | XFS_GQUOTA_CHKD)
#define XFS_ALL_QUOTA_ACTV (XFS_UQUOTA_ACTIVE | XFS_GQUOTA_ACTIVE)
#define XFS_ALL_QUOTA_ACCT_ENFD (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\
XFS_GQUOTA_ACCT|XFS_GQUOTA_ENFD)
#define XFS_ALL_QUOTA_ACCT \
(XFS_UQUOTA_ACCT | XFS_GQUOTA_ACCT | XFS_PQUOTA_ACCT)
#define XFS_ALL_QUOTA_ENFD (XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD)
#define XFS_ALL_QUOTA_CHKD (XFS_UQUOTA_CHKD | XFS_OQUOTA_CHKD)
#define XFS_IS_QUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ACCT)
#define XFS_IS_UQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_UQUOTA_ACCT)
#define XFS_IS_GQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_GQUOTA_ACCT)
/*
* Quota Limit Enforcement flags
*/
#define XFS_IS_QUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ACCT)
#define XFS_IS_QUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ENFD)
#define XFS_IS_UQUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_UQUOTA_ENFD)
#define XFS_IS_GQUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_GQUOTA_ENFD)
#define XFS_IS_UQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_UQUOTA_ACCT)
#define XFS_IS_PQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_PQUOTA_ACCT)
#define XFS_IS_GQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_GQUOTA_ACCT)
#ifdef DEBUG
static inline int
@@ -167,6 +160,8 @@ XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
#define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp))
#define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY)
#define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER)
#define XFS_QM_ISPDQ(dqp) ((dqp)->dq_flags & XFS_DQ_PROJ)
#define XFS_QM_ISGDQ(dqp) ((dqp)->dq_flags & XFS_DQ_GROUP)
#define XFS_DQ_TO_QINF(dqp) ((dqp)->q_mount->m_quotainfo)
#define XFS_DQ_TO_QIP(dqp) (XFS_QM_ISUDQ(dqp) ? \
XFS_DQ_TO_QINF(dqp)->qi_uquotaip : \
@@ -174,7 +169,7 @@ XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
#define XFS_IS_THIS_QUOTA_OFF(d) (! (XFS_QM_ISUDQ(d) ? \
(XFS_IS_UQUOTA_ON((d)->q_mount)) : \
(XFS_IS_GQUOTA_ON((d)->q_mount))))
(XFS_IS_OQUOTA_ON((d)->q_mount))))
#ifdef XFS_DQUOT_TRACE
/*
@@ -211,7 +206,6 @@ extern void xfs_qm_adjust_dqtimers(xfs_mount_t *,
xfs_disk_dquot_t *);
extern void xfs_qm_adjust_dqlimits(xfs_mount_t *,
xfs_disk_dquot_t *);
extern int xfs_qm_dqwarn(xfs_disk_dquot_t *, uint);
extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *,
xfs_dqid_t, uint, uint, xfs_dquot_t **);
extern void xfs_qm_dqput(xfs_dquot_t *);
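The macro rework above encodes the key design decision of project quota support: user quota can coexist with exactly one "other" quota, group or project, never both, so the two share the i_gdquot slot and the combined XFS_OQUOTA_ENFD/XFS_OQUOTA_CHKD state. A simplified sketch of how the combined tests fall out of that (flag values hypothetical, not the real xfs_quota.h ones):

    /* Hypothetical bit values, for illustration only. */
    #define UQ_ACCT     0x01    /* user quota accounting */
    #define GQ_ACCT     0x02    /* group quota accounting */
    #define PQ_ACCT     0x04    /* project quota accounting */
    #define ALL_ACCT    (UQ_ACCT | GQ_ACCT | PQ_ACCT)

    /* Group and project are mutually exclusive at mount time, so one
     * "other quota" test covers whichever of the two is active. */
    #define IS_OQUOTA_ON(qflags)     (((qflags) & (GQ_ACCT | PQ_ACCT)) != 0)
    #define IS_QUOTA_RUNNING(qflags) (((qflags) & ALL_ACCT) != 0)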

View file

@@ -428,7 +428,7 @@ xfs_qm_dquot_logitem_committing(
/*
* This is the ops vector for dquots
*/
struct xfs_item_ops xfs_dquot_item_ops = {
STATIC struct xfs_item_ops xfs_dquot_item_ops = {
.iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_size,
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_qm_dquot_logitem_format,
@@ -646,7 +646,7 @@ xfs_qm_qoffend_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
return;
}
struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
STATIC struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
.iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_qm_qoff_logitem_format,
@@ -669,7 +669,7 @@ struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
/*
* This is the ops vector shared by all quotaoff-start log items.
*/
struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
STATIC struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
.iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_qm_qoff_logitem_format,

View file

@@ -81,12 +81,18 @@ struct xfs_qm *xfs_Gqm;
kmem_zone_t *qm_dqzone;
kmem_zone_t *qm_dqtrxzone;
kmem_shaker_t xfs_qm_shaker;
STATIC kmem_shaker_t xfs_qm_shaker;
STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int);
STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
STATIC void xfs_qm_freelist_init(xfs_frlist_t *);
STATIC void xfs_qm_freelist_destroy(xfs_frlist_t *);
STATIC int xfs_qm_mplist_nowait(xfs_mount_t *);
STATIC int xfs_qm_dqhashlock_nowait(xfs_dquot_t *);
STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int xfs_qm_shake(int, unsigned int);
#ifdef DEBUG
@@ -184,7 +190,7 @@ xfs_Gqm_init(void)
/*
* Destroy the global quota manager when its reference count goes to zero.
*/
void
STATIC void
xfs_qm_destroy(
struct xfs_qm *xqm)
{
@@ -304,9 +310,9 @@ xfs_qm_mount_quotainit(
uint flags)
{
/*
* User or group quotas has to be on.
* User, projects or group quotas has to be on.
*/
ASSERT(flags & (XFSMNT_UQUOTA | XFSMNT_GQUOTA));
ASSERT(flags & (XFSMNT_UQUOTA | XFSMNT_PQUOTA | XFSMNT_GQUOTA));
/*
* Initialize the flags in the mount structure. From this point
@@ -324,7 +330,11 @@ xfs_qm_mount_quotainit(
if (flags & XFSMNT_GQUOTA) {
mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
if (flags & XFSMNT_GQUOTAENF)
mp->m_qflags |= XFS_GQUOTA_ENFD;
mp->m_qflags |= XFS_OQUOTA_ENFD;
} else if (flags & XFSMNT_PQUOTA) {
mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
if (flags & XFSMNT_PQUOTAENF)
mp->m_qflags |= XFS_OQUOTA_ENFD;
}
}
@@ -357,11 +367,11 @@ xfs_qm_mount_quotas(
/*
* If a file system had quotas running earlier, but decided to
* mount without -o quota/uquota/gquota options, revoke the
* mount without -o uquota/pquota/gquota options, revoke the
* quotachecked license, and bail out.
*/
if (! XFS_IS_QUOTA_ON(mp) &&
(mp->m_sb.sb_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT))) {
(mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT)) {
mp->m_qflags = 0;
goto write_changes;
}
@@ -509,7 +519,7 @@ xfs_qm_unmount_quotas(
* Flush all dquots of the given file system to disk. The dquots are
* _not_ purged from memory here, just their data written to disk.
*/
int
STATIC int
xfs_qm_dqflush_all(
xfs_mount_t *mp,
int flags)
@@ -613,7 +623,7 @@ xfs_qm_detach_gdquots(
STATIC int
xfs_qm_dqpurge_int(
xfs_mount_t *mp,
uint flags) /* QUOTAOFF/UMOUNTING/UQUOTA/GQUOTA */
uint flags) /* QUOTAOFF/UMOUNTING/UQUOTA/PQUOTA/GQUOTA */
{
xfs_dquot_t *dqp;
uint dqtype;
@@ -625,6 +635,7 @@ xfs_qm_dqpurge_int(
return (0);
dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0;
dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0;
dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0;
xfs_qm_mplist_lock(mp);
@@ -734,11 +745,11 @@ xfs_qm_dqattach_one(
/*
* udqhint is the i_udquot field in inode, and is non-NULL only
* when the type arg is XFS_DQ_GROUP. Its purpose is to save a
* when the type arg is group/project. Its purpose is to save a
* lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
* the user dquot.
*/
ASSERT(!udqhint || type == XFS_DQ_GROUP);
ASSERT(!udqhint || type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
if (udqhint && !dolock)
xfs_dqlock(udqhint);
@@ -897,8 +908,8 @@ xfs_qm_dqattach_grouphint(
/*
* Given a locked inode, attach dquot(s) to it, taking UQUOTAON / GQUOTAON
* in to account.
* Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
* into account.
* If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
* If XFS_QMOPT_DQLOCK, the dquot(s) will be returned locked. This option pretty
* much made this code a complete mess, but it has been pretty useful.
@@ -937,8 +948,13 @@ xfs_qm_dqattach(
nquotas++;
}
ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
if (XFS_IS_GQUOTA_ON(mp)) {
error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
if (XFS_IS_OQUOTA_ON(mp)) {
error = XFS_IS_GQUOTA_ON(mp) ?
xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
flags & XFS_QMOPT_DQALLOC,
flags & XFS_QMOPT_DQLOCK,
ip->i_udquot, &ip->i_gdquot) :
xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
flags & XFS_QMOPT_DQALLOC,
flags & XFS_QMOPT_DQLOCK,
ip->i_udquot, &ip->i_gdquot);
@@ -989,7 +1005,7 @@ xfs_qm_dqattach(
}
if (XFS_IS_UQUOTA_ON(mp))
ASSERT(ip->i_udquot);
if (XFS_IS_GQUOTA_ON(mp))
if (XFS_IS_OQUOTA_ON(mp))
ASSERT(ip->i_gdquot);
}
#endif
@@ -1018,13 +1034,13 @@ xfs_qm_dqdetach(
ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
if (ip->i_udquot)
xfs_dqtrace_entry_ino(ip->i_udquot, "DQDETTACH", ip);
if (ip->i_udquot) {
xfs_dqtrace_entry_ino(ip->i_udquot, "DQDETTACH", ip);
xfs_qm_dqrele(ip->i_udquot);
ip->i_udquot = NULL;
}
if (ip->i_gdquot) {
xfs_dqtrace_entry_ino(ip->i_gdquot, "DQDETTACH", ip);
xfs_qm_dqrele(ip->i_gdquot);
ip->i_gdquot = NULL;
}
@@ -1149,7 +1165,7 @@ xfs_qm_sync(
* This initializes all the quota information that's kept in the
* mount structure
*/
int
STATIC int
xfs_qm_init_quotainfo(
xfs_mount_t *mp)
{
@@ -1202,8 +1218,9 @@ xfs_qm_init_quotainfo(
* and group quotas, at least not at this point.
*/
error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)0,
(XFS_IS_UQUOTA_RUNNING(mp)) ?
XFS_DQ_USER : XFS_DQ_GROUP,
XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
(XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
XFS_DQ_PROJ),
XFS_QMOPT_DQSUSER|XFS_QMOPT_DOWARN,
&dqp);
if (! error) {
@@ -1234,6 +1251,10 @@ xfs_qm_init_quotainfo(
INT_GET(ddqp->d_iwarns, ARCH_CONVERT) ?
INT_GET(ddqp->d_iwarns, ARCH_CONVERT) :
XFS_QM_IWARNLIMIT;
qinf->qi_rtbwarnlimit =
INT_GET(ddqp->d_rtbwarns, ARCH_CONVERT) ?
INT_GET(ddqp->d_rtbwarns, ARCH_CONVERT) :
XFS_QM_RTBWARNLIMIT;
qinf->qi_bhardlimit =
INT_GET(ddqp->d_blk_hardlimit, ARCH_CONVERT);
qinf->qi_bsoftlimit =
@@ -1259,6 +1280,7 @@ xfs_qm_init_quotainfo(
qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
}
return (0);
@@ -1366,13 +1388,20 @@ xfs_qm_dqget_noattach(
ASSERT(udqp);
}
if (XFS_IS_GQUOTA_ON(mp)) {
if (XFS_IS_OQUOTA_ON(mp)) {
ASSERT(ip->i_gdquot == NULL);
if (udqp)
xfs_dqunlock(udqp);
if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_gid, XFS_DQ_GROUP,
XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
&gdqp))) {
error = XFS_IS_GQUOTA_ON(mp) ?
xfs_qm_dqget(mp, ip,
ip->i_d.di_gid, XFS_DQ_GROUP,
XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
&gdqp) :
xfs_qm_dqget(mp, ip,
ip->i_d.di_projid, XFS_DQ_PROJ,
XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
&gdqp);
if (error) {
if (udqp)
xfs_qm_dqrele(udqp);
ASSERT(error != ESRCH);
@@ -1521,8 +1550,10 @@ xfs_qm_reset_dqcounts(
INT_SET(ddq->d_rtbcount, ARCH_CONVERT, 0ULL);
INT_SET(ddq->d_btimer, ARCH_CONVERT, (time_t)0);
INT_SET(ddq->d_itimer, ARCH_CONVERT, (time_t)0);
INT_SET(ddq->d_rtbtimer, ARCH_CONVERT, (time_t)0);
INT_SET(ddq->d_bwarns, ARCH_CONVERT, 0UL);
INT_SET(ddq->d_iwarns, ARCH_CONVERT, 0UL);
INT_SET(ddq->d_rtbwarns, ARCH_CONVERT, 0UL);
ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
}
@@ -1541,11 +1572,14 @@ xfs_qm_dqiter_bufs(
int error;
int notcommitted;
int incr;
int type;
ASSERT(blkcnt > 0);
notcommitted = 0;
incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ?
XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt;
type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
error = 0;
/*
@@ -1564,9 +1598,7 @@ xfs_qm_dqiter_bufs(
if (error)
break;
(void) xfs_qm_reset_dqcounts(mp, bp, firstid,
flags & XFS_QMOPT_UQUOTA ?
XFS_DQ_USER : XFS_DQ_GROUP);
(void) xfs_qm_reset_dqcounts(mp, bp, firstid, type);
xfs_bdwrite(mp, bp);
/*
* goto the next block.
@@ -1578,7 +1610,7 @@ xfs_qm_dqiter_bufs(
}
/*
* Iterate over all allocated USR/GRP dquots in the system, calling a
* Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
* caller supplied function for every chunk of dquots that we find.
*/
STATIC int
@@ -1849,7 +1881,7 @@ xfs_qm_dqusage_adjust(
xfs_qm_quotacheck_dqadjust(udqp, nblks, rtblks);
xfs_qm_dqput(udqp);
}
if (XFS_IS_GQUOTA_ON(mp)) {
if (XFS_IS_OQUOTA_ON(mp)) {
ASSERT(gdqp);
xfs_qm_quotacheck_dqadjust(gdqp, nblks, rtblks);
xfs_qm_dqput(gdqp);
@@ -1898,7 +1930,7 @@ xfs_qm_quotacheck(
cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname);
/*
* First we go thru all the dquots on disk, USR and GRP, and reset
* First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
* their counters to zero. We need a clean slate.
* We don't log our changes till later.
*/
@@ -1909,9 +1941,10 @@ xfs_qm_quotacheck(
}
if ((gip = XFS_QI_GQIP(mp))) {
if ((error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA)))
if ((error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA)))
goto error_return;
flags |= XFS_GQUOTA_CHKD;
flags |= XFS_OQUOTA_CHKD;
}
do {
@@ -1938,7 +1971,7 @@ xfs_qm_quotacheck(
if (error) {
xfs_qm_dqpurge_all(mp,
XFS_QMOPT_UQUOTA|XFS_QMOPT_GQUOTA|
XFS_QMOPT_QUOTAOFF);
XFS_QMOPT_PQUOTA|XFS_QMOPT_QUOTAOFF);
goto error_return;
}
/*
@@ -1961,7 +1994,7 @@ xfs_qm_quotacheck(
* quotachecked status, since we won't be doing accounting for
* that type anymore.
*/
mp->m_qflags &= ~(XFS_GQUOTA_CHKD | XFS_UQUOTA_CHKD);
mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD);
mp->m_qflags |= flags;
XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++");
@@ -2013,7 +2046,7 @@ xfs_qm_init_quotainos(
0, 0, &uip, 0)))
return XFS_ERROR(error);
}
if (XFS_IS_GQUOTA_ON(mp) &&
if (XFS_IS_OQUOTA_ON(mp) &&
mp->m_sb.sb_gquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_gquotino > 0);
if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
@@ -2043,10 +2076,12 @@ xfs_qm_init_quotainos(
flags &= ~XFS_QMOPT_SBVERSION;
}
if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
if ((error = xfs_qm_qino_alloc(mp, &gip,
sbflags | XFS_SB_GQUOTINO,
flags | XFS_QMOPT_GQUOTA))) {
if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
flags |= (XFS_IS_GQUOTA_ON(mp) ?
XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
error = xfs_qm_qino_alloc(mp, &gip,
sbflags | XFS_SB_GQUOTINO, flags);
if (error) {
if (uip)
VN_RELE(XFS_ITOV(uip));
@@ -2452,6 +2487,7 @@ xfs_qm_vop_dqalloc(
xfs_inode_t *ip,
uid_t uid,
gid_t gid,
prid_t prid,
uint flags,
xfs_dquot_t **O_udqpp,
xfs_dquot_t **O_gdqpp)
@@ -2483,8 +2519,7 @@ xfs_qm_vop_dqalloc(
}
uq = gq = NULL;
if ((flags & XFS_QMOPT_UQUOTA) &&
XFS_IS_UQUOTA_ON(mp)) {
if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
if (ip->i_d.di_uid != uid) {
/*
* What we need is the dquot that has this uid, and
@@ -2522,8 +2557,7 @@ xfs_qm_vop_dqalloc(
xfs_dqunlock(uq);
}
}
if ((flags & XFS_QMOPT_GQUOTA) &&
XFS_IS_GQUOTA_ON(mp)) {
if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
if (ip->i_d.di_gid != gid) {
xfs_iunlock(ip, lockflags);
if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
@@ -2546,6 +2580,29 @@ xfs_qm_vop_dqalloc(
XFS_DQHOLD(gq);
xfs_dqunlock(gq);
}
} else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
if (ip->i_d.di_projid != prid) {
xfs_iunlock(ip, lockflags);
if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
XFS_DQ_PROJ,
XFS_QMOPT_DQALLOC |
XFS_QMOPT_DOWARN,
&gq))) {
if (uq)
xfs_qm_dqrele(uq);
ASSERT(error != ENOENT);
return (error);
}
xfs_dqunlock(gq);
lockflags = XFS_ILOCK_SHARED;
xfs_ilock(ip, lockflags);
} else {
ASSERT(ip->i_gdquot);
gq = ip->i_gdquot;
xfs_dqlock(gq);
XFS_DQHOLD(gq);
xfs_dqunlock(gq);
}
}
if (uq)
xfs_dqtrace_entry_ino(uq, "DQALLOC", ip);
@@ -2574,6 +2631,9 @@ xfs_qm_vop_chown(
xfs_dquot_t *newdq)
{
xfs_dquot_t *prevdq;
uint bfield = XFS_IS_REALTIME_INODE(ip) ?
XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
@@ -2582,20 +2642,12 @@ xfs_qm_vop_chown(
ASSERT(prevdq);
ASSERT(prevdq != newdq);
xfs_trans_mod_dquot(tp, prevdq,
XFS_TRANS_DQ_BCOUNT,
-(ip->i_d.di_nblocks));
xfs_trans_mod_dquot(tp, prevdq,
XFS_TRANS_DQ_ICOUNT,
-1);
xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
/* the sparkling new dquot */
xfs_trans_mod_dquot(tp, newdq,
XFS_TRANS_DQ_BCOUNT,
ip->i_d.di_nblocks);
xfs_trans_mod_dquot(tp, newdq,
XFS_TRANS_DQ_ICOUNT,
1);
xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
/*
* Take an extra reference, because the inode
@@ -2611,7 +2663,7 @@ xfs_qm_vop_chown(
}
/*
* Quota reservations for setattr(AT_UID|AT_GID).
* Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
*/
int
xfs_qm_vop_chown_reserve(
@@ -2623,7 +2675,7 @@ xfs_qm_vop_chown_reserve(
{
int error;
xfs_mount_t *mp;
uint delblks;
uint delblks, blkflags;
xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq;
ASSERT(XFS_ISLOCKED_INODE(ip));
@@ -2632,6 +2684,8 @@ xfs_qm_vop_chown_reserve(
delblks = ip->i_delayed_blks;
delblksudq = delblksgdq = unresudq = unresgdq = NULL;
blkflags = XFS_IS_REALTIME_INODE(ip) ?
XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
if (XFS_IS_UQUOTA_ON(mp) && udqp &&
ip->i_d.di_uid != (uid_t)INT_GET(udqp->q_core.d_id, ARCH_CONVERT)) {
@@ -2646,18 +2700,22 @@ xfs_qm_vop_chown_reserve(
unresudq = ip->i_udquot;
}
}
if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
ip->i_d.di_gid != INT_GET(gdqp->q_core.d_id, ARCH_CONVERT)) {
delblksgdq = gdqp;
if (delblks) {
ASSERT(ip->i_gdquot);
unresgdq = ip->i_gdquot;
if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
if ((XFS_IS_GQUOTA_ON(ip->i_mount) && ip->i_d.di_gid !=
INT_GET(gdqp->q_core.d_id, ARCH_CONVERT)) ||
(XFS_IS_PQUOTA_ON(ip->i_mount) && ip->i_d.di_projid !=
INT_GET(gdqp->q_core.d_id, ARCH_CONVERT))) {
delblksgdq = gdqp;
if (delblks) {
ASSERT(ip->i_gdquot);
unresgdq = ip->i_gdquot;
}
}
}
if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
flags | XFS_QMOPT_RES_REGBLKS)))
flags | blkflags)))
return (error);
/*
@@ -2674,11 +2732,11 @@ xfs_qm_vop_chown_reserve(
ASSERT(unresudq || unresgdq);
if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
flags | XFS_QMOPT_RES_REGBLKS)))
flags | blkflags)))
return (error);
xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
XFS_QMOPT_RES_REGBLKS);
blkflags);
}
return (0);
@@ -2751,7 +2809,7 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
}
/* ------------- list stuff -----------------*/
void
STATIC void
xfs_qm_freelist_init(xfs_frlist_t *ql)
{
ql->qh_next = ql->qh_prev = (xfs_dquot_t *) ql;
@@ -2760,7 +2818,7 @@ xfs_qm_freelist_init(xfs_frlist_t *ql)
ql->qh_nelems = 0;
}
void
STATIC void
xfs_qm_freelist_destroy(xfs_frlist_t *ql)
{
xfs_dquot_t *dqp, *nextdqp;
@@ -2786,7 +2844,7 @@ xfs_qm_freelist_destroy(xfs_frlist_t *ql)
ASSERT(ql->qh_nelems == 0);
}
void
STATIC void
xfs_qm_freelist_insert(xfs_frlist_t *ql, xfs_dquot_t *dq)
{
dq->dq_flnext = ql->qh_next;
@@ -2816,7 +2874,7 @@ xfs_qm_freelist_append(xfs_frlist_t *ql, xfs_dquot_t *dq)
xfs_qm_freelist_insert((xfs_frlist_t *)ql->qh_prev, dq);
}
int
STATIC int
xfs_qm_dqhashlock_nowait(
xfs_dquot_t *dqp)
{
@@ -2836,7 +2894,7 @@ xfs_qm_freelist_lock_nowait(
return (locked);
}
int
STATIC int
xfs_qm_mplist_nowait(
xfs_mount_t *mp)
{

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -133,8 +133,9 @@ typedef struct xfs_quotainfo {
time_t qi_btimelimit; /* limit for blks timer */
time_t qi_itimelimit; /* limit for inodes timer */
time_t qi_rtbtimelimit;/* limit for rt blks timer */
xfs_qwarncnt_t qi_bwarnlimit; /* limit for num warnings */
xfs_qwarncnt_t qi_iwarnlimit; /* limit for num warnings */
xfs_qwarncnt_t qi_bwarnlimit; /* limit for blks warnings */
xfs_qwarncnt_t qi_iwarnlimit; /* limit for inodes warnings */
xfs_qwarncnt_t qi_rtbwarnlimit;/* limit for rt blks warnings */
mutex_t qi_quotaofflock;/* to serialize quotaoff */
xfs_filblks_t qi_dqchunklen; /* # BBs in a chunk of dqs */
uint qi_dqperchunk; /* # ondisk dqs in above chunk */
@@ -176,6 +177,7 @@ typedef struct xfs_dquot_acct {
#define XFS_QM_BWARNLIMIT 5
#define XFS_QM_IWARNLIMIT 5
#define XFS_QM_RTBWARNLIMIT 5
#define XFS_QM_LOCK(xqm) (mutex_lock(&xqm##_lock, PINOD))
#define XFS_QM_UNLOCK(xqm) (mutex_unlock(&xqm##_lock))
@@ -184,7 +186,6 @@ typedef struct xfs_dquot_acct {
extern void xfs_mount_reset_sbqflags(xfs_mount_t *);
extern int xfs_qm_init_quotainfo(xfs_mount_t *);
extern void xfs_qm_destroy_quotainfo(xfs_mount_t *);
extern int xfs_qm_mount_quotas(xfs_mount_t *, int);
extern void xfs_qm_mount_quotainit(xfs_mount_t *, uint);
@@ -203,7 +204,7 @@ extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint);
/* vop stuff */
extern int xfs_qm_vop_dqalloc(xfs_mount_t *, xfs_inode_t *,
uid_t, gid_t, uint,
uid_t, gid_t, prid_t, uint,
xfs_dquot_t **, xfs_dquot_t **);
extern void xfs_qm_vop_dqattach_and_dqmod_newinode(
xfs_trans_t *, xfs_inode_t *,
@@ -215,14 +216,9 @@ extern int xfs_qm_vop_chown_reserve(xfs_trans_t *, xfs_inode_t *,
xfs_dquot_t *, xfs_dquot_t *, uint);
/* list stuff */
extern void xfs_qm_freelist_init(xfs_frlist_t *);
extern void xfs_qm_freelist_destroy(xfs_frlist_t *);
extern void xfs_qm_freelist_insert(xfs_frlist_t *, xfs_dquot_t *);
extern void xfs_qm_freelist_append(xfs_frlist_t *, xfs_dquot_t *);
extern void xfs_qm_freelist_unlink(xfs_dquot_t *);
extern int xfs_qm_freelist_lock_nowait(xfs_qm_t *);
extern int xfs_qm_mplist_nowait(xfs_mount_t *);
extern int xfs_qm_dqhashlock_nowait(xfs_dquot_t *);
/* system call interface */
extern int xfs_qm_quotactl(bhv_desc_t *, int, int, xfs_caddr_t);

View file

@@ -71,10 +71,13 @@
#define MNTOPT_NOQUOTA "noquota" /* no quotas */
#define MNTOPT_USRQUOTA "usrquota" /* user quota enabled */
#define MNTOPT_GRPQUOTA "grpquota" /* group quota enabled */
#define MNTOPT_PRJQUOTA "prjquota" /* project quota enabled */
#define MNTOPT_UQUOTA "uquota" /* user quota (IRIX variant) */
#define MNTOPT_GQUOTA "gquota" /* group quota (IRIX variant) */
#define MNTOPT_PQUOTA "pquota" /* project quota (IRIX variant) */
#define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */
#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */
#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */
STATIC int
@@ -109,6 +112,14 @@ xfs_qm_parseargs(
args->flags |= XFSMNT_UQUOTA;
args->flags &= ~XFSMNT_UQUOTAENF;
referenced = 1;
} else if (!strcmp(this_char, MNTOPT_PQUOTA) ||
!strcmp(this_char, MNTOPT_PRJQUOTA)) {
args->flags |= XFSMNT_PQUOTA | XFSMNT_PQUOTAENF;
referenced = 1;
} else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) {
args->flags |= XFSMNT_PQUOTA;
args->flags &= ~XFSMNT_PQUOTAENF;
referenced = 1;
} else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
!strcmp(this_char, MNTOPT_GRPQUOTA)) {
args->flags |= XFSMNT_GQUOTA | XFSMNT_GQUOTAENF;
@@ -127,6 +138,12 @@ xfs_qm_parseargs(
*this_char++ = ',';
}
if ((args->flags & XFSMNT_GQUOTA) && (args->flags & XFSMNT_PQUOTA)) {
cmn_err(CE_WARN,
"XFS: cannot mount with both project and group quota");
return XFS_ERROR(EINVAL);
}
PVFS_PARSEARGS(BHV_NEXT(bhv), options, args, update, error);
if (!error && !referenced)
bhv_remove_vfsops(bhvtovfs(bhv), VFS_POSITION_QM);
@@ -148,13 +165,19 @@ xfs_qm_showargs(
seq_puts(m, "," MNTOPT_UQUOTANOENF);
}
if (mp->m_qflags & XFS_PQUOTA_ACCT) {
(mp->m_qflags & XFS_OQUOTA_ENFD) ?
seq_puts(m, "," MNTOPT_PRJQUOTA) :
seq_puts(m, "," MNTOPT_PQUOTANOENF);
}
if (mp->m_qflags & XFS_GQUOTA_ACCT) {
(mp->m_qflags & XFS_GQUOTA_ENFD) ?
(mp->m_qflags & XFS_OQUOTA_ENFD) ?
seq_puts(m, "," MNTOPT_GRPQUOTA) :
seq_puts(m, "," MNTOPT_GQUOTANOENF);
}
if (!(mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT)))
if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
seq_puts(m, "," MNTOPT_NOQUOTA);
PVFS_SHOWARGS(BHV_NEXT(bhv), m, error);
@@ -171,7 +194,7 @@ xfs_qm_mount(
struct xfs_mount *mp = XFS_VFSTOM(vfsp);
int error;
if (args->flags & (XFSMNT_UQUOTA | XFSMNT_GQUOTA))
if (args->flags & (XFSMNT_UQUOTA | XFSMNT_GQUOTA | XFSMNT_PQUOTA))
xfs_qm_mount_quotainit(mp, args->flags);
PVFS_MOUNT(BHV_NEXT(bhv), args, cr, error);
return error;
@@ -255,16 +278,17 @@ xfs_qm_newmount(
uint *quotaflags)
{
uint quotaondisk;
uint uquotaondisk = 0, gquotaondisk = 0;
uint uquotaondisk = 0, gquotaondisk = 0, pquotaondisk = 0;
*quotaflags = 0;
*needquotamount = B_FALSE;
quotaondisk = XFS_SB_VERSION_HASQUOTA(&mp->m_sb) &&
mp->m_sb.sb_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT);
(mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT);
if (quotaondisk) {
uquotaondisk = mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT;
pquotaondisk = mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT;
gquotaondisk = mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT;
}
@@ -277,13 +301,16 @@ xfs_qm_newmount(
if (((uquotaondisk && !XFS_IS_UQUOTA_ON(mp)) ||
(!uquotaondisk && XFS_IS_UQUOTA_ON(mp)) ||
(pquotaondisk && !XFS_IS_PQUOTA_ON(mp)) ||
(!pquotaondisk && XFS_IS_PQUOTA_ON(mp)) ||
(gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) ||
(!gquotaondisk && XFS_IS_GQUOTA_ON(mp))) &&
(!gquotaondisk && XFS_IS_OQUOTA_ON(mp))) &&
xfs_dev_is_read_only(mp, "changing quota state")) {
cmn_err(CE_WARN,
"XFS: please mount with%s%s%s.",
"XFS: please mount with%s%s%s%s.",
(!quotaondisk ? "out quota" : ""),
(uquotaondisk ? " usrquota" : ""),
(pquotaondisk ? " prjquota" : ""),
(gquotaondisk ? " grpquota" : ""));
return XFS_ERROR(EPERM);
}
@@ -359,7 +386,7 @@ xfs_qm_dqrele_null(
}
struct xfs_qmops xfs_qmcore_xfs = {
STATIC struct xfs_qmops xfs_qmcore_xfs = {
.xfs_qminit = xfs_qm_newmount,
.xfs_qmdone = xfs_qm_unmount_quotadestroy,
.xfs_qmmount = xfs_qm_endmount,

View file

@@ -118,40 +118,41 @@ xfs_qm_quotactl(
* The following commands are valid even when quotaoff.
*/
switch (cmd) {
case Q_XQUOTARM:
/*
* truncate quota files. quota must be off.
* Truncate quota files. quota must be off.
*/
case Q_XQUOTARM:
if (XFS_IS_QUOTA_ON(mp) || addr == NULL)
return XFS_ERROR(EINVAL);
if (vfsp->vfs_flag & VFS_RDONLY)
return XFS_ERROR(EROFS);
return (xfs_qm_scall_trunc_qfiles(mp,
xfs_qm_import_qtype_flags(*(uint *)addr)));
case Q_XGETQSTAT:
/*
* Get quota status information.
*/
case Q_XGETQSTAT:
return (xfs_qm_scall_getqstat(mp, (fs_quota_stat_t *)addr));
case Q_XQUOTAON:
/*
* QUOTAON for root f/s and quota enforcement on others..
* Quota accounting for non-root f/s's must be turned on
* at mount time.
* QUOTAON - enabling quota enforcement.
* Quota accounting must be turned on at mount time.
*/
case Q_XQUOTAON:
if (addr == NULL)
return XFS_ERROR(EINVAL);
if (vfsp->vfs_flag & VFS_RDONLY)
return XFS_ERROR(EROFS);
return (xfs_qm_scall_quotaon(mp,
xfs_qm_import_flags(*(uint *)addr)));
case Q_XQUOTAOFF:
case Q_XQUOTAOFF:
if (vfsp->vfs_flag & VFS_RDONLY)
return XFS_ERROR(EROFS);
break;
default:
default:
break;
}
@@ -159,7 +160,7 @@ xfs_qm_quotactl(
return XFS_ERROR(ESRCH);
switch (cmd) {
case Q_XQUOTAOFF:
case Q_XQUOTAOFF:
if (vfsp->vfs_flag & VFS_RDONLY)
return XFS_ERROR(EROFS);
error = xfs_qm_scall_quotaoff(mp,
@@ -167,42 +168,39 @@ xfs_qm_quotactl(
B_FALSE);
break;
/*
* Defaults to XFS_GETUQUOTA.
*/
case Q_XGETQUOTA:
case Q_XGETQUOTA:
error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_USER,
(fs_disk_quota_t *)addr);
break;
/*
* Set limits, both hard and soft. Defaults to Q_SETUQLIM.
*/
case Q_XSETQLIM:
case Q_XGETGQUOTA:
error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_GROUP,
(fs_disk_quota_t *)addr);
break;
case Q_XGETPQUOTA:
error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_PROJ,
(fs_disk_quota_t *)addr);
break;
case Q_XSETQLIM:
if (vfsp->vfs_flag & VFS_RDONLY)
return XFS_ERROR(EROFS);
error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_USER,
(fs_disk_quota_t *)addr);
break;
case Q_XSETGQLIM:
case Q_XSETGQLIM:
if (vfsp->vfs_flag & VFS_RDONLY)
return XFS_ERROR(EROFS);
error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_GROUP,
(fs_disk_quota_t *)addr);
break;
case Q_XGETGQUOTA:
error = xfs_qm_scall_getquota(mp, (xfs_dqid_t)id, XFS_DQ_GROUP,
(fs_disk_quota_t *)addr);
case Q_XSETPQLIM:
if (vfsp->vfs_flag & VFS_RDONLY)
return XFS_ERROR(EROFS);
error = xfs_qm_scall_setqlim(mp, (xfs_dqid_t)id, XFS_DQ_PROJ,
(fs_disk_quota_t *)addr);
break;
/*
* Quotas are entirely undefined after quotaoff in XFS quotas.
* For instance, there's no way to set limits when quotaoff.
*/
default:
default:
error = XFS_ERROR(EINVAL);
break;
}
@@ -286,8 +284,12 @@ xfs_qm_scall_quotaoff(
}
if (flags & XFS_GQUOTA_ACCT) {
dqtype |= XFS_QMOPT_GQUOTA;
flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
inactivate_flags |= XFS_GQUOTA_ACTIVE;
} else if (flags & XFS_PQUOTA_ACCT) {
dqtype |= XFS_QMOPT_PQUOTA;
flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
inactivate_flags |= XFS_PQUOTA_ACTIVE;
}
/*
@@ -364,7 +366,8 @@ xfs_qm_scall_quotaoff(
/*
* If quotas is completely disabled, close shop.
*/
if ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_ALL) {
if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) ||
((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) {
mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
xfs_qm_destroy_quotainfo(mp);
return (0);
@@ -378,7 +381,7 @@ xfs_qm_scall_quotaoff(
XFS_PURGE_INODE(XFS_QI_UQIP(mp));
XFS_QI_UQIP(mp) = NULL;
}
if ((dqtype & XFS_QMOPT_GQUOTA) && XFS_QI_GQIP(mp)) {
if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && XFS_QI_GQIP(mp)) {
XFS_PURGE_INODE(XFS_QI_GQIP(mp));
XFS_QI_GQIP(mp) = NULL;
}
@@ -411,7 +414,8 @@ xfs_qm_scall_trunc_qfiles(
}
}
if ((flags & XFS_DQ_GROUP) && mp->m_sb.sb_gquotino != NULLFSINO) {
if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) &&
mp->m_sb.sb_gquotino != NULLFSINO) {
error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0);
if (! error) {
(void) xfs_truncate_file(mp, qip);
@ -434,7 +438,7 @@ xfs_qm_scall_quotaon(
uint flags)
{
int error;
unsigned long s;
unsigned long s;
uint qf;
uint accflags;
__int64_t sbflags;
@@ -468,9 +472,13 @@ xfs_qm_scall_quotaon(
(mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
(flags & XFS_UQUOTA_ENFD))
||
((flags & XFS_PQUOTA_ACCT) == 0 &&
(mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
(flags & XFS_OQUOTA_ENFD))
||
((flags & XFS_GQUOTA_ACCT) == 0 &&
(mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
(flags & XFS_GQUOTA_ENFD))) {
(flags & XFS_OQUOTA_ENFD))) {
qdprintk("Can't enforce without acct, flags=%x sbflags=%x\n",
flags, mp->m_sb.sb_qflags);
return XFS_ERROR(EINVAL);
@@ -504,6 +512,10 @@ xfs_qm_scall_quotaon(
*/
if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
(mp->m_qflags & XFS_UQUOTA_ACCT)) ||
((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
(mp->m_qflags & XFS_PQUOTA_ACCT)) ||
((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
(mp->m_qflags & XFS_GQUOTA_ACCT)) ||
(flags & XFS_ALL_QUOTA_ENFD) == 0)
return (0);
@@ -521,7 +533,6 @@ xfs_qm_scall_quotaon(
}
/*
* Return quota status information, such as uquota-off, enforcements, etc.
*/
@@ -606,7 +617,8 @@ xfs_qm_scall_setqlim(
if (!capable(CAP_SYS_ADMIN))
return XFS_ERROR(EPERM);
if ((newlim->d_fieldmask & (FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK)) == 0)
if ((newlim->d_fieldmask &
(FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK|FS_DQ_WARNS_MASK)) == 0)
return (0);
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
@@ -691,12 +703,23 @@ xfs_qm_scall_setqlim(
qdprintk("ihard %Ld < isoft %Ld\n", hard, soft);
}
/*
* Update warnings counter(s) if requested
*/
if (newlim->d_fieldmask & FS_DQ_BWARNS)
INT_SET(ddq->d_bwarns, ARCH_CONVERT, newlim->d_bwarns);
if (newlim->d_fieldmask & FS_DQ_IWARNS)
INT_SET(ddq->d_iwarns, ARCH_CONVERT, newlim->d_iwarns);
if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
INT_SET(ddq->d_rtbwarns, ARCH_CONVERT, newlim->d_rtbwarns);
if (id == 0) {
/*
* Timelimits for the super user set the relative time
* the other users can be over quota for this file system.
* If it is zero a default is used. Ditto for the default
* soft and hard limit values (already done, above).
* soft and hard limit values (already done, above), and
* for warnings.
*/
if (newlim->d_fieldmask & FS_DQ_BTIMER) {
mp->m_quotainfo->qi_btimelimit = newlim->d_btimer;
@@ -710,7 +733,13 @@ xfs_qm_scall_setqlim(
mp->m_quotainfo->qi_rtbtimelimit = newlim->d_rtbtimer;
INT_SET(ddq->d_rtbtimer, ARCH_CONVERT, newlim->d_rtbtimer);
}
} else /* if (XFS_IS_QUOTA_ENFORCED(mp)) */ {
if (newlim->d_fieldmask & FS_DQ_BWARNS)
mp->m_quotainfo->qi_bwarnlimit = newlim->d_bwarns;
if (newlim->d_fieldmask & FS_DQ_IWARNS)
mp->m_quotainfo->qi_iwarnlimit = newlim->d_iwarns;
if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
mp->m_quotainfo->qi_rtbwarnlimit = newlim->d_rtbwarns;
} else {
/*
* If the user is now over quota, start the timelimit.
* The user will not be 'warned'.
@@ -776,9 +805,9 @@ xfs_qm_log_quotaoff_end(
xfs_qoff_logitem_t *startqoff,
uint flags)
{
xfs_trans_t *tp;
xfs_trans_t *tp;
int error;
xfs_qoff_logitem_t *qoffi;
xfs_qoff_logitem_t *qoffi;
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);
@@ -928,18 +957,26 @@ xfs_qm_export_dquot(
STATIC uint
xfs_qm_import_qtype_flags(
uint uflags)
uint uflags)
{
uint oflags = 0;
/*
* Can't be both at the same time.
* Can't be more than one, or none.
*/
if (((uflags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) ==
(XFS_GROUP_QUOTA | XFS_USER_QUOTA)) ||
((uflags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) == 0))
(XFS_GROUP_QUOTA | XFS_USER_QUOTA)) ||
((uflags & (XFS_GROUP_QUOTA | XFS_PROJ_QUOTA)) ==
(XFS_GROUP_QUOTA | XFS_PROJ_QUOTA)) ||
((uflags & (XFS_USER_QUOTA | XFS_PROJ_QUOTA)) ==
(XFS_USER_QUOTA | XFS_PROJ_QUOTA)) ||
((uflags & (XFS_GROUP_QUOTA|XFS_USER_QUOTA|XFS_PROJ_QUOTA)) == 0))
return (0);
return (uflags & XFS_USER_QUOTA) ?
XFS_DQ_USER : XFS_DQ_GROUP;
oflags |= (uflags & XFS_USER_QUOTA) ? XFS_DQ_USER : 0;
oflags |= (uflags & XFS_PROJ_QUOTA) ? XFS_DQ_PROJ : 0;
oflags |= (uflags & XFS_GROUP_QUOTA) ? XFS_DQ_GROUP: 0;
return oflags;
}
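The rewritten validation above spells out every invalid pair by hand. The same "exactly one of these bits" requirement can also be tested with a power-of-two check; a standalone sketch (hypothetical helper, not part of this commit):

    /* Nonzero iff exactly one bit of x is set: x & (x - 1) clears the
     * lowest set bit, so it is zero only for zero-or-one-bit values,
     * and the leading "x &&" rules out zero. */
    static int exactly_one_set(unsigned int x)
    {
        return x && !(x & (x - 1));
    }

With only three quota types the explicit pair tests stay readable, which is presumably why the commit keeps them.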
STATIC uint
@@ -947,14 +984,19 @@ xfs_qm_export_qtype_flags(
uint flags)
{
/*
* Can't be both at the same time.
* Can't be more than one, or none.
*/
ASSERT((flags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) !=
(XFS_GROUP_QUOTA | XFS_USER_QUOTA));
ASSERT((flags & (XFS_GROUP_QUOTA | XFS_USER_QUOTA)) != 0);
ASSERT((flags & (XFS_PROJ_QUOTA | XFS_USER_QUOTA)) !=
(XFS_PROJ_QUOTA | XFS_USER_QUOTA));
ASSERT((flags & (XFS_PROJ_QUOTA | XFS_GROUP_QUOTA)) !=
(XFS_PROJ_QUOTA | XFS_GROUP_QUOTA));
ASSERT((flags & (XFS_USER_QUOTA | XFS_GROUP_QUOTA)) !=
(XFS_USER_QUOTA | XFS_GROUP_QUOTA));
ASSERT((flags & (XFS_PROJ_QUOTA|XFS_USER_QUOTA|XFS_GROUP_QUOTA)) != 0);
return (flags & XFS_DQ_USER) ?
XFS_USER_QUOTA : XFS_GROUP_QUOTA;
XFS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
XFS_PROJ_QUOTA : XFS_GROUP_QUOTA;
}
STATIC uint
@@ -965,12 +1007,14 @@ xfs_qm_import_flags(
if (uflags & XFS_QUOTA_UDQ_ACCT)
flags |= XFS_UQUOTA_ACCT;
if (uflags & XFS_QUOTA_PDQ_ACCT)
flags |= XFS_PQUOTA_ACCT;
if (uflags & XFS_QUOTA_GDQ_ACCT)
flags |= XFS_GQUOTA_ACCT;
if (uflags & XFS_QUOTA_UDQ_ENFD)
flags |= XFS_UQUOTA_ENFD;
if (uflags & XFS_QUOTA_GDQ_ENFD)
flags |= XFS_GQUOTA_ENFD;
if (uflags & (XFS_QUOTA_PDQ_ENFD|XFS_QUOTA_GDQ_ENFD))
flags |= XFS_OQUOTA_ENFD;
return (flags);
}
@@ -984,12 +1028,16 @@ xfs_qm_export_flags(
uflags = 0;
if (flags & XFS_UQUOTA_ACCT)
uflags |= XFS_QUOTA_UDQ_ACCT;
if (flags & XFS_PQUOTA_ACCT)
uflags |= XFS_QUOTA_PDQ_ACCT;
if (flags & XFS_GQUOTA_ACCT)
uflags |= XFS_QUOTA_GDQ_ACCT;
if (flags & XFS_UQUOTA_ENFD)
uflags |= XFS_QUOTA_UDQ_ENFD;
if (flags & XFS_GQUOTA_ENFD)
uflags |= XFS_QUOTA_GDQ_ENFD;
if (flags & (XFS_OQUOTA_ENFD)) {
uflags |= (flags & XFS_GQUOTA_ACCT) ?
XFS_QUOTA_GDQ_ENFD : XFS_QUOTA_PDQ_ENFD;
}
return (uflags);
}
@@ -1070,7 +1118,7 @@ xfs_qm_dqrele_all_inodes(
xfs_qm_dqrele(ip->i_udquot);
ip->i_udquot = NULL;
}
if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) {
xfs_qm_dqrele(ip->i_gdquot);
ip->i_gdquot = NULL;
}
@@ -1160,7 +1208,6 @@ xfs_qm_dqtest_print(
{
cmn_err(CE_DEBUG, "-----------DQTEST DQUOT----------------");
cmn_err(CE_DEBUG, "---- dquot ID = %d", d->d_id);
cmn_err(CE_DEBUG, "---- type = %s", XFS_QM_ISUDQ(d)? "USR" : "GRP");
cmn_err(CE_DEBUG, "---- fs = 0x%p", d->q_mount);
cmn_err(CE_DEBUG, "---- bcount = %Lu (0x%x)",
d->d_bcount, (int)d->d_bcount);
@@ -1231,7 +1278,7 @@ xfs_dqtest_cmp2(
#ifdef QUOTADEBUG
if (!err) {
cmn_err(CE_DEBUG, "%d [%s] [0x%p] qchecked",
d->d_id, XFS_QM_ISUDQ(d) ? "USR" : "GRP", d->q_mount);
d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount);
}
#endif
return (err);
@@ -1287,6 +1334,7 @@ STATIC void
xfs_qm_internalqcheck_get_dquots(
xfs_mount_t *mp,
xfs_dqid_t uid,
xfs_dqid_t projid,
xfs_dqid_t gid,
xfs_dqtest_t **ud,
xfs_dqtest_t **gd)
@@ -1295,6 +1343,8 @@ xfs_qm_internalqcheck_get_dquots(
xfs_qm_internalqcheck_dqget(mp, uid, XFS_DQ_USER, ud);
if (XFS_IS_GQUOTA_ON(mp))
xfs_qm_internalqcheck_dqget(mp, gid, XFS_DQ_GROUP, gd);
else if (XFS_IS_PQUOTA_ON(mp))
xfs_qm_internalqcheck_dqget(mp, projid, XFS_DQ_PROJ, gd);
}
@ -1362,13 +1412,14 @@ xfs_qm_internalqcheck_adjust(
}
xfs_qm_internalqcheck_get_dquots(mp,
(xfs_dqid_t) ip->i_d.di_uid,
(xfs_dqid_t) ip->i_d.di_projid,
(xfs_dqid_t) ip->i_d.di_gid,
&ud, &gd);
if (XFS_IS_UQUOTA_ON(mp)) {
ASSERT(ud);
xfs_qm_internalqcheck_dqadjust(ip, ud);
}
if (XFS_IS_GQUOTA_ON(mp)) {
if (XFS_IS_OQUOTA_ON(mp)) {
ASSERT(gd);
xfs_qm_internalqcheck_dqadjust(ip, gd);
}

View file

@ -56,6 +56,7 @@
#define XFS_QI_RTBTIMELIMIT(mp) ((mp)->m_quotainfo->qi_rtbtimelimit)
#define XFS_QI_ITIMELIMIT(mp) ((mp)->m_quotainfo->qi_itimelimit)
#define XFS_QI_BWARNLIMIT(mp) ((mp)->m_quotainfo->qi_bwarnlimit)
#define XFS_QI_RTBWARNLIMIT(mp) ((mp)->m_quotainfo->qi_rtbwarnlimit)
#define XFS_QI_IWARNLIMIT(mp) ((mp)->m_quotainfo->qi_iwarnlimit)
#define XFS_QI_QOFFLOCK(mp) ((mp)->m_quotainfo->qi_quotaofflock)
@ -102,7 +103,8 @@ static inline int XQMISLCKD(struct xfs_dqhash *h)
(xfs_Gqm->qm_grp_dqhtable + \
XFS_DQ_HASHVAL(mp, id)))
#define XFS_IS_DQTYPE_ON(mp, type) (type == XFS_DQ_USER ? \
XFS_IS_UQUOTA_ON(mp):XFS_IS_GQUOTA_ON(mp))
XFS_IS_UQUOTA_ON(mp) : \
XFS_IS_OQUOTA_ON(mp))
#define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \
!dqp->q_core.d_blk_hardlimit && \
!dqp->q_core.d_blk_softlimit && \
@ -177,16 +179,11 @@ for ((dqp) = (qlist)->qh_next; (dqp) != (xfs_dquot_t *)(qlist); \
(!((dqp)->q_core.d_id))
#define XFS_PURGE_INODE(ip) \
{ \
vmap_t dqvmap; \
vnode_t *dqvp; \
dqvp = XFS_ITOV(ip); \
VMAP(dqvp, dqvmap); \
VN_RELE(dqvp); \
}
IRELE(ip);
#define DQFLAGTO_TYPESTR(d) (((d)->dq_flags & XFS_DQ_USER) ? "USR" : \
(((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : "???"))
(((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : \
(((d)->dq_flags & XFS_DQ_PROJ) ? "PRJ":"???")))
#define DQFLAGTO_DIRTYSTR(d) (XFS_DQ_IS_DIRTY(d) ? "DIRTY" : "NOTDIRTY")
#endif /* __XFS_QUOTA_PRIV_H__ */

View file

@ -187,7 +187,7 @@ xfs_trans_dup_dqinfo(
/*
* Wrap around mod_dquot to account for both user and group quotas.
*/
void
STATIC void
xfs_trans_mod_dquot_byino(
xfs_trans_t *tp,
xfs_inode_t *ip,
@ -207,12 +207,10 @@ xfs_trans_mod_dquot_byino(
if (tp->t_dqinfo == NULL)
xfs_trans_alloc_dqinfo(tp);
if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot) {
if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
}
if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot) {
if (XFS_IS_OQUOTA_ON(mp) && ip->i_gdquot)
(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
}
}
STATIC xfs_dqtrx_t *
@ -368,7 +366,7 @@ xfs_trans_dqlockedjoin(
* Unreserve just the reservations done by this transaction.
* dquot is still left locked at exit.
*/
void
STATIC void
xfs_trans_apply_dquot_deltas(
xfs_trans_t *tp)
{
@ -499,7 +497,7 @@ xfs_trans_apply_dquot_deltas(
* Adjust the RT reservation.
*/
if (qtrx->qt_rtblk_res != 0) {
if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) {
if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
if (qtrx->qt_rtblk_res >
qtrx->qt_rtblk_res_used)
dqp->q_res_rtbcount -= (xfs_qcnt_t)
@ -532,12 +530,6 @@ xfs_trans_apply_dquot_deltas(
(xfs_qcnt_t)qtrx->qt_icount_delta;
}
#ifdef QUOTADEBUG
if (qtrx->qt_rtblk_res != 0)
cmn_err(CE_DEBUG, "RT res %d for 0x%p\n",
(int) qtrx->qt_rtblk_res, dqp);
#endif
ASSERT(dqp->q_res_bcount >=
INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT));
ASSERT(dqp->q_res_icount >=
@ -638,7 +630,10 @@ xfs_trans_dqresv(
int error;
xfs_qcnt_t hardlimit;
xfs_qcnt_t softlimit;
time_t btimer;
time_t timer;
xfs_qwarncnt_t warns;
xfs_qwarncnt_t warnlimit;
xfs_qcnt_t count;
xfs_qcnt_t *resbcountp;
xfs_quotainfo_t *q = mp->m_quotainfo;
@ -653,7 +648,9 @@ xfs_trans_dqresv(
softlimit = INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT);
if (!softlimit)
softlimit = q->qi_bsoftlimit;
btimer = INT_GET(dqp->q_core.d_btimer, ARCH_CONVERT);
timer = INT_GET(dqp->q_core.d_btimer, ARCH_CONVERT);
warns = INT_GET(dqp->q_core.d_bwarns, ARCH_CONVERT);
warnlimit = XFS_QI_BWARNLIMIT(dqp->q_mount);
resbcountp = &dqp->q_res_bcount;
} else {
ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
@ -663,7 +660,9 @@ xfs_trans_dqresv(
softlimit = INT_GET(dqp->q_core.d_rtb_softlimit, ARCH_CONVERT);
if (!softlimit)
softlimit = q->qi_rtbsoftlimit;
btimer = INT_GET(dqp->q_core.d_rtbtimer, ARCH_CONVERT);
timer = INT_GET(dqp->q_core.d_rtbtimer, ARCH_CONVERT);
warns = INT_GET(dqp->q_core.d_rtbwarns, ARCH_CONVERT);
warnlimit = XFS_QI_RTBWARNLIMIT(dqp->q_mount);
resbcountp = &dqp->q_res_rtbcount;
}
error = 0;
@ -693,37 +692,36 @@ xfs_trans_dqresv(
* If the timer or warnings have expired,
* return EDQUOT
*/
if ((btimer != 0 && get_seconds() > btimer) ||
(dqp->q_core.d_bwarns &&
INT_GET(dqp->q_core.d_bwarns, ARCH_CONVERT) >=
XFS_QI_BWARNLIMIT(dqp->q_mount))) {
if ((timer != 0 && get_seconds() > timer) ||
(warns != 0 && warns >= warnlimit)) {
error = EDQUOT;
goto error_return;
}
}
}
if (ninos > 0) {
hardlimit = INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT);
count = INT_GET(dqp->q_core.d_icount, ARCH_CONVERT);
timer = INT_GET(dqp->q_core.d_itimer, ARCH_CONVERT);
warns = INT_GET(dqp->q_core.d_iwarns, ARCH_CONVERT);
warnlimit = XFS_QI_IWARNLIMIT(dqp->q_mount);
hardlimit = INT_GET(dqp->q_core.d_ino_hardlimit,
ARCH_CONVERT);
if (!hardlimit)
hardlimit = q->qi_ihardlimit;
softlimit = INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT);
softlimit = INT_GET(dqp->q_core.d_ino_softlimit,
ARCH_CONVERT);
if (!softlimit)
softlimit = q->qi_isoftlimit;
if (hardlimit > 0ULL &&
INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >= hardlimit) {
if (hardlimit > 0ULL && count >= hardlimit) {
error = EDQUOT;
goto error_return;
} else if (softlimit > 0ULL &&
INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >= softlimit) {
} else if (softlimit > 0ULL && count >= softlimit) {
/*
* If the timer or warnings have expired,
* return EDQUOT
*/
if ((dqp->q_core.d_itimer &&
get_seconds() > INT_GET(dqp->q_core.d_itimer, ARCH_CONVERT)) ||
(dqp->q_core.d_iwarns &&
INT_GET(dqp->q_core.d_iwarns, ARCH_CONVERT) >=
XFS_QI_IWARNLIMIT(dqp->q_mount))) {
if ((timer != 0 && get_seconds() > timer) ||
(warns != 0 && warns >= warnlimit)) {
error = EDQUOT;
goto error_return;
}
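With the limits, timer and warning count all hoisted into locals, the
block, realtime and inode paths now share one test. A standalone sketch of
that shape, with simplified types (the kernel reads these fields via
INT_GET and falls back to per-mount defaults):

#include <time.h>

/* nonzero when the reservation must fail with EDQUOT */
static int over_limit(unsigned long long count,
		      unsigned long long hardlimit,
		      unsigned long long softlimit,
		      time_t timer,
		      unsigned int warns, unsigned int warnlimit)
{
	if (hardlimit && count >= hardlimit)
		return 1;
	if (softlimit && count >= softlimit)
		return (timer != 0 && time(NULL) > timer) ||
		       (warns != 0 && warns >= warnlimit);
	return 0;
}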

View file

@ -36,7 +36,6 @@
#include <linux/sched.h>
#include <linux/kernel.h>
int doass = 1;
static char message[256]; /* keep it off the stack */
static DEFINE_SPINLOCK(xfs_err_lock);

View file

@ -50,16 +50,11 @@ extern void cmn_err(int, char *, ...);
#endif
#ifdef DEBUG
# ifdef lint
# define ASSERT(EX) ((void)0) /* avoid "constant in conditional" babble */
# else
# define ASSERT(EX) ((!doass||(EX))?((void)0):assfail(#EX, __FILE__, __LINE__))
# endif /* lint */
# define ASSERT(EX) ((EX) ? ((void)0) : assfail(#EX, __FILE__, __LINE__))
#else
# define ASSERT(x) ((void)0)
#endif
extern int doass; /* dynamically turn off asserts */
extern void assfail(char *, char *, int);
#ifdef DEBUG
extern unsigned long random(void);

View file

@ -59,7 +59,7 @@
#define XFSA_FIXUP_BNO_OK 1
#define XFSA_FIXUP_CNT_OK 2
int
STATIC int
xfs_alloc_search_busy(xfs_trans_t *tp,
xfs_agnumber_t agno,
xfs_agblock_t bno,
@ -2562,7 +2562,7 @@ xfs_alloc_clear_busy(xfs_trans_t *tp,
/*
* returns non-zero if any of (agno,bno):len is in a busy list
*/
int
STATIC int
xfs_alloc_search_busy(xfs_trans_t *tp,
xfs_agnumber_t agno,
xfs_agblock_t bno,

View file

@ -71,6 +71,11 @@
* Provide the external interfaces to manage attribute lists.
*/
#define ATTR_SYSCOUNT 2
STATIC struct attrnames posix_acl_access;
STATIC struct attrnames posix_acl_default;
STATIC struct attrnames *attr_system_names[ATTR_SYSCOUNT];
/*========================================================================
* Function prototypes for the kernel.
*========================================================================*/
@ -83,6 +88,7 @@ STATIC int xfs_attr_shortform_addname(xfs_da_args_t *args);
/*
* Internal routines when attribute list is one block.
*/
STATIC int xfs_attr_leaf_get(xfs_da_args_t *args);
STATIC int xfs_attr_leaf_addname(xfs_da_args_t *args);
STATIC int xfs_attr_leaf_removename(xfs_da_args_t *args);
STATIC int xfs_attr_leaf_list(xfs_attr_list_context_t *context);
@ -90,6 +96,7 @@ STATIC int xfs_attr_leaf_list(xfs_attr_list_context_t *context);
/*
* Internal routines when attribute list is more than one block.
*/
STATIC int xfs_attr_node_get(xfs_da_args_t *args);
STATIC int xfs_attr_node_addname(xfs_da_args_t *args);
STATIC int xfs_attr_node_removename(xfs_da_args_t *args);
STATIC int xfs_attr_node_list(xfs_attr_list_context_t *context);
@ -1102,7 +1109,7 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
* This leaf block cannot have a "remote" value, we only call this routine
* if bmap_one_block() says there is only one block (ie: no remote blks).
*/
int
STATIC int
xfs_attr_leaf_get(xfs_da_args_t *args)
{
xfs_dabuf_t *bp;
@ -1707,7 +1714,7 @@ xfs_attr_refillstate(xfs_da_state_t *state)
* block, ie: both true Btree attr lists and for single-leaf-blocks with
* "remote" values taking up more blocks.
*/
int
STATIC int
xfs_attr_node_get(xfs_da_args_t *args)
{
xfs_da_state_t *state;
@ -2398,7 +2405,7 @@ posix_acl_default_exists(
return xfs_acl_vhasacl_default(vp);
}
struct attrnames posix_acl_access = {
STATIC struct attrnames posix_acl_access = {
.attr_name = "posix_acl_access",
.attr_namelen = sizeof("posix_acl_access") - 1,
.attr_get = posix_acl_access_get,
@ -2407,7 +2414,7 @@ struct attrnames posix_acl_access = {
.attr_exists = posix_acl_access_exists,
};
struct attrnames posix_acl_default = {
STATIC struct attrnames posix_acl_default = {
.attr_name = "posix_acl_default",
.attr_namelen = sizeof("posix_acl_default") - 1,
.attr_get = posix_acl_default_get,
@ -2416,7 +2423,7 @@ struct attrnames posix_acl_default = {
.attr_exists = posix_acl_default_exists,
};
struct attrnames *attr_system_names[] =
STATIC struct attrnames *attr_system_names[] =
{ &posix_acl_access, &posix_acl_default };

View file

@ -76,11 +76,6 @@ extern struct attrnames attr_system;
extern struct attrnames attr_trusted;
extern struct attrnames *attr_namespaces[ATTR_NAMECOUNT];
#define ATTR_SYSCOUNT 2
extern struct attrnames posix_acl_access;
extern struct attrnames posix_acl_default;
extern struct attrnames *attr_system_names[ATTR_SYSCOUNT];
extern attrnames_t *attr_lookup_namespace(char *, attrnames_t **, int);
extern int attr_generic_list(struct vnode *, void *, size_t, int, ssize_t *);
@ -184,8 +179,6 @@ int xfs_attr_list(bhv_desc_t *, char *, int, int,
struct attrlist_cursor_kern *, struct cred *);
int xfs_attr_inactive(struct xfs_inode *dp);
int xfs_attr_node_get(struct xfs_da_args *);
int xfs_attr_leaf_get(struct xfs_da_args *);
int xfs_attr_shortform_getvalue(struct xfs_da_args *);
int xfs_attr_fetch(struct xfs_inode *, char *, int,
char *, int *, int, struct cred *);

View file

@ -79,6 +79,8 @@
/*
* Routines used for growing the Btree.
*/
STATIC int xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t which_block,
xfs_dabuf_t **bpp);
STATIC int xfs_attr_leaf_add_work(xfs_dabuf_t *leaf_buffer, xfs_da_args_t *args,
int freemap_index);
STATIC void xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *leaf_buffer);
@ -91,6 +93,16 @@ STATIC int xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
int *number_entries_in_blk1,
int *number_usedbytes_in_blk1);
/*
* Routines used for shrinking the Btree.
*/
STATIC int xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp,
xfs_dabuf_t *bp, int level);
STATIC int xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp,
xfs_dabuf_t *bp);
STATIC int xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
xfs_dablk_t blkno, int blkcnt);
/*
* Utility routines.
*/
@ -99,6 +111,10 @@ STATIC void xfs_attr_leaf_moveents(xfs_attr_leafblock_t *src_leaf,
xfs_attr_leafblock_t *dst_leaf,
int dst_start, int move_count,
xfs_mount_t *mp);
STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index);
STATIC int xfs_attr_put_listent(xfs_attr_list_context_t *context,
attrnames_t *, char *name, int namelen,
int valuelen);
/*========================================================================
@ -774,7 +790,7 @@ xfs_attr_leaf_to_node(xfs_da_args_t *args)
* Create the initial contents of a leaf attribute list
* or a leaf in a node attribute list.
*/
int
STATIC int
xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp)
{
xfs_attr_leafblock_t *leaf;
@ -2209,7 +2225,7 @@ xfs_attr_leaf_lasthash(xfs_dabuf_t *bp, int *count)
* Calculate the number of bytes used to store the indicated attribute
* (whether local or remote only calculate bytes in this block).
*/
int
STATIC int
xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index)
{
xfs_attr_leaf_name_local_t *name_loc;
@ -2380,7 +2396,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
* we may be reading them directly out of a user buffer.
*/
/*ARGSUSED*/
int
STATIC int
xfs_attr_put_listent(xfs_attr_list_context_t *context,
attrnames_t *namesp, char *name, int namelen, int valuelen)
{
@ -2740,7 +2756,7 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp)
* Recurse (gasp!) through the attribute nodes until we find leaves.
* We're doing a depth-first traversal in order to invalidate everything.
*/
int
STATIC int
xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
int level)
{
@ -2849,7 +2865,7 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
* Note that we must release the lock on the buffer so that we are not
* caught holding something that the logging code wants to flush to disk.
*/
int
STATIC int
xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp)
{
xfs_attr_leafblock_t *leaf;
@ -2934,7 +2950,7 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp)
* Look at all the extents for this logical region,
* invalidate any buffers that are incore/in transactions.
*/
int
STATIC int
xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
xfs_dablk_t blkno, int blkcnt)
{

View file

@ -261,8 +261,6 @@ int xfs_attr_leaf_flipflags(xfs_da_args_t *args);
/*
* Routines used for growing the Btree.
*/
int xfs_attr_leaf_create(struct xfs_da_args *args, xfs_dablk_t which_block,
struct xfs_dabuf **bpp);
int xfs_attr_leaf_split(struct xfs_da_state *state,
struct xfs_da_state_blk *oldblk,
struct xfs_da_state_blk *newblk);
@ -284,12 +282,6 @@ void xfs_attr_leaf_unbalance(struct xfs_da_state *state,
struct xfs_da_state_blk *drop_blk,
struct xfs_da_state_blk *save_blk);
int xfs_attr_root_inactive(struct xfs_trans **trans, struct xfs_inode *dp);
int xfs_attr_node_inactive(struct xfs_trans **trans, struct xfs_inode *dp,
struct xfs_dabuf *bp, int level);
int xfs_attr_leaf_inactive(struct xfs_trans **trans, struct xfs_inode *dp,
struct xfs_dabuf *bp);
int xfs_attr_leaf_freextent(struct xfs_trans **trans, struct xfs_inode *dp,
xfs_dablk_t blkno, int blkcnt);
/*
* Utility routines.
@ -299,10 +291,6 @@ int xfs_attr_leaf_order(struct xfs_dabuf *leaf1_bp,
struct xfs_dabuf *leaf2_bp);
int xfs_attr_leaf_newentsize(struct xfs_da_args *args, int blocksize,
int *local);
int xfs_attr_leaf_entsize(struct xfs_attr_leafblock *leaf, int index);
int xfs_attr_put_listent(struct xfs_attr_list_context *context,
struct attrnames *, char *name, int namelen,
int valuelen);
int xfs_attr_rolltrans(struct xfs_trans **transp, struct xfs_inode *dp);
#endif /* __XFS_ATTR_LEAF_H__ */

View file

@ -45,7 +45,7 @@
/*
* Index of high bit number in byte, -1 for none set, 0..7 otherwise.
*/
const char xfs_highbit[256] = {
STATIC const char xfs_highbit[256] = {
-1, 0, 1, 1, 2, 2, 2, 2, /* 00 .. 07 */
3, 3, 3, 3, 3, 3, 3, 3, /* 08 .. 0f */
4, 4, 4, 4, 4, 4, 4, 4, /* 10 .. 17 */

View file

@ -301,6 +301,19 @@ xfs_bmap_search_extents(
xfs_bmbt_irec_t *gotp, /* out: extent entry found */
xfs_bmbt_irec_t *prevp); /* out: previous extent entry found */
/*
* Check the last inode extent to determine whether this allocation will result
* in blocks being allocated at the end of the file. When we allocate new data
* blocks at the end of the file which do not start at the previous data block,
* we will try to align the new blocks at stripe unit boundaries.
*/
STATIC int /* error */
xfs_bmap_isaeof(
xfs_inode_t *ip, /* incore inode pointer */
xfs_fileoff_t off, /* file offset in fsblocks */
int whichfork, /* data or attribute fork */
char *aeof); /* return value */
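A hedged illustration of the alignment the comment describes: when an
allocation lands at end-of-file, its start is rounded up to a stripe-unit
boundary. This is not the kernel helper, just the arithmetic, with sunit
in filesystem blocks:

/* round bno up to the next stripe-unit boundary; sunit must be nonzero */
static unsigned long long
align_alloc(unsigned long long bno, unsigned int sunit)
{
	return ((bno + sunit - 1) / sunit) * sunit;
}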
#ifdef XFS_BMAP_TRACE
/*
* Add a bmap trace buffer entry. Base routine for the others.
@ -4532,18 +4545,17 @@ xfs_bmapi(
xfs_extlen_t alen; /* allocated extent length */
xfs_fileoff_t aoff; /* allocated file offset */
xfs_bmalloca_t bma; /* args for xfs_bmap_alloc */
char contig; /* allocation must be one extent */
xfs_btree_cur_t *cur; /* bmap btree cursor */
char delay; /* this request is for delayed alloc */
xfs_fileoff_t end; /* end of mapped file region */
int eof; /* we've hit the end of extent list */
char contig; /* allocation must be one extent */
char delay; /* this request is for delayed alloc */
char exact; /* don't do all of wasdelayed extent */
xfs_bmbt_rec_t *ep; /* extent list entry pointer */
int error; /* error return */
char exact; /* don't do all of wasdelayed extent */
xfs_bmbt_irec_t got; /* current extent list record */
xfs_ifork_t *ifp; /* inode fork pointer */
xfs_extlen_t indlen; /* indirect blocks length */
char inhole; /* current location is hole in file */
xfs_extnum_t lastx; /* last useful extent number */
int logflags; /* flags for transaction logging */
xfs_extlen_t minleft; /* min blocks left after allocation */
@ -4554,13 +4566,15 @@ xfs_bmapi(
xfs_extnum_t nextents; /* number of extents in file */
xfs_fileoff_t obno; /* old block number (offset) */
xfs_bmbt_irec_t prev; /* previous extent list record */
char stateless; /* ignore state flag set */
int tmp_logflags; /* temp flags holder */
int whichfork; /* data or attr fork */
char inhole; /* current location is hole in file */
char stateless; /* ignore state flag set */
char trim; /* output trimmed to match range */
char userdata; /* allocating non-metadata */
char wasdelay; /* old extent was delayed */
int whichfork; /* data or attr fork */
char wr; /* this is a write request */
char rt; /* this is a realtime file */
char rsvd; /* OK to allocate reserved blocks */
#ifdef DEBUG
xfs_fileoff_t orig_bno; /* original block number value */
@ -4590,6 +4604,7 @@ xfs_bmapi(
}
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
rt = XFS_IS_REALTIME_INODE(ip);
ifp = XFS_IFORK_PTR(ip, whichfork);
ASSERT(ifp->if_ext_max ==
XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
@ -4694,9 +4709,16 @@ xfs_bmapi(
}
minlen = contig ? alen : 1;
if (delay) {
indlen = (xfs_extlen_t)
xfs_bmap_worst_indlen(ip, alen);
ASSERT(indlen > 0);
xfs_extlen_t extsz = 0;
/* Figure out the extent size, adjust alen */
if (rt) {
if (!(extsz = ip->i_d.di_extsize))
extsz = mp->m_sb.sb_rextsize;
alen = roundup(alen, extsz);
extsz = alen / mp->m_sb.sb_rextsize;
}
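/*
 * Editor's worked example (hypothetical numbers, not kernel code):
 * with di_extsize = 8 blocks and sb_rextsize = 4 blocks per rt
 * extent, a request of alen = 13 blocks becomes roundup(13, 8) = 16
 * blocks, so extsz = 16 / 4 = 4 realtime extents get debited from
 * XFS_SBS_FREXTENTS below.
 */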
/*
* Make a transaction-less quota reservation for
* delayed allocation blocks. This number gets
@ -4704,8 +4726,10 @@ xfs_bmapi(
* We return EDQUOT if we haven't allocated
* blks already inside this loop;
*/
if (XFS_TRANS_RESERVE_BLKQUOTA(
mp, NULL, ip, (long)alen)) {
if (XFS_TRANS_RESERVE_QUOTA_NBLKS(
mp, NULL, ip, (long)alen, 0,
rt ? XFS_QMOPT_RES_RTBLKS :
XFS_QMOPT_RES_REGBLKS)) {
if (n == 0) {
*nmap = 0;
ASSERT(cur == NULL);
@ -4718,40 +4742,34 @@ xfs_bmapi(
* Split changing sb for alen and indlen since
* they could be coming from different places.
*/
if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) {
xfs_extlen_t extsz;
xfs_extlen_t ralen;
if (!(extsz = ip->i_d.di_extsize))
extsz = mp->m_sb.sb_rextsize;
ralen = roundup(alen, extsz);
ralen = ralen / mp->m_sb.sb_rextsize;
if (xfs_mod_incore_sb(mp,
XFS_SBS_FREXTENTS,
-(ralen), rsvd)) {
if (XFS_IS_QUOTA_ON(ip->i_mount))
XFS_TRANS_UNRESERVE_BLKQUOTA(
mp, NULL, ip,
(long)alen);
break;
}
} else {
if (xfs_mod_incore_sb(mp,
XFS_SBS_FDBLOCKS,
-(alen), rsvd)) {
if (XFS_IS_QUOTA_ON(ip->i_mount))
XFS_TRANS_UNRESERVE_BLKQUOTA(
mp, NULL, ip,
(long)alen);
break;
}
}
indlen = (xfs_extlen_t)
xfs_bmap_worst_indlen(ip, alen);
ASSERT(indlen > 0);
if (xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
-(indlen), rsvd)) {
XFS_TRANS_UNRESERVE_BLKQUOTA(
mp, NULL, ip, (long)alen);
if (rt)
error = xfs_mod_incore_sb(mp,
XFS_SBS_FREXTENTS,
-(extsz), rsvd);
else
error = xfs_mod_incore_sb(mp,
XFS_SBS_FDBLOCKS,
-(alen), rsvd);
if (!error)
error = xfs_mod_incore_sb(mp,
XFS_SBS_FDBLOCKS,
-(indlen), rsvd);
if (error) {
if (XFS_IS_QUOTA_ON(ip->i_mount))
/* unreserve the blocks now */
XFS_TRANS_UNRESERVE_QUOTA_NBLKS(
mp, NULL, ip,
(long)alen, 0, rt ?
XFS_QMOPT_RES_RTBLKS :
XFS_QMOPT_RES_REGBLKS);
break;
}
ip->i_delayed_blks += alen;
abno = NULLSTARTBLOCK(indlen);
} else {
@ -5376,13 +5394,24 @@ xfs_bunmapi(
}
if (wasdel) {
ASSERT(STARTBLOCKVAL(del.br_startblock) > 0);
xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
(int)del.br_blockcount, rsvd);
/* Unreserve our quota space */
XFS_TRANS_RESERVE_QUOTA_NBLKS(
mp, NULL, ip, -((long)del.br_blockcount), 0,
isrt ? XFS_QMOPT_RES_RTBLKS :
/* Update realtime/data freespace, unreserve quota */
if (isrt) {
xfs_filblks_t rtexts;
rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
do_div(rtexts, mp->m_sb.sb_rextsize);
xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
(int)rtexts, rsvd);
XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, NULL, ip,
-((long)del.br_blockcount), 0,
XFS_QMOPT_RES_RTBLKS);
} else {
xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
(int)del.br_blockcount, rsvd);
XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, NULL, ip,
-((long)del.br_blockcount), 0,
XFS_QMOPT_RES_REGBLKS);
}
ip->i_delayed_blks -= del.br_blockcount;
if (cur)
cur->bc_private.b.flags |=
@ -5714,7 +5743,7 @@ xfs_getbmap(
* blocks at the end of the file which do not start at the previous data block,
* we will try to align the new blocks at stripe unit boundaries.
*/
int /* error */
STATIC int /* error */
xfs_bmap_isaeof(
xfs_inode_t *ip, /* incore inode pointer */
xfs_fileoff_t off, /* file offset in fsblocks */

View file

@ -331,19 +331,6 @@ xfs_getbmap(
void __user *ap, /* pointer to user's array */
int iflags); /* interface flags */
/*
* Check the last inode extent to determine whether this allocation will result
* in blocks being allocated at the end of the file. When we allocate new data
* blocks at the end of the file which do not start at the previous data block,
* we will try to align the new blocks at stripe unit boundaries.
*/
int
xfs_bmap_isaeof(
struct xfs_inode *ip,
xfs_fileoff_t off,
int whichfork,
char *aeof);
/*
* Check if the endoff is outside the last extent. If so the caller will grow
* the allocation to a stripe unit boundary

View file

@ -2331,20 +2331,6 @@ xfs_bmbt_lookup_ge(
return xfs_bmbt_lookup(cur, XFS_LOOKUP_GE, stat);
}
int /* error */
xfs_bmbt_lookup_le(
xfs_btree_cur_t *cur,
xfs_fileoff_t off,
xfs_fsblock_t bno,
xfs_filblks_t len,
int *stat) /* success/failure */
{
cur->bc_rec.b.br_startoff = off;
cur->bc_rec.b.br_startblock = bno;
cur->bc_rec.b.br_blockcount = len;
return xfs_bmbt_lookup(cur, XFS_LOOKUP_LE, stat);
}
/*
* Give the bmap btree a new root block. Copy the old broot contents
* down into a real block and make the broot point to it.

View file

@ -580,14 +580,6 @@ xfs_bmbt_lookup_ge(
xfs_filblks_t,
int *);
int
xfs_bmbt_lookup_le(
struct xfs_btree_cur *,
xfs_fileoff_t,
xfs_fsblock_t,
xfs_filblks_t,
int *);
/*
* Give the bmap btree a new root block. Copy the old broot contents
* down into a real block and make the broot point to it.

View file

@ -89,6 +89,16 @@ xfs_btree_maxrecs(
* Internal routines.
*/
/*
* Retrieve the block pointer from the cursor at the given level.
* This may be a bmap btree root or from a buffer.
*/
STATIC xfs_btree_block_t * /* generic btree block pointer */
xfs_btree_get_block(
xfs_btree_cur_t *cur, /* btree cursor */
int level, /* level in btree */
struct xfs_buf **bpp); /* buffer containing the block */
/*
* Checking routine: return maxrecs for the block.
*/
@ -497,7 +507,7 @@ xfs_btree_firstrec(
* Retrieve the block pointer from the cursor at the given level.
* This may be a bmap btree root or from a buffer.
*/
xfs_btree_block_t * /* generic btree block pointer */
STATIC xfs_btree_block_t * /* generic btree block pointer */
xfs_btree_get_block(
xfs_btree_cur_t *cur, /* btree cursor */
int level, /* level in btree */

View file

@ -324,16 +324,6 @@ xfs_btree_firstrec(
xfs_btree_cur_t *cur, /* btree cursor */
int level); /* level to change */
/*
* Retrieve the block pointer from the cursor at the given level.
* This may be a bmap btree root or from a buffer.
*/
xfs_btree_block_t * /* generic btree block pointer */
xfs_btree_get_block(
xfs_btree_cur_t *cur, /* btree cursor */
int level, /* level in btree */
struct xfs_buf **bpp); /* buffer containing the block */
/*
* Get a buffer for the block, return it with no data read.
* Long-form addressing.

View file

@ -172,7 +172,7 @@ STATIC void xfs_buf_do_callbacks(xfs_buf_t *bp, xfs_log_item_t *lip);
*
* If the XFS_BLI_STALE flag has been set, then log nothing.
*/
uint
STATIC uint
xfs_buf_item_size(
xfs_buf_log_item_t *bip)
{
@ -240,7 +240,7 @@ xfs_buf_item_size(
* format structure, and the rest point to contiguous chunks
* within the buffer.
*/
void
STATIC void
xfs_buf_item_format(
xfs_buf_log_item_t *bip,
xfs_log_iovec_t *log_vector)
@ -365,7 +365,7 @@ xfs_buf_item_format(
* item in memory so it cannot be written out. Simply call bpin()
* on the buffer to do this.
*/
void
STATIC void
xfs_buf_item_pin(
xfs_buf_log_item_t *bip)
{
@ -391,7 +391,7 @@ xfs_buf_item_pin(
* If the XFS_BLI_STALE flag is set and we are the last reference,
* then free up the buf log item and unlock the buffer.
*/
void
STATIC void
xfs_buf_item_unpin(
xfs_buf_log_item_t *bip,
int stale)
@ -446,7 +446,7 @@ xfs_buf_item_unpin(
* so we need to free the item's descriptor (that points to the item)
* in the transaction.
*/
void
STATIC void
xfs_buf_item_unpin_remove(
xfs_buf_log_item_t *bip,
xfs_trans_t *tp)
@ -493,7 +493,7 @@ xfs_buf_item_unpin_remove(
* the lock right away, return 0. If we can get the lock, pull the
* buffer from the free list, mark it busy, and return 1.
*/
uint
STATIC uint
xfs_buf_item_trylock(
xfs_buf_log_item_t *bip)
{
@ -537,7 +537,7 @@ xfs_buf_item_trylock(
* This is for support of xfs_trans_bhold(). Make sure the
* XFS_BLI_HOLD field is cleared if we don't free the item.
*/
void
STATIC void
xfs_buf_item_unlock(
xfs_buf_log_item_t *bip)
{
@ -635,7 +635,7 @@ xfs_buf_item_unlock(
* by returning the original lsn of that transaction here rather than
* the current one.
*/
xfs_lsn_t
STATIC xfs_lsn_t
xfs_buf_item_committed(
xfs_buf_log_item_t *bip,
xfs_lsn_t lsn)
@ -654,7 +654,7 @@ xfs_buf_item_committed(
* and have aborted this transaction, we'll trap this buffer when it tries to
* get written out.
*/
void
STATIC void
xfs_buf_item_abort(
xfs_buf_log_item_t *bip)
{
@ -674,7 +674,7 @@ xfs_buf_item_abort(
* B_DELWRI set, then get it going out to disk with a call to bawrite().
* If not, then just release the buffer.
*/
void
STATIC void
xfs_buf_item_push(
xfs_buf_log_item_t *bip)
{
@ -693,7 +693,7 @@ xfs_buf_item_push(
}
/* ARGSUSED */
void
STATIC void
xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn)
{
}
@ -701,7 +701,7 @@ xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn)
/*
* This is the ops vector shared by all buf log items.
*/
struct xfs_item_ops xfs_buf_item_ops = {
STATIC struct xfs_item_ops xfs_buf_item_ops = {
.iop_size = (uint(*)(xfs_log_item_t*))xfs_buf_item_size,
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_buf_item_format,
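Making these vectors STATIC is safe because callers reach the handlers
only through the function-pointer table, never through the symbols. A
stripped-down model of the pattern, with invented names:

struct item;				/* opaque log item */

struct item_ops {
	unsigned int (*iop_size)(struct item *);
	void	     (*iop_push)(struct item *);
};

static unsigned int demo_size(struct item *ip) { return 1; }
static void demo_push(struct item *ip) { }

/* the vector is the only externally visible handle */
static struct item_ops demo_item_ops = {
	.iop_size = demo_size,
	.iop_push = demo_push,
};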

View file

@ -80,7 +80,7 @@ typedef struct xfs_buf_log_format_t {
* user or group dquots and may require special recovery handling.
*/
#define XFS_BLI_UDQUOT_BUF 0x4
/* #define XFS_BLI_PDQUOT_BUF 0x8 */
#define XFS_BLI_PDQUOT_BUF 0x8
#define XFS_BLI_GDQUOT_BUF 0x10
#define XFS_BLI_CHUNK 128

View file

@ -113,7 +113,10 @@ STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
STATIC uint xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count);
STATIC int xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp);
STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra);
STATIC int xfs_da_blk_unlink(xfs_da_state_t *state,
xfs_da_state_blk_t *drop_blk,
xfs_da_state_blk_t *save_blk);
STATIC void xfs_da_state_kill_altpath(xfs_da_state_t *state);
/*========================================================================
* Routines used for growing the Btree.
@ -1424,7 +1427,7 @@ xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count)
/*
* Unlink a block from a doubly linked list of blocks.
*/
int /* error */
STATIC int /* error */
xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
xfs_da_state_blk_t *save_blk)
{
@ -2381,7 +2384,7 @@ xfs_da_state_alloc(void)
/*
* Kill the altpath contents of a da-state structure.
*/
void
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
int i;

View file

@ -296,8 +296,6 @@ int xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
/*
* Utility routines.
*/
int xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
xfs_da_state_blk_t *save_blk);
int xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
xfs_da_state_blk_t *new_blk);
@ -320,7 +318,6 @@ uint xfs_da_hashname(uchar_t *name_string, int name_length);
uint xfs_da_log2_roundup(uint i);
xfs_da_state_t *xfs_da_state_alloc(void);
void xfs_da_state_free(xfs_da_state_t *state);
void xfs_da_state_kill_altpath(xfs_da_state_t *state);
void xfs_da_buf_done(xfs_dabuf_t *dabuf);
void xfs_da_log_buf(struct xfs_trans *tp, xfs_dabuf_t *dabuf, uint first,

View file

@ -180,9 +180,10 @@ xfs_swapext(
goto error0;
}
if (VN_CACHED(tvp) != 0)
xfs_inval_cached_pages(XFS_ITOV(tip), &(tip->i_iocore),
(xfs_off_t)0, 0, 0);
if (VN_CACHED(tvp) != 0) {
xfs_inval_cached_trace(&tip->i_iocore, 0, -1, 0, -1);
VOP_FLUSHINVAL_PAGES(tvp, 0, -1, FI_REMAPF_LOCKED);
}
/* Verify O_DIRECT for ftmp */
if (VN_CACHED(tvp) != 0) {

View file

@ -304,7 +304,7 @@ xfs_dir2_data_freeinsert(
/*
* Remove a bestfree entry from the table.
*/
void
STATIC void
xfs_dir2_data_freeremove(
xfs_dir2_data_t *d, /* data block pointer */
xfs_dir2_data_free_t *dfp, /* bestfree entry pointer */

View file

@ -192,10 +192,6 @@ extern xfs_dir2_data_free_t *
xfs_dir2_data_freeinsert(xfs_dir2_data_t *d,
xfs_dir2_data_unused_t *dup, int *loghead);
extern void
xfs_dir2_data_freeremove(xfs_dir2_data_t *d,
xfs_dir2_data_free_t *dfp, int *loghead);
extern void
xfs_dir2_data_freescan(struct xfs_mount *mp, xfs_dir2_data_t *d,
int *loghead, char *aendp);

View file

@ -77,6 +77,10 @@ static void xfs_dir2_leaf_check(xfs_inode_t *dp, xfs_dabuf_t *bp);
#endif
static int xfs_dir2_leaf_lookup_int(xfs_da_args_t *args, xfs_dabuf_t **lbpp,
int *indexp, xfs_dabuf_t **dbpp);
static void xfs_dir2_leaf_log_bests(struct xfs_trans *tp, struct xfs_dabuf *bp,
int first, int last);
static void xfs_dir2_leaf_log_tail(struct xfs_trans *tp, struct xfs_dabuf *bp);
/*
* Convert a block form directory to a leaf form directory.
@ -1214,7 +1218,7 @@ xfs_dir2_leaf_init(
/*
* Log the bests entries indicated from a leaf1 block.
*/
void
static void
xfs_dir2_leaf_log_bests(
xfs_trans_t *tp, /* transaction pointer */
xfs_dabuf_t *bp, /* leaf buffer */
@ -1278,7 +1282,7 @@ xfs_dir2_leaf_log_header(
/*
* Log the tail of the leaf1 block.
*/
void
STATIC void
xfs_dir2_leaf_log_tail(
xfs_trans_t *tp, /* transaction pointer */
xfs_dabuf_t *bp) /* leaf buffer */

View file

@ -329,16 +329,9 @@ extern void
xfs_dir2_leaf_log_ents(struct xfs_trans *tp, struct xfs_dabuf *bp,
int first, int last);
extern void
xfs_dir2_leaf_log_bests(struct xfs_trans *tp, struct xfs_dabuf *bp,
int first, int last);
extern void
xfs_dir2_leaf_log_header(struct xfs_trans *tp, struct xfs_dabuf *bp);
extern void
xfs_dir2_leaf_log_tail(struct xfs_trans *tp, struct xfs_dabuf *bp);
extern int
xfs_dir2_leaf_lookup(struct xfs_da_args *args);

View file

@ -91,6 +91,10 @@ STATIC int xfs_dir_leaf_figure_balance(xfs_da_state_t *state,
int *number_entries_in_blk1,
int *number_namebytes_in_blk1);
STATIC int xfs_dir_leaf_create(struct xfs_da_args *args,
xfs_dablk_t which_block,
struct xfs_dabuf **bpp);
/*
* Utility routines.
*/
@ -781,7 +785,7 @@ xfs_dir_leaf_to_node(xfs_da_args_t *args)
* Create the initial contents of a leaf directory
* or a leaf in a node directory.
*/
int
STATIC int
xfs_dir_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp)
{
xfs_dir_leafblock_t *leaf;

View file

@ -202,8 +202,6 @@ int xfs_dir_leaf_to_shortform(struct xfs_da_args *args);
/*
* Routines used for growing the Btree.
*/
int xfs_dir_leaf_create(struct xfs_da_args *args, xfs_dablk_t which_block,
struct xfs_dabuf **bpp);
int xfs_dir_leaf_split(struct xfs_da_state *state,
struct xfs_da_state_blk *oldblk,
struct xfs_da_state_blk *newblk);

View file

@ -166,27 +166,32 @@ typedef enum {
#define DM_FLAGS_NDELAY 0x001 /* return EAGAIN after dm_pending() */
#define DM_FLAGS_UNWANTED 0x002 /* event not in fsys dm_eventset_t */
#define DM_FLAGS_ISEM 0x004 /* thread holds i_sem */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,21)
/* i_alloc_sem was added in 2.4.22-pre1 */
#define DM_FLAGS_IALLOCSEM_RD 0x010 /* thread holds i_alloc_sem rd */
#define DM_FLAGS_IALLOCSEM_WR 0x020 /* thread holds i_alloc_sem wr */
#endif
#endif
/*
* Based on IO_ISDIRECT, decide which i_sem/i_alloc_sem flag is set.
*/
#ifdef DM_FLAGS_IALLOCSEM_RD
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
DM_FLAGS_ISEM : 0)
#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_ISEM)
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,22))
#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
DM_FLAGS_IALLOCSEM_RD : DM_FLAGS_ISEM)
#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_ISEM)
#else
#endif
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,21)
#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
0 : DM_FLAGS_ISEM)
#define DM_SEM_FLAG_WR (DM_FLAGS_ISEM)
#endif
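The rewritten ladder replaces the old if/else chain with three disjoint
version ranges, so exactly one set of definitions survives preprocessing
on any kernel. The shape, reduced to a sketch (assumes <linux/version.h>
is available; DEMO_FLAG is invented):

#include <linux/version.h>

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
# define DEMO_FLAG 1			/* 2.6+: i_sem only */
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,22)
# define DEMO_FLAG 2			/* 2.4.22+: i_alloc_sem exists */
#else
# define DEMO_FLAG 3			/* older 2.4: i_sem only */
#endif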
/*
* Macros to turn caller specified delay/block flags into
* dm_send_xxxx_event flag DM_FLAGS_NDELAY.

View file

@ -280,7 +280,7 @@ xfs_error_report(
}
}
void
STATIC void
xfs_hex_dump(void *p, int length)
{
__uint8_t *uip = (__uint8_t*)p;

View file

@ -73,9 +73,6 @@ xfs_corruption_error(
int linenum,
inst_t *ra);
extern void
xfs_hex_dump(void *p, int length);
#define XFS_ERROR_REPORT(e, lvl, mp) \
xfs_error_report(e, lvl, mp, __FILE__, __LINE__, __return_address)
#define XFS_CORRUPTION_ERROR(e, lvl, mp, mem) \

View file

@ -59,6 +59,18 @@ STATIC void xfs_efi_item_abort(xfs_efi_log_item_t *);
STATIC void xfs_efd_item_abort(xfs_efd_log_item_t *);
void
xfs_efi_item_free(xfs_efi_log_item_t *efip)
{
int nexts = efip->efi_format.efi_nextents;
if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
kmem_free(efip, sizeof(xfs_efi_log_item_t) +
(nexts - 1) * sizeof(xfs_extent_t));
} else {
kmem_zone_free(xfs_efi_zone, efip);
}
}
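The new helper centralizes the sizing rule for log items declared with a
one-element trailing extent array. A userspace sketch of the same
over-allocation (illustrative struct, not the on-disk format; the kernel
frees via kmem_free/kmem_zone_free rather than malloc/free):

#include <stdlib.h>

struct extent { unsigned long long bno, len; };

struct efi_demo {
	int		nextents;
	struct extent	extents[1];	/* really nextents entries */
};

static struct efi_demo *efi_demo_alloc(int nexts)
{
	struct efi_demo *e;

	e = malloc(sizeof(*e) + (nexts - 1) * sizeof(struct extent));
	if (e)
		e->nextents = nexts;
	return e;
}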
/*
* This returns the number of iovecs needed to log the given efi item.
@ -120,8 +132,6 @@ xfs_efi_item_pin(xfs_efi_log_item_t *efip)
STATIC void
xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale)
{
int nexts;
int size;
xfs_mount_t *mp;
SPLDECL(s);
@ -132,21 +142,11 @@ xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale)
* xfs_trans_delete_ail() drops the AIL lock.
*/
xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s);
nexts = efip->efi_format.efi_nextents;
if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
size = sizeof(xfs_efi_log_item_t);
size += (nexts - 1) * sizeof(xfs_extent_t);
kmem_free(efip, size);
} else {
kmem_zone_free(xfs_efi_zone, efip);
}
xfs_efi_item_free(efip);
} else {
efip->efi_flags |= XFS_EFI_COMMITTED;
AIL_UNLOCK(mp, s);
}
return;
}
/*
@ -159,8 +159,6 @@ xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale)
STATIC void
xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp)
{
int nexts;
int size;
xfs_mount_t *mp;
xfs_log_item_desc_t *lidp;
SPLDECL(s);
@ -178,23 +176,11 @@ xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp)
* xfs_trans_delete_ail() drops the AIL lock.
*/
xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s);
/*
* now free the item itself
*/
nexts = efip->efi_format.efi_nextents;
if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
size = sizeof(xfs_efi_log_item_t);
size += (nexts - 1) * sizeof(xfs_extent_t);
kmem_free(efip, size);
} else {
kmem_zone_free(xfs_efi_zone, efip);
}
xfs_efi_item_free(efip);
} else {
efip->efi_flags |= XFS_EFI_COMMITTED;
AIL_UNLOCK(mp, s);
}
return;
}
/*
@ -245,18 +231,7 @@ xfs_efi_item_committed(xfs_efi_log_item_t *efip, xfs_lsn_t lsn)
STATIC void
xfs_efi_item_abort(xfs_efi_log_item_t *efip)
{
int nexts;
int size;
nexts = efip->efi_format.efi_nextents;
if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
size = sizeof(xfs_efi_log_item_t);
size += (nexts - 1) * sizeof(xfs_extent_t);
kmem_free(efip, size);
} else {
kmem_zone_free(xfs_efi_zone, efip);
}
return;
xfs_efi_item_free(efip);
}
/*
@ -288,7 +263,7 @@ xfs_efi_item_committing(xfs_efi_log_item_t *efip, xfs_lsn_t lsn)
/*
* This is the ops vector shared by all efi log items.
*/
struct xfs_item_ops xfs_efi_item_ops = {
STATIC struct xfs_item_ops xfs_efi_item_ops = {
.iop_size = (uint(*)(xfs_log_item_t*))xfs_efi_item_size,
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_efi_item_format,
@ -355,8 +330,6 @@ xfs_efi_release(xfs_efi_log_item_t *efip,
{
xfs_mount_t *mp;
int extents_left;
uint size;
int nexts;
SPLDECL(s);
mp = efip->efi_item.li_mountp;
@ -372,20 +345,10 @@ xfs_efi_release(xfs_efi_log_item_t *efip,
* xfs_trans_delete_ail() drops the AIL lock.
*/
xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s);
xfs_efi_item_free(efip);
} else {
AIL_UNLOCK(mp, s);
}
if (extents_left == 0) {
nexts = efip->efi_format.efi_nextents;
if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
size = sizeof(xfs_efi_log_item_t);
size += (nexts - 1) * sizeof(xfs_extent_t);
kmem_free(efip, size);
} else {
kmem_zone_free(xfs_efi_zone, efip);
}
}
}
/*
@ -398,8 +361,6 @@ STATIC void
xfs_efi_cancel(
xfs_efi_log_item_t *efip)
{
int nexts;
int size;
xfs_mount_t *mp;
SPLDECL(s);
@ -410,26 +371,25 @@ xfs_efi_cancel(
* xfs_trans_delete_ail() drops the AIL lock.
*/
xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s);
nexts = efip->efi_format.efi_nextents;
if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
size = sizeof(xfs_efi_log_item_t);
size += (nexts - 1) * sizeof(xfs_extent_t);
kmem_free(efip, size);
} else {
kmem_zone_free(xfs_efi_zone, efip);
}
xfs_efi_item_free(efip);
} else {
efip->efi_flags |= XFS_EFI_CANCELED;
AIL_UNLOCK(mp, s);
}
return;
}
STATIC void
xfs_efd_item_free(xfs_efd_log_item_t *efdp)
{
int nexts = efdp->efd_format.efd_nextents;
if (nexts > XFS_EFD_MAX_FAST_EXTENTS) {
kmem_free(efdp, sizeof(xfs_efd_log_item_t) +
(nexts - 1) * sizeof(xfs_extent_t));
} else {
kmem_zone_free(xfs_efd_zone, efdp);
}
}
/*
* This returns the number of iovecs needed to log the given efd item.
@ -533,9 +493,6 @@ xfs_efd_item_unlock(xfs_efd_log_item_t *efdp)
STATIC xfs_lsn_t
xfs_efd_item_committed(xfs_efd_log_item_t *efdp, xfs_lsn_t lsn)
{
uint size;
int nexts;
/*
* If we got a log I/O error, it's always the case that the LR with the
* EFI got unpinned and freed before the EFD got aborted.
@ -543,15 +500,7 @@ xfs_efd_item_committed(xfs_efd_log_item_t *efdp, xfs_lsn_t lsn)
if ((efdp->efd_item.li_flags & XFS_LI_ABORTED) == 0)
xfs_efi_release(efdp->efd_efip, efdp->efd_format.efd_nextents);
nexts = efdp->efd_format.efd_nextents;
if (nexts > XFS_EFD_MAX_FAST_EXTENTS) {
size = sizeof(xfs_efd_log_item_t);
size += (nexts - 1) * sizeof(xfs_extent_t);
kmem_free(efdp, size);
} else {
kmem_zone_free(xfs_efd_zone, efdp);
}
xfs_efd_item_free(efdp);
return (xfs_lsn_t)-1;
}
@ -565,9 +514,6 @@ xfs_efd_item_committed(xfs_efd_log_item_t *efdp, xfs_lsn_t lsn)
STATIC void
xfs_efd_item_abort(xfs_efd_log_item_t *efdp)
{
int nexts;
int size;
/*
* If we got a log I/O error, it's always the case that the LR with the
* EFI got unpinned and freed before the EFD got aborted. So don't
@ -576,15 +522,7 @@ xfs_efd_item_abort(xfs_efd_log_item_t *efdp)
if ((efdp->efd_item.li_flags & XFS_LI_ABORTED) == 0)
xfs_efi_cancel(efdp->efd_efip);
nexts = efdp->efd_format.efd_nextents;
if (nexts > XFS_EFD_MAX_FAST_EXTENTS) {
size = sizeof(xfs_efd_log_item_t);
size += (nexts - 1) * sizeof(xfs_extent_t);
kmem_free(efdp, size);
} else {
kmem_zone_free(xfs_efd_zone, efdp);
}
return;
xfs_efd_item_free(efdp);
}
/*
@ -615,7 +553,7 @@ xfs_efd_item_committing(xfs_efd_log_item_t *efip, xfs_lsn_t lsn)
/*
* This is the ops vector shared by all efd log items.
*/
struct xfs_item_ops xfs_efd_item_ops = {
STATIC struct xfs_item_ops xfs_efd_item_ops = {
.iop_size = (uint(*)(xfs_log_item_t*))xfs_efd_item_size,
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_efd_item_format,

View file

@ -118,6 +118,8 @@ xfs_efi_log_item_t *xfs_efi_init(struct xfs_mount *, uint);
xfs_efd_log_item_t *xfs_efd_init(struct xfs_mount *, xfs_efi_log_item_t *,
uint);
void xfs_efi_item_free(xfs_efi_log_item_t *);
#endif /* __KERNEL__ */
#endif /* __XFS_EXTFREE_ITEM_H__ */

View file

@ -60,7 +60,8 @@ struct fsxattr {
__u32 fsx_xflags; /* xflags field value (get/set) */
__u32 fsx_extsize; /* extsize field value (get/set)*/
__u32 fsx_nextents; /* nextents field value (get) */
unsigned char fsx_pad[16];
__u32 fsx_projid; /* project identifier (get/set) */
unsigned char fsx_pad[12];
};
#endif

View file

@ -559,32 +559,6 @@ xfs_reserve_blocks(
return(0);
}
void
xfs_fs_log_dummy(xfs_mount_t *mp)
{
xfs_trans_t *tp;
xfs_inode_t *ip;
tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
atomic_inc(&mp->m_active_trans);
if (xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0)) {
xfs_trans_cancel(tp, 0);
return;
}
ip = mp->m_rootip;
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
xfs_trans_set_sync(tp);
xfs_trans_commit(tp, 0, NULL);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
int
xfs_fs_goingdown(
xfs_mount_t *mp,

View file

@ -100,9 +100,13 @@ xfs_inofree_t xfs_inobt_mask(int i);
#endif
#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_IS_FREE)
int xfs_inobt_is_free(xfs_inobt_rec_t *rp, int i);
#define XFS_INOBT_IS_FREE(rp,i) xfs_inobt_is_free(rp,i)
#define XFS_INOBT_IS_FREE(rp,i) xfs_inobt_is_free(rp,i)
#define XFS_INOBT_IS_FREE_DISK(rp,i) xfs_inobt_is_free_disk(rp,i)
#else
#define XFS_INOBT_IS_FREE(rp,i) (((rp)->ir_free & XFS_INOBT_MASK(i)) != 0)
#define XFS_INOBT_IS_FREE(rp,i) \
(((rp)->ir_free & XFS_INOBT_MASK(i)) != 0)
#define XFS_INOBT_IS_FREE_DISK(rp,i) \
((INT_GET((rp)->ir_free, ARCH_CONVERT) & XFS_INOBT_MASK(i)) != 0)
#endif
#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_INOBT_SET_FREE)
void xfs_inobt_set_free(xfs_inobt_rec_t *rp, int i);
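The new _DISK variant exists because ir_free may still be in on-disk
(big-endian) byte order, which INT_GET converts. A userspace analogue,
assuming glibc's be64toh from <endian.h>:

#include <stdint.h>
#include <endian.h>		/* be64toh: glibc/BSD extension */

/* is inode i free, reading the mask straight from disk order? */
static int inobt_is_free_disk(uint64_t ir_free_be, int i)
{
	return (be64toh(ir_free_be) & (1ULL << i)) != 0;
}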

View file

@ -145,51 +145,6 @@ xfs_inobp_check(
}
#endif
/*
* called from bwrite on xfs inode buffers
*/
void
xfs_inobp_bwcheck(xfs_buf_t *bp)
{
xfs_mount_t *mp;
int i;
int j;
xfs_dinode_t *dip;
ASSERT(XFS_BUF_FSPRIVATE3(bp, void *) != NULL);
mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
for (i = 0; i < j; i++) {
dip = (xfs_dinode_t *) xfs_buf_offset(bp,
i * mp->m_sb.sb_inodesize);
if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC) {
cmn_err(CE_WARN,
"Bad magic # 0x%x in XFS inode buffer 0x%Lx, starting blockno %Ld, offset 0x%x",
INT_GET(dip->di_core.di_magic, ARCH_CONVERT),
(__uint64_t)(__psunsigned_t) bp,
(__int64_t) XFS_BUF_ADDR(bp),
xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
xfs_fs_cmn_err(CE_WARN, mp,
"corrupt, unmount and run xfs_repair");
}
if (!dip->di_next_unlinked) {
cmn_err(CE_WARN,
"Bad next_unlinked field (0) in XFS inode buffer 0x%p, starting blockno %Ld, offset 0x%x",
(__uint64_t)(__psunsigned_t) bp,
(__int64_t) XFS_BUF_ADDR(bp),
xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
xfs_fs_cmn_err(CE_WARN, mp,
"corrupt, unmount and run xfs_repair");
}
}
return;
}
/*
* This routine is called to map an inode number within a file
* system to the buffer containing the on-disk version of the
@ -203,7 +158,7 @@ xfs_inobp_bwcheck(xfs_buf_t *bp)
* Use xfs_imap() to determine the size and location of the
* buffer to read from disk.
*/
int
STATIC int
xfs_inotobp(
xfs_mount_t *mp,
xfs_trans_t *tp,
@ -1247,26 +1202,32 @@ xfs_ialloc(
case S_IFREG:
case S_IFDIR:
if (unlikely(pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) {
if ((mode & S_IFMT) == S_IFDIR) {
ip->i_d.di_flags |= XFS_DIFLAG_RTINHERIT;
} else {
ip->i_d.di_flags |= XFS_DIFLAG_REALTIME;
uint di_flags = 0;
if ((mode & S_IFMT) == S_IFDIR) {
if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
di_flags |= XFS_DIFLAG_RTINHERIT;
} else {
if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) {
di_flags |= XFS_DIFLAG_REALTIME;
ip->i_iocore.io_flags |= XFS_IOCORE_RT;
}
}
if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
xfs_inherit_noatime)
ip->i_d.di_flags |= XFS_DIFLAG_NOATIME;
di_flags |= XFS_DIFLAG_NOATIME;
if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
xfs_inherit_nodump)
ip->i_d.di_flags |= XFS_DIFLAG_NODUMP;
di_flags |= XFS_DIFLAG_NODUMP;
if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
xfs_inherit_sync)
ip->i_d.di_flags |= XFS_DIFLAG_SYNC;
di_flags |= XFS_DIFLAG_SYNC;
if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
xfs_inherit_nosymlinks)
ip->i_d.di_flags |= XFS_DIFLAG_NOSYMLINKS;
di_flags |= XFS_DIFLAG_NOSYMLINKS;
if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
di_flags |= XFS_DIFLAG_PROJINHERIT;
ip->i_d.di_flags |= di_flags;
}
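/*
 * Editor's note: inherited flags now accumulate in the local
 * di_flags and are applied with a single store.  For example, a
 * parent directory with RTINHERIT|NOATIME set, a new S_IFREG child
 * and the xfs_inherit_noatime tunable enabled yields
 * di_flags == XFS_DIFLAG_REALTIME | XFS_DIFLAG_NOATIME.
 */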
/* FALLTHROUGH */
case S_IFLNK:
@ -2156,7 +2117,7 @@ static __inline__ int xfs_inode_clean(xfs_inode_t *ip)
(ip->i_update_core == 0));
}
void
STATIC void
xfs_ifree_cluster(
xfs_inode_t *free_ip,
xfs_trans_t *tp,
@ -2875,7 +2836,7 @@ xfs_iunpin(
* be subsequently pinned once someone is waiting for it to be
* unpinned.
*/
void
STATIC void
xfs_iunpin_wait(
xfs_inode_t *ip)
{
@ -3601,106 +3562,42 @@ xfs_iflush_int(
/*
* Flush all inactive inodes in mp. Return true if no user references
* were found, false otherwise.
* Flush all inactive inodes in mp.
*/
int
void
xfs_iflush_all(
xfs_mount_t *mp,
int flag)
xfs_mount_t *mp)
{
int busy;
int done;
int purged;
xfs_inode_t *ip;
vmap_t vmap;
vnode_t *vp;
busy = done = 0;
while (!done) {
purged = 0;
XFS_MOUNT_ILOCK(mp);
ip = mp->m_inodes;
if (ip == NULL) {
break;
again:
XFS_MOUNT_ILOCK(mp);
ip = mp->m_inodes;
if (ip == NULL)
goto out;
do {
/* Make sure we skip markers inserted by sync */
if (ip->i_mount == NULL) {
ip = ip->i_mnext;
continue;
}
do {
/* Make sure we skip markers inserted by sync */
if (ip->i_mount == NULL) {
ip = ip->i_mnext;
continue;
}
/*
* It's up to our caller to purge the root
* and quota vnodes later.
*/
vp = XFS_ITOV_NULL(ip);
if (!vp) {
XFS_MOUNT_IUNLOCK(mp);
xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC);
purged = 1;
break;
}
if (vn_count(vp) != 0) {
if (vn_count(vp) == 1 &&
(ip == mp->m_rootip ||
(mp->m_quotainfo &&
(ip->i_ino == mp->m_sb.sb_uquotino ||
ip->i_ino == mp->m_sb.sb_gquotino)))) {
ip = ip->i_mnext;
continue;
}
if (!(flag & XFS_FLUSH_ALL)) {
busy = 1;
done = 1;
break;
}
/*
* Ignore busy inodes but continue flushing
* others.
*/
ip = ip->i_mnext;
continue;
}
/*
* Sample vp mapping while holding mp locked on MP
* systems, so we don't purge a reclaimed or
* nonexistent vnode. We break from the loop
* since we know that we modify
* it by pulling ourselves from it in xfs_reclaim()
* called via vn_purge() below. Set ip to the next
* entry in the list anyway so we'll know below
* whether we reached the end or not.
*/
VMAP(vp, vmap);
vp = XFS_ITOV_NULL(ip);
if (!vp) {
XFS_MOUNT_IUNLOCK(mp);
vn_purge(vp, &vmap);
purged = 1;
break;
} while (ip != mp->m_inodes);
/*
* We need to distinguish between when we exit the loop
* after a purge and when we simply hit the end of the
* list. We can't use the (ip == mp->m_inodes) test,
* because when we purge an inode at the start of the list
* the next inode on the list becomes mp->m_inodes. That
* would cause such a test to bail out early. The purged
* variable tells us how we got out of the loop.
*/
if (!purged) {
done = 1;
xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC);
goto again;
}
}
XFS_MOUNT_IUNLOCK(mp);
return !busy;
}
ASSERT(vn_count(vp) == 0);
ip = ip->i_mnext;
} while (ip != mp->m_inodes);
out:
XFS_MOUNT_IUNLOCK(mp);
}
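The rewritten walk drops the old busy/purged bookkeeping: whenever the
mount lock must be dropped to reclaim an inode, it restarts from the list
head instead of trusting a stale cursor. A sketch of that shape over a
circular list (invented types, locking left as comments):

struct node {
	struct node	*next;		/* circular list */
	int		reclaimable;
};

static void reclaim(struct node *n)
{
	n->reclaimable = 0;		/* stub: the kernel unhooks n here */
}

static void flush_all(struct node **headp)
{
	struct node *n;
again:
	/* lock the list */
	n = *headp;
	if (!n)
		goto out;
	do {
		if (n->reclaimable) {
			/* unlock around the reclaim ... */
			reclaim(n);
			/* ... then restart: the list may have changed */
			goto again;
		}
		n = n->next;
	} while (n != *headp);
out:
	/* unlock the list */;
}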
/*
* xfs_iaccess: check accessibility of inode for mode.

View file

@ -411,11 +411,6 @@ void xfs_ifork_next_set(xfs_inode_t *ip, int w, int n);
#define XFS_IFLUSH_ASYNC 4
#define XFS_IFLUSH_DELWRI 5
/*
* Flags for xfs_iflush_all.
*/
#define XFS_FLUSH_ALL 0x1
/*
* Flags for xfs_itruncate_start().
*/
@ -487,8 +482,6 @@ int xfs_finish_reclaim_all(struct xfs_mount *, int);
/*
* xfs_inode.c prototypes.
*/
int xfs_inotobp(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
xfs_dinode_t **, struct xfs_buf **, int *);
int xfs_itobp(struct xfs_mount *, struct xfs_trans *,
xfs_inode_t *, xfs_dinode_t **, struct xfs_buf **,
xfs_daddr_t);
@ -522,7 +515,7 @@ void xfs_ipin(xfs_inode_t *);
void xfs_iunpin(xfs_inode_t *);
int xfs_iextents_copy(xfs_inode_t *, xfs_bmbt_rec_t *, int);
int xfs_iflush(xfs_inode_t *, uint);
int xfs_iflush_all(struct xfs_mount *, int);
void xfs_iflush_all(struct xfs_mount *);
int xfs_iaccess(xfs_inode_t *, mode_t, cred_t *);
uint xfs_iroundup(uint);
void xfs_ichgtime(xfs_inode_t *, int);

View file

@ -910,7 +910,7 @@ xfs_inode_item_committing(
/*
* This is the ops vector shared by all inode log items.
*/
struct xfs_item_ops xfs_inode_item_ops = {
STATIC struct xfs_item_ops xfs_inode_item_ops = {
.iop_size = (uint(*)(xfs_log_item_t*))xfs_inode_item_size,
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_inode_item_format,

View file

@ -385,15 +385,15 @@ xfs_iomap_write_direct(
int nimaps, maps;
int error;
int bmapi_flag;
int quota_flag;
int rt;
xfs_trans_t *tp;
xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS], *imapp;
xfs_bmap_free_t free_list;
int aeof;
xfs_filblks_t datablocks;
xfs_filblks_t datablocks, qblocks, resblks;
int committed;
int numrtextents;
uint resblks;
/*
* Make sure that the dquots are there. This doesn't hold
@ -419,7 +419,6 @@ xfs_iomap_write_direct(
xfs_fileoff_t map_last_fsb;
map_last_fsb = ret_imap->br_blockcount + ret_imap->br_startoff;
if (map_last_fsb < last_fsb) {
last_fsb = map_last_fsb;
count_fsb = last_fsb - offset_fsb;
@ -428,56 +427,47 @@ xfs_iomap_write_direct(
}
/*
* determine if reserving space on
* the data or realtime partition.
* Determine if reserving space on the data or realtime partition.
*/
if ((rt = XFS_IS_REALTIME_INODE(ip))) {
int sbrtextsize, iprtextsize;
xfs_extlen_t extsz;
sbrtextsize = mp->m_sb.sb_rextsize;
iprtextsize =
ip->i_d.di_extsize ? ip->i_d.di_extsize : sbrtextsize;
numrtextents = (count_fsb + iprtextsize - 1);
do_div(numrtextents, sbrtextsize);
if (!(extsz = ip->i_d.di_extsize))
extsz = mp->m_sb.sb_rextsize;
numrtextents = qblocks = (count_fsb + extsz - 1);
do_div(numrtextents, mp->m_sb.sb_rextsize);
quota_flag = XFS_QMOPT_RES_RTBLKS;
datablocks = 0;
} else {
datablocks = count_fsb;
datablocks = qblocks = count_fsb;
quota_flag = XFS_QMOPT_RES_REGBLKS;
numrtextents = 0;
}
/*
* allocate and setup the transaction
* Allocate and setup the transaction
*/
xfs_iunlock(ip, XFS_ILOCK_EXCL);
tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks);
error = xfs_trans_reserve(tp, resblks,
XFS_WRITE_LOG_RES(mp), numrtextents,
XFS_TRANS_PERM_LOG_RES,
XFS_WRITE_LOG_COUNT);
/*
* check for running out of space
* Check for running out of space, note: need lock to return
*/
if (error)
/*
* Free the transaction structure.
*/
xfs_trans_cancel(tp, 0);
xfs_ilock(ip, XFS_ILOCK_EXCL);
if (error)
goto error_out; /* Don't return in above if .. trans ..,
need lock to return */
goto error_out;
if (XFS_TRANS_RESERVE_BLKQUOTA(mp, tp, ip, resblks)) {
if (XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag)) {
error = (EDQUOT);
goto error1;
}
nimaps = 1;
bmapi_flag = XFS_BMAPI_WRITE;
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
@ -487,31 +477,29 @@ xfs_iomap_write_direct(
bmapi_flag |= XFS_BMAPI_PREALLOC;
/*
* issue the bmapi() call to allocate the blocks
* Issue the bmapi() call to allocate the blocks
*/
XFS_BMAP_INIT(&free_list, &firstfsb);
nimaps = 1;
imapp = &imap[0];
error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
bmapi_flag, &firstfsb, 0, imapp, &nimaps, &free_list);
if (error) {
if (error)
goto error0;
}
/*
* complete the transaction
* Complete the transaction
*/
error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
if (error) {
if (error)
goto error0;
}
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
if (error) {
if (error)
goto error_out;
}
/* copy any maps to caller's array and return any error. */
/*
* Copy any maps to caller's array and return any error.
*/
if (nimaps == 0) {
error = (ENOSPC);
goto error_out;
@ -530,10 +518,11 @@ xfs_iomap_write_direct(
}
return 0;
error0: /* Cancel bmap, unlock inode, and cancel trans */
error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
xfs_bmap_cancel(&free_list);
XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag);
error1: /* Just cancel transaction */
error1: /* Just cancel transaction */
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
*nmaps = 0; /* nothing set-up here */
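The renamed labels keep the usual staged-unwind idiom: a later failure
jumps to a label that undoes everything acquired so far, and the labels
fall through in reverse order of acquisition. Schematically, with stubbed
step names standing in for the quota/bmap/transaction calls:

static int  resv_quota(void)   { return 0; }	/* stubs */
static int  do_bmapi(void)     { return 0; }
static void cancel_bmap(void)  { }
static void unresv_quota(void) { }
static void cancel_trans(void) { }

static int do_op(void)
{
	int error;

	if ((error = resv_quota()))
		goto error1;		/* only the trans to undo */
	if ((error = do_bmapi()))
		goto error0;		/* bmap + quota to undo too */
	return 0;
error0:					/* falls through into error1 */
	cancel_bmap();
	unresv_quota();
error1:
	cancel_trans();
	return error;
}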

View file

@ -134,7 +134,7 @@ STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
#define xlog_verify_tail_lsn(a,b,c)
#endif
int xlog_iclogs_empty(xlog_t *log);
STATIC int xlog_iclogs_empty(xlog_t *log);
#ifdef DEBUG
int xlog_do_error = 0;
@ -1857,7 +1857,7 @@ xlog_write(xfs_mount_t * mp,
*
* State Change: DIRTY -> ACTIVE
*/
void
STATIC void
xlog_state_clean_log(xlog_t *log)
{
xlog_in_core_t *iclog;
@ -3542,7 +3542,7 @@ xfs_log_force_umount(
return (retval);
}
int
STATIC int
xlog_iclogs_empty(xlog_t *log)
{
xlog_in_core_t *iclog;

View file

@ -535,7 +535,6 @@ typedef struct log {
/* common routines */
extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
extern int xlog_find_head(xlog_t *log, xfs_daddr_t *head_blk);
extern int xlog_find_tail(xlog_t *log,
xfs_daddr_t *head_blk,
xfs_daddr_t *tail_blk,
@ -548,7 +547,6 @@ extern void xlog_recover_process_iunlinks(xlog_t *log);
extern struct xfs_buf *xlog_get_bp(xlog_t *, int);
extern void xlog_put_bp(struct xfs_buf *);
extern int xlog_bread(xlog_t *, xfs_daddr_t, int, struct xfs_buf *);
extern xfs_caddr_t xlog_align(xlog_t *, xfs_daddr_t, int, struct xfs_buf *);
/* iclog tracing */
#define XLOG_TRACE_GRAB_FLUSH 1

View file

@ -148,7 +148,7 @@ xlog_bread(
* The buffer is kept locked across the write and is returned locked.
* This can only be used for synchronous log writes.
*/
int
STATIC int
xlog_bwrite(
xlog_t *log,
xfs_daddr_t blk_no,
@ -179,7 +179,7 @@ xlog_bwrite(
return error;
}
xfs_caddr_t
STATIC xfs_caddr_t
xlog_align(
xlog_t *log,
xfs_daddr_t blk_no,
@ -528,7 +528,7 @@ xlog_find_verify_log_record(
*
* Return: zero if normal, non-zero if error.
*/
int
STATIC int
xlog_find_head(
xlog_t *log,
xfs_daddr_t *return_head_blk)
@ -1964,7 +1964,8 @@ xlog_recover_do_reg_buffer(
* probably a good thing to do for other buf types also.
*/
error = 0;
if (buf_f->blf_flags & (XFS_BLI_UDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
if (buf_f->blf_flags &
(XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
item->ri_buf[i].i_addr,
-1, 0, XFS_QMOPT_DOWARN,
@ -2030,6 +2031,7 @@ xfs_qm_dqcheck(
}
if (INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_USER &&
INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_PROJ &&
INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_GROUP) {
if (flags & XFS_QMOPT_DOWARN)
cmn_err(CE_ALERT,
@ -2135,6 +2137,8 @@ xlog_recover_do_dquot_buffer(
type = 0;
if (buf_f->blf_flags & XFS_BLI_UDQUOT_BUF)
type |= XFS_DQ_USER;
if (buf_f->blf_flags & XFS_BLI_PDQUOT_BUF)
type |= XFS_DQ_PROJ;
if (buf_f->blf_flags & XFS_BLI_GDQUOT_BUF)
type |= XFS_DQ_GROUP;
/*
@ -2247,7 +2251,8 @@ xlog_recover_do_buffer_trans(
error = 0;
if (flags & XFS_BLI_INODE_BUF) {
error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
} else if (flags & (XFS_BLI_UDQUOT_BUF | XFS_BLI_GDQUOT_BUF)) {
} else if (flags &
(XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
} else {
xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
@ -2619,7 +2624,7 @@ xlog_recover_do_dquot_trans(
* This type of quotas was turned off, so ignore this record.
*/
type = INT_GET(recddq->d_flags, ARCH_CONVERT) &
(XFS_DQ_USER | XFS_DQ_GROUP);
(XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
ASSERT(type);
if (log->l_quotaoffs_flag & type)
return (0);
@ -2742,7 +2747,6 @@ xlog_recover_do_efd_trans(
xfs_efi_log_item_t *efip = NULL;
xfs_log_item_t *lip;
int gen;
int nexts;
__uint64_t efi_id;
SPLDECL(s);
@ -2777,22 +2781,15 @@ xlog_recover_do_efd_trans(
}
lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
}
if (lip == NULL) {
AIL_UNLOCK(mp, s);
}
/*
* If we found it, then free it up. If it wasn't there, it
* must have been overwritten in the log. Oh well.
*/
if (lip != NULL) {
nexts = efip->efi_format.efi_nextents;
if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
kmem_free(lip, sizeof(xfs_efi_log_item_t) +
((nexts - 1) * sizeof(xfs_extent_t)));
} else {
kmem_zone_free(xfs_efi_zone, efip);
}
xfs_efi_item_free(efip);
} else {
AIL_UNLOCK(mp, s);
}
}
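The open-coded teardown removed above is folded into a single xfs_efi_item_free() call; the helper itself is introduced elsewhere in this commit. A minimal sketch of its likely shape, reconstructed from the removed lines (illustrative only, not the committed body):

	/* Sketch: free an EFI log item, mirroring the removed logic.
	 * Oversized extent arrays were plain kmem allocations; the
	 * common small case comes from xfs_efi_zone. */
	STATIC void
	xfs_efi_item_free(xfs_efi_log_item_t *efip)
	{
		int nexts = efip->efi_format.efi_nextents;

		if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
			kmem_free(efip, sizeof(xfs_efi_log_item_t) +
				  (nexts - 1) * sizeof(xfs_extent_t));
		} else {
			kmem_zone_free(xfs_efi_zone, efip);
		}
	}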

View file

@@ -1658,6 +1658,11 @@ xfs_inobt_is_free(xfs_inobt_rec_t *rp, int i)
{
return XFS_INOBT_IS_FREE(rp, i);
}
int
xfs_inobt_is_free_disk(xfs_inobt_rec_t *rp, int i)
{
return XFS_INOBT_IS_FREE_DISK(rp, i);
}
#endif
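For reference, xfs_inobt_is_free_disk() is the out-of-line form of XFS_INOBT_IS_FREE_DISK, which tests the inode-allocation free mask in its on-disk byte order rather than in host order. A hedged sketch of the assumed macro pair (the INT_GET/ARCH_CONVERT form follows the conversion idiom used throughout this code base; it is not quoted from the commit):

	/* Assumed shape: both test bit i of the 64-bit inode-free mask;
	 * the _DISK variant converts from on-disk byte order first. */
	#define XFS_INOBT_MASK(i)	((xfs_inofree_t)1 << (i))
	#define XFS_INOBT_IS_FREE(rp,i)	\
		(((rp)->ir_free & XFS_INOBT_MASK(i)) != 0)
	#define XFS_INOBT_IS_FREE_DISK(rp,i)	\
		((INT_GET((rp)->ir_free, ARCH_CONVERT) & XFS_INOBT_MASK(i)) != 0)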
#if XFS_WANT_FUNCS_C || (XFS_WANT_SPACE_C && XFSSO_XFS_INOBT_IS_LAST_REC)

View file

@@ -64,6 +64,7 @@
STATIC void xfs_mount_log_sbunit(xfs_mount_t *, __int64_t);
STATIC int xfs_uuid_mount(xfs_mount_t *);
STATIC void xfs_uuid_unmount(xfs_mount_t *mp);
STATIC void xfs_unmountfs_wait(xfs_mount_t *);
static struct {
short offset;
@@ -555,7 +556,7 @@ xfs_readsb(xfs_mount_t *mp)
* fields from the superblock associated with the given
* mount structure
*/
void
STATIC void
xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
{
int i;
@@ -1081,7 +1082,7 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
int64_t fsid;
#endif
xfs_iflush_all(mp, XFS_FLUSH_ALL);
xfs_iflush_all(mp);
XFS_QM_DQPURGEALL(mp,
XFS_QMOPT_UQUOTA | XFS_QMOPT_GQUOTA | XFS_QMOPT_UMOUNTING);
@@ -1111,15 +1112,6 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
*/
ASSERT(mp->m_inodes == NULL);
/*
* We may still have bufs in the process of being written out.
* We must wait for the I/O completion of those. The sync flag here
* does a two-pass iteration through the bufcache.
*/
if (XFS_FORCED_SHUTDOWN(mp)) {
xfs_incore_relse(mp->m_ddev_targp, 0, 1); /* synchronous */
}
xfs_unmountfs_close(mp, cr);
if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0)
xfs_uuid_unmount(mp);
@@ -1146,7 +1138,7 @@ xfs_unmountfs_close(xfs_mount_t *mp, struct cred *cr)
xfs_free_buftarg(mp->m_ddev_targp, 0);
}
void
STATIC void
xfs_unmountfs_wait(xfs_mount_t *mp)
{
if (mp->m_logdev_targp != mp->m_ddev_targp)

View file

@@ -141,7 +141,7 @@ typedef int (*xfs_dqattach_t)(struct xfs_inode *, uint);
typedef void (*xfs_dqdetach_t)(struct xfs_inode *);
typedef int (*xfs_dqpurgeall_t)(struct xfs_mount *, uint);
typedef int (*xfs_dqvopalloc_t)(struct xfs_mount *,
struct xfs_inode *, uid_t, gid_t, uint,
struct xfs_inode *, uid_t, gid_t, prid_t, uint,
struct xfs_dquot **, struct xfs_dquot **);
typedef void (*xfs_dqvopcreate_t)(struct xfs_trans *, struct xfs_inode *,
struct xfs_dquot *, struct xfs_dquot *);
@@ -185,8 +185,8 @@ typedef struct xfs_qmops {
(*(mp)->m_qm_ops.xfs_dqdetach)(ip)
#define XFS_QM_DQPURGEALL(mp, fl) \
(*(mp)->m_qm_ops.xfs_dqpurgeall)(mp, fl)
#define XFS_QM_DQVOPALLOC(mp, ip, uid, gid, fl, dq1, dq2) \
(*(mp)->m_qm_ops.xfs_dqvopalloc)(mp, ip, uid, gid, fl, dq1, dq2)
#define XFS_QM_DQVOPALLOC(mp, ip, uid, gid, prid, fl, dq1, dq2) \
(*(mp)->m_qm_ops.xfs_dqvopalloc)(mp, ip, uid, gid, prid, fl, dq1, dq2)
#define XFS_QM_DQVOPCREATE(mp, tp, ip, dq1, dq2) \
(*(mp)->m_qm_ops.xfs_dqvopcreate)(tp, ip, dq1, dq2)
#define XFS_QM_DQVOPRENAME(mp, ip) \
@@ -544,7 +544,6 @@ extern void xfs_mount_free(xfs_mount_t *mp, int remove_bhv);
extern int xfs_mountfs(struct vfs *, xfs_mount_t *mp, int);
extern int xfs_unmountfs(xfs_mount_t *, struct cred *);
extern void xfs_unmountfs_wait(xfs_mount_t *);
extern void xfs_unmountfs_close(xfs_mount_t *, struct cred *);
extern int xfs_unmountfs_writesb(xfs_mount_t *);
extern int xfs_unmount_flush(xfs_mount_t *, int);

View file

@@ -96,7 +96,7 @@ typedef struct xfs_dqblk {
* flags for q_flags field in the dquot.
*/
#define XFS_DQ_USER 0x0001 /* a user quota */
/* #define XFS_DQ_PROJ 0x0002 -- project quota (IRIX) */
#define XFS_DQ_PROJ 0x0002 /* project quota */
#define XFS_DQ_GROUP 0x0004 /* a group quota */
#define XFS_DQ_FLOCKED 0x0008 /* flush lock taken */
#define XFS_DQ_DIRTY 0x0010 /* dquot is dirty */
@@ -104,6 +104,8 @@ typedef struct xfs_dqblk {
#define XFS_DQ_INACTIVE 0x0040 /* dq off mplist & hashlist */
#define XFS_DQ_MARKER 0x0080 /* sentinel */
#define XFS_DQ_ALLTYPES (XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP)
/*
* In the worst case, when both user and group quotas are on,
* we can have a max of three dquots changing in a single transaction.
@@ -124,7 +126,7 @@ typedef struct xfs_dqblk {
typedef struct xfs_dq_logformat {
__uint16_t qlf_type; /* dquot log item type */
__uint16_t qlf_size; /* size of this item */
xfs_dqid_t qlf_id; /* usr/grp id number : 32 bits */
xfs_dqid_t qlf_id; /* usr/grp/proj id : 32 bits */
__int64_t qlf_blkno; /* blkno of dquot buffer */
__int32_t qlf_len; /* len of dquot buffer */
__uint32_t qlf_boffset; /* off of dquot in buffer */
@@ -152,9 +154,9 @@ typedef struct xfs_qoff_logformat {
#define XFS_UQUOTA_ACCT 0x0001 /* user quota accounting ON */
#define XFS_UQUOTA_ENFD 0x0002 /* user quota limits enforced */
#define XFS_UQUOTA_CHKD 0x0004 /* quotacheck run on usr quotas */
#define XFS_PQUOTA_ACCT 0x0008 /* (IRIX) project quota accounting ON */
#define XFS_GQUOTA_ENFD 0x0010 /* group quota limits enforced */
#define XFS_GQUOTA_CHKD 0x0020 /* quotacheck run on grp quotas */
#define XFS_PQUOTA_ACCT 0x0008 /* project quota accounting ON */
#define XFS_OQUOTA_ENFD 0x0010 /* other (grp/prj) quota limits enforced */
#define XFS_OQUOTA_CHKD 0x0020 /* quotacheck run on other (grp/prj) quotas */
#define XFS_GQUOTA_ACCT 0x0040 /* group quota accounting ON */
/*
@@ -162,17 +164,22 @@ typedef struct xfs_qoff_logformat {
* are in the process of getting turned off. These flags are in m_qflags but
* never in sb_qflags.
*/
#define XFS_UQUOTA_ACTIVE 0x0080 /* uquotas are being turned off */
#define XFS_GQUOTA_ACTIVE 0x0100 /* gquotas are being turned off */
#define XFS_UQUOTA_ACTIVE 0x0100 /* uquotas are being turned off */
#define XFS_PQUOTA_ACTIVE 0x0200 /* pquotas are being turned off */
#define XFS_GQUOTA_ACTIVE 0x0400 /* gquotas are being turned off */
/*
* Checking XFS_IS_*QUOTA_ON() while holding any inode lock guarantees
* quota will not be switched off as long as that inode lock is held.
*/
#define XFS_IS_QUOTA_ON(mp) ((mp)->m_qflags & (XFS_UQUOTA_ACTIVE | \
XFS_GQUOTA_ACTIVE))
XFS_GQUOTA_ACTIVE | \
XFS_PQUOTA_ACTIVE))
#define XFS_IS_OQUOTA_ON(mp) ((mp)->m_qflags & (XFS_GQUOTA_ACTIVE | \
XFS_PQUOTA_ACTIVE))
#define XFS_IS_UQUOTA_ON(mp) ((mp)->m_qflags & XFS_UQUOTA_ACTIVE)
#define XFS_IS_GQUOTA_ON(mp) ((mp)->m_qflags & XFS_GQUOTA_ACTIVE)
#define XFS_IS_PQUOTA_ON(mp) ((mp)->m_qflags & XFS_PQUOTA_ACTIVE)
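Note the asymmetry these macros encode: user quota keeps private ENFD/CHKD bits, while group and project quota share the single XFS_OQUOTA_* ("other") pair, so a mount can enforce group or project quota but not both. A hypothetical helper showing the intended usage (the function is illustrative, not part of the commit):

	/* Hypothetical: is the "other" (group-or-project) quota type both
	 * active and enforced on this mount?  Only one of the two can be,
	 * since they share the XFS_OQUOTA_ENFD bit. */
	STATIC int
	xfs_oquota_enforced(xfs_mount_t *mp)
	{
		return XFS_IS_OQUOTA_ON(mp) &&
		       (mp->m_qflags & XFS_OQUOTA_ENFD);
	}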
/*
* Flags to tell various functions what to do. Not all of these are meaningful
@@ -182,7 +189,7 @@ typedef struct xfs_qoff_logformat {
#define XFS_QMOPT_DQLOCK 0x0000001 /* dqlock */
#define XFS_QMOPT_DQALLOC 0x0000002 /* alloc dquot ondisk if needed */
#define XFS_QMOPT_UQUOTA 0x0000004 /* user dquot requested */
#define XFS_QMOPT_GQUOTA 0x0000008 /* group dquot requested */
#define XFS_QMOPT_PQUOTA 0x0000008 /* project dquot requested */
#define XFS_QMOPT_FORCE_RES 0x0000010 /* ignore quota limits */
#define XFS_QMOPT_DQSUSER 0x0000020 /* don't cache super users dquot */
#define XFS_QMOPT_SBVERSION 0x0000040 /* change superblock version num */
@@ -192,6 +199,7 @@ typedef struct xfs_qoff_logformat {
#define XFS_QMOPT_DOWARN 0x0000400 /* increase warning cnt if necessary */
#define XFS_QMOPT_ILOCKED 0x0000800 /* inode is already locked (excl) */
#define XFS_QMOPT_DQREPAIR 0x0001000 /* repair dquot, if damaged. */
#define XFS_QMOPT_GQUOTA 0x0002000 /* group dquot requested */
/*
* flags to xfs_trans_mod_dquot to indicate which field needs to be
@@ -231,7 +239,8 @@ typedef struct xfs_qoff_logformat {
#define XFS_TRANS_DQ_DELRTBCOUNT XFS_QMOPT_DELRTBCOUNT
#define XFS_QMOPT_QUOTALL (XFS_QMOPT_UQUOTA|XFS_QMOPT_GQUOTA)
#define XFS_QMOPT_QUOTALL \
(XFS_QMOPT_UQUOTA | XFS_QMOPT_PQUOTA | XFS_QMOPT_GQUOTA)
#define XFS_QMOPT_RESBLK_MASK (XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS)
#ifdef __KERNEL__
@@ -246,21 +255,33 @@ typedef struct xfs_qoff_logformat {
*/
#define XFS_NOT_DQATTACHED(mp, ip) ((XFS_IS_UQUOTA_ON(mp) &&\
(ip)->i_udquot == NULL) || \
(XFS_IS_GQUOTA_ON(mp) && \
(XFS_IS_OQUOTA_ON(mp) && \
(ip)->i_gdquot == NULL))
#define XFS_QM_NEED_QUOTACHECK(mp) ((XFS_IS_UQUOTA_ON(mp) && \
(mp->m_sb.sb_qflags & \
XFS_UQUOTA_CHKD) == 0) || \
(XFS_IS_GQUOTA_ON(mp) && \
(mp->m_sb.sb_qflags & \
XFS_GQUOTA_CHKD) == 0))
#define XFS_QM_NEED_QUOTACHECK(mp) \
((XFS_IS_UQUOTA_ON(mp) && \
(mp->m_sb.sb_qflags & XFS_UQUOTA_CHKD) == 0) || \
(XFS_IS_GQUOTA_ON(mp) && \
((mp->m_sb.sb_qflags & XFS_OQUOTA_CHKD) == 0 || \
(mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT))) || \
(XFS_IS_PQUOTA_ON(mp) && \
((mp->m_sb.sb_qflags & XFS_OQUOTA_CHKD) == 0 || \
(mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT))))
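One consequence of the shared OQUOTA_CHKD bit is visible in the extra ACCT tests above: switching the "other" quota type between group and project invalidates the previous quotacheck. A hypothetical illustration using the flag values defined earlier in this header:

	/* Hypothetical: group quota was quotachecked on a previous mount
	 * (sb_qflags records GQUOTA_ACCT with OQUOTA_CHKD set), and this
	 * mount enables project quota instead.  The XFS_GQUOTA_ACCT arm
	 * of XFS_QM_NEED_QUOTACHECK() fires, so the dquots are rebuilt
	 * rather than trusted. */
	STATIC int
	xfs_example_needs_quotacheck(xfs_mount_t *mp)
	{
		mp->m_qflags = XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE;
		mp->m_sb.sb_qflags = XFS_GQUOTA_ACCT | XFS_OQUOTA_CHKD;
		return XFS_QM_NEED_QUOTACHECK(mp);	/* evaluates true */
	}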
#define XFS_MOUNT_QUOTA_SET1 (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\
XFS_UQUOTA_CHKD|XFS_PQUOTA_ACCT|\
XFS_OQUOTA_ENFD|XFS_OQUOTA_CHKD)
#define XFS_MOUNT_QUOTA_SET2 (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\
XFS_UQUOTA_CHKD|XFS_GQUOTA_ACCT|\
XFS_OQUOTA_ENFD|XFS_OQUOTA_CHKD)
#define XFS_MOUNT_QUOTA_ALL (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\
XFS_UQUOTA_CHKD|XFS_GQUOTA_ACCT|\
XFS_GQUOTA_ENFD|XFS_GQUOTA_CHKD)
XFS_UQUOTA_CHKD|XFS_PQUOTA_ACCT|\
XFS_OQUOTA_ENFD|XFS_OQUOTA_CHKD|\
XFS_GQUOTA_ACCT)
#define XFS_MOUNT_QUOTA_MASK (XFS_MOUNT_QUOTA_ALL | XFS_UQUOTA_ACTIVE | \
XFS_GQUOTA_ACTIVE)
XFS_GQUOTA_ACTIVE | XFS_PQUOTA_ACTIVE)
/*
@@ -331,15 +352,8 @@ typedef struct xfs_dqtrxops {
#define XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp) \
XFS_DQTRXOP_VOID(mp, tp, qo_unreserve_and_mod_dquots)
#define XFS_TRANS_RESERVE_BLKQUOTA(mp, tp, ip, nblks) \
XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, 0, \
XFS_QMOPT_RES_REGBLKS)
#define XFS_TRANS_RESERVE_BLKQUOTA_FORCE(mp, tp, ip, nblks) \
XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, 0, \
XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES)
#define XFS_TRANS_UNRESERVE_BLKQUOTA(mp, tp, ip, nblks) \
XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, -(nblks), 0, \
XFS_QMOPT_RES_REGBLKS)
#define XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, nblks, ninos, flags) \
XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, -(nblks), -(ninos), flags)
#define XFS_TRANS_RESERVE_QUOTA(mp, tp, ud, gd, nb, ni, f) \
XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp, ud, gd, nb, ni, \
f | XFS_QMOPT_RES_REGBLKS)

View file

@@ -234,9 +234,6 @@ xfs_lock_for_rename(
return 0;
}
int rename_which_error_return = 0;
/*
* xfs_rename
*/
@@ -316,7 +313,6 @@ xfs_rename(
&num_inodes);
if (error) {
rename_which_error_return = __LINE__;
/*
* We have nothing locked, no inode references, and
* no transaction, so just get out.
@@ -332,7 +328,6 @@
*/
if (target_ip == NULL && (src_dp != target_dp) &&
target_dp->i_d.di_nlink >= XFS_MAXLINK) {
rename_which_error_return = __LINE__;
error = XFS_ERROR(EMLINK);
xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED);
goto rele_return;
@@ -359,7 +354,6 @@ xfs_rename(
XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT);
}
if (error) {
rename_which_error_return = __LINE__;
xfs_trans_cancel(tp, 0);
goto rele_return;
}
@@ -369,7 +363,6 @@
*/
if ((error = XFS_QM_DQVOPRENAME(mp, inodes))) {
xfs_trans_cancel(tp, cancel_flags);
rename_which_error_return = __LINE__;
goto rele_return;
}
@@ -413,7 +406,6 @@ xfs_rename(
if (spaceres == 0 &&
(error = XFS_DIR_CANENTER(mp, tp, target_dp, target_name,
target_namelen))) {
rename_which_error_return = __LINE__;
goto error_return;
}
/*
@@ -425,11 +417,9 @@ xfs_rename(
target_namelen, src_ip->i_ino,
&first_block, &free_list, spaceres);
if (error == ENOSPC) {
rename_which_error_return = __LINE__;
goto error_return;
}
if (error) {
rename_which_error_return = __LINE__;
goto abort_return;
}
xfs_ichgtime(target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -437,7 +427,6 @@
if (new_parent && src_is_directory) {
error = xfs_bumplink(tp, target_dp);
if (error) {
rename_which_error_return = __LINE__;
goto abort_return;
}
}
@@ -455,7 +444,6 @@
if (!(XFS_DIR_ISEMPTY(target_ip->i_mount, target_ip)) ||
(target_ip->i_d.di_nlink > 2)) {
error = XFS_ERROR(EEXIST);
rename_which_error_return = __LINE__;
goto error_return;
}
}
@@ -473,7 +461,6 @@
target_namelen, src_ip->i_ino, &first_block,
&free_list, spaceres);
if (error) {
rename_which_error_return = __LINE__;
goto abort_return;
}
xfs_ichgtime(target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -484,7 +471,6 @@
*/
error = xfs_droplink(tp, target_ip);
if (error) {
rename_which_error_return = __LINE__;
goto abort_return;
}
target_ip_dropped = 1;
@@ -495,7 +481,6 @@
*/
error = xfs_droplink(tp, target_ip);
if (error) {
rename_which_error_return = __LINE__;
goto abort_return;
}
}
@@ -519,7 +504,6 @@
&free_list, spaceres);
ASSERT(error != EEXIST);
if (error) {
rename_which_error_return = __LINE__;
goto abort_return;
}
xfs_ichgtime(src_ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -550,7 +534,6 @@
*/
error = xfs_droplink(tp, src_dp);
if (error) {
rename_which_error_return = __LINE__;
goto abort_return;
}
}
@@ -558,7 +541,6 @@
error = XFS_DIR_REMOVENAME(mp, tp, src_dp, src_name, src_namelen,
src_ip->i_ino, &first_block, &free_list, spaceres);
if (error) {
rename_which_error_return = __LINE__;
goto abort_return;
}
xfs_ichgtime(src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

View file

@@ -331,25 +331,6 @@ xfs_trans_reserve(
}
/*
* This is called to set a callback to be called when the given
* transaction is committed to disk. The transaction pointer and the
* argument pointer will be passed to the callback routine.
*
* Only one callback can be associated with any single transaction.
*/
void
xfs_trans_callback(
xfs_trans_t *tp,
xfs_trans_callback_t callback,
void *arg)
{
ASSERT(tp->t_callback == NULL);
tp->t_callback = callback;
tp->t_callarg = arg;
}
/*
* Record the indicated change to the given field for application
* to the file system's superblock when the transaction commits.
@@ -551,7 +532,7 @@ xfs_trans_apply_sb_deltas(
*
* This is done efficiently with a single call to xfs_mod_incore_sb_batch().
*/
void
STATIC void
xfs_trans_unreserve_and_mod_sb(
xfs_trans_t *tp)
{

View file

@@ -987,8 +987,6 @@ xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint);
xfs_trans_t *xfs_trans_dup(xfs_trans_t *);
int xfs_trans_reserve(xfs_trans_t *, uint, uint, uint,
uint, uint);
void xfs_trans_callback(xfs_trans_t *,
void (*)(xfs_trans_t *, void *), void *);
void xfs_trans_mod_sb(xfs_trans_t *, uint, long);
struct xfs_buf *xfs_trans_get_buf(xfs_trans_t *, struct xfs_buftarg *, xfs_daddr_t,
int, uint);
@@ -1010,7 +1008,6 @@ int xfs_trans_iget(struct xfs_mount *, xfs_trans_t *,
xfs_ino_t , uint, uint, struct xfs_inode **);
void xfs_trans_ijoin(xfs_trans_t *, struct xfs_inode *, uint);
void xfs_trans_ihold(xfs_trans_t *, struct xfs_inode *);
void xfs_trans_ihold_release(xfs_trans_t *, struct xfs_inode *);
void xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint);
void xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint);
struct xfs_efi_log_item *xfs_trans_get_efi(xfs_trans_t *, uint);

View file

@@ -976,6 +976,7 @@ xfs_trans_dquot_buf(
ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
ASSERT(type == XFS_BLI_UDQUOT_BUF ||
type == XFS_BLI_PDQUOT_BUF ||
type == XFS_BLI_GDQUOT_BUF);
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);

View file

@@ -253,24 +253,6 @@ xfs_trans_ihold(
ip->i_itemp->ili_flags |= XFS_ILI_HOLD;
}
/*
* Cancel the previous inode hold request made on this inode
* for this transaction.
*/
/*ARGSUSED*/
void
xfs_trans_ihold_release(
xfs_trans_t *tp,
xfs_inode_t *ip)
{
ASSERT(ip->i_transp == tp);
ASSERT(ip->i_itemp != NULL);
ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);
ip->i_itemp->ili_flags &= ~XFS_ILI_HOLD;
}
/*
* This is called to mark the fields indicated in fieldmask as needing

View file

@@ -55,7 +55,7 @@ typedef signed long long int __int64_t;
typedef unsigned long long int __uint64_t;
typedef enum { B_FALSE,B_TRUE } boolean_t;
typedef __int64_t prid_t; /* project ID */
typedef __uint32_t prid_t; /* project ID */
typedef __uint32_t inst_t; /* an instruction */
typedef __s64 xfs_off_t; /* <file offset> type */

View file

@@ -428,7 +428,7 @@ xfs_truncate_file(
if (ip->i_ino != mp->m_sb.sb_uquotino)
ASSERT(ip->i_udquot);
}
if (XFS_IS_GQUOTA_ON(mp)) {
if (XFS_IS_OQUOTA_ON(mp)) {
if (ip->i_ino != mp->m_sb.sb_gquotino)
ASSERT(ip->i_gdquot);
}

View file

@@ -367,16 +367,6 @@ xfs_finish_flags(
return XFS_ERROR(EROFS);
}
/*
* disallow mount attempts with (IRIX) project quota enabled
*/
if (XFS_SB_VERSION_HASQUOTA(&mp->m_sb) &&
(mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT)) {
cmn_err(CE_WARN,
"XFS: cannot mount a filesystem with IRIX project quota enabled");
return XFS_ERROR(ENOSYS);
}
/*
* check for shared mount.
*/
@@ -622,7 +612,34 @@ xfs_unmount(
return XFS_ERROR(error);
}
#define REMOUNT_READONLY_FLAGS (SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT)
STATIC int
xfs_quiesce_fs(
xfs_mount_t *mp)
{
int count = 0, pincount;
xfs_refcache_purge_mp(mp);
xfs_flush_buftarg(mp->m_ddev_targp, 0);
xfs_finish_reclaim_all(mp, 0);
/* This loop must run at least twice.
* The first pass flushes most metadata, but that
* in turn generates more metadata (typically
* directory updates), which then must be flushed
* and logged before we can write the unmount record.
*/
do {
xfs_syncsub(mp, SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT, 0, NULL);
pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
if (!pincount) {
delay(50);
count++;
}
} while (count < 2);
return 0;
}
STATIC int
xfs_mntupdate(
@@ -632,8 +649,7 @@ xfs_mntupdate(
{
struct vfs *vfsp = bhvtovfs(bdp);
xfs_mount_t *mp = XFS_BHVTOM(bdp);
int pincount, error;
int count = 0;
int error;
if (args->flags & XFSMNT_NOATIME)
mp->m_flags |= XFS_MOUNT_NOATIME;
@@ -645,25 +661,7 @@ xfs_mntupdate(
}
if (*flags & MS_RDONLY) {
xfs_refcache_purge_mp(mp);
xfs_flush_buftarg(mp->m_ddev_targp, 0);
xfs_finish_reclaim_all(mp, 0);
/* This loop must run at least twice.
* The first pass flushes most metadata, but that
* in turn generates more metadata (typically
* directory updates), which then must be flushed
* and logged before we can write the unmount record.
*/
do {
VFS_SYNC(vfsp, REMOUNT_READONLY_FLAGS, NULL, error);
pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
if (!pincount) {
delay(50);
count++;
}
} while (count < 2);
xfs_quiesce_fs(mp);
/* Ok now write out an unmount record */
xfs_log_unmount_write(mp);
@@ -879,10 +877,12 @@ xfs_sync(
int flags,
cred_t *credp)
{
xfs_mount_t *mp;
xfs_mount_t *mp = XFS_BHVTOM(bdp);
mp = XFS_BHVTOM(bdp);
return (xfs_syncsub(mp, flags, 0, NULL));
if (unlikely(flags == SYNC_QUIESCE))
return xfs_quiesce_fs(mp);
else
return xfs_syncsub(mp, flags, 0, NULL);
}
/*
@@ -1681,7 +1681,7 @@ suffix_strtoul(const char *cp, char **endp, unsigned int base)
return simple_strtoul(cp, endp, base) << shift_left_factor;
}
int
STATIC int
xfs_parseargs(
struct bhv_desc *bhv,
char *options,
@@ -1867,7 +1867,7 @@ printk("XFS: irixsgid is now a sysctl(2) variable, option is deprecated.\n");
return 0;
}
int
STATIC int
xfs_showargs(
struct bhv_desc *bhv,
struct seq_file *m)

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -351,21 +351,28 @@ xfs_setattr(
* If the IDs do change before we take the ilock, we're covered
* because the i_*dquot fields will get updated anyway.
*/
if (XFS_IS_QUOTA_ON(mp) && (mask & (XFS_AT_UID|XFS_AT_GID))) {
if (XFS_IS_QUOTA_ON(mp) &&
(mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID))) {
uint qflags = 0;
if (mask & XFS_AT_UID) {
if ((mask & XFS_AT_UID) && XFS_IS_UQUOTA_ON(mp)) {
uid = vap->va_uid;
qflags |= XFS_QMOPT_UQUOTA;
} else {
uid = ip->i_d.di_uid;
}
if (mask & XFS_AT_GID) {
if ((mask & XFS_AT_GID) && XFS_IS_GQUOTA_ON(mp)) {
gid = vap->va_gid;
qflags |= XFS_QMOPT_GQUOTA;
} else {
gid = ip->i_d.di_gid;
}
if ((mask & XFS_AT_PROJID) && XFS_IS_PQUOTA_ON(mp)) {
projid = vap->va_projid;
qflags |= XFS_QMOPT_PQUOTA;
} else {
projid = ip->i_d.di_projid;
}
/*
* We take a reference when we initialize udqp and gdqp,
* so it is important that we never blindly double trip on
@@ -373,7 +380,8 @@
*/
ASSERT(udqp == NULL);
ASSERT(gdqp == NULL);
code = XFS_QM_DQVOPALLOC(mp, ip, uid,gid, qflags, &udqp, &gdqp);
code = XFS_QM_DQVOPALLOC(mp, ip, uid, gid, projid, qflags,
&udqp, &gdqp);
if (code)
return (code);
}
@@ -499,8 +507,6 @@
* that the group ID supplied to the chown() function
* shall be equal to either the group ID or one of the
* supplementary group IDs of the calling process.
*
* XXX: How does restricted_chown affect projid?
*/
if (restricted_chown &&
(iuid != uid || (igid != gid &&
@@ -510,10 +516,11 @@
goto error_return;
}
/*
* Do a quota reservation only if uid or gid is actually
* Do a quota reservation only if uid/projid/gid is actually
* going to change.
*/
if ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
(XFS_IS_PQUOTA_ON(mp) && iprojid != projid) ||
(XFS_IS_GQUOTA_ON(mp) && igid != gid)) {
ASSERT(tp);
code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp,
@@ -774,6 +781,7 @@ xfs_setattr(
}
if (igid != gid) {
if (XFS_IS_GQUOTA_ON(mp)) {
ASSERT(!XFS_IS_PQUOTA_ON(mp));
ASSERT(mask & XFS_AT_GID);
ASSERT(gdqp);
olddquot2 = XFS_QM_DQVOPCHOWN(mp, tp, ip,
@@ -782,6 +790,13 @@
ip->i_d.di_gid = gid;
}
if (iprojid != projid) {
if (XFS_IS_PQUOTA_ON(mp)) {
ASSERT(!XFS_IS_GQUOTA_ON(mp));
ASSERT(mask & XFS_AT_PROJID);
ASSERT(gdqp);
olddquot2 = XFS_QM_DQVOPCHOWN(mp, tp, ip,
&ip->i_gdquot, gdqp);
}
ip->i_d.di_projid = projid;
/*
* We may have to rev the inode as well as
@@ -843,6 +858,8 @@
di_flags |= XFS_DIFLAG_NOATIME;
if (vap->va_xflags & XFS_XFLAG_NODUMP)
di_flags |= XFS_DIFLAG_NODUMP;
if (vap->va_xflags & XFS_XFLAG_PROJINHERIT)
di_flags |= XFS_DIFLAG_PROJINHERIT;
if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
if (vap->va_xflags & XFS_XFLAG_RTINHERIT)
di_flags |= XFS_DIFLAG_RTINHERIT;
@@ -1898,7 +1915,9 @@ xfs_create(
/* Return through std_return after this point. */
udqp = gdqp = NULL;
if (vap->va_mask & XFS_AT_PROJID)
if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
prid = dp->i_d.di_projid;
else if (vap->va_mask & XFS_AT_PROJID)
prid = (xfs_prid_t)vap->va_projid;
else
prid = (xfs_prid_t)dfltprid;
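The same three-way selection recurs verbatim in xfs_mkdir() and xfs_symlink() below; a hypothetical consolidation, shown only to make the precedence explicit (a PROJINHERIT parent wins over a caller-supplied ID, which wins over the default):

	/* Hypothetical helper, not in the commit: choose the project ID
	 * for a new inode created under directory dp. */
	STATIC xfs_prid_t
	xfs_new_inode_prid(xfs_inode_t *dp, vattr_t *vap)
	{
		if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
			return (xfs_prid_t)dp->i_d.di_projid;
		if (vap->va_mask & XFS_AT_PROJID)
			return (xfs_prid_t)vap->va_projid;
		return (xfs_prid_t)dfltprid;
	}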
@@ -1907,7 +1926,7 @@ xfs_create(
* Make sure that we have allocated dquot(s) on disk.
*/
error = XFS_QM_DQVOPALLOC(mp, dp,
current_fsuid(credp), current_fsgid(credp),
current_fsuid(credp), current_fsgid(credp), prid,
XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT, &udqp, &gdqp);
if (error)
goto std_return;
@@ -2604,17 +2623,7 @@ xfs_link(
if (src_vp->v_type == VDIR)
return XFS_ERROR(EPERM);
/*
* For now, manually find the XFS behavior descriptor for
* the source vnode. If it doesn't exist then something
* is wrong and we should just return an error.
* Eventually we need to figure out how link is going to
* work in the face of stacked vnodes.
*/
src_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(src_vp), &xfs_vnodeops);
if (src_bdp == NULL) {
return XFS_ERROR(EXDEV);
}
sip = XFS_BHVTOI(src_bdp);
tdp = XFS_BHVTOI(target_dir_bdp);
mp = tdp->i_mount;
@@ -2681,6 +2690,17 @@ xfs_link(
goto error_return;
}
/*
* If we are using project inheritance, we only allow hard link
* creation in our tree when the project IDs are the same; else
* the tree quota mechanism could be circumvented.
*/
if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
(tdp->i_d.di_projid != sip->i_d.di_projid))) {
error = XFS_ERROR(EPERM);
goto error_return;
}
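For instance, with a target directory that carries XFS_DIFLAG_PROJINHERIT and project ID 5, an attempt to hard-link an inode belonging to project 7 into it now fails with EPERM: the link would let the file's blocks be accounted to one project tree while being reachable from another.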
if (resblks == 0 &&
(error = XFS_DIR_CANENTER(mp, tp, tdp, target_name,
target_namelen)))
@@ -2803,7 +2823,9 @@ xfs_mkdir(
mp = dp->i_mount;
udqp = gdqp = NULL;
if (vap->va_mask & XFS_AT_PROJID)
if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
prid = dp->i_d.di_projid;
else if (vap->va_mask & XFS_AT_PROJID)
prid = (xfs_prid_t)vap->va_projid;
else
prid = (xfs_prid_t)dfltprid;
@@ -2812,7 +2834,7 @@ xfs_mkdir(
* Make sure that we have allocated dquot(s) on disk.
*/
error = XFS_QM_DQVOPALLOC(mp, dp,
current_fsuid(credp), current_fsgid(credp),
current_fsuid(credp), current_fsgid(credp), prid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
if (error)
goto std_return;
@@ -3357,7 +3379,9 @@ xfs_symlink(
/* Return through std_return after this point. */
udqp = gdqp = NULL;
if (vap->va_mask & XFS_AT_PROJID)
if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
prid = dp->i_d.di_projid;
else if (vap->va_mask & XFS_AT_PROJID)
prid = (xfs_prid_t)vap->va_projid;
else
prid = (xfs_prid_t)dfltprid;
@@ -3366,7 +3390,7 @@ xfs_symlink(
* Make sure that we have allocated dquot(s) on disk.
*/
error = XFS_QM_DQVOPALLOC(mp, dp,
current_fsuid(credp), current_fsgid(credp),
current_fsuid(credp), current_fsgid(credp), prid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
if (error)
goto std_return;
@@ -4028,7 +4052,7 @@ xfs_finish_reclaim_all(xfs_mount_t *mp, int noblock)
* errno on error
*
*/
int
STATIC int
xfs_alloc_file_space(
xfs_inode_t *ip,
xfs_off_t offset,
@@ -4151,9 +4175,8 @@ xfs_alloc_file_space(
break;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = XFS_TRANS_RESERVE_QUOTA_BYDQUOTS(mp, tp,
ip->i_udquot, ip->i_gdquot, resblks, 0, rt ?
XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
error = XFS_TRANS_RESERVE_QUOTA(mp, tp,
ip->i_udquot, ip->i_gdquot, resblks, 0, 0);
if (error)
goto error1;
@@ -4305,6 +4328,7 @@ xfs_free_file_space(
xfs_off_t len,
int attr_flags)
{
vnode_t *vp;
int committed;
int done;
xfs_off_t end_dmi_offset;
@@ -4325,9 +4349,11 @@
xfs_trans_t *tp;
int need_iolock = 1;
vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
vp = XFS_ITOV(ip);
mp = ip->i_mount;
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
return error;
@@ -4344,7 +4370,7 @@
DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_WRITE)) {
if (end_dmi_offset > ip->i_d.di_size)
end_dmi_offset = ip->i_d.di_size;
error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOV(ip),
error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, vp,
offset, end_dmi_offset - offset,
AT_DELAY_FLAG(attr_flags), NULL);
if (error)
@@ -4363,7 +4389,14 @@
ioffset = offset & ~(rounding - 1);
if (ilen & (rounding - 1))
ilen = (ilen + rounding) & ~(rounding - 1);
xfs_inval_cached_pages(XFS_ITOV(ip), &(ip->i_iocore), ioffset, 0, 0);
if (VN_CACHED(vp) != 0) {
xfs_inval_cached_trace(&ip->i_iocore, ioffset, -1,
ctooff(offtoct(ioffset)), -1);
VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(ioffset)),
-1, FI_REMAPF_LOCKED);
}
/*
* Need to zero the stuff we're not freeing, on disk.
* If it's a realtime file & can't use unwritten extents then we