Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs-2.6

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs-2.6:
  ext3: Fix dirtying of journalled buffers in data=journal mode
  ext3: default to ordered mode
  quota: Use mark_inode_dirty_sync instead of mark_inode_dirty
  quota: Change quota error message to print out disk and function name
  MAINTAINERS: Update entries of ext2 and ext3
  MAINTAINERS: Update address of Andreas Dilger
  ext3: Avoid filesystem corruption after a crash under heavy delete load
  ext3: remove vestiges of nobh support
  ext3: Fix set but unused variables
  quota: clean up quota active checks
  quota: Clean up the namespace in dqblk_xfs.h
  quota: check quota reservation on remove_dquot_ref
Commit 90e0c22596

@@ -2237,6 +2237,7 @@ S: Maintained
 F: drivers/net/eth16i.c
 
 EXT2 FILE SYSTEM
+M: Jan Kara <jack@suse.cz>
 L: linux-ext4@vger.kernel.org
 S: Maintained
 F: Documentation/filesystems/ext2.txt
@@ -2244,8 +2245,9 @@ F: fs/ext2/
 F: include/linux/ext2*
 
 EXT3 FILE SYSTEM
+M: Jan Kara <jack@suse.cz>
 M: Andrew Morton <akpm@linux-foundation.org>
-M: Andreas Dilger <adilger@sun.com>
+M: Andreas Dilger <adilger.kernel@dilger.ca>
 L: linux-ext4@vger.kernel.org
 S: Maintained
 F: Documentation/filesystems/ext3.txt
@@ -2254,7 +2256,7 @@ F: include/linux/ext3*
 
 EXT4 FILE SYSTEM
 M: "Theodore Ts'o" <tytso@mit.edu>
-M: Andreas Dilger <adilger@sun.com>
+M: Andreas Dilger <adilger.kernel@dilger.ca>
 L: linux-ext4@vger.kernel.org
 W: http://ext4.wiki.kernel.org
 Q: http://patchwork.ozlabs.org/project/linux-ext4/list/

@@ -31,6 +31,7 @@ config EXT3_FS
 config EXT3_DEFAULTS_TO_ORDERED
 	bool "Default to 'data=ordered' in ext3"
 	depends on EXT3_FS
+	default y
 	help
 	  The journal mode options for ext3 have different tradeoffs
 	  between when data is guaranteed to be on disk and

@@ -1149,9 +1149,25 @@ static int walk_page_buffers( handle_t *handle,
 static int do_journal_get_write_access(handle_t *handle,
 					struct buffer_head *bh)
 {
+	int dirty = buffer_dirty(bh);
+	int ret;
+
 	if (!buffer_mapped(bh) || buffer_freed(bh))
 		return 0;
-	return ext3_journal_get_write_access(handle, bh);
+	/*
+	 * __block_prepare_write() could have dirtied some buffers. Clean
+	 * the dirty bit as jbd2_journal_get_write_access() could complain
+	 * otherwise about fs integrity issues. Setting of the dirty bit
+	 * by __block_prepare_write() isn't a real problem here as we clear
+	 * the bit before releasing a page lock and thus writeback cannot
+	 * ever write the buffer.
+	 */
+	if (dirty)
+		clear_buffer_dirty(bh);
+	ret = ext3_journal_get_write_access(handle, bh);
+	if (!ret && dirty)
+		ret = ext3_journal_dirty_metadata(handle, bh);
+	return ret;
 }
 
 /*
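
For orientation, a hedged sketch of how the fixed helper is driven (the caller shape is illustrative; walk_page_buffers() is the function named in the hunk header above, and in data=journal mode ext3_write_begin() applies do_journal_get_write_access() to every buffer backing the page, which is why a buffer can already be dirty when it reaches the journal):

/* Illustrative call site only, assuming fs/ext3/inode.c context. */
static int example_journal_page_buffers(handle_t *handle, struct page *page,
					unsigned from, unsigned to)
{
	/* Hand each buffer in [from, to) to the journal via the helper above. */
	return walk_page_buffers(handle, page_buffers(page), from, to,
				 NULL, do_journal_get_write_access);
}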
@@ -1625,10 +1641,7 @@ static int ext3_writeback_writepage(struct page *page,
 		goto out_fail;
 	}
 
-	if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode))
-		ret = nobh_writepage(page, ext3_get_block, wbc);
-	else
-		ret = block_write_full_page(page, ext3_get_block, wbc);
+	ret = block_write_full_page(page, ext3_get_block, wbc);
 
 	err = ext3_journal_stop(handle);
 	if (!ret)
@@ -1922,17 +1935,6 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page,
 	length = blocksize - (offset & (blocksize - 1));
 	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
 
-	/*
-	 * For "nobh" option, we can only work if we don't need to
-	 * read-in the page - otherwise we create buffers to do the IO.
-	 */
-	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
-	     ext3_should_writeback_data(inode) && PageUptodate(page)) {
-		zero_user(page, offset, length);
-		set_page_dirty(page);
-		goto unlock;
-	}
-
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, blocksize, 0);
 
@@ -2283,27 +2285,6 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
 					   (__le32*)bh->b_data + addr_per_block,
 					   depth);
 
-			/*
-			 * We've probably journalled the indirect block several
-			 * times during the truncate. But it's no longer
-			 * needed and we now drop it from the transaction via
-			 * journal_revoke().
-			 *
-			 * That's easy if it's exclusively part of this
-			 * transaction. But if it's part of the committing
-			 * transaction then journal_forget() will simply
-			 * brelse() it. That means that if the underlying
-			 * block is reallocated in ext3_get_block(),
-			 * unmap_underlying_metadata() will find this block
-			 * and will try to get rid of it. damn, damn.
-			 *
-			 * If this block has already been committed to the
-			 * journal, a revoke record will be written. And
-			 * revoke records must be emitted *before* clearing
-			 * this block's bit in the bitmaps.
-			 */
-			ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
-
 			/*
 			 * Everything below this this pointer has been
 			 * released. Now let this top-of-subtree go.
@@ -2327,6 +2308,31 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
 				truncate_restart_transaction(handle, inode);
 			}
 
+			/*
+			 * We've probably journalled the indirect block several
+			 * times during the truncate. But it's no longer
+			 * needed and we now drop it from the transaction via
+			 * journal_revoke().
+			 *
+			 * That's easy if it's exclusively part of this
+			 * transaction. But if it's part of the committing
+			 * transaction then journal_forget() will simply
+			 * brelse() it. That means that if the underlying
+			 * block is reallocated in ext3_get_block(),
+			 * unmap_underlying_metadata() will find this block
+			 * and will try to get rid of it. damn, damn. Thus
+			 * we don't allow a block to be reallocated until
+			 * a transaction freeing it has fully committed.
+			 *
+			 * We also have to make sure journal replay after a
+			 * crash does not overwrite non-journaled data blocks
+			 * with old metadata when the block got reallocated for
+			 * data. Thus we have to store a revoke record for a
+			 * block in the same transaction in which we free the
+			 * block.
+			 */
+			ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
+
 			ext3_free_blocks(handle, inode, nr, 1);
 
 			if (parent_bh) {
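
The comment moved by these two hunks carries the fix for "ext3: Avoid filesystem corruption after a crash under heavy delete load": the revoke record written via ext3_forget() must land in the same transaction that frees the block, otherwise journal replay after a crash can copy stale indirect-block contents over data that has since reused the block. A minimal sketch of the ordering the new placement guarantees (the helper name is illustrative, not part of the patch):

/* Sketch only: forget/revoke and free under one running handle, so the
 * revoke record and the bitmap update commit together. */
static void example_drop_indirect(handle_t *handle, struct inode *inode,
				  struct buffer_head *bh, ext3_fsblk_t nr)
{
	ext3_forget(handle, 1, inode, bh, bh->b_blocknr);	/* revoke first */
	ext3_free_blocks(handle, inode, nr, 1);			/* then free */
}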

@@ -1447,7 +1447,6 @@ static int ext3_add_entry (handle_t *handle, struct dentry *dentry,
 	struct inode *inode)
 {
 	struct inode *dir = dentry->d_parent->d_inode;
-	unsigned long offset;
 	struct buffer_head * bh;
 	struct ext3_dir_entry_2 *de;
 	struct super_block * sb;
@@ -1469,7 +1468,7 @@ static int ext3_add_entry (handle_t *handle, struct dentry *dentry,
 		ext3_mark_inode_dirty(handle, dir);
 	}
 	blocks = dir->i_size >> sb->s_blocksize_bits;
-	for (block = 0, offset = 0; block < blocks; block++) {
+	for (block = 0; block < blocks; block++) {
 		bh = ext3_bread(handle, dir, block, 0, &retval);
 		if(!bh)
 			return retval;

@@ -964,7 +964,6 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
 		      ext3_fsblk_t n_blocks_count)
 {
 	ext3_fsblk_t o_blocks_count;
-	unsigned long o_groups_count;
 	ext3_grpblk_t last;
 	ext3_grpblk_t add;
 	struct buffer_head * bh;
@@ -976,7 +975,6 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
 	 * yet: we're going to revalidate es->s_blocks_count after
 	 * taking the s_resize_lock below. */
 	o_blocks_count = le32_to_cpu(es->s_blocks_count);
-	o_groups_count = EXT3_SB(sb)->s_groups_count;
 
 	if (test_opt(sb, DEBUG))
 		printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK" upto "E3FSBLK" blocks\n",

@@ -661,9 +661,6 @@ static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
 	 */
 	seq_puts(seq, ",barrier=");
 	seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
-	if (test_opt(sb, NOBH))
-		seq_puts(seq, ",nobh");
-
 	seq_printf(seq, ",data=%s", data_mode_string(test_opt(sb, DATA_FLAGS)));
 	if (test_opt(sb, DATA_ERR_ABORT))
 		seq_puts(seq, ",data_err=abort");
@@ -1255,10 +1252,12 @@ set_qf_format:
 			*n_blocks_count = option;
 			break;
 		case Opt_nobh:
-			set_opt(sbi->s_mount_opt, NOBH);
+			ext3_msg(sb, KERN_WARNING,
+				"warning: ignoring deprecated nobh option");
 			break;
 		case Opt_bh:
-			clear_opt(sbi->s_mount_opt, NOBH);
+			ext3_msg(sb, KERN_WARNING,
+				"warning: ignoring deprecated bh option");
 			break;
 		default:
 			ext3_msg(sb, KERN_ERR,
@@ -2001,14 +2000,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
 		break;
 	}
 
-	if (test_opt(sb, NOBH)) {
-		if (!(test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)) {
-			ext3_msg(sb, KERN_WARNING,
-				"warning: ignoring nobh option - "
-				"it is supported only with writeback mode");
-			clear_opt(sbi->s_mount_opt, NOBH);
-		}
-	}
 	/*
 	 * The journal_load will have done any necessary log recovery,
 	 * so we can safely mount the rest of the filesystem now.

@@ -1449,10 +1449,10 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
 
 	switch (sdp->sd_args.ar_quota) {
 	case GFS2_QUOTA_ON:
-		fqs->qs_flags |= (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
+		fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
 		/*FALLTHRU*/
 	case GFS2_QUOTA_ACCOUNT:
-		fqs->qs_flags |= (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
+		fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
 		break;
 	case GFS2_QUOTA_OFF:
 		break;
@@ -1498,7 +1498,7 @@ static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
 
 	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
 	fdq->d_version = FS_DQUOT_VERSION;
-	fdq->d_flags = (type == QUOTA_USER) ? XFS_USER_QUOTA : XFS_GROUP_QUOTA;
+	fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
 	fdq->d_id = id;
 	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit);
 	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn);
@@ -1533,12 +1533,12 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
 	switch(type) {
 	case USRQUOTA:
 		type = QUOTA_USER;
-		if (fdq->d_flags != XFS_USER_QUOTA)
+		if (fdq->d_flags != FS_USER_QUOTA)
 			return -EINVAL;
 		break;
 	case GRPQUOTA:
 		type = QUOTA_GROUP;
-		if (fdq->d_flags != XFS_GROUP_QUOTA)
+		if (fdq->d_flags != FS_GROUP_QUOTA)
 			return -EINVAL;
 		break;
 	default:

@@ -1281,13 +1281,9 @@ int journal_check_used_features (journal_t *journal, unsigned long compat,
 int journal_check_available_features (journal_t *journal, unsigned long compat,
 				      unsigned long ro, unsigned long incompat)
 {
-	journal_superblock_t *sb;
-
 	if (!compat && !ro && !incompat)
 		return 1;
 
-	sb = journal->j_superblock;
-
 	/* We can support any known requested features iff the
 	 * superblock is in version 2. Otherwise we fail to support any
 	 * extended sb features. */
@@ -1481,7 +1477,6 @@ int journal_flush(journal_t *journal)
 
 int journal_wipe(journal_t *journal, int write)
 {
-	journal_superblock_t *sb;
 	int err = 0;
 
 	J_ASSERT (!(journal->j_flags & JFS_LOADED));
@@ -1490,8 +1485,6 @@ int journal_wipe(journal_t *journal, int write)
 	if (err)
 		return err;
 
-	sb = journal->j_superblock;
-
 	if (!journal->j_tail)
 		goto no_recovery;
 

@@ -283,12 +283,9 @@ int journal_recover(journal_t *journal)
 int journal_skip_recovery(journal_t *journal)
 {
 	int err;
-	journal_superblock_t * sb;
-
 	struct recovery_info info;
 
 	memset (&info, 0, sizeof(info));
-	sb = journal->j_superblock;
 
 	err = do_one_pass(journal, &info, PASS_SCAN);
 
@@ -297,7 +294,8 @@ int journal_skip_recovery(journal_t *journal)
 		++journal->j_transaction_sequence;
 	} else {
 #ifdef CONFIG_JBD_DEBUG
-		int dropped = info.end_transaction - be32_to_cpu(sb->s_sequence);
+		int dropped = info.end_transaction -
+			      be32_to_cpu(journal->j_superblock->s_sequence);
 #endif
 		jbd_debug(1,
 			  "JBD: ignoring %d transaction%s from the journal.\n",
@@ -321,11 +319,6 @@ static int do_one_pass(journal_t *journal,
 	unsigned int sequence;
 	int blocktype;
 
-	/* Precompute the maximum metadata descriptors in a descriptor block */
-	int MAX_BLOCKS_PER_DESC;
-	MAX_BLOCKS_PER_DESC = ((journal->j_blocksize-sizeof(journal_header_t))
-			       / sizeof(journal_block_tag_t));
-
 	/*
 	 * First thing is to establish what we expect to find in the log
 	 * (in terms of transaction IDs), and where (in terms of log

@@ -132,6 +132,22 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
 EXPORT_SYMBOL(dq_data_lock);
 
+void __quota_error(struct super_block *sb, const char *func,
+		   const char *fmt, ...)
+{
+	va_list args;
+
+	if (printk_ratelimit()) {
+		va_start(args, fmt);
+		printk(KERN_ERR "Quota error (device %s): %s: ",
+		       sb->s_id, func);
+		vprintk(fmt, args);
+		printk("\n");
+		va_end(args);
+	}
+}
+EXPORT_SYMBOL(__quota_error);
+
 #if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
 static char *quotatypes[] = INITQFNAMES;
 #endif
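
For reference, a minimal sketch of a call site for the new interface (hypothetical function, assuming kernel context with <linux/quotaops.h>, where quota_error() expands to __quota_error(sb, __func__, fmt, ...) and therefore prints the device name and calling function automatically):

#include <linux/quotaops.h>

/* Illustrative only: the message text is made up, the wrapper is real. */
static int example_report_write_error(struct super_block *sb, unsigned int blk,
				      int ret)
{
	if (ret < 0)
		quota_error(sb, "Can't write block %u (error %d)", blk, ret);
	return ret;
}
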
@@ -705,11 +721,8 @@ void dqput(struct dquot *dquot)
 		return;
 #ifdef CONFIG_QUOTA_DEBUG
 	if (!atomic_read(&dquot->dq_count)) {
-		printk("VFS: dqput: trying to free free dquot\n");
-		printk("VFS: device %s, dquot of %s %d\n",
-		       dquot->dq_sb->s_id,
-		       quotatypes[dquot->dq_type],
-		       dquot->dq_id);
+		quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
+			    quotatypes[dquot->dq_type], dquot->dq_id);
 		BUG();
 	}
 #endif
@@ -732,9 +745,9 @@ we_slept:
 			/* Commit dquot before releasing */
 			ret = dquot->dq_sb->dq_op->write_dquot(dquot);
 			if (ret < 0) {
-				printk(KERN_ERR "VFS: cannot write quota structure on "
-				       "device %s (error %d). Quota may get out of "
-				       "sync!\n", dquot->dq_sb->s_id, ret);
+				quota_error(dquot->dq_sb, "Can't write quota structure"
+					    " (error %d). Quota may get out of sync!",
+					    ret);
 				/*
 				 * We clear dirty bit anyway, so that we avoid
 				 * infinite loop here
@@ -914,9 +927,9 @@ static void add_dquot_ref(struct super_block *sb, int type)
 
 #ifdef CONFIG_QUOTA_DEBUG
 	if (reserved) {
-		printk(KERN_WARNING "VFS (%s): Writes happened before quota"
-			" was turned on thus quota information is probably "
-			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
+		quota_error(sb, "Writes happened before quota was turned on "
+			    "thus quota information is probably inconsistent. "
+			    "Please run quotacheck(8)");
 	}
 #endif
 }
@@ -947,7 +960,9 @@ static int remove_inode_dquot_ref(struct inode *inode, int type,
 		if (dqput_blocks(dquot)) {
 #ifdef CONFIG_QUOTA_DEBUG
 			if (atomic_read(&dquot->dq_count) != 1)
-				printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
+				quota_error(inode->i_sb, "Adding dquot with "
+					    "dq_count %d to dispose list",
+					    atomic_read(&dquot->dq_count));
 #endif
 			spin_lock(&dq_list_lock);
 			/* As dquot must have currently users it can't be on
@@ -986,6 +1001,7 @@ static void remove_dquot_ref(struct super_block *sb, int type,
 		struct list_head *tofree_head)
 {
 	struct inode *inode;
+	int reserved = 0;
 
 	spin_lock(&inode_lock);
 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
@@ -995,10 +1011,20 @@ static void remove_dquot_ref(struct super_block *sb, int type,
 		 * only quota pointers and these have separate locking
 		 * (dqptr_sem).
 		 */
-		if (!IS_NOQUOTA(inode))
+		if (!IS_NOQUOTA(inode)) {
+			if (unlikely(inode_get_rsv_space(inode) > 0))
+				reserved = 1;
 			remove_inode_dquot_ref(inode, type, tofree_head);
+		}
 	}
 	spin_unlock(&inode_lock);
+#ifdef CONFIG_QUOTA_DEBUG
+	if (reserved) {
+		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
+			" was disabled thus quota information is probably "
+			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
+	}
+#endif
 }
 
 /* Gather all references from inodes and drop them */
@@ -1304,6 +1330,15 @@ static int info_bdq_free(struct dquot *dquot, qsize_t space)
 	return QUOTA_NL_NOWARN;
 }
 
+static int dquot_active(const struct inode *inode)
+{
+	struct super_block *sb = inode->i_sb;
+
+	if (IS_NOQUOTA(inode))
+		return 0;
+	return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
+}
+
 /*
  * Initialize quota pointers in inode
  *
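
A note on the helper added above: sb_any_quota_loaded() and sb_any_quota_suspended() return per-quota-type bitmasks, so "loaded & ~suspended" is non-zero exactly when at least one quota type is loaded and not suspended; dquot_active() folds the IS_NOQUOTA() test into the same check. The remaining dquot.c hunks then apply this pattern (caller name illustrative, assuming fs/quota/dquot.c context):

/* Hedged sketch of the conversion the later hunks perform. */
static int example_quota_hook(struct inode *inode)
{
	/* was: if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) */
	if (!dquot_active(inode))
		return 0;
	/* ... quota work under dqptr_sem ... */
	return 0;
}
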
@@ -1323,7 +1358,7 @@ static void __dquot_initialize(struct inode *inode, int type)
 
 	/* First test before acquiring mutex - solves deadlocks when we
 	 * re-enter the quota code and are already holding the mutex */
-	if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode))
+	if (!dquot_active(inode))
 		return;
 
 	/* First get references to structures we might need. */
@@ -1507,7 +1542,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
 	 * First test before acquiring mutex - solves deadlocks when we
 	 * re-enter the quota code and are already holding the mutex
 	 */
-	if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) {
+	if (!dquot_active(inode)) {
 		inode_incr_space(inode, number, reserve);
 		goto out;
 	}
@@ -1559,7 +1594,7 @@ int dquot_alloc_inode(const struct inode *inode)
 
 	/* First test before acquiring mutex - solves deadlocks when we
 	 * re-enter the quota code and are already holding the mutex */
-	if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode))
+	if (!dquot_active(inode))
 		return 0;
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 		warntype[cnt] = QUOTA_NL_NOWARN;
@@ -1596,7 +1631,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
 {
 	int cnt;
 
-	if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) {
+	if (!dquot_active(inode)) {
 		inode_claim_rsv_space(inode, number);
 		return 0;
 	}
@@ -1629,7 +1664,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
 
 	/* First test before acquiring mutex - solves deadlocks when we
 	 * re-enter the quota code and are already holding the mutex */
-	if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) {
+	if (!dquot_active(inode)) {
 		inode_decr_space(inode, number, reserve);
 		return;
 	}
@@ -1667,7 +1702,7 @@ void dquot_free_inode(const struct inode *inode)
 
 	/* First test before acquiring mutex - solves deadlocks when we
 	 * re-enter the quota code and are already holding the mutex */
-	if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode))
+	if (!dquot_active(inode))
 		return;
 
 	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
@@ -1790,7 +1825,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
 	struct super_block *sb = inode->i_sb;
 	int ret;
 
-	if (!sb_any_quota_active(sb) || IS_NOQUOTA(inode))
+	if (!dquot_active(inode))
 		return 0;
 
 	if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid)
@@ -1957,7 +1992,7 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
 			truncate_inode_pages(&toputinode[cnt]->i_data,
 					     0);
 			mutex_unlock(&toputinode[cnt]->i_mutex);
-			mark_inode_dirty(toputinode[cnt]);
+			mark_inode_dirty_sync(toputinode[cnt]);
 		}
 		mutex_unlock(&dqopt->dqonoff_mutex);
 	}
@@ -2270,7 +2305,7 @@ static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
 	memset(di, 0, sizeof(*di));
 	di->d_version = FS_DQUOT_VERSION;
 	di->d_flags = dquot->dq_type == USRQUOTA ?
-			XFS_USER_QUOTA : XFS_GROUP_QUOTA;
+			FS_USER_QUOTA : FS_GROUP_QUOTA;
 	di->d_id = dquot->dq_id;
 
 	spin_lock(&dq_data_lock);

@@ -65,8 +65,7 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
 	ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
 	       info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
 	if (ret != info->dqi_usable_bs) {
-		q_warn(KERN_WARNING "VFS: dquota write failed on "
-			"dev %s\n", sb->s_id);
+		quota_error(sb, "dquota write failed");
 		if (ret >= 0)
 			ret = -EIO;
 	}
@@ -160,9 +159,8 @@ static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
 	dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
 	/* No matter whether write succeeds block is out of list */
 	if (write_blk(info, blk, buf) < 0)
-		q_warn(KERN_ERR
-		       "VFS: Can't write block (%u) with free entries.\n",
-		       blk);
+		quota_error(info->dqi_sb, "Can't write block (%u) "
+			    "with free entries", blk);
 	return 0;
 out_buf:
 	kfree(tmpbuf);
@@ -252,9 +250,8 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
 	if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
 		*err = remove_free_dqentry(info, buf, blk);
 		if (*err < 0) {
-			q_warn(KERN_ERR "VFS: find_free_dqentry(): Can't "
-			       "remove block (%u) from entry free list.\n",
-			       blk);
+			quota_error(dquot->dq_sb, "Can't remove block (%u) "
+				    "from entry free list", blk);
 			goto out_buf;
 		}
 	}
@@ -268,16 +265,15 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
 	}
 #ifdef __QUOTA_QT_PARANOIA
 	if (i == qtree_dqstr_in_blk(info)) {
-		printk(KERN_ERR "VFS: find_free_dqentry(): Data block full "
-		       "but it shouldn't.\n");
+		quota_error(dquot->dq_sb, "Data block full but it shouldn't");
 		*err = -EIO;
 		goto out_buf;
 	}
 #endif
 	*err = write_blk(info, blk, buf);
 	if (*err < 0) {
-		q_warn(KERN_ERR "VFS: find_free_dqentry(): Can't write quota "
-		       "data block %u.\n", blk);
+		quota_error(dquot->dq_sb, "Can't write quota data block %u",
+			    blk);
 		goto out_buf;
 	}
 	dquot->dq_off = (blk << info->dqi_blocksize_bits) +
@@ -311,8 +307,8 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 	} else {
 		ret = read_blk(info, *treeblk, buf);
 		if (ret < 0) {
-			q_warn(KERN_ERR "VFS: Can't read tree quota block "
-			       "%u.\n", *treeblk);
+			quota_error(dquot->dq_sb, "Can't read tree quota "
+				    "block %u", *treeblk);
 			goto out_buf;
 		}
 	}
@@ -323,9 +319,9 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 	if (depth == info->dqi_qtree_depth - 1) {
 #ifdef __QUOTA_QT_PARANOIA
 		if (newblk) {
-			printk(KERN_ERR "VFS: Inserting already present quota "
-			       "entry (block %u).\n",
-			       le32_to_cpu(ref[get_index(info,
+			quota_error(dquot->dq_sb, "Inserting already present "
+				    "quota entry (block %u)",
+				    le32_to_cpu(ref[get_index(info,
 						dquot->dq_id, depth)]));
 			ret = -EIO;
 			goto out_buf;
@@ -373,8 +369,8 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
 	if (!dquot->dq_off) {
 		ret = dq_insert_tree(info, dquot);
 		if (ret < 0) {
-			q_warn(KERN_ERR "VFS: Error %zd occurred while "
-			       "creating quota.\n", ret);
+			quota_error(sb, "Error %zd occurred while creating "
+				    "quota", ret);
 			kfree(ddquot);
 			return ret;
 		}
@@ -385,8 +381,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
 	ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
 				    dquot->dq_off);
 	if (ret != info->dqi_entry_size) {
-		q_warn(KERN_WARNING "VFS: dquota write failed on dev %s\n",
-		       sb->s_id);
+		quota_error(sb, "dquota write failed");
 		if (ret >= 0)
 			ret = -ENOSPC;
 	} else {
@@ -410,14 +405,15 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 	if (!buf)
 		return -ENOMEM;
 	if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
-		q_warn(KERN_ERR "VFS: Quota structure has offset to other "
-		       "block (%u) than it should (%u).\n", blk,
-		       (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
+		quota_error(dquot->dq_sb, "Quota structure has offset to "
+			    "other block (%u) than it should (%u)", blk,
+			    (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
		goto out_buf;
 	}
 	ret = read_blk(info, blk, buf);
 	if (ret < 0) {
-		q_warn(KERN_ERR "VFS: Can't read quota data block %u\n", blk);
+		quota_error(dquot->dq_sb, "Can't read quota data block %u",
+			    blk);
 		goto out_buf;
 	}
 	dh = (struct qt_disk_dqdbheader *)buf;
@@ -427,8 +423,8 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 		if (ret >= 0)
 			ret = put_free_dqblk(info, buf, blk);
 		if (ret < 0) {
-			q_warn(KERN_ERR "VFS: Can't move quota data block (%u) "
-			       "to free list.\n", blk);
+			quota_error(dquot->dq_sb, "Can't move quota data block "
+				    "(%u) to free list", blk);
 			goto out_buf;
 		}
 	} else {
@@ -440,15 +436,15 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 			/* Insert will write block itself */
 			ret = insert_free_dqentry(info, buf, blk);
 			if (ret < 0) {
-				q_warn(KERN_ERR "VFS: Can't insert quota data "
-				       "block (%u) to free entry list.\n", blk);
+				quota_error(dquot->dq_sb, "Can't insert quota "
+					    "data block (%u) to free entry list", blk);
 				goto out_buf;
 			}
 		} else {
 			ret = write_blk(info, blk, buf);
 			if (ret < 0) {
-				q_warn(KERN_ERR "VFS: Can't write quota data "
-				       "block %u\n", blk);
+				quota_error(dquot->dq_sb, "Can't write quota "
+					    "data block %u", blk);
 				goto out_buf;
 			}
 		}
@@ -472,7 +468,8 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 		return -ENOMEM;
 	ret = read_blk(info, *blk, buf);
 	if (ret < 0) {
-		q_warn(KERN_ERR "VFS: Can't read quota data block %u\n", *blk);
+		quota_error(dquot->dq_sb, "Can't read quota data "
+			    "block %u", blk);
 		goto out_buf;
 	}
 	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
@@ -496,8 +493,8 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 		} else {
 			ret = write_blk(info, *blk, buf);
 			if (ret < 0)
-				q_warn(KERN_ERR "VFS: Can't write quota tree "
-				       "block %u.\n", *blk);
+				quota_error(dquot->dq_sb, "Can't write quota "
+					    "tree block %u", blk);
 		}
 	}
 out_buf:
@@ -529,7 +526,8 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
 		return -ENOMEM;
 	ret = read_blk(info, blk, buf);
 	if (ret < 0) {
-		q_warn(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
+		quota_error(dquot->dq_sb, "Can't read quota tree "
+			    "block %u", blk);
 		goto out_buf;
 	}
 	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
@@ -539,8 +537,8 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
 		ddquot += info->dqi_entry_size;
 	}
 	if (i == qtree_dqstr_in_blk(info)) {
-		q_warn(KERN_ERR "VFS: Quota for id %u referenced "
-		       "but not present.\n", dquot->dq_id);
+		quota_error(dquot->dq_sb, "Quota for id %u referenced "
+			    "but not present", dquot->dq_id);
 		ret = -EIO;
 		goto out_buf;
 	} else {
@@ -564,7 +562,8 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
 		return -ENOMEM;
 	ret = read_blk(info, blk, buf);
 	if (ret < 0) {
-		q_warn(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
+		quota_error(dquot->dq_sb, "Can't read quota tree block %u",
+			    blk);
 		goto out_buf;
 	}
 	ret = 0;
@@ -598,7 +597,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
 #ifdef __QUOTA_QT_PARANOIA
 	/* Invalidated quota? */
 	if (!sb_dqopt(dquot->dq_sb)->files[type]) {
-		printk(KERN_ERR "VFS: Quota invalidated while reading!\n");
+		quota_error(sb, "Quota invalidated while reading!");
 		return -EIO;
 	}
 #endif
@@ -607,8 +606,8 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
 	offset = find_dqentry(info, dquot);
 	if (offset <= 0) { /* Entry not present? */
 		if (offset < 0)
-			q_warn(KERN_ERR "VFS: Can't read quota "
-			       "structure for id %u.\n", dquot->dq_id);
+			quota_error(sb, "Can't read quota structure "
+				    "for id %u", dquot->dq_id);
 		dquot->dq_off = 0;
 		set_bit(DQ_FAKE_B, &dquot->dq_flags);
 		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
@@ -625,8 +624,8 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
 	if (ret != info->dqi_entry_size) {
 		if (ret >= 0)
 			ret = -EIO;
-		q_warn(KERN_ERR "VFS: Error while reading quota "
-		       "structure for id %u.\n", dquot->dq_id);
+		quota_error(sb, "Error while reading quota structure for id %u",
+			    dquot->dq_id);
 		set_bit(DQ_FAKE_B, &dquot->dq_flags);
 		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
 		kfree(ddquot);

@@ -22,10 +22,4 @@ struct qt_disk_dqdbheader {
 
 #define QT_TREEOFF	1	/* Offset of tree in file in blocks */
 
-#define q_warn(fmt, args...) \
-	do { \
-		if (printk_ratelimit()) \
-			printk(fmt, ## args); \
-	} while(0)
-
 #endif /* _LINUX_QUOTAIO_TREE_H */

@@ -95,8 +95,7 @@ static int v1_commit_dqblk(struct dquot *dquot)
 				(char *)&dqblk, sizeof(struct v1_disk_dqblk),
 				v1_dqoff(dquot->dq_id));
 	if (ret != sizeof(struct v1_disk_dqblk)) {
-		printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
-			dquot->dq_sb->s_id);
+		quota_error(dquot->dq_sb, "dquota write failed");
 		if (ret >= 0)
 			ret = -EIO;
 		goto out;

@@ -63,9 +63,8 @@ static int v2_read_header(struct super_block *sb, int type,
 	size = sb->s_op->quota_read(sb, type, (char *)dqhead,
 				    sizeof(struct v2_disk_dqheader), 0);
 	if (size != sizeof(struct v2_disk_dqheader)) {
-		q_warn(KERN_WARNING "quota_v2: Failed header read:"
-		       " expected=%zd got=%zd\n",
-		       sizeof(struct v2_disk_dqheader), size);
+		quota_error(sb, "Failed header read: expected=%zd got=%zd",
+			    sizeof(struct v2_disk_dqheader), size);
 		return 0;
 	}
 	return 1;
@@ -106,8 +105,7 @@ static int v2_read_file_info(struct super_block *sb, int type)
 	size = sb->s_op->quota_read(sb, type, (char *)&dinfo,
 				    sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
 	if (size != sizeof(struct v2_disk_dqinfo)) {
-		q_warn(KERN_WARNING "quota_v2: Can't read info structure on device %s.\n",
-		       sb->s_id);
+		quota_error(sb, "Can't read info structure");
 		return -1;
 	}
 	info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_NOFS);
@@ -167,8 +165,7 @@ static int v2_write_file_info(struct super_block *sb, int type)
 	size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
 				     sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
 	if (size != sizeof(struct v2_disk_dqinfo)) {
-		q_warn(KERN_WARNING "Can't write info structure on device %s.\n",
-		       sb->s_id);
+		quota_error(sb, "Can't write info structure");
 		return -1;
 	}
 	return 0;

@@ -68,15 +68,15 @@ xfs_fs_set_xstate(
 	if (op != Q_XQUOTARM && !XFS_IS_QUOTA_RUNNING(mp))
 		return -ENOSYS;
 
-	if (uflags & XFS_QUOTA_UDQ_ACCT)
+	if (uflags & FS_QUOTA_UDQ_ACCT)
 		flags |= XFS_UQUOTA_ACCT;
-	if (uflags & XFS_QUOTA_PDQ_ACCT)
+	if (uflags & FS_QUOTA_PDQ_ACCT)
 		flags |= XFS_PQUOTA_ACCT;
-	if (uflags & XFS_QUOTA_GDQ_ACCT)
+	if (uflags & FS_QUOTA_GDQ_ACCT)
 		flags |= XFS_GQUOTA_ACCT;
-	if (uflags & XFS_QUOTA_UDQ_ENFD)
+	if (uflags & FS_QUOTA_UDQ_ENFD)
 		flags |= XFS_UQUOTA_ENFD;
-	if (uflags & (XFS_QUOTA_PDQ_ENFD|XFS_QUOTA_GDQ_ENFD))
+	if (uflags & (FS_QUOTA_PDQ_ENFD|FS_QUOTA_GDQ_ENFD))
 		flags |= XFS_OQUOTA_ENFD;
 
 	switch (op) {

@@ -810,9 +810,9 @@ xfs_qm_export_dquot(
 	}
 
 #ifdef DEBUG
-	if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == XFS_USER_QUOTA) ||
+	if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
 	     (XFS_IS_OQUOTA_ENFORCED(mp) &&
-			(dst->d_flags & (XFS_PROJ_QUOTA | XFS_GROUP_QUOTA)))) &&
+			(dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) &&
 	    dst->d_id != 0) {
 		if (((int) dst->d_bcount >= (int) dst->d_blk_softlimit) &&
 		    (dst->d_blk_softlimit > 0)) {
@@ -833,17 +833,17 @@ xfs_qm_export_qtype_flags(
 	/*
 	 * Can't be more than one, or none.
 	 */
-	ASSERT((flags & (XFS_PROJ_QUOTA | XFS_USER_QUOTA)) !=
-		(XFS_PROJ_QUOTA | XFS_USER_QUOTA));
-	ASSERT((flags & (XFS_PROJ_QUOTA | XFS_GROUP_QUOTA)) !=
-		(XFS_PROJ_QUOTA | XFS_GROUP_QUOTA));
-	ASSERT((flags & (XFS_USER_QUOTA | XFS_GROUP_QUOTA)) !=
-		(XFS_USER_QUOTA | XFS_GROUP_QUOTA));
-	ASSERT((flags & (XFS_PROJ_QUOTA|XFS_USER_QUOTA|XFS_GROUP_QUOTA)) != 0);
+	ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
+		(FS_PROJ_QUOTA | FS_USER_QUOTA));
+	ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
+		(FS_PROJ_QUOTA | FS_GROUP_QUOTA));
+	ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
+		(FS_USER_QUOTA | FS_GROUP_QUOTA));
+	ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
 
 	return (flags & XFS_DQ_USER) ?
-		XFS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
-			XFS_PROJ_QUOTA : XFS_GROUP_QUOTA;
+		FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
+			FS_PROJ_QUOTA : FS_GROUP_QUOTA;
 }
 
 STATIC uint
@@ -854,16 +854,16 @@ xfs_qm_export_flags(
 
 	uflags = 0;
 	if (flags & XFS_UQUOTA_ACCT)
-		uflags |= XFS_QUOTA_UDQ_ACCT;
+		uflags |= FS_QUOTA_UDQ_ACCT;
 	if (flags & XFS_PQUOTA_ACCT)
-		uflags |= XFS_QUOTA_PDQ_ACCT;
+		uflags |= FS_QUOTA_PDQ_ACCT;
 	if (flags & XFS_GQUOTA_ACCT)
-		uflags |= XFS_QUOTA_GDQ_ACCT;
+		uflags |= FS_QUOTA_GDQ_ACCT;
 	if (flags & XFS_UQUOTA_ENFD)
-		uflags |= XFS_QUOTA_UDQ_ENFD;
+		uflags |= FS_QUOTA_UDQ_ENFD;
 	if (flags & (XFS_OQUOTA_ENFD)) {
 		uflags |= (flags & XFS_GQUOTA_ACCT) ?
-			XFS_QUOTA_GDQ_ENFD : XFS_QUOTA_PDQ_ENFD;
+			FS_QUOTA_GDQ_ENFD : FS_QUOTA_PDQ_ENFD;
 	}
 	return (uflags);
 }

@@ -49,7 +49,7 @@
 #define FS_DQUOT_VERSION	1	/* fs_disk_quota.d_version */
 typedef struct fs_disk_quota {
 	__s8	d_version;	/* version of this structure */
-	__s8	d_flags;	/* XFS_{USER,PROJ,GROUP}_QUOTA */
+	__s8	d_flags;	/* FS_{USER,PROJ,GROUP}_QUOTA */
 	__u16	d_fieldmask;	/* field specifier */
 	__u32	d_id;		/* user, project, or group ID */
 	__u64	d_blk_hardlimit;/* absolute limit on disk blks */
@@ -119,18 +119,18 @@ typedef struct fs_disk_quota {
 #define FS_DQ_ACCT_MASK	(FS_DQ_BCOUNT | FS_DQ_ICOUNT | FS_DQ_RTBCOUNT)
 
 /*
- * Various flags related to quotactl(2). Only relevant to XFS filesystems.
+ * Various flags related to quotactl(2).
  */
-#define XFS_QUOTA_UDQ_ACCT	(1<<0)	/* user quota accounting */
-#define XFS_QUOTA_UDQ_ENFD	(1<<1)	/* user quota limits enforcement */
-#define XFS_QUOTA_GDQ_ACCT	(1<<2)	/* group quota accounting */
-#define XFS_QUOTA_GDQ_ENFD	(1<<3)	/* group quota limits enforcement */
-#define XFS_QUOTA_PDQ_ACCT	(1<<4)	/* project quota accounting */
-#define XFS_QUOTA_PDQ_ENFD	(1<<5)	/* project quota limits enforcement */
+#define FS_QUOTA_UDQ_ACCT	(1<<0)	/* user quota accounting */
+#define FS_QUOTA_UDQ_ENFD	(1<<1)	/* user quota limits enforcement */
+#define FS_QUOTA_GDQ_ACCT	(1<<2)	/* group quota accounting */
+#define FS_QUOTA_GDQ_ENFD	(1<<3)	/* group quota limits enforcement */
+#define FS_QUOTA_PDQ_ACCT	(1<<4)	/* project quota accounting */
+#define FS_QUOTA_PDQ_ENFD	(1<<5)	/* project quota limits enforcement */
 
-#define XFS_USER_QUOTA		(1<<0)	/* user quota type */
-#define XFS_PROJ_QUOTA		(1<<1)	/* project quota type */
-#define XFS_GROUP_QUOTA		(1<<2)	/* group quota type */
+#define FS_USER_QUOTA		(1<<0)	/* user quota type */
+#define FS_PROJ_QUOTA		(1<<1)	/* project quota type */
+#define FS_GROUP_QUOTA		(1<<2)	/* group quota type */
 
 /*
 * fs_quota_stat is the struct returned in Q_XGETQSTAT for a given file system.
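
The renamed constants keep their numeric values; only the XFS_ prefix becomes FS_, which is what lets the gfs2 and generic-quota hunks earlier in this merge use them without reaching into XFS-private headers. A small sketch of consuming the new names from Q_XGETQSTAT output (hypothetical helper, assuming <linux/dqblk_xfs.h> in kernel context):

#include <linux/dqblk_xfs.h>

/* Illustrative only: true when user quota is both accounted and enforced. */
static inline int example_uquota_enforced(const struct fs_quota_stat *fqs)
{
	return (fqs->qs_flags & FS_QUOTA_UDQ_ACCT) &&
	       (fqs->qs_flags & FS_QUOTA_UDQ_ENFD);
}
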
@@ -151,7 +151,7 @@ typedef struct fs_qfilestat {
 
 typedef struct fs_quota_stat {
 	__s8		qs_version;	/* version number for future changes */
-	__u16		qs_flags;	/* XFS_QUOTA_{U,P,G}DQ_{ACCT,ENFD} */
+	__u16		qs_flags;	/* FS_QUOTA_{U,P,G}DQ_{ACCT,ENFD} */
 	__s8		qs_pad;		/* unused */
 	fs_qfilestat_t	qs_uquota;	/* user quota storage information */
 	fs_qfilestat_t	qs_gquota;	/* group quota storage information */

@@ -400,7 +400,6 @@ struct ext3_inode {
 #define EXT3_MOUNT_POSIX_ACL	0x08000	/* POSIX Access Control Lists */
 #define EXT3_MOUNT_RESERVATION	0x10000	/* Preallocation */
 #define EXT3_MOUNT_BARRIER	0x20000	/* Use block barriers */
-#define EXT3_MOUNT_NOBH		0x40000	/* No bufferheads */
 #define EXT3_MOUNT_QUOTA	0x80000	/* Some quota option set */
 #define EXT3_MOUNT_USRQUOTA	0x100000 /* "old" user quota */
 #define EXT3_MOUNT_GRPQUOTA	0x200000 /* "old" group quota */

@@ -28,6 +28,12 @@ static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
 
 #if defined(CONFIG_QUOTA)
 
+#define quota_error(sb, fmt, args...) \
+	__quota_error((sb), __func__, fmt , ## args)
+
+extern void __quota_error(struct super_block *sb, const char *func,
+			  const char *fmt, ...);
+
 /*
  * declaration of quota_function calls in kernel.
  */
@@ -145,11 +151,6 @@ static inline bool sb_has_quota_active(struct super_block *sb, int type)
 	       !sb_has_quota_suspended(sb, type);
 }
 
-static inline unsigned sb_any_quota_active(struct super_block *sb)
-{
-	return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
-}
-
 /*
  * Operations supported for diskquotas.
  */
@@ -194,11 +195,6 @@ static inline int sb_has_quota_active(struct super_block *sb, int type)
 	return 0;
 }
 
-static inline int sb_any_quota_active(struct super_block *sb)
-{
-	return 0;
-}
-
 static inline void dquot_initialize(struct inode *inode)
 {
 }
@@ -270,7 +266,7 @@ static inline int dquot_alloc_space_nodirty(struct inode *inode, qsize_t nr)
 static inline void dquot_alloc_space_nofail(struct inode *inode, qsize_t nr)
 {
 	__dquot_alloc_space(inode, nr, DQUOT_SPACE_WARN|DQUOT_SPACE_NOFAIL);
-	mark_inode_dirty(inode);
+	mark_inode_dirty_sync(inode);
 }
 
 static inline int dquot_alloc_space(struct inode *inode, qsize_t nr)
@@ -279,7 +275,7 @@ static inline int dquot_alloc_space(struct inode *inode, qsize_t nr)
 
 	ret = dquot_alloc_space_nodirty(inode, nr);
 	if (!ret)
-		mark_inode_dirty(inode);
+		mark_inode_dirty_sync(inode);
 	return ret;
 }
 
@@ -309,7 +305,7 @@ static inline int dquot_prealloc_block(struct inode *inode, qsize_t nr)
 
 	ret = dquot_prealloc_block_nodirty(inode, nr);
 	if (!ret)
-		mark_inode_dirty(inode);
+		mark_inode_dirty_sync(inode);
 	return ret;
 }
 
@@ -325,7 +321,7 @@ static inline int dquot_claim_block(struct inode *inode, qsize_t nr)
 
 	ret = dquot_claim_space_nodirty(inode, nr << inode->i_blkbits);
 	if (!ret)
-		mark_inode_dirty(inode);
+		mark_inode_dirty_sync(inode);
 	return ret;
 }
 
@@ -337,7 +333,7 @@ static inline void dquot_free_space_nodirty(struct inode *inode, qsize_t nr)
 static inline void dquot_free_space(struct inode *inode, qsize_t nr)
 {
 	dquot_free_space_nodirty(inode, nr);
-	mark_inode_dirty(inode);
+	mark_inode_dirty_sync(inode);
 }
 
 static inline void dquot_free_block_nodirty(struct inode *inode, qsize_t nr)