Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs-2.6

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs-2.6: (33 commits)
  quota: stop using QUOTA_OK / NO_QUOTA
  dquot: cleanup dquot initialize routine
  dquot: move dquot initialization responsibility into the filesystem
  dquot: cleanup dquot drop routine
  dquot: move dquot drop responsibility into the filesystem
  dquot: cleanup dquot transfer routine
  dquot: move dquot transfer responsibility into the filesystem
  dquot: cleanup inode allocation / freeing routines
  dquot: cleanup space allocation / freeing routines
  ext3: add writepage sanity checks
  ext3: Truncate allocated blocks if direct IO write fails to update i_size
  quota: Properly invalidate caches even for filesystems with blocksize < pagesize
  quota: generalize quota transfer interface
  quota: sb_quota state flags cleanup
  jbd: Delay discarding buffers in journal_unmap_buffer
  ext3: quota_write cross block boundary behaviour
  quota: drop permission checks from xfs_fs_set_xstate/xfs_fs_set_xquota
  quota: split out compat_sys_quotactl support from quota.c
  quota: split out netlink notification support from quota.c
  quota: remove invalid optimization from quota_sync_all
  ...

Fixed trivial conflicts in fs/namei.c and fs/ufs/inode.c
Linus Torvalds, 2010-03-05 13:20:53 -08:00
Parents: c812a51d11 efd8f0e6f6
Commit: e213e26ab3
87 changed files with 1578 additions and 1569 deletions
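The series' central API change is visible throughout the diff below: the vfs_dq_* wrappers, which returned a boolean-style result that every caller had to translate into -EDQUOT by hand, are replaced by dquot_* helpers that return an errno directly, and several responsibilities (initialize, drop, transfer) move from the VFS into the individual filesystems. A minimal before/after sketch of the error-code change, condensed from the ext2/ext3 block-allocation hunks below (not a verbatim quote of the commit):

	/* before: the caller picks the error code */
	if (vfs_dq_alloc_block(inode, num)) {
		*errp = -EDQUOT;
		return 0;
	}

	/* after: the quota code reports the error itself */
	ret = dquot_alloc_block(inode, num);
	if (ret) {
		*errp = ret;
		return 0;
	}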


@ -460,13 +460,6 @@ in sys_read() and friends.
--------------------------- dquot_operations -------------------------------
prototypes:
int (*initialize) (struct inode *, int);
int (*drop) (struct inode *);
int (*alloc_space) (struct inode *, qsize_t, int);
int (*alloc_inode) (const struct inode *, unsigned long);
int (*free_space) (struct inode *, qsize_t);
int (*free_inode) (const struct inode *, unsigned long);
int (*transfer) (struct inode *, struct iattr *);
int (*write_dquot) (struct dquot *);
int (*acquire_dquot) (struct dquot *);
int (*release_dquot) (struct dquot *);
@ -479,13 +472,6 @@ a proper locking wrt the filesystem and call the generic quota operations.
What filesystem should expect from the generic quota functions:
FS recursion Held locks when called
initialize: yes maybe dqonoff_sem
drop: yes -
alloc_space: ->mark_dirty() -
alloc_inode: ->mark_dirty() -
free_space: ->mark_dirty() -
free_inode: ->mark_dirty() -
transfer: yes -
write_dquot: yes dqonoff_sem or dqptr_sem
acquire_dquot: yes dqonoff_sem or dqptr_sem
release_dquot: yes dqonoff_sem or dqptr_sem
@ -495,10 +481,6 @@ write_info: yes dqonoff_sem
FS recursion means calling ->quota_read() and ->quota_write() from superblock
operations.
->alloc_space(), ->alloc_inode(), ->free_space(), ->free_inode() are called
only directly by the filesystem and do not call any fs functions other
than the ->mark_dirty() operation.
More details about quota locking can be found in fs/dquot.c.
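After this change the generic helpers are called directly by the filesystems, so a quota-aware filesystem's dquot_operations only needs to supply the journaling hooks that remain in the table above. A rough sketch using hypothetical foofs_* names (the real ext3/ext4 tables appear in the super.c hunks further down):

	static const struct dquot_operations foofs_quota_operations = {
		.write_dquot	= foofs_write_dquot,
		.acquire_dquot	= foofs_acquire_dquot,
		.release_dquot	= foofs_release_dquot,
		/* remaining hooks from the table above (->mark_dirty(),
		 * ->write_info(), ...) omitted for brevity */
	};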
--------------------------- vm_operations_struct -----------------------------


@ -969,7 +969,7 @@ int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr)
if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
(attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
err = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
err = dquot_transfer(inode, attr);
if (err)
goto err_out_exit;
}


@ -12,7 +12,6 @@
#include <linux/capability.h>
#include <linux/fsnotify.h>
#include <linux/fcntl.h>
#include <linux/quotaops.h>
#include <linux/security.h>
/* Taken over from the old code... */
@ -212,14 +211,8 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
error = inode->i_op->setattr(dentry, attr);
} else {
error = inode_change_ok(inode, attr);
if (!error) {
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid))
error = vfs_dq_transfer(inode, attr) ?
-EDQUOT : 0;
if (!error)
error = inode_setattr(inode, attr);
}
if (!error)
error = inode_setattr(inode, attr);
}
if (ia_valid & ATTR_SIZE)
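With the vfs_dq_transfer() call gone from notify_change(), ownership changes are now charged to quota by each filesystem's ->setattr before the attributes are committed. A condensed sketch of the pattern the per-filesystem hunks below follow, with hypothetical foofs_* names (the real ext3/ext4 versions also start a journal handle around the transfer):

	int foofs_setattr(struct dentry *dentry, struct iattr *iattr)
	{
		struct inode *inode = dentry->d_inode;
		int error;

		error = inode_change_ok(inode, iattr);
		if (error)
			return error;

		if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
		    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
			error = dquot_transfer(inode, iattr);
			if (error)
				return error;
		}
		return inode_setattr(inode, iattr);
	}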


@ -570,7 +570,7 @@ do_more:
error_return:
brelse(bitmap_bh);
release_blocks(sb, freed);
vfs_dq_free_block(inode, freed);
dquot_free_block(inode, freed);
}
/**
@ -1236,6 +1236,7 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal,
unsigned short windowsz = 0;
unsigned long ngroups;
unsigned long num = *count;
int ret;
*errp = -ENOSPC;
sb = inode->i_sb;
@ -1247,8 +1248,9 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal,
/*
* Check quota for allocation of this block.
*/
if (vfs_dq_alloc_block(inode, num)) {
*errp = -EDQUOT;
ret = dquot_alloc_block(inode, num);
if (ret) {
*errp = ret;
return 0;
}
@ -1409,7 +1411,7 @@ allocated:
*errp = 0;
brelse(bitmap_bh);
vfs_dq_free_block(inode, *count-num);
dquot_free_block(inode, *count-num);
*count = num;
return ret_block;
@ -1420,7 +1422,7 @@ out:
* Undo the block allocation
*/
if (!performed_allocation)
vfs_dq_free_block(inode, *count);
dquot_free_block(inode, *count);
brelse(bitmap_bh);
return 0;
}


@ -20,6 +20,7 @@
#include <linux/time.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
@ -70,7 +71,7 @@ const struct file_operations ext2_file_operations = {
.compat_ioctl = ext2_compat_ioctl,
#endif
.mmap = generic_file_mmap,
.open = generic_file_open,
.open = dquot_file_open,
.release = ext2_release_file,
.fsync = ext2_fsync,
.splice_read = generic_file_splice_read,
@ -87,7 +88,7 @@ const struct file_operations ext2_xip_file_operations = {
.compat_ioctl = ext2_compat_ioctl,
#endif
.mmap = xip_file_mmap,
.open = generic_file_open,
.open = dquot_file_open,
.release = ext2_release_file,
.fsync = ext2_fsync,
};
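Both ext2 file_operations switch their ->open from generic_file_open() to dquot_file_open(), so quotas are set up when a file is opened for writing. Roughly what the helper does (paraphrased as an illustration, not the exact fs/quota/dquot.c body):

	int dquot_file_open(struct inode *inode, struct file *file)
	{
		int error = generic_file_open(inode, file);

		if (!error && (file->f_mode & FMODE_WRITE))
			dquot_initialize(inode);
		return error;
	}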


@ -121,8 +121,8 @@ void ext2_free_inode (struct inode * inode)
if (!is_bad_inode(inode)) {
/* Quota is already initialized in iput() */
ext2_xattr_delete_inode(inode);
vfs_dq_free_inode(inode);
vfs_dq_drop(inode);
dquot_free_inode(inode);
dquot_drop(inode);
}
es = EXT2_SB(sb)->s_es;
@ -586,10 +586,10 @@ got:
goto fail_drop;
}
if (vfs_dq_alloc_inode(inode)) {
err = -EDQUOT;
dquot_initialize(inode);
err = dquot_alloc_inode(inode);
if (err)
goto fail_drop;
}
err = ext2_init_acl(inode, dir);
if (err)
@ -605,10 +605,10 @@ got:
return inode;
fail_free_drop:
vfs_dq_free_inode(inode);
dquot_free_inode(inode);
fail_drop:
vfs_dq_drop(inode);
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
unlock_new_inode(inode);


@ -60,6 +60,8 @@ static inline int ext2_inode_is_fast_symlink(struct inode *inode)
*/
void ext2_delete_inode (struct inode * inode)
{
if (!is_bad_inode(inode))
dquot_initialize(inode);
truncate_inode_pages(&inode->i_data, 0);
if (is_bad_inode(inode))
@ -1464,9 +1466,12 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
error = inode_change_ok(inode, iattr);
if (error)
return error;
if (iattr->ia_valid & ATTR_SIZE)
dquot_initialize(inode);
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
(iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
error = vfs_dq_transfer(inode, iattr) ? -EDQUOT : 0;
error = dquot_transfer(inode, iattr);
if (error)
return error;
}


@ -31,6 +31,7 @@
*/
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
@ -99,24 +100,27 @@ struct dentry *ext2_get_parent(struct dentry *child)
*/
static int ext2_create (struct inode * dir, struct dentry * dentry, int mode, struct nameidata *nd)
{
struct inode * inode = ext2_new_inode (dir, mode);
int err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
inode->i_op = &ext2_file_inode_operations;
if (ext2_use_xip(inode->i_sb)) {
inode->i_mapping->a_ops = &ext2_aops_xip;
inode->i_fop = &ext2_xip_file_operations;
} else if (test_opt(inode->i_sb, NOBH)) {
inode->i_mapping->a_ops = &ext2_nobh_aops;
inode->i_fop = &ext2_file_operations;
} else {
inode->i_mapping->a_ops = &ext2_aops;
inode->i_fop = &ext2_file_operations;
}
mark_inode_dirty(inode);
err = ext2_add_nondir(dentry, inode);
struct inode *inode;
dquot_initialize(dir);
inode = ext2_new_inode(dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
inode->i_op = &ext2_file_inode_operations;
if (ext2_use_xip(inode->i_sb)) {
inode->i_mapping->a_ops = &ext2_aops_xip;
inode->i_fop = &ext2_xip_file_operations;
} else if (test_opt(inode->i_sb, NOBH)) {
inode->i_mapping->a_ops = &ext2_nobh_aops;
inode->i_fop = &ext2_file_operations;
} else {
inode->i_mapping->a_ops = &ext2_aops;
inode->i_fop = &ext2_file_operations;
}
return err;
mark_inode_dirty(inode);
return ext2_add_nondir(dentry, inode);
}
static int ext2_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_t rdev)
@ -127,6 +131,8 @@ static int ext2_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_
if (!new_valid_dev(rdev))
return -EINVAL;
dquot_initialize(dir);
inode = ext2_new_inode (dir, mode);
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
@ -151,6 +157,8 @@ static int ext2_symlink (struct inode * dir, struct dentry * dentry,
if (l > sb->s_blocksize)
goto out;
dquot_initialize(dir);
inode = ext2_new_inode (dir, S_IFLNK | S_IRWXUGO);
err = PTR_ERR(inode);
if (IS_ERR(inode))
@ -194,6 +202,8 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir,
if (inode->i_nlink >= EXT2_LINK_MAX)
return -EMLINK;
dquot_initialize(dir);
inode->i_ctime = CURRENT_TIME_SEC;
inode_inc_link_count(inode);
atomic_inc(&inode->i_count);
@ -216,6 +226,8 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode)
if (dir->i_nlink >= EXT2_LINK_MAX)
goto out;
dquot_initialize(dir);
inode_inc_link_count(dir);
inode = ext2_new_inode (dir, S_IFDIR | mode);
@ -262,6 +274,8 @@ static int ext2_unlink(struct inode * dir, struct dentry *dentry)
struct page * page;
int err = -ENOENT;
dquot_initialize(dir);
de = ext2_find_entry (dir, &dentry->d_name, &page);
if (!de)
goto out;
@ -304,6 +318,9 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
struct ext2_dir_entry_2 * old_de;
int err = -ENOENT;
dquot_initialize(old_dir);
dquot_initialize(new_dir);
old_de = ext2_find_entry (old_dir, &old_dentry->d_name, &old_page);
if (!old_de)
goto out;
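Each directory-modifying operation above now begins with dquot_initialize() on the directory (and on the victim inode for unlink/rmdir/rename), since the VFS no longer initializes quotas on the filesystem's behalf; the ext3, ext4 and jfs namei hunks below repeat the same pattern. A stripped-down sketch with a hypothetical helper:

	static int foofs_unlink(struct inode *dir, struct dentry *dentry)
	{
		/* set up quota structures before any accounting this
		 * operation may trigger; the VFS no longer does it */
		dquot_initialize(dir);
		dquot_initialize(dentry->d_inode);

		return foofs_do_unlink(dir, dentry);	/* hypothetical */
	}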


@ -194,6 +194,8 @@ static void destroy_inodecache(void)
static void ext2_clear_inode(struct inode *inode)
{
struct ext2_block_alloc_info *rsv = EXT2_I(inode)->i_block_alloc_info;
dquot_drop(inode);
ext2_discard_reservation(inode);
EXT2_I(inode)->i_block_alloc_info = NULL;
if (unlikely(rsv))


@ -644,8 +644,8 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
the inode. */
ea_bdebug(new_bh, "reusing block");
error = -EDQUOT;
if (vfs_dq_alloc_block(inode, 1)) {
error = dquot_alloc_block(inode, 1);
if (error) {
unlock_buffer(new_bh);
goto cleanup;
}
@ -702,7 +702,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
* as if nothing happened and cleanup the unused block */
if (error && error != -ENOSPC) {
if (new_bh && new_bh != old_bh)
vfs_dq_free_block(inode, 1);
dquot_free_block(inode, 1);
goto cleanup;
}
} else
@ -734,7 +734,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
if (ce)
mb_cache_entry_release(ce);
vfs_dq_free_block(inode, 1);
dquot_free_block(inode, 1);
mark_buffer_dirty(old_bh);
ea_bdebug(old_bh, "refcount now=%d",
le32_to_cpu(HDR(old_bh)->h_refcount));
@ -797,7 +797,7 @@ ext2_xattr_delete_inode(struct inode *inode)
mark_buffer_dirty(bh);
if (IS_SYNC(inode))
sync_dirty_buffer(bh);
vfs_dq_free_block(inode, 1);
dquot_free_block(inode, 1);
}
EXT2_I(inode)->i_file_acl = 0;


@ -676,7 +676,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
}
ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
if (dquot_freed_blocks)
vfs_dq_free_block(inode, dquot_freed_blocks);
dquot_free_block(inode, dquot_freed_blocks);
return;
}
@ -1502,8 +1502,9 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
/*
* Check quota for allocation of this block.
*/
if (vfs_dq_alloc_block(inode, num)) {
*errp = -EDQUOT;
err = dquot_alloc_block(inode, num);
if (err) {
*errp = err;
return 0;
}
@ -1713,7 +1714,7 @@ allocated:
*errp = 0;
brelse(bitmap_bh);
vfs_dq_free_block(inode, *count-num);
dquot_free_block(inode, *count-num);
*count = num;
return ret_block;
@ -1728,7 +1729,7 @@ out:
* Undo the block allocation
*/
if (!performed_allocation)
vfs_dq_free_block(inode, *count);
dquot_free_block(inode, *count);
brelse(bitmap_bh);
return 0;
}


@ -21,6 +21,7 @@
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/quotaops.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_jbd.h>
#include "xattr.h"
@ -33,9 +34,9 @@
*/
static int ext3_release_file (struct inode * inode, struct file * filp)
{
if (EXT3_I(inode)->i_state & EXT3_STATE_FLUSH_ON_CLOSE) {
if (ext3_test_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE)) {
filemap_flush(inode->i_mapping);
EXT3_I(inode)->i_state &= ~EXT3_STATE_FLUSH_ON_CLOSE;
ext3_clear_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
}
/* if we are the last writer on the inode, drop the block reservation */
if ((filp->f_mode & FMODE_WRITE) &&
@ -62,7 +63,7 @@ const struct file_operations ext3_file_operations = {
.compat_ioctl = ext3_compat_ioctl,
#endif
.mmap = generic_file_mmap,
.open = generic_file_open,
.open = dquot_file_open,
.release = ext3_release_file,
.fsync = ext3_sync_file,
.splice_read = generic_file_splice_read,


@ -123,10 +123,10 @@ void ext3_free_inode (handle_t *handle, struct inode * inode)
* Note: we must free any quota before locking the superblock,
* as writing the quota to disk may need the lock as well.
*/
vfs_dq_init(inode);
dquot_initialize(inode);
ext3_xattr_delete_inode(handle, inode);
vfs_dq_free_inode(inode);
vfs_dq_drop(inode);
dquot_free_inode(inode);
dquot_drop(inode);
is_directory = S_ISDIR(inode->i_mode);
@ -588,10 +588,10 @@ got:
sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
ret = inode;
if (vfs_dq_alloc_inode(inode)) {
err = -EDQUOT;
dquot_initialize(inode);
err = dquot_alloc_inode(inode);
if (err)
goto fail_drop;
}
err = ext3_init_acl(handle, inode, dir);
if (err)
@ -619,10 +619,10 @@ really_out:
return ret;
fail_free_drop:
vfs_dq_free_inode(inode);
dquot_free_inode(inode);
fail_drop:
vfs_dq_drop(inode);
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
unlock_new_inode(inode);


@ -196,6 +196,9 @@ void ext3_delete_inode (struct inode * inode)
{
handle_t *handle;
if (!is_bad_inode(inode))
dquot_initialize(inode);
truncate_inode_pages(&inode->i_data, 0);
if (is_bad_inode(inode))
@ -1378,7 +1381,7 @@ static int ext3_journalled_write_end(struct file *file,
*/
if (pos + len > inode->i_size && ext3_can_truncate(inode))
ext3_orphan_add(handle, inode);
EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
ext3_set_inode_state(inode, EXT3_STATE_JDATA);
if (inode->i_size > EXT3_I(inode)->i_disksize) {
EXT3_I(inode)->i_disksize = inode->i_size;
ret2 = ext3_mark_inode_dirty(handle, inode);
@ -1417,7 +1420,7 @@ static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
journal_t *journal;
int err;
if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
if (ext3_test_inode_state(inode, EXT3_STATE_JDATA)) {
/*
* This is a REALLY heavyweight approach, but the use of
* bmap on dirty files is expected to be extremely rare:
@ -1436,7 +1439,7 @@ static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
* everything they get.
*/
EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
ext3_clear_inode_state(inode, EXT3_STATE_JDATA);
journal = EXT3_JOURNAL(inode);
journal_lock_updates(journal);
err = journal_flush(journal);
@ -1528,6 +1531,7 @@ static int ext3_ordered_writepage(struct page *page,
int err;
J_ASSERT(PageLocked(page));
WARN_ON_ONCE(IS_RDONLY(inode));
/*
* We give up here if we're reentered, because it might be for a
@ -1600,6 +1604,9 @@ static int ext3_writeback_writepage(struct page *page,
int ret = 0;
int err;
J_ASSERT(PageLocked(page));
WARN_ON_ONCE(IS_RDONLY(inode));
if (ext3_journal_current_handle())
goto out_fail;
@ -1642,6 +1649,9 @@ static int ext3_journalled_writepage(struct page *page,
int ret = 0;
int err;
J_ASSERT(PageLocked(page));
WARN_ON_ONCE(IS_RDONLY(inode));
if (ext3_journal_current_handle())
goto no_write;
@ -1670,7 +1680,7 @@ static int ext3_journalled_writepage(struct page *page,
PAGE_CACHE_SIZE, NULL, write_end_fn);
if (ret == 0)
ret = err;
EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
ext3_set_inode_state(inode, EXT3_STATE_JDATA);
unlock_page(page);
} else {
/*
@ -1785,8 +1795,9 @@ retry:
handle = ext3_journal_start(inode, 2);
if (IS_ERR(handle)) {
/* This is really bad luck. We've written the data
* but cannot extend i_size. Bail out and pretend
* the write failed... */
* but cannot extend i_size. Truncate allocated blocks
* and pretend the write failed... */
ext3_truncate(inode);
ret = PTR_ERR(handle);
goto out;
}
@ -2402,7 +2413,7 @@ void ext3_truncate(struct inode *inode)
goto out_notrans;
if (inode->i_size == 0 && ext3_should_writeback_data(inode))
ei->i_state |= EXT3_STATE_FLUSH_ON_CLOSE;
ext3_set_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
/*
* We have to lock the EOF page here, because lock_page() nests
@ -2721,7 +2732,7 @@ int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
{
/* We have all inode data except xattrs in memory here. */
return __ext3_get_inode_loc(inode, iloc,
!(EXT3_I(inode)->i_state & EXT3_STATE_XATTR));
!ext3_test_inode_state(inode, EXT3_STATE_XATTR));
}
void ext3_set_inode_flags(struct inode *inode)
@ -2893,7 +2904,7 @@ struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
EXT3_GOOD_OLD_INODE_SIZE +
ei->i_extra_isize;
if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
ei->i_state |= EXT3_STATE_XATTR;
ext3_set_inode_state(inode, EXT3_STATE_XATTR);
}
} else
ei->i_extra_isize = 0;
@ -2955,7 +2966,7 @@ again:
/* For fields not tracked in the in-memory inode,
* initialise them to zero for new inodes. */
if (ei->i_state & EXT3_STATE_NEW)
if (ext3_test_inode_state(inode, EXT3_STATE_NEW))
memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
ext3_get_inode_flags(ei);
@ -3052,7 +3063,7 @@ again:
rc = ext3_journal_dirty_metadata(handle, bh);
if (!err)
err = rc;
ei->i_state &= ~EXT3_STATE_NEW;
ext3_clear_inode_state(inode, EXT3_STATE_NEW);
atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
out_brelse:
@ -3140,6 +3151,8 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
if (error)
return error;
if (ia_valid & ATTR_SIZE)
dquot_initialize(inode);
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
handle_t *handle;
@ -3152,7 +3165,7 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
error = PTR_ERR(handle);
goto err_out;
}
error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
error = dquot_transfer(inode, attr);
if (error) {
ext3_journal_stop(handle);
return error;
@ -3237,7 +3250,7 @@ static int ext3_writepage_trans_blocks(struct inode *inode)
ret = 2 * (bpp + indirects) + 2;
#ifdef CONFIG_QUOTA
/* We know that structure was already allocated during vfs_dq_init so
/* We know that structure was already allocated during dquot_initialize so
* we will be updating only the data blocks + inodes */
ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
#endif
@ -3328,7 +3341,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
* i_size has been changed by generic_commit_write() and we thus need
* to include the updated inode in the current transaction.
*
* Also, vfs_dq_alloc_space() will always dirty the inode when blocks
* Also, dquot_alloc_space() will always dirty the inode when blocks
* are allocated to the file.
*
* If the inode is marked synchronous, we don't honour that here - doing
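The hunks above also convert open-coded tests of EXT3_I(inode)->i_state into ext3_test_inode_state() / ext3_set_inode_state() / ext3_clear_inode_state() calls. The helpers themselves are not part of this excerpt; presumably they are thin bit-op wrappers along these lines (the i_state_flags member name is an assumption):

	static inline int ext3_test_inode_state(struct inode *inode, int bit)
	{
		return test_bit(bit, &EXT3_I(inode)->i_state_flags);
	}

	static inline void ext3_set_inode_state(struct inode *inode, int bit)
	{
		set_bit(bit, &EXT3_I(inode)->i_state_flags);
	}

	static inline void ext3_clear_inode_state(struct inode *inode, int bit)
	{
		clear_bit(bit, &EXT3_I(inode)->i_state_flags);
	}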


@ -1696,6 +1696,8 @@ static int ext3_create (struct inode * dir, struct dentry * dentry, int mode,
struct inode * inode;
int err, retries = 0;
dquot_initialize(dir);
retry:
handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
@ -1730,6 +1732,8 @@ static int ext3_mknod (struct inode * dir, struct dentry *dentry,
if (!new_valid_dev(rdev))
return -EINVAL;
dquot_initialize(dir);
retry:
handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
@ -1766,6 +1770,8 @@ static int ext3_mkdir(struct inode * dir, struct dentry * dentry, int mode)
if (dir->i_nlink >= EXT3_LINK_MAX)
return -EMLINK;
dquot_initialize(dir);
retry:
handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
@ -2060,7 +2066,9 @@ static int ext3_rmdir (struct inode * dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go in
* separate transaction */
vfs_dq_init(dentry->d_inode);
dquot_initialize(dir);
dquot_initialize(dentry->d_inode);
handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@ -2119,7 +2127,9 @@ static int ext3_unlink(struct inode * dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go
* in separate transaction */
vfs_dq_init(dentry->d_inode);
dquot_initialize(dir);
dquot_initialize(dentry->d_inode);
handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@ -2174,6 +2184,8 @@ static int ext3_symlink (struct inode * dir,
if (l > dir->i_sb->s_blocksize)
return -ENAMETOOLONG;
dquot_initialize(dir);
retry:
handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 +
@ -2228,6 +2240,9 @@ static int ext3_link (struct dentry * old_dentry,
if (inode->i_nlink >= EXT3_LINK_MAX)
return -EMLINK;
dquot_initialize(dir);
/*
* Return -ENOENT if we've raced with unlink and i_nlink is 0. Doing
* otherwise has the potential to corrupt the orphan inode list.
@ -2278,12 +2293,15 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
struct ext3_dir_entry_2 * old_de, * new_de;
int retval, flush_file = 0;
dquot_initialize(old_dir);
dquot_initialize(new_dir);
old_bh = new_bh = dir_bh = NULL;
/* Initialize quotas before so that eventual writes go
* in separate transaction */
if (new_dentry->d_inode)
vfs_dq_init(new_dentry->d_inode);
dquot_initialize(new_dentry->d_inode);
handle = ext3_journal_start(old_dir, 2 *
EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);


@ -181,7 +181,7 @@ static void ext3_handle_error(struct super_block *sb)
if (!test_opt (sb, ERRORS_CONT)) {
journal_t *journal = EXT3_SB(sb)->s_journal;
EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT;
set_opt(EXT3_SB(sb)->s_mount_opt, ABORT);
if (journal)
journal_abort(journal, -EIO);
}
@ -296,7 +296,7 @@ void ext3_abort (struct super_block * sb, const char * function,
"error: remounting filesystem read-only");
EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
sb->s_flags |= MS_RDONLY;
EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT;
set_opt(EXT3_SB(sb)->s_mount_opt, ABORT);
if (EXT3_SB(sb)->s_journal)
journal_abort(EXT3_SB(sb)->s_journal, -EIO);
}
@ -528,6 +528,8 @@ static void destroy_inodecache(void)
static void ext3_clear_inode(struct inode *inode)
{
struct ext3_block_alloc_info *rsv = EXT3_I(inode)->i_block_alloc_info;
dquot_drop(inode);
ext3_discard_reservation(inode);
EXT3_I(inode)->i_block_alloc_info = NULL;
if (unlikely(rsv))
@ -562,10 +564,10 @@ static inline void ext3_show_quota_options(struct seq_file *seq, struct super_bl
if (sbi->s_qf_names[GRPQUOTA])
seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
if (sbi->s_mount_opt & EXT3_MOUNT_USRQUOTA)
if (test_opt(sb, USRQUOTA))
seq_puts(seq, ",usrquota");
if (sbi->s_mount_opt & EXT3_MOUNT_GRPQUOTA)
if (test_opt(sb, GRPQUOTA))
seq_puts(seq, ",grpquota");
#endif
}
@ -656,8 +658,7 @@ static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
if (test_opt(sb, NOBH))
seq_puts(seq, ",nobh");
seq_printf(seq, ",data=%s", data_mode_string(sbi->s_mount_opt &
EXT3_MOUNT_DATA_FLAGS));
seq_printf(seq, ",data=%s", data_mode_string(test_opt(sb, DATA_FLAGS)));
if (test_opt(sb, DATA_ERR_ABORT))
seq_puts(seq, ",data_err=abort");
@ -751,13 +752,6 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off);
static const struct dquot_operations ext3_quota_operations = {
.initialize = dquot_initialize,
.drop = dquot_drop,
.alloc_space = dquot_alloc_space,
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
.free_inode = dquot_free_inode,
.transfer = dquot_transfer,
.write_dquot = ext3_write_dquot,
.acquire_dquot = ext3_acquire_dquot,
.release_dquot = ext3_release_dquot,
@ -896,6 +890,63 @@ static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb)
return sb_block;
}
#ifdef CONFIG_QUOTA
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
struct ext3_sb_info *sbi = EXT3_SB(sb);
char *qname;
if (sb_any_quota_loaded(sb) &&
!sbi->s_qf_names[qtype]) {
ext3_msg(sb, KERN_ERR,
"Cannot change journaled "
"quota options when quota turned on");
return 0;
}
qname = match_strdup(args);
if (!qname) {
ext3_msg(sb, KERN_ERR,
"Not enough memory for storing quotafile name");
return 0;
}
if (sbi->s_qf_names[qtype] &&
strcmp(sbi->s_qf_names[qtype], qname)) {
ext3_msg(sb, KERN_ERR,
"%s quota file already specified", QTYPE2NAME(qtype));
kfree(qname);
return 0;
}
sbi->s_qf_names[qtype] = qname;
if (strchr(sbi->s_qf_names[qtype], '/')) {
ext3_msg(sb, KERN_ERR,
"quotafile must be on filesystem root");
kfree(sbi->s_qf_names[qtype]);
sbi->s_qf_names[qtype] = NULL;
return 0;
}
set_opt(sbi->s_mount_opt, QUOTA);
return 1;
}
static int clear_qf_name(struct super_block *sb, int qtype) {
struct ext3_sb_info *sbi = EXT3_SB(sb);
if (sb_any_quota_loaded(sb) &&
sbi->s_qf_names[qtype]) {
ext3_msg(sb, KERN_ERR, "Cannot change journaled quota options"
" when quota turned on");
return 0;
}
/*
* The space will be released later when all options are confirmed
* to be correct
*/
sbi->s_qf_names[qtype] = NULL;
return 1;
}
#endif
static int parse_options (char *options, struct super_block *sb,
unsigned int *inum, unsigned long *journal_devnum,
ext3_fsblk_t *n_blocks_count, int is_remount)
@ -906,8 +957,7 @@ static int parse_options (char *options, struct super_block *sb,
int data_opt = 0;
int option;
#ifdef CONFIG_QUOTA
int qtype, qfmt;
char *qname;
int qfmt;
#endif
if (!options)
@ -1065,20 +1115,19 @@ static int parse_options (char *options, struct super_block *sb,
data_opt = EXT3_MOUNT_WRITEBACK_DATA;
datacheck:
if (is_remount) {
if ((sbi->s_mount_opt & EXT3_MOUNT_DATA_FLAGS)
== data_opt)
if (test_opt(sb, DATA_FLAGS) == data_opt)
break;
ext3_msg(sb, KERN_ERR,
"error: cannot change "
"data mode on remount. The filesystem "
"is mounted in data=%s mode and you "
"try to remount it in data=%s mode.",
data_mode_string(sbi->s_mount_opt &
EXT3_MOUNT_DATA_FLAGS),
data_mode_string(test_opt(sb,
DATA_FLAGS)),
data_mode_string(data_opt));
return 0;
} else {
sbi->s_mount_opt &= ~EXT3_MOUNT_DATA_FLAGS;
clear_opt(sbi->s_mount_opt, DATA_FLAGS);
sbi->s_mount_opt |= data_opt;
}
break;
@ -1090,62 +1139,20 @@ static int parse_options (char *options, struct super_block *sb,
break;
#ifdef CONFIG_QUOTA
case Opt_usrjquota:
qtype = USRQUOTA;
goto set_qf_name;
if (!set_qf_name(sb, USRQUOTA, &args[0]))
return 0;
break;
case Opt_grpjquota:
qtype = GRPQUOTA;
set_qf_name:
if (sb_any_quota_loaded(sb) &&
!sbi->s_qf_names[qtype]) {
ext3_msg(sb, KERN_ERR,
"error: cannot change journaled "
"quota options when quota turned on.");
if (!set_qf_name(sb, GRPQUOTA, &args[0]))
return 0;
}
qname = match_strdup(&args[0]);
if (!qname) {
ext3_msg(sb, KERN_ERR,
"error: not enough memory for "
"storing quotafile name.");
return 0;
}
if (sbi->s_qf_names[qtype] &&
strcmp(sbi->s_qf_names[qtype], qname)) {
ext3_msg(sb, KERN_ERR,
"error: %s quota file already "
"specified.", QTYPE2NAME(qtype));
kfree(qname);
return 0;
}
sbi->s_qf_names[qtype] = qname;
if (strchr(sbi->s_qf_names[qtype], '/')) {
ext3_msg(sb, KERN_ERR,
"error: quotafile must be on "
"filesystem root.");
kfree(sbi->s_qf_names[qtype]);
sbi->s_qf_names[qtype] = NULL;
return 0;
}
set_opt(sbi->s_mount_opt, QUOTA);
break;
case Opt_offusrjquota:
qtype = USRQUOTA;
goto clear_qf_name;
case Opt_offgrpjquota:
qtype = GRPQUOTA;
clear_qf_name:
if (sb_any_quota_loaded(sb) &&
sbi->s_qf_names[qtype]) {
ext3_msg(sb, KERN_ERR, "error: cannot change "
"journaled quota options when "
"quota turned on.");
if (!clear_qf_name(sb, USRQUOTA))
return 0;
break;
case Opt_offgrpjquota:
if (!clear_qf_name(sb, GRPQUOTA))
return 0;
}
/*
* The space will be released later when all options
* are confirmed to be correct
*/
sbi->s_qf_names[qtype] = NULL;
break;
case Opt_jqfmt_vfsold:
qfmt = QFMT_VFS_OLD;
@ -1244,18 +1251,12 @@ set_qf_format:
}
#ifdef CONFIG_QUOTA
if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
if ((sbi->s_mount_opt & EXT3_MOUNT_USRQUOTA) &&
sbi->s_qf_names[USRQUOTA])
if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
clear_opt(sbi->s_mount_opt, USRQUOTA);
if ((sbi->s_mount_opt & EXT3_MOUNT_GRPQUOTA) &&
sbi->s_qf_names[GRPQUOTA])
if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
clear_opt(sbi->s_mount_opt, GRPQUOTA);
if ((sbi->s_qf_names[USRQUOTA] &&
(sbi->s_mount_opt & EXT3_MOUNT_GRPQUOTA)) ||
(sbi->s_qf_names[GRPQUOTA] &&
(sbi->s_mount_opt & EXT3_MOUNT_USRQUOTA))) {
if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
ext3_msg(sb, KERN_ERR, "error: old and new quota "
"format mixing.");
return 0;
@ -1478,7 +1479,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
}
list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
vfs_dq_init(inode);
dquot_initialize(inode);
if (inode->i_nlink) {
printk(KERN_DEBUG
"%s: truncating inode %lu to %Ld bytes\n",
@ -1671,11 +1672,11 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif
if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_DATA)
sbi->s_mount_opt |= EXT3_MOUNT_JOURNAL_DATA;
set_opt(sbi->s_mount_opt, JOURNAL_DATA);
else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_ORDERED)
sbi->s_mount_opt |= EXT3_MOUNT_ORDERED_DATA;
set_opt(sbi->s_mount_opt, ORDERED_DATA);
else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_WBACK)
sbi->s_mount_opt |= EXT3_MOUNT_WRITEBACK_DATA;
set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC)
set_opt(sbi->s_mount_opt, ERRORS_PANIC);
@ -1694,7 +1695,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
goto failed_mount;
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV &&
(EXT3_HAS_COMPAT_FEATURE(sb, ~0U) ||
@ -2561,11 +2562,11 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
goto restore_opts;
}
if (sbi->s_mount_opt & EXT3_MOUNT_ABORT)
if (test_opt(sb, ABORT))
ext3_abort(sb, __func__, "Abort forced by user");
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
es = sbi->s_es;
@ -2573,7 +2574,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) ||
n_blocks_count > le32_to_cpu(es->s_blocks_count)) {
if (sbi->s_mount_opt & EXT3_MOUNT_ABORT) {
if (test_opt(sb, ABORT)) {
err = -EROFS;
goto restore_opts;
}
@ -2734,7 +2735,7 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
* Process 1 Process 2
* ext3_create() quota_sync()
* journal_start() write_dquot()
* vfs_dq_init() down(dqio_mutex)
* dquot_initialize() down(dqio_mutex)
* down(dqio_mutex) journal_start()
*
*/
@ -2942,9 +2943,7 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb);
int err = 0;
int offset = off & (sb->s_blocksize - 1);
int tocopy;
int journal_quota = EXT3_SB(sb)->s_qf_names[type] != NULL;
size_t towrite = len;
struct buffer_head *bh;
handle_t *handle = journal_current_handle();
@ -2955,53 +2954,54 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
(unsigned long long)off, (unsigned long long)len);
return -EIO;
}
mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
sb->s_blocksize - offset : towrite;
bh = ext3_bread(handle, inode, blk, 1, &err);
if (!bh)
goto out;
if (journal_quota) {
err = ext3_journal_get_write_access(handle, bh);
if (err) {
brelse(bh);
goto out;
}
}
lock_buffer(bh);
memcpy(bh->b_data+offset, data, tocopy);
flush_dcache_page(bh->b_page);
unlock_buffer(bh);
if (journal_quota)
err = ext3_journal_dirty_metadata(handle, bh);
else {
/* Always do at least ordered writes for quotas */
err = ext3_journal_dirty_data(handle, bh);
mark_buffer_dirty(bh);
}
brelse(bh);
if (err)
goto out;
offset = 0;
towrite -= tocopy;
data += tocopy;
blk++;
/*
* Since we account for only one data block in the transaction
* credits, a quota write must not cross a block boundary.
*/
if (sb->s_blocksize - offset < len) {
ext3_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
" cancelled because not block aligned",
(unsigned long long)off, (unsigned long long)len);
return -EIO;
}
mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
bh = ext3_bread(handle, inode, blk, 1, &err);
if (!bh)
goto out;
if (journal_quota) {
err = ext3_journal_get_write_access(handle, bh);
if (err) {
brelse(bh);
goto out;
}
}
lock_buffer(bh);
memcpy(bh->b_data+offset, data, len);
flush_dcache_page(bh->b_page);
unlock_buffer(bh);
if (journal_quota)
err = ext3_journal_dirty_metadata(handle, bh);
else {
/* Always do at least ordered writes for quotas */
err = ext3_journal_dirty_data(handle, bh);
mark_buffer_dirty(bh);
}
brelse(bh);
out:
if (len == towrite) {
if (err) {
mutex_unlock(&inode->i_mutex);
return err;
}
if (inode->i_size < off+len-towrite) {
i_size_write(inode, off+len-towrite);
if (inode->i_size < off + len) {
i_size_write(inode, off + len);
EXT3_I(inode)->i_disksize = inode->i_size;
}
inode->i_version++;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
ext3_mark_inode_dirty(handle, inode);
mutex_unlock(&inode->i_mutex);
return len - towrite;
return len;
}
#endif
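For example, with a 4096-byte blocksize a quota write at off = 4090 with len = 16 gives offset = 4090 and sb->s_blocksize - offset = 6 < 16, so the write would span two blocks; because the transaction only carries credits for a single quota data block, ext3_quota_write() now refuses such a write with -EIO instead of looping block by block as the old code did.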


@ -274,7 +274,7 @@ ext3_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
void *end;
int error;
if (!(EXT3_I(inode)->i_state & EXT3_STATE_XATTR))
if (!ext3_test_inode_state(inode, EXT3_STATE_XATTR))
return -ENODATA;
error = ext3_get_inode_loc(inode, &iloc);
if (error)
@ -403,7 +403,7 @@ ext3_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
void *end;
int error;
if (!(EXT3_I(inode)->i_state & EXT3_STATE_XATTR))
if (!ext3_test_inode_state(inode, EXT3_STATE_XATTR))
return 0;
error = ext3_get_inode_loc(inode, &iloc);
if (error)
@ -500,7 +500,7 @@ ext3_xattr_release_block(handle_t *handle, struct inode *inode,
error = ext3_journal_dirty_metadata(handle, bh);
if (IS_SYNC(inode))
handle->h_sync = 1;
vfs_dq_free_block(inode, 1);
dquot_free_block(inode, 1);
ea_bdebug(bh, "refcount now=%d; releasing",
le32_to_cpu(BHDR(bh)->h_refcount));
if (ce)
@ -775,8 +775,8 @@ inserted:
else {
/* The old block is released after updating
the inode. */
error = -EDQUOT;
if (vfs_dq_alloc_block(inode, 1))
error = dquot_alloc_block(inode, 1);
if (error)
goto cleanup;
error = ext3_journal_get_write_access(handle,
new_bh);
@ -850,7 +850,7 @@ cleanup:
return error;
cleanup_dquot:
vfs_dq_free_block(inode, 1);
dquot_free_block(inode, 1);
goto cleanup;
bad_block:
@ -882,7 +882,7 @@ ext3_xattr_ibody_find(struct inode *inode, struct ext3_xattr_info *i,
is->s.base = is->s.first = IFIRST(header);
is->s.here = is->s.first;
is->s.end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
if (EXT3_I(inode)->i_state & EXT3_STATE_XATTR) {
if (ext3_test_inode_state(inode, EXT3_STATE_XATTR)) {
error = ext3_xattr_check_names(IFIRST(header), is->s.end);
if (error)
return error;
@ -914,10 +914,10 @@ ext3_xattr_ibody_set(handle_t *handle, struct inode *inode,
header = IHDR(inode, ext3_raw_inode(&is->iloc));
if (!IS_LAST_ENTRY(s->first)) {
header->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC);
EXT3_I(inode)->i_state |= EXT3_STATE_XATTR;
ext3_set_inode_state(inode, EXT3_STATE_XATTR);
} else {
header->h_magic = cpu_to_le32(0);
EXT3_I(inode)->i_state &= ~EXT3_STATE_XATTR;
ext3_clear_inode_state(inode, EXT3_STATE_XATTR);
}
return 0;
}
@ -967,10 +967,10 @@ ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
if (error)
goto cleanup;
if (EXT3_I(inode)->i_state & EXT3_STATE_NEW) {
if (ext3_test_inode_state(inode, EXT3_STATE_NEW)) {
struct ext3_inode *raw_inode = ext3_raw_inode(&is.iloc);
memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
EXT3_I(inode)->i_state &= ~EXT3_STATE_NEW;
ext3_clear_inode_state(inode, EXT3_STATE_NEW);
}
error = ext3_xattr_ibody_find(inode, &i, &is);


@ -23,6 +23,7 @@
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/quotaops.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
@ -125,7 +126,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
sb->s_dirt = 1;
}
}
return generic_file_open(inode, filp);
return dquot_file_open(inode, filp);
}
const struct file_operations ext4_file_operations = {


@ -214,10 +214,10 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
* Note: we must free any quota before locking the superblock,
* as writing the quota to disk may need the lock as well.
*/
vfs_dq_init(inode);
dquot_initialize(inode);
ext4_xattr_delete_inode(handle, inode);
vfs_dq_free_inode(inode);
vfs_dq_drop(inode);
dquot_free_inode(inode);
dquot_drop(inode);
is_directory = S_ISDIR(inode->i_mode);
@ -1029,10 +1029,10 @@ got:
ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
ret = inode;
if (vfs_dq_alloc_inode(inode)) {
err = -EDQUOT;
dquot_initialize(inode);
err = dquot_alloc_inode(inode);
if (err)
goto fail_drop;
}
err = ext4_init_acl(handle, inode, dir);
if (err)
@ -1069,10 +1069,10 @@ really_out:
return ret;
fail_free_drop:
vfs_dq_free_inode(inode);
dquot_free_inode(inode);
fail_drop:
vfs_dq_drop(inode);
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
unlock_new_inode(inode);


@ -171,6 +171,9 @@ void ext4_delete_inode(struct inode *inode)
handle_t *handle;
int err;
if (!is_bad_inode(inode))
dquot_initialize(inode);
if (ext4_should_order_data(inode))
ext4_begin_ordered_truncate(inode, 0);
truncate_inode_pages(&inode->i_data, 0);
@ -1108,9 +1111,9 @@ void ext4_da_update_reserve_space(struct inode *inode,
/* Update quota subsystem */
if (quota_claim) {
vfs_dq_claim_block(inode, used);
dquot_claim_block(inode, used);
if (mdb_free)
vfs_dq_release_reservation_block(inode, mdb_free);
dquot_release_reservation_block(inode, mdb_free);
} else {
/*
* We did fallocate with an offset that is already delayed
@ -1121,8 +1124,8 @@ void ext4_da_update_reserve_space(struct inode *inode,
* that
*/
if (allocated_meta_blocks)
vfs_dq_claim_block(inode, allocated_meta_blocks);
vfs_dq_release_reservation_block(inode, mdb_free + used);
dquot_claim_block(inode, allocated_meta_blocks);
dquot_release_reservation_block(inode, mdb_free + used);
}
/*
@ -1857,6 +1860,7 @@ static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned long md_needed, md_reserved;
int ret;
/*
* recalculate the amount of metadata blocks to reserve
@ -1875,11 +1879,12 @@ repeat:
* later. Real quota accounting is done at pages writeout
* time.
*/
if (vfs_dq_reserve_block(inode, md_needed + 1))
return -EDQUOT;
ret = dquot_reserve_block(inode, md_needed + 1);
if (ret)
return ret;
if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
vfs_dq_release_reservation_block(inode, md_needed + 1);
dquot_release_reservation_block(inode, md_needed + 1);
if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
yield();
goto repeat;
@ -1936,7 +1941,7 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
vfs_dq_release_reservation_block(inode, to_free);
dquot_release_reservation_block(inode, to_free);
}
static void ext4_da_page_release_reservation(struct page *page,
@ -5418,6 +5423,8 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
if (error)
return error;
if (ia_valid & ATTR_SIZE)
dquot_initialize(inode);
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
handle_t *handle;
@ -5430,7 +5437,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
error = PTR_ERR(handle);
goto err_out;
}
error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
error = dquot_transfer(inode, attr);
if (error) {
ext4_journal_stop(handle);
return error;
@ -5816,7 +5823,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
* i_size has been changed by generic_commit_write() and we thus need
* to include the updated inode in the current transaction.
*
* Also, vfs_dq_alloc_block() will always dirty the inode when blocks
* Also, dquot_alloc_block() will always dirty the inode when blocks
* are allocated to the file.
*
* If the inode is marked synchronous, we don't honour that here - doing
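For ext4's delayed allocation, the reserve/claim/release trio is converted the same way: dquot_reserve_block() is charged at write time, dquot_claim_block() converts the reservation once blocks are really allocated, and dquot_release_reservation_block() hands back whatever is no longer needed. A condensed sketch of the reservation step from ext4_da_reserve_space() above (error handling and locking trimmed; foofs_claim_free_blocks is a stand-in for the fs-internal free-space check):

	static int foofs_da_reserve(struct inode *inode, unsigned int md_needed)
	{
		int ret;

		/* quota reservation for the data block plus metadata estimate */
		ret = dquot_reserve_block(inode, md_needed + 1);
		if (ret)
			return ret;

		if (foofs_claim_free_blocks(inode, md_needed + 1)) {
			/* no space: hand the quota reservation back */
			dquot_release_reservation_block(inode, md_needed + 1);
			return -ENOSPC;
		}
		return 0;
	}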


@ -4240,7 +4240,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
return 0;
}
reserv_blks = ar->len;
while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) {
while (ar->len && dquot_alloc_block(ar->inode, ar->len)) {
ar->flags |= EXT4_MB_HINT_NOPREALLOC;
ar->len--;
}
@ -4317,7 +4317,7 @@ out2:
kmem_cache_free(ext4_ac_cachep, ac);
out1:
if (inquota && ar->len < inquota)
vfs_dq_free_block(ar->inode, inquota - ar->len);
dquot_free_block(ar->inode, inquota - ar->len);
out3:
if (!ar->len) {
if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
@ -4631,7 +4631,7 @@ do_more:
sb->s_dirt = 1;
error_return:
if (freed)
vfs_dq_free_block(inode, freed);
dquot_free_block(inode, freed);
brelse(bitmap_bh);
ext4_std_error(sb, err);
if (ac)


@ -1759,6 +1759,8 @@ static int ext4_create(struct inode *dir, struct dentry *dentry, int mode,
struct inode *inode;
int err, retries = 0;
dquot_initialize(dir);
retry:
handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
@ -1793,6 +1795,8 @@ static int ext4_mknod(struct inode *dir, struct dentry *dentry,
if (!new_valid_dev(rdev))
return -EINVAL;
dquot_initialize(dir);
retry:
handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
@ -1830,6 +1834,8 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode)
if (EXT4_DIR_LINK_MAX(dir))
return -EMLINK;
dquot_initialize(dir);
retry:
handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
@ -2137,7 +2143,9 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go in
* separate transaction */
vfs_dq_init(dentry->d_inode);
dquot_initialize(dir);
dquot_initialize(dentry->d_inode);
handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@ -2196,7 +2204,9 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
/* Initialize quotas before so that eventual writes go
* in separate transaction */
vfs_dq_init(dentry->d_inode);
dquot_initialize(dir);
dquot_initialize(dentry->d_inode);
handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
@ -2251,6 +2261,8 @@ static int ext4_symlink(struct inode *dir,
if (l > dir->i_sb->s_blocksize)
return -ENAMETOOLONG;
dquot_initialize(dir);
retry:
handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 5 +
@ -2309,6 +2321,8 @@ static int ext4_link(struct dentry *old_dentry,
if (inode->i_nlink >= EXT4_LINK_MAX)
return -EMLINK;
dquot_initialize(dir);
/*
* Return -ENOENT if we've raced with unlink and i_nlink is 0. Doing
* otherwise has the potential to corrupt the orphan inode list.
@ -2359,12 +2373,15 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
struct ext4_dir_entry_2 *old_de, *new_de;
int retval, force_da_alloc = 0;
dquot_initialize(old_dir);
dquot_initialize(new_dir);
old_bh = new_bh = dir_bh = NULL;
/* Initialize quotas before so that eventual writes go
* in separate transaction */
if (new_dentry->d_inode)
vfs_dq_init(new_dentry->d_inode);
dquot_initialize(new_dentry->d_inode);
handle = ext4_journal_start(old_dir, 2 *
EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);


@ -798,6 +798,7 @@ static void destroy_inodecache(void)
static void ext4_clear_inode(struct inode *inode)
{
dquot_drop(inode);
ext4_discard_preallocations(inode);
if (EXT4_JOURNAL(inode))
jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
@ -1052,19 +1053,9 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off);
static const struct dquot_operations ext4_quota_operations = {
.initialize = dquot_initialize,
.drop = dquot_drop,
.alloc_space = dquot_alloc_space,
.reserve_space = dquot_reserve_space,
.claim_space = dquot_claim_space,
.release_rsv = dquot_release_reserved_space,
#ifdef CONFIG_QUOTA
.get_reserved_space = ext4_get_reserved_space,
#endif
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
.free_inode = dquot_free_inode,
.transfer = dquot_transfer,
.write_dquot = ext4_write_dquot,
.acquire_dquot = ext4_acquire_dquot,
.release_dquot = ext4_release_dquot,
@ -2014,7 +2005,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
}
list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
vfs_dq_init(inode);
dquot_initialize(inode);
if (inode->i_nlink) {
ext4_msg(sb, KERN_DEBUG,
"%s: truncating inode %lu to %lld bytes",
@ -3801,7 +3792,7 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
* Process 1 Process 2
* ext4_create() quota_sync()
* jbd2_journal_start() write_dquot()
* vfs_dq_init() down(dqio_mutex)
* dquot_initialize() down(dqio_mutex)
* down(dqio_mutex) jbd2_journal_start()
*
*/


@ -495,7 +495,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
error = ext4_handle_dirty_metadata(handle, inode, bh);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
vfs_dq_free_block(inode, 1);
dquot_free_block(inode, 1);
ea_bdebug(bh, "refcount now=%d; releasing",
le32_to_cpu(BHDR(bh)->h_refcount));
if (ce)
@ -787,8 +787,8 @@ inserted:
else {
/* The old block is released after updating
the inode. */
error = -EDQUOT;
if (vfs_dq_alloc_block(inode, 1))
error = dquot_alloc_block(inode, 1);
if (error)
goto cleanup;
error = ext4_journal_get_write_access(handle,
new_bh);
@ -876,7 +876,7 @@ cleanup:
return error;
cleanup_dquot:
vfs_dq_free_block(inode, 1);
dquot_free_block(inode, 1);
goto cleanup;
bad_block:


@ -1083,7 +1083,7 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
}
}
int gfs2_quota_sync(struct super_block *sb, int type)
int gfs2_quota_sync(struct super_block *sb, int type, int wait)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_quota_data **qda;
@ -1127,6 +1127,11 @@ int gfs2_quota_sync(struct super_block *sb, int type)
return error;
}
static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
{
return gfs2_quota_sync(sb, type, 0);
}
int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
struct gfs2_quota_data *qd;
@ -1382,7 +1387,7 @@ int gfs2_quotad(void *data)
&tune->gt_statfs_quantum);
/* Update quota file */
quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
quotad_check_timeo(sdp, "sync", gfs2_quota_sync_timeo, t,
&quotad_timeo, &tune->gt_quota_quantum);
/* Check for & recover partially truncated inodes */


@ -25,7 +25,7 @@ extern int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid);
extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
u32 uid, u32 gid);
extern int gfs2_quota_sync(struct super_block *sb, int type);
extern int gfs2_quota_sync(struct super_block *sb, int type, int wait);
extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id);
extern int gfs2_quota_init(struct gfs2_sbd *sdp);


@ -764,7 +764,7 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
int error;
flush_workqueue(gfs2_delete_workqueue);
gfs2_quota_sync(sdp->sd_vfs, 0);
gfs2_quota_sync(sdp->sd_vfs, 0, 1);
gfs2_statfs_sync(sdp->sd_vfs, 0);
error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,


@ -167,7 +167,7 @@ static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
if (simple_strtol(buf, NULL, 0) != 1)
return -EINVAL;
gfs2_quota_sync(sdp->sd_vfs, 0);
gfs2_quota_sync(sdp->sd_vfs, 0, 1);
return len;
}


@ -8,7 +8,6 @@
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/module.h>
@ -314,7 +313,6 @@ void clear_inode(struct inode *inode)
BUG_ON(!(inode->i_state & I_FREEING));
BUG_ON(inode->i_state & I_CLEAR);
inode_sync_wait(inode);
vfs_dq_drop(inode);
if (inode->i_sb->s_op->clear_inode)
inode->i_sb->s_op->clear_inode(inode);
if (S_ISBLK(inode->i_mode) && inode->i_bdev)
@ -1211,8 +1209,6 @@ void generic_delete_inode(struct inode *inode)
if (op->delete_inode) {
void (*delete)(struct inode *) = op->delete_inode;
if (!is_bad_inode(inode))
vfs_dq_init(inode);
/* Filesystems implementing their own
* s_op->delete_inode are required to call
* truncate_inode_pages and clear_inode()
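The VFS side of the contract changes accordingly: clear_inode() stops dropping quota references and generic_delete_inode() stops initializing them, so a quota-aware filesystem must do both itself, as the ext2/ext3/ext4/jfs hunks in this diff do. A minimal sketch with hypothetical foofs_* names:

	static void foofs_clear_inode(struct inode *inode)
	{
		/* previously done by clear_inode() in fs/inode.c */
		dquot_drop(inode);
	}

	static void foofs_delete_inode(struct inode *inode)
	{
		if (!is_bad_inode(inode))
			dquot_initialize(inode);	/* was vfs_dq_init() in generic_delete_inode() */
		truncate_inode_pages(&inode->i_data, 0);
		/* ... filesystem-specific freeing, dquot_free_inode(), clear_inode() ... */
	}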


@ -862,12 +862,12 @@ restart_loop:
/* A buffer which has been freed while still being
* journaled by a previous transaction may end up still
* being dirty here, but we want to avoid writing back
* that buffer in the future now that the last use has
* been committed. That's not only a performance gain,
* it also stops aliasing problems if the buffer is left
* behind for writeback and gets reallocated for another
* that buffer in the future after the "add to orphan"
* operation has been committed. That's not only a performance
* gain, it also stops aliasing problems if the buffer is
* left behind for writeback and gets reallocated for another
* use in a different page. */
if (buffer_freed(bh)) {
if (buffer_freed(bh) && !jh->b_next_transaction) {
clear_buffer_freed(bh);
clear_buffer_jbddirty(bh);
}


@ -1864,6 +1864,21 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
if (!jh)
goto zap_buffer_no_jh;
/*
* We cannot remove the buffer from checkpoint lists until the
* transaction adding inode to orphan list (let's call it T)
* is committed. Otherwise if the transaction changing the
* buffer would be cleaned from the journal before T is
* committed, a crash would cause the correct contents of
* the buffer to be lost. On the other hand, we have to
* clear the buffer's dirty bit at the latest when the
* transaction marking the buffer as freed in the filesystem
* structures is committed because from that moment on the
* buffer can be reallocated and used by a different page.
* Since the block hasn't been freed yet but the inode has
* already been added to orphan list, it is safe for us to add
* the buffer to BJ_Forget list of the newest transaction.
*/
transaction = jh->b_transaction;
if (transaction == NULL) {
/* First case: not on any transaction. If it
@ -1929,16 +1944,15 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
goto zap_buffer;
}
/*
* If it is committing, we simply cannot touch it. We
* can remove it's next_transaction pointer from the
* running transaction if that is set, but nothing
* else. */
* The buffer is committing, we simply cannot touch
* it. So we just set j_next_transaction to the
* running transaction (if there is one) and mark
* buffer as freed so that commit code knows it should
* clear dirty bits when it is done with the buffer.
*/
set_buffer_freed(bh);
if (jh->b_next_transaction) {
J_ASSERT(jh->b_next_transaction ==
journal->j_running_transaction);
jh->b_next_transaction = NULL;
}
if (journal->j_running_transaction && buffer_jbddirty(bh))
jh->b_next_transaction = journal->j_running_transaction;
journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
@ -2120,7 +2134,7 @@ void journal_file_buffer(struct journal_head *jh,
*/
void __journal_refile_buffer(struct journal_head *jh)
{
int was_dirty;
int was_dirty, jlist;
struct buffer_head *bh = jh2bh(jh);
J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
@ -2142,8 +2156,13 @@ void __journal_refile_buffer(struct journal_head *jh)
__journal_temp_unlink_buffer(jh);
jh->b_transaction = jh->b_next_transaction;
jh->b_next_transaction = NULL;
__journal_file_buffer(jh, jh->b_transaction,
jh->b_modified ? BJ_Metadata : BJ_Reserved);
if (buffer_freed(bh))
jlist = BJ_Forget;
else if (jh->b_modified)
jlist = BJ_Metadata;
else
jlist = BJ_Reserved;
__journal_file_buffer(jh, jh->b_transaction, jlist);
J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
if (was_dirty)


@ -20,7 +20,6 @@
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/posix_acl_xattr.h>
#include "jfs_incore.h"
#include "jfs_txnmgr.h"
@ -174,7 +173,7 @@ cleanup:
return rc;
}
static int jfs_acl_chmod(struct inode *inode)
int jfs_acl_chmod(struct inode *inode)
{
struct posix_acl *acl, *clone;
int rc;
@ -205,26 +204,3 @@ static int jfs_acl_chmod(struct inode *inode)
posix_acl_release(clone);
return rc;
}
int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
struct inode *inode = dentry->d_inode;
int rc;
rc = inode_change_ok(inode, iattr);
if (rc)
return rc;
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
(iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
if (vfs_dq_transfer(inode, iattr))
return -EDQUOT;
}
rc = inode_setattr(inode, iattr);
if (!rc && (iattr->ia_valid & ATTR_MODE))
rc = jfs_acl_chmod(inode);
return rc;
}


@ -18,6 +18,7 @@
*/
#include <linux/fs.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_dmap.h"
@ -47,7 +48,7 @@ static int jfs_open(struct inode *inode, struct file *file)
{
int rc;
if ((rc = generic_file_open(inode, file)))
if ((rc = dquot_file_open(inode, file)))
return rc;
/*
@ -88,14 +89,40 @@ static int jfs_release(struct inode *inode, struct file *file)
return 0;
}
int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
struct inode *inode = dentry->d_inode;
int rc;
rc = inode_change_ok(inode, iattr);
if (rc)
return rc;
if (iattr->ia_valid & ATTR_SIZE)
dquot_initialize(inode);
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
(iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
rc = dquot_transfer(inode, iattr);
if (rc)
return rc;
}
rc = inode_setattr(inode, iattr);
if (!rc && (iattr->ia_valid & ATTR_MODE))
rc = jfs_acl_chmod(inode);
return rc;
}
const struct inode_operations jfs_file_inode_operations = {
.truncate = jfs_truncate,
.setxattr = jfs_setxattr,
.getxattr = jfs_getxattr,
.listxattr = jfs_listxattr,
.removexattr = jfs_removexattr,
#ifdef CONFIG_JFS_POSIX_ACL
.setattr = jfs_setattr,
#ifdef CONFIG_JFS_POSIX_ACL
.check_acl = jfs_check_acl,
#endif
};


@ -149,6 +149,9 @@ void jfs_delete_inode(struct inode *inode)
{
jfs_info("In jfs_delete_inode, inode = 0x%p", inode);
if (!is_bad_inode(inode))
dquot_initialize(inode);
if (!is_bad_inode(inode) &&
(JFS_IP(inode)->fileset == FILESYSTEM_I)) {
truncate_inode_pages(&inode->i_data, 0);
@ -161,9 +164,9 @@ void jfs_delete_inode(struct inode *inode)
/*
* Free the inode from the quota allocation.
*/
vfs_dq_init(inode);
vfs_dq_free_inode(inode);
vfs_dq_drop(inode);
dquot_initialize(inode);
dquot_free_inode(inode);
dquot_drop(inode);
}
clear_inode(inode);


@ -22,7 +22,7 @@
int jfs_check_acl(struct inode *, int);
int jfs_init_acl(tid_t, struct inode *, struct inode *);
int jfs_setattr(struct dentry *, struct iattr *);
int jfs_acl_chmod(struct inode *inode);
#else
@ -32,5 +32,10 @@ static inline int jfs_init_acl(tid_t tid, struct inode *inode,
return 0;
}
static inline int jfs_acl_chmod(struct inode *inode)
{
return 0;
}
#endif
#endif /* _H_JFS_ACL */


@ -381,10 +381,10 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
* It's time to move the inline table to an external
* page and begin to build the xtree
*/
if (vfs_dq_alloc_block(ip, sbi->nbperpage))
if (dquot_alloc_block(ip, sbi->nbperpage))
goto clean_up;
if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
vfs_dq_free_block(ip, sbi->nbperpage);
dquot_free_block(ip, sbi->nbperpage);
goto clean_up;
}
@ -408,7 +408,7 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
memcpy(&jfs_ip->i_dirtable, temp_table,
sizeof (temp_table));
dbFree(ip, xaddr, sbi->nbperpage);
vfs_dq_free_block(ip, sbi->nbperpage);
dquot_free_block(ip, sbi->nbperpage);
goto clean_up;
}
ip->i_size = PSIZE;
@ -1027,10 +1027,9 @@ static int dtSplitUp(tid_t tid,
n = xlen;
/* Allocate blocks to quota. */
if (vfs_dq_alloc_block(ip, n)) {
rc = -EDQUOT;
rc = dquot_alloc_block(ip, n);
if (rc)
goto extendOut;
}
quota_allocation += n;
if ((rc = dbReAlloc(sbi->ipbmap, xaddr, (s64) xlen,
@ -1308,7 +1307,7 @@ static int dtSplitUp(tid_t tid,
/* Rollback quota allocation */
if (rc && quota_allocation)
vfs_dq_free_block(ip, quota_allocation);
dquot_free_block(ip, quota_allocation);
dtSplitUp_Exit:
@ -1369,9 +1368,10 @@ static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
return -EIO;
/* Allocate blocks to quota. */
if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
rc = dquot_alloc_block(ip, lengthPXD(pxd));
if (rc) {
release_metapage(rmp);
return -EDQUOT;
return rc;
}
jfs_info("dtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp);
@ -1892,6 +1892,7 @@ static int dtSplitRoot(tid_t tid,
struct dt_lock *dtlck;
struct tlock *tlck;
struct lv *lv;
int rc;
/* get split root page */
smp = split->mp;
@ -1916,9 +1917,10 @@ static int dtSplitRoot(tid_t tid,
rp = rmp->data;
/* Allocate blocks to quota. */
if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
rc = dquot_alloc_block(ip, lengthPXD(pxd));
if (rc) {
release_metapage(rmp);
return -EDQUOT;
return rc;
}
BT_MARK_DIRTY(rmp, ip);
@ -2287,7 +2289,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
xlen = lengthPXD(&fp->header.self);
/* Free quota allocation. */
vfs_dq_free_block(ip, xlen);
dquot_free_block(ip, xlen);
/* free/invalidate its buffer page */
discard_metapage(fmp);
@ -2363,7 +2365,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
xlen = lengthPXD(&p->header.self);
/* Free quota allocation */
vfs_dq_free_block(ip, xlen);
dquot_free_block(ip, xlen);
/* free/invalidate its buffer page */
discard_metapage(mp);


@ -141,10 +141,11 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
}
/* Allocate blocks to quota. */
if (vfs_dq_alloc_block(ip, nxlen)) {
rc = dquot_alloc_block(ip, nxlen);
if (rc) {
dbFree(ip, nxaddr, (s64) nxlen);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return -EDQUOT;
return rc;
}
/* determine the value of the extent flag */
@ -164,7 +165,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
*/
if (rc) {
dbFree(ip, nxaddr, nxlen);
vfs_dq_free_block(ip, nxlen);
dquot_free_block(ip, nxlen);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return (rc);
}
@ -256,10 +257,11 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
goto exit;
/* Allocate blocks to quota. */
if (vfs_dq_alloc_block(ip, nxlen)) {
rc = dquot_alloc_block(ip, nxlen);
if (rc) {
dbFree(ip, nxaddr, (s64) nxlen);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return -EDQUOT;
return rc;
}
delta = nxlen - xlen;
@ -297,7 +299,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
/* extend the extent */
if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) {
dbFree(ip, xaddr + xlen, delta);
vfs_dq_free_block(ip, nxlen);
dquot_free_block(ip, nxlen);
goto exit;
}
} else {
@ -308,7 +310,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
*/
if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) {
dbFree(ip, nxaddr, nxlen);
vfs_dq_free_block(ip, nxlen);
dquot_free_block(ip, nxlen);
goto exit;
}
}


@ -116,10 +116,10 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
/*
* Allocate inode to quota.
*/
if (vfs_dq_alloc_inode(inode)) {
rc = -EDQUOT;
dquot_initialize(inode);
rc = dquot_alloc_inode(inode);
if (rc)
goto fail_drop;
}
inode->i_mode = mode;
/* inherit flags from parent */
@ -162,7 +162,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
return inode;
fail_drop:
vfs_dq_drop(inode);
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
fail_unlock:
inode->i_nlink = 0;


@ -40,6 +40,7 @@ extern struct dentry *jfs_fh_to_parent(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type);
extern void jfs_set_inode_flags(struct inode *);
extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
extern int jfs_setattr(struct dentry *, struct iattr *);
extern const struct address_space_operations jfs_aops;
extern const struct inode_operations jfs_dir_inode_operations;


@ -585,10 +585,10 @@ int xtInsert(tid_t tid, /* transaction id */
hint = addressXAD(xad) + lengthXAD(xad) - 1;
} else
hint = 0;
if ((rc = vfs_dq_alloc_block(ip, xlen)))
if ((rc = dquot_alloc_block(ip, xlen)))
goto out;
if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) {
vfs_dq_free_block(ip, xlen);
dquot_free_block(ip, xlen);
goto out;
}
}
@ -617,7 +617,7 @@ int xtInsert(tid_t tid, /* transaction id */
/* undo data extent allocation */
if (*xaddrp == 0) {
dbFree(ip, xaddr, (s64) xlen);
vfs_dq_free_block(ip, xlen);
dquot_free_block(ip, xlen);
}
return rc;
}
@ -985,10 +985,9 @@ xtSplitPage(tid_t tid, struct inode *ip,
rbn = addressPXD(pxd);
/* Allocate blocks to quota. */
if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
rc = -EDQUOT;
rc = dquot_alloc_block(ip, lengthPXD(pxd));
if (rc)
goto clean_up;
}
quota_allocation += lengthPXD(pxd);
@ -1195,7 +1194,7 @@ xtSplitPage(tid_t tid, struct inode *ip,
/* Rollback quota allocation. */
if (quota_allocation)
vfs_dq_free_block(ip, quota_allocation);
dquot_free_block(ip, quota_allocation);
return (rc);
}
@ -1235,6 +1234,7 @@ xtSplitRoot(tid_t tid,
struct pxdlist *pxdlist;
struct tlock *tlck;
struct xtlock *xtlck;
int rc;
sp = &JFS_IP(ip)->i_xtroot;
@ -1252,9 +1252,10 @@ xtSplitRoot(tid_t tid,
return -EIO;
/* Allocate blocks to quota. */
if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
rc = dquot_alloc_block(ip, lengthPXD(pxd));
if (rc) {
release_metapage(rmp);
return -EDQUOT;
return rc;
}
jfs_info("xtSplitRoot: ip:0x%p rmp:0x%p", ip, rmp);
@ -3680,7 +3681,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
ip->i_size = newsize;
/* update quota allocation to reflect freed blocks */
vfs_dq_free_block(ip, nfreed);
dquot_free_block(ip, nfreed);
/*
* free tlock of invalidated pages


@ -85,6 +85,8 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode,
jfs_info("jfs_create: dip:0x%p name:%s", dip, dentry->d_name.name);
dquot_initialize(dip);
/*
* search parent directory for entry/freespace
* (dtSearch() returns parent directory page pinned)
@ -215,6 +217,8 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode)
jfs_info("jfs_mkdir: dip:0x%p name:%s", dip, dentry->d_name.name);
dquot_initialize(dip);
/* link count overflow on parent directory ? */
if (dip->i_nlink == JFS_LINK_MAX) {
rc = -EMLINK;
@ -356,7 +360,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name);
/* Init inode for quota operations. */
vfs_dq_init(ip);
dquot_initialize(dip);
dquot_initialize(ip);
/* directory must be empty to be removed */
if (!dtEmpty(ip)) {
@ -483,7 +488,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name);
/* Init inode for quota operations. */
vfs_dq_init(ip);
dquot_initialize(dip);
dquot_initialize(ip);
if ((rc = get_UCSname(&dname, dentry)))
goto out;
@ -805,6 +811,8 @@ static int jfs_link(struct dentry *old_dentry,
if (ip->i_nlink == 0)
return -ENOENT;
dquot_initialize(dir);
tid = txBegin(ip->i_sb, 0);
mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT);
@ -896,6 +904,8 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
jfs_info("jfs_symlink: dip:0x%p name:%s", dip, name);
dquot_initialize(dip);
ssize = strlen(name) + 1;
/*
@ -1087,6 +1097,9 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
jfs_info("jfs_rename: %s %s", old_dentry->d_name.name,
new_dentry->d_name.name);
dquot_initialize(old_dir);
dquot_initialize(new_dir);
old_ip = old_dentry->d_inode;
new_ip = new_dentry->d_inode;
@ -1136,7 +1149,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
} else if (new_ip) {
IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL);
/* Init inode for quota operations. */
vfs_dq_init(new_ip);
dquot_initialize(new_ip);
}
/*
@ -1360,6 +1373,8 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
jfs_info("jfs_mknod: %s", dentry->d_name.name);
dquot_initialize(dir);
if ((rc = get_UCSname(&dname, dentry)))
goto out;
@ -1541,8 +1556,8 @@ const struct inode_operations jfs_dir_inode_operations = {
.getxattr = jfs_getxattr,
.listxattr = jfs_listxattr,
.removexattr = jfs_removexattr,
#ifdef CONFIG_JFS_POSIX_ACL
.setattr = jfs_setattr,
#ifdef CONFIG_JFS_POSIX_ACL
.check_acl = jfs_check_acl,
#endif
};


@ -131,6 +131,11 @@ static void jfs_destroy_inode(struct inode *inode)
kmem_cache_free(jfs_inode_cachep, ji);
}
static void jfs_clear_inode(struct inode *inode)
{
dquot_drop(inode);
}
static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb);
@ -745,6 +750,7 @@ static const struct super_operations jfs_super_operations = {
.dirty_inode = jfs_dirty_inode,
.write_inode = jfs_write_inode,
.delete_inode = jfs_delete_inode,
.clear_inode = jfs_clear_inode,
.put_super = jfs_put_super,
.sync_fs = jfs_sync_fs,
.freeze_fs = jfs_freeze,


@ -260,14 +260,14 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;
/* Allocate new blocks to quota. */
if (vfs_dq_alloc_block(ip, nblocks)) {
return -EDQUOT;
}
rc = dquot_alloc_block(ip, nblocks);
if (rc)
return rc;
rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
if (rc) {
/*Rollback quota allocation. */
vfs_dq_free_block(ip, nblocks);
dquot_free_block(ip, nblocks);
return rc;
}
@ -332,7 +332,7 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
failed:
/* Rollback quota allocation. */
vfs_dq_free_block(ip, nblocks);
dquot_free_block(ip, nblocks);
dbFree(ip, blkno, nblocks);
return rc;
@ -538,7 +538,8 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
if (blocks_needed > current_blocks) {
/* Allocate new blocks to quota. */
if (vfs_dq_alloc_block(inode, blocks_needed))
rc = dquot_alloc_block(inode, blocks_needed);
if (rc)
return -EDQUOT;
quota_allocation = blocks_needed;
@ -602,7 +603,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
clean_up:
/* Rollback quota allocation */
if (quota_allocation)
vfs_dq_free_block(inode, quota_allocation);
dquot_free_block(inode, quota_allocation);
return (rc);
}
@ -677,7 +678,7 @@ static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf,
/* If old blocks exist, they must be removed from quota allocation. */
if (old_blocks)
vfs_dq_free_block(inode, old_blocks);
dquot_free_block(inode, old_blocks);
inode->i_ctime = CURRENT_TIME;


@ -19,7 +19,6 @@
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/pagemap.h>
#include <linux/fsnotify.h>
#include <linux/personality.h>
@ -1416,7 +1415,6 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode,
error = security_inode_create(dir, dentry, mode);
if (error)
return error;
vfs_dq_init(dir);
error = dir->i_op->create(dir, dentry, mode, nd);
if (!error)
fsnotify_create(dir, dentry);
@ -1586,9 +1584,6 @@ static struct file *finish_open(struct nameidata *nd,
}
}
if (!IS_ERR(filp)) {
if (acc_mode & MAY_WRITE)
vfs_dq_init(nd->path.dentry->d_inode);
if (will_truncate) {
error = handle_truncate(&nd->path);
if (error) {
@ -1986,7 +1981,6 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
if (error)
return error;
vfs_dq_init(dir);
error = dir->i_op->mknod(dir, dentry, mode, dev);
if (!error)
fsnotify_create(dir, dentry);
@ -2085,7 +2079,6 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
if (error)
return error;
vfs_dq_init(dir);
error = dir->i_op->mkdir(dir, dentry, mode);
if (!error)
fsnotify_mkdir(dir, dentry);
@ -2171,8 +2164,6 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
if (!dir->i_op->rmdir)
return -EPERM;
vfs_dq_init(dir);
mutex_lock(&dentry->d_inode->i_mutex);
dentry_unhash(dentry);
if (d_mountpoint(dentry))
@ -2258,8 +2249,6 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
if (!dir->i_op->unlink)
return -EPERM;
vfs_dq_init(dir);
mutex_lock(&dentry->d_inode->i_mutex);
if (d_mountpoint(dentry))
error = -EBUSY;
@ -2372,7 +2361,6 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
if (error)
return error;
vfs_dq_init(dir);
error = dir->i_op->symlink(dir, dentry, oldname);
if (!error)
fsnotify_create(dir, dentry);
@ -2456,7 +2444,6 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
return error;
mutex_lock(&inode->i_mutex);
vfs_dq_init(dir);
error = dir->i_op->link(old_dentry, dir, new_dentry);
mutex_unlock(&inode->i_mutex);
if (!error)
@ -2657,9 +2644,6 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!old_dir->i_op->rename)
return -EPERM;
vfs_dq_init(old_dir);
vfs_dq_init(new_dir);
old_name = fsnotify_oldname_init(old_dentry->d_name.name);
if (is_dir)


@ -20,7 +20,6 @@
#include <linux/fcntl.h>
#include <linux/namei.h>
#include <linux/delay.h>
#include <linux/quotaops.h>
#include <linux/fsnotify.h>
#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>
@ -377,7 +376,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
put_write_access(inode);
goto out_nfserr;
}
vfs_dq_init(inode);
}
/* sanitize the mode change */
@ -745,8 +743,6 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
flags = O_RDWR|O_LARGEFILE;
else
flags = O_WRONLY|O_LARGEFILE;
vfs_dq_init(inode);
}
*filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
flags, current_cred());


@ -5713,7 +5713,7 @@ int ocfs2_remove_btree_range(struct inode *inode,
goto out;
}
vfs_dq_free_space_nodirty(inode,
dquot_free_space_nodirty(inode,
ocfs2_clusters_to_bytes(inode->i_sb, len));
ret = ocfs2_remove_extent(handle, et, cpos, len, meta_ac, dealloc);
@ -6936,7 +6936,7 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb,
goto bail;
}
vfs_dq_free_space_nodirty(inode,
dquot_free_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, clusters_to_del));
spin_lock(&OCFS2_I(inode)->ip_lock);
OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters) -
@ -7301,11 +7301,10 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
unsigned int page_end;
u64 phys;
if (vfs_dq_alloc_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, 1))) {
ret = -EDQUOT;
ret = dquot_alloc_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, 1));
if (ret)
goto out_commit;
}
did_quota = 1;
ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off,
@ -7381,7 +7380,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
out_commit:
if (ret < 0 && did_quota)
vfs_dq_free_space_nodirty(inode,
dquot_free_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, 1));
ocfs2_commit_trans(osb, handle);


@ -1764,10 +1764,11 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
wc->w_handle = handle;
if (clusters_to_alloc && vfs_dq_alloc_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc))) {
ret = -EDQUOT;
goto out_commit;
if (clusters_to_alloc) {
ret = dquot_alloc_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
if (ret)
goto out_commit;
}
/*
* We don't want this to fail in ocfs2_write_end(), so do it
@ -1810,7 +1811,7 @@ success:
return 0;
out_quota:
if (clusters_to_alloc)
vfs_dq_free_space(inode,
dquot_free_space(inode,
ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
out_commit:
ocfs2_commit_trans(osb, handle);


@ -2964,12 +2964,10 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
goto out;
}
if (vfs_dq_alloc_space_nodirty(dir,
ocfs2_clusters_to_bytes(osb->sb,
alloc + dx_alloc))) {
ret = -EDQUOT;
ret = dquot_alloc_space_nodirty(dir,
ocfs2_clusters_to_bytes(osb->sb, alloc + dx_alloc));
if (ret)
goto out_commit;
}
did_quota = 1;
if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
@ -3178,7 +3176,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
out_commit:
if (ret < 0 && did_quota)
vfs_dq_free_space_nodirty(dir, bytes_allocated);
dquot_free_space_nodirty(dir, bytes_allocated);
ocfs2_commit_trans(osb, handle);
@ -3221,11 +3219,10 @@ static int ocfs2_do_extend_dir(struct super_block *sb,
if (extend) {
u32 offset = OCFS2_I(dir)->ip_clusters;
if (vfs_dq_alloc_space_nodirty(dir,
ocfs2_clusters_to_bytes(sb, 1))) {
status = -EDQUOT;
status = dquot_alloc_space_nodirty(dir,
ocfs2_clusters_to_bytes(sb, 1));
if (status)
goto bail;
}
did_quota = 1;
status = ocfs2_add_inode_data(OCFS2_SB(sb), dir, &offset,
@ -3254,7 +3251,7 @@ static int ocfs2_do_extend_dir(struct super_block *sb,
status = 0;
bail:
if (did_quota && status < 0)
vfs_dq_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1));
dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1));
mlog_exit(status);
return status;
}
@ -3889,11 +3886,10 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
goto out;
}
if (vfs_dq_alloc_space_nodirty(dir,
ocfs2_clusters_to_bytes(dir->i_sb, 1))) {
ret = -EDQUOT;
ret = dquot_alloc_space_nodirty(dir,
ocfs2_clusters_to_bytes(dir->i_sb, 1));
if (ret)
goto out_commit;
}
did_quota = 1;
ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh,
@ -3983,7 +3979,7 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
out_commit:
if (ret < 0 && did_quota)
vfs_dq_free_space_nodirty(dir,
dquot_free_space_nodirty(dir,
ocfs2_clusters_to_bytes(dir->i_sb, 1));
ocfs2_commit_trans(osb, handle);
@ -4165,11 +4161,10 @@ static int ocfs2_expand_inline_dx_root(struct inode *dir,
goto out;
}
if (vfs_dq_alloc_space_nodirty(dir,
ocfs2_clusters_to_bytes(osb->sb, 1))) {
ret = -EDQUOT;
ret = dquot_alloc_space_nodirty(dir,
ocfs2_clusters_to_bytes(osb->sb, 1));
if (ret)
goto out_commit;
}
did_quota = 1;
/*
@ -4229,7 +4224,7 @@ static int ocfs2_expand_inline_dx_root(struct inode *dir,
out_commit:
if (ret < 0 && did_quota)
vfs_dq_free_space_nodirty(dir,
dquot_free_space_nodirty(dir,
ocfs2_clusters_to_bytes(dir->i_sb, 1));
ocfs2_commit_trans(osb, handle);


@ -107,6 +107,9 @@ static int ocfs2_file_open(struct inode *inode, struct file *file)
mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name);
if (file->f_mode & FMODE_WRITE)
dquot_initialize(inode);
spin_lock(&oi->ip_lock);
/* Check that the inode hasn't been wiped from disk by another
@ -629,11 +632,10 @@ restart_all:
}
restarted_transaction:
if (vfs_dq_alloc_space_nodirty(inode, ocfs2_clusters_to_bytes(osb->sb,
clusters_to_add))) {
status = -EDQUOT;
status = dquot_alloc_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
if (status)
goto leave;
}
did_quota = 1;
/* reserve a write to the file entry early on - that we if we
@ -674,7 +676,7 @@ restarted_transaction:
clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
spin_unlock(&OCFS2_I(inode)->ip_lock);
/* Release unused quota reservation */
vfs_dq_free_space(inode,
dquot_free_space(inode,
ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
did_quota = 0;
@ -710,7 +712,7 @@ restarted_transaction:
leave:
if (status < 0 && did_quota)
vfs_dq_free_space(inode,
dquot_free_space(inode,
ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
if (handle) {
ocfs2_commit_trans(osb, handle);
@ -978,6 +980,8 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
if (size_change) {
dquot_initialize(inode);
status = ocfs2_rw_lock(inode, 1);
if (status < 0) {
mlog_errno(status);
@ -1020,7 +1024,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
/*
* Gather pointers to quota structures so that allocation /
* freeing of quota structures happens here and not inside
* vfs_dq_transfer() where we have problems with lock ordering
* dquot_transfer() where we have problems with lock ordering
*/
if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid
&& OCFS2_HAS_RO_COMPAT_FEATURE(sb,
@ -1053,7 +1057,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
mlog_errno(status);
goto bail_unlock;
}
status = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
status = dquot_transfer(inode, attr);
if (status < 0)
goto bail_commit;
} else {


@ -665,7 +665,7 @@ static int ocfs2_remove_inode(struct inode *inode,
}
ocfs2_remove_from_cache(INODE_CACHE(inode), di_bh);
vfs_dq_free_inode(inode);
dquot_free_inode(inode);
status = ocfs2_free_dinode(handle, inode_alloc_inode,
inode_alloc_bh, di);
@ -971,6 +971,8 @@ void ocfs2_delete_inode(struct inode *inode)
goto bail;
}
dquot_initialize(inode);
if (!ocfs2_inode_is_valid_to_delete(inode)) {
/* It's probably not necessary to truncate_inode_pages
* here but we do it for safety anyway (it will most
@ -1087,6 +1089,8 @@ void ocfs2_clear_inode(struct inode *inode)
mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL,
"Inode=%lu\n", inode->i_ino);
dquot_drop(inode);
/* To preven remote deletes we hold open lock before, now it
* is time to unlock PR and EX open locks. */
ocfs2_open_unlock(inode);


@ -212,7 +212,7 @@ static struct inode *ocfs2_get_init_inode(struct inode *dir, int mode)
} else
inode->i_gid = current_fsgid();
inode->i_mode = mode;
vfs_dq_init(inode);
dquot_initialize(inode);
return inode;
}
@ -244,6 +244,8 @@ static int ocfs2_mknod(struct inode *dir,
(unsigned long)dev, dentry->d_name.len,
dentry->d_name.name);
dquot_initialize(dir);
/* get our super block */
osb = OCFS2_SB(dir->i_sb);
@ -348,13 +350,9 @@ static int ocfs2_mknod(struct inode *dir,
goto leave;
}
/* We don't use standard VFS wrapper because we don't want vfs_dq_init
* to be called. */
if (sb_any_quota_active(osb->sb) &&
osb->sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) {
status = -EDQUOT;
status = dquot_alloc_inode(inode);
if (status)
goto leave;
}
did_quota_inode = 1;
mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry,
@ -431,7 +429,7 @@ static int ocfs2_mknod(struct inode *dir,
status = 0;
leave:
if (status < 0 && did_quota_inode)
vfs_dq_free_inode(inode);
dquot_free_inode(inode);
if (handle)
ocfs2_commit_trans(osb, handle);
@ -636,6 +634,8 @@ static int ocfs2_link(struct dentry *old_dentry,
if (S_ISDIR(inode->i_mode))
return -EPERM;
dquot_initialize(dir);
err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT);
if (err < 0) {
if (err != -ENOENT)
@ -791,6 +791,8 @@ static int ocfs2_unlink(struct inode *dir,
mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry,
dentry->d_name.len, dentry->d_name.name);
dquot_initialize(dir);
BUG_ON(dentry->d_parent->d_inode != dir);
mlog(0, "ino = %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
@ -1051,6 +1053,9 @@ static int ocfs2_rename(struct inode *old_dir,
old_dentry->d_name.len, old_dentry->d_name.name,
new_dentry->d_name.len, new_dentry->d_name.name);
dquot_initialize(old_dir);
dquot_initialize(new_dir);
osb = OCFS2_SB(old_dir->i_sb);
if (new_inode) {
@ -1599,6 +1604,8 @@ static int ocfs2_symlink(struct inode *dir,
mlog_entry("(0x%p, 0x%p, symname='%s' actual='%.*s')\n", dir,
dentry, symname, dentry->d_name.len, dentry->d_name.name);
dquot_initialize(dir);
sb = dir->i_sb;
osb = OCFS2_SB(sb);
@ -1688,13 +1695,9 @@ static int ocfs2_symlink(struct inode *dir,
goto bail;
}
/* We don't use standard VFS wrapper because we don't want vfs_dq_init
* to be called. */
if (sb_any_quota_active(osb->sb) &&
osb->sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) {
status = -EDQUOT;
status = dquot_alloc_inode(inode);
if (status)
goto bail;
}
did_quota_inode = 1;
mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry,
@ -1716,11 +1719,10 @@ static int ocfs2_symlink(struct inode *dir,
u32 offset = 0;
inode->i_op = &ocfs2_symlink_inode_operations;
if (vfs_dq_alloc_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, 1))) {
status = -EDQUOT;
status = dquot_alloc_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, 1));
if (status)
goto bail;
}
did_quota = 1;
status = ocfs2_add_inode_data(osb, inode, &offset, 1, 0,
new_fe_bh,
@ -1788,10 +1790,10 @@ static int ocfs2_symlink(struct inode *dir,
d_instantiate(dentry, inode);
bail:
if (status < 0 && did_quota)
vfs_dq_free_space_nodirty(inode,
dquot_free_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, 1));
if (status < 0 && did_quota_inode)
vfs_dq_free_inode(inode);
dquot_free_inode(inode);
if (handle)
ocfs2_commit_trans(osb, handle);
@ -2099,13 +2101,9 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
goto leave;
}
/* We don't use standard VFS wrapper because we don't want vfs_dq_init
* to be called. */
if (sb_any_quota_active(osb->sb) &&
osb->sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) {
status = -EDQUOT;
status = dquot_alloc_inode(inode);
if (status)
goto leave;
}
did_quota_inode = 1;
inode->i_nlink = 0;
@ -2140,7 +2138,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
insert_inode_hash(inode);
leave:
if (status < 0 && did_quota_inode)
vfs_dq_free_inode(inode);
dquot_free_inode(inode);
if (handle)
ocfs2_commit_trans(osb, handle);


@ -851,13 +851,6 @@ static void ocfs2_destroy_dquot(struct dquot *dquot)
}
const struct dquot_operations ocfs2_quota_operations = {
.initialize = dquot_initialize,
.drop = dquot_drop,
.alloc_space = dquot_alloc_space,
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
.free_inode = dquot_free_inode,
.transfer = dquot_transfer,
.write_dquot = ocfs2_write_dquot,
.acquire_dquot = ocfs2_acquire_dquot,
.release_dquot = ocfs2_release_dquot,


@ -4390,7 +4390,7 @@ static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
}
mutex_lock(&inode->i_mutex);
vfs_dq_init(dir);
dquot_initialize(dir);
error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
mutex_unlock(&inode->i_mutex);
if (!error)


@ -8,7 +8,6 @@
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/quotaops.h>
#include <linux/fsnotify.h>
#include <linux/module.h>
#include <linux/slab.h>
@ -278,10 +277,8 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
error = locks_verify_truncate(inode, NULL, length);
if (!error)
error = security_path_truncate(&path, length, 0);
if (!error) {
vfs_dq_init(inode);
if (!error)
error = do_truncate(path.dentry, length, 0, NULL);
}
put_write_and_out:
put_write_access(inode);


@ -59,3 +59,8 @@ config QUOTACTL
bool
depends on XFS_QUOTA || QUOTA
default y
config QUOTACTL_COMPAT
bool
depends on QUOTACTL && COMPAT_FOR_U64_ALIGNMENT
default y


@ -3,3 +3,5 @@ obj-$(CONFIG_QFMT_V1) += quota_v1.o
obj-$(CONFIG_QFMT_V2) += quota_v2.o
obj-$(CONFIG_QUOTA_TREE) += quota_tree.o
obj-$(CONFIG_QUOTACTL) += quota.o
obj-$(CONFIG_QUOTACTL_COMPAT) += compat.o
obj-$(CONFIG_QUOTA_NETLINK_INTERFACE) += netlink.o

fs/quota/compat.c Normal file

@ -0,0 +1,118 @@
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/quotaops.h>
/*
* This code works only for 32 bit quota tools over 64 bit OS (x86_64, ia64)
* and is necessary due to alignment problems.
*/
struct compat_if_dqblk {
compat_u64 dqb_bhardlimit;
compat_u64 dqb_bsoftlimit;
compat_u64 dqb_curspace;
compat_u64 dqb_ihardlimit;
compat_u64 dqb_isoftlimit;
compat_u64 dqb_curinodes;
compat_u64 dqb_btime;
compat_u64 dqb_itime;
compat_uint_t dqb_valid;
};
/* XFS structures */
struct compat_fs_qfilestat {
compat_u64 dqb_bhardlimit;
compat_u64 qfs_nblks;
compat_uint_t qfs_nextents;
};
struct compat_fs_quota_stat {
__s8 qs_version;
__u16 qs_flags;
__s8 qs_pad;
struct compat_fs_qfilestat qs_uquota;
struct compat_fs_qfilestat qs_gquota;
compat_uint_t qs_incoredqs;
compat_int_t qs_btimelimit;
compat_int_t qs_itimelimit;
compat_int_t qs_rtbtimelimit;
__u16 qs_bwarnlimit;
__u16 qs_iwarnlimit;
};
asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
qid_t id, void __user *addr)
{
unsigned int cmds;
struct if_dqblk __user *dqblk;
struct compat_if_dqblk __user *compat_dqblk;
struct fs_quota_stat __user *fsqstat;
struct compat_fs_quota_stat __user *compat_fsqstat;
compat_uint_t data;
u16 xdata;
long ret;
cmds = cmd >> SUBCMDSHIFT;
switch (cmds) {
case Q_GETQUOTA:
dqblk = compat_alloc_user_space(sizeof(struct if_dqblk));
compat_dqblk = addr;
ret = sys_quotactl(cmd, special, id, dqblk);
if (ret)
break;
if (copy_in_user(compat_dqblk, dqblk, sizeof(*compat_dqblk)) ||
get_user(data, &dqblk->dqb_valid) ||
put_user(data, &compat_dqblk->dqb_valid))
ret = -EFAULT;
break;
case Q_SETQUOTA:
dqblk = compat_alloc_user_space(sizeof(struct if_dqblk));
compat_dqblk = addr;
ret = -EFAULT;
if (copy_in_user(dqblk, compat_dqblk, sizeof(*compat_dqblk)) ||
get_user(data, &compat_dqblk->dqb_valid) ||
put_user(data, &dqblk->dqb_valid))
break;
ret = sys_quotactl(cmd, special, id, dqblk);
break;
case Q_XGETQSTAT:
fsqstat = compat_alloc_user_space(sizeof(struct fs_quota_stat));
compat_fsqstat = addr;
ret = sys_quotactl(cmd, special, id, fsqstat);
if (ret)
break;
ret = -EFAULT;
/* Copying qs_version, qs_flags, qs_pad */
if (copy_in_user(compat_fsqstat, fsqstat,
offsetof(struct compat_fs_quota_stat, qs_uquota)))
break;
/* Copying qs_uquota */
if (copy_in_user(&compat_fsqstat->qs_uquota,
&fsqstat->qs_uquota,
sizeof(compat_fsqstat->qs_uquota)) ||
get_user(data, &fsqstat->qs_uquota.qfs_nextents) ||
put_user(data, &compat_fsqstat->qs_uquota.qfs_nextents))
break;
/* Copying qs_gquota */
if (copy_in_user(&compat_fsqstat->qs_gquota,
&fsqstat->qs_gquota,
sizeof(compat_fsqstat->qs_gquota)) ||
get_user(data, &fsqstat->qs_gquota.qfs_nextents) ||
put_user(data, &compat_fsqstat->qs_gquota.qfs_nextents))
break;
/* Copying the rest */
if (copy_in_user(&compat_fsqstat->qs_incoredqs,
&fsqstat->qs_incoredqs,
sizeof(struct compat_fs_quota_stat) -
offsetof(struct compat_fs_quota_stat, qs_incoredqs)) ||
get_user(xdata, &fsqstat->qs_iwarnlimit) ||
put_user(xdata, &compat_fsqstat->qs_iwarnlimit))
break;
ret = 0;
break;
default:
ret = sys_quotactl(cmd, special, id, addr);
}
return ret;
}


@ -100,9 +100,13 @@
*
* Any operation working on dquots via inode pointers must hold dqptr_sem. If
* operation is just reading pointers from inode (or not using them at all) the
* read lock is enough. If pointers are altered function must hold write lock
* (these locking rules also apply for S_NOQUOTA flag in the inode - note that
* for altering the flag i_mutex is also needed).
* read lock is enough. If pointers are altered function must hold write lock.
* Special care needs to be taken about S_NOQUOTA inode flag (marking that
* inode is a quota file). Functions adding pointers from inode to dquots have
* to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they
* have to do all pointer modifications before dropping dqptr_sem. This makes
* sure they cannot race with quotaon which first sets S_NOQUOTA flag and
* then drops all pointers to dquots from an inode.
*
* Each dquot has its dq_lock mutex. Locked dquots might not be referenced
* from inodes (dquot_alloc_space() and such don't check the dq_lock).
@ -225,6 +229,9 @@ static struct hlist_head *dquot_hash;
struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);
static qsize_t inode_get_rsv_space(struct inode *inode);
static void __dquot_initialize(struct inode *inode, int type);
static inline unsigned int
hashfn(const struct super_block *sb, unsigned int id, int type)
{
@ -564,7 +571,7 @@ out:
}
EXPORT_SYMBOL(dquot_scan_active);
int vfs_quota_sync(struct super_block *sb, int type)
int vfs_quota_sync(struct super_block *sb, int type, int wait)
{
struct list_head *dirty;
struct dquot *dquot;
@ -609,6 +616,33 @@ int vfs_quota_sync(struct super_block *sb, int type)
spin_unlock(&dq_list_lock);
mutex_unlock(&dqopt->dqonoff_mutex);
if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE))
return 0;
/* This is not very clever (and fast) but currently I don't know about
* any other simple way of getting quota data to disk and we must get
* them there for userspace to be visible... */
if (sb->s_op->sync_fs)
sb->s_op->sync_fs(sb, 1);
sync_blockdev(sb->s_bdev);
/*
* Now when everything is written we can discard the pagecache so
* that userspace sees the changes.
*/
mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
I_MUTEX_QUOTA);
truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
}
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return 0;
}
EXPORT_SYMBOL(vfs_quota_sync);
@ -840,11 +874,14 @@ static int dqinit_needed(struct inode *inode, int type)
static void add_dquot_ref(struct super_block *sb, int type)
{
struct inode *inode, *old_inode = NULL;
int reserved = 0;
spin_lock(&inode_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
continue;
if (unlikely(inode_get_rsv_space(inode) > 0))
reserved = 1;
if (!atomic_read(&inode->i_writecount))
continue;
if (!dqinit_needed(inode, type))
@ -854,7 +891,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
spin_unlock(&inode_lock);
iput(old_inode);
sb->dq_op->initialize(inode, type);
__dquot_initialize(inode, type);
/* We hold a reference to 'inode' so it couldn't have been
* removed from s_inodes list while we dropped the inode_lock.
* We cannot iput the inode now as we can be holding the last
@ -865,6 +902,12 @@ static void add_dquot_ref(struct super_block *sb, int type)
}
spin_unlock(&inode_lock);
iput(old_inode);
if (reserved) {
printk(KERN_WARNING "VFS (%s): Writes happened before quota"
" was turned on thus quota information is probably "
"inconsistent. Please run quotacheck(8).\n", sb->s_id);
}
}
/*
@ -978,10 +1021,12 @@ static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
/*
* Claim reserved quota space
*/
static void dquot_claim_reserved_space(struct dquot *dquot,
qsize_t number)
static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
{
WARN_ON(dquot->dq_dqb.dqb_rsvspace < number);
if (dquot->dq_dqb.dqb_rsvspace < number) {
WARN_ON_ONCE(1);
number = dquot->dq_dqb.dqb_rsvspace;
}
dquot->dq_dqb.dqb_curspace += number;
dquot->dq_dqb.dqb_rsvspace -= number;
}
@ -989,7 +1034,12 @@ static void dquot_claim_reserved_space(struct dquot *dquot,
static inline
void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
{
dquot->dq_dqb.dqb_rsvspace -= number;
if (dquot->dq_dqb.dqb_rsvspace >= number)
dquot->dq_dqb.dqb_rsvspace -= number;
else {
WARN_ON_ONCE(1);
dquot->dq_dqb.dqb_rsvspace = 0;
}
}
static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
@ -1131,13 +1181,13 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
*warntype = QUOTA_NL_NOWARN;
if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
test_bit(DQ_FAKE_B, &dquot->dq_flags))
return QUOTA_OK;
return 0;
if (dquot->dq_dqb.dqb_ihardlimit &&
newinodes > dquot->dq_dqb.dqb_ihardlimit &&
!ignore_hardlimit(dquot)) {
*warntype = QUOTA_NL_IHARDWARN;
return NO_QUOTA;
return -EDQUOT;
}
if (dquot->dq_dqb.dqb_isoftlimit &&
@ -1146,7 +1196,7 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
get_seconds() >= dquot->dq_dqb.dqb_itime &&
!ignore_hardlimit(dquot)) {
*warntype = QUOTA_NL_ISOFTLONGWARN;
return NO_QUOTA;
return -EDQUOT;
}
if (dquot->dq_dqb.dqb_isoftlimit &&
@ -1157,7 +1207,7 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
}
return QUOTA_OK;
return 0;
}
/* needs dq_data_lock */
@ -1169,7 +1219,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
*warntype = QUOTA_NL_NOWARN;
if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) ||
test_bit(DQ_FAKE_B, &dquot->dq_flags))
return QUOTA_OK;
return 0;
tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
+ space;
@ -1179,7 +1229,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
!ignore_hardlimit(dquot)) {
if (!prealloc)
*warntype = QUOTA_NL_BHARDWARN;
return NO_QUOTA;
return -EDQUOT;
}
if (dquot->dq_dqb.dqb_bsoftlimit &&
@ -1189,7 +1239,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
!ignore_hardlimit(dquot)) {
if (!prealloc)
*warntype = QUOTA_NL_BSOFTLONGWARN;
return NO_QUOTA;
return -EDQUOT;
}
if (dquot->dq_dqb.dqb_bsoftlimit &&
@ -1205,10 +1255,10 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
* We don't allow preallocation to exceed softlimit so exceeding will
* be always printed
*/
return NO_QUOTA;
return -EDQUOT;
}
return QUOTA_OK;
return 0;
}
static int info_idq_free(struct dquot *dquot, qsize_t inodes)
@ -1242,25 +1292,32 @@ static int info_bdq_free(struct dquot *dquot, qsize_t space)
return QUOTA_NL_BHARDBELOW;
return QUOTA_NL_NOWARN;
}
/*
* Initialize quota pointers in inode
* We do things in a bit complicated way but by that we avoid calling
* dqget() and thus filesystem callbacks under dqptr_sem.
* Initialize quota pointers in inode
*
* We do things in a bit complicated way but by that we avoid calling
* dqget() and thus filesystem callbacks under dqptr_sem.
*
* It is better to call this function outside of any transaction as it
* might need a lot of space in journal for dquot structure allocation.
*/
int dquot_initialize(struct inode *inode, int type)
static void __dquot_initialize(struct inode *inode, int type)
{
unsigned int id = 0;
int cnt, ret = 0;
struct dquot *got[MAXQUOTAS] = { NULL, NULL };
int cnt;
struct dquot *got[MAXQUOTAS];
struct super_block *sb = inode->i_sb;
qsize_t rsv;
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode))
return 0;
if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode))
return;
/* First get references to structures we might need. */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
got[cnt] = NULL;
if (type != -1 && cnt != type)
continue;
switch (cnt) {
@ -1275,7 +1332,6 @@ int dquot_initialize(struct inode *inode, int type)
}
down_write(&sb_dqopt(sb)->dqptr_sem);
/* Having dqptr_sem we know NOQUOTA flags can't be altered... */
if (IS_NOQUOTA(inode))
goto out_err;
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@ -1287,20 +1343,31 @@ int dquot_initialize(struct inode *inode, int type)
if (!inode->i_dquot[cnt]) {
inode->i_dquot[cnt] = got[cnt];
got[cnt] = NULL;
/*
* Make quota reservation system happy if someone
* did a write before quota was turned on
*/
rsv = inode_get_rsv_space(inode);
if (unlikely(rsv))
dquot_resv_space(inode->i_dquot[cnt], rsv);
}
}
out_err:
up_write(&sb_dqopt(sb)->dqptr_sem);
/* Drop unused references */
dqput_all(got);
return ret;
}
void dquot_initialize(struct inode *inode)
{
__dquot_initialize(inode, -1);
}
EXPORT_SYMBOL(dquot_initialize);
/*
* Release all quotas referenced by inode
*/
int dquot_drop(struct inode *inode)
static void __dquot_drop(struct inode *inode)
{
int cnt;
struct dquot *put[MAXQUOTAS];
@ -1312,33 +1379,32 @@ int dquot_drop(struct inode *inode)
}
up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
dqput_all(put);
return 0;
}
void dquot_drop(struct inode *inode)
{
int cnt;
if (IS_NOQUOTA(inode))
return;
/*
* Test before calling to rule out calls from proc and such
* where we are not allowed to block. Note that this is
* actually reliable test even without the lock - the caller
* must assure that nobody can come after the DQUOT_DROP and
* add quota pointers back anyway.
*/
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (inode->i_dquot[cnt])
break;
}
if (cnt < MAXQUOTAS)
__dquot_drop(inode);
}
EXPORT_SYMBOL(dquot_drop);
/* Wrapper to remove references to quota structures from inode */
void vfs_dq_drop(struct inode *inode)
{
/* Here we can get arbitrary inode from clear_inode() so we have
* to be careful. OTOH we don't need locking as quota operations
* are allowed to change only at mount time */
if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op
&& inode->i_sb->dq_op->drop) {
int cnt;
/* Test before calling to rule out calls from proc and such
* where we are not allowed to block. Note that this is
* actually reliable test even without the lock - the caller
* must assure that nobody can come after the DQUOT_DROP and
* add quota pointers back anyway */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
if (inode->i_dquot[cnt])
break;
if (cnt < MAXQUOTAS)
inode->i_sb->dq_op->drop(inode);
}
}
EXPORT_SYMBOL(vfs_dq_drop);
/*
* inode_reserved_space is managed internally by quota, and protected by
* i_lock similar to i_blocks+i_bytes.
@ -1351,28 +1417,30 @@ static qsize_t *inode_reserved_space(struct inode * inode)
return inode->i_sb->dq_op->get_reserved_space(inode);
}
static void inode_add_rsv_space(struct inode *inode, qsize_t number)
void inode_add_rsv_space(struct inode *inode, qsize_t number)
{
spin_lock(&inode->i_lock);
*inode_reserved_space(inode) += number;
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_add_rsv_space);
static void inode_claim_rsv_space(struct inode *inode, qsize_t number)
void inode_claim_rsv_space(struct inode *inode, qsize_t number)
{
spin_lock(&inode->i_lock);
*inode_reserved_space(inode) -= number;
__inode_add_bytes(inode, number);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_claim_rsv_space);
static void inode_sub_rsv_space(struct inode *inode, qsize_t number)
void inode_sub_rsv_space(struct inode *inode, qsize_t number)
{
spin_lock(&inode->i_lock);
*inode_reserved_space(inode) -= number;
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_sub_rsv_space);
static qsize_t inode_get_rsv_space(struct inode *inode)
{
@ -1404,38 +1472,34 @@ static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
}
/*
* Following four functions update i_blocks+i_bytes fields and
* quota information (together with appropriate checks)
* NOTE: We absolutely rely on the fact that caller dirties
* the inode (usually macros in quotaops.h care about this) and
* holds a handle for the current transaction so that dquot write and
* inode write go into the same transaction.
* This functions updates i_blocks+i_bytes fields and quota information
* (together with appropriate checks).
*
* NOTE: We absolutely rely on the fact that caller dirties the inode
* (usually helpers in quotaops.h care about this) and holds a handle for
* the current transaction so that dquot write and inode write go into the
* same transaction.
*/
/*
* This operation can block, but only after everything is updated
*/
int __dquot_alloc_space(struct inode *inode, qsize_t number,
int warn, int reserve)
int warn, int reserve)
{
int cnt, ret = QUOTA_OK;
int cnt, ret = 0;
char warntype[MAXQUOTAS];
/*
* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex
*/
if (IS_NOQUOTA(inode)) {
if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) {
inode_incr_space(inode, number, reserve);
goto out;
}
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
if (IS_NOQUOTA(inode)) {
inode_incr_space(inode, number, reserve);
goto out_unlock;
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
@ -1443,9 +1507,9 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
continue;
if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
== NO_QUOTA) {
ret = NO_QUOTA;
ret = check_bdq(inode->i_dquot[cnt], number, !warn,
warntype+cnt);
if (ret) {
spin_unlock(&dq_data_lock);
goto out_flush_warn;
}
@ -1466,61 +1530,45 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
mark_all_dquot_dirty(inode->i_dquot);
out_flush_warn:
flush_warnings(inode->i_dquot, warntype);
out_unlock:
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
out:
return ret;
}
int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
{
return __dquot_alloc_space(inode, number, warn, 0);
}
EXPORT_SYMBOL(dquot_alloc_space);
int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
{
return __dquot_alloc_space(inode, number, warn, 1);
}
EXPORT_SYMBOL(dquot_reserve_space);
EXPORT_SYMBOL(__dquot_alloc_space);
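[Note: the dquot_alloc_space()/dquot_reserve_space() entry points removed above do not disappear; callers now go through thin static-inline wrappers in <linux/quotaops.h> that forward to the exported __dquot_alloc_space(). A rough sketch of the wrapper shape, illustrative rather than a verbatim copy of the header:]
static inline int dquot_alloc_space_nodirty(struct inode *inode, qsize_t nr)
{
	/* warn != 0: emit quota warnings; reserve == 0: charge space now */
	return __dquot_alloc_space(inode, nr, 1, 0);
}
static inline int dquot_reserve_block(struct inode *inode, qsize_t nr)
{
	/* reserve == 1: only reserve; claimed later through
	 * dquot_claim_space_nodirty() */
	return __dquot_alloc_space(inode, nr << inode->i_blkbits, 1, 1);
}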
/*
* This operation can block, but only after everything is updated
*/
int dquot_alloc_inode(const struct inode *inode, qsize_t number)
int dquot_alloc_inode(const struct inode *inode)
{
int cnt, ret = NO_QUOTA;
int cnt, ret = 0;
char warntype[MAXQUOTAS];
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode))
return QUOTA_OK;
if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode))
return 0;
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
if (IS_NOQUOTA(inode)) {
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
return QUOTA_OK;
}
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
continue;
if (check_idq(inode->i_dquot[cnt], number, warntype+cnt)
== NO_QUOTA)
ret = check_idq(inode->i_dquot[cnt], 1, warntype + cnt);
if (ret)
goto warn_put_all;
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
continue;
dquot_incr_inodes(inode->i_dquot[cnt], number);
dquot_incr_inodes(inode->i_dquot[cnt], 1);
}
ret = QUOTA_OK;
warn_put_all:
spin_unlock(&dq_data_lock);
if (ret == QUOTA_OK)
if (ret == 0)
mark_all_dquot_dirty(inode->i_dquot);
flush_warnings(inode->i_dquot, warntype);
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
@ -1528,23 +1576,19 @@ warn_put_all:
}
EXPORT_SYMBOL(dquot_alloc_inode);
int dquot_claim_space(struct inode *inode, qsize_t number)
/*
* Convert in-memory reserved quotas to real consumed quotas
*/
int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
int cnt;
int ret = QUOTA_OK;
if (IS_NOQUOTA(inode)) {
if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) {
inode_claim_rsv_space(inode, number);
goto out;
return 0;
}
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
if (IS_NOQUOTA(inode)) {
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
inode_claim_rsv_space(inode, number);
goto out;
}
spin_lock(&dq_data_lock);
/* Claim reserved quotas to allocated quotas */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@ -1557,33 +1601,26 @@ int dquot_claim_space(struct inode *inode, qsize_t number)
spin_unlock(&dq_data_lock);
mark_all_dquot_dirty(inode->i_dquot);
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
out:
return ret;
return 0;
}
EXPORT_SYMBOL(dquot_claim_space);
EXPORT_SYMBOL(dquot_claim_space_nodirty);
/*
* This operation can block, but only after everything is updated
*/
int __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
void __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
{
unsigned int cnt;
char warntype[MAXQUOTAS];
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode)) {
out_sub:
if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) {
inode_decr_space(inode, number, reserve);
return QUOTA_OK;
return;
}
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
/* Now recheck reliably when holding dqptr_sem */
if (IS_NOQUOTA(inode)) {
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
goto out_sub;
}
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
@ -1603,56 +1640,34 @@ out_sub:
out_unlock:
flush_warnings(inode->i_dquot, warntype);
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
return QUOTA_OK;
}
int dquot_free_space(struct inode *inode, qsize_t number)
{
return __dquot_free_space(inode, number, 0);
}
EXPORT_SYMBOL(dquot_free_space);
/*
* Release reserved quota space
*/
void dquot_release_reserved_space(struct inode *inode, qsize_t number)
{
__dquot_free_space(inode, number, 1);
}
EXPORT_SYMBOL(dquot_release_reserved_space);
EXPORT_SYMBOL(__dquot_free_space);
/*
* This operation can block, but only after everything is updated
*/
int dquot_free_inode(const struct inode *inode, qsize_t number)
void dquot_free_inode(const struct inode *inode)
{
unsigned int cnt;
char warntype[MAXQUOTAS];
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode))
return QUOTA_OK;
if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode))
return;
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
/* Now recheck reliably when holding dqptr_sem */
if (IS_NOQUOTA(inode)) {
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
return QUOTA_OK;
}
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
continue;
warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number);
dquot_decr_inodes(inode->i_dquot[cnt], number);
warntype[cnt] = info_idq_free(inode->i_dquot[cnt], 1);
dquot_decr_inodes(inode->i_dquot[cnt], 1);
}
spin_unlock(&dq_data_lock);
mark_all_dquot_dirty(inode->i_dquot);
flush_warnings(inode->i_dquot, warntype);
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
return QUOTA_OK;
}
EXPORT_SYMBOL(dquot_free_inode);
@ -1662,37 +1677,31 @@ EXPORT_SYMBOL(dquot_free_inode);
* This operation can block, but only after everything is updated
* A transaction must be started when entering this function.
*/
int dquot_transfer(struct inode *inode, struct iattr *iattr)
static int __dquot_transfer(struct inode *inode, qid_t *chid, unsigned long mask)
{
qsize_t space, cur_space;
qsize_t rsv_space = 0;
struct dquot *transfer_from[MAXQUOTAS];
struct dquot *transfer_to[MAXQUOTAS];
int cnt, ret = QUOTA_OK;
int chuid = iattr->ia_valid & ATTR_UID && inode->i_uid != iattr->ia_uid,
chgid = iattr->ia_valid & ATTR_GID && inode->i_gid != iattr->ia_gid;
int cnt, ret = 0;
char warntype_to[MAXQUOTAS];
char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode))
return QUOTA_OK;
return 0;
/* Initialize the arrays */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
transfer_from[cnt] = NULL;
transfer_to[cnt] = NULL;
warntype_to[cnt] = QUOTA_NL_NOWARN;
}
if (chuid)
transfer_to[USRQUOTA] = dqget(inode->i_sb, iattr->ia_uid,
USRQUOTA);
if (chgid)
transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid,
GRPQUOTA);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (mask & (1 << cnt))
transfer_to[cnt] = dqget(inode->i_sb, chid[cnt], cnt);
}
down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
/* Now recheck reliably when holding dqptr_sem */
if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
goto put_all;
@ -1706,9 +1715,11 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
if (!transfer_to[cnt])
continue;
transfer_from[cnt] = inode->i_dquot[cnt];
if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) ==
NO_QUOTA || check_bdq(transfer_to[cnt], space, 0,
warntype_to + cnt) == NO_QUOTA)
ret = check_idq(transfer_to[cnt], 1, warntype_to + cnt);
if (ret)
goto over_quota;
ret = check_bdq(transfer_to[cnt], space, 0, warntype_to + cnt);
if (ret)
goto over_quota;
}
@ -1762,22 +1773,32 @@ over_quota:
/* Clear dquot pointers we don't want to dqput() */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
transfer_from[cnt] = NULL;
ret = NO_QUOTA;
goto warn_put_all;
}
EXPORT_SYMBOL(dquot_transfer);
/* Wrapper for transferring ownership of an inode */
int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
/* Wrapper for transferring ownership of an inode for uid/gid only
* Called from FSXXX_setattr()
*/
int dquot_transfer(struct inode *inode, struct iattr *iattr)
{
qid_t chid[MAXQUOTAS];
unsigned long mask = 0;
if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) {
mask |= 1 << USRQUOTA;
chid[USRQUOTA] = iattr->ia_uid;
}
if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) {
mask |= 1 << GRPQUOTA;
chid[GRPQUOTA] = iattr->ia_gid;
}
if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) {
vfs_dq_init(inode);
if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA)
return 1;
dquot_initialize(inode);
return __dquot_transfer(inode, chid, mask);
}
return 0;
}
EXPORT_SYMBOL(vfs_dq_transfer);
EXPORT_SYMBOL(dquot_transfer);
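[Usage note: dquot_transfer() is meant to be called from a filesystem's ->setattr() once inode_change_ok() has passed; the calling convention, mirroring the jfs_setattr() and ocfs2_setattr() hunks earlier in this diff, is roughly the following, with examplefs_setattr() being a hypothetical caller:]
	/* inside examplefs_setattr(), after inode_change_ok() succeeded */
	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
		rc = dquot_transfer(inode, iattr);
		if (rc)
			return rc;
	}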
/*
* Write info of quota file to disk
@ -1798,13 +1819,6 @@ EXPORT_SYMBOL(dquot_commit_info);
* Definitions of diskquota operations.
*/
const struct dquot_operations dquot_operations = {
.initialize = dquot_initialize,
.drop = dquot_drop,
.alloc_space = dquot_alloc_space,
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
.free_inode = dquot_free_inode,
.transfer = dquot_transfer,
.write_dquot = dquot_commit,
.acquire_dquot = dquot_acquire,
.release_dquot = dquot_release,
@ -1814,6 +1828,20 @@ const struct dquot_operations dquot_operations = {
.destroy_dquot = dquot_destroy,
};
/*
* Generic helper for ->open on filesystems supporting disk quotas.
*/
int dquot_file_open(struct inode *inode, struct file *file)
{
int error;
error = generic_file_open(inode, file);
if (!error && (file->f_mode & FMODE_WRITE))
dquot_initialize(inode);
return error;
}
EXPORT_SYMBOL(dquot_file_open);
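[Usage note: filesystems that need nothing beyond quota initialization on open can point their file_operations at the helper directly; a minimal sketch for a hypothetical examplefs (JFS above instead calls it from its own jfs_open()):]
const struct file_operations examplefs_file_operations = {
	.llseek	= generic_file_llseek,
	.read	= do_sync_read,
	.write	= do_sync_write,
	.open	= dquot_file_open,	/* runs dquot_initialize() on writable opens */
};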
/*
* Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
*/
@ -1993,11 +2021,13 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
}
if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
/* As we bypass the pagecache we must now flush the inode so
* that we see all the changes from userspace... */
write_inode_now(inode, 1);
/* And now flush the block cache so that kernel sees the
* changes */
/* As we bypass the pagecache we must now flush all the
* dirty data and invalidate caches so that kernel sees
* changes from userspace. It is not enough to just flush
* the quota file since if blocksize < pagesize, invalidation
* of the cache could fail because of other unrelated dirty
* data */
sync_filesystem(sb);
invalidate_bdev(sb->s_bdev);
}
mutex_lock(&dqopt->dqonoff_mutex);
@ -2010,14 +2040,16 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
/* We don't want quota and atime on quota files (deadlocks
* possible) Also nobody should write to the file - we use
* special IO operations which ignore the immutable bit. */
down_write(&dqopt->dqptr_sem);
mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
S_NOQUOTA);
inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
mutex_unlock(&inode->i_mutex);
up_write(&dqopt->dqptr_sem);
sb->dq_op->drop(inode);
/*
* When S_NOQUOTA is set, remove dquot references as no more
* references can be added
*/
__dquot_drop(inode);
}
error = -EIO;
@ -2053,14 +2085,12 @@ out_file_init:
iput(inode);
out_lock:
if (oldflags != -1) {
down_write(&dqopt->dqptr_sem);
mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
/* Set the flags back (in the case of accidental quotaon()
* on a wrong file we don't want to mess up the flags) */
inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
inode->i_flags |= oldflags;
mutex_unlock(&inode->i_mutex);
up_write(&dqopt->dqptr_sem);
}
mutex_unlock(&dqopt->dqonoff_mutex);
out_fmt:

fs/quota/netlink.c Normal file

@ -0,0 +1,95 @@
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/quotaops.h>
#include <linux/sched.h>
#include <net/netlink.h>
#include <net/genetlink.h>
/* Netlink family structure for quota */
static struct genl_family quota_genl_family = {
.id = GENL_ID_GENERATE,
.hdrsize = 0,
.name = "VFS_DQUOT",
.version = 1,
.maxattr = QUOTA_NL_A_MAX,
};
/**
* quota_send_warning - Send warning to userspace about exceeded quota
* @type: The quota type: USRQUOTA, GRPQUOTA,...
* @id: The user or group id of the quota that was exceeded
* @dev: The device on which the fs is mounted (sb->s_dev)
* @warntype: The type of the warning: QUOTA_NL_...
*
* This can be used by filesystems (including those which don't use
* dquot) to send a message to userspace relating to quota limits.
*
*/
void quota_send_warning(short type, unsigned int id, dev_t dev,
const char warntype)
{
static atomic_t seq;
struct sk_buff *skb;
void *msg_head;
int ret;
int msg_size = 4 * nla_total_size(sizeof(u32)) +
2 * nla_total_size(sizeof(u64));
/* We have to allocate using GFP_NOFS as we are called from a
* filesystem performing write and thus further recursion into
* the fs to free some data could cause deadlocks. */
skb = genlmsg_new(msg_size, GFP_NOFS);
if (!skb) {
printk(KERN_ERR
"VFS: Not enough memory to send quota warning.\n");
return;
}
msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
&quota_genl_family, 0, QUOTA_NL_C_WARNING);
if (!msg_head) {
printk(KERN_ERR
"VFS: Cannot store netlink header in quota warning.\n");
goto err_out;
}
ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type);
if (ret)
goto attr_err_out;
ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id);
if (ret)
goto attr_err_out;
ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
if (ret)
goto attr_err_out;
ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
if (ret)
goto attr_err_out;
ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
if (ret)
goto attr_err_out;
ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
if (ret)
goto attr_err_out;
genlmsg_end(skb, msg_head);
genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
return;
attr_err_out:
printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
err_out:
kfree_skb(skb);
}
EXPORT_SYMBOL(quota_send_warning);
static int __init quota_init(void)
{
if (genl_register_family(&quota_genl_family) != 0)
printk(KERN_ERR
"VFS: Failed to create quota netlink interface.\n");
return 0;
};
module_init(quota_init);
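As the comment above quota_send_warning() notes, any filesystem (dquot-based or not) may use it to notify userspace when a limit is exceeded. A hedged sketch of such a call site; the "myfs_" wrapper and the soft-limit trigger are placeholders, only quota_send_warning() itself comes from the file above:

#include <linux/fs.h>
#include <linux/quota.h>

static void myfs_warn_block_softlimit(struct inode *inode)
{
	/* Tell userspace that this uid crossed its block soft limit. */
	quota_send_warning(USRQUOTA, inode->i_uid,
			   inode->i_sb->s_dev, QUOTA_NL_BSOFTWARN);
}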


@ -10,7 +10,6 @@
#include <linux/slab.h>
#include <asm/current.h>
#include <asm/uaccess.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/security.h>
#include <linux/syscalls.h>
@ -18,220 +17,205 @@
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/types.h>
#include <net/netlink.h>
#include <net/genetlink.h>
#include <linux/writeback.h>
/* Check validity of generic quotactl commands */
static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
qid_t id)
static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
qid_t id)
{
if (type >= MAXQUOTAS)
return -EINVAL;
if (!sb && cmd != Q_SYNC)
return -ENODEV;
/* Is operation supported? */
if (sb && !sb->s_qcop)
return -ENOSYS;
switch (cmd) {
case Q_GETFMT:
/* these commands do not require any special privileges */
case Q_GETFMT:
case Q_SYNC:
case Q_GETINFO:
case Q_XGETQSTAT:
case Q_XQUOTASYNC:
break;
/* allow to query information for dquots we "own" */
case Q_GETQUOTA:
case Q_XGETQUOTA:
if ((type == USRQUOTA && current_euid() == id) ||
(type == GRPQUOTA && in_egroup_p(id)))
break;
case Q_QUOTAON:
if (!sb->s_qcop->quota_on)
return -ENOSYS;
break;
case Q_QUOTAOFF:
if (!sb->s_qcop->quota_off)
return -ENOSYS;
break;
case Q_SETINFO:
if (!sb->s_qcop->set_info)
return -ENOSYS;
break;
case Q_GETINFO:
if (!sb->s_qcop->get_info)
return -ENOSYS;
break;
case Q_SETQUOTA:
if (!sb->s_qcop->set_dqblk)
return -ENOSYS;
break;
case Q_GETQUOTA:
if (!sb->s_qcop->get_dqblk)
return -ENOSYS;
break;
case Q_SYNC:
if (sb && !sb->s_qcop->quota_sync)
return -ENOSYS;
break;
default:
return -EINVAL;
}
/* Is quota turned on for commands which need it? */
switch (cmd) {
case Q_GETFMT:
case Q_GETINFO:
case Q_SETINFO:
case Q_SETQUOTA:
case Q_GETQUOTA:
/* This is just an informative test so we are satisfied
* without the lock */
if (!sb_has_quota_active(sb, type))
return -ESRCH;
}
/* Check privileges */
if (cmd == Q_GETQUOTA) {
if (((type == USRQUOTA && current_euid() != id) ||
(type == GRPQUOTA && !in_egroup_p(id))) &&
!capable(CAP_SYS_ADMIN))
return -EPERM;
}
else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return 0;
}
/* Check validity of XFS Quota Manager commands */
static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd,
qid_t id)
{
if (type >= XQM_MAXQUOTAS)
return -EINVAL;
if (!sb)
return -ENODEV;
if (!sb->s_qcop)
return -ENOSYS;
switch (cmd) {
case Q_XQUOTAON:
case Q_XQUOTAOFF:
case Q_XQUOTARM:
if (!sb->s_qcop->set_xstate)
return -ENOSYS;
break;
case Q_XGETQSTAT:
if (!sb->s_qcop->get_xstate)
return -ENOSYS;
break;
case Q_XSETQLIM:
if (!sb->s_qcop->set_xquota)
return -ENOSYS;
break;
case Q_XGETQUOTA:
if (!sb->s_qcop->get_xquota)
return -ENOSYS;
break;
case Q_XQUOTASYNC:
if (!sb->s_qcop->quota_sync)
return -ENOSYS;
break;
default:
return -EINVAL;
}
/* Check privileges */
if (cmd == Q_XGETQUOTA) {
if (((type == XQM_USRQUOTA && current_euid() != id) ||
(type == XQM_GRPQUOTA && !in_egroup_p(id))) &&
!capable(CAP_SYS_ADMIN))
return -EPERM;
} else if (cmd != Q_XGETQSTAT && cmd != Q_XQUOTASYNC) {
/*FALLTHROUGH*/
default:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
}
return 0;
return security_quotactl(cmd, type, id, sb);
}
static int check_quotactl_valid(struct super_block *sb, int type, int cmd,
qid_t id)
{
int error;
if (XQM_COMMAND(cmd))
error = xqm_quotactl_valid(sb, type, cmd, id);
else
error = generic_quotactl_valid(sb, type, cmd, id);
if (!error)
error = security_quotactl(cmd, type, id, sb);
return error;
}
#ifdef CONFIG_QUOTA
void sync_quota_sb(struct super_block *sb, int type)
{
int cnt;
if (!sb->s_qcop->quota_sync)
return;
sb->s_qcop->quota_sync(sb, type);
if (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE)
return;
/* This is not very clever (nor fast) but currently I don't know of
* any other simple way of getting quota data to disk and we must get
* it there for userspace to see it... */
if (sb->s_op->sync_fs)
sb->s_op->sync_fs(sb, 1);
sync_blockdev(sb->s_bdev);
/*
* Now when everything is written we can discard the pagecache so
* that userspace sees the changes.
*/
mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
I_MUTEX_QUOTA);
truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
}
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
}
#endif
static void sync_dquots(int type)
static int quota_sync_all(int type)
{
struct super_block *sb;
int cnt;
int ret;
if (type >= MAXQUOTAS)
return -EINVAL;
ret = security_quotactl(Q_SYNC, type, 0, NULL);
if (ret)
return ret;
spin_lock(&sb_lock);
restart:
list_for_each_entry(sb, &super_blocks, s_list) {
/* This test just improves performance so it needn't be
* reliable... */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && type != cnt)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
if (!info_dirty(&sb_dqopt(sb)->info[cnt]) &&
list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list))
continue;
break;
}
if (cnt == MAXQUOTAS)
if (!sb->s_qcop || !sb->s_qcop->quota_sync)
continue;
sb->s_count++;
spin_unlock(&sb_lock);
down_read(&sb->s_umount);
if (sb->s_root)
sync_quota_sb(sb, type);
sb->s_qcop->quota_sync(sb, type, 1);
up_read(&sb->s_umount);
spin_lock(&sb_lock);
if (__put_super_and_need_restart(sb))
goto restart;
}
spin_unlock(&sb_lock);
return 0;
}
static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
void __user *addr)
{
char *pathname;
int ret = -ENOSYS;
pathname = getname(addr);
if (IS_ERR(pathname))
return PTR_ERR(pathname);
if (sb->s_qcop->quota_on)
ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0);
putname(pathname);
return ret;
}
static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
{
__u32 fmt;
down_read(&sb_dqopt(sb)->dqptr_sem);
if (!sb_has_quota_active(sb, type)) {
up_read(&sb_dqopt(sb)->dqptr_sem);
return -ESRCH;
}
fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
up_read(&sb_dqopt(sb)->dqptr_sem);
if (copy_to_user(addr, &fmt, sizeof(fmt)))
return -EFAULT;
return 0;
}
static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
{
struct if_dqinfo info;
int ret;
if (!sb_has_quota_active(sb, type))
return -ESRCH;
if (!sb->s_qcop->get_info)
return -ENOSYS;
ret = sb->s_qcop->get_info(sb, type, &info);
if (!ret && copy_to_user(addr, &info, sizeof(info)))
return -EFAULT;
return ret;
}
static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
{
struct if_dqinfo info;
if (copy_from_user(&info, addr, sizeof(info)))
return -EFAULT;
if (!sb_has_quota_active(sb, type))
return -ESRCH;
if (!sb->s_qcop->set_info)
return -ENOSYS;
return sb->s_qcop->set_info(sb, type, &info);
}
static int quota_getquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct if_dqblk idq;
int ret;
if (!sb_has_quota_active(sb, type))
return -ESRCH;
if (!sb->s_qcop->get_dqblk)
return -ENOSYS;
ret = sb->s_qcop->get_dqblk(sb, type, id, &idq);
if (ret)
return ret;
if (copy_to_user(addr, &idq, sizeof(idq)))
return -EFAULT;
return 0;
}
static int quota_setquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct if_dqblk idq;
if (copy_from_user(&idq, addr, sizeof(idq)))
return -EFAULT;
if (!sb_has_quota_active(sb, type))
return -ESRCH;
if (!sb->s_qcop->set_dqblk)
return -ENOSYS;
return sb->s_qcop->set_dqblk(sb, type, id, &idq);
}
static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr)
{
__u32 flags;
if (copy_from_user(&flags, addr, sizeof(flags)))
return -EFAULT;
if (!sb->s_qcop->set_xstate)
return -ENOSYS;
return sb->s_qcop->set_xstate(sb, flags, cmd);
}
static int quota_getxstate(struct super_block *sb, void __user *addr)
{
struct fs_quota_stat fqs;
int ret;
if (!sb->s_qcop->get_xstate)
return -ENOSYS;
ret = sb->s_qcop->get_xstate(sb, &fqs);
if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
return -EFAULT;
return ret;
}
static int quota_setxquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct fs_disk_quota fdq;
if (copy_from_user(&fdq, addr, sizeof(fdq)))
return -EFAULT;
if (!sb->s_qcop->set_xquota)
return -ENOSYS;
return sb->s_qcop->set_xquota(sb, type, id, &fdq);
}
static int quota_getxquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct fs_disk_quota fdq;
int ret;
if (!sb->s_qcop->get_xquota)
return -ENOSYS;
ret = sb->s_qcop->get_xquota(sb, type, id, &fdq);
if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
return -EFAULT;
return ret;
}
/* Copy parameters and call proper function */
@ -240,117 +224,55 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
{
int ret;
if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
return -EINVAL;
if (!sb->s_qcop)
return -ENOSYS;
ret = check_quotactl_permission(sb, type, cmd, id);
if (ret < 0)
return ret;
switch (cmd) {
case Q_QUOTAON: {
char *pathname;
pathname = getname(addr);
if (IS_ERR(pathname))
return PTR_ERR(pathname);
ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0);
putname(pathname);
return ret;
}
case Q_QUOTAOFF:
return sb->s_qcop->quota_off(sb, type, 0);
case Q_GETFMT: {
__u32 fmt;
down_read(&sb_dqopt(sb)->dqptr_sem);
if (!sb_has_quota_active(sb, type)) {
up_read(&sb_dqopt(sb)->dqptr_sem);
return -ESRCH;
}
fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
up_read(&sb_dqopt(sb)->dqptr_sem);
if (copy_to_user(addr, &fmt, sizeof(fmt)))
return -EFAULT;
return 0;
}
case Q_GETINFO: {
struct if_dqinfo info;
ret = sb->s_qcop->get_info(sb, type, &info);
if (ret)
return ret;
if (copy_to_user(addr, &info, sizeof(info)))
return -EFAULT;
return 0;
}
case Q_SETINFO: {
struct if_dqinfo info;
if (copy_from_user(&info, addr, sizeof(info)))
return -EFAULT;
return sb->s_qcop->set_info(sb, type, &info);
}
case Q_GETQUOTA: {
struct if_dqblk idq;
ret = sb->s_qcop->get_dqblk(sb, type, id, &idq);
if (ret)
return ret;
if (copy_to_user(addr, &idq, sizeof(idq)))
return -EFAULT;
return 0;
}
case Q_SETQUOTA: {
struct if_dqblk idq;
if (copy_from_user(&idq, addr, sizeof(idq)))
return -EFAULT;
return sb->s_qcop->set_dqblk(sb, type, id, &idq);
}
case Q_SYNC:
if (sb)
sync_quota_sb(sb, type);
else
sync_dquots(type);
return 0;
case Q_XQUOTAON:
case Q_XQUOTAOFF:
case Q_XQUOTARM: {
__u32 flags;
if (copy_from_user(&flags, addr, sizeof(flags)))
return -EFAULT;
return sb->s_qcop->set_xstate(sb, flags, cmd);
}
case Q_XGETQSTAT: {
struct fs_quota_stat fqs;
if ((ret = sb->s_qcop->get_xstate(sb, &fqs)))
return ret;
if (copy_to_user(addr, &fqs, sizeof(fqs)))
return -EFAULT;
return 0;
}
case Q_XSETQLIM: {
struct fs_disk_quota fdq;
if (copy_from_user(&fdq, addr, sizeof(fdq)))
return -EFAULT;
return sb->s_qcop->set_xquota(sb, type, id, &fdq);
}
case Q_XGETQUOTA: {
struct fs_disk_quota fdq;
ret = sb->s_qcop->get_xquota(sb, type, id, &fdq);
if (ret)
return ret;
if (copy_to_user(addr, &fdq, sizeof(fdq)))
return -EFAULT;
return 0;
}
case Q_XQUOTASYNC:
return sb->s_qcop->quota_sync(sb, type);
/* We never reach here unless validity check is broken */
default:
BUG();
case Q_QUOTAON:
return quota_quotaon(sb, type, cmd, id, addr);
case Q_QUOTAOFF:
if (!sb->s_qcop->quota_off)
return -ENOSYS;
return sb->s_qcop->quota_off(sb, type, 0);
case Q_GETFMT:
return quota_getfmt(sb, type, addr);
case Q_GETINFO:
return quota_getinfo(sb, type, addr);
case Q_SETINFO:
return quota_setinfo(sb, type, addr);
case Q_GETQUOTA:
return quota_getquota(sb, type, id, addr);
case Q_SETQUOTA:
return quota_setquota(sb, type, id, addr);
case Q_SYNC:
if (!sb->s_qcop->quota_sync)
return -ENOSYS;
return sb->s_qcop->quota_sync(sb, type, 1);
case Q_XQUOTAON:
case Q_XQUOTAOFF:
case Q_XQUOTARM:
return quota_setxstate(sb, cmd, addr);
case Q_XGETQSTAT:
return quota_getxstate(sb, addr);
case Q_XSETQLIM:
return quota_setxquota(sb, type, id, addr);
case Q_XGETQUOTA:
return quota_getxquota(sb, type, id, addr);
case Q_XQUOTASYNC:
/* caller already holds s_umount */
if (sb->s_flags & MS_RDONLY)
return -EROFS;
writeback_inodes_sb(sb);
return 0;
default:
return -EINVAL;
}
return 0;
}
/*
@ -397,224 +319,23 @@ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
cmds = cmd >> SUBCMDSHIFT;
type = cmd & SUBCMDMASK;
if (cmds != Q_SYNC || special) {
sb = quotactl_block(special);
if (IS_ERR(sb))
return PTR_ERR(sb);
/*
* As a special case Q_SYNC can be called without a specific device.
* It will iterate all superblocks that have quota enabled and call
* the sync action on each of them.
*/
if (!special) {
if (cmds == Q_SYNC)
return quota_sync_all(type);
return -ENODEV;
}
ret = check_quotactl_valid(sb, type, cmds, id);
if (ret >= 0)
ret = do_quotactl(sb, type, cmds, id, addr);
if (sb)
drop_super(sb);
sb = quotactl_block(special);
if (IS_ERR(sb))
return PTR_ERR(sb);
ret = do_quotactl(sb, type, cmds, id, addr);
drop_super(sb);
return ret;
}
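For reference, the Q_SYNC special case described above (no device given) can be exercised from userspace by passing a NULL special argument. A minimal, hedged example program (not part of the patch):

#include <stdio.h>
#include <sys/quota.h>

int main(void)
{
	/* A NULL "special" makes the kernel sync quotas on every
	 * superblock that has them enabled (quota_sync_all above). */
	if (quotactl(QCMD(Q_SYNC, USRQUOTA), NULL, 0, NULL) != 0) {
		perror("quotactl(Q_SYNC)");
		return 1;
	}
	return 0;
}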
#if defined(CONFIG_COMPAT_FOR_U64_ALIGNMENT)
/*
* This code works only for 32 bit quota tools over 64 bit OS (x86_64, ia64)
* and is necessary due to alignment problems.
*/
struct compat_if_dqblk {
compat_u64 dqb_bhardlimit;
compat_u64 dqb_bsoftlimit;
compat_u64 dqb_curspace;
compat_u64 dqb_ihardlimit;
compat_u64 dqb_isoftlimit;
compat_u64 dqb_curinodes;
compat_u64 dqb_btime;
compat_u64 dqb_itime;
compat_uint_t dqb_valid;
};
/* XFS structures */
struct compat_fs_qfilestat {
compat_u64 dqb_bhardlimit;
compat_u64 qfs_nblks;
compat_uint_t qfs_nextents;
};
struct compat_fs_quota_stat {
__s8 qs_version;
__u16 qs_flags;
__s8 qs_pad;
struct compat_fs_qfilestat qs_uquota;
struct compat_fs_qfilestat qs_gquota;
compat_uint_t qs_incoredqs;
compat_int_t qs_btimelimit;
compat_int_t qs_itimelimit;
compat_int_t qs_rtbtimelimit;
__u16 qs_bwarnlimit;
__u16 qs_iwarnlimit;
};
asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
qid_t id, void __user *addr)
{
unsigned int cmds;
struct if_dqblk __user *dqblk;
struct compat_if_dqblk __user *compat_dqblk;
struct fs_quota_stat __user *fsqstat;
struct compat_fs_quota_stat __user *compat_fsqstat;
compat_uint_t data;
u16 xdata;
long ret;
cmds = cmd >> SUBCMDSHIFT;
switch (cmds) {
case Q_GETQUOTA:
dqblk = compat_alloc_user_space(sizeof(struct if_dqblk));
compat_dqblk = addr;
ret = sys_quotactl(cmd, special, id, dqblk);
if (ret)
break;
if (copy_in_user(compat_dqblk, dqblk, sizeof(*compat_dqblk)) ||
get_user(data, &dqblk->dqb_valid) ||
put_user(data, &compat_dqblk->dqb_valid))
ret = -EFAULT;
break;
case Q_SETQUOTA:
dqblk = compat_alloc_user_space(sizeof(struct if_dqblk));
compat_dqblk = addr;
ret = -EFAULT;
if (copy_in_user(dqblk, compat_dqblk, sizeof(*compat_dqblk)) ||
get_user(data, &compat_dqblk->dqb_valid) ||
put_user(data, &dqblk->dqb_valid))
break;
ret = sys_quotactl(cmd, special, id, dqblk);
break;
case Q_XGETQSTAT:
fsqstat = compat_alloc_user_space(sizeof(struct fs_quota_stat));
compat_fsqstat = addr;
ret = sys_quotactl(cmd, special, id, fsqstat);
if (ret)
break;
ret = -EFAULT;
/* Copying qs_version, qs_flags, qs_pad */
if (copy_in_user(compat_fsqstat, fsqstat,
offsetof(struct compat_fs_quota_stat, qs_uquota)))
break;
/* Copying qs_uquota */
if (copy_in_user(&compat_fsqstat->qs_uquota,
&fsqstat->qs_uquota,
sizeof(compat_fsqstat->qs_uquota)) ||
get_user(data, &fsqstat->qs_uquota.qfs_nextents) ||
put_user(data, &compat_fsqstat->qs_uquota.qfs_nextents))
break;
/* Copying qs_gquota */
if (copy_in_user(&compat_fsqstat->qs_gquota,
&fsqstat->qs_gquota,
sizeof(compat_fsqstat->qs_gquota)) ||
get_user(data, &fsqstat->qs_gquota.qfs_nextents) ||
put_user(data, &compat_fsqstat->qs_gquota.qfs_nextents))
break;
/* Copying the rest */
if (copy_in_user(&compat_fsqstat->qs_incoredqs,
&fsqstat->qs_incoredqs,
sizeof(struct compat_fs_quota_stat) -
offsetof(struct compat_fs_quota_stat, qs_incoredqs)) ||
get_user(xdata, &fsqstat->qs_iwarnlimit) ||
put_user(xdata, &compat_fsqstat->qs_iwarnlimit))
break;
ret = 0;
break;
default:
ret = sys_quotactl(cmd, special, id, addr);
}
return ret;
}
#endif
#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
/* Netlink family structure for quota */
static struct genl_family quota_genl_family = {
.id = GENL_ID_GENERATE,
.hdrsize = 0,
.name = "VFS_DQUOT",
.version = 1,
.maxattr = QUOTA_NL_A_MAX,
};
/**
* quota_send_warning - Send warning to userspace about exceeded quota
* @type: The quota type: USRQUOTA, GRPQUOTA,...
* @id: The user or group id of the quota that was exceeded
* @dev: The device on which the fs is mounted (sb->s_dev)
* @warntype: The type of the warning: QUOTA_NL_...
*
* This can be used by filesystems (including those which don't use
* dquot) to send a message to userspace relating to quota limits.
*
*/
void quota_send_warning(short type, unsigned int id, dev_t dev,
const char warntype)
{
static atomic_t seq;
struct sk_buff *skb;
void *msg_head;
int ret;
int msg_size = 4 * nla_total_size(sizeof(u32)) +
2 * nla_total_size(sizeof(u64));
/* We have to allocate using GFP_NOFS as we are called from a
* filesystem performing write and thus further recursion into
* the fs to free some data could cause deadlocks. */
skb = genlmsg_new(msg_size, GFP_NOFS);
if (!skb) {
printk(KERN_ERR
"VFS: Not enough memory to send quota warning.\n");
return;
}
msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
&quota_genl_family, 0, QUOTA_NL_C_WARNING);
if (!msg_head) {
printk(KERN_ERR
"VFS: Cannot store netlink header in quota warning.\n");
goto err_out;
}
ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type);
if (ret)
goto attr_err_out;
ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id);
if (ret)
goto attr_err_out;
ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
if (ret)
goto attr_err_out;
ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
if (ret)
goto attr_err_out;
ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
if (ret)
goto attr_err_out;
ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
if (ret)
goto attr_err_out;
genlmsg_end(skb, msg_head);
genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
return;
attr_err_out:
printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
err_out:
kfree_skb(skb);
}
EXPORT_SYMBOL(quota_send_warning);
static int __init quota_init(void)
{
if (genl_register_family(&quota_genl_family) != 0)
printk(KERN_ERR
"VFS: Failed to create quota netlink interface.\n");
return 0;
};
module_init(quota_init);
#endif


@ -425,7 +425,7 @@ static void _reiserfs_free_block(struct reiserfs_transaction_handle *th,
journal_mark_dirty(th, s, sbh);
if (for_unformatted)
vfs_dq_free_block_nodirty(inode, 1);
dquot_free_block_nodirty(inode, 1);
}
void reiserfs_free_block(struct reiserfs_transaction_handle *th,
@ -1049,7 +1049,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
amount_needed, hint->inode->i_uid);
#endif
quota_ret =
vfs_dq_alloc_block_nodirty(hint->inode, amount_needed);
dquot_alloc_block_nodirty(hint->inode, amount_needed);
if (quota_ret) /* Quota exceeded? */
return QUOTA_EXCEEDED;
if (hint->preallocate && hint->prealloc_size) {
@ -1058,7 +1058,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
"reiserquota: allocating (prealloc) %d blocks id=%u",
hint->prealloc_size, hint->inode->i_uid);
#endif
quota_ret = vfs_dq_prealloc_block_nodirty(hint->inode,
quota_ret = dquot_prealloc_block_nodirty(hint->inode,
hint->prealloc_size);
if (quota_ret)
hint->preallocate = hint->prealloc_size = 0;
@ -1092,7 +1092,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
hint->inode->i_uid);
#endif
/* Free not allocated blocks */
vfs_dq_free_block_nodirty(hint->inode,
dquot_free_block_nodirty(hint->inode,
amount_needed + hint->prealloc_size -
nr_allocated);
}
@ -1125,7 +1125,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
REISERFS_I(hint->inode)->i_prealloc_count,
hint->inode->i_uid);
#endif
vfs_dq_free_block_nodirty(hint->inode, amount_needed +
dquot_free_block_nodirty(hint->inode, amount_needed +
hint->prealloc_size - nr_allocated -
REISERFS_I(hint->inode)->
i_prealloc_count);


@ -289,7 +289,7 @@ const struct file_operations reiserfs_file_operations = {
.compat_ioctl = reiserfs_compat_ioctl,
#endif
.mmap = reiserfs_file_mmap,
.open = generic_file_open,
.open = dquot_file_open,
.release = reiserfs_file_release,
.fsync = reiserfs_sync_file,
.aio_read = generic_file_aio_read,


@ -34,6 +34,9 @@ void reiserfs_delete_inode(struct inode *inode)
int depth;
int err;
if (!is_bad_inode(inode))
dquot_initialize(inode);
truncate_inode_pages(&inode->i_data, 0);
depth = reiserfs_write_lock_once(inode->i_sb);
@ -54,7 +57,7 @@ void reiserfs_delete_inode(struct inode *inode)
* after delete_object so that quota updates go into the same transaction as
* stat data deletion */
if (!err)
vfs_dq_free_inode(inode);
dquot_free_inode(inode);
if (journal_end(&th, inode->i_sb, jbegin_count))
goto out;
@ -1765,10 +1768,10 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
BUG_ON(!th->t_trans_id);
if (vfs_dq_alloc_inode(inode)) {
err = -EDQUOT;
dquot_initialize(inode);
err = dquot_alloc_inode(inode);
if (err)
goto out_end_trans;
}
if (!dir->i_nlink) {
err = -EPERM;
goto out_bad_inode;
@ -1959,12 +1962,12 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
INODE_PKEY(inode)->k_objectid = 0;
/* Quota change must be inside a transaction for journaling */
vfs_dq_free_inode(inode);
dquot_free_inode(inode);
out_end_trans:
journal_end(th, th->t_super, th->t_blocks_allocated);
/* Drop can be outside and it needs more credits so it's better to have it outside */
vfs_dq_drop(inode);
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
make_bad_inode(inode);
@ -3073,6 +3076,8 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
depth = reiserfs_write_lock_once(inode->i_sb);
if (attr->ia_valid & ATTR_SIZE) {
dquot_initialize(inode);
/* version 2 items will be caught by the s_maxbytes check
** done for us in vmtruncate
*/
@ -3134,8 +3139,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
jbegin_count);
if (error)
goto out;
error =
vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
error = dquot_transfer(inode, attr);
if (error) {
journal_end(&th, inode->i_sb,
jbegin_count);


@ -546,7 +546,7 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th,
*/
static int drop_new_inode(struct inode *inode)
{
vfs_dq_drop(inode);
dquot_drop(inode);
make_bad_inode(inode);
inode->i_flags |= S_NOQUOTA;
iput(inode);
@ -554,7 +554,7 @@ static int drop_new_inode(struct inode *inode)
}
/* utility function that does setup for reiserfs_new_inode.
** vfs_dq_init needs lots of credits so it's better to have it
** dquot_initialize needs lots of credits so it's better to have it
** outside of a transaction, so we had to pull some bits of
** reiserfs_new_inode out into this func.
*/
@ -577,7 +577,7 @@ static int new_inode_init(struct inode *inode, struct inode *dir, int mode)
} else {
inode->i_gid = current_fsgid();
}
vfs_dq_init(inode);
dquot_initialize(inode);
return 0;
}
@ -594,6 +594,8 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, int mode,
struct reiserfs_transaction_handle th;
struct reiserfs_security_handle security;
dquot_initialize(dir);
if (!(inode = new_inode(dir->i_sb))) {
return -ENOMEM;
}
@ -666,6 +668,8 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, int mode,
if (!new_valid_dev(rdev))
return -EINVAL;
dquot_initialize(dir);
if (!(inode = new_inode(dir->i_sb))) {
return -ENOMEM;
}
@ -739,6 +743,8 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) +
REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb));
dquot_initialize(dir);
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
/* set flag that new packing locality created and new blocks for the content of that directory are not displaced yet */
REISERFS_I(dir)->new_packing_locality = 1;
@ -842,6 +848,8 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry)
JOURNAL_PER_BALANCE_CNT * 2 + 2 +
4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
dquot_initialize(dir);
reiserfs_write_lock(dir->i_sb);
retval = journal_begin(&th, dir->i_sb, jbegin_count);
if (retval)
@ -923,6 +931,8 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
unsigned long savelink;
int depth;
dquot_initialize(dir);
inode = dentry->d_inode;
/* in this transaction we can be doing at max two balancings and update
@ -1024,6 +1034,8 @@ static int reiserfs_symlink(struct inode *parent_dir,
2 * (REISERFS_QUOTA_INIT_BLOCKS(parent_dir->i_sb) +
REISERFS_QUOTA_TRANS_BLOCKS(parent_dir->i_sb));
dquot_initialize(parent_dir);
if (!(inode = new_inode(parent_dir->i_sb))) {
return -ENOMEM;
}
@ -1111,6 +1123,8 @@ static int reiserfs_link(struct dentry *old_dentry, struct inode *dir,
JOURNAL_PER_BALANCE_CNT * 3 +
2 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
dquot_initialize(dir);
reiserfs_write_lock(dir->i_sb);
if (inode->i_nlink >= REISERFS_LINK_MAX) {
//FIXME: sd_nlink is 32 bit for new files
@ -1235,6 +1249,9 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
JOURNAL_PER_BALANCE_CNT * 3 + 5 +
4 * REISERFS_QUOTA_TRANS_BLOCKS(old_dir->i_sb);
dquot_initialize(old_dir);
dquot_initialize(new_dir);
old_inode = old_dentry->d_inode;
new_dentry_inode = new_dentry->d_inode;


@ -1299,7 +1299,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
"reiserquota delete_item(): freeing %u, id=%u type=%c",
quota_cut_bytes, inode->i_uid, head2type(&s_ih));
#endif
vfs_dq_free_space_nodirty(inode, quota_cut_bytes);
dquot_free_space_nodirty(inode, quota_cut_bytes);
/* Return deleted body length */
return ret_value;
@ -1383,7 +1383,7 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
quota_cut_bytes, inode->i_uid,
key2type(key));
#endif
vfs_dq_free_space_nodirty(inode,
dquot_free_space_nodirty(inode,
quota_cut_bytes);
}
break;
@ -1733,7 +1733,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
"reiserquota cut_from_item(): freeing %u id=%u type=%c",
quota_cut_bytes, inode->i_uid, '?');
#endif
vfs_dq_free_space_nodirty(inode, quota_cut_bytes);
dquot_free_space_nodirty(inode, quota_cut_bytes);
return ret_value;
}
@ -1968,9 +1968,10 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
key2type(&(key->on_disk_key)));
#endif
if (vfs_dq_alloc_space_nodirty(inode, pasted_size)) {
retval = dquot_alloc_space_nodirty(inode, pasted_size);
if (retval) {
pathrelse(search_path);
return -EDQUOT;
return retval;
}
init_tb_struct(th, &s_paste_balance, th->t_super, search_path,
pasted_size);
@ -2024,7 +2025,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
pasted_size, inode->i_uid,
key2type(&(key->on_disk_key)));
#endif
vfs_dq_free_space_nodirty(inode, pasted_size);
dquot_free_space_nodirty(inode, pasted_size);
return retval;
}
@ -2062,9 +2063,10 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
#endif
/* We can't dirty inode here. It would be immediately written but
* appropriate stat item isn't inserted yet... */
if (vfs_dq_alloc_space_nodirty(inode, quota_bytes)) {
retval = dquot_alloc_space_nodirty(inode, quota_bytes);
if (retval) {
pathrelse(path);
return -EDQUOT;
return retval;
}
}
init_tb_struct(th, &s_ins_balance, th->t_super, path,
@ -2113,6 +2115,6 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
quota_bytes, inode->i_uid, head2type(ih));
#endif
if (inode)
vfs_dq_free_space_nodirty(inode, quota_bytes);
dquot_free_space_nodirty(inode, quota_bytes);
return retval;
}


@ -246,7 +246,7 @@ static int finish_unfinished(struct super_block *s)
retval = remove_save_link_only(s, &save_link_key, 0);
continue;
}
vfs_dq_init(inode);
dquot_initialize(inode);
if (truncate && S_ISDIR(inode->i_mode)) {
/* We got a truncate request for a dir which is impossible.
@ -578,6 +578,11 @@ out:
reiserfs_write_unlock_once(inode->i_sb, lock_depth);
}
static void reiserfs_clear_inode(struct inode *inode)
{
dquot_drop(inode);
}
#ifdef CONFIG_QUOTA
static ssize_t reiserfs_quota_write(struct super_block *, int, const char *,
size_t, loff_t);
@ -590,6 +595,7 @@ static const struct super_operations reiserfs_sops = {
.destroy_inode = reiserfs_destroy_inode,
.write_inode = reiserfs_write_inode,
.dirty_inode = reiserfs_dirty_inode,
.clear_inode = reiserfs_clear_inode,
.delete_inode = reiserfs_delete_inode,
.put_super = reiserfs_put_super,
.write_super = reiserfs_write_super,
@ -616,13 +622,6 @@ static int reiserfs_write_info(struct super_block *, int);
static int reiserfs_quota_on(struct super_block *, int, int, char *, int);
static const struct dquot_operations reiserfs_quota_operations = {
.initialize = dquot_initialize,
.drop = dquot_drop,
.alloc_space = dquot_alloc_space,
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
.free_inode = dquot_free_inode,
.transfer = dquot_transfer,
.write_dquot = reiserfs_write_dquot,
.acquire_dquot = reiserfs_acquire_dquot,
.release_dquot = reiserfs_release_dquot,


@ -61,7 +61,6 @@
static int xattr_create(struct inode *dir, struct dentry *dentry, int mode)
{
BUG_ON(!mutex_is_locked(&dir->i_mutex));
vfs_dq_init(dir);
return dir->i_op->create(dir, dentry, mode, NULL);
}
#endif
@ -69,7 +68,6 @@ static int xattr_create(struct inode *dir, struct dentry *dentry, int mode)
static int xattr_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
BUG_ON(!mutex_is_locked(&dir->i_mutex));
vfs_dq_init(dir);
return dir->i_op->mkdir(dir, dentry, mode);
}
@ -81,7 +79,6 @@ static int xattr_unlink(struct inode *dir, struct dentry *dentry)
{
int error;
BUG_ON(!mutex_is_locked(&dir->i_mutex));
vfs_dq_init(dir);
reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex,
I_MUTEX_CHILD, dir->i_sb);
@ -97,7 +94,6 @@ static int xattr_rmdir(struct inode *dir, struct dentry *dentry)
{
int error;
BUG_ON(!mutex_is_locked(&dir->i_mutex));
vfs_dq_init(dir);
reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex,
I_MUTEX_CHILD, dir->i_sb);


@ -34,14 +34,14 @@ static int __sync_filesystem(struct super_block *sb, int wait)
if (!sb->s_bdi)
return 0;
/* Avoid doing twice syncing and cache pruning for quota sync */
if (!wait) {
writeout_quota_sb(sb, -1);
writeback_inodes_sb(sb);
} else {
sync_quota_sb(sb, -1);
if (sb->s_qcop && sb->s_qcop->quota_sync)
sb->s_qcop->quota_sync(sb, -1, wait);
if (wait)
sync_inodes_sb(sb);
}
else
writeback_inodes_sb(sb);
if (sb->s_op->sync_fs)
sb->s_op->sync_fs(sb, wait);
return __sync_blockdev(sb->s_bdev, wait);


@ -208,7 +208,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
((char *)bh->b_data)[(bit + i) >> 3]);
} else {
if (inode)
vfs_dq_free_block(inode, 1);
dquot_free_block(inode, 1);
udf_add_free_space(sb, sbi->s_partition, 1);
}
}
@ -260,11 +260,11 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
while (bit < (sb->s_blocksize << 3) && block_count > 0) {
if (!udf_test_bit(bit, bh->b_data))
goto out;
else if (vfs_dq_prealloc_block(inode, 1))
else if (dquot_prealloc_block(inode, 1))
goto out;
else if (!udf_clear_bit(bit, bh->b_data)) {
udf_debug("bit already cleared for block %d\n", bit);
vfs_dq_free_block(inode, 1);
dquot_free_block(inode, 1);
goto out;
}
block_count--;
@ -390,10 +390,14 @@ got_block:
/*
* Check quota for allocation of this block.
*/
if (inode && vfs_dq_alloc_block(inode, 1)) {
mutex_unlock(&sbi->s_alloc_mutex);
*err = -EDQUOT;
return 0;
if (inode) {
int ret = dquot_alloc_block(inode, 1);
if (ret) {
mutex_unlock(&sbi->s_alloc_mutex);
*err = ret;
return 0;
}
}
newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
@ -449,7 +453,7 @@ static void udf_table_free_blocks(struct super_block *sb,
/* We do this up front - There are some error conditions that
could occur, but... oh well */
if (inode)
vfs_dq_free_block(inode, count);
dquot_free_block(inode, count);
udf_add_free_space(sb, sbi->s_partition, count);
start = bloc->logicalBlockNum + offset;
@ -694,7 +698,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
epos.offset -= adsize;
alloc_count = (elen >> sb->s_blocksize_bits);
if (inode && vfs_dq_prealloc_block(inode,
if (inode && dquot_prealloc_block(inode,
alloc_count > block_count ? block_count : alloc_count))
alloc_count = 0;
else if (alloc_count > block_count) {
@ -797,12 +801,13 @@ static int udf_table_new_block(struct super_block *sb,
newblock = goal_eloc.logicalBlockNum;
goal_eloc.logicalBlockNum++;
goal_elen -= sb->s_blocksize;
if (inode && vfs_dq_alloc_block(inode, 1)) {
brelse(goal_epos.bh);
mutex_unlock(&sbi->s_alloc_mutex);
*err = -EDQUOT;
return 0;
if (inode) {
*err = dquot_alloc_block(inode, 1);
if (*err) {
brelse(goal_epos.bh);
mutex_unlock(&sbi->s_alloc_mutex);
return 0;
}
}
if (goal_elen)


@ -34,6 +34,7 @@
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/aio.h>
@ -207,7 +208,7 @@ const struct file_operations udf_file_operations = {
.read = do_sync_read,
.aio_read = generic_file_aio_read,
.ioctl = udf_ioctl,
.open = generic_file_open,
.open = dquot_file_open,
.mmap = generic_file_mmap,
.write = do_sync_write,
.aio_write = udf_file_aio_write,
@ -217,6 +218,29 @@ const struct file_operations udf_file_operations = {
.llseek = generic_file_llseek,
};
static int udf_setattr(struct dentry *dentry, struct iattr *iattr)
{
struct inode *inode = dentry->d_inode;
int error;
error = inode_change_ok(inode, iattr);
if (error)
return error;
if (iattr->ia_valid & ATTR_SIZE)
dquot_initialize(inode);
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
(iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
error = dquot_transfer(inode, iattr);
if (error)
return error;
}
return inode_setattr(inode, iattr);
}
const struct inode_operations udf_file_inode_operations = {
.truncate = udf_truncate,
.truncate = udf_truncate,
.setattr = udf_setattr,
};


@ -36,8 +36,8 @@ void udf_free_inode(struct inode *inode)
* Note: we must free any quota before locking the superblock,
* as writing the quota to disk may need the lock as well.
*/
vfs_dq_free_inode(inode);
vfs_dq_drop(inode);
dquot_free_inode(inode);
dquot_drop(inode);
clear_inode(inode);
@ -61,7 +61,7 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
struct super_block *sb = dir->i_sb;
struct udf_sb_info *sbi = UDF_SB(sb);
struct inode *inode;
int block;
int block, ret;
uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
struct udf_inode_info *iinfo;
struct udf_inode_info *dinfo = UDF_I(dir);
@ -153,12 +153,14 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
insert_inode_hash(inode);
mark_inode_dirty(inode);
if (vfs_dq_alloc_inode(inode)) {
vfs_dq_drop(inode);
dquot_initialize(inode);
ret = dquot_alloc_inode(inode);
if (ret) {
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
iput(inode);
*err = -EDQUOT;
*err = ret;
return NULL;
}


@ -36,6 +36,7 @@
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/crc-itu-t.h>
@ -70,6 +71,9 @@ static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
void udf_delete_inode(struct inode *inode)
{
if (!is_bad_inode(inode))
dquot_initialize(inode);
truncate_inode_pages(&inode->i_data, 0);
if (is_bad_inode(inode))
@ -108,6 +112,8 @@ void udf_clear_inode(struct inode *inode)
(unsigned long long)inode->i_size,
(unsigned long long)iinfo->i_lenExtents);
}
dquot_drop(inode);
kfree(iinfo->i_ext.i_data);
iinfo->i_ext.i_data = NULL;
}


@ -563,6 +563,8 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
int err;
struct udf_inode_info *iinfo;
dquot_initialize(dir);
lock_kernel();
inode = udf_new_inode(dir, mode, &err);
if (!inode) {
@ -616,6 +618,8 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode,
if (!old_valid_dev(rdev))
return -EINVAL;
dquot_initialize(dir);
lock_kernel();
err = -EIO;
inode = udf_new_inode(dir, mode, &err);
@ -662,6 +666,8 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
struct udf_inode_info *dinfo = UDF_I(dir);
struct udf_inode_info *iinfo;
dquot_initialize(dir);
lock_kernel();
err = -EMLINK;
if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1)
@ -799,6 +805,8 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry)
struct fileIdentDesc *fi, cfi;
struct kernel_lb_addr tloc;
dquot_initialize(dir);
retval = -ENOENT;
lock_kernel();
fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
@ -845,6 +853,8 @@ static int udf_unlink(struct inode *dir, struct dentry *dentry)
struct fileIdentDesc cfi;
struct kernel_lb_addr tloc;
dquot_initialize(dir);
retval = -ENOENT;
lock_kernel();
fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
@ -899,6 +909,8 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
struct buffer_head *bh;
struct udf_inode_info *iinfo;
dquot_initialize(dir);
lock_kernel();
inode = udf_new_inode(dir, S_IFLNK, &err);
if (!inode)
@ -1069,6 +1081,8 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
int err;
struct buffer_head *bh;
dquot_initialize(dir);
lock_kernel();
if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) {
unlock_kernel();
@ -1131,6 +1145,9 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
struct kernel_lb_addr tloc;
struct udf_inode_info *old_iinfo = UDF_I(old_inode);
dquot_initialize(old_dir);
dquot_initialize(new_dir);
lock_kernel();
ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
if (ofi) {


@ -85,7 +85,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
"bit already cleared for fragment %u", i);
}
vfs_dq_free_block(inode, count);
dquot_free_block(inode, count);
fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
@ -195,7 +195,7 @@ do_more:
ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
ufs_clusteracct (sb, ucpi, blkno, 1);
vfs_dq_free_block(inode, uspi->s_fpb);
dquot_free_block(inode, uspi->s_fpb);
fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
uspi->cs_total.cs_nbfree++;
@ -511,6 +511,7 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
struct ufs_cg_private_info * ucpi;
struct ufs_cylinder_group * ucg;
unsigned cgno, fragno, fragoff, count, fragsize, i;
int ret;
UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
(unsigned long long)fragment, oldcount, newcount);
@ -556,8 +557,9 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
for (i = oldcount; i < newcount; i++)
ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
if (vfs_dq_alloc_block(inode, count)) {
*err = -EDQUOT;
ret = dquot_alloc_block(inode, count);
if (ret) {
*err = ret;
return 0;
}
@ -596,6 +598,7 @@ static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
struct ufs_cylinder_group * ucg;
unsigned oldcg, i, j, k, allocsize;
u64 result;
int ret;
UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
inode->i_ino, cgno, (unsigned long long)goal, count);
@ -664,7 +667,7 @@ cg_found:
for (i = count; i < uspi->s_fpb; i++)
ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
i = uspi->s_fpb - count;
vfs_dq_free_block(inode, i);
dquot_free_block(inode, i);
fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
uspi->cs_total.cs_nffree += i;
@ -676,8 +679,9 @@ cg_found:
result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
if (result == INVBLOCK)
return 0;
if (vfs_dq_alloc_block(inode, count)) {
*err = -EDQUOT;
ret = dquot_alloc_block(inode, count);
if (ret) {
*err = ret;
return 0;
}
for (i = 0; i < count; i++)
@ -714,6 +718,7 @@ static u64 ufs_alloccg_block(struct inode *inode,
struct ufs_super_block_first * usb1;
struct ufs_cylinder_group * ucg;
u64 result, blkno;
int ret;
UFSD("ENTER, goal %llu\n", (unsigned long long)goal);
@ -747,8 +752,9 @@ gotit:
ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
ufs_clusteracct (sb, ucpi, blkno, -1);
if (vfs_dq_alloc_block(inode, uspi->s_fpb)) {
*err = -EDQUOT;
ret = dquot_alloc_block(inode, uspi->s_fpb);
if (ret) {
*err = ret;
return INVBLOCK;
}


@ -24,6 +24,7 @@
*/
#include <linux/fs.h>
#include <linux/quotaops.h>
#include "ufs_fs.h"
#include "ufs.h"
@ -40,7 +41,7 @@ const struct file_operations ufs_file_operations = {
.write = do_sync_write,
.aio_write = generic_file_aio_write,
.mmap = generic_file_mmap,
.open = generic_file_open,
.open = dquot_file_open,
.fsync = simple_fsync,
.splice_read = generic_file_splice_read,
};


@ -95,8 +95,8 @@ void ufs_free_inode (struct inode * inode)
is_directory = S_ISDIR(inode->i_mode);
vfs_dq_free_inode(inode);
vfs_dq_drop(inode);
dquot_free_inode(inode);
dquot_drop(inode);
clear_inode (inode);
@ -355,9 +355,10 @@ cg_found:
unlock_super (sb);
if (vfs_dq_alloc_inode(inode)) {
vfs_dq_drop(inode);
err = -EDQUOT;
dquot_initialize(inode);
err = dquot_alloc_inode(inode);
if (err) {
dquot_drop(inode);
goto fail_without_unlock;
}


@ -37,6 +37,7 @@
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/quotaops.h>
#include "ufs_fs.h"
#include "ufs.h"
@ -909,6 +910,9 @@ void ufs_delete_inode (struct inode * inode)
{
loff_t old_i_size;
if (!is_bad_inode(inode))
dquot_initialize(inode);
truncate_inode_pages(&inode->i_data, 0);
if (is_bad_inode(inode))
goto no_delete;


@ -30,6 +30,7 @@
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>
#include <linux/quotaops.h>
#include "ufs_fs.h"
#include "ufs.h"
@ -84,6 +85,9 @@ static int ufs_create (struct inode * dir, struct dentry * dentry, int mode,
int err;
UFSD("BEGIN\n");
dquot_initialize(dir);
inode = ufs_new_inode(dir, mode);
err = PTR_ERR(inode);
@ -107,6 +111,9 @@ static int ufs_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_t
if (!old_valid_dev(rdev))
return -EINVAL;
dquot_initialize(dir);
inode = ufs_new_inode(dir, mode);
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
@ -131,6 +138,8 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
if (l > sb->s_blocksize)
goto out_notlocked;
dquot_initialize(dir);
lock_kernel();
inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO);
err = PTR_ERR(inode);
@ -176,6 +185,8 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir,
return -EMLINK;
}
dquot_initialize(dir);
inode->i_ctime = CURRENT_TIME_SEC;
inode_inc_link_count(inode);
atomic_inc(&inode->i_count);
@ -193,6 +204,8 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
if (dir->i_nlink >= UFS_LINK_MAX)
goto out;
dquot_initialize(dir);
lock_kernel();
inode_inc_link_count(dir);
@ -237,6 +250,8 @@ static int ufs_unlink(struct inode *dir, struct dentry *dentry)
struct page *page;
int err = -ENOENT;
dquot_initialize(dir);
de = ufs_find_entry(dir, &dentry->d_name, &page);
if (!de)
goto out;
@ -281,6 +296,9 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct ufs_dir_entry *old_de;
int err = -ENOENT;
dquot_initialize(old_dir);
dquot_initialize(new_dir);
old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page);
if (!old_de)
goto out;


@ -1432,6 +1432,11 @@ static void destroy_inodecache(void)
kmem_cache_destroy(ufs_inode_cachep);
}
static void ufs_clear_inode(struct inode *inode)
{
dquot_drop(inode);
}
#ifdef CONFIG_QUOTA
static ssize_t ufs_quota_read(struct super_block *, int, char *,size_t, loff_t);
static ssize_t ufs_quota_write(struct super_block *, int, const char *, size_t, loff_t);
@ -1442,6 +1447,7 @@ static const struct super_operations ufs_super_ops = {
.destroy_inode = ufs_destroy_inode,
.write_inode = ufs_write_inode,
.delete_inode = ufs_delete_inode,
.clear_inode = ufs_clear_inode,
.put_super = ufs_put_super,
.write_super = ufs_write_super,
.sync_fs = ufs_sync_fs,


@ -44,6 +44,7 @@
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/quotaops.h>
#include "ufs_fs.h"
#include "ufs.h"
@ -517,9 +518,18 @@ static int ufs_setattr(struct dentry *dentry, struct iattr *attr)
if (error)
return error;
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
error = dquot_transfer(inode, attr);
if (error)
return error;
}
if (ia_valid & ATTR_SIZE &&
attr->ia_size != i_size_read(inode)) {
loff_t old_i_size = inode->i_size;
dquot_initialize(inode);
error = vmtruncate(inode, attr->ia_size);
if (error)
return error;


@ -43,20 +43,6 @@ xfs_quota_type(int type)
}
}
STATIC int
xfs_fs_quota_sync(
struct super_block *sb,
int type)
{
struct xfs_mount *mp = XFS_M(sb);
if (sb->s_flags & MS_RDONLY)
return -EROFS;
if (!XFS_IS_QUOTA_RUNNING(mp))
return -ENOSYS;
return -xfs_sync_data(mp, 0);
}
STATIC int
xfs_fs_get_xstate(
struct super_block *sb,
@ -82,8 +68,6 @@ xfs_fs_set_xstate(
return -EROFS;
if (op != Q_XQUOTARM && !XFS_IS_QUOTA_RUNNING(mp))
return -ENOSYS;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (uflags & XFS_QUOTA_UDQ_ACCT)
flags |= XFS_UQUOTA_ACCT;
@ -144,14 +128,11 @@ xfs_fs_set_xquota(
return -ENOSYS;
if (!XFS_IS_QUOTA_ON(mp))
return -ESRCH;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return -xfs_qm_scall_setqlim(mp, id, xfs_quota_type(type), fdq);
}
const struct quotactl_ops xfs_quotactl_operations = {
.quota_sync = xfs_fs_quota_sync,
.get_xstate = xfs_fs_get_xstate,
.set_xstate = xfs_fs_set_xstate,
.get_xquota = xfs_fs_get_xquota,


@ -202,14 +202,6 @@ static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
return flags & EXT3_OTHER_FLMASK;
}
/*
* Inode dynamic state flags
*/
#define EXT3_STATE_JDATA 0x00000001 /* journaled data exists */
#define EXT3_STATE_NEW 0x00000002 /* inode is newly created */
#define EXT3_STATE_XATTR 0x00000004 /* has in-inode xattrs */
#define EXT3_STATE_FLUSH_ON_CLOSE 0x00000008
/* Used to pass group descriptor data when online resize is done */
struct ext3_new_group_input {
__u32 group; /* Group number for this data */
@ -560,6 +552,31 @@ static inline int ext3_valid_inum(struct super_block *sb, unsigned long ino)
(ino >= EXT3_FIRST_INO(sb) &&
ino <= le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count));
}
/*
* Inode dynamic state flags
*/
enum {
EXT3_STATE_JDATA, /* journaled data exists */
EXT3_STATE_NEW, /* inode is newly created */
EXT3_STATE_XATTR, /* has in-inode xattrs */
EXT3_STATE_FLUSH_ON_CLOSE, /* flush dirty pages on close */
};
static inline int ext3_test_inode_state(struct inode *inode, int bit)
{
return test_bit(bit, &EXT3_I(inode)->i_state);
}
static inline void ext3_set_inode_state(struct inode *inode, int bit)
{
set_bit(bit, &EXT3_I(inode)->i_state);
}
static inline void ext3_clear_inode_state(struct inode *inode, int bit)
{
clear_bit(bit, &EXT3_I(inode)->i_state);
}
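Because i_state is now an unsigned long driven through set_bit()/test_bit()/clear_bit(), individual state bits can be flipped atomically with respect to each other. A hedged usage sketch (assumes linux/ext3_fs.h is included; not part of the patch):

static void example_mark_journaled_data(struct inode *inode)
{
	ext3_set_inode_state(inode, EXT3_STATE_JDATA);
	if (ext3_test_inode_state(inode, EXT3_STATE_NEW))
		ext3_clear_inode_state(inode, EXT3_STATE_NEW);
}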
#else
/* Assume that user mode programs are passing in an ext3fs superblock, not
* a kernel struct super_block. This will allow us to call the feature-test


@ -87,7 +87,7 @@ struct ext3_inode_info {
* near to their parent directory's inode.
*/
__u32 i_block_group;
__u32 i_state; /* Dynamic state flags for ext3 */
unsigned long i_state; /* Dynamic state flags for ext3 */
/* block reservation info */
struct ext3_block_alloc_info *i_block_alloc_info;


@ -246,19 +246,8 @@ typedef struct journal_superblock_s
#define J_ASSERT(assert) BUG_ON(!(assert))
#if defined(CONFIG_BUFFER_DEBUG)
void buffer_assertion_failure(struct buffer_head *bh);
#define J_ASSERT_BH(bh, expr) \
do { \
if (!(expr)) \
buffer_assertion_failure(bh); \
J_ASSERT(expr); \
} while (0)
#define J_ASSERT_JH(jh, expr) J_ASSERT_BH(jh2bh(jh), expr)
#else
#define J_ASSERT_BH(bh, expr) J_ASSERT(expr)
#define J_ASSERT_JH(jh, expr) J_ASSERT(expr)
#endif
#if defined(JBD_PARANOID_IOFAIL)
#define J_EXPECT(expr, why...) J_ASSERT(expr)


@ -277,19 +277,8 @@ typedef struct journal_superblock_s
#define J_ASSERT(assert) BUG_ON(!(assert))
#if defined(CONFIG_BUFFER_DEBUG)
void buffer_assertion_failure(struct buffer_head *bh);
#define J_ASSERT_BH(bh, expr) \
do { \
if (!(expr)) \
buffer_assertion_failure(bh); \
J_ASSERT(expr); \
} while (0)
#define J_ASSERT_JH(jh, expr) J_ASSERT_BH(jh2bh(jh), expr)
#else
#define J_ASSERT_BH(bh, expr) J_ASSERT(expr)
#define J_ASSERT_JH(jh, expr) J_ASSERT(expr)
#endif
#if defined(JBD2_PARANOID_IOFAIL)
#define J_EXPECT(expr, why...) J_ASSERT(expr)


@ -279,9 +279,6 @@ struct dquot {
struct mem_dqblk dq_dqb; /* Diskquota usage */
};
#define QUOTA_OK 0
#define NO_QUOTA 1
/* Operations which must be implemented by each quota format */
struct quota_format_ops {
int (*check_quota_file)(struct super_block *sb, int type); /* Detect whether file is in our format */
@ -295,13 +292,6 @@ struct quota_format_ops {
/* Operations working with dquots */
struct dquot_operations {
int (*initialize) (struct inode *, int);
int (*drop) (struct inode *);
int (*alloc_space) (struct inode *, qsize_t, int);
int (*alloc_inode) (const struct inode *, qsize_t);
int (*free_space) (struct inode *, qsize_t);
int (*free_inode) (const struct inode *, qsize_t);
int (*transfer) (struct inode *, struct iattr *);
int (*write_dquot) (struct dquot *); /* Ordinary dquot write */
struct dquot *(*alloc_dquot)(struct super_block *, int); /* Allocate memory for new dquot */
void (*destroy_dquot)(struct dquot *); /* Free memory for dquot */
@ -309,12 +299,6 @@ struct dquot_operations {
int (*release_dquot) (struct dquot *); /* Quota is going to be deleted from disk */
int (*mark_dirty) (struct dquot *); /* Dquot is marked dirty */
int (*write_info) (struct super_block *, int); /* Write of quota "superblock" */
/* reserve quota for delayed block allocation */
int (*reserve_space) (struct inode *, qsize_t, int);
/* claim reserved quota for delayed alloc */
int (*claim_space) (struct inode *, qsize_t);
/* release rsved quota for delayed alloc */
void (*release_rsv) (struct inode *, qsize_t);
/* get reserved quota for delayed alloc, value returned is managed by
* quota code only */
qsize_t *(*get_reserved_space) (struct inode *);
@ -324,7 +308,7 @@ struct dquot_operations {
struct quotactl_ops {
int (*quota_on)(struct super_block *, int, int, char *, int);
int (*quota_off)(struct super_block *, int, int);
int (*quota_sync)(struct super_block *, int);
int (*quota_sync)(struct super_block *, int, int);
int (*get_info)(struct super_block *, int, struct if_dqinfo *);
int (*set_info)(struct super_block *, int, struct if_dqinfo *);
int (*get_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *);
@ -357,26 +341,25 @@ enum {
#define DQUOT_STATE_FLAGS (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED | \
DQUOT_SUSPENDED)
/* Other quota flags */
#define DQUOT_QUOTA_SYS_FILE (1 << 6) /* Quota file is a special
#define DQUOT_STATE_LAST (_DQUOT_STATE_FLAGS * MAXQUOTAS)
#define DQUOT_QUOTA_SYS_FILE (1 << DQUOT_STATE_LAST)
/* Quota file is a special
* system file and user cannot
* touch it. Filesystem is
* responsible for setting
* S_NOQUOTA, S_NOATIME flags
*/
#define DQUOT_NEGATIVE_USAGE (1 << 7) /* Allow negative quota usage */
#define DQUOT_NEGATIVE_USAGE (1 << (DQUOT_STATE_LAST + 1))
/* Allow negative quota usage */
static inline unsigned int dquot_state_flag(unsigned int flags, int type)
{
if (type == USRQUOTA)
return flags;
return flags << _DQUOT_STATE_FLAGS;
return flags << _DQUOT_STATE_FLAGS * type;
}
static inline unsigned int dquot_generic_flag(unsigned int flags, int type)
{
if (type == USRQUOTA)
return flags;
return flags >> _DQUOT_STATE_FLAGS;
return (flags >> _DQUOT_STATE_FLAGS * type) & DQUOT_STATE_FLAGS;
}
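With the shift now multiplied by the quota type, each type owns its own group of state bits. A hedged illustration of how a per-type check collapses to a masked test (assumes linux/quotaops.h is included; the sb_has_quota_*() helpers below are the in-tree equivalents):

static inline bool example_grpquota_usage_enabled(struct super_block *sb)
{
	return sb_dqopt(sb)->flags &
	       dquot_state_flag(DQUOT_USAGE_ENABLED, GRPQUOTA);
}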
#ifdef CONFIG_QUOTA_NETLINK_INTERFACE


@ -19,15 +19,12 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
/*
* declaration of quota_function calls in kernel.
*/
void sync_quota_sb(struct super_block *sb, int type);
static inline void writeout_quota_sb(struct super_block *sb, int type)
{
if (sb->s_qcop->quota_sync)
sb->s_qcop->quota_sync(sb, type);
}
void inode_add_rsv_space(struct inode *inode, qsize_t number);
void inode_claim_rsv_space(struct inode *inode, qsize_t number);
void inode_sub_rsv_space(struct inode *inode, qsize_t number);
int dquot_initialize(struct inode *inode, int type);
int dquot_drop(struct inode *inode);
void dquot_initialize(struct inode *inode);
void dquot_drop(struct inode *inode);
struct dquot *dqget(struct super_block *sb, unsigned int id, int type);
void dqput(struct dquot *dquot);
int dquot_scan_active(struct super_block *sb,
@ -36,24 +33,23 @@ int dquot_scan_active(struct super_block *sb,
struct dquot *dquot_alloc(struct super_block *sb, int type);
void dquot_destroy(struct dquot *dquot);
int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc);
int dquot_alloc_inode(const struct inode *inode, qsize_t number);
int __dquot_alloc_space(struct inode *inode, qsize_t number,
int warn, int reserve);
void __dquot_free_space(struct inode *inode, qsize_t number, int reserve);
int dquot_reserve_space(struct inode *inode, qsize_t number, int prealloc);
int dquot_claim_space(struct inode *inode, qsize_t number);
void dquot_release_reserved_space(struct inode *inode, qsize_t number);
qsize_t dquot_get_reserved_space(struct inode *inode);
int dquot_alloc_inode(const struct inode *inode);
int dquot_free_space(struct inode *inode, qsize_t number);
int dquot_free_inode(const struct inode *inode, qsize_t number);
int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
void dquot_free_inode(const struct inode *inode);
int dquot_transfer(struct inode *inode, struct iattr *iattr);
int dquot_commit(struct dquot *dquot);
int dquot_acquire(struct dquot *dquot);
int dquot_release(struct dquot *dquot);
int dquot_commit_info(struct super_block *sb, int type);
int dquot_mark_dquot_dirty(struct dquot *dquot);
int dquot_file_open(struct inode *inode, struct file *file);
int vfs_quota_on(struct super_block *sb, int type, int format_id,
char *path, int remount);
int vfs_quota_enable(struct inode *inode, int type, int format_id,
@ -64,14 +60,13 @@ int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
int format_id, int type);
int vfs_quota_off(struct super_block *sb, int type, int remount);
int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags);
int vfs_quota_sync(struct super_block *sb, int type);
int vfs_quota_sync(struct super_block *sb, int type, int wait);
int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di);
int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di);
void vfs_dq_drop(struct inode *inode);
int vfs_dq_transfer(struct inode *inode, struct iattr *iattr);
int dquot_transfer(struct inode *inode, struct iattr *iattr);
int vfs_dq_quota_on_remount(struct super_block *sb);
static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type)
@@ -83,53 +78,56 @@ static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type)
* Functions for checking status of quota
*/
static inline int sb_has_quota_usage_enabled(struct super_block *sb, int type)
static inline bool sb_has_quota_usage_enabled(struct super_block *sb, int type)
{
return sb_dqopt(sb)->flags &
dquot_state_flag(DQUOT_USAGE_ENABLED, type);
}
static inline int sb_has_quota_limits_enabled(struct super_block *sb, int type)
static inline bool sb_has_quota_limits_enabled(struct super_block *sb, int type)
{
return sb_dqopt(sb)->flags &
dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
}
static inline int sb_has_quota_suspended(struct super_block *sb, int type)
static inline bool sb_has_quota_suspended(struct super_block *sb, int type)
{
return sb_dqopt(sb)->flags &
dquot_state_flag(DQUOT_SUSPENDED, type);
}
static inline int sb_any_quota_suspended(struct super_block *sb)
static inline unsigned sb_any_quota_suspended(struct super_block *sb)
{
return sb_has_quota_suspended(sb, USRQUOTA) ||
sb_has_quota_suspended(sb, GRPQUOTA);
unsigned type, tmsk = 0;
for (type = 0; type < MAXQUOTAS; type++)
tmsk |= sb_has_quota_suspended(sb, type) << type;
return tmsk;
}
/* Does kernel know about any quota information for given sb + type? */
static inline int sb_has_quota_loaded(struct super_block *sb, int type)
static inline bool sb_has_quota_loaded(struct super_block *sb, int type)
{
/* Currently if anything is on, then quota usage is on as well */
return sb_has_quota_usage_enabled(sb, type);
}
static inline int sb_any_quota_loaded(struct super_block *sb)
static inline unsigned sb_any_quota_loaded(struct super_block *sb)
{
return sb_has_quota_loaded(sb, USRQUOTA) ||
sb_has_quota_loaded(sb, GRPQUOTA);
unsigned type, tmsk = 0;
for (type = 0; type < MAXQUOTAS; type++)
tmsk |= sb_has_quota_loaded(sb, type) << type;
return tmsk;
}
static inline int sb_has_quota_active(struct super_block *sb, int type)
static inline bool sb_has_quota_active(struct super_block *sb, int type)
{
return sb_has_quota_loaded(sb, type) &&
!sb_has_quota_suspended(sb, type);
}
static inline int sb_any_quota_active(struct super_block *sb)
static inline unsigned sb_any_quota_active(struct super_block *sb)
{
return sb_has_quota_active(sb, USRQUOTA) ||
sb_has_quota_active(sb, GRPQUOTA);
return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
}
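A minimal sketch (not from the change itself) of how the new mask-style return value might be consumed; sb is again an assumed superblock pointer, the quota type indices are the standard ones from the header.

	/* sketch only: the return value is now a bitmask of quota types */
	unsigned int active = sb_any_quota_active(sb);
	bool usr_active = active & (1 << USRQUOTA);
	bool grp_active = active & (1 << GRPQUOTA);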
/*
@@ -141,122 +139,6 @@ extern const struct quotactl_ops vfs_quotactl_ops;
#define sb_dquot_ops (&dquot_operations)
#define sb_quotactl_ops (&vfs_quotactl_ops)
/* It is better to call this function outside of any transaction as it might
* need a lot of space in journal for dquot structure allocation. */
static inline void vfs_dq_init(struct inode *inode)
{
BUG_ON(!inode->i_sb);
if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode))
inode->i_sb->dq_op->initialize(inode, -1);
}
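A hedged sketch of the calling convention the comment above describes for the old helper: initialize quota first, start the journal transaction afterwards. fs_journal_start()/fs_journal_stop() are hypothetical stand-ins for a filesystem's real transaction primitives; handle, nblocks and inode are assumed locals.

	vfs_dq_init(inode);				/* may allocate dquots; keep outside the transaction */
	handle = fs_journal_start(inode, nblocks);	/* hypothetical transaction start */
	/* ... modify quota-accounted metadata here ... */
	fs_journal_stop(handle);			/* hypothetical transaction end */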
/* The following allocation/freeing/transfer functions *must* be called inside
* a transaction (deadlocks possible otherwise) */
static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr)
{
if (sb_any_quota_active(inode->i_sb)) {
/* Used space is updated in alloc_space() */
if (inode->i_sb->dq_op->alloc_space(inode, nr, 1) == NO_QUOTA)
return 1;
}
else
inode_add_bytes(inode, nr);
return 0;
}
static inline int vfs_dq_prealloc_space(struct inode *inode, qsize_t nr)
{
int ret;
if (!(ret = vfs_dq_prealloc_space_nodirty(inode, nr)))
mark_inode_dirty(inode);
return ret;
}
static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr)
{
if (sb_any_quota_active(inode->i_sb)) {
/* Used space is updated in alloc_space() */
if (inode->i_sb->dq_op->alloc_space(inode, nr, 0) == NO_QUOTA)
return 1;
}
else
inode_add_bytes(inode, nr);
return 0;
}
static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
{
int ret;
if (!(ret = vfs_dq_alloc_space_nodirty(inode, nr)))
mark_inode_dirty(inode);
return ret;
}
static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
{
if (sb_any_quota_active(inode->i_sb)) {
/* Used space is updated in alloc_space() */
if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA)
return 1;
}
return 0;
}
static inline int vfs_dq_alloc_inode(struct inode *inode)
{
if (sb_any_quota_active(inode->i_sb)) {
vfs_dq_init(inode);
if (inode->i_sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA)
return 1;
}
return 0;
}
/*
* Convert in-memory reserved quotas to real consumed quotas
*/
static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
{
if (sb_any_quota_active(inode->i_sb)) {
if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA)
return 1;
} else
inode_add_bytes(inode, nr);
mark_inode_dirty(inode);
return 0;
}
/*
* Release reserved (in-memory) quotas
*/
static inline
void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
{
if (sb_any_quota_active(inode->i_sb))
inode->i_sb->dq_op->release_rsv(inode, nr);
}
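A rough usage sketch of the old reservation interface, assuming a delayed-allocation write path and the old convention where a non-zero return means the quota check failed; nr and the allocated flag are assumed locals.

	if (vfs_dq_reserve_space(inode, nr))
		return -EDQUOT;
	/* ... later, when (and if) real blocks are allocated ... */
	if (allocated)
		vfs_dq_claim_space(inode, nr);			/* reservation becomes real usage */
	else
		vfs_dq_release_reservation_space(inode, nr);	/* give the reservation back */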
static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
{
if (sb_any_quota_active(inode->i_sb))
inode->i_sb->dq_op->free_space(inode, nr);
else
inode_sub_bytes(inode, nr);
}
static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr)
{
vfs_dq_free_space_nodirty(inode, nr);
mark_inode_dirty(inode);
}
static inline void vfs_dq_free_inode(struct inode *inode)
{
if (sb_any_quota_active(inode->i_sb))
inode->i_sb->dq_op->free_inode(inode, 1);
}
/* Cannot be called inside a transaction */
static inline int vfs_dq_off(struct super_block *sb, int remount)
{
@@ -316,28 +198,20 @@ static inline int sb_any_quota_active(struct super_block *sb)
#define sb_dquot_ops (NULL)
#define sb_quotactl_ops (NULL)
static inline void vfs_dq_init(struct inode *inode)
static inline void dquot_initialize(struct inode *inode)
{
}
static inline void vfs_dq_drop(struct inode *inode)
static inline void dquot_drop(struct inode *inode)
{
}
static inline int vfs_dq_alloc_inode(struct inode *inode)
static inline int dquot_alloc_inode(const struct inode *inode)
{
return 0;
}
static inline void vfs_dq_free_inode(struct inode *inode)
{
}
static inline void sync_quota_sb(struct super_block *sb, int type)
{
}
static inline void writeout_quota_sb(struct super_block *sb, int type)
static inline void dquot_free_inode(const struct inode *inode)
{
}
@@ -351,110 +225,116 @@ static inline int vfs_dq_quota_on_remount(struct super_block *sb)
return 0;
}
static inline int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
static inline int dquot_transfer(struct inode *inode, struct iattr *iattr)
{
return 0;
}
static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr)
static inline int __dquot_alloc_space(struct inode *inode, qsize_t number,
int warn, int reserve)
{
inode_add_bytes(inode, nr);
if (!reserve)
inode_add_bytes(inode, number);
return 0;
}
static inline int vfs_dq_prealloc_space(struct inode *inode, qsize_t nr)
static inline void __dquot_free_space(struct inode *inode, qsize_t number,
int reserve)
{
vfs_dq_prealloc_space_nodirty(inode, nr);
mark_inode_dirty(inode);
if (!reserve)
inode_sub_bytes(inode, number);
}
static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
inode_add_bytes(inode, number);
return 0;
}
static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr)
{
inode_add_bytes(inode, nr);
return 0;
}
static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
{
vfs_dq_alloc_space_nodirty(inode, nr);
mark_inode_dirty(inode);
return 0;
}
static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
{
return 0;
}
static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
{
return vfs_dq_alloc_space(inode, nr);
}
static inline
int vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
{
return 0;
}
static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
{
inode_sub_bytes(inode, nr);
}
static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr)
{
vfs_dq_free_space_nodirty(inode, nr);
mark_inode_dirty(inode);
}
#define dquot_file_open generic_file_open
#endif /* CONFIG_QUOTA */
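A hedged example of what the no-op stubs above buy: filesystem code can call the quota hooks unconditionally and just check the error, whether or not CONFIG_QUOTA is built in. err and the fail_drop label are assumed from a typical inode-creation path.

	err = dquot_alloc_inode(inode);
	if (err)
		goto fail_drop;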
static inline int vfs_dq_prealloc_block_nodirty(struct inode *inode, qsize_t nr)
static inline int dquot_alloc_space_nodirty(struct inode *inode, qsize_t nr)
{
return vfs_dq_prealloc_space_nodirty(inode, nr << inode->i_blkbits);
return __dquot_alloc_space(inode, nr, 1, 0);
}
static inline int vfs_dq_prealloc_block(struct inode *inode, qsize_t nr)
static inline int dquot_alloc_space(struct inode *inode, qsize_t nr)
{
return vfs_dq_prealloc_space(inode, nr << inode->i_blkbits);
int ret;
ret = dquot_alloc_space_nodirty(inode, nr);
if (!ret)
mark_inode_dirty(inode);
return ret;
}
static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, qsize_t nr)
static inline int dquot_alloc_block_nodirty(struct inode *inode, qsize_t nr)
{
return vfs_dq_alloc_space_nodirty(inode, nr << inode->i_blkbits);
return dquot_alloc_space_nodirty(inode, nr << inode->i_blkbits);
}
static inline int vfs_dq_alloc_block(struct inode *inode, qsize_t nr)
static inline int dquot_alloc_block(struct inode *inode, qsize_t nr)
{
return vfs_dq_alloc_space(inode, nr << inode->i_blkbits);
return dquot_alloc_space(inode, nr << inode->i_blkbits);
}
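A minimal sketch of a block-allocation path under the new error-code convention (the caller gets a negative errno rather than NO_QUOTA); fs_new_block() is a hypothetical on-disk allocator, ret and block are assumed locals.

	ret = dquot_alloc_block(inode, 1);
	if (ret)
		return ret;			/* typically -EDQUOT */
	block = fs_new_block(inode);		/* hypothetical allocator */
	if (!block) {
		dquot_free_block(inode, 1);	/* undo the quota charge */
		return -ENOSPC;
	}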
static inline int vfs_dq_reserve_block(struct inode *inode, qsize_t nr)
static inline int dquot_prealloc_block_nodirty(struct inode *inode, qsize_t nr)
{
return vfs_dq_reserve_space(inode, nr << inode->i_blkbits);
return __dquot_alloc_space(inode, nr << inode->i_blkbits, 0, 0);
}
static inline int vfs_dq_claim_block(struct inode *inode, qsize_t nr)
static inline int dquot_prealloc_block(struct inode *inode, qsize_t nr)
{
return vfs_dq_claim_space(inode, nr << inode->i_blkbits);
int ret;
ret = dquot_prealloc_block_nodirty(inode, nr);
if (!ret)
mark_inode_dirty(inode);
return ret;
}
static inline
void vfs_dq_release_reservation_block(struct inode *inode, qsize_t nr)
static inline int dquot_reserve_block(struct inode *inode, qsize_t nr)
{
vfs_dq_release_reservation_space(inode, nr << inode->i_blkbits);
return __dquot_alloc_space(inode, nr << inode->i_blkbits, 1, 1);
}
static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr)
static inline int dquot_claim_block(struct inode *inode, qsize_t nr)
{
vfs_dq_free_space_nodirty(inode, nr << inode->i_blkbits);
int ret;
ret = dquot_claim_space_nodirty(inode, nr << inode->i_blkbits);
if (!ret)
mark_inode_dirty(inode);
return ret;
}
static inline void vfs_dq_free_block(struct inode *inode, qsize_t nr)
static inline void dquot_free_space_nodirty(struct inode *inode, qsize_t nr)
{
vfs_dq_free_space(inode, nr << inode->i_blkbits);
__dquot_free_space(inode, nr, 0);
}
static inline void dquot_free_space(struct inode *inode, qsize_t nr)
{
dquot_free_space_nodirty(inode, nr);
mark_inode_dirty(inode);
}
static inline void dquot_free_block_nodirty(struct inode *inode, qsize_t nr)
{
dquot_free_space_nodirty(inode, nr << inode->i_blkbits);
}
static inline void dquot_free_block(struct inode *inode, qsize_t nr)
{
dquot_free_space(inode, nr << inode->i_blkbits);
}
static inline void dquot_release_reservation_block(struct inode *inode,
qsize_t nr)
{
__dquot_free_space(inode, nr << inode->i_blkbits, 1);
}
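Finally, a hedged reserve/claim/release sketch with the new block helpers, mirroring the delayed-allocation pattern above; ret, nr and the alloc_ok flag are assumed locals, the surrounding error handling is omitted.

	ret = dquot_reserve_block(inode, nr);
	if (ret)
		return ret;
	/* ... allocate the on-disk blocks ... */
	if (alloc_ok)
		ret = dquot_claim_block(inode, nr);		/* reservation -> real usage */
	else
		dquot_release_reservation_block(inode, nr);	/* drop the reservation */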
#endif /* _LINUX_QUOTAOPS_ */