Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs
* 'for-linus' of git://oss.sgi.com/xfs/xfs: (52 commits)
  fs/xfs: Correct NULL test
  xfs: optimize log flushing in xfs_fsync
  xfs: only clear the suid bit once in xfs_write
  xfs: kill xfs_bawrite
  xfs: log changed inodes instead of writing them synchronously
  xfs: remove invalid barrier optimization from xfs_fsync
  xfs: kill the unused XFS_QMOPT_* flush flags V2
  xfs: Use delay write promotion for dquot flushing
  xfs: Sort delayed write buffers before dispatch
  xfs: Don't issue buffer IO direct from AIL push V2
  xfs: Use delayed write for inodes rather than async V2
  xfs: Make inode reclaim states explicit
  xfs: more reserved blocks fixups
  xfs: turn off sign warnings
  xfs: don't hold onto reserved blocks on remount,ro
  xfs: quota limit statvfs available blocks
  xfs: replace KM_LARGE with explicit vmalloc use
  xfs: cleanup up xfs_log_force calling conventions
  xfs: kill XLOG_VEC_SET_TYPE
  xfs: remove duplicate buffer flags
  ...
Commit b305956abc
@@ -16,7 +16,7 @@
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#

EXTRA_CFLAGS += -I$(src) -I$(src)/linux-2.6 -funsigned-char
EXTRA_CFLAGS += -I$(src) -I$(src)/linux-2.6

XFS_LINUX := linux-2.6
@@ -16,7 +16,6 @@
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/blkdev.h>
@@ -24,8 +23,25 @@
#include "time.h"
#include "kmem.h"

#define MAX_VMALLOCS	6
#define MAX_SLAB_SIZE	0x20000
/*
 * Greedy allocation. May fail and may return vmalloced memory.
 *
 * Must be freed using kmem_free_large.
 */
void *
kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
{
    void    *ptr;
    size_t  kmsize = maxsize;

    while (!(ptr = kmem_zalloc_large(kmsize))) {
        if ((kmsize >>= 1) <= minsize)
            kmsize = minsize;
    }
    if (ptr)
        *size = kmsize;
    return ptr;
}
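For illustration only, not part of this commit: the loop above halves the request until kmem_zalloc_large() succeeds, never dropping below minsize. A minimal userspace sketch of the same back-off pattern, with hypothetical names and calloc standing in for the kernel allocator:

    #include <stdlib.h>

    /*
     * Hypothetical analogue of the greedy loop above: try the largest
     * size first, halving on failure and clamping at minsize. Unlike
     * the kernel loop, this gives up once minsize itself fails, so it
     * is guaranteed to terminate in userspace.
     */
    static void *zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
    {
        size_t sz = maxsize;
        void *ptr;

        while (!(ptr = calloc(1, sz))) {
            if (sz <= minsize)
                return NULL;        /* even the minimum failed */
            if ((sz >>= 1) <= minsize)
                sz = minsize;
        }
        *size = sz;
        return ptr;
    }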

void *
kmem_alloc(size_t size, unsigned int __nocast flags)
@@ -34,19 +50,8 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
    gfp_t   lflags = kmem_flags_convert(flags);
    void    *ptr;

#ifdef DEBUG
    if (unlikely(!(flags & KM_LARGE) && (size > PAGE_SIZE))) {
        printk(KERN_WARNING "Large %s attempt, size=%ld\n",
            __func__, (long)size);
        dump_stack();
    }
#endif

    do {
        if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS)
            ptr = kmalloc(size, lflags);
        else
            ptr = __vmalloc(size, lflags, PAGE_KERNEL);
        ptr = kmalloc(size, lflags);
        if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
            return ptr;
        if (!(++retries % 100))
@@ -68,27 +73,6 @@ kmem_zalloc(size_t size, unsigned int __nocast flags)
    return ptr;
}

void *
kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize,
        unsigned int __nocast flags)
{
    void        *ptr;
    size_t      kmsize = maxsize;
    unsigned int    kmflags = (flags & ~KM_SLEEP) | KM_NOSLEEP;

    while (!(ptr = kmem_zalloc(kmsize, kmflags))) {
        if ((kmsize <= minsize) && (flags & KM_NOSLEEP))
            break;
        if ((kmsize >>= 1) <= minsize) {
            kmsize = minsize;
            kmflags = flags;
        }
    }
    if (ptr)
        *size = kmsize;
    return ptr;
}

void
kmem_free(const void *ptr)
{
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
@@ -30,7 +31,6 @@
#define KM_NOSLEEP	0x0002u
#define KM_NOFS		0x0004u
#define KM_MAYFAIL	0x0008u
#define KM_LARGE	0x0010u

/*
 * We use a special process flag to avoid recursive callbacks into
@@ -42,7 +42,7 @@ kmem_flags_convert(unsigned int __nocast flags)
{
    gfp_t lflags;

    BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_LARGE));
    BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));

    if (flags & KM_NOSLEEP) {
        lflags = GFP_ATOMIC | __GFP_NOWARN;
@@ -56,10 +56,25 @@ kmem_flags_convert(unsigned int __nocast flags)

extern void *kmem_alloc(size_t, unsigned int __nocast);
extern void *kmem_zalloc(size_t, unsigned int __nocast);
extern void *kmem_zalloc_greedy(size_t *, size_t, size_t, unsigned int __nocast);
extern void *kmem_realloc(const void *, size_t, size_t, unsigned int __nocast);
extern void  kmem_free(const void *);

static inline void *kmem_zalloc_large(size_t size)
{
    void *ptr;

    ptr = vmalloc(size);
    if (ptr)
        memset(ptr, 0, size);
    return ptr;
}
static inline void kmem_free_large(void *ptr)
{
    vfree(ptr);
}
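A hedged aside on the pair above (not from this commit): the explicit vmalloc-plus-memset presumably reflects that no zeroing vmalloc helper was available at the time, and giving large allocations their own alloc/free names keeps callers from mixing them with the slab-backed kmem_free(). A hypothetical userspace analogue of the same paired-interface idea:

    #include <stdlib.h>
    #include <string.h>

    /*
     * Hypothetical analogue of kmem_zalloc_large()/kmem_free_large():
     * a distinct allocate/free pair for big, zeroed buffers, so the
     * matching free function is obvious at every call site.
     */
    static void *zalloc_large(size_t size)
    {
        void *ptr = malloc(size);

        if (ptr)
            memset(ptr, 0, size);
        return ptr;
    }

    static void free_large(void *ptr)
    {
        free(ptr);  /* must pair with zalloc_large(), nothing else */
    }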

extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);

/*
 * Zone interfaces
 */
@@ -106,7 +106,7 @@ xfs_get_acl(struct inode *inode, int type)
    struct posix_acl *acl;
    struct xfs_acl *xfs_acl;
    int len = sizeof(struct xfs_acl);
    char *ea_name;
    unsigned char *ea_name;
    int error;

    acl = get_cached_acl(inode, type);
@@ -133,7 +133,8 @@ xfs_get_acl(struct inode *inode, int type)
    if (!xfs_acl)
        return ERR_PTR(-ENOMEM);

    error = -xfs_attr_get(ip, ea_name, (char *)xfs_acl, &len, ATTR_ROOT);
    error = -xfs_attr_get(ip, ea_name, (unsigned char *)xfs_acl,
                            &len, ATTR_ROOT);
    if (error) {
        /*
         * If the attribute doesn't exist make sure we have a negative
@@ -162,7 +163,7 @@ STATIC int
xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
{
    struct xfs_inode *ip = XFS_I(inode);
    char *ea_name;
    unsigned char *ea_name;
    int error;

    if (S_ISLNK(inode->i_mode))
@@ -194,7 +195,7 @@ xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
        (sizeof(struct xfs_acl_entry) *
        (XFS_ACL_MAX_ENTRIES - acl->a_count));

    error = -xfs_attr_set(ip, ea_name, (char *)xfs_acl,
    error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl,
            len, ATTR_ROOT);

    kfree(xfs_acl);
@@ -262,7 +263,7 @@ xfs_set_mode(struct inode *inode, mode_t mode)
}

static int
xfs_acl_exists(struct inode *inode, char *name)
xfs_acl_exists(struct inode *inode, unsigned char *name)
{
    int len = sizeof(struct xfs_acl);
@@ -33,6 +33,7 @@
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/list_sort.h>

#include "xfs_sb.h"
#include "xfs_inum.h"
@@ -1072,22 +1073,30 @@ xfs_buf_ioerror(
}

int
xfs_bawrite(
    void            *mp,
xfs_bwrite(
    struct xfs_mount    *mp,
    struct xfs_buf      *bp)
{
    trace_xfs_buf_bawrite(bp, _RET_IP_);
    int         iowait = (bp->b_flags & XBF_ASYNC) == 0;
    int         error = 0;

    ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
    bp->b_strat = xfs_bdstrat_cb;
    bp->b_mount = mp;
    bp->b_flags |= XBF_WRITE;
    if (!iowait)
        bp->b_flags |= _XBF_RUN_QUEUES;

    xfs_buf_delwri_dequeue(bp);
    xfs_buf_iostrategy(bp);

    bp->b_flags &= ~(XBF_READ | XBF_DELWRI | XBF_READ_AHEAD);
    bp->b_flags |= (XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);
    if (iowait) {
        error = xfs_buf_iowait(bp);
        if (error)
            xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
        xfs_buf_relse(bp);
    }

    bp->b_mount = mp;
    bp->b_strat = xfs_bdstrat_cb;
    return xfs_bdstrat_cb(bp);
    return error;
}

void
@@ -1106,6 +1115,126 @@ xfs_bdwrite(
    xfs_buf_delwri_queue(bp, 1);
}

/*
 * Called when we want to stop a buffer from getting written or read.
 * We attach the EIO error, muck with its flags, and call biodone
 * so that the proper iodone callbacks get called.
 */
STATIC int
xfs_bioerror(
    xfs_buf_t *bp)
{
#ifdef XFSERRORDEBUG
    ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
#endif

    /*
     * No need to wait until the buffer is unpinned, we aren't flushing it.
     */
    XFS_BUF_ERROR(bp, EIO);

    /*
     * We're calling biodone, so delete XBF_DONE flag.
     */
    XFS_BUF_UNREAD(bp);
    XFS_BUF_UNDELAYWRITE(bp);
    XFS_BUF_UNDONE(bp);
    XFS_BUF_STALE(bp);

    XFS_BUF_CLR_BDSTRAT_FUNC(bp);
    xfs_biodone(bp);

    return EIO;
}

/*
 * Same as xfs_bioerror, except that we are releasing the buffer
 * here ourselves, and avoiding the biodone call.
 * This is meant for userdata errors; metadata bufs come with
 * iodone functions attached, so that we can track down errors.
 */
STATIC int
xfs_bioerror_relse(
    struct xfs_buf  *bp)
{
    int64_t     fl = XFS_BUF_BFLAGS(bp);
    /*
     * No need to wait until the buffer is unpinned.
     * We aren't flushing it.
     *
     * chunkhold expects B_DONE to be set, whether
     * we actually finish the I/O or not. We don't want to
     * change that interface.
     */
    XFS_BUF_UNREAD(bp);
    XFS_BUF_UNDELAYWRITE(bp);
    XFS_BUF_DONE(bp);
    XFS_BUF_STALE(bp);
    XFS_BUF_CLR_IODONE_FUNC(bp);
    XFS_BUF_CLR_BDSTRAT_FUNC(bp);
    if (!(fl & XBF_ASYNC)) {
        /*
         * Mark b_error and B_ERROR _both_.
         * Lot's of chunkcache code assumes that.
         * There's no reason to mark error for
         * ASYNC buffers.
         */
        XFS_BUF_ERROR(bp, EIO);
        XFS_BUF_FINISH_IOWAIT(bp);
    } else {
        xfs_buf_relse(bp);
    }

    return EIO;
}


/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(
    struct xfs_buf  *bp)
{
    if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
        trace_xfs_bdstrat_shut(bp, _RET_IP_);
        /*
         * Metadata write that didn't get logged but
         * written delayed anyway. These aren't associated
         * with a transaction, and can be ignored.
         */
        if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
            return xfs_bioerror_relse(bp);
        else
            return xfs_bioerror(bp);
    }

    xfs_buf_iorequest(bp);
    return 0;
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem.  Typically user data goes thru this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
    struct xfs_mount    *mp,
    struct xfs_buf      *bp)
{
    if (XFS_FORCED_SHUTDOWN(mp)) {
        trace_xfs_bdstrat_shut(bp, _RET_IP_);
        xfs_bioerror_relse(bp);
        return;
    }

    xfs_buf_iorequest(bp);
}

STATIC void
_xfs_buf_ioend(
    xfs_buf_t       *bp,
@@ -1324,7 +1453,7 @@ xfs_buf_iomove(
    xfs_buf_t       *bp,    /* buffer to process        */
    size_t          boff,   /* starting buffer offset   */
    size_t          bsize,  /* length to copy           */
    caddr_t         data,   /* data address             */
    void            *data,  /* data address             */
    xfs_buf_rw_t    mode)   /* read/write/zero flag     */
{
    size_t          bend, cpoff, csize;
@@ -1406,8 +1535,8 @@ xfs_alloc_bufhash(

    btp->bt_hashshift = external ? 3 : 8;   /* 8 or 256 buckets */
    btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
    btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
                    sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE);
    btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) *
                     sizeof(xfs_bufhash_t));
    for (i = 0; i < (1 << btp->bt_hashshift); i++) {
        spin_lock_init(&btp->bt_hash[i].bh_lock);
        INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
@@ -1418,7 +1547,7 @@ STATIC void
xfs_free_bufhash(
    xfs_buftarg_t   *btp)
{
    kmem_free(btp->bt_hash);
    kmem_free_large(btp->bt_hash);
    btp->bt_hash = NULL;
}
@@ -1623,6 +1752,11 @@ xfs_buf_delwri_queue(
        list_del(&bp->b_list);
    }

    if (list_empty(dwq)) {
        /* start xfsbufd as it is about to have something to do */
        wake_up_process(bp->b_target->bt_task);
    }

    bp->b_flags |= _XBF_DELWRI_Q;
    list_add_tail(&bp->b_list, dwq);
    bp->b_queuetime = jiffies;
@@ -1654,6 +1788,35 @@ xfs_buf_delwri_dequeue(
    trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
}

/*
 * If a delwri buffer needs to be pushed before it has aged out, then promote
 * it to the head of the delwri queue so that it will be flushed on the next
 * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
 * than the age currently needed to flush the buffer. Hence the next time the
 * xfsbufd sees it is guaranteed to be considered old enough to flush.
 */
void
xfs_buf_delwri_promote(
    struct xfs_buf  *bp)
{
    struct xfs_buftarg *btp = bp->b_target;
    long        age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;

    ASSERT(bp->b_flags & XBF_DELWRI);
    ASSERT(bp->b_flags & _XBF_DELWRI_Q);

    /*
     * Check the buffer age before locking the delayed write queue as we
     * don't need to promote buffers that are already past the flush age.
     */
    if (bp->b_queuetime < jiffies - age)
        return;
    bp->b_queuetime = jiffies - age;
    spin_lock(&btp->bt_delwrite_lock);
    list_move(&bp->b_list, &btp->bt_delwrite_queue);
    spin_unlock(&btp->bt_delwrite_lock);
}
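Not part of the commit, but to make the trick above concrete: rather than keeping a separate urgent list, promotion simply backdates the buffer's queue timestamp so an age-based scanner flushes it on its next pass. A self-contained C sketch of the same idea, with hypothetical names:

    /*
     * Timestamp-backdating promotion. An age-based flusher picks an
     * item up once now - queuetime >= age; promoting just rewrites
     * queuetime instead of shuffling the item between lists.
     */
    struct item {
        unsigned long queuetime;    /* tick at last enqueue */
    };

    static void promote(struct item *it, unsigned long now, unsigned long age)
    {
        if (it->queuetime < now - age)
            return;                 /* already old enough to flush */
        it->queuetime = now - age;  /* backdate: flushes on next scan */
    }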

STATIC void
xfs_buf_runall_queues(
    struct workqueue_struct *queue)
@@ -1672,6 +1835,8 @@ xfsbufd_wakeup(
    list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
        if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
            continue;
        if (list_empty(&btp->bt_delwrite_queue))
            continue;
        set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
        wake_up_process(btp->bt_task);
    }
@@ -1722,20 +1887,53 @@ xfs_buf_delwri_split(

}

/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values
 */
static int
xfs_buf_cmp(
    void        *priv,
    struct list_head *a,
    struct list_head *b)
{
    struct xfs_buf  *ap = container_of(a, struct xfs_buf, b_list);
    struct xfs_buf  *bp = container_of(b, struct xfs_buf, b_list);
    xfs_daddr_t     diff;

    diff = ap->b_bn - bp->b_bn;
    if (diff < 0)
        return -1;
    if (diff > 0)
        return 1;
    return 0;
}
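The comment above is worth a concrete demonstration (not from this commit): returning the raw 64-bit subtraction truncated to the comparator's 32-bit int return value can silently zero or flip the sign, which is exactly why xfs_buf_cmp() maps the difference to -1/0/1. A runnable sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Broken: the 64-bit difference is truncated to 32 bits. */
    static int cmp_bad(int64_t a, int64_t b)
    {
        return (int)(a - b);
    }

    /* Correct: compare, then map to -1/0/1 as xfs_buf_cmp() does. */
    static int cmp_good(int64_t a, int64_t b)
    {
        if (a < b)
            return -1;
        if (a > b)
            return 1;
        return 0;
    }

    int main(void)
    {
        int64_t a = 0, b = INT64_C(1) << 32;    /* a - b == -2^32 */

        /* Truncation drops the high bits: cmp_bad() reports "equal". */
        printf("bad=%d good=%d\n", cmp_bad(a, b), cmp_good(a, b));
        return 0;
    }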

void
xfs_buf_delwri_sort(
    xfs_buftarg_t   *target,
    struct list_head *list)
{
    list_sort(NULL, list, xfs_buf_cmp);
}

STATIC int
xfsbufd(
    void        *data)
{
    struct list_head tmp;
    xfs_buftarg_t   *target = (xfs_buftarg_t *)data;
    int     count;
    xfs_buf_t   *bp;
    xfs_buftarg_t   *target = (xfs_buftarg_t *)data;

    current->flags |= PF_MEMALLOC;

    set_freezable();

    do {
        long    age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
        long    tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
        int     count = 0;
        struct list_head tmp;

        if (unlikely(freezing(current))) {
            set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
            refrigerator();
@@ -1743,17 +1941,16 @@ xfsbufd(
            clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
        }

        schedule_timeout_interruptible(
            xfs_buf_timer_centisecs * msecs_to_jiffies(10));
        /* sleep for a long time if there is nothing to do. */
        if (list_empty(&target->bt_delwrite_queue))
            tout = MAX_SCHEDULE_TIMEOUT;
        schedule_timeout_interruptible(tout);

        xfs_buf_delwri_split(target, &tmp,
                xfs_buf_age_centisecs * msecs_to_jiffies(10));

        count = 0;
        xfs_buf_delwri_split(target, &tmp, age);
        list_sort(NULL, &tmp, xfs_buf_cmp);
        while (!list_empty(&tmp)) {
            bp = list_entry(tmp.next, xfs_buf_t, b_list);
            ASSERT(target == bp->b_target);

            struct xfs_buf *bp;
            bp = list_first_entry(&tmp, struct xfs_buf, b_list);
            list_del_init(&bp->b_list);
            xfs_buf_iostrategy(bp);
            count++;
@@ -1779,42 +1976,45 @@ xfs_flush_buftarg(
    xfs_buftarg_t   *target,
    int     wait)
{
    struct list_head tmp;
    xfs_buf_t   *bp, *n;
    xfs_buf_t   *bp;
    int     pincount = 0;
    LIST_HEAD(tmp_list);
    LIST_HEAD(wait_list);

    xfs_buf_runall_queues(xfsconvertd_workqueue);
    xfs_buf_runall_queues(xfsdatad_workqueue);
    xfs_buf_runall_queues(xfslogd_workqueue);

    set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
    pincount = xfs_buf_delwri_split(target, &tmp, 0);
    pincount = xfs_buf_delwri_split(target, &tmp_list, 0);

    /*
     * Dropped the delayed write list lock, now walk the temporary list
     * Dropped the delayed write list lock, now walk the temporary list.
     * All I/O is issued async and then if we need to wait for completion
     * we do that after issuing all the IO.
     */
    list_for_each_entry_safe(bp, n, &tmp, b_list) {
    list_sort(NULL, &tmp_list, xfs_buf_cmp);
    while (!list_empty(&tmp_list)) {
        bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
        ASSERT(target == bp->b_target);
        if (wait)
        list_del_init(&bp->b_list);
        if (wait) {
            bp->b_flags &= ~XBF_ASYNC;
        else
            list_del_init(&bp->b_list);

            list_add(&bp->b_list, &wait_list);
        }
        xfs_buf_iostrategy(bp);
    }

    if (wait)
    if (wait) {
        /* Expedite and wait for IO to complete. */
        blk_run_address_space(target->bt_mapping);
        while (!list_empty(&wait_list)) {
            bp = list_first_entry(&wait_list, struct xfs_buf, b_list);

    /*
     * Remaining list items must be flushed before returning
     */
    while (!list_empty(&tmp)) {
        bp = list_entry(tmp.next, xfs_buf_t, b_list);

        list_del_init(&bp->b_list);
        xfs_iowait(bp);
        xfs_buf_relse(bp);
            list_del_init(&bp->b_list);
            xfs_iowait(bp);
            xfs_buf_relse(bp);
        }
    }

    return pincount;
@@ -232,13 +232,17 @@ extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);

/* Buffer Read and Write Routines */
extern int xfs_bawrite(void *mp, xfs_buf_t *bp);
extern int xfs_bwrite(struct xfs_mount *mp, struct xfs_buf *bp);
extern void xfs_bdwrite(void *mp, xfs_buf_t *bp);

extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
extern int xfs_bdstrat_cb(struct xfs_buf *);

extern void xfs_buf_ioend(xfs_buf_t *, int);
extern void xfs_buf_ioerror(xfs_buf_t *, int);
extern int xfs_buf_iorequest(xfs_buf_t *);
extern int xfs_buf_iowait(xfs_buf_t *);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, xfs_caddr_t,
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
                xfs_buf_rw_t);

static inline int xfs_buf_iostrategy(xfs_buf_t *bp)
@@ -261,6 +265,7 @@ extern int xfs_buf_ispin(xfs_buf_t *);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
extern void xfs_buf_delwri_promote(xfs_buf_t *);

/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
@@ -270,33 +275,19 @@ extern void xfs_buf_terminate(void);
    ({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })


#define XFS_B_ASYNC		XBF_ASYNC
#define XFS_B_DELWRI		XBF_DELWRI
#define XFS_B_READ		XBF_READ
#define XFS_B_WRITE		XBF_WRITE
#define XFS_B_STALE		XBF_STALE

#define XFS_BUF_TRYLOCK		XBF_TRYLOCK
#define XFS_INCORE_TRYLOCK	XBF_TRYLOCK
#define XFS_BUF_LOCK		XBF_LOCK
#define XFS_BUF_MAPPED		XBF_MAPPED

#define BUF_BUSY		XBF_DONT_BLOCK

#define XFS_BUF_BFLAGS(bp)	((bp)->b_flags)
#define XFS_BUF_ZEROFLAGS(bp)	((bp)->b_flags &= \
		~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED))

#define XFS_BUF_STALE(bp)	((bp)->b_flags |= XFS_B_STALE)
#define XFS_BUF_UNSTALE(bp)	((bp)->b_flags &= ~XFS_B_STALE)
#define XFS_BUF_ISSTALE(bp)	((bp)->b_flags & XFS_B_STALE)
#define XFS_BUF_STALE(bp)	((bp)->b_flags |= XBF_STALE)
#define XFS_BUF_UNSTALE(bp)	((bp)->b_flags &= ~XBF_STALE)
#define XFS_BUF_ISSTALE(bp)	((bp)->b_flags & XBF_STALE)
#define XFS_BUF_SUPER_STALE(bp)	do { \
		XFS_BUF_STALE(bp); \
		xfs_buf_delwri_dequeue(bp); \
		XFS_BUF_DONE(bp); \
	} while (0)

#define XFS_BUF_MANAGE		XBF_FS_MANAGED
#define XFS_BUF_UNMANAGE(bp)	((bp)->b_flags &= ~XBF_FS_MANAGED)

#define XFS_BUF_DELAYWRITE(bp)	((bp)->b_flags |= XBF_DELWRI)
@@ -385,31 +376,11 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)

#define xfs_biomove(bp, off, len, data, rw) \
    xfs_buf_iomove((bp), (off), (len), (data), \
        ((rw) == XFS_B_WRITE) ? XBRW_WRITE : XBRW_READ)
        ((rw) == XBF_WRITE) ? XBRW_WRITE : XBRW_READ)

#define xfs_biozero(bp, off, len) \
    xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)


static inline int XFS_bwrite(xfs_buf_t *bp)
{
    int iowait = (bp->b_flags & XBF_ASYNC) == 0;
    int error = 0;

    if (!iowait)
        bp->b_flags |= _XBF_RUN_QUEUES;

    xfs_buf_delwri_dequeue(bp);
    xfs_buf_iostrategy(bp);
    if (iowait) {
        error = xfs_buf_iowait(bp);
        xfs_buf_relse(bp);
    }
    return error;
}

#define XFS_bdstrat(bp) xfs_buf_iorequest(bp)

#define xfs_iowait(bp)	xfs_buf_iowait(bp)

#define xfs_baread(target, rablkno, ralen) \
@@ -424,6 +395,7 @@ extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
extern int xfs_flush_buftarg(xfs_buftarg_t *, int);

#ifdef CONFIG_KDB_MODULES
extern struct list_head *xfs_get_buftarg_list(void);
#endif
@@ -79,7 +79,7 @@ xfs_flush_pages(
        xfs_iflags_clear(ip, XFS_ITRUNCATED);
        ret = -filemap_fdatawrite(mapping);
    }
    if (flags & XFS_B_ASYNC)
    if (flags & XBF_ASYNC)
        return ret;
    ret2 = xfs_wait_on_pages(ip, first, last);
    if (!ret)
@@ -447,12 +447,12 @@ xfs_attrlist_by_handle(
int
xfs_attrmulti_attr_get(
    struct inode        *inode,
    char                *name,
    char                __user *ubuf,
    unsigned char       *name,
    unsigned char       __user *ubuf,
    __uint32_t          *len,
    __uint32_t          flags)
{
    char                *kbuf;
    unsigned char       *kbuf;
    int                 error = EFAULT;

    if (*len > XATTR_SIZE_MAX)
@@ -476,12 +476,12 @@ xfs_attrmulti_attr_get(
int
xfs_attrmulti_attr_set(
    struct inode        *inode,
    char                *name,
    const char          __user *ubuf,
    unsigned char       *name,
    const unsigned char __user *ubuf,
    __uint32_t          len,
    __uint32_t          flags)
{
    char                *kbuf;
    unsigned char       *kbuf;
    int                 error = EFAULT;

    if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
@@ -501,7 +501,7 @@ xfs_attrmulti_attr_set(
int
xfs_attrmulti_attr_remove(
    struct inode        *inode,
    char                *name,
    unsigned char       *name,
    __uint32_t          flags)
{
    if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
@@ -519,7 +519,7 @@ xfs_attrmulti_by_handle(
    xfs_fsop_attrmulti_handlereq_t am_hreq;
    struct dentry       *dentry;
    unsigned int        i, size;
    char                *attr_name;
    unsigned char       *attr_name;

    if (!capable(CAP_SYS_ADMIN))
        return -XFS_ERROR(EPERM);
@@ -547,7 +547,7 @@ xfs_attrmulti_by_handle(

    error = 0;
    for (i = 0; i < am_hreq.opcount; i++) {
        ops[i].am_error = strncpy_from_user(attr_name,
        ops[i].am_error = strncpy_from_user((char *)attr_name,
                ops[i].am_attrname, MAXNAMELEN);
        if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
            error = -ERANGE;
@@ -1431,6 +1431,9 @@ xfs_file_ioctl(
        if (!capable(CAP_SYS_ADMIN))
            return -EPERM;

        if (mp->m_flags & XFS_MOUNT_RDONLY)
            return -XFS_ERROR(EROFS);

        if (copy_from_user(&inout, arg, sizeof(inout)))
            return -XFS_ERROR(EFAULT);
@@ -45,23 +45,23 @@ xfs_readlink_by_handle(
extern int
xfs_attrmulti_attr_get(
    struct inode        *inode,
    char                *name,
    char                __user *ubuf,
    unsigned char       *name,
    unsigned char       __user *ubuf,
    __uint32_t          *len,
    __uint32_t          flags);

extern int
xfs_attrmulti_attr_set(
    struct inode        *inode,
    char                *name,
    const char          __user *ubuf,
    unsigned char       *name,
    const unsigned char __user *ubuf,
    __uint32_t          len,
    __uint32_t          flags);

extern int
xfs_attrmulti_attr_remove(
    struct inode        *inode,
    char                *name,
    unsigned char       *name,
    __uint32_t          flags);

extern struct dentry *
@@ -411,7 +411,7 @@ xfs_compat_attrmulti_by_handle(
    compat_xfs_fsop_attrmulti_handlereq_t am_hreq;
    struct dentry       *dentry;
    unsigned int        i, size;
    char                *attr_name;
    unsigned char       *attr_name;

    if (!capable(CAP_SYS_ADMIN))
        return -XFS_ERROR(EPERM);
@@ -440,7 +440,7 @@ xfs_compat_attrmulti_by_handle(

    error = 0;
    for (i = 0; i < am_hreq.opcount; i++) {
        ops[i].am_error = strncpy_from_user(attr_name,
        ops[i].am_error = strncpy_from_user((char *)attr_name,
                compat_ptr(ops[i].am_attrname),
                MAXNAMELEN);
        if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
@@ -140,10 +140,10 @@ xfs_init_security(
    struct xfs_inode *ip = XFS_I(inode);
    size_t          length;
    void            *value;
    char            *name;
    unsigned char   *name;
    int             error;

    error = security_inode_init_security(inode, dir, &name,
    error = security_inode_init_security(inode, dir, (char **)&name,
                         &value, &length);
    if (error) {
        if (error == -EOPNOTSUPP)
@@ -630,18 +630,9 @@ start:
     * by root. This keeps people from modifying setuid and
     * setgid binaries.
     */

    if (((xip->i_d.di_mode & S_ISUID) ||
        ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
        (S_ISGID | S_IXGRP))) &&
        !capable(CAP_FSETID)) {
        error = xfs_write_clear_setuid(xip);
        if (likely(!error))
            error = -file_remove_suid(file);
        if (unlikely(error)) {
            goto out_unlock_internal;
        }
    }
    error = -file_remove_suid(file);
    if (unlikely(error))
        goto out_unlock_internal;

    /* We can write back this queue in page reclaim */
    current->backing_dev_info = mapping->backing_dev_info;
@@ -783,53 +774,6 @@ write_retry:
    return -error;
}

/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
    if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
        trace_xfs_bdstrat_shut(bp, _RET_IP_);
        /*
         * Metadata write that didn't get logged but
         * written delayed anyway. These aren't associated
         * with a transaction, and can be ignored.
         */
        if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
            (XFS_BUF_ISREAD(bp)) == 0)
            return (xfs_bioerror_relse(bp));
        else
            return (xfs_bioerror(bp));
    }

    xfs_buf_iorequest(bp);
    return 0;
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem.  Typically user data goes thru this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
    struct xfs_mount    *mp,
    struct xfs_buf      *bp)
{
    ASSERT(mp);
    if (!XFS_FORCED_SHUTDOWN(mp)) {
        xfs_buf_iorequest(bp);
        return;
    }

    trace_xfs_bdstrat_shut(bp, _RET_IP_);
    xfs_bioerror_relse(bp);
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
@@ -22,9 +22,6 @@ struct xfs_mount;
struct xfs_inode;
struct xfs_buf;

/* errors from xfsbdstrat() must be extracted from the buffer */
extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
extern int xfs_bdstrat_cb(struct xfs_buf *);
extern int xfs_dev_is_read_only(struct xfs_mount *, char *);

extern int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
@@ -877,12 +877,11 @@ xfsaild(
{
    struct xfs_ail  *ailp = data;
    xfs_lsn_t       last_pushed_lsn = 0;
    long            tout = 0;
    long            tout = 0;   /* milliseconds */

    while (!kthread_should_stop()) {
        if (tout)
            schedule_timeout_interruptible(msecs_to_jiffies(tout));
        tout = 1000;
        schedule_timeout_interruptible(tout ?
                msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);

        /* swsusp */
        try_to_freeze();
@@ -1022,12 +1021,45 @@ xfs_fs_dirty_inode(
    XFS_I(inode)->i_update_core = 1;
}

/*
 * Attempt to flush the inode, this will actually fail
 * if the inode is pinned, but we dirty the inode again
 * at the point when it is unpinned after a log write,
 * since this is when the inode itself becomes flushable.
 */
STATIC int
xfs_log_inode(
    struct xfs_inode    *ip)
{
    struct xfs_mount    *mp = ip->i_mount;
    struct xfs_trans    *tp;
    int                 error;

    xfs_iunlock(ip, XFS_ILOCK_SHARED);
    tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
    error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);

    if (error) {
        xfs_trans_cancel(tp, 0);
        /* we need to return with the lock hold shared */
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        return error;
    }

    xfs_ilock(ip, XFS_ILOCK_EXCL);

    /*
     * Note - it's possible that we might have pushed ourselves out of the
     * way during trans_reserve which would flush the inode. But there's
     * no guarantee that the inode buffer has actually gone out yet (it's
     * delwri). Plus the buffer could be pinned anyway if it's part of
     * an inode in another recent transaction. So we play it safe and
     * fire off the transaction anyway.
     */
    xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
    xfs_trans_ihold(tp, ip);
    xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
    xfs_trans_set_sync(tp);
    error = xfs_trans_commit(tp, 0);
    xfs_ilock_demote(ip, XFS_ILOCK_EXCL);

    return error;
}

STATIC int
xfs_fs_write_inode(
    struct inode        *inode,
@@ -1035,7 +1067,7 @@ xfs_fs_write_inode(
{
    struct xfs_inode    *ip = XFS_I(inode);
    struct xfs_mount    *mp = ip->i_mount;
    int                 error = 0;
    int                 error = EAGAIN;

    xfs_itrace_entry(ip);
@@ -1046,36 +1078,56 @@ xfs_fs_write_inode(
        error = xfs_wait_on_pages(ip, 0, -1);
        if (error)
            goto out;
    }

    /*
     * Bypass inodes which have already been cleaned by
     * the inode flush clustering code inside xfs_iflush
     */
    if (xfs_inode_clean(ip))
        goto out;

    /*
     * We make this non-blocking if the inode is contended, return
     * EAGAIN to indicate to the caller that they did not succeed.
     * This prevents the flush path from blocking on inodes inside
     * another operation right now, they get caught later by xfs_sync.
     */
    if (sync) {
        /*
         * Make sure the inode has hit stable storage. By using the
         * log and the fsync transactions we reduce the IOs we have
         * to do here from two (log and inode) to just the log.
         *
         * Note: We still need to do a delwri write of the inode after
         * this to flush it to the backing buffer so that bulkstat
         * works properly if this is the first time the inode has been
         * written. Because we hold the ilock atomically over the
         * transaction commit and the inode flush we are guaranteed
         * that the inode is not pinned when it returns. If the flush
         * lock is already held, then the inode has already been
         * flushed once and we don't need to flush it again. Hence
         * the code will only flush the inode if it isn't already
         * being flushed.
         */
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        xfs_iflock(ip);

        error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
        if (ip->i_update_core) {
            error = xfs_log_inode(ip);
            if (error)
                goto out_unlock;
        }
    } else {
        error = EAGAIN;
        /*
         * We make this non-blocking if the inode is contended, return
         * EAGAIN to indicate to the caller that they did not succeed.
         * This prevents the flush path from blocking on inodes inside
         * another operation right now, they get caught later by xfs_sync.
         */
        if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
            goto out;
        if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
            goto out_unlock;
    }

        error = xfs_iflush(ip, XFS_IFLUSH_ASYNC_NOBLOCK);
    }

    if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
        goto out_unlock;

    /*
     * Now we have the flush lock and the inode is not pinned, we can check
     * if the inode is really clean as we know that there are no pending
     * transaction completions, it is not waiting on the delayed write
     * queue and there is no IO in progress.
     */
    if (xfs_inode_clean(ip)) {
        xfs_ifunlock(ip);
        error = 0;
        goto out_unlock;
    }
    error = xfs_iflush(ip, 0);

 out_unlock:
    xfs_iunlock(ip, XFS_ILOCK_SHARED);
 out:
@@ -1257,6 +1309,29 @@ xfs_fs_statfs(
    return 0;
}

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
    __uint64_t resblks = 0;

    mp->m_resblks_save = mp->m_resblks;
    xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
    __uint64_t resblks;

    if (mp->m_resblks_save) {
        resblks = mp->m_resblks_save;
        mp->m_resblks_save = 0;
    } else
        resblks = xfs_default_resblks(mp);

    xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC int
xfs_fs_remount(
    struct super_block *sb,
@@ -1336,11 +1411,27 @@ xfs_fs_remount(
        }
        mp->m_update_flags = 0;
    }

        /*
         * Fill out the reserve pool if it is empty. Use the stashed
         * value if it is non-zero, otherwise go with the default.
         */
        xfs_restore_resvblks(mp);
    }

    /* rw -> ro */
    if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
        /*
         * After we have synced the data but before we sync the
         * metadata, we need to free up the reserve block pool so that
         * the used block count in the superblock on disk is correct at
         * the end of the remount. Stash the current reserve pool size
         * so that if we get remounted rw, we can return it to the same
         * size.
         */

        xfs_quiesce_data(mp);
        xfs_save_resvblks(mp);
        xfs_quiesce_attr(mp);
        mp->m_flags |= XFS_MOUNT_RDONLY;
    }
@@ -1359,10 +1450,21 @@ xfs_fs_freeze(
{
    struct xfs_mount *mp = XFS_M(sb);

    xfs_save_resvblks(mp);
    xfs_quiesce_attr(mp);
    return -xfs_fs_log_dummy(mp);
}

STATIC int
xfs_fs_unfreeze(
    struct super_block *sb)
{
    struct xfs_mount *mp = XFS_M(sb);

    xfs_restore_resvblks(mp);
    return 0;
}

STATIC int
xfs_fs_show_options(
    struct seq_file *m,
@@ -1585,6 +1687,7 @@ static const struct super_operations xfs_super_operations = {
    .put_super      = xfs_fs_put_super,
    .sync_fs        = xfs_fs_sync_fs,
    .freeze_fs      = xfs_fs_freeze,
    .unfreeze_fs    = xfs_fs_unfreeze,
    .statfs         = xfs_fs_statfs,
    .remount_fs     = xfs_fs_remount,
    .show_options   = xfs_fs_show_options,
@@ -90,14 +90,13 @@ xfs_inode_ag_lookup(
STATIC int
xfs_inode_ag_walk(
    struct xfs_mount    *mp,
    xfs_agnumber_t      ag,
    struct xfs_perag    *pag,
    int                 (*execute)(struct xfs_inode *ip,
                               struct xfs_perag *pag, int flags),
    int                 flags,
    int                 tag,
    int                 exclusive)
{
    struct xfs_perag    *pag = &mp->m_perag[ag];
    uint32_t            first_index;
    int                 last_error = 0;
    int                 skipped;
@@ -141,8 +140,6 @@ restart:
        delay(1);
        goto restart;
    }

    xfs_put_perag(mp, pag);
    return last_error;
}
@@ -160,10 +157,16 @@ xfs_inode_ag_iterator(
    xfs_agnumber_t      ag;

    for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
        if (!mp->m_perag[ag].pag_ici_init)
        struct xfs_perag    *pag;

        pag = xfs_perag_get(mp, ag);
        if (!pag->pag_ici_init) {
            xfs_perag_put(pag);
            continue;
        error = xfs_inode_ag_walk(mp, ag, execute, flags, tag,
        }
        error = xfs_inode_ag_walk(mp, pag, execute, flags, tag,
                        exclusive);
        xfs_perag_put(pag);
        if (error) {
            last_error = error;
            if (error == EFSCORRUPTED)
@@ -231,7 +234,7 @@ xfs_sync_inode_data(
    }

    error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
                0 : XFS_B_ASYNC, FI_NONE);
                0 : XBF_ASYNC, FI_NONE);
    xfs_iunlock(ip, XFS_IOLOCK_SHARED);

 out_wait:
@@ -267,8 +270,7 @@ xfs_sync_inode_attr(
        goto out_unlock;
    }

    error = xfs_iflush(ip, (flags & SYNC_WAIT) ?
               XFS_IFLUSH_SYNC : XFS_IFLUSH_DELWRI);
    error = xfs_iflush(ip, flags);

 out_unlock:
    xfs_iunlock(ip, XFS_ILOCK_SHARED);
@@ -293,10 +295,7 @@ xfs_sync_data(
    if (error)
        return XFS_ERROR(error);

    xfs_log_force(mp, 0,
              (flags & SYNC_WAIT) ?
               XFS_LOG_FORCE | XFS_LOG_SYNC :
               XFS_LOG_FORCE);
    xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
    return 0;
}
@@ -322,10 +321,6 @@ xfs_commit_dummy_trans(
    struct xfs_inode *ip = mp->m_rootip;
    struct xfs_trans *tp;
    int             error;
    int             log_flags = XFS_LOG_FORCE;

    if (flags & SYNC_WAIT)
        log_flags |= XFS_LOG_SYNC;

    /*
     * Put a dummy transaction in the log to tell recovery
@@ -347,11 +342,11 @@
    xfs_iunlock(ip, XFS_ILOCK_EXCL);

    /* the log force ensures this transaction is pushed to disk */
    xfs_log_force(mp, 0, log_flags);
    xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
    return error;
}

int
STATIC int
xfs_sync_fsdata(
    struct xfs_mount    *mp,
    int                 flags)
@@ -367,7 +362,7 @@ xfs_sync_fsdata(
    if (flags & SYNC_TRYLOCK) {
        ASSERT(!(flags & SYNC_WAIT));

        bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
        bp = xfs_getsb(mp, XBF_TRYLOCK);
        if (!bp)
            goto out;
@@ -387,7 +382,7 @@ xfs_sync_fsdata(
     * become pinned in between there and here.
     */
    if (XFS_BUF_ISPINNED(bp))
        xfs_log_force(mp, 0, XFS_LOG_FORCE);
        xfs_log_force(mp, 0);
    }
@@ -448,9 +443,6 @@ xfs_quiesce_data(
    xfs_sync_data(mp, SYNC_WAIT);
    xfs_qm_sync(mp, SYNC_WAIT);

    /* drop inode references pinned by filestreams */
    xfs_filestream_flush(mp);

    /* write superblock and hoover up shutdown errors */
    error = xfs_sync_fsdata(mp, SYNC_WAIT);
@@ -467,16 +459,18 @@ xfs_quiesce_fs(
{
    int count = 0, pincount;

    xfs_reclaim_inodes(mp, 0);
    xfs_flush_buftarg(mp->m_ddev_targp, 0);
    xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);

    /*
     * This loop must run at least twice. The first instance of the loop
     * will flush most meta data but that will generate more meta data
     * (typically directory updates). Which then must be flushed and
     * logged before we can write the unmount record.
     * logged before we can write the unmount record. We also so sync
     * reclaim of inodes to catch any that the above delwri flush skipped.
     */
    do {
        xfs_reclaim_inodes(mp, SYNC_WAIT);
        xfs_sync_attr(mp, SYNC_WAIT);
        pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
        if (!pincount) {
@@ -575,7 +569,7 @@ xfs_flush_inodes(
    igrab(inode);
    xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
    wait_for_completion(&completion);
    xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
    xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
}

/*
@@ -591,8 +585,8 @@ xfs_sync_worker(
    int     error;

    if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
        xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
        xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
        xfs_log_force(mp, 0);
        xfs_reclaim_inodes(mp, 0);
        /* dgc: errors ignored here */
        error = xfs_qm_sync(mp, SYNC_TRYLOCK);
        error = xfs_sync_fsdata(mp, SYNC_TRYLOCK);
@@ -690,16 +684,17 @@ void
xfs_inode_set_reclaim_tag(
    xfs_inode_t *ip)
{
    xfs_mount_t *mp = ip->i_mount;
    xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
    struct xfs_mount *mp = ip->i_mount;
    struct xfs_perag *pag;

    pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
    read_lock(&pag->pag_ici_lock);
    spin_lock(&ip->i_flags_lock);
    __xfs_inode_set_reclaim_tag(pag, ip);
    __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
    spin_unlock(&ip->i_flags_lock);
    read_unlock(&pag->pag_ici_lock);
    xfs_put_perag(mp, pag);
    xfs_perag_put(pag);
}

void
@@ -712,12 +707,64 @@ __xfs_inode_clear_reclaim_tag(
        XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
}

/*
 * Inodes in different states need to be treated differently, and the return
 * value of xfs_iflush is not sufficient to get this right. The following table
 * lists the inode states and the reclaim actions necessary for non-blocking
 * reclaim:
 *
 *
 *	inode state		iflush ret	required action
 *	---------------		----------	---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, delwri ok	0		requeue
 *	dirty, delwri blocked	EAGAIN		requeue
 *	dirty, sync flush	0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * As can be seen from the table, the return value of xfs_iflush() is not
 * sufficient to correctly decide the reclaim action here. The checks in
 * xfs_iflush() might look like duplicates, but they are not.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean. The clean inode check needs to be done before flushing
 * the inode delwri otherwise we would loop forever requeuing clean inodes as
 * we cannot tell apart a successful delwri flush and a clean inode from the
 * return value of xfs_iflush().
 *
 * Note that because the inode is flushed delayed write by background
 * writeback, the flush lock may already be held here and waiting on it can
 * result in very long latencies. Hence for sync reclaims, where we wait on the
 * flush lock, the caller should push out delayed write inodes first before
 * trying to reclaim them to minimise the amount of time spent waiting. For
 * background relaim, we just requeue the inode for the next pass.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, delwri	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, delwri	=> flush and requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
    struct xfs_inode    *ip,
    struct xfs_perag    *pag,
    int                 sync_mode)
{
    int error = 0;

    /*
     * The radix tree lock here protects a thread in xfs_iget from racing
     * with us starting reclaim on the inode. Once we have the
@@ -735,33 +782,70 @@ xfs_reclaim_inode(
    spin_unlock(&ip->i_flags_lock);
    write_unlock(&pag->pag_ici_lock);

    /*
     * If the inode is still dirty, then flush it out. If the inode
     * is not in the AIL, then it will be OK to flush it delwri as
     * long as xfs_iflush() does not keep any references to the inode.
     * We leave that decision up to xfs_iflush() since it has the
     * knowledge of whether it's OK to simply do a delwri flush of
     * the inode or whether we need to wait until the inode is
     * pulled from the AIL.
     * We get the flush lock regardless, though, just to make sure
     * we don't free it while it is being flushed.
     */
    xfs_ilock(ip, XFS_ILOCK_EXCL);
    xfs_iflock(ip);

    /*
     * In the case of a forced shutdown we rely on xfs_iflush() to
     * wait for the inode to be unpinned before returning an error.
     */
    if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
        /* synchronize with xfs_iflush_done */
    if (!xfs_iflock_nowait(ip)) {
        if (!(sync_mode & SYNC_WAIT))
            goto out;
        xfs_iflock(ip);
        xfs_ifunlock(ip);
    }

    if (is_bad_inode(VFS_I(ip)))
        goto reclaim;
    if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
        xfs_iunpin_wait(ip);
        goto reclaim;
    }
    if (xfs_ipincount(ip)) {
        if (!(sync_mode & SYNC_WAIT)) {
            xfs_ifunlock(ip);
            goto out;
        }
        xfs_iunpin_wait(ip);
    }
    if (xfs_iflags_test(ip, XFS_ISTALE))
        goto reclaim;
    if (xfs_inode_clean(ip))
        goto reclaim;

    /* Now we have an inode that needs flushing */
    error = xfs_iflush(ip, sync_mode);
    if (sync_mode & SYNC_WAIT) {
        xfs_iflock(ip);
        goto reclaim;
    }

    /*
     * When we have to flush an inode but don't have SYNC_WAIT set, we
     * flush the inode out using a delwri buffer and wait for the next
     * call into reclaim to find it in a clean state instead of waiting for
     * it now. We also don't return errors here - if the error is transient
     * then the next reclaim pass will flush the inode, and if the error
     * is permanent then the next sync reclaim will relcaim the inode and
     * pass on the error.
     */
    if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
        xfs_fs_cmn_err(CE_WARN, ip->i_mount,
            "inode 0x%llx background reclaim flush failed with %d",
            (long long)ip->i_ino, error);
    }
out:
    xfs_iflags_clear(ip, XFS_IRECLAIM);
    xfs_iunlock(ip, XFS_ILOCK_EXCL);
    /*
     * We could return EAGAIN here to make reclaim rescan the inode tree in
     * a short while. However, this just burns CPU time scanning the tree
     * waiting for IO to complete and xfssyncd never goes back to the idle
     * state. Instead, return 0 to let the next scheduled background reclaim
     * attempt to reclaim the inode again.
     */
    return 0;

reclaim:
    xfs_ifunlock(ip);
    xfs_iunlock(ip, XFS_ILOCK_EXCL);
    xfs_ireclaim(ip);
    return 0;
    return error;

}

int
@@ -37,7 +37,6 @@ void xfs_syncd_stop(struct xfs_mount *mp);

int xfs_sync_attr(struct xfs_mount *mp, int flags);
int xfs_sync_data(struct xfs_mount *mp, int flags);
int xfs_sync_fsdata(struct xfs_mount *mp, int flags);

int xfs_quiesce_data(struct xfs_mount *mp);
void xfs_quiesce_attr(struct xfs_mount *mp);
@@ -78,6 +78,33 @@ DECLARE_EVENT_CLASS(xfs_attr_list_class,
	)
)

#define DEFINE_PERAG_REF_EVENT(name) \
TRACE_EVENT(name, \
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, \
		 unsigned long caller_ip), \
	TP_ARGS(mp, agno, refcount, caller_ip), \
	TP_STRUCT__entry( \
		__field(dev_t, dev) \
		__field(xfs_agnumber_t, agno) \
		__field(int, refcount) \
		__field(unsigned long, caller_ip) \
	), \
	TP_fast_assign( \
		__entry->dev = mp->m_super->s_dev; \
		__entry->agno = agno; \
		__entry->refcount = refcount; \
		__entry->caller_ip = caller_ip; \
	), \
	TP_printk("dev %d:%d agno %u refcount %d caller %pf", \
		  MAJOR(__entry->dev), MINOR(__entry->dev), \
		  __entry->agno, \
		  __entry->refcount, \
		  (char *)__entry->caller_ip) \
);

DEFINE_PERAG_REF_EVENT(xfs_perag_get)
DEFINE_PERAG_REF_EVENT(xfs_perag_put)

#define DEFINE_ATTR_LIST_EVENT(name) \
DEFINE_EVENT(xfs_attr_list_class, name, \
	TP_PROTO(struct xfs_attr_list_context *ctx), \
@@ -456,6 +483,7 @@ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pushbuf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb);
@@ -1414,6 +1442,59 @@ TRACE_EVENT(xfs_dir2_leafn_moveents,
		__entry->count)
);

#define XFS_SWAPEXT_INODES \
	{ 0,	"target" }, \
	{ 1,	"temp" }

#define XFS_INODE_FORMAT_STR \
	{ 0,	"invalid" }, \
	{ 1,	"local" }, \
	{ 2,	"extent" }, \
	{ 3,	"btree" }

DECLARE_EVENT_CLASS(xfs_swap_extent_class,
	TP_PROTO(struct xfs_inode *ip, int which),
	TP_ARGS(ip, which),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(int, which)
		__field(xfs_ino_t, ino)
		__field(int, format)
		__field(int, nex)
		__field(int, max_nex)
		__field(int, broot_size)
		__field(int, fork_off)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->which = which;
		__entry->ino = ip->i_ino;
		__entry->format = ip->i_d.di_format;
		__entry->nex = ip->i_d.di_nextents;
		__entry->max_nex = ip->i_df.if_ext_max;
		__entry->broot_size = ip->i_df.if_broot_bytes;
		__entry->fork_off = XFS_IFORK_BOFF(ip);
	),
	TP_printk("dev %d:%d ino 0x%llx (%s), %s format, num_extents %d, "
		  "Max in-fork extents %d, broot size %d, fork offset %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_symbolic(__entry->which, XFS_SWAPEXT_INODES),
		  __print_symbolic(__entry->format, XFS_INODE_FORMAT_STR),
		  __entry->nex,
		  __entry->max_nex,
		  __entry->broot_size,
		  __entry->fork_off)
)

#define DEFINE_SWAPEXT_EVENT(name) \
DEFINE_EVENT(xfs_swap_extent_class, name, \
	TP_PROTO(struct xfs_inode *ip, int which), \
	TP_ARGS(ip, which))

DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);

#endif /* _TRACE_XFS_H */

#undef TRACE_INCLUDE_PATH
@@ -45,7 +45,7 @@ xfs_xattr_get(struct dentry *dentry, const char *name,
        value = NULL;
    }

    error = -xfs_attr_get(ip, name, value, &asize, xflags);
    error = -xfs_attr_get(ip, (unsigned char *)name, value, &asize, xflags);
    if (error)
        return error;
    return asize;
@@ -67,8 +67,9 @@ xfs_xattr_set(struct dentry *dentry, const char *name, const void *value,
        xflags |= ATTR_REPLACE;

    if (!value)
        return -xfs_attr_remove(ip, name, xflags);
    return -xfs_attr_set(ip, name, (void *)value, size, xflags);
        return -xfs_attr_remove(ip, (unsigned char *)name, xflags);
    return -xfs_attr_set(ip, (unsigned char *)name,
            (void *)value, size, xflags);
}

static struct xattr_handler xfs_xattr_user_handler = {
@@ -124,8 +125,13 @@ static const char *xfs_xattr_prefix(int flags)
}

static int
xfs_xattr_put_listent(struct xfs_attr_list_context *context, int flags,
        char *name, int namelen, int valuelen, char *value)
xfs_xattr_put_listent(
    struct xfs_attr_list_context *context,
    int             flags,
    unsigned char   *name,
    int             namelen,
    int             valuelen,
    unsigned char   *value)
{
    unsigned int prefix_len = xfs_xattr_prefix_len(flags);
    char *offset;
@@ -148,7 +154,7 @@ xfs_xattr_put_listent(struct xfs_attr_list_context *context, int flags,
    offset = (char *)context->alist + context->count;
    strncpy(offset, xfs_xattr_prefix(flags), prefix_len);
    offset += prefix_len;
    strncpy(offset, name, namelen);         /* real name */
    strncpy(offset, (char *)name, namelen); /* real name */
    offset += namelen;
    *offset = '\0';
    context->count += prefix_len + namelen + 1;
@@ -156,8 +162,13 @@ xfs_xattr_put_listent(struct xfs_attr_list_context *context, int flags,
}

static int
xfs_xattr_put_listent_sizes(struct xfs_attr_list_context *context, int flags,
        char *name, int namelen, int valuelen, char *value)
xfs_xattr_put_listent_sizes(
    struct xfs_attr_list_context *context,
    int             flags,
    unsigned char   *name,
    int             namelen,
    int             valuelen,
    unsigned char   *value)
{
    context->count += xfs_xattr_prefix_len(flags) + namelen + 1;
    return 0;
@@ -1187,7 +1187,7 @@ xfs_qm_dqflush(
     * block, nada.
     */
    if (!XFS_DQ_IS_DIRTY(dqp) ||
        (!(flags & XFS_QMOPT_SYNC) && atomic_read(&dqp->q_pincount) > 0)) {
        (!(flags & SYNC_WAIT) && atomic_read(&dqp->q_pincount) > 0)) {
        xfs_dqfunlock(dqp);
        return 0;
    }
@ -1248,23 +1248,20 @@ xfs_qm_dqflush(
|
|||
*/
|
||||
if (XFS_BUF_ISPINNED(bp)) {
|
||||
trace_xfs_dqflush_force(dqp);
|
||||
xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
|
||||
xfs_log_force(mp, 0);
|
||||
}
|
||||
|
||||
if (flags & XFS_QMOPT_DELWRI) {
|
||||
xfs_bdwrite(mp, bp);
|
||||
} else if (flags & XFS_QMOPT_ASYNC) {
|
||||
error = xfs_bawrite(mp, bp);
|
||||
} else {
|
||||
if (flags & SYNC_WAIT)
|
||||
error = xfs_bwrite(mp, bp);
|
||||
}
|
||||
else
|
||||
xfs_bdwrite(mp, bp);
|
||||
|
||||
trace_xfs_dqflush_done(dqp);
|
||||
|
||||
/*
|
||||
* dqp is still locked, but caller is free to unlock it now.
|
||||
*/
|
||||
return (error);
|
||||
return error;
|
||||
|
||||
}
|
||||
|
||||
|
@ -1445,7 +1442,7 @@ xfs_qm_dqpurge(
|
|||
* We don't care about getting disk errors here. We need
|
||||
* to purge this dquot anyway, so we go ahead regardless.
|
||||
*/
|
||||
error = xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC);
|
||||
error = xfs_qm_dqflush(dqp, SYNC_WAIT);
|
||||
if (error)
|
||||
xfs_fs_cmn_err(CE_WARN, mp,
|
||||
"xfs_qm_dqpurge: dquot %p flush failed", dqp);
|
||||
|
@ -1529,25 +1526,17 @@ xfs_qm_dqflock_pushbuf_wait(
|
|||
* the flush lock when the I/O completes.
|
||||
*/
|
||||
bp = xfs_incore(dqp->q_mount->m_ddev_targp, dqp->q_blkno,
|
||||
XFS_QI_DQCHUNKLEN(dqp->q_mount),
|
||||
XFS_INCORE_TRYLOCK);
|
||||
if (bp != NULL) {
|
||||
if (XFS_BUF_ISDELAYWRITE(bp)) {
|
||||
int error;
|
||||
if (XFS_BUF_ISPINNED(bp)) {
|
||||
xfs_log_force(dqp->q_mount,
|
||||
(xfs_lsn_t)0,
|
||||
XFS_LOG_FORCE);
|
||||
}
|
||||
error = xfs_bawrite(dqp->q_mount, bp);
|
||||
if (error)
|
||||
xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
|
||||
"xfs_qm_dqflock_pushbuf_wait: "
|
||||
"pushbuf error %d on dqp %p, bp %p",
|
||||
error, dqp, bp);
|
||||
} else {
|
||||
xfs_buf_relse(bp);
|
||||
}
|
||||
XFS_QI_DQCHUNKLEN(dqp->q_mount), XBF_TRYLOCK);
|
||||
if (!bp)
|
||||
goto out_lock;
|
||||
|
||||
if (XFS_BUF_ISDELAYWRITE(bp)) {
|
||||
if (XFS_BUF_ISPINNED(bp))
|
||||
xfs_log_force(dqp->q_mount, 0);
|
||||
xfs_buf_delwri_promote(bp);
|
||||
wake_up_process(bp->b_target->bt_task);
|
||||
}
|
||||
xfs_buf_relse(bp);
|
||||
out_lock:
|
||||
xfs_dqflock(dqp);
|
||||
}
|
||||
|
|
|
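The hunks above collapse the old three-way XFS_QMOPT_DELWRI / XFS_QMOPT_ASYNC / sync dispatch into two cases keyed off SYNC_WAIT. A minimal user-space sketch of that convention (not part of the commit; the function and stub names are invented for illustration, only the flag logic mirrors the new code):

#include <stdio.h>

#define SYNC_WAIT (1 << 0)	/* stand-in for the XFS sync flag */

static int write_sync(void)    { puts("synchronous write"); return 0; }
static int write_delayed(void) { puts("queue for delayed write"); return 0; }

/* After this change: flags == 0 means delayed write, SYNC_WAIT means
 * write now and wait. The async middle case is gone. */
static int dqflush_dispatch(int flags)
{
	if (flags & SYNC_WAIT)
		return write_sync();
	return write_delayed();
}

int main(void)
{
	dqflush_dispatch(0);		/* AIL push, shaker, reclaim callers */
	dqflush_dispatch(SYNC_WAIT);	/* purge-style callers */
	return 0;
}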
@@ -74,11 +74,11 @@ xfs_qm_dquot_logitem_format(

logvec->i_addr = (xfs_caddr_t)&logitem->qli_format;
logvec->i_len = sizeof(xfs_dq_logformat_t);
XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_QFORMAT);
logvec->i_type = XLOG_REG_TYPE_QFORMAT;
logvec++;
logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core;
logvec->i_len = sizeof(xfs_disk_dquot_t);
XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_DQUOT);
logvec->i_type = XLOG_REG_TYPE_DQUOT;

ASSERT(2 == logitem->qli_item.li_desc->lid_size);
logitem->qli_format.qlf_size = 2;

@@ -153,7 +153,7 @@ xfs_qm_dquot_logitem_push(
* lock without sleeping, then there must not have been
* anyone in the process of flushing the dquot.
*/
error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
error = xfs_qm_dqflush(dqp, 0);
if (error)
xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
"xfs_qm_dquot_logitem_push: push error %d on dqp %p",

@@ -190,7 +190,7 @@ xfs_qm_dqunpin_wait(
/*
* Give the log a push so we don't wait here too long.
*/
xfs_log_force(dqp->q_mount, (xfs_lsn_t)0, XFS_LOG_FORCE);
xfs_log_force(dqp->q_mount, 0);
wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
}

@@ -212,18 +212,10 @@ xfs_qm_dquot_logitem_pushbuf(
xfs_dquot_t *dqp;
xfs_mount_t *mp;
xfs_buf_t *bp;
uint dopush;

dqp = qip->qli_dquot;
ASSERT(XFS_DQ_IS_LOCKED(dqp));

/*
* The qli_pushbuf_flag keeps others from
* trying to duplicate our effort.
*/
ASSERT(qip->qli_pushbuf_flag != 0);
ASSERT(qip->qli_push_owner == current_pid());

/*
* If flushlock isn't locked anymore, chances are that the
* inode flush completed and the inode was taken off the AIL.

@@ -231,49 +223,20 @@ xfs_qm_dquot_logitem_pushbuf(
*/
if (completion_done(&dqp->q_flush) ||
((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
qip->qli_pushbuf_flag = 0;
xfs_dqunlock(dqp);
return;
}
mp = dqp->q_mount;
bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
XFS_QI_DQCHUNKLEN(mp),
XFS_INCORE_TRYLOCK);
if (bp != NULL) {
if (XFS_BUF_ISDELAYWRITE(bp)) {
dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
!completion_done(&dqp->q_flush));
qip->qli_pushbuf_flag = 0;
xfs_dqunlock(dqp);

if (XFS_BUF_ISPINNED(bp)) {
xfs_log_force(mp, (xfs_lsn_t)0,
XFS_LOG_FORCE);
}
if (dopush) {
int error;
#ifdef XFSRACEDEBUG
delay_for_intr();
delay(300);
#endif
error = xfs_bawrite(mp, bp);
if (error)
xfs_fs_cmn_err(CE_WARN, mp,
"xfs_qm_dquot_logitem_pushbuf: pushbuf error %d on qip %p, bp %p",
error, qip, bp);
} else {
xfs_buf_relse(bp);
}
} else {
qip->qli_pushbuf_flag = 0;
xfs_dqunlock(dqp);
xfs_buf_relse(bp);
}
return;
}

qip->qli_pushbuf_flag = 0;
XFS_QI_DQCHUNKLEN(mp), XBF_TRYLOCK);
xfs_dqunlock(dqp);
if (!bp)
return;
if (XFS_BUF_ISDELAYWRITE(bp))
xfs_buf_delwri_promote(bp);
xfs_buf_relse(bp);
return;

}

/*

@@ -291,50 +254,24 @@ xfs_qm_dquot_logitem_trylock(
xfs_dq_logitem_t *qip)
{
xfs_dquot_t *dqp;
uint retval;

dqp = qip->qli_dquot;
if (atomic_read(&dqp->q_pincount) > 0)
return (XFS_ITEM_PINNED);
return XFS_ITEM_PINNED;

if (! xfs_qm_dqlock_nowait(dqp))
return (XFS_ITEM_LOCKED);
return XFS_ITEM_LOCKED;

retval = XFS_ITEM_SUCCESS;
if (!xfs_dqflock_nowait(dqp)) {
/*
* The dquot is already being flushed. It may have been
* flushed delayed write, however, and we don't want to
* get stuck waiting for that to complete. So, we want to check
* to see if we can lock the dquot's buffer without sleeping.
* If we can and it is marked for delayed write, then we
* hold it and send it out from the push routine. We don't
* want to do that now since we might sleep in the device
* strategy routine. We also don't want to grab the buffer lock
* here because we'd like not to call into the buffer cache
* while holding the AIL lock.
* Make sure to only return PUSHBUF if we set pushbuf_flag
* ourselves. If someone else is doing it then we don't
* want to go to the push routine and duplicate their efforts.
* dquot has already been flushed to the backing buffer,
* leave it locked, pushbuf routine will unlock it.
*/
if (qip->qli_pushbuf_flag == 0) {
qip->qli_pushbuf_flag = 1;
ASSERT(qip->qli_format.qlf_blkno == dqp->q_blkno);
#ifdef DEBUG
qip->qli_push_owner = current_pid();
#endif
/*
* The dquot is left locked.
*/
retval = XFS_ITEM_PUSHBUF;
} else {
retval = XFS_ITEM_FLUSHING;
xfs_dqunlock_nonotify(dqp);
}
return XFS_ITEM_PUSHBUF;
}

ASSERT(qip->qli_item.li_flags & XFS_LI_IN_AIL);
return (retval);
return XFS_ITEM_SUCCESS;
}

@@ -467,7 +404,7 @@ xfs_qm_qoff_logitem_format(xfs_qoff_logitem_t *qf,

log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format);
log_vector->i_len = sizeof(xfs_qoff_logitem_t);
XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_QUOTAOFF);
log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF;
qf->qql_format.qf_size = 1;
}
@@ -27,10 +27,6 @@ typedef struct xfs_dq_logitem {
xfs_log_item_t qli_item; /* common portion */
struct xfs_dquot *qli_dquot; /* dquot ptr */
xfs_lsn_t qli_flush_lsn; /* lsn at last flush */
unsigned short qli_pushbuf_flag; /* 1 bit used in push_ail */
#ifdef DEBUG
uint64_t qli_push_owner;
#endif
xfs_dq_logformat_t qli_format; /* logged structure */
} xfs_dq_logitem_t;
@@ -118,9 +118,14 @@ xfs_Gqm_init(void)
*/
udqhash = kmem_zalloc_greedy(&hsize,
XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t),
KM_SLEEP | KM_MAYFAIL | KM_LARGE);
gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t));
if (!udqhash)
goto out;

gdqhash = kmem_zalloc_large(hsize);
if (!gdqhash)
goto out_free_udqhash;

hsize /= sizeof(xfs_dqhash_t);
ndquot = hsize << 8;

@@ -170,6 +175,11 @@ xfs_Gqm_init(void)
mutex_init(&qcheck_lock);
#endif
return xqm;

out_free_udqhash:
kmem_free_large(udqhash);
out:
return NULL;
}

/*

@@ -189,8 +199,8 @@ xfs_qm_destroy(
xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i]));
}
kmem_free(xqm->qm_usr_dqhtable);
kmem_free(xqm->qm_grp_dqhtable);
kmem_free_large(xqm->qm_usr_dqhtable);
kmem_free_large(xqm->qm_grp_dqhtable);
xqm->qm_usr_dqhtable = NULL;
xqm->qm_grp_dqhtable = NULL;
xqm->qm_dqhashmask = 0;

@@ -219,8 +229,12 @@ xfs_qm_hold_quotafs_ref(
*/
mutex_lock(&xfs_Gqm_lock);

if (xfs_Gqm == NULL)
if (!xfs_Gqm) {
xfs_Gqm = xfs_Gqm_init();
if (!xfs_Gqm)
return ENOMEM;
}

/*
* We can keep a list of all filesystems with quotas mounted for
* debugging and statistical purposes, but ...

@@ -436,7 +450,7 @@ xfs_qm_unmount_quotas(
STATIC int
xfs_qm_dqflush_all(
xfs_mount_t *mp,
int flags)
int sync_mode)
{
int recl;
xfs_dquot_t *dqp;

@@ -472,7 +486,7 @@ again:
* across a disk write.
*/
xfs_qm_mplist_unlock(mp);
error = xfs_qm_dqflush(dqp, flags);
error = xfs_qm_dqflush(dqp, sync_mode);
xfs_dqunlock(dqp);
if (error)
return error;

@@ -912,13 +926,11 @@ xfs_qm_sync(
{
int recl, restarts;
xfs_dquot_t *dqp;
uint flush_flags;
int error;

if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
return 0;

flush_flags = (flags & SYNC_WAIT) ? XFS_QMOPT_SYNC : XFS_QMOPT_DELWRI;
restarts = 0;

again:

@@ -978,7 +990,7 @@ xfs_qm_sync(
* across a disk write
*/
xfs_qm_mplist_unlock(mp);
error = xfs_qm_dqflush(dqp, flush_flags);
error = xfs_qm_dqflush(dqp, flags);
xfs_dqunlock(dqp);
if (error && XFS_FORCED_SHUTDOWN(mp))
return 0; /* Need to prevent umount failure */

@@ -1782,7 +1794,7 @@ xfs_qm_quotacheck(
* successfully.
*/
if (!error)
error = xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI);
error = xfs_qm_dqflush_all(mp, 0);

/*
* We can get this error if we couldn't do a dquot allocation inside

@@ -2004,7 +2016,7 @@ xfs_qm_shake_freelist(
* We flush it delayed write, so don't bother
* releasing the mplock.
*/
error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
error = xfs_qm_dqflush(dqp, 0);
if (error) {
xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
"xfs_qm_dqflush_all: dquot %p flush failed", dqp);

@@ -2187,7 +2199,7 @@ xfs_qm_dqreclaim_one(void)
* We flush it delayed write, so don't bother
* releasing the freelist lock.
*/
error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
error = xfs_qm_dqflush(dqp, 0);
if (error) {
xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
"xfs_qm_dqreclaim: dquot %p flush failed", dqp);
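The xfs_qm_hold_quotafs_ref() hunk above switches to a lazy-init shape where the init routine may now fail, so the caller must check for NULL and propagate ENOMEM. A user-space sketch of that shape (not from the commit; names are invented stand-ins, mocked with pthreads, and unlike the kernel hunk this sketch drops the lock on the failure path):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t gqm_lock = PTHREAD_MUTEX_INITIALIZER;
static void *gqm;			/* stands in for xfs_Gqm */

static void *gqm_init(void)		/* stands in for xfs_Gqm_init() */
{
	return calloc(1, 64);		/* may return NULL, like the real init */
}

static int hold_quotafs_ref(void)
{
	pthread_mutex_lock(&gqm_lock);
	if (!gqm) {
		gqm = gqm_init();
		if (!gqm) {
			pthread_mutex_unlock(&gqm_lock);
			return ENOMEM;
		}
	}
	/* ... take the reference under the lock ... */
	pthread_mutex_unlock(&gqm_lock);
	return 0;
}

int main(void) { return hold_quotafs_ref(); }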
@@ -59,7 +59,7 @@ xfs_fill_statvfs_from_dquot(
be64_to_cpu(dp->d_blk_hardlimit);
if (limit && statp->f_blocks > limit) {
statp->f_blocks = limit;
statp->f_bfree =
statp->f_bfree = statp->f_bavail =
(statp->f_blocks > be64_to_cpu(dp->d_bcount)) ?
(statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0;
}
@@ -1192,9 +1192,9 @@ xfs_qm_internalqcheck(
if (! XFS_IS_QUOTA_ON(mp))
return XFS_ERROR(ESRCH);

xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
xfs_log_force(mp, XFS_LOG_SYNC);
XFS_bflush(mp->m_ddev_targp);
xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
xfs_log_force(mp, XFS_LOG_SYNC);
XFS_bflush(mp->m_ddev_targp);

mutex_lock(&qcheck_lock);
@@ -589,12 +589,18 @@ xfs_trans_unreserve_and_mod_dquots(
}
}

STATIC int
xfs_quota_error(uint flags)
STATIC void
xfs_quota_warn(
struct xfs_mount *mp,
struct xfs_dquot *dqp,
int type)
{
if (flags & XFS_QMOPT_ENOSPC)
return ENOSPC;
return EDQUOT;
/* no warnings for project quotas - we just return ENOSPC later */
if (dqp->dq_flags & XFS_DQ_PROJ)
return;
quota_send_warning((dqp->dq_flags & XFS_DQ_USER) ? USRQUOTA : GRPQUOTA,
be32_to_cpu(dqp->q_core.d_id), mp->m_super->s_dev,
type);
}

/*

@@ -612,7 +618,6 @@ xfs_trans_dqresv(
long ninos,
uint flags)
{
int error;
xfs_qcnt_t hardlimit;
xfs_qcnt_t softlimit;
time_t timer;

@@ -649,7 +654,6 @@ xfs_trans_dqresv(
warnlimit = XFS_QI_RTBWARNLIMIT(dqp->q_mount);
resbcountp = &dqp->q_res_rtbcount;
}
error = 0;

if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
dqp->q_core.d_id &&

@@ -667,18 +671,20 @@ xfs_trans_dqresv(
* nblks.
*/
if (hardlimit > 0ULL &&
(hardlimit <= nblks + *resbcountp)) {
error = xfs_quota_error(flags);
hardlimit <= nblks + *resbcountp) {
xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN);
goto error_return;
}

if (softlimit > 0ULL &&
(softlimit <= nblks + *resbcountp)) {
softlimit <= nblks + *resbcountp) {
if ((timer != 0 && get_seconds() > timer) ||
(warns != 0 && warns >= warnlimit)) {
error = xfs_quota_error(flags);
xfs_quota_warn(mp, dqp,
QUOTA_NL_BSOFTLONGWARN);
goto error_return;
}

xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN);
}
}
if (ninos > 0) {

@@ -692,15 +698,19 @@ xfs_trans_dqresv(
softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
if (!softlimit)
softlimit = q->qi_isoftlimit;

if (hardlimit > 0ULL && count >= hardlimit) {
error = xfs_quota_error(flags);
xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
goto error_return;
} else if (softlimit > 0ULL && count >= softlimit) {
if ((timer != 0 && get_seconds() > timer) ||
}
if (softlimit > 0ULL && count >= softlimit) {
if ((timer != 0 && get_seconds() > timer) ||
(warns != 0 && warns >= warnlimit)) {
error = xfs_quota_error(flags);
xfs_quota_warn(mp, dqp,
QUOTA_NL_ISOFTLONGWARN);
goto error_return;
}
xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN);
}
}
}

@@ -736,9 +746,14 @@ xfs_trans_dqresv(
ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));

xfs_dqunlock(dqp);
return 0;

error_return:
xfs_dqunlock(dqp);
return error;
if (flags & XFS_QMOPT_ENOSPC)
return ENOSPC;
return EDQUOT;
}
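The xfs_trans_dqresv() hunks above replace per-site error selection with a warning at each limit check and a single ENOSPC-vs-EDQUOT decision at error_return. A compact user-space sketch of that control flow (not from the commit; all names and the stderr warning are invented stand-ins for the netlink quota warnings):

#include <errno.h>
#include <stdio.h>

#define QMOPT_ENOSPC (1 << 0)	/* project quota: report ENOSPC */

static void quota_warn(const char *type)
{
	fprintf(stderr, "quota warning: %s\n", type);
}

static int dqresv(long nblks, long resbcount, long hardlimit, int flags)
{
	if (hardlimit > 0 && hardlimit <= nblks + resbcount) {
		quota_warn("BHARDWARN");
		goto error_return;
	}
	/* ... soft limit and inode checks elided ... */
	return 0;

error_return:
	/* one place decides which errno the caller sees */
	if (flags & QMOPT_ENOSPC)
		return ENOSPC;
	return EDQUOT;
}

int main(void)
{
	printf("user quota over limit -> %d\n", dqresv(10, 95, 100, 0));
	printf("proj quota over limit -> %d\n", dqresv(10, 95, 100, QMOPT_ENOSPC));
	return 0;
}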
@@ -36,8 +36,8 @@ struct xfs_acl {
};

/* On-disk XFS extended attribute names */
#define SGI_ACL_FILE "SGI_ACL_FILE"
#define SGI_ACL_DEFAULT "SGI_ACL_DEFAULT"
#define SGI_ACL_FILE (unsigned char *)"SGI_ACL_FILE"
#define SGI_ACL_DEFAULT (unsigned char *)"SGI_ACL_DEFAULT"
#define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1)
#define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1)
@@ -187,17 +187,13 @@ typedef struct xfs_perag_busy {
/*
* Per-ag incore structure, copies of information in agf and agi,
* to improve the performance of allocation group selection.
*
* pick sizes which fit in allocation buckets well
*/
#if (BITS_PER_LONG == 32)
#define XFS_PAGB_NUM_SLOTS 84
#elif (BITS_PER_LONG == 64)
#define XFS_PAGB_NUM_SLOTS 128
#endif

typedef struct xfs_perag
{
typedef struct xfs_perag {
struct xfs_mount *pag_mount; /* owner filesystem */
xfs_agnumber_t pag_agno; /* AG this structure belongs to */
atomic_t pag_ref; /* perag reference count */
char pagf_init; /* this agf's entry is initialized */
char pagi_init; /* this agi's entry is initialized */
char pagf_metadata; /* the agf is preferred to be metadata */

@@ -210,8 +206,6 @@ typedef struct xfs_perag
__uint32_t pagf_btreeblks; /* # of blocks held in AGF btrees */
xfs_agino_t pagi_freecount; /* number of free inodes */
xfs_agino_t pagi_count; /* number of allocated inodes */
int pagb_count; /* pagb slots in use */
xfs_perag_busy_t *pagb_list; /* unstable blocks */

/*
* Inode allocation search lookup optimisation.

@@ -230,6 +224,8 @@ typedef struct xfs_perag
rwlock_t pag_ici_lock; /* incore inode lock */
struct radix_tree_root pag_ici_root; /* incore inode cache root */
#endif
int pagb_count; /* pagb slots in use */
xfs_perag_busy_t pagb_list[XFS_PAGB_NUM_SLOTS]; /* unstable blocks */
} xfs_perag_t;

/*
@@ -1662,11 +1662,13 @@ xfs_free_ag_extent(
xfs_agf_t *agf;
xfs_perag_t *pag; /* per allocation group data */

pag = xfs_perag_get(mp, agno);
pag->pagf_freeblks += len;
xfs_perag_put(pag);

agf = XFS_BUF_TO_AGF(agbp);
pag = &mp->m_perag[agno];
be32_add_cpu(&agf->agf_freeblks, len);
xfs_trans_agblocks_delta(tp, len);
pag->pagf_freeblks += len;
XFS_WANT_CORRUPTED_GOTO(
be32_to_cpu(agf->agf_freeblks) <=
be32_to_cpu(agf->agf_length),

@@ -1969,10 +1971,12 @@ xfs_alloc_get_freelist(
xfs_trans_brelse(tp, agflbp);
if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
agf->agf_flfirst = 0;
pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)];

pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
be32_add_cpu(&agf->agf_flcount, -1);
xfs_trans_agflist_delta(tp, -1);
pag->pagf_flcount--;
xfs_perag_put(pag);

logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
if (btreeblk) {

@@ -2078,7 +2082,8 @@ xfs_alloc_put_freelist(
be32_add_cpu(&agf->agf_fllast, 1);
if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
agf->agf_fllast = 0;
pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)];

pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
be32_add_cpu(&agf->agf_flcount, 1);
xfs_trans_agflist_delta(tp, 1);
pag->pagf_flcount++;

@@ -2089,6 +2094,7 @@ xfs_alloc_put_freelist(
pag->pagf_btreeblks--;
logflags |= XFS_AGF_BTREEBLKS;
}
xfs_perag_put(pag);

xfs_alloc_log_agf(tp, agbp, logflags);

@@ -2152,7 +2158,6 @@ xfs_read_agf(
xfs_trans_brelse(tp, *bpp);
return XFS_ERROR(EFSCORRUPTED);
}

XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_AGF, XFS_AGF_REF);
return 0;
}

@@ -2175,7 +2180,7 @@ xfs_alloc_read_agf(
ASSERT(agno != NULLAGNUMBER);

error = xfs_read_agf(mp, tp, agno,
(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XFS_BUF_TRYLOCK : 0,
(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
bpp);
if (error)
return error;

@@ -2184,7 +2189,7 @@ xfs_alloc_read_agf(
ASSERT(!XFS_BUF_GETERROR(*bpp));

agf = XFS_BUF_TO_AGF(*bpp);
pag = &mp->m_perag[agno];
pag = xfs_perag_get(mp, agno);
if (!pag->pagf_init) {
pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);

@@ -2195,8 +2200,8 @@ xfs_alloc_read_agf(
pag->pagf_levels[XFS_BTNUM_CNTi] =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
spin_lock_init(&pag->pagb_lock);
pag->pagb_list = kmem_zalloc(XFS_PAGB_NUM_SLOTS *
sizeof(xfs_perag_busy_t), KM_SLEEP);
pag->pagb_count = 0;
memset(pag->pagb_list, 0, sizeof(pag->pagb_list));
pag->pagf_init = 1;
}
#ifdef DEBUG

@@ -2211,6 +2216,7 @@ xfs_alloc_read_agf(
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
}
#endif
xfs_perag_put(pag);
return 0;
}

@@ -2270,8 +2276,7 @@ xfs_alloc_vextent(
* These three force us into a single a.g.
*/
args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
down_read(&mp->m_peraglock);
args->pag = &mp->m_perag[args->agno];
args->pag = xfs_perag_get(mp, args->agno);
args->minleft = 0;
error = xfs_alloc_fix_freelist(args, 0);
args->minleft = minleft;

@@ -2280,14 +2285,12 @@ xfs_alloc_vextent(
goto error0;
}
if (!args->agbp) {
up_read(&mp->m_peraglock);
trace_xfs_alloc_vextent_noagbp(args);
break;
}
args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
if ((error = xfs_alloc_ag_vextent(args)))
goto error0;
up_read(&mp->m_peraglock);
break;
case XFS_ALLOCTYPE_START_BNO:
/*

@@ -2339,9 +2342,8 @@ xfs_alloc_vextent(
* Loop over allocation groups twice; first time with
* trylock set, second time without.
*/
down_read(&mp->m_peraglock);
for (;;) {
args->pag = &mp->m_perag[args->agno];
args->pag = xfs_perag_get(mp, args->agno);
if (no_min) args->minleft = 0;
error = xfs_alloc_fix_freelist(args, flags);
args->minleft = minleft;

@@ -2400,8 +2402,8 @@ xfs_alloc_vextent(
}
}
}
xfs_perag_put(args->pag);
}
up_read(&mp->m_peraglock);
if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
if (args->agno == sagno)
mp->m_agfrotor = (mp->m_agfrotor + 1) %

@@ -2427,9 +2429,10 @@ xfs_alloc_vextent(
args->len);
#endif
}
xfs_perag_put(args->pag);
return 0;
error0:
up_read(&mp->m_peraglock);
xfs_perag_put(args->pag);
return error;
}

@@ -2454,8 +2457,7 @@ xfs_free_extent(
args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
ASSERT(args.agno < args.mp->m_sb.sb_agcount);
args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
down_read(&args.mp->m_peraglock);
args.pag = &args.mp->m_perag[args.agno];
args.pag = xfs_perag_get(args.mp, args.agno);
if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING)))
goto error0;
#ifdef DEBUG

@@ -2465,7 +2467,7 @@ xfs_free_extent(
#endif
error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
error0:
up_read(&args.mp->m_peraglock);
xfs_perag_put(args.pag);
return error;
}

@@ -2486,15 +2488,15 @@ xfs_alloc_mark_busy(xfs_trans_t *tp,
xfs_agblock_t bno,
xfs_extlen_t len)
{
xfs_mount_t *mp;
xfs_perag_busy_t *bsy;
struct xfs_perag *pag;
int n;

mp = tp->t_mountp;
spin_lock(&mp->m_perag[agno].pagb_lock);
pag = xfs_perag_get(tp->t_mountp, agno);
spin_lock(&pag->pagb_lock);

/* search pagb_list for an open slot */
for (bsy = mp->m_perag[agno].pagb_list, n = 0;
for (bsy = pag->pagb_list, n = 0;
n < XFS_PAGB_NUM_SLOTS;
bsy++, n++) {
if (bsy->busy_tp == NULL) {

@@ -2502,11 +2504,11 @@ xfs_alloc_mark_busy(xfs_trans_t *tp,
}
}

trace_xfs_alloc_busy(mp, agno, bno, len, n);
trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len, n);

if (n < XFS_PAGB_NUM_SLOTS) {
bsy = &mp->m_perag[agno].pagb_list[n];
mp->m_perag[agno].pagb_count++;
bsy = &pag->pagb_list[n];
pag->pagb_count++;
bsy->busy_start = bno;
bsy->busy_length = len;
bsy->busy_tp = tp;

@@ -2521,7 +2523,8 @@ xfs_alloc_mark_busy(xfs_trans_t *tp,
xfs_trans_set_sync(tp);
}

spin_unlock(&mp->m_perag[agno].pagb_lock);
spin_unlock(&pag->pagb_lock);
xfs_perag_put(pag);
}

void

@@ -2529,24 +2532,23 @@ xfs_alloc_clear_busy(xfs_trans_t *tp,
xfs_agnumber_t agno,
int idx)
{
xfs_mount_t *mp;
struct xfs_perag *pag;
xfs_perag_busy_t *list;

mp = tp->t_mountp;

spin_lock(&mp->m_perag[agno].pagb_lock);
list = mp->m_perag[agno].pagb_list;

ASSERT(idx < XFS_PAGB_NUM_SLOTS);
pag = xfs_perag_get(tp->t_mountp, agno);
spin_lock(&pag->pagb_lock);
list = pag->pagb_list;

trace_xfs_alloc_unbusy(mp, agno, idx, list[idx].busy_tp == tp);
trace_xfs_alloc_unbusy(tp->t_mountp, agno, idx, list[idx].busy_tp == tp);

if (list[idx].busy_tp == tp) {
list[idx].busy_tp = NULL;
mp->m_perag[agno].pagb_count--;
pag->pagb_count--;
}

spin_unlock(&mp->m_perag[agno].pagb_lock);
spin_unlock(&pag->pagb_lock);
xfs_perag_put(pag);
}

@@ -2560,17 +2562,15 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
xfs_agblock_t bno,
xfs_extlen_t len)
{
xfs_mount_t *mp;
struct xfs_perag *pag;
xfs_perag_busy_t *bsy;
xfs_agblock_t uend, bend;
xfs_lsn_t lsn = 0;
int cnt;

mp = tp->t_mountp;

spin_lock(&mp->m_perag[agno].pagb_lock);

uend = bno + len - 1;
pag = xfs_perag_get(tp->t_mountp, agno);
spin_lock(&pag->pagb_lock);
cnt = pag->pagb_count;

/*
* search pagb_list for this slot, skipping open slots. We have to

@@ -2578,8 +2578,9 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
* we have to get the most recent LSN for the log force to push out
* all the transactions that span the range.
*/
for (cnt = 0; cnt < mp->m_perag[agno].pagb_count; cnt++) {
bsy = &mp->m_perag[agno].pagb_list[cnt];
uend = bno + len - 1;
for (cnt = 0; cnt < pag->pagb_count; cnt++) {
bsy = &pag->pagb_list[cnt];
if (!bsy->busy_tp)
continue;

@@ -2591,7 +2592,8 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
if (XFS_LSN_CMP(bsy->busy_tp->t_commit_lsn, lsn) > 0)
lsn = bsy->busy_tp->t_commit_lsn;
}
spin_unlock(&mp->m_perag[agno].pagb_lock);
spin_unlock(&pag->pagb_lock);
xfs_perag_put(pag);
trace_xfs_alloc_busysearch(tp->t_mountp, agno, bno, len, lsn);

/*

@@ -2599,5 +2601,5 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
* transaction that freed the block
*/
if (lsn)
xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC);
xfs_log_force_lsn(tp->t_mountp, lsn, XFS_LOG_SYNC);
}
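The xfs_alloc.c hunks above repeatedly replace down_read(&mp->m_peraglock) plus direct m_perag[agno] indexing with xfs_perag_get()/xfs_perag_put() reference counting. A minimal user-space sketch of that get/put pattern using C11 atomics (not from the commit; all names are invented stand-ins):

#include <stdatomic.h>
#include <stdio.h>

struct perag {
	atomic_int ref;		/* stands in for pag_ref */
	long freeblks;		/* stands in for pagf_freeblks */
};

static struct perag perag_table[4];	/* stands in for mp->m_perag[] */

static struct perag *perag_get(int agno)
{
	struct perag *pag = &perag_table[agno];

	atomic_fetch_add(&pag->ref, 1);	/* pin the structure */
	return pag;
}

static void perag_put(struct perag *pag)
{
	atomic_fetch_sub(&pag->ref, 1);	/* drop the pin */
}

int main(void)
{
	struct perag *pag = perag_get(2);

	pag->freeblks += 16;		/* use it only while pinned */
	perag_put(pag);
	printf("ag 2 freeblks now %ld\n", perag_table[2].freeblks);
	return 0;
}

The design point: a per-structure reference count lets each caller pin only the AG it touches, instead of every caller serializing on one mount-wide rwsem.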
@@ -61,12 +61,14 @@ xfs_allocbt_set_root(
struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
int btnum = cur->bc_btnum;
struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno);

ASSERT(ptr->s != 0);

agf->agf_roots[btnum] = ptr->s;
be32_add_cpu(&agf->agf_levels[btnum], inc);
cur->bc_mp->m_perag[seqno].pagf_levels[btnum] += inc;
pag->pagf_levels[btnum] += inc;
xfs_perag_put(pag);

xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}

@@ -150,6 +152,7 @@ xfs_allocbt_update_lastrec(
{
struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
struct xfs_perag *pag;
__be32 len;
int numrecs;

@@ -193,7 +196,9 @@ xfs_allocbt_update_lastrec(
}

agf->agf_longest = len;
cur->bc_mp->m_perag[seqno].pagf_longest = be32_to_cpu(len);
pag = xfs_perag_get(cur->bc_mp, seqno);
pag->pagf_longest = be32_to_cpu(len);
xfs_perag_put(pag);
xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}
@@ -93,12 +93,12 @@ STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args);
STATIC int
xfs_attr_name_to_xname(
struct xfs_name *xname,
const char *aname)
const unsigned char *aname)
{
if (!aname)
return EINVAL;
xname->name = aname;
xname->len = strlen(aname);
xname->len = strlen((char *)aname);
if (xname->len >= MAXNAMELEN)
return EFAULT; /* match IRIX behaviour */

@@ -124,7 +124,7 @@ STATIC int
xfs_attr_get_int(
struct xfs_inode *ip,
struct xfs_name *name,
char *value,
unsigned char *value,
int *valuelenp,
int flags)
{

@@ -171,8 +171,8 @@ xfs_attr_get_int(
int
xfs_attr_get(
xfs_inode_t *ip,
const char *name,
char *value,
const unsigned char *name,
unsigned char *value,
int *valuelenp,
int flags)
{

@@ -197,7 +197,7 @@ xfs_attr_get(
/*
* Calculate how many blocks we need for the new attribute,
*/
int
STATIC int
xfs_attr_calc_size(
struct xfs_inode *ip,
int namelen,

@@ -235,8 +235,12 @@ xfs_attr_calc_size(
}

STATIC int
xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
char *value, int valuelen, int flags)
xfs_attr_set_int(
struct xfs_inode *dp,
struct xfs_name *name,
unsigned char *value,
int valuelen,
int flags)
{
xfs_da_args_t args;
xfs_fsblock_t firstblock;

@@ -452,8 +456,8 @@ out:
int
xfs_attr_set(
xfs_inode_t *dp,
const char *name,
char *value,
const unsigned char *name,
unsigned char *value,
int valuelen,
int flags)
{

@@ -600,7 +604,7 @@ out:
int
xfs_attr_remove(
xfs_inode_t *dp,
const char *name,
const unsigned char *name,
int flags)
{
int error;

@@ -669,9 +673,13 @@ xfs_attr_list_int(xfs_attr_list_context_t *context)
*/
/*ARGSUSED*/
STATIC int
xfs_attr_put_listent(xfs_attr_list_context_t *context, int flags,
char *name, int namelen,
int valuelen, char *value)
xfs_attr_put_listent(
xfs_attr_list_context_t *context,
int flags,
unsigned char *name,
int namelen,
int valuelen,
unsigned char *value)
{
struct attrlist *alist = (struct attrlist *)context->alist;
attrlist_ent_t *aep;

@@ -1980,7 +1988,7 @@ xfs_attr_rmtval_get(xfs_da_args_t *args)
xfs_bmbt_irec_t map[ATTR_RMTVALUE_MAPSIZE];
xfs_mount_t *mp;
xfs_daddr_t dblkno;
xfs_caddr_t dst;
void *dst;
xfs_buf_t *bp;
int nmap, error, tmp, valuelen, blkcnt, i;
xfs_dablk_t lblkno;

@@ -2007,15 +2015,14 @@ xfs_attr_rmtval_get(xfs_da_args_t *args)
dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno,
blkcnt,
XFS_BUF_LOCK | XBF_DONT_BLOCK,
blkcnt, XBF_LOCK | XBF_DONT_BLOCK,
&bp);
if (error)
return(error);

tmp = (valuelen < XFS_BUF_SIZE(bp))
? valuelen : XFS_BUF_SIZE(bp);
xfs_biomove(bp, 0, tmp, dst, XFS_B_READ);
xfs_biomove(bp, 0, tmp, dst, XBF_READ);
xfs_buf_relse(bp);
dst += tmp;
valuelen -= tmp;

@@ -2039,7 +2046,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
xfs_inode_t *dp;
xfs_bmbt_irec_t map;
xfs_daddr_t dblkno;
xfs_caddr_t src;
void *src;
xfs_buf_t *bp;
xfs_dablk_t lblkno;
int blkcnt, valuelen, nmap, error, tmp, committed;

@@ -2141,13 +2148,13 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);

bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt,
XFS_BUF_LOCK | XBF_DONT_BLOCK);
XBF_LOCK | XBF_DONT_BLOCK);
ASSERT(bp);
ASSERT(!XFS_BUF_GETERROR(bp));

tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen :
XFS_BUF_SIZE(bp);
xfs_biomove(bp, 0, tmp, src, XFS_B_WRITE);
xfs_biomove(bp, 0, tmp, src, XBF_WRITE);
if (tmp < XFS_BUF_SIZE(bp))
xfs_biozero(bp, tmp, XFS_BUF_SIZE(bp) - tmp);
if ((error = xfs_bwrite(mp, bp))) {/* GROT: NOTE: synchronous write */

@@ -2208,8 +2215,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
/*
* If the "remote" value is in the cache, remove it.
*/
bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt,
XFS_INCORE_TRYLOCK);
bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt, XBF_TRYLOCK);
if (bp) {
XFS_BUF_STALE(bp);
XFS_BUF_UNDELAYWRITE(bp);
@@ -113,7 +113,7 @@ typedef struct attrlist_cursor_kern {

typedef int (*put_listent_func_t)(struct xfs_attr_list_context *, int,
char *, int, int, char *);
unsigned char *, int, int, unsigned char *);

typedef struct xfs_attr_list_context {
struct xfs_inode *dp; /* inode */

@@ -139,7 +139,6 @@ typedef struct xfs_attr_list_context {
/*
* Overall external interface routines.
*/
int xfs_attr_calc_size(struct xfs_inode *, int, int, int *);
int xfs_attr_inactive(struct xfs_inode *dp);
int xfs_attr_rmtval_get(struct xfs_da_args *args);
int xfs_attr_list_int(struct xfs_attr_list_context *);
@@ -521,11 +521,11 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)

sfe = &sf->list[0];
for (i = 0; i < sf->hdr.count; i++) {
nargs.name = (char *)sfe->nameval;
nargs.name = sfe->nameval;
nargs.namelen = sfe->namelen;
nargs.value = (char *)&sfe->nameval[nargs.namelen];
nargs.value = &sfe->nameval[nargs.namelen];
nargs.valuelen = sfe->valuelen;
nargs.hashval = xfs_da_hashname((char *)sfe->nameval,
nargs.hashval = xfs_da_hashname(sfe->nameval,
sfe->namelen);
nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(sfe->flags);
error = xfs_attr_leaf_lookup_int(bp, &nargs); /* set a->index */

@@ -612,10 +612,10 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
error = context->put_listent(context,
sfe->flags,
(char *)sfe->nameval,
sfe->nameval,
(int)sfe->namelen,
(int)sfe->valuelen,
(char*)&sfe->nameval[sfe->namelen]);
&sfe->nameval[sfe->namelen]);

/*
* Either search callback finished early or

@@ -659,8 +659,8 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
}

sbp->entno = i;
sbp->hash = xfs_da_hashname((char *)sfe->nameval, sfe->namelen);
sbp->name = (char *)sfe->nameval;
sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
sbp->name = sfe->nameval;
sbp->namelen = sfe->namelen;
/* These are bytes, and both on-disk, don't endian-flip */
sbp->valuelen = sfe->valuelen;

@@ -818,9 +818,9 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
continue;
ASSERT(entry->flags & XFS_ATTR_LOCAL);
name_loc = xfs_attr_leaf_name_local(leaf, i);
nargs.name = (char *)name_loc->nameval;
nargs.name = name_loc->nameval;
nargs.namelen = name_loc->namelen;
nargs.value = (char *)&name_loc->nameval[nargs.namelen];
nargs.value = &name_loc->nameval[nargs.namelen];
nargs.valuelen = be16_to_cpu(name_loc->valuelen);
nargs.hashval = be32_to_cpu(entry->hashval);
nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(entry->flags);

@@ -2370,10 +2370,10 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)

retval = context->put_listent(context,
entry->flags,
(char *)name_loc->nameval,
name_loc->nameval,
(int)name_loc->namelen,
be16_to_cpu(name_loc->valuelen),
(char *)&name_loc->nameval[name_loc->namelen]);
&name_loc->nameval[name_loc->namelen]);
if (retval)
return retval;
} else {

@@ -2397,15 +2397,15 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
return retval;
retval = context->put_listent(context,
entry->flags,
(char *)name_rmt->name,
name_rmt->name,
(int)name_rmt->namelen,
valuelen,
(char*)args.value);
args.value);
kmem_free(args.value);
} else {
retval = context->put_listent(context,
entry->flags,
(char *)name_rmt->name,
name_rmt->name,
(int)name_rmt->namelen,
valuelen,
NULL);

@@ -2950,7 +2950,7 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
map.br_blockcount);
bp = xfs_trans_get_buf(*trans,
dp->i_mount->m_ddev_targp,
dblkno, dblkcnt, XFS_BUF_LOCK);
dblkno, dblkcnt, XBF_LOCK);
xfs_trans_binval(*trans, bp);
/*
* Roll to next transaction.
@@ -52,7 +52,7 @@ typedef struct xfs_attr_sf_sort {
__uint8_t valuelen; /* length of value */
__uint8_t flags; /* flags bits (see xfs_attr_leaf.h) */
xfs_dahash_t hash; /* this entry's hash value */
char *name; /* name value, pointer into buffer */
unsigned char *name; /* name value, pointer into buffer */
} xfs_attr_sf_sort_t;

#define XFS_ATTR_SF_ENTSIZE_BYNAME(nlen,vlen) /* space name/value uses */ \
@@ -2629,13 +2629,12 @@ xfs_bmap_btalloc(
if (startag == NULLAGNUMBER)
startag = ag = 0;
notinit = 0;
down_read(&mp->m_peraglock);
pag = xfs_perag_get(mp, ag);
while (blen < ap->alen) {
pag = &mp->m_perag[ag];
if (!pag->pagf_init &&
(error = xfs_alloc_pagf_init(mp, args.tp,
ag, XFS_ALLOC_FLAG_TRYLOCK))) {
up_read(&mp->m_peraglock);
xfs_perag_put(pag);
return error;
}
/*

@@ -2667,13 +2666,13 @@ xfs_bmap_btalloc(
break;

error = xfs_filestream_new_ag(ap, &ag);
if (error) {
up_read(&mp->m_peraglock);
xfs_perag_put(pag);
if (error)
return error;
}

/* loop again to set 'blen'*/
startag = NULLAGNUMBER;
pag = xfs_perag_get(mp, ag);
continue;
}
}

@@ -2681,8 +2680,10 @@ xfs_bmap_btalloc(
ag = 0;
if (ag == startag)
break;
xfs_perag_put(pag);
pag = xfs_perag_get(mp, ag);
}
up_read(&mp->m_peraglock);
xfs_perag_put(pag);
/*
* Since the above loop did a BUF_TRYLOCK, it is
* possible that there is space for this request.

@@ -4470,7 +4471,7 @@ xfs_bmapi(
xfs_fsblock_t abno; /* allocated block number */
xfs_extlen_t alen; /* allocated extent length */
xfs_fileoff_t aoff; /* allocated file offset */
xfs_bmalloca_t bma; /* args for xfs_bmap_alloc */
xfs_bmalloca_t bma = { 0 }; /* args for xfs_bmap_alloc */
xfs_btree_cur_t *cur; /* bmap btree cursor */
xfs_fileoff_t end; /* end of mapped file region */
int eof; /* we've hit the end of extents */
@@ -334,7 +334,7 @@ xfs_bmbt_disk_set_allf(
/*
* Set all the fields in a bmap extent record from the uncompressed form.
*/
void
STATIC void
xfs_bmbt_disk_set_all(
xfs_bmbt_rec_t *r,
xfs_bmbt_irec_t *s)
@@ -223,7 +223,6 @@ extern void xfs_bmbt_set_startblock(xfs_bmbt_rec_host_t *r, xfs_fsblock_t v);
extern void xfs_bmbt_set_startoff(xfs_bmbt_rec_host_t *r, xfs_fileoff_t v);
extern void xfs_bmbt_set_state(xfs_bmbt_rec_host_t *r, xfs_exntst_t v);

extern void xfs_bmbt_disk_set_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s);
extern void xfs_bmbt_disk_set_allf(xfs_bmbt_rec_t *r, xfs_fileoff_t o,
xfs_fsblock_t b, xfs_filblks_t c, xfs_exntst_t v);
@@ -977,7 +977,7 @@ xfs_btree_get_buf_block(
xfs_daddr_t d;

/* need to sort out how callers deal with failures first */
ASSERT(!(flags & XFS_BUF_TRYLOCK));
ASSERT(!(flags & XBF_TRYLOCK));

d = xfs_btree_ptr_to_daddr(cur, ptr);
*bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,

@@ -1008,7 +1008,7 @@ xfs_btree_read_buf_block(
int error;

/* need to sort out how callers deal with failures first */
ASSERT(!(flags & XFS_BUF_TRYLOCK));
ASSERT(!(flags & XBF_TRYLOCK));

d = xfs_btree_ptr_to_daddr(cur, ptr);
error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d,
@@ -250,7 +250,7 @@ xfs_buf_item_format(
((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
vecp->i_addr = (xfs_caddr_t)&bip->bli_format;
vecp->i_len = base_size;
XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BFORMAT);
vecp->i_type = XLOG_REG_TYPE_BFORMAT;
vecp++;
nvecs = 1;

@@ -297,14 +297,14 @@ xfs_buf_item_format(
buffer_offset = first_bit * XFS_BLI_CHUNK;
vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
vecp->i_len = nbits * XFS_BLI_CHUNK;
XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK);
vecp->i_type = XLOG_REG_TYPE_BCHUNK;
nvecs++;
break;
} else if (next_bit != last_bit + 1) {
buffer_offset = first_bit * XFS_BLI_CHUNK;
vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
vecp->i_len = nbits * XFS_BLI_CHUNK;
XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK);
vecp->i_type = XLOG_REG_TYPE_BCHUNK;
nvecs++;
vecp++;
first_bit = next_bit;

@@ -316,7 +316,7 @@ xfs_buf_item_format(
buffer_offset = first_bit * XFS_BLI_CHUNK;
vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
vecp->i_len = nbits * XFS_BLI_CHUNK;
XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK);
vecp->i_type = XLOG_REG_TYPE_BCHUNK;
/* You would think we need to bump the nvecs here too, but we do not
* this number is used by recovery, and it gets confused by the boundary
* split here

@@ -467,8 +467,10 @@ xfs_buf_item_unpin_remove(
/*
* This is called to attempt to lock the buffer associated with this
* buf log item. Don't sleep on the buffer lock. If we can't get
* the lock right away, return 0. If we can get the lock, pull the
* buffer from the free list, mark it busy, and return 1.
* the lock right away, return 0. If we can get the lock, take a
* reference to the buffer. If this is a delayed write buffer that
* needs AIL help to be written back, invoke the pushbuf routine
* rather than the normal success path.
*/
STATIC uint
xfs_buf_item_trylock(

@@ -477,24 +479,18 @@ xfs_buf_item_trylock(
xfs_buf_t *bp;

bp = bip->bli_buf;

if (XFS_BUF_ISPINNED(bp)) {
if (XFS_BUF_ISPINNED(bp))
return XFS_ITEM_PINNED;
}

if (!XFS_BUF_CPSEMA(bp)) {
if (!XFS_BUF_CPSEMA(bp))
return XFS_ITEM_LOCKED;
}

/*
* Remove the buffer from the free list. Only do this
* if it's on the free list. Private buffers like the
* superblock buffer are not.
*/
/* take a reference to the buffer. */
XFS_BUF_HOLD(bp);

ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
trace_xfs_buf_item_trylock(bip);
if (XFS_BUF_ISDELAYWRITE(bp))
return XFS_ITEM_PUSHBUF;
return XFS_ITEM_SUCCESS;
}

@@ -626,11 +622,9 @@ xfs_buf_item_committed(
}

/*
* This is called to asynchronously write the buffer associated with this
* buf log item out to disk. The buffer will already have been locked by
* a successful call to xfs_buf_item_trylock(). If the buffer still has
* B_DELWRI set, then get it going out to disk with a call to bawrite().
* If not, then just release the buffer.
* The buffer is locked, but is not a delayed write buffer. This happens
* if we race with IO completion and hence we don't want to try to write it
* again. Just release the buffer.
*/
STATIC void
xfs_buf_item_push(

@@ -642,17 +636,29 @@ xfs_buf_item_push(
trace_xfs_buf_item_push(bip);

bp = bip->bli_buf;
ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
xfs_buf_relse(bp);
}

if (XFS_BUF_ISDELAYWRITE(bp)) {
int error;
error = xfs_bawrite(bip->bli_item.li_mountp, bp);
if (error)
xfs_fs_cmn_err(CE_WARN, bip->bli_item.li_mountp,
"xfs_buf_item_push: pushbuf error %d on bip %p, bp %p",
error, bip, bp);
} else {
xfs_buf_relse(bp);
}
/*
* The buffer is locked and is a delayed write buffer. Promote the buffer
* in the delayed write queue as the caller knows that they must invoke
* the xfsbufd to get this buffer written. We have to unlock the buffer
* to allow the xfsbufd to write it, too.
*/
STATIC void
xfs_buf_item_pushbuf(
xfs_buf_log_item_t *bip)
{
xfs_buf_t *bp;

ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
trace_xfs_buf_item_pushbuf(bip);

bp = bip->bli_buf;
ASSERT(XFS_BUF_ISDELAYWRITE(bp));
xfs_buf_delwri_promote(bp);
xfs_buf_relse(bp);
}

/* ARGSUSED */

@@ -677,7 +683,7 @@ static struct xfs_item_ops xfs_buf_item_ops = {
.iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_buf_item_committed,
.iop_push = (void(*)(xfs_log_item_t*))xfs_buf_item_push,
.iop_pushbuf = NULL,
.iop_pushbuf = (void(*)(xfs_log_item_t*))xfs_buf_item_pushbuf,
.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_buf_item_committing
};
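The comments in the hunks above describe the new trylock decision tree: a pinned buffer reports PINNED, a contended lock reports LOCKED, and a delayed-write buffer now reports PUSHBUF so the AIL invokes the new pushbuf operation to promote it on the delwri queue instead of issuing I/O directly. A small user-space sketch of that state selection (not from the commit; the struct and enum are invented stand-ins):

#include <stdbool.h>
#include <stdio.h>

enum item_state { ITEM_SUCCESS, ITEM_PINNED, ITEM_LOCKED, ITEM_PUSHBUF };

struct buf { bool pinned, locked, delwri; };

static enum item_state buf_item_trylock(struct buf *bp)
{
	if (bp->pinned)
		return ITEM_PINNED;	/* wait for a log force first */
	if (bp->locked)
		return ITEM_LOCKED;	/* trylock failed, skip it */
	bp->locked = true;		/* got the lock; hold a reference */
	if (bp->delwri)
		return ITEM_PUSHBUF;	/* let the pushbuf op promote it */
	return ITEM_SUCCESS;
}

int main(void)
{
	struct buf bp = { .delwri = true };

	printf("state = %d (3 == PUSHBUF)\n", buf_item_trylock(&bp));
	return 0;
}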
@@ -1534,8 +1534,8 @@ xfs_da_hashname(const __uint8_t *name, int namelen)
enum xfs_dacmp
xfs_da_compname(
struct xfs_da_args *args,
const char *name,
int len)
const unsigned char *name,
int len)
{
return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
@@ -209,7 +209,8 @@ typedef struct xfs_da_state {
*/
struct xfs_nameops {
xfs_dahash_t (*hashname)(struct xfs_name *);
enum xfs_dacmp (*compname)(struct xfs_da_args *, const char *, int);
enum xfs_dacmp (*compname)(struct xfs_da_args *,
const unsigned char *, int);
};

@@ -260,7 +261,7 @@ int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,

uint xfs_da_hashname(const __uint8_t *name_string, int name_length);
enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args,
const char *name, int len);
const unsigned char *name, int len);

xfs_da_state_t *xfs_da_state_alloc(void);
@@ -45,15 +45,21 @@
#include "xfs_vnodeops.h"
#include "xfs_trace.h"

static int xfs_swap_extents(
xfs_inode_t *ip, /* target inode */
xfs_inode_t *tip, /* tmp inode */
xfs_swapext_t *sxp);

/*
* Syssgi interface for swapext
* ioctl interface for swapext
*/
int
xfs_swapext(
xfs_swapext_t *sxp)
{
xfs_inode_t *ip, *tip;
struct file *file, *target_file;
struct file *file, *tmp_file;
int error = 0;

/* Pull information for the target fd */

@@ -68,46 +74,46 @@ xfs_swapext(
goto out_put_file;
}

target_file = fget((int)sxp->sx_fdtmp);
if (!target_file) {
tmp_file = fget((int)sxp->sx_fdtmp);
if (!tmp_file) {
error = XFS_ERROR(EINVAL);
goto out_put_file;
}

if (!(target_file->f_mode & FMODE_WRITE) ||
(target_file->f_flags & O_APPEND)) {
if (!(tmp_file->f_mode & FMODE_WRITE) ||
(tmp_file->f_flags & O_APPEND)) {
error = XFS_ERROR(EBADF);
goto out_put_target_file;
goto out_put_tmp_file;
}

if (IS_SWAPFILE(file->f_path.dentry->d_inode) ||
IS_SWAPFILE(target_file->f_path.dentry->d_inode)) {
IS_SWAPFILE(tmp_file->f_path.dentry->d_inode)) {
error = XFS_ERROR(EINVAL);
goto out_put_target_file;
goto out_put_tmp_file;
}

ip = XFS_I(file->f_path.dentry->d_inode);
tip = XFS_I(target_file->f_path.dentry->d_inode);
tip = XFS_I(tmp_file->f_path.dentry->d_inode);

if (ip->i_mount != tip->i_mount) {
error = XFS_ERROR(EINVAL);
goto out_put_target_file;
goto out_put_tmp_file;
}

if (ip->i_ino == tip->i_ino) {
error = XFS_ERROR(EINVAL);
goto out_put_target_file;
goto out_put_tmp_file;
}

if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
error = XFS_ERROR(EIO);
goto out_put_target_file;
goto out_put_tmp_file;
}

error = xfs_swap_extents(ip, tip, sxp);

out_put_target_file:
fput(target_file);
out_put_tmp_file:
fput(tmp_file);
out_put_file:
fput(file);
out:

@@ -186,7 +192,7 @@ xfs_swap_extents_check_format(
return 0;
}

int
static int
xfs_swap_extents(
xfs_inode_t *ip, /* target inode */
xfs_inode_t *tip, /* tmp inode */

@@ -254,6 +260,9 @@ xfs_swap_extents(
goto out_unlock;
}

trace_xfs_swap_extent_before(ip, 0);
trace_xfs_swap_extent_before(tip, 1);

/* check inode formats now that data is flushed */
error = xfs_swap_extents_check_format(ip, tip);
if (error) {

@@ -421,6 +430,8 @@ xfs_swap_extents(

error = xfs_trans_commit(tp, XFS_TRANS_SWAPEXT);

trace_xfs_swap_extent_after(ip, 0);
trace_xfs_swap_extent_after(tip, 1);
out:
kmem_free(tempifp);
return error;

@@ -48,9 +48,6 @@ typedef struct xfs_swapext
*/
int xfs_swapext(struct xfs_swapext *sx);

int xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip,
struct xfs_swapext *sxp);

#endif /* __KERNEL__ */

#endif /* __XFS_DFRAG_H__ */
@@ -44,7 +44,7 @@
#include "xfs_vnodeops.h"
#include "xfs_trace.h"

struct xfs_name xfs_name_dotdot = {"..", 2};
struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2};

/*
* ASCII case-insensitive (ie. A-Z) support for directories that was

@@ -66,8 +66,8 @@ xfs_ascii_ci_hashname(
STATIC enum xfs_dacmp
xfs_ascii_ci_compname(
struct xfs_da_args *args,
const char *name,
int len)
const unsigned char *name,
int len)
{
enum xfs_dacmp result;
int i;

@@ -247,7 +247,7 @@ xfs_dir_createname(
int
xfs_dir_cilookup_result(
struct xfs_da_args *args,
const char *name,
const unsigned char *name,
int len)
{
if (args->cmpresult == XFS_CMP_DIFFERENT)
@@ -100,7 +100,7 @@ extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp,
extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
struct xfs_dabuf *bp);

extern int xfs_dir_cilookup_result(struct xfs_da_args *args, const char *name,
int len);
extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
const unsigned char *name, int len);

#endif /* __XFS_DIR2_H__ */
@@ -57,8 +57,8 @@ static xfs_dahash_t xfs_dir_hash_dot, xfs_dir_hash_dotdot;
void
xfs_dir_startup(void)
{
xfs_dir_hash_dot = xfs_da_hashname(".", 1);
xfs_dir_hash_dotdot = xfs_da_hashname("..", 2);
xfs_dir_hash_dot = xfs_da_hashname((unsigned char *)".", 1);
xfs_dir_hash_dotdot = xfs_da_hashname((unsigned char *)"..", 2);
}

/*

@@ -513,8 +513,9 @@ xfs_dir2_block_getdents(
/*
* If it didn't fit, set the final offset to here & return.
*/
if (filldir(dirent, dep->name, dep->namelen, cook & 0x7fffffff,
be64_to_cpu(dep->inumber), DT_UNKNOWN)) {
if (filldir(dirent, (char *)dep->name, dep->namelen,
cook & 0x7fffffff, be64_to_cpu(dep->inumber),
DT_UNKNOWN)) {
*offset = cook & 0x7fffffff;
xfs_da_brelse(NULL, bp);
return 0;
@@ -1081,7 +1081,7 @@ xfs_dir2_leaf_getdents(
dep = (xfs_dir2_data_entry_t *)ptr;
length = xfs_dir2_data_entsize(dep->namelen);

if (filldir(dirent, dep->name, dep->namelen,
if (filldir(dirent, (char *)dep->name, dep->namelen,
xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff,
be64_to_cpu(dep->inumber), DT_UNKNOWN))
break;
@@ -65,7 +65,7 @@ static int xfs_dir2_node_addname_int(xfs_da_args_t *args,
/*
* Log entries from a freespace block.
*/
void
STATIC void
xfs_dir2_free_log_bests(
xfs_trans_t *tp, /* transaction pointer */
xfs_dabuf_t *bp, /* freespace buffer */
@@ -75,8 +75,6 @@ xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db)
return ((db) % XFS_DIR2_MAX_FREE_BESTS(mp));
}

extern void xfs_dir2_free_log_bests(struct xfs_trans *tp, struct xfs_dabuf *bp,
int first, int last);
extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args,
struct xfs_dabuf *lbp);
extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_dabuf *bp, int *count);
@@ -782,7 +782,7 @@ xfs_dir2_sf_getdents(
}

ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
if (filldir(dirent, sfep->name, sfep->namelen,
if (filldir(dirent, (char *)sfep->name, sfep->namelen,
off & 0x7fffffff, ino, DT_UNKNOWN)) {
*offset = off & 0x7fffffff;
return 0;
@@ -82,7 +82,7 @@ xfs_efi_item_format(xfs_efi_log_item_t *efip,

log_vector->i_addr = (xfs_caddr_t)&(efip->efi_format);
log_vector->i_len = size;
XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_EFI_FORMAT);
log_vector->i_type = XLOG_REG_TYPE_EFI_FORMAT;
ASSERT(size >= sizeof(xfs_efi_log_format_t));
}

@@ -406,7 +406,7 @@ xfs_efd_item_format(xfs_efd_log_item_t *efdp,

log_vector->i_addr = (xfs_caddr_t)&(efdp->efd_format);
log_vector->i_len = size;
XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_EFD_FORMAT);
log_vector->i_type = XLOG_REG_TYPE_EFD_FORMAT;
ASSERT(size >= sizeof(xfs_efd_log_format_t));
}
@ -140,6 +140,7 @@ _xfs_filestream_pick_ag(
|
|||
int flags,
|
||||
xfs_extlen_t minlen)
|
||||
{
|
||||
int streams, max_streams;
|
||||
int err, trylock, nscan;
|
||||
xfs_extlen_t longest, free, minfree, maxfree = 0;
|
||||
xfs_agnumber_t ag, max_ag = NULLAGNUMBER;
|
||||
|
@ -155,15 +156,15 @@ _xfs_filestream_pick_ag(
|
|||
trylock = XFS_ALLOC_FLAG_TRYLOCK;
|
||||
|
||||
for (nscan = 0; 1; nscan++) {
|
||||
|
||||
TRACE_AG_SCAN(mp, ag, xfs_filestream_peek_ag(mp, ag));
|
||||
|
||||
pag = mp->m_perag + ag;
|
||||
pag = xfs_perag_get(mp, ag);
|
||||
TRACE_AG_SCAN(mp, ag, atomic_read(&pag->pagf_fstrms));
|
||||
|
||||
if (!pag->pagf_init) {
|
||||
err = xfs_alloc_pagf_init(mp, NULL, ag, trylock);
|
||||
if (err && !trylock)
|
||||
if (err && !trylock) {
|
||||
xfs_perag_put(pag);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
/* Might fail sometimes during the 1st pass with trylock set. */
|
||||
|
@ -173,6 +174,7 @@ _xfs_filestream_pick_ag(
|
|||
/* Keep track of the AG with the most free blocks. */
|
||||
if (pag->pagf_freeblks > maxfree) {
|
||||
maxfree = pag->pagf_freeblks;
|
||||
max_streams = atomic_read(&pag->pagf_fstrms);
|
||||
max_ag = ag;
|
||||
}
|
||||
|
||||
|
@ -195,6 +197,8 @@ _xfs_filestream_pick_ag(
|
|||
|
||||
/* Break out, retaining the reference on the AG. */
|
||||
free = pag->pagf_freeblks;
|
||||
streams = atomic_read(&pag->pagf_fstrms);
|
||||
xfs_perag_put(pag);
|
||||
*agp = ag;
|
||||
break;
|
||||
}
|
||||
|
@ -202,6 +206,7 @@ _xfs_filestream_pick_ag(
|
|||
/* Drop the reference on this AG, it's not usable. */
|
||||
xfs_filestream_put_ag(mp, ag);
|
||||
next_ag:
|
||||
xfs_perag_put(pag);
|
||||
/* Move to the next AG, wrapping to AG 0 if necessary. */
|
||||
if (++ag >= mp->m_sb.sb_agcount)
|
||||
ag = 0;
|
||||
|
@ -229,6 +234,7 @@ next_ag:
|
|||
if (max_ag != NULLAGNUMBER) {
|
||||
xfs_filestream_get_ag(mp, max_ag);
|
||||
TRACE_AG_PICK1(mp, max_ag, maxfree);
|
||||
streams = max_streams;
|
||||
free = maxfree;
|
||||
*agp = max_ag;
|
||||
break;
|
||||
|
@ -240,16 +246,14 @@ next_ag:
|
|||
return 0;
|
||||
}
|
||||
|
||||
TRACE_AG_PICK2(mp, startag, *agp, xfs_filestream_peek_ag(mp, *agp),
|
||||
free, nscan, flags);
|
||||
TRACE_AG_PICK2(mp, startag, *agp, streams, free, nscan, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the allocation group number for a file or a directory, updating inode
|
||||
* references and per-AG references as appropriate. Must be called with the
|
||||
* m_peraglock held in read mode.
|
||||
* references and per-AG references as appropriate.
|
||||
*/
|
||||
static int
|
||||
_xfs_filestream_update_ag(
|
||||
|
@ -450,20 +454,6 @@ xfs_filestream_unmount(
|
|||
xfs_mru_cache_destroy(mp->m_filestream);
|
||||
}
|
||||
|
||||
/*
|
||||
* If the mount point's m_perag array is going to be reallocated, all
|
||||
* outstanding cache entries must be flushed to avoid accessing reference count
|
||||
* addresses that have been freed. The call to xfs_filestream_flush() must be
|
||||
* made inside the block that holds the m_peraglock in write mode to do the
|
||||
* reallocation.
|
||||
*/
|
||||
void
|
||||
xfs_filestream_flush(
|
||||
xfs_mount_t *mp)
|
||||
{
|
||||
xfs_mru_cache_flush(mp->m_filestream);
|
||||
}
|
||||
|
||||
/*
|
||||
* Return the AG of the filestream the file or directory belongs to, or
|
||||
* NULLAGNUMBER otherwise.
|
||||
|
@ -526,7 +516,6 @@ xfs_filestream_associate(
|
|||
|
||||
mp = pip->i_mount;
|
||||
cache = mp->m_filestream;
|
||||
down_read(&mp->m_peraglock);
|
||||
|
||||
/*
|
||||
* We have a problem, Houston.
|
||||
|
@ -543,10 +532,8 @@ xfs_filestream_associate(
|
|||
*
|
||||
* So, if we can't get the iolock without sleeping then just give up
|
||||
*/
|
||||
if (!xfs_ilock_nowait(pip, XFS_IOLOCK_EXCL)) {
|
||||
up_read(&mp->m_peraglock);
|
||||
if (!xfs_ilock_nowait(pip, XFS_IOLOCK_EXCL))
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* If the parent directory is already in the cache, use its AG. */
|
||||
item = xfs_mru_cache_lookup(cache, pip->i_ino);
|
||||
|
@ -601,7 +588,6 @@ exit_did_pick:
|
|||
|
||||
exit:
|
||||
xfs_iunlock(pip, XFS_IOLOCK_EXCL);
|
||||
up_read(&mp->m_peraglock);
|
||||
return -err;
|
||||
}
|
||||
|
||||
|
|
|
@ -79,12 +79,21 @@ extern ktrace_t *xfs_filestreams_trace_buf;
|
|||
* the cache that reference per-ag array elements that have since been
|
||||
* reallocated.
|
||||
*/
|
||||
/*
|
||||
* xfs_filestream_peek_ag is only used in tracing code
|
||||
*/
|
||||
static inline int
|
||||
xfs_filestream_peek_ag(
|
||||
xfs_mount_t *mp,
|
||||
xfs_agnumber_t agno)
|
||||
{
|
||||
return atomic_read(&mp->m_perag[agno].pagf_fstrms);
|
||||
struct xfs_perag *pag;
|
||||
int ret;
|
||||
|
||||
pag = xfs_perag_get(mp, agno);
|
||||
ret = atomic_read(&pag->pagf_fstrms);
|
||||
xfs_perag_put(pag);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int
|
||||
|
@ -92,7 +101,13 @@ xfs_filestream_get_ag(
|
|||
xfs_mount_t *mp,
|
||||
xfs_agnumber_t agno)
|
||||
{
|
||||
return atomic_inc_return(&mp->m_perag[agno].pagf_fstrms);
|
||||
struct xfs_perag *pag;
|
||||
int ret;
|
||||
|
||||
pag = xfs_perag_get(mp, agno);
|
||||
ret = atomic_inc_return(&pag->pagf_fstrms);
|
||||
xfs_perag_put(pag);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int
|
||||
|
@ -100,7 +115,13 @@ xfs_filestream_put_ag(
|
|||
xfs_mount_t *mp,
|
||||
xfs_agnumber_t agno)
|
||||
{
|
||||
return atomic_dec_return(&mp->m_perag[agno].pagf_fstrms);
|
||||
struct xfs_perag *pag;
|
||||
int ret;
|
||||
|
||||
pag = xfs_perag_get(mp, agno);
|
||||
ret = atomic_dec_return(&pag->pagf_fstrms);
|
||||
xfs_perag_put(pag);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* allocation selection flags */
|
||||
|
@ -114,7 +135,6 @@ int xfs_filestream_init(void);
|
|||
void xfs_filestream_uninit(void);
|
||||
int xfs_filestream_mount(struct xfs_mount *mp);
|
||||
void xfs_filestream_unmount(struct xfs_mount *mp);
|
||||
void xfs_filestream_flush(struct xfs_mount *mp);
|
||||
xfs_agnumber_t xfs_filestream_lookup_ag(struct xfs_inode *ip);
|
||||
int xfs_filestream_associate(struct xfs_inode *dip, struct xfs_inode *ip);
|
||||
void xfs_filestream_deassociate(struct xfs_inode *ip);
|
||||
|
|
|
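After this change all three accessors follow the same reference-counted discipline: xfs_perag_get() pins the per-AG structure, the counter is read or modified while pinned, and xfs_perag_put() drops the reference — replacing the old m_peraglock/array indexing. A generic sketch of that get/use/put shape, with hypothetical names (this is not the xfs_perag_get/put implementation):

	/* Hypothetical get/use/put sketch; lookup_ref/drop_ref stand in
	 * for the real reference-counted lookup. */
	struct counted { int refcount; int value; };

	static struct counted *lookup_ref(struct counted *c)
	{
		c->refcount++;		/* pin: object cannot be freed */
		return c;
	}

	static void drop_ref(struct counted *c)
	{
		c->refcount--;		/* unpin */
	}

	static int peek(struct counted *c)
	{
		struct counted *ref = lookup_ref(c);
		int ret = ref->value;	/* safe while the reference is held */
		drop_ref(ref);
		return ret;
	}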
@@ -167,27 +167,14 @@ xfs_growfs_data_private(
	}
	new = nb - mp->m_sb.sb_dblocks;
	oagcount = mp->m_sb.sb_agcount;
+
+	/* allocate the new per-ag structures */
	if (nagcount > oagcount) {
-		void *new_perag, *old_perag;
-
-		xfs_filestream_flush(mp);
-
-		new_perag = kmem_zalloc(sizeof(xfs_perag_t) * nagcount,
-					KM_MAYFAIL);
-		if (!new_perag)
-			return XFS_ERROR(ENOMEM);
-
-		down_write(&mp->m_peraglock);
-		memcpy(new_perag, mp->m_perag, sizeof(xfs_perag_t) * oagcount);
-		old_perag = mp->m_perag;
-		mp->m_perag = new_perag;
-
-		mp->m_flags |= XFS_MOUNT_32BITINODES;
-		nagimax = xfs_initialize_perag(mp, nagcount);
-		up_write(&mp->m_peraglock);
-
-		kmem_free(old_perag);
+		error = xfs_initialize_perag(mp, nagcount, &nagimax);
+		if (error)
+			return error;
	}
+
	tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
	tp->t_flags |= XFS_TRANS_RESERVE;
	if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp),

@@ -196,6 +183,11 @@ xfs_growfs_data_private(
		return error;
	}

+	/*
+	 * Write new AG headers to disk. Non-transactional, but written
+	 * synchronously so they are completed prior to the growfs transaction
+	 * being logged.
+	 */
	nfree = 0;
	for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
		/*

@@ -359,6 +351,12 @@ xfs_growfs_data_private(
			goto error0;
		}
	}
+
+	/*
+	 * Update changed superblock fields transactionally. These are not
+	 * seen by the rest of the world until the transaction commit applies
+	 * them atomically to the superblock.
+	 */
	if (nagcount > oagcount)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
	if (nb > mp->m_sb.sb_dblocks)

@@ -369,9 +367,9 @@ xfs_growfs_data_private(
	if (dpct)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
	error = xfs_trans_commit(tp, 0);
-	if (error) {
+	if (error)
		return error;
-	}

+	/* New allocation groups fully initialized, so update mount struct */
	if (nagimax)
		mp->m_maxagi = nagimax;

@@ -381,6 +379,8 @@ xfs_growfs_data_private(
		mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
	} else
		mp->m_maxicount = 0;
+
+	/* update secondary superblocks. */
	for (agno = 1; agno < nagcount; agno++) {
		error = xfs_read_buf(mp, mp->m_ddev_targp,
				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),

@@ -205,7 +205,7 @@ xfs_ialloc_inode_init(
		d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster));
		fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
					 mp->m_bsize * blks_per_cluster,
-					 XFS_BUF_LOCK);
+					 XBF_LOCK);
		ASSERT(fbuf);
		ASSERT(!XFS_BUF_GETERROR(fbuf));

@@ -253,6 +253,7 @@ xfs_ialloc_ag_alloc(
	xfs_agino_t	thisino;	/* current inode number, for loop */
	int		isaligned = 0;	/* inode allocation at stripe unit */
					/* boundary */
+	struct xfs_perag *pag;

	args.tp = tp;
	args.mp = tp->t_mountp;

@@ -382,9 +383,9 @@ xfs_ialloc_ag_alloc(
	newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
	be32_add_cpu(&agi->agi_count, newlen);
	be32_add_cpu(&agi->agi_freecount, newlen);
-	down_read(&args.mp->m_peraglock);
-	args.mp->m_perag[agno].pagi_freecount += newlen;
-	up_read(&args.mp->m_peraglock);
+	pag = xfs_perag_get(args.mp, agno);
+	pag->pagi_freecount += newlen;
+	xfs_perag_put(pag);
	agi->agi_newino = cpu_to_be32(newino);

	/*

@@ -486,9 +487,8 @@ xfs_ialloc_ag_select(
	 */
	agno = pagno;
	flags = XFS_ALLOC_FLAG_TRYLOCK;
-	down_read(&mp->m_peraglock);
	for (;;) {
-		pag = &mp->m_perag[agno];
+		pag = xfs_perag_get(mp, agno);
		if (!pag->pagi_init) {
			if (xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
				agbp = NULL;

@@ -527,7 +527,7 @@ xfs_ialloc_ag_select(
				agbp = NULL;
				goto nextag;
			}
-			up_read(&mp->m_peraglock);
+			xfs_perag_put(pag);
			return agbp;
		}
	}

@@ -535,22 +535,19 @@ unlock_nextag:
		if (agbp)
			xfs_trans_brelse(tp, agbp);
 nextag:
+		xfs_perag_put(pag);
		/*
		 * No point in iterating over the rest, if we're shutting
		 * down.
		 */
-		if (XFS_FORCED_SHUTDOWN(mp)) {
-			up_read(&mp->m_peraglock);
+		if (XFS_FORCED_SHUTDOWN(mp))
			return NULL;
-		}
		agno++;
		if (agno >= agcount)
			agno = 0;
		if (agno == pagno) {
-			if (flags == 0) {
-				up_read(&mp->m_peraglock);
+			if (flags == 0)
				return NULL;
-			}
			flags = 0;
		}
	}

@@ -672,6 +669,7 @@ xfs_dialloc(
	xfs_agnumber_t	tagno;		/* testing allocation group number */
	xfs_btree_cur_t	*tcur;		/* temp cursor */
	xfs_inobt_rec_incore_t trec;	/* temp inode allocation record */
+	struct xfs_perag *pag;


	if (*IO_agbp == NULL) {

@@ -771,13 +769,13 @@ nextag:
		*inop = NULLFSINO;
		return noroom ? ENOSPC : 0;
	}
-	down_read(&mp->m_peraglock);
-	if (mp->m_perag[tagno].pagi_inodeok == 0) {
-		up_read(&mp->m_peraglock);
+	pag = xfs_perag_get(mp, tagno);
+	if (pag->pagi_inodeok == 0) {
+		xfs_perag_put(pag);
		goto nextag;
	}
	error = xfs_ialloc_read_agi(mp, tp, tagno, &agbp);
-	up_read(&mp->m_peraglock);
+	xfs_perag_put(pag);
	if (error)
		goto nextag;
	agi = XFS_BUF_TO_AGI(agbp);

@@ -790,6 +788,7 @@ nextag:
	 */
	agno = tagno;
	*IO_agbp = NULL;
+	pag = xfs_perag_get(mp, agno);

 restart_pagno:
	cur = xfs_inobt_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno));

@@ -808,7 +807,6 @@ nextag:
	 * If in the same AG as the parent, try to get near the parent.
	 */
	if (pagno == agno) {
-		xfs_perag_t	*pag = &mp->m_perag[agno];
		int		doneleft;	/* done, to the left */
		int		doneright;	/* done, to the right */
		int		searchdistance = 10;

@@ -1006,9 +1004,7 @@ alloc_inode:
		goto error0;
	be32_add_cpu(&agi->agi_freecount, -1);
	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
-	down_read(&mp->m_peraglock);
-	mp->m_perag[tagno].pagi_freecount--;
-	up_read(&mp->m_peraglock);
+	pag->pagi_freecount--;

	error = xfs_check_agi_freecount(cur, agi);
	if (error)

@@ -1016,12 +1012,14 @@ alloc_inode:

	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
+	xfs_perag_put(pag);
	*inop = ino;
	return 0;
 error1:
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
 error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+	xfs_perag_put(pag);
	return error;
 }

@@ -1052,6 +1050,7 @@ xfs_difree(
	xfs_mount_t	*mp;	/* mount structure for filesystem */
	int		off;	/* offset of inode in inode chunk */
	xfs_inobt_rec_incore_t rec;	/* btree record */
+	struct xfs_perag *pag;

	mp = tp->t_mountp;

@@ -1088,9 +1087,7 @@ xfs_difree(
	/*
	 * Get the allocation group header.
	 */
-	down_read(&mp->m_peraglock);
	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
-	up_read(&mp->m_peraglock);
	if (error) {
		cmn_err(CE_WARN,
			"xfs_difree: xfs_ialloc_read_agi() returned an error %d on %s.  Returning error.",

@@ -1157,9 +1154,9 @@ xfs_difree(
		be32_add_cpu(&agi->agi_count, -ilen);
		be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
-		down_read(&mp->m_peraglock);
-		mp->m_perag[agno].pagi_freecount -= ilen - 1;
-		up_read(&mp->m_peraglock);
+		pag = xfs_perag_get(mp, agno);
+		pag->pagi_freecount -= ilen - 1;
+		xfs_perag_put(pag);
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));

@@ -1188,9 +1185,9 @@ xfs_difree(
		 */
		be32_add_cpu(&agi->agi_freecount, 1);
		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
-		down_read(&mp->m_peraglock);
-		mp->m_perag[agno].pagi_freecount++;
-		up_read(&mp->m_peraglock);
+		pag = xfs_perag_get(mp, agno);
+		pag->pagi_freecount++;
+		xfs_perag_put(pag);
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
	}

@@ -1312,9 +1309,7 @@ xfs_imap(
		xfs_buf_t	*agbp;	/* agi buffer */
		int		i;	/* temp state */

-		down_read(&mp->m_peraglock);
		error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
-		up_read(&mp->m_peraglock);
		if (error) {
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
					"xfs_ialloc_read_agi() returned "

@@ -1379,7 +1374,6 @@ xfs_imap(
			XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
		return XFS_ERROR(EINVAL);
	}
-
	return 0;
 }

@@ -1523,8 +1517,7 @@ xfs_ialloc_read_agi(
		return error;

	agi = XFS_BUF_TO_AGI(*bpp);
-	pag = &mp->m_perag[agno];
-
+	pag = xfs_perag_get(mp, agno);
	if (!pag->pagi_init) {
		pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
		pag->pagi_count = be32_to_cpu(agi->agi_count);

@@ -1537,6 +1530,7 @@ xfs_ialloc_read_agi(
	 */
	ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
		XFS_FORCED_SHUTDOWN(mp));
+	xfs_perag_put(pag);
	return 0;
 }
@@ -374,7 +374,7 @@ xfs_iget(
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
-	pag = xfs_get_perag(mp, ino);
+	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	if (!pag->pagi_inodeok)
		return EINVAL;
	ASSERT(pag->pag_ici_init);

@@ -398,7 +398,7 @@ again:
		if (error)
			goto out_error_or_again;
	}
-	xfs_put_perag(mp, pag);
+	xfs_perag_put(pag);

	*ipp = ip;

@@ -417,7 +417,7 @@ out_error_or_again:
		delay(1);
		goto again;
	}
-	xfs_put_perag(mp, pag);
+	xfs_perag_put(pag);
	return error;
 }

@@ -488,12 +488,12 @@ xfs_ireclaim(
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
-	pag = xfs_get_perag(mp, ip->i_ino);
+	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	write_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root, agino))
		ASSERT(0);
	write_unlock(&pag->pag_ici_lock);
-	xfs_put_perag(mp, pag);
+	xfs_perag_put(pag);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate

@@ -151,7 +151,7 @@ xfs_imap_to_bp(
					"an error %d on %s.  Returning error.",
					error, mp->m_fsname);
		} else {
-			ASSERT(buf_flags & XFS_BUF_TRYLOCK);
+			ASSERT(buf_flags & XBF_TRYLOCK);
		}
		return error;
	}

@@ -239,7 +239,7 @@ xfs_inotobp(
	if (error)
		return error;

-	error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, imap_flags);
+	error = xfs_imap_to_bp(mp, tp, &imap, &bp, XBF_LOCK, imap_flags);
	if (error)
		return error;

@@ -285,7 +285,7 @@ xfs_itobp(
		return error;

	if (!bp) {
-		ASSERT(buf_flags & XFS_BUF_TRYLOCK);
+		ASSERT(buf_flags & XBF_TRYLOCK);
		ASSERT(tp == NULL);
		*bpp = NULL;
		return EAGAIN;

@@ -807,7 +807,7 @@ xfs_iread(
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp,
-			       XFS_BUF_LOCK, iget_flags);
+			       XBF_LOCK, iget_flags);
	if (error)
		return error;
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);

@@ -1751,7 +1751,7 @@ xfs_iunlink(
		 * Here we put the head pointer into our next pointer,
		 * and then we fall through to point the head at us.
		 */
-		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
		if (error)
			return error;

@@ -1833,7 +1833,7 @@ xfs_iunlink_remove(
		 * of dealing with the buffer when there is no need to
		 * change it.
		 */
-		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
		if (error) {
			cmn_err(CE_WARN,
				"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s.  Returning error.",

@@ -1895,7 +1895,7 @@ xfs_iunlink_remove(
		 * Now last_ibp points to the buffer previous to us on
		 * the unlinked list.  Pull us from the list.
		 */
-		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
		if (error) {
			cmn_err(CE_WARN,
				"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s.  Returning error.",

@@ -1946,8 +1946,9 @@ xfs_ifree_cluster(
	xfs_inode_t		*ip, **ip_found;
	xfs_inode_log_item_t	*iip;
	xfs_log_item_t		*lip;
-	xfs_perag_t		*pag = xfs_get_perag(mp, inum);
+	struct xfs_perag	*pag;

+	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
		blks_per_cluster = 1;
		ninodes = mp->m_sb.sb_inopblock;

@@ -2039,7 +2040,7 @@ xfs_ifree_cluster(

		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
					mp->m_bsize * blks_per_cluster,
-					XFS_BUF_LOCK);
+					XBF_LOCK);

		pre_flushed = 0;
		lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);

@@ -2088,7 +2089,7 @@ xfs_ifree_cluster(
	}

	kmem_free(ip_found);
-	xfs_put_perag(mp, pag);
+	xfs_perag_put(pag);
 }

 /*

@@ -2150,7 +2151,7 @@ xfs_ifree(

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

-	error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+	error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XBF_LOCK);
	if (error)
		return error;

@@ -2483,13 +2484,16 @@ __xfs_iunpin_wait(
		return;

	/* Give the log a push to start the unpinning I/O */
-	xfs_log_force(ip->i_mount, (iip && iip->ili_last_lsn) ?
-				iip->ili_last_lsn : 0, XFS_LOG_FORCE);
+	if (iip && iip->ili_last_lsn)
+		xfs_log_force_lsn(ip->i_mount, iip->ili_last_lsn, 0);
+	else
+		xfs_log_force(ip->i_mount, 0);
+
	if (wait)
		wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
 }

-static inline void
+void
 xfs_iunpin_wait(
	xfs_inode_t	*ip)
 {
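With the force API split, a caller that has recorded the LSN of its last log write can force just up to that LSN, and only falls back to forcing the whole log when no LSN is known — exactly the shape __xfs_iunpin_wait takes above. A hedged usage sketch based on the prototypes in this series (mp and lsn are assumed to be in scope):

	/* Usage sketch only; assumes mp and lsn are in scope. */
	if (lsn)
		xfs_log_force_lsn(mp, lsn, 0);	/* async force up to lsn */
	else
		xfs_log_force(mp, 0);		/* no LSN known: force all */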
@@ -2675,7 +2679,7 @@ xfs_iflush_cluster(
	xfs_buf_t	*bp)
 {
	xfs_mount_t		*mp = ip->i_mount;
-	xfs_perag_t		*pag = xfs_get_perag(mp, ip->i_ino);
+	struct xfs_perag	*pag;
	unsigned long		first_index, mask;
	unsigned long		inodes_per_cluster;
	int			ilist_size;

@@ -2686,6 +2690,7 @@ xfs_iflush_cluster(
	int			bufwasdelwri;
	int			i;

+	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	ASSERT(pag->pagi_inodeok);
	ASSERT(pag->pag_ici_init);

@@ -2693,7 +2698,7 @@ xfs_iflush_cluster(
	ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
	ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
	if (!ilist)
-		return 0;
+		goto out_put;

	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;

@@ -2762,6 +2767,8 @@ xfs_iflush_cluster(
 out_free:
	read_unlock(&pag->pag_ici_lock);
	kmem_free(ilist);
+out_put:
+	xfs_perag_put(pag);
	return 0;

@@ -2805,6 +2812,7 @@ cluster_corrupt_out:
	 */
	xfs_iflush_abort(iq);
	kmem_free(ilist);
+	xfs_perag_put(pag);
	return XFS_ERROR(EFSCORRUPTED);
 }

@@ -2827,8 +2835,6 @@ xfs_iflush(
	xfs_dinode_t	*dip;
	xfs_mount_t	*mp;
	int		error;
-	int		noblock = (flags == XFS_IFLUSH_ASYNC_NOBLOCK);
-	enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) };

	XFS_STATS_INC(xs_iflush_count);

@@ -2840,15 +2846,6 @@ xfs_iflush(
	iip = ip->i_itemp;
	mp = ip->i_mount;

-	/*
-	 * If the inode isn't dirty, then just release the inode flush lock and
-	 * do nothing.
-	 */
-	if (xfs_inode_clean(ip)) {
-		xfs_ifunlock(ip);
-		return 0;
-	}
-
	/*
	 * We can't flush the inode until it is unpinned, so wait for it if we
	 * are allowed to block. We know noone new can pin it, because we are

@@ -2860,7 +2857,7 @@ xfs_iflush(
	 * in the same cluster are dirty, they will probably write the inode
	 * out for us if they occur after the log force completes.
	 */
-	if (noblock && xfs_ipincount(ip)) {
+	if (!(flags & SYNC_WAIT) && xfs_ipincount(ip)) {
		xfs_iunpin_nowait(ip);
		xfs_ifunlock(ip);
		return EAGAIN;

@@ -2893,61 +2890,11 @@ xfs_iflush(
		return XFS_ERROR(EIO);
	}

-	/*
-	 * Decide how buffer will be flushed out.  This is done before
-	 * the call to xfs_iflush_int because this field is zeroed by it.
-	 */
-	if (iip != NULL && iip->ili_format.ilf_fields != 0) {
-		/*
-		 * Flush out the inode buffer according to the directions
-		 * of the caller.  In the cases where the caller has given
-		 * us a choice choose the non-delwri case.  This is because
-		 * the inode is in the AIL and we need to get it out soon.
-		 */
-		switch (flags) {
-		case XFS_IFLUSH_SYNC:
-		case XFS_IFLUSH_DELWRI_ELSE_SYNC:
-			flags = 0;
-			break;
-		case XFS_IFLUSH_ASYNC_NOBLOCK:
-		case XFS_IFLUSH_ASYNC:
-		case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
-			flags = INT_ASYNC;
-			break;
-		case XFS_IFLUSH_DELWRI:
-			flags = INT_DELWRI;
-			break;
-		default:
-			ASSERT(0);
-			flags = 0;
-			break;
-		}
-	} else {
-		switch (flags) {
-		case XFS_IFLUSH_DELWRI_ELSE_SYNC:
-		case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
-		case XFS_IFLUSH_DELWRI:
-			flags = INT_DELWRI;
-			break;
-		case XFS_IFLUSH_ASYNC_NOBLOCK:
-		case XFS_IFLUSH_ASYNC:
-			flags = INT_ASYNC;
-			break;
-		case XFS_IFLUSH_SYNC:
-			flags = 0;
-			break;
-		default:
-			ASSERT(0);
-			flags = 0;
-			break;
-		}
-	}
-
	/*
	 * Get the buffer containing the on-disk inode.
	 */
	error = xfs_itobp(mp, NULL, ip, &dip, &bp,
-				noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK);
+				(flags & SYNC_WAIT) ? XBF_LOCK : XBF_TRYLOCK);
	if (error || !bp) {
		xfs_ifunlock(ip);
		return error;

@@ -2965,7 +2912,7 @@ xfs_iflush(
	 * get stuck waiting in the write for too long.
	 */
	if (XFS_BUF_ISPINNED(bp))
-		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+		xfs_log_force(mp, 0);

	/*
	 * inode clustering:

@@ -2975,13 +2922,10 @@ xfs_iflush(
	if (error)
		goto cluster_corrupt_out;

-	if (flags & INT_DELWRI) {
-		xfs_bdwrite(mp, bp);
-	} else if (flags & INT_ASYNC) {
-		error = xfs_bawrite(mp, bp);
-	} else {
+	if (flags & SYNC_WAIT)
		error = xfs_bwrite(mp, bp);
-	}
+	else
+		xfs_bdwrite(mp, bp);
	return error;

 corrupt_out:
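The rewrite above collapses the old six-value XFS_IFLUSH_* flag space into a single question: did the caller pass SYNC_WAIT? Synchronous callers take the xfs_bwrite() path; everyone else gets a delayed write, and a pinned inode makes the non-blocking case bail out early. A caller-side sketch of the resulting contract, inferred from the hunks above:

	/* Caller-side sketch of the simplified xfs_iflush() contract. */
	error = xfs_iflush(ip, SYNC_WAIT);	/* blocking: waits for unpin,
						 * writes via xfs_bwrite() */
	error = xfs_iflush(ip, 0);		/* delayed write via
						 * xfs_bdwrite(); returns
						 * EAGAIN if still pinned */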
@@ -3016,16 +2960,6 @@ xfs_iflush_int(
	iip = ip->i_itemp;
	mp = ip->i_mount;

-
-	/*
-	 * If the inode isn't dirty, then just release the inode
-	 * flush lock and do nothing.
-	 */
-	if (xfs_inode_clean(ip)) {
-		xfs_ifunlock(ip);
-		return 0;
-	}
-
	/* set *dip = inode's place in the buffer */
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);

@@ -419,16 +419,6 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
 #define XFS_IOLOCK_DEP(flags)	(((flags) & XFS_IOLOCK_DEP_MASK) >> XFS_IOLOCK_SHIFT)
 #define XFS_ILOCK_DEP(flags)	(((flags) & XFS_ILOCK_DEP_MASK) >> XFS_ILOCK_SHIFT)

-/*
- * Flags for xfs_iflush()
- */
-#define	XFS_IFLUSH_DELWRI_ELSE_SYNC	1
-#define	XFS_IFLUSH_DELWRI_ELSE_ASYNC	2
-#define	XFS_IFLUSH_SYNC			3
-#define	XFS_IFLUSH_ASYNC		4
-#define	XFS_IFLUSH_DELWRI		5
-#define	XFS_IFLUSH_ASYNC_NOBLOCK	6
-
 /*
  * Flags for xfs_itruncate_start().
  */

@@ -483,6 +473,7 @@ int xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
 void		xfs_iext_realloc(xfs_inode_t *, int, int);
 void		xfs_ipin(xfs_inode_t *);
 void		xfs_iunpin(xfs_inode_t *);
+void		xfs_iunpin_wait(xfs_inode_t *);
 int		xfs_iflush(xfs_inode_t *, uint);
 void		xfs_ichgtime(xfs_inode_t *, int);
 void		xfs_lock_inodes(xfs_inode_t **, int, uint);

@@ -228,7 +228,7 @@ xfs_inode_item_format(

	vecp->i_addr = (xfs_caddr_t)&iip->ili_format;
	vecp->i_len = sizeof(xfs_inode_log_format_t);
-	XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IFORMAT);
+	vecp->i_type = XLOG_REG_TYPE_IFORMAT;
	vecp++;
	nvecs = 1;

@@ -279,7 +279,7 @@ xfs_inode_item_format(

	vecp->i_addr = (xfs_caddr_t)&ip->i_d;
	vecp->i_len = sizeof(struct xfs_icdinode);
-	XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ICORE);
+	vecp->i_type = XLOG_REG_TYPE_ICORE;
	vecp++;
	nvecs++;
	iip->ili_format.ilf_fields |= XFS_ILOG_CORE;

@@ -336,7 +336,7 @@ xfs_inode_item_format(
				vecp->i_addr =
					(char *)(ip->i_df.if_u1.if_extents);
				vecp->i_len = ip->i_df.if_bytes;
-				XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IEXT);
+				vecp->i_type = XLOG_REG_TYPE_IEXT;
			} else
 #endif
			{

@@ -355,7 +355,7 @@ xfs_inode_item_format(
				vecp->i_addr = (xfs_caddr_t)ext_buffer;
				vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
						XFS_DATA_FORK);
-				XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IEXT);
+				vecp->i_type = XLOG_REG_TYPE_IEXT;
			}
			ASSERT(vecp->i_len <= ip->i_df.if_bytes);
			iip->ili_format.ilf_dsize = vecp->i_len;

@@ -373,7 +373,7 @@ xfs_inode_item_format(
			ASSERT(ip->i_df.if_broot != NULL);
			vecp->i_addr = (xfs_caddr_t)ip->i_df.if_broot;
			vecp->i_len = ip->i_df.if_broot_bytes;
-			XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IBROOT);
+			vecp->i_type = XLOG_REG_TYPE_IBROOT;
			vecp++;
			nvecs++;
			iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes;

@@ -399,7 +399,7 @@ xfs_inode_item_format(
			ASSERT((ip->i_df.if_real_bytes == 0) ||
			       (ip->i_df.if_real_bytes == data_bytes));
			vecp->i_len = (int)data_bytes;
-			XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ILOCAL);
+			vecp->i_type = XLOG_REG_TYPE_ILOCAL;
			vecp++;
			nvecs++;
			iip->ili_format.ilf_dsize = (unsigned)data_bytes;

@@ -477,7 +477,7 @@ xfs_inode_item_format(
			vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
					XFS_ATTR_FORK);
 #endif
-			XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IATTR_EXT);
+			vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
			iip->ili_format.ilf_asize = vecp->i_len;
			vecp++;
			nvecs++;

@@ -492,7 +492,7 @@ xfs_inode_item_format(
			ASSERT(ip->i_afp->if_broot != NULL);
			vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_broot;
			vecp->i_len = ip->i_afp->if_broot_bytes;
-			XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IATTR_BROOT);
+			vecp->i_type = XLOG_REG_TYPE_IATTR_BROOT;
			vecp++;
			nvecs++;
			iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes;

@@ -516,7 +516,7 @@ xfs_inode_item_format(
			ASSERT((ip->i_afp->if_real_bytes == 0) ||
			       (ip->i_afp->if_real_bytes == data_bytes));
			vecp->i_len = (int)data_bytes;
-			XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IATTR_LOCAL);
+			vecp->i_type = XLOG_REG_TYPE_IATTR_LOCAL;
			vecp++;
			nvecs++;
			iip->ili_format.ilf_asize = (unsigned)data_bytes;

@@ -602,33 +602,20 @@ xfs_inode_item_trylock(

	if (!xfs_iflock_nowait(ip)) {
		/*
-		 * If someone else isn't already trying to push the inode
-		 * buffer, we get to do it.
+		 * inode has already been flushed to the backing buffer,
+		 * leave it locked in shared mode, pushbuf routine will
+		 * unlock it.
		 */
-		if (iip->ili_pushbuf_flag == 0) {
-			iip->ili_pushbuf_flag = 1;
-#ifdef DEBUG
-			iip->ili_push_owner = current_pid();
-#endif
-			/*
-			 * Inode is left locked in shared mode.
-			 * Pushbuf routine gets to unlock it.
-			 */
-			return XFS_ITEM_PUSHBUF;
-		} else {
-			/*
-			 * We hold the AIL lock, so we must specify the
-			 * NONOTIFY flag so that we won't double trip.
-			 */
-			xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY);
-			return XFS_ITEM_FLUSHING;
-		}
-		/* NOTREACHED */
+		return XFS_ITEM_PUSHBUF;
	}

	/* Stale items should force out the iclog */
	if (ip->i_flags & XFS_ISTALE) {
		xfs_ifunlock(ip);
		/*
		 * we hold the AIL lock - notify the unlock routine of this
		 * so it doesn't try to get the lock again.
		 */
		xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY);
		return XFS_ITEM_PINNED;
	}

@@ -746,11 +733,8 @@ xfs_inode_item_committed(
  * This gets called by xfs_trans_push_ail(), when IOP_TRYLOCK
  * failed to get the inode flush lock but did get the inode locked SHARED.
  * Here we're trying to see if the inode buffer is incore, and if so whether it's
- * marked delayed write. If that's the case, we'll initiate a bawrite on that
- * buffer to expedite the process.
- *
- * We aren't holding the AIL lock (or the flush lock) when this gets called,
- * so it is inherently race-y.
+ * marked delayed write. If that's the case, we'll promote it and that will
+ * allow the caller to write the buffer by triggering the xfsbufd to run.
  */
 STATIC void
 xfs_inode_item_pushbuf(

@@ -759,82 +743,30 @@ xfs_inode_item_pushbuf(
	xfs_inode_t	*ip;
	xfs_mount_t	*mp;
	xfs_buf_t	*bp;
-	uint		dopush;

	ip = iip->ili_inode;
-
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));

	/*
-	 * The ili_pushbuf_flag keeps others from
-	 * trying to duplicate our effort.
-	 */
-	ASSERT(iip->ili_pushbuf_flag != 0);
-	ASSERT(iip->ili_push_owner == current_pid());
-
-	/*
	 * If a flush is not in progress anymore, chances are that the
	 * inode was taken off the AIL. So, just get out.
	 */
	if (completion_done(&ip->i_flush) ||
	    ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) {
-		iip->ili_pushbuf_flag = 0;
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return;
	}

	mp = ip->i_mount;
	bp = xfs_incore(mp->m_ddev_targp, iip->ili_format.ilf_blkno,
-			iip->ili_format.ilf_len, XFS_INCORE_TRYLOCK);
+			iip->ili_format.ilf_len, XBF_TRYLOCK);

-	if (bp != NULL) {
-		if (XFS_BUF_ISDELAYWRITE(bp)) {
-			/*
-			 * We were racing with iflush because we don't hold
-			 * the AIL lock or the flush lock. However, at this point,
-			 * we have the buffer, and we know that it's dirty.
-			 * So, it's possible that iflush raced with us, and
-			 * this item is already taken off the AIL.
-			 * If not, we can flush it async.
-			 */
-			dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) &&
-				  !completion_done(&ip->i_flush));
-			iip->ili_pushbuf_flag = 0;
-			xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
-			trace_xfs_inode_item_push(bp, _RET_IP_);
-
-			if (XFS_BUF_ISPINNED(bp)) {
-				xfs_log_force(mp, (xfs_lsn_t)0,
-					      XFS_LOG_FORCE);
-			}
-			if (dopush) {
-				int	error;
-				error = xfs_bawrite(mp, bp);
-				if (error)
-					xfs_fs_cmn_err(CE_WARN, mp,
-		"xfs_inode_item_pushbuf: pushbuf error %d on iip %p, bp %p",
-							error, iip, bp);
-			} else {
-				xfs_buf_relse(bp);
-			}
-		} else {
-			iip->ili_pushbuf_flag = 0;
-			xfs_iunlock(ip, XFS_ILOCK_SHARED);
-			xfs_buf_relse(bp);
-		}
-		return;
-	}
-	/*
-	 * We have to be careful about resetting pushbuf flag too early (above).
-	 * Even though in theory we can do it as soon as we have the buflock,
-	 * we don't want others to be doing work needlessly. They'll come to
-	 * this function thinking that pushing the buffer is their
-	 * responsibility only to find that the buffer is still locked by
-	 * another doing the same thing
-	 */
-	iip->ili_pushbuf_flag = 0;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	if (!bp)
+		return;
+	if (XFS_BUF_ISDELAYWRITE(bp))
+		xfs_buf_delwri_promote(bp);
+	xfs_buf_relse(bp);
	return;
 }
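The new pushbuf never issues I/O itself: it just promotes an already-delayed-write buffer to the head of the delwri queue and releases it, leaving the actual write to xfsbufd. This also removes the whole ili_pushbuf_flag/ili_push_owner exclusion dance, since promotion is cheap and safe to race. The distilled control flow of the new tail, with comments added (not a drop-in body):

	/* Distilled from the new code above, comments added. */
	xfs_iunlock(ip, XFS_ILOCK_SHARED);	/* inode no longer needed */
	if (!bp)
		return;				/* buffer not incore */
	if (XFS_BUF_ISDELAYWRITE(bp))
		xfs_buf_delwri_promote(bp);	/* jump the delwri queue */
	xfs_buf_relse(bp);			/* xfsbufd issues the I/O */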
@@ -867,10 +799,14 @@ xfs_inode_item_push(
	       iip->ili_format.ilf_fields != 0);

	/*
-	 * Write out the inode. The completion routine ('iflush_done') will
-	 * pull it from the AIL, mark it clean, unlock the flush lock.
+	 * Push the inode to its backing buffer. This will not remove the
+	 * inode from the AIL - a further push will be required to trigger a
+	 * buffer push. However, this allows all the dirty inodes to be pushed
+	 * to the buffer before it is pushed to disk. The buffer IO completion
+	 * will pull the inode from the AIL, mark it clean and unlock the flush
+	 * lock.
	 */
-	(void) xfs_iflush(ip, XFS_IFLUSH_ASYNC);
+	(void) xfs_iflush(ip, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	return;

@@ -934,7 +870,6 @@ xfs_inode_item_init(
	/*
	   We have zeroed memory. No need ...
	   iip->ili_extents_buf = NULL;
-	   iip->ili_pushbuf_flag = 0;
	 */

	iip->ili_format.ilf_type = XFS_LI_INODE;

@@ -144,12 +144,6 @@ typedef struct xfs_inode_log_item {
						      data exts */
	struct xfs_bmbt_rec	*ili_aextents_buf; /* array of logged
						      attr exts */
-	unsigned int		ili_pushbuf_flag;  /* one bit used in push_ail */
-
-#ifdef DEBUG
-	uint64_t		ili_push_owner;	   /* one who sets pushbuf_flag
-						      above gets to push the buf */
-#endif
 #ifdef XFS_TRANS_DEBUG
	int			ili_root_size;
	char			*ili_orig_root;

@@ -408,8 +408,10 @@ xfs_bulkstat(
			(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
	nimask = ~(nicluster - 1);
	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
-	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4,
-				   KM_SLEEP | KM_MAYFAIL | KM_LARGE);
+	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
+	if (!irbuf)
+		return ENOMEM;
+
	nirbuf = irbsize / sizeof(*irbuf);

	/*
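The call-site change above documents the new kmem_zalloc_greedy() contract: it takes no KM_* flags, it can fail outright, and because the memory may be vmalloc-backed it must be released with kmem_free_large() (as a later hunk in this file does). A hedged usage sketch:

	/* Usage sketch of the changed allocator contract. */
	buf = kmem_zalloc_greedy(&size, PAGE_SIZE, PAGE_SIZE * 4);
	if (!buf)
		return ENOMEM;		/* greedy allocation may now fail */
	/* ... use up to 'size' bytes ... */
	kmem_free_large(buf);		/* never plain kmem_free() */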
@@ -420,9 +422,7 @@ xfs_bulkstat(
	while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
		cond_resched();
		bp = NULL;
-		down_read(&mp->m_peraglock);
		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
-		up_read(&mp->m_peraglock);
		if (error) {
			/*
			 * Skip this allocation group and go to the next one.

@@ -729,7 +729,7 @@ xfs_bulkstat(
	/*
	 * Done, we're either out of filesystem or space to put the data.
	 */
-	kmem_free(irbuf);
+	kmem_free_large(irbuf);
	*ubcountp = ubelem;
	/*
	 * Found some inodes, return them now and return the error next time.

@@ -849,9 +849,7 @@ xfs_inumbers(
	agbp = NULL;
	while (left > 0 && agno < mp->m_sb.sb_agcount) {
		if (agbp == NULL) {
-			down_read(&mp->m_peraglock);
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
-			up_read(&mp->m_peraglock);
			if (error) {
				/*
				 * If we can't read the AGI of this ag,

fs/xfs/xfs_log.c

@@ -50,7 +50,6 @@ kmem_zone_t *xfs_log_ticket_zone;
	(off) += (bytes);}

 /* Local miscellaneous function prototypes */
-STATIC int	 xlog_bdstrat_cb(struct xfs_buf *);
 STATIC int	 xlog_commit_record(xfs_mount_t *mp, xlog_ticket_t *ticket,
				    xlog_in_core_t **, xfs_lsn_t *);
 STATIC xlog_t *  xlog_alloc_log(xfs_mount_t *mp,

@@ -80,11 +79,6 @@ STATIC int xlog_state_release_iclog(xlog_t *log,
 STATIC void xlog_state_switch_iclogs(xlog_t		*log,
				     xlog_in_core_t	*iclog,
				     int		eventual_size);
-STATIC int  xlog_state_sync(xlog_t	*log,
-			    xfs_lsn_t	lsn,
-			    uint	flags,
-			    int		*log_flushed);
-STATIC int  xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed);
 STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog);

 /* local functions to manipulate grant head */

@@ -297,65 +291,6 @@ xfs_log_done(xfs_mount_t *mp,
	return lsn;
 }	/* xfs_log_done */


-/*
- * Force the in-core log to disk.  If flags == XFS_LOG_SYNC,
- * the force is done synchronously.
- *
- * Asynchronous forces are implemented by setting the WANT_SYNC
- * bit in the appropriate in-core log and then returning.
- *
- * Synchronous forces are implemented with a signal variable. All callers
- * to force a given lsn to disk will wait on a the sv attached to the
- * specific in-core log.  When given in-core log finally completes its
- * write to disk, that thread will wake up all threads waiting on the
- * sv.
- */
-int
-_xfs_log_force(
-	xfs_mount_t	*mp,
-	xfs_lsn_t	lsn,
-	uint		flags,
-	int		*log_flushed)
-{
-	xlog_t		*log = mp->m_log;
-	int		dummy;
-
-	if (!log_flushed)
-		log_flushed = &dummy;
-
-	ASSERT(flags & XFS_LOG_FORCE);
-
-	XFS_STATS_INC(xs_log_force);
-
-	if (log->l_flags & XLOG_IO_ERROR)
-		return XFS_ERROR(EIO);
-	if (lsn == 0)
-		return xlog_state_sync_all(log, flags, log_flushed);
-	else
-		return xlog_state_sync(log, lsn, flags, log_flushed);
-}	/* _xfs_log_force */
-
-/*
- * Wrapper for _xfs_log_force(), to be used when caller doesn't care
- * about errors or whether the log was flushed or not. This is the normal
- * interface to use when trying to unpin items or move the log forward.
- */
-void
-xfs_log_force(
-	xfs_mount_t	*mp,
-	xfs_lsn_t	lsn,
-	uint		flags)
-{
-	int	error;
-	error = _xfs_log_force(mp, lsn, flags, NULL);
-	if (error) {
-		xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: "
-			"error %d returned.", error);
-	}
-}
-
 /*
  * Attaches a new iclog I/O completion callback routine during
  * transaction commit.  If the log is in error state, a non-zero

@@ -602,7 +537,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

-	error = _xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC, NULL);
+	error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
	ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));

 #ifdef DEBUG

@@ -618,7 +553,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
	if (! (XLOG_FORCED_SHUTDOWN(log))) {
		reg[0].i_addr = (void*)&magic;
		reg[0].i_len = sizeof(magic);
-		XLOG_VEC_SET_TYPE(&reg[0], XLOG_REG_TYPE_UNMOUNT);
+		reg[0].i_type = XLOG_REG_TYPE_UNMOUNT;

		error = xfs_log_reserve(mp, 600, 1, &tic,
					XFS_LOG, 0, XLOG_UNMOUNT_REC_TYPE);

@@ -987,35 +922,6 @@ xlog_iodone(xfs_buf_t *bp)

 }	/* xlog_iodone */

-/*
- * The bdstrat callback function for log bufs. This gives us a central
- * place to trap bufs in case we get hit by a log I/O error and need to
- * shutdown. Actually, in practice, even when we didn't get a log error,
- * we transition the iclogs to IOERROR state *after* flushing all existing
- * iclogs to disk. This is because we don't want anymore new transactions to be
- * started or completed afterwards.
- */
-STATIC int
-xlog_bdstrat_cb(struct xfs_buf *bp)
-{
-	xlog_in_core_t *iclog;
-
-	iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *);
-
-	if ((iclog->ic_state & XLOG_STATE_IOERROR) == 0) {
-		/* note for irix bstrat will need  struct bdevsw passed
-		 * Fix the following macro if the code ever is merged
-		 */
-		XFS_bdstrat(bp);
-		return 0;
-	}
-
-	XFS_BUF_ERROR(bp, EIO);
-	XFS_BUF_STALE(bp);
-	xfs_biodone(bp);
-	return XFS_ERROR(EIO);
-}
-
 /*
  * Return size of each in-core log record buffer.
  *

@@ -1158,7 +1064,6 @@ xlog_alloc_log(xfs_mount_t *mp,
	if (!bp)
		goto out_free_log;
	XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
-	XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb);
	XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);

@@ -1196,7 +1101,6 @@ xlog_alloc_log(xfs_mount_t *mp,
		if (!XFS_BUF_CPSEMA(bp))
			ASSERT(0);
		XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
-		XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb);
		XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
		iclog->ic_bp = bp;
		iclog->ic_data = bp->b_addr;

@@ -1268,7 +1172,7 @@ xlog_commit_record(xfs_mount_t *mp,

	reg[0].i_addr = NULL;
	reg[0].i_len = 0;
-	XLOG_VEC_SET_TYPE(&reg[0], XLOG_REG_TYPE_COMMIT);
+	reg[0].i_type = XLOG_REG_TYPE_COMMIT;

	ASSERT_ALWAYS(iclog);
	if ((error = xlog_write(mp, reg, 1, ticket, commitlsnp,

@@ -1343,6 +1247,37 @@ xlog_grant_push_ail(xfs_mount_t *mp,
	xfs_trans_ail_push(log->l_ailp, threshold_lsn);
 }	/* xlog_grant_push_ail */

+/*
+ * The bdstrat callback function for log bufs. This gives us a central
+ * place to trap bufs in case we get hit by a log I/O error and need to
+ * shutdown. Actually, in practice, even when we didn't get a log error,
+ * we transition the iclogs to IOERROR state *after* flushing all existing
+ * iclogs to disk. This is because we don't want anymore new transactions to be
+ * started or completed afterwards.
+ */
+STATIC int
+xlog_bdstrat(
+	struct xfs_buf		*bp)
+{
+	struct xlog_in_core	*iclog;
+
+	iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *);
+	if (iclog->ic_state & XLOG_STATE_IOERROR) {
+		XFS_BUF_ERROR(bp, EIO);
+		XFS_BUF_STALE(bp);
+		xfs_biodone(bp);
+		/*
+		 * It would seem logical to return EIO here, but we rely on
+		 * the log state machine to propagate I/O errors instead of
+		 * doing it here.
+		 */
+		return 0;
+	}
+
+	bp->b_flags |= _XBF_RUN_QUEUES;
+	xfs_buf_iorequest(bp);
+	return 0;
+}
+
 /*
  * Flush out the in-core log (iclog) to the on-disk log in an asynchronous

@@ -1462,7 +1397,7 @@ xlog_sync(xlog_t *log,
	 */
	XFS_BUF_WRITE(bp);

-	if ((error = XFS_bwrite(bp))) {
+	if ((error = xlog_bdstrat(bp))) {
		xfs_ioerror_alert("xlog_sync", log->l_mp, bp,
				  XFS_BUF_ADDR(bp));
		return error;

@@ -1502,7 +1437,7 @@ xlog_sync(xlog_t *log,
		/* account for internal log which doesn't start at block #0 */
		XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
		XFS_BUF_WRITE(bp);
-		if ((error = XFS_bwrite(bp))) {
+		if ((error = xlog_bdstrat(bp))) {
			xfs_ioerror_alert("xlog_sync (split)", log->l_mp,
					  bp, XFS_BUF_ADDR(bp));
			return error;

@@ -2854,7 +2789,6 @@ xlog_state_switch_iclogs(xlog_t *log,
	log->l_iclog = iclog->ic_next;
 }	/* xlog_state_switch_iclogs */

-
 /*
  * Write out all data in the in-core log as of this exact moment in time.
  *

@@ -2882,11 +2816,17 @@ xlog_state_switch_iclogs(xlog_t *log,
  *	   b) when we return from flushing out this iclog, it is still
  *	   not in the active nor dirty state.
  */
-STATIC int
-xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
+int
+_xfs_log_force(
+	struct xfs_mount	*mp,
+	uint			flags,
+	int			*log_flushed)
 {
-	xlog_in_core_t	*iclog;
-	xfs_lsn_t	lsn;
+	struct log		*log = mp->m_log;
+	struct xlog_in_core	*iclog;
+	xfs_lsn_t		lsn;
+
+	XFS_STATS_INC(xs_log_force);

	spin_lock(&log->l_icloglock);

@@ -2932,7 +2872,9 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)

			if (xlog_state_release_iclog(log, iclog))
				return XFS_ERROR(EIO);
-			*log_flushed = 1;
+
+			if (log_flushed)
+				*log_flushed = 1;
			spin_lock(&log->l_icloglock);
			if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn &&
			    iclog->ic_state != XLOG_STATE_DIRTY)

@@ -2976,19 +2918,37 @@ maybe_sleep:
		 */
		if (iclog->ic_state & XLOG_STATE_IOERROR)
			return XFS_ERROR(EIO);
-		*log_flushed = 1;
-
+		if (log_flushed)
+			*log_flushed = 1;
	} else {

 no_sleep:
		spin_unlock(&log->l_icloglock);
	}
	return 0;
-}	/* xlog_state_sync_all */
+}

 /*
- * Used by code which implements synchronous log forces.
+ * Wrapper for _xfs_log_force(), to be used when caller doesn't care
+ * about errors or whether the log was flushed or not. This is the normal
+ * interface to use when trying to unpin items or move the log forward.
+ */
+void
+xfs_log_force(
+	xfs_mount_t	*mp,
+	uint		flags)
+{
+	int	error;
+
+	error = _xfs_log_force(mp, flags, NULL);
+	if (error) {
+		xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: "
+			"error %d returned.", error);
+	}
+}
+
+/*
+ * Force the in-core log to disk for a specific LSN.
  *
  * Find in-core log with lsn.
  * If it is in the DIRTY state, just return.

@@ -2996,109 +2956,142 @@ no_sleep:
  *	   state and go to sleep or return.
  * If it is in any other state, go to sleep or return.
  *
- * If filesystem activity goes to zero, the iclog will get flushed only by
- * bdflush().
+ * Synchronous forces are implemented with a signal variable. All callers
+ * to force a given lsn to disk will wait on a the sv attached to the
+ * specific in-core log.  When given in-core log finally completes its
+ * write to disk, that thread will wake up all threads waiting on the
+ * sv.
  */
-STATIC int
-xlog_state_sync(xlog_t	*log,
-		xfs_lsn_t	lsn,
-		uint		flags,
-		int		*log_flushed)
+int
+_xfs_log_force_lsn(
+	struct xfs_mount	*mp,
+	xfs_lsn_t		lsn,
+	uint			flags,
+	int			*log_flushed)
 {
-	xlog_in_core_t	*iclog;
-	int		already_slept = 0;
+	struct log		*log = mp->m_log;
+	struct xlog_in_core	*iclog;
+	int			already_slept = 0;
+
+	ASSERT(lsn != 0);
+
+	XFS_STATS_INC(xs_log_force);

 try_again:
	spin_lock(&log->l_icloglock);
	iclog = log->l_iclog;
-
	if (iclog->ic_state & XLOG_STATE_IOERROR) {
		spin_unlock(&log->l_icloglock);
		return XFS_ERROR(EIO);
	}

	do {
		if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
			iclog = iclog->ic_next;
			continue;
		}

		if (iclog->ic_state == XLOG_STATE_DIRTY) {
			spin_unlock(&log->l_icloglock);
			return 0;
		}

		if (iclog->ic_state == XLOG_STATE_ACTIVE) {
			/*
			 * We sleep here if we haven't already slept (e.g.
			 * this is the first time we've looked at the correct
			 * iclog buf) and the buffer before us is going to
			 * be sync'ed. The reason for this is that if we
			 * are doing sync transactions here, by waiting for
			 * the previous I/O to complete, we can allow a few
			 * more transactions into this iclog before we close
			 * it down.
			 *
			 * Otherwise, we mark the buffer WANT_SYNC, and bump
-			 * up the refcnt so we can release the log (which drops
-			 * the ref count).  The state switch keeps new transaction
-			 * commits from using this buffer.  When the current commits
-			 * finish writing into the buffer, the refcount will drop to
-			 * zero and the buffer will go out then.
+			 * up the refcnt so we can release the log (which
+			 * drops the ref count).  The state switch keeps new
+			 * transaction commits from using this buffer.  When
+			 * the current commits finish writing into the buffer,
+			 * the refcount will drop to zero and the buffer will
+			 * go out then.
			 */
			if (!already_slept &&
-			    (iclog->ic_prev->ic_state & (XLOG_STATE_WANT_SYNC |
-							 XLOG_STATE_SYNCING))) {
+			    (iclog->ic_prev->ic_state &
+			     (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) {
				ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
+
				XFS_STATS_INC(xs_log_force_sleep);
-				sv_wait(&iclog->ic_prev->ic_write_wait, PSWP,
-					&log->l_icloglock, s);
-				*log_flushed = 1;
+
+				sv_wait(&iclog->ic_prev->ic_write_wait,
+					PSWP, &log->l_icloglock, s);
+				if (log_flushed)
+					*log_flushed = 1;
				already_slept = 1;
				goto try_again;
-			} else {
+			}
			atomic_inc(&iclog->ic_refcnt);
			xlog_state_switch_iclogs(log, iclog, 0);
			spin_unlock(&log->l_icloglock);
			if (xlog_state_release_iclog(log, iclog))
				return XFS_ERROR(EIO);
-			*log_flushed = 1;
+			if (log_flushed)
+				*log_flushed = 1;
			spin_lock(&log->l_icloglock);
		}

		if ((flags & XFS_LOG_SYNC) && /* sleep */
-		    !(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
+		    !(iclog->ic_state &
+		      (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
			/*
			 * Don't wait on completion if we know that we've
			 * gotten a log write error.
			 */
			if (iclog->ic_state & XLOG_STATE_IOERROR) {
				spin_unlock(&log->l_icloglock);
				return XFS_ERROR(EIO);
			}
			XFS_STATS_INC(xs_log_force_sleep);
			sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
			/*
			 * No need to grab the log lock here since we're
			 * only deciding whether or not to return EIO
			 * and the memory read should be atomic.
			 */
			if (iclog->ic_state & XLOG_STATE_IOERROR)
				return XFS_ERROR(EIO);
-			*log_flushed = 1;
+
+			if (log_flushed)
+				*log_flushed = 1;
		} else {		/* just return */
			spin_unlock(&log->l_icloglock);
		}

		return 0;
	} while (iclog != log->l_iclog);

	spin_unlock(&log->l_icloglock);
	return 0;
-}	/* xlog_state_sync */
+}
+
+/*
+ * Wrapper for _xfs_log_force_lsn(), to be used when caller doesn't care
+ * about errors or whether the log was flushed or not. This is the normal
+ * interface to use when trying to unpin items or move the log forward.
+ */
+void
+xfs_log_force_lsn(
+	xfs_mount_t	*mp,
+	xfs_lsn_t	lsn,
+	uint		flags)
+{
+	int	error;
+
+	error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
+	if (error) {
+		xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: "
+			"error %d returned.", error);
+	}
+}

 /*
  * Called when we want to mark the current iclog as being ready to sync to

@@ -3463,7 +3456,6 @@ xfs_log_force_umount(
	xlog_ticket_t	*tic;
	xlog_t		*log;
	int		retval;
-	int		dummy;

	log = mp->m_log;

@@ -3537,13 +3529,14 @@ xfs_log_force_umount(
	}
	spin_unlock(&log->l_grant_lock);

-	if (! (log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
+	if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
		ASSERT(!logerror);
		/*
		 * Force the incore logs to disk before shutting the
		 * log down completely.
		 */
-		xlog_state_sync_all(log, XFS_LOG_FORCE|XFS_LOG_SYNC, &dummy);
+		_xfs_log_force(mp, XFS_LOG_SYNC, NULL);
+
		spin_lock(&log->l_icloglock);
		retval = xlog_state_ioerror(log);
		spin_unlock(&log->l_icloglock);

@@ -70,14 +70,8 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
  * Flags to xfs_log_force()
  *
  * XFS_LOG_SYNC:	Synchronous force in-core log to disk
- * XFS_LOG_FORCE:	Start in-core log write now.
- * XFS_LOG_URGE:	Start write within some window of time.
- *
- * Note: Either XFS_LOG_FORCE or XFS_LOG_URGE must be set.
  */
 #define XFS_LOG_SYNC		0x1
-#define XFS_LOG_FORCE		0x2
-#define XFS_LOG_URGE		0x4

 #endif	/* __KERNEL__ */

@@ -110,10 +104,8 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
 #define XLOG_REG_TYPE_TRANSHDR	19
 #define XLOG_REG_TYPE_MAX	19

-#define XLOG_VEC_SET_TYPE(vecp, t) ((vecp)->i_type = (t))
-
 typedef struct xfs_log_iovec {
-	xfs_caddr_t		i_addr;	/* beginning address of region */
+	xfs_caddr_t	i_addr;		/* beginning address of region */
	int		i_len;		/* length in bytes of region */
	uint		i_type;		/* type of region */
 } xfs_log_iovec_t;

@@ -140,12 +132,17 @@ xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
		       void		**iclog,
		       uint		flags);
 int	  _xfs_log_force(struct xfs_mount *mp,
-			 xfs_lsn_t	lsn,
			 uint		flags,
			 int		*log_forced);
 void	  xfs_log_force(struct xfs_mount	*mp,
-			xfs_lsn_t		lsn,
			uint			flags);
+int	  _xfs_log_force_lsn(struct xfs_mount *mp,
+			     xfs_lsn_t		lsn,
+			     uint		flags,
+			     int		*log_forced);
+void	  xfs_log_force_lsn(struct xfs_mount	*mp,
+			    xfs_lsn_t		lsn,
+			    uint		flags);
 int	  xfs_log_mount(struct xfs_mount	*mp,
			struct xfs_buftarg	*log_target,
			xfs_daddr_t		start_block,
@ -443,14 +443,9 @@ typedef struct log {
|
|||
|
||||
/* common routines */
|
||||
extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
|
||||
extern int xlog_find_tail(xlog_t *log,
|
||||
xfs_daddr_t *head_blk,
|
||||
xfs_daddr_t *tail_blk);
|
||||
extern int xlog_recover(xlog_t *log);
|
||||
extern int xlog_recover_finish(xlog_t *log);
|
||||
extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
|
||||
extern struct xfs_buf *xlog_get_bp(xlog_t *, int);
|
||||
extern void xlog_put_bp(struct xfs_buf *);
|
||||
|
||||
extern kmem_zone_t *xfs_log_ticket_zone;
|
||||
|
||||
|
|
|
@ -50,8 +50,6 @@
|
|||
|
||||
STATIC int xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
|
||||
STATIC int xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
|
||||
STATIC void xlog_recover_insert_item_backq(xlog_recover_item_t **q,
|
||||
xlog_recover_item_t *item);
|
||||
#if defined(DEBUG)
|
||||
STATIC void xlog_recover_check_summary(xlog_t *);
|
||||
#else
|
||||
|
@ -68,7 +66,7 @@ STATIC void xlog_recover_check_summary(xlog_t *);
|
|||
((bbs + (log)->l_sectbb_mask + 1) & ~(log)->l_sectbb_mask) : (bbs) )
|
||||
#define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno) ((bno) & ~(log)->l_sectbb_mask)
|
||||
|
||||
xfs_buf_t *
|
||||
STATIC xfs_buf_t *
|
||||
xlog_get_bp(
|
||||
xlog_t *log,
|
||||
int nbblks)
|
||||
|
@ -88,7 +86,7 @@ xlog_get_bp(
|
|||
return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp);
|
||||
}
|
||||
|
||||
void
|
||||
STATIC void
|
||||
xlog_put_bp(
|
||||
xfs_buf_t *bp)
|
||||
{
|
||||
|
@ -805,7 +803,7 @@ xlog_find_head(
|
|||
* We could speed up search by using current head_blk buffer, but it is not
|
||||
* available.
|
||||
*/
|
||||
int
|
||||
STATIC int
|
||||
xlog_find_tail(
|
||||
xlog_t *log,
|
||||
xfs_daddr_t *head_blk,
|
||||
|
@ -1367,36 +1365,45 @@ xlog_clear_stale_blocks(
|
|||
|
||||
STATIC xlog_recover_t *
|
||||
xlog_recover_find_tid(
|
||||
xlog_recover_t *q,
|
||||
struct hlist_head *head,
|
||||
xlog_tid_t tid)
|
||||
{
|
||||
xlog_recover_t *p = q;
|
||||
xlog_recover_t *trans;
|
||||
struct hlist_node *n;
|
||||
|
||||
while (p != NULL) {
|
||||
if (p->r_log_tid == tid)
|
||||
break;
|
||||
p = p->r_next;
|
||||
hlist_for_each_entry(trans, n, head, r_list) {
|
||||
if (trans->r_log_tid == tid)
|
||||
return trans;
|
||||
}
|
||||
return p;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
STATIC void
|
||||
xlog_recover_put_hashq(
|
||||
xlog_recover_t **q,
|
||||
xlog_recover_t *trans)
|
||||
xlog_recover_new_tid(
|
||||
struct hlist_head *head,
|
||||
xlog_tid_t tid,
|
||||
xfs_lsn_t lsn)
|
||||
{
|
||||
trans->r_next = *q;
|
||||
*q = trans;
|
||||
xlog_recover_t *trans;
|
||||
|
||||
trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
|
||||
trans->r_log_tid = tid;
|
||||
trans->r_lsn = lsn;
|
||||
INIT_LIST_HEAD(&trans->r_itemq);
|
||||
|
||||
INIT_HLIST_NODE(&trans->r_list);
|
||||
hlist_add_head(&trans->r_list, head);
|
||||
}
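A sketch of the lifecycle the hlist conversion gives recovery transactions (illustrative, not part of the commit; hash, tid and rhead are as used by xlog_recover_process_data, and the LSN argument mirrors how that caller supplies it):

	struct hlist_head	rhash[XLOG_RHASH_SIZE];	/* zeroed per recovery pass */
	xlog_recover_t		*trans;

	/* on XLOG_START_TRANS: allocate and hash a new transaction */
	xlog_recover_new_tid(&rhash[hash], tid, be64_to_cpu(rhead->h_lsn));

	/* on later records: short bucket walk instead of a full list scan */
	trans = xlog_recover_find_tid(&rhash[hash], tid);

	/* on XLOG_COMMIT_TRANS: unhashing is now a single hlist_del() */
	hlist_del(&trans->r_list);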

STATIC void
xlog_recover_add_item(
	xlog_recover_item_t	**itemq)
	struct list_head	*head)
{
	xlog_recover_item_t	*item;

	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
	xlog_recover_insert_item_backq(itemq, item);
	INIT_LIST_HEAD(&item->ri_list);
	list_add_tail(&item->ri_list, head);
}

STATIC int

@@ -1409,8 +1416,7 @@ xlog_recover_add_to_cont_trans(
	xfs_caddr_t		ptr, old_ptr;
	int			old_len;

	item = trans->r_itemq;
	if (item == NULL) {
	if (list_empty(&trans->r_itemq)) {
		/* finish copying rest of trans header */
		xlog_recover_add_item(&trans->r_itemq);
		ptr = (xfs_caddr_t) &trans->r_theader +

@@ -1418,7 +1424,8 @@ xlog_recover_add_to_cont_trans(
		memcpy(ptr, dp, len); /* d, s, l */
		return 0;
	}
	item = item->ri_prev;
	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

@@ -1455,8 +1462,7 @@ xlog_recover_add_to_trans(

	if (!len)
		return 0;
	item = trans->r_itemq;
	if (item == NULL) {
	if (list_empty(&trans->r_itemq)) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xlog_warn("XFS: xlog_recover_add_to_trans: "

@@ -1474,12 +1480,15 @@ xlog_recover_add_to_trans(
	memcpy(ptr, dp, len);
	in_f = (xfs_inode_log_format_t *)ptr;

	if (item->ri_prev->ri_total != 0 &&
	     item->ri_prev->ri_total == item->ri_prev->ri_cnt) {
	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
	if (item->ri_total != 0 &&
	     item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					xlog_recover_item_t, ri_list);
	}
	item = trans->r_itemq;
	item = item->ri_prev;

	if (item->ri_total == 0) {		/* first region to be added */
		if (in_f->ilf_size == 0 ||

@@ -1504,96 +1513,29 @@ xlog_recover_add_to_trans(
	return 0;
}

STATIC void
xlog_recover_new_tid(
	xlog_recover_t		**q,
	xlog_tid_t		tid,
	xfs_lsn_t		lsn)
{
	xlog_recover_t		*trans;

	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
	trans->r_log_tid = tid;
	trans->r_lsn = lsn;
	xlog_recover_put_hashq(q, trans);
}

STATIC int
xlog_recover_unlink_tid(
	xlog_recover_t		**q,
	xlog_recover_t		*trans)
{
	xlog_recover_t		*tp;
	int			found = 0;

	ASSERT(trans != NULL);
	if (trans == *q) {
		*q = (*q)->r_next;
	} else {
		tp = *q;
		while (tp) {
			if (tp->r_next == trans) {
				found = 1;
				break;
			}
			tp = tp->r_next;
		}
		if (!found) {
			xlog_warn(
			     "XFS: xlog_recover_unlink_tid: trans not found");
			ASSERT(0);
			return XFS_ERROR(EIO);
		}
		tp->r_next = tp->r_next->r_next;
	}
	return 0;
}

STATIC void
xlog_recover_insert_item_backq(
	xlog_recover_item_t	**q,
	xlog_recover_item_t	*item)
{
	if (*q == NULL) {
		item->ri_prev = item->ri_next = item;
		*q = item;
	} else {
		item->ri_next = *q;
		item->ri_prev = (*q)->ri_prev;
		(*q)->ri_prev = item;
		item->ri_prev->ri_next = item;
	}
}

STATIC void
xlog_recover_insert_item_frontq(
	xlog_recover_item_t	**q,
	xlog_recover_item_t	*item)
{
	xlog_recover_insert_item_backq(q, item);
	*q = item;
}

/*
 * Sort the log items in the transaction. Cancelled buffers need
 * to be put first so they are processed before any items that might
 * modify the buffers. If they are cancelled, then the modifications
 * don't need to be replayed.
 */
STATIC int
xlog_recover_reorder_trans(
	xlog_recover_t		*trans)
{
	xlog_recover_item_t	*first_item, *itemq, *itemq_next;
	xfs_buf_log_format_t	*buf_f;
	ushort			flags = 0;
	xlog_recover_item_t	*item, *n;
	LIST_HEAD(sort_list);

	first_item = itemq = trans->r_itemq;
	trans->r_itemq = NULL;
	do {
		itemq_next = itemq->ri_next;
		buf_f = (xfs_buf_log_format_t *)itemq->ri_buf[0].i_addr;
	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		xfs_buf_log_format_t	*buf_f;

		switch (ITEM_TYPE(itemq)) {
		buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;

		switch (ITEM_TYPE(item)) {
		case XFS_LI_BUF:
			flags = buf_f->blf_flags;
			if (!(flags & XFS_BLI_CANCEL)) {
				xlog_recover_insert_item_frontq(&trans->r_itemq,
								itemq);
			if (!(buf_f->blf_flags & XFS_BLI_CANCEL)) {
				list_move(&item->ri_list, &trans->r_itemq);
				break;
			}
		case XFS_LI_INODE:

@@ -1601,7 +1543,7 @@ xlog_recover_reorder_trans(
		case XFS_LI_QUOTAOFF:
		case XFS_LI_EFD:
		case XFS_LI_EFI:
			xlog_recover_insert_item_backq(&trans->r_itemq, itemq);
			list_move_tail(&item->ri_list, &trans->r_itemq);
			break;
		default:
			xlog_warn(

@@ -1609,8 +1551,8 @@ xlog_recover_reorder_trans(
			ASSERT(0);
			return XFS_ERROR(EIO);
		}
		itemq = itemq_next;
	} while (first_item != itemq);
	}
	ASSERT(list_empty(&sort_list));
	return 0;
}

@@ -2242,9 +2184,9 @@ xlog_recover_do_buffer_trans(
	}

	mp = log->l_mp;
	buf_flags = XFS_BUF_LOCK;
	buf_flags = XBF_LOCK;
	if (!(flags & XFS_BLI_INODE_BUF))
		buf_flags |= XFS_BUF_MAPPED;
		buf_flags |= XBF_MAPPED;

	bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags);
	if (XFS_BUF_ISERROR(bp)) {

@@ -2346,7 +2288,7 @@ xlog_recover_do_inode_trans(
	}

	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
			  XFS_BUF_LOCK);
			  XBF_LOCK);
	if (XFS_BUF_ISERROR(bp)) {
		xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
				  bp, in_f->ilf_blkno);

@@ -2814,14 +2756,13 @@ xlog_recover_do_trans(
	int			pass)
{
	int			error = 0;
	xlog_recover_item_t	*item, *first_item;
	xlog_recover_item_t	*item;

	error = xlog_recover_reorder_trans(trans);
	if (error)
		return error;

	first_item = item = trans->r_itemq;
	do {
	list_for_each_entry(item, &trans->r_itemq, ri_list) {
		switch (ITEM_TYPE(item)) {
		case XFS_LI_BUF:
			error = xlog_recover_do_buffer_trans(log, item, pass);

@@ -2854,8 +2795,7 @@ xlog_recover_do_trans(

		if (error)
			return error;
		item = item->ri_next;
	} while (first_item != item);
	}

	return 0;
}

@@ -2869,21 +2809,18 @@ STATIC void
xlog_recover_free_trans(
	xlog_recover_t		*trans)
{
	xlog_recover_item_t	*first_item, *item, *free_item;
	xlog_recover_item_t	*item, *n;
	int			i;

	item = first_item = trans->r_itemq;
	do {
		free_item = item;
		item = item->ri_next;
		/* Free the regions in the item. */
		for (i = 0; i < free_item->ri_cnt; i++) {
			kmem_free(free_item->ri_buf[i].i_addr);
		}
	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
		/* Free the regions in the item. */
		list_del(&item->ri_list);
		for (i = 0; i < item->ri_cnt; i++)
			kmem_free(item->ri_buf[i].i_addr);
		/* Free the item itself */
		kmem_free(free_item->ri_buf);
		kmem_free(free_item);
	} while (first_item != item);
		kmem_free(item->ri_buf);
		kmem_free(item);
	}
	/* Free the transaction recover structure */
	kmem_free(trans);
}

@@ -2891,14 +2828,12 @@ xlog_recover_free_trans(
STATIC int
xlog_recover_commit_trans(
	xlog_t			*log,
	xlog_recover_t		**q,
	xlog_recover_t		*trans,
	int			pass)
{
	int			error;

	if ((error = xlog_recover_unlink_tid(q, trans)))
		return error;
	hlist_del(&trans->r_list);
	if ((error = xlog_recover_do_trans(log, trans, pass)))
		return error;
	xlog_recover_free_trans(trans);	/* no error */

@@ -2926,7 +2861,7 @@ xlog_recover_unmount_trans(
STATIC int
xlog_recover_process_data(
	xlog_t			*log,
	xlog_recover_t		*rhash[],
	struct hlist_head	rhash[],
	xlog_rec_header_t	*rhead,
	xfs_caddr_t		dp,
	int			pass)

@@ -2960,7 +2895,7 @@ xlog_recover_process_data(
		}
		tid = be32_to_cpu(ohead->oh_tid);
		hash = XLOG_RHASH(tid);
		trans = xlog_recover_find_tid(rhash[hash], tid);
		trans = xlog_recover_find_tid(&rhash[hash], tid);
		if (trans == NULL) {		/* not found; add new tid */
			if (ohead->oh_flags & XLOG_START_TRANS)
				xlog_recover_new_tid(&rhash[hash], tid,

@@ -2978,7 +2913,7 @@ xlog_recover_process_data(
			switch (flags) {
			case XLOG_COMMIT_TRANS:
				error = xlog_recover_commit_trans(log,
						&rhash[hash], trans, pass);
						trans, pass);
				break;
			case XLOG_UNMOUNT_TRANS:
				error = xlog_recover_unmount_trans(trans);

@@ -3211,7 +3146,7 @@ xlog_recover_process_one_iunlink(
	/*
	 * Get the on disk inode to find the next inode in the bucket.
	 */
	error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XFS_BUF_LOCK);
	error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XBF_LOCK);
	if (error)
		goto fail_iput;

@@ -3517,7 +3452,7 @@ xlog_do_recovery_pass(
	int			error = 0, h_size;
	int			bblks, split_bblks;
	int			hblks, split_hblks, wrapped_hblks;
	xlog_recover_t		*rhash[XLOG_RHASH_SIZE];
	struct hlist_head	rhash[XLOG_RHASH_SIZE];

	ASSERT(head_blk != tail_blk);

@@ -3978,8 +3913,7 @@ xlog_recover_finish(
		 * case the unlink transactions would have problems
		 * pushing the EFIs out of the way.
		 */
		xfs_log_force(log->l_mp, (xfs_lsn_t)0,
			      (XFS_LOG_FORCE | XFS_LOG_SYNC));
		xfs_log_force(log->l_mp, XFS_LOG_SYNC);

		xlog_recover_process_iunlinks(log);

@@ -35,22 +35,21 @@
 * item headers are in ri_buf[0].  Additional buffers follow.
 */
typedef struct xlog_recover_item {
	struct xlog_recover_item *ri_next;
	struct xlog_recover_item *ri_prev;
	int			ri_type;
	int			ri_cnt;		/* count of regions found */
	int			ri_total;	/* total regions */
	xfs_log_iovec_t		*ri_buf;	/* ptr to regions buffer */
	struct list_head	ri_list;
	int			ri_type;
	int			ri_cnt;		/* count of regions found */
	int			ri_total;	/* total regions */
	xfs_log_iovec_t		*ri_buf;	/* ptr to regions buffer */
} xlog_recover_item_t;

struct xlog_tid;
typedef struct xlog_recover {
	struct xlog_recover	*r_next;
	xlog_tid_t		r_log_tid;	/* log's transaction id */
	xfs_trans_header_t	r_theader;	/* trans header for partial */
	int			r_state;	/* not needed */
	xfs_lsn_t		r_lsn;		/* xact lsn */
	xlog_recover_item_t	*r_itemq;	/* q for items */
	struct hlist_node	r_list;
	xlog_tid_t		r_log_tid;	/* log's transaction id */
	xfs_trans_header_t	r_theader;	/* trans header for partial */
	int			r_state;	/* not needed */
	xfs_lsn_t		r_lsn;		/* xact lsn */
	struct list_head	r_itemq;	/* q for items */
} xlog_recover_t;

#define ITEM_TYPE(i)	(*(ushort *)(i)->ri_buf[0].i_addr)

@@ -200,6 +200,38 @@ xfs_uuid_unmount(
}


/*
 * Reference counting access wrappers to the perag structures.
 */
struct xfs_perag *
xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
{
	struct xfs_perag	*pag;
	int			ref = 0;

	spin_lock(&mp->m_perag_lock);
	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
	if (pag) {
		ASSERT(atomic_read(&pag->pag_ref) >= 0);
		/* catch leaks in the positive direction during testing */
		ASSERT(atomic_read(&pag->pag_ref) < 1000);
		ref = atomic_inc_return(&pag->pag_ref);
	}
	spin_unlock(&mp->m_perag_lock);
	trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
	return pag;
}

void
xfs_perag_put(struct xfs_perag *pag)
{
	int	ref;

	ASSERT(atomic_read(&pag->pag_ref) > 0);
	ref = atomic_dec_return(&pag->pag_ref);
	trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
}
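A sketch of the caller-side pattern these wrappers establish (illustrative; mp and agno assumed in scope). Every xfs_perag_get() must be balanced by an xfs_perag_put() once the caller is done with the structure:

	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, agno);	/* reference taken under m_perag_lock */
	if (pag) {
		/* ... read or update pag->pagi_* / pag->pagf_* state ... */
		xfs_perag_put(pag);	/* drop the reference when done */
	}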

/*
 * Free up the resources associated with a mount structure.  Assume that
 * the structure was initially zeroed, so we can tell which fields got

@@ -209,13 +241,16 @@ STATIC void
xfs_free_perag(
	xfs_mount_t	*mp)
{
	if (mp->m_perag) {
		int	agno;
	xfs_agnumber_t	agno;
	struct xfs_perag *pag;

		for (agno = 0; agno < mp->m_maxagi; agno++)
			if (mp->m_perag[agno].pagb_list)
				kmem_free(mp->m_perag[agno].pagb_list);
		kmem_free(mp->m_perag);
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		ASSERT(pag);
		ASSERT(atomic_read(&pag->pag_ref) == 0);
		spin_unlock(&mp->m_perag_lock);
		kmem_free(pag);
	}
	}

@@ -389,22 +424,57 @@ xfs_initialize_perag_icache(
	}
}

xfs_agnumber_t
int
xfs_initialize_perag(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agcount)
	xfs_agnumber_t	agcount,
	xfs_agnumber_t	*maxagi)
{
	xfs_agnumber_t	index, max_metadata;
	xfs_agnumber_t	first_initialised = 0;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;
	xfs_ino_t	ino;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_ino_t	max_inum = XFS_MAXINUMBER_32;
	int		error = -ENOMEM;

	/* Check to see if the filesystem can overflow 32 bit inodes */
	agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}
		if (!first_initialised)
			first_initialised = index;
		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
		if (!pag)
			goto out_unwind;
		if (radix_tree_preload(GFP_NOFS))
			goto out_unwind;
		spin_lock(&mp->m_perag_lock);
		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
			BUG();
			spin_unlock(&mp->m_perag_lock);
			radix_tree_preload_end();
			error = -EEXIST;
			goto out_unwind;
		}
		pag->pag_agno = index;
		pag->pag_mount = mp;
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();
	}

	/* Clear the mount flag if no inode can overflow 32 bits
	 * on this filesystem, or if specifically requested..
	 */

@@ -438,21 +508,33 @@ xfs_initialize_perag(
		}

			/* This ag is preferred for inodes */
			pag = &mp->m_perag[index];
			pag = xfs_perag_get(mp, index);
			pag->pagi_inodeok = 1;
			if (index < max_metadata)
				pag->pagf_metadata = 1;
			xfs_initialize_perag_icache(pag);
			xfs_perag_put(pag);
		}
	} else {
		/* Setup default behavior for smaller filesystems */
		for (index = 0; index < agcount; index++) {
			pag = &mp->m_perag[index];
			pag = xfs_perag_get(mp, index);
			pag->pagi_inodeok = 1;
			xfs_initialize_perag_icache(pag);
			xfs_perag_put(pag);
		}
	}
	return index;
	if (maxagi)
		*maxagi = index;
	return 0;

out_unwind:
	kmem_free(pag);
	for (; index > first_initialised; index--) {
		pag = radix_tree_delete(&mp->m_perag_tree, index);
		kmem_free(pag);
	}
	return error;
}
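A design note on the insert loop above, restated in isolation (a sketch, not additional committed code): radix_tree_preload() pre-allocates tree nodes with GFP_NOFS so that the radix_tree_insert() that follows cannot sleep while m_perag_lock is held, and radix_tree_preload_end() must run on every path out of the preloaded region:

	if (radix_tree_preload(GFP_NOFS))
		goto out_unwind;		/* could not reserve nodes */
	spin_lock(&mp->m_perag_lock);
	if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
		/* index already present */
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();
		goto out_unwind;
	}
	spin_unlock(&mp->m_perag_lock);
	radix_tree_preload_end();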

void

@@ -583,7 +665,7 @@ xfs_readsb(xfs_mount_t *mp, int flags)
	 * access to the superblock.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
	extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED;
	extra_flags = XBF_LOCK | XBF_FS_MANAGED | XBF_MAPPED;

	bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, BTOBB(sector_size),
			  extra_flags);

@@ -731,12 +813,13 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
		error = xfs_ialloc_pagi_init(mp, NULL, index);
		if (error)
			return error;
		pag = &mp->m_perag[index];
		pag = xfs_perag_get(mp, index);
		ifree += pag->pagi_freecount;
		ialloc += pag->pagi_count;
		bfree += pag->pagf_freeblks;
		bfreelst += pag->pagf_flcount;
		btree += pag->pagf_btreeblks;
		xfs_perag_put(pag);
	}
	/*
	 * Overwrite incore superblock counters with just-read data

@@ -1008,6 +1091,22 @@ xfs_mount_reset_sbqflags(
	return xfs_trans_commit(tp, 0);
}

__uint64_t
xfs_default_resblks(xfs_mount_t *mp)
{
	__uint64_t resblks;

	/*
	 * We default to 5% or 1024 fsbs of space reserved, whichever is smaller.
	 * This may drive us straight to ENOSPC on mount, but that implies
	 * we were already there on the last unmount. Warn if this occurs.
	 */
	resblks = mp->m_sb.sb_dblocks;
	do_div(resblks, 20);
	resblks = min_t(__uint64_t, resblks, 1024);
	return resblks;
}
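Worked example of the reservation default (numbers illustrative): with sb_dblocks = 2,621,440 (10 GiB of 4 KiB blocks), 5% works out to 2,621,440 / 20 = 131,072 blocks, which min_t() then caps at 1,024. Only a filesystem smaller than 20,480 blocks ends up reserving less than the 1,024-block cap.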

/*
 * This function does the following on an initial mount of a file system:
 *	- reads the superblock from disk and init the mount struct

@@ -1152,13 +1251,13 @@ xfs_mountfs(
	/*
	 * Allocate and initialize the per-ag data.
	 */
	init_rwsem(&mp->m_peraglock);
	mp->m_perag = kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t),
				  KM_MAYFAIL);
	if (!mp->m_perag)
	spin_lock_init(&mp->m_perag_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_NOFS);
	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		cmn_err(CE_WARN, "XFS: Failed per-ag init: %d", error);
		goto out_remove_uuid;

	mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount);
	}

	if (!sbp->sb_logblocks) {
		cmn_err(CE_WARN, "XFS: no log defined");

@@ -1318,18 +1417,14 @@ xfs_mountfs(
	 * when at ENOSPC. This is needed for operations like create with
	 * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
	 * are not allowed to use this reserved space.
	 *
	 * We default to 5% or 1024 fsbs of space reserved, whichever is smaller.
	 * This may drive us straight to ENOSPC on mount, but that implies
	 * we were already there on the last unmount. Warn if this occurs.
	 */
	resblks = mp->m_sb.sb_dblocks;
	do_div(resblks, 20);
	resblks = min_t(__uint64_t, resblks, 1024);
	error = xfs_reserve_blocks(mp, &resblks, NULL);
	if (error)
		cmn_err(CE_WARN, "XFS: Unable to allocate reserve blocks. "
				"Continuing without a reserve pool.");
	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		resblks = xfs_default_resblks(mp);
		error = xfs_reserve_blocks(mp, &resblks, NULL);
		if (error)
			cmn_err(CE_WARN, "XFS: Unable to allocate reserve "
				"blocks. Continuing without a reserve pool.");
	}

	return 0;

@@ -1372,8 +1467,19 @@ xfs_unmountfs(
	 * push out the iclog we will never get that unlocked. Hence we
	 * need to force the log first.
	 */
	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
	xfs_reclaim_inodes(mp, XFS_IFLUSH_ASYNC);
	xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * Do a delwri reclaim pass first so that as many dirty inodes are
	 * queued up for IO as possible. Then flush the buffers before making
	 * a synchronous pass to ensure all the remaining inodes are reclaimed.
	 * This makes the reclaim process as quick as possible by avoiding
	 * synchronous writeout and blocking on inodes already in the delwri
	 * state as much as possible.
	 */
	xfs_reclaim_inodes(mp, 0);
	XFS_bflush(mp->m_ddev_targp);
	xfs_reclaim_inodes(mp, SYNC_WAIT);

	xfs_qm_unmount(mp);

@@ -1382,7 +1488,7 @@ xfs_unmountfs(
	 * that nothing is pinned.  This is important because bflush()
	 * will skip pinned buffers.
	 */
	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
	xfs_log_force(mp, XFS_LOG_SYNC);

	xfs_binval(mp->m_ddev_targp);
	if (mp->m_rtdev_targp) {
@@ -1548,15 +1654,14 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
	xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields);

	/* find modified range */
	f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
	last = xfs_sb_info[f + 1].offset - 1;

	f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
	first = xfs_sb_info[f].offset;

	f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
	last = xfs_sb_info[f + 1].offset - 1;

	xfs_trans_log_buf(tp, bp, first, last);
}

@@ -1887,7 +1992,7 @@ xfs_getsb(

	ASSERT(mp->m_sb_bp != NULL);
	bp = mp->m_sb_bp;
	if (flags & XFS_BUF_TRYLOCK) {
	if (flags & XBF_TRYLOCK) {
		if (!XFS_BUF_CPSEMA(bp)) {
			return NULL;
		}

@@ -78,7 +78,8 @@ typedef int (*xfs_send_destroy_t)(struct xfs_inode *, dm_right_t);
typedef int (*xfs_send_namesp_t)(dm_eventtype_t, struct xfs_mount *,
			struct xfs_inode *, dm_right_t,
			struct xfs_inode *, dm_right_t,
			const char *, const char *, mode_t, int, int);
			const unsigned char *, const unsigned char *,
			mode_t, int, int);
typedef int (*xfs_send_mount_t)(struct xfs_mount *, dm_right_t,
			char *, char *);
typedef void (*xfs_send_unmount_t)(struct xfs_mount *, struct xfs_inode *,

@@ -207,8 +208,8 @@ typedef struct xfs_mount {
	uint			m_ag_maxlevels;	/* XFS_AG_MAXLEVELS */
	uint			m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */
	uint			m_in_maxlevels;	/* max inobt btree levels. */
	struct xfs_perag	*m_perag;	/* per-ag accounting info */
	struct rw_semaphore	m_peraglock;	/* lock for m_perag (pointer) */
	struct radix_tree_root	m_perag_tree;	/* per-ag accounting info */
	spinlock_t		m_perag_lock;	/* lock for m_perag_tree */
	struct mutex		m_growlock;	/* growfs mutex */
	int			m_fixedfsid[2];	/* unchanged for life of FS */
	uint			m_dmevmask;	/* DMI events for this FS */

@@ -224,6 +225,7 @@ typedef struct xfs_mount {
	__uint64_t		m_maxioffset;	/* maximum inode offset */
	__uint64_t		m_resblks;	/* total reserved blocks */
	__uint64_t		m_resblks_avail;/* available reserved blocks */
	__uint64_t		m_resblks_save;	/* reserved blks @ remount,ro */
	int			m_dalign;	/* stripe unit */
	int			m_swidth;	/* stripe width */
	int			m_sinoalign;	/* stripe unit inode alignment */

@@ -384,19 +386,10 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
}

/*
 * perag get/put wrappers for eventual ref counting
 * perag get/put wrappers for ref counting
 */
static inline xfs_perag_t *
xfs_get_perag(struct xfs_mount *mp, xfs_ino_t ino)
{
	return &mp->m_perag[XFS_INO_TO_AGNO(mp, ino)];
}

static inline void
xfs_put_perag(struct xfs_mount *mp, xfs_perag_t *pag)
{
	/* nothing to see here, move along */
}
struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno);
void	xfs_perag_put(struct xfs_perag *pag);

/*
 * Per-cpu superblock locking functions

@@ -428,6 +421,7 @@ typedef struct xfs_mod_sb {
} xfs_mod_sb_t;

extern int	xfs_log_sbcount(xfs_mount_t *, uint);
extern __uint64_t xfs_default_resblks(xfs_mount_t *mp);
extern int	xfs_mountfs(xfs_mount_t *mp);

extern void	xfs_unmountfs(xfs_mount_t *);

@@ -450,7 +444,8 @@ extern struct xfs_dmops xfs_dmcore_xfs;
#endif	/* __KERNEL__ */

extern void	xfs_mod_sb(struct xfs_trans *, __int64_t);
extern xfs_agnumber_t	xfs_initialize_perag(struct xfs_mount *, xfs_agnumber_t);
extern int	xfs_initialize_perag(struct xfs_mount *, xfs_agnumber_t,
					xfs_agnumber_t *);
extern void	xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *);
extern void	xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t);

@@ -398,7 +398,7 @@ exit:
 * guaranteed that all the free functions for all the elements have finished
 * executing and the reaper is not running.
 */
void
static void
xfs_mru_cache_flush(
	xfs_mru_cache_t		*mru)
{

@@ -42,7 +42,6 @@ void xfs_mru_cache_uninit(void);
int xfs_mru_cache_create(struct xfs_mru_cache **mrup, unsigned int lifetime_ms,
			     unsigned int grp_count,
			     xfs_mru_cache_free_func_t free_func);
void xfs_mru_cache_flush(xfs_mru_cache_t *mru);
void xfs_mru_cache_destroy(struct xfs_mru_cache *mru);
int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key,
				void *value);

@@ -222,17 +222,10 @@ typedef struct xfs_qoff_logformat {
#define XFS_QMOPT_DELRTBCOUNT	0x0400000
#define XFS_QMOPT_RES_INOS	0x0800000

/*
 * flags for dqflush and dqflush_all.
 */
#define XFS_QMOPT_SYNC		0x1000000
#define XFS_QMOPT_ASYNC		0x2000000
#define XFS_QMOPT_DELWRI	0x4000000

/*
 * flags for dqalloc.
 */
#define XFS_QMOPT_INHERIT	0x8000000
#define XFS_QMOPT_INHERIT	0x1000000

/*
 * flags to xfs_trans_mod_dquot.

fs/xfs/xfs_rw.c
@@ -46,48 +46,6 @@
#include "xfs_rw.h"
#include "xfs_trace.h"

/*
 * This is a subroutine for xfs_write() and other writers (xfs_ioctl)
 * which clears the setuid and setgid bits when a file is written.
 */
int
xfs_write_clear_setuid(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_trans_t	*tp;
	int		error;

	mp = ip->i_mount;
	tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
	if ((error = xfs_trans_reserve(tp, 0,
				      XFS_WRITEID_LOG_RES(mp),
				      0, 0, 0))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	ip->i_d.di_mode &= ~S_ISUID;

	/*
	 * Note that we don't have to worry about mandatory
	 * file locking being disabled here because we only
	 * clear the S_ISGID bit if the Group execute bit is
	 * on, but if it was on then mandatory locking wouldn't
	 * have been enabled.
	 */
	if (ip->i_d.di_mode & S_IXGRP) {
		ip->i_d.di_mode &= ~S_ISGID;
	}
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}

/*
 * Force a shutdown of the filesystem instantly while keeping
 * the filesystem consistent. We don't do an unmount here; just shutdown

@@ -153,88 +111,6 @@ xfs_do_force_shutdown(
	}
}


/*
 * Called when we want to stop a buffer from getting written or read.
 * We attach the EIO error, muck with its flags, and call biodone
 * so that the proper iodone callbacks get called.
 */
int
xfs_bioerror(
	xfs_buf_t *bp)
{

#ifdef XFSERRORDEBUG
	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
#endif

	/*
	 * No need to wait until the buffer is unpinned.
	 * We aren't flushing it.
	 */
	XFS_BUF_ERROR(bp, EIO);
	/*
	 * We're calling biodone, so delete B_DONE flag. Either way
	 * we have to call the iodone callback, and calling biodone
	 * probably is the best way since it takes care of
	 * GRIO as well.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDELAYWRITE(bp);
	XFS_BUF_UNDONE(bp);
	XFS_BUF_STALE(bp);

	XFS_BUF_CLR_BDSTRAT_FUNC(bp);
	xfs_biodone(bp);

	return (EIO);
}

/*
 * Same as xfs_bioerror, except that we are releasing the buffer
 * here ourselves, and avoiding the biodone call.
 * This is meant for userdata errors; metadata bufs come with
 * iodone functions attached, so that we can track down errors.
 */
int
xfs_bioerror_relse(
	xfs_buf_t *bp)
{
	int64_t fl;

	ASSERT(XFS_BUF_IODONE_FUNC(bp) != xfs_buf_iodone_callbacks);
	ASSERT(XFS_BUF_IODONE_FUNC(bp) != xlog_iodone);

	fl = XFS_BUF_BFLAGS(bp);
	/*
	 * No need to wait until the buffer is unpinned.
	 * We aren't flushing it.
	 *
	 * chunkhold expects B_DONE to be set, whether
	 * we actually finish the I/O or not. We don't want to
	 * change that interface.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDELAYWRITE(bp);
	XFS_BUF_DONE(bp);
	XFS_BUF_STALE(bp);
	XFS_BUF_CLR_IODONE_FUNC(bp);
	XFS_BUF_CLR_BDSTRAT_FUNC(bp);
	if (!(fl & XFS_B_ASYNC)) {
		/*
		 * Mark b_error and B_ERROR _both_.
		 * Lots of chunkcache code assumes that.
		 * There's no reason to mark error for
		 * ASYNC buffers.
		 */
		XFS_BUF_ERROR(bp, EIO);
		XFS_BUF_FINISH_IOWAIT(bp);
	} else {
		xfs_buf_relse(bp);
	}
	return (EIO);
}

/*
 * Prints out an ALERT message about I/O error.
 */
@@ -305,37 +181,6 @@ xfs_read_buf(
	return (error);
}

/*
 * Wrapper around bwrite() so that we can trap
 * write errors, and act accordingly.
 */
int
xfs_bwrite(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	int	error;

	/*
	 * XXXsup how does this work for quotas.
	 */
	XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb);
	bp->b_mount = mp;
	XFS_BUF_WRITE(bp);

	if ((error = XFS_bwrite(bp))) {
		ASSERT(mp);
		/*
		 * Cannot put a buftrace here since if the buffer is not
		 * B_HOLD then we will brelse() the buffer before returning
		 * from bwrite and we could be tracing a buffer that has
		 * been reused.
		 */
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
	}
	return (error);
}

/*
 * helper function to extract extent size hint from inode
 */

@@ -39,10 +39,6 @@ xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
/*
 * Prototypes for functions in xfs_rw.c.
 */
extern int xfs_write_clear_setuid(struct xfs_inode *ip);
extern int xfs_bwrite(struct xfs_mount *mp, struct xfs_buf *bp);
extern int xfs_bioerror(struct xfs_buf *bp);
extern int xfs_bioerror_relse(struct xfs_buf *bp);
extern int xfs_read_buf(struct xfs_mount *mp, xfs_buftarg_t *btp,
			xfs_daddr_t blkno, int len, uint flags,
			struct xfs_buf **bpp);

@@ -981,9 +981,8 @@ shut_us_down:
	 */
	if (sync) {
		if (!error) {
			error = _xfs_log_force(mp, commit_lsn,
				      XFS_LOG_FORCE | XFS_LOG_SYNC,
				      log_flushed);
			error = _xfs_log_force_lsn(mp, commit_lsn,
				      XFS_LOG_SYNC, log_flushed);
		}
		XFS_STATS_INC(xs_trans_sync);
	} else {

@@ -1121,7 +1120,7 @@ xfs_trans_fill_vecs(
	tp->t_header.th_num_items = nitems;
	log_vector->i_addr = (xfs_caddr_t)&tp->t_header;
	log_vector->i_len = sizeof(xfs_trans_header_t);
	XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_TRANSHDR);
	log_vector->i_type = XLOG_REG_TYPE_TRANSHDR;
}


@@ -861,8 +861,7 @@ typedef struct xfs_item_ops {
#define XFS_ITEM_SUCCESS	0
#define XFS_ITEM_PINNED		1
#define XFS_ITEM_LOCKED		2
#define XFS_ITEM_FLUSHING	3
#define XFS_ITEM_PUSHBUF	4
#define XFS_ITEM_PUSHBUF	3

/*
 * This structure is used to maintain a list of block ranges that have been
@@ -237,14 +237,15 @@ out:
}

/*
 * Function that does the work of pushing on the AIL
 * xfsaild_push does the work of pushing on the AIL. Returning a timeout of
 * zero indicates that the caller should sleep until woken.
 */
long
xfsaild_push(
	struct xfs_ail	*ailp,
	xfs_lsn_t	*last_lsn)
{
	long		tout = 1000;	/* milliseconds */
	long		tout = 0;
	xfs_lsn_t	last_pushed_lsn = *last_lsn;
	xfs_lsn_t	target = ailp->xa_target;
	xfs_lsn_t	lsn;

@@ -252,6 +253,7 @@ xfsaild_push(
	int		flush_log, count, stuck;
	xfs_mount_t	*mp = ailp->xa_mount;
	struct xfs_ail_cursor	*cur = &ailp->xa_cursors;
	int		push_xfsbufd = 0;

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_init(ailp, cur);

@@ -262,7 +264,7 @@ xfsaild_push(
		 */
		xfs_trans_ail_cursor_done(ailp, cur);
		spin_unlock(&ailp->xa_lock);
		last_pushed_lsn = 0;
		*last_lsn = 0;
		return tout;
	}

@@ -279,7 +281,6 @@ xfsaild_push(
	 * prevents us from spinning when we can't do anything or there is
	 * lots of contention on the AIL lists.
	 */
	tout = 10;
	lsn = lip->li_lsn;
	flush_log = stuck = count = 0;
	while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) {

@@ -308,6 +309,7 @@ xfsaild_push(
			XFS_STATS_INC(xs_push_ail_pushbuf);
			IOP_PUSHBUF(lip);
			last_pushed_lsn = lsn;
			push_xfsbufd = 1;
			break;

		case XFS_ITEM_PINNED:

@@ -322,12 +324,6 @@ xfsaild_push(
			stuck++;
			break;

		case XFS_ITEM_FLUSHING:
			XFS_STATS_INC(xs_push_ail_flushing);
			last_pushed_lsn = lsn;
			stuck++;
			break;

		default:
			ASSERT(0);
			break;

@@ -371,19 +367,24 @@ xfsaild_push(
		 * move forward in the AIL.
		 */
		XFS_STATS_INC(xs_push_ail_flush);
		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
		xfs_log_force(mp, 0);
	}

	if (push_xfsbufd) {
		/* we've got delayed write buffers to flush */
		wake_up_process(mp->m_ddev_targp->bt_task);
	}

	if (!count) {
		/* We're past our target or empty, so idle */
		tout = 1000;
		last_pushed_lsn = 0;
	} else if (XFS_LSN_CMP(lsn, target) >= 0) {
		/*
		 * We reached the target so wait a bit longer for I/O to
		 * complete and remove pushed items from the AIL before we
		 * start the next scan from the start of the AIL.
		 */
		tout += 20;
		tout = 50;
		last_pushed_lsn = 0;
	} else if ((stuck * 100) / count > 90) {
		/*

@@ -395,11 +396,14 @@ xfsaild_push(
		 * Backoff a bit more to allow some I/O to complete before
		 * continuing from where we were.
		 */
		tout += 10;
		tout = 20;
	} else {
		/* more to do, but wait a short while before continuing */
		tout = 10;
	}
	*last_lsn = last_pushed_lsn;
	return tout;
}	/* xfsaild_push */
}
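Sketch of the caller-side contract implied by the new return values (the xfsaild thread itself is not part of this hunk; the loop below is an illustrative assumption, not the committed implementation). A return of 0 means sleep until woken; otherwise sleep for the returned number of milliseconds (1000 when idle, 50 after reaching the target, 20 when more than 90% of pushed items were stuck, 10 otherwise):

	/* hypothetical xfsaild main loop, for illustration only */
	long		tout;
	xfs_lsn_t	last_pushed_lsn = 0;

	while (!kthread_should_stop()) {
		tout = xfsaild_push(ailp, &last_pushed_lsn);
		set_current_state(TASK_INTERRUPTIBLE);
		if (tout)
			schedule_timeout(msecs_to_jiffies(tout));
		else
			schedule();	/* tout == 0: sleep until woken */
	}
	__set_current_state(TASK_RUNNING);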

/*

@@ -75,13 +75,14 @@ xfs_trans_get_buf(xfs_trans_t *tp,
	xfs_buf_log_item_t	*bip;

	if (flags == 0)
		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;
		flags = XBF_LOCK | XBF_MAPPED;

	/*
	 * Default to a normal get_buf() call if the tp is NULL.
	 */
	if (tp == NULL)
		return xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY);
		return xfs_buf_get(target_dev, blkno, len,
				   flags | XBF_DONT_BLOCK);

	/*
	 * If we find the buffer in the cache with this transaction

@@ -117,14 +118,14 @@ xfs_trans_get_buf(xfs_trans_t *tp,
	}

	/*
	 * We always specify the BUF_BUSY flag within a transaction so
	 * that get_buf does not try to push out a delayed write buffer
	 * We always specify the XBF_DONT_BLOCK flag within a transaction
	 * so that get_buf does not try to push out a delayed write buffer
	 * which might cause another transaction to take place (if the
	 * buffer was delayed alloc).  Such recursive transactions can
	 * easily deadlock with our current transaction as well as cause
	 * us to run out of stack space.
	 */
	bp = xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY);
	bp = xfs_buf_get(target_dev, blkno, len, flags | XBF_DONT_BLOCK);
	if (bp == NULL) {
		return NULL;
	}

@@ -290,15 +291,15 @@ xfs_trans_read_buf(
	int			error;

	if (flags == 0)
		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;
		flags = XBF_LOCK | XBF_MAPPED;

	/*
	 * Default to a normal get_buf() call if the tp is NULL.
	 */
	if (tp == NULL) {
		bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY);
		bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
		if (!bp)
			return (flags & XFS_BUF_TRYLOCK) ?
			return (flags & XBF_TRYLOCK) ?
					EAGAIN : XFS_ERROR(ENOMEM);

		if (XFS_BUF_GETERROR(bp) != 0) {

@@ -385,14 +386,14 @@ xfs_trans_read_buf(
	}

	/*
	 * We always specify the BUF_BUSY flag within a transaction so
	 * that get_buf does not try to push out a delayed write buffer
	 * We always specify the XBF_DONT_BLOCK flag within a transaction
	 * so that get_buf does not try to push out a delayed write buffer
	 * which might cause another transaction to take place (if the
	 * buffer was delayed alloc).  Such recursive transactions can
	 * easily deadlock with our current transaction as well as cause
	 * us to run out of stack space.
	 */
	bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY);
	bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
	if (bp == NULL) {
		*bpp = NULL;
		return 0;

@@ -472,8 +473,8 @@ shutdown_abort:
	if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
		cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp);
#endif
	ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
						(XFS_B_STALE|XFS_B_DELWRI));
	ASSERT((XFS_BUF_BFLAGS(bp) & (XBF_STALE|XBF_DELWRI)) !=
						(XBF_STALE|XBF_DELWRI));

	trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
	xfs_buf_relse(bp);
@@ -151,8 +151,8 @@ typedef enum {
} xfs_btnum_t;

struct xfs_name {
	const char		*name;
	int			len;
	const unsigned char	*name;
	int			len;
};

#endif	/* __XFS_TYPES_H__ */

@@ -256,7 +256,7 @@ xfs_setattr(
		    iattr->ia_size > ip->i_d.di_size) {
			code = xfs_flush_pages(ip,
					ip->i_d.di_size, iattr->ia_size,
					XFS_B_ASYNC, FI_NONE);
					XBF_ASYNC, FI_NONE);
		}

		/* wait for all I/O to complete */

@@ -597,7 +597,7 @@ xfs_fsync(
{
	xfs_trans_t	*tp;
	int		error = 0;
	int		log_flushed = 0, changed = 1;
	int		log_flushed = 0;

	xfs_itrace_entry(ip);

@@ -627,19 +627,16 @@ xfs_fsync(
		 * disk yet, the inode will still be pinned. If it is,
		 * force the log.
		 */

		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		if (xfs_ipincount(ip)) {
			error = _xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
				      XFS_LOG_FORCE | XFS_LOG_SYNC,
				      &log_flushed);
		} else {
			/*
			 * If the inode is not pinned and nothing has changed
			 * we don't need to flush the cache.
			 */
			changed = 0;
			if (ip->i_itemp->ili_last_lsn) {
				error = _xfs_log_force_lsn(ip->i_mount,
						ip->i_itemp->ili_last_lsn,
						XFS_LOG_SYNC, &log_flushed);
			} else {
				error = _xfs_log_force(ip->i_mount,
						XFS_LOG_SYNC, &log_flushed);
			}
		}
	} else {
		/*

@@ -674,7 +671,7 @@ xfs_fsync(
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	if ((ip->i_mount->m_flags & XFS_MOUNT_BARRIER) && changed) {
	if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If the log write didn't issue an ordered tag we need
		 * to flush the disk cache for the data device now.

@@ -1096,7 +1093,7 @@ xfs_release(
		 */
		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
		if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
			xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE);
			xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
	}

	if (ip->i_d.di_nlink != 0) {

@@ -2199,7 +2196,8 @@ xfs_symlink(
	if (DM_EVENT_ENABLED(dp, DM_EVENT_SYMLINK)) {
		error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dp,
					DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,
					link_name->name, target_path, 0, 0, 0);
					link_name->name,
					(unsigned char *)target_path, 0, 0, 0);
		if (error)
			return error;
	}

@@ -2395,7 +2393,8 @@ std_return:
					dp, DM_RIGHT_NULL,
					error ? NULL : ip,
					DM_RIGHT_NULL, link_name->name,
					target_path, 0, error, 0);
					(unsigned char *)target_path,
					0, error, 0);
	}

	if (!error)

@@ -43,11 +43,11 @@ int xfs_change_file_space(struct xfs_inode *ip, int cmd,
int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
		struct xfs_inode *src_ip, struct xfs_inode *target_dp,
		struct xfs_name *target_name, struct xfs_inode *target_ip);
int xfs_attr_get(struct xfs_inode *ip, const char *name, char *value,
		 int *valuelenp, int flags);
int xfs_attr_set(struct xfs_inode *dp, const char *name, char *value,
		 int valuelen, int flags);
int xfs_attr_remove(struct xfs_inode *dp, const char *name, int flags);
int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
		 unsigned char *value, int *valuelenp, int flags);
int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
		 unsigned char *value, int valuelen, int flags);
int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
		int flags, struct attrlist_cursor_kern *cursor);
ssize_t xfs_read(struct xfs_inode *ip, struct kiocb *iocb,