Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs
* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  xfs: obey minleft values during extent allocation correctly
  xfs: reset buffer pointers before freeing them
  xfs: avoid getting stuck during async inode flushes
  xfs: fix xfs_itruncate_start tracing
  xfs: fix duplicate workqueue initialisation
  xfs: kill off xfs_printk()
  xfs: fix race condition in AIL push trigger
  xfs: make AIL target updates and compares 32bit safe.
  xfs: always push the AIL to the target
  xfs: exit AIL push work correctly when AIL is empty
  xfs: ensure reclaim cursor is reset correctly at end of AG
  xfs: add an x86 compat handler for XFS_IOC_ZERO_RANGE
  xfs: fix compiler warning in xfs_trace.h
  xfs: cleanup duplicate initializations
  xfs: reduce the number of pagb_lock roundtrips in xfs_alloc_clear_busy
  xfs: exact busy extent tracking
  xfs: do not immediately reuse busy extent ranges
  xfs: optimize AGFL refills
Commit a77febbef1
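Several of the busy-extent commits in this merge (most visibly "xfs: reduce the number of pagb_lock roundtrips in xfs_alloc_clear_busy") stop clearing busy extents one entry at a time and instead sort the transaction's busy list by allocation group before clearing it, so the per-AG lock only needs to be taken once per group (see the new xfs_alloc_busy_sort() helper and the xfs_trans_free() and xlog_cil_committed() hunks below). The following is only a rough, self-contained illustration of that sort-then-batch pattern with made-up, simplified types, not the kernel code; the kernel sorts a linked list with list_sort() and its comparator orders by AG number.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct xfs_busy_extent: AG number, block, length. */
struct busy_ext {
    unsigned int agno;
    unsigned int bno;
    unsigned int len;
};

/* Order by AG first (as the kernel comparator does); bno is only a tie-break
 * here so the demo output is deterministic. */
static int busy_cmp(const void *a, const void *b)
{
    const struct busy_ext *x = a, *y = b;

    if (x->agno != y->agno)
        return x->agno < y->agno ? -1 : 1;
    if (x->bno != y->bno)
        return x->bno < y->bno ? -1 : 1;
    return 0;
}

int main(void)
{
    struct busy_ext busy[] = {
        { 2, 100, 8 }, { 0, 50, 4 }, { 2, 10, 16 }, { 0, 900, 2 },
    };
    size_t i, n = sizeof(busy) / sizeof(busy[0]);

    qsort(busy, n, sizeof(busy[0]), busy_cmp);

    /* One "lock round trip" per AG: extents of the same AG are now adjacent. */
    for (i = 0; i < n; i++) {
        if (i == 0 || busy[i].agno != busy[i - 1].agno)
            printf("-- take per-AG lock for AG %u --\n", busy[i].agno);
        printf("clear busy extent agno %u bno %u len %u\n",
               busy[i].agno, busy[i].bno, busy[i].len);
    }
    return 0;
}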
@@ -33,7 +33,6 @@
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/list_sort.h>

#include "xfs_sb.h"
#include "xfs_inum.h"
@@ -709,6 +708,27 @@ xfs_buf_get_empty(
    return bp;
}

/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to it's empty state.
 */
void
xfs_buf_set_empty(
    struct xfs_buf *bp,
    size_t len)
{
    if (bp->b_pages)
        _xfs_buf_free_pages(bp);

    bp->b_pages = NULL;
    bp->b_page_count = 0;
    bp->b_addr = NULL;
    bp->b_file_offset = 0;
    bp->b_buffer_length = bp->b_count_desired = len;
    bp->b_bn = XFS_BUF_DADDR_NULL;
    bp->b_flags &= ~XBF_MAPPED;
}

static inline struct page *
mem_to_page(
    void *addr)
@@ -178,6 +178,7 @@ extern xfs_buf_t *xfs_buf_read(xfs_buftarg_t *, xfs_off_t, size_t,
    xfs_buf_flags_t);

extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *);
extern void xfs_buf_set_empty(struct xfs_buf *bp, size_t len);
extern xfs_buf_t *xfs_buf_get_uncached(struct xfs_buftarg *, size_t, int);
extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t);
extern void xfs_buf_hold(xfs_buf_t *);
@@ -586,7 +586,8 @@ xfs_file_compat_ioctl(
    case XFS_IOC_RESVSP_32:
    case XFS_IOC_UNRESVSP_32:
    case XFS_IOC_RESVSP64_32:
    case XFS_IOC_UNRESVSP64_32: {
    case XFS_IOC_UNRESVSP64_32:
    case XFS_IOC_ZERO_RANGE_32: {
        struct xfs_flock64 bf;

        if (xfs_compat_flock64_copyin(&bf, arg))
@@ -184,6 +184,7 @@ typedef struct compat_xfs_flock64 {
#define XFS_IOC_UNRESVSP_32 _IOW('X', 41, struct compat_xfs_flock64)
#define XFS_IOC_RESVSP64_32 _IOW('X', 42, struct compat_xfs_flock64)
#define XFS_IOC_UNRESVSP64_32 _IOW('X', 43, struct compat_xfs_flock64)
#define XFS_IOC_ZERO_RANGE_32 _IOW('X', 57, struct compat_xfs_flock64)

typedef struct compat_xfs_fsop_geom_v1 {
    __u32 blocksize; /* filesystem (data) block size */
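The two hunks above wire the new XFS_IOC_ZERO_RANGE_32 compat number into the existing xfs_flock64 ioctl path, so 32-bit programs running on a 64-bit kernel can zero a range as well. As a hedged usage sketch only (assuming XFS_IOC_ZERO_RANGE and struct xfs_flock64 come from the xfsprogs xfs/xfs.h header; adjust the include for your system), the native ioctl is driven from userspace roughly like this:

/*
 * Hedged sketch: zero a byte range on an XFS file via XFS_IOC_ZERO_RANGE.
 * Assumes <xfs/xfs.h> (xfsprogs) provides XFS_IOC_ZERO_RANGE and xfs_flock64.
 */
#include <xfs/xfs.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    struct xfs_flock64 fl = { 0 };
    int fd;

    if (argc != 2) {
        fprintf(stderr, "usage: %s <file-on-xfs>\n", argv[0]);
        return 1;
    }

    fd = open(argv[1], O_WRONLY);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    fl.l_whence = SEEK_SET;    /* l_start is an absolute file offset */
    fl.l_start = 0;            /* zero the first megabyte */
    fl.l_len = 1024 * 1024;

    if (ioctl(fd, XFS_IOC_ZERO_RANGE, &fl) < 0)
        perror("XFS_IOC_ZERO_RANGE");

    close(fd);
    return 0;
}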
@@ -70,6 +70,7 @@
#include <linux/ctype.h>
#include <linux/writeback.h>
#include <linux/capability.h>
#include <linux/list_sort.h>

#include <asm/page.h>
#include <asm/div64.h>
@@ -41,23 +41,6 @@ __xfs_printk(
    printk("%sXFS: %pV\n", level, vaf);
}

void xfs_printk(
    const char *level,
    const struct xfs_mount *mp,
    const char *fmt, ...)
{
    struct va_format vaf;
    va_list args;

    va_start(args, fmt);

    vaf.fmt = fmt;
    vaf.va = &args;

    __xfs_printk(level, mp, &vaf);
    va_end(args);
}

#define define_xfs_printk_level(func, kern_level) \
void func(const struct xfs_mount *mp, const char *fmt, ...) \
{ \
@@ -95,8 +78,7 @@ xfs_alert_tag(
    int do_panic = 0;

    if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) {
        xfs_printk(KERN_ALERT, mp,
            "XFS: Transforming an alert into a BUG.");
        xfs_alert(mp, "Transforming an alert into a BUG.");
        do_panic = 1;
    }

@@ -3,9 +3,6 @@

struct xfs_mount;

extern void xfs_printk(const char *level, const struct xfs_mount *mp,
    const char *fmt, ...)
    __attribute__ ((format (printf, 3, 4)));
extern void xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...)
    __attribute__ ((format (printf, 2, 3)));
extern void xfs_alert(const struct xfs_mount *mp, const char *fmt, ...)
@@ -28,7 +25,9 @@ extern void xfs_info(const struct xfs_mount *mp, const char *fmt, ...)
extern void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
    __attribute__ ((format (printf, 2, 3)));
#else
static inline void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
static inline void
__attribute__ ((format (printf, 2, 3)))
xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
{
}
#endif
@@ -1787,10 +1787,6 @@ init_xfs_fs(void)
    if (error)
        goto out_cleanup_procfs;

    error = xfs_init_workqueues();
    if (error)
        goto out_sysctl_unregister;

    vfs_initquota();

    error = register_filesystem(&xfs_fs_type);
@@ -267,6 +267,16 @@ xfs_sync_inode_attr(

    error = xfs_iflush(ip, flags);

    /*
     * We don't want to try again on non-blocking flushes that can't run
     * again immediately. If an inode really must be written, then that's
     * what the SYNC_WAIT flag is for.
     */
    if (error == EAGAIN) {
        ASSERT(!(flags & SYNC_WAIT));
        error = 0;
    }

out_unlock:
    xfs_iunlock(ip, XFS_ILOCK_SHARED);
    return error;
@@ -1151,44 +1151,7 @@ TRACE_EVENT(xfs_bunmap,

);

#define XFS_BUSY_SYNC \
    { 0, "async" }, \
    { 1, "sync" }

TRACE_EVENT(xfs_alloc_busy,
    TP_PROTO(struct xfs_trans *trans, xfs_agnumber_t agno,
        xfs_agblock_t agbno, xfs_extlen_t len, int sync),
    TP_ARGS(trans, agno, agbno, len, sync),
    TP_STRUCT__entry(
        __field(dev_t, dev)
        __field(struct xfs_trans *, tp)
        __field(int, tid)
        __field(xfs_agnumber_t, agno)
        __field(xfs_agblock_t, agbno)
        __field(xfs_extlen_t, len)
        __field(int, sync)
    ),
    TP_fast_assign(
        __entry->dev = trans->t_mountp->m_super->s_dev;
        __entry->tp = trans;
        __entry->tid = trans->t_ticket->t_tid;
        __entry->agno = agno;
        __entry->agbno = agbno;
        __entry->len = len;
        __entry->sync = sync;
    ),
    TP_printk("dev %d:%d trans 0x%p tid 0x%x agno %u agbno %u len %u %s",
        MAJOR(__entry->dev), MINOR(__entry->dev),
        __entry->tp,
        __entry->tid,
        __entry->agno,
        __entry->agbno,
        __entry->len,
        __print_symbolic(__entry->sync, XFS_BUSY_SYNC))

);

TRACE_EVENT(xfs_alloc_unbusy,
DECLARE_EVENT_CLASS(xfs_busy_class,
    TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
        xfs_agblock_t agbno, xfs_extlen_t len),
    TP_ARGS(mp, agno, agbno, len),
@@ -1210,35 +1173,45 @@ TRACE_EVENT(xfs_alloc_unbusy,
        __entry->agbno,
        __entry->len)
);
#define DEFINE_BUSY_EVENT(name) \
DEFINE_EVENT(xfs_busy_class, name, \
    TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
        xfs_agblock_t agbno, xfs_extlen_t len), \
    TP_ARGS(mp, agno, agbno, len))
DEFINE_BUSY_EVENT(xfs_alloc_busy);
DEFINE_BUSY_EVENT(xfs_alloc_busy_enomem);
DEFINE_BUSY_EVENT(xfs_alloc_busy_force);
DEFINE_BUSY_EVENT(xfs_alloc_busy_reuse);
DEFINE_BUSY_EVENT(xfs_alloc_busy_clear);

#define XFS_BUSY_STATES \
    { 0, "missing" }, \
    { 1, "found" }

TRACE_EVENT(xfs_alloc_busysearch,
TRACE_EVENT(xfs_alloc_busy_trim,
    TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
        xfs_agblock_t agbno, xfs_extlen_t len, int found),
    TP_ARGS(mp, agno, agbno, len, found),
        xfs_agblock_t agbno, xfs_extlen_t len,
        xfs_agblock_t tbno, xfs_extlen_t tlen),
    TP_ARGS(mp, agno, agbno, len, tbno, tlen),
    TP_STRUCT__entry(
        __field(dev_t, dev)
        __field(xfs_agnumber_t, agno)
        __field(xfs_agblock_t, agbno)
        __field(xfs_extlen_t, len)
        __field(int, found)
        __field(xfs_agblock_t, tbno)
        __field(xfs_extlen_t, tlen)
    ),
    TP_fast_assign(
        __entry->dev = mp->m_super->s_dev;
        __entry->agno = agno;
        __entry->agbno = agbno;
        __entry->len = len;
        __entry->found = found;
        __entry->tbno = tbno;
        __entry->tlen = tlen;
    ),
    TP_printk("dev %d:%d agno %u agbno %u len %u %s",
    TP_printk("dev %d:%d agno %u agbno %u len %u tbno %u tlen %u",
        MAJOR(__entry->dev), MINOR(__entry->dev),
        __entry->agno,
        __entry->agbno,
        __entry->len,
        __print_symbolic(__entry->found, XFS_BUSY_STATES))
        __entry->tbno,
        __entry->tlen)
);

TRACE_EVENT(xfs_trans_commit_lsn,
@@ -1418,7 +1391,7 @@ DECLARE_EVENT_CLASS(xfs_alloc_class,
        __entry->wasfromfl,
        __entry->isfl,
        __entry->userdata,
        __entry->firstblock)
        (unsigned long long)__entry->firstblock)
)

#define DEFINE_ALLOC_EVENT(name) \
@@ -1433,11 +1406,14 @@ DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
DEFINE_ALLOC_EVENT(xfs_alloc_near_greater);
DEFINE_ALLOC_EVENT(xfs_alloc_near_lesser);
DEFINE_ALLOC_EVENT(xfs_alloc_near_error);
DEFINE_ALLOC_EVENT(xfs_alloc_near_noentry);
DEFINE_ALLOC_EVENT(xfs_alloc_near_busy);
DEFINE_ALLOC_EVENT(xfs_alloc_size_neither);
DEFINE_ALLOC_EVENT(xfs_alloc_size_noentry);
DEFINE_ALLOC_EVENT(xfs_alloc_size_nominleft);
DEFINE_ALLOC_EVENT(xfs_alloc_size_done);
DEFINE_ALLOC_EVENT(xfs_alloc_size_error);
DEFINE_ALLOC_EVENT(xfs_alloc_size_busy);
DEFINE_ALLOC_EVENT(xfs_alloc_small_freelist);
DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough);
DEFINE_ALLOC_EVENT(xfs_alloc_small_done);
@@ -187,7 +187,6 @@ struct xfs_busy_extent {
    xfs_agnumber_t agno;
    xfs_agblock_t bno;
    xfs_extlen_t length;
    xlog_tid_t tid; /* transaction that created this */
};

/*
File diff suppressed because it is too large.
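The suppressed file diff is most likely the allocator side of the busy-extent rework summarized in the merge message ("xfs: exact busy extent tracking", "xfs: do not immediately reuse busy extent ranges"). Its effect is visible in the new xfs_alloc_busy_trim tracepoint earlier in this diff, which records a candidate extent (agbno, len) and the trimmed result (tbno, tlen) after the allocator steers around busy blocks. Purely as an illustration of that trimming idea, and not the kernel's xfs_alloc_busy_trim(), a single-range version might look like this:

#include <stdio.h>

/*
 * Illustration only: shrink a candidate free extent [bno, bno+len) so it does
 * not overlap a busy range [busy_bno, busy_bno+busy_len), keeping whichever
 * non-overlapping piece is larger. The kernel walks a per-AG tree of busy
 * extents and handles many more cases than this sketch.
 */
static void trim_against_busy(unsigned int bno, unsigned int len,
                              unsigned int busy_bno, unsigned int busy_len,
                              unsigned int *tbno, unsigned int *tlen)
{
    unsigned int end = bno + len;
    unsigned int busy_end = busy_bno + busy_len;
    unsigned int head, tail;

    if (busy_end <= bno || busy_bno >= end) {
        /* No overlap: the candidate is usable as-is. */
        *tbno = bno;
        *tlen = len;
        return;
    }

    /* Pieces left before and after the busy range. */
    head = busy_bno > bno ? busy_bno - bno : 0;
    tail = busy_end < end ? end - busy_end : 0;

    if (head >= tail) {
        *tbno = bno;
        *tlen = head;
    } else {
        *tbno = busy_end;
        *tlen = tail;
    }
}

int main(void)
{
    unsigned int tbno, tlen;

    /* Candidate blocks 100..199 overlap busy blocks 150..169. */
    trim_against_busy(100, 100, 150, 20, &tbno, &tlen);
    printf("trimmed to agbno %u len %u\n", tbno, tlen);   /* prints 100, 50 */
    return 0;
}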
@@ -140,11 +140,24 @@ xfs_alloc_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno,
    xfs_agblock_t bno, xfs_extlen_t len);

void
xfs_alloc_busy_clear(struct xfs_mount *mp, struct xfs_busy_extent *busyp);
xfs_alloc_busy_clear(struct xfs_mount *mp, struct list_head *list);

int
xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
    xfs_agblock_t bno, xfs_extlen_t len);

void
xfs_alloc_busy_reuse(struct xfs_mount *mp, xfs_agnumber_t agno,
    xfs_agblock_t fbno, xfs_extlen_t flen, bool userdata);

int
xfs_busy_extent_ag_cmp(void *priv, struct list_head *a, struct list_head *b);

static inline void xfs_alloc_busy_sort(struct list_head *list)
{
    list_sort(NULL, list, xfs_busy_extent_ag_cmp);
}

#endif /* __KERNEL__ */

/*
@@ -95,6 +95,8 @@ xfs_allocbt_alloc_block(
        return 0;
    }

    xfs_alloc_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false);

    xfs_trans_agbtree_delta(cur->bc_tp, 1);
    new->s = cpu_to_be32(bno);

@@ -118,17 +120,6 @@ xfs_allocbt_free_block(
    if (error)
        return error;

    /*
     * Since blocks move to the free list without the coordination used in
     * xfs_bmap_finish, we can't allow block to be available for
     * reallocation and non-transaction writing (user data) until we know
     * that the transaction that moved it to the free list is permanently
     * on disk. We track the blocks by declaring these blocks as "busy";
     * the busy list is maintained on a per-ag basis and each transaction
     * records which entries should be removed when the iclog commits to
     * disk. If a busy block is allocated, the iclog is pushed up to the
     * LSN that freed the block.
     */
    xfs_alloc_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1);
    xfs_trans_agbtree_delta(cur->bc_tp, -1);
    return 0;
@@ -202,7 +202,7 @@ xfs_swap_extents(
    xfs_inode_t *tip, /* tmp inode */
    xfs_swapext_t *sxp)
{
    xfs_mount_t *mp;
    xfs_mount_t *mp = ip->i_mount;
    xfs_trans_t *tp;
    xfs_bstat_t *sbp = &sxp->sx_stat;
    xfs_ifork_t *tempifp, *ifp, *tifp;

@@ -212,16 +212,12 @@ xfs_swap_extents(
    int taforkblks = 0;
    __uint64_t tmp;

    mp = ip->i_mount;

    tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
    if (!tempifp) {
        error = XFS_ERROR(ENOMEM);
        goto out;
    }

    sbp = &sxp->sx_stat;

    /*
     * we have to do two separate lock calls here to keep lockdep
     * happy. If we try to get all the locks in one call, lock will
@@ -1354,7 +1354,7 @@ xfs_itruncate_start(
        return 0;
    }
    last_byte = xfs_file_last_byte(ip);
    trace_xfs_itruncate_start(ip, flags, new_size, toss_start, last_byte);
    trace_xfs_itruncate_start(ip, new_size, flags, toss_start, last_byte);
    if (last_byte > toss_start) {
        if (flags & XFS_ITRUNC_DEFINITE) {
            xfs_tosspages(ip, toss_start,
@@ -970,7 +970,6 @@ xfs_iflush_abort(
{
    xfs_inode_log_item_t *iip = ip->i_itemp;

    iip = ip->i_itemp;
    if (iip) {
        struct xfs_ail *ailp = iip->ili_item.li_ailp;
        if (iip->ili_item.li_flags & XFS_LI_IN_AIL) {
@@ -1449,6 +1449,13 @@ xlog_dealloc_log(xlog_t *log)

    xlog_cil_destroy(log);

    /*
     * always need to ensure that the extra buffer does not point to memory
     * owned by another log buffer before we free it.
     */
    xfs_buf_set_empty(log->l_xbuf, log->l_iclog_size);
    xfs_buf_free(log->l_xbuf);

    iclog = log->l_iclog;
    for (i=0; i<log->l_iclog_bufs; i++) {
        xfs_buf_free(iclog->ic_bp);

@@ -1458,7 +1465,6 @@ xlog_dealloc_log(xlog_t *log)
    }
    spinlock_destroy(&log->l_icloglock);

    xfs_buf_free(log->l_xbuf);
    log->l_mp->m_log = NULL;
    kmem_free(log);
} /* xlog_dealloc_log */

@@ -3248,13 +3254,6 @@ xfs_log_ticket_get(
    return ticket;
}

xlog_tid_t
xfs_log_get_trans_ident(
    struct xfs_trans *tp)
{
    return tp->t_ticket->t_tid;
}

/*
 * Allocate and initialise a new log ticket.
 */
@@ -189,8 +189,6 @@ void xlog_iodone(struct xfs_buf *);
struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void xfs_log_ticket_put(struct xlog_ticket *ticket);

xlog_tid_t xfs_log_get_trans_ident(struct xfs_trans *tp);

void xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
    struct xfs_log_vec *log_vector,
    xfs_lsn_t *commit_lsn, int flags);
@@ -361,13 +361,12 @@ xlog_cil_committed(
    int abort)
{
    struct xfs_cil_ctx *ctx = args;
    struct xfs_busy_extent *busyp, *n;

    xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
        ctx->start_lsn, abort);

    list_for_each_entry_safe(busyp, n, &ctx->busy_extents, list)
        xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, busyp);
    xfs_alloc_busy_sort(&ctx->busy_extents);
    xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, &ctx->busy_extents);

    spin_lock(&ctx->cil->xc_cil_lock);
    list_del(&ctx->committing);
@@ -146,6 +146,8 @@ static inline uint xlog_get_client_id(__be32 i)
        shutdown */
#define XLOG_TAIL_WARN 0x10 /* log tail verify warning issued */

typedef __uint32_t xlog_tid_t;

#ifdef __KERNEL__
/*
 * Below are states for covering allocation transactions.
@@ -204,6 +204,35 @@ xlog_bread(
    return 0;
}

/*
 * Read at an offset into the buffer. Returns with the buffer in it's original
 * state regardless of the result of the read.
 */
STATIC int
xlog_bread_offset(
    xlog_t *log,
    xfs_daddr_t blk_no, /* block to read from */
    int nbblks, /* blocks to read */
    xfs_buf_t *bp,
    xfs_caddr_t offset)
{
    xfs_caddr_t orig_offset = XFS_BUF_PTR(bp);
    int orig_len = bp->b_buffer_length;
    int error, error2;

    error = XFS_BUF_SET_PTR(bp, offset, BBTOB(nbblks));
    if (error)
        return error;

    error = xlog_bread_noalign(log, blk_no, nbblks, bp);

    /* must reset buffer pointer even on error */
    error2 = XFS_BUF_SET_PTR(bp, orig_offset, orig_len);
    if (error)
        return error;
    return error2;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.

@@ -1229,20 +1258,12 @@ xlog_write_log_records(
     */
    ealign = round_down(end_block, sectbb);
    if (j == 0 && (start_block + endcount > ealign)) {
        offset = XFS_BUF_PTR(bp);
        balign = BBTOB(ealign - start_block);
        error = XFS_BUF_SET_PTR(bp, offset + balign,
            BBTOB(sectbb));
        offset = XFS_BUF_PTR(bp) + BBTOB(ealign - start_block);
        error = xlog_bread_offset(log, ealign, sectbb,
            bp, offset);
        if (error)
            break;

        error = xlog_bread_noalign(log, ealign, sectbb, bp);
        if (error)
            break;

        error = XFS_BUF_SET_PTR(bp, offset, bufblks);
        if (error)
            break;
    }

    offset = xlog_align(log, start_block, endcount, bp);

@@ -3448,19 +3469,9 @@ xlog_do_recovery_pass(
     * - order is important.
     */
    wrapped_hblks = hblks - split_hblks;
    error = XFS_BUF_SET_PTR(hbp,
        offset + BBTOB(split_hblks),
        BBTOB(hblks - split_hblks));
    if (error)
        goto bread_err2;

    error = xlog_bread_noalign(log, 0,
        wrapped_hblks, hbp);
    if (error)
        goto bread_err2;

    error = XFS_BUF_SET_PTR(hbp, offset,
        BBTOB(hblks));
    error = xlog_bread_offset(log, 0,
        wrapped_hblks, hbp,
        offset + BBTOB(split_hblks));
    if (error)
        goto bread_err2;
}

@@ -3511,19 +3522,9 @@ xlog_do_recovery_pass(
     * _first_, then the log start (LR header end)
     * - order is important.
     */
    error = XFS_BUF_SET_PTR(dbp,
        offset + BBTOB(split_bblks),
        BBTOB(bblks - split_bblks));
    if (error)
        goto bread_err2;

    error = xlog_bread_noalign(log, wrapped_hblks,
        bblks - split_bblks,
        dbp);
    if (error)
        goto bread_err2;

    error = XFS_BUF_SET_PTR(dbp, offset, h_size);
    error = xlog_bread_offset(log, 0,
        bblks - split_bblks, hbp,
        offset + BBTOB(split_bblks));
    if (error)
        goto bread_err2;
}
@@ -1900,7 +1900,7 @@ xfs_mod_incore_sb_batch(
    uint nmsb,
    int rsvd)
{
    xfs_mod_sb_t *msbp = &msb[0];
    xfs_mod_sb_t *msbp;
    int error = 0;

    /*

@@ -1910,7 +1910,7 @@ xfs_mod_incore_sb_batch(
     * changes will be atomic.
     */
    spin_lock(&mp->m_sb_lock);
    for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) {
    for (msbp = msb; msbp < (msb + nmsb); msbp++) {
        ASSERT(msbp->msb_field < XFS_SBS_ICOUNT ||
            msbp->msb_field > XFS_SBS_FDBLOCKS);

@@ -608,10 +608,8 @@ STATIC void
xfs_trans_free(
    struct xfs_trans *tp)
{
    struct xfs_busy_extent *busyp, *n;

    list_for_each_entry_safe(busyp, n, &tp->t_busy, list)
        xfs_alloc_busy_clear(tp->t_mountp, busyp);
    xfs_alloc_busy_sort(&tp->t_busy);
    xfs_alloc_busy_clear(tp->t_mountp, &tp->t_busy);

    atomic_dec(&tp->t_mountp->m_active_trans);
    xfs_trans_free_dqinfo(tp);
@@ -73,8 +73,6 @@ typedef __int32_t xfs_tid_t; /* transaction identifier */
typedef __uint32_t xfs_dablk_t; /* dir/attr block number (in file) */
typedef __uint32_t xfs_dahash_t; /* dir/attr hash value */

typedef __uint32_t xlog_tid_t; /* transaction ID type */

/*
 * These types are 64 bits on disk but are either 32 or 64 bits in memory.
 * Disk based types: