/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
|
|
|
|
#include "xfs.h"
|
2005-11-02 06:38:42 +03:00
|
|
|
#include "xfs_fs.h"
|
2013-10-29 15:11:58 +04:00
|
|
|
#include "xfs_shared.h"
|
2013-10-23 03:51:50 +04:00
|
|
|
#include "xfs_format.h"
|
2013-10-23 03:50:10 +04:00
|
|
|
#include "xfs_log_format.h"
|
|
|
|
#include "xfs_trans_resv.h"
|
2005-04-17 02:20:36 +04:00
|
|
|
#include "xfs_sb.h"
|
|
|
|
#include "xfs_ag.h"
|
|
|
|
#include "xfs_mount.h"
|
|
|
|
#include "xfs_btree.h"
|
2013-10-23 03:51:50 +04:00
|
|
|
#include "xfs_alloc_btree.h"
|
2005-04-17 02:20:36 +04:00
|
|
|
#include "xfs_alloc.h"
|
2012-04-29 14:39:43 +04:00
|
|
|
#include "xfs_extent_busy.h"
|
2005-04-17 02:20:36 +04:00
|
|
|
#include "xfs_error.h"
|
2009-12-15 02:14:59 +03:00
|
|
|
#include "xfs_trace.h"
|
2013-04-21 23:53:46 +04:00
|
|
|
#include "xfs_cksum.h"
|
2013-10-23 03:50:10 +04:00
|
|
|
#include "xfs_trans.h"
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
|
2008-10-30 08:56:32 +03:00
|
|
|
STATIC struct xfs_btree_cur *
|
|
|
|
xfs_allocbt_dup_cursor(
|
|
|
|
struct xfs_btree_cur *cur)
|
|
|
|
{
|
|
|
|
return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
|
|
|
|
cur->bc_private.a.agbp, cur->bc_private.a.agno,
|
|
|
|
cur->bc_btnum);
|
|
|
|
}
|
|
|
|
|
2008-10-30 08:57:16 +03:00
|
|
|
STATIC void
|
|
|
|
xfs_allocbt_set_root(
|
|
|
|
struct xfs_btree_cur *cur,
|
|
|
|
union xfs_btree_ptr *ptr,
|
|
|
|
int inc)
|
|
|
|
{
|
|
|
|
struct xfs_buf *agbp = cur->bc_private.a.agbp;
|
|
|
|
struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
|
|
|
|
xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
|
|
|
|
int btnum = cur->bc_btnum;
|
2010-01-11 14:47:41 +03:00
|
|
|
struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno);
|
2008-10-30 08:57:16 +03:00
|
|
|
|
|
|
|
ASSERT(ptr->s != 0);
|
|
|
|
|
|
|
|
agf->agf_roots[btnum] = ptr->s;
|
|
|
|
be32_add_cpu(&agf->agf_levels[btnum], inc);
|
2010-01-11 14:47:41 +03:00
|
|
|
pag->pagf_levels[btnum] += inc;
|
|
|
|
xfs_perag_put(pag);
|
2008-10-30 08:57:16 +03:00
|
|
|
|
|
|
|
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
|
|
|
|
}
|
|
|
|
|
2008-10-30 08:57:03 +03:00
|
|
|
STATIC int
|
|
|
|
xfs_allocbt_alloc_block(
|
|
|
|
struct xfs_btree_cur *cur,
|
|
|
|
union xfs_btree_ptr *start,
|
|
|
|
union xfs_btree_ptr *new,
|
|
|
|
int length,
|
|
|
|
int *stat)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
xfs_agblock_t bno;
|
|
|
|
|
|
|
|
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
|
|
|
|
|
|
|
|
/* Allocate the new block from the freelist. If we can't, give up. */
|
|
|
|
error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
|
|
|
|
&bno, 1);
|
|
|
|
if (error) {
|
|
|
|
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (bno == NULLAGBLOCK) {
|
|
|
|
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
|
|
|
|
*stat = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
2011-04-24 23:06:16 +04:00
|
|
|
|
2012-04-29 14:41:10 +04:00
|
|
|
xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false);
|
2008-10-30 08:57:03 +03:00
|
|
|
|
|
|
|
xfs_trans_agbtree_delta(cur->bc_tp, 1);
|
|
|
|
new->s = cpu_to_be32(bno);
|
|
|
|
|
|
|
|
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
|
|
|
|
*stat = 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-10-30 08:57:51 +03:00
|
|
|
STATIC int
|
|
|
|
xfs_allocbt_free_block(
|
|
|
|
struct xfs_btree_cur *cur,
|
|
|
|
struct xfs_buf *bp)
|
|
|
|
{
|
|
|
|
struct xfs_buf *agbp = cur->bc_private.a.agbp;
|
|
|
|
struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
|
|
|
|
xfs_agblock_t bno;
|
|
|
|
int error;
|
|
|
|
|
2009-01-15 08:22:07 +03:00
|
|
|
bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
|
2008-10-30 08:57:51 +03:00
|
|
|
error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
2012-04-29 14:41:10 +04:00
|
|
|
xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
|
|
|
|
XFS_EXTENT_BUSY_SKIP_DISCARD);
|
2008-10-30 08:57:51 +03:00
|
|
|
xfs_trans_agbtree_delta(cur->bc_tp, -1);
|
xfs: invalidate allocbt blocks moved to the free list
When we free a block from the alloc btree tree, we move it to the
freelist held in the AGFL and mark it busy in the busy extent tree.
This typically happens when we merge btree blocks.
Once the transaction is committed and checkpointed, the block can
remain on the free list for an indefinite amount of time. Now, this
isn't the end of the world at this point - if the free list is
shortened, the buffer is invalidated in the transaction that moves
it back to free space. If the buffer is allocated as metadata from
the free list, then all the modifications getted logged, and we have
no issues, either. And if it gets allocated as userdata direct from
the freelist, it gets invalidated and so will never get written.
However, during the time it sits on the free list, pressure on the
log can cause the AIL to be pushed and the buffer that covers the
block gets pushed for write. IOWs, we end up writing a freed
metadata block to disk. Again, this isn't the end of the world
because we know from the above we are only writing to free space.
The problem, however, is for validation callbacks. If the block was
on old btree root block, then the level of the block is going to be
higher than the current tree root, and so will fail validation.
There may be other inconsistencies in the block as well, and
currently we don't care because the block is in free space. Shutting
down the filesystem because a freed block doesn't pass write
validation, OTOH, is rather unfriendly.
So, make sure we always invalidate buffers as they move from the
free space trees to the free list so that we guarantee they never
get written to disk while on the free list.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Phil White <pwhite@sgi.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
2012-11-02 04:38:41 +04:00
|
|
|
|
|
|
|
xfs_trans_binval(cur->bc_tp, bp);
|
2008-10-30 08:57:51 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
 * Update the longest extent in the AGF
 *
 * Called only for the by-size (cnt) btree, whose last record is by
 * definition the largest free extent in the AG.  Both the on-disk AGF
 * and the in-core perag copy of the longest-extent length are updated
 * and the AGF change is logged.
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_rec	*rec,
	int			ptr,
	int			reason)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag;
	__be32			len;	/* new longest length, on-disk format */
	int			numrecs;

	/* lastrec updates only make sense for the by-size btree */
	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		/* an insert only matters if it beats the current longest */
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		/*
		 * A delete past the current last record means the old
		 * last record is gone; the new longest is whatever record
		 * now sits at the end of the block (or 0 if empty).
		 */
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	/* update on-disk AGF, mirror into the perag, then log the change */
	agf->agf_longest = len;
	pag = xfs_perag_get(cur->bc_mp, seqno);
	pag->pagf_longest = be32_to_cpu(len);
	xfs_perag_put(pag);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}
|
|
|
|
|
2008-10-30 08:58:01 +03:00
|
|
|
STATIC int
|
|
|
|
xfs_allocbt_get_minrecs(
|
|
|
|
struct xfs_btree_cur *cur,
|
|
|
|
int level)
|
|
|
|
{
|
|
|
|
return cur->bc_mp->m_alloc_mnr[level != 0];
|
|
|
|
}
|
|
|
|
|
2008-10-30 08:55:23 +03:00
|
|
|
STATIC int
|
|
|
|
xfs_allocbt_get_maxrecs(
|
|
|
|
struct xfs_btree_cur *cur,
|
|
|
|
int level)
|
|
|
|
{
|
|
|
|
return cur->bc_mp->m_alloc_mxr[level != 0];
|
|
|
|
}
|
|
|
|
|
2008-10-30 08:56:09 +03:00
|
|
|
STATIC void
|
|
|
|
xfs_allocbt_init_key_from_rec(
|
|
|
|
union xfs_btree_key *key,
|
|
|
|
union xfs_btree_rec *rec)
|
|
|
|
{
|
|
|
|
ASSERT(rec->alloc.ar_startblock != 0);
|
|
|
|
|
|
|
|
key->alloc.ar_startblock = rec->alloc.ar_startblock;
|
|
|
|
key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
|
|
|
|
}
|
|
|
|
|
2008-10-30 08:57:40 +03:00
|
|
|
STATIC void
|
|
|
|
xfs_allocbt_init_rec_from_key(
|
|
|
|
union xfs_btree_key *key,
|
|
|
|
union xfs_btree_rec *rec)
|
|
|
|
{
|
|
|
|
ASSERT(key->alloc.ar_startblock != 0);
|
|
|
|
|
|
|
|
rec->alloc.ar_startblock = key->alloc.ar_startblock;
|
|
|
|
rec->alloc.ar_blockcount = key->alloc.ar_blockcount;
|
|
|
|
}
|
|
|
|
|
|
|
|
STATIC void
|
|
|
|
xfs_allocbt_init_rec_from_cur(
|
|
|
|
struct xfs_btree_cur *cur,
|
|
|
|
union xfs_btree_rec *rec)
|
|
|
|
{
|
|
|
|
ASSERT(cur->bc_rec.a.ar_startblock != 0);
|
|
|
|
|
|
|
|
rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
|
|
|
|
rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
|
|
|
|
}
|
|
|
|
|
2008-10-30 08:56:09 +03:00
|
|
|
STATIC void
|
|
|
|
xfs_allocbt_init_ptr_from_cur(
|
|
|
|
struct xfs_btree_cur *cur,
|
|
|
|
union xfs_btree_ptr *ptr)
|
|
|
|
{
|
|
|
|
struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
|
|
|
|
|
|
|
|
ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
|
|
|
|
ASSERT(agf->agf_roots[cur->bc_btnum] != 0);
|
|
|
|
|
|
|
|
ptr->s = agf->agf_roots[cur->bc_btnum];
|
|
|
|
}
|
|
|
|
|
|
|
|
STATIC __int64_t
|
|
|
|
xfs_allocbt_key_diff(
|
|
|
|
struct xfs_btree_cur *cur,
|
|
|
|
union xfs_btree_key *key)
|
|
|
|
{
|
|
|
|
xfs_alloc_rec_incore_t *rec = &cur->bc_rec.a;
|
|
|
|
xfs_alloc_key_t *kp = &key->alloc;
|
|
|
|
__int64_t diff;
|
|
|
|
|
|
|
|
if (cur->bc_btnum == XFS_BTNUM_BNO) {
|
|
|
|
return (__int64_t)be32_to_cpu(kp->ar_startblock) -
|
|
|
|
rec->ar_startblock;
|
|
|
|
}
|
|
|
|
|
|
|
|
diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
|
|
|
|
if (diff)
|
|
|
|
return diff;
|
|
|
|
|
|
|
|
return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
|
|
|
|
}
|
|
|
|
|
2013-04-21 23:53:46 +04:00
|
|
|
/*
 * Structural sanity check for an alloc btree block read from, or about
 * to be written to, disk.  Returns false on the first failed check.
 */
static bool
xfs_allocbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	unsigned int		level;

	/*
	 * magic number and level verification
	 *
	 * During growfs operations, we can't verify the exact level or owner as
	 * the perag is not fully initialised and hence not attached to the
	 * buffer. In this case, check against the maximum tree depth.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agf information will not yet have been initialised
	 * from the on disk AGF. Again, we can only check against maximum limits
	 * in this case.
	 */
	level = be16_to_cpu(block->bb_level);
	switch (block->bb_magic) {
	case cpu_to_be32(XFS_ABTB_CRC_MAGIC):
		/* v5 (CRC) bnobt block: uuid, location and owner must match */
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			return false;
		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid))
			return false;
		if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
			return false;
		if (pag &&
		    be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
			return false;
		/* fall through */
	case cpu_to_be32(XFS_ABTB_MAGIC):
		/* level must be below the recorded bnobt height, if known */
		if (pag && pag->pagf_init) {
			if (level >= pag->pagf_levels[XFS_BTNUM_BNOi])
				return false;
		} else if (level >= mp->m_ag_maxlevels)
			return false;
		break;
	case cpu_to_be32(XFS_ABTC_CRC_MAGIC):
		/* v5 (CRC) cntbt block: same v5 field checks as above */
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			return false;
		if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_uuid))
			return false;
		if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
			return false;
		if (pag &&
		    be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
			return false;
		/* fall through */
	case cpu_to_be32(XFS_ABTC_MAGIC):
		/* level must be below the recorded cntbt height, if known */
		if (pag && pag->pagf_init) {
			if (level >= pag->pagf_levels[XFS_BTNUM_CNTi])
				return false;
		} else if (level >= mp->m_ag_maxlevels)
			return false;
		break;
	default:
		return false;
	}

	/* numrecs verification */
	if (be16_to_cpu(block->bb_numrecs) > mp->m_alloc_mxr[level != 0])
		return false;

	/* sibling pointer verification */
	if (!block->bb_u.s.bb_leftsib ||
	    (be32_to_cpu(block->bb_u.s.bb_leftsib) >= mp->m_sb.sb_agblocks &&
	     block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK)))
		return false;
	if (!block->bb_u.s.bb_rightsib ||
	    (be32_to_cpu(block->bb_u.s.bb_rightsib) >= mp->m_sb.sb_agblocks &&
	     block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK)))
		return false;

	return true;
}
|
2012-11-12 15:54:08 +04:00
|
|
|
|
2012-11-14 10:52:32 +04:00
|
|
|
static void
|
2012-11-14 10:54:40 +04:00
|
|
|
xfs_allocbt_read_verify(
|
2012-11-14 10:52:32 +04:00
|
|
|
struct xfs_buf *bp)
|
|
|
|
{
|
2014-02-27 08:23:10 +04:00
|
|
|
if (!xfs_btree_sblock_verify_crc(bp))
|
|
|
|
xfs_buf_ioerror(bp, EFSBADCRC);
|
|
|
|
else if (!xfs_allocbt_verify(bp))
|
2013-04-21 23:53:46 +04:00
|
|
|
xfs_buf_ioerror(bp, EFSCORRUPTED);
|
2014-02-27 08:23:10 +04:00
|
|
|
|
|
|
|
if (bp->b_error) {
|
|
|
|
trace_xfs_btree_corrupt(bp, _RET_IP_);
|
|
|
|
xfs_verifier_error(bp);
|
2013-04-21 23:53:46 +04:00
|
|
|
}
|
2012-11-14 10:52:32 +04:00
|
|
|
}
|
|
|
|
|
2012-11-14 10:54:40 +04:00
|
|
|
static void
|
|
|
|
xfs_allocbt_write_verify(
|
2012-11-14 10:52:32 +04:00
|
|
|
struct xfs_buf *bp)
|
|
|
|
{
|
2013-04-21 23:53:46 +04:00
|
|
|
if (!xfs_allocbt_verify(bp)) {
|
|
|
|
trace_xfs_btree_corrupt(bp, _RET_IP_);
|
|
|
|
xfs_buf_ioerror(bp, EFSCORRUPTED);
|
2014-02-27 08:23:10 +04:00
|
|
|
xfs_verifier_error(bp);
|
2014-02-27 08:14:31 +04:00
|
|
|
return;
|
2013-04-21 23:53:46 +04:00
|
|
|
}
|
|
|
|
xfs_btree_sblock_calc_crc(bp);
|
|
|
|
|
2012-11-12 15:54:08 +04:00
|
|
|
}
|
|
|
|
|
2012-11-14 10:54:40 +04:00
|
|
|
/* Buffer verifier operations for free space btree blocks. */
const struct xfs_buf_ops xfs_allocbt_buf_ops = {
	.verify_read = xfs_allocbt_read_verify,
	.verify_write = xfs_allocbt_write_verify,
};
|
|
|
|
|
|
|
|
|
2013-04-30 15:39:34 +04:00
|
|
|
#if defined(DEBUG) || defined(XFS_WARN)
|
2008-10-30 08:58:32 +03:00
|
|
|
STATIC int
|
|
|
|
xfs_allocbt_keys_inorder(
|
|
|
|
struct xfs_btree_cur *cur,
|
|
|
|
union xfs_btree_key *k1,
|
|
|
|
union xfs_btree_key *k2)
|
|
|
|
{
|
|
|
|
if (cur->bc_btnum == XFS_BTNUM_BNO) {
|
|
|
|
return be32_to_cpu(k1->alloc.ar_startblock) <
|
|
|
|
be32_to_cpu(k2->alloc.ar_startblock);
|
|
|
|
} else {
|
|
|
|
return be32_to_cpu(k1->alloc.ar_blockcount) <
|
|
|
|
be32_to_cpu(k2->alloc.ar_blockcount) ||
|
|
|
|
(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
|
|
|
|
be32_to_cpu(k1->alloc.ar_startblock) <
|
|
|
|
be32_to_cpu(k2->alloc.ar_startblock));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
STATIC int
|
|
|
|
xfs_allocbt_recs_inorder(
|
|
|
|
struct xfs_btree_cur *cur,
|
|
|
|
union xfs_btree_rec *r1,
|
|
|
|
union xfs_btree_rec *r2)
|
|
|
|
{
|
|
|
|
if (cur->bc_btnum == XFS_BTNUM_BNO) {
|
|
|
|
return be32_to_cpu(r1->alloc.ar_startblock) +
|
|
|
|
be32_to_cpu(r1->alloc.ar_blockcount) <=
|
|
|
|
be32_to_cpu(r2->alloc.ar_startblock);
|
|
|
|
} else {
|
|
|
|
return be32_to_cpu(r1->alloc.ar_blockcount) <
|
|
|
|
be32_to_cpu(r2->alloc.ar_blockcount) ||
|
|
|
|
(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
|
|
|
|
be32_to_cpu(r1->alloc.ar_startblock) <
|
|
|
|
be32_to_cpu(r2->alloc.ar_startblock));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif /* DEBUG */
|
|
|
|
|
2008-10-30 08:53:59 +03:00
|
|
|
/*
 * Operations vector shared by both free space btrees (bnobt and cntbt);
 * the cursor's bc_btnum distinguishes them at runtime.
 */
static const struct xfs_btree_ops xfs_allocbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_rec_from_key	= xfs_allocbt_init_rec_from_key,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_allocbt_key_diff,
	.buf_ops		= &xfs_allocbt_buf_ops,
#if defined(DEBUG) || defined(XFS_WARN)
	.keys_inorder		= xfs_allocbt_keys_inorder,
	.recs_inorder		= xfs_allocbt_recs_inorder,
#endif
};
|
|
|
|
|
|
|
|
/*
 * Allocate a new allocation btree cursor.
 *
 * The caller must hold the AGF buffer @agbp; the btree root and level
 * are read from it.  The returned cursor must be freed with
 * xfs_btree_del_cursor() when the caller is done with it.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agf structure */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_btnum_t		btnum)		/* btree identifier */
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	/* only the two free space btrees are handled here */
	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_ops = &xfs_allocbt_ops;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
		/* cntbt maintains the AG's longest extent on lastrec changes */
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	}

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	/* v5 filesystems carry CRCs and extra metadata in btree blocks */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	return cur;
}
|
2008-10-30 09:11:19 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Calculate number of records in an alloc btree block.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
xfs_allocbt_maxrecs(
|
|
|
|
struct xfs_mount *mp,
|
|
|
|
int blocklen,
|
|
|
|
int leaf)
|
|
|
|
{
|
2008-10-30 09:14:34 +03:00
|
|
|
blocklen -= XFS_ALLOC_BLOCK_LEN(mp);
|
2008-10-30 09:11:19 +03:00
|
|
|
|
|
|
|
if (leaf)
|
|
|
|
return blocklen / sizeof(xfs_alloc_rec_t);
|
|
|
|
return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
|
|
|
|
}
|