[XFS] Minor code rearranging and cleanup to prevent some coverity false positives.

SGI-PV: 955502
SGI-Modid: xfs-linux-melb:xfs-kern:26805a

Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
Nathan Scott 2006-09-28 11:03:44 +10:00 committed by Tim Shimmin
Parent b627259c60
Commit d432c80e68
3 changed files with 36 additions and 37 deletions

View File

@@ -1477,8 +1477,10 @@ xfs_alloc_ag_vextent_small(
 	/*
 	 * Can't allocate from the freelist for some reason.
 	 */
-	else
+	else {
 		fbno = NULLAGBLOCK;
+		flen = 0;
+	}
 	/*
 	 * Can't do the allocation, give up.
 	 */
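For reference, a minimal standalone sketch (not XFS code; lookup_extent, NULLBLOCK and all values here are made up) of the pattern the hunk above adopts: when the helper cannot satisfy the request, it sets every output it owns, so a static checker such as Coverity never sees a path on which the caller could read an uninitialized length.

#include <stdbool.h>
#include <stdio.h>

#define NULLBLOCK ((long)-1)

static bool lookup_extent(bool have_freelist, long *start, long *len)
{
	if (have_freelist) {
		*start = 128;		/* pretend an extent was found here */
		*len = 8;
		return true;
	} else {
		*start = NULLBLOCK;	/* explicit "nothing found" marker */
		*len = 0;		/* keep the pair consistent for the caller */
		return false;
	}
}

int main(void)
{
	long start, len;

	if (!lookup_extent(false, &start, &len))
		printf("no extent: start=%ld len=%ld\n", start, len);
	return 0;
}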

View File

@@ -1054,7 +1054,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
 	xfs_da_node_entry_t *btree;
 	xfs_dablk_t blkno;
 	int probe, span, max, error, retval;
-	xfs_dahash_t hashval;
+	xfs_dahash_t hashval, btreehashval;
 	xfs_da_args_t *args;
 
 	args = state->args;
@@ -1079,30 +1079,32 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
 			return(error);
 		}
 		curr = blk->bp->data;
-		ASSERT(be16_to_cpu(curr->magic) == XFS_DA_NODE_MAGIC ||
-		       be16_to_cpu(curr->magic) == XFS_DIR2_LEAFN_MAGIC ||
-		       be16_to_cpu(curr->magic) == XFS_ATTR_LEAF_MAGIC);
+		blk->magic = be16_to_cpu(curr->magic);
+		ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
+		       blk->magic == XFS_DIR2_LEAFN_MAGIC ||
+		       blk->magic == XFS_ATTR_LEAF_MAGIC);
 
 		/*
 		 * Search an intermediate node for a match.
 		 */
-		blk->magic = be16_to_cpu(curr->magic);
 		if (blk->magic == XFS_DA_NODE_MAGIC) {
 			node = blk->bp->data;
-			blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
+			max = be16_to_cpu(node->hdr.count);
+			btreehashval = node->btree[max-1].hashval;
+			blk->hashval = be32_to_cpu(btreehashval);
 
 			/*
 			 * Binary search. (note: small blocks will skip loop)
 			 */
-			max = be16_to_cpu(node->hdr.count);
 			probe = span = max / 2;
 			hashval = args->hashval;
 			for (btree = &node->btree[probe]; span > 4;
 				   btree = &node->btree[probe]) {
 				span /= 2;
-				if (be32_to_cpu(btree->hashval) < hashval)
+				btreehashval = be32_to_cpu(btree->hashval);
+				if (btreehashval < hashval)
 					probe += span;
-				else if (be32_to_cpu(btree->hashval) > hashval)
+				else if (btreehashval > hashval)
 					probe -= span;
 				else
 					break;
@@ -1133,10 +1135,10 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
 				blk->index = probe;
 				blkno = be32_to_cpu(btree->before);
 			}
-		} else if (be16_to_cpu(curr->magic) == XFS_ATTR_LEAF_MAGIC) {
+		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
 			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
 			break;
-		} else if (be16_to_cpu(curr->magic) == XFS_DIR2_LEAFN_MAGIC) {
+		} else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
 			blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
 			break;
 		}
@@ -1152,11 +1154,13 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
 		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
 			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
 							&blk->index, state);
-		}
-		else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
+		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
 			retval = xfs_attr_leaf_lookup_int(blk->bp, args);
 			blk->index = args->index;
 			args->blkno = blk->blkno;
+		} else {
+			ASSERT(0);
+			return XFS_ERROR(EFSCORRUPTED);
 		}
 		if (((retval == ENOENT) || (retval == ENOATTR)) &&
 		    (blk->hashval == args->hashval)) {
@@ -1166,8 +1170,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
 				return(error);
 			if (retval == 0) {
 				continue;
-			}
-			else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
+			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
 				/* path_shift() gives ENOENT */
 				retval = XFS_ERROR(ENOATTR);
 			}
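A generic sketch of the lookup loop touched above, assuming nothing XFS-specific: probe_for, the key array and ntohl() (standing in for be32_to_cpu()) are illustrative only. It shows the span-halving search and the btreehashval idea of converting the on-disk value once into a local and comparing it twice.

#include <stdint.h>
#include <arpa/inet.h>	/* ntohl()/htonl(), standing in for be32_to_cpu() */

static int probe_for(const uint32_t *be_keys, int max, uint32_t want)
{
	int probe, span;
	uint32_t val;

	probe = span = max / 2;
	while (span > 4) {
		span /= 2;
		val = ntohl(be_keys[probe]);	/* convert once, compare twice */
		if (val < want)
			probe += span;
		else if (val > want)
			probe -= span;
		else
			break;
	}
	/* a linear scan around "probe" would follow, as in the kernel code */
	return probe;
}

int main(void)
{
	uint32_t keys[32];
	int i;

	for (i = 0; i < 32; i++)
		keys[i] = htonl((uint32_t)(i * 10));	/* big-endian keys, as on disk */
	return probe_for(keys, 32, 170) >= 0 ? 0 : 1;
}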

View File

@@ -1976,7 +1976,10 @@ xfs_growfs_rt(
 	if ((error = xfs_growfs_rt_alloc(mp, rsumblocks, nrsumblocks,
 			mp->m_sb.sb_rsumino)))
 		return error;
-	nmp = NULL;
+	/*
+	 * Allocate a new (fake) mount/sb.
+	 */
+	nmp = kmem_alloc(sizeof(*nmp), KM_SLEEP);
 	/*
 	 * Loop over the bitmap blocks.
 	 * We will do everything one bitmap block at a time.
@@ -1987,10 +1990,6 @@ xfs_growfs_rt(
 		     ((sbp->sb_rextents & ((1 << mp->m_blkbit_log) - 1)) != 0);
 	     bmbno < nrbmblocks;
 	     bmbno++) {
-		/*
-		 * Allocate a new (fake) mount/sb.
-		 */
-		nmp = kmem_alloc(sizeof(*nmp), KM_SLEEP);
 		*nmp = *mp;
 		nsbp = &nmp->m_sb;
 		/*
@@ -2018,13 +2017,13 @@ xfs_growfs_rt(
 		cancelflags = 0;
 		if ((error = xfs_trans_reserve(tp, 0,
 				XFS_GROWRTFREE_LOG_RES(nmp), 0, 0, 0)))
-			goto error_exit;
+			break;
 		/*
 		 * Lock out other callers by grabbing the bitmap inode lock.
 		 */
 		if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0,
 				XFS_ILOCK_EXCL, &ip)))
-			goto error_exit;
+			break;
 		ASSERT(ip == mp->m_rbmip);
 		/*
 		 * Update the bitmap inode's size.
@@ -2038,7 +2037,7 @@ xfs_growfs_rt(
 		 */
 		if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rsumino, 0,
 				XFS_ILOCK_EXCL, &ip)))
-			goto error_exit;
+			break;
 		ASSERT(ip == mp->m_rsumip);
 		/*
 		 * Update the summary inode's size.
@@ -2053,7 +2052,7 @@ xfs_growfs_rt(
 		    mp->m_rsumlevels != nmp->m_rsumlevels) {
 			error = xfs_rtcopy_summary(mp, nmp, tp);
 			if (error)
-				goto error_exit;
+				break;
 		}
 		/*
 		 * Update superblock fields.
@@ -2080,17 +2079,12 @@ xfs_growfs_rt(
 		error = xfs_rtfree_range(nmp, tp, sbp->sb_rextents,
 			nsbp->sb_rextents - sbp->sb_rextents, &bp, &sumbno);
 		if (error)
-			goto error_exit;
+			break;
 		/*
 		 * Mark more blocks free in the superblock.
 		 */
 		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS,
 			nsbp->sb_rextents - sbp->sb_rextents);
-		/*
-		 * Free the fake mp structure.
-		 */
-		kmem_free(nmp, sizeof(*nmp));
-		nmp = NULL;
 		/*
 		 * Update mp values into the real mp structure.
 		 */
@@ -2101,15 +2095,15 @@ xfs_growfs_rt(
 		 */
 		xfs_trans_commit(tp, 0, NULL);
 	}
-	return 0;
+
+	if (error)
+		xfs_trans_cancel(tp, cancelflags);
 
 	/*
-	 * Error paths come here.
+	 * Free the fake mp structure.
 	 */
-error_exit:
-	if (nmp)
-		kmem_free(nmp, sizeof(*nmp));
-	xfs_trans_cancel(tp, cancelflags);
+	kmem_free(nmp, sizeof(*nmp));
+
 	return error;
 }
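The xfs_rtalloc.c hunks above reshape the error handling of xfs_growfs_rt(): the scratch mount copy is allocated once before the loop, failures break out of the loop, and a single tail cancels the transaction only when an error is pending and always frees the scratch copy. A rough sketch of that shape under hypothetical names (struct ctx, start_txn, cancel_txn, do_step and grow are not XFS APIs):

#include <stdlib.h>

struct ctx { int dummy; };			/* stand-in for the "fake" mount/sb copy */

static int start_txn(void) { return 0; }	/* stub: reserve a transaction */
static void cancel_txn(void) { }		/* stub: cancel the failed transaction */
static int do_step(struct ctx *scratch, int step)
{
	(void)scratch;
	return (step == 3) ? -1 : 0;		/* pretend step 3 fails */
}

static int grow(int nsteps)
{
	struct ctx *scratch;
	int step, error = 0;

	scratch = malloc(sizeof(*scratch));	/* one allocation, hoisted out of the loop */
	if (!scratch)
		return -1;

	for (step = 0; step < nsteps; step++) {
		error = start_txn();
		if (error)
			break;			/* was "goto error_exit" before the rework */
		error = do_step(scratch, step);
		if (error)
			break;
	}

	if (error)
		cancel_txn();			/* cancel only when the loop broke out early */

	free(scratch);				/* single tail: scratch is always valid here */
	return error;
}

int main(void)
{
	return grow(5) ? 1 : 0;
}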