Diffstat (limited to 'fs/xfs/xfs_bmap.c')
-rw-r--r--  fs/xfs/xfs_bmap.c |  101
1 file changed, 27 insertions(+), 74 deletions(-)
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 498ad50d1f4..87795188ced 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -185,16 +185,6 @@ xfs_bmap_btree_to_extents(
int *logflagsp, /* inode logging flags */
int whichfork); /* data or attr fork */
-#ifdef DEBUG
-/*
- * Check that the extents list for the inode ip is in the right order.
- */
-STATIC void
-xfs_bmap_check_extents(
- xfs_inode_t *ip, /* incore inode pointer */
- int whichfork); /* data or attr fork */
-#endif
-
/*
* Called by xfs_bmapi to update file extent records and the btree
* after removing space (or undoing a delayed allocation).
@@ -410,7 +400,6 @@ xfs_bmap_count_leaves(
STATIC int
xfs_bmap_disk_count_leaves(
xfs_ifork_t *ifp,
- xfs_mount_t *mp,
xfs_extnum_t idx,
xfs_bmbt_block_t *block,
int numrecs,
@@ -684,7 +673,7 @@ xfs_bmap_add_extent(
ASSERT(nblks <= da_old);
if (nblks < da_old)
xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
- (int)(da_old - nblks), rsvd);
+ (int64_t)(da_old - nblks), rsvd);
}
/*
* Clear out the allocated field, done with it now in any case.
@@ -1209,7 +1198,7 @@ xfs_bmap_add_extent_delay_real(
diff = (int)(temp + temp2 - STARTBLOCKVAL(PREV.br_startblock) -
(cur ? cur->bc_private.b.allocated : 0));
if (diff > 0 &&
- xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -diff, rsvd)) {
+ xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd)) {
/*
* Ick gross gag me with a spoon.
*/
@@ -1220,7 +1209,7 @@ xfs_bmap_add_extent_delay_real(
diff--;
if (!diff ||
!xfs_mod_incore_sb(ip->i_mount,
- XFS_SBS_FDBLOCKS, -diff, rsvd))
+ XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd))
break;
}
if (temp2) {
@@ -1228,7 +1217,7 @@ xfs_bmap_add_extent_delay_real(
diff--;
if (!diff ||
!xfs_mod_incore_sb(ip->i_mount,
- XFS_SBS_FDBLOCKS, -diff, rsvd))
+ XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd))
break;
}
}
@@ -2015,7 +2004,7 @@ xfs_bmap_add_extent_hole_delay(
if (oldlen != newlen) {
ASSERT(oldlen > newlen);
xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
- (int)(oldlen - newlen), rsvd);
+ (int64_t)(oldlen - newlen), rsvd);
/*
* Nothing to do for disk quota accounting here.
*/
@@ -3359,7 +3348,7 @@ xfs_bmap_del_extent(
*/
ASSERT(da_old >= da_new);
if (da_old > da_new)
- xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int)(da_old - da_new),
+ xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int64_t)(da_old - da_new),
rsvd);
if (delta) {
/* DELTA: report the original extent. */
@@ -3543,6 +3532,7 @@ xfs_bmap_forkoff_reset(
if (whichfork == XFS_ATTR_FORK &&
(ip->i_d.di_format != XFS_DINODE_FMT_DEV) &&
(ip->i_d.di_format != XFS_DINODE_FMT_UUID) &&
+ (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
((mp->m_attroffset >> 3) > ip->i_d.di_forkoff)) {
ip->i_d.di_forkoff = mp->m_attroffset >> 3;
ip->i_df.if_ext_max = XFS_IFORK_DSIZE(ip) /
@@ -4079,7 +4069,7 @@ xfs_bmap_add_attrfork(
} else
XFS_SB_UNLOCK(mp, s);
}
- if ((error = xfs_bmap_finish(&tp, &flist, firstblock, &committed)))
+ if ((error = xfs_bmap_finish(&tp, &flist, &committed)))
goto error2;
error = xfs_trans_commit(tp, XFS_TRANS_PERM_LOG_RES, NULL);
ASSERT(ip->i_df.if_ext_max ==
@@ -4212,7 +4202,6 @@ int /* error */
xfs_bmap_finish(
xfs_trans_t **tp, /* transaction pointer addr */
xfs_bmap_free_t *flist, /* i/o: list extents to free */
- xfs_fsblock_t firstblock, /* controlled ag for allocs */
int *committed) /* xact committed or not */
{
xfs_efd_log_item_t *efd; /* extent free data */
@@ -4533,8 +4522,7 @@ xfs_bmap_read_extents(
error0);
if (level == 0)
break;
- pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block,
- 1, mp->m_bmap_dmxr[1]);
+ pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
bno = be64_to_cpu(*pp);
XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
xfs_trans_brelse(tp, bp);
@@ -4577,8 +4565,7 @@ xfs_bmap_read_extents(
/*
* Copy records into the extent records.
*/
- frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
- block, 1, mp->m_bmap_dmxr[0]);
+ frp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1);
start = i;
for (j = 0; j < num_recs; j++, i++, frp++) {
trp = xfs_iext_get_ext(ifp, i);
@@ -4929,28 +4916,28 @@ xfs_bmapi(
if (rt) {
error = xfs_mod_incore_sb(mp,
XFS_SBS_FREXTENTS,
- -(extsz), (flags &
+ -((int64_t)extsz), (flags &
XFS_BMAPI_RSVBLOCKS));
} else {
error = xfs_mod_incore_sb(mp,
XFS_SBS_FDBLOCKS,
- -(alen), (flags &
+ -((int64_t)alen), (flags &
XFS_BMAPI_RSVBLOCKS));
}
if (!error) {
error = xfs_mod_incore_sb(mp,
XFS_SBS_FDBLOCKS,
- -(indlen), (flags &
+ -((int64_t)indlen), (flags &
XFS_BMAPI_RSVBLOCKS));
if (error && rt)
xfs_mod_incore_sb(mp,
XFS_SBS_FREXTENTS,
- extsz, (flags &
+ (int64_t)extsz, (flags &
XFS_BMAPI_RSVBLOCKS));
else if (error)
xfs_mod_incore_sb(mp,
XFS_SBS_FDBLOCKS,
- alen, (flags &
+ (int64_t)alen, (flags &
XFS_BMAPI_RSVBLOCKS));
}
@@ -5616,13 +5603,13 @@ xfs_bunmapi(
rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
do_div(rtexts, mp->m_sb.sb_rextsize);
xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
- (int)rtexts, rsvd);
+ (int64_t)rtexts, rsvd);
(void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
NULL, ip, -((long)del.br_blockcount), 0,
XFS_QMOPT_RES_RTBLKS);
} else {
xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
- (int)del.br_blockcount, rsvd);
+ (int64_t)del.br_blockcount, rsvd);
(void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
NULL, ip, -((long)del.br_blockcount), 0,
XFS_QMOPT_RES_REGBLKS);
@@ -6048,32 +6035,6 @@ xfs_bmap_eof(
}
#ifdef DEBUG
-/*
- * Check that the extents list for the inode ip is in the right order.
- */
-STATIC void
-xfs_bmap_check_extents(
- xfs_inode_t *ip, /* incore inode pointer */
- int whichfork) /* data or attr fork */
-{
- xfs_bmbt_rec_t *ep; /* current extent entry */
- xfs_extnum_t idx; /* extent record index */
- xfs_ifork_t *ifp; /* inode fork pointer */
- xfs_extnum_t nextents; /* number of extents in list */
- xfs_bmbt_rec_t *nextp; /* next extent entry */
-
- ifp = XFS_IFORK_PTR(ip, whichfork);
- ASSERT(ifp->if_flags & XFS_IFEXTENTS);
- nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
- ep = xfs_iext_get_ext(ifp, 0);
- for (idx = 0; idx < nextents - 1; idx++) {
- nextp = xfs_iext_get_ext(ifp, idx + 1);
- xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep,
- (void *)(nextp));
- ep = nextp;
- }
-}
-
STATIC
xfs_buf_t *
xfs_bmap_get_bp(
@@ -6156,8 +6117,7 @@ xfs_check_block(
if (root) {
keyp = XFS_BMAP_BROOT_KEY_ADDR(block, i, sz);
} else {
- keyp = XFS_BTREE_KEY_ADDR(mp->m_sb.sb_blocksize,
- xfs_bmbt, block, i, dmxr);
+ keyp = XFS_BTREE_KEY_ADDR(xfs_bmbt, block, i);
}
if (prevp) {
@@ -6172,15 +6132,14 @@ xfs_check_block(
if (root) {
pp = XFS_BMAP_BROOT_PTR_ADDR(block, i, sz);
} else {
- pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
- xfs_bmbt, block, i, dmxr);
+ pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, i, dmxr);
}
for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
if (root) {
thispa = XFS_BMAP_BROOT_PTR_ADDR(block, j, sz);
} else {
- thispa = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
- xfs_bmbt, block, j, dmxr);
+ thispa = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, j,
+ dmxr);
}
if (*thispa == *pp) {
cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld",
@@ -6267,8 +6226,7 @@ xfs_bmap_check_leaf_extents(
*/
xfs_check_block(block, mp, 0, 0);
- pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block,
- 1, mp->m_bmap_dmxr[1]);
+ pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
bno = be64_to_cpu(*pp);
XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
if (bp_release) {
@@ -6305,11 +6263,9 @@ xfs_bmap_check_leaf_extents(
* conform with the first entry in this one.
*/
- ep = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
- block, 1, mp->m_bmap_dmxr[0]);
+ ep = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1);
for (j = 1; j < num_recs; j++) {
- nextp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
- block, j + 1, mp->m_bmap_dmxr[0]);
+ nextp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, j + 1);
if (lastp) {
xfs_btree_check_rec(XFS_BTNUM_BMAP,
(void *)lastp, (void *)ep);
@@ -6454,8 +6410,7 @@ xfs_bmap_count_tree(
}
/* Dive to the next level */
- pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
- xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
+ pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
bno = be64_to_cpu(*pp);
if (unlikely((error =
xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
@@ -6470,7 +6425,7 @@ xfs_bmap_count_tree(
for (;;) {
nextbno = be64_to_cpu(block->bb_rightsib);
numrecs = be16_to_cpu(block->bb_numrecs);
- if (unlikely(xfs_bmap_disk_count_leaves(ifp, mp,
+ if (unlikely(xfs_bmap_disk_count_leaves(ifp,
0, block, numrecs, count) < 0)) {
xfs_trans_brelse(tp, bp);
XFS_ERROR_REPORT("xfs_bmap_count_tree(2)",
@@ -6518,7 +6473,6 @@ xfs_bmap_count_leaves(
int
xfs_bmap_disk_count_leaves(
xfs_ifork_t *ifp,
- xfs_mount_t *mp,
xfs_extnum_t idx,
xfs_bmbt_block_t *block,
int numrecs,
@@ -6528,8 +6482,7 @@ xfs_bmap_disk_count_leaves(
xfs_bmbt_rec_t *frp;
for (b = 1; b <= numrecs; b++) {
- frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize,
- xfs_bmbt, block, idx + b, mp->m_bmap_dmxr[0]);
+ frp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, idx + b);
*count += xfs_bmbt_disk_get_blockcount(frp);
}
return 0;
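
Note on the xfs_mod_incore_sb() changes above: the delta argument is now 64 bits wide, which is why the patch casts the unsigned block and extent counts to int64_t before negating them rather than after. The following stand-alone sketch is illustrative only (it is not part of the patch, and the uint32_t variable stands in for a 32-bit unsigned count such as xfs_extlen_t); it shows why the order of the cast and the negation matters:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t extsz = 16;                /* e.g. an extent size in blocks */

		int64_t wrong = (int64_t)(-extsz);  /* negate first: wraps to 0xfffffff0,
		                                       then widens to +4294967280 */
		int64_t right = -((int64_t)extsz);  /* widen first, then negate: -16 */

		printf("%lld %lld\n", (long long)wrong, (long long)right);
		/* prints: 4294967280 -16 */
		return 0;
	}

With the old int-sized delta, a plain -(extsz) happened to land back on a small negative value after truncation; once the parameter is 64 bits, only the widen-then-negate form keeps the delta negative, hence the -((int64_t)...) pattern used throughout the hunks above.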