From 857b9778d86ccba7d7b42c9d8aeecde794ec8a6b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:34:30 +0200 Subject: xfs: kill xfs_itruncate_start xfs_itruncate_start is a rather lengthy wrapper that evaluates to a call to xfs_ioend_wait and xfs_tosspages, and only has two callers. Instead of using the complicated checks left over from IRIX that decide where to truncate the pagecache, just call xfs_tosspages (aka truncate_inode_pages) directly, as we want to get rid of all data after i_size, and truncate_inode_pages handles incorrect alignments and too large offsets just fine. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_inode.c | 163 +---------------------------------------------------------------------- 1 file changed, 3 insertions(+), 160 deletions(-) (limited to 'fs/xfs/xfs_inode.c') diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index a098a20ca63..82a282ab63d 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1217,165 +1217,8 @@ xfs_isize_check( #endif /* DEBUG */ /* - * Calculate the last possible buffered byte in a file. This must - * include data that was buffered beyond the EOF by the write code. - * This also needs to deal with overflowing the xfs_fsize_t type - * which can happen for sizes near the limit. - * - * We also need to take into account any blocks beyond the EOF. It - * may be the case that they were buffered by a write which failed. - * In that case the pages will still be in memory, but the inode size - * will never have been updated. - */ -STATIC xfs_fsize_t -xfs_file_last_byte( - xfs_inode_t *ip) -{ - xfs_mount_t *mp; - xfs_fsize_t last_byte; - xfs_fileoff_t last_block; - xfs_fileoff_t size_last_block; - int error; - - ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)); - - mp = ip->i_mount; - /* - * Only check for blocks beyond the EOF if the extents have - * been read in. This eliminates the need for the inode lock, - * and it also saves us from looking when it really isn't - * necessary. - */ - if (ip->i_df.if_flags & XFS_IFEXTENTS) { - xfs_ilock(ip, XFS_ILOCK_SHARED); - error = xfs_bmap_last_offset(NULL, ip, &last_block, - XFS_DATA_FORK); - xfs_iunlock(ip, XFS_ILOCK_SHARED); - if (error) { - last_block = 0; - } - } else { - last_block = 0; - } - size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size); - last_block = XFS_FILEOFF_MAX(last_block, size_last_block); - - last_byte = XFS_FSB_TO_B(mp, last_block); - if (last_byte < 0) { - return XFS_MAXIOFFSET(mp); - } - last_byte += (1 << mp->m_writeio_log); - if (last_byte < 0) { - return XFS_MAXIOFFSET(mp); - } - return last_byte; -} - -/* - * Start the truncation of the file to new_size. The new size - * must be smaller than the current size. This routine will - * clear the buffer and page caches of file data in the removed - * range, and xfs_itruncate_finish() will remove the underlying - * disk blocks. - * - * The inode must have its I/O lock locked EXCLUSIVELY, and it - * must NOT have the inode lock held at all. This is because we're - * calling into the buffer/page cache code and we can't hold the - * inode lock when we do so. - * - * We need to wait for any direct I/Os in flight to complete before we - * proceed with the truncate. This is needed to prevent the extents - * being read or written by the direct I/Os from being removed while the - * I/O is in flight as there is no other method of synchronising - * direct I/O with the truncate operation.
Also, because we hold - * the IOLOCK in exclusive mode, we prevent new direct I/Os from being - * started until the truncate completes and drops the lock. Essentially, - * the xfs_ioend_wait() call forms an I/O barrier that provides strict - * ordering between direct I/Os and the truncate operation. - * - * The flags parameter can have either the value XFS_ITRUNC_DEFINITE - * or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used - * in the case that the caller is locking things out of order and - * may not be able to call xfs_itruncate_finish() with the inode lock - * held without dropping the I/O lock. If the caller must drop the - * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start() - * must be called again with all the same restrictions as the initial - * call. - */ -int -xfs_itruncate_start( - xfs_inode_t *ip, - uint flags, - xfs_fsize_t new_size) -{ - xfs_fsize_t last_byte; - xfs_off_t toss_start; - xfs_mount_t *mp; - int error = 0; - - ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); - ASSERT((new_size == 0) || (new_size <= ip->i_size)); - ASSERT((flags == XFS_ITRUNC_DEFINITE) || - (flags == XFS_ITRUNC_MAYBE)); - - mp = ip->i_mount; - - /* wait for the completion of any pending DIOs */ - if (new_size == 0 || new_size < ip->i_size) - xfs_ioend_wait(ip); - - /* - * Call toss_pages or flushinval_pages to get rid of pages - * overlapping the region being removed. We have to use - * the less efficient flushinval_pages in the case that the - * caller may not be able to finish the truncate without - * dropping the inode's I/O lock. Make sure - * to catch any pages brought in by buffers overlapping - * the EOF by searching out beyond the isize by our - * block size. We round new_size up to a block boundary - * so that we don't toss things on the same block as - * new_size but before it. - * - * Before calling toss_page or flushinval_pages, make sure to - * call remapf() over the same region if the file is mapped. - * This frees up mapped file references to the pages in the - * given range and for the flushinval_pages case it ensures - * that we get the latest mapped changes flushed out. - */ - toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); - toss_start = XFS_FSB_TO_B(mp, toss_start); - if (toss_start < 0) { - /* - * The place to start tossing is beyond our maximum - * file size, so there is no way that the data extended - * out there. - */ - return 0; - } - last_byte = xfs_file_last_byte(ip); - trace_xfs_itruncate_start(ip, new_size, flags, toss_start, last_byte); - if (last_byte > toss_start) { - if (flags & XFS_ITRUNC_DEFINITE) { - xfs_tosspages(ip, toss_start, - -1, FI_REMAPF_LOCKED); - } else { - error = xfs_flushinval_pages(ip, toss_start, - -1, FI_REMAPF_LOCKED); - } - } - -#ifdef DEBUG - if (new_size == 0) { - ASSERT(VN_CACHED(VFS_I(ip)) == 0); - } -#endif - return error; -} - -/* - * Shrink the file to the given new_size. The new size must be smaller than - * the current size. This will free up the underlying blocks in the removed - * range after a call to xfs_itruncate_start() or xfs_atruncate_start(). + * Free up the underlying blocks past new_size. The new size must be + * smaller than the current size. * * The transaction passed to this routine must have made a permanent log * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the @@ -1387,7 +1230,7 @@ xfs_itruncate_start( * will be "held" within the returned transaction. This routine does NOT * require any disk space to be reserved for it within the transaction. 
* - * The fork parameter must be either xfs_attr_fork or xfs_data_fork, and it + * The fork parameter must be either XFS_ATTR_FORK or XFS_DATA_FORK, and it * indicates the fork which is to be truncated. For the attribute fork we only * support truncation to size 0. * -- cgit v1.2.3-70-g09d2 From 8f04c47aa9712874af2c8816c2ca2a332cba80e4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:34:34 +0200 Subject: xfs: split xfs_itruncate_finish Split the guts of xfs_itruncate_finish that loop over the existing extents and call xfs_bunmapi on them into a new helper, xfs_itruncate_extents. Make xfs_attr_inactive call it directly instead of xfs_itruncate_finish, which allows us to simplify the latter a lot by only letting it deal with the data fork. As a result, xfs_itruncate_finish is renamed to xfs_itruncate_data to make its use case more obvious. Also remove the sync parameter from xfs_itruncate_data, which has been unnecessary since the introduction of the busy extent list in 2002, and completely dead code since 2003 when the XFS_BMAPI_ASYNC parameter was made a no-op. I can't actually see why xfs_attr_inactive needs to set the transaction sync, but let's keep this patch simple and without changes in behaviour. Also avoid passing a useless argument to xfs_isize_check, and make it private to xfs_inode.c. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_iops.c | 10 +- fs/xfs/linux-2.6/xfs_trace.h | 4 +- fs/xfs/quota/xfs_qm_syscalls.c | 2 +- fs/xfs/xfs_attr.c | 22 +-- fs/xfs/xfs_inode.c | 357 +++++++++++++++-------------------------- fs/xfs/xfs_inode.h | 13 +- fs/xfs/xfs_vnodeops.c | 24 +-- 7 files changed, 155 insertions(+), 277 deletions(-) (limited to 'fs/xfs/xfs_inode.c') diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 5a0fcb09fc7..501e4f63054 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -879,15 +879,7 @@ xfs_setattr_size( ip->i_size = iattr->ia_size; } else if (iattr->ia_size <= ip->i_size || (iattr->ia_size == 0 && ip->i_d.di_nextents)) { - /* - * Signal a sync transaction unless we are truncating an - * already unlinked file on a wsync filesystem. - */ - error = xfs_itruncate_finish(&tp, ip, iattr->ia_size, - XFS_DATA_FORK, - ((ip->i_d.di_nlink != 0 || - !(mp->m_flags & XFS_MOUNT_WSYNC)) - ?
1 : 0)); + error = xfs_itruncate_data(&tp, ip, iattr->ia_size); if (error) goto out_trans_abort; diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h index cac41e42345..4fe53f9f047 100644 --- a/fs/xfs/linux-2.6/xfs_trace.h +++ b/fs/xfs/linux-2.6/xfs_trace.h @@ -1055,8 +1055,8 @@ DECLARE_EVENT_CLASS(xfs_itrunc_class, DEFINE_EVENT(xfs_itrunc_class, name, \ TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \ TP_ARGS(ip, new_size)) -DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_start); -DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_end); +DEFINE_ITRUNC_EVENT(xfs_itruncate_data_start); +DEFINE_ITRUNC_EVENT(xfs_itruncate_data_end); TRACE_EVENT(xfs_pagecache_inval, TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish), diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index 2dadb15d5ca..f2dfc74ccf3 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c @@ -263,7 +263,7 @@ xfs_qm_scall_trunc_qfile( xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip); - error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, 1); + error = xfs_itruncate_data(&tp, ip, 0); if (error) { xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index 01d2072fb6d..795d5aac704 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c @@ -822,17 +822,21 @@ xfs_attr_inactive(xfs_inode_t *dp) error = xfs_attr_root_inactive(&trans, dp); if (error) goto out; + /* - * signal synchronous inactive transactions unless this - * is a synchronous mount filesystem in which case we - * know that we're here because we've been called out of - * xfs_inactive which means that the last reference is gone - * and the unlink transaction has already hit the disk so - * async inactive transactions are safe. + * Signal synchronous inactive transactions unless this is a + * synchronous mount filesystem in which case we know that we're here + * because we've been called out of xfs_inactive which means that the + * last reference is gone and the unlink transaction has already hit + * the disk so async inactive transactions are safe. */ - if ((error = xfs_itruncate_finish(&trans, dp, 0LL, XFS_ATTR_FORK, - (!(mp->m_flags & XFS_MOUNT_WSYNC) - ? 1 : 0)))) + if (!(mp->m_flags & XFS_MOUNT_WSYNC)) { + if (dp->i_d.di_anextents > 0) + xfs_trans_set_sync(trans); + } + + error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0); + if (error) goto out; /* diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 82a282ab63d..aa143b870af 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -52,7 +52,7 @@ kmem_zone_t *xfs_ifork_zone; kmem_zone_t *xfs_inode_zone; /* - * Used in xfs_itruncate(). This is the maximum number of extents + * Used in xfs_itruncate_extents(). This is the maximum number of extents * freed from a file in a single transaction. */ #define XFS_ITRUNC_MAX_EXTENTS 2 @@ -1179,15 +1179,15 @@ xfs_ialloc( * at least do it for regular files. 
*/ #ifdef DEBUG -void +STATIC void xfs_isize_check( - xfs_mount_t *mp, - xfs_inode_t *ip, - xfs_fsize_t isize) + struct xfs_inode *ip, + xfs_fsize_t isize) { - xfs_fileoff_t map_first; - int nimaps; - xfs_bmbt_irec_t imaps[2]; + struct xfs_mount *mp = ip->i_mount; + xfs_fileoff_t map_first; + int nimaps; + xfs_bmbt_irec_t imaps[2]; if ((ip->i_d.di_mode & S_IFMT) != S_IFREG) return; @@ -1214,11 +1214,14 @@ xfs_isize_check( ASSERT(nimaps == 1); ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK); } +#else /* DEBUG */ +#define xfs_isize_check(ip, isize) #endif /* DEBUG */ /* - * Free up the underlying blocks past new_size. The new size must be - * smaller than the current size. + * Free up the underlying blocks past new_size. The new size must be smaller + * than the current size. This routine can be used both for the attribute and + * data fork, and does not modify the inode size, which is left to the caller. * * The transaction passed to this routine must have made a permanent log * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the @@ -1230,31 +1233,6 @@ xfs_isize_check( * will be "held" within the returned transaction. This routine does NOT * require any disk space to be reserved for it within the transaction. * - * The fork parameter must be either XFS_ATTR_FORK or XFS_DATA_FORK, and it - * indicates the fork which is to be truncated. For the attribute fork we only - * support truncation to size 0. - * - * We use the sync parameter to indicate whether or not the first transaction - * we perform might have to be synchronous. For the attr fork, it needs to be - * so if the unlink of the inode is not yet known to be permanent in the log. - * This keeps us from freeing and reusing the blocks of the attribute fork - * before the unlink of the inode becomes permanent. - * - * For the data fork, we normally have to run synchronously if we're being - * called out of the inactive path or we're being called out of the create path - * where we're truncating an existing file. Either way, the truncate needs to - * be sync so blocks don't reappear in the file with altered data in case of a - * crash. wsync filesystems can run the first case async because anything that - * shrinks the inode has to run sync so by the time we're called here from - * inactive, the inode size is permanently set to 0. - * - * Calls from the truncate path always need to be sync unless we're in a wsync - * filesystem and the file has already been unlinked. - * - * The caller is responsible for correctly setting the sync parameter. It gets - * too hard for us to guess here which path we're being called out of just - * based on inode state. - * * If we get an error, we must return with the inode locked and linked into the * current transaction. This keeps things simple for the higher level code, * because it always knows that the inode is locked and held in the transaction @@ -1262,124 +1240,31 @@ xfs_isize_check( * dirty on error so that transactions can be easily aborted if possible. 
*/ int -xfs_itruncate_finish( - xfs_trans_t **tp, - xfs_inode_t *ip, - xfs_fsize_t new_size, - int fork, - int sync) +xfs_itruncate_extents( + struct xfs_trans **tpp, + struct xfs_inode *ip, + int whichfork, + xfs_fsize_t new_size) { - xfs_fsblock_t first_block; - xfs_fileoff_t first_unmap_block; - xfs_fileoff_t last_block; - xfs_filblks_t unmap_len=0; - xfs_mount_t *mp; - xfs_trans_t *ntp; - int done; - int committed; - xfs_bmap_free_t free_list; - int error; + struct xfs_mount *mp = ip->i_mount; + struct xfs_trans *tp = *tpp; + struct xfs_trans *ntp; + xfs_bmap_free_t free_list; + xfs_fsblock_t first_block; + xfs_fileoff_t first_unmap_block; + xfs_fileoff_t last_block; + xfs_filblks_t unmap_len; + int committed; + int error = 0; + int done = 0; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); - ASSERT((new_size == 0) || (new_size <= ip->i_size)); - ASSERT(*tp != NULL); - ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES); - ASSERT(ip->i_transp == *tp); + ASSERT(new_size <= ip->i_size); + ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); + ASSERT(ip->i_transp == tp); ASSERT(ip->i_itemp != NULL); ASSERT(ip->i_itemp->ili_lock_flags == 0); - - - ntp = *tp; - mp = (ntp)->t_mountp; - ASSERT(! XFS_NOT_DQATTACHED(mp, ip)); - - /* - * We only support truncating the entire attribute fork. - */ - if (fork == XFS_ATTR_FORK) { - new_size = 0LL; - } - first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); - trace_xfs_itruncate_finish_start(ip, new_size); - - /* - * The first thing we do is set the size to new_size permanently - * on disk. This way we don't have to worry about anyone ever - * being able to look at the data being freed even in the face - * of a crash. What we're getting around here is the case where - * we free a block, it is allocated to another file, it is written - * to, and then we crash. If the new data gets written to the - * file but the log buffers containing the free and reallocation - * don't, then we'd end up with garbage in the blocks being freed. - * As long as we make the new_size permanent before actually - * freeing any blocks it doesn't matter if they get written to. - * - * The callers must signal into us whether or not the size - * setting here must be synchronous. There are a few cases - * where it doesn't have to be synchronous. Those cases - * occur if the file is unlinked and we know the unlink is - * permanent or if the blocks being truncated are guaranteed - * to be beyond the inode eof (regardless of the link count) - * and the eof value is permanent. Both of these cases occur - * only on wsync-mounted filesystems. In those cases, we're - * guaranteed that no user will ever see the data in the blocks - * that are being truncated so the truncate can run async. - * In the free beyond eof case, the file may wind up with - * more blocks allocated to it than it needs if we crash - * and that won't get fixed until the next time the file - * is re-opened and closed but that's ok as that shouldn't - * be too many blocks. - * - * However, we can't just make all wsync xactions run async - * because there's one call out of the create path that needs - * to run sync where it's truncating an existing file to size - * 0 whose size is > 0. - * - * It's probably possible to come up with a test in this - * routine that would correctly distinguish all the above - * cases from the values of the function parameters and the - * inode state but for sanity's sake, I've decided to let the - * layers above just tell us. 
It's simpler to correctly figure - * out in the layer above exactly under what conditions we - * can run async and I think it's easier for others read and - * follow the logic in case something has to be changed. - * cscope is your friend -- rcc. - * - * The attribute fork is much simpler. - * - * For the attribute fork we allow the caller to tell us whether - * the unlink of the inode that led to this call is yet permanent - * in the on disk log. If it is not and we will be freeing extents - * in this inode then we make the first transaction synchronous - * to make sure that the unlink is permanent by the time we free - * the blocks. - */ - if (fork == XFS_DATA_FORK) { - if (ip->i_d.di_nextents > 0) { - /* - * If we are not changing the file size then do - * not update the on-disk file size - we may be - * called from xfs_inactive_free_eofblocks(). If we - * update the on-disk file size and then the system - * crashes before the contents of the file are - * flushed to disk then the files may be full of - * holes (ie NULL files bug). - */ - if (ip->i_size != new_size) { - ip->i_d.di_size = new_size; - ip->i_size = new_size; - xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); - } - } - } else if (sync) { - ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC)); - if (ip->i_d.di_anextents > 0) - xfs_trans_set_sync(ntp); - } - ASSERT(fork == XFS_DATA_FORK || - (fork == XFS_ATTR_FORK && - ((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) || - (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC))))); + ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); /* * Since it is possible for space to become allocated beyond @@ -1390,128 +1275,142 @@ xfs_itruncate_finish( * beyond the maximum file size (ie it is the same as last_block), * then there is nothing to do. */ + first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); - ASSERT(first_unmap_block <= last_block); - done = 0; - if (last_block == first_unmap_block) { - done = 1; - } else { - unmap_len = last_block - first_unmap_block + 1; - } + if (first_unmap_block == last_block) + return 0; + + ASSERT(first_unmap_block < last_block); + unmap_len = last_block - first_unmap_block + 1; while (!done) { - /* - * Free up up to XFS_ITRUNC_MAX_EXTENTS. xfs_bunmapi() - * will tell us whether it freed the entire range or - * not. If this is a synchronous mount (wsync), - * then we can tell bunmapi to keep all the - * transactions asynchronous since the unlink - * transaction that made this inode inactive has - * already hit the disk. There's no danger of - * the freed blocks being reused, there being a - * crash, and the reused blocks suddenly reappearing - * in this file with garbage in them once recovery - * runs. - */ xfs_bmap_init(&free_list, &first_block); - error = xfs_bunmapi(ntp, ip, + error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, - xfs_bmapi_aflag(fork), + xfs_bmapi_aflag(whichfork), XFS_ITRUNC_MAX_EXTENTS, &first_block, &free_list, &done); - if (error) { - /* - * If the bunmapi call encounters an error, - * return to the caller where the transaction - * can be properly aborted. We just need to - * make sure we're not holding any resources - * that we were not when we came in. - */ - xfs_bmap_cancel(&free_list); - return error; - } + if (error) + goto out_bmap_cancel; /* * Duplicate the transaction that has the permanent * reservation and commit the old transaction. 
*/ - error = xfs_bmap_finish(tp, &free_list, &committed); - ntp = *tp; + error = xfs_bmap_finish(&tp, &free_list, &committed); if (committed) - xfs_trans_ijoin(ntp, ip); - - if (error) { - /* - * If the bmap finish call encounters an error, return - * to the caller where the transaction can be properly - * aborted. We just need to make sure we're not - * holding any resources that we were not when we came - * in. - * - * Aborting from this point might lose some blocks in - * the file system, but oh well. - */ - xfs_bmap_cancel(&free_list); - return error; - } + xfs_trans_ijoin(tp, ip); + if (error) + goto out_bmap_cancel; if (committed) { /* * Mark the inode dirty so it will be logged and * moved forward in the log as part of every commit. */ - xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); } - ntp = xfs_trans_dup(ntp); - error = xfs_trans_commit(*tp, 0); - *tp = ntp; + ntp = xfs_trans_dup(tp); + error = xfs_trans_commit(tp, 0); + tp = ntp; - xfs_trans_ijoin(ntp, ip); + xfs_trans_ijoin(tp, ip); if (error) - return error; + goto out; + /* - * transaction commit worked ok so we can drop the extra ticket + * Transaction commit worked ok so we can drop the extra ticket * reference that we gained in xfs_trans_dup() */ - xfs_log_ticket_put(ntp->t_ticket); - error = xfs_trans_reserve(ntp, 0, + xfs_log_ticket_put(tp->t_ticket); + error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT); if (error) - return error; + goto out; } + +out: + *tpp = tp; + return error; +out_bmap_cancel: /* - * Only update the size in the case of the data fork, but - * always re-log the inode so that our permanent transaction - * can keep on rolling it forward in the log. + * If the bunmapi call encounters an error, return to the caller where + * the transaction can be properly aborted. We just need to make sure + * we're not holding any resources that we were not when we came in. */ - if (fork == XFS_DATA_FORK) { - xfs_isize_check(mp, ip, new_size); + xfs_bmap_cancel(&free_list); + goto out; +} + +int +xfs_itruncate_data( + struct xfs_trans **tpp, + struct xfs_inode *ip, + xfs_fsize_t new_size) +{ + int error; + + trace_xfs_itruncate_data_start(ip, new_size); + + /* + * The first thing we do is set the size to new_size permanently on + * disk. This way we don't have to worry about anyone ever being able + * to look at the data being freed even in the face of a crash. + * What we're getting around here is the case where we free a block, it + * is allocated to another file, it is written to, and then we crash. + * If the new data gets written to the file but the log buffers + * containing the free and reallocation don't, then we'd end up with + * garbage in the blocks being freed. As long as we make the new_size + * permanent before actually freeing any blocks it doesn't matter if + * they get written to. + */ + if (ip->i_d.di_nextents > 0) { /* - * If we are not changing the file size then do - * not update the on-disk file size - we may be - * called from xfs_inactive_free_eofblocks(). If we - * update the on-disk file size and then the system - * crashes before the contents of the file are - * flushed to disk then the files may be full of - * holes (ie NULL files bug). + * If we are not changing the file size then do not update + * the on-disk file size - we may be called from + * xfs_inactive_free_eofblocks(). 
If we update the on-disk + * file size and then the system crashes before the contents + * of the file are flushed to disk then the files may be + * full of holes (ie NULL files bug). */ if (ip->i_size != new_size) { ip->i_d.di_size = new_size; ip->i_size = new_size; + xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE); } } - xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); - ASSERT((new_size != 0) || - (fork == XFS_ATTR_FORK) || - (ip->i_delayed_blks == 0)); - ASSERT((new_size != 0) || - (fork == XFS_ATTR_FORK) || - (ip->i_d.di_nextents == 0)); - trace_xfs_itruncate_finish_end(ip, new_size); + + error = xfs_itruncate_extents(tpp, ip, XFS_DATA_FORK, new_size); + if (error) + return error; + + /* + * If we are not changing the file size then do not update the on-disk + * file size - we may be called from xfs_inactive_free_eofblocks(). + * If we update the on-disk file size and then the system crashes + * before the contents of the file are flushed to disk then the files + * may be full of holes (ie NULL files bug). + */ + xfs_isize_check(ip, new_size); + if (ip->i_size != new_size) { + ip->i_d.di_size = new_size; + ip->i_size = new_size; + } + + ASSERT(new_size != 0 || ip->i_delayed_blks == 0); + ASSERT(new_size != 0 || ip->i_d.di_nextents == 0); + + /* + * Always re-log the inode so that our permanent transaction can keep + * on rolling it forward in the log. + */ + xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE); + + trace_xfs_itruncate_data_end(ip, new_size); return 0; } diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 6efd471c872..6495578efe0 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -491,8 +491,10 @@ uint xfs_ip2xflags(struct xfs_inode *); uint xfs_dic2xflags(struct xfs_dinode *); int xfs_ifree(struct xfs_trans *, xfs_inode_t *, struct xfs_bmap_free *); -int xfs_itruncate_finish(struct xfs_trans **, xfs_inode_t *, - xfs_fsize_t, int, int); +int xfs_itruncate_extents(struct xfs_trans **, struct xfs_inode *, + int, xfs_fsize_t); +int xfs_itruncate_data(struct xfs_trans **, struct xfs_inode *, + xfs_fsize_t); int xfs_iunlink(struct xfs_trans *, xfs_inode_t *); void xfs_iext_realloc(xfs_inode_t *, int, int); @@ -568,13 +570,6 @@ void xfs_iext_irec_update_extoffs(xfs_ifork_t *, int, int); #define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount)) -#ifdef DEBUG -void xfs_isize_check(struct xfs_mount *, struct xfs_inode *, - xfs_fsize_t); -#else /* DEBUG */ -#define xfs_isize_check(mp, ip, isize) -#endif /* DEBUG */ - #if defined(DEBUG) void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *); #else diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 45b8ac662ae..11242c48277 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -220,15 +220,12 @@ xfs_free_eofblocks( xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip); - error = xfs_itruncate_finish(&tp, ip, - ip->i_size, - XFS_DATA_FORK, - 0); - /* - * If we get an error at this point we - * simply don't bother truncating the file. - */ + error = xfs_itruncate_data(&tp, ip, ip->i_size); if (error) { + /* + * If we get an error at this point we simply don't + * bother truncating the file. + */ xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT)); @@ -665,16 +662,7 @@ xfs_inactive( xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip); - /* - * normally, we have to run xfs_itruncate_finish sync. 
- * But if filesystem is wsync and we're in the inactive - * path, then we know that nlink == 0, and that the - * xaction that made nlink == 0 is permanently committed - * since xfs_remove runs as a synchronous transaction. - */ - error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, - (!(mp->m_flags & XFS_MOUNT_WSYNC) ? 1 : 0)); - + error = xfs_itruncate_data(&tp, ip, 0); if (error) { xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); -- cgit v1.2.3-70-g09d2 From f3ca87389dbff0a3dc1a7cb2fa7c62e25421c66c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:34:47 +0200 Subject: xfs: remove i_transp Remove the transaction pointer in the inode. It's only used to avoid passing down an argument in the bmap code, and for a few asserts in the transaction code right now. Also use the local variable ip in a few more places in xfs_inode_item_unlock, so that it isn't only used for debug builds after the above change. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/quota/xfs_trans_dquot.c | 2 +- fs/xfs/xfs_bmap.c | 35 +++++++++++++++++++---------------- fs/xfs/xfs_inode.c | 3 --- fs/xfs/xfs_inode.h | 1 - fs/xfs/xfs_inode_item.c | 13 ++++--------- fs/xfs/xfs_trans_inode.c | 9 --------- 6 files changed, 24 insertions(+), 39 deletions(-) (limited to 'fs/xfs/xfs_inode.c') diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c index 2a364873133..24084edb780 100644 --- a/fs/xfs/quota/xfs_trans_dquot.c +++ b/fs/xfs/quota/xfs_trans_dquot.c @@ -59,7 +59,7 @@ xfs_trans_dqjoin( xfs_trans_add_item(tp, &dqp->q_logitem.qli_item); /* - * Initialize i_transp so we can later determine if this dquot is + * Initialize d_transp so we can later determine if this dquot is * associated with this transaction. 
*/ dqp->q_transp = tp; diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index e546a33214c..7b46b78ba3f 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -94,6 +94,7 @@ xfs_bmap_add_attrfork_local( */ STATIC int /* error */ xfs_bmap_add_extent_delay_real( + struct xfs_trans *tp, /* transaction pointer */ xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t *idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ @@ -439,6 +440,7 @@ xfs_bmap_add_attrfork_local( */ STATIC int /* error */ xfs_bmap_add_extent( + struct xfs_trans *tp, /* transaction pointer */ xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t *idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ @@ -524,7 +526,7 @@ xfs_bmap_add_extent( if (cur) ASSERT(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL); - error = xfs_bmap_add_extent_delay_real(ip, + error = xfs_bmap_add_extent_delay_real(tp, ip, idx, &cur, new, &da_new, first, flist, &logflags); } else { @@ -561,7 +563,7 @@ xfs_bmap_add_extent( int tmp_logflags; /* partial log flag return val */ ASSERT(cur == NULL); - error = xfs_bmap_extents_to_btree(ip->i_transp, ip, first, + error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur, da_old > 0, &tmp_logflags, whichfork); logflags |= tmp_logflags; if (error) @@ -604,6 +606,7 @@ done: */ STATIC int /* error */ xfs_bmap_add_extent_delay_real( + struct xfs_trans *tp, /* transaction pointer */ xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t *idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ @@ -901,7 +904,7 @@ xfs_bmap_add_extent_delay_real( } if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && ip->i_d.di_nextents > ip->i_df.if_ext_max) { - error = xfs_bmap_extents_to_btree(ip->i_transp, ip, + error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur, 1, &tmp_rval, XFS_DATA_FORK); rval |= tmp_rval; @@ -984,7 +987,7 @@ xfs_bmap_add_extent_delay_real( } if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && ip->i_d.di_nextents > ip->i_df.if_ext_max) { - error = xfs_bmap_extents_to_btree(ip->i_transp, ip, + error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur, 1, &tmp_rval, XFS_DATA_FORK); rval |= tmp_rval; @@ -1052,7 +1055,7 @@ xfs_bmap_add_extent_delay_real( } if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && ip->i_d.di_nextents > ip->i_df.if_ext_max) { - error = xfs_bmap_extents_to_btree(ip->i_transp, ip, + error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur, 1, &tmp_rval, XFS_DATA_FORK); rval |= tmp_rval; @@ -2871,8 +2874,8 @@ xfs_bmap_del_extent( len = del->br_blockcount; do_div(bno, mp->m_sb.sb_rextsize); do_div(len, mp->m_sb.sb_rextsize); - if ((error = xfs_rtfree_extent(ip->i_transp, bno, - (xfs_extlen_t)len))) + error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); + if (error) goto done; do_fx = 0; nblks = len * mp->m_sb.sb_rextsize; @@ -4662,7 +4665,7 @@ xfs_bmapi( if (!wasdelay && (flags & XFS_BMAPI_PREALLOC)) got.br_state = XFS_EXT_UNWRITTEN; } - error = xfs_bmap_add_extent(ip, &lastx, &cur, &got, + error = xfs_bmap_add_extent(tp, ip, &lastx, &cur, &got, firstblock, flist, &tmp_logflags, whichfork); logflags |= tmp_logflags; @@ -4763,7 +4766,7 @@ xfs_bmapi( mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN) ? 
XFS_EXT_NORM : XFS_EXT_UNWRITTEN; - error = xfs_bmap_add_extent(ip, &lastx, &cur, mval, + error = xfs_bmap_add_extent(tp, ip, &lastx, &cur, mval, firstblock, flist, &tmp_logflags, whichfork); logflags |= tmp_logflags; @@ -5117,7 +5120,7 @@ xfs_bunmapi( del.br_blockcount = mod; } del.br_state = XFS_EXT_UNWRITTEN; - error = xfs_bmap_add_extent(ip, &lastx, &cur, &del, + error = xfs_bmap_add_extent(tp, ip, &lastx, &cur, &del, firstblock, flist, &logflags, XFS_DATA_FORK); if (error) @@ -5175,18 +5178,18 @@ xfs_bunmapi( } prev.br_state = XFS_EXT_UNWRITTEN; lastx--; - error = xfs_bmap_add_extent(ip, &lastx, &cur, - &prev, firstblock, flist, &logflags, - XFS_DATA_FORK); + error = xfs_bmap_add_extent(tp, ip, &lastx, + &cur, &prev, firstblock, flist, + &logflags, XFS_DATA_FORK); if (error) goto error0; goto nodelete; } else { ASSERT(del.br_state == XFS_EXT_NORM); del.br_state = XFS_EXT_UNWRITTEN; - error = xfs_bmap_add_extent(ip, &lastx, &cur, - &del, firstblock, flist, &logflags, - XFS_DATA_FORK); + error = xfs_bmap_add_extent(tp, ip, &lastx, + &cur, &del, firstblock, flist, + &logflags, XFS_DATA_FORK); if (error) goto error0; goto nodelete; diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index aa143b870af..db310f8fb76 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1261,7 +1261,6 @@ xfs_itruncate_extents( ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); ASSERT(new_size <= ip->i_size); ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); - ASSERT(ip->i_transp == tp); ASSERT(ip->i_itemp != NULL); ASSERT(ip->i_itemp->ili_lock_flags == 0); ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); @@ -1436,7 +1435,6 @@ xfs_iunlink( ASSERT(ip->i_d.di_nlink == 0); ASSERT(ip->i_d.di_mode != 0); - ASSERT(ip->i_transp == tp); mp = tp->t_mountp; @@ -1828,7 +1826,6 @@ xfs_ifree( xfs_buf_t *ibp; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - ASSERT(ip->i_transp == tp); ASSERT(ip->i_d.di_nlink == 0); ASSERT(ip->i_d.di_nextents == 0); ASSERT(ip->i_d.di_anextents == 0); diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 6495578efe0..a97644ab945 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -241,7 +241,6 @@ typedef struct xfs_inode { xfs_ifork_t i_df; /* data fork */ /* Transaction and locking information. */ - struct xfs_trans *i_transp; /* ptr to owning transaction*/ struct xfs_inode_log_item *i_itemp; /* logging information */ mrlock_t i_lock; /* inode lock */ mrlock_t i_iolock; /* inode IO lock */ diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index b1e88d56069..a49811a3e5a 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -632,13 +632,8 @@ xfs_inode_item_unlock( struct xfs_inode *ip = iip->ili_inode; unsigned short lock_flags; - ASSERT(iip->ili_inode->i_itemp != NULL); - ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL)); - - /* - * Clear the transaction pointer in the inode. 
- */ - ip->i_transp = NULL; + ASSERT(ip->i_itemp != NULL); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); /* * If the inode needed a separate buffer with which to log @@ -664,8 +659,8 @@ xfs_inode_item_unlock( lock_flags = iip->ili_lock_flags; iip->ili_lock_flags = 0; if (lock_flags) { - xfs_iunlock(iip->ili_inode, lock_flags); - IRELE(iip->ili_inode); + xfs_iunlock(ip, lock_flags); + IRELE(ip); } } diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c index 048b0c689d3..c8dea2fd7e6 100644 --- a/fs/xfs/xfs_trans_inode.c +++ b/fs/xfs/xfs_trans_inode.c @@ -55,7 +55,6 @@ xfs_trans_ijoin( { xfs_inode_log_item_t *iip; - ASSERT(ip->i_transp == NULL); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (ip->i_itemp == NULL) xfs_inode_item_init(ip, ip->i_mount); @@ -68,12 +67,6 @@ xfs_trans_ijoin( xfs_trans_add_item(tp, &iip->ili_item); xfs_trans_inode_broot_debug(ip); - - /* - * Initialize i_transp so we can find it with xfs_inode_incore() - * in xfs_trans_iget() above. - */ - ip->i_transp = tp; } /* @@ -111,7 +104,6 @@ xfs_trans_ichgtime( ASSERT(tp); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - ASSERT(ip->i_transp == tp); tv = current_fs_time(inode->i_sb); @@ -140,7 +132,6 @@ xfs_trans_log_inode( xfs_inode_t *ip, uint flags) { - ASSERT(ip->i_transp == tp); ASSERT(ip->i_itemp != NULL); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); -- cgit v1.2.3-70-g09d2 From 69ef921b55cc3788d1d2a27b33b27d04acd0090a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:36:05 +0200 Subject: xfs: byteswap constants instead of variables Micro-optimize various comparisons by always byteswapping the constant instead of the variable, which allows the swap to be done at compile time instead of at runtime. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_alloc.c | 2 +- fs/xfs/xfs_attr.c | 19 ++++++++------- fs/xfs/xfs_attr_leaf.c | 60 ++++++++++++++++++++++++------------------------ fs/xfs/xfs_bmap.c | 2 +- fs/xfs/xfs_bmap_btree.c | 8 +++---- fs/xfs/xfs_btree.c | 28 +++++++++++----------- fs/xfs/xfs_da_btree.c | 58 +++++++++++++++++++++++----------------------- fs/xfs/xfs_dir2_block.c | 18 +++++++++------ fs/xfs/xfs_dir2_data.c | 47 ++++++++++++++++++------------------- fs/xfs/xfs_dir2_leaf.c | 59 +++++++++++++++++++++++++---------------------- fs/xfs/xfs_dir2_node.c | 53 ++++++++++++++++++++++-------------------- fs/xfs/xfs_ialloc.c | 14 +++++------ fs/xfs/xfs_inode.c | 13 +++++------ fs/xfs/xfs_log.c | 4 ++-- fs/xfs/xfs_log_recover.c | 20 ++++++++-------- 15 files changed, 208 insertions(+), 197 deletions(-) (limited to 'fs/xfs/xfs_inode.c') diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 95862bbff56..3ad3ab9f56c 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -2127,7 +2127,7 @@ xfs_read_agf( * Validate the magic number of the agf block.
*/ agf_ok = - be32_to_cpu(agf->agf_magicnum) == XFS_AGF_MAGIC && + agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) && XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) && be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) && be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) && diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index 795d5aac704..cbae424fe1b 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c @@ -1203,7 +1203,7 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context) return XFS_ERROR(error); ASSERT(bp != NULL); leaf = bp->data; - if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_ATTR_LEAF_MAGIC)) { + if (unlikely(leaf->hdr.info.magic != cpu_to_be16(XFS_ATTR_LEAF_MAGIC))) { XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW, context->dp->i_mount, leaf); xfs_da_brelse(NULL, bp); @@ -1610,9 +1610,8 @@ xfs_attr_node_removename(xfs_da_args_t *args) XFS_ATTR_FORK); if (error) goto out; - ASSERT(be16_to_cpu(((xfs_attr_leafblock_t *) - bp->data)->hdr.info.magic) - == XFS_ATTR_LEAF_MAGIC); + ASSERT((((xfs_attr_leafblock_t *)bp->data)->hdr.info.magic) == + cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { xfs_bmap_init(args->flist, args->firstblock); @@ -1877,11 +1876,11 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) return(XFS_ERROR(EFSCORRUPTED)); } node = bp->data; - if (be16_to_cpu(node->hdr.info.magic) - == XFS_ATTR_LEAF_MAGIC) + if (node->hdr.info.magic == + cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) break; - if (unlikely(be16_to_cpu(node->hdr.info.magic) - != XFS_DA_NODE_MAGIC)) { + if (unlikely(node->hdr.info.magic != + cpu_to_be16(XFS_DA_NODE_MAGIC))) { XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)", XFS_ERRLEVEL_LOW, context->dp->i_mount, @@ -1916,8 +1915,8 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) */ for (;;) { leaf = bp->data; - if (unlikely(be16_to_cpu(leaf->hdr.info.magic) - != XFS_ATTR_LEAF_MAGIC)) { + if (unlikely(leaf->hdr.info.magic != + cpu_to_be16(XFS_ATTR_LEAF_MAGIC))) { XFS_CORRUPTION_ERROR("xfs_attr_node_list(4)", XFS_ERRLEVEL_LOW, context->dp->i_mount, leaf); diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c index 71e90dc2aeb..8fad9602542 100644 --- a/fs/xfs/xfs_attr_leaf.c +++ b/fs/xfs/xfs_attr_leaf.c @@ -731,7 +731,7 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp) int bytes, i; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); entry = &leaf->entries[0]; bytes = sizeof(struct xfs_attr_sf_hdr); @@ -777,7 +777,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff) ASSERT(bp != NULL); memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount)); leaf = (xfs_attr_leafblock_t *)tmpbuffer; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); memset(bp->data, 0, XFS_LBSIZE(dp->i_mount)); /* @@ -872,7 +872,7 @@ xfs_attr_leaf_to_node(xfs_da_args_t *args) goto out; node = bp1->data; leaf = bp2->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); /* both on-disk, don't endian-flip twice */ node->btree[0].hashval = leaf->entries[be16_to_cpu(leaf->hdr.count)-1 ].hashval; @@ -997,7 +997,7 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args) int tablesize, entsize, sum, tmp, i; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + 
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); ASSERT((args->index >= 0) && (args->index <= be16_to_cpu(leaf->hdr.count))); hdr = &leaf->hdr; @@ -1070,7 +1070,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) int tmp, i; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); hdr = &leaf->hdr; ASSERT((mapindex >= 0) && (mapindex < XFS_ATTR_LEAF_MAPSIZE)); ASSERT((args->index >= 0) && (args->index <= be16_to_cpu(hdr->count))); @@ -1256,8 +1256,8 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC); leaf1 = blk1->bp->data; leaf2 = blk2->bp->data; - ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); - ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); + ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); args = state->args; /* @@ -1533,7 +1533,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action) */ blk = &state->path.blk[ state->path.active-1 ]; info = blk->bp->data; - ASSERT(be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); leaf = (xfs_attr_leafblock_t *)info; count = be16_to_cpu(leaf->hdr.count); bytes = sizeof(xfs_attr_leaf_hdr_t) + @@ -1596,7 +1596,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action) bytes = state->blocksize - (state->blocksize>>2); bytes -= be16_to_cpu(leaf->hdr.usedbytes); leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); count += be16_to_cpu(leaf->hdr.count); bytes -= be16_to_cpu(leaf->hdr.usedbytes); bytes -= count * sizeof(xfs_attr_leaf_entry_t); @@ -1650,7 +1650,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) xfs_mount_t *mp; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); hdr = &leaf->hdr; mp = args->trans->t_mountp; ASSERT((be16_to_cpu(hdr->count) > 0) @@ -1813,8 +1813,8 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, ASSERT(save_blk->magic == XFS_ATTR_LEAF_MAGIC); drop_leaf = drop_blk->bp->data; save_leaf = save_blk->bp->data; - ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); - ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(drop_leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); + ASSERT(save_leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); drop_hdr = &drop_leaf->hdr; save_hdr = &save_leaf->hdr; @@ -1915,7 +1915,7 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args) xfs_dahash_t hashval; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); ASSERT(be16_to_cpu(leaf->hdr.count) < (XFS_LBSIZE(args->dp->i_mount)/8)); @@ -2019,7 +2019,7 @@ xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args) xfs_attr_leaf_name_remote_t *name_rmt; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); ASSERT(be16_to_cpu(leaf->hdr.count) < (XFS_LBSIZE(args->dp->i_mount)/8)); ASSERT(args->index < be16_to_cpu(leaf->hdr.count)); @@ -2087,8 +2087,8 @@ 
xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, /* * Set up environment. */ - ASSERT(be16_to_cpu(leaf_s->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); - ASSERT(be16_to_cpu(leaf_d->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf_s->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); + ASSERT(leaf_d->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); hdr_s = &leaf_s->hdr; hdr_d = &leaf_d->hdr; ASSERT((be16_to_cpu(hdr_s->count) > 0) && @@ -2222,8 +2222,8 @@ xfs_attr_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp) leaf1 = leaf1_bp->data; leaf2 = leaf2_bp->data; - ASSERT((be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC) && - (be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC)); + ASSERT((leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) && + (leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC))); if ((be16_to_cpu(leaf1->hdr.count) > 0) && (be16_to_cpu(leaf2->hdr.count) > 0) && ((be32_to_cpu(leaf2->entries[0].hashval) < @@ -2246,7 +2246,7 @@ xfs_attr_leaf_lasthash(xfs_dabuf_t *bp, int *count) xfs_attr_leafblock_t *leaf; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); if (count) *count = be16_to_cpu(leaf->hdr.count); if (!leaf->hdr.count) @@ -2265,7 +2265,7 @@ xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index) xfs_attr_leaf_name_remote_t *name_rmt; int size; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); if (leaf->entries[index].flags & XFS_ATTR_LOCAL) { name_loc = xfs_attr_leaf_name_local(leaf, index); size = xfs_attr_leaf_entsize_local(name_loc->namelen, @@ -2451,7 +2451,7 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args) ASSERT(bp != NULL); leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); ASSERT(args->index < be16_to_cpu(leaf->hdr.count)); ASSERT(args->index >= 0); entry = &leaf->entries[ args->index ]; @@ -2515,7 +2515,7 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args) ASSERT(bp != NULL); leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); ASSERT(args->index < be16_to_cpu(leaf->hdr.count)); ASSERT(args->index >= 0); entry = &leaf->entries[ args->index ]; @@ -2585,13 +2585,13 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args) } leaf1 = bp1->data; - ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); ASSERT(args->index < be16_to_cpu(leaf1->hdr.count)); ASSERT(args->index >= 0); entry1 = &leaf1->entries[ args->index ]; leaf2 = bp2->data; - ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); ASSERT(args->index2 < be16_to_cpu(leaf2->hdr.count)); ASSERT(args->index2 >= 0); entry2 = &leaf2->entries[ args->index2 ]; @@ -2689,9 +2689,9 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp) * This is a depth-first traversal! 
*/ info = bp->data; - if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) { + if (info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) { error = xfs_attr_node_inactive(trans, dp, bp, 1); - } else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) { + } else if (info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) { error = xfs_attr_leaf_inactive(trans, dp, bp); } else { error = XFS_ERROR(EIO); @@ -2739,7 +2739,7 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp, } node = bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); parent_blkno = xfs_da_blkno(bp); /* save for re-read later */ count = be16_to_cpu(node->hdr.count); if (!count) { @@ -2773,10 +2773,10 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp, * Invalidate the subtree, however we have to. */ info = child_bp->data; - if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) { + if (info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) { error = xfs_attr_node_inactive(trans, dp, child_bp, level+1); - } else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) { + } else if (info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) { error = xfs_attr_leaf_inactive(trans, dp, child_bp); } else { @@ -2836,7 +2836,7 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp) int error, count, size, tmp, i; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); /* * Count the number of "remote" value extents. diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 7b46b78ba3f..2ce6aa644e7 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -4083,7 +4083,7 @@ xfs_bmap_sanity_check( { struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); - if (be32_to_cpu(block->bb_magic) != XFS_BMAP_MAGIC || + if (block->bb_magic != cpu_to_be32(XFS_BMAP_MAGIC) || be16_to_cpu(block->bb_level) != level || be16_to_cpu(block->bb_numrecs) == 0 || be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0]) diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 87d3c10b695..50a28442a54 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -425,10 +425,10 @@ xfs_bmbt_to_bmdr( xfs_bmbt_key_t *tkp; __be64 *tpp; - ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC); - ASSERT(be64_to_cpu(rblock->bb_u.l.bb_leftsib) == NULLDFSBNO); - ASSERT(be64_to_cpu(rblock->bb_u.l.bb_rightsib) == NULLDFSBNO); - ASSERT(be16_to_cpu(rblock->bb_level) > 0); + ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC)); + ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO)); + ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO)); + ASSERT(rblock->bb_level != 0); dblock->bb_level = rblock->bb_level; dblock->bb_numrecs = rblock->bb_numrecs; dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0); diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 2f9e97c128a..43125af4262 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -66,11 +66,11 @@ xfs_btree_check_lblock( be16_to_cpu(block->bb_numrecs) <= cur->bc_ops->get_maxrecs(cur, level) && block->bb_u.l.bb_leftsib && - (be64_to_cpu(block->bb_u.l.bb_leftsib) == NULLDFSBNO || + (block->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO) || XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_leftsib))) && block->bb_u.l.bb_rightsib && - (be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO || + (block->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO) || 
XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_rightsib))); if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp, @@ -105,10 +105,10 @@ xfs_btree_check_sblock( be16_to_cpu(block->bb_level) == level && be16_to_cpu(block->bb_numrecs) <= cur->bc_ops->get_maxrecs(cur, level) && - (be32_to_cpu(block->bb_u.s.bb_leftsib) == NULLAGBLOCK || + (block->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) || be32_to_cpu(block->bb_u.s.bb_leftsib) < agflen) && block->bb_u.s.bb_leftsib && - (be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK || + (block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK) || be32_to_cpu(block->bb_u.s.bb_rightsib) < agflen) && block->bb_u.s.bb_rightsib; if (unlikely(XFS_TEST_ERROR(!sblock_ok, cur->bc_mp, @@ -511,9 +511,9 @@ xfs_btree_islastblock( block = xfs_btree_get_block(cur, level, &bp); xfs_btree_check_block(cur, block, level, bp); if (cur->bc_flags & XFS_BTREE_LONG_PTRS) - return be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO; + return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO); else - return be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK; + return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK); } /* @@ -777,14 +777,14 @@ xfs_btree_setbuf( b = XFS_BUF_TO_BLOCK(bp); if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { - if (be64_to_cpu(b->bb_u.l.bb_leftsib) == NULLDFSBNO) + if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO)) cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; - if (be64_to_cpu(b->bb_u.l.bb_rightsib) == NULLDFSBNO) + if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO)) cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; } else { - if (be32_to_cpu(b->bb_u.s.bb_leftsib) == NULLAGBLOCK) + if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK)) cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; - if (be32_to_cpu(b->bb_u.s.bb_rightsib) == NULLAGBLOCK) + if (b->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK)) cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; } } @@ -795,9 +795,9 @@ xfs_btree_ptr_is_null( union xfs_btree_ptr *ptr) { if (cur->bc_flags & XFS_BTREE_LONG_PTRS) - return be64_to_cpu(ptr->l) == NULLDFSBNO; + return ptr->l == cpu_to_be64(NULLDFSBNO); else - return be32_to_cpu(ptr->s) == NULLAGBLOCK; + return ptr->s == cpu_to_be32(NULLAGBLOCK); } STATIC void @@ -923,12 +923,12 @@ xfs_btree_ptr_to_daddr( union xfs_btree_ptr *ptr) { if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { - ASSERT(be64_to_cpu(ptr->l) != NULLDFSBNO); + ASSERT(ptr->l != cpu_to_be64(NULLDFSBNO)); return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l)); } else { ASSERT(cur->bc_private.a.agno != NULLAGNUMBER); - ASSERT(be32_to_cpu(ptr->s) != NULLAGBLOCK); + ASSERT(ptr->s != cpu_to_be32(NULLAGBLOCK)); return XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno, be32_to_cpu(ptr->s)); diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index efb906f8a9c..18425717cbe 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c @@ -321,11 +321,11 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, ASSERT(bp != NULL); node = bp->data; oldroot = blk1->bp->data; - if (be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC) { + if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) { size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] - (char *)oldroot); } else { - ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); leaf = (xfs_dir2_leaf_t *)oldroot; size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] - (char *)leaf); @@ -352,7 +352,7 @@ xfs_da_root_split(xfs_da_state_t *state, 
xfs_da_state_blk_t *blk1, node->hdr.count = cpu_to_be16(2); #ifdef DEBUG - if (be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC) { + if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) { ASSERT(blk1->blkno >= mp->m_dirleafblk && blk1->blkno < mp->m_dirfreeblk); ASSERT(blk2->blkno >= mp->m_dirleafblk && @@ -384,7 +384,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, int useextra; node = oldblk->bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); /* * With V2 dirs the extra block is data or freespace. @@ -483,8 +483,8 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, node1 = node2; node2 = tmpnode; } - ASSERT(be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC); - ASSERT(be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); + ASSERT(node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2; if (count == 0) return; @@ -578,7 +578,7 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, int tmp; node = oldblk->bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count))); ASSERT(newblk->blkno != 0); if (state->args->whichfork == XFS_DATA_FORK) @@ -714,7 +714,7 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk) ASSERT(args != NULL); ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC); oldroot = root_blk->bp->data; - ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); ASSERT(!oldroot->hdr.info.forw); ASSERT(!oldroot->hdr.info.back); @@ -737,10 +737,10 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk) ASSERT(bp != NULL); blkinfo = bp->data; if (be16_to_cpu(oldroot->hdr.level) == 1) { - ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DIR2_LEAFN_MAGIC || - be16_to_cpu(blkinfo->magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(blkinfo->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) || + blkinfo->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); } else { - ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DA_NODE_MAGIC); + ASSERT(blkinfo->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); } ASSERT(!blkinfo->forw); ASSERT(!blkinfo->back); @@ -776,7 +776,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action) */ blk = &state->path.blk[ state->path.active-1 ]; info = blk->bp->data; - ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC); + ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); node = (xfs_da_intnode_t *)info; count = be16_to_cpu(node->hdr.count); if (count > (state->node_ents >> 1)) { @@ -836,7 +836,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action) count -= state->node_ents >> 2; count -= be16_to_cpu(node->hdr.count); node = bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); count -= be16_to_cpu(node->hdr.count); xfs_da_brelse(state->args->trans, bp); if (count >= 0) @@ -911,7 +911,7 @@ xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path) } for (blk--, level--; level >= 0; blk--, level--) { node = blk->bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + 
ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); btree = &node->btree[ blk->index ]; if (be32_to_cpu(btree->hashval) == lasthash) break; @@ -979,8 +979,8 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, drop_node = drop_blk->bp->data; save_node = save_blk->bp->data; - ASSERT(be16_to_cpu(drop_node->hdr.info.magic) == XFS_DA_NODE_MAGIC); - ASSERT(be16_to_cpu(save_node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); + ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); tp = state->args->trans; /* @@ -1278,8 +1278,8 @@ xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp) node1 = node1_bp->data; node2 = node2_bp->data; - ASSERT((be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC) && - (be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC)); + ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) && + node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) && ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) || @@ -1299,7 +1299,7 @@ xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count) xfs_da_intnode_t *node; node = bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); if (count) *count = be16_to_cpu(node->hdr.count); if (!node->hdr.count) @@ -1412,7 +1412,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path, for (blk = &path->blk[level]; level >= 0; blk--, level--) { ASSERT(blk->bp != NULL); node = blk->bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) { blk->index++; blkno = be32_to_cpu(node->btree[blk->index].before); @@ -1451,9 +1451,9 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path, return(error); ASSERT(blk->bp != NULL); info = blk->bp->data; - ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC || - be16_to_cpu(info->magic) == XFS_DIR2_LEAFN_MAGIC || - be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) || + info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) || + info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); blk->magic = be16_to_cpu(info->magic); if (blk->magic == XFS_DA_NODE_MAGIC) { node = (xfs_da_intnode_t *)info; @@ -1704,12 +1704,12 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, /* * Get values from the moved block. 
*/ - if (be16_to_cpu(dead_info->magic) == XFS_DIR2_LEAFN_MAGIC) { + if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) { dead_leaf2 = (xfs_dir2_leaf_t *)dead_info; dead_level = 0; dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval); } else { - ASSERT(be16_to_cpu(dead_info->magic) == XFS_DA_NODE_MAGIC); + ASSERT(dead_info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); dead_node = (xfs_da_intnode_t *)dead_info; dead_level = be16_to_cpu(dead_node->hdr.level); dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval); @@ -1768,8 +1768,8 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w))) goto done; par_node = par_buf->data; - if (unlikely( - be16_to_cpu(par_node->hdr.info.magic) != XFS_DA_NODE_MAGIC || + if (unlikely(par_node->hdr.info.magic != + cpu_to_be16(XFS_DA_NODE_MAGIC) || (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)", XFS_ERRLEVEL_LOW, mp); @@ -1820,7 +1820,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, par_node = par_buf->data; if (unlikely( be16_to_cpu(par_node->hdr.level) != level || - be16_to_cpu(par_node->hdr.info.magic) != XFS_DA_NODE_MAGIC)) { + par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); @@ -2093,7 +2093,7 @@ xfs_da_do_buf( (magic != XFS_DIR2_LEAFN_MAGIC) && (magic1 != XFS_DIR2_BLOCK_MAGIC) && (magic1 != XFS_DIR2_DATA_MAGIC) && - (be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC), + (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)), mp, XFS_ERRTAG_DA_READ_BUF, XFS_RANDOM_DA_READ_BUF))) { trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_); diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c index c9fdabe5e1a..70fb0cb6bd6 100644 --- a/fs/xfs/xfs_dir2_block.c +++ b/fs/xfs/xfs_dir2_block.c @@ -109,7 +109,7 @@ xfs_dir2_block_addname( /* * Check the magic number, corrupted if wrong. 
*/ - if (unlikely(be32_to_cpu(hdr->magic) != XFS_DIR2_BLOCK_MAGIC)) { + if (unlikely(hdr->magic != cpu_to_be32(XFS_DIR2_BLOCK_MAGIC))) { XFS_CORRUPTION_ERROR("xfs_dir2_block_addname", XFS_ERRLEVEL_LOW, mp, hdr); xfs_da_brelse(tp, bp); @@ -255,7 +255,8 @@ xfs_dir2_block_addname( highstale = lfloghigh = -1; fromidx >= 0; fromidx--) { - if (be32_to_cpu(blp[fromidx].address) == XFS_DIR2_NULL_DATAPTR) { + if (blp[fromidx].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) { if (highstale == -1) highstale = toidx; else { @@ -352,12 +353,14 @@ xfs_dir2_block_addname( else { for (lowstale = mid; lowstale >= 0 && - be32_to_cpu(blp[lowstale].address) != XFS_DIR2_NULL_DATAPTR; + blp[lowstale].address != + cpu_to_be32(XFS_DIR2_NULL_DATAPTR); lowstale--) continue; for (highstale = mid + 1; highstale < be32_to_cpu(btp->count) && - be32_to_cpu(blp[highstale].address) != XFS_DIR2_NULL_DATAPTR && + blp[highstale].address != + cpu_to_be32(XFS_DIR2_NULL_DATAPTR) && (lowstale < 0 || mid - lowstale > highstale - mid); highstale++) continue; @@ -899,7 +902,7 @@ xfs_dir2_leaf_to_block( tp = args->trans; mp = dp->i_mount; leaf = lbp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC)); ltp = xfs_dir2_leaf_tail_p(mp, leaf); /* * If there are data blocks other than the first one, take this @@ -929,7 +932,7 @@ xfs_dir2_leaf_to_block( goto out; } hdr = dbp->data; - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC)); /* * Size of the "leaf" area in the block. */ @@ -971,7 +974,8 @@ xfs_dir2_leaf_to_block( */ lep = xfs_dir2_block_leaf_p(btp); for (from = to = 0; from < be16_to_cpu(leaf->hdr.count); from++) { - if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) + if (leaf->ents[from].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) continue; lep[to++] = leaf->ents[from]; } diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c index 8e47ac3a3b9..32dca4c531a 100644 --- a/fs/xfs/xfs_dir2_data.c +++ b/fs/xfs/xfs_dir2_data.c @@ -73,7 +73,7 @@ xfs_dir2_data_check( bf = hdr->bestfree; p = (char *)(hdr + 1); - if (be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC) { + if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) { btp = xfs_dir2_block_tail_p(mp, hdr); lep = xfs_dir2_block_leaf_p(btp); endp = (char *)lep; @@ -140,7 +140,7 @@ xfs_dir2_data_check( (char *)dep - (char *)hdr); count++; lastfree = 0; - if (be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC) { + if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) { addr = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, (xfs_dir2_data_aoff_t) ((char *)dep - (char *)hdr)); @@ -160,9 +160,10 @@ xfs_dir2_data_check( * Need to have seen all the entries and all the bestfree slots. */ ASSERT(freeseen == 7); - if (be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC) { + if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) { for (i = stale = 0; i < be32_to_cpu(btp->count); i++) { - if (be32_to_cpu(lep[i].address) == XFS_DIR2_NULL_DATAPTR) + if (lep[i].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) stale++; if (i > 0) ASSERT(be32_to_cpu(lep[i].hashval) >= be32_to_cpu(lep[i - 1].hashval)); @@ -196,8 +197,8 @@ xfs_dir2_data_freefind( * Check order, non-overlapping entries, and if we find the * one we're looking for it has to be exact. 
*/ - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || + hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); for (dfp = &hdr->bestfree[0], seenzero = matched = 0; dfp < &hdr->bestfree[XFS_DIR2_DATA_FD_COUNT]; dfp++) { @@ -256,8 +257,8 @@ xfs_dir2_data_freeinsert( xfs_dir2_data_free_t new; /* new bestfree entry */ #ifdef __KERNEL__ - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || + hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); #endif dfp = hdr->bestfree; new.length = dup->length; @@ -297,8 +298,8 @@ xfs_dir2_data_freeremove( int *loghead) /* out: log data header */ { #ifdef __KERNEL__ - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || + hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); #endif /* * It's the first entry, slide the next 2 up. @@ -341,8 +342,8 @@ xfs_dir2_data_freescan( char *p; /* current entry pointer */ #ifdef __KERNEL__ - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || + hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); #endif /* * Start by clearing the table. @@ -353,7 +354,7 @@ xfs_dir2_data_freescan( * Set up pointers. */ p = (char *)(hdr + 1); - if (be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC) { + if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) { btp = xfs_dir2_block_tail_p(mp, hdr); endp = (char *)xfs_dir2_block_leaf_p(btp); } else @@ -458,8 +459,8 @@ xfs_dir2_data_log_entry( { xfs_dir2_data_hdr_t *hdr = bp->data; - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || + hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)hdr), (uint)((char *)(xfs_dir2_data_entry_tag_p(dep) + 1) - @@ -476,8 +477,8 @@ xfs_dir2_data_log_header( { xfs_dir2_data_hdr_t *hdr = bp->data; - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || + hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); xfs_da_log_buf(tp, bp, 0, sizeof(*hdr) - 1); } @@ -493,8 +494,8 @@ xfs_dir2_data_log_unused( { xfs_dir2_data_hdr_t *hdr = bp->data; - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || + hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); /* * Log the first part of the unused entry. @@ -539,12 +540,12 @@ xfs_dir2_data_make_free( /* * Figure out where the end of the data area is. 
*/ - if (be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC) + if (hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC)) endptr = (char *)hdr + mp->m_dirblksize; else { xfs_dir2_block_tail_t *btp; /* block tail */ - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); btp = xfs_dir2_block_tail_p(mp, hdr); endptr = (char *)xfs_dir2_block_leaf_p(btp); } @@ -717,8 +718,8 @@ xfs_dir2_data_use_free( int oldlen; /* old unused entry's length */ hdr = bp->data; - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || + hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); ASSERT(be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG); ASSERT(offset >= (char *)dup - (char *)hdr); ASSERT(offset + len <= (char *)dup + be16_to_cpu(dup->length) - (char *)hdr); diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index 5d2651f1c75..d629b888ca2 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c @@ -201,8 +201,8 @@ xfs_dir2_leaf_find_entry( */ for (lowstale = index - 1; lowstale >= 0 && - be32_to_cpu(leaf->ents[lowstale].address) != - XFS_DIR2_NULL_DATAPTR; + leaf->ents[lowstale].address != + cpu_to_be32(XFS_DIR2_NULL_DATAPTR); lowstale--) continue; @@ -213,8 +213,8 @@ xfs_dir2_leaf_find_entry( */ for (highstale = index; highstale < be16_to_cpu(leaf->hdr.count) && - be32_to_cpu(leaf->ents[highstale].address) != - XFS_DIR2_NULL_DATAPTR && + leaf->ents[highstale].address != + cpu_to_be32(XFS_DIR2_NULL_DATAPTR) && (lowstale < 0 || index - lowstale - 1 >= highstale - index); highstale++) @@ -228,8 +228,8 @@ xfs_dir2_leaf_find_entry( (highstale == be16_to_cpu(leaf->hdr.count) || index - lowstale - 1 < highstale - index)) { ASSERT(index - lowstale - 1 >= 0); - ASSERT(be32_to_cpu(leaf->ents[lowstale].address) == - XFS_DIR2_NULL_DATAPTR); + ASSERT(leaf->ents[lowstale].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)); /* * Copy entries up to cover the stale entry and make room @@ -251,8 +251,8 @@ xfs_dir2_leaf_find_entry( * The high one is better, so use that one. */ ASSERT(highstale - index >= 0); - ASSERT(be32_to_cpu(leaf->ents[highstale].address) == - XFS_DIR2_NULL_DATAPTR); + ASSERT(leaf->ents[highstale].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)); /* * Copy entries down to cover the stale entry and make room for the @@ -342,7 +342,7 @@ xfs_dir2_leaf_addname( continue; i = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address)); ASSERT(i < be32_to_cpu(ltp->bestcount)); - ASSERT(be16_to_cpu(bestsp[i]) != NULLDATAOFF); + ASSERT(bestsp[i] != cpu_to_be16(NULLDATAOFF)); if (be16_to_cpu(bestsp[i]) >= length) { use_block = i; break; @@ -356,7 +356,8 @@ xfs_dir2_leaf_addname( /* * Remember a block we see that's missing. */ - if (be16_to_cpu(bestsp[i]) == NULLDATAOFF && use_block == -1) + if (bestsp[i] == cpu_to_be16(NULLDATAOFF) && + use_block == -1) use_block = i; else if (be16_to_cpu(bestsp[i]) >= length) { use_block = i; @@ -377,7 +378,7 @@ xfs_dir2_leaf_addname( * Now kill use_block if it refers to a missing block, so we * can use it as an indication of allocation needed. 
*/ - if (use_block != -1 && be16_to_cpu(bestsp[use_block]) == NULLDATAOFF) + if (use_block != -1 && bestsp[use_block] == cpu_to_be16(NULLDATAOFF)) use_block = -1; /* * If we don't have enough free bytes but we can make enough @@ -590,7 +591,7 @@ xfs_dir2_leaf_check( leaf = bp->data; mp = dp->i_mount; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC)); /* * This value is not restrictive enough. * Should factor in the size of the bests table as well. @@ -610,7 +611,7 @@ xfs_dir2_leaf_check( if (i + 1 < be16_to_cpu(leaf->hdr.count)) ASSERT(be32_to_cpu(leaf->ents[i].hashval) <= be32_to_cpu(leaf->ents[i + 1].hashval)); - if (be32_to_cpu(leaf->ents[i].address) == XFS_DIR2_NULL_DATAPTR) + if (leaf->ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) stale++; } ASSERT(be16_to_cpu(leaf->hdr.stale) == stale); @@ -639,7 +640,8 @@ xfs_dir2_leaf_compact( * Compress out the stale entries in place. */ for (from = to = 0, loglow = -1; from < be16_to_cpu(leaf->hdr.count); from++) { - if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) + if (leaf->ents[from].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) continue; /* * Only actually copy the entries that are different. @@ -696,7 +698,8 @@ xfs_dir2_leaf_compact_x1( */ for (lowstale = index - 1; lowstale >= 0 && - be32_to_cpu(leaf->ents[lowstale].address) != XFS_DIR2_NULL_DATAPTR; + leaf->ents[lowstale].address != + cpu_to_be32(XFS_DIR2_NULL_DATAPTR); lowstale--) continue; /* @@ -705,7 +708,8 @@ xfs_dir2_leaf_compact_x1( */ for (highstale = index; highstale < be16_to_cpu(leaf->hdr.count) && - be32_to_cpu(leaf->ents[highstale].address) != XFS_DIR2_NULL_DATAPTR && + leaf->ents[highstale].address != + cpu_to_be32(XFS_DIR2_NULL_DATAPTR) && (lowstale < 0 || index - lowstale > highstale - index); highstale++) continue; @@ -729,7 +733,8 @@ xfs_dir2_leaf_compact_x1( if (index == from) newindex = to; if (from != keepstale && - be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) { + leaf->ents[from].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) { if (from == to) *lowlogp = to; continue; @@ -1207,7 +1212,7 @@ xfs_dir2_leaf_log_bests( xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC)); ltp = xfs_dir2_leaf_tail_p(tp->t_mountp, leaf); firstb = xfs_dir2_leaf_bests_p(ltp) + first; lastb = xfs_dir2_leaf_bests_p(ltp) + last; @@ -1230,8 +1235,8 @@ xfs_dir2_leaf_log_ents( xfs_dir2_leaf_t *leaf; /* leaf structure */ leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC || - be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC) || + leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); firstlep = &leaf->ents[first]; lastlep = &leaf->ents[last]; xfs_da_log_buf(tp, bp, (uint)((char *)firstlep - (char *)leaf), @@ -1249,8 +1254,8 @@ xfs_dir2_leaf_log_header( xfs_dir2_leaf_t *leaf; /* leaf structure */ leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC || - be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC) || + leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); xfs_da_log_buf(tp, bp, (uint)((char *)&leaf->hdr - (char *)leaf), (uint)(sizeof(leaf->hdr) - 1)); } @@ -1269,7 +1274,7 @@ 
xfs_dir2_leaf_log_tail( mp = tp->t_mountp; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC)); ltp = xfs_dir2_leaf_tail_p(mp, leaf); xfs_da_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf), (uint)(mp->m_dirblksize - 1)); @@ -1570,7 +1575,7 @@ xfs_dir2_leaf_removename( * Look for the last active entry (i). */ for (i = db - 1; i > 0; i--) { - if (be16_to_cpu(bestsp[i]) != NULLDATAOFF) + if (bestsp[i] != cpu_to_be16(NULLDATAOFF)) break; } /* @@ -1740,7 +1745,7 @@ xfs_dir2_leaf_trim_data( { struct xfs_dir2_data_hdr *hdr = dbp->data; - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC)); ASSERT(be16_to_cpu(hdr->bestfree[0].length) == mp->m_dirblksize - (uint)sizeof(*hdr)); ASSERT(db == be32_to_cpu(ltp->bestcount) - 1); @@ -1850,7 +1855,7 @@ xfs_dir2_node_to_leaf( return 0; lbp = state->path.blk[0].bp; leaf = lbp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); /* * Read the freespace block. */ @@ -1859,7 +1864,7 @@ xfs_dir2_node_to_leaf( return error; } free = fbp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); ASSERT(!free->hdr.firstdb); /* diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c index 8d4b54c1bed..14571cdeb33 100644 --- a/fs/xfs/xfs_dir2_node.c +++ b/fs/xfs/xfs_dir2_node.c @@ -73,7 +73,7 @@ xfs_dir2_free_log_bests( xfs_dir2_free_t *free; /* freespace structure */ free = bp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); xfs_da_log_buf(tp, bp, (uint)((char *)&free->bests[first] - (char *)free), (uint)((char *)&free->bests[last] - (char *)free + @@ -91,7 +91,7 @@ xfs_dir2_free_log_header( xfs_dir2_free_t *free; /* freespace structure */ free = bp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); xfs_da_log_buf(tp, bp, (uint)((char *)&free->hdr - (char *)free), (uint)(sizeof(xfs_dir2_free_hdr_t) - 1)); } @@ -276,14 +276,14 @@ xfs_dir2_leafn_check( leaf = bp->data; mp = dp->i_mount; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); ASSERT(be16_to_cpu(leaf->hdr.count) <= xfs_dir2_max_leaf_ents(mp)); for (i = stale = 0; i < be16_to_cpu(leaf->hdr.count); i++) { if (i + 1 < be16_to_cpu(leaf->hdr.count)) { ASSERT(be32_to_cpu(leaf->ents[i].hashval) <= be32_to_cpu(leaf->ents[i + 1].hashval)); } - if (be32_to_cpu(leaf->ents[i].address) == XFS_DIR2_NULL_DATAPTR) + if (leaf->ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) stale++; } ASSERT(be16_to_cpu(leaf->hdr.stale) == stale); @@ -302,7 +302,7 @@ xfs_dir2_leafn_lasthash( xfs_dir2_leaf_t *leaf; /* leaf structure */ leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); if (count) *count = be16_to_cpu(leaf->hdr.count); if (!leaf->hdr.count) @@ -341,7 +341,7 @@ xfs_dir2_leafn_lookup_for_addname( tp = args->trans; mp = dp->i_mount; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); #ifdef __KERNEL__ ASSERT(be16_to_cpu(leaf->hdr.count) > 
0); #endif @@ -358,7 +358,7 @@ xfs_dir2_leafn_lookup_for_addname( curbp = state->extrablk.bp; curfdb = state->extrablk.blkno; free = curbp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); } length = xfs_dir2_data_entsize(args->namelen); /* @@ -424,7 +424,8 @@ xfs_dir2_leafn_lookup_for_addname( /* * If it has room, return it. */ - if (unlikely(be16_to_cpu(free->bests[fi]) == NULLDATAOFF)) { + if (unlikely(free->bests[fi] == + cpu_to_be16(NULLDATAOFF))) { XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int", XFS_ERRLEVEL_LOW, mp); if (curfdb != newfdb) @@ -485,7 +486,7 @@ xfs_dir2_leafn_lookup_for_entry( tp = args->trans; mp = dp->i_mount; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); #ifdef __KERNEL__ ASSERT(be16_to_cpu(leaf->hdr.count) > 0); #endif @@ -666,7 +667,8 @@ xfs_dir2_leafn_moveents( int i; /* temp leaf index */ for (i = start_s, stale = 0; i < start_s + count; i++) { - if (be32_to_cpu(leaf_s->ents[i].address) == XFS_DIR2_NULL_DATAPTR) + if (leaf_s->ents[i].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) stale++; } } else @@ -713,8 +715,8 @@ xfs_dir2_leafn_order( leaf1 = leaf1_bp->data; leaf2 = leaf2_bp->data; - ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); - ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); + ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); if (be16_to_cpu(leaf1->hdr.count) > 0 && be16_to_cpu(leaf2->hdr.count) > 0 && (be32_to_cpu(leaf2->ents[0].hashval) < be32_to_cpu(leaf1->ents[0].hashval) || @@ -862,7 +864,7 @@ xfs_dir2_leafn_remove( tp = args->trans; mp = dp->i_mount; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); /* * Point to the entry we're removing. 
*/ @@ -924,7 +926,7 @@ xfs_dir2_leafn_remove( return error; } free = fbp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); ASSERT(be32_to_cpu(free->hdr.firstdb) == XFS_DIR2_MAX_FREE_BESTS(mp) * (fdb - XFS_DIR2_FREE_FIRSTDB(mp))); @@ -976,7 +978,8 @@ xfs_dir2_leafn_remove( int i; /* free entry index */ for (i = findex - 1; - i >= 0 && be16_to_cpu(free->bests[i]) == NULLDATAOFF; + i >= 0 && + free->bests[i] == cpu_to_be16(NULLDATAOFF); i--) continue; free->hdr.nvalid = cpu_to_be32(i + 1); @@ -1133,7 +1136,7 @@ xfs_dir2_leafn_toosmall( */ blk = &state->path.blk[state->path.active - 1]; info = blk->bp->data; - ASSERT(be16_to_cpu(info->magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); leaf = (xfs_dir2_leaf_t *)info; count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale); bytes = (uint)sizeof(leaf->hdr) + count * (uint)sizeof(leaf->ents[0]); @@ -1192,7 +1195,7 @@ xfs_dir2_leafn_toosmall( count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale); bytes = state->blocksize - (state->blocksize >> 2); leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); count += be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale); bytes -= count * (uint)sizeof(leaf->ents[0]); /* @@ -1251,8 +1254,8 @@ xfs_dir2_leafn_unbalance( ASSERT(save_blk->magic == XFS_DIR2_LEAFN_MAGIC); drop_leaf = drop_blk->bp->data; save_leaf = save_blk->bp->data; - ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); - ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(drop_leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); + ASSERT(save_leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); /* * If there are any stale leaf entries, take this opportunity * to purge them. @@ -1393,7 +1396,7 @@ xfs_dir2_node_addname_int( */ ifbno = fblk->blkno; free = fbp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); findex = fblk->index; /* * This means the free entry showed that the data block had @@ -1477,7 +1480,7 @@ xfs_dir2_node_addname_int( continue; } free = fbp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); findex = 0; } /* @@ -1609,7 +1612,7 @@ xfs_dir2_node_addname_int( free->hdr.nused = 0; } else { free = fbp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); } /* @@ -1632,7 +1635,7 @@ xfs_dir2_node_addname_int( * If this entry was for an empty data block * (this should always be true) then update the header. */ - if (be16_to_cpu(free->bests[findex]) == NULLDATAOFF) { + if (free->bests[findex] == cpu_to_be16(NULLDATAOFF)) { be32_add_cpu(&free->hdr.nused, 1); xfs_dir2_free_log_header(tp, fbp); } @@ -1902,7 +1905,7 @@ xfs_dir2_node_replace( * Point to the data entry. 
*/ hdr = state->extrablk.bp->data; - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC)); dep = (xfs_dir2_data_entry_t *) ((char *)hdr + xfs_dir2_dataptr_to_off(state->mp, be32_to_cpu(lep->address))); @@ -1968,7 +1971,7 @@ xfs_dir2_node_trim_free( return 0; } free = bp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); /* * If there are used entries, there's nothing to do. */ diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 84ebeec1664..dd5628bd8d0 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -683,7 +683,7 @@ xfs_dialloc( return 0; } agi = XFS_BUF_TO_AGI(agbp); - ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); + ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); } else { /* * Continue where we left off before. In this case, we @@ -691,7 +691,7 @@ xfs_dialloc( */ agbp = *IO_agbp; agi = XFS_BUF_TO_AGI(agbp); - ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); + ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); ASSERT(be32_to_cpu(agi->agi_freecount) > 0); } mp = tp->t_mountp; @@ -775,7 +775,7 @@ nextag: if (error) goto nextag; agi = XFS_BUF_TO_AGI(agbp); - ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); + ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); } /* * Here with an allocation group that has a free inode. @@ -944,7 +944,7 @@ nextag: * See if the most recently allocated block has any free. */ newino: - if (be32_to_cpu(agi->agi_newino) != NULLAGINO) { + if (agi->agi_newino != cpu_to_be32(NULLAGINO)) { error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), XFS_LOOKUP_EQ, &i); if (error) @@ -1085,7 +1085,7 @@ xfs_difree( return error; } agi = XFS_BUF_TO_AGI(agbp); - ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); + ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); ASSERT(agbno < be32_to_cpu(agi->agi_length)); /* * Initialize the cursor. @@ -1438,7 +1438,7 @@ xfs_ialloc_log_agi( xfs_agi_t *agi; /* allocation group header */ agi = XFS_BUF_TO_AGI(bp); - ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); + ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); #endif /* * Compute byte offsets for the first and last fields. @@ -1492,7 +1492,7 @@ xfs_read_agi( /* * Validate the magic number of the agi block. */ - agi_ok = be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC && + agi_ok = agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC) && XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)) && be32_to_cpu(agi->agi_seqno) == agno; if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IALLOC_READ_AGI, diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index db310f8fb76..d04ea6a2dfa 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -167,7 +167,7 @@ xfs_imap_to_bp( dip = (xfs_dinode_t *)xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog)); - di_ok = be16_to_cpu(dip->di_magic) == XFS_DINODE_MAGIC && + di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) && XFS_DINODE_GOOD_VERSION(dip->di_version); if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP, @@ -802,7 +802,7 @@ xfs_iread( * If we got something that isn't an inode it means someone * (nfs or dmi) has a stale handle. 
*/ - if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC) { + if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC)) { #ifdef DEBUG xfs_alert(mp, "%s: dip->di_magic (0x%x) != XFS_DINODE_MAGIC (0x%x)", @@ -1457,7 +1457,7 @@ xfs_iunlink( ASSERT(agi->agi_unlinked[bucket_index]); ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino); - if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) { + if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) { /* * There is already another inode in the bucket we need * to add ourselves to. Add us at the front of the list. @@ -1468,8 +1468,7 @@ xfs_iunlink( if (error) return error; - ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO); - /* both on-disk, don't endian flip twice */ + ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO)); dip->di_next_unlinked = agi->agi_unlinked[bucket_index]; offset = ip->i_imap.im_boffset + offsetof(xfs_dinode_t, di_next_unlinked); @@ -1534,7 +1533,7 @@ xfs_iunlink_remove( agino = XFS_INO_TO_AGINO(mp, ip->i_ino); ASSERT(agino != 0); bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; - ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO); + ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)); ASSERT(agi->agi_unlinked[bucket_index]); if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) { @@ -2659,7 +2658,7 @@ xfs_iflush_int( */ xfs_synchronize_times(ip); - if (XFS_TEST_ERROR(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC, + if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC), mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { xfs_alert_tag(mp, XFS_PTAG_IFLUSH, "%s: Bad inode %Lu magic number 0x%x, ptr 0x%p", diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 41d5b8f2bf9..5b24a71811f 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -3521,13 +3521,13 @@ xlog_verify_iclog(xlog_t *log, spin_unlock(&log->l_icloglock); /* check log magic numbers */ - if (be32_to_cpu(iclog->ic_header.h_magicno) != XLOG_HEADER_MAGIC_NUM) + if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) xfs_emerg(log->l_mp, "%s: invalid magic num", __func__); ptr = (xfs_caddr_t) &iclog->ic_header; for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count; ptr += BBSIZE) { - if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM) + if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) xfs_emerg(log->l_mp, "%s: unexpected magic num", __func__); } diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 04142caedb2..fb528b354c0 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -300,14 +300,14 @@ xlog_header_check_recover( xfs_mount_t *mp, xlog_rec_header_t *head) { - ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM); + ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)); /* * IRIX doesn't write the h_fmt field and leaves it zeroed * (XLOG_FMT_UNKNOWN). This stops us from trying to recover * a dirty log created in IRIX. 
*/ - if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) { + if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) { xfs_warn(mp, "dirty log written in incompatible format - can't recover"); xlog_header_check_dump(mp, head); @@ -333,7 +333,7 @@ xlog_header_check_mount( xfs_mount_t *mp, xlog_rec_header_t *head) { - ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM); + ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)); if (uuid_is_nil(&head->h_fs_uuid)) { /* @@ -534,7 +534,7 @@ xlog_find_verify_log_record( head = (xlog_rec_header_t *)offset; - if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(head->h_magicno)) + if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) break; if (!smallmem) @@ -916,7 +916,7 @@ xlog_find_tail( if (error) goto done; - if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) { + if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) { found = 1; break; } @@ -933,8 +933,8 @@ xlog_find_tail( if (error) goto done; - if (XLOG_HEADER_MAGIC_NUM == - be32_to_cpu(*(__be32 *)offset)) { + if (*(__be32 *)offset == + cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) { found = 2; break; } @@ -1947,7 +1947,7 @@ xfs_qm_dqcheck( * This is all fine; things are still consistent, and we haven't lost * any quota information. Just don't complain about bad dquot blks. */ - if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) { + if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) { if (flags & XFS_QMOPT_DOWARN) xfs_alert(mp, "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x", @@ -2238,7 +2238,7 @@ xlog_recover_inode_pass2( * Make sure the place we're flushing out to really looks * like an inode! */ - if (unlikely(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)) { + if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) { xfs_buf_relse(bp); xfs_alert(mp, "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld", @@ -3295,7 +3295,7 @@ xlog_valid_rec_header( { int hlen; - if (unlikely(be32_to_cpu(rhead->h_magicno) != XLOG_HEADER_MAGIC_NUM)) { + if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) { XFS_ERROR_REPORT("xlog_valid_rec_header(1)", XFS_ERRLEVEL_LOW, log->l_mp); return XFS_ERROR(EFSCORRUPTED); -- cgit v1.2.3-70-g09d2 From adadbeefb34f755a3477da51035eeeec2c1fde38 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 13 Jul 2011 13:43:49 +0200 Subject: xfs: remove wrappers around b_fspriv Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_buf.h | 2 -- fs/xfs/xfs_buf_item.c | 51 +++++++++++++------------------ fs/xfs/xfs_inode.c | 4 +-- fs/xfs/xfs_inode_item.c | 4 +-- fs/xfs/xfs_log.c | 17 ++++------- fs/xfs/xfs_trans_buf.c | 76 ++++++++++++++++++---------------------------- 6 files changed, 61 insertions(+), 93 deletions(-) (limited to 'fs/xfs/xfs_inode.c') diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 2eec8f0f2a5..033c152c411 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -280,8 +280,6 @@ void xfs_buf_stale(struct xfs_buf *bp); #define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func)) #define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL) -#define XFS_BUF_FSPRIVATE(bp, type) ((type)(bp)->b_fspriv) -#define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val)) #define XFS_BUF_SET_START(bp) do { } while (0) #define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr) diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 716ef095b13..9eacf06932b 100644 --- a/fs/xfs/xfs_buf_item.c +++ 
b/fs/xfs/xfs_buf_item.c @@ -90,13 +90,11 @@ xfs_buf_item_flush_log_debug( uint first, uint last) { - xfs_buf_log_item_t *bip; + xfs_buf_log_item_t *bip = bp->b_fspriv; uint nbytes; - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); - if ((bip == NULL) || (bip->bli_item.li_type != XFS_LI_BUF)) { + if (bip == NULL || (bip->bli_item.li_type != XFS_LI_BUF)) return; - } ASSERT(bip->bli_logged != NULL); nbytes = last - first + 1; @@ -408,7 +406,7 @@ xfs_buf_item_unpin( int stale = bip->bli_flags & XFS_BLI_STALE; int freed; - ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip); + ASSERT(bp->b_fspriv == bip); ASSERT(atomic_read(&bip->bli_refcount) > 0); trace_xfs_buf_item_unpin(bip); @@ -454,13 +452,13 @@ xfs_buf_item_unpin( */ if (bip->bli_flags & XFS_BLI_STALE_INODE) { xfs_buf_do_callbacks(bp); - XFS_BUF_SET_FSPRIVATE(bp, NULL); + bp->b_fspriv = NULL; XFS_BUF_CLR_IODONE_FUNC(bp); } else { spin_lock(&ailp->xa_lock); xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip); xfs_buf_item_relse(bp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL); + ASSERT(bp->b_fspriv == NULL); } xfs_buf_relse(bp); } @@ -684,7 +682,7 @@ xfs_buf_item_init( xfs_buf_t *bp, xfs_mount_t *mp) { - xfs_log_item_t *lip; + xfs_log_item_t *lip = bp->b_fspriv; xfs_buf_log_item_t *bip; int chunks; int map_size; @@ -696,12 +694,8 @@ xfs_buf_item_init( * nothing to do here so return. */ ASSERT(bp->b_target->bt_mount == mp); - if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { - lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); - if (lip->li_type == XFS_LI_BUF) { - return; - } - } + if (lip != NULL && lip->li_type == XFS_LI_BUF) + return; /* * chunks is the number of XFS_BLF_CHUNK size pieces @@ -740,11 +734,9 @@ xfs_buf_item_init( * Put the buf item into the list of items attached to the * buffer at the front. 
*/ - if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { - bip->bli_item.li_bio_list = - XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); - } - XFS_BUF_SET_FSPRIVATE(bp, bip); + if (bp->b_fspriv) + bip->bli_item.li_bio_list = bp->b_fspriv; + bp->b_fspriv = bip; } @@ -876,12 +868,11 @@ xfs_buf_item_relse( trace_xfs_buf_item_relse(bp, _RET_IP_); - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); - XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list); - if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) && - (XFS_BUF_IODONE_FUNC(bp) != NULL)) { + bip = bp->b_fspriv; + bp->b_fspriv = bip->bli_item.li_bio_list; + if (bp->b_fspriv == NULL && XFS_BUF_IODONE_FUNC(bp) != NULL) XFS_BUF_CLR_IODONE_FUNC(bp); - } + xfs_buf_rele(bp); xfs_buf_item_free(bip); } @@ -908,12 +899,12 @@ xfs_buf_attach_iodone( ASSERT(xfs_buf_islocked(bp)); lip->li_cb = cb; - if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { - head_lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + head_lip = bp->b_fspriv; + if (head_lip) { lip->li_bio_list = head_lip->li_bio_list; head_lip->li_bio_list = lip; } else { - XFS_BUF_SET_FSPRIVATE(bp, lip); + bp->b_fspriv = lip; } ASSERT((XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks) || @@ -939,8 +930,8 @@ xfs_buf_do_callbacks( { struct xfs_log_item *lip; - while ((lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *)) != NULL) { - XFS_BUF_SET_FSPRIVATE(bp, lip->li_bio_list); + while ((lip = bp->b_fspriv) != NULL) { + bp->b_fspriv = lip->li_bio_list; ASSERT(lip->li_cb != NULL); /* * Clear the next pointer so we don't have any @@ -1026,7 +1017,7 @@ xfs_buf_iodone_callbacks( do_callbacks: xfs_buf_do_callbacks(bp); - XFS_BUF_SET_FSPRIVATE(bp, NULL); + bp->b_fspriv = NULL; XFS_BUF_CLR_IODONE_FUNC(bp); xfs_buf_ioend(bp, 0); } diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index d04ea6a2dfa..34c6872b5f7 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1698,7 +1698,7 @@ xfs_ifree_cluster( * stale first, we will not attempt to lock them in the loop * below as the XFS_ISTALE flag will be set. */ - lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + lip = bp->b_fspriv; while (lip) { if (lip->li_type == XFS_LI_INODE) { iip = (xfs_inode_log_item_t *)lip; @@ -2811,7 +2811,7 @@ xfs_iflush_int( */ xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + ASSERT(bp->b_fspriv != NULL); ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL); } else { /* diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index a49811a3e5a..588406dc6a3 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -874,7 +874,7 @@ xfs_iflush_done( * Scan the buffer IO completions for other inodes being completed and * attach them to the current inode log item. */ - blip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + blip = bp->b_fspriv; prev = NULL; while (blip != NULL) { if (lip->li_cb != xfs_iflush_done) { @@ -886,7 +886,7 @@ xfs_iflush_done( /* remove from list */ next = blip->li_bio_list; if (!prev) { - XFS_BUF_SET_FSPRIVATE(bp, next); + bp->b_fspriv = next; } else { prev->li_bio_list = next; } diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 82c797dd652..a8718ed9776 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -871,13 +871,9 @@ xlog_space_left( void xlog_iodone(xfs_buf_t *bp) { - xlog_in_core_t *iclog; - xlog_t *l; - int aborted; - - iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *); - aborted = 0; - l = iclog->ic_log; + xlog_in_core_t *iclog = bp->b_fspriv; + xlog_t *l = iclog->ic_log; + int aborted = 0; /* * Race to shutdown the filesystem if we see an error. 
@@ -1249,9 +1245,8 @@ STATIC int xlog_bdstrat( struct xfs_buf *bp) { - struct xlog_in_core *iclog; + struct xlog_in_core *iclog = bp->b_fspriv; - iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *); if (iclog->ic_state & XLOG_STATE_IOERROR) { XFS_BUF_ERROR(bp, EIO); XFS_BUF_STALE(bp); @@ -1358,7 +1353,7 @@ xlog_sync(xlog_t *log, iclog->ic_bwritecnt = 1; } XFS_BUF_SET_COUNT(bp, count); - XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */ + bp->b_fspriv = iclog; XFS_BUF_ZEROFLAGS(bp); XFS_BUF_BUSY(bp); XFS_BUF_ASYNC(bp); @@ -1405,7 +1400,7 @@ xlog_sync(xlog_t *log, XFS_BUF_SET_ADDR(bp, 0); /* logical 0 */ XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+ (__psint_t)count), split); - XFS_BUF_SET_FSPRIVATE(bp, iclog); + bp->b_fspriv = iclog; XFS_BUF_ZEROFLAGS(bp); XFS_BUF_BUSY(bp); XFS_BUF_ASYNC(bp); diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index 09a0468ef11..f910e51444a 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c @@ -89,7 +89,7 @@ _xfs_trans_bjoin( * The checks to see if one is there are in xfs_buf_item_init(). */ xfs_buf_item_init(bp, tp->t_mountp); - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + bip = bp->b_fspriv; ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL)); ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED)); @@ -173,7 +173,7 @@ xfs_trans_get_buf(xfs_trans_t *tp, ASSERT(!XFS_BUF_ISDELAYWRITE(bp)); ASSERT(bp->b_transp == tp); - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + bip = bp->b_fspriv; ASSERT(bip != NULL); ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_recur++; @@ -233,7 +233,7 @@ xfs_trans_getsb(xfs_trans_t *tp, */ bp = mp->m_sb_bp; if (bp->b_transp == tp) { - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); + bip = bp->b_fspriv; ASSERT(bip != NULL); ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_recur++; @@ -329,7 +329,7 @@ xfs_trans_read_buf( if (bp != NULL) { ASSERT(xfs_buf_islocked(bp)); ASSERT(bp->b_transp == tp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + ASSERT(bp->b_fspriv != NULL); ASSERT((XFS_BUF_ISERROR(bp)) == 0); if (!(XFS_BUF_ISDONE(bp))) { trace_xfs_trans_read_buf_io(bp, _RET_IP_); @@ -363,7 +363,7 @@ xfs_trans_read_buf( } - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); + bip = bp->b_fspriv; bip->bli_recur++; ASSERT(atomic_read(&bip->bli_refcount) > 0); @@ -460,32 +460,30 @@ xfs_trans_brelse(xfs_trans_t *tp, xfs_buf_t *bp) { xfs_buf_log_item_t *bip; - xfs_log_item_t *lip; /* * Default to a normal brelse() call if the tp is NULL. */ if (tp == NULL) { + struct xfs_log_item *lip = bp->b_fspriv; + ASSERT(bp->b_transp == NULL); + /* * If there's a buf log item attached to the buffer, * then let the AIL know that the buffer is being * unlocked. 
*/ - if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { - lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); - if (lip->li_type == XFS_LI_BUF) { - bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*); - xfs_trans_unlocked_item(bip->bli_item.li_ailp, - lip); - } + if (lip != NULL && lip->li_type == XFS_LI_BUF) { + bip = bp->b_fspriv; + xfs_trans_unlocked_item(bip->bli_item.li_ailp, lip); } xfs_buf_relse(bp); return; } ASSERT(bp->b_transp == tp); - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + bip = bp->b_fspriv; ASSERT(bip->bli_item.li_type == XFS_LI_BUF); ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL)); @@ -581,16 +579,15 @@ void xfs_trans_bhold(xfs_trans_t *tp, xfs_buf_t *bp) { - xfs_buf_log_item_t *bip; + xfs_buf_log_item_t *bip = bp->b_fspriv; ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); - - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(bip != NULL); ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL)); ASSERT(atomic_read(&bip->bli_refcount) > 0); + bip->bli_flags |= XFS_BLI_HOLD; trace_xfs_trans_bhold(bip); } @@ -603,19 +600,17 @@ void xfs_trans_bhold_release(xfs_trans_t *tp, xfs_buf_t *bp) { - xfs_buf_log_item_t *bip; + xfs_buf_log_item_t *bip = bp->b_fspriv; ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); - - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(bip != NULL); ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL)); ASSERT(atomic_read(&bip->bli_refcount) > 0); ASSERT(bip->bli_flags & XFS_BLI_HOLD); - bip->bli_flags &= ~XFS_BLI_HOLD; + bip->bli_flags &= ~XFS_BLI_HOLD; trace_xfs_trans_bhold_release(bip); } @@ -634,11 +629,11 @@ xfs_trans_log_buf(xfs_trans_t *tp, uint first, uint last) { - xfs_buf_log_item_t *bip; + xfs_buf_log_item_t *bip = bp->b_fspriv; ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + ASSERT(bip != NULL); ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp))); ASSERT((XFS_BUF_IODONE_FUNC(bp) == NULL) || (XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks)); @@ -656,7 +651,6 @@ xfs_trans_log_buf(xfs_trans_t *tp, XFS_BUF_DELAYWRITE(bp); XFS_BUF_DONE(bp); - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); ASSERT(atomic_read(&bip->bli_refcount) > 0); XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks); bip->bli_item.li_cb = xfs_buf_iodone; @@ -706,13 +700,11 @@ xfs_trans_binval( xfs_trans_t *tp, xfs_buf_t *bp) { - xfs_buf_log_item_t *bip; + xfs_buf_log_item_t *bip = bp->b_fspriv; ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); - - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(bip != NULL); ASSERT(atomic_read(&bip->bli_refcount) > 0); trace_xfs_trans_binval(bip); @@ -780,13 +772,11 @@ xfs_trans_inode_buf( xfs_trans_t *tp, xfs_buf_t *bp) { - xfs_buf_log_item_t *bip; + xfs_buf_log_item_t *bip = bp->b_fspriv; ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); - - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(bip != NULL); ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_flags |= XFS_BLI_INODE_BUF; @@ -806,13 +796,11 @@ xfs_trans_stale_inode_buf( xfs_trans_t *tp, xfs_buf_t *bp) { - xfs_buf_log_item_t *bip; + xfs_buf_log_item_t *bip = bp->b_fspriv; ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == 
tp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); - - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(bip != NULL); ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_flags |= XFS_BLI_STALE_INODE; @@ -833,13 +821,11 @@ xfs_trans_inode_alloc_buf( xfs_trans_t *tp, xfs_buf_t *bp) { - xfs_buf_log_item_t *bip; + xfs_buf_log_item_t *bip = bp->b_fspriv; ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); - - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(bip != NULL); ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF; @@ -863,16 +849,14 @@ xfs_trans_dquot_buf( xfs_buf_t *bp, uint type) { - xfs_buf_log_item_t *bip; + xfs_buf_log_item_t *bip = bp->b_fspriv; ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + ASSERT(bip != NULL); ASSERT(type == XFS_BLF_UDQUOT_BUF || type == XFS_BLF_PDQUOT_BUF || type == XFS_BLF_GDQUOT_BUF); - - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_format.blf_flags |= type; -- cgit v1.2.3-70-g09d2 From cb669ca5701153a808db6627521cc8aa52fc42d1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 13 Jul 2011 13:43:49 +0200 Subject: xfs: remove wrappers around b_iodone Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_buf.c | 2 +- fs/xfs/linux-2.6/xfs_buf.h | 4 ---- fs/xfs/xfs_buf_item.c | 16 ++++++++-------- fs/xfs/xfs_inode.c | 4 ++-- fs/xfs/xfs_log.c | 4 ++-- fs/xfs/xfs_log_recover.c | 8 ++++---- fs/xfs/xfs_trans_buf.c | 6 +++--- 7 files changed, 20 insertions(+), 24 deletions(-) (limited to 'fs/xfs/xfs_inode.c') diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index a0d97c5a3f1..888176ad530 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -1107,7 +1107,7 @@ xfs_bioerror_relse( XFS_BUF_UNDELAYWRITE(bp); XFS_BUF_DONE(bp); XFS_BUF_STALE(bp); - XFS_BUF_CLR_IODONE_FUNC(bp); + bp->b_iodone = NULL; if (!(fl & XBF_ASYNC)) { /* * Mark b_error and B_ERROR _both_. 
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 033c152c411..6a83b46b4bc 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -276,10 +276,6 @@ void xfs_buf_stale(struct xfs_buf *bp); #define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE) #define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE) -#define XFS_BUF_IODONE_FUNC(bp) ((bp)->b_iodone) -#define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func)) -#define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL) - #define XFS_BUF_SET_START(bp) do { } while (0) #define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr) diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 9eacf06932b..88492916c3d 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -453,7 +453,7 @@ xfs_buf_item_unpin( if (bip->bli_flags & XFS_BLI_STALE_INODE) { xfs_buf_do_callbacks(bp); bp->b_fspriv = NULL; - XFS_BUF_CLR_IODONE_FUNC(bp); + bp->b_iodone = NULL; } else { spin_lock(&ailp->xa_lock); xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip); @@ -870,8 +870,8 @@ xfs_buf_item_relse( bip = bp->b_fspriv; bp->b_fspriv = bip->bli_item.li_bio_list; - if (bp->b_fspriv == NULL && XFS_BUF_IODONE_FUNC(bp) != NULL) - XFS_BUF_CLR_IODONE_FUNC(bp); + if (bp->b_fspriv == NULL) + bp->b_iodone = NULL; xfs_buf_rele(bp); xfs_buf_item_free(bip); @@ -907,9 +907,9 @@ xfs_buf_attach_iodone( bp->b_fspriv = lip; } - ASSERT((XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks) || - (XFS_BUF_IODONE_FUNC(bp) == NULL)); - XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks); + ASSERT(bp->b_iodone == NULL || + bp->b_iodone == xfs_buf_iodone_callbacks); + bp->b_iodone = xfs_buf_iodone_callbacks; } /* @@ -998,7 +998,7 @@ xfs_buf_iodone_callbacks( XFS_BUF_DONE(bp); XFS_BUF_SET_START(bp); } - ASSERT(XFS_BUF_IODONE_FUNC(bp)); + ASSERT(bp->b_iodone != NULL); trace_xfs_buf_item_iodone_async(bp, _RET_IP_); xfs_buf_relse(bp); return; @@ -1018,7 +1018,7 @@ xfs_buf_iodone_callbacks( do_callbacks: xfs_buf_do_callbacks(bp); bp->b_fspriv = NULL; - XFS_BUF_CLR_IODONE_FUNC(bp); + bp->b_iodone = NULL; xfs_buf_ioend(bp, 0); } diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 34c6872b5f7..1314c83ded1 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2471,7 +2471,7 @@ cluster_corrupt_out: * mark the buffer as an error and call them. Otherwise * mark it as stale and brelse. 
*/ - if (XFS_BUF_IODONE_FUNC(bp)) { + if (bp->b_iodone) { XFS_BUF_UNDONE(bp); XFS_BUF_STALE(bp); XFS_BUF_ERROR(bp,EIO); @@ -2812,7 +2812,7 @@ xfs_iflush_int( xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item); ASSERT(bp->b_fspriv != NULL); - ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL); + ASSERT(bp->b_iodone != NULL); } else { /* * We're flushing an inode which is not in the AIL and has diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index a8718ed9776..06ff8437ed8 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1050,7 +1050,7 @@ xlog_alloc_log(xfs_mount_t *mp, bp = xfs_buf_get_empty(log->l_iclog_size, mp->m_logdev_targp); if (!bp) goto out_free_log; - XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); + bp->b_iodone = xlog_iodone; ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(xfs_buf_islocked(bp)); log->l_xbuf = bp; @@ -1084,7 +1084,7 @@ xlog_alloc_log(xfs_mount_t *mp, if (!bp) goto out_free_iclog; - XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); + bp->b_iodone = xlog_iodone; iclog->ic_bp = bp; iclog->ic_data = bp->b_addr; #ifdef DEBUG diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 40c9742aa47..8fe4206de05 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -371,7 +371,7 @@ xlog_recover_iodone( xfs_force_shutdown(bp->b_target->bt_mount, SHUTDOWN_META_IO_ERROR); } - XFS_BUF_CLR_IODONE_FUNC(bp); + bp->b_iodone = NULL; xfs_buf_ioend(bp, 0); } @@ -2178,7 +2178,7 @@ xlog_recover_buffer_pass2( error = xfs_bwrite(mp, bp); } else { ASSERT(bp->b_target->bt_mount == mp); - XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); + bp->b_iodone = xlog_recover_iodone; xfs_bdwrite(mp, bp); } @@ -2438,7 +2438,7 @@ xlog_recover_inode_pass2( write_inode_buffer: ASSERT(bp->b_target->bt_mount == mp); - XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); + bp->b_iodone = xlog_recover_iodone; xfs_bdwrite(mp, bp); error: if (need_free) @@ -2560,7 +2560,7 @@ xlog_recover_dquot_pass2( ASSERT(dq_f->qlf_size == 2); ASSERT(bp->b_target->bt_mount == mp); - XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); + bp->b_iodone = xlog_recover_iodone; xfs_bdwrite(mp, bp); return (0); diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index f910e51444a..15584fc3ed7 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c @@ -635,8 +635,8 @@ xfs_trans_log_buf(xfs_trans_t *tp, ASSERT(bp->b_transp == tp); ASSERT(bip != NULL); ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp))); - ASSERT((XFS_BUF_IODONE_FUNC(bp) == NULL) || - (XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks)); + ASSERT(bp->b_iodone == NULL || + bp->b_iodone == xfs_buf_iodone_callbacks); /* * Mark the buffer as needing to be written out eventually, @@ -652,7 +652,7 @@ xfs_trans_log_buf(xfs_trans_t *tp, XFS_BUF_DONE(bp); ASSERT(atomic_read(&bip->bli_refcount) > 0); - XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks); + bp->b_iodone = xfs_buf_iodone_callbacks; bip->bli_item.li_cb = xfs_buf_iodone; trace_xfs_trans_log_buf(bip); -- cgit v1.2.3-70-g09d2 From c84470dda7a1165d90f55c2025c4c8ca403d485e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 13 Jul 2011 13:43:50 +0200 Subject: xfs: remove leftovers of the old btree tracing code Remove various bits left over from the old kdb-only btree tracing code, but leave the actual trace point stubs in place to ease adding new event based btree tracing. 
Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/Makefile | 2 - fs/xfs/linux-2.6/xfs_super.c | 1 - fs/xfs/xfs_alloc_btree.c | 75 ------------- fs/xfs/xfs_bmap_btree.c | 98 ----------------- fs/xfs/xfs_btree.c | 1 - fs/xfs/xfs_btree.h | 38 +++---- fs/xfs/xfs_btree_trace.c | 249 ------------------------------------------- fs/xfs/xfs_btree_trace.h | 99 ----------------- fs/xfs/xfs_ialloc_btree.c | 75 ------------- fs/xfs/xfs_iget.c | 1 - fs/xfs/xfs_inode.c | 1 - 11 files changed, 19 insertions(+), 621 deletions(-) delete mode 100644 fs/xfs/xfs_btree_trace.c delete mode 100644 fs/xfs/xfs_btree_trace.h (limited to 'fs/xfs/xfs_inode.c') diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index 284a7c89697..75bb316529d 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -88,8 +88,6 @@ xfs-y += xfs_alloc.o \ xfs_vnodeops.o \ xfs_rw.o -xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o - # Objects in linux/ xfs-y += $(addprefix $(XFS_LINUX)/, \ kmem.o \ diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 3ebb4588e1b..25fd2cd6c8b 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -33,7 +33,6 @@ #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_btree.h" -#include "xfs_btree_trace.h" #include "xfs_ialloc.h" #include "xfs_bmap.h" #include "xfs_rtalloc.h" diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index e277777a7cf..ffb3386e45c 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -31,7 +31,6 @@ #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_btree.h" -#include "xfs_btree_trace.h" #include "xfs_alloc.h" #include "xfs_error.h" #include "xfs_trace.h" @@ -311,72 +310,6 @@ xfs_allocbt_recs_inorder( } #endif /* DEBUG */ -#ifdef XFS_BTREE_TRACE -ktrace_t *xfs_allocbt_trace_buf; - -STATIC void -xfs_allocbt_trace_enter( - struct xfs_btree_cur *cur, - const char *func, - char *s, - int type, - int line, - __psunsigned_t a0, - __psunsigned_t a1, - __psunsigned_t a2, - __psunsigned_t a3, - __psunsigned_t a4, - __psunsigned_t a5, - __psunsigned_t a6, - __psunsigned_t a7, - __psunsigned_t a8, - __psunsigned_t a9, - __psunsigned_t a10) -{ - ktrace_enter(xfs_allocbt_trace_buf, (void *)(__psint_t)type, - (void *)func, (void *)s, NULL, (void *)cur, - (void *)a0, (void *)a1, (void *)a2, (void *)a3, - (void *)a4, (void *)a5, (void *)a6, (void *)a7, - (void *)a8, (void *)a9, (void *)a10); -} - -STATIC void -xfs_allocbt_trace_cursor( - struct xfs_btree_cur *cur, - __uint32_t *s0, - __uint64_t *l0, - __uint64_t *l1) -{ - *s0 = cur->bc_private.a.agno; - *l0 = cur->bc_rec.a.ar_startblock; - *l1 = cur->bc_rec.a.ar_blockcount; -} - -STATIC void -xfs_allocbt_trace_key( - struct xfs_btree_cur *cur, - union xfs_btree_key *key, - __uint64_t *l0, - __uint64_t *l1) -{ - *l0 = be32_to_cpu(key->alloc.ar_startblock); - *l1 = be32_to_cpu(key->alloc.ar_blockcount); -} - -STATIC void -xfs_allocbt_trace_record( - struct xfs_btree_cur *cur, - union xfs_btree_rec *rec, - __uint64_t *l0, - __uint64_t *l1, - __uint64_t *l2) -{ - *l0 = be32_to_cpu(rec->alloc.ar_startblock); - *l1 = be32_to_cpu(rec->alloc.ar_blockcount); - *l2 = 0; -} -#endif /* XFS_BTREE_TRACE */ - static const struct xfs_btree_ops xfs_allocbt_ops = { .rec_len = sizeof(xfs_alloc_rec_t), .key_len = sizeof(xfs_alloc_key_t), @@ -393,18 +326,10 @@ static const struct xfs_btree_ops xfs_allocbt_ops = { .init_rec_from_cur = xfs_allocbt_init_rec_from_cur, .init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur, .key_diff = 
xfs_allocbt_key_diff, - #ifdef DEBUG .keys_inorder = xfs_allocbt_keys_inorder, .recs_inorder = xfs_allocbt_recs_inorder, #endif - -#ifdef XFS_BTREE_TRACE - .trace_enter = xfs_allocbt_trace_enter, - .trace_cursor = xfs_allocbt_trace_cursor, - .trace_key = xfs_allocbt_trace_key, - .trace_record = xfs_allocbt_trace_record, -#endif }; /* diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 50a28442a54..e2f5d59cbea 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -33,7 +33,6 @@ #include "xfs_inode_item.h" #include "xfs_alloc.h" #include "xfs_btree.h" -#include "xfs_btree_trace.h" #include "xfs_itable.h" #include "xfs_bmap.h" #include "xfs_error.h" @@ -732,95 +731,6 @@ xfs_bmbt_recs_inorder( } #endif /* DEBUG */ -#ifdef XFS_BTREE_TRACE -ktrace_t *xfs_bmbt_trace_buf; - -STATIC void -xfs_bmbt_trace_enter( - struct xfs_btree_cur *cur, - const char *func, - char *s, - int type, - int line, - __psunsigned_t a0, - __psunsigned_t a1, - __psunsigned_t a2, - __psunsigned_t a3, - __psunsigned_t a4, - __psunsigned_t a5, - __psunsigned_t a6, - __psunsigned_t a7, - __psunsigned_t a8, - __psunsigned_t a9, - __psunsigned_t a10) -{ - struct xfs_inode *ip = cur->bc_private.b.ip; - int whichfork = cur->bc_private.b.whichfork; - - ktrace_enter(xfs_bmbt_trace_buf, - (void *)((__psint_t)type | (whichfork << 8) | (line << 16)), - (void *)func, (void *)s, (void *)ip, (void *)cur, - (void *)a0, (void *)a1, (void *)a2, (void *)a3, - (void *)a4, (void *)a5, (void *)a6, (void *)a7, - (void *)a8, (void *)a9, (void *)a10); -} - -STATIC void -xfs_bmbt_trace_cursor( - struct xfs_btree_cur *cur, - __uint32_t *s0, - __uint64_t *l0, - __uint64_t *l1) -{ - struct xfs_bmbt_rec_host r; - - xfs_bmbt_set_all(&r, &cur->bc_rec.b); - - *s0 = (cur->bc_nlevels << 24) | - (cur->bc_private.b.flags << 16) | - cur->bc_private.b.allocated; - *l0 = r.l0; - *l1 = r.l1; -} - -STATIC void -xfs_bmbt_trace_key( - struct xfs_btree_cur *cur, - union xfs_btree_key *key, - __uint64_t *l0, - __uint64_t *l1) -{ - *l0 = be64_to_cpu(key->bmbt.br_startoff); - *l1 = 0; -} - -/* Endian flipping versions of the bmbt extraction functions */ -STATIC void -xfs_bmbt_disk_get_all( - xfs_bmbt_rec_t *r, - xfs_bmbt_irec_t *s) -{ - __xfs_bmbt_get_all(get_unaligned_be64(&r->l0), - get_unaligned_be64(&r->l1), s); -} - -STATIC void -xfs_bmbt_trace_record( - struct xfs_btree_cur *cur, - union xfs_btree_rec *rec, - __uint64_t *l0, - __uint64_t *l1, - __uint64_t *l2) -{ - struct xfs_bmbt_irec irec; - - xfs_bmbt_disk_get_all(&rec->bmbt, &irec); - *l0 = irec.br_startoff; - *l1 = irec.br_startblock; - *l2 = irec.br_blockcount; -} -#endif /* XFS_BTREE_TRACE */ - static const struct xfs_btree_ops xfs_bmbt_ops = { .rec_len = sizeof(xfs_bmbt_rec_t), .key_len = sizeof(xfs_bmbt_key_t), @@ -837,18 +747,10 @@ static const struct xfs_btree_ops xfs_bmbt_ops = { .init_rec_from_cur = xfs_bmbt_init_rec_from_cur, .init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur, .key_diff = xfs_bmbt_key_diff, - #ifdef DEBUG .keys_inorder = xfs_bmbt_keys_inorder, .recs_inorder = xfs_bmbt_recs_inorder, #endif - -#ifdef XFS_BTREE_TRACE - .trace_enter = xfs_bmbt_trace_enter, - .trace_cursor = xfs_bmbt_trace_cursor, - .trace_key = xfs_bmbt_trace_key, - .trace_record = xfs_bmbt_trace_record, -#endif }; /* diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 43125af4262..cabf4b5604a 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -32,7 +32,6 @@ #include "xfs_inode.h" #include "xfs_inode_item.h" #include "xfs_btree.h" -#include "xfs_btree_trace.h" #include 
"xfs_error.h" #include "xfs_trace.h" diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 82fafc66bd1..8d05a6a46ce 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -199,25 +199,6 @@ struct xfs_btree_ops { union xfs_btree_rec *r1, union xfs_btree_rec *r2); #endif - - /* btree tracing */ -#ifdef XFS_BTREE_TRACE - void (*trace_enter)(struct xfs_btree_cur *, const char *, - char *, int, int, __psunsigned_t, - __psunsigned_t, __psunsigned_t, - __psunsigned_t, __psunsigned_t, - __psunsigned_t, __psunsigned_t, - __psunsigned_t, __psunsigned_t, - __psunsigned_t, __psunsigned_t); - void (*trace_cursor)(struct xfs_btree_cur *, __uint32_t *, - __uint64_t *, __uint64_t *); - void (*trace_key)(struct xfs_btree_cur *, - union xfs_btree_key *, __uint64_t *, - __uint64_t *); - void (*trace_record)(struct xfs_btree_cur *, - union xfs_btree_rec *, __uint64_t *, - __uint64_t *, __uint64_t *); -#endif }; /* @@ -452,4 +433,23 @@ static inline int xfs_btree_get_level(struct xfs_btree_block *block) (XFS_FSB_TO_AGNO(mp, fsb) < mp->m_sb.sb_agcount && \ XFS_FSB_TO_AGBNO(mp, fsb) < mp->m_sb.sb_agblocks) +/* + * Trace hooks. Currently not implemented as they need to be ported + * over to the generic tracing functionality, which is some effort. + * + * i,j = integer (32 bit) + * b = btree block buffer (xfs_buf_t) + * p = btree ptr + * r = btree record + * k = btree key + */ +#define XFS_BTREE_TRACE_ARGBI(c, b, i) +#define XFS_BTREE_TRACE_ARGBII(c, b, i, j) +#define XFS_BTREE_TRACE_ARGI(c, i) +#define XFS_BTREE_TRACE_ARGIPK(c, i, p, s) +#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r) +#define XFS_BTREE_TRACE_ARGIK(c, i, k) +#define XFS_BTREE_TRACE_ARGR(c, r) +#define XFS_BTREE_TRACE_CURSOR(c, t) + #endif /* __XFS_BTREE_H__ */ diff --git a/fs/xfs/xfs_btree_trace.c b/fs/xfs/xfs_btree_trace.c deleted file mode 100644 index 44ff942a0fd..00000000000 --- a/fs/xfs/xfs_btree_trace.c +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Copyright (c) 2008 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_types.h" -#include "xfs_inum.h" -#include "xfs_bmap_btree.h" -#include "xfs_alloc_btree.h" -#include "xfs_ialloc_btree.h" -#include "xfs_inode.h" -#include "xfs_btree.h" -#include "xfs_btree_trace.h" - -STATIC void -xfs_btree_trace_ptr( - struct xfs_btree_cur *cur, - union xfs_btree_ptr ptr, - __psunsigned_t *high, - __psunsigned_t *low) -{ - if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { - __u64 val = be64_to_cpu(ptr.l); - *high = val >> 32; - *low = (int)val; - } else { - *high = 0; - *low = be32_to_cpu(ptr.s); - } -} - -/* - * Add a trace buffer entry for arguments, for a buffer & 1 integer arg. 
- */ -void -xfs_btree_trace_argbi( - const char *func, - struct xfs_btree_cur *cur, - struct xfs_buf *b, - int i, - int line) -{ - cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGBI, - line, (__psunsigned_t)b, i, 0, 0, 0, 0, 0, - 0, 0, 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for a buffer & 2 integer args. - */ -void -xfs_btree_trace_argbii( - const char *func, - struct xfs_btree_cur *cur, - struct xfs_buf *b, - int i0, - int i1, - int line) -{ - cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGBII, - line, (__psunsigned_t)b, i0, i1, 0, 0, 0, 0, - 0, 0, 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for 3 block-length args - * and an integer arg. - */ -void -xfs_btree_trace_argfffi( - const char *func, - struct xfs_btree_cur *cur, - xfs_dfiloff_t o, - xfs_dfsbno_t b, - xfs_dfilblks_t i, - int j, - int line) -{ - cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGFFFI, - line, - o >> 32, (int)o, - b >> 32, (int)b, - i >> 32, (int)i, - (int)j, 0, 0, 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for one integer arg. - */ -void -xfs_btree_trace_argi( - const char *func, - struct xfs_btree_cur *cur, - int i, - int line) -{ - cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGI, - line, i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for int, fsblock, key. - */ -void -xfs_btree_trace_argipk( - const char *func, - struct xfs_btree_cur *cur, - int i, - union xfs_btree_ptr ptr, - union xfs_btree_key *key, - int line) -{ - __psunsigned_t high, low; - __uint64_t l0, l1; - - xfs_btree_trace_ptr(cur, ptr, &high, &low); - cur->bc_ops->trace_key(cur, key, &l0, &l1); - cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGIPK, - line, i, high, low, - l0 >> 32, (int)l0, - l1 >> 32, (int)l1, - 0, 0, 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for int, fsblock, rec. - */ -void -xfs_btree_trace_argipr( - const char *func, - struct xfs_btree_cur *cur, - int i, - union xfs_btree_ptr ptr, - union xfs_btree_rec *rec, - int line) -{ - __psunsigned_t high, low; - __uint64_t l0, l1, l2; - - xfs_btree_trace_ptr(cur, ptr, &high, &low); - cur->bc_ops->trace_record(cur, rec, &l0, &l1, &l2); - cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGIPR, - line, i, - high, low, - l0 >> 32, (int)l0, - l1 >> 32, (int)l1, - l2 >> 32, (int)l2, - 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for int, key. - */ -void -xfs_btree_trace_argik( - const char *func, - struct xfs_btree_cur *cur, - int i, - union xfs_btree_key *key, - int line) -{ - __uint64_t l0, l1; - - cur->bc_ops->trace_key(cur, key, &l0, &l1); - cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGIK, - line, i, - l0 >> 32, (int)l0, - l1 >> 32, (int)l1, - 0, 0, 0, 0, 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for record. - */ -void -xfs_btree_trace_argr( - const char *func, - struct xfs_btree_cur *cur, - union xfs_btree_rec *rec, - int line) -{ - __uint64_t l0, l1, l2; - - cur->bc_ops->trace_record(cur, rec, &l0, &l1, &l2); - cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGR, - line, - l0 >> 32, (int)l0, - l1 >> 32, (int)l1, - l2 >> 32, (int)l2, - 0, 0, 0, 0, 0); -} - -/* - * Add a trace buffer entry for the cursor/operation. 
- */ -void -xfs_btree_trace_cursor( - const char *func, - struct xfs_btree_cur *cur, - int type, - int line) -{ - __uint32_t s0; - __uint64_t l0, l1; - char *s; - - switch (type) { - case XBT_ARGS: - s = "args"; - break; - case XBT_ENTRY: - s = "entry"; - break; - case XBT_ERROR: - s = "error"; - break; - case XBT_EXIT: - s = "exit"; - break; - default: - s = "unknown"; - break; - } - - cur->bc_ops->trace_cursor(cur, &s0, &l0, &l1); - cur->bc_ops->trace_enter(cur, func, s, XFS_BTREE_KTRACE_CUR, line, - s0, - l0 >> 32, (int)l0, - l1 >> 32, (int)l1, - (__psunsigned_t)cur->bc_bufs[0], - (__psunsigned_t)cur->bc_bufs[1], - (__psunsigned_t)cur->bc_bufs[2], - (__psunsigned_t)cur->bc_bufs[3], - (cur->bc_ptrs[0] << 16) | cur->bc_ptrs[1], - (cur->bc_ptrs[2] << 16) | cur->bc_ptrs[3]); -} diff --git a/fs/xfs/xfs_btree_trace.h b/fs/xfs/xfs_btree_trace.h deleted file mode 100644 index 2d8a309873e..00000000000 --- a/fs/xfs/xfs_btree_trace.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (c) 2008 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_BTREE_TRACE_H__ -#define __XFS_BTREE_TRACE_H__ - -struct xfs_btree_cur; -struct xfs_buf; - - -/* - * Trace hooks. - * i,j = integer (32 bit) - * b = btree block buffer (xfs_buf_t) - * p = btree ptr - * r = btree record - * k = btree key - */ - -#ifdef XFS_BTREE_TRACE - -/* - * Trace buffer entry types. - */ -#define XFS_BTREE_KTRACE_ARGBI 1 -#define XFS_BTREE_KTRACE_ARGBII 2 -#define XFS_BTREE_KTRACE_ARGFFFI 3 -#define XFS_BTREE_KTRACE_ARGI 4 -#define XFS_BTREE_KTRACE_ARGIPK 5 -#define XFS_BTREE_KTRACE_ARGIPR 6 -#define XFS_BTREE_KTRACE_ARGIK 7 -#define XFS_BTREE_KTRACE_ARGR 8 -#define XFS_BTREE_KTRACE_CUR 9 - -/* - * Sub-types for cursor traces. 
- */ -#define XBT_ARGS 0 -#define XBT_ENTRY 1 -#define XBT_ERROR 2 -#define XBT_EXIT 3 - -void xfs_btree_trace_argbi(const char *, struct xfs_btree_cur *, - struct xfs_buf *, int, int); -void xfs_btree_trace_argbii(const char *, struct xfs_btree_cur *, - struct xfs_buf *, int, int, int); -void xfs_btree_trace_argi(const char *, struct xfs_btree_cur *, int, int); -void xfs_btree_trace_argipk(const char *, struct xfs_btree_cur *, int, - union xfs_btree_ptr, union xfs_btree_key *, int); -void xfs_btree_trace_argipr(const char *, struct xfs_btree_cur *, int, - union xfs_btree_ptr, union xfs_btree_rec *, int); -void xfs_btree_trace_argik(const char *, struct xfs_btree_cur *, int, - union xfs_btree_key *, int); -void xfs_btree_trace_argr(const char *, struct xfs_btree_cur *, - union xfs_btree_rec *, int); -void xfs_btree_trace_cursor(const char *, struct xfs_btree_cur *, int, int); - -#define XFS_BTREE_TRACE_ARGBI(c, b, i) \ - xfs_btree_trace_argbi(__func__, c, b, i, __LINE__) -#define XFS_BTREE_TRACE_ARGBII(c, b, i, j) \ - xfs_btree_trace_argbii(__func__, c, b, i, j, __LINE__) -#define XFS_BTREE_TRACE_ARGI(c, i) \ - xfs_btree_trace_argi(__func__, c, i, __LINE__) -#define XFS_BTREE_TRACE_ARGIPK(c, i, p, k) \ - xfs_btree_trace_argipk(__func__, c, i, p, k, __LINE__) -#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r) \ - xfs_btree_trace_argipr(__func__, c, i, p, r, __LINE__) -#define XFS_BTREE_TRACE_ARGIK(c, i, k) \ - xfs_btree_trace_argik(__func__, c, i, k, __LINE__) -#define XFS_BTREE_TRACE_ARGR(c, r) \ - xfs_btree_trace_argr(__func__, c, r, __LINE__) -#define XFS_BTREE_TRACE_CURSOR(c, t) \ - xfs_btree_trace_cursor(__func__, c, t, __LINE__) -#else -#define XFS_BTREE_TRACE_ARGBI(c, b, i) -#define XFS_BTREE_TRACE_ARGBII(c, b, i, j) -#define XFS_BTREE_TRACE_ARGI(c, i) -#define XFS_BTREE_TRACE_ARGIPK(c, i, p, s) -#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r) -#define XFS_BTREE_TRACE_ARGIK(c, i, k) -#define XFS_BTREE_TRACE_ARGR(c, r) -#define XFS_BTREE_TRACE_CURSOR(c, t) -#endif /* XFS_BTREE_TRACE */ - -#endif /* __XFS_BTREE_TRACE_H__ */ diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index 16921f55c54..c6a75815aea 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -31,7 +31,6 @@ #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_btree.h" -#include "xfs_btree_trace.h" #include "xfs_ialloc.h" #include "xfs_alloc.h" #include "xfs_error.h" @@ -205,72 +204,6 @@ xfs_inobt_recs_inorder( } #endif /* DEBUG */ -#ifdef XFS_BTREE_TRACE -ktrace_t *xfs_inobt_trace_buf; - -STATIC void -xfs_inobt_trace_enter( - struct xfs_btree_cur *cur, - const char *func, - char *s, - int type, - int line, - __psunsigned_t a0, - __psunsigned_t a1, - __psunsigned_t a2, - __psunsigned_t a3, - __psunsigned_t a4, - __psunsigned_t a5, - __psunsigned_t a6, - __psunsigned_t a7, - __psunsigned_t a8, - __psunsigned_t a9, - __psunsigned_t a10) -{ - ktrace_enter(xfs_inobt_trace_buf, (void *)(__psint_t)type, - (void *)func, (void *)s, NULL, (void *)cur, - (void *)a0, (void *)a1, (void *)a2, (void *)a3, - (void *)a4, (void *)a5, (void *)a6, (void *)a7, - (void *)a8, (void *)a9, (void *)a10); -} - -STATIC void -xfs_inobt_trace_cursor( - struct xfs_btree_cur *cur, - __uint32_t *s0, - __uint64_t *l0, - __uint64_t *l1) -{ - *s0 = cur->bc_private.a.agno; - *l0 = cur->bc_rec.i.ir_startino; - *l1 = cur->bc_rec.i.ir_free; -} - -STATIC void -xfs_inobt_trace_key( - struct xfs_btree_cur *cur, - union xfs_btree_key *key, - __uint64_t *l0, - __uint64_t *l1) -{ - *l0 = be32_to_cpu(key->inobt.ir_startino); - *l1 = 0; 
-} - -STATIC void -xfs_inobt_trace_record( - struct xfs_btree_cur *cur, - union xfs_btree_rec *rec, - __uint64_t *l0, - __uint64_t *l1, - __uint64_t *l2) -{ - *l0 = be32_to_cpu(rec->inobt.ir_startino); - *l1 = be32_to_cpu(rec->inobt.ir_freecount); - *l2 = be64_to_cpu(rec->inobt.ir_free); -} -#endif /* XFS_BTREE_TRACE */ - static const struct xfs_btree_ops xfs_inobt_ops = { .rec_len = sizeof(xfs_inobt_rec_t), .key_len = sizeof(xfs_inobt_key_t), @@ -286,18 +219,10 @@ static const struct xfs_btree_ops xfs_inobt_ops = { .init_rec_from_cur = xfs_inobt_init_rec_from_cur, .init_ptr_from_cur = xfs_inobt_init_ptr_from_cur, .key_diff = xfs_inobt_key_diff, - #ifdef DEBUG .keys_inorder = xfs_inobt_keys_inorder, .recs_inorder = xfs_inobt_recs_inorder, #endif - -#ifdef XFS_BTREE_TRACE - .trace_enter = xfs_inobt_trace_enter, - .trace_cursor = xfs_inobt_trace_cursor, - .trace_key = xfs_inobt_trace_key, - .trace_record = xfs_inobt_trace_record, -#endif }; /* diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 3631783b2b5..7759812c1bb 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -38,7 +38,6 @@ #include "xfs_trans_priv.h" #include "xfs_inode_item.h" #include "xfs_bmap.h" -#include "xfs_btree_trace.h" #include "xfs_trace.h" diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 1314c83ded1..3cc21ddf9f7 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -37,7 +37,6 @@ #include "xfs_buf_item.h" #include "xfs_inode_item.h" #include "xfs_btree.h" -#include "xfs_btree_trace.h" #include "xfs_alloc.h" #include "xfs_ialloc.h" #include "xfs_bmap.h" -- cgit v1.2.3-70-g09d2
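
Editor's note (not part of the patch series): the second patch keeps the XFS_BTREE_TRACE_* hooks in xfs_btree.h but defines them as empty function-like macros, so existing call sites in the btree code compile away to nothing until they are ported to the generic tracepoint infrastructure. The stand-alone C sketch below illustrates that empty-macro pattern; XFS_BTREE_TRACE_ARGI is taken from the patch, while btree_lookup_example and the rest are hypothetical stand-ins, not code from the XFS tree.

/* Editor's sketch of the empty-macro stub pattern, not from the patches. */
#include <stdio.h>

/*
 * Stub hook: expands to nothing, so the call below has no runtime cost
 * and its arguments are never evaluated.
 */
#define XFS_BTREE_TRACE_ARGI(cur, i)

struct xfs_btree_cur;			/* opaque stand-in for the real cursor */

static int
btree_lookup_example(struct xfs_btree_cur *cur, int level)
{
	XFS_BTREE_TRACE_ARGI(cur, level);	/* vanishes during preprocessing */
	/* ... the real lookup work would go here ... */
	return level;
}

int
main(void)
{
	printf("trace stub expanded to nothing, result = %d\n",
	       btree_lookup_example(NULL, 2));
	return 0;
}

Keeping the hooks as no-op macros, rather than deleting the call sites, means a later conversion to event-based tracing only has to change the header, which matches the stated intent of the commit message.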