Diffstat (limited to 'fs'): 42 files changed, 93 insertions, 93 deletions
diff --git a/fs/xfs/linux-2.6/mrlock.h b/fs/xfs/linux-2.6/mrlock.h index 16b44c3c236..1b262b790d9 100644 --- a/fs/xfs/linux-2.6/mrlock.h +++ b/fs/xfs/linux-2.6/mrlock.h @@ -79,7 +79,7 @@ static inline void mrdemote(mrlock_t *mrp) * Debug-only routine, without some platform-specific asm code, we can * now only answer requests regarding whether we hold the lock for write * (reader state is outside our visibility, we only track writer state). - * Note: means !ismrlocked would give false positivies, so don't do that. + * Note: means !ismrlocked would give false positives, so don't do that. */ static inline int ismrlocked(mrlock_t *mrp, int type) { diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index c02f7c5b746..62b4553fb60 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -372,7 +372,7 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh) * assumes that all buffers on the page are started at the same time. * * The fix is two passes across the ioend list - one to start writeback on the - * bufferheads, and then the second one submit them for I/O. + * buffer_heads, and then submit them for I/O on the second pass. */ STATIC void xfs_submit_ioend( @@ -699,7 +699,7 @@ xfs_convert_page( /* * page_dirty is initially a count of buffers on the page before - * EOF and is decrememted as we move each into a cleanable state. + * EOF and is decremented as we move each into a cleanable state. * * Derivation: * @@ -842,7 +842,7 @@ xfs_cluster_write( * page if possible. * The bh->b_state's cannot know if any of the blocks or which block for * that matter are dirty due to mmap writes, and therefore bh uptodate is - * only vaild if the page itself isn't completely uptodate. Some layers + * only valid if the page itself isn't completely uptodate. Some layers * may clear the page dirty flag prior to calling write page, under the * assumption the entire page will be written out; by not writing out the * whole page the page can be reused before all valid dirty data is @@ -892,7 +892,7 @@ xfs_page_state_convert( /* * page_dirty is initially a count of buffers on the page before - * EOF and is decrememted as we move each into a cleanable state. + * EOF and is decremented as we move each into a cleanable state. * * Derivation: * @@ -1339,9 +1339,9 @@ xfs_end_io_direct( /* * Non-NULL private data means we need to issue a transaction to * convert a range from unwritten to written extents. This needs - * to happen from process contect but aio+dio I/O completion + * to happen from process context but aio+dio I/O completion * happens from irq context so we need to defer it to a workqueue. - * This is not nessecary for synchronous direct I/O, but we do + * This is not necessary for synchronous direct I/O, but we do * it anyway to keep the code uniform and simpler. * * The core direct I/O code might be changed to always call the @@ -1358,7 +1358,7 @@ xfs_end_io_direct( } /* - * blockdev_direct_IO can return an error even afer the I/O + * blockdev_direct_IO can return an error even after the I/O * completion handler was called. Thus we need to protect * against double-freeing. */ diff --git a/fs/xfs/linux-2.6/xfs_export.h b/fs/xfs/linux-2.6/xfs_export.h index e5b0559700a..e794ca4efc7 100644 --- a/fs/xfs/linux-2.6/xfs_export.h +++ b/fs/xfs/linux-2.6/xfs_export.h @@ -54,7 +54,7 @@ * Note, the NFS filehandle also includes an fsid portion which * may have an inode number in it. 
That number is hardcoded to * 32bits and there is no way for XFS to intercept it. In - * practice this means when exporting an XFS filesytem with 64bit + * practice this means when exporting an XFS filesystem with 64bit * inodes you should either export the mountpoint (rather than * a subdirectory) or use the "fsid" export option. */ diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index 0169360475c..84ddf189389 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c @@ -681,7 +681,7 @@ start: eventsent = 1; /* - * The iolock was dropped and reaquired in XFS_SEND_DATA + * The iolock was dropped and reacquired in XFS_SEND_DATA * so we have to recheck the size when appending. * We will only "goto start;" once, since having sent the * event prevents another call to XFS_SEND_DATA, which is diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h index 8fed356db05..841200c0309 100644 --- a/fs/xfs/linux-2.6/xfs_vfs.h +++ b/fs/xfs/linux-2.6/xfs_vfs.h @@ -92,7 +92,7 @@ typedef enum { #define SYNC_FSDATA 0x0020 /* flush fs data (e.g. superblocks) */ #define SYNC_REFCACHE 0x0040 /* prune some of the nfs ref cache */ #define SYNC_REMOUNT 0x0080 /* remount readonly, no dummy LRs */ -#define SYNC_QUIESCE 0x0100 /* quiesce fileystem for a snapshot */ +#define SYNC_QUIESCE 0x0100 /* quiesce filesystem for a snapshot */ typedef int (*vfs_mount_t)(bhv_desc_t *, struct xfs_mount_args *, struct cred *); diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c index e4e5f05b841..546f48af882 100644 --- a/fs/xfs/quota/xfs_dquot_item.c +++ b/fs/xfs/quota/xfs_dquot_item.c @@ -221,7 +221,7 @@ xfs_qm_dqunpin_wait( * as possible. * * We must not be holding the AIL_LOCK at this point. Calling incore() to - * search the buffercache can be a time consuming thing, and AIL_LOCK is a + * search the buffer cache can be a time consuming thing, and AIL_LOCK is a * spinlock. */ STATIC void diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 1fb757ef3f4..73c1e5e80c0 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c @@ -289,7 +289,7 @@ xfs_qm_rele_quotafs_ref( /* * This is called at mount time from xfs_mountfs to initialize the quotainfo - * structure and start the global quotamanager (xfs_Gqm) if it hasn't done + * structure and start the global quota manager (xfs_Gqm) if it hasn't done * so already. Note that the superblock has not been read in yet. */ void @@ -807,7 +807,7 @@ xfs_qm_dqattach_one( * Given a udquot and gdquot, attach a ptr to the group dquot in the * udquot as a hint for future lookups. The idea sounds simple, but the * execution isn't, because the udquot might have a group dquot attached - * already and getting rid of that gets us into lock ordering contraints. + * already and getting rid of that gets us into lock ordering constraints. * The process is complicated more by the fact that the dquots may or may not * be locked on entry. */ @@ -1094,10 +1094,10 @@ xfs_qm_sync( } /* * If we can't grab the flush lock then if the caller - * really wanted us to give this our best shot, + * really wanted us to give this our best shot, so * see if we can give a push to the buffer before we wait * on the flush lock. At this point, we know that - * eventhough the dquot is being flushed, + * even though the dquot is being flushed, * it has (new) dirty data. */ xfs_qm_dqflock_pushbuf_wait(dqp); @@ -1491,7 +1491,7 @@ xfs_qm_reset_dqcounts( /* * Do a sanity check, and if needed, repair the dqblk. 
Don't * output any warnings because it's perfectly possible to - * find unitialized dquot blks. See comment in xfs_qm_dqcheck. + * find uninitialised dquot blks. See comment in xfs_qm_dqcheck. */ (void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR, "xfs_quotacheck"); @@ -1580,7 +1580,7 @@ xfs_qm_dqiterate( error = 0; /* - * This looks racey, but we can't keep an inode lock across a + * This looks racy, but we can't keep an inode lock across a * trans_reserve. But, this gets called during quotacheck, and that * happens only at mount time which is single threaded. */ @@ -1824,7 +1824,7 @@ xfs_qm_dqusage_adjust( * we have to start from the beginning anyway. * Once we're done, we'll log all the dquot bufs. * - * The *QUOTA_ON checks below may look pretty racey, but quotachecks + * The *QUOTA_ON checks below may look pretty racy, but quotachecks * and quotaoffs don't race. (Quotachecks happen at mount time only). */ if (XFS_IS_UQUOTA_ON(mp)) { diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index 676884394aa..c55db463bbf 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c @@ -912,7 +912,7 @@ xfs_qm_export_dquot( /* * Internally, we don't reset all the timers when quota enforcement - * gets turned off. No need to confuse the userlevel code, + * gets turned off. No need to confuse the user level code, * so return zeroes in that case. */ if (! XFS_IS_QUOTA_ENFORCED(mp)) { diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c index 3290975d31f..d8e131ec0aa 100644 --- a/fs/xfs/quota/xfs_trans_dquot.c +++ b/fs/xfs/quota/xfs_trans_dquot.c @@ -804,7 +804,7 @@ xfs_trans_reserve_quota_bydquots( } /* - * Didnt change anything critical, so, no need to log + * Didn't change anything critical, so, no need to log */ return (0); } diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c index 4ff0f4e41c6..2539af34eb6 100644 --- a/fs/xfs/xfs_acl.c +++ b/fs/xfs/xfs_acl.c @@ -395,7 +395,7 @@ xfs_acl_allow_set( * The access control process to determine the access permission: * if uid == file owner id, use the file owner bits. * if gid == file owner group id, use the file group bits. - * scan ACL for a maching user or group, and use matched entry + * scan ACL for a matching user or group, and use matched entry * permission. Use total permissions of all matching group entries, * until all acl entries are exhausted. The final permission produced * by matching acl entry or entries needs to be & with group permission. diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h index a96e2ffce0c..dc2361dd740 100644 --- a/fs/xfs/xfs_ag.h +++ b/fs/xfs/xfs_ag.h @@ -179,7 +179,7 @@ typedef struct xfs_perag { char pagf_init; /* this agf's entry is initialized */ char pagi_init; /* this agi's entry is initialized */ - char pagf_metadata; /* the agf is prefered to be metadata */ + char pagf_metadata; /* the agf is preferred to be metadata */ char pagi_inodeok; /* The agi is ok for inodes */ __uint8_t pagf_levels[XFS_BTNUM_AGF]; /* # of levels in bno & cnt btree */ diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index f4328e1e2a7..64ee07db0d5 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -511,7 +511,7 @@ STATIC void xfs_alloc_trace_busy( char *name, /* function tag string */ char *str, /* additional string */ - xfs_mount_t *mp, /* file system mount poing */ + xfs_mount_t *mp, /* file system mount point */ xfs_agnumber_t agno, /* allocation group number */ xfs_agblock_t agbno, /* a.g. 
relative block number */ xfs_extlen_t len, /* length of extent */ @@ -1843,7 +1843,7 @@ xfs_alloc_fix_freelist( } else agbp = NULL; - /* If this is a metadata prefered pag and we are user data + /* If this is a metadata preferred pag and we are user data * then try somewhere else if we are not being asked to * try harder at this point */ @@ -2458,7 +2458,7 @@ error0: /* * AG Busy list management * The busy list contains block ranges that have been freed but whose - * transacations have not yet hit disk. If any block listed in a busy + * transactions have not yet hit disk. If any block listed in a busy * list is reused, the transaction that freed it must be forced to disk * before continuing to use the block. * diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h index 3546dea27b7..2d1f8928b26 100644 --- a/fs/xfs/xfs_alloc.h +++ b/fs/xfs/xfs_alloc.h @@ -68,7 +68,7 @@ typedef struct xfs_alloc_arg { xfs_alloctype_t otype; /* original allocation type */ char wasdel; /* set if allocation was prev delayed */ char wasfromfl; /* set if allocation is from freelist */ - char isfl; /* set if is freelist blocks - !actg */ + char isfl; /* set if is freelist blocks - !acctg */ char userdata; /* set if this is user data */ } xfs_alloc_arg_t; diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index 093fac476bd..b6e1e02bbb2 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c @@ -294,7 +294,7 @@ xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen, xfs_trans_ihold(args.trans, dp); /* - * If the attribute list is non-existant or a shortform list, + * If the attribute list is non-existent or a shortform list, * upgrade it to a single-leaf-block attribute list. */ if ((dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) || @@ -1584,7 +1584,7 @@ out: * Fill in the disk block numbers in the state structure for the buffers * that are attached to the state structure. * This is done so that we can quickly reattach ourselves to those buffers - * after some set of transaction commit's has released these buffers. + * after some set of transaction commits have released these buffers. */ STATIC int xfs_attr_fillstate(xfs_da_state_t *state) @@ -1631,7 +1631,7 @@ xfs_attr_fillstate(xfs_da_state_t *state) /* * Reattach the buffers to the state structure based on the disk block * numbers stored in the state structure. - * This is done after some set of transaction commit's has released those + * This is done after some set of transaction commits have released those * buffers from our grip. */ STATIC int diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c index 717682747bd..9462be86aa1 100644 --- a/fs/xfs/xfs_attr_leaf.c +++ b/fs/xfs/xfs_attr_leaf.c @@ -524,7 +524,7 @@ xfs_attr_shortform_compare(const void *a, const void *b) /* * Copy out entries of shortform attribute lists for attr_list(). - * Shortform atrtribute lists are not stored in hashval sorted order. + * Shortform attribute lists are not stored in hashval sorted order. * If the output buffer is not large enough to hold them all, then we * we have to calculate each entries' hashvalue and sort them before * we can begin returning them to the user. @@ -1541,7 +1541,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action) /* * Check for the degenerate case of the block being empty. * If the block is empty, we'll simply delete it, no need to - * coalesce it with a sibling block. We choose (aribtrarily) + * coalesce it with a sibling block. We choose (arbitrarily) * to merge with the forward block unless it is NULL. 
*/ if (count == 0) { diff --git a/fs/xfs/xfs_behavior.c b/fs/xfs/xfs_behavior.c index 9880adae393..f4fe3715a80 100644 --- a/fs/xfs/xfs_behavior.c +++ b/fs/xfs/xfs_behavior.c @@ -31,7 +31,7 @@ * The behavior chain is ordered based on the 'position' number which * lives in the first field of the ops vector (higher numbers first). * - * Attemps to insert duplicate ops result in an EINVAL return code. + * Attempts to insert duplicate ops result in an EINVAL return code. * Otherwise, return 0 to indicate success. */ int @@ -84,7 +84,7 @@ bhv_insert(bhv_head_t *bhp, bhv_desc_t *bdp) /* * Remove a behavior descriptor from a position in a behavior chain; - * the postition is guaranteed not to be the first position. + * the position is guaranteed not to be the first position. * Should only be called by the bhv_remove() macro. */ void diff --git a/fs/xfs/xfs_behavior.h b/fs/xfs/xfs_behavior.h index 2cd89bb5ab1..1d8ff103201 100644 --- a/fs/xfs/xfs_behavior.h +++ b/fs/xfs/xfs_behavior.h @@ -39,7 +39,7 @@ * behaviors is synchronized with operations-in-progress (oip's) so that * the oip's always see a consistent view of the chain. * - * The term "interpostion" is used to refer to the act of inserting + * The term "interposition" is used to refer to the act of inserting * a behavior such that it interposes on (i.e., is inserted in front * of) a particular other behavior. A key example of this is when a * system implementing distributed single system image wishes to @@ -51,7 +51,7 @@ * * Behavior synchronization is logic which is necessary under certain * circumstances that there is no conflict between ongoing operations - * traversing the behavior chain and those dunamically modifying the + * traversing the behavior chain and those dynamically modifying the * behavior chain. Because behavior synchronization adds extra overhead * to virtual operation invocation, we want to restrict, as much as * we can, the requirement for this extra code, to those situations diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 07e2324152b..5fed15682dd 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -98,12 +98,12 @@ xfs_buf_item_flush_log_debug( } /* - * This function is called to verify that our caller's have logged + * This function is called to verify that our callers have logged * all the bytes that they changed. * * It does this by comparing the original copy of the buffer stored in * the buf log item's bli_orig array to the current copy of the buffer - * and ensuring that all bytes which miscompare are set in the bli_logged + * and ensuring that all bytes which mismatch are set in the bli_logged * array of the buf log item. */ STATIC void diff --git a/fs/xfs/xfs_cap.h b/fs/xfs/xfs_cap.h index 433ec537f9b..d0035c6e951 100644 --- a/fs/xfs/xfs_cap.h +++ b/fs/xfs/xfs_cap.h @@ -38,7 +38,7 @@ typedef struct xfs_cap_set { /* * For Linux, we take the bitfields directly from capability.h * and no longer attempt to keep this attribute ondisk compatible - * with IRIX. Since this attribute is only set on exectuables, + * with IRIX. Since this attribute is only set on executables, * it just doesn't make much sense to try. We do use a different * named attribute though, to avoid confusion. */ diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index 4bae3a76c67..8988b905117 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c @@ -840,7 +840,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action) /* * Check for the degenerate case of the block being empty. 
* If the block is empty, we'll simply delete it, no need to - * coalesce it with a sibling block. We choose (aribtrarily) + * coalesce it with a sibling block. We choose (arbitrarily) * to merge with the forward block unless it is NULL. */ if (count == 0) { diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c index bd5cee6aa51..972ded59547 100644 --- a/fs/xfs/xfs_dir2_block.c +++ b/fs/xfs/xfs_dir2_block.c @@ -533,7 +533,7 @@ xfs_dir2_block_getdents( /* * Reached the end of the block. - * Set the offset to a nonexistent block 1 and return. + * Set the offset to a non-existent block 1 and return. */ *eofp = 1; diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index 08648b18265..0f5e2f2ce6e 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c @@ -515,7 +515,7 @@ xfs_dir2_leaf_addname( ASSERT(be32_to_cpu(leaf->ents[highstale].address) == XFS_DIR2_NULL_DATAPTR); /* - * Copy entries down to copver the stale entry + * Copy entries down to cover the stale entry * and make room for the new entry. */ if (highstale - index > 0) diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c index af556f16a0c..ac511ab9c52 100644 --- a/fs/xfs/xfs_dir2_node.c +++ b/fs/xfs/xfs_dir2_node.c @@ -830,7 +830,7 @@ xfs_dir2_leafn_rebalance( state->inleaf = 1; blk2->index = 0; cmn_err(CE_ALERT, - "xfs_dir2_leafn_rebalance: picked the wrong leaf? reverting orignal leaf: " + "xfs_dir2_leafn_rebalance: picked the wrong leaf? reverting original leaf: " "blk1->index %d\n", blk1->index); } diff --git a/fs/xfs/xfs_dir_leaf.c b/fs/xfs/xfs_dir_leaf.c index ee88751c3be..6d711869262 100644 --- a/fs/xfs/xfs_dir_leaf.c +++ b/fs/xfs/xfs_dir_leaf.c @@ -1341,7 +1341,7 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action) /* * Check for the degenerate case of the block being empty. * If the block is empty, we'll simply delete it, no need to - * coalesce it with a sibling block. We choose (aribtrarily) + * coalesce it with a sibling block. We choose (arbitrarily) * to merge with the forward block unless it is NULL. */ if (count == 0) { diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 56caa88713a..dfa3527b20a 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -477,7 +477,7 @@ xfs_fs_counts( * * xfs_reserve_blocks is called to set m_resblks * in the in-core mount table. The number of unused reserved blocks - * is kept in m_resbls_avail. + * is kept in m_resblks_avail. * * Reserve the requested number of blocks if available. Otherwise return * as many as possible to satisfy the request. The actual number diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 0024892841a..20c39a6e6a0 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -1023,7 +1023,7 @@ xfs_difree( rec.ir_freecount++; /* - * When an inode cluster is free, it becomes elgible for removal + * When an inode cluster is free, it becomes eligible for removal */ if ((mp->m_flags & XFS_MOUNT_IDELETE) && (rec.ir_freecount == XFS_IALLOC_INODES(mp))) { diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 3ce35a6f700..bb33113eef9 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -509,7 +509,7 @@ retry: } else { /* * If the inode is not fully constructed due to - * filehandle mistmatches wait for the inode to go + * filehandle mismatches wait for the inode to go * away and try again. 
* * iget_locked will call __wait_on_freeing_inode diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 88a517fad07..48146bdc6bd 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -160,7 +160,7 @@ xfs_inotobp( xfs_dinode_t *dip; /* - * Call the space managment code to find the location of the + * Call the space management code to find the location of the * inode on disk. */ imap.im_blkno = 0; @@ -837,7 +837,7 @@ xfs_dic2xflags( /* * Given a mount structure and an inode number, return a pointer - * to a newly allocated in-core inode coresponding to the given + * to a newly allocated in-core inode corresponding to the given * inode number. * * Initialize the inode's attributes and extent pointers if it @@ -2723,7 +2723,7 @@ xfs_ipin( /* * Decrement the pin count of the given inode, and wake up * anyone in xfs_iwait_unpin() if the count goes to 0. The - * inode must have been previoulsy pinned with a call to xfs_ipin(). + * inode must have been previously pinned with a call to xfs_ipin(). */ void xfs_iunpin( @@ -3690,7 +3690,7 @@ void xfs_iext_add( xfs_ifork_t *ifp, /* inode fork pointer */ xfs_extnum_t idx, /* index to begin adding exts */ - int ext_diff) /* nubmer of extents to add */ + int ext_diff) /* number of extents to add */ { int byte_diff; /* new bytes being added */ int new_size; /* size of extents after adding */ @@ -4038,7 +4038,7 @@ xfs_iext_remove_indirect( xfs_extnum_t ext_diff; /* extents to remove in current list */ xfs_extnum_t nex1; /* number of extents before idx */ xfs_extnum_t nex2; /* extents after idx + count */ - int nlists; /* entries in indirecton array */ + int nlists; /* entries in indirection array */ int page_idx = idx; /* index in target extent list */ ASSERT(ifp->if_flags & XFS_IFEXTIREC); @@ -4291,9 +4291,9 @@ xfs_iext_bno_to_ext( xfs_filblks_t blockcount = 0; /* number of blocks in extent */ xfs_bmbt_rec_t *ep = NULL; /* pointer to target extent */ xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ - int high; /* upper boundry in search */ + int high; /* upper boundary in search */ xfs_extnum_t idx = 0; /* index of target extent */ - int low; /* lower boundry in search */ + int low; /* lower boundary in search */ xfs_extnum_t nextents; /* number of file extents */ xfs_fileoff_t startoff = 0; /* start offset of extent */ diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 36aa1fcb90a..7497a481b2f 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -580,7 +580,7 @@ xfs_inode_item_unpin_remove( * been or is in the process of being flushed, then (ideally) we'd like to * see if the inode's buffer is still incore, and if so give it a nudge. * We delay doing so until the pushbuf routine, though, to avoid holding - * the AIL lock across a call to the blackhole which is the buffercache. + * the AIL lock across a call to the blackhole which is the buffer cache. * Also we don't want to sleep in any device strategy routines, which can happen * if we do the subsequent bawrite in here. 
*/ diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 32247b6bfee..94068d014f2 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -272,7 +272,7 @@ xfs_bulkstat( size_t statstruct_size, /* sizeof struct filling */ char __user *ubuffer, /* buffer with inode stats */ int flags, /* defined in xfs_itable.h */ - int *done) /* 1 if there're more stats to get */ + int *done) /* 1 if there are more stats to get */ { xfs_agblock_t agbno=0;/* allocation group block number */ xfs_buf_t *agbp; /* agi header buffer */ @@ -676,7 +676,7 @@ xfs_bulkstat_single( xfs_mount_t *mp, /* mount point for filesystem */ xfs_ino_t *lastinop, /* inode to return */ char __user *buffer, /* buffer with inode stats */ - int *done) /* 1 if there're more stats to get */ + int *done) /* 1 if there are more stats to get */ { int count; /* count value for bulkstat call */ int error; /* return value */ diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h index 047d834ed21..11eb4e1b18c 100644 --- a/fs/xfs/xfs_itable.h +++ b/fs/xfs/xfs_itable.h @@ -60,7 +60,7 @@ xfs_bulkstat( size_t statstruct_size,/* sizeof struct that we're filling */ char __user *ubuffer,/* buffer with inode stats */ int flags, /* flag to control access method */ - int *done); /* 1 if there're more stats to get */ + int *done); /* 1 if there are more stats to get */ int xfs_bulkstat_single( diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 9176995160e..32e841d2f26 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -59,7 +59,7 @@ STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp, int num_bblks); STATIC int xlog_space_left(xlog_t *log, int cycle, int bytes); STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog); -STATIC void xlog_unalloc_log(xlog_t *log); +STATIC void xlog_dealloc_log(xlog_t *log); STATIC int xlog_write(xfs_mount_t *mp, xfs_log_iovec_t region[], int nentries, xfs_log_ticket_t tic, xfs_lsn_t *start_lsn, @@ -304,7 +304,7 @@ xfs_log_done(xfs_mount_t *mp, if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 || (flags & XFS_LOG_REL_PERM_RESERV)) { /* - * Release ticket if not permanent reservation or a specifc + * Release ticket if not permanent reservation or a specific * request has been made to release a permanent reservation. */ xlog_trace_loggrant(log, ticket, "xfs_log_done: (non-permanent)"); @@ -511,7 +511,7 @@ xfs_log_mount(xfs_mount_t *mp, vfsp->vfs_flag |= VFS_RDONLY; if (error) { cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error); - xlog_unalloc_log(mp->m_log); + xlog_dealloc_log(mp->m_log); return error; } } @@ -667,7 +667,7 @@ xfs_log_unmount_write(xfs_mount_t *mp) * * Go through the motions of sync'ing and releasing * the iclog, even though no I/O will actually happen, - * we need to wait for other log I/O's that may already + * we need to wait for other log I/Os that may already * be in progress. 
Do this as a separate section of * code so we'll know if we ever get stuck here that * we're in this odd situation of trying to unmount @@ -704,7 +704,7 @@ xfs_log_unmount_write(xfs_mount_t *mp) void xfs_log_unmount_dealloc(xfs_mount_t *mp) { - xlog_unalloc_log(mp->m_log); + xlog_dealloc_log(mp->m_log); } /* @@ -1492,7 +1492,7 @@ xlog_sync(xlog_t *log, ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); - /* account for internal log which does't start at block #0 */ + /* account for internal log which doesn't start at block #0 */ XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); XFS_BUF_WRITE(bp); if ((error = XFS_bwrite(bp))) { @@ -1506,10 +1506,10 @@ xlog_sync(xlog_t *log, /* - * Unallocate a log structure + * Deallocate a log structure */ void -xlog_unalloc_log(xlog_t *log) +xlog_dealloc_log(xlog_t *log) { xlog_in_core_t *iclog, *next_iclog; xlog_ticket_t *tic, *next_tic; @@ -1539,7 +1539,7 @@ xlog_unalloc_log(xlog_t *log) if ((log->l_ticket_cnt != log->l_ticket_tcnt) && !XLOG_FORCED_SHUTDOWN(log)) { xfs_fs_cmn_err(CE_WARN, log->l_mp, - "xlog_unalloc_log: (cnt: %d, total: %d)", + "xlog_dealloc_log: (cnt: %d, total: %d)", log->l_ticket_cnt, log->l_ticket_tcnt); /* ASSERT(log->l_ticket_cnt == log->l_ticket_tcnt); */ @@ -1562,7 +1562,7 @@ xlog_unalloc_log(xlog_t *log) #endif log->l_mp->m_log = NULL; kmem_free(log, sizeof(xlog_t)); -} /* xlog_unalloc_log */ +} /* xlog_dealloc_log */ /* * Update counters atomically now that memcpy is done. @@ -2829,7 +2829,7 @@ xlog_state_release_iclog(xlog_t *log, /* * We let the log lock go, so it's possible that we hit a log I/O - * error or someother SHUTDOWN condition that marks the iclog + * error or some other SHUTDOWN condition that marks the iclog * as XLOG_STATE_IOERROR before the bwrite. However, we know that * this iclog has consistent data, so we ignore IOERROR * flags after this point. diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h index 4b2ac88dbb8..eacb3d4987f 100644 --- a/fs/xfs/xfs_log.h +++ b/fs/xfs/xfs_log.h @@ -27,7 +27,7 @@ #ifdef __KERNEL__ /* - * By comparing each compnent, we don't have to worry about extra + * By comparing each component, we don't have to worry about extra * endian issues in treating two 32 bit numbers as one 64 bit number */ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2) diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index add13f507ed..1f0016b0b4e 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -583,7 +583,7 @@ xlog_find_head( * x | x ... | x - 1 | x * Another case that fits this picture would be * x | x + 1 | x ... | x - * In this case the head really is somwhere at the end of the + * In this case the head really is somewhere at the end of the * log, as one of the latest writes at the beginning was * incomplete. * One more case is @@ -2799,7 +2799,7 @@ xlog_recover_do_trans( * we don't need to worry about the block number being * truncated in > 1 TB buffers because in user-land, * we're now n32 or 64-bit so xfs_daddr_t is 64-bits so - * the blkno's will get through the user-mode buffer + * the blknos will get through the user-mode buffer * cache properly. The only bad case is o32 kernels * where xfs_daddr_t is 32-bits but mount will warn us * off a > 1 TB filesystem before we get here. 
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 20e8abc16d1..72e7e78bfff 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -393,7 +393,7 @@ xfs_initialize_perag( break; } - /* This ag is prefered for inodes */ + /* This ag is preferred for inodes */ pag = &mp->m_perag[index]; pag->pagi_inodeok = 1; if (index < max_metadata) @@ -1728,7 +1728,7 @@ xfs_mount_log_sbunit( * We cannot use the hotcpu_register() function because it does * not allow notifier instances. We need a notifier per filesystem * as we need to be able to identify the filesystem to balance - * the counters out. This is acheived by having a notifier block + * the counters out. This is achieved by having a notifier block * embedded in the xfs_mount_t and doing pointer magic to get the * mount pointer from the notifier block address. */ diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index ebd73960e9d..66cbee79864 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -379,7 +379,7 @@ typedef struct xfs_mount { #endif int m_dalign; /* stripe unit */ int m_swidth; /* stripe width */ - int m_sinoalign; /* stripe unit inode alignmnt */ + int m_sinoalign; /* stripe unit inode alignment */ int m_attr_magicpct;/* 37% of the blocksize */ int m_dir_magicpct; /* 37% of the dir blocksize */ __uint8_t m_mk_sharedro; /* mark shared ro on unmount */ diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h index 82a08baf437..4f6a034de7f 100644 --- a/fs/xfs/xfs_quota.h +++ b/fs/xfs/xfs_quota.h @@ -31,7 +31,7 @@ typedef __uint32_t xfs_dqid_t; /* - * Eventhough users may not have quota limits occupying all 64-bits, + * Even though users may not have quota limits occupying all 64-bits, * they may need 64-bit accounting. Hence, 64-bit quota-counters, * and quota-limits. This is a waste in the common case, but hey ... */ @@ -246,7 +246,7 @@ typedef struct xfs_qoff_logformat { #ifdef __KERNEL__ /* * This check is done typically without holding the inode lock; - * that may seem racey, but it is harmless in the context that it is used. + * that may seem racy, but it is harmless in the context that it is used. * The inode cannot go inactive as long a reference is kept, and * therefore if dquot(s) were attached, they'll stay consistent. * If, for example, the ownership of the inode changes while diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 2918956553a..8d056cef5d1 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -490,7 +490,7 @@ xfs_trans_mod_sb( case XFS_TRANS_SB_RES_FREXTENTS: /* * The allocation has already been applied to the - * in-core superblocks's counter. This should only + * in-core superblock's counter. This should only * be applied to the on-disk superblock. */ ASSERT(delta < 0); @@ -611,7 +611,7 @@ xfs_trans_apply_sb_deltas( if (whole) /* - * Log the whole thing, the fields are discontiguous. + * Log the whole thing, the fields are noncontiguous. */ xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_sb_t) - 1); else @@ -669,7 +669,7 @@ xfs_trans_unreserve_and_mod_sb( /* * Apply any superblock modifications to the in-core version. * The t_res_fdblocks_delta and t_res_frextents_delta fields are - * explicity NOT applied to the in-core superblock. + * explicitly NOT applied to the in-core superblock. * The idea is that that has already been done. 
*/ if (tp->t_flags & XFS_TRANS_SB_DIRTY) { diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index e48befa4e33..100d9a4b38e 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -354,7 +354,7 @@ typedef struct xfs_trans { xfs_lsn_t t_commit_lsn; /* log seq num of end of * transaction. */ struct xfs_mount *t_mountp; /* ptr to fs mount struct */ - struct xfs_dquot_acct *t_dqinfo; /* accting info for dquots */ + struct xfs_dquot_acct *t_dqinfo; /* acctg info for dquots */ xfs_trans_callback_t t_callback; /* transaction callback */ void *t_callarg; /* callback arg */ unsigned int t_flags; /* misc flags */ diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c index e341409172d..7c5894d59f8 100644 --- a/fs/xfs/xfs_trans_inode.c +++ b/fs/xfs/xfs_trans_inode.c @@ -272,7 +272,7 @@ xfs_trans_log_inode( * This is to coordinate with the xfs_iflush() and xfs_iflush_done() * routines in the eventual clearing of the ilf_fields bits. * See the big comment in xfs_iflush() for an explanation of - * this coorination mechanism. + * this coordination mechanism. */ flags |= ip->i_itemp->ili_last_fields; ip->i_itemp->ili_format.ilf_fields |= flags; diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index d4ec4dfaf19..504d2a80747 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c @@ -880,10 +880,10 @@ xfs_statvfs( * determine if they should be flushed sync, async, or * delwri. * SYNC_CLOSE - This flag is passed when the system is being - * unmounted. We should sync and invalidate everthing. + * unmounted. We should sync and invalidate everything. * SYNC_FSDATA - This indicates that the caller would like to make * sure the superblock is safe on disk. We can ensure - * this by simply makeing sure the log gets flushed + * this by simply making sure the log gets flushed * if SYNC_BDFLUSH is set, and by actually writing it * out otherwise. * @@ -908,7 +908,7 @@ xfs_sync( * * This routine supports all of the flags defined for the generic VFS_SYNC * interface as explained above under xfs_sync. In the interests of not - * changing interfaces within the 6.5 family, additional internallly- + * changing interfaces within the 6.5 family, additional internally- * required functions are specified within a separate xflags parameter, * only available by calling this routine. * @@ -1090,7 +1090,7 @@ xfs_sync_inodes( * If this is just vfs_sync() or pflushd() calling * then we can skip inodes for which it looks like * there is nothing to do. Since we don't have the - * inode locked this is racey, but these are periodic + * inode locked this is racy, but these are periodic * calls so it doesn't matter. For the others we want * to know for sure, so we at least try to lock them. */ @@ -1429,7 +1429,7 @@ xfs_sync_inodes( * * This routine supports all of the flags defined for the generic VFS_SYNC * interface as explained above under xfs_sync. In the interests of not - * changing interfaces within the 6.5 family, additional internallly- + * changing interfaces within the 6.5 family, additional internally- * required functions are specified within a separate xflags parameter, * only available by calling this routine. * diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 0f0a64e81db..de49601919c 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -848,7 +848,7 @@ xfs_setattr( * If this is a synchronous mount, make sure that the * transaction goes to disk before returning to the user. 
* This is slightly sub-optimal in that truncates require - * two sync transactions instead of one for wsync filesytems. + * two sync transactions instead of one for wsync filesystems. * One for the truncate and one for the timestamps since we * don't want to change the timestamps unless we're sure the * truncate worked. Truncates are less than 1% of the laddis @@ -1170,7 +1170,7 @@ xfs_fsync( /* * If this inode is on the RT dev we need to flush that - * cache aswell. + * cache as well. */ if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp); @@ -1380,7 +1380,7 @@ xfs_inactive_symlink_rmt( */ ntp = xfs_trans_dup(tp); /* - * Commit the transaction containing extent freeing and EFD's. + * Commit the transaction containing extent freeing and EFDs. * If we get an error on the commit here or on the reserve below, * we need to unlock the inode since the new transaction doesn't * have the inode attached. @@ -2023,7 +2023,7 @@ xfs_create( XFS_QM_DQRELE(mp, gdqp); /* - * Propogate the fact that the vnode changed after the + * Propagate the fact that the vnode changed after the * xfs_inode locks have been released. */ VOP_VNODE_CHANGE(vp, VCHANGE_FLAGS_TRUNCATED, 3); @@ -2370,7 +2370,7 @@ xfs_remove( * for a log reservation. Since we'll have to wait for the * inactive code to complete before returning from xfs_iget, * we need to make sure that we don't have log space reserved - * when we call xfs_iget. Instead we get an unlocked referece + * when we call xfs_iget. Instead we get an unlocked reference * to the inode before getting our log reservation. */ error = xfs_get_dir_entry(dentry, &ip); @@ -3020,7 +3020,7 @@ xfs_rmdir( * for a log reservation. Since we'll have to wait for the * inactive code to complete before returning from xfs_iget, * we need to make sure that we don't have log space reserved - * when we call xfs_iget. Instead we get an unlocked referece + * when we call xfs_iget. Instead we get an unlocked reference * to the inode before getting our log reservation. */ error = xfs_get_dir_entry(dentry, &cdp); |