author     Linus Torvalds <torvalds@linux-foundation.org>  2014-06-11 10:45:14 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-06-11 10:45:14 -0700
commit     2840c566e95599cd60c7143762ca8b49d9395050 (patch)
tree       e2bc9e5a65e613c00ac9fca63d83d26458355bdf
parent     859862ddd2b6b8dee00498c015ab37f02474b442 (diff)
parent     19ef1229bc2e2468bdf4ea594a57e4287ffa1e6b (diff)
Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs
Pull reiserfs and ext3 changes from Jan Kara:
 "Big reiserfs cleanup from Jeff, an ext3 deadlock fix, and some small
  cleanups"

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs: (34 commits)
  reiserfs: Fix compilation breakage with CONFIG_REISERFS_CHECK
  ext3: Fix deadlock in data=journal mode when fs is frozen
  reiserfs: call truncate_setsize under tailpack mutex
  fs/jbd/revoke.c: replace shift loop by ilog2
  reiserfs: remove obsolete __constant_cpu_to_le32
  reiserfs: balance_leaf refactor, split up balance_leaf_when_delete
  reiserfs: balance_leaf refactor, format balance_leaf_finish_node
  reiserfs: balance_leaf refactor, format balance_leaf_new_nodes_paste
  reiserfs: balance_leaf refactor, format balance_leaf_paste_right
  reiserfs: balance_leaf refactor, format balance_leaf_insert_right
  reiserfs: balance_leaf refactor, format balance_leaf_paste_left
  reiserfs: balance_leaf refactor, format balance_leaf_insert_left
  reiserfs: balance_leaf refactor, pull out balance_leaf{left, right, new_nodes, finish_node}
  reiserfs: balance_leaf refactor, pull out balance_leaf_finish_node_paste
  reiserfs: balance_leaf refactor pull out balance_leaf_finish_node_insert
  reiserfs: balance_leaf refactor, pull out balance_leaf_new_nodes_paste
  reiserfs: balance_leaf refactor, pull out balance_leaf_new_nodes_insert
  reiserfs: balance_leaf refactor, pull out balance_leaf_paste_right
  reiserfs: balance_leaf refactor, pull out balance_leaf_insert_right
  reiserfs: balance_leaf refactor, pull out balance_leaf_paste_left
  ...
-rw-r--r--  fs/ext3/inode.c              33
-rw-r--r--  fs/jbd/revoke.c              12
-rw-r--r--  fs/reiserfs/bitmap.c         259
-rw-r--r--  fs/reiserfs/dir.c            156
-rw-r--r--  fs/reiserfs/do_balan.c       2449
-rw-r--r--  fs/reiserfs/file.c           90
-rw-r--r--  fs/reiserfs/fix_node.c       1008
-rw-r--r--  fs/reiserfs/hashes.c         15
-rw-r--r--  fs/reiserfs/ibalance.c       271
-rw-r--r--  fs/reiserfs/inode.c          1206
-rw-r--r--  fs/reiserfs/ioctl.c          27
-rw-r--r--  fs/reiserfs/item_ops.c       108
-rw-r--r--  fs/reiserfs/journal.c        1339
-rw-r--r--  fs/reiserfs/lbalance.c       501
-rw-r--r--  fs/reiserfs/namei.c          513
-rw-r--r--  fs/reiserfs/objectid.c       101
-rw-r--r--  fs/reiserfs/prints.c         176
-rw-r--r--  fs/reiserfs/reiserfs.h       1921
-rw-r--r--  fs/reiserfs/resize.c         75
-rw-r--r--  fs/reiserfs/stree.c          884
-rw-r--r--  fs/reiserfs/super.c          552
-rw-r--r--  fs/reiserfs/tail_conversion.c  161
-rw-r--r--  fs/reiserfs/xattr.c          70
-rw-r--r--  fs/reiserfs/xattr.h          3
-rw-r--r--  fs/reiserfs/xattr_acl.c      38
25 files changed, 7052 insertions, 4916 deletions
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index f5157d0d1b4..695abe738a2 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1716,17 +1716,17 @@ static int ext3_journalled_writepage(struct page *page,
WARN_ON_ONCE(IS_RDONLY(inode) &&
!(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
- if (ext3_journal_current_handle())
- goto no_write;
-
trace_ext3_journalled_writepage(page);
- handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto no_write;
- }
-
if (!page_has_buffers(page) || PageChecked(page)) {
+ if (ext3_journal_current_handle())
+ goto no_write;
+
+ handle = ext3_journal_start(inode,
+ ext3_writepage_trans_blocks(inode));
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ goto no_write;
+ }
/*
* It's mmapped pagecache. Add buffers and journal it. There
* doesn't seem much point in redirtying the page here.
@@ -1749,17 +1749,18 @@ static int ext3_journalled_writepage(struct page *page,
atomic_set(&EXT3_I(inode)->i_datasync_tid,
handle->h_transaction->t_tid);
unlock_page(page);
+ err = ext3_journal_stop(handle);
+ if (!ret)
+ ret = err;
} else {
/*
- * It may be a page full of checkpoint-mode buffers. We don't
- * really know unless we go poke around in the buffer_heads.
- * But block_write_full_page will do the right thing.
+ * It is a page full of checkpoint-mode buffers. Go and write
+ * them. They should have been already mapped when they went
+ * to the journal so provide NULL get_block function to catch
+ * errors.
*/
- ret = block_write_full_page(page, ext3_get_block, wbc);
+ ret = block_write_full_page(page, NULL, wbc);
}
- err = ext3_journal_stop(handle);
- if (!ret)
- ret = err;
out:
return ret;
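
The fix above hinges on only opening a journal handle on the path that actually journals the page: starting a handle can block while the filesystem is frozen, and doing so on the checkpoint-buffer path (which needs no handle at all) is what could deadlock writeback against freezing. Below is a condensed, compilable sketch of the resulting control flow; all types and calls are toy stand-ins chosen for illustration, not the ext3/JBD API.

#include <stdio.h>

/* toy stand-ins for the ext3/journal API (illustrative only) */
typedef struct { int stop_err; } handle_t;
static handle_t toy_handle;

static handle_t *journal_start(void)       { return &toy_handle; }
static int journal_stop(handle_t *h)       { return h->stop_err; }
static int write_full_page_no_handle(void) { return 0; }

/* post-patch shape of ext3_journalled_writepage() */
static int journalled_writepage(int page_has_buffers, int page_checked)
{
	int ret = 0, err;

	if (!page_has_buffers || page_checked) {
		/*
		 * mmapped pagecache: this path journals the page, so
		 * only here is a transaction handle started
		 */
		handle_t *handle = journal_start();
		/* ... add buffers and journal them ... */
		err = journal_stop(handle);
		if (!ret)
			ret = err;	/* first error wins */
	} else {
		/*
		 * checkpoint-mode buffers are already mapped: no handle
		 * is started, so a frozen journal cannot block this path
		 */
		ret = write_full_page_no_handle();
	}
	return ret;
}

int main(void)
{
	printf("journal path: %d, checkpoint path: %d\n",
	       journalled_writepage(0, 0), journalled_writepage(1, 0));
	return 0;
}

Note the error-propagation idiom the patch moves along with the handle: a failure from stopping the handle is reported only when the write itself succeeded.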
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index 25c713e7071..8898bbd2b61 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -231,19 +231,15 @@ record_cache_failure:
static struct jbd_revoke_table_s *journal_init_revoke_table(int hash_size)
{
- int shift = 0;
- int tmp = hash_size;
+ int i;
struct jbd_revoke_table_s *table;
table = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
if (!table)
goto out;
- while((tmp >>= 1UL) != 0UL)
- shift++;
-
table->hash_size = hash_size;
- table->hash_shift = shift;
+ table->hash_shift = ilog2(hash_size);
table->hash_table =
kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
if (!table->hash_table) {
@@ -252,8 +248,8 @@ static struct jbd_revoke_table_s *journal_init_revoke_table(int hash_size)
goto out;
}
- for (tmp = 0; tmp < hash_size; tmp++)
- INIT_LIST_HEAD(&table->hash_table[tmp]);
+ for (i = 0; i < hash_size; i++)
+ INIT_LIST_HEAD(&table->hash_table[i]);
out:
return table;
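
The conversion in journal_init_revoke_table() is behavior-preserving: the removed loop computed floor(log2(hash_size)) by counting right shifts, which is exactly what ilog2() returns for the power-of-two hash sizes used here. A standalone check in plain userspace C; __builtin_clz (a GCC/Clang builtin) stands in for the kernel's ilog2():

#include <assert.h>
#include <stdio.h>

/* the shift loop removed by the patch */
static int shift_loop(unsigned int hash_size)
{
	int shift = 0;
	unsigned int tmp = hash_size;

	while ((tmp >>= 1) != 0)
		shift++;
	return shift;
}

/* floor(log2(n)) for n > 0, as ilog2() computes it */
static int ilog2_stand_in(unsigned int n)
{
	return 31 - __builtin_clz(n);
}

int main(void)
{
	unsigned int sizes[] = { 16, 64, 256, 1024, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		assert(shift_loop(sizes[i]) == ilog2_stand_in(sizes[i]));
		printf("hash_size %4u -> hash_shift %d\n",
		       sizes[i], shift_loop(sizes[i]));
	}
	return 0;
}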
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index 1bcffeab713..dc198bc64c6 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -50,8 +50,10 @@ static inline void get_bit_address(struct super_block *s,
unsigned int *bmap_nr,
unsigned int *offset)
{
- /* It is in the bitmap block number equal to the block
- * number divided by the number of bits in a block. */
+ /*
+ * It is in the bitmap block number equal to the block
+ * number divided by the number of bits in a block.
+ */
*bmap_nr = block >> (s->s_blocksize_bits + 3);
/* Within that bitmap block it is located at bit offset *offset. */
*offset = block & ((s->s_blocksize << 3) - 1);
@@ -71,10 +73,12 @@ int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value)
get_bit_address(s, block, &bmap, &offset);
- /* Old format filesystem? Unlikely, but the bitmaps are all up front so
- * we need to account for it. */
+ /*
+ * Old format filesystem? Unlikely, but the bitmaps are all
+ * up front so we need to account for it.
+ */
if (unlikely(test_bit(REISERFS_OLD_FORMAT,
- &(REISERFS_SB(s)->s_properties)))) {
+ &REISERFS_SB(s)->s_properties))) {
b_blocknr_t bmap1 = REISERFS_SB(s)->s_sbh->b_blocknr + 1;
if (block >= bmap1 &&
block <= bmap1 + bmap_count) {
@@ -108,8 +112,11 @@ int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value)
return 1;
}
-/* searches in journal structures for a given block number (bmap, off). If block
- is found in reiserfs journal it suggests next free block candidate to test. */
+/*
+ * Searches in journal structures for a given block number (bmap, off).
+ * If block is found in reiserfs journal it suggests next free block
+ * candidate to test.
+ */
static inline int is_block_in_journal(struct super_block *s, unsigned int bmap,
int off, int *next)
{
@@ -120,7 +127,7 @@ static inline int is_block_in_journal(struct super_block *s, unsigned int bmap,
*next = tmp;
PROC_INFO_INC(s, scan_bitmap.in_journal_hint);
} else {
- (*next) = off + 1; /* inc offset to avoid looping. */
+ (*next) = off + 1; /* inc offset to avoid looping. */
PROC_INFO_INC(s, scan_bitmap.in_journal_nohint);
}
PROC_INFO_INC(s, scan_bitmap.retry);
@@ -129,8 +136,10 @@ static inline int is_block_in_journal(struct super_block *s, unsigned int bmap,
return 0;
}
-/* it searches for a window of zero bits with given minimum and maximum lengths in one bitmap
- * block; */
+/*
+ * Searches for a window of zero bits with given minimum and maximum
+ * lengths in one bitmap block
+ */
static int scan_bitmap_block(struct reiserfs_transaction_handle *th,
unsigned int bmap_n, int *beg, int boundary,
int min, int max, int unfm)
@@ -145,10 +154,6 @@ static int scan_bitmap_block(struct reiserfs_transaction_handle *th,
RFALSE(bmap_n >= reiserfs_bmap_count(s), "Bitmap %u is out of "
"range (0..%u)", bmap_n, reiserfs_bmap_count(s) - 1);
PROC_INFO_INC(s, scan_bitmap.bmap);
-/* this is unclear and lacks comments, explain how journal bitmaps
- work here for the reader. Convey a sense of the design here. What
- is a window? */
-/* - I mean `a window of zero bits' as in description of this function - Zam. */
if (!bi) {
reiserfs_error(s, "jdm-4055", "NULL bitmap info pointer "
@@ -161,18 +166,21 @@ static int scan_bitmap_block(struct reiserfs_transaction_handle *th,
return 0;
while (1) {
- cont:
+cont:
if (bi->free_count < min) {
brelse(bh);
- return 0; // No free blocks in this bitmap
+ return 0; /* No free blocks in this bitmap */
}
/* search for a first zero bit -- beginning of a window */
*beg = reiserfs_find_next_zero_le_bit
((unsigned long *)(bh->b_data), boundary, *beg);
- if (*beg + min > boundary) { /* search for a zero bit fails or the rest of bitmap block
- * cannot contain a zero window of minimum size */
+ /*
+ * search for a zero bit fails or the rest of bitmap block
+ * cannot contain a zero window of minimum size
+ */
+ if (*beg + min > boundary) {
brelse(bh);
return 0;
}
@@ -186,49 +194,75 @@ static int scan_bitmap_block(struct reiserfs_transaction_handle *th,
next = end;
break;
}
- /* finding the other end of zero bit window requires looking into journal structures (in
- * case of searching for free blocks for unformatted nodes) */
+
+ /*
+ * finding the other end of zero bit window requires
+ * looking into journal structures (in case of
+ * searching for free blocks for unformatted nodes)
+ */
if (unfm && is_block_in_journal(s, bmap_n, end, &next))
break;
}
- /* now (*beg) points to beginning of zero bits window,
- * (end) points to one bit after the window end */
- if (end - *beg >= min) { /* it seems we have found window of proper size */
+ /*
+ * now (*beg) points to beginning of zero bits window,
+ * (end) points to one bit after the window end
+ */
+
+ /* found window of proper size */
+ if (end - *beg >= min) {
int i;
reiserfs_prepare_for_journal(s, bh, 1);
- /* try to set all blocks used checking are they still free */
+ /*
+ * try to set all blocks used checking are
+ * they still free
+ */
for (i = *beg; i < end; i++) {
- /* It seems that we should not check in journal again. */
+ /* Don't check in journal again. */
if (reiserfs_test_and_set_le_bit
(i, bh->b_data)) {
- /* bit was set by another process
- * while we slept in prepare_for_journal() */
+ /*
+ * bit was set by another process while
+ * we slept in prepare_for_journal()
+ */
PROC_INFO_INC(s, scan_bitmap.stolen);
- if (i >= *beg + min) { /* we can continue with smaller set of allocated blocks,
- * if length of this set is more or equal to `min' */
+
+ /*
+ * we can continue with smaller set
+ * of allocated blocks, if length of
+ * this set is more or equal to `min'
+ */
+ if (i >= *beg + min) {
end = i;
break;
}
- /* otherwise we clear all bit were set ... */
+
+ /*
+ * otherwise we clear all bit
+ * were set ...
+ */
while (--i >= *beg)
reiserfs_clear_le_bit
(i, bh->b_data);
reiserfs_restore_prepared_buffer(s, bh);
*beg = org;
- /* ... and search again in current block from beginning */
+
+ /*
+ * Search again in current block
+ * from beginning
+ */
goto cont;
}
}
bi->free_count -= (end - *beg);
- journal_mark_dirty(th, s, bh);
+ journal_mark_dirty(th, bh);
brelse(bh);
/* free block count calculation */
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s),
1);
PUT_SB_FREE_BLOCKS(s, SB_FREE_BLOCKS(s) - (end - *beg));
- journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));
+ journal_mark_dirty(th, SB_BUFFER_WITH_SB(s));
return end - (*beg);
} else {
@@ -267,11 +301,13 @@ static inline int block_group_used(struct super_block *s, u32 id)
int bm = bmap_hash_id(s, id);
struct reiserfs_bitmap_info *info = &SB_AP_BITMAP(s)[bm];
- /* If we don't have cached information on this bitmap block, we're
+ /*
+ * If we don't have cached information on this bitmap block, we're
* going to have to load it later anyway. Loading it here allows us
* to make a better decision. This favors long-term performance gain
* with a better on-disk layout vs. a short term gain of skipping the
- * read and potentially having a bad placement. */
+ * read and potentially having a bad placement.
+ */
if (info->free_count == UINT_MAX) {
struct buffer_head *bh = reiserfs_read_bitmap_block(s, bm);
brelse(bh);
@@ -304,25 +340,26 @@ __le32 reiserfs_choose_packing(struct inode * dir)
return packing;
}
-/* Tries to find contiguous zero bit window (given size) in given region of
- * bitmap and place new blocks there. Returns number of allocated blocks. */
+/*
+ * Tries to find contiguous zero bit window (given size) in given region of
+ * bitmap and place new blocks there. Returns number of allocated blocks.
+ */
static int scan_bitmap(struct reiserfs_transaction_handle *th,
b_blocknr_t * start, b_blocknr_t finish,
int min, int max, int unfm, sector_t file_block)
{
int nr_allocated = 0;
struct super_block *s = th->t_super;
- /* find every bm and bmap and bmap_nr in this file, and change them all to bitmap_blocknr
- * - Hans, it is not a block number - Zam. */
-
unsigned int bm, off;
unsigned int end_bm, end_off;
unsigned int off_max = s->s_blocksize << 3;
BUG_ON(!th->t_trans_id);
PROC_INFO_INC(s, scan_bitmap.call);
+
+ /* No point in looking for more free blocks */
if (SB_FREE_BLOCKS(s) <= 0)
- return 0; // No point in looking for more free blocks
+ return 0;
get_bit_address(s, *start, &bm, &off);
get_bit_address(s, finish, &end_bm, &end_off);
@@ -331,7 +368,8 @@ static int scan_bitmap(struct reiserfs_transaction_handle *th,
if (end_bm > reiserfs_bmap_count(s))
end_bm = reiserfs_bmap_count(s);
- /* When the bitmap is more than 10% free, anyone can allocate.
+ /*
+ * When the bitmap is more than 10% free, anyone can allocate.
* When it's less than 10% free, only files that already use the
* bitmap are allowed. Once we pass 80% full, this restriction
* is lifted.
@@ -369,7 +407,7 @@ static int scan_bitmap(struct reiserfs_transaction_handle *th,
nr_allocated =
scan_bitmap_block(th, bm, &off, end_off + 1, min, max, unfm);
- ret:
+ret:
*start = bm * off_max + off;
return nr_allocated;
@@ -411,14 +449,14 @@ static void _reiserfs_free_block(struct reiserfs_transaction_handle *th,
"block %lu: bit already cleared", block);
}
apbi[nr].free_count++;
- journal_mark_dirty(th, s, bmbh);
+ journal_mark_dirty(th, bmbh);
brelse(bmbh);
reiserfs_prepare_for_journal(s, sbh, 1);
/* update super block */
set_sb_free_blocks(rs, sb_free_blocks(rs) + 1);
- journal_mark_dirty(th, s, sbh);
+ journal_mark_dirty(th, sbh);
if (for_unformatted) {
int depth = reiserfs_write_unlock_nested(s);
dquot_free_block_nodirty(inode, 1);
@@ -483,7 +521,7 @@ static void __discard_prealloc(struct reiserfs_transaction_handle *th,
if (dirty)
reiserfs_update_sd(th, inode);
ei->i_prealloc_block = save;
- list_del_init(&(ei->i_prealloc_list));
+ list_del_init(&ei->i_prealloc_list);
}
/* FIXME: It should be inline function */
@@ -529,7 +567,8 @@ int reiserfs_parse_alloc_options(struct super_block *s, char *options)
{
char *this_char, *value;
- REISERFS_SB(s)->s_alloc_options.bits = 0; /* clear default settings */
+ /* clear default settings */
+ REISERFS_SB(s)->s_alloc_options.bits = 0;
while ((this_char = strsep(&options, ":")) != NULL) {
if ((value = strchr(this_char, '=')) != NULL)
@@ -731,7 +770,7 @@ static inline void new_hashed_relocation(reiserfs_blocknr_hint_t * hint)
hash_in = (char *)&hint->key.k_dir_id;
} else {
if (!hint->inode) {
- //hint->search_start = hint->beg;
+ /*hint->search_start = hint->beg;*/
hash_in = (char *)&hint->key.k_dir_id;
} else
if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
@@ -785,7 +824,8 @@ static void oid_groups(reiserfs_blocknr_hint_t * hint)
dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id);
- /* keep the root dir and it's first set of subdirs close to
+ /*
+ * keep the root dir and it's first set of subdirs close to
* the start of the disk
*/
if (dirid <= 2)
@@ -799,7 +839,8 @@ static void oid_groups(reiserfs_blocknr_hint_t * hint)
}
}
-/* returns 1 if it finds an indirect item and gets valid hint info
+/*
+ * returns 1 if it finds an indirect item and gets valid hint info
* from it, otherwise 0
*/
static int get_left_neighbor(reiserfs_blocknr_hint_t * hint)
@@ -811,25 +852,29 @@ static int get_left_neighbor(reiserfs_blocknr_hint_t * hint)
__le32 *item;
int ret = 0;
- if (!hint->path) /* reiserfs code can call this function w/o pointer to path
- * structure supplied; then we rely on supplied search_start */
+ /*
+ * reiserfs code can call this function w/o pointer to path
+ * structure supplied; then we rely on supplied search_start
+ */
+ if (!hint->path)
return 0;
path = hint->path;
bh = get_last_bh(path);
RFALSE(!bh, "green-4002: Illegal path specified to get_left_neighbor");
- ih = get_ih(path);
+ ih = tp_item_head(path);
pos_in_item = path->pos_in_item;
- item = get_item(path);
+ item = tp_item_body(path);
hint->search_start = bh->b_blocknr;
+ /*
+ * for indirect item: go to left and look for the first non-hole entry
+ * in the indirect item
+ */
if (!hint->formatted_node && is_indirect_le_ih(ih)) {
- /* for indirect item: go to left and look for the first non-hole entry
- in the indirect item */
if (pos_in_item == I_UNFM_NUM(ih))
pos_in_item--;
-// pos_in_item = I_UNFM_NUM (ih) - 1;
while (pos_in_item >= 0) {
int t = get_block_num(item, pos_in_item);
if (t) {
@@ -845,10 +890,12 @@ static int get_left_neighbor(reiserfs_blocknr_hint_t * hint)
return ret;
}
-/* should be, if formatted node, then try to put on first part of the device
- specified as number of percent with mount option device, else try to put
- on last of device. This is not to say it is good code to do so,
- but the effect should be measured. */
+/*
+ * should be, if formatted node, then try to put on first part of the device
+ * specified as number of percent with mount option device, else try to put
+ * on last of device. This is not to say it is good code to do so,
+ * but the effect should be measured.
+ */
static inline void set_border_in_hint(struct super_block *s,
reiserfs_blocknr_hint_t * hint)
{
@@ -974,21 +1021,27 @@ static void determine_search_start(reiserfs_blocknr_hint_t * hint,
set_border_in_hint(s, hint);
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
- /* whenever we create a new directory, we displace it. At first we will
- hash for location, later we might look for a moderately empty place for
- it */
+ /*
+ * whenever we create a new directory, we displace it. At first
+ * we will hash for location, later we might look for a moderately
+ * empty place for it
+ */
if (displacing_new_packing_localities(s)
&& hint->th->displace_new_blocks) {
displace_new_packing_locality(hint);
- /* we do not continue determine_search_start,
- * if new packing locality is being displaced */
+ /*
+ * we do not continue determine_search_start,
+ * if new packing locality is being displaced
+ */
return;
}
#endif
- /* all persons should feel encouraged to add more special cases here and
- * test them */
+ /*
+ * all persons should feel encouraged to add more special cases
+ * here and test them
+ */
if (displacing_large_files(s) && !hint->formatted_node
&& this_blocknr_allocation_would_make_it_a_large_file(hint)) {
@@ -996,8 +1049,10 @@ static void determine_search_start(reiserfs_blocknr_hint_t * hint,
return;
}
- /* if none of our special cases is relevant, use the left neighbor in the
- tree order of the new node we are allocating for */
+ /*
+ * if none of our special cases is relevant, use the left
+ * neighbor in the tree order of the new node we are allocating for
+ */
if (hint->formatted_node && TEST_OPTION(hashed_formatted_nodes, s)) {
hash_formatted_node(hint);
return;
@@ -1005,10 +1060,13 @@ static void determine_search_start(reiserfs_blocknr_hint_t * hint,
unfm_hint = get_left_neighbor(hint);
- /* Mimic old block allocator behaviour, that is if VFS allowed for preallocation,
- new blocks are displaced based on directory ID. Also, if suggested search_start
- is less than last preallocated block, we start searching from it, assuming that
- HDD dataflow is faster in forward direction */
+ /*
+ * Mimic old block allocator behaviour, that is if VFS allowed for
+ * preallocation, new blocks are displaced based on directory ID.
+ * Also, if suggested search_start is less than last preallocated
+ * block, we start searching from it, assuming that HDD dataflow
+ * is faster in forward direction
+ */
if (TEST_OPTION(old_way, s)) {
if (!hint->formatted_node) {
if (!reiserfs_hashed_relocation(s))
@@ -1037,11 +1095,13 @@ static void determine_search_start(reiserfs_blocknr_hint_t * hint,
TEST_OPTION(old_hashed_relocation, s)) {
old_hashed_relocation(hint);
}
+
/* new_hashed_relocation works with both formatted/unformatted nodes */
if ((!unfm_hint || hint->formatted_node) &&
TEST_OPTION(new_hashed_relocation, s)) {
new_hashed_relocation(hint);
}
+
/* dirid grouping works only on unformatted nodes */
if (!unfm_hint && !hint->formatted_node && TEST_OPTION(dirid_groups, s)) {
dirid_groups(hint);
@@ -1079,8 +1139,6 @@ static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint)
return CARRY_ON;
}
-/* XXX I know it could be merged with upper-level function;
- but may be result function would be too complex. */
static inline int allocate_without_wrapping_disk(reiserfs_blocknr_hint_t * hint,
b_blocknr_t * new_blocknrs,
b_blocknr_t start,
@@ -1108,7 +1166,10 @@ static inline int allocate_without_wrapping_disk(reiserfs_blocknr_hint_t * hint,
/* do we have something to fill prealloc. array also ? */
if (nr_allocated > 0) {
- /* it means prealloc_size was greater that 0 and we do preallocation */
+ /*
+ * it means prealloc_size was greater that 0 and
+ * we do preallocation
+ */
list_add(&REISERFS_I(hint->inode)->i_prealloc_list,
&SB_JOURNAL(hint->th->t_super)->
j_prealloc_list);
@@ -1176,7 +1237,8 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
start = 0;
finish = hint->beg;
break;
- default: /* We've tried searching everywhere, not enough space */
+ default:
+ /* We've tried searching everywhere, not enough space */
/* Free the blocks */
if (!hint->formatted_node) {
#ifdef REISERQUOTA_DEBUG
@@ -1261,8 +1323,11 @@ static int use_preallocated_list_if_available(reiserfs_blocknr_hint_t * hint,
return amount_needed;
}
-int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t * hint, b_blocknr_t * new_blocknrs, int amount_needed, int reserved_by_us /* Amount of blocks we have
- already reserved */ )
+int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *hint,
+ b_blocknr_t *new_blocknrs,
+ int amount_needed,
+ /* Amount of blocks we have already reserved */
+ int reserved_by_us)
{
int initial_amount_needed = amount_needed;
int ret;
@@ -1274,15 +1339,21 @@ int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t * hint, b_blocknr_t * new
return NO_DISK_SPACE;
/* should this be if !hint->inode && hint->preallocate? */
/* do you mean hint->formatted_node can be removed ? - Zam */
- /* hint->formatted_node cannot be removed because we try to access
- inode information here, and there is often no inode assotiated with
- metadata allocations - green */
+ /*
+ * hint->formatted_node cannot be removed because we try to access
+ * inode information here, and there is often no inode associated with
+ * metadata allocations - green
+ */
if (!hint->formatted_node && hint->preallocate) {
amount_needed = use_preallocated_list_if_available
(hint, new_blocknrs, amount_needed);
- if (amount_needed == 0) /* all blocknrs we need we got from
- prealloc. list */
+
+ /*
+ * We have all the block numbers we need from the
+ * prealloc list
+ */
+ if (amount_needed == 0)
return CARRY_ON;
new_blocknrs += (initial_amount_needed - amount_needed);
}
@@ -1296,10 +1367,12 @@ int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t * hint, b_blocknr_t * new
ret = blocknrs_and_prealloc_arrays_from_search_start
(hint, new_blocknrs, amount_needed);
- /* we used prealloc. list to fill (partially) new_blocknrs array. If final allocation fails we
- * need to return blocks back to prealloc. list or just free them. -- Zam (I chose second
- * variant) */
-
+ /*
+ * We used prealloc. list to fill (partially) new_blocknrs array.
+ * If final allocation fails we need to return blocks back to
+ * prealloc. list or just free them. -- Zam (I chose second
+ * variant)
+ */
if (ret != CARRY_ON) {
while (amount_needed++ < initial_amount_needed) {
reiserfs_free_block(hint->th, hint->inode,
@@ -1338,10 +1411,12 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
struct reiserfs_bitmap_info *info = SB_AP_BITMAP(sb) + bitmap;
struct buffer_head *bh;
- /* Way old format filesystems had the bitmaps packed up front.
- * I doubt there are any of these left, but just in case... */
+ /*
+ * Way old format filesystems had the bitmaps packed up front.
+ * I doubt there are any of these left, but just in case...
+ */
if (unlikely(test_bit(REISERFS_OLD_FORMAT,
- &(REISERFS_SB(sb)->s_properties))))
+ &REISERFS_SB(sb)->s_properties)))
block = REISERFS_SB(sb)->s_sbh->b_blocknr + 1 + bitmap;
else if (bitmap == 0)
block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1;
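
Behind the comment cleanups in get_bit_address() sits simple arithmetic: a bitmap block of blocksize bytes tracks blocksize << 3 blocks, so the bitmap number is the block number shifted down by (blocksize_bits + 3) and the bit offset is the remainder. A worked example in plain C; the function body is transcribed from the diff above, and the 4 KiB block size is just a sample value:

#include <stdio.h>

/* transcription of reiserfs get_bit_address() for illustration */
static void get_bit_address(unsigned long block, unsigned int blocksize_bits,
			    unsigned int *bmap_nr, unsigned int *offset)
{
	unsigned long blocksize = 1UL << blocksize_bits;

	/* bitmap block number = block / bits-per-bitmap-block */
	*bmap_nr = block >> (blocksize_bits + 3);
	/* bit offset within that bitmap block */
	*offset = block & ((blocksize << 3) - 1);
}

int main(void)
{
	unsigned int bmap_nr, offset;

	/* 4 KiB blocks: one bitmap block tracks 4096 * 8 = 32768 blocks */
	get_bit_address(100000, 12, &bmap_nr, &offset);
	printf("block 100000 -> bitmap %u, bit %u\n", bmap_nr, offset);
	/* prints: block 100000 -> bitmap 3, bit 1696 */
	return 0;
}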
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index af677353a3f..d9f5a60dd59 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -59,7 +59,10 @@ static inline bool is_privroot_deh(struct inode *dir, struct reiserfs_de_head *d
int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
{
- struct cpu_key pos_key; /* key of current position in the directory (key of directory entry) */
+
+ /* key of current position in the directory (key of directory entry) */
+ struct cpu_key pos_key;
+
INITIALIZE_PATH(path_to_entry);
struct buffer_head *bh;
int item_num, entry_num;
@@ -77,21 +80,28 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
reiserfs_check_lock_depth(inode->i_sb, "readdir");
- /* form key for search the next directory entry using f_pos field of
- file structure */
+ /*
+ * form key for search the next directory entry using
+ * f_pos field of file structure
+ */
make_cpu_key(&pos_key, inode, ctx->pos ?: DOT_OFFSET, TYPE_DIRENTRY, 3);
next_pos = cpu_key_k_offset(&pos_key);
path_to_entry.reada = PATH_READA;
while (1) {
- research:
- /* search the directory item, containing entry with specified key */
+research:
+ /*
+ * search the directory item, containing entry with
+ * specified key
+ */
search_res =
search_by_entry_key(inode->i_sb, &pos_key, &path_to_entry,
&de);
if (search_res == IO_ERROR) {
- // FIXME: we could just skip part of directory which could
- // not be read
+ /*
+ * FIXME: we could just skip part of directory
+ * which could not be read
+ */
ret = -EIO;
goto out;
}
@@ -102,41 +112,49 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
store_ih(&tmp_ih, ih);
/* we must have found item, that is item of this directory, */
- RFALSE(COMP_SHORT_KEYS(&(ih->ih_key), &pos_key),
+ RFALSE(COMP_SHORT_KEYS(&ih->ih_key, &pos_key),
"vs-9000: found item %h does not match to dir we readdir %K",
ih, &pos_key);
RFALSE(item_num > B_NR_ITEMS(bh) - 1,
"vs-9005 item_num == %d, item amount == %d",
item_num, B_NR_ITEMS(bh));
- /* and entry must be not more than number of entries in the item */
- RFALSE(I_ENTRY_COUNT(ih) < entry_num,
+ /*
+ * and entry must be not more than number of entries
+ * in the item
+ */
+ RFALSE(ih_entry_count(ih) < entry_num,
"vs-9010: entry number is too big %d (%d)",
- entry_num, I_ENTRY_COUNT(ih));
+ entry_num, ih_entry_count(ih));
+ /*
+ * go through all entries in the directory item beginning
+ * from the entry, that has been found
+ */
if (search_res == POSITION_FOUND
- || entry_num < I_ENTRY_COUNT(ih)) {
- /* go through all entries in the directory item beginning from the entry, that has been found */
+ || entry_num < ih_entry_count(ih)) {
struct reiserfs_de_head *deh =
B_I_DEH(bh, ih) + entry_num;
- for (; entry_num < I_ENTRY_COUNT(ih);
+ for (; entry_num < ih_entry_count(ih);
entry_num++, deh++) {
int d_reclen;
char *d_name;
ino_t d_ino;
loff_t cur_pos = deh_offset(deh);
+ /* it is hidden entry */
if (!de_visible(deh))
- /* it is hidden entry */
continue;
d_reclen = entry_length(bh, ih, entry_num);
d_name = B_I_DEH_ENTRY_FILE_NAME(bh, ih, deh);
if (d_reclen <= 0 ||
d_name + d_reclen > bh->b_data + bh->b_size) {
- /* There is corrupted data in entry,
- * We'd better stop here */
+ /*
+ * There is corrupted data in entry,
+ * We'd better stop here
+ */
pathrelse(&path_to_entry);
ret = -EIO;
goto out;
@@ -145,10 +163,10 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
if (!d_name[d_reclen - 1])
d_reclen = strlen(d_name);
+ /* too big to send back to VFS */
if (d_reclen >
REISERFS_MAX_NAME(inode->i_sb->
s_blocksize)) {
- /* too big to send back to VFS */
continue;
}
@@ -173,10 +191,14 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
goto research;
}
}
- // Note, that we copy name to user space via temporary
- // buffer (local_buf) because filldir will block if
- // user space buffer is swapped out. At that time
- // entry can move to somewhere else
+
+ /*
+ * Note, that we copy name to user space via
+ * temporary buffer (local_buf) because
+ * filldir will block if user space buffer is
+ * swapped out. At that time entry can move to
+ * somewhere else
+ */
memcpy(local_buf, d_name, d_reclen);
/*
@@ -209,22 +231,26 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
} /* for */
}
+ /* end of directory has been reached */
if (item_num != B_NR_ITEMS(bh) - 1)
- // end of directory has been reached
goto end;
- /* item we went through is last item of node. Using right
- delimiting key check is it directory end */
+ /*
+ * item we went through is last item of node. Using right
+ * delimiting key check is it directory end
+ */
rkey = get_rkey(&path_to_entry, inode->i_sb);
if (!comp_le_keys(rkey, &MIN_KEY)) {
- /* set pos_key to key, that is the smallest and greater
- that key of the last entry in the item */
+ /*
+ * set pos_key to key, that is the smallest and greater
+ * that key of the last entry in the item
+ */
set_cpu_key_k_offset(&pos_key, next_pos);
continue;
}
+ /* end of directory has been reached */
if (COMP_SHORT_KEYS(rkey, &pos_key)) {
- // end of directory has been reached
goto end;
}
@@ -248,71 +274,73 @@ static int reiserfs_readdir(struct file *file, struct dir_context *ctx)
return reiserfs_readdir_inode(file_inode(file), ctx);
}
-/* compose directory item containing "." and ".." entries (entries are
- not aligned to 4 byte boundary) */
-/* the last four params are LE */
+/*
+ * compose directory item containing "." and ".." entries (entries are
+ * not aligned to 4 byte boundary)
+ */
void make_empty_dir_item_v1(char *body, __le32 dirid, __le32 objid,
__le32 par_dirid, __le32 par_objid)
{
- struct reiserfs_de_head *deh;
+ struct reiserfs_de_head *dot, *dotdot;
memset(body, 0, EMPTY_DIR_SIZE_V1);
- deh = (struct reiserfs_de_head *)body;
+ dot = (struct reiserfs_de_head *)body;
+ dotdot = dot + 1;
/* direntry header of "." */
- put_deh_offset(&(deh[0]), DOT_OFFSET);
+ put_deh_offset(dot, DOT_OFFSET);
/* these two are from make_le_item_head, and are are LE */
- deh[0].deh_dir_id = dirid;
- deh[0].deh_objectid = objid;
- deh[0].deh_state = 0; /* Endian safe if 0 */
- put_deh_location(&(deh[0]), EMPTY_DIR_SIZE_V1 - strlen("."));
- mark_de_visible(&(deh[0]));
+ dot->deh_dir_id = dirid;
+ dot->deh_objectid = objid;
+ dot->deh_state = 0; /* Endian safe if 0 */
+ put_deh_location(dot, EMPTY_DIR_SIZE_V1 - strlen("."));
+ mark_de_visible(dot);
/* direntry header of ".." */
- put_deh_offset(&(deh[1]), DOT_DOT_OFFSET);
+ put_deh_offset(dotdot, DOT_DOT_OFFSET);
/* key of ".." for the root directory */
/* these two are from the inode, and are are LE */
- deh[1].deh_dir_id = par_dirid;
- deh[1].deh_objectid = par_objid;
- deh[1].deh_state = 0; /* Endian safe if 0 */
- put_deh_location(&(deh[1]), deh_location(&(deh[0])) - strlen(".."));
- mark_de_visible(&(deh[1]));
+ dotdot->deh_dir_id = par_dirid;
+ dotdot->deh_objectid = par_objid;
+ dotdot->deh_state = 0; /* Endian safe if 0 */
+ put_deh_location(dotdot, deh_location(dot) - strlen(".."));
+ mark_de_visible(dotdot);
/* copy ".." and "." */
- memcpy(body + deh_location(&(deh[0])), ".", 1);
- memcpy(body + deh_location(&(deh[1])), "..", 2);
+ memcpy(body + deh_location(dot), ".", 1);
+ memcpy(body + deh_location(dotdot), "..", 2);
}
/* compose directory item containing "." and ".." entries */
void make_empty_dir_item(char *body, __le32 dirid, __le32 objid,
__le32 par_dirid, __le32 par_objid)
{
- struct reiserfs_de_head *deh;
+ struct reiserfs_de_head *dot, *dotdot;
memset(body, 0, EMPTY_DIR_SIZE);
- deh = (struct reiserfs_de_head *)body;
+ dot = (struct reiserfs_de_head *)body;
+ dotdot = dot + 1;
/* direntry header of "." */
- put_deh_offset(&(deh[0]), DOT_OFFSET);
+ put_deh_offset(dot, DOT_OFFSET);
/* these two are from make_le_item_head, and are are LE */
- deh[0].deh_dir_id = dirid;
- deh[0].deh_objectid = objid;
- deh[0].deh_state = 0; /* Endian safe if 0 */
- put_deh_location(&(deh[0]), EMPTY_DIR_SIZE - ROUND_UP(strlen(".")));
- mark_de_visible(&(deh[0]));
+ dot->deh_dir_id = dirid;
+ dot->deh_objectid = objid;
+ dot->deh_state = 0; /* Endian safe if 0 */
+ put_deh_location(dot, EMPTY_DIR_SIZE - ROUND_UP(strlen(".")));
+ mark_de_visible(dot);
/* direntry header of ".." */
- put_deh_offset(&(deh[1]), DOT_DOT_OFFSET);
+ put_deh_offset(dotdot, DOT_DOT_OFFSET);
/* key of ".." for the root directory */
/* these two are from the inode, and are are LE */
- deh[1].deh_dir_id = par_dirid;
- deh[1].deh_objectid = par_objid;
- deh[1].deh_state = 0; /* Endian safe if 0 */
- put_deh_location(&(deh[1]),
- deh_location(&(deh[0])) - ROUND_UP(strlen("..")));
- mark_de_visible(&(deh[1]));
+ dotdot->deh_dir_id = par_dirid;
+ dotdot->deh_objectid = par_objid;
+ dotdot->deh_state = 0; /* Endian safe if 0 */
+ put_deh_location(dotdot, deh_location(dot) - ROUND_UP(strlen("..")));
+ mark_de_visible(dotdot);
/* copy ".." and "." */
- memcpy(body + deh_location(&(deh[0])), ".", 1);
- memcpy(body + deh_location(&(deh[1])), "..", 2);
+ memcpy(body + deh_location(dot), ".", 1);
+ memcpy(body + deh_location(dotdot), "..", 2);
}
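
The dot/dotdot rename above is cosmetic, but it is easier to review with the on-disk layout in mind: an empty directory item is two directory-entry heads at the front with the names "." and ".." packed at the tail, each deh_location pointing at its own name. A small model of the v1 layout; the struct fields and 16-byte head size are assumptions inferred from the code above, not copied from reiserfs.h:

#include <stdio.h>
#include <string.h>

/* simplified stand-in for struct reiserfs_de_head (assumed 16 bytes) */
struct de_head {
	unsigned int offset, dir_id, objectid;
	unsigned short location, state;
};

#define DEH_SIZE sizeof(struct de_head)
/* v1 empty dir item: two heads plus "." and ".." without padding */
#define EMPTY_DIR_SIZE_V1 (2 * DEH_SIZE + strlen(".") + strlen(".."))

int main(void)
{
	/* names are packed at the end of the item, ".." just before "." */
	size_t dot_loc    = EMPTY_DIR_SIZE_V1 - strlen(".");
	size_t dotdot_loc = dot_loc - strlen("..");

	printf("item size %zu: \".\" at %zu, \"..\" at %zu\n",
	       (size_t)EMPTY_DIR_SIZE_V1, dot_loc, dotdot_loc);
	/* prints: item size 35: "." at 34, ".." at 32 (given 16-byte heads) */
	return 0;
}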
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 9a3c68cf602..54fdf196bfb 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -2,18 +2,13 @@
* Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
*/
-/* Now we have all buffers that must be used in balancing of the tree */
-/* Further calculations can not cause schedule(), and thus the buffer */
-/* tree will be stable until the balancing will be finished */
-/* balance the tree according to the analysis made before, */
-/* and using buffers obtained after all above. */
-
-/**
- ** balance_leaf_when_delete
- ** balance_leaf
- ** do_balance
- **
- **/
+/*
+ * Now we have all buffers that must be used in balancing of the tree
+ * Further calculations can not cause schedule(), and thus the buffer
+ * tree will be stable until the balancing will be finished
+ * balance the tree according to the analysis made before,
+ * and using buffers obtained after all above.
+ */
#include <asm/uaccess.h>
#include <linux/time.h>
@@ -61,48 +56,190 @@ static inline void buffer_info_init_bh(struct tree_balance *tb,
inline void do_balance_mark_leaf_dirty(struct tree_balance *tb,
struct buffer_head *bh, int flag)
{
- journal_mark_dirty(tb->transaction_handle,
- tb->transaction_handle->t_super, bh);
+ journal_mark_dirty(tb->transaction_handle, bh);
}
#define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
#define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty
-/* summary:
- if deleting something ( tb->insert_size[0] < 0 )
- return(balance_leaf_when_delete()); (flag d handled here)
- else
- if lnum is larger than 0 we put items into the left node
- if rnum is larger than 0 we put items into the right node
- if snum1 is larger than 0 we put items into the new node s1
- if snum2 is larger than 0 we put items into the new node s2
-Note that all *num* count new items being created.
-
-It would be easier to read balance_leaf() if each of these summary
-lines was a separate procedure rather than being inlined. I think
-that there are many passages here and in balance_leaf_when_delete() in
-which two calls to one procedure can replace two passages, and it
-might save cache space and improve software maintenance costs to do so.
-
-Vladimir made the perceptive comment that we should offload most of
-the decision making in this function into fix_nodes/check_balance, and
-then create some sort of structure in tb that says what actions should
-be performed by do_balance.
-
--Hans */
-
-/* Balance leaf node in case of delete or cut: insert_size[0] < 0
+/*
+ * summary:
+ * if deleting something ( tb->insert_size[0] < 0 )
+ * return(balance_leaf_when_delete()); (flag d handled here)
+ * else
+ * if lnum is larger than 0 we put items into the left node
+ * if rnum is larger than 0 we put items into the right node
+ * if snum1 is larger than 0 we put items into the new node s1
+ * if snum2 is larger than 0 we put items into the new node s2
+ * Note that all *num* count new items being created.
+ */
+
+static void balance_leaf_when_delete_del(struct tree_balance *tb)
+{
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ int item_pos = PATH_LAST_POSITION(tb->tb_path);
+ struct buffer_info bi;
+#ifdef CONFIG_REISERFS_CHECK
+ struct item_head *ih = item_head(tbS0, item_pos);
+#endif
+
+ RFALSE(ih_item_len(ih) + IH_SIZE != -tb->insert_size[0],
+ "vs-12013: mode Delete, insert size %d, ih to be deleted %h",
+ -tb->insert_size[0], ih);
+
+ buffer_info_init_tbS0(tb, &bi);
+ leaf_delete_items(&bi, 0, item_pos, 1, -1);
+
+ if (!item_pos && tb->CFL[0]) {
+ if (B_NR_ITEMS(tbS0)) {
+ replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
+ } else {
+ if (!PATH_H_POSITION(tb->tb_path, 1))
+ replace_key(tb, tb->CFL[0], tb->lkey[0],
+ PATH_H_PPARENT(tb->tb_path, 0), 0);
+ }
+ }
+
+ RFALSE(!item_pos && !tb->CFL[0],
+ "PAP-12020: tb->CFL[0]==%p, tb->L[0]==%p", tb->CFL[0],
+ tb->L[0]);
+}
+
+/* cut item in S[0] */
+static void balance_leaf_when_delete_cut(struct tree_balance *tb)
+{
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ int item_pos = PATH_LAST_POSITION(tb->tb_path);
+ struct item_head *ih = item_head(tbS0, item_pos);
+ int pos_in_item = tb->tb_path->pos_in_item;
+ struct buffer_info bi;
+ buffer_info_init_tbS0(tb, &bi);
+
+ if (is_direntry_le_ih(ih)) {
+ /*
+ * UFS unlink semantics are such that you can only
+ * delete one directory entry at a time.
+ *
+ * when we cut a directory tb->insert_size[0] means
+ * number of entries to be cut (always 1)
+ */
+ tb->insert_size[0] = -1;
+ leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
+ -tb->insert_size[0]);
+
+ RFALSE(!item_pos && !pos_in_item && !tb->CFL[0],
+ "PAP-12030: can not change delimiting key. CFL[0]=%p",
+ tb->CFL[0]);
+
+ if (!item_pos && !pos_in_item && tb->CFL[0])
+ replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
+ } else {
+ leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
+ -tb->insert_size[0]);
+
+ RFALSE(!ih_item_len(ih),
+ "PAP-12035: cut must leave non-zero dynamic "
+ "length of item");
+ }
+}
+
+static int balance_leaf_when_delete_left(struct tree_balance *tb)
+{
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ int n = B_NR_ITEMS(tbS0);
+
+ /* L[0] must be joined with S[0] */
+ if (tb->lnum[0] == -1) {
+ /* R[0] must be also joined with S[0] */
+ if (tb->rnum[0] == -1) {
+ if (tb->FR[0] == PATH_H_PPARENT(tb->tb_path, 0)) {
+ /*
+ * all contents of all the
+ * 3 buffers will be in L[0]
+ */
+ if (PATH_H_POSITION(tb->tb_path, 1) == 0 &&
+ 1 < B_NR_ITEMS(tb->FR[0]))
+ replace_key(tb, tb->CFL[0],
+ tb->lkey[0], tb->FR[0], 1);
+
+ leaf_move_items(LEAF_FROM_S_TO_L, tb, n, -1,
+ NULL);
+ leaf_move_items(LEAF_FROM_R_TO_L, tb,
+ B_NR_ITEMS(tb->R[0]), -1,
+ NULL);
+
+ reiserfs_invalidate_buffer(tb, tbS0);
+ reiserfs_invalidate_buffer(tb, tb->R[0]);
+
+ return 0;
+ }
+
+ /* all contents of all the 3 buffers will be in R[0] */
+ leaf_move_items(LEAF_FROM_S_TO_R, tb, n, -1, NULL);
+ leaf_move_items(LEAF_FROM_L_TO_R, tb,
+ B_NR_ITEMS(tb->L[0]), -1, NULL);
+
+ /* right_delimiting_key is correct in R[0] */
+ replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
+
+ reiserfs_invalidate_buffer(tb, tbS0);
+ reiserfs_invalidate_buffer(tb, tb->L[0]);
+
+ return -1;
+ }
+
+ RFALSE(tb->rnum[0] != 0,
+ "PAP-12045: rnum must be 0 (%d)", tb->rnum[0]);
+ /* all contents of L[0] and S[0] will be in L[0] */
+ leaf_shift_left(tb, n, -1);
+
+ reiserfs_invalidate_buffer(tb, tbS0);
+
+ return 0;
+ }
+
+ /*
+ * a part of contents of S[0] will be in L[0] and
+ * the rest part of S[0] will be in R[0]
+ */
+
+ RFALSE((tb->lnum[0] + tb->rnum[0] < n) ||
+ (tb->lnum[0] + tb->rnum[0] > n + 1),
+ "PAP-12050: rnum(%d) and lnum(%d) and item "
+ "number(%d) in S[0] are not consistent",
+ tb->rnum[0], tb->lnum[0], n);
+ RFALSE((tb->lnum[0] + tb->rnum[0] == n) &&
+ (tb->lbytes != -1 || tb->rbytes != -1),
+ "PAP-12055: bad rbytes (%d)/lbytes (%d) "
+ "parameters when items are not split",
+ tb->rbytes, tb->lbytes);
+ RFALSE((tb->lnum[0] + tb->rnum[0] == n + 1) &&
+ (tb->lbytes < 1 || tb->rbytes != -1),
+ "PAP-12060: bad rbytes (%d)/lbytes (%d) "
+ "parameters when items are split",
+ tb->rbytes, tb->lbytes);
+
+ leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
+ leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
+
+ reiserfs_invalidate_buffer(tb, tbS0);
+
+ return 0;
+}
+
+/*
+ * Balance leaf node in case of delete or cut: insert_size[0] < 0
*
* lnum, rnum can have values >= -1
* -1 means that the neighbor must be joined with S
* 0 means that nothing should be done with the neighbor
- * >0 means to shift entirely or partly the specified number of items to the neighbor
+ * >0 means to shift entirely or partly the specified number of items
+ * to the neighbor
*/
static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
{
struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
int item_pos = PATH_LAST_POSITION(tb->tb_path);
- int pos_in_item = tb->tb_path->pos_in_item;
struct buffer_info bi;
int n;
struct item_head *ih;
@@ -114,1022 +251,1202 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
RFALSE(!tb->blknum[0] && !PATH_H_PPARENT(tb->tb_path, 0),
"PAP-12010: tree can not be empty");
- ih = B_N_PITEM_HEAD(tbS0, item_pos);
+ ih = item_head(tbS0, item_pos);
buffer_info_init_tbS0(tb, &bi);
/* Delete or truncate the item */
- switch (flag) {
- case M_DELETE: /* delete item in S[0] */
+ BUG_ON(flag != M_DELETE && flag != M_CUT);
+ if (flag == M_DELETE)
+ balance_leaf_when_delete_del(tb);
+ else /* M_CUT */
+ balance_leaf_when_delete_cut(tb);
- RFALSE(ih_item_len(ih) + IH_SIZE != -tb->insert_size[0],
- "vs-12013: mode Delete, insert size %d, ih to be deleted %h",
- -tb->insert_size[0], ih);
- leaf_delete_items(&bi, 0, item_pos, 1, -1);
+ /*
+ * the rule is that no shifting occurs unless by shifting
+ * a node can be freed
+ */
+ n = B_NR_ITEMS(tbS0);
- if (!item_pos && tb->CFL[0]) {
- if (B_NR_ITEMS(tbS0)) {
- replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0,
- 0);
- } else {
- if (!PATH_H_POSITION(tb->tb_path, 1))
- replace_key(tb, tb->CFL[0], tb->lkey[0],
- PATH_H_PPARENT(tb->tb_path,
- 0), 0);
- }
- }
- RFALSE(!item_pos && !tb->CFL[0],
- "PAP-12020: tb->CFL[0]==%p, tb->L[0]==%p", tb->CFL[0],
- tb->L[0]);
+ /* L[0] takes part in balancing */
+ if (tb->lnum[0])
+ return balance_leaf_when_delete_left(tb);
- break;
+ if (tb->rnum[0] == -1) {
+ /* all contents of R[0] and S[0] will be in R[0] */
+ leaf_shift_right(tb, n, -1);
+ reiserfs_invalidate_buffer(tb, tbS0);
+ return 0;
+ }
- case M_CUT:{ /* cut item in S[0] */
- if (is_direntry_le_ih(ih)) {
+ RFALSE(tb->rnum[0],
+ "PAP-12065: bad rnum parameter must be 0 (%d)", tb->rnum[0]);
+ return 0;
+}
- /* UFS unlink semantics are such that you can only delete one directory entry at a time. */
- /* when we cut a directory tb->insert_size[0] means number of entries to be cut (always 1) */
- tb->insert_size[0] = -1;
- leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
- -tb->insert_size[0]);
+static void balance_leaf_insert_left(struct tree_balance *tb,
+ struct item_head *ih, const char *body)
+{
+ int ret;
+ struct buffer_info bi;
+ int n = B_NR_ITEMS(tb->L[0]);
+
+ if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
+ /* part of new item falls into L[0] */
+ int new_item_len, shift;
+ int version;
+
+ ret = leaf_shift_left(tb, tb->lnum[0] - 1, -1);
+
+ /* Calculate item length to insert to S[0] */
+ new_item_len = ih_item_len(ih) - tb->lbytes;
+
+ /* Calculate and check item length to insert to L[0] */
+ put_ih_item_len(ih, ih_item_len(ih) - new_item_len);
+
+ RFALSE(ih_item_len(ih) <= 0,
+ "PAP-12080: there is nothing to insert into L[0]: "
+ "ih_item_len=%d", ih_item_len(ih));
+
+ /* Insert new item into L[0] */
+ buffer_info_init_left(tb, &bi);
+ leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body,
+ min_t(int, tb->zeroes_num, ih_item_len(ih)));
+
+ version = ih_version(ih);
+
+ /*
+ * Calculate key component, item length and body to
+ * insert into S[0]
+ */
+ shift = 0;
+ if (is_indirect_le_ih(ih))
+ shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
+
+ add_le_ih_k_offset(ih, tb->lbytes << shift);
+
+ put_ih_item_len(ih, new_item_len);
+ if (tb->lbytes > tb->zeroes_num) {
+ body += (tb->lbytes - tb->zeroes_num);
+ tb->zeroes_num = 0;
+ } else
+ tb->zeroes_num -= tb->lbytes;
+
+ RFALSE(ih_item_len(ih) <= 0,
+ "PAP-12085: there is nothing to insert into S[0]: "
+ "ih_item_len=%d", ih_item_len(ih));
+ } else {
+ /* new item in whole falls into L[0] */
+ /* Shift lnum[0]-1 items to L[0] */
+ ret = leaf_shift_left(tb, tb->lnum[0] - 1, tb->lbytes);
+
+ /* Insert new item into L[0] */
+ buffer_info_init_left(tb, &bi);
+ leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body,
+ tb->zeroes_num);
+ tb->insert_size[0] = 0;
+ tb->zeroes_num = 0;
+ }
+}
- RFALSE(!item_pos && !pos_in_item && !tb->CFL[0],
- "PAP-12030: can not change delimiting key. CFL[0]=%p",
- tb->CFL[0]);
+static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb,
+ struct item_head *ih,
+ const char *body)
+{
+ int n = B_NR_ITEMS(tb->L[0]);
+ struct buffer_info bi;
- if (!item_pos && !pos_in_item && tb->CFL[0]) {
- replace_key(tb, tb->CFL[0], tb->lkey[0],
- tbS0, 0);
- }
- } else {
- leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
- -tb->insert_size[0]);
+ RFALSE(tb->zeroes_num,
+ "PAP-12090: invalid parameter in case of a directory");
+
+ /* directory item */
+ if (tb->lbytes > tb->pos_in_item) {
+ /* new directory entry falls into L[0] */
+ struct item_head *pasted;
+ int ret, l_pos_in_item = tb->pos_in_item;
+
+ /*
+ * Shift lnum[0] - 1 items in whole.
+ * Shift lbytes - 1 entries from given directory item
+ */
+ ret = leaf_shift_left(tb, tb->lnum[0], tb->lbytes - 1);
+ if (ret && !tb->item_pos) {
+ pasted = item_head(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1);
+ l_pos_in_item += ih_entry_count(pasted) -
+ (tb->lbytes - 1);
+ }
- RFALSE(!ih_item_len(ih),
- "PAP-12035: cut must leave non-zero dynamic length of item");
- }
- break;
+ /* Append given directory entry to directory item */
+ buffer_info_init_left(tb, &bi);
+ leaf_paste_in_buffer(&bi, n + tb->item_pos - ret,
+ l_pos_in_item, tb->insert_size[0],
+ body, tb->zeroes_num);
+
+ /*
+ * previous string prepared space for pasting new entry,
+ * following string pastes this entry
+ */
+
+ /*
+ * when we have merge directory item, pos_in_item
+ * has been changed too
+ */
+
+ /* paste new directory entry. 1 is entry number */
+ leaf_paste_entries(&bi, n + tb->item_pos - ret,
+ l_pos_in_item, 1,
+ (struct reiserfs_de_head *) body,
+ body + DEH_SIZE, tb->insert_size[0]);
+ tb->insert_size[0] = 0;
+ } else {
+ /* new directory item doesn't fall into L[0] */
+ /*
+ * Shift lnum[0]-1 items in whole. Shift lbytes
+ * directory entries from directory item number lnum[0]
+ */
+ leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
+ }
+
+ /* Calculate new position to append in item body */
+ tb->pos_in_item -= tb->lbytes;
+}
+
+static void balance_leaf_paste_left_shift(struct tree_balance *tb,
+ struct item_head *ih,
+ const char *body)
+{
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ int n = B_NR_ITEMS(tb->L[0]);
+ struct buffer_info bi;
+
+ if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) {
+ balance_leaf_paste_left_shift_dirent(tb, ih, body);
+ return;
+ }
+
+ RFALSE(tb->lbytes <= 0,
+ "PAP-12095: there is nothing to shift to L[0]. "
+ "lbytes=%d", tb->lbytes);
+ RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)),
+ "PAP-12100: incorrect position to paste: "
+ "item_len=%d, pos_in_item=%d",
+ ih_item_len(item_head(tbS0, tb->item_pos)), tb->pos_in_item);
+
+ /* appended item will be in L[0] in whole */
+ if (tb->lbytes >= tb->pos_in_item) {
+ struct item_head *tbS0_pos_ih, *tbL0_ih;
+ struct item_head *tbS0_0_ih;
+ struct reiserfs_key *left_delim_key;
+ int ret, l_n, version, temp_l;
+
+ tbS0_pos_ih = item_head(tbS0, tb->item_pos);
+ tbS0_0_ih = item_head(tbS0, 0);
+
+ /*
+ * this bytes number must be appended
+ * to the last item of L[h]
+ */
+ l_n = tb->lbytes - tb->pos_in_item;
+
+ /* Calculate new insert_size[0] */
+ tb->insert_size[0] -= l_n;
+
+ RFALSE(tb->insert_size[0] <= 0,
+ "PAP-12105: there is nothing to paste into "
+ "L[0]. insert_size=%d", tb->insert_size[0]);
+
+ ret = leaf_shift_left(tb, tb->lnum[0],
+ ih_item_len(tbS0_pos_ih));
+
+ tbL0_ih = item_head(tb->L[0], n + tb->item_pos - ret);
+
+ /* Append to body of item in L[0] */
+ buffer_info_init_left(tb, &bi);
+ leaf_paste_in_buffer(&bi, n + tb->item_pos - ret,
+ ih_item_len(tbL0_ih), l_n, body,
+ min_t(int, l_n, tb->zeroes_num));
+
+ /*
+ * 0-th item in S0 can be only of DIRECT type
+ * when l_n != 0
+ */
+ temp_l = l_n;
+
+ RFALSE(ih_item_len(tbS0_0_ih),
+ "PAP-12106: item length must be 0");
+ RFALSE(comp_short_le_keys(&tbS0_0_ih->ih_key,
+ leaf_key(tb->L[0], n + tb->item_pos - ret)),
+ "PAP-12107: items must be of the same file");
+
+ if (is_indirect_le_ih(tbL0_ih)) {
+ int shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
+ temp_l = l_n << shift;
}
+ /* update key of first item in S0 */
+ version = ih_version(tbS0_0_ih);
+ add_le_key_k_offset(version, &tbS0_0_ih->ih_key, temp_l);
+
+ /* update left delimiting key */
+ left_delim_key = internal_key(tb->CFL[0], tb->lkey[0]);
+ add_le_key_k_offset(version, left_delim_key, temp_l);
+
+ /*
+ * Calculate new body, position in item and
+ * insert_size[0]
+ */
+ if (l_n > tb->zeroes_num) {
+ body += (l_n - tb->zeroes_num);
+ tb->zeroes_num = 0;
+ } else
+ tb->zeroes_num -= l_n;
+ tb->pos_in_item = 0;
+
+ RFALSE(comp_short_le_keys(&tbS0_0_ih->ih_key,
+ leaf_key(tb->L[0],
+ B_NR_ITEMS(tb->L[0]) - 1)) ||
+ !op_is_left_mergeable(leaf_key(tbS0, 0), tbS0->b_size) ||
+ !op_is_left_mergeable(left_delim_key, tbS0->b_size),
+ "PAP-12120: item must be merge-able with left "
+ "neighboring item");
+ } else {
+ /* only part of the appended item will be in L[0] */
+
+ /* Calculate position in item for append in S[0] */
+ tb->pos_in_item -= tb->lbytes;
+
+ RFALSE(tb->pos_in_item <= 0,
+ "PAP-12125: no place for paste. pos_in_item=%d",
+ tb->pos_in_item);
+
+ /*
+ * Shift lnum[0] - 1 items in whole.
+ * Shift lbytes - 1 byte from item number lnum[0]
+ */
+ leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
+ }
+}
- default:
- print_cur_tb("12040");
- reiserfs_panic(tb->tb_sb, "PAP-12040",
- "unexpected mode: %s(%d)",
- (flag ==
- M_PASTE) ? "PASTE" : ((flag ==
- M_INSERT) ? "INSERT" :
- "UNKNOWN"), flag);
+
+/* appended item will be in L[0] in whole */
+static void balance_leaf_paste_left_whole(struct tree_balance *tb,
+ struct item_head *ih,
+ const char *body)
+{
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ int n = B_NR_ITEMS(tb->L[0]);
+ struct buffer_info bi;
+ struct item_head *pasted;
+ int ret;
+
+ /* if we paste into first item of S[0] and it is left mergable */
+ if (!tb->item_pos &&
+ op_is_left_mergeable(leaf_key(tbS0, 0), tbS0->b_size)) {
+ /*
+ * then increment pos_in_item by the size of the
+ * last item in L[0]
+ */
+ pasted = item_head(tb->L[0], n - 1);
+ if (is_direntry_le_ih(pasted))
+ tb->pos_in_item += ih_entry_count(pasted);
+ else
+ tb->pos_in_item += ih_item_len(pasted);
}
- /* the rule is that no shifting occurs unless by shifting a node can be freed */
- n = B_NR_ITEMS(tbS0);
- if (tb->lnum[0]) { /* L[0] takes part in balancing */
- if (tb->lnum[0] == -1) { /* L[0] must be joined with S[0] */
- if (tb->rnum[0] == -1) { /* R[0] must be also joined with S[0] */
- if (tb->FR[0] == PATH_H_PPARENT(tb->tb_path, 0)) {
- /* all contents of all the 3 buffers will be in L[0] */
- if (PATH_H_POSITION(tb->tb_path, 1) == 0
- && 1 < B_NR_ITEMS(tb->FR[0]))
- replace_key(tb, tb->CFL[0],
- tb->lkey[0],
- tb->FR[0], 1);
-
- leaf_move_items(LEAF_FROM_S_TO_L, tb, n,
- -1, NULL);
- leaf_move_items(LEAF_FROM_R_TO_L, tb,
- B_NR_ITEMS(tb->R[0]),
- -1, NULL);
-
- reiserfs_invalidate_buffer(tb, tbS0);
- reiserfs_invalidate_buffer(tb,
- tb->R[0]);
-
- return 0;
- }
- /* all contents of all the 3 buffers will be in R[0] */
- leaf_move_items(LEAF_FROM_S_TO_R, tb, n, -1,
- NULL);
- leaf_move_items(LEAF_FROM_L_TO_R, tb,
- B_NR_ITEMS(tb->L[0]), -1, NULL);
+ /*
+ * Shift lnum[0] - 1 items in whole.
+ * Shift lbytes - 1 byte from item number lnum[0]
+ */
+ ret = leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
+
+ /* Append to body of item in L[0] */
+ buffer_info_init_left(tb, &bi);
+ leaf_paste_in_buffer(&bi, n + tb->item_pos - ret, tb->pos_in_item,
+ tb->insert_size[0], body, tb->zeroes_num);
+
+ /* if appended item is directory, paste entry */
+ pasted = item_head(tb->L[0], n + tb->item_pos - ret);
+ if (is_direntry_le_ih(pasted))
+ leaf_paste_entries(&bi, n + tb->item_pos - ret,
+ tb->pos_in_item, 1,
+ (struct reiserfs_de_head *)body,
+ body + DEH_SIZE, tb->insert_size[0]);
+
+ /*
+ * if appended item is indirect item, put unformatted node
+ * into un list
+ */
+ if (is_indirect_le_ih(pasted))
+ set_ih_free_space(pasted, 0);
- /* right_delimiting_key is correct in R[0] */
- replace_key(tb, tb->CFR[0], tb->rkey[0],
- tb->R[0], 0);
+ tb->insert_size[0] = 0;
+ tb->zeroes_num = 0;
+}
- reiserfs_invalidate_buffer(tb, tbS0);
- reiserfs_invalidate_buffer(tb, tb->L[0]);
+static void balance_leaf_paste_left(struct tree_balance *tb,
+ struct item_head *ih, const char *body)
+{
+ /* we must shift the part of the appended item */
+ if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1)
+ balance_leaf_paste_left_shift(tb, ih, body);
+ else
+ balance_leaf_paste_left_whole(tb, ih, body);
+}
- return -1;
- }
+/* Shift lnum[0] items from S[0] to the left neighbor L[0] */
+static void balance_leaf_left(struct tree_balance *tb, struct item_head *ih,
+ const char *body, int flag)
+{
+ if (tb->lnum[0] <= 0)
+ return;
- RFALSE(tb->rnum[0] != 0,
- "PAP-12045: rnum must be 0 (%d)", tb->rnum[0]);
- /* all contents of L[0] and S[0] will be in L[0] */
- leaf_shift_left(tb, n, -1);
+ /* new item or it part falls to L[0], shift it too */
+ if (tb->item_pos < tb->lnum[0]) {
+ BUG_ON(flag != M_INSERT && flag != M_PASTE);
+
+ if (flag == M_INSERT)
+ balance_leaf_insert_left(tb, ih, body);
+ else /* M_PASTE */
+ balance_leaf_paste_left(tb, ih, body);
+ } else
+ /* new item doesn't fall into L[0] */
+ leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
+}
- reiserfs_invalidate_buffer(tb, tbS0);
- return 0;
+static void balance_leaf_insert_right(struct tree_balance *tb,
+ struct item_head *ih, const char *body)
+{
+
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ int n = B_NR_ITEMS(tbS0);
+ struct buffer_info bi;
+ int ret;
+
+ /* new item or part of it doesn't fall into R[0] */
+ if (n - tb->rnum[0] >= tb->item_pos) {
+ leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
+ return;
+ }
+
+ /* new item or its part falls to R[0] */
+
+ /* part of new item falls into R[0] */
+ if (tb->item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) {
+ loff_t old_key_comp, old_len, r_zeroes_number;
+ const char *r_body;
+ int version, shift;
+ loff_t offset;
+
+ leaf_shift_right(tb, tb->rnum[0] - 1, -1);
+
+ version = ih_version(ih);
+
+ /* Remember key component and item length */
+ old_key_comp = le_ih_k_offset(ih);
+ old_len = ih_item_len(ih);
+
+ /*
+ * Calculate key component and item length to insert
+ * into R[0]
+ */
+ shift = 0;
+ if (is_indirect_le_ih(ih))
+ shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
+ offset = le_ih_k_offset(ih) + ((old_len - tb->rbytes) << shift);
+ set_le_ih_k_offset(ih, offset);
+ put_ih_item_len(ih, tb->rbytes);
+
+ /* Insert part of the item into R[0] */
+ buffer_info_init_right(tb, &bi);
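+		/*
+		 * the item body is logically zeroes_num zero bytes
+		 * followed by the bytes at body; carve off the tail that
+		 * goes to R[0], splitting the zero prefix with it
+		 */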
+ if ((old_len - tb->rbytes) > tb->zeroes_num) {
+ r_zeroes_number = 0;
+ r_body = body + (old_len - tb->rbytes) - tb->zeroes_num;
+ } else {
+ r_body = body;
+ r_zeroes_number = tb->zeroes_num -
+ (old_len - tb->rbytes);
+ tb->zeroes_num -= r_zeroes_number;
}
- /* a part of contents of S[0] will be in L[0] and the rest part of S[0] will be in R[0] */
-
- RFALSE((tb->lnum[0] + tb->rnum[0] < n) ||
- (tb->lnum[0] + tb->rnum[0] > n + 1),
- "PAP-12050: rnum(%d) and lnum(%d) and item number(%d) in S[0] are not consistent",
- tb->rnum[0], tb->lnum[0], n);
- RFALSE((tb->lnum[0] + tb->rnum[0] == n) &&
- (tb->lbytes != -1 || tb->rbytes != -1),
- "PAP-12055: bad rbytes (%d)/lbytes (%d) parameters when items are not split",
- tb->rbytes, tb->lbytes);
- RFALSE((tb->lnum[0] + tb->rnum[0] == n + 1) &&
- (tb->lbytes < 1 || tb->rbytes != -1),
- "PAP-12060: bad rbytes (%d)/lbytes (%d) parameters when items are split",
- tb->rbytes, tb->lbytes);
- leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
+ leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeroes_number);
+
+ /* Replace right delimiting key by first key in R[0] */
+ replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
+
+ /*
+ * Calculate key component and item length to
+ * insert into S[0]
+ */
+ set_le_ih_k_offset(ih, old_key_comp);
+ put_ih_item_len(ih, old_len - tb->rbytes);
+
+ tb->insert_size[0] -= tb->rbytes;
+
+ } else {
+ /* whole new item falls into R[0] */
+
+ /* Shift rnum[0]-1 items to R[0] */
+ ret = leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);
+
+ /* Insert new item into R[0] */
+ buffer_info_init_right(tb, &bi);
+ leaf_insert_into_buf(&bi, tb->item_pos - n + tb->rnum[0] - 1,
+ ih, body, tb->zeroes_num);
+
+ if (tb->item_pos - n + tb->rnum[0] - 1 == 0)
+ replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
+
+ tb->zeroes_num = tb->insert_size[0] = 0;
+ }
+}
+
+static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb,
+ struct item_head *ih, const char *body)
+{
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ struct buffer_info bi;
+ int entry_count;
+
+ RFALSE(tb->zeroes_num,
+ "PAP-12145: invalid parameter in case of a directory");
+ entry_count = ih_entry_count(item_head(tbS0, tb->item_pos));
+
+ /* new directory entry falls into R[0] */
+ if (entry_count - tb->rbytes < tb->pos_in_item) {
+ int paste_entry_position;
+
+ RFALSE(tb->rbytes - 1 >= entry_count || !tb->insert_size[0],
+		       "PAP-12150: not enough entries to shift to R[0]: "
+ "rbytes=%d, entry_count=%d", tb->rbytes, entry_count);
+
+ /*
+ * Shift rnum[0]-1 items in whole.
+ * Shift rbytes-1 directory entries from directory
+ * item number rnum[0]
+ */
+ leaf_shift_right(tb, tb->rnum[0], tb->rbytes - 1);
+
+ /* Paste given directory entry to directory item */
+ paste_entry_position = tb->pos_in_item - entry_count +
+ tb->rbytes - 1;
+ buffer_info_init_right(tb, &bi);
+ leaf_paste_in_buffer(&bi, 0, paste_entry_position,
+ tb->insert_size[0], body, tb->zeroes_num);
+
+ /* paste entry */
+ leaf_paste_entries(&bi, 0, paste_entry_position, 1,
+ (struct reiserfs_de_head *) body,
+ body + DEH_SIZE, tb->insert_size[0]);
+
+ /* change delimiting keys */
+ if (paste_entry_position == 0)
+ replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
+
+ tb->insert_size[0] = 0;
+ tb->pos_in_item++;
+ } else {
+ /* new directory entry doesn't fall into R[0] */
leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
+ }
+}
- reiserfs_invalidate_buffer(tb, tbS0);
+static void balance_leaf_paste_right_shift(struct tree_balance *tb,
+ struct item_head *ih, const char *body)
+{
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ int n_shift, n_rem, r_zeroes_number, version;
+ unsigned long temp_rem;
+ const char *r_body;
+ struct buffer_info bi;
- return 0;
+ /* we append to directory item */
+ if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) {
+ balance_leaf_paste_right_shift_dirent(tb, ih, body);
+ return;
}
- if (tb->rnum[0] == -1) {
- /* all contents of R[0] and S[0] will be in R[0] */
- leaf_shift_right(tb, n, -1);
- reiserfs_invalidate_buffer(tb, tbS0);
- return 0;
+ /* regular object */
+
+ /*
+ * Calculate number of bytes which must be shifted
+ * from appended item
+ */
+ n_shift = tb->rbytes - tb->insert_size[0];
+ if (n_shift < 0)
+ n_shift = 0;
+
+ RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)),
+	       "PAP-12155: invalid position to paste. pos_in_item=%d, "
+	       "ih_item_len=%d", tb->pos_in_item,
+	       ih_item_len(item_head(tbS0, tb->item_pos)));
+
+ leaf_shift_right(tb, tb->rnum[0], n_shift);
+
+ /*
+ * Calculate number of bytes which must remain in body
+ * after appending to R[0]
+ */
+ n_rem = tb->insert_size[0] - tb->rbytes;
+ if (n_rem < 0)
+ n_rem = 0;
+
+ temp_rem = n_rem;
+
+ version = ih_version(item_head(tb->R[0], 0));
+
+ if (is_indirect_le_key(version, leaf_key(tb->R[0], 0))) {
+ int shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
+ temp_rem = n_rem << shift;
}
- RFALSE(tb->rnum[0],
- "PAP-12065: bad rnum parameter must be 0 (%d)", tb->rnum[0]);
- return 0;
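+	/*
+	 * the n_rem bytes that stay behind in S[0] push R[0]'s contents
+	 * forward in the object, so advance the key of R[0] and the
+	 * right delimiting key by that amount
+	 */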
+ add_le_key_k_offset(version, leaf_key(tb->R[0], 0), temp_rem);
+ add_le_key_k_offset(version, internal_key(tb->CFR[0], tb->rkey[0]),
+ temp_rem);
+
+ do_balance_mark_internal_dirty(tb, tb->CFR[0], 0);
+
+ /* Append part of body into R[0] */
+ buffer_info_init_right(tb, &bi);
+ if (n_rem > tb->zeroes_num) {
+ r_zeroes_number = 0;
+ r_body = body + n_rem - tb->zeroes_num;
+ } else {
+ r_body = body;
+ r_zeroes_number = tb->zeroes_num - n_rem;
+ tb->zeroes_num -= r_zeroes_number;
+ }
+
+ leaf_paste_in_buffer(&bi, 0, n_shift, tb->insert_size[0] - n_rem,
+ r_body, r_zeroes_number);
+
+ if (is_indirect_le_ih(item_head(tb->R[0], 0)))
+ set_ih_free_space(item_head(tb->R[0], 0), 0);
+
+ tb->insert_size[0] = n_rem;
+ if (!n_rem)
+ tb->pos_in_item++;
}
-static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item header of inserted item (this is on little endian) */
- const char *body, /* body of inserted item or bytes to paste */
- int flag, /* i - insert, d - delete, c - cut, p - paste
- (see comment to do_balance) */
- struct item_head *insert_key, /* in our processing of one level we sometimes determine what
- must be inserted into the next higher level. This insertion
- consists of a key or two keys and their corresponding
- pointers */
- struct buffer_head **insert_ptr /* inserted node-ptrs for the next level */
- )
+static void balance_leaf_paste_right_whole(struct tree_balance *tb,
+ struct item_head *ih, const char *body)
{
struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
- int item_pos = PATH_LAST_POSITION(tb->tb_path); /* index into the array of item headers in S[0]
- of the affected item */
+ int n = B_NR_ITEMS(tbS0);
+ struct item_head *pasted;
struct buffer_info bi;
- struct buffer_head *S_new[2]; /* new nodes allocated to hold what could not fit into S */
- int snum[2]; /* number of items that will be placed
- into S_new (includes partially shifted
- items) */
- int sbytes[2]; /* if an item is partially shifted into S_new then
- if it is a directory item
- it is the number of entries from the item that are shifted into S_new
- else
- it is the number of bytes from the item that are shifted into S_new
- */
- int n, i;
- int ret_val;
- int pos_in_item;
- int zeros_num;
- PROC_INFO_INC(tb->tb_sb, balance_at[0]);
+ buffer_info_init_right(tb, &bi);
+ leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
+
+ /* append item in R[0] */
+ if (tb->pos_in_item >= 0) {
+ buffer_info_init_right(tb, &bi);
+ leaf_paste_in_buffer(&bi, tb->item_pos - n + tb->rnum[0],
+ tb->pos_in_item, tb->insert_size[0], body,
+ tb->zeroes_num);
+ }
- /* Make balance in case insert_size[0] < 0 */
- if (tb->insert_size[0] < 0)
- return balance_leaf_when_delete(tb, flag);
+ /* paste new entry, if item is directory item */
+ pasted = item_head(tb->R[0], tb->item_pos - n + tb->rnum[0]);
+ if (is_direntry_le_ih(pasted) && tb->pos_in_item >= 0) {
+ leaf_paste_entries(&bi, tb->item_pos - n + tb->rnum[0],
+ tb->pos_in_item, 1,
+ (struct reiserfs_de_head *)body,
+ body + DEH_SIZE, tb->insert_size[0]);
- zeros_num = 0;
- if (flag == M_INSERT && !body)
- zeros_num = ih_item_len(ih);
+ if (!tb->pos_in_item) {
- pos_in_item = tb->tb_path->pos_in_item;
- /* for indirect item pos_in_item is measured in unformatted node
- pointers. Recalculate to bytes */
- if (flag != M_INSERT
- && is_indirect_le_ih(B_N_PITEM_HEAD(tbS0, item_pos)))
- pos_in_item *= UNFM_P_SIZE;
-
- if (tb->lnum[0] > 0) {
- /* Shift lnum[0] items from S[0] to the left neighbor L[0] */
- if (item_pos < tb->lnum[0]) {
- /* new item or it part falls to L[0], shift it too */
- n = B_NR_ITEMS(tb->L[0]);
-
- switch (flag) {
- case M_INSERT: /* insert item into L[0] */
-
- if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
- /* part of new item falls into L[0] */
- int new_item_len;
- int version;
-
- ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, -1);
-
- /* Calculate item length to insert to S[0] */
- new_item_len = ih_item_len(ih) - tb->lbytes;
- /* Calculate and check item length to insert to L[0] */
- put_ih_item_len(ih, ih_item_len(ih) - new_item_len);
-
- RFALSE(ih_item_len(ih) <= 0,
- "PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d",
- ih_item_len(ih));
-
- /* Insert new item into L[0] */
- buffer_info_init_left(tb, &bi);
- leaf_insert_into_buf(&bi,
- n + item_pos - ret_val, ih, body,
- zeros_num > ih_item_len(ih) ? ih_item_len(ih) : zeros_num);
-
- version = ih_version(ih);
-
- /* Calculate key component, item length and body to insert into S[0] */
- set_le_ih_k_offset(ih, le_ih_k_offset(ih) +
- (tb-> lbytes << (is_indirect_le_ih(ih) ? tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT : 0)));
-
- put_ih_item_len(ih, new_item_len);
- if (tb->lbytes > zeros_num) {
- body += (tb->lbytes - zeros_num);
- zeros_num = 0;
- } else
- zeros_num -= tb->lbytes;
-
- RFALSE(ih_item_len(ih) <= 0,
- "PAP-12085: there is nothing to insert into S[0]: ih_item_len=%d",
- ih_item_len(ih));
- } else {
- /* new item in whole falls into L[0] */
- /* Shift lnum[0]-1 items to L[0] */
- ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, tb->lbytes);
- /* Insert new item into L[0] */
- buffer_info_init_left(tb, &bi);
- leaf_insert_into_buf(&bi, n + item_pos - ret_val, ih, body, zeros_num);
- tb->insert_size[0] = 0;
- zeros_num = 0;
- }
- break;
-
- case M_PASTE: /* append item in L[0] */
-
- if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
- /* we must shift the part of the appended item */
- if (is_direntry_le_ih(B_N_PITEM_HEAD(tbS0, item_pos))) {
-
- RFALSE(zeros_num,
- "PAP-12090: invalid parameter in case of a directory");
- /* directory item */
- if (tb->lbytes > pos_in_item) {
- /* new directory entry falls into L[0] */
- struct item_head *pasted;
- int l_pos_in_item = pos_in_item;
-
- /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 entries from given directory item */
- ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes-1);
- if (ret_val && !item_pos) {
- pasted = B_N_PITEM_HEAD(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1);
- l_pos_in_item += I_ENTRY_COUNT(pasted) - (tb->lbytes -1);
- }
-
- /* Append given directory entry to directory item */
- buffer_info_init_left(tb, &bi);
- leaf_paste_in_buffer(&bi, n + item_pos - ret_val, l_pos_in_item, tb->insert_size[0], body, zeros_num);
-
- /* previous string prepared space for pasting new entry, following string pastes this entry */
-
- /* when we have merge directory item, pos_in_item has been changed too */
-
- /* paste new directory entry. 1 is entry number */
- leaf_paste_entries(&bi, n + item_pos - ret_val, l_pos_in_item,
- 1, (struct reiserfs_de_head *) body,
- body + DEH_SIZE, tb->insert_size[0]);
- tb->insert_size[0] = 0;
- } else {
- /* new directory item doesn't fall into L[0] */
- /* Shift lnum[0]-1 items in whole. Shift lbytes directory entries from directory item number lnum[0] */
- leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
- }
- /* Calculate new position to append in item body */
- pos_in_item -= tb->lbytes;
- } else {
- /* regular object */
- RFALSE(tb->lbytes <= 0, "PAP-12095: there is nothing to shift to L[0]. lbytes=%d", tb->lbytes);
- RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),
- "PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d",
- ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),pos_in_item);
-
- if (tb->lbytes >= pos_in_item) {
- /* appended item will be in L[0] in whole */
- int l_n;
-
- /* this bytes number must be appended to the last item of L[h] */
- l_n = tb->lbytes - pos_in_item;
-
- /* Calculate new insert_size[0] */
- tb->insert_size[0] -= l_n;
-
- RFALSE(tb->insert_size[0] <= 0,
- "PAP-12105: there is nothing to paste into L[0]. insert_size=%d",
- tb->insert_size[0]);
- ret_val = leaf_shift_left(tb, tb->lnum[0], ih_item_len
- (B_N_PITEM_HEAD(tbS0, item_pos)));
- /* Append to body of item in L[0] */
- buffer_info_init_left(tb, &bi);
- leaf_paste_in_buffer
- (&bi, n + item_pos - ret_val, ih_item_len
- (B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val)),
- l_n, body,
- zeros_num > l_n ? l_n : zeros_num);
- /* 0-th item in S0 can be only of DIRECT type when l_n != 0 */
- {
- int version;
- int temp_l = l_n;
-
- RFALSE(ih_item_len(B_N_PITEM_HEAD(tbS0, 0)),
- "PAP-12106: item length must be 0");
- RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY
- (tb->L[0], n + item_pos - ret_val)),
- "PAP-12107: items must be of the same file");
- if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val))) {
- temp_l = l_n << (tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT);
- }
- /* update key of first item in S0 */
- version = ih_version(B_N_PITEM_HEAD(tbS0, 0));
- set_le_key_k_offset(version, B_N_PKEY(tbS0, 0),
- le_key_k_offset(version,B_N_PKEY(tbS0, 0)) + temp_l);
- /* update left delimiting key */
- set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]),
- le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0])) + temp_l);
- }
-
- /* Calculate new body, position in item and insert_size[0] */
- if (l_n > zeros_num) {
- body += (l_n - zeros_num);
- zeros_num = 0;
- } else
- zeros_num -= l_n;
- pos_in_item = 0;
-
- RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1))
- || !op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)
- || !op_is_left_mergeable(B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]), tbS0->b_size),
- "PAP-12120: item must be merge-able with left neighboring item");
- } else { /* only part of the appended item will be in L[0] */
-
- /* Calculate position in item for append in S[0] */
- pos_in_item -= tb->lbytes;
-
- RFALSE(pos_in_item <= 0, "PAP-12125: no place for paste. pos_in_item=%d", pos_in_item);
-
- /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */
- leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
- }
- }
- } else { /* appended item will be in L[0] in whole */
-
- struct item_head *pasted;
-
- if (!item_pos && op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)) { /* if we paste into first item of S[0] and it is left mergable */
- /* then increment pos_in_item by the size of the last item in L[0] */
- pasted = B_N_PITEM_HEAD(tb->L[0], n - 1);
- if (is_direntry_le_ih(pasted))
- pos_in_item += ih_entry_count(pasted);
- else
- pos_in_item += ih_item_len(pasted);
- }
-
- /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */
- ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
- /* Append to body of item in L[0] */
- buffer_info_init_left(tb, &bi);
- leaf_paste_in_buffer(&bi, n + item_pos - ret_val,
- pos_in_item,
- tb->insert_size[0],
- body, zeros_num);
-
- /* if appended item is directory, paste entry */
- pasted = B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val);
- if (is_direntry_le_ih(pasted))
- leaf_paste_entries(&bi, n + item_pos - ret_val,
- pos_in_item, 1,
- (struct reiserfs_de_head *) body,
- body + DEH_SIZE,
- tb->insert_size[0]);
- /* if appended item is indirect item, put unformatted node into un list */
- if (is_indirect_le_ih(pasted))
- set_ih_free_space(pasted, 0);
- tb->insert_size[0] = 0;
- zeros_num = 0;
- }
- break;
- default: /* cases d and t */
- reiserfs_panic(tb->tb_sb, "PAP-12130",
- "lnum > 0: unexpected mode: "
- " %s(%d)",
- (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
- }
- } else {
- /* new item doesn't fall into L[0] */
- leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
+ RFALSE(tb->item_pos - n + tb->rnum[0],
+ "PAP-12165: directory item must be first "
+ "item of node when pasting is in 0th position");
+
+ /* update delimiting keys */
+ replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
}
}
- /* tb->lnum[0] > 0 */
- /* Calculate new item position */
- item_pos -= (tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0));
-
- if (tb->rnum[0] > 0) {
- /* shift rnum[0] items from S[0] to the right neighbor R[0] */
- n = B_NR_ITEMS(tbS0);
- switch (flag) {
-
- case M_INSERT: /* insert item */
- if (n - tb->rnum[0] < item_pos) { /* new item or its part falls to R[0] */
- if (item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) { /* part of new item falls into R[0] */
- loff_t old_key_comp, old_len, r_zeros_number;
- const char *r_body;
- int version;
- loff_t offset;
-
- leaf_shift_right(tb, tb->rnum[0] - 1, -1);
-
- version = ih_version(ih);
- /* Remember key component and item length */
- old_key_comp = le_ih_k_offset(ih);
- old_len = ih_item_len(ih);
-
- /* Calculate key component and item length to insert into R[0] */
- offset = le_ih_k_offset(ih) + ((old_len - tb->rbytes) << (is_indirect_le_ih(ih) ? tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT : 0));
- set_le_ih_k_offset(ih, offset);
- put_ih_item_len(ih, tb->rbytes);
- /* Insert part of the item into R[0] */
- buffer_info_init_right(tb, &bi);
- if ((old_len - tb->rbytes) > zeros_num) {
- r_zeros_number = 0;
- r_body = body + (old_len - tb->rbytes) - zeros_num;
- } else {
- r_body = body;
- r_zeros_number = zeros_num - (old_len - tb->rbytes);
- zeros_num -= r_zeros_number;
- }
-
- leaf_insert_into_buf(&bi, 0, ih, r_body,
- r_zeros_number);
-
- /* Replace right delimiting key by first key in R[0] */
- replace_key(tb, tb->CFR[0], tb->rkey[0],
- tb->R[0], 0);
-
- /* Calculate key component and item length to insert into S[0] */
- set_le_ih_k_offset(ih, old_key_comp);
- put_ih_item_len(ih, old_len - tb->rbytes);
-
- tb->insert_size[0] -= tb->rbytes;
-
- } else { /* whole new item falls into R[0] */
-
- /* Shift rnum[0]-1 items to R[0] */
- ret_val = leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);
- /* Insert new item into R[0] */
- buffer_info_init_right(tb, &bi);
- leaf_insert_into_buf(&bi, item_pos - n + tb->rnum[0] - 1,
- ih, body, zeros_num);
-
- if (item_pos - n + tb->rnum[0] - 1 == 0) {
- replace_key(tb, tb->CFR[0],
- tb->rkey[0],
- tb->R[0], 0);
-
- }
- zeros_num = tb->insert_size[0] = 0;
- }
- } else { /* new item or part of it doesn't fall into R[0] */
-
- leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
- }
- break;
+ if (is_indirect_le_ih(pasted))
+ set_ih_free_space(pasted, 0);
+ tb->zeroes_num = tb->insert_size[0] = 0;
+}
- case M_PASTE: /* append item */
-
- if (n - tb->rnum[0] <= item_pos) { /* pasted item or part of it falls to R[0] */
- if (item_pos == n - tb->rnum[0] && tb->rbytes != -1) { /* we must shift the part of the appended item */
- if (is_direntry_le_ih(B_N_PITEM_HEAD(tbS0, item_pos))) { /* we append to directory item */
- int entry_count;
-
- RFALSE(zeros_num,
- "PAP-12145: invalid parameter in case of a directory");
- entry_count = I_ENTRY_COUNT(B_N_PITEM_HEAD
- (tbS0, item_pos));
- if (entry_count - tb->rbytes <
- pos_in_item)
- /* new directory entry falls into R[0] */
- {
- int paste_entry_position;
-
- RFALSE(tb->rbytes - 1 >= entry_count || !tb-> insert_size[0],
- "PAP-12150: no enough of entries to shift to R[0]: rbytes=%d, entry_count=%d",
- tb->rbytes, entry_count);
- /* Shift rnum[0]-1 items in whole. Shift rbytes-1 directory entries from directory item number rnum[0] */
- leaf_shift_right(tb, tb->rnum[0], tb->rbytes - 1);
- /* Paste given directory entry to directory item */
- paste_entry_position = pos_in_item - entry_count + tb->rbytes - 1;
- buffer_info_init_right(tb, &bi);
- leaf_paste_in_buffer(&bi, 0, paste_entry_position, tb->insert_size[0], body, zeros_num);
- /* paste entry */
- leaf_paste_entries(&bi, 0, paste_entry_position, 1,
- (struct reiserfs_de_head *) body,
- body + DEH_SIZE, tb->insert_size[0]);
-
- if (paste_entry_position == 0) {
- /* change delimiting keys */
- replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0],0);
- }
-
- tb->insert_size[0] = 0;
- pos_in_item++;
- } else { /* new directory entry doesn't fall into R[0] */
-
- leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
- }
- } else { /* regular object */
-
- int n_shift, n_rem, r_zeros_number;
- const char *r_body;
-
- /* Calculate number of bytes which must be shifted from appended item */
- if ((n_shift = tb->rbytes - tb->insert_size[0]) < 0)
- n_shift = 0;
-
- RFALSE(pos_in_item != ih_item_len
- (B_N_PITEM_HEAD(tbS0, item_pos)),
- "PAP-12155: invalid position to paste. ih_item_len=%d, pos_in_item=%d",
- pos_in_item, ih_item_len
- (B_N_PITEM_HEAD(tbS0, item_pos)));
-
- leaf_shift_right(tb, tb->rnum[0], n_shift);
- /* Calculate number of bytes which must remain in body after appending to R[0] */
- if ((n_rem = tb->insert_size[0] - tb->rbytes) < 0)
- n_rem = 0;
-
- {
- int version;
- unsigned long temp_rem = n_rem;
-
- version = ih_version(B_N_PITEM_HEAD(tb->R[0], 0));
- if (is_indirect_le_key(version, B_N_PKEY(tb->R[0], 0))) {
- temp_rem = n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT);
- }
- set_le_key_k_offset(version, B_N_PKEY(tb->R[0], 0),
- le_key_k_offset(version, B_N_PKEY(tb->R[0], 0)) + temp_rem);
- set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]),
- le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0])) + temp_rem);
- }
-/* k_offset (B_N_PKEY(tb->R[0],0)) += n_rem;
- k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;*/
- do_balance_mark_internal_dirty(tb, tb->CFR[0], 0);
-
- /* Append part of body into R[0] */
- buffer_info_init_right(tb, &bi);
- if (n_rem > zeros_num) {
- r_zeros_number = 0;
- r_body = body + n_rem - zeros_num;
- } else {
- r_body = body;
- r_zeros_number = zeros_num - n_rem;
- zeros_num -= r_zeros_number;
- }
-
- leaf_paste_in_buffer(&bi, 0, n_shift,
- tb->insert_size[0] - n_rem,
- r_body, r_zeros_number);
-
- if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->R[0], 0))) {
-#if 0
- RFALSE(n_rem,
- "PAP-12160: paste more than one unformatted node pointer");
-#endif
- set_ih_free_space(B_N_PITEM_HEAD(tb->R[0], 0), 0);
- }
- tb->insert_size[0] = n_rem;
- if (!n_rem)
- pos_in_item++;
- }
- } else { /* pasted item in whole falls into R[0] */
-
- struct item_head *pasted;
-
- ret_val = leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
- /* append item in R[0] */
- if (pos_in_item >= 0) {
- buffer_info_init_right(tb, &bi);
- leaf_paste_in_buffer(&bi, item_pos - n + tb->rnum[0], pos_in_item,
- tb->insert_size[0], body, zeros_num);
- }
-
- /* paste new entry, if item is directory item */
- pasted = B_N_PITEM_HEAD(tb->R[0], item_pos - n + tb->rnum[0]);
- if (is_direntry_le_ih(pasted) && pos_in_item >= 0) {
- leaf_paste_entries(&bi, item_pos - n + tb->rnum[0],
- pos_in_item, 1,
- (struct reiserfs_de_head *) body,
- body + DEH_SIZE, tb->insert_size[0]);
- if (!pos_in_item) {
-
- RFALSE(item_pos - n + tb->rnum[0],
- "PAP-12165: directory item must be first item of node when pasting is in 0th position");
-
- /* update delimiting keys */
- replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
- }
- }
-
- if (is_indirect_le_ih(pasted))
- set_ih_free_space(pasted, 0);
- zeros_num = tb->insert_size[0] = 0;
- }
- } else { /* new item doesn't fall into R[0] */
-
- leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
- }
- break;
- default: /* cases d and t */
- reiserfs_panic(tb->tb_sb, "PAP-12175",
- "rnum > 0: unexpected mode: %s(%d)",
- (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
- }
+static void balance_leaf_paste_right(struct tree_balance *tb,
+ struct item_head *ih, const char *body)
+{
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ int n = B_NR_ITEMS(tbS0);
+ /* new item doesn't fall into R[0] */
+ if (n - tb->rnum[0] > tb->item_pos) {
+ leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
+ return;
}
- /* tb->rnum[0] > 0 */
- RFALSE(tb->blknum[0] > 3,
- "PAP-12180: blknum can not be %d. It must be <= 3", tb->blknum[0]);
- RFALSE(tb->blknum[0] < 0,
- "PAP-12185: blknum can not be %d. It must be >= 0", tb->blknum[0]);
+ /* pasted item or part of it falls to R[0] */
- /* if while adding to a node we discover that it is possible to split
- it in two, and merge the left part into the left neighbor and the
- right part into the right neighbor, eliminating the node */
- if (tb->blknum[0] == 0) { /* node S[0] is empty now */
+ if (tb->item_pos == n - tb->rnum[0] && tb->rbytes != -1)
+ /* we must shift the part of the appended item */
+ balance_leaf_paste_right_shift(tb, ih, body);
+ else
+ /* pasted item in whole falls into R[0] */
+ balance_leaf_paste_right_whole(tb, ih, body);
+}
- RFALSE(!tb->lnum[0] || !tb->rnum[0],
- "PAP-12190: lnum and rnum must not be zero");
- /* if insertion was done before 0-th position in R[0], right
- delimiting key of the tb->L[0]'s and left delimiting key are
- not set correctly */
- if (tb->CFL[0]) {
- if (!tb->CFR[0])
- reiserfs_panic(tb->tb_sb, "vs-12195",
- "CFR not initialized");
- copy_key(B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]),
- B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]));
- do_balance_mark_internal_dirty(tb, tb->CFL[0], 0);
+/* shift rnum[0] items from S[0] to the right neighbor R[0] */
+static void balance_leaf_right(struct tree_balance *tb, struct item_head *ih,
+ const char *body, int flag)
+{
+ if (tb->rnum[0] <= 0)
+ return;
+
+ BUG_ON(flag != M_INSERT && flag != M_PASTE);
+
+ if (flag == M_INSERT)
+ balance_leaf_insert_right(tb, ih, body);
+ else /* M_PASTE */
+ balance_leaf_paste_right(tb, ih, body);
+}
+
+static void balance_leaf_new_nodes_insert(struct tree_balance *tb,
+ struct item_head *ih,
+ const char *body,
+ struct item_head *insert_key,
+ struct buffer_head **insert_ptr,
+ int i)
+{
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ int n = B_NR_ITEMS(tbS0);
+ struct buffer_info bi;
+ int shift;
+
+	/* new item or its part doesn't fall into S_new[i] */
+ if (n - tb->snum[i] >= tb->item_pos) {
+ leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
+ tb->snum[i], tb->sbytes[i], tb->S_new[i]);
+ return;
+ }
+
+	/* new item or its part falls into the first new node S_new[i] */
+
+ /* part of new item falls into S_new[i] */
+ if (tb->item_pos == n - tb->snum[i] + 1 && tb->sbytes[i] != -1) {
+ int old_key_comp, old_len, r_zeroes_number;
+ const char *r_body;
+ int version;
+
+ /* Move snum[i]-1 items from S[0] to S_new[i] */
+ leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i] - 1, -1,
+ tb->S_new[i]);
+
+ /* Remember key component and item length */
+ version = ih_version(ih);
+ old_key_comp = le_ih_k_offset(ih);
+ old_len = ih_item_len(ih);
+
+ /*
+ * Calculate key component and item length to insert
+ * into S_new[i]
+ */
+ shift = 0;
+ if (is_indirect_le_ih(ih))
+ shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
+ set_le_ih_k_offset(ih,
+ le_ih_k_offset(ih) +
+ ((old_len - tb->sbytes[i]) << shift));
+
+ put_ih_item_len(ih, tb->sbytes[i]);
+
+ /* Insert part of the item into S_new[i] before 0-th item */
+ buffer_info_init_bh(tb, &bi, tb->S_new[i]);
+
+ if ((old_len - tb->sbytes[i]) > tb->zeroes_num) {
+ r_zeroes_number = 0;
+ r_body = body + (old_len - tb->sbytes[i]) -
+ tb->zeroes_num;
+ } else {
+ r_body = body;
+ r_zeroes_number = tb->zeroes_num - (old_len -
+ tb->sbytes[i]);
+ tb->zeroes_num -= r_zeroes_number;
}
- reiserfs_invalidate_buffer(tb, tbS0);
- return 0;
+ leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeroes_number);
+
+ /*
+ * Calculate key component and item length to
+ * insert into S[i]
+ */
+ set_le_ih_k_offset(ih, old_key_comp);
+ put_ih_item_len(ih, old_len - tb->sbytes[i]);
+ tb->insert_size[0] -= tb->sbytes[i];
+ } else {
+ /* whole new item falls into S_new[i] */
+
+ /*
+ * Shift snum[0] - 1 items to S_new[i]
+ * (sbytes[i] of split item)
+ */
+ leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
+ tb->snum[i] - 1, tb->sbytes[i], tb->S_new[i]);
+
+ /* Insert new item into S_new[i] */
+ buffer_info_init_bh(tb, &bi, tb->S_new[i]);
+ leaf_insert_into_buf(&bi, tb->item_pos - n + tb->snum[i] - 1,
+ ih, body, tb->zeroes_num);
+
+ tb->zeroes_num = tb->insert_size[0] = 0;
}
+}
- /* Fill new nodes that appear in place of S[0] */
+/* we append to directory item */
+static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb,
+ struct item_head *ih,
+ const char *body,
+ struct item_head *insert_key,
+ struct buffer_head **insert_ptr,
+ int i)
+{
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ struct item_head *aux_ih = item_head(tbS0, tb->item_pos);
+ int entry_count = ih_entry_count(aux_ih);
+ struct buffer_info bi;
+
+ if (entry_count - tb->sbytes[i] < tb->pos_in_item &&
+ tb->pos_in_item <= entry_count) {
+ /* new directory entry falls into S_new[i] */
+
+ RFALSE(!tb->insert_size[0],
+ "PAP-12215: insert_size is already 0");
+ RFALSE(tb->sbytes[i] - 1 >= entry_count,
+	       "PAP-12220: there are not that many entries (%d), only %d",
+ tb->sbytes[i] - 1, entry_count);
+
+ /*
+ * Shift snum[i]-1 items in whole.
+ * Shift sbytes[i] directory entries
+ * from directory item number snum[i]
+ */
+ leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i],
+ tb->sbytes[i] - 1, tb->S_new[i]);
+
+ /*
+ * Paste given directory entry to
+ * directory item
+ */
+ buffer_info_init_bh(tb, &bi, tb->S_new[i]);
+ leaf_paste_in_buffer(&bi, 0, tb->pos_in_item - entry_count +
+ tb->sbytes[i] - 1, tb->insert_size[0],
+ body, tb->zeroes_num);
+
+ /* paste new directory entry */
+ leaf_paste_entries(&bi, 0, tb->pos_in_item - entry_count +
+ tb->sbytes[i] - 1, 1,
+ (struct reiserfs_de_head *) body,
+ body + DEH_SIZE, tb->insert_size[0]);
+
+ tb->insert_size[0] = 0;
+ tb->pos_in_item++;
+ } else {
+ /* new directory entry doesn't fall into S_new[i] */
+ leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i],
+ tb->sbytes[i], tb->S_new[i]);
+ }
+}
- /* I am told that this copying is because we need an array to enable
- the looping code. -Hans */
- snum[0] = tb->s1num, snum[1] = tb->s2num;
- sbytes[0] = tb->s1bytes;
- sbytes[1] = tb->s2bytes;
+static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb,
+ struct item_head *ih,
+ const char *body,
+ struct item_head *insert_key,
+ struct buffer_head **insert_ptr,
+ int i)
+{
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ struct item_head *aux_ih = item_head(tbS0, tb->item_pos);
+ int n_shift, n_rem, r_zeroes_number, shift;
+ const char *r_body;
+ struct item_head *tmp;
+ struct buffer_info bi;
+
+ RFALSE(ih, "PAP-12210: ih must be 0");
+
+ if (is_direntry_le_ih(aux_ih)) {
+ balance_leaf_new_nodes_paste_dirent(tb, ih, body, insert_key,
+ insert_ptr, i);
+ return;
+ }
+
+ /* regular object */
+
+ RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)) ||
+ tb->insert_size[0] <= 0,
+ "PAP-12225: item too short or insert_size <= 0");
+
+ /*
+ * Calculate number of bytes which must be shifted from appended item
+ */
+ n_shift = tb->sbytes[i] - tb->insert_size[0];
+ if (n_shift < 0)
+ n_shift = 0;
+ leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i], n_shift,
+ tb->S_new[i]);
+
+ /*
+ * Calculate number of bytes which must remain in body after
+ * append to S_new[i]
+ */
+ n_rem = tb->insert_size[0] - tb->sbytes[i];
+ if (n_rem < 0)
+ n_rem = 0;
+
+	/* Append part of body into S_new[i] */
+ buffer_info_init_bh(tb, &bi, tb->S_new[i]);
+ if (n_rem > tb->zeroes_num) {
+ r_zeroes_number = 0;
+ r_body = body + n_rem - tb->zeroes_num;
+ } else {
+ r_body = body;
+ r_zeroes_number = tb->zeroes_num - n_rem;
+ tb->zeroes_num -= r_zeroes_number;
+ }
+
+ leaf_paste_in_buffer(&bi, 0, n_shift, tb->insert_size[0] - n_rem,
+ r_body, r_zeroes_number);
+
+ tmp = item_head(tb->S_new[i], 0);
+ shift = 0;
+ if (is_indirect_le_ih(tmp)) {
+ set_ih_free_space(tmp, 0);
+ shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
+ }
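+	/*
+	 * the n_rem bytes still to be pasted precede the contents of
+	 * S_new[i] in the object, so advance its first key past them
+	 */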
+ add_le_ih_k_offset(tmp, n_rem << shift);
+
+ tb->insert_size[0] = n_rem;
+ if (!n_rem)
+ tb->pos_in_item++;
+}
+
+static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb,
+ struct item_head *ih,
+ const char *body,
+ struct item_head *insert_key,
+ struct buffer_head **insert_ptr,
+ int i)
+{
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ int n = B_NR_ITEMS(tbS0);
+ int leaf_mi;
+ struct item_head *pasted;
+ struct buffer_info bi;
+
+#ifdef CONFIG_REISERFS_CHECK
+ struct item_head *ih_check = item_head(tbS0, tb->item_pos);
+
+ if (!is_direntry_le_ih(ih_check) &&
+ (tb->pos_in_item != ih_item_len(ih_check) ||
+ tb->insert_size[0] <= 0))
+ reiserfs_panic(tb->tb_sb,
+ "PAP-12235",
+ "pos_in_item must be equal to ih_item_len");
+#endif
+
+ leaf_mi = leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i],
+ tb->sbytes[i], tb->S_new[i]);
+
+ RFALSE(leaf_mi,
+ "PAP-12240: unexpected value returned by leaf_move_items (%d)",
+ leaf_mi);
+
+ /* paste into item */
+ buffer_info_init_bh(tb, &bi, tb->S_new[i]);
+ leaf_paste_in_buffer(&bi, tb->item_pos - n + tb->snum[i],
+ tb->pos_in_item, tb->insert_size[0],
+ body, tb->zeroes_num);
+
+ pasted = item_head(tb->S_new[i], tb->item_pos - n +
+ tb->snum[i]);
+ if (is_direntry_le_ih(pasted))
+ leaf_paste_entries(&bi, tb->item_pos - n + tb->snum[i],
+ tb->pos_in_item, 1,
+ (struct reiserfs_de_head *)body,
+ body + DEH_SIZE, tb->insert_size[0]);
+
+ /* if we paste to indirect item update ih_free_space */
+ if (is_indirect_le_ih(pasted))
+ set_ih_free_space(pasted, 0);
+
+ tb->zeroes_num = tb->insert_size[0] = 0;
+}
+
+static void balance_leaf_new_nodes_paste(struct tree_balance *tb,
+ struct item_head *ih,
+ const char *body,
+ struct item_head *insert_key,
+ struct buffer_head **insert_ptr,
+ int i)
+{
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ int n = B_NR_ITEMS(tbS0);
+
+ /* pasted item doesn't fall into S_new[i] */
+ if (n - tb->snum[i] > tb->item_pos) {
+ leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
+ tb->snum[i], tb->sbytes[i], tb->S_new[i]);
+ return;
+ }
+
+	/* pasted item or part of it falls into S_new[i] */
+
+ if (tb->item_pos == n - tb->snum[i] && tb->sbytes[i] != -1)
+ /* we must shift part of the appended item */
+ balance_leaf_new_nodes_paste_shift(tb, ih, body, insert_key,
+ insert_ptr, i);
+ else
+ /* item falls wholly into S_new[i] */
+ balance_leaf_new_nodes_paste_whole(tb, ih, body, insert_key,
+ insert_ptr, i);
+}
+
+/* Fill new nodes that appear in place of S[0] */
+static void balance_leaf_new_nodes(struct tree_balance *tb,
+ struct item_head *ih,
+ const char *body,
+ struct item_head *insert_key,
+ struct buffer_head **insert_ptr,
+ int flag)
+{
+ int i;
for (i = tb->blknum[0] - 2; i >= 0; i--) {
+ BUG_ON(flag != M_INSERT && flag != M_PASTE);
- RFALSE(!snum[i], "PAP-12200: snum[%d] == %d. Must be > 0", i,
- snum[i]);
+ RFALSE(!tb->snum[i],
+ "PAP-12200: snum[%d] == %d. Must be > 0", i,
+ tb->snum[i]);
/* here we shift from S to S_new nodes */
- S_new[i] = get_FEB(tb);
+ tb->S_new[i] = get_FEB(tb);
/* initialized block type and tree level */
- set_blkh_level(B_BLK_HEAD(S_new[i]), DISK_LEAF_NODE_LEVEL);
-
- n = B_NR_ITEMS(tbS0);
-
- switch (flag) {
- case M_INSERT: /* insert item */
-
- if (n - snum[i] < item_pos) { /* new item or it's part falls to first new node S_new[i] */
- if (item_pos == n - snum[i] + 1 && sbytes[i] != -1) { /* part of new item falls into S_new[i] */
- int old_key_comp, old_len, r_zeros_number;
- const char *r_body;
- int version;
-
- /* Move snum[i]-1 items from S[0] to S_new[i] */
- leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
- snum[i] - 1, -1,
- S_new[i]);
- /* Remember key component and item length */
- version = ih_version(ih);
- old_key_comp = le_ih_k_offset(ih);
- old_len = ih_item_len(ih);
-
- /* Calculate key component and item length to insert into S_new[i] */
- set_le_ih_k_offset(ih, le_ih_k_offset(ih) +
- ((old_len - sbytes[i]) << (is_indirect_le_ih(ih) ? tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT : 0)));
-
- put_ih_item_len(ih, sbytes[i]);
-
- /* Insert part of the item into S_new[i] before 0-th item */
- buffer_info_init_bh(tb, &bi, S_new[i]);
-
- if ((old_len - sbytes[i]) > zeros_num) {
- r_zeros_number = 0;
- r_body = body + (old_len - sbytes[i]) - zeros_num;
- } else {
- r_body = body;
- r_zeros_number = zeros_num - (old_len - sbytes[i]);
- zeros_num -= r_zeros_number;
- }
-
- leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeros_number);
-
- /* Calculate key component and item length to insert into S[i] */
- set_le_ih_k_offset(ih, old_key_comp);
- put_ih_item_len(ih, old_len - sbytes[i]);
- tb->insert_size[0] -= sbytes[i];
- } else { /* whole new item falls into S_new[i] */
-
- /* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */
- leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
- snum[i] - 1, sbytes[i], S_new[i]);
-
- /* Insert new item into S_new[i] */
- buffer_info_init_bh(tb, &bi, S_new[i]);
- leaf_insert_into_buf(&bi, item_pos - n + snum[i] - 1,
- ih, body, zeros_num);
-
- zeros_num = tb->insert_size[0] = 0;
- }
- }
-
- else { /* new item or it part don't falls into S_new[i] */
+ set_blkh_level(B_BLK_HEAD(tb->S_new[i]), DISK_LEAF_NODE_LEVEL);
+
+ if (flag == M_INSERT)
+ balance_leaf_new_nodes_insert(tb, ih, body, insert_key,
+ insert_ptr, i);
+ else /* M_PASTE */
+ balance_leaf_new_nodes_paste(tb, ih, body, insert_key,
+ insert_ptr, i);
+
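+		/* pass the new node and its first key up to the next level */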
+ memcpy(insert_key + i, leaf_key(tb->S_new[i], 0), KEY_SIZE);
+ insert_ptr[i] = tb->S_new[i];
+
+ RFALSE(!buffer_journaled(tb->S_new[i])
+ || buffer_journal_dirty(tb->S_new[i])
+ || buffer_dirty(tb->S_new[i]),
+ "PAP-12247: S_new[%d] : (%b)",
+ i, tb->S_new[i]);
+ }
+}
- leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
- snum[i], sbytes[i], S_new[i]);
- }
- break;
+static void balance_leaf_finish_node_insert(struct tree_balance *tb,
+ struct item_head *ih,
+ const char *body)
+{
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ struct buffer_info bi;
+ buffer_info_init_tbS0(tb, &bi);
+ leaf_insert_into_buf(&bi, tb->item_pos, ih, body, tb->zeroes_num);
- case M_PASTE: /* append item */
-
- if (n - snum[i] <= item_pos) { /* pasted item or part if it falls to S_new[i] */
- if (item_pos == n - snum[i] && sbytes[i] != -1) { /* we must shift part of the appended item */
- struct item_head *aux_ih;
-
- RFALSE(ih, "PAP-12210: ih must be 0");
-
- aux_ih = B_N_PITEM_HEAD(tbS0, item_pos);
- if (is_direntry_le_ih(aux_ih)) {
- /* we append to directory item */
-
- int entry_count;
-
- entry_count = ih_entry_count(aux_ih);
-
- if (entry_count - sbytes[i] < pos_in_item && pos_in_item <= entry_count) {
- /* new directory entry falls into S_new[i] */
-
- RFALSE(!tb->insert_size[0], "PAP-12215: insert_size is already 0");
- RFALSE(sbytes[i] - 1 >= entry_count,
- "PAP-12220: there are no so much entries (%d), only %d",
- sbytes[i] - 1, entry_count);
-
- /* Shift snum[i]-1 items in whole. Shift sbytes[i] directory entries from directory item number snum[i] */
- leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i] - 1, S_new[i]);
- /* Paste given directory entry to directory item */
- buffer_info_init_bh(tb, &bi, S_new[i]);
- leaf_paste_in_buffer(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1,
- tb->insert_size[0], body, zeros_num);
- /* paste new directory entry */
- leaf_paste_entries(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1, 1,
- (struct reiserfs_de_head *) body,
- body + DEH_SIZE, tb->insert_size[0]);
- tb->insert_size[0] = 0;
- pos_in_item++;
- } else { /* new directory entry doesn't fall into S_new[i] */
- leaf_move_items(LEAF_FROM_S_TO_SNEW,tb, snum[i], sbytes[i], S_new[i]);
- }
- } else { /* regular object */
-
- int n_shift, n_rem, r_zeros_number;
- const char *r_body;
-
- RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)) || tb->insert_size[0] <= 0,
- "PAP-12225: item too short or insert_size <= 0");
-
- /* Calculate number of bytes which must be shifted from appended item */
- n_shift = sbytes[i] - tb->insert_size[0];
- if (n_shift < 0)
- n_shift = 0;
- leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], n_shift, S_new[i]);
-
- /* Calculate number of bytes which must remain in body after append to S_new[i] */
- n_rem = tb->insert_size[0] - sbytes[i];
- if (n_rem < 0)
- n_rem = 0;
- /* Append part of body into S_new[0] */
- buffer_info_init_bh(tb, &bi, S_new[i]);
- if (n_rem > zeros_num) {
- r_zeros_number = 0;
- r_body = body + n_rem - zeros_num;
- } else {
- r_body = body;
- r_zeros_number = zeros_num - n_rem;
- zeros_num -= r_zeros_number;
- }
-
- leaf_paste_in_buffer(&bi, 0, n_shift,
- tb->insert_size[0] - n_rem,
- r_body, r_zeros_number);
- {
- struct item_head *tmp;
-
- tmp = B_N_PITEM_HEAD(S_new[i], 0);
- if (is_indirect_le_ih
- (tmp)) {
- set_ih_free_space(tmp, 0);
- set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + (n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT)));
- } else {
- set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + n_rem);
- }
- }
-
- tb->insert_size[0] = n_rem;
- if (!n_rem)
- pos_in_item++;
- }
- } else
- /* item falls wholly into S_new[i] */
- {
- int leaf_mi;
- struct item_head *pasted;
+ /* If we insert the first key change the delimiting key */
+ if (tb->item_pos == 0) {
+ if (tb->CFL[0]) /* can be 0 in reiserfsck */
+ replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
-#ifdef CONFIG_REISERFS_CHECK
- struct item_head *ih_check = B_N_PITEM_HEAD(tbS0, item_pos);
-
- if (!is_direntry_le_ih(ih_check)
- && (pos_in_item != ih_item_len(ih_check)
- || tb->insert_size[0] <= 0))
- reiserfs_panic(tb->tb_sb,
- "PAP-12235",
- "pos_in_item "
- "must be equal "
- "to ih_item_len");
-#endif /* CONFIG_REISERFS_CHECK */
-
- leaf_mi = leaf_move_items(LEAF_FROM_S_TO_SNEW,
- tb, snum[i],
- sbytes[i],
- S_new[i]);
-
- RFALSE(leaf_mi,
- "PAP-12240: unexpected value returned by leaf_move_items (%d)",
- leaf_mi);
-
- /* paste into item */
- buffer_info_init_bh(tb, &bi, S_new[i]);
- leaf_paste_in_buffer(&bi,
- item_pos - n + snum[i],
- pos_in_item,
- tb->insert_size[0],
- body, zeros_num);
-
- pasted = B_N_PITEM_HEAD(S_new[i], item_pos - n + snum[i]);
- if (is_direntry_le_ih(pasted)) {
- leaf_paste_entries(&bi,
- item_pos - n + snum[i],
- pos_in_item, 1,
- (struct reiserfs_de_head *)body,
- body + DEH_SIZE,
- tb->insert_size[0]
- );
- }
-
- /* if we paste to indirect item update ih_free_space */
- if (is_indirect_le_ih(pasted))
- set_ih_free_space(pasted, 0);
- zeros_num = tb->insert_size[0] = 0;
- }
- }
+ }
+}
- else { /* pasted item doesn't fall into S_new[i] */
+static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb,
+ struct item_head *ih,
+ const char *body)
+{
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ struct item_head *pasted = item_head(tbS0, tb->item_pos);
+ struct buffer_info bi;
- leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
- snum[i], sbytes[i], S_new[i]);
- }
- break;
- default: /* cases d and t */
- reiserfs_panic(tb->tb_sb, "PAP-12245",
- "blknum > 2: unexpected mode: %s(%d)",
- (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
+ if (tb->pos_in_item >= 0 && tb->pos_in_item <= ih_entry_count(pasted)) {
+ RFALSE(!tb->insert_size[0],
+ "PAP-12260: insert_size is 0 already");
+
+ /* prepare space */
+ buffer_info_init_tbS0(tb, &bi);
+ leaf_paste_in_buffer(&bi, tb->item_pos, tb->pos_in_item,
+ tb->insert_size[0], body, tb->zeroes_num);
+
+ /* paste entry */
+ leaf_paste_entries(&bi, tb->item_pos, tb->pos_in_item, 1,
+ (struct reiserfs_de_head *)body,
+ body + DEH_SIZE, tb->insert_size[0]);
+
+ if (!tb->item_pos && !tb->pos_in_item) {
+ RFALSE(!tb->CFL[0] || !tb->L[0],
+ "PAP-12270: CFL[0]/L[0] must be specified");
+ if (tb->CFL[0])
+ replace_key(tb, tb->CFL[0], tb->lkey[0],
+ tbS0, 0);
}
- memcpy(insert_key + i, B_N_PKEY(S_new[i], 0), KEY_SIZE);
- insert_ptr[i] = S_new[i];
+ tb->insert_size[0] = 0;
+ }
+}
+
+static void balance_leaf_finish_node_paste(struct tree_balance *tb,
+ struct item_head *ih,
+ const char *body)
+{
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+ struct buffer_info bi;
+ struct item_head *pasted = item_head(tbS0, tb->item_pos);
- RFALSE(!buffer_journaled(S_new[i])
- || buffer_journal_dirty(S_new[i])
- || buffer_dirty(S_new[i]), "PAP-12247: S_new[%d] : (%b)",
- i, S_new[i]);
+	/* if this is a directory item, a new entry may already be pasted */
+ if (is_direntry_le_ih(pasted)) {
+ balance_leaf_finish_node_paste_dirent(tb, ih, body);
+ return;
}
- /* if the affected item was not wholly shifted then we perform all necessary operations on that part or whole of the
- affected item which remains in S */
- if (0 <= item_pos && item_pos < tb->s0num) { /* if we must insert or append into buffer S[0] */
+ /* regular object */
- switch (flag) {
- case M_INSERT: /* insert item into S[0] */
- buffer_info_init_tbS0(tb, &bi);
- leaf_insert_into_buf(&bi, item_pos, ih, body,
- zeros_num);
+ if (tb->pos_in_item == ih_item_len(pasted)) {
+ RFALSE(tb->insert_size[0] <= 0,
+ "PAP-12275: insert size must not be %d",
+ tb->insert_size[0]);
+ buffer_info_init_tbS0(tb, &bi);
+ leaf_paste_in_buffer(&bi, tb->item_pos,
+ tb->pos_in_item, tb->insert_size[0], body,
+ tb->zeroes_num);
- /* If we insert the first key change the delimiting key */
- if (item_pos == 0) {
- if (tb->CFL[0]) /* can be 0 in reiserfsck */
- replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
- }
- break;
+ if (is_indirect_le_ih(pasted))
+ set_ih_free_space(pasted, 0);
- case M_PASTE:{ /* append item in S[0] */
- struct item_head *pasted;
-
- pasted = B_N_PITEM_HEAD(tbS0, item_pos);
- /* when directory, may be new entry already pasted */
- if (is_direntry_le_ih(pasted)) {
- if (pos_in_item >= 0 && pos_in_item <= ih_entry_count(pasted)) {
-
- RFALSE(!tb->insert_size[0],
- "PAP-12260: insert_size is 0 already");
-
- /* prepare space */
- buffer_info_init_tbS0(tb, &bi);
- leaf_paste_in_buffer(&bi, item_pos, pos_in_item,
- tb->insert_size[0], body,
- zeros_num);
-
- /* paste entry */
- leaf_paste_entries(&bi, item_pos, pos_in_item, 1,
- (struct reiserfs_de_head *)body,
- body + DEH_SIZE,
- tb->insert_size[0]);
- if (!item_pos && !pos_in_item) {
- RFALSE(!tb->CFL[0] || !tb->L[0],
- "PAP-12270: CFL[0]/L[0] must be specified");
- if (tb->CFL[0])
- replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
- }
- tb->insert_size[0] = 0;
- }
- } else { /* regular object */
- if (pos_in_item == ih_item_len(pasted)) {
-
- RFALSE(tb->insert_size[0] <= 0,
- "PAP-12275: insert size must not be %d",
- tb->insert_size[0]);
- buffer_info_init_tbS0(tb, &bi);
- leaf_paste_in_buffer(&bi, item_pos, pos_in_item,
- tb->insert_size[0], body, zeros_num);
-
- if (is_indirect_le_ih(pasted)) {
-#if 0
- RFALSE(tb->
- insert_size[0] !=
- UNFM_P_SIZE,
- "PAP-12280: insert_size for indirect item must be %d, not %d",
- UNFM_P_SIZE,
- tb->
- insert_size[0]);
-#endif
- set_ih_free_space(pasted, 0);
- }
- tb->insert_size[0] = 0;
- }
+ tb->insert_size[0] = 0;
+ }
#ifdef CONFIG_REISERFS_CHECK
- else {
- if (tb->insert_size[0]) {
- print_cur_tb("12285");
- reiserfs_panic(tb->tb_sb,
- "PAP-12285",
- "insert_size "
- "must be 0 "
- "(%d)",
- tb->insert_size[0]);
- }
- }
-#endif /* CONFIG_REISERFS_CHECK */
-
- }
- } /* case M_PASTE: */
+ else if (tb->insert_size[0]) {
+ print_cur_tb("12285");
+ reiserfs_panic(tb->tb_sb, "PAP-12285",
+ "insert_size must be 0 (%d)", tb->insert_size[0]);
+ }
+#endif
+}
+
+/*
+ * if the affected item was not wholly shifted then we perform all
+ * necessary operations on the part (or whole) of the affected item
+ * that remains in S
+ */
+static void balance_leaf_finish_node(struct tree_balance *tb,
+ struct item_head *ih,
+ const char *body, int flag)
+{
+ /* if we must insert or append into buffer S[0] */
+ if (0 <= tb->item_pos && tb->item_pos < tb->s0num) {
+ if (flag == M_INSERT)
+ balance_leaf_finish_node_insert(tb, ih, body);
+ else /* M_PASTE */
+ balance_leaf_finish_node_paste(tb, ih, body);
+ }
+}
+
+/**
+ * balance_leaf - reiserfs tree balancing algorithm
+ * @tb: tree balance state
+ * @ih: item header of inserted item (little endian)
+ * @body: body of inserted item or bytes to paste
+ * @flag: i - insert, d - delete, c - cut, p - paste (see do_balance)
+ * passed back:
+ * @insert_key: key to insert new nodes
+ * @insert_ptr: array of nodes to insert at the next level
+ *
+ * In our processing of one level we sometimes determine what must be
+ * inserted into the next higher level. This insertion consists of a
+ * key or two keys and their corresponding pointers.
+ */
+static int balance_leaf(struct tree_balance *tb, struct item_head *ih,
+ const char *body, int flag,
+ struct item_head *insert_key,
+ struct buffer_head **insert_ptr)
+{
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+
+ PROC_INFO_INC(tb->tb_sb, balance_at[0]);
+
+ /* Make balance in case insert_size[0] < 0 */
+ if (tb->insert_size[0] < 0)
+ return balance_leaf_when_delete(tb, flag);
+
+	tb->item_pos = PATH_LAST_POSITION(tb->tb_path);
+	tb->pos_in_item = tb->tb_path->pos_in_item;
+	tb->zeroes_num = 0;
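+	/*
+	 * a NULL body on insert means the new item is zero-filled;
+	 * zeroes_num bytes are zeroed rather than copied from body
+	 */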
+ if (flag == M_INSERT && !body)
+ tb->zeroes_num = ih_item_len(ih);
+
+ /*
+ * for indirect item pos_in_item is measured in unformatted node
+ * pointers. Recalculate to bytes
+ */
+ if (flag != M_INSERT
+ && is_indirect_le_ih(item_head(tbS0, tb->item_pos)))
+ tb->pos_in_item *= UNFM_P_SIZE;
+
+ balance_leaf_left(tb, ih, body, flag);
+
+	/*
+	 * Calculate the new item position: lnum[0] items moved to L[0],
+	 * minus one if the boundary item was split (lbytes != -1) and
+	 * part of it remains in S[0]
+	 */
+ tb->item_pos -= (tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0));
+
+ balance_leaf_right(tb, ih, body, flag);
+
+ RFALSE(tb->blknum[0] > 3,
+ "PAP-12180: blknum can not be %d. It must be <= 3", tb->blknum[0]);
+ RFALSE(tb->blknum[0] < 0,
+ "PAP-12185: blknum can not be %d. It must be >= 0", tb->blknum[0]);
+
+ /*
+ * if while adding to a node we discover that it is possible to split
+ * it in two, and merge the left part into the left neighbor and the
+ * right part into the right neighbor, eliminating the node
+ */
+ if (tb->blknum[0] == 0) { /* node S[0] is empty now */
+
+ RFALSE(!tb->lnum[0] || !tb->rnum[0],
+ "PAP-12190: lnum and rnum must not be zero");
+		/*
+		 * if insertion was done before 0-th position in R[0],
+		 * the right delimiting key of L[0] and the left
+		 * delimiting key are not set correctly
+		 */
+ if (tb->CFL[0]) {
+ if (!tb->CFR[0])
+ reiserfs_panic(tb->tb_sb, "vs-12195",
+ "CFR not initialized");
+ copy_key(internal_key(tb->CFL[0], tb->lkey[0]),
+ internal_key(tb->CFR[0], tb->rkey[0]));
+ do_balance_mark_internal_dirty(tb, tb->CFL[0], 0);
}
+
+ reiserfs_invalidate_buffer(tb, tbS0);
+ return 0;
}
+
+ balance_leaf_new_nodes(tb, ih, body, insert_key, insert_ptr, flag);
+
+ balance_leaf_finish_node(tb, ih, body, flag);
+
#ifdef CONFIG_REISERFS_CHECK
if (flag == M_PASTE && tb->insert_size[0]) {
print_cur_tb("12290");
@@ -1137,9 +1454,11 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
"PAP-12290", "insert_size is still not 0 (%d)",
tb->insert_size[0]);
}
-#endif /* CONFIG_REISERFS_CHECK */
+#endif
+
+ /* Leaf level of the tree is balanced (end of balance_leaf) */
return 0;
-} /* Leaf level of the tree is balanced (end of balance_leaf) */
+}
/* Make empty node */
void make_empty_node(struct buffer_info *bi)
@@ -1178,9 +1497,7 @@ struct buffer_head *get_FEB(struct tree_balance *tb)
return tb->used[i];
}
-/* This is now used because reiserfs_free_block has to be able to
-** schedule.
-*/
+/* This is now used because reiserfs_free_block has to be able to schedule. */
static void store_thrown(struct tree_balance *tb, struct buffer_head *bh)
{
int i;
@@ -1246,10 +1563,10 @@ void replace_key(struct tree_balance *tb, struct buffer_head *dest, int n_dest,
if (B_IS_ITEMS_LEVEL(src))
/* source buffer contains leaf node */
- memcpy(B_N_PDELIM_KEY(dest, n_dest), B_N_PITEM_HEAD(src, n_src),
+ memcpy(internal_key(dest, n_dest), item_head(src, n_src),
KEY_SIZE);
else
- memcpy(B_N_PDELIM_KEY(dest, n_dest), B_N_PDELIM_KEY(src, n_src),
+ memcpy(internal_key(dest, n_dest), internal_key(src, n_src),
KEY_SIZE);
do_balance_mark_internal_dirty(tb, dest, 0);
@@ -1335,8 +1652,10 @@ static int check_before_balancing(struct tree_balance *tb)
"mount point.");
}
- /* double check that buffers that we will modify are unlocked. (fix_nodes should already have
- prepped all of these for us). */
+ /*
+ * double check that buffers that we will modify are unlocked.
+ * (fix_nodes should already have prepped all of these for us).
+ */
if (tb->lnum[0]) {
retval |= locked_or_not_in_tree(tb, tb->L[0], "L[0]");
retval |= locked_or_not_in_tree(tb, tb->FL[0], "FL[0]");
@@ -1429,49 +1748,51 @@ static void check_internal_levels(struct tree_balance *tb)
#endif
-/* Now we have all of the buffers that must be used in balancing of
- the tree. We rely on the assumption that schedule() will not occur
- while do_balance works. ( Only interrupt handlers are acceptable.)
- We balance the tree according to the analysis made before this,
- using buffers already obtained. For SMP support it will someday be
- necessary to add ordered locking of tb. */
-
-/* Some interesting rules of balancing:
-
- we delete a maximum of two nodes per level per balancing: we never
- delete R, when we delete two of three nodes L, S, R then we move
- them into R.
-
- we only delete L if we are deleting two nodes, if we delete only
- one node we delete S
-
- if we shift leaves then we shift as much as we can: this is a
- deliberate policy of extremism in node packing which results in
- higher average utilization after repeated random balance operations
- at the cost of more memory copies and more balancing as a result of
- small insertions to full nodes.
-
- if we shift internal nodes we try to evenly balance the node
- utilization, with consequent less balancing at the cost of lower
- utilization.
-
- one could argue that the policy for directories in leaves should be
- that of internal nodes, but we will wait until another day to
- evaluate this.... It would be nice to someday measure and prove
- these assumptions as to what is optimal....
+/*
+ * Now we have all of the buffers that must be used in balancing of
+ * the tree. We rely on the assumption that schedule() will not occur
+ * while do_balance works. ( Only interrupt handlers are acceptable.)
+ * We balance the tree according to the analysis made before this,
+ * using buffers already obtained. For SMP support it will someday be
+ * necessary to add ordered locking of tb.
+ */
-*/
+/*
+ * Some interesting rules of balancing:
+ * we delete a maximum of two nodes per level per balancing: we never
+ * delete R, when we delete two of three nodes L, S, R then we move
+ * them into R.
+ *
+ * we only delete L if we are deleting two nodes, if we delete only
+ * one node we delete S
+ *
+ * if we shift leaves then we shift as much as we can: this is a
+ * deliberate policy of extremism in node packing which results in
+ * higher average utilization after repeated random balance operations
+ * at the cost of more memory copies and more balancing as a result of
+ * small insertions to full nodes.
+ *
+ * if we shift internal nodes we try to evenly balance the node
+ * utilization, with consequent less balancing at the cost of lower
+ * utilization.
+ *
+ * one could argue that the policy for directories in leaves should be
+ * that of internal nodes, but we will wait until another day to
+ * evaluate this.... It would be nice to someday measure and prove
+ * these assumptions as to what is optimal....
+ */
static inline void do_balance_starts(struct tree_balance *tb)
{
- /* use print_cur_tb() to see initial state of struct
- tree_balance */
+ /* use print_cur_tb() to see initial state of struct tree_balance */
/* store_print_tb (tb); */
/* do not delete, just comment it out */
-/* print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb,
- "check");*/
+ /*
+ print_tb(flag, PATH_LAST_POSITION(tb->tb_path),
+ tb->tb_path->pos_in_item, tb, "check");
+ */
RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
#ifdef CONFIG_REISERFS_CHECK
REISERFS_SB(tb->tb_sb)->cur_tb = tb;
@@ -1487,9 +1808,10 @@ static inline void do_balance_completed(struct tree_balance *tb)
REISERFS_SB(tb->tb_sb)->cur_tb = NULL;
#endif
- /* reiserfs_free_block is no longer schedule safe. So, we need to
- ** put the buffers we want freed on the thrown list during do_balance,
- ** and then free them now
+ /*
+ * reiserfs_free_block is no longer schedule safe. So, we need to
+ * put the buffers we want freed on the thrown list during do_balance,
+ * and then free them now
*/
REISERFS_SB(tb->tb_sb)->s_do_balance++;
@@ -1500,36 +1822,40 @@ static inline void do_balance_completed(struct tree_balance *tb)
free_thrown(tb);
}
-void do_balance(struct tree_balance *tb, /* tree_balance structure */
- struct item_head *ih, /* item header of inserted item */
- const char *body, /* body of inserted item or bytes to paste */
- int flag)
-{ /* i - insert, d - delete
- c - cut, p - paste
-
- Cut means delete part of an item
- (includes removing an entry from a
- directory).
-
- Delete means delete whole item.
-
- Insert means add a new item into the
- tree.
-
- Paste means to append to the end of an
- existing file or to insert a directory
- entry. */
- int child_pos, /* position of a child node in its parent */
- h; /* level of the tree being processed */
- struct item_head insert_key[2]; /* in our processing of one level
- we sometimes determine what
- must be inserted into the next
- higher level. This insertion
- consists of a key or two keys
- and their corresponding
- pointers */
- struct buffer_head *insert_ptr[2]; /* inserted node-ptrs for the next
- level */
+/*
+ * do_balance - balance the tree
+ *
+ * @tb: tree_balance structure
+ * @ih: item header of inserted item
+ * @body: body of inserted item or bytes to paste
+ * @flag: 'i' - insert, 'd' - delete, 'c' - cut, 'p' - paste
+ *
+ * Cut means delete part of an item (includes removing an entry from a
+ * directory).
+ *
+ * Delete means delete whole item.
+ *
+ * Insert means add a new item into the tree.
+ *
+ * Paste means to append to the end of an existing file or to
+ * insert a directory entry.
+ */
+void do_balance(struct tree_balance *tb, struct item_head *ih,
+ const char *body, int flag)
+{
+ int child_pos; /* position of a child node in its parent */
+ int h; /* level of the tree being processed */
+
+ /*
+ * in our processing of one level we sometimes determine what
+ * must be inserted into the next higher level. This insertion
+ * consists of a key or two keys and their corresponding
+ * pointers
+ */
+ struct item_head insert_key[2];
+
+ /* inserted node-ptrs for the next level */
+ struct buffer_head *insert_ptr[2];
tb->tb_mode = flag;
tb->need_balance_dirty = 0;
@@ -1546,12 +1872,14 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
return;
}
- atomic_inc(&(fs_generation(tb->tb_sb)));
+ atomic_inc(&fs_generation(tb->tb_sb));
do_balance_starts(tb);
- /* balance leaf returns 0 except if combining L R and S into
- one node. see balance_internal() for explanation of this
- line of code. */
+ /*
+ * balance_leaf returns 0 except if combining L R and S into
+ * one node. see balance_internal() for explanation of this
+ * line of code.
+ */
child_pos = PATH_H_B_ITEM_ORDER(tb->tb_path, 0) +
balance_leaf(tb, ih, body, flag, insert_key, insert_ptr);
@@ -1561,9 +1889,8 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
/* Balance internal level of the tree. */
for (h = 1; h < MAX_HEIGHT && tb->insert_size[h]; h++)
- child_pos =
- balance_internal(tb, h, child_pos, insert_key, insert_ptr);
+ child_pos = balance_internal(tb, h, child_pos, insert_key,
+ insert_ptr);
do_balance_completed(tb);
-
}
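
A toy model of the four mode characters documented above (standalone C; the
real do_balance() dispatches on the same characters but operates on the
tree_balance machinery):

    #include <stdio.h>

    /* Meanings of do_balance()'s flag argument, per the comment above. */
    static const char *describe_mode(int flag)
    {
            switch (flag) {
            case 'i':
                    return "insert a new item into the tree";
            case 'd':
                    return "delete a whole item";
            case 'c':
                    return "cut: delete part of an item (e.g. a dir entry)";
            case 'p':
                    return "paste: append to a file or insert a dir entry";
            default:
                    return "unknown mode";
            }
    }

    int main(void)
    {
            const char modes[] = { 'i', 'd', 'c', 'p' };
            size_t i;

            for (i = 0; i < sizeof(modes); i++)
                    printf("'%c': %s\n", modes[i], describe_mode(modes[i]));
            return 0;
    }
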
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index ed58d843d57..5f6c32c668b 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -15,20 +15,20 @@
#include <linux/quotaops.h>
/*
-** We pack the tails of files on file close, not at the time they are written.
-** This implies an unnecessary copy of the tail and an unnecessary indirect item
-** insertion/balancing, for files that are written in one write.
-** It avoids unnecessary tail packings (balances) for files that are written in
-** multiple writes and are small enough to have tails.
-**
-** file_release is called by the VFS layer when the file is closed. If
-** this is the last open file descriptor, and the file
-** small enough to have a tail, and the tail is currently in an
-** unformatted node, the tail is converted back into a direct item.
-**
-** We use reiserfs_truncate_file to pack the tail, since it already has
-** all the conditions coded.
-*/
+ * We pack the tails of files on file close, not at the time they are written.
+ * This implies an unnecessary copy of the tail and an unnecessary indirect item
+ * insertion/balancing, for files that are written in one write.
+ * It avoids unnecessary tail packings (balances) for files that are written in
+ * multiple writes and are small enough to have tails.
+ *
+ * file_release is called by the VFS layer when the file is closed. If
+ * this is the last open file descriptor, and the file is
+ * small enough to have a tail, and the tail is currently in an
+ * unformatted node, the tail is converted back into a direct item.
+ *
+ * We use reiserfs_truncate_file to pack the tail, since it already has
+ * all the conditions coded.
+ */
static int reiserfs_file_release(struct inode *inode, struct file *filp)
{
@@ -41,10 +41,10 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
if (atomic_add_unless(&REISERFS_I(inode)->openers, -1, 1))
return 0;
- mutex_lock(&(REISERFS_I(inode)->tailpack));
+ mutex_lock(&REISERFS_I(inode)->tailpack);
if (!atomic_dec_and_test(&REISERFS_I(inode)->openers)) {
- mutex_unlock(&(REISERFS_I(inode)->tailpack));
+ mutex_unlock(&REISERFS_I(inode)->tailpack);
return 0;
}
@@ -52,31 +52,35 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
if ((!(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
!tail_has_to_be_packed(inode)) &&
REISERFS_I(inode)->i_prealloc_count <= 0) {
- mutex_unlock(&(REISERFS_I(inode)->tailpack));
+ mutex_unlock(&REISERFS_I(inode)->tailpack);
return 0;
}
reiserfs_write_lock(inode->i_sb);
- /* freeing preallocation only involves relogging blocks that
+ /*
+ * freeing preallocation only involves relogging blocks that
* are already in the current transaction. preallocation gets
* freed at the end of each transaction, so it is impossible for
* us to log any additional blocks (including quota blocks)
*/
err = journal_begin(&th, inode->i_sb, 1);
if (err) {
- /* uh oh, we can't allow the inode to go away while there
+ /*
+ * uh oh, we can't allow the inode to go away while there
* are still preallocation blocks pending. Try to join the
* aborted transaction
*/
jbegin_failure = err;
- err = journal_join_abort(&th, inode->i_sb, 1);
+ err = journal_join_abort(&th, inode->i_sb);
if (err) {
- /* hmpf, our choices here aren't good. We can pin the inode
- * which will disallow unmount from every happening, we can
- * do nothing, which will corrupt random memory on unmount,
- * or we can forcibly remove the file from the preallocation
- * list, which will leak blocks on disk. Lets pin the inode
+ /*
+ * hmpf, our choices here aren't good. We can pin
+ * the inode which will disallow unmount from ever
+ * happening, we can do nothing, which will corrupt
+ * random memory on unmount, or we can forcibly
+ * remove the file from the preallocation list, which
+ * will leak blocks on disk. Let's pin the inode
* and let the admin know what is going on.
*/
igrab(inode);
@@ -92,7 +96,7 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
#ifdef REISERFS_PREALLOCATE
reiserfs_discard_prealloc(&th, inode);
#endif
- err = journal_end(&th, inode->i_sb, 1);
+ err = journal_end(&th);
/* copy back the error code from journal_begin */
if (!err)
@@ -102,35 +106,38 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
tail_has_to_be_packed(inode)) {
- /* if regular file is released by last holder and it has been
- appended (we append by unformatted node only) or its direct
- item(s) had to be converted, then it may have to be
- indirect2direct converted */
+ /*
+ * if regular file is released by last holder and it has been
+ * appended (we append by unformatted node only) or its direct
+ * item(s) had to be converted, then it may have to be
+ * indirect2direct converted
+ */
err = reiserfs_truncate_file(inode, 0);
}
- out:
+out:
reiserfs_write_unlock(inode->i_sb);
- mutex_unlock(&(REISERFS_I(inode)->tailpack));
+ mutex_unlock(&REISERFS_I(inode)->tailpack);
return err;
}
static int reiserfs_file_open(struct inode *inode, struct file *file)
{
int err = dquot_file_open(inode, file);
+
+ /* somebody might be tailpacking on final close; wait for it */
if (!atomic_inc_not_zero(&REISERFS_I(inode)->openers)) {
- /* somebody might be tailpacking on final close; wait for it */
- mutex_lock(&(REISERFS_I(inode)->tailpack));
+ mutex_lock(&REISERFS_I(inode)->tailpack);
atomic_inc(&REISERFS_I(inode)->openers);
- mutex_unlock(&(REISERFS_I(inode)->tailpack));
+ mutex_unlock(&REISERFS_I(inode)->tailpack);
}
return err;
}
void reiserfs_vfs_truncate_file(struct inode *inode)
{
- mutex_lock(&(REISERFS_I(inode)->tailpack));
+ mutex_lock(&REISERFS_I(inode)->tailpack);
reiserfs_truncate_file(inode, 1);
- mutex_unlock(&(REISERFS_I(inode)->tailpack));
+ mutex_unlock(&REISERFS_I(inode)->tailpack);
}
/* Sync a reiserfs file. */
@@ -205,10 +212,11 @@ int reiserfs_commit_page(struct inode *inode, struct page *page,
set_buffer_uptodate(bh);
if (logit) {
reiserfs_prepare_for_journal(s, bh, 1);
- journal_mark_dirty(&th, s, bh);
+ journal_mark_dirty(&th, bh);
} else if (!buffer_dirty(bh)) {
mark_buffer_dirty(bh);
- /* do data=ordered on any page past the end
+ /*
+ * do data=ordered on any page past the end
* of file and any buffer marked BH_New.
*/
if (reiserfs_data_ordered(inode->i_sb) &&
@@ -219,8 +227,8 @@ int reiserfs_commit_page(struct inode *inode, struct page *page,
}
}
if (logit) {
- ret = journal_end(&th, s, bh_per_page + 1);
- drop_write_lock:
+ ret = journal_end(&th);
+drop_write_lock:
reiserfs_write_unlock(s);
}
/*
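
The openers/tailpack handshake in reiserfs_file_open() and
reiserfs_file_release() above is a small pattern worth spelling out: the
open fast path succeeds only while the counter is non-zero, and the final
close packs the tail under the mutex that openers must take whenever the
fast path fails. A rough userspace analogue (pthreads plus C11 atomics;
names invented, error handling and the kernel's fast-path decrement
omitted):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int openers = 1;  /* pretend one opener already exists */
    static pthread_mutex_t tailpack = PTHREAD_MUTEX_INITIALIZER;

    /* Increment only if non-zero; mirrors atomic_inc_not_zero(). */
    static int inc_not_zero(atomic_int *v)
    {
            int cur = atomic_load(v);

            while (cur != 0)
                    if (atomic_compare_exchange_weak(v, &cur, cur + 1))
                            return 1;
            return 0;
    }

    static void file_open(void)
    {
            if (!inc_not_zero(&openers)) {
                    /* someone may be tail-packing on final close; wait */
                    pthread_mutex_lock(&tailpack);
                    atomic_fetch_add(&openers, 1);
                    pthread_mutex_unlock(&tailpack);
            }
    }

    static void file_release(void)
    {
            pthread_mutex_lock(&tailpack);
            if (atomic_fetch_sub(&openers, 1) == 1)
                    printf("last closer: pack the tail here\n");
            pthread_mutex_unlock(&tailpack);
    }

    int main(void)
    {
            file_open();
            file_release();
            file_release();  /* drops the initial reference; packs tail */
            return 0;
    }
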
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index dc4d4153031..6b0ddb2a909 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -2,59 +2,32 @@
* Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
*/
-/**
- ** old_item_num
- ** old_entry_num
- ** set_entry_sizes
- ** create_virtual_node
- ** check_left
- ** check_right
- ** directory_part_size
- ** get_num_ver
- ** set_parameters
- ** is_leaf_removable
- ** are_leaves_removable
- ** get_empty_nodes
- ** get_lfree
- ** get_rfree
- ** is_left_neighbor_in_cache
- ** decrement_key
- ** get_far_parent
- ** get_parents
- ** can_node_be_removed
- ** ip_check_balance
- ** dc_check_balance_internal
- ** dc_check_balance_leaf
- ** dc_check_balance
- ** check_balance
- ** get_direct_parent
- ** get_neighbors
- ** fix_nodes
- **
- **
- **/
-
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "reiserfs.h"
#include <linux/buffer_head.h>
-/* To make any changes in the tree we find a node, that contains item
- to be changed/deleted or position in the node we insert a new item
- to. We call this node S. To do balancing we need to decide what we
- will shift to left/right neighbor, or to a new node, where new item
- will be etc. To make this analysis simpler we build virtual
- node. Virtual node is an array of items, that will replace items of
- node S. (For instance if we are going to delete an item, virtual
- node does not contain it). Virtual node keeps information about
- item sizes and types, mergeability of first and last items, sizes
- of all entries in directory item. We use this array of items when
- calculating what we can shift to neighbors and how many nodes we
- have to have if we do not any shiftings, if we shift to left/right
- neighbor or to both. */
-
-/* taking item number in virtual node, returns number of item, that it has in source buffer */
+/*
+ * To make any changes in the tree we find a node that contains item
+ * to be changed/deleted or position in the node we insert a new item
+ * to. We call this node S. To do balancing we need to decide what we
+ * will shift to left/right neighbor, or to a new node, where new item
+ * will be etc. To make this analysis simpler we build virtual
+ * node. Virtual node is an array of items, that will replace items of
+ * node S. (For instance if we are going to delete an item, virtual
+ * node does not contain it). Virtual node keeps information about
+ * item sizes and types, mergeability of first and last items, sizes
+ * of all entries in directory item. We use this array of items when
+ * calculating what we can shift to neighbors and how many nodes we
+ * need if we do no shifting at all, if we shift to the left/right
+ * neighbor, or to both.
+ */
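
A minimal picture of the virtual node idea (toy types only; the kernel's
virtual_node/virtual_item also carry mergeability flags, per-entry sizes
and the item operations vector):

    #include <stdio.h>

    /* Toy virtual item: just what the balancing arithmetic needs. */
    struct vitem {
            int size;       /* header plus body, in bytes */
    };

    /*
     * Build the virtual array for a deletion: the node's items minus
     * the victim. Shift calculations then run on this array instead
     * of the real buffer.
     */
    static int build_virtual(const struct vitem *src, int nr,
                             int delete_pos, struct vitem *dst)
    {
            int i, n = 0;

            for (i = 0; i < nr; i++)
                    if (i != delete_pos)
                            dst[n++] = src[i];
            return n;
    }

    int main(void)
    {
            struct vitem node[3] = { { 100 }, { 200 }, { 50 } };
            struct vitem vn[3];

            printf("virtual node has %d item(s)\n",
                   build_virtual(node, 3, 1, vn));
            return 0;
    }
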
+
+/*
+ * Takes item number in virtual node, returns number of item
+ * that it has in source buffer
+ */
static inline int old_item_num(int new_num, int affected_item_num, int mode)
{
if (mode == M_PASTE || mode == M_CUT || new_num < affected_item_num)
@@ -105,14 +78,17 @@ static void create_virtual_node(struct tree_balance *tb, int h)
vn->vn_free_ptr += vn->vn_nr_item * sizeof(struct virtual_item);
/* first item in the node */
- ih = B_N_PITEM_HEAD(Sh, 0);
+ ih = item_head(Sh, 0);
/* define the mergeability for 0-th item (if it is not being deleted) */
- if (op_is_left_mergeable(&(ih->ih_key), Sh->b_size)
+ if (op_is_left_mergeable(&ih->ih_key, Sh->b_size)
&& (vn->vn_mode != M_DELETE || vn->vn_affected_item_num))
vn->vn_vi[0].vi_type |= VI_TYPE_LEFT_MERGEABLE;
- /* go through all items those remain in the virtual node (except for the new (inserted) one) */
+ /*
+ * go through all items that remain in the virtual
+ * node (except for the new (inserted) one)
+ */
for (new_num = 0; new_num < vn->vn_nr_item; new_num++) {
int j;
struct virtual_item *vi = vn->vn_vi + new_num;
@@ -128,11 +104,13 @@ static void create_virtual_node(struct tree_balance *tb, int h)
vi->vi_item_len += ih_item_len(ih + j) + IH_SIZE;
vi->vi_ih = ih + j;
- vi->vi_item = B_I_PITEM(Sh, ih + j);
+ vi->vi_item = ih_item_body(Sh, ih + j);
vi->vi_uarea = vn->vn_free_ptr;
- // FIXME: there is no check, that item operation did not
- // consume too much memory
+ /*
+ * FIXME: there is no check that item operation did not
+ * consume too much memory
+ */
vn->vn_free_ptr +=
op_create_vi(vn, vi, is_affected, tb->insert_size[0]);
if (tb->vn_buf + tb->vn_buf_size < vn->vn_free_ptr)
@@ -145,7 +123,8 @@ static void create_virtual_node(struct tree_balance *tb, int h)
if (vn->vn_mode == M_PASTE || vn->vn_mode == M_CUT) {
vn->vn_vi[new_num].vi_item_len += tb->insert_size[0];
- vi->vi_new_data = vn->vn_data; // pointer to data which is going to be pasted
+ /* pointer to data which is going to be pasted */
+ vi->vi_new_data = vn->vn_data;
}
}
@@ -164,11 +143,14 @@ static void create_virtual_node(struct tree_balance *tb, int h)
tb->insert_size[0]);
}
- /* set right merge flag we take right delimiting key and check whether it is a mergeable item */
+ /*
+ * set right merge flag: we take the right delimiting key
+ * and check whether it is a mergeable item
+ */
if (tb->CFR[0]) {
struct reiserfs_key *key;
- key = B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]);
+ key = internal_key(tb->CFR[0], tb->rkey[0]);
if (op_is_left_mergeable(key, Sh->b_size)
&& (vn->vn_mode != M_DELETE
|| vn->vn_affected_item_num != B_NR_ITEMS(Sh) - 1))
@@ -179,12 +161,19 @@ static void create_virtual_node(struct tree_balance *tb, int h)
if (op_is_left_mergeable(key, Sh->b_size) &&
!(vn->vn_mode != M_DELETE
|| vn->vn_affected_item_num != B_NR_ITEMS(Sh) - 1)) {
- /* we delete last item and it could be merged with right neighbor's first item */
+ /*
+ * we delete last item and it could be merged
+ * with right neighbor's first item
+ */
if (!
(B_NR_ITEMS(Sh) == 1
- && is_direntry_le_ih(B_N_PITEM_HEAD(Sh, 0))
- && I_ENTRY_COUNT(B_N_PITEM_HEAD(Sh, 0)) == 1)) {
- /* node contains more than 1 item, or item is not directory item, or this item contains more than 1 entry */
+ && is_direntry_le_ih(item_head(Sh, 0))
+ && ih_entry_count(item_head(Sh, 0)) == 1)) {
+ /*
+ * node contains more than 1 item, or item
+ * is not directory item, or this item
+ * contains more than 1 entry
+ */
print_block(Sh, 0, -1, -1);
reiserfs_panic(tb->tb_sb, "vs-8045",
"rdkey %k, affected item==%d "
@@ -198,8 +187,10 @@ static void create_virtual_node(struct tree_balance *tb, int h)
}
}
-/* using virtual node check, how many items can be shifted to left
- neighbor */
+/*
+ * Using virtual node check, how many items can be
+ * shifted to left neighbor
+ */
static void check_left(struct tree_balance *tb, int h, int cur_free)
{
int i;
@@ -259,9 +250,13 @@ static void check_left(struct tree_balance *tb, int h, int cur_free)
}
/* the item cannot be shifted entirely, try to split it */
- /* check whether L[0] can hold ih and at least one byte of the item body */
+ /*
+ * check whether L[0] can hold ih and at least one byte
+ * of the item body
+ */
+
+ /* cannot shift even a part of the current item */
if (cur_free <= ih_size) {
- /* cannot shift even a part of the current item */
tb->lbytes = -1;
return;
}
@@ -278,8 +273,10 @@ static void check_left(struct tree_balance *tb, int h, int cur_free)
return;
}
-/* using virtual node check, how many items can be shifted to right
- neighbor */
+/*
+ * Using virtual node check, how many items can be
+ * shifted to right neighbor
+ */
static void check_right(struct tree_balance *tb, int h, int cur_free)
{
int i;
@@ -338,13 +335,21 @@ static void check_right(struct tree_balance *tb, int h, int cur_free)
continue;
}
- /* check whether R[0] can hold ih and at least one byte of the item body */
- if (cur_free <= ih_size) { /* cannot shift even a part of the current item */
+ /*
+ * check whether R[0] can hold ih and at least one
+ * byte of the item body
+ */
+
+ /* cannot shift even a part of the current item */
+ if (cur_free <= ih_size) {
tb->rbytes = -1;
return;
}
- /* R[0] can hold the header of the item and at least one byte of its body */
+ /*
+ * R[0] can hold the header of the item and at least
+ * one byte of its body
+ */
cur_free -= ih_size; /* cur_free is still > 0 */
tb->rbytes = op_check_right(vi, cur_free);
@@ -361,45 +366,64 @@ static void check_right(struct tree_balance *tb, int h, int cur_free)
/*
* from - number of items, which are shifted to left neighbor entirely
* to - number of items, which are shifted to right neighbor entirely
- * from_bytes - number of bytes of boundary item (or directory entries) which are shifted to left neighbor
- * to_bytes - number of bytes of boundary item (or directory entries) which are shifted to right neighbor */
+ * from_bytes - number of bytes of boundary item (or directory entries)
+ * which are shifted to left neighbor
+ * to_bytes - number of bytes of boundary item (or directory entries)
+ * which are shifted to right neighbor
+ */
static int get_num_ver(int mode, struct tree_balance *tb, int h,
int from, int from_bytes,
int to, int to_bytes, short *snum012, int flow)
{
int i;
int cur_free;
- // int bytes;
int units;
struct virtual_node *vn = tb->tb_vn;
- // struct virtual_item * vi;
-
int total_node_size, max_node_size, current_item_size;
int needed_nodes;
- int start_item, /* position of item we start filling node from */
- end_item, /* position of item we finish filling node by */
- start_bytes, /* number of first bytes (entries for directory) of start_item-th item
- we do not include into node that is being filled */
- end_bytes; /* number of last bytes (entries for directory) of end_item-th item
- we do node include into node that is being filled */
- int split_item_positions[2]; /* these are positions in virtual item of
- items, that are split between S[0] and
- S1new and S1new and S2new */
+
+ /* position of item we start filling node from */
+ int start_item;
+
+ /* position of item we finish filling node by */
+ int end_item;
+
+ /*
+ * number of first bytes (entries for directory) of start_item-th item
+ * we do not include into node that is being filled
+ */
+ int start_bytes;
+
+ /*
+ * number of last bytes (entries for directory) of end_item-th item
+ * we do not include into the node that is being filled
+ */
+ int end_bytes;
+
+ /*
+ * these are positions in virtual item of items, that are split
+ * between S[0] and S1new and S1new and S2new
+ */
+ int split_item_positions[2];
split_item_positions[0] = -1;
split_item_positions[1] = -1;
- /* We only create additional nodes if we are in insert or paste mode
- or we are in replace mode at the internal level. If h is 0 and
- the mode is M_REPLACE then in fix_nodes we change the mode to
- paste or insert before we get here in the code. */
+ /*
+ * We only create additional nodes if we are in insert or paste mode
+ * or we are in replace mode at the internal level. If h is 0 and
+ * the mode is M_REPLACE then in fix_nodes we change the mode to
+ * paste or insert before we get here in the code.
+ */
RFALSE(tb->insert_size[h] < 0 || (mode != M_INSERT && mode != M_PASTE),
"vs-8100: insert_size < 0 in overflow");
max_node_size = MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, h));
- /* snum012 [0-2] - number of items, that lay
- to S[0], first new node and second new node */
+ /*
+ * snum012 [0-2] - number of items, that lay
+ * to S[0], first new node and second new node
+ */
snum012[3] = -1; /* s1bytes */
snum012[4] = -1; /* s2bytes */
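
Stripped of item splitting (the flow case) and of the partial head/tail
bytes, the node counting performed below is a greedy fill; a standalone
sketch with invented sizes:

    #include <stdio.h>

    /* How many nodes of capacity max do these whole items need? */
    static int nodes_needed(const int *item_size, int nr, int max)
    {
            int i, used = 0, nodes = 1;

            for (i = 0; i < nr; i++) {
                    if (used + item_size[i] > max) {
                            nodes++;        /* take a new node, retry item */
                            used = 0;
                    }
                    used += item_size[i];
            }
            return nodes;
    }

    int main(void)
    {
            int sizes[] = { 3000, 3000, 1000, 500 };

            /* 4096-byte nodes: {3000}, {3000, 1000}, {500} -> 3 */
            printf("%d node(s)\n", nodes_needed(sizes, 4, 4096));
            return 0;
    }
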
@@ -416,20 +440,22 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
total_node_size = 0;
cur_free = max_node_size;
- // start from 'from'-th item
+ /* start from 'from'-th item */
start_item = from;
- // skip its first 'start_bytes' units
+ /* skip its first 'start_bytes' units */
start_bytes = ((from_bytes != -1) ? from_bytes : 0);
- // last included item is the 'end_item'-th one
+ /* last included item is the 'end_item'-th one */
end_item = vn->vn_nr_item - to - 1;
- // do not count last 'end_bytes' units of 'end_item'-th item
+ /* do not count last 'end_bytes' units of 'end_item'-th item */
end_bytes = (to_bytes != -1) ? to_bytes : 0;
- /* go through all item beginning from the start_item-th item and ending by
- the end_item-th item. Do not count first 'start_bytes' units of
- 'start_item'-th item and last 'end_bytes' of 'end_item'-th item */
-
+ /*
+ * go through all items beginning with the start_item-th item
+ * and ending with the end_item-th item. Do not count first
+ * 'start_bytes' units of 'start_item'-th item and last
+ * 'end_bytes' of 'end_item'-th item
+ */
for (i = start_item; i <= end_item; i++) {
struct virtual_item *vi = vn->vn_vi + i;
int skip_from_end = ((i == end_item) ? end_bytes : 0);
@@ -439,7 +465,10 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
/* get size of current item */
current_item_size = vi->vi_item_len;
- /* do not take in calculation head part (from_bytes) of from-th item */
+ /*
+ * do not take in calculation head part (from_bytes)
+ * of from-th item
+ */
current_item_size -=
op_part_size(vi, 0 /*from start */ , start_bytes);
@@ -455,9 +484,11 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
continue;
}
+ /*
+ * virtual item length is longer than the max size of an item
+ * in a node. This is impossible for a direct item
+ */
if (current_item_size > max_node_size) {
- /* virtual item length is longer, than max size of item in
- a node. It is impossible for direct item */
RFALSE(is_direct_le_ih(vi->vi_ih),
"vs-8110: "
"direct item length is %d. It can not be longer than %d",
@@ -466,15 +497,18 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
flow = 1;
}
+ /* as we do not split items, take new node and continue */
if (!flow) {
- /* as we do not split items, take new node and continue */
needed_nodes++;
i--;
total_node_size = 0;
continue;
}
- // calculate number of item units which fit into node being
- // filled
+
+ /*
+ * calculate number of item units which fit into node being
+ * filled
+ */
{
int free_space;
@@ -482,17 +516,17 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
units =
op_check_left(vi, free_space, start_bytes,
skip_from_end);
+ /*
+ * nothing fits into current node, take new
+ * node and continue
+ */
if (units == -1) {
- /* nothing fits into current node, take new node and continue */
needed_nodes++, i--, total_node_size = 0;
continue;
}
}
/* something fits into the current node */
- //if (snum012[3] != -1 || needed_nodes != 1)
- // reiserfs_panic (tb->tb_sb, "vs-8115: get_num_ver: too many nodes required");
- //snum012[needed_nodes - 1 + 3] = op_unit_num (vi) - start_bytes - units;
start_bytes += units;
snum012[needed_nodes - 1 + 3] = units;
@@ -508,9 +542,11 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
total_node_size = 0;
}
- // sum012[4] (if it is not -1) contains number of units of which
- // are to be in S1new, snum012[3] - to be in S0. They are supposed
- // to be S1bytes and S2bytes correspondingly, so recalculate
+ /*
+ * snum012[4] (if it is not -1) contains the number of units
+ * which are to be in S1new, snum012[3] - those to be in S0. They
+ * are supposed to be S1bytes and S2bytes respectively, so recalculate
+ */
if (snum012[4] > 0) {
int split_item_num;
int bytes_to_r, bytes_to_l;
@@ -527,7 +563,7 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
((split_item_positions[0] ==
split_item_positions[1]) ? snum012[3] : 0);
- // s2bytes
+ /* s2bytes */
snum012[4] =
op_unit_num(&vn->vn_vi[split_item_num]) - snum012[4] -
bytes_to_r - bytes_to_l - bytes_to_S1new;
@@ -555,7 +591,7 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
((split_item_positions[0] == split_item_positions[1]
&& snum012[4] != -1) ? snum012[4] : 0);
- // s1bytes
+ /* s1bytes */
snum012[3] =
op_unit_num(&vn->vn_vi[split_item_num]) - snum012[3] -
bytes_to_r - bytes_to_l - bytes_to_S2new;
@@ -565,7 +601,8 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
}
-/* Set parameters for balancing.
+/*
+ * Set parameters for balancing.
* Performs write of results of analysis of balancing into structure tb,
* where it will later be used by the functions that actually do the balancing.
* Parameters:
@@ -575,11 +612,12 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
* rnum number of items from S[h] that must be shifted to R[h];
* blk_num number of blocks that S[h] will be split into;
* s012 number of items that fall into splitted nodes.
- * lbytes number of bytes which flow to the left neighbor from the item that is not
- * not shifted entirely
- * rbytes number of bytes which flow to the right neighbor from the item that is not
- * not shifted entirely
- * s1bytes number of bytes which flow to the first new node when S[0] splits (this number is contained in s012 array)
+ * lbytes number of bytes which flow to the left neighbor from the
+ * item that is not shifted entirely
+ * rbytes number of bytes which flow to the right neighbor from the
+ * item that is not shifted entirely
+ * s1bytes number of bytes which flow to the first new node when
+ * S[0] splits (this number is contained in s012 array)
*/
static void set_parameters(struct tree_balance *tb, int h, int lnum,
@@ -590,12 +628,14 @@ static void set_parameters(struct tree_balance *tb, int h, int lnum,
tb->rnum[h] = rnum;
tb->blknum[h] = blk_num;
- if (h == 0) { /* only for leaf level */
+ /* only for leaf level */
+ if (h == 0) {
if (s012 != NULL) {
- tb->s0num = *s012++,
- tb->s1num = *s012++, tb->s2num = *s012++;
- tb->s1bytes = *s012++;
- tb->s2bytes = *s012;
+ tb->s0num = *s012++;
+ tb->snum[0] = *s012++;
+ tb->snum[1] = *s012++;
+ tb->sbytes[0] = *s012++;
+ tb->sbytes[1] = *s012;
}
tb->lbytes = lb;
tb->rbytes = rb;
@@ -607,8 +647,10 @@ static void set_parameters(struct tree_balance *tb, int h, int lnum,
PROC_INFO_ADD(tb->tb_sb, rbytes[h], rb);
}
-/* check, does node disappear if we shift tb->lnum[0] items to left
- neighbor and tb->rnum[0] to the right one. */
+/*
+ * check if node disappears if we shift tb->lnum[0] items to left
+ * neighbor and tb->rnum[0] to the right one.
+ */
static int is_leaf_removable(struct tree_balance *tb)
{
struct virtual_node *vn = tb->tb_vn;
@@ -616,8 +658,10 @@ static int is_leaf_removable(struct tree_balance *tb)
int size;
int remain_items;
- /* number of items, that will be shifted to left (right) neighbor
- entirely */
+ /*
+ * number of items that will be shifted to left (right) neighbor
+ * entirely
+ */
to_left = tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0);
to_right = tb->rnum[0] - ((tb->rbytes != -1) ? 1 : 0);
remain_items = vn->vn_nr_item;
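
Condensed, the removability test below reads like this standalone sketch
(lbytes/rbytes == -1 meaning no partial item flows in that direction):

    #include <stdio.h>

    /*
     * Can the leaf be emptied by shifting lnum items left (one of them
     * partially, lbytes of it) and rnum items right (rbytes partially)?
     */
    static int leaf_removable(int nr_items, int lnum, int lbytes,
                              int rnum, int rbytes, int middle_size)
    {
            int to_left = lnum - (lbytes != -1 ? 1 : 0);
            int to_right = rnum - (rbytes != -1 ? 1 : 0);
            int remain = nr_items - to_left - to_right;

            if (remain < 1)         /* everything moves as whole items */
                    return 1;
            if (remain > 1 || lbytes == -1 || rbytes == -1)
                    return 0;
            /* one item left: split it if both sides can absorb it */
            return lbytes + rbytes >= middle_size;
    }

    int main(void)
    {
            printf("%d\n", leaf_removable(5, 3, -1, 2, -1, 0));   /* 1 */
            printf("%d\n", leaf_removable(5, 3, 40, 3, 60, 100)); /* 1 */
            printf("%d\n", leaf_removable(5, 3, 40, 3, 50, 100)); /* 0 */
            return 0;
    }
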
@@ -625,21 +669,21 @@ static int is_leaf_removable(struct tree_balance *tb)
/* how many items remain in S[0] after shiftings to neighbors */
remain_items -= (to_left + to_right);
+ /* all content of node can be shifted to neighbors */
if (remain_items < 1) {
- /* all content of node can be shifted to neighbors */
set_parameters(tb, 0, to_left, vn->vn_nr_item - to_left, 0,
NULL, -1, -1);
return 1;
}
+ /* S[0] is not removable */
if (remain_items > 1 || tb->lbytes == -1 || tb->rbytes == -1)
- /* S[0] is not removable */
return 0;
- /* check, whether we can divide 1 remaining item between neighbors */
+ /* check whether we can divide 1 remaining item between neighbors */
/* get size of remaining item (in item units) */
- size = op_unit_num(&(vn->vn_vi[to_left]));
+ size = op_unit_num(&vn->vn_vi[to_left]);
if (tb->lbytes + tb->rbytes >= size) {
set_parameters(tb, 0, to_left + 1, to_right + 1, 0, NULL,
@@ -675,23 +719,28 @@ static int are_leaves_removable(struct tree_balance *tb, int lfree, int rfree)
"vs-8125: item number must be 1: it is %d",
B_NR_ITEMS(S0));
- ih = B_N_PITEM_HEAD(S0, 0);
+ ih = item_head(S0, 0);
if (tb->CFR[0]
- && !comp_short_le_keys(&(ih->ih_key),
- B_N_PDELIM_KEY(tb->CFR[0],
+ && !comp_short_le_keys(&ih->ih_key,
+ internal_key(tb->CFR[0],
tb->rkey[0])))
+ /*
+ * Directory must be in a correct state here: that is,
+ * somewhere on the left side the first directory item
+ * should exist. But the item being deleted can not be
+ * that first one, because its right neighbor is an item
+ * of the same directory. (The first item always gets
+ * deleted last.) So, neighbors of the deleted item can
+ * be merged, and we can save ih_size
+ */
if (is_direntry_le_ih(ih)) {
- /* Directory must be in correct state here: that is
- somewhere at the left side should exist first directory
- item. But the item being deleted can not be that first
- one because its right neighbor is item of the same
- directory. (But first item always gets deleted in last
- turn). So, neighbors of deleted item can be merged, so
- we can save ih_size */
ih_size = IH_SIZE;
- /* we might check that left neighbor exists and is of the
- same directory */
+ /*
+ * we might check that left neighbor exists
+ * and is of the same directory
+ */
RFALSE(le_ih_k_offset(ih) == DOT_OFFSET,
"vs-8130: first directory item can not be removed until directory is not empty");
}
@@ -770,7 +819,8 @@ static void free_buffers_in_tb(struct tree_balance *tb)
}
}
-/* Get new buffers for storing new nodes that are created while balancing.
+/*
+ * Get new buffers for storing new nodes that are created while balancing.
* Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked;
* CARRY_ON - schedule didn't occur while the function worked;
* NO_DISK_SPACE - no disk space.
@@ -778,28 +828,33 @@ static void free_buffers_in_tb(struct tree_balance *tb)
/* The function is NOT SCHEDULE-SAFE! */
static int get_empty_nodes(struct tree_balance *tb, int h)
{
- struct buffer_head *new_bh,
- *Sh = PATH_H_PBUFFER(tb->tb_path, h);
+ struct buffer_head *new_bh, *Sh = PATH_H_PBUFFER(tb->tb_path, h);
b_blocknr_t *blocknr, blocknrs[MAX_AMOUNT_NEEDED] = { 0, };
- int counter, number_of_freeblk, amount_needed, /* number of needed empty blocks */
- retval = CARRY_ON;
+ int counter, number_of_freeblk;
+ int amount_needed; /* number of needed empty blocks */
+ int retval = CARRY_ON;
struct super_block *sb = tb->tb_sb;
- /* number_of_freeblk is the number of empty blocks which have been
- acquired for use by the balancing algorithm minus the number of
- empty blocks used in the previous levels of the analysis,
- number_of_freeblk = tb->cur_blknum can be non-zero if a schedule occurs
- after empty blocks are acquired, and the balancing analysis is
- then restarted, amount_needed is the number needed by this level
- (h) of the balancing analysis.
-
- Note that for systems with many processes writing, it would be
- more layout optimal to calculate the total number needed by all
- levels and then to run reiserfs_new_blocks to get all of them at once. */
-
- /* Initiate number_of_freeblk to the amount acquired prior to the restart of
- the analysis or 0 if not restarted, then subtract the amount needed
- by all of the levels of the tree below h. */
+ /*
+ * number_of_freeblk is the number of empty blocks which have been
+ * acquired for use by the balancing algorithm minus the number of
+ * empty blocks used in the previous levels of the analysis;
+ * number_of_freeblk = tb->cur_blknum can be non-zero if a schedule
+ * occurs after empty blocks are acquired and the balancing analysis
+ * is then restarted. amount_needed is the number needed by this
+ * level (h) of the balancing analysis.
+ *
+ * Note that for systems with many processes writing, it would be
+ * more layout optimal to calculate the total number needed by all
+ * levels and then to run reiserfs_new_blocks to get all of them at
+ * once.
+ */
+
+ /*
+ * Initiate number_of_freeblk to the amount acquired prior to the
+ * restart of the analysis or 0 if not restarted, then subtract the
+ * amount needed by all of the levels of the tree below h.
+ */
/* blknum includes S[h], so we subtract 1 in this calculation */
for (counter = 0, number_of_freeblk = tb->cur_blknum;
counter < h; counter++)
@@ -810,13 +865,19 @@ static int get_empty_nodes(struct tree_balance *tb, int h)
/* Allocate missing empty blocks. */
/* if Sh == 0 then we are getting a new root */
amount_needed = (Sh) ? (tb->blknum[h] - 1) : 1;
- /* Amount_needed = the amount that we need more than the amount that we have. */
+ /*
+ * Amount_needed = the amount that we need more than the
+ * amount that we have.
+ */
if (amount_needed > number_of_freeblk)
amount_needed -= number_of_freeblk;
- else /* If we have enough already then there is nothing to do. */
+ else /* If we have enough already then there is nothing to do. */
return CARRY_ON;
- /* No need to check quota - is not allocated for blocks used for formatted nodes */
+ /*
+ * No need to check quota - it is not allocated for blocks
+ * used for formatted nodes
+ */
if (reiserfs_new_form_blocknrs(tb, blocknrs,
amount_needed) == NO_DISK_SPACE)
return NO_DISK_SPACE;
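
The bookkeeping above reduces to two subtractions; a sketch with the
quantities named as in the comment (hypothetical numbers):

    #include <stdio.h>

    /*
     * blknum[i] counts the blocks level i splits into (S[i] included),
     * so every level below h consumes blknum[i] - 1 of the empty
     * blocks acquired so far (cur_blknum).
     */
    static int blocks_to_allocate(const int *blknum, int h, int cur_blknum,
                                  int getting_new_root)
    {
            int i, number_of_freeblk = cur_blknum;
            int amount_needed = getting_new_root ? 1 : blknum[h] - 1;

            for (i = 0; i < h; i++)
                    number_of_freeblk -= blknum[i] - 1;
            if (number_of_freeblk < 0)
                    number_of_freeblk = 0;

            /* 0 means carry on: enough blocks are already in hand */
            return amount_needed > number_of_freeblk ?
                    amount_needed - number_of_freeblk : 0;
    }

    int main(void)
    {
            int blknum[] = { 3, 2 };  /* leaf splits in 3, level 1 in 2 */

            /* 2 blocks in hand, leaf level used 2, level 1 needs 1 */
            printf("%d more block(s)\n",
                   blocks_to_allocate(blknum, 1, 2, 0));
            return 0;
    }
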
@@ -849,8 +910,10 @@ static int get_empty_nodes(struct tree_balance *tb, int h)
return retval;
}
-/* Get free space of the left neighbor, which is stored in the parent
- * node of the left neighbor. */
+/*
+ * Get free space of the left neighbor, which is stored in the parent
+ * node of the left neighbor.
+ */
static int get_lfree(struct tree_balance *tb, int h)
{
struct buffer_head *l, *f;
@@ -870,7 +933,8 @@ static int get_lfree(struct tree_balance *tb, int h)
return (MAX_CHILD_SIZE(f) - dc_size(B_N_CHILD(f, order)));
}
-/* Get free space of the right neighbor,
+/*
+ * Get free space of the right neighbor,
* which is stored in the parent node of the right neighbor.
*/
static int get_rfree(struct tree_balance *tb, int h)
@@ -916,7 +980,10 @@ static int is_left_neighbor_in_cache(struct tree_balance *tb, int h)
"vs-8165: F[h] (%b) or FL[h] (%b) is invalid",
father, tb->FL[h]);
- /* Get position of the pointer to the left neighbor into the left father. */
+ /*
+ * Get position of the pointer to the left neighbor
+ * into the left father.
+ */
left_neighbor_position = (father == tb->FL[h]) ?
tb->lkey[h] : B_NR_ITEMS(tb->FL[h]);
/* Get left neighbor block number. */
@@ -940,17 +1007,20 @@ static int is_left_neighbor_in_cache(struct tree_balance *tb, int h)
static void decrement_key(struct cpu_key *key)
{
- // call item specific function for this key
+ /* call item specific function for this key */
item_ops[cpu_key_k_type(key)]->decrement_key(key);
}
-/* Calculate far left/right parent of the left/right neighbor of the current node, that
- * is calculate the left/right (FL[h]/FR[h]) neighbor of the parent F[h].
+/*
+ * Calculate far left/right parent of the left/right neighbor of the
+ * current node, that is calculate the left/right (FL[h]/FR[h]) neighbor
+ * of the parent F[h].
* Calculate left/right common parent of the current node and L[h]/R[h].
* Calculate left/right delimiting key position.
- * Returns: PATH_INCORRECT - path in the tree is not correct;
- SCHEDULE_OCCURRED - schedule occurred while the function worked;
- * CARRY_ON - schedule didn't occur while the function worked;
+ * Returns: PATH_INCORRECT - path in the tree is not correct
+ * SCHEDULE_OCCURRED - schedule occurred while the function worked
+ * CARRY_ON - schedule didn't occur while the function
+ * worked
*/
static int get_far_parent(struct tree_balance *tb,
int h,
@@ -966,8 +1036,10 @@ static int get_far_parent(struct tree_balance *tb,
first_last_position = 0,
path_offset = PATH_H_PATH_OFFSET(path, h);
- /* Starting from F[h] go upwards in the tree, and look for the common
- ancestor of F[h], and its neighbor l/r, that should be obtained. */
+ /*
+ * Starting from F[h] go upwards in the tree, and look for the common
+ * ancestor of F[h], and its neighbor l/r, that should be obtained.
+ */
counter = path_offset;
@@ -975,21 +1047,33 @@ static int get_far_parent(struct tree_balance *tb,
"PAP-8180: invalid path length");
for (; counter > FIRST_PATH_ELEMENT_OFFSET; counter--) {
- /* Check whether parent of the current buffer in the path is really parent in the tree. */
+ /*
+ * Check whether parent of the current buffer in the path
+ * is really parent in the tree.
+ */
if (!B_IS_IN_TREE
(parent = PATH_OFFSET_PBUFFER(path, counter - 1)))
return REPEAT_SEARCH;
+
/* Check whether position in the parent is correct. */
if ((position =
PATH_OFFSET_POSITION(path,
counter - 1)) >
B_NR_ITEMS(parent))
return REPEAT_SEARCH;
- /* Check whether parent at the path really points to the child. */
+
+ /*
+ * Check whether parent at the path really points
+ * to the child.
+ */
if (B_N_CHILD_NUM(parent, position) !=
PATH_OFFSET_PBUFFER(path, counter)->b_blocknr)
return REPEAT_SEARCH;
- /* Return delimiting key if position in the parent is not equal to first/last one. */
+
+ /*
+ * Return delimiting key if position in the parent is not
+ * equal to first/last one.
+ */
if (c_lr_par == RIGHT_PARENTS)
first_last_position = B_NR_ITEMS(parent);
if (position != first_last_position) {
@@ -1002,7 +1086,10 @@ static int get_far_parent(struct tree_balance *tb,
/* if we are in the root of the tree, then there is no common father */
if (counter == FIRST_PATH_ELEMENT_OFFSET) {
- /* Check whether first buffer in the path is the root of the tree. */
+ /*
+ * Check whether first buffer in the path is the
+ * root of the tree.
+ */
if (PATH_OFFSET_PBUFFER
(tb->tb_path,
FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
@@ -1031,12 +1118,15 @@ static int get_far_parent(struct tree_balance *tb,
}
}
- /* So, we got common parent of the current node and its left/right neighbor.
- Now we are geting the parent of the left/right neighbor. */
+ /*
+ * So, we got common parent of the current node and its
+ * left/right neighbor. Now we are getting the parent of the
+ * left/right neighbor.
+ */
/* Form key to get parent of the left/right neighbor. */
le_key2cpu_key(&s_lr_father_key,
- B_N_PDELIM_KEY(*pcom_father,
+ internal_key(*pcom_father,
(c_lr_par ==
LEFT_PARENTS) ? (tb->lkey[h - 1] =
position -
@@ -1050,7 +1140,7 @@ static int get_far_parent(struct tree_balance *tb,
if (search_by_key
(tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father,
h + 1) == IO_ERROR)
- // path is released
+ /* path is released */
return IO_ERROR;
if (FILESYSTEM_CHANGED_TB(tb)) {
@@ -1071,12 +1161,15 @@ static int get_far_parent(struct tree_balance *tb,
return CARRY_ON;
}
-/* Get parents of neighbors of node in the path(S[path_offset]) and common parents of
- * S[path_offset] and L[path_offset]/R[path_offset]: F[path_offset], FL[path_offset],
- * FR[path_offset], CFL[path_offset], CFR[path_offset].
- * Calculate numbers of left and right delimiting keys position: lkey[path_offset], rkey[path_offset].
- * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked;
- * CARRY_ON - schedule didn't occur while the function worked;
+/*
+ * Get parents of neighbors of node in the path(S[path_offset]) and
+ * common parents of S[path_offset] and L[path_offset]/R[path_offset]:
+ * F[path_offset], FL[path_offset], FR[path_offset], CFL[path_offset],
+ * CFR[path_offset].
+ * Calculate numbers of left and right delimiting keys position:
+ * lkey[path_offset], rkey[path_offset].
+ * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked
+ * CARRY_ON - schedule didn't occur while the function worked
*/
static int get_parents(struct tree_balance *tb, int h)
{
@@ -1088,8 +1181,11 @@ static int get_parents(struct tree_balance *tb, int h)
/* Current node is the root of the tree or will be root of the tree */
if (path_offset <= FIRST_PATH_ELEMENT_OFFSET) {
- /* The root can not have parents.
- Release nodes which previously were obtained as parents of the current node neighbors. */
+ /*
+ * The root can not have parents.
+ * Release nodes which previously were obtained as
+ * parents of the current node neighbors.
+ */
brelse(tb->FL[h]);
brelse(tb->CFL[h]);
brelse(tb->FR[h]);
@@ -1111,10 +1207,14 @@ static int get_parents(struct tree_balance *tb, int h)
get_bh(curf);
tb->lkey[h] = position - 1;
} else {
- /* Calculate current parent of L[path_offset], which is the left neighbor of the current node.
- Calculate current common parent of L[path_offset] and the current node. Note that
- CFL[path_offset] not equal FL[path_offset] and CFL[path_offset] not equal F[path_offset].
- Calculate lkey[path_offset]. */
+ /*
+ * Calculate current parent of L[path_offset], which is the
+ * left neighbor of the current node. Calculate current
+ * common parent of L[path_offset] and the current node.
+ * Note that CFL[path_offset] not equal FL[path_offset] and
+ * CFL[path_offset] not equal F[path_offset].
+ * Calculate lkey[path_offset].
+ */
if ((ret = get_far_parent(tb, h + 1, &curf,
&curcf,
LEFT_PARENTS)) != CARRY_ON)
@@ -1130,19 +1230,22 @@ static int get_parents(struct tree_balance *tb, int h)
(curcf && !B_IS_IN_TREE(curcf)),
"PAP-8195: FL (%b) or CFL (%b) is invalid", curf, curcf);
-/* Get parent FR[h] of R[h]. */
+ /* Get parent FR[h] of R[h]. */
-/* Current node is the last child of F[h]. FR[h] != F[h]. */
+ /* Current node is the last child of F[h]. FR[h] != F[h]. */
if (position == B_NR_ITEMS(PATH_H_PBUFFER(path, h + 1))) {
-/* Calculate current parent of R[h], which is the right neighbor of F[h].
- Calculate current common parent of R[h] and current node. Note that CFR[h]
- not equal FR[path_offset] and CFR[h] not equal F[h]. */
+ /*
+ * Calculate current parent of R[h], which is the right
+ * neighbor of F[h]. Calculate current common parent of
+ * R[h] and current node. Note that CFR[h] not equal
+ * FR[path_offset] and CFR[h] not equal F[h].
+ */
if ((ret =
get_far_parent(tb, h + 1, &curf, &curcf,
RIGHT_PARENTS)) != CARRY_ON)
return ret;
} else {
-/* Current node is not the last child of its parent F[h]. */
+ /* Current node is not the last child of its parent F[h]. */
curf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
curcf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
get_bh(curf);
@@ -1165,8 +1268,10 @@ static int get_parents(struct tree_balance *tb, int h)
return CARRY_ON;
}
-/* it is possible to remove node as result of shiftings to
- neighbors even when we insert or paste item. */
+/*
+ * it is possible to remove node as result of shiftings to
+ * neighbors even when we insert or paste item.
+ */
static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree,
struct tree_balance *tb, int h)
{
@@ -1175,21 +1280,22 @@ static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree,
struct item_head *ih;
struct reiserfs_key *r_key = NULL;
- ih = B_N_PITEM_HEAD(Sh, 0);
+ ih = item_head(Sh, 0);
if (tb->CFR[h])
- r_key = B_N_PDELIM_KEY(tb->CFR[h], tb->rkey[h]);
+ r_key = internal_key(tb->CFR[h], tb->rkey[h]);
if (lfree + rfree + sfree < MAX_CHILD_SIZE(Sh) + levbytes
/* shifting may merge items which might save space */
-
((!h
- && op_is_left_mergeable(&(ih->ih_key), Sh->b_size)) ? IH_SIZE : 0)
+ && op_is_left_mergeable(&ih->ih_key, Sh->b_size)) ? IH_SIZE : 0)
-
((!h && r_key
&& op_is_left_mergeable(r_key, Sh->b_size)) ? IH_SIZE : 0)
+ ((h) ? KEY_SIZE : 0)) {
/* node can not be removed */
- if (sfree >= levbytes) { /* new item fits into node S[h] without any shifting */
+ if (sfree >= levbytes) {
+ /* new item fits into node S[h] without any shifting */
if (!h)
tb->s0num =
B_NR_ITEMS(Sh) +
@@ -1202,7 +1308,8 @@ static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree,
return !NO_BALANCING_NEEDED;
}
-/* Check whether current node S[h] is balanced when increasing its size by
+/*
+ * Check whether current node S[h] is balanced when increasing its size by
* Inserting or Pasting.
* Calculate parameters for balancing for current level h.
* Parameters:
@@ -1219,39 +1326,48 @@ static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree,
static int ip_check_balance(struct tree_balance *tb, int h)
{
struct virtual_node *vn = tb->tb_vn;
- int levbytes, /* Number of bytes that must be inserted into (value
- is negative if bytes are deleted) buffer which
- contains node being balanced. The mnemonic is
- that the attempted change in node space used level
- is levbytes bytes. */
- ret;
+ /*
+ * Number of bytes that must be inserted into (value is negative
+ * if bytes are deleted) buffer which contains node being balanced.
+ * The mnemonic is that the attempted change in the node space
+ * used at this level is levbytes bytes.
+ */
+ int levbytes;
+ int ret;
int lfree, sfree, rfree /* free space in L, S and R */ ;
- /* nver is short for number of vertixes, and lnver is the number if
- we shift to the left, rnver is the number if we shift to the
- right, and lrnver is the number if we shift in both directions.
- The goal is to minimize first the number of vertixes, and second,
- the number of vertixes whose contents are changed by shifting,
- and third the number of uncached vertixes whose contents are
- changed by shifting and must be read from disk. */
+ /*
+ * nver is short for number of vertices, and lnver is the number if
+ * we shift to the left, rnver is the number if we shift to the
+ * right, and lrnver is the number if we shift in both directions.
+ * The goal is to minimize first the number of vertices, and second,
+ * the number of vertices whose contents are changed by shifting,
+ * and third the number of uncached vertices whose contents are
+ * changed by shifting and must be read from disk.
+ */
int nver, lnver, rnver, lrnver;
- /* used at leaf level only, S0 = S[0] is the node being balanced,
- sInum [ I = 0,1,2 ] is the number of items that will
- remain in node SI after balancing. S1 and S2 are new
- nodes that might be created. */
+ /*
+ * used at leaf level only, S0 = S[0] is the node being balanced,
+ * sInum [ I = 0,1,2 ] is the number of items that will
+ * remain in node SI after balancing. S1 and S2 are new
+ * nodes that might be created.
+ */
- /* we perform 8 calls to get_num_ver(). For each call we calculate five parameters.
- where 4th parameter is s1bytes and 5th - s2bytes
+ /*
+ * we perform 8 calls to get_num_ver(). For each call we
+ * calculate five parameters. where 4th parameter is s1bytes
+ * and 5th - s2bytes
+ *
+ * s0num, s1num, s2num for 8 cases
+ * 0,1 - do not shift and do not shift but bottle
+ * 2 - shift only whole item to left
+ * 3 - shift to left and bottle as much as possible
+ * 4,5 - shift to right (whole items and as much as possible)
+ * 6,7 - shift to both directions (whole items and as much as possible)
*/
- short snum012[40] = { 0, }; /* s0num, s1num, s2num for 8 cases
- 0,1 - do not shift and do not shift but bottle
- 2 - shift only whole item to left
- 3 - shift to left and bottle as much as possible
- 4,5 - shift to right (whole items and as much as possible
- 6,7 - shift to both directions (whole items and as much as possible)
- */
+ short snum012[40] = { 0, };
/* Sh is the node whose balance is currently being checked */
struct buffer_head *Sh;
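
The 40 shorts are best read as 8 rows of 5, one row per shifting case,
each row holding s0num, s1num, s2num, s1bytes, s2bytes; the row offsets
(0, 5, 10, ...) are the *_SHIFT_* defines further down. A sketch of the
indexing, assuming the remaining offsets continue in steps of five:

    #include <stdio.h>

    /* Row offsets into snum012[], 5 slots apart. */
    enum snum_case {
            NOTHING_SHIFT_NO_FLOW   = 0,
            NOTHING_SHIFT_FLOW      = 5,
            LEFT_SHIFT_NO_FLOW      = 10,
            /* ... eight cases in total */
    };

    /* Each row: s0num, s1num, s2num, s1bytes, s2bytes. */
    static short *case_row(short *snum012, enum snum_case c)
    {
            return snum012 + c;
    }

    int main(void)
    {
            short snum012[40] = { 0 };
            short *row = case_row(snum012, LEFT_SHIFT_NO_FLOW);

            row[0] = 7;     /* s0num: items staying in S[0] */
            row[3] = -1;    /* s1bytes: no partial item flows to S1 */
            printf("s0num=%d s1bytes=%d\n", row[0], row[3]);
            return 0;
    }
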
@@ -1265,9 +1381,10 @@ static int ip_check_balance(struct tree_balance *tb, int h)
reiserfs_panic(tb->tb_sb, "vs-8210",
"S[0] can not be 0");
switch (ret = get_empty_nodes(tb, h)) {
+ /* no balancing for higher levels needed */
case CARRY_ON:
set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
- return NO_BALANCING_NEEDED; /* no balancing for higher levels needed */
+ return NO_BALANCING_NEEDED;
case NO_DISK_SPACE:
case REPEAT_SEARCH:
@@ -1278,7 +1395,9 @@ static int ip_check_balance(struct tree_balance *tb, int h)
}
}
- if ((ret = get_parents(tb, h)) != CARRY_ON) /* get parents of S[h] neighbors. */
+ /* get parents of S[h] neighbors. */
+ ret = get_parents(tb, h);
+ if (ret != CARRY_ON)
return ret;
sfree = B_FREE_SPACE(Sh);
@@ -1287,38 +1406,44 @@ static int ip_check_balance(struct tree_balance *tb, int h)
rfree = get_rfree(tb, h);
lfree = get_lfree(tb, h);
+ /* and new item fits into node S[h] without any shifting */
if (can_node_be_removed(vn->vn_mode, lfree, sfree, rfree, tb, h) ==
NO_BALANCING_NEEDED)
- /* and new item fits into node S[h] without any shifting */
return NO_BALANCING_NEEDED;
create_virtual_node(tb, h);
/*
- determine maximal number of items we can shift to the left neighbor (in tb structure)
- and the maximal number of bytes that can flow to the left neighbor
- from the left most liquid item that cannot be shifted from S[0] entirely (returned value)
+ * determine maximal number of items we can shift to the left
+ * neighbor (in tb structure) and the maximal number of bytes
+ * that can flow to the left neighbor from the left most liquid
+ * item that cannot be shifted from S[0] entirely (returned value)
*/
check_left(tb, h, lfree);
/*
- determine maximal number of items we can shift to the right neighbor (in tb structure)
- and the maximal number of bytes that can flow to the right neighbor
- from the right most liquid item that cannot be shifted from S[0] entirely (returned value)
+ * determine maximal number of items we can shift to the right
+ * neighbor (in tb structure) and the maximal number of bytes
+ * that can flow to the right neighbor from the right most liquid
+ * item that cannot be shifted from S[0] entirely (returned value)
*/
check_right(tb, h, rfree);
- /* all contents of internal node S[h] can be moved into its
- neighbors, S[h] will be removed after balancing */
+ /*
+ * all contents of internal node S[h] can be moved into its
+ * neighbors, S[h] will be removed after balancing
+ */
if (h && (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)) {
int to_r;
- /* Since we are working on internal nodes, and our internal
- nodes have fixed size entries, then we can balance by the
- number of items rather than the space they consume. In this
- routine we set the left node equal to the right node,
- allowing a difference of less than or equal to 1 child
- pointer. */
+ /*
+ * Since we are working on internal nodes, and our internal
+ * nodes have fixed size entries, then we can balance by the
+ * number of items rather than the space they consume. In this
+ * routine we set the left node equal to the right node,
+ * allowing a difference of less than or equal to 1 child
+ * pointer.
+ */
to_r =
((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] +
vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 -
@@ -1328,7 +1453,10 @@ static int ip_check_balance(struct tree_balance *tb, int h)
return CARRY_ON;
}
- /* this checks balance condition, that any two neighboring nodes can not fit in one node */
+ /*
+ * this checks balance condition, that any two neighboring nodes
+ * can not fit in one node
+ */
RFALSE(h &&
(tb->lnum[h] >= vn->vn_nr_item + 1 ||
tb->rnum[h] >= vn->vn_nr_item + 1),
@@ -1337,16 +1465,22 @@ static int ip_check_balance(struct tree_balance *tb, int h)
(tb->rnum[h] >= vn->vn_nr_item && (tb->rbytes == -1))),
"vs-8225: tree is not balanced on leaf level");
- /* all contents of S[0] can be moved into its neighbors
- S[0] will be removed after balancing. */
+ /*
+ * all contents of S[0] can be moved into its neighbors
+ * S[0] will be removed after balancing.
+ */
if (!h && is_leaf_removable(tb))
return CARRY_ON;
- /* why do we perform this check here rather than earlier??
- Answer: we can win 1 node in some cases above. Moreover we
- checked it above, when we checked, that S[0] is not removable
- in principle */
- if (sfree >= levbytes) { /* new item fits into node S[h] without any shifting */
+ /*
+ * why do we perform this check here rather than earlier??
+ * Answer: we can win 1 node in some cases above. Moreover we
+ * checked it above, when we checked, that S[0] is not removable
+ * in principle
+ */
+
+ /* new item fits into node S[h] without any shifting */
+ if (sfree >= levbytes) {
if (!h)
tb->s0num = vn->vn_nr_item;
set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
@@ -1355,18 +1489,19 @@ static int ip_check_balance(struct tree_balance *tb, int h)
{
int lpar, rpar, nset, lset, rset, lrset;
- /*
- * regular overflowing of the node
- */
+ /* regular overflowing of the node */
- /* get_num_ver works in 2 modes (FLOW & NO_FLOW)
- lpar, rpar - number of items we can shift to left/right neighbor (including splitting item)
- nset, lset, rset, lrset - shows, whether flowing items give better packing
+ /*
+ * get_num_ver works in 2 modes (FLOW & NO_FLOW)
+ * lpar, rpar - number of items we can shift to left/right
+ * neighbor (including splitting item)
+ * nset, lset, rset, lrset - shows, whether flowing items
+ * give better packing
*/
#define FLOW 1
#define NO_FLOW 0 /* do no splitting */
- /* we choose one the following */
+ /* we choose one of the following */
#define NOTHING_SHIFT_NO_FLOW 0
#define NOTHING_SHIFT_FLOW 5
#define LEFT_SHIFT_NO_FLOW 10
@@ -1379,10 +1514,13 @@ static int ip_check_balance(struct tree_balance *tb, int h)
lpar = tb->lnum[h];
rpar = tb->rnum[h];
- /* calculate number of blocks S[h] must be split into when
- nothing is shifted to the neighbors,
- as well as number of items in each part of the split node (s012 numbers),
- and number of bytes (s1bytes) of the shared drop which flow to S1 if any */
+ /*
+ * calculate number of blocks S[h] must be split into when
+ * nothing is shifted to the neighbors, as well as number of
+ * items in each part of the split node (s012 numbers),
+ * and number of bytes (s1bytes) of the shared drop which
+ * flow to S1 if any
+ */
nset = NOTHING_SHIFT_NO_FLOW;
nver = get_num_ver(vn->vn_mode, tb, h,
0, -1, h ? vn->vn_nr_item : 0, -1,
@@ -1391,7 +1529,10 @@ static int ip_check_balance(struct tree_balance *tb, int h)
if (!h) {
int nver1;
- /* note, that in this case we try to bottle between S[0] and S1 (S1 - the first new node) */
+ /*
+ * note, that in this case we try to bottle
+ * between S[0] and S1 (S1 - the first new node)
+ */
nver1 = get_num_ver(vn->vn_mode, tb, h,
0, -1, 0, -1,
snum012 + NOTHING_SHIFT_FLOW, FLOW);
@@ -1399,11 +1540,13 @@ static int ip_check_balance(struct tree_balance *tb, int h)
nset = NOTHING_SHIFT_FLOW, nver = nver1;
}
- /* calculate number of blocks S[h] must be split into when
- l_shift_num first items and l_shift_bytes of the right most
- liquid item to be shifted are shifted to the left neighbor,
- as well as number of items in each part of the splitted node (s012 numbers),
- and number of bytes (s1bytes) of the shared drop which flow to S1 if any
+ /*
+ * calculate number of blocks S[h] must be split into when
+ * l_shift_num first items and l_shift_bytes of the right
+ * most liquid item to be shifted are shifted to the left
+ * neighbor, as well as number of items in each part of the
+ * split node (s012 numbers), and number of bytes
+ * (s1bytes) of the shared drop which flow to S1 if any
*/
lset = LEFT_SHIFT_NO_FLOW;
lnver = get_num_ver(vn->vn_mode, tb, h,
@@ -1422,11 +1565,13 @@ static int ip_check_balance(struct tree_balance *tb, int h)
lset = LEFT_SHIFT_FLOW, lnver = lnver1;
}
- /* calculate number of blocks S[h] must be split into when
- r_shift_num first items and r_shift_bytes of the left most
- liquid item to be shifted are shifted to the right neighbor,
- as well as number of items in each part of the splitted node (s012 numbers),
- and number of bytes (s1bytes) of the shared drop which flow to S1 if any
+ /*
+ * calculate number of blocks S[h] must be split into when
+ * r_shift_num first items and r_shift_bytes of the left most
+ * liquid item to be shifted are shifted to the right neighbor,
+ * as well as number of items in each part of the split
+ * node (s012 numbers), and number of bytes (s1bytes) of the
+ * shared drop which flow to S1 if any
*/
rset = RIGHT_SHIFT_NO_FLOW;
rnver = get_num_ver(vn->vn_mode, tb, h,
@@ -1451,10 +1596,12 @@ static int ip_check_balance(struct tree_balance *tb, int h)
rset = RIGHT_SHIFT_FLOW, rnver = rnver1;
}
- /* calculate number of blocks S[h] must be split into when
- items are shifted in both directions,
- as well as number of items in each part of the splitted node (s012 numbers),
- and number of bytes (s1bytes) of the shared drop which flow to S1 if any
+ /*
+ * calculate number of blocks S[h] must be split into when
+ * items are shifted in both directions, as well as number
+	 * of items in each part of the split node (s012 numbers),
+ * and number of bytes (s1bytes) of the shared drop which
+ * flow to S1 if any
*/
lrset = LR_SHIFT_NO_FLOW;
lrnver = get_num_ver(vn->vn_mode, tb, h,
@@ -1481,10 +1628,12 @@ static int ip_check_balance(struct tree_balance *tb, int h)
lrset = LR_SHIFT_FLOW, lrnver = lrnver1;
}
- /* Our general shifting strategy is:
- 1) to minimized number of new nodes;
- 2) to minimized number of neighbors involved in shifting;
- 3) to minimized number of disk reads; */
+ /*
+ * Our general shifting strategy is:
+	 * 1) to minimize the number of new nodes;
+	 * 2) to minimize the number of neighbors involved in shifting;
+	 * 3) to minimize the number of disk reads;
+ */
/* we can win TWO or ONE nodes by shifting in both directions */
if (lrnver < lnver && lrnver < rnver) {
@@ -1508,42 +1657,59 @@ static int ip_check_balance(struct tree_balance *tb, int h)
return CARRY_ON;
}
- /* if shifting doesn't lead to better packing then don't shift */
+ /*
+ * if shifting doesn't lead to better packing
+ * then don't shift
+ */
if (nver == lrnver) {
set_parameters(tb, h, 0, 0, nver, snum012 + nset, -1,
-1);
return CARRY_ON;
}
- /* now we know that for better packing shifting in only one
- direction either to the left or to the right is required */
+ /*
+ * now we know that for better packing shifting in only one
+ * direction either to the left or to the right is required
+ */
- /* if shifting to the left is better than shifting to the right */
+ /*
+ * if shifting to the left is better than
+ * shifting to the right
+ */
if (lnver < rnver) {
SET_PAR_SHIFT_LEFT;
return CARRY_ON;
}
- /* if shifting to the right is better than shifting to the left */
+ /*
+ * if shifting to the right is better than
+ * shifting to the left
+ */
if (lnver > rnver) {
SET_PAR_SHIFT_RIGHT;
return CARRY_ON;
}
- /* now shifting in either direction gives the same number
- of nodes and we can make use of the cached neighbors */
+ /*
+ * now shifting in either direction gives the same number
+ * of nodes and we can make use of the cached neighbors
+ */
if (is_left_neighbor_in_cache(tb, h)) {
SET_PAR_SHIFT_LEFT;
return CARRY_ON;
}
- /* shift to the right independently on whether the right neighbor in cache or not */
+ /*
+	 * shift to the right regardless of whether the
+	 * right neighbor is in cache or not
+ */
SET_PAR_SHIFT_RIGHT;
return CARRY_ON;
}
}
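
Taken together, the branches above reduce to a short ladder; a condensed restatement of the order visible in these hunks (sketch only, eliding the set_parameters calls the real branches make):

	if (lrnver < lnver && lrnver < rnver) {
		/* two-way shift wins one or two nodes */
	} else if (nver == lrnver) {
		/* shifting does not pack better: do not shift */
	} else if (lnver < rnver) {
		SET_PAR_SHIFT_LEFT;		/* left-only shift packs best */
	} else if (lnver > rnver) {
		SET_PAR_SHIFT_RIGHT;		/* right-only shift packs best */
	} else if (is_left_neighbor_in_cache(tb, h)) {
		SET_PAR_SHIFT_LEFT;		/* tie: prefer the cached neighbor */
	} else {
		SET_PAR_SHIFT_RIGHT;		/* tie, nothing cached: go right */
	}
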
-/* Check whether current node S[h] is balanced when Decreasing its size by
+/*
+ * Check whether current node S[h] is balanced when Decreasing its size by
* Deleting or Cutting for INTERNAL node of S+tree.
* Calculate parameters for balancing for current level h.
* Parameters:
@@ -1563,8 +1729,10 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h)
{
struct virtual_node *vn = tb->tb_vn;
- /* Sh is the node whose balance is currently being checked,
- and Fh is its father. */
+ /*
+ * Sh is the node whose balance is currently being checked,
+ * and Fh is its father.
+ */
struct buffer_head *Sh, *Fh;
int maxsize, ret;
int lfree, rfree /* free space in L and R */ ;
@@ -1574,19 +1742,25 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h)
maxsize = MAX_CHILD_SIZE(Sh);
-/* using tb->insert_size[h], which is negative in this case, create_virtual_node calculates: */
-/* new_nr_item = number of items node would have if operation is */
-/* performed without balancing (new_nr_item); */
+ /*
+ * using tb->insert_size[h], which is negative in this case,
+ * create_virtual_node calculates:
+ * new_nr_item = number of items node would have if operation is
+ * performed without balancing (new_nr_item);
+ */
create_virtual_node(tb, h);
if (!Fh) { /* S[h] is the root. */
+ /* no balancing for higher levels needed */
if (vn->vn_nr_item > 0) {
set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
- return NO_BALANCING_NEEDED; /* no balancing for higher levels needed */
+ return NO_BALANCING_NEEDED;
}
- /* new_nr_item == 0.
+ /*
+ * new_nr_item == 0.
* Current root will be deleted resulting in
- * decrementing the tree height. */
+ * decrementing the tree height.
+ */
set_parameters(tb, h, 0, 0, 0, NULL, -1, -1);
return CARRY_ON;
}
@@ -1602,12 +1776,18 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h)
check_left(tb, h, lfree);
check_right(tb, h, rfree);
- if (vn->vn_nr_item >= MIN_NR_KEY(Sh)) { /* Balance condition for the internal node is valid.
- * In this case we balance only if it leads to better packing. */
- if (vn->vn_nr_item == MIN_NR_KEY(Sh)) { /* Here we join S[h] with one of its neighbors,
- * which is impossible with greater values of new_nr_item. */
+ /*
+ * Balance condition for the internal node is valid.
+ * In this case we balance only if it leads to better packing.
+ */
+ if (vn->vn_nr_item >= MIN_NR_KEY(Sh)) {
+ /*
+ * Here we join S[h] with one of its neighbors,
+ * which is impossible with greater values of new_nr_item.
+ */
+ if (vn->vn_nr_item == MIN_NR_KEY(Sh)) {
+ /* All contents of S[h] can be moved to L[h]. */
if (tb->lnum[h] >= vn->vn_nr_item + 1) {
- /* All contents of S[h] can be moved to L[h]. */
int n;
int order_L;
@@ -1623,8 +1803,8 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h)
return CARRY_ON;
}
+ /* All contents of S[h] can be moved to R[h]. */
if (tb->rnum[h] >= vn->vn_nr_item + 1) {
- /* All contents of S[h] can be moved to R[h]. */
int n;
int order_R;
@@ -1641,8 +1821,11 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h)
}
}
+ /*
+ * All contents of S[h] can be moved to the neighbors
+ * (L[h] & R[h]).
+ */
if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) {
- /* All contents of S[h] can be moved to the neighbors (L[h] & R[h]). */
int to_r;
to_r =
@@ -1659,7 +1842,10 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h)
return NO_BALANCING_NEEDED;
}
- /* Current node contain insufficient number of items. Balancing is required. */
+ /*
+	 * Current node contains an insufficient number of items.
+ * Balancing is required.
+ */
/* Check whether we can merge S[h] with left neighbor. */
if (tb->lnum[h] >= vn->vn_nr_item + 1)
if (is_left_neighbor_in_cache(tb, h)
@@ -1726,7 +1912,8 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h)
return CARRY_ON;
}
-/* Check whether current node S[h] is balanced when Decreasing its size by
+/*
+ * Check whether current node S[h] is balanced when Decreasing its size by
* Deleting or Truncating for LEAF node of S+tree.
* Calculate parameters for balancing for current level h.
* Parameters:
@@ -1743,15 +1930,21 @@ static int dc_check_balance_leaf(struct tree_balance *tb, int h)
{
struct virtual_node *vn = tb->tb_vn;
- /* Number of bytes that must be deleted from
- (value is negative if bytes are deleted) buffer which
- contains node being balanced. The mnemonic is that the
- attempted change in node space used level is levbytes bytes. */
+ /*
+	 * Number of bytes that must be deleted from the buffer
+	 * which contains the node being balanced (the value is
+	 * negative if bytes are deleted). The mnemonic is that the
+ * attempted change in node space used level is levbytes bytes.
+ */
int levbytes;
+
/* the maximal item size */
int maxsize, ret;
- /* S0 is the node whose balance is currently being checked,
- and F0 is its father. */
+
+ /*
+ * S0 is the node whose balance is currently being checked,
+ * and F0 is its father.
+ */
struct buffer_head *S0, *F0;
int lfree, rfree /* free space in L and R */ ;
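
Since dc_check_balance_leaf only handles deletes and cuts, insert_size[h] is negative here, and levbytes simply records it; roughly (a sketch of the assignment the function makes a few lines below, outside this hunk):

	levbytes = tb->insert_size[h];	/* e.g. -100 after cutting 100 bytes */
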
@@ -1784,9 +1977,11 @@ static int dc_check_balance_leaf(struct tree_balance *tb, int h)
if (are_leaves_removable(tb, lfree, rfree))
return CARRY_ON;
- /* determine maximal number of items we can shift to the left/right neighbor
- and the maximal number of bytes that can flow to the left/right neighbor
- from the left/right most liquid item that cannot be shifted from S[0] entirely
+ /*
+ * determine maximal number of items we can shift to the left/right
+ * neighbor and the maximal number of bytes that can flow to the
+ * left/right neighbor from the left/right most liquid item that
+ * cannot be shifted from S[0] entirely
*/
check_left(tb, h, lfree);
check_right(tb, h, rfree);
@@ -1810,7 +2005,10 @@ static int dc_check_balance_leaf(struct tree_balance *tb, int h)
return CARRY_ON;
}
- /* All contents of S[0] can be moved to the neighbors (L[0] & R[0]). Set parameters and return */
+ /*
+ * All contents of S[0] can be moved to the neighbors (L[0] & R[0]).
+ * Set parameters and return
+ */
if (is_leaf_removable(tb))
return CARRY_ON;
@@ -1820,7 +2018,8 @@ static int dc_check_balance_leaf(struct tree_balance *tb, int h)
return NO_BALANCING_NEEDED;
}
-/* Check whether current node S[h] is balanced when Decreasing its size by
+/*
+ * Check whether current node S[h] is balanced when Decreasing its size by
* Deleting or Cutting.
* Calculate parameters for balancing for current level h.
* Parameters:
@@ -1844,15 +2043,16 @@ static int dc_check_balance(struct tree_balance *tb, int h)
return dc_check_balance_leaf(tb, h);
}
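
For orientation, the dc_check_balance dispatcher that the surrounding hunks document reduces to a level check (sketch, omitting its debugging assertion):

	static int dc_check_balance(struct tree_balance *tb, int h)
	{
		if (h)		/* internal levels of the S+tree */
			return dc_check_balance_internal(tb, h);
		return dc_check_balance_leaf(tb, h);	/* leaf level, h == 0 */
	}
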
-/* Check whether current node S[h] is balanced.
+/*
+ * Check whether current node S[h] is balanced.
* Calculate parameters for balancing for current level h.
* Parameters:
*
* tb tree_balance structure:
*
- * tb is a large structure that must be read about in the header file
- * at the same time as this procedure if the reader is to successfully
- * understand this procedure
+ * tb is a large structure that must be read about in the header
+ * file at the same time as this procedure if the reader is
+ * to successfully understand this procedure
*
* h current level of the node;
* inum item number in S[h];
@@ -1882,8 +2082,8 @@ static int check_balance(int mode,
RFALSE(mode == M_INSERT && !vn->vn_ins_ih,
"vs-8255: ins_ih can not be 0 in insert mode");
+ /* Calculate balance parameters when size of node is increasing. */
if (tb->insert_size[h] > 0)
- /* Calculate balance parameters when size of node is increasing. */
return ip_check_balance(tb, h);
/* Calculate balance parameters when size of node is decreasing. */
@@ -1911,21 +2111,23 @@ static int get_direct_parent(struct tree_balance *tb, int h)
PATH_OFFSET_POSITION(path, path_offset - 1) = 0;
return CARRY_ON;
}
- return REPEAT_SEARCH; /* Root is changed and we must recalculate the path. */
+ /* Root is changed and we must recalculate the path. */
+ return REPEAT_SEARCH;
}
+ /* Parent in the path is not in the tree. */
if (!B_IS_IN_TREE
(bh = PATH_OFFSET_PBUFFER(path, path_offset - 1)))
- return REPEAT_SEARCH; /* Parent in the path is not in the tree. */
+ return REPEAT_SEARCH;
if ((position =
PATH_OFFSET_POSITION(path,
path_offset - 1)) > B_NR_ITEMS(bh))
return REPEAT_SEARCH;
+ /* Parent in the path is not parent of the current node in the tree. */
if (B_N_CHILD_NUM(bh, position) !=
PATH_OFFSET_PBUFFER(path, path_offset)->b_blocknr)
- /* Parent in the path is not parent of the current node in the tree. */
return REPEAT_SEARCH;
if (buffer_locked(bh)) {
@@ -1936,10 +2138,15 @@ static int get_direct_parent(struct tree_balance *tb, int h)
return REPEAT_SEARCH;
}
- return CARRY_ON; /* Parent in the path is unlocked and really parent of the current node. */
+ /*
+	 * Parent in the path is unlocked and is really the parent
+ * of the current node.
+ */
+ return CARRY_ON;
}
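
Callers treat anything but CARRY_ON as a request to start over; the typical pattern around get_direct_parent looks roughly like this (sketch):

	ret = get_direct_parent(tb, h);
	if (ret != CARRY_ON)
		goto repeat;	/* path went stale; search the tree again */
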
-/* Using lnum[h] and rnum[h] we should determine what neighbors
+/*
+ * Using lnum[h] and rnum[h] we should determine what neighbors
* of S[h] we
* need in order to balance S[h], and get them if necessary.
* Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked;
@@ -1997,7 +2204,7 @@ static int get_neighbors(struct tree_balance *tb, int h)
}
/* We need right neighbor to balance S[path_offset]. */
- if (tb->rnum[h]) { /* We need right neighbor to balance S[path_offset]. */
+ if (tb->rnum[h]) {
PROC_INFO_INC(sb, need_r_neighbor[h]);
bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset);
@@ -2053,9 +2260,11 @@ static int get_virtual_node_size(struct super_block *sb, struct buffer_head *bh)
(max_num_of_entries - 1) * sizeof(__u16));
}
-/* maybe we should fail balancing we are going to perform when kmalloc
- fails several times. But now it will loop until kmalloc gets
- required memory */
+/*
+ * maybe we should fail the balancing we are going to perform if kmalloc
+ * fails several times. But for now it will loop until kmalloc gets the
+ * required memory
+ */
static int get_mem_for_virtual_node(struct tree_balance *tb)
{
int check_fs = 0;
@@ -2064,8 +2273,8 @@ static int get_mem_for_virtual_node(struct tree_balance *tb)
size = get_virtual_node_size(tb->tb_sb, PATH_PLAST_BUFFER(tb->tb_path));
+ /* we have to allocate more memory for virtual node */
if (size > tb->vn_buf_size) {
- /* we have to allocate more memory for virtual node */
if (tb->vn_buf) {
/* free memory allocated before */
kfree(tb->vn_buf);
@@ -2079,10 +2288,12 @@ static int get_mem_for_virtual_node(struct tree_balance *tb)
/* get memory for virtual item */
buf = kmalloc(size, GFP_ATOMIC | __GFP_NOWARN);
if (!buf) {
- /* getting memory with GFP_KERNEL priority may involve
- balancing now (due to indirect_to_direct conversion on
- dcache shrinking). So, release path and collected
- resources here */
+ /*
+ * getting memory with GFP_KERNEL priority may involve
+ * balancing now (due to indirect_to_direct conversion
+ * on dcache shrinking). So, release path and collected
+ * resources here
+ */
free_buffers_in_tb(tb);
buf = kmalloc(size, GFP_NOFS);
if (!buf) {
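
The allocation above is a two-step fallback: an opportunistic atomic attempt first, then, only on failure, release the held resources and retry with a blocking but filesystem-safe flag. The idiom in isolation (sketch):

	buf = kmalloc(size, GFP_ATOMIC | __GFP_NOWARN);	/* cheap, may fail */
	if (!buf) {
		free_buffers_in_tb(tb);		/* drop path and collected buffers */
		buf = kmalloc(size, GFP_NOFS);	/* can block, avoids fs recursion */
	}
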
@@ -2168,8 +2379,10 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
for (i = tb->tb_path->path_length;
!locked && i > ILLEGAL_PATH_ELEMENT_OFFSET; i--) {
if (PATH_OFFSET_PBUFFER(tb->tb_path, i)) {
- /* if I understand correctly, we can only be sure the last buffer
- ** in the path is in the tree --clm
+ /*
+ * if I understand correctly, we can only
+ * be sure the last buffer in the path is
+ * in the tree --clm
*/
#ifdef CONFIG_REISERFS_CHECK
if (PATH_PLAST_BUFFER(tb->tb_path) ==
@@ -2256,13 +2469,15 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
}
}
}
- /* as far as I can tell, this is not required. The FEB list seems
- ** to be full of newly allocated nodes, which will never be locked,
- ** dirty, or anything else.
- ** To be safe, I'm putting in the checks and waits in. For the moment,
- ** they are needed to keep the code in journal.c from complaining
- ** about the buffer. That code is inside CONFIG_REISERFS_CHECK as well.
- ** --clm
+
+ /*
+ * as far as I can tell, this is not required. The FEB list
+ * seems to be full of newly allocated nodes, which will
+ * never be locked, dirty, or anything else.
+	 * To be safe, I'm putting the checks and waits in.
+ * For the moment, they are needed to keep the code in
+ * journal.c from complaining about the buffer.
+ * That code is inside CONFIG_REISERFS_CHECK as well. --clm
*/
for (i = 0; !locked && i < MAX_FEB_SIZE; i++) {
if (tb->FEB[i]) {
@@ -2300,7 +2515,8 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
return CARRY_ON;
}
-/* Prepare for balancing, that is
+/*
+ * Prepare for balancing, that is
* get all necessary parents, and neighbors;
* analyze what and where should be moved;
* get sufficient number of new nodes;
@@ -2309,13 +2525,14 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
* When ported to SMP kernels, only at the last moment after all needed nodes
* are collected in cache, will the resources be locked using the usual
* textbook ordered lock acquisition algorithms. Note that ensuring that
- * this code neither write locks what it does not need to write lock nor locks out of order
- * will be a pain in the butt that could have been avoided. Grumble grumble. -Hans
+ * this code neither write locks what it does not need to write lock nor locks
+ * out of order will be a pain in the butt that could have been avoided.
+ * Grumble grumble. -Hans
*
* fix is meant in the sense of render unchanging
*
- * Latency might be improved by first gathering a list of what buffers are needed
- * and then getting as many of them in parallel as possible? -Hans
+ * Latency might be improved by first gathering a list of what buffers
+ * are needed and then getting as many of them in parallel as possible? -Hans
*
* Parameters:
* op_mode i - insert, d - delete, c - cut (truncate), p - paste (append)
@@ -2335,8 +2552,9 @@ int fix_nodes(int op_mode, struct tree_balance *tb,
int ret, h, item_num = PATH_LAST_POSITION(tb->tb_path);
int pos_in_item;
- /* we set wait_tb_buffers_run when we have to restore any dirty bits cleared
- ** during wait_tb_buffers_run
+ /*
+ * we set wait_tb_buffers_run when we have to restore any dirty
+	 * bits cleared during wait_tb_buffers_until_unlocked
*/
int wait_tb_buffers_run = 0;
struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
@@ -2347,14 +2565,15 @@ int fix_nodes(int op_mode, struct tree_balance *tb,
tb->fs_gen = get_generation(tb->tb_sb);
- /* we prepare and log the super here so it will already be in the
- ** transaction when do_balance needs to change it.
- ** This way do_balance won't have to schedule when trying to prepare
- ** the super for logging
+ /*
+ * we prepare and log the super here so it will already be in the
+ * transaction when do_balance needs to change it.
+ * This way do_balance won't have to schedule when trying to prepare
+ * the super for logging
*/
reiserfs_prepare_for_journal(tb->tb_sb,
SB_BUFFER_WITH_SB(tb->tb_sb), 1);
- journal_mark_dirty(tb->transaction_handle, tb->tb_sb,
+ journal_mark_dirty(tb->transaction_handle,
SB_BUFFER_WITH_SB(tb->tb_sb));
if (FILESYSTEM_CHANGED_TB(tb))
return REPEAT_SEARCH;
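
FILESYSTEM_CHANGED_TB is an optimistic-concurrency check against the generation snapshot taken into tb->fs_gen above: any call that may sleep can let the tree change, and a generation mismatch forces a fresh search. The same pattern in miniature (sketch; the sleeping call is a hypothetical placeholder):

	tb->fs_gen = get_generation(tb->tb_sb);		/* snapshot */
	some_call_that_may_sleep();			/* hypothetical */
	if (tb->fs_gen != get_generation(tb->tb_sb))
		return REPEAT_SEARCH;			/* tree moved under us */
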
@@ -2408,7 +2627,7 @@ int fix_nodes(int op_mode, struct tree_balance *tb,
#endif
if (get_mem_for_virtual_node(tb) == REPEAT_SEARCH)
- // FIXME: maybe -ENOMEM when tb->vn_buf == 0? Now just repeat
+ /* FIXME: maybe -ENOMEM when tb->vn_buf == 0? Now just repeat */
return REPEAT_SEARCH;
/* Starting from the leaf level; for all levels h of the tree. */
@@ -2427,7 +2646,10 @@ int fix_nodes(int op_mode, struct tree_balance *tb,
goto repeat;
if (h != MAX_HEIGHT - 1)
tb->insert_size[h + 1] = 0;
- /* ok, analysis and resource gathering are complete */
+ /*
+ * ok, analysis and resource gathering
+ * are complete
+ */
break;
}
goto repeat;
@@ -2437,15 +2659,19 @@ int fix_nodes(int op_mode, struct tree_balance *tb,
if (ret != CARRY_ON)
goto repeat;
- /* No disk space, or schedule occurred and analysis may be
- * invalid and needs to be redone. */
+ /*
+ * No disk space, or schedule occurred and analysis may be
+ * invalid and needs to be redone.
+ */
ret = get_empty_nodes(tb, h);
if (ret != CARRY_ON)
goto repeat;
+ /*
+ * We have a positive insert size but no nodes exist on this
+	 * level; this means that we are creating a new root.
+ */
if (!PATH_H_PBUFFER(tb->tb_path, h)) {
- /* We have a positive insert size but no nodes exist on this
- level, this means that we are creating a new root. */
RFALSE(tb->blknum[h] != 1,
"PAP-8350: creating new empty root");
@@ -2453,11 +2679,13 @@ int fix_nodes(int op_mode, struct tree_balance *tb,
if (h < MAX_HEIGHT - 1)
tb->insert_size[h + 1] = 0;
} else if (!PATH_H_PBUFFER(tb->tb_path, h + 1)) {
+ /*
+ * The tree needs to be grown, so this node S[h]
+ * which is the root node is split into two nodes,
+ * and a new node (S[h+1]) will be created to
+ * become the root node.
+ */
if (tb->blknum[h] > 1) {
- /* The tree needs to be grown, so this node S[h]
- which is the root node is split into two nodes,
- and a new node (S[h+1]) will be created to
- become the root node. */
RFALSE(h == MAX_HEIGHT - 1,
"PAP-8355: attempt to create too high of a tree");
@@ -2487,12 +2715,14 @@ int fix_nodes(int op_mode, struct tree_balance *tb,
goto repeat;
}
- repeat:
- // fix_nodes was unable to perform its calculation due to
- // filesystem got changed under us, lack of free disk space or i/o
- // failure. If the first is the case - the search will be
- // repeated. For now - free all resources acquired so far except
- // for the new allocated nodes
+repeat:
+ /*
+	 * fix_nodes was unable to perform its calculation because the
+	 * filesystem got changed under us, free disk space ran out, or
+	 * an i/o failure occurred. If the first is the case, the search
+	 * will be repeated. For now, free all resources acquired so far
+	 * except for the newly allocated nodes
+ */
{
int i;
@@ -2548,8 +2778,6 @@ int fix_nodes(int op_mode, struct tree_balance *tb,
}
-/* Anatoly will probably forgive me renaming tb to tb. I just
- wanted to make lines shorter */
void unfix_nodes(struct tree_balance *tb)
{
int i;
@@ -2578,8 +2806,10 @@ void unfix_nodes(struct tree_balance *tb)
for (i = 0; i < MAX_FEB_SIZE; i++) {
if (tb->FEB[i]) {
b_blocknr_t blocknr = tb->FEB[i]->b_blocknr;
- /* de-allocated block which was not used by balancing and
- bforget about buffer for it */
+ /*
+			 * de-allocate the block which was not used by
+			 * balancing and bforget the buffer for it
+ */
brelse(tb->FEB[i]);
reiserfs_free_block(tb->transaction_handle, NULL,
blocknr, 0);
diff --git a/fs/reiserfs/hashes.c b/fs/reiserfs/hashes.c
index 91b0cc1242a..7a26c4fe6c4 100644
--- a/fs/reiserfs/hashes.c
+++ b/fs/reiserfs/hashes.c
@@ -12,12 +12,6 @@
* Yura's function is added (04/07/2000)
*/
-//
-// keyed_hash
-// yura_hash
-// r5_hash
-//
-
#include <linux/kernel.h>
#include "reiserfs.h"
#include <asm/types.h>
@@ -56,7 +50,7 @@ u32 keyed_hash(const signed char *msg, int len)
u32 pad;
int i;
- // assert(len >= 0 && len < 256);
+ /* assert(len >= 0 && len < 256); */
pad = (u32) len | ((u32) len << 8);
pad |= pad << 16;
@@ -127,9 +121,10 @@ u32 keyed_hash(const signed char *msg, int len)
return h0 ^ h1;
}
-/* What follows in this file is copyright 2000 by Hans Reiser, and the
- * licensing of what follows is governed by reiserfs/README */
-
+/*
+ * What follows in this file is copyright 2000 by Hans Reiser, and the
+ * licensing of what follows is governed by reiserfs/README
+ */
u32 yura_hash(const signed char *msg, int len)
{
int j, pow;
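
All of the hash entry points in this file share one shape: they take the name bytes and a length and return a 32-bit hash. A hypothetical call (sketch; the name is just an example):

	u32 h = keyed_hash((const signed char *)"foo", 3);
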
diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c
index e1978fd895f..73231b1ebdb 100644
--- a/fs/reiserfs/ibalance.c
+++ b/fs/reiserfs/ibalance.c
@@ -12,7 +12,10 @@
int balance_internal(struct tree_balance *,
int, int, struct item_head *, struct buffer_head **);
-/* modes of internal_shift_left, internal_shift_right and internal_insert_childs */
+/*
+ * modes of internal_shift_left, internal_shift_right and
+ * internal_insert_childs
+ */
#define INTERNAL_SHIFT_FROM_S_TO_L 0
#define INTERNAL_SHIFT_FROM_R_TO_S 1
#define INTERNAL_SHIFT_FROM_L_TO_S 2
@@ -32,7 +35,9 @@ static void internal_define_dest_src_infos(int shift_mode,
memset(src_bi, 0, sizeof(struct buffer_info));
/* define dest, src, dest parent, dest position */
switch (shift_mode) {
- case INTERNAL_SHIFT_FROM_S_TO_L: /* used in internal_shift_left */
+
+ /* used in internal_shift_left */
+ case INTERNAL_SHIFT_FROM_S_TO_L:
src_bi->tb = tb;
src_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
@@ -52,12 +57,14 @@ static void internal_define_dest_src_infos(int shift_mode,
dest_bi->tb = tb;
dest_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
dest_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
- dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1); /* dest position is analog of dest->b_item_order */
+ /* dest position is analog of dest->b_item_order */
+ dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
*d_key = tb->lkey[h];
*cf = tb->CFL[h];
break;
- case INTERNAL_SHIFT_FROM_R_TO_S: /* used in internal_shift_left */
+ /* used in internal_shift_left */
+ case INTERNAL_SHIFT_FROM_R_TO_S:
src_bi->tb = tb;
src_bi->bi_bh = tb->R[h];
src_bi->bi_parent = tb->FR[h];
@@ -111,7 +118,8 @@ static void internal_define_dest_src_infos(int shift_mode,
}
}
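
Each arm of the switch in this function fills the same four buffer_info fields, just from different nodes; the S-node case again, written with designated initializers for clarity (an equivalent sketch of the assignments shown above, not a proposed change):

	struct buffer_info src_bi = {
		.tb          = tb,
		.bi_bh       = PATH_H_PBUFFER(tb->tb_path, h),
		.bi_parent   = PATH_H_PPARENT(tb->tb_path, h),
		.bi_position = PATH_H_POSITION(tb->tb_path, h + 1),
	};
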
-/* Insert count node pointers into buffer cur before position to + 1.
+/*
+ * Insert count node pointers into buffer cur before position to + 1.
* Insert count items into buffer cur before position to.
* Items and node pointers are specified by inserted and bh respectively.
*/
@@ -146,14 +154,14 @@ static void internal_insert_childs(struct buffer_info *cur_bi,
/* copy to_be_insert disk children */
for (i = 0; i < count; i++) {
- put_dc_size(&(new_dc[i]),
+ put_dc_size(&new_dc[i],
MAX_CHILD_SIZE(bh[i]) - B_FREE_SPACE(bh[i]));
- put_dc_block_number(&(new_dc[i]), bh[i]->b_blocknr);
+ put_dc_block_number(&new_dc[i], bh[i]->b_blocknr);
}
memcpy(dc, new_dc, DC_SIZE * count);
/* prepare space for count items */
- ih = B_N_PDELIM_KEY(cur, ((to == -1) ? 0 : to));
+ ih = internal_key(cur, ((to == -1) ? 0 : to));
memmove(ih + count, ih,
(nr - to) * KEY_SIZE + (nr + 1 + count) * DC_SIZE);
@@ -190,8 +198,10 @@ static void internal_insert_childs(struct buffer_info *cur_bi,
}
-/* Delete del_num items and node pointers from buffer cur starting from *
- * the first_i'th item and first_p'th pointers respectively. */
+/*
+ * Delete del_num items and node pointers from buffer cur starting from
+ * the first_i'th item and first_p'th pointers respectively.
+ */
static void internal_delete_pointers_items(struct buffer_info *cur_bi,
int first_p,
int first_i, int del_num)
@@ -233,7 +243,7 @@ static void internal_delete_pointers_items(struct buffer_info *cur_bi,
dc = B_N_CHILD(cur, first_p);
memmove(dc, dc + del_num, (nr + 1 - first_p - del_num) * DC_SIZE);
- key = B_N_PDELIM_KEY(cur, first_i);
+ key = internal_key(cur, first_i);
memmove(key, key + del_num,
(nr - first_i - del_num) * KEY_SIZE + (nr + 1 -
del_num) * DC_SIZE);
@@ -270,22 +280,30 @@ static void internal_delete_childs(struct buffer_info *cur_bi, int from, int n)
i_from = (from == 0) ? from : from - 1;
- /* delete n pointers starting from `from' position in CUR;
- delete n keys starting from 'i_from' position in CUR;
+ /*
+ * delete n pointers starting from `from' position in CUR;
+ * delete n keys starting from 'i_from' position in CUR;
*/
internal_delete_pointers_items(cur_bi, from, i_from, n);
}
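
The i_from adjustment follows from the internal-node layout (n keys, n + 1 pointers, with key i sitting between pointers i and i + 1); a worked example (illustration only):

	/* from == 0, n == 2: drop pointers 0,1 together with keys 0,1 */
	/* from == 3, n == 2: drop pointers 3,4 together with keys 2,3 */
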
-/* copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest
-* last_first == FIRST_TO_LAST means, that we copy first items from src to tail of dest
- * last_first == LAST_TO_FIRST means, that we copy last items from src to head of dest
+/*
+ * copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer
+ * dest
+ * last_first == FIRST_TO_LAST means that we copy first items
+ * from src to tail of dest
+ * last_first == LAST_TO_FIRST means that we copy last items
+ * from src to head of dest
*/
static void internal_copy_pointers_items(struct buffer_info *dest_bi,
struct buffer_head *src,
int last_first, int cpy_num)
{
- /* ATTENTION! Number of node pointers in DEST is equal to number of items in DEST *
- * as delimiting key have already inserted to buffer dest.*/
+ /*
+ * ATTENTION! Number of node pointers in DEST is equal to number
+	 * of items in DEST, as the delimiting key has already been
+	 * inserted into buffer dest.
+ */
struct buffer_head *dest = dest_bi->bi_bh;
int nr_dest, nr_src;
int dest_order, src_order;
@@ -330,13 +348,13 @@ static void internal_copy_pointers_items(struct buffer_info *dest_bi,
memcpy(dc, B_N_CHILD(src, src_order), DC_SIZE * cpy_num);
/* prepare space for cpy_num - 1 item headers */
- key = B_N_PDELIM_KEY(dest, dest_order);
+ key = internal_key(dest, dest_order);
memmove(key + cpy_num - 1, key,
KEY_SIZE * (nr_dest - dest_order) + DC_SIZE * (nr_dest +
cpy_num));
/* insert headers */
- memcpy(key, B_N_PDELIM_KEY(src, src_order), KEY_SIZE * (cpy_num - 1));
+ memcpy(key, internal_key(src, src_order), KEY_SIZE * (cpy_num - 1));
/* sizes, item number */
set_blkh_nr_item(blkh, blkh_nr_item(blkh) + (cpy_num - 1));
@@ -366,7 +384,9 @@ static void internal_copy_pointers_items(struct buffer_info *dest_bi,
}
-/* Copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest.
+/*
+ * Copy cpy_num node pointers and cpy_num - 1 items from buffer src to
+ * buffer dest.
* Delete cpy_num - del_par items and node pointers from buffer src.
* last_first == FIRST_TO_LAST means, that we copy/delete first items from src.
* last_first == LAST_TO_FIRST means, that we copy/delete last items from src.
@@ -385,8 +405,10 @@ static void internal_move_pointers_items(struct buffer_info *dest_bi,
if (last_first == FIRST_TO_LAST) { /* shift_left occurs */
first_pointer = 0;
first_item = 0;
- /* delete cpy_num - del_par pointers and keys starting for pointers with first_pointer,
- for key - with first_item */
+ /*
+		 * delete cpy_num - del_par pointers and keys, starting
+		 * from first_pointer for pointers and first_item for keys
+ */
internal_delete_pointers_items(src_bi, first_pointer,
first_item, cpy_num - del_par);
} else { /* shift_right occurs */
@@ -404,7 +426,9 @@ static void internal_move_pointers_items(struct buffer_info *dest_bi,
}
/* Insert n_src'th key of buffer src before n_dest'th key of buffer dest. */
-static void internal_insert_key(struct buffer_info *dest_bi, int dest_position_before, /* insert key before key with n_dest number */
+static void internal_insert_key(struct buffer_info *dest_bi,
+ /* insert key before key with n_dest number */
+ int dest_position_before,
struct buffer_head *src, int src_position)
{
struct buffer_head *dest = dest_bi->bi_bh;
@@ -429,12 +453,12 @@ static void internal_insert_key(struct buffer_info *dest_bi, int dest_position_b
nr = blkh_nr_item(blkh);
/* prepare space for inserting key */
- key = B_N_PDELIM_KEY(dest, dest_position_before);
+ key = internal_key(dest, dest_position_before);
memmove(key + 1, key,
(nr - dest_position_before) * KEY_SIZE + (nr + 1) * DC_SIZE);
/* insert key */
- memcpy(key, B_N_PDELIM_KEY(src, src_position), KEY_SIZE);
+ memcpy(key, internal_key(src, src_position), KEY_SIZE);
/* Change dirt, free space, item number fields. */
@@ -453,13 +477,19 @@ static void internal_insert_key(struct buffer_info *dest_bi, int dest_position_b
}
}
-/* Insert d_key'th (delimiting) key from buffer cfl to tail of dest.
- * Copy pointer_amount node pointers and pointer_amount - 1 items from buffer src to buffer dest.
+/*
+ * Insert d_key'th (delimiting) key from buffer cfl to tail of dest.
+ * Copy pointer_amount node pointers and pointer_amount - 1 items from
+ * buffer src to buffer dest.
* Replace d_key'th key in buffer cfl.
* Delete pointer_amount items and node pointers from buffer src.
*/
/* this can be invoked both to shift from S to L and from R to S */
-static void internal_shift_left(int mode, /* INTERNAL_FROM_S_TO_L | INTERNAL_FROM_R_TO_S */
+static void internal_shift_left(
+ /*
+				 * INTERNAL_SHIFT_FROM_S_TO_L |
+				 * INTERNAL_SHIFT_FROM_R_TO_S
+ */
+ int mode,
struct tree_balance *tb,
int h, int pointer_amount)
{
@@ -473,7 +503,10 @@ static void internal_shift_left(int mode, /* INTERNAL_FROM_S_TO_L | INTERNAL_FRO
/*printk("pointer_amount = %d\n",pointer_amount); */
if (pointer_amount) {
- /* insert delimiting key from common father of dest and src to node dest into position B_NR_ITEM(dest) */
+ /*
+ * insert delimiting key from common father of dest and
+ * src to node dest into position B_NR_ITEM(dest)
+ */
internal_insert_key(&dest_bi, B_NR_ITEMS(dest_bi.bi_bh), cf,
d_key_position);
@@ -492,7 +525,8 @@ static void internal_shift_left(int mode, /* INTERNAL_FROM_S_TO_L | INTERNAL_FRO
}
-/* Insert delimiting key to L[h].
+/*
+ * Insert delimiting key to L[h].
* Copy n node pointers and n - 1 items from buffer S[h] to L[h].
* Delete n - 1 items and node pointers from buffer S[h].
*/
@@ -507,23 +541,27 @@ static void internal_shift1_left(struct tree_balance *tb,
internal_define_dest_src_infos(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
&dest_bi, &src_bi, &d_key_position, &cf);
- if (pointer_amount > 0) /* insert lkey[h]-th key from CFL[h] to left neighbor L[h] */
+ /* insert lkey[h]-th key from CFL[h] to left neighbor L[h] */
+ if (pointer_amount > 0)
internal_insert_key(&dest_bi, B_NR_ITEMS(dest_bi.bi_bh), cf,
d_key_position);
- /* internal_insert_key (tb->L[h], B_NR_ITEM(tb->L[h]), tb->CFL[h], tb->lkey[h]); */
/* last parameter is del_parameter */
internal_move_pointers_items(&dest_bi, &src_bi, FIRST_TO_LAST,
pointer_amount, 1);
- /* internal_move_pointers_items (tb->L[h], tb->S[h], FIRST_TO_LAST, pointer_amount, 1); */
}
-/* Insert d_key'th (delimiting) key from buffer cfr to head of dest.
+/*
+ * Insert d_key'th (delimiting) key from buffer cfr to head of dest.
* Copy n node pointers and n - 1 items from buffer src to buffer dest.
* Replace d_key'th key in buffer cfr.
* Delete n items and node pointers from buffer src.
*/
-static void internal_shift_right(int mode, /* INTERNAL_FROM_S_TO_R | INTERNAL_FROM_L_TO_S */
+static void internal_shift_right(
+ /*
+				 * INTERNAL_SHIFT_FROM_S_TO_R |
+				 * INTERNAL_SHIFT_FROM_L_TO_S
+ */
+ int mode,
struct tree_balance *tb,
int h, int pointer_amount)
{
@@ -538,7 +576,10 @@ static void internal_shift_right(int mode, /* INTERNAL_FROM_S_TO_R | INTERNAL_FR
nr = B_NR_ITEMS(src_bi.bi_bh);
if (pointer_amount > 0) {
- /* insert delimiting key from common father of dest and src to dest node into position 0 */
+ /*
+ * insert delimiting key from common father of dest
+ * and src to dest node into position 0
+ */
internal_insert_key(&dest_bi, 0, cf, d_key_position);
if (nr == pointer_amount - 1) {
RFALSE(src_bi.bi_bh != PATH_H_PBUFFER(tb->tb_path, h) /*tb->S[h] */ ||
@@ -559,7 +600,8 @@ static void internal_shift_right(int mode, /* INTERNAL_FROM_S_TO_R | INTERNAL_FR
pointer_amount, 0);
}
-/* Insert delimiting key to R[h].
+/*
+ * Insert delimiting key to R[h].
* Copy n node pointers and n - 1 items from buffer S[h] to R[h].
* Delete n - 1 items and node pointers from buffer S[h].
*/
@@ -574,18 +616,19 @@ static void internal_shift1_right(struct tree_balance *tb,
internal_define_dest_src_infos(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
&dest_bi, &src_bi, &d_key_position, &cf);
- if (pointer_amount > 0) /* insert rkey from CFR[h] to right neighbor R[h] */
+ /* insert rkey from CFR[h] to right neighbor R[h] */
+ if (pointer_amount > 0)
internal_insert_key(&dest_bi, 0, cf, d_key_position);
- /* internal_insert_key (tb->R[h], 0, tb->CFR[h], tb->rkey[h]); */
/* last parameter is del_parameter */
internal_move_pointers_items(&dest_bi, &src_bi, LAST_TO_FIRST,
pointer_amount, 1);
- /* internal_move_pointers_items (tb->R[h], tb->S[h], LAST_TO_FIRST, pointer_amount, 1); */
}
-/* Delete insert_num node pointers together with their left items
- * and balance current node.*/
+/*
+ * Delete insert_num node pointers together with their left items
+ * and balance current node.
+ */
static void balance_internal_when_delete(struct tree_balance *tb,
int h, int child_pos)
{
@@ -626,9 +669,11 @@ static void balance_internal_when_delete(struct tree_balance *tb,
new_root = tb->R[h - 1];
else
new_root = tb->L[h - 1];
- /* switch super block's tree root block number to the new value */
+ /*
+			 * switch super block's tree root block
+			 * number to the new value
+			 */
PUT_SB_ROOT_BLOCK(tb->tb_sb, new_root->b_blocknr);
- //REISERFS_SB(tb->tb_sb)->s_rs->s_tree_height --;
+ /*REISERFS_SB(tb->tb_sb)->s_rs->s_tree_height --; */
PUT_SB_TREE_HEIGHT(tb->tb_sb,
SB_TREE_HEIGHT(tb->tb_sb) - 1);
@@ -636,8 +681,8 @@ static void balance_internal_when_delete(struct tree_balance *tb,
REISERFS_SB(tb->tb_sb)->s_sbh,
1);
/*&&&&&&&&&&&&&&&&&&&&&& */
+ /* use check_internal if new root is an internal node */
if (h > 1)
- /* use check_internal if new root is an internal node */
check_internal(new_root);
/*&&&&&&&&&&&&&&&&&&&&&& */
@@ -648,7 +693,8 @@ static void balance_internal_when_delete(struct tree_balance *tb,
return;
}
- if (tb->L[h] && tb->lnum[h] == -B_NR_ITEMS(tb->L[h]) - 1) { /* join S[h] with L[h] */
+ /* join S[h] with L[h] */
+ if (tb->L[h] && tb->lnum[h] == -B_NR_ITEMS(tb->L[h]) - 1) {
RFALSE(tb->rnum[h] != 0,
"invalid tb->rnum[%d]==%d when joining S[h] with L[h]",
@@ -660,7 +706,8 @@ static void balance_internal_when_delete(struct tree_balance *tb,
return;
}
- if (tb->R[h] && tb->rnum[h] == -B_NR_ITEMS(tb->R[h]) - 1) { /* join S[h] with R[h] */
+ /* join S[h] with R[h] */
+ if (tb->R[h] && tb->rnum[h] == -B_NR_ITEMS(tb->R[h]) - 1) {
RFALSE(tb->lnum[h] != 0,
"invalid tb->lnum[%d]==%d when joining S[h] with R[h]",
h, tb->lnum[h]);
@@ -671,17 +718,18 @@ static void balance_internal_when_delete(struct tree_balance *tb,
return;
}
- if (tb->lnum[h] < 0) { /* borrow from left neighbor L[h] */
+ /* borrow from left neighbor L[h] */
+ if (tb->lnum[h] < 0) {
RFALSE(tb->rnum[h] != 0,
"wrong tb->rnum[%d]==%d when borrow from L[h]", h,
tb->rnum[h]);
- /*internal_shift_right (tb, h, tb->L[h], tb->CFL[h], tb->lkey[h], tb->S[h], -tb->lnum[h]); */
internal_shift_right(INTERNAL_SHIFT_FROM_L_TO_S, tb, h,
-tb->lnum[h]);
return;
}
- if (tb->rnum[h] < 0) { /* borrow from right neighbor R[h] */
+ /* borrow from right neighbor R[h] */
+ if (tb->rnum[h] < 0) {
RFALSE(tb->lnum[h] != 0,
"invalid tb->lnum[%d]==%d when borrow from R[h]",
h, tb->lnum[h]);
@@ -689,7 +737,8 @@ static void balance_internal_when_delete(struct tree_balance *tb,
return;
}
- if (tb->lnum[h] > 0) { /* split S[h] into two parts and put them into neighbors */
+ /* split S[h] into two parts and put them into neighbors */
+ if (tb->lnum[h] > 0) {
RFALSE(tb->rnum[h] == 0 || tb->lnum[h] + tb->rnum[h] != n + 1,
"invalid tb->lnum[%d]==%d or tb->rnum[%d]==%d when S[h](item number == %d) is split between them",
h, tb->lnum[h], h, tb->rnum[h], n);
@@ -717,7 +766,7 @@ static void replace_lkey(struct tree_balance *tb, int h, struct item_head *key)
if (B_NR_ITEMS(PATH_H_PBUFFER(tb->tb_path, h)) == 0)
return;
- memcpy(B_N_PDELIM_KEY(tb->CFL[h], tb->lkey[h]), key, KEY_SIZE);
+ memcpy(internal_key(tb->CFL[h], tb->lkey[h]), key, KEY_SIZE);
do_balance_mark_internal_dirty(tb, tb->CFL[h], 0);
}
@@ -732,34 +781,41 @@ static void replace_rkey(struct tree_balance *tb, int h, struct item_head *key)
"R[h] can not be empty if it exists (item number=%d)",
B_NR_ITEMS(tb->R[h]));
- memcpy(B_N_PDELIM_KEY(tb->CFR[h], tb->rkey[h]), key, KEY_SIZE);
+ memcpy(internal_key(tb->CFR[h], tb->rkey[h]), key, KEY_SIZE);
do_balance_mark_internal_dirty(tb, tb->CFR[h], 0);
}
-int balance_internal(struct tree_balance *tb, /* tree_balance structure */
- int h, /* level of the tree */
- int child_pos, struct item_head *insert_key, /* key for insertion on higher level */
- struct buffer_head **insert_ptr /* node for insertion on higher level */
- )
- /* if inserting/pasting
- {
- child_pos is the position of the node-pointer in S[h] that *
- pointed to S[h-1] before balancing of the h-1 level; *
- this means that new pointers and items must be inserted AFTER *
- child_pos
- }
- else
- {
- it is the position of the leftmost pointer that must be deleted (together with
- its corresponding key to the left of the pointer)
- as a result of the previous level's balancing.
- }
- */
+
+/*
+ * if inserting/pasting {
+ * child_pos is the position of the node-pointer in S[h] that
+ * pointed to S[h-1] before balancing of the h-1 level;
+ * this means that new pointers and items must be inserted AFTER
+ * child_pos
+ * } else {
+ * it is the position of the leftmost pointer that must be deleted
+ * (together with its corresponding key to the left of the pointer)
+ * as a result of the previous level's balancing.
+ * }
+ */
+
+int balance_internal(struct tree_balance *tb,
+ int h, /* level of the tree */
+ int child_pos,
+ /* key for insertion on higher level */
+ struct item_head *insert_key,
+ /* node for insertion on higher level */
+ struct buffer_head **insert_ptr)
{
struct buffer_head *tbSh = PATH_H_PBUFFER(tb->tb_path, h);
struct buffer_info bi;
- int order; /* we return this: it is 0 if there is no S[h], else it is tb->S[h]->b_item_order */
+
+ /*
+ * we return this: it is 0 if there is no S[h],
+ * else it is tb->S[h]->b_item_order
+ */
+ int order;
int insert_num, n, k;
struct buffer_head *S_new;
struct item_head new_insert_key;
@@ -774,8 +830,10 @@ int balance_internal(struct tree_balance *tb, /* tree_balance structure
(tbSh) ? PATH_H_POSITION(tb->tb_path,
h + 1) /*tb->S[h]->b_item_order */ : 0;
- /* Using insert_size[h] calculate the number insert_num of items
- that must be inserted to or deleted from S[h]. */
+ /*
+ * Using insert_size[h] calculate the number insert_num of items
+ * that must be inserted to or deleted from S[h].
+ */
insert_num = tb->insert_size[h] / ((int)(KEY_SIZE + DC_SIZE));
/* Check whether insert_num is proper * */
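
Each inserted item at an internal level costs one key plus one disk_child; with the usual reiserfs on-disk sizes (KEY_SIZE == 16 and DC_SIZE == 8, quoted here as an assumption since the hunk does not show them) the division works out as (sketch):

	insert_num = tb->insert_size[h] / ((int)(KEY_SIZE + DC_SIZE));
	/* e.g. insert_size[h] == 24  ->  24 / (16 + 8) == 1 item */
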
@@ -794,23 +852,21 @@ int balance_internal(struct tree_balance *tb, /* tree_balance structure
k = 0;
if (tb->lnum[h] > 0) {
- /* shift lnum[h] items from S[h] to the left neighbor L[h].
- check how many of new items fall into L[h] or CFL[h] after
- shifting */
+ /*
+ * shift lnum[h] items from S[h] to the left neighbor L[h].
+	 * check how many of the new items fall into L[h] or CFL[h] after
+ * shifting
+ */
n = B_NR_ITEMS(tb->L[h]); /* number of items in L[h] */
if (tb->lnum[h] <= child_pos) {
/* new items don't fall into L[h] or CFL[h] */
internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
tb->lnum[h]);
- /*internal_shift_left (tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,tb->lnum[h]); */
child_pos -= tb->lnum[h];
} else if (tb->lnum[h] > child_pos + insert_num) {
/* all new items fall into L[h] */
internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
tb->lnum[h] - insert_num);
- /* internal_shift_left(tb->L[h],tb->CFL[h],tb->lkey[h],tbSh,
- tb->lnum[h]-insert_num);
- */
/* insert insert_num keys and node-pointers into L[h] */
bi.tb = tb;
bi.bi_bh = tb->L[h];
@@ -826,7 +882,10 @@ int balance_internal(struct tree_balance *tb, /* tree_balance structure
} else {
struct disk_child *dc;
- /* some items fall into L[h] or CFL[h], but some don't fall */
+ /*
+ * some items fall into L[h] or CFL[h],
+			 * but some do not
+ */
internal_shift1_left(tb, h, child_pos + 1);
/* calculate number of new items that fall into L[h] */
k = tb->lnum[h] - child_pos - 1;
@@ -841,7 +900,10 @@ int balance_internal(struct tree_balance *tb, /* tree_balance structure
replace_lkey(tb, h, insert_key + k);
- /* replace the first node-ptr in S[h] by node-ptr to insert_ptr[k] */
+ /*
+ * replace the first node-ptr in S[h] by
+ * node-ptr to insert_ptr[k]
+ */
dc = B_N_CHILD(tbSh, 0);
put_dc_size(dc,
MAX_CHILD_SIZE(insert_ptr[k]) -
@@ -860,17 +922,17 @@ int balance_internal(struct tree_balance *tb, /* tree_balance structure
/* tb->lnum[h] > 0 */
if (tb->rnum[h] > 0) {
/*shift rnum[h] items from S[h] to the right neighbor R[h] */
- /* check how many of new items fall into R or CFR after shifting */
+ /*
+		 * check how many of the new items fall into R or CFR
+ * after shifting
+ */
n = B_NR_ITEMS(tbSh); /* number of items in S[h] */
if (n - tb->rnum[h] >= child_pos)
/* new items fall into S[h] */
- /*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],tb->rnum[h]); */
internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
tb->rnum[h]);
else if (n + insert_num - tb->rnum[h] < child_pos) {
/* all new items fall into R[h] */
- /*internal_shift_right(tb,h,tbSh,tb->CFR[h],tb->rkey[h],tb->R[h],
- tb->rnum[h] - insert_num); */
internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
tb->rnum[h] - insert_num);
@@ -904,7 +966,10 @@ int balance_internal(struct tree_balance *tb, /* tree_balance structure
replace_rkey(tb, h, insert_key + insert_num - k - 1);
- /* replace the first node-ptr in R[h] by node-ptr insert_ptr[insert_num-k-1] */
+ /*
+ * replace the first node-ptr in R[h] by
+ * node-ptr insert_ptr[insert_num-k-1]
+ */
dc = B_N_CHILD(tb->R[h], 0);
put_dc_size(dc,
MAX_CHILD_SIZE(insert_ptr
@@ -921,7 +986,7 @@ int balance_internal(struct tree_balance *tb, /* tree_balance structure
}
}
- /** Fill new node that appears instead of S[h] **/
+	/* Fill new node that appears instead of S[h] */
RFALSE(tb->blknum[h] > 2, "blknum can not be > 2 for internal level");
RFALSE(tb->blknum[h] < 0, "blknum can not be < 0");
@@ -997,26 +1062,30 @@ int balance_internal(struct tree_balance *tb, /* tree_balance structure
/* new items don't fall into S_new */
/* store the delimiting key for the next level */
/* new_insert_key = (n - snum)'th key in S[h] */
- memcpy(&new_insert_key, B_N_PDELIM_KEY(tbSh, n - snum),
+ memcpy(&new_insert_key, internal_key(tbSh, n - snum),
KEY_SIZE);
/* last parameter is del_par */
internal_move_pointers_items(&dest_bi, &src_bi,
LAST_TO_FIRST, snum, 0);
- /* internal_move_pointers_items(S_new, tbSh, LAST_TO_FIRST, snum, 0); */
} else if (n + insert_num - snum < child_pos) {
/* all new items fall into S_new */
/* store the delimiting key for the next level */
- /* new_insert_key = (n + insert_item - snum)'th key in S[h] */
+ /*
+			 * new_insert_key = (n + insert_num - snum)'th
+ * key in S[h]
+ */
memcpy(&new_insert_key,
- B_N_PDELIM_KEY(tbSh, n + insert_num - snum),
+ internal_key(tbSh, n + insert_num - snum),
KEY_SIZE);
/* last parameter is del_par */
internal_move_pointers_items(&dest_bi, &src_bi,
LAST_TO_FIRST,
snum - insert_num, 0);
- /* internal_move_pointers_items(S_new,tbSh,1,snum - insert_num,0); */
- /* insert insert_num keys and node-pointers into S_new */
+ /*
+ * insert insert_num keys and node-pointers
+ * into S_new
+ */
internal_insert_childs(&dest_bi,
/*S_new,tb->S[h-1]->b_next, */
child_pos - n - insert_num +
@@ -1033,7 +1102,6 @@ int balance_internal(struct tree_balance *tb, /* tree_balance structure
internal_move_pointers_items(&dest_bi, &src_bi,
LAST_TO_FIRST,
n - child_pos + 1, 1);
- /* internal_move_pointers_items(S_new,tbSh,1,n - child_pos + 1,1); */
/* calculate number of new items that fall into S_new */
k = snum - n + child_pos - 1;
@@ -1043,7 +1111,10 @@ int balance_internal(struct tree_balance *tb, /* tree_balance structure
/* new_insert_key = insert_key[insert_num - k - 1] */
memcpy(&new_insert_key, insert_key + insert_num - k - 1,
KEY_SIZE);
- /* replace first node-ptr in S_new by node-ptr to insert_ptr[insert_num-k-1] */
+ /*
+ * replace first node-ptr in S_new by node-ptr
+ * to insert_ptr[insert_num-k-1]
+ */
dc = B_N_CHILD(S_new, 0);
put_dc_size(dc,
@@ -1066,7 +1137,7 @@ int balance_internal(struct tree_balance *tb, /* tree_balance structure
|| buffer_dirty(S_new), "cm-00001: bad S_new (%b)",
S_new);
- // S_new is released in unfix_nodes
+ /* S_new is released in unfix_nodes */
}
n = B_NR_ITEMS(tbSh); /*number of items in S[h] */
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index bc8b8009897..e3ca0489491 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -25,7 +25,10 @@ int reiserfs_commit_write(struct file *f, struct page *page,
void reiserfs_evict_inode(struct inode *inode)
{
- /* We need blocks for transaction + (user+group) quota update (possibly delete) */
+ /*
+ * We need blocks for transaction + (user+group) quota
+ * update (possibly delete)
+ */
int jbegin_count =
JOURNAL_PER_BALANCE_CNT * 2 +
2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb);
@@ -39,8 +42,12 @@ void reiserfs_evict_inode(struct inode *inode)
if (inode->i_nlink)
goto no_delete;
- /* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */
- if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */
+ /*
+	 * The k_objectid == 0 case happens when we abort creating a
+	 * new inode for some reason like lack of space.
+	 * This also handles the bad_inode case.
+ */
+ if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) {
reiserfs_delete_xattrs(inode);
@@ -54,34 +61,43 @@ void reiserfs_evict_inode(struct inode *inode)
err = reiserfs_delete_object(&th, inode);
- /* Do quota update inside a transaction for journaled quotas. We must do that
- * after delete_object so that quota updates go into the same transaction as
- * stat data deletion */
+ /*
+ * Do quota update inside a transaction for journaled quotas.
+ * We must do that after delete_object so that quota updates
+ * go into the same transaction as stat data deletion
+ */
if (!err) {
int depth = reiserfs_write_unlock_nested(inode->i_sb);
dquot_free_inode(inode);
reiserfs_write_lock_nested(inode->i_sb, depth);
}
- if (journal_end(&th, inode->i_sb, jbegin_count))
+ if (journal_end(&th))
goto out;
- /* check return value from reiserfs_delete_object after
+ /*
+ * check return value from reiserfs_delete_object after
* ending the transaction
*/
if (err)
goto out;
- /* all items of file are deleted, so we can remove "save" link */
- remove_save_link(inode, 0 /* not truncate */ ); /* we can't do anything
- * about an error here */
+ /*
+	 * all items of file are deleted, so we can remove the
+	 * "save" link; we can't do anything about an error here
+ */
+ remove_save_link(inode, 0 /* not truncate */);
out:
reiserfs_write_unlock(inode->i_sb);
} else {
/* no object items are in the tree */
;
}
- clear_inode(inode); /* note this must go after the journal_end to prevent deadlock */
+
+ /* note this must go after the journal_end to prevent deadlock */
+ clear_inode(inode);
+
dquot_drop(inode);
inode->i_blocks = 0;
return;
@@ -103,8 +119,10 @@ static void _make_cpu_key(struct cpu_key *key, int version, __u32 dirid,
key->key_length = length;
}
-/* take base of inode_key (it comes from inode always) (dirid, objectid) and version from an inode, set
- offset and type of key */
+/*
+ * take the base of inode_key (dirid, objectid - it always comes from
+ * the inode) and the version from the inode, then set the offset and
+ * type of the key
+ */
void make_cpu_key(struct cpu_key *key, struct inode *inode, loff_t offset,
int type, int length)
{
@@ -114,9 +132,7 @@ void make_cpu_key(struct cpu_key *key, struct inode *inode, loff_t offset,
length);
}
-//
-// when key is 0, do not set version and short key
-//
+/* when key is 0, do not set version and short key */
inline void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
int version,
loff_t offset, int type, int length,
@@ -132,43 +148,47 @@ inline void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
set_le_ih_k_type(ih, type);
put_ih_item_len(ih, length);
/* set_ih_free_space (ih, 0); */
- // for directory items it is entry count, for directs and stat
- // datas - 0xffff, for indirects - 0
+ /*
+ * for directory items it is entry count, for directs and stat
+ * datas - 0xffff, for indirects - 0
+ */
put_ih_entry_count(ih, entry_count);
}
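
A hypothetical call showing the entry-count convention just described (sketch; offset and length stand in for whatever the caller has):

	make_le_item_head(&ih, NULL /* key == 0: version/short key skipped */,
			  KEY_FORMAT_3_5, offset, TYPE_DIRECT, length,
			  0xffff /* direct item: not a real entry count */);
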
-//
-// FIXME: we might cache recently accessed indirect item
-
-// Ugh. Not too eager for that....
-// I cut the code until such time as I see a convincing argument (benchmark).
-// I don't want a bloated inode struct..., and I don't like code complexity....
-
-/* cutting the code is fine, since it really isn't in use yet and is easy
-** to add back in. But, Vladimir has a really good idea here. Think
-** about what happens for reading a file. For each page,
-** The VFS layer calls reiserfs_readpage, who searches the tree to find
-** an indirect item. This indirect item has X number of pointers, where
-** X is a big number if we've done the block allocation right. But,
-** we only use one or two of these pointers during each call to readpage,
-** needlessly researching again later on.
-**
-** The size of the cache could be dynamic based on the size of the file.
-**
-** I'd also like to see us cache the location the stat data item, since
-** we are needlessly researching for that frequently.
-**
-** --chris
-*/
+/*
+ * FIXME: we might cache recently accessed indirect item
+ * Ugh. Not too eager for that....
+ * I cut the code until such time as I see a convincing argument (benchmark).
+ * I don't want a bloated inode struct..., and I don't like code complexity....
+ */
-/* If this page has a file tail in it, and
-** it was read in by get_block_create_0, the page data is valid,
-** but tail is still sitting in a direct item, and we can't write to
-** it. So, look through this page, and check all the mapped buffers
-** to make sure they have valid block numbers. Any that don't need
-** to be unmapped, so that __block_write_begin will correctly call
-** reiserfs_get_block to convert the tail into an unformatted node
-*/
+/*
+ * cutting the code is fine, since it really isn't in use yet and is easy
+ * to add back in. But, Vladimir has a really good idea here. Think
+ * about what happens for reading a file. For each page,
+ * The VFS layer calls reiserfs_readpage, who searches the tree to find
+ * an indirect item. This indirect item has X number of pointers, where
+ * X is a big number if we've done the block allocation right. But,
+ * we only use one or two of these pointers during each call to readpage,
+ * needlessly researching again later on.
+ *
+ * The size of the cache could be dynamic based on the size of the file.
+ *
+ * I'd also like to see us cache the location the stat data item, since
+ * we are needlessly researching for that frequently.
+ *
+ * --chris
+ */
+
+/*
+ * If this page has a file tail in it, and
+ * it was read in by get_block_create_0, the page data is valid,
+ * but tail is still sitting in a direct item, and we can't write to
+ * it. So, look through this page, and check all the mapped buffers
+ * to make sure they have valid block numbers. Any that don't need
+ * to be unmapped, so that __block_write_begin will correctly call
+ * reiserfs_get_block to convert the tail into an unformatted node
+ */
static inline void fix_tail_page_for_writing(struct page *page)
{
struct buffer_head *head, *next, *bh;
@@ -186,8 +206,10 @@ static inline void fix_tail_page_for_writing(struct page *page)
}
}
-/* reiserfs_get_block does not need to allocate a block only if it has been
- done already or non-hole position has been found in the indirect item */
+/*
+ * reiserfs_get_block does not need to allocate a block only if it has been
+ * done already or a non-hole position has been found in the indirect item
+ */
static inline int allocation_needed(int retval, b_blocknr_t allocated,
struct item_head *ih,
__le32 * item, int pos_in_item)
@@ -211,14 +233,16 @@ static inline void set_block_dev_mapped(struct buffer_head *bh,
map_bh(bh, inode->i_sb, block);
}
-//
-// files which were created in the earlier version can not be longer,
-// than 2 gb
-//
+/*
+ * files which were created in the earlier version can not be longer
+ * than 2 gb
+ */
static int file_capable(struct inode *inode, sector_t block)
{
- if (get_inode_item_key_version(inode) != KEY_FORMAT_3_5 || // it is new file.
- block < (1 << (31 - inode->i_sb->s_blocksize_bits))) // old file, but 'block' is inside of 2gb
+ /* it is new file. */
+ if (get_inode_item_key_version(inode) != KEY_FORMAT_3_5 ||
+ /* old file, but 'block' is inside of 2gb */
+ block < (1 << (31 - inode->i_sb->s_blocksize_bits)))
return 1;
return 0;
@@ -228,7 +252,6 @@ static int restart_transaction(struct reiserfs_transaction_handle *th,
struct inode *inode, struct treepath *path)
{
struct super_block *s = th->t_super;
- int len = th->t_blocks_allocated;
int err;
BUG_ON(!th->t_trans_id);
@@ -241,7 +264,7 @@ static int restart_transaction(struct reiserfs_transaction_handle *th,
return 0;
}
reiserfs_update_sd(th, inode);
- err = journal_end(th, s, len);
+ err = journal_end(th);
if (!err) {
err = journal_begin(th, s, JOURNAL_PER_BALANCE_CNT * 6);
if (!err)
@@ -250,14 +273,14 @@ static int restart_transaction(struct reiserfs_transaction_handle *th,
return err;
}
-// it is called by get_block when create == 0. Returns block number
-// for 'block'-th logical block of file. When it hits direct item it
-// returns 0 (being called from bmap) or read direct item into piece
-// of page (bh_result)
-
-// Please improve the english/clarity in the comment above, as it is
-// hard to understand.
-
+/*
+ * it is called by get_block when create == 0. Returns block number
+ * for 'block'-th logical block of file. When it hits direct item it
+ * returns 0 (being called from bmap) or read direct item into piece
+ * of page (bh_result)
+ * Please improve the English/clarity in the comment above, as it is
+ * hard to understand.
+ */
static int _get_block_create_0(struct inode *inode, sector_t block,
struct buffer_head *bh_result, int args)
{
@@ -273,7 +296,7 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
int done = 0;
unsigned long offset;
- // prepare the key to look for the 'block'-th block of file
+ /* prepare the key to look for the 'block'-th block of file */
make_cpu_key(&key, inode,
(loff_t) block * inode->i_sb->s_blocksize + 1, TYPE_ANY,
3);
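
Key offsets here are 1-based byte positions, which is why the lookup multiplies the logical block number by the block size and adds one; for example (illustration only):

	/* logical block 2 with 4KiB blocks: (loff_t)2 * 4096 + 1 == 8193 */
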
@@ -285,23 +308,28 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
kunmap(bh_result->b_page);
if (result == IO_ERROR)
return -EIO;
- // We do not return -ENOENT if there is a hole but page is uptodate, because it means
- // That there is some MMAPED data associated with it that is yet to be written to disk.
+ /*
+ * We do not return -ENOENT if there is a hole but page is
+ * uptodate, because it means that there is some MMAPED data
+ * associated with it that is yet to be written to disk.
+ */
if ((args & GET_BLOCK_NO_HOLE)
&& !PageUptodate(bh_result->b_page)) {
return -ENOENT;
}
return 0;
}
- //
+
bh = get_last_bh(&path);
- ih = get_ih(&path);
+ ih = tp_item_head(&path);
if (is_indirect_le_ih(ih)) {
- __le32 *ind_item = (__le32 *) B_I_PITEM(bh, ih);
+ __le32 *ind_item = (__le32 *) ih_item_body(bh, ih);
- /* FIXME: here we could cache indirect item or part of it in
- the inode to avoid search_by_key in case of subsequent
- access to file */
+ /*
+ * FIXME: here we could cache indirect item or part of it in
+ * the inode to avoid search_by_key in case of subsequent
+ * access to file
+ */
blocknr = get_block_num(ind_item, path.pos_in_item);
ret = 0;
if (blocknr) {
@@ -311,8 +339,12 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
set_buffer_boundary(bh_result);
}
} else
- // We do not return -ENOENT if there is a hole but page is uptodate, because it means
- // That there is some MMAPED data associated with it that is yet to be written to disk.
+ /*
+ * We do not return -ENOENT if there is a hole but
+ * page is uptodate, because it means that there is
+ * some MMAPED data associated with it that is
+ * yet to be written to disk.
+ */
if ((args & GET_BLOCK_NO_HOLE)
&& !PageUptodate(bh_result->b_page)) {
ret = -ENOENT;
@@ -323,41 +355,45 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
kunmap(bh_result->b_page);
return ret;
}
- // requested data are in direct item(s)
+ /* requested data are in direct item(s) */
if (!(args & GET_BLOCK_READ_DIRECT)) {
- // we are called by bmap. FIXME: we can not map block of file
- // when it is stored in direct item(s)
+ /*
+		 * we are called by bmap. FIXME: we cannot map a block of the
+		 * file when it is stored in direct item(s)
+ */
pathrelse(&path);
if (p)
kunmap(bh_result->b_page);
return -ENOENT;
}
- /* if we've got a direct item, and the buffer or page was uptodate,
- ** we don't want to pull data off disk again. skip to the
- ** end, where we map the buffer and return
+ /*
+ * if we've got a direct item, and the buffer or page was uptodate,
+ * we don't want to pull data off disk again. skip to the
+ * end, where we map the buffer and return
*/
if (buffer_uptodate(bh_result)) {
goto finished;
} else
/*
- ** grab_tail_page can trigger calls to reiserfs_get_block on up to date
- ** pages without any buffers. If the page is up to date, we don't want
- ** read old data off disk. Set the up to date bit on the buffer instead
- ** and jump to the end
+		 * grab_tail_page can trigger calls to reiserfs_get_block on
+		 * up-to-date pages without any buffers. If the page is up
+		 * to date, we don't want to read old data off disk. Set the
+		 * up-to-date bit on the buffer instead and jump to the end
*/
if (!bh_result->b_page || PageUptodate(bh_result->b_page)) {
set_buffer_uptodate(bh_result);
goto finished;
}
- // read file tail into part of page
+ /* read file tail into part of page */
offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1);
copy_item_head(&tmp_ih, ih);
- /* we only want to kmap if we are reading the tail into the page.
- ** this is not the common case, so we don't kmap until we are
- ** sure we need to. But, this means the item might move if
- ** kmap schedules
+ /*
+ * we only want to kmap if we are reading the tail into the page.
+ * this is not the common case, so we don't kmap until we are
+ * sure we need to. But, this means the item might move if
+ * kmap schedules
*/
if (!p)
p = (char *)kmap(bh_result->b_page);
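/*
 * A sketch of the page-offset line above: because key offsets are
 * 1-based, subtract 1 before masking with the page size. Assumes
 * 4 KiB pages; the values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t page_size = 4096;
	uint64_t key_offset = 8193;	/* first byte of the third page */
	uint64_t offset = (key_offset - 1) & (page_size - 1);

	printf("%llu\n", (unsigned long long)offset);	/* 0 */
	return 0;
}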
@@ -368,10 +404,11 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
if (!is_direct_le_ih(ih)) {
BUG();
}
- /* make sure we don't read more bytes than actually exist in
- ** the file. This can happen in odd cases where i_size isn't
- ** correct, and when direct item padding results in a few
- ** extra bytes at the end of the direct item
+ /*
+ * make sure we don't read more bytes than actually exist in
+ * the file. This can happen in odd cases where i_size isn't
+ * correct, and when direct item padding results in a few
+ * extra bytes at the end of the direct item
*/
if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
break;
@@ -383,40 +420,43 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
} else {
chars = ih_item_len(ih) - path.pos_in_item;
}
- memcpy(p, B_I_PITEM(bh, ih) + path.pos_in_item, chars);
+ memcpy(p, ih_item_body(bh, ih) + path.pos_in_item, chars);
if (done)
break;
p += chars;
+ /*
+		 * we are done if the direct item just read is not the last
+		 * item of the node. FIXME: we could try to check the right
+		 * delimiting key to see whether the direct item continues
+		 * in the right neighbor, or rely on i_size
+ */
if (PATH_LAST_POSITION(&path) != (B_NR_ITEMS(bh) - 1))
- // we done, if read direct item is not the last item of
- // node FIXME: we could try to check right delimiting key
- // to see whether direct item continues in the right
- // neighbor or rely on i_size
break;
- // update key to look for the next piece
+ /* update key to look for the next piece */
set_cpu_key_k_offset(&key, cpu_key_k_offset(&key) + chars);
result = search_for_position_by_key(inode->i_sb, &key, &path);
if (result != POSITION_FOUND)
- // i/o error most likely
+ /* i/o error most likely */
break;
bh = get_last_bh(&path);
- ih = get_ih(&path);
+ ih = tp_item_head(&path);
} while (1);
flush_dcache_page(bh_result->b_page);
kunmap(bh_result->b_page);
- finished:
+finished:
pathrelse(&path);
if (result == IO_ERROR)
return -EIO;
- /* this buffer has valid data, but isn't valid for io. mapping it to
+ /*
+ * this buffer has valid data, but isn't valid for io. mapping it to
* block #0 tells the rest of reiserfs it just has a tail in it
*/
map_bh(bh_result, inode->i_sb, 0);
@@ -424,8 +464,10 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
return 0;
}
-// this is called to create file map. So, _get_block_create_0 will not
-// read direct item
+/*
+ * this is called to create the file map, so _get_block_create_0 will
+ * not read a direct item
+ */
static int reiserfs_bmap(struct inode *inode, sector_t block,
struct buffer_head *bh_result, int create)
{
@@ -439,22 +481,23 @@ static int reiserfs_bmap(struct inode *inode, sector_t block,
return 0;
}
-/* special version of get_block that is only used by grab_tail_page right
-** now. It is sent to __block_write_begin, and when you try to get a
-** block past the end of the file (or a block from a hole) it returns
-** -ENOENT instead of a valid buffer. __block_write_begin expects to
-** be able to do i/o on the buffers returned, unless an error value
-** is also returned.
-**
-** So, this allows __block_write_begin to be used for reading a single block
-** in a page. Where it does not produce a valid page for holes, or past the
-** end of the file. This turns out to be exactly what we need for reading
-** tails for conversion.
-**
-** The point of the wrapper is forcing a certain value for create, even
-** though the VFS layer is calling this function with create==1. If you
-** don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
-** don't use this function.
+/*
+ * special version of get_block that is only used by grab_tail_page right
+ * now. It is sent to __block_write_begin, and when you try to get a
+ * block past the end of the file (or a block from a hole) it returns
+ * -ENOENT instead of a valid buffer. __block_write_begin expects to
+ * be able to do i/o on the buffers returned, unless an error value
+ * is also returned.
+ *
+ * So, this allows __block_write_begin to be used for reading a single
+ * block in a page, where it does not produce a valid page for holes or
+ * past the end of the file. This turns out to be exactly what we need
+ * for reading tails for conversion.
+ *
+ * The point of the wrapper is forcing a certain value for create, even
+ * though the VFS layer is calling this function with create==1. If you
+ * don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
+ * don't use this function.
*/
static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
struct buffer_head *bh_result,
@@ -463,8 +506,10 @@ static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
return reiserfs_get_block(inode, block, bh_result, GET_BLOCK_NO_HOLE);
}
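/*
 * A minimal sketch of the wrapper idea above: drop the caller's create
 * flag and substitute a fixed one. The flag value and function names
 * here are illustrative stand-ins, not the kernel's.
 */
#include <stdio.h>

#define SKETCH_NO_HOLE 2	/* stand-in for GET_BLOCK_NO_HOLE */

static int sketch_get_block(long block, int create)
{
	printf("get_block(%ld, create=%d)\n", block, create);
	return 0;
}

/* the VFS would call this with create == 1; we override it */
static int sketch_get_block_create_0(long block, int create)
{
	(void)create;
	return sketch_get_block(block, SKETCH_NO_HOLE);
}

int main(void)
{
	return sketch_get_block_create_0(7, 1);
}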
-/* This is special helper for reiserfs_get_block in case we are executing
- direct_IO request. */
+/*
+ * This is a special helper for reiserfs_get_block for the case when we
+ * are executing a direct_IO request.
+ */
static int reiserfs_get_blocks_direct_io(struct inode *inode,
sector_t iblock,
struct buffer_head *bh_result,
@@ -474,9 +519,11 @@ static int reiserfs_get_blocks_direct_io(struct inode *inode,
bh_result->b_page = NULL;
- /* We set the b_size before reiserfs_get_block call since it is
- referenced in convert_tail_for_hole() that may be called from
- reiserfs_get_block() */
+ /*
+ * We set the b_size before reiserfs_get_block call since it is
+ * referenced in convert_tail_for_hole() that may be called from
+ * reiserfs_get_block()
+ */
bh_result->b_size = (1 << inode->i_blkbits);
ret = reiserfs_get_block(inode, iblock, bh_result,
@@ -486,14 +533,18 @@ static int reiserfs_get_blocks_direct_io(struct inode *inode,
/* don't allow direct io onto tail pages */
if (buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
- /* make sure future calls to the direct io funcs for this offset
- ** in the file fail by unmapping the buffer
+ /*
+ * make sure future calls to the direct io funcs for this
+ * offset in the file fail by unmapping the buffer
*/
clear_buffer_mapped(bh_result);
ret = -EINVAL;
}
- /* Possible unpacked tail. Flush the data before pages have
- disappeared */
+
+ /*
+ * Possible unpacked tail. Flush the data before pages have
+ * disappeared
+ */
if (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) {
int err;
@@ -507,20 +558,20 @@ static int reiserfs_get_blocks_direct_io(struct inode *inode,
if (err < 0)
ret = err;
}
- out:
+out:
return ret;
}
/*
-** helper function for when reiserfs_get_block is called for a hole
-** but the file tail is still in a direct item
-** bh_result is the buffer head for the hole
-** tail_offset is the offset of the start of the tail in the file
-**
-** This calls prepare_write, which will start a new transaction
-** you should not be in a transaction, or have any paths held when you
-** call this.
-*/
+ * helper function for when reiserfs_get_block is called for a hole
+ * but the file tail is still in a direct item
+ * bh_result is the buffer head for the hole
+ * tail_offset is the offset of the start of the tail in the file
+ *
+ * This calls prepare_write, which will start a new transaction;
+ * you should not be in a transaction or have any paths held when
+ * you call this.
+ */
static int convert_tail_for_hole(struct inode *inode,
struct buffer_head *bh_result,
loff_t tail_offset)
@@ -540,9 +591,10 @@ static int convert_tail_for_hole(struct inode *inode,
tail_end = (tail_start | (bh_result->b_size - 1)) + 1;
index = tail_offset >> PAGE_CACHE_SHIFT;
- /* hole_page can be zero in case of direct_io, we are sure
- that we cannot get here if we write with O_DIRECT into
- tail page */
+ /*
+	 * hole_page can be zero in the case of direct_io; we are sure
+	 * we cannot get here if we write with O_DIRECT into the tail page
+ */
if (!hole_page || index != hole_page->index) {
tail_page = grab_cache_page(inode->i_mapping, index);
retval = -ENOMEM;
@@ -553,14 +605,15 @@ static int convert_tail_for_hole(struct inode *inode,
tail_page = hole_page;
}
- /* we don't have to make sure the conversion did not happen while
- ** we were locking the page because anyone that could convert
- ** must first take i_mutex.
- **
- ** We must fix the tail page for writing because it might have buffers
- ** that are mapped, but have a block number of 0. This indicates tail
- ** data that has been read directly into the page, and
- ** __block_write_begin won't trigger a get_block in this case.
+ /*
+ * we don't have to make sure the conversion did not happen while
+ * we were locking the page because anyone that could convert
+ * must first take i_mutex.
+ *
+ * We must fix the tail page for writing because it might have buffers
+ * that are mapped, but have a block number of 0. This indicates tail
+ * data that has been read directly into the page, and
+ * __block_write_begin won't trigger a get_block in this case.
*/
fix_tail_page_for_writing(tail_page);
retval = __reiserfs_write_begin(tail_page, tail_start,
@@ -573,12 +626,12 @@ static int convert_tail_for_hole(struct inode *inode,
retval = reiserfs_commit_write(NULL, tail_page, tail_start, tail_end);
- unlock:
+unlock:
if (tail_page != hole_page) {
unlock_page(tail_page);
page_cache_release(tail_page);
}
- out:
+out:
return retval;
}
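/*
 * A sketch of the tail-offset arithmetic used by convert_tail_for_hole
 * above (tail_end mirrors (tail_start | (b_size - 1)) + 1). Assumes
 * 4 KiB pages and a 1 KiB block; all values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t page_size = 4096, b_size = 1024;
	uint64_t tail_offset = 9473;	/* some offset in the file */
	uint64_t index = tail_offset / page_size;	/* page 2 */
	/* offset of the tail within its page, then the end of its block */
	uint64_t tail_start = tail_offset & (page_size - 1);	/* 1281 */
	uint64_t tail_end = (tail_start | (b_size - 1)) + 1;	/* 2048 */

	printf("%llu %llu %llu\n",
	       (unsigned long long)index,
	       (unsigned long long)tail_start,
	       (unsigned long long)tail_end);
	return 0;
}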
@@ -604,7 +657,8 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
struct buffer_head *bh_result, int create)
{
int repeat, retval = 0;
- b_blocknr_t allocated_block_nr = 0; // b_blocknr_t is (unsigned) 32 bit int
+	/* b_blocknr_t is (unsigned) 32 bit int */
+ b_blocknr_t allocated_block_nr = 0;
INITIALIZE_PATH(path);
int pos_in_item;
struct cpu_key key;
@@ -614,12 +668,14 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
int done;
int fs_gen;
struct reiserfs_transaction_handle *th = NULL;
- /* space reserved in transaction batch:
- . 3 balancings in direct->indirect conversion
- . 1 block involved into reiserfs_update_sd()
- XXX in practically impossible worst case direct2indirect()
- can incur (much) more than 3 balancings.
- quota update for user, group */
+ /*
+ * space reserved in transaction batch:
+ * . 3 balancings in direct->indirect conversion
+	 * . 1 block involved in reiserfs_update_sd()
+ * XXX in practically impossible worst case direct2indirect()
+ * can incur (much) more than 3 balancings.
+ * quota update for user, group
+ */
int jbegin_count =
JOURNAL_PER_BALANCE_CNT * 3 + 1 +
2 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
@@ -636,8 +692,9 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
return -EFBIG;
}
- /* if !create, we aren't changing the FS, so we don't need to
- ** log anything, so we don't need to start a transaction
+ /*
+ * if !create, we aren't changing the FS, so we don't need to
+ * log anything, so we don't need to start a transaction
*/
if (!(create & GET_BLOCK_CREATE)) {
int ret;
@@ -647,6 +704,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
reiserfs_write_unlock(inode->i_sb);
return ret;
}
+
/*
* if we're already in a transaction, make sure to close
* any new transactions we start in this func
@@ -655,8 +713,10 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
reiserfs_transaction_running(inode->i_sb))
dangle = 0;
- /* If file is of such a size, that it might have a tail and tails are enabled
- ** we should mark it as possibly needing tail packing on close
+ /*
+	 * If the file is of such a size that it might have a tail, and
+	 * tails are enabled, we should mark it as possibly needing
+	 * tail packing on close
*/
if ((have_large_tails(inode->i_sb)
&& inode->i_size < i_block_size(inode) * 4)
@@ -667,7 +727,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
/* set the key of the first byte in the 'block'-th block of file */
make_cpu_key(&key, inode, new_offset, TYPE_ANY, 3 /*key length */ );
if ((new_offset + inode->i_sb->s_blocksize - 1) > inode->i_size) {
- start_trans:
+start_trans:
th = reiserfs_persistent_transaction(inode->i_sb, jbegin_count);
if (!th) {
retval = -ENOMEM;
@@ -675,7 +735,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
}
reiserfs_update_inode_transaction(inode);
}
- research:
+research:
retval = search_for_position_by_key(inode->i_sb, &key, &path);
if (retval == IO_ERROR) {
@@ -684,8 +744,8 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
}
bh = get_last_bh(&path);
- ih = get_ih(&path);
- item = get_item(&path);
+ ih = tp_item_head(&path);
+ item = tp_item_body(&path);
pos_in_item = path.pos_in_item;
fs_gen = get_generation(inode->i_sb);
@@ -703,11 +763,12 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
_allocate_block(th, block, inode, &allocated_block_nr,
&path, create);
+ /*
+ * restart the transaction to give the journal a chance to free
+ * some blocks. releases the path, so we have to go back to
+ * research if we succeed on the second try
+ */
if (repeat == NO_DISK_SPACE || repeat == QUOTA_EXCEEDED) {
- /* restart the transaction to give the journal a chance to free
- ** some blocks. releases the path, so we have to go back to
- ** research if we succeed on the second try
- */
SB_JOURNAL(inode->i_sb)->j_next_async_flush = 1;
retval = restart_transaction(th, inode, &path);
if (retval)
@@ -734,9 +795,11 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
if (indirect_item_found(retval, ih)) {
b_blocknr_t unfm_ptr;
- /* 'block'-th block is in the file already (there is
- corresponding cell in some indirect item). But it may be
- zero unformatted node pointer (hole) */
+ /*
+	 * the 'block'-th block is in the file already (there is a
+	 * corresponding cell in some indirect item). But it may be
+	 * a zero unformatted node pointer (hole)
+ */
unfm_ptr = get_block_num(item, pos_in_item);
if (unfm_ptr == 0) {
/* use allocated block to plug the hole */
@@ -753,7 +816,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
reiserfs_add_ordered_list(inode, bh_result);
put_block_num(item, pos_in_item, allocated_block_nr);
unfm_ptr = allocated_block_nr;
- journal_mark_dirty(th, inode->i_sb, bh);
+ journal_mark_dirty(th, bh);
reiserfs_update_sd(th, inode);
}
set_block_dev_mapped(bh_result, unfm_ptr, inode);
@@ -764,9 +827,10 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
reiserfs_write_unlock(inode->i_sb);
- /* the item was found, so new blocks were not added to the file
- ** there is no need to make sure the inode is updated with this
- ** transaction
+ /*
+ * the item was found, so new blocks were not added to the file
+ * there is no need to make sure the inode is updated with this
+ * transaction
*/
return retval;
}
@@ -776,9 +840,11 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
goto start_trans;
}
- /* desired position is not found or is in the direct item. We have
- to append file with holes up to 'block'-th block converting
- direct items to indirect one if necessary */
+ /*
+	 * desired position is not found or is in the direct item. We have
+	 * to append the file with holes up to the 'block'-th block,
+	 * converting direct items to an indirect one if necessary
+ */
done = 0;
do {
if (is_statdata_le_ih(ih)) {
@@ -790,16 +856,18 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
TYPE_INDIRECT, UNFM_P_SIZE,
0 /* free_space */ );
+ /*
+ * we are going to add 'block'-th block to the file.
+ * Use allocated block for that
+ */
if (cpu_key_k_offset(&key) == 1) {
- /* we are going to add 'block'-th block to the file. Use
- allocated block for that */
unp = cpu_to_le32(allocated_block_nr);
set_block_dev_mapped(bh_result,
allocated_block_nr, inode);
set_buffer_new(bh_result);
done = 1;
}
- tmp_key = key; // ;)
+ tmp_key = key; /* ;) */
set_cpu_key_k_offset(&tmp_key, 1);
PATH_LAST_POSITION(&path)++;
@@ -809,9 +877,12 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
if (retval) {
reiserfs_free_block(th, inode,
allocated_block_nr, 1);
- goto failure; // retval == -ENOSPC, -EDQUOT or -EIO or -EEXIST
+ /*
+ * retval == -ENOSPC, -EDQUOT or -EIO
+ * or -EEXIST
+ */
+ goto failure;
}
- //mark_tail_converted (inode);
} else if (is_direct_le_ih(ih)) {
/* direct item has to be converted */
loff_t tail_offset;
@@ -819,18 +890,24 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
tail_offset =
((le_ih_k_offset(ih) -
1) & ~(inode->i_sb->s_blocksize - 1)) + 1;
+
+ /*
+ * direct item we just found fits into block we have
+ * to map. Convert it into unformatted node: use
+ * bh_result for the conversion
+ */
if (tail_offset == cpu_key_k_offset(&key)) {
- /* direct item we just found fits into block we have
- to map. Convert it into unformatted node: use
- bh_result for the conversion */
set_block_dev_mapped(bh_result,
allocated_block_nr, inode);
unbh = bh_result;
done = 1;
} else {
- /* we have to padd file tail stored in direct item(s)
- up to block size and convert it to unformatted
- node. FIXME: this should also get into page cache */
+ /*
+ * we have to pad file tail stored in direct
+ * item(s) up to block size and convert it
+ * to unformatted node. FIXME: this should
+ * also get into page cache
+ */
pathrelse(&path);
/*
@@ -859,7 +936,10 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
inode->i_ino,
retval);
if (allocated_block_nr) {
- /* the bitmap, the super, and the stat data == 3 */
+ /*
+ * the bitmap, the super,
+ * and the stat data == 3
+ */
if (!th)
th = reiserfs_persistent_transaction(inode->i_sb, 3);
if (th)
@@ -881,43 +961,57 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
allocated_block_nr, 1);
goto failure;
}
- /* it is important the set_buffer_uptodate is done after
- ** the direct2indirect. The buffer might contain valid
- ** data newer than the data on disk (read by readpage, changed,
- ** and then sent here by writepage). direct2indirect needs
- ** to know if unbh was already up to date, so it can decide
- ** if the data in unbh needs to be replaced with data from
- ** the disk
+ /*
+			 * it is important that set_buffer_uptodate is done
+ * after the direct2indirect. The buffer might
+ * contain valid data newer than the data on disk
+ * (read by readpage, changed, and then sent here by
+ * writepage). direct2indirect needs to know if unbh
+ * was already up to date, so it can decide if the
+ * data in unbh needs to be replaced with data from
+ * the disk
*/
set_buffer_uptodate(unbh);
- /* unbh->b_page == NULL in case of DIRECT_IO request, this means
- buffer will disappear shortly, so it should not be added to
+ /*
+			 * unbh->b_page == NULL in the case of a DIRECT_IO
+			 * request; the buffer will disappear shortly, so it
+			 * should not be added to the tail list
*/
if (unbh->b_page) {
- /* we've converted the tail, so we must
- ** flush unbh before the transaction commits
+ /*
+ * we've converted the tail, so we must
+ * flush unbh before the transaction commits
*/
reiserfs_add_tail_list(inode, unbh);
- /* mark it dirty now to prevent commit_write from adding
- ** this buffer to the inode's dirty buffer list
+ /*
+ * mark it dirty now to prevent commit_write
+ * from adding this buffer to the inode's
+ * dirty buffer list
*/
/*
- * AKPM: changed __mark_buffer_dirty to mark_buffer_dirty().
- * It's still atomic, but it sets the page dirty too,
- * which makes it eligible for writeback at any time by the
- * VM (which was also the case with __mark_buffer_dirty())
+ * AKPM: changed __mark_buffer_dirty to
+ * mark_buffer_dirty(). It's still atomic,
+ * but it sets the page dirty too, which makes
+ * it eligible for writeback at any time by the
+ * VM (which was also the case with
+ * __mark_buffer_dirty())
*/
mark_buffer_dirty(unbh);
}
} else {
- /* append indirect item with holes if needed, when appending
- pointer to 'block'-th block use block, which is already
- allocated */
+ /*
+			 * append the indirect item with holes if needed; when
+			 * appending the pointer to the 'block'-th block, use
+			 * the block which is already allocated
+ */
struct cpu_key tmp_key;
- unp_t unf_single = 0; // We use this in case we need to allocate only
- // one block which is a fastpath
+ /*
+ * We use this in case we need to allocate
+ * only one block which is a fastpath
+ */
+ unp_t unf_single = 0;
unp_t *un;
__u64 max_to_insert =
MAX_ITEM_LEN(inode->i_sb->s_blocksize) /
@@ -926,14 +1020,17 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
RFALSE(pos_in_item != ih_item_len(ih) / UNFM_P_SIZE,
"vs-804: invalid position for append");
- /* indirect item has to be appended, set up key of that position */
+ /*
+ * indirect item has to be appended,
+ * set up key of that position
+ * (key type is unimportant)
+ */
make_cpu_key(&tmp_key, inode,
le_key_k_offset(version,
- &(ih->ih_key)) +
+ &ih->ih_key) +
op_bytes_number(ih,
inode->i_sb->s_blocksize),
- //pos_in_item * inode->i_sb->s_blocksize,
- TYPE_INDIRECT, 3); // key type is unimportant
+ TYPE_INDIRECT, 3);
RFALSE(cpu_key_k_offset(&tmp_key) > cpu_key_k_offset(&key),
"green-805: invalid offset");
@@ -954,8 +1051,10 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
}
}
if (blocks_needed <= max_to_insert) {
- /* we are going to add target block to the file. Use allocated
- block for that */
+ /*
+ * we are going to add target block to
+ * the file. Use allocated block for that
+ */
un[blocks_needed - 1] =
cpu_to_le32(allocated_block_nr);
set_block_dev_mapped(bh_result,
@@ -964,8 +1063,11 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
done = 1;
} else {
/* paste hole to the indirect item */
- /* If kmalloc failed, max_to_insert becomes zero and it means we
- only have space for one block */
+ /*
+ * If kmalloc failed, max_to_insert becomes
+ * zero and it means we only have space for
+ * one block
+ */
blocks_needed =
max_to_insert ? max_to_insert : 1;
}
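/*
 * A sketch of the hole-pasting bound above: how many unformatted-node
 * pointers fit in one item, and the fall-back to a single pointer when
 * the scratch allocation failed. The item-space figure is illustrative,
 * not the real MAX_ITEM_LEN().
 */
#include <stdio.h>

int main(void)
{
	unsigned long long item_space = 4048;	/* illustrative item length */
	unsigned long long ptr_size = 4;	/* one __le32 block pointer */
	unsigned long long max_to_insert = item_space / ptr_size; /* 1012 */

	/* if the scratch buffer could not be allocated, paste one at a time */
	unsigned long long no_mem = 0;
	unsigned long long blocks_needed = no_mem ? no_mem : 1;

	printf("%llu %llu\n", max_to_insert, blocks_needed);	/* 1012 1 */
	return 0;
}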
@@ -984,9 +1086,12 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
goto failure;
}
if (!done) {
- /* We need to mark new file size in case this function will be
- interrupted/aborted later on. And we may do this only for
- holes. */
+ /*
+			 * We need to mark the new file size in case
+			 * this function is interrupted/aborted later
+			 * on. And we may do this only for holes.
+ */
inode->i_size +=
inode->i_sb->s_blocksize * blocks_needed;
}
@@ -995,13 +1100,13 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
if (done == 1)
break;
- /* this loop could log more blocks than we had originally asked
- ** for. So, we have to allow the transaction to end if it is
- ** too big or too full. Update the inode so things are
- ** consistent if we crash before the function returns
- **
- ** release the path so that anybody waiting on the path before
- ** ending their transaction will be able to continue.
+ /*
+ * this loop could log more blocks than we had originally
+ * asked for. So, we have to allow the transaction to end
+	 * if it is too big or too full. Update the inode so things
+	 * are consistent if we crash before the function returns.
+	 *
+	 * release the path so that anybody waiting on the path before
+	 * ending their transaction will be able to continue.
*/
if (journal_transaction_should_end(th, th->t_blocks_allocated)) {
retval = restart_transaction(th, inode, &path);
@@ -1031,14 +1136,14 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
goto failure;
}
bh = get_last_bh(&path);
- ih = get_ih(&path);
- item = get_item(&path);
+ ih = tp_item_head(&path);
+ item = tp_item_body(&path);
pos_in_item = path.pos_in_item;
} while (1);
retval = 0;
- failure:
+failure:
if (th && (!dangle || (retval && !th->t_trans_id))) {
int err;
if (th->t_trans_id)
@@ -1060,8 +1165,10 @@ reiserfs_readpages(struct file *file, struct address_space *mapping,
return mpage_readpages(mapping, pages, nr_pages, reiserfs_get_block);
}
-/* Compute real number of used bytes by file
- * Following three functions can go away when we'll have enough space in stat item
+/*
+ * Compute the real number of bytes used by a file.
+ * The following three functions can go away when we have enough
+ * space in the stat item
*/
static int real_space_diff(struct inode *inode, int sd_size)
{
@@ -1071,13 +1178,14 @@ static int real_space_diff(struct inode *inode, int sd_size)
if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode))
return sd_size;
- /* End of file is also in full block with indirect reference, so round
- ** up to the next block.
- **
- ** there is just no way to know if the tail is actually packed
- ** on the file, so we have to assume it isn't. When we pack the
- ** tail, we add 4 bytes to pretend there really is an unformatted
- ** node pointer
+ /*
+ * End of file is also in full block with indirect reference, so round
+ * up to the next block.
+ *
+ * there is just no way to know if the tail is actually packed
+ * on the file, so we have to assume it isn't. When we pack the
+ * tail, we add 4 bytes to pretend there really is an unformatted
+ * node pointer
*/
bytes =
((inode->i_size +
@@ -1108,36 +1216,36 @@ static inline ulong to_fake_used_blocks(struct inode *inode, int sd_size)
bytes += (loff_t) 511;
}
- /* files from before the quota patch might i_blocks such that
- ** bytes < real_space. Deal with that here to prevent it from
- ** going negative.
+ /*
+	 * files from before the quota patch might have i_blocks such that
+ * bytes < real_space. Deal with that here to prevent it from
+ * going negative.
*/
if (bytes < real_space)
return 0;
return (bytes - real_space) >> 9;
}
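/*
 * A userspace sketch of the clamp above: convert a byte surplus to
 * 512-byte sectors, treating a pre-quota underflow as zero. Sample
 * values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_blocks(uint64_t bytes, uint64_t real_space)
{
	if (bytes < real_space)		/* old files: prevent going negative */
		return 0;
	return (bytes - real_space) >> 9;
}

int main(void)
{
	printf("%llu %llu\n",
	       (unsigned long long)fake_blocks(4096, 512),	/* 7 */
	       (unsigned long long)fake_blocks(100, 512));	/* 0 */
	return 0;
}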
-//
-// BAD: new directories have stat data of new type and all other items
-// of old type. Version stored in the inode says about body items, so
-// in update_stat_data we can not rely on inode, but have to check
-// item version directly
-//
+/*
+ * BAD: new directories have stat data of new type and all other items
+ * of old type. Version stored in the inode says about body items, so
+ * in update_stat_data we can not rely on inode, but have to check
+ * item version directly
+ */
-// called by read_locked_inode
+/* called by read_locked_inode */
static void init_inode(struct inode *inode, struct treepath *path)
{
struct buffer_head *bh;
struct item_head *ih;
__u32 rdev;
- //int version = ITEM_VERSION_1;
bh = PATH_PLAST_BUFFER(path);
- ih = PATH_PITEM_HEAD(path);
+ ih = tp_item_head(path);
- copy_key(INODE_PKEY(inode), &(ih->ih_key));
+ copy_key(INODE_PKEY(inode), &ih->ih_key);
- INIT_LIST_HEAD(&(REISERFS_I(inode)->i_prealloc_list));
+ INIT_LIST_HEAD(&REISERFS_I(inode)->i_prealloc_list);
REISERFS_I(inode)->i_flags = 0;
REISERFS_I(inode)->i_prealloc_block = 0;
REISERFS_I(inode)->i_prealloc_count = 0;
@@ -1147,7 +1255,7 @@ static void init_inode(struct inode *inode, struct treepath *path)
if (stat_data_v1(ih)) {
struct stat_data_v1 *sd =
- (struct stat_data_v1 *)B_I_PITEM(bh, ih);
+ (struct stat_data_v1 *)ih_item_body(bh, ih);
unsigned long blocks;
set_inode_item_key_version(inode, KEY_FORMAT_3_5);
@@ -1168,20 +1276,26 @@ static void init_inode(struct inode *inode, struct treepath *path)
inode->i_generation = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
blocks = (inode->i_size + 511) >> 9;
blocks = _ROUND_UP(blocks, inode->i_sb->s_blocksize >> 9);
+
+ /*
+ * there was a bug in <=3.5.23 when i_blocks could take
+ * negative values. Starting from 3.5.17 this value could
+ * even be stored in stat data. For such files we set
+		 * i_blocks based on file size. Two notes: this can be
+		 * wrong for sparse files, and the on-disk value will only
+		 * be updated if the file's inode ever changes
+ */
if (inode->i_blocks > blocks) {
- // there was a bug in <=3.5.23 when i_blocks could take negative
- // values. Starting from 3.5.17 this value could even be stored in
- // stat data. For such files we set i_blocks based on file
- // size. Just 2 notes: this can be wrong for sparce files. On-disk value will be
- // only updated if file's inode will ever change
inode->i_blocks = blocks;
}
rdev = sd_v1_rdev(sd);
REISERFS_I(inode)->i_first_direct_byte =
sd_v1_first_direct_byte(sd);
- /* an early bug in the quota code can give us an odd number for the
- ** block count. This is incorrect, fix it here.
+
+ /*
+ * an early bug in the quota code can give us an odd
+ * number for the block count. This is incorrect, fix it here.
*/
if (inode->i_blocks & 1) {
inode->i_blocks++;
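/*
 * A sketch of the v1 block-count fixups above: derive the expected
 * count from i_size (512-byte sectors rounded up to whole fs blocks),
 * then round an odd count (early quota bug) up to even. ROUND_UP is a
 * local stand-in for the kernel's _ROUND_UP; values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define ROUND_UP(x, n)	((((x) + (n) - 1) / (n)) * (n))

int main(void)
{
	uint64_t i_size = 10000, blocksize = 4096;
	uint64_t blocks = (i_size + 511) >> 9;		/* 20 sectors */

	blocks = ROUND_UP(blocks, blocksize >> 9);	/* 24 = 3 fs blocks */
	if (blocks & 1)					/* quota-bug fixup */
		blocks++;
	printf("%llu\n", (unsigned long long)blocks);	/* 24 */
	return 0;
}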
@@ -1189,13 +1303,17 @@ static void init_inode(struct inode *inode, struct treepath *path)
inode_set_bytes(inode,
to_real_used_space(inode, inode->i_blocks,
SD_V1_SIZE));
- /* nopack is initially zero for v1 objects. For v2 objects,
- nopack is initialised from sd_attrs */
+ /*
+ * nopack is initially zero for v1 objects. For v2 objects,
+ * nopack is initialised from sd_attrs
+ */
REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
} else {
- // new stat data found, but object may have old items
- // (directories and symlinks)
- struct stat_data *sd = (struct stat_data *)B_I_PITEM(bh, ih);
+ /*
+ * new stat data found, but object may have old items
+ * (directories and symlinks)
+ */
+ struct stat_data *sd = (struct stat_data *)ih_item_body(bh, ih);
inode->i_mode = sd_v2_mode(sd);
set_nlink(inode, sd_v2_nlink(sd));
@@ -1225,8 +1343,10 @@ static void init_inode(struct inode *inode, struct treepath *path)
inode_set_bytes(inode,
to_real_used_space(inode, inode->i_blocks,
SD_V2_SIZE));
- /* read persistent inode attributes from sd and initialise
- generic inode flags from them */
+ /*
+ * read persistent inode attributes from sd and initialise
+ * generic inode flags from them
+ */
REISERFS_I(inode)->i_attrs = sd_v2_attrs(sd);
sd_attrs_to_i_attrs(sd_v2_attrs(sd), inode);
}
@@ -1249,7 +1369,7 @@ static void init_inode(struct inode *inode, struct treepath *path)
}
}
-// update new stat data with inode fields
+/* update new stat data with inode fields */
static void inode2sd(void *sd, struct inode *inode, loff_t size)
{
struct stat_data *sd_v2 = (struct stat_data *)sd;
@@ -1273,7 +1393,7 @@ static void inode2sd(void *sd, struct inode *inode, loff_t size)
set_sd_v2_attrs(sd_v2, flags);
}
-// used to copy inode's fields to old stat data
+/* used to copy inode's fields to old stat data */
static void inode2sd_v1(void *sd, struct inode *inode, loff_t size)
{
struct stat_data_v1 *sd_v1 = (struct stat_data_v1 *)sd;
@@ -1292,14 +1412,15 @@ static void inode2sd_v1(void *sd, struct inode *inode, loff_t size)
else
set_sd_v1_blocks(sd_v1, to_fake_used_blocks(inode, SD_V1_SIZE));
- // Sigh. i_first_direct_byte is back
+ /* Sigh. i_first_direct_byte is back */
set_sd_v1_first_direct_byte(sd_v1,
REISERFS_I(inode)->i_first_direct_byte);
}
-/* NOTE, you must prepare the buffer head before sending it here,
-** and then log it after the call
-*/
+/*
+ * NOTE, you must prepare the buffer head before sending it here,
+ * and then log it after the call
+ */
static void update_stat_data(struct treepath *path, struct inode *inode,
loff_t size)
{
@@ -1307,17 +1428,17 @@ static void update_stat_data(struct treepath *path, struct inode *inode,
struct item_head *ih;
bh = PATH_PLAST_BUFFER(path);
- ih = PATH_PITEM_HEAD(path);
+ ih = tp_item_head(path);
if (!is_statdata_le_ih(ih))
reiserfs_panic(inode->i_sb, "vs-13065", "key %k, found item %h",
INODE_PKEY(inode), ih);
+ /* path points to old stat data */
if (stat_data_v1(ih)) {
- // path points to old stat data
- inode2sd_v1(B_I_PITEM(bh, ih), inode, size);
+ inode2sd_v1(ih_item_body(bh, ih), inode, size);
} else {
- inode2sd(B_I_PITEM(bh, ih), inode, size);
+ inode2sd(ih_item_body(bh, ih), inode, size);
}
return;
@@ -1335,7 +1456,8 @@ void reiserfs_update_sd_size(struct reiserfs_transaction_handle *th,
BUG_ON(!th->t_trans_id);
- make_cpu_key(&key, inode, SD_OFFSET, TYPE_STAT_DATA, 3); //key type is unimportant
+ /* key type is unimportant */
+ make_cpu_key(&key, inode, SD_OFFSET, TYPE_STAT_DATA, 3);
for (;;) {
int pos;
@@ -1363,45 +1485,48 @@ void reiserfs_update_sd_size(struct reiserfs_transaction_handle *th,
return;
}
- /* sigh, prepare_for_journal might schedule. When it schedules the
- ** FS might change. We have to detect that, and loop back to the
- ** search if the stat data item has moved
+ /*
+ * sigh, prepare_for_journal might schedule. When it
+	 * schedules, the FS might change. We have to detect that,
+ * and loop back to the search if the stat data item has moved
*/
bh = get_last_bh(&path);
- ih = get_ih(&path);
+ ih = tp_item_head(&path);
copy_item_head(&tmp_ih, ih);
fs_gen = get_generation(inode->i_sb);
reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
+
+ /* Stat_data item has been moved after scheduling. */
if (fs_changed(fs_gen, inode->i_sb)
&& item_moved(&tmp_ih, &path)) {
reiserfs_restore_prepared_buffer(inode->i_sb, bh);
- continue; /* Stat_data item has been moved after scheduling. */
+ continue;
}
break;
}
update_stat_data(&path, inode, size);
- journal_mark_dirty(th, th->t_super, bh);
+ journal_mark_dirty(th, bh);
pathrelse(&path);
return;
}
-/* reiserfs_read_locked_inode is called to read the inode off disk, and it
-** does a make_bad_inode when things go wrong. But, we need to make sure
-** and clear the key in the private portion of the inode, otherwise a
-** corresponding iput might try to delete whatever object the inode last
-** represented.
-*/
+/*
+ * reiserfs_read_locked_inode is called to read the inode off disk, and it
+ * does a make_bad_inode when things go wrong. But, we need to make sure
+ * and clear the key in the private portion of the inode, otherwise a
+ * corresponding iput might try to delete whatever object the inode last
+ * represented.
+ */
static void reiserfs_make_bad_inode(struct inode *inode)
{
memset(INODE_PKEY(inode), 0, KEY_SIZE);
make_bad_inode(inode);
}
-//
-// initially this function was derived from minix or ext2's analog and
-// evolved as the prototype did
-//
-
+/*
+ * initially this function was derived from minix or ext2's analog and
+ * evolved as the prototype did
+ */
int reiserfs_init_locked_inode(struct inode *inode, void *p)
{
struct reiserfs_iget_args *args = (struct reiserfs_iget_args *)p;
@@ -1410,8 +1535,10 @@ int reiserfs_init_locked_inode(struct inode *inode, void *p)
return 0;
}
-/* looks for stat data in the tree, and fills up the fields of in-core
- inode stat data fields */
+/*
+ * looks for stat data in the tree, and fills up the fields of in-core
+ * inode stat data fields
+ */
void reiserfs_read_locked_inode(struct inode *inode,
struct reiserfs_iget_args *args)
{
@@ -1422,8 +1549,10 @@ void reiserfs_read_locked_inode(struct inode *inode,
dirino = args->dirid;
- /* set version 1, version 2 could be used too, because stat data
- key is the same in both versions */
+ /*
+ * set version 1, version 2 could be used too, because stat data
+ * key is the same in both versions
+ */
key.version = KEY_FORMAT_3_5;
key.on_disk_key.k_dir_id = dirino;
key.on_disk_key.k_objectid = inode->i_ino;
@@ -1439,8 +1568,9 @@ void reiserfs_read_locked_inode(struct inode *inode,
reiserfs_make_bad_inode(inode);
return;
}
+
+ /* a stale NFS handle can trigger this without it being an error */
if (retval != ITEM_FOUND) {
- /* a stale NFS handle can trigger this without it being an error */
pathrelse(&path_to_sd);
reiserfs_make_bad_inode(inode);
clear_nlink(inode);
@@ -1449,20 +1579,25 @@ void reiserfs_read_locked_inode(struct inode *inode,
init_inode(inode, &path_to_sd);
- /* It is possible that knfsd is trying to access inode of a file
- that is being removed from the disk by some other thread. As we
- update sd on unlink all that is required is to check for nlink
- here. This bug was first found by Sizif when debugging
- SquidNG/Butterfly, forgotten, and found again after Philippe
- Gramoulle <philippe.gramoulle@mmania.com> reproduced it.
-
- More logical fix would require changes in fs/inode.c:iput() to
- remove inode from hash-table _after_ fs cleaned disk stuff up and
- in iget() to return NULL if I_FREEING inode is found in
- hash-table. */
- /* Currently there is one place where it's ok to meet inode with
- nlink==0: processing of open-unlinked and half-truncated files
- during mount (fs/reiserfs/super.c:finish_unfinished()). */
+ /*
+ * It is possible that knfsd is trying to access inode of a file
+ * that is being removed from the disk by some other thread. As we
+ * update sd on unlink all that is required is to check for nlink
+ * here. This bug was first found by Sizif when debugging
+ * SquidNG/Butterfly, forgotten, and found again after Philippe
+ * Gramoulle <philippe.gramoulle@mmania.com> reproduced it.
+	 *
+	 * A more logical fix would require changes in fs/inode.c:iput() to
+ * remove inode from hash-table _after_ fs cleaned disk stuff up and
+ * in iget() to return NULL if I_FREEING inode is found in
+ * hash-table.
+ */
+
+ /*
+ * Currently there is one place where it's ok to meet inode with
+ * nlink==0: processing of open-unlinked and half-truncated files
+ * during mount (fs/reiserfs/super.c:finish_unfinished()).
+ */
if ((inode->i_nlink == 0) &&
!REISERFS_SB(inode->i_sb)->s_is_unlinked_ok) {
reiserfs_warning(inode->i_sb, "vs-13075",
@@ -1472,7 +1607,8 @@ void reiserfs_read_locked_inode(struct inode *inode,
reiserfs_make_bad_inode(inode);
}
- reiserfs_check_path(&path_to_sd); /* init inode should be relsing */
+	/* init_inode should have released the path */
+ reiserfs_check_path(&path_to_sd);
/*
* Stat data v1 doesn't support ACLs.
@@ -1481,7 +1617,7 @@ void reiserfs_read_locked_inode(struct inode *inode,
cache_no_acl(inode);
}
-/**
+/*
* reiserfs_find_actor() - "find actor" reiserfs supplies to iget5_locked().
*
* @inode: inode from hash table to check
@@ -1556,7 +1692,8 @@ static struct dentry *reiserfs_get_dentry(struct super_block *sb,
struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
- /* fhtype happens to reflect the number of u32s encoded.
+ /*
+ * fhtype happens to reflect the number of u32s encoded.
* due to a bug in earlier code, fhtype might indicate there
	 * are more u32s than actually fitted.
* so if fhtype seems to be more than len, reduce fhtype.
@@ -1625,13 +1762,16 @@ int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp,
return *lenp;
}
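/*
 * A sketch of the file-handle sanity clamp described above: fh_type
 * counts encoded u32s, but an old encoder bug could overstate it, so
 * trust the actual length. Values are illustrative.
 */
#include <stdio.h>

int main(void)
{
	int fh_len = 3, fh_type = 5;	/* inconsistent handle */

	if (fh_type > fh_len)		/* reduce fhtype to what fits */
		fh_type = fh_len;
	printf("fh_type=%d\n", fh_type);	/* 3 */
	return 0;
}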
-/* looks for stat data, then copies fields to it, marks the buffer
- containing stat data as dirty */
-/* reiserfs inodes are never really dirty, since the dirty inode call
-** always logs them. This call allows the VFS inode marking routines
-** to properly mark inodes for datasync and such, but only actually
-** does something when called for a synchronous update.
-*/
+/*
+ * looks for stat data, then copies fields to it, marks the buffer
+ * containing stat data as dirty
+ */
+/*
+ * reiserfs inodes are never really dirty, since the dirty inode call
+ * always logs them. This call allows the VFS inode marking routines
+ * to properly mark inodes for datasync and such, but only actually
+ * does something when called for a synchronous update.
+ */
int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct reiserfs_transaction_handle th;
@@ -1639,24 +1779,28 @@ int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc)
if (inode->i_sb->s_flags & MS_RDONLY)
return -EROFS;
- /* memory pressure can sometimes initiate write_inode calls with sync == 1,
- ** these cases are just when the system needs ram, not when the
- ** inode needs to reach disk for safety, and they can safely be
- ** ignored because the altered inode has already been logged.
+ /*
+	 * memory pressure can sometimes initiate write_inode calls with
+	 * sync == 1; these cases are just when the system needs RAM, not
+	 * when the inode needs to reach disk for safety, and they can
+	 * safely be ignored because the altered inode has already been
+	 * logged.
*/
if (wbc->sync_mode == WB_SYNC_ALL && !(current->flags & PF_MEMALLOC)) {
reiserfs_write_lock(inode->i_sb);
if (!journal_begin(&th, inode->i_sb, jbegin_count)) {
reiserfs_update_sd(&th, inode);
- journal_end_sync(&th, inode->i_sb, jbegin_count);
+ journal_end_sync(&th);
}
reiserfs_write_unlock(inode->i_sb);
}
return 0;
}
-/* stat data of new object is inserted already, this inserts the item
- containing "." and ".." entries */
+/*
+ * stat data of new object is inserted already, this inserts the item
+ * containing "." and ".." entries
+ */
static int reiserfs_new_directory(struct reiserfs_transaction_handle *th,
struct inode *inode,
struct item_head *ih, struct treepath *path,
@@ -1674,9 +1818,11 @@ static int reiserfs_new_directory(struct reiserfs_transaction_handle *th,
le32_to_cpu(ih->ih_key.k_objectid), DOT_OFFSET,
TYPE_DIRENTRY, 3 /*key length */ );
- /* compose item head for new item. Directories consist of items of
- old type (ITEM_VERSION_1). Do not set key (second arg is 0), it
- is done by reiserfs_new_inode */
+ /*
+ * compose item head for new item. Directories consist of items of
+ * old type (ITEM_VERSION_1). Do not set key (second arg is 0), it
+ * is done by reiserfs_new_inode
+ */
if (old_format_only(sb)) {
make_le_item_head(ih, NULL, KEY_FORMAT_3_5, DOT_OFFSET,
TYPE_DIRENTRY, EMPTY_DIR_SIZE_V1, 2);
@@ -1714,9 +1860,12 @@ static int reiserfs_new_directory(struct reiserfs_transaction_handle *th,
return reiserfs_insert_item(th, path, &key, ih, inode, body);
}
-/* stat data of object has been inserted, this inserts the item
- containing the body of symlink */
-static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th, struct inode *inode, /* Inode of symlink */
+/*
+ * stat data of object has been inserted, this inserts the item
+ * containing the body of symlink
+ */
+static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th,
+ struct inode *inode,
struct item_head *ih,
struct treepath *path, const char *symname,
int item_len)
@@ -1754,15 +1903,26 @@ static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th, struct i
return reiserfs_insert_item(th, path, &key, ih, inode, symname);
}
-/* inserts the stat data into the tree, and then calls
- reiserfs_new_directory (to insert ".", ".." item if new object is
- directory) or reiserfs_new_symlink (to insert symlink body if new
- object is symlink) or nothing (if new object is regular file)
-
- NOTE! uid and gid must already be set in the inode. If we return
- non-zero due to an error, we have to drop the quota previously allocated
- for the fresh inode. This can only be done outside a transaction, so
- if we return non-zero, we also end the transaction. */
+/*
+ * inserts the stat data into the tree, and then calls
+ * reiserfs_new_directory (to insert ".", ".." item if new object is
+ * directory) or reiserfs_new_symlink (to insert symlink body if new
+ * object is symlink) or nothing (if new object is regular file)
+ *
+ * NOTE! uid and gid must already be set in the inode. If we return
+ * non-zero due to an error, we have to drop the quota previously allocated
+ * for the fresh inode. This can only be done outside a transaction, so
+ * if we return non-zero, we also end the transaction.
+ *
+ * @th: active transaction handle
+ * @dir: parent directory for new inode
+ * @mode: mode of new inode
+ * @symname: symlink contents if inode is symlink
+ * @isize: 0 for regular file, EMPTY_DIR_SIZE for dirs, strlen(symname) for
+ * symlinks
+ * @inode: inode to be filled
+ * @security: optional security context to associate with this inode
+ */
int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
struct inode *dir, umode_t mode, const char *symname,
/* 0 for regular, EMTRY_DIR_SIZE for dirs,
@@ -1807,7 +1967,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
else
make_le_item_head(&ih, NULL, KEY_FORMAT_3_6, SD_OFFSET,
TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
- memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE);
+ memcpy(INODE_PKEY(inode), &ih.ih_key, KEY_SIZE);
args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);
depth = reiserfs_write_unlock_nested(inode->i_sb);
@@ -1820,10 +1980,11 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
}
if (old_format_only(sb))
- /* not a perfect generation count, as object ids can be reused, but
- ** this is as good as reiserfs can do right now.
- ** note that the private part of inode isn't filled in yet, we have
- ** to use the directory.
+ /*
+ * not a perfect generation count, as object ids can be reused,
+ * but this is as good as reiserfs can do right now.
+		 * Note that the private part of the inode isn't filled in
+		 * yet; we have to use the directory.
*/
inode->i_generation = le32_to_cpu(INODE_PKEY(dir)->k_objectid);
else
@@ -1850,7 +2011,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
REISERFS_I(inode)->i_first_direct_byte = S_ISLNK(mode) ? 1 :
U32_MAX /*NO_BYTES_IN_DIRECT_ITEM */ ;
- INIT_LIST_HEAD(&(REISERFS_I(inode)->i_prealloc_list));
+ INIT_LIST_HEAD(&REISERFS_I(inode)->i_prealloc_list);
REISERFS_I(inode)->i_flags = 0;
REISERFS_I(inode)->i_prealloc_block = 0;
REISERFS_I(inode)->i_prealloc_count = 0;
@@ -1878,9 +2039,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
goto out_bad_inode;
}
if (old_format_only(sb)) {
+ /* i_uid or i_gid is too big to be stored in stat data v3.5 */
if (i_uid_read(inode) & ~0xffff || i_gid_read(inode) & ~0xffff) {
pathrelse(&path_to_key);
- /* i_uid or i_gid is too big to be stored in stat data v3.5 */
err = -EINVAL;
goto out_bad_inode;
}
@@ -1888,9 +2049,11 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
} else {
inode2sd(&sd, inode, inode->i_size);
}
- // store in in-core inode the key of stat data and version all
- // object items will have (directory items will have old offset
- // format, other new objects will consist of new items)
+ /*
+	 * store in the in-core inode the key of the stat data and the
+	 * version that all object items will have (directory items will
+	 * have the old offset format, other new objects will consist of
+	 * new items)
+ */
if (old_format_only(sb) || S_ISDIR(mode) || S_ISLNK(mode))
set_inode_item_key_version(inode, KEY_FORMAT_3_5);
else
@@ -1934,7 +2097,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
if (retval) {
err = retval;
reiserfs_check_path(&path_to_key);
- journal_end(th, th->t_super, th->t_blocks_allocated);
+ journal_end(th);
goto out_inserted_sd;
}
@@ -1945,7 +2108,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
if (retval) {
err = retval;
reiserfs_check_path(&path_to_key);
- journal_end(th, th->t_super, th->t_blocks_allocated);
+ journal_end(th);
goto out_inserted_sd;
}
} else if (inode->i_sb->s_flags & MS_POSIXACL) {
@@ -1962,8 +2125,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
if (retval) {
err = retval;
reiserfs_check_path(&path_to_key);
- retval = journal_end(th, th->t_super,
- th->t_blocks_allocated);
+ retval = journal_end(th);
if (retval)
err = retval;
goto out_inserted_sd;
@@ -1975,11 +2137,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
return 0;
-/* it looks like you can easily compress these two goto targets into
- * one. Keeping it like this doesn't actually hurt anything, and they
- * are place holders for what the quota code actually needs.
- */
- out_bad_inode:
+out_bad_inode:
/* Invalidate the object, nothing was inserted yet */
INODE_PKEY(inode)->k_objectid = 0;
@@ -1988,16 +2146,19 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
dquot_free_inode(inode);
reiserfs_write_lock_nested(inode->i_sb, depth);
- out_end_trans:
- journal_end(th, th->t_super, th->t_blocks_allocated);
- /* Drop can be outside and it needs more credits so it's better to have it outside */
+out_end_trans:
+ journal_end(th);
+ /*
+	 * dquot_drop() can run outside the transaction and needs more
+	 * credits, so it's better to have it outside
+ */
depth = reiserfs_write_unlock_nested(inode->i_sb);
dquot_drop(inode);
reiserfs_write_lock_nested(inode->i_sb, depth);
inode->i_flags |= S_NOQUOTA;
make_bad_inode(inode);
- out_inserted_sd:
+out_inserted_sd:
clear_nlink(inode);
th->t_trans_id = 0; /* so the caller can't use this handle later */
unlock_new_inode(inode); /* OK to do even if we hadn't locked it */
@@ -2006,25 +2167,26 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
}
/*
-** finds the tail page in the page cache,
-** reads the last block in.
-**
-** On success, page_result is set to a locked, pinned page, and bh_result
-** is set to an up to date buffer for the last block in the file. returns 0.
-**
-** tail conversion is not done, so bh_result might not be valid for writing
-** check buffer_mapped(bh_result) and bh_result->b_blocknr != 0 before
-** trying to write the block.
-**
-** on failure, nonzero is returned, page_result and bh_result are untouched.
-*/
+ * finds the tail page in the page cache,
+ * reads the last block in.
+ *
+ * On success, page_result is set to a locked, pinned page, and bh_result
+ * is set to an up to date buffer for the last block in the file. returns 0.
+ *
+ * tail conversion is not done, so bh_result might not be valid for writing
+ * check buffer_mapped(bh_result) and bh_result->b_blocknr != 0 before
+ * trying to write the block.
+ *
+ * on failure, nonzero is returned, page_result and bh_result are untouched.
+ */
static int grab_tail_page(struct inode *inode,
struct page **page_result,
struct buffer_head **bh_result)
{
- /* we want the page with the last byte in the file,
- ** not the page that will hold the next byte for appending
+ /*
+ * we want the page with the last byte in the file,
+ * not the page that will hold the next byte for appending
*/
unsigned long index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
unsigned long pos = 0;
@@ -2036,10 +2198,11 @@ static int grab_tail_page(struct inode *inode,
struct page *page;
int error;
- /* we know that we are only called with inode->i_size > 0.
- ** we also know that a file tail can never be as big as a block
- ** If i_size % blocksize == 0, our file is currently block aligned
- ** and it won't need converting or zeroing after a truncate.
+ /*
+ * we know that we are only called with inode->i_size > 0.
+ * we also know that a file tail can never be as big as a block
+ * If i_size % blocksize == 0, our file is currently block aligned
+ * and it won't need converting or zeroing after a truncate.
*/
if ((offset & (blocksize - 1)) == 0) {
return -ENOENT;
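/*
 * A sketch of the tail-page math above, assuming 4 KiB pages: which
 * page holds the last byte, how far into it the size falls, and the
 * block-aligned case where there is no tail at all. The offset
 * computation mirrors grab_tail_page's; values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t page_size = 4096, blocksize = 1024;
	uint64_t i_size = 10000;
	unsigned long index = (unsigned long)((i_size - 1) / page_size);
	unsigned long offset = (unsigned long)(i_size & (page_size - 1));
	int aligned = (offset & (blocksize - 1)) == 0;	/* 0: has a tail */

	printf("%lu %lu %d\n", index, offset, aligned);	/* 2 1808 0 */
	return 0;
}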
@@ -2068,10 +2231,11 @@ static int grab_tail_page(struct inode *inode,
} while (bh != head);
if (!buffer_uptodate(bh)) {
- /* note, this should never happen, prepare_write should
- ** be taking care of this for us. If the buffer isn't up to date,
- ** I've screwed up the code to find the buffer, or the code to
- ** call prepare_write
+ /*
+ * note, this should never happen, prepare_write should be
+ * taking care of this for us. If the buffer isn't up to
+ * date, I've screwed up the code to find the buffer, or the
+ * code to call prepare_write
*/
reiserfs_error(inode->i_sb, "clm-6000",
"error reading block %lu", bh->b_blocknr);
@@ -2081,21 +2245,21 @@ static int grab_tail_page(struct inode *inode,
*bh_result = bh;
*page_result = page;
- out:
+out:
return error;
- unlock:
+unlock:
unlock_page(page);
page_cache_release(page);
return error;
}
/*
-** vfs version of truncate file. Must NOT be called with
-** a transaction already started.
-**
-** some code taken from block_truncate_page
-*/
+ * vfs version of truncate file. Must NOT be called with
+ * a transaction already started.
+ *
+ * some code taken from block_truncate_page
+ */
int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
{
struct reiserfs_transaction_handle th;
@@ -2113,9 +2277,11 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
if (inode->i_size > 0) {
error = grab_tail_page(inode, &page, &bh);
if (error) {
- // -ENOENT means we truncated past the end of the file,
- // and get_block_create_0 could not find a block to read in,
- // which is ok.
+ /*
+ * -ENOENT means we truncated past the end of the
+ * file, and get_block_create_0 could not find a
+ * block to read in, which is ok.
+ */
if (error != -ENOENT)
reiserfs_error(inode->i_sb, "clm-6001",
"grab_tail_page failed %d",
@@ -2125,29 +2291,33 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
}
}
- /* so, if page != NULL, we have a buffer head for the offset at
- ** the end of the file. if the bh is mapped, and bh->b_blocknr != 0,
- ** then we have an unformatted node. Otherwise, we have a direct item,
- ** and no zeroing is required on disk. We zero after the truncate,
- ** because the truncate might pack the item anyway
- ** (it will unmap bh if it packs).
+ /*
+ * so, if page != NULL, we have a buffer head for the offset at
+ * the end of the file. if the bh is mapped, and bh->b_blocknr != 0,
+ * then we have an unformatted node. Otherwise, we have a direct item,
+ * and no zeroing is required on disk. We zero after the truncate,
+ * because the truncate might pack the item anyway
+ * (it will unmap bh if it packs).
+ *
+	 * it is enough to reserve space in the transaction for 2 balancings:
+	 * one for "save" link adding and another for the first
+	 * cut_from_item; 1 more is for update_sd
*/
- /* it is enough to reserve space in transaction for 2 balancings:
- one for "save" link adding and another for the first
- cut_from_item. 1 is for update_sd */
error = journal_begin(&th, inode->i_sb,
JOURNAL_PER_BALANCE_CNT * 2 + 1);
if (error)
goto out;
reiserfs_update_inode_transaction(inode);
if (update_timestamps)
- /* we are doing real truncate: if the system crashes before the last
- transaction of truncating gets committed - on reboot the file
- either appears truncated properly or not truncated at all */
+ /*
+ * we are doing real truncate: if the system crashes
+ * before the last transaction of truncating gets committed
+ * - on reboot the file either appears truncated properly
+ * or not truncated at all
+ */
add_save_link(&th, inode, 1);
err2 = reiserfs_do_truncate(&th, inode, page, update_timestamps);
- error =
- journal_end(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 + 1);
+ error = journal_end(&th);
if (error)
goto out;
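
For readers tracking the reservation arithmetic, the hunk above boils down to the following pattern. This is a condensed sketch distilled from the hunk itself, not extra patch content, and the error paths are elided:

    struct reiserfs_transaction_handle th;
    int err;

    /* two balancings ("save" link + first cut_from_item), 1 for update_sd */
    err = journal_begin(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 + 1);
    if (!err) {
            if (update_timestamps)
                    add_save_link(&th, inode, 1);
            err = reiserfs_do_truncate(&th, inode, page, update_timestamps);
            if (!err)
                    err = journal_end(&th);
    }
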
@@ -2180,7 +2350,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
reiserfs_write_unlock(inode->i_sb);
return 0;
- out:
+out:
if (page) {
unlock_page(page);
page_cache_release(page);
@@ -2212,7 +2382,10 @@ static int map_block_for_writepage(struct inode *inode,
int copy_size;
int trans_running = 0;
- /* catch places below that try to log something without starting a trans */
+ /*
+ * catch places below that try to log something without
+ * starting a trans
+ */
th.t_trans_id = 0;
if (!buffer_uptodate(bh_result)) {
@@ -2220,11 +2393,11 @@ static int map_block_for_writepage(struct inode *inode,
}
kmap(bh_result->b_page);
- start_over:
+start_over:
reiserfs_write_lock(inode->i_sb);
make_cpu_key(&key, inode, byte_offset, TYPE_ANY, 3);
- research:
+research:
retval = search_for_position_by_key(inode->i_sb, &key, &path);
if (retval != POSITION_FOUND) {
use_get_block = 1;
@@ -2232,8 +2405,8 @@ static int map_block_for_writepage(struct inode *inode,
}
bh = get_last_bh(&path);
- ih = get_ih(&path);
- item = get_item(&path);
+ ih = tp_item_head(&path);
+ item = tp_item_body(&path);
pos_in_item = path.pos_in_item;
/* we've found an unformatted node */
@@ -2281,10 +2454,10 @@ static int map_block_for_writepage(struct inode *inode,
goto research;
}
- memcpy(B_I_PITEM(bh, ih) + pos_in_item, p + bytes_copied,
+ memcpy(ih_item_body(bh, ih) + pos_in_item, p + bytes_copied,
copy_size);
- journal_mark_dirty(&th, inode->i_sb, bh);
+ journal_mark_dirty(&th, bh);
bytes_copied += copy_size;
set_block_dev_mapped(bh_result, 0, inode);
@@ -2304,10 +2477,10 @@ static int map_block_for_writepage(struct inode *inode,
}
retval = 0;
- out:
+out:
pathrelse(&path);
if (trans_running) {
- int err = journal_end(&th, inode->i_sb, jbegin_count);
+ int err = journal_end(&th);
if (err)
retval = err;
trans_running = 0;
@@ -2331,7 +2504,8 @@ static int map_block_for_writepage(struct inode *inode,
kunmap(bh_result->b_page);
if (!retval && buffer_mapped(bh_result) && bh_result->b_blocknr == 0) {
- /* we've copied data from the page into the direct item, so the
+ /*
+ * we've copied data from the page into the direct item, so the
* buffer in the page is now clean, mark it to reflect that.
*/
lock_buffer(bh_result);
@@ -2370,7 +2544,8 @@ static int reiserfs_write_full_page(struct page *page,
return 0;
}
- /* The page dirty bit is cleared before writepage is called, which
+ /*
+ * The page dirty bit is cleared before writepage is called, which
* means we have to tell create_empty_buffers to make dirty buffers
* The page really should be up to date at this point, so tossing
* in the BH_Uptodate is just a sanity check.
@@ -2381,8 +2556,9 @@ static int reiserfs_write_full_page(struct page *page,
}
head = page_buffers(page);
- /* last page in the file, zero out any contents past the
- ** last byte in the file
+ /*
+ * last page in the file, zero out any contents past the
+ * last byte in the file
*/
if (page->index >= end_index) {
unsigned last_offset;
@@ -2412,7 +2588,8 @@ static int reiserfs_write_full_page(struct page *page,
(!buffer_mapped(bh) || (buffer_mapped(bh)
&& bh->b_blocknr ==
0))) {
- /* not mapped yet, or it points to a direct item, search
+ /*
+ * not mapped yet, or it points to a direct item, search
* the btree for the mapping info, and log any direct
* items found
*/
@@ -2450,10 +2627,11 @@ static int reiserfs_write_full_page(struct page *page,
if (checked) {
reiserfs_prepare_for_journal(s, bh, 1);
- journal_mark_dirty(&th, s, bh);
+ journal_mark_dirty(&th, bh);
continue;
}
- /* from this point on, we know the buffer is mapped to a
+ /*
+ * from this point on, we know the buffer is mapped to a
* real block and not a direct item
*/
if (wbc->sync_mode != WB_SYNC_NONE) {
@@ -2472,7 +2650,7 @@ static int reiserfs_write_full_page(struct page *page,
} while ((bh = bh->b_this_page) != head);
if (checked) {
- error = journal_end(&th, s, bh_per_page + 1);
+ error = journal_end(&th);
reiserfs_write_unlock(s);
if (error)
goto fail;
@@ -2497,7 +2675,7 @@ static int reiserfs_write_full_page(struct page *page,
} while (bh != head);
error = 0;
- done:
+done:
if (nr == 0) {
/*
* if this page only had a direct item, it is very possible for
@@ -2519,8 +2697,9 @@ static int reiserfs_write_full_page(struct page *page,
}
return error;
- fail:
- /* catches various errors, we need to make sure any valid dirty blocks
+fail:
+ /*
+ * catches various errors, we need to make sure any valid dirty blocks
* get to the media. The page is currently locked and not marked for
* writeback
*/
@@ -2533,8 +2712,8 @@ static int reiserfs_write_full_page(struct page *page,
mark_buffer_async_write(bh);
} else {
/*
- * clear any dirty bits that might have come from getting
- * attached to a dirty page
+ * clear any dirty bits that might have come from
+ * getting attached to a dirty page
*/
clear_buffer_dirty(bh);
}
@@ -2614,15 +2793,18 @@ static int reiserfs_write_begin(struct file *file,
ret = __block_write_begin(page, pos, len, reiserfs_get_block);
if (ret && reiserfs_transaction_running(inode->i_sb)) {
struct reiserfs_transaction_handle *th = current->journal_info;
- /* this gets a little ugly. If reiserfs_get_block returned an
- * error and left a transacstion running, we've got to close it,
- * and we've got to free handle if it was a persistent transaction.
+ /*
+ * this gets a little ugly. If reiserfs_get_block returned an
+	 * error and left a transaction running, we've got to close
+	 * it, and we've got to free the handle if it was a persistent
+ * transaction.
*
* But, if we had nested into an existing transaction, we need
* to just drop the ref count on the handle.
*
* If old_ref == 0, the transaction is from reiserfs_get_block,
- * and it was a persistent trans. Otherwise, it was nested above.
+ * and it was a persistent trans. Otherwise, it was nested
+ * above.
*/
if (th->t_refcount > old_ref) {
if (old_ref)
@@ -2671,15 +2853,18 @@ int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
ret = __block_write_begin(page, from, len, reiserfs_get_block);
if (ret && reiserfs_transaction_running(inode->i_sb)) {
struct reiserfs_transaction_handle *th = current->journal_info;
- /* this gets a little ugly. If reiserfs_get_block returned an
- * error and left a transacstion running, we've got to close it,
- * and we've got to free handle if it was a persistent transaction.
+ /*
+ * this gets a little ugly. If reiserfs_get_block returned an
+	 * error and left a transaction running, we've got to close
+	 * it, and we've got to free the handle if it was a persistent
+ * transaction.
*
* But, if we had nested into an existing transaction, we need
* to just drop the ref count on the handle.
*
* If old_ref == 0, the transaction is from reiserfs_get_block,
- * and it was a persistent trans. Otherwise, it was nested above.
+ * and it was a persistent trans. Otherwise, it was nested
+ * above.
*/
if (th->t_refcount > old_ref) {
if (old_ref)
@@ -2734,17 +2919,20 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
reiserfs_commit_page(inode, page, start, start + copied);
- /* generic_commit_write does this for us, but does not update the
- ** transaction tracking stuff when the size changes. So, we have
- ** to do the i_size updates here.
+ /*
+ * generic_commit_write does this for us, but does not update the
+ * transaction tracking stuff when the size changes. So, we have
+ * to do the i_size updates here.
*/
if (pos + copied > inode->i_size) {
struct reiserfs_transaction_handle myth;
reiserfs_write_lock(inode->i_sb);
locked = true;
- /* If the file have grown beyond the border where it
- can have a tail, unmark it as needing a tail
- packing */
+ /*
+		 * If the file has grown beyond the boundary where it
+		 * can have a tail, unmark it as needing tail
+		 * packing
+ */
if ((have_large_tails(inode->i_sb)
&& inode->i_size > i_block_size(inode) * 4)
|| (have_small_tails(inode->i_sb)
@@ -2759,13 +2947,13 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
inode->i_size = pos + copied;
/*
* this will just nest into our transaction. It's important
- * to use mark_inode_dirty so the inode gets pushed around on the
- * dirty lists, and so that O_SYNC works as expected
+ * to use mark_inode_dirty so the inode gets pushed around on
+ * the dirty lists, and so that O_SYNC works as expected
*/
mark_inode_dirty(inode);
reiserfs_update_sd(&myth, inode);
update_sd = 1;
- ret = journal_end(&myth, inode->i_sb, 1);
+ ret = journal_end(&myth);
if (ret)
goto journal_error;
}
@@ -2781,7 +2969,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
goto out;
}
- out:
+out:
if (locked)
reiserfs_write_unlock(inode->i_sb);
unlock_page(page);
@@ -2792,7 +2980,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
return ret == 0 ? copied : ret;
- journal_error:
+journal_error:
reiserfs_write_unlock(inode->i_sb);
locked = false;
if (th) {
@@ -2822,15 +3010,18 @@ int reiserfs_commit_write(struct file *f, struct page *page,
}
reiserfs_commit_page(inode, page, from, to);
- /* generic_commit_write does this for us, but does not update the
- ** transaction tracking stuff when the size changes. So, we have
- ** to do the i_size updates here.
+ /*
+ * generic_commit_write does this for us, but does not update the
+ * transaction tracking stuff when the size changes. So, we have
+ * to do the i_size updates here.
*/
if (pos > inode->i_size) {
struct reiserfs_transaction_handle myth;
- /* If the file have grown beyond the border where it
- can have a tail, unmark it as needing a tail
- packing */
+ /*
+		 * If the file has grown beyond the boundary where it
+		 * can have a tail, unmark it as needing tail
+		 * packing
+ */
if ((have_large_tails(inode->i_sb)
&& inode->i_size > i_block_size(inode) * 4)
|| (have_small_tails(inode->i_sb)
@@ -2845,13 +3036,13 @@ int reiserfs_commit_write(struct file *f, struct page *page,
inode->i_size = pos;
/*
* this will just nest into our transaction. It's important
- * to use mark_inode_dirty so the inode gets pushed around on the
- * dirty lists, and so that O_SYNC works as expected
+ * to use mark_inode_dirty so the inode gets pushed around
+ * on the dirty lists, and so that O_SYNC works as expected
*/
mark_inode_dirty(inode);
reiserfs_update_sd(&myth, inode);
update_sd = 1;
- ret = journal_end(&myth, inode->i_sb, 1);
+ ret = journal_end(&myth);
if (ret)
goto journal_error;
}
@@ -2863,10 +3054,10 @@ int reiserfs_commit_write(struct file *f, struct page *page,
goto out;
}
- out:
+out:
return ret;
- journal_error:
+journal_error:
if (th) {
if (!update_sd)
reiserfs_update_sd(th, inode);
@@ -2924,9 +3115,10 @@ void i_attrs_to_sd_attrs(struct inode *inode, __u16 * sd_attrs)
}
}
-/* decide if this buffer needs to stay around for data logging or ordered
-** write purposes
-*/
+/*
+ * decide if this buffer needs to stay around for data logging or ordered
+ * write purposes
+ */
static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
{
int ret = 1;
@@ -2937,7 +3129,8 @@ static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
if (!buffer_mapped(bh)) {
goto free_jh;
}
- /* the page is locked, and the only places that log a data buffer
+ /*
+ * the page is locked, and the only places that log a data buffer
* also lock the page.
*/
if (reiserfs_file_data_log(inode)) {
@@ -2952,7 +3145,8 @@ static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
struct reiserfs_journal_list *jl;
struct reiserfs_jh *jh = bh->b_private;
- /* why is this safe?
+ /*
+ * why is this safe?
* reiserfs_setattr updates i_size in the on disk
* stat data before allowing vmtruncate to be called.
*
@@ -2969,7 +3163,7 @@ static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
&& jl != SB_JOURNAL(inode->i_sb)->j_current_jl)
ret = 0;
}
- free_jh:
+free_jh:
if (ret && bh->b_private) {
reiserfs_free_jh(bh);
}
@@ -3028,7 +3222,7 @@ static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
ret = try_to_release_page(page, 0);
/* maybe should BUG_ON(!ret); - neilb */
}
- out:
+out:
return;
}
@@ -3080,8 +3274,10 @@ static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
return ret;
}
-/* We thank Mingming Cao for helping us understand in great detail what
- to do in this section of the code. */
+/*
+ * We thank Mingming Cao for helping us understand in great detail what
+ * to do in this section of the code.
+ */
static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset,
unsigned long nr_segs)
@@ -3127,8 +3323,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
dquot_initialize(inode);
reiserfs_write_lock(inode->i_sb);
if (attr->ia_valid & ATTR_SIZE) {
- /* version 2 items will be caught by the s_maxbytes check
- ** done for us in vmtruncate
+ /*
+ * version 2 items will be caught by the s_maxbytes check
+ * done for us in vmtruncate
*/
if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 &&
attr->ia_size > MAX_NON_LFS) {
@@ -3149,7 +3346,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
err = journal_begin(&th, inode->i_sb, 4);
if (!err) {
reiserfs_discard_prealloc(&th, inode);
- err = journal_end(&th, inode->i_sb, 4);
+ err = journal_end(&th);
}
if (err)
error = err;
@@ -3189,7 +3386,10 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
if (error)
return error;
- /* (user+group)*(old+new) structure - we count quota info and , inode write (sb, inode) */
+ /*
+ * (user+group)*(old+new) structure - we count quota
+	 * info and inode write (sb, inode)
+ */
reiserfs_write_lock(inode->i_sb);
error = journal_begin(&th, inode->i_sb, jbegin_count);
reiserfs_write_unlock(inode->i_sb);
@@ -3198,19 +3398,21 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
error = dquot_transfer(inode, attr);
reiserfs_write_lock(inode->i_sb);
if (error) {
- journal_end(&th, inode->i_sb, jbegin_count);
+ journal_end(&th);
reiserfs_write_unlock(inode->i_sb);
goto out;
}
- /* Update corresponding info in inode so that everything is in
- * one transaction */
+ /*
+ * Update corresponding info in inode so that everything
+ * is in one transaction
+ */
if (attr->ia_valid & ATTR_UID)
inode->i_uid = attr->ia_uid;
if (attr->ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
mark_inode_dirty(inode);
- error = journal_end(&th, inode->i_sb, jbegin_count);
+ error = journal_end(&th);
reiserfs_write_unlock(inode->i_sb);
if (error)
goto out;
@@ -3220,8 +3422,14 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
attr->ia_size != i_size_read(inode)) {
error = inode_newsize_ok(inode, attr->ia_size);
if (!error) {
+ /*
+ * Could race against reiserfs_file_release
+ * if called from NFS, so take tailpack mutex.
+ */
+ mutex_lock(&REISERFS_I(inode)->tailpack);
truncate_setsize(inode, attr->ia_size);
- reiserfs_vfs_truncate_file(inode);
+ reiserfs_truncate_file(inode, 1);
+ mutex_unlock(&REISERFS_I(inode)->tailpack);
}
}
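
The hunk above is the NFS race fix from this series ("reiserfs: call truncate_setsize under tailpack mutex"). A schematic of the ordering it enforces, assuming reiserfs_file_release() takes the same mutex before deciding to repack the tail:

    /*
     * task A: setattr (e.g. via nfsd)     task B: last close
     * -------------------------------     ------------------
     * mutex_lock(&REISERFS_I(inode)->tailpack);
     * truncate_setsize(inode, size);      reiserfs_file_release()
     * reiserfs_truncate_file(inode, 1);     blocks on tailpack, so any
     * mutex_unlock of tailpack;             repacking sees the new size
     */
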
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 946ccbf5b5a..501ed6811a2 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -15,7 +15,8 @@
* reiserfs_ioctl - handler for ioctl for inode
* supported commands:
* 1) REISERFS_IOC_UNPACK - try to unpack tail from direct item into indirect
- * and prevent packing file (argument arg has to be non-zero)
+ *                          and prevent packing the file (argument arg has to
+ *                          be non-zero)
* 2) REISERFS_IOC_[GS]ETFLAGS, REISERFS_IOC_[GS]ETVERSION
* 3) That's all for a while ...
*/
@@ -132,7 +133,10 @@ setversion_out:
long reiserfs_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- /* These are just misnamed, they actually get/put from/to user an int */
+ /*
+	 * These are just misnamed; they actually
+	 * get/put an int from/to user space
+ */
switch (cmd) {
case REISERFS_IOC32_UNPACK:
cmd = REISERFS_IOC_UNPACK;
@@ -160,10 +164,10 @@ long reiserfs_compat_ioctl(struct file *file, unsigned int cmd,
int reiserfs_commit_write(struct file *f, struct page *page,
unsigned from, unsigned to);
/*
-** reiserfs_unpack
-** Function try to convert tail from direct item into indirect.
-** It set up nopack attribute in the REISERFS_I(inode)->nopack
-*/
+ * reiserfs_unpack
+ * Function tries to convert tail from direct item into indirect.
+ * It sets up the nopack attribute in REISERFS_I(inode)->nopack
+ */
int reiserfs_unpack(struct inode *inode, struct file *filp)
{
int retval = 0;
@@ -194,9 +198,10 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
goto out;
}
- /* we unpack by finding the page with the tail, and calling
- ** __reiserfs_write_begin on that page. This will force a
- ** reiserfs_get_block to unpack the tail for us.
+ /*
+ * we unpack by finding the page with the tail, and calling
+ * __reiserfs_write_begin on that page. This will force a
+ * reiserfs_get_block to unpack the tail for us.
*/
index = inode->i_size >> PAGE_CACHE_SHIFT;
mapping = inode->i_mapping;
@@ -214,11 +219,11 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
retval = reiserfs_commit_write(NULL, page, write_from, write_from);
REISERFS_I(inode)->i_flags |= i_nopack_mask;
- out_unlock:
+out_unlock:
unlock_page(page);
page_cache_release(page);
- out:
+out:
mutex_unlock(&inode->i_mutex);
reiserfs_write_unlock(inode->i_sb);
return retval;
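
For context, REISERFS_IOC_UNPACK is driven from user space. A hypothetical caller, assuming the uapi header exports the ioctl number; the helper name is ours, not from the tree:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/reiserfs_fs.h>  /* REISERFS_IOC_UNPACK */

    /* hypothetical example, not part of the patch */
    static int unpack_tail(const char *path)
    {
            int fd = open(path, O_RDWR);
            int ret;

            if (fd < 0)
                    return -1;
            /* non-zero arg: unpack the tail and keep the file unpacked */
            ret = ioctl(fd, REISERFS_IOC_UNPACK, 1);
            close(fd);
            return ret;
    }
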
diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
index ee382ef3d30..cfaee912ee0 100644
--- a/fs/reiserfs/item_ops.c
+++ b/fs/reiserfs/item_ops.c
@@ -5,15 +5,17 @@
#include <linux/time.h>
#include "reiserfs.h"
-// this contains item handlers for old item types: sd, direct,
-// indirect, directory
+/*
+ * this contains item handlers for old item types: sd, direct,
+ * indirect, directory
+ */
-/* and where are the comments? how about saying where we can find an
- explanation of each item handler method? -Hans */
+/*
+ * and where are the comments? how about saying where we can find an
+ * explanation of each item handler method? -Hans
+ */
-//////////////////////////////////////////////////////////////////////////////
-// stat data functions
-//
+/* stat data functions */
static int sd_bytes_number(struct item_head *ih, int block_size)
{
return 0;
@@ -60,7 +62,7 @@ static void sd_print_item(struct item_head *ih, char *item)
static void sd_check_item(struct item_head *ih, char *item)
{
- // FIXME: type something here!
+ /* unused */
}
static int sd_create_vi(struct virtual_node *vn,
@@ -68,7 +70,6 @@ static int sd_create_vi(struct virtual_node *vn,
int is_affected, int insert_size)
{
vi->vi_index = TYPE_STAT_DATA;
- //vi->vi_type |= VI_TYPE_STAT_DATA;// not needed?
return 0;
}
@@ -117,15 +118,13 @@ static struct item_operations stat_data_ops = {
.print_vi = sd_print_vi
};
-//////////////////////////////////////////////////////////////////////////////
-// direct item functions
-//
+/* direct item functions */
static int direct_bytes_number(struct item_head *ih, int block_size)
{
return ih_item_len(ih);
}
-// FIXME: this should probably switch to indirect as well
+/* FIXME: this should probably switch to indirect as well */
static void direct_decrement_key(struct cpu_key *key)
{
cpu_key_k_offset_dec(key);
@@ -144,7 +143,7 @@ static void direct_print_item(struct item_head *ih, char *item)
{
int j = 0;
-// return;
+/* return; */
printk("\"");
while (j < ih_item_len(ih))
printk("%c", item[j++]);
@@ -153,7 +152,7 @@ static void direct_print_item(struct item_head *ih, char *item)
static void direct_check_item(struct item_head *ih, char *item)
{
- // FIXME: type something here!
+ /* unused */
}
static int direct_create_vi(struct virtual_node *vn,
@@ -161,7 +160,6 @@ static int direct_create_vi(struct virtual_node *vn,
int is_affected, int insert_size)
{
vi->vi_index = TYPE_DIRECT;
- //vi->vi_type |= VI_TYPE_DIRECT;
return 0;
}
@@ -211,16 +209,13 @@ static struct item_operations direct_ops = {
.print_vi = direct_print_vi
};
-//////////////////////////////////////////////////////////////////////////////
-// indirect item functions
-//
-
+/* indirect item functions */
static int indirect_bytes_number(struct item_head *ih, int block_size)
{
- return ih_item_len(ih) / UNFM_P_SIZE * block_size; //- get_ih_free_space (ih);
+ return ih_item_len(ih) / UNFM_P_SIZE * block_size;
}
-// decrease offset, if it becomes 0, change type to stat data
+/* decrease offset, if it becomes 0, change type to stat data */
static void indirect_decrement_key(struct cpu_key *key)
{
cpu_key_k_offset_dec(key);
@@ -228,7 +223,7 @@ static void indirect_decrement_key(struct cpu_key *key)
set_cpu_key_k_type(key, TYPE_STAT_DATA);
}
-// if it is not first item of the body, then it is mergeable
+/* if it is not first item of the body, then it is mergeable */
static int indirect_is_left_mergeable(struct reiserfs_key *key,
unsigned long bsize)
{
@@ -236,7 +231,7 @@ static int indirect_is_left_mergeable(struct reiserfs_key *key,
return (le_key_k_offset(version, key) != 1);
}
-// printing of indirect item
+/* printing of indirect item */
static void start_new_sequence(__u32 * start, int *len, __u32 new)
{
*start = new;
@@ -295,7 +290,7 @@ static void indirect_print_item(struct item_head *ih, char *item)
static void indirect_check_item(struct item_head *ih, char *item)
{
- // FIXME: type something here!
+ /* unused */
}
static int indirect_create_vi(struct virtual_node *vn,
@@ -303,7 +298,6 @@ static int indirect_create_vi(struct virtual_node *vn,
int is_affected, int insert_size)
{
vi->vi_index = TYPE_INDIRECT;
- //vi->vi_type |= VI_TYPE_INDIRECT;
return 0;
}
@@ -321,16 +315,19 @@ static int indirect_check_right(struct virtual_item *vi, int free)
return indirect_check_left(vi, free, 0, 0);
}
-// return size in bytes of 'units' units. If first == 0 - calculate from the head (left), otherwise - from tail (right)
+/*
+ * return size in bytes of 'units' units. If first == 0 - calculate
+ * from the head (left), otherwise - from tail (right)
+ */
static int indirect_part_size(struct virtual_item *vi, int first, int units)
{
- // unit of indirect item is byte (yet)
+ /* unit of indirect item is byte (yet) */
return units;
}
static int indirect_unit_num(struct virtual_item *vi)
{
- // unit of indirect item is byte (yet)
+ /* unit of indirect item is byte (yet) */
return vi->vi_item_len - IH_SIZE;
}
@@ -356,10 +353,7 @@ static struct item_operations indirect_ops = {
.print_vi = indirect_print_vi
};
-//////////////////////////////////////////////////////////////////////////////
-// direntry functions
-//
-
+/* direntry functions */
static int direntry_bytes_number(struct item_head *ih, int block_size)
{
reiserfs_warning(NULL, "vs-16090",
@@ -396,7 +390,7 @@ static void direntry_print_item(struct item_head *ih, char *item)
deh = (struct reiserfs_de_head *)item;
- for (i = 0; i < I_ENTRY_COUNT(ih); i++, deh++) {
+ for (i = 0; i < ih_entry_count(ih); i++, deh++) {
namelen =
(i ? (deh_location(deh - 1)) : ih_item_len(ih)) -
deh_location(deh);
@@ -428,9 +422,9 @@ static void direntry_check_item(struct item_head *ih, char *item)
int i;
struct reiserfs_de_head *deh;
- // FIXME: type something here!
+ /* unused */
deh = (struct reiserfs_de_head *)item;
- for (i = 0; i < I_ENTRY_COUNT(ih); i++, deh++) {
+ for (i = 0; i < ih_entry_count(ih); i++, deh++) {
;
}
}
@@ -439,7 +433,8 @@ static void direntry_check_item(struct item_head *ih, char *item)
/*
* function returns old entry number in directory item in real node
- * using new entry number in virtual item in virtual node */
+ * using new entry number in virtual item in virtual node
+ */
static inline int old_entry_num(int is_affected, int virtual_entry_num,
int pos_in_item, int mode)
{
@@ -463,9 +458,11 @@ static inline int old_entry_num(int is_affected, int virtual_entry_num,
return virtual_entry_num - 1;
}
-/* Create an array of sizes of directory entries for virtual
- item. Return space used by an item. FIXME: no control over
- consuming of space used by this item handler */
+/*
+ * Create an array of sizes of directory entries for virtual
+ * item. Return space used by an item. FIXME: no control over
+ * consuming of space used by this item handler
+ */
static int direntry_create_vi(struct virtual_node *vn,
struct virtual_item *vi,
int is_affected, int insert_size)
@@ -494,8 +491,8 @@ static int direntry_create_vi(struct virtual_node *vn,
j = old_entry_num(is_affected, i, vn->vn_pos_in_item,
vn->vn_mode);
dir_u->entry_sizes[i] =
- (j ? deh_location(&(deh[j - 1])) : ih_item_len(vi->vi_ih)) -
- deh_location(&(deh[j])) + DEH_SIZE;
+ (j ? deh_location(&deh[j - 1]) : ih_item_len(vi->vi_ih)) -
+ deh_location(&deh[j]) + DEH_SIZE;
}
size += (dir_u->entry_count * sizeof(short));
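
A worked form of the entry-size arithmetic above, as a sketch (the helper name is ours): entry bodies are packed from the item end backwards, so entry j spans from its own deh_location up to the previous entry's location (or the item end for entry 0), plus its header:

    static int example_entry_size(struct item_head *ih,
                                  struct reiserfs_de_head *deh, int j)
    {
            int end = j ? deh_location(&deh[j - 1]) : ih_item_len(ih);

            return end - deh_location(&deh[j]) + DEH_SIZE;
    }
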
@@ -529,10 +526,10 @@ static int direntry_create_vi(struct virtual_node *vn,
}
-//
-// return number of entries which may fit into specified amount of
-// free space, or -1 if free space is not enough even for 1 entry
-//
+/*
+ * return number of entries which may fit into specified amount of
+ * free space, or -1 if free space is not enough even for 1 entry
+ */
static int direntry_check_left(struct virtual_item *vi, int free,
int start_skip, int end_skip)
{
@@ -541,8 +538,8 @@ static int direntry_check_left(struct virtual_item *vi, int free,
struct direntry_uarea *dir_u = vi->vi_uarea;
for (i = start_skip; i < dir_u->entry_count - end_skip; i++) {
+ /* i-th entry doesn't fit into the remaining free space */
if (dir_u->entry_sizes[i] > free)
- /* i-th entry doesn't fit into the remaining free space */
break;
free -= dir_u->entry_sizes[i];
@@ -570,8 +567,8 @@ static int direntry_check_right(struct virtual_item *vi, int free)
struct direntry_uarea *dir_u = vi->vi_uarea;
for (i = dir_u->entry_count - 1; i >= 0; i--) {
+ /* i-th entry doesn't fit into the remaining free space */
if (dir_u->entry_sizes[i] > free)
- /* i-th entry doesn't fit into the remaining free space */
break;
free -= dir_u->entry_sizes[i];
@@ -643,9 +640,7 @@ static struct item_operations direntry_ops = {
.print_vi = direntry_print_vi
};
-//////////////////////////////////////////////////////////////////////////////
-// Error catching functions to catch errors caused by incorrect item types.
-//
+/* Error catching functions to catch errors caused by incorrect item types. */
static int errcatch_bytes_number(struct item_head *ih, int block_size)
{
reiserfs_warning(NULL, "green-16001",
@@ -685,8 +680,12 @@ static int errcatch_create_vi(struct virtual_node *vn,
{
reiserfs_warning(NULL, "green-16006",
"Invalid item type observed, run fsck ASAP");
- return 0; // We might return -1 here as well, but it won't help as create_virtual_node() from where
- // this operation is called from is of return type void.
+ /*
+	/*
+	 * We might return -1 here as well, but it won't help:
+	 * create_virtual_node(), from which this operation is
+	 * called, has return type void.
+	 */
+ */
+ return 0;
}
static int errcatch_check_left(struct virtual_item *vi, int free,
@@ -739,9 +738,6 @@ static struct item_operations errcatch_ops = {
errcatch_print_vi
};
-//////////////////////////////////////////////////////////////////////////////
-//
-//
#if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
#error Item types must use disk-format assigned values.
#endif
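
The #error above exists because these type values index the item_ops[] table directly, so they must match the on-disk format. A sketch of the dispatch pattern (compare the op_* wrapper macros in reiserfs.h; the helper name is ours):

    static inline int example_bytes_number(struct item_head *ih,
                                           int block_size)
    {
            /* le_ih_k_type() yields TYPE_STAT_DATA ... TYPE_DIRENTRY */
            return item_ops[le_ih_k_type(ih)]->bytes_number(ih, block_size);
    }
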
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index fd777032c2b..e8870de4627 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1,38 +1,38 @@
/*
-** Write ahead logging implementation copyright Chris Mason 2000
-**
-** The background commits make this code very interrelated, and
-** overly complex. I need to rethink things a bit....The major players:
-**
-** journal_begin -- call with the number of blocks you expect to log.
-** If the current transaction is too
-** old, it will block until the current transaction is
-** finished, and then start a new one.
-** Usually, your transaction will get joined in with
-** previous ones for speed.
-**
-** journal_join -- same as journal_begin, but won't block on the current
-** transaction regardless of age. Don't ever call
-** this. Ever. There are only two places it should be
-** called from, and they are both inside this file.
-**
-** journal_mark_dirty -- adds blocks into this transaction. clears any flags
-** that might make them get sent to disk
-** and then marks them BH_JDirty. Puts the buffer head
-** into the current transaction hash.
-**
-** journal_end -- if the current transaction is batchable, it does nothing
-** otherwise, it could do an async/synchronous commit, or
-** a full flush of all log and real blocks in the
-** transaction.
-**
-** flush_old_commits -- if the current transaction is too old, it is ended and
-** commit blocks are sent to disk. Forces commit blocks
-** to disk for all backgrounded commits that have been
-** around too long.
-** -- Note, if you call this as an immediate flush from
-** from within kupdate, it will ignore the immediate flag
-*/
+ * Write ahead logging implementation copyright Chris Mason 2000
+ *
+ * The background commits make this code very interrelated, and
+ * overly complex. I need to rethink things a bit.... The major players:
+ *
+ * journal_begin -- call with the number of blocks you expect to log.
+ * If the current transaction is too
+ * old, it will block until the current transaction is
+ * finished, and then start a new one.
+ * Usually, your transaction will get joined in with
+ * previous ones for speed.
+ *
+ * journal_join -- same as journal_begin, but won't block on the current
+ * transaction regardless of age. Don't ever call
+ * this. Ever. There are only two places it should be
+ * called from, and they are both inside this file.
+ *
+ * journal_mark_dirty -- adds blocks into this transaction. clears any flags
+ * that might make them get sent to disk
+ * and then marks them BH_JDirty. Puts the buffer head
+ * into the current transaction hash.
+ *
+ * journal_end -- if the current transaction is batchable, it does nothing
+ * otherwise, it could do an async/synchronous commit, or
+ * a full flush of all log and real blocks in the
+ * transaction.
+ *
+ * flush_old_commits -- if the current transaction is too old, it is ended and
+ * commit blocks are sent to disk. Forces commit blocks
+ * to disk for all backgrounded commits that have been
+ * around too long.
+ *                      -- Note, if you call this as an immediate flush
+ *                         from within kupdate, it will ignore the immediate flag
+ */
#include <linux/time.h>
#include <linux/semaphore.h>
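
Combined with the signature changes later in this patch (journal_mark_dirty() and journal_end() drop their redundant super_block/nblocks arguments), a typical caller now takes this shape; a minimal sketch with an illustrative helper name:

    static int example_log_one_buffer(struct super_block *sb,
                                      struct buffer_head *bh)
    {
            struct reiserfs_transaction_handle th;
            int err;

            err = journal_begin(&th, sb, JOURNAL_PER_BALANCE_CNT);
            if (err)
                    return err;
            reiserfs_prepare_for_journal(sb, bh, 1);
            /* ... modify the buffer under the transaction ... */
            journal_mark_dirty(&th, bh);
            return journal_end(&th);
    }
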
@@ -58,23 +58,19 @@
#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
j_working_list))
-/* the number of mounted filesystems. This is used to decide when to
-** start and kill the commit workqueue
-*/
-static int reiserfs_mounted_fs_count;
-
-static struct workqueue_struct *commit_wq;
-
-#define JOURNAL_TRANS_HALF 1018 /* must be correct to keep the desc and commit
- structs at 4k */
+/* must be correct to keep the desc and commit structs at 4k */
+#define JOURNAL_TRANS_HALF 1018
#define BUFNR 64 /*read ahead */
/* cnode stat bits. Move these into reiserfs_fs.h */
-#define BLOCK_FREED 2 /* this block was freed, and can't be written. */
-#define BLOCK_FREED_HOLDER 3 /* this block was freed during this transaction, and can't be written */
+/* this block was freed, and can't be written. */
+#define BLOCK_FREED 2
+/* this block was freed during this transaction, and can't be written */
+#define BLOCK_FREED_HOLDER 3
-#define BLOCK_NEEDS_FLUSH 4 /* used in flush_journal_list */
+/* used in flush_journal_list */
+#define BLOCK_NEEDS_FLUSH 4
#define BLOCK_DIRTIED 5
/* journal list state bits */
@@ -87,16 +83,14 @@ static struct workqueue_struct *commit_wq;
#define COMMIT_NOW 2 /* end and commit this transaction */
#define WAIT 4 /* wait for the log blocks to hit the disk */
-static int do_journal_end(struct reiserfs_transaction_handle *,
- struct super_block *, unsigned long nblocks,
- int flags);
+static int do_journal_end(struct reiserfs_transaction_handle *, int flags);
static int flush_journal_list(struct super_block *s,
struct reiserfs_journal_list *jl, int flushall);
static int flush_commit_list(struct super_block *s,
struct reiserfs_journal_list *jl, int flushall);
static int can_dirty(struct reiserfs_journal_cnode *cn);
static int journal_join(struct reiserfs_transaction_handle *th,
- struct super_block *sb, unsigned long nblocks);
+ struct super_block *sb);
static void release_journal_dev(struct super_block *super,
struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
@@ -107,8 +101,10 @@ static void queue_log_writer(struct super_block *s);
/* values for join in do_journal_begin_r */
enum {
JBEGIN_REG = 0, /* regular journal begin */
- JBEGIN_JOIN = 1, /* join the running transaction if at all possible */
- JBEGIN_ABORT = 2, /* called from cleanup code, ignores aborted flag */
+ /* join the running transaction if at all possible */
+ JBEGIN_JOIN = 1,
+ /* called from cleanup code, ignores aborted flag */
+ JBEGIN_ABORT = 2,
};
static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
@@ -123,10 +119,11 @@ static void init_journal_hash(struct super_block *sb)
}
/*
-** clears BH_Dirty and sticks the buffer on the clean list. Called because I can't allow refile_buffer to
-** make schedule happen after I've freed a block. Look at remove_from_transaction and journal_mark_freed for
-** more details.
-*/
+ * clears BH_Dirty and sticks the buffer on the clean list. Called because
+ * I can't allow refile_buffer to make schedule happen after I've freed a
+ * block. Look at remove_from_transaction and journal_mark_freed for
+ * more details.
+ */
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
{
if (bh) {
@@ -163,7 +160,7 @@ static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *sb)
struct list_head *entry = journal->j_bitmap_nodes.next;
journal->j_used_bitmap_nodes++;
- repeat:
+repeat:
if (entry != &journal->j_bitmap_nodes) {
bn = list_entry(entry, struct reiserfs_bitmap_node, list);
@@ -204,7 +201,8 @@ static void allocate_bitmap_nodes(struct super_block *sb)
list_add(&bn->list, &journal->j_bitmap_nodes);
journal->j_free_bitmap_nodes++;
} else {
- break; /* this is ok, we'll try again when more are needed */
+ /* this is ok, we'll try again when more are needed */
+ break;
}
}
}
@@ -239,8 +237,8 @@ static void cleanup_bitmap_list(struct super_block *sb,
}
/*
-** only call this on FS unmount.
-*/
+ * only call this on FS unmount.
+ */
static int free_list_bitmaps(struct super_block *sb,
struct reiserfs_list_bitmap *jb_array)
{
@@ -275,9 +273,9 @@ static int free_bitmap_nodes(struct super_block *sb)
}
/*
-** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
-** jb_array is the array to be filled in.
-*/
+ * get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
+ * jb_array is the array to be filled in.
+ */
int reiserfs_allocate_list_bitmaps(struct super_block *sb,
struct reiserfs_list_bitmap *jb_array,
unsigned int bmap_nr)
@@ -306,9 +304,9 @@ int reiserfs_allocate_list_bitmaps(struct super_block *sb,
}
/*
-** find an available list bitmap. If you can't find one, flush a commit list
-** and try again
-*/
+ * find an available list bitmap. If you can't find one, flush a commit list
+ * and try again
+ */
static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *sb,
struct reiserfs_journal_list
*jl)
@@ -332,18 +330,18 @@ static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *sb,
break;
}
}
- if (jb->journal_list) { /* double check to make sure if flushed correctly */
+	/* double check to make sure it was flushed correctly */
+ if (jb->journal_list)
return NULL;
- }
jb->journal_list = jl;
return jb;
}
/*
-** allocates a new chunk of X nodes, and links them all together as a list.
-** Uses the cnode->next and cnode->prev pointers
-** returns NULL on failure
-*/
+ * allocates a new chunk of X nodes, and links them all together as a list.
+ * Uses the cnode->next and cnode->prev pointers
+ * returns NULL on failure
+ */
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
{
struct reiserfs_journal_cnode *head;
@@ -365,9 +363,7 @@ static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
return head;
}
-/*
-** pulls a cnode off the free list, or returns NULL on failure
-*/
+/* pulls a cnode off the free list, or returns NULL on failure */
static struct reiserfs_journal_cnode *get_cnode(struct super_block *sb)
{
struct reiserfs_journal_cnode *cn;
@@ -393,8 +389,8 @@ static struct reiserfs_journal_cnode *get_cnode(struct super_block *sb)
}
/*
-** returns a cnode to the free list
-*/
+ * returns a cnode to the free list
+ */
static void free_cnode(struct super_block *sb,
struct reiserfs_journal_cnode *cn)
{
@@ -419,7 +415,10 @@ static void clear_prepared_bits(struct buffer_head *bh)
clear_buffer_journal_restore_dirty(bh);
}
-/* return a cnode with same dev, block number and size in table, or null if not found */
+/*
+ * return a cnode with same dev, block number and size in table,
+ * or null if not found
+ */
static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
super_block
*sb,
@@ -439,23 +438,24 @@ static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
}
/*
-** this actually means 'can this block be reallocated yet?'. If you set search_all, a block can only be allocated
-** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
-** being overwritten by a replay after crashing.
-**
-** If you don't set search_all, a block can only be allocated if it is not in the current transaction. Since deleting
-** a block removes it from the current transaction, this case should never happen. If you don't set search_all, make
-** sure you never write the block without logging it.
-**
-** next_zero_bit is a suggestion about the next block to try for find_forward.
-** when bl is rejected because it is set in a journal list bitmap, we search
-** for the next zero bit in the bitmap that rejected bl. Then, we return that
-** through next_zero_bit for find_forward to try.
-**
-** Just because we return something in next_zero_bit does not mean we won't
-** reject it on the next call to reiserfs_in_journal
-**
-*/
+ * this actually means 'can this block be reallocated yet?'. If you set
+ * search_all, a block can only be allocated if it is not in the current
+ * transaction, was not freed by the current transaction, and has no chance
+ * of ever being overwritten by a replay after crashing.
+ *
+ * If you don't set search_all, a block can only be allocated if it is not
+ * in the current transaction. Since deleting a block removes it from the
+ * current transaction, this case should never happen. If you don't set
+ * search_all, make sure you never write the block without logging it.
+ *
+ * next_zero_bit is a suggestion about the next block to try for find_forward.
+ * when bl is rejected because it is set in a journal list bitmap, we search
+ * for the next zero bit in the bitmap that rejected bl. Then, we return
+ * that through next_zero_bit for find_forward to try.
+ *
+ * Just because we return something in next_zero_bit does not mean we won't
+ * reject it on the next call to reiserfs_in_journal
+ */
int reiserfs_in_journal(struct super_block *sb,
unsigned int bmap_nr, int bit_nr, int search_all,
b_blocknr_t * next_zero_bit)
@@ -469,9 +469,11 @@ int reiserfs_in_journal(struct super_block *sb,
*next_zero_bit = 0; /* always start this at zero. */
PROC_INFO_INC(sb, journal.in_journal);
- /* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
- ** if we crash before the transaction that freed it commits, this transaction won't
- ** have committed either, and the block will never be written
+ /*
+ * If we aren't doing a search_all, this is a metablock, and it
+	 * will be logged before use. If we crash before the transaction
+ * that freed it commits, this transaction won't have committed
+ * either, and the block will never be written
*/
if (search_all) {
for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
@@ -511,8 +513,7 @@ int reiserfs_in_journal(struct super_block *sb,
return 0;
}
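
A hypothetical consumer of the contract the comment above describes: the allocator asks whether a candidate block may be reused, and on rejection retries from the hint returned through next_zero_bit (the real consumer lives in the block allocator):

    b_blocknr_t next_zero_bit;

    if (reiserfs_in_journal(sb, bmap_nr, bit_nr, 1 /* search_all */,
                            &next_zero_bit)) {
            /*
             * still journaled; next_zero_bit is only a suggestion and
             * may itself be rejected on the next call
             */
            bit_nr = next_zero_bit;
    }
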
-/* insert cn into table
-*/
+/* insert cn into table */
static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
struct reiserfs_journal_cnode *cn)
{
@@ -558,10 +559,10 @@ static inline void put_journal_list(struct super_block *s,
}
/*
-** this used to be much more involved, and I'm keeping it just in case things get ugly again.
-** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
-** transaction.
-*/
+ * this used to be much more involved, and I'm keeping it just in case
+ * things get ugly again. It gets called by flush_commit_list, and
+ * cleans up any data stored about blocks freed during a transaction.
+ */
static void cleanup_freed_for_journal_list(struct super_block *sb,
struct reiserfs_journal_list *jl)
{
@@ -756,11 +757,12 @@ static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
jh = bh->b_private;
list_del_init(&jh->list);
} else {
- no_jh:
+no_jh:
get_bh(bh);
jh = alloc_jh();
spin_lock(&j->j_dirty_buffers_lock);
- /* buffer must be locked for __add_jh, should be able to have
+ /*
+ * buffer must be locked for __add_jh, should be able to have
* two adds at the same time
*/
BUG_ON(bh->b_private);
@@ -818,7 +820,8 @@ static int write_ordered_buffers(spinlock_t * lock,
spin_lock(lock);
goto loop_next;
}
- /* in theory, dirty non-uptodate buffers should never get here,
+ /*
+ * in theory, dirty non-uptodate buffers should never get here,
* but the upper layer io error paths still have a few quirks.
* Handle them here as gracefully as we can
*/
@@ -833,7 +836,7 @@ static int write_ordered_buffers(spinlock_t * lock,
reiserfs_free_jh(bh);
unlock_buffer(bh);
}
- loop_next:
+loop_next:
put_bh(bh);
cond_resched_lock(lock);
}
@@ -856,13 +859,14 @@ static int write_ordered_buffers(spinlock_t * lock,
if (!buffer_uptodate(bh)) {
ret = -EIO;
}
- /* ugly interaction with invalidatepage here.
- * reiserfs_invalidate_page will pin any buffer that has a valid
- * journal head from an older transaction. If someone else sets
- * our buffer dirty after we write it in the first loop, and
- * then someone truncates the page away, nobody will ever write
- * the buffer. We're safe if we write the page one last time
- * after freeing the journal header.
+ /*
+ * ugly interaction with invalidatepage here.
+ * reiserfs_invalidate_page will pin any buffer that has a
+ * valid journal head from an older transaction. If someone
+ * else sets our buffer dirty after we write it in the first
+ * loop, and then someone truncates the page away, nobody
+ * will ever write the buffer. We're safe if we write the
+ * page one last time after freeing the journal header.
*/
if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
spin_unlock(lock);
@@ -887,7 +891,7 @@ static int flush_older_commits(struct super_block *s,
unsigned int other_trans_id;
unsigned int first_trans_id;
- find_first:
+find_first:
/*
	 * first we walk backwards to find the oldest uncommitted transaction
*/
@@ -923,9 +927,11 @@ static int flush_older_commits(struct super_block *s,
if (!journal_list_still_alive(s, trans_id))
return 1;
- /* the one we just flushed is gone, this means all
- * older lists are also gone, so first_jl is no longer
- * valid either. Go back to the beginning.
+ /*
+ * the one we just flushed is gone, this means
+ * all older lists are also gone, so first_jl
+ * is no longer valid either. Go back to the
+ * beginning.
*/
if (!journal_list_still_alive
(s, other_trans_id)) {
@@ -958,12 +964,12 @@ static int reiserfs_async_progress_wait(struct super_block *s)
}
/*
-** if this journal list still has commit blocks unflushed, send them to disk.
-**
-** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
-** Before the commit block can by written, every other log block must be safely on disk
-**
-*/
+ * if this journal list still has commit blocks unflushed, send them to disk.
+ *
+ * log areas must be flushed in order (transaction 2 can't commit
+ * before transaction 1). Before the commit block can be written,
+ * every other log block must be safely on disk
+ */
static int flush_commit_list(struct super_block *s,
struct reiserfs_journal_list *jl, int flushall)
{
@@ -982,8 +988,9 @@ static int flush_commit_list(struct super_block *s,
return 0;
}
- /* before we can put our commit blocks on disk, we have to make sure everyone older than
- ** us is on disk too
+ /*
+ * before we can put our commit blocks on disk, we have to make
+ * sure everyone older than us is on disk too
*/
BUG_ON(jl->j_len <= 0);
BUG_ON(trans_id == journal->j_trans_id);
@@ -991,7 +998,10 @@ static int flush_commit_list(struct super_block *s,
get_journal_list(jl);
if (flushall) {
if (flush_older_commits(s, jl) == 1) {
- /* list disappeared during flush_older_commits. return */
+ /*
+ * list disappeared during flush_older_commits.
+ * return
+ */
goto put_jl;
}
}
@@ -1006,9 +1016,9 @@ static int flush_commit_list(struct super_block *s,
BUG_ON(jl->j_trans_id == 0);
/* this commit is done, exit */
- if (atomic_read(&(jl->j_commit_left)) <= 0) {
+ if (atomic_read(&jl->j_commit_left) <= 0) {
if (flushall) {
- atomic_set(&(jl->j_older_commits_done), 1);
+ atomic_set(&jl->j_older_commits_done, 1);
}
mutex_unlock(&jl->j_commit_mutex);
goto put_jl;
@@ -1063,9 +1073,10 @@ static int flush_commit_list(struct super_block *s,
depth = reiserfs_write_unlock_nested(s);
__wait_on_buffer(tbh);
reiserfs_write_lock_nested(s, depth);
- // since we're using ll_rw_blk above, it might have skipped over
- // a locked buffer. Double check here
- //
+ /*
+ * since we're using ll_rw_blk above, it might have skipped
+ * over a locked buffer. Double check here
+ */
/* redundant, sync_dirty_buffer() checks */
if (buffer_dirty(tbh)) {
depth = reiserfs_write_unlock_nested(s);
@@ -1079,17 +1090,21 @@ static int flush_commit_list(struct super_block *s,
#endif
retval = -EIO;
}
- put_bh(tbh); /* once for journal_find_get_block */
- put_bh(tbh); /* once due to original getblk in do_journal_end */
- atomic_dec(&(jl->j_commit_left));
+ /* once for journal_find_get_block */
+ put_bh(tbh);
+ /* once due to original getblk in do_journal_end */
+ put_bh(tbh);
+ atomic_dec(&jl->j_commit_left);
}
- BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);
+ BUG_ON(atomic_read(&jl->j_commit_left) != 1);
- /* If there was a write error in the journal - we can't commit
+ /*
+ * If there was a write error in the journal - we can't commit
* this transaction - it will be invalid and, if successful,
* will just end up propagating the write error out to
- * the file system. */
+ * the file system.
+ */
if (likely(!retval && !reiserfs_is_journal_aborted (journal))) {
if (buffer_dirty(jl->j_commit_bh))
BUG();
@@ -1102,9 +1117,11 @@ static int flush_commit_list(struct super_block *s,
reiserfs_write_lock_nested(s, depth);
}
- /* If there was a write error in the journal - we can't commit this
+ /*
+ * If there was a write error in the journal - we can't commit this
* transaction - it will be invalid and, if successful, will just end
- * up propagating the write error out to the filesystem. */
+ * up propagating the write error out to the filesystem.
+ */
if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
#ifdef CONFIG_REISERFS_CHECK
reiserfs_warning(s, "journal-615", "buffer write failed");
@@ -1119,7 +1136,10 @@ static int flush_commit_list(struct super_block *s,
}
journal->j_last_commit_id = jl->j_trans_id;
- /* now, every commit block is on the disk. It is safe to allow blocks freed during this transaction to be reallocated */
+ /*
+ * now, every commit block is on the disk. It is safe to allow
+ * blocks freed during this transaction to be reallocated
+ */
cleanup_freed_for_journal_list(s, jl);
retval = retval ? retval : journal->j_errno;
@@ -1127,13 +1147,13 @@ static int flush_commit_list(struct super_block *s,
/* mark the metadata dirty */
if (!retval)
dirty_one_transaction(s, jl);
- atomic_dec(&(jl->j_commit_left));
+ atomic_dec(&jl->j_commit_left);
if (flushall) {
- atomic_set(&(jl->j_older_commits_done), 1);
+ atomic_set(&jl->j_older_commits_done, 1);
}
mutex_unlock(&jl->j_commit_mutex);
- put_jl:
+put_jl:
put_journal_list(s, jl);
if (retval)
@@ -1143,9 +1163,9 @@ static int flush_commit_list(struct super_block *s,
}
/*
-** flush_journal_list frequently needs to find a newer transaction for a given block. This does that, or
-** returns NULL if it can't find anything
-*/
+ * flush_journal_list frequently needs to find a newer transaction for a
+ * given block. This does that, or returns NULL if it can't find anything
+ */
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
reiserfs_journal_cnode
*cn)
@@ -1169,10 +1189,11 @@ static void remove_journal_hash(struct super_block *,
int);
/*
-** once all the real blocks have been flushed, it is safe to remove them from the
-** journal list for this transaction. Aside from freeing the cnode, this also allows the
-** block to be reallocated for data blocks if it had been deleted.
-*/
+ * once all the real blocks have been flushed, it is safe to remove them
+ * from the journal list for this transaction. Aside from freeing the
+ * cnode, this also allows the block to be reallocated for data blocks
+ * if it had been deleted.
+ */
static void remove_all_from_journal_list(struct super_block *sb,
struct reiserfs_journal_list *jl,
int debug)
@@ -1181,8 +1202,9 @@ static void remove_all_from_journal_list(struct super_block *sb,
struct reiserfs_journal_cnode *cn, *last;
cn = jl->j_realblock;
- /* which is better, to lock once around the whole loop, or
- ** to lock for each call to remove_journal_hash?
+ /*
+ * which is better, to lock once around the whole loop, or
+ * to lock for each call to remove_journal_hash?
*/
while (cn) {
if (cn->blocknr != 0) {
@@ -1204,12 +1226,13 @@ static void remove_all_from_journal_list(struct super_block *sb,
}
/*
-** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
-** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
-** releasing blocks in this transaction for reuse as data blocks.
-** called by flush_journal_list, before it calls remove_all_from_journal_list
-**
-*/
+ * if this timestamp is greater than the timestamp we wrote last to the
+ * header block, write it to the header block. once this is done, I can
+ * safely say the log area for this transaction won't ever be replayed,
+ * and I can start releasing blocks in this transaction for reuse as data
+ * blocks. called by flush_journal_list, before it calls
+ * remove_all_from_journal_list
+ */
static int _update_journal_header_block(struct super_block *sb,
unsigned long offset,
unsigned int trans_id)
@@ -1279,10 +1302,11 @@ static int flush_older_journal_lists(struct super_block *sb,
struct reiserfs_journal *journal = SB_JOURNAL(sb);
unsigned int trans_id = jl->j_trans_id;
- /* we know we are the only ones flushing things, no extra race
+ /*
+ * we know we are the only ones flushing things, no extra race
* protection is required.
*/
- restart:
+restart:
entry = journal->j_journal_list.next;
/* Did we wrap? */
if (entry == &journal->j_journal_list)
@@ -1309,15 +1333,16 @@ static void del_from_work_list(struct super_block *s,
}
}
-/* flush a journal list, both commit and real blocks
-**
-** always set flushall to 1, unless you are calling from inside
-** flush_journal_list
-**
-** IMPORTANT. This can only be called while there are no journal writers,
-** and the journal is locked. That means it can only be called from
-** do_journal_end, or by journal_release
-*/
+/*
+ * flush a journal list, both commit and real blocks
+ *
+ * always set flushall to 1, unless you are calling from inside
+ * flush_journal_list
+ *
+ * IMPORTANT. This can only be called while there are no journal writers,
+ * and the journal is locked. That means it can only be called from
+ * do_journal_end, or by journal_release
+ */
static int flush_journal_list(struct super_block *s,
struct reiserfs_journal_list *jl, int flushall)
{
@@ -1354,13 +1379,14 @@ static int flush_journal_list(struct super_block *s,
}
/* if all the work is already done, get out of here */
- if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
- atomic_read(&(jl->j_commit_left)) <= 0) {
+ if (atomic_read(&jl->j_nonzerolen) <= 0 &&
+ atomic_read(&jl->j_commit_left) <= 0) {
goto flush_older_and_return;
}
- /* start by putting the commit list on disk. This will also flush
- ** the commit lists of any olders transactions
+ /*
+ * start by putting the commit list on disk. This will also flush
+	 * the commit lists of any older transactions
*/
flush_commit_list(s, jl, 1);
@@ -1369,15 +1395,16 @@ static int flush_journal_list(struct super_block *s,
BUG();
/* are we done now? */
- if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
- atomic_read(&(jl->j_commit_left)) <= 0) {
+ if (atomic_read(&jl->j_nonzerolen) <= 0 &&
+ atomic_read(&jl->j_commit_left) <= 0) {
goto flush_older_and_return;
}
- /* loop through each cnode, see if we need to write it,
- ** or wait on a more recent transaction, or just ignore it
+ /*
+ * loop through each cnode, see if we need to write it,
+ * or wait on a more recent transaction, or just ignore it
*/
- if (atomic_read(&(journal->j_wcount)) != 0) {
+ if (atomic_read(&journal->j_wcount) != 0) {
reiserfs_panic(s, "journal-844", "journal list is flushing, "
"wcount is not 0");
}
@@ -1391,20 +1418,25 @@ static int flush_journal_list(struct super_block *s,
goto free_cnode;
}
- /* This transaction failed commit. Don't write out to the disk */
+ /*
+ * This transaction failed commit.
+ * Don't write out to the disk
+ */
if (!(jl->j_state & LIST_DIRTY))
goto free_cnode;
pjl = find_newer_jl_for_cn(cn);
- /* the order is important here. We check pjl to make sure we
- ** don't clear BH_JDirty_wait if we aren't the one writing this
- ** block to disk
+ /*
+ * the order is important here. We check pjl to make sure we
+ * don't clear BH_JDirty_wait if we aren't the one writing this
+ * block to disk
*/
if (!pjl && cn->bh) {
saved_bh = cn->bh;
- /* we do this to make sure nobody releases the buffer while
- ** we are working with it
+ /*
+ * we do this to make sure nobody releases the
+ * buffer while we are working with it
*/
get_bh(saved_bh);
@@ -1413,13 +1445,17 @@ static int flush_journal_list(struct super_block *s,
was_jwait = 1;
was_dirty = 1;
} else if (can_dirty(cn)) {
- /* everything with !pjl && jwait should be writable */
+ /*
+ * everything with !pjl && jwait
+ * should be writable
+ */
BUG();
}
}
- /* if someone has this block in a newer transaction, just make
- ** sure they are committed, and don't try writing it to disk
+ /*
+ * if someone has this block in a newer transaction, just make
+ * sure they are committed, and don't try writing it to disk
*/
if (pjl) {
if (atomic_read(&pjl->j_commit_left))
@@ -1427,16 +1463,18 @@ static int flush_journal_list(struct super_block *s,
goto free_cnode;
}
- /* bh == NULL when the block got to disk on its own, OR,
- ** the block got freed in a future transaction
+ /*
+ * bh == NULL when the block got to disk on its own, OR,
+ * the block got freed in a future transaction
*/
if (saved_bh == NULL) {
goto free_cnode;
}
- /* this should never happen. kupdate_one_transaction has this list
- ** locked while it works, so we should never see a buffer here that
- ** is not marked JDirty_wait
+ /*
+ * this should never happen. kupdate_one_transaction has
+ * this list locked while it works, so we should never see a
+ * buffer here that is not marked JDirty_wait
*/
if ((!was_jwait) && !buffer_locked(saved_bh)) {
reiserfs_warning(s, "journal-813",
@@ -1447,7 +1485,10 @@ static int flush_journal_list(struct super_block *s,
was_jwait ? ' ' : '!');
}
if (was_dirty) {
- /* we inc again because saved_bh gets decremented at free_cnode */
+ /*
+ * we inc again because saved_bh gets decremented
+ * at free_cnode
+ */
get_bh(saved_bh);
set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
lock_buffer(saved_bh);
@@ -1463,13 +1504,16 @@ static int flush_journal_list(struct super_block *s,
(unsigned long long)saved_bh->
b_blocknr, __func__);
}
- free_cnode:
+free_cnode:
last = cn;
cn = cn->next;
if (saved_bh) {
- /* we incremented this to keep others from taking the buffer head away */
+ /*
+ * we incremented this to keep others from
+ * taking the buffer head away
+ */
put_bh(saved_bh);
- if (atomic_read(&(saved_bh->b_count)) < 0) {
+ if (atomic_read(&saved_bh->b_count) < 0) {
reiserfs_warning(s, "journal-945",
"saved_bh->b_count < 0");
}
@@ -1499,8 +1543,10 @@ static int flush_journal_list(struct super_block *s,
#endif
err = -EIO;
}
- /* note, we must clear the JDirty_wait bit after the up to date
- ** check, otherwise we race against our flushpage routine
+ /*
+ * note, we must clear the JDirty_wait bit
+ * after the up-to-date check, otherwise we
+ * race against our flushpage routine
*/
BUG_ON(!test_clear_buffer_journal_dirty
(cn->bh));
@@ -1518,25 +1564,27 @@ static int flush_journal_list(struct super_block *s,
reiserfs_abort(s, -EIO,
"Write error while pushing transaction to disk in %s",
__func__);
- flush_older_and_return:
+flush_older_and_return:
- /* before we can update the journal header block, we _must_ flush all
- ** real blocks from all older transactions to disk. This is because
- ** once the header block is updated, this transaction will not be
- ** replayed after a crash
+ /*
+ * before we can update the journal header block, we _must_ flush all
+ * real blocks from all older transactions to disk. This is because
+ * once the header block is updated, this transaction will not be
+ * replayed after a crash
*/
if (flushall) {
flush_older_journal_lists(s, jl);
}
err = journal->j_errno;
- /* before we can remove everything from the hash tables for this
- ** transaction, we must make sure it can never be replayed
- **
- ** since we are only called from do_journal_end, we know for sure there
- ** are no allocations going on while we are flushing journal lists. So,
- ** we only need to update the journal header block for the last list
- ** being flushed
+ /*
+ * before we can remove everything from the hash tables for this
+ * transaction, we must make sure it can never be replayed
+ *
+ * since we are only called from do_journal_end, we know for sure there
+ * are no allocations going on while we are flushing journal lists. So,
+ * we only need to update the journal header block for the last list
+ * being flushed
*/
if (!err && flushall) {
err =
@@ -1561,11 +1609,12 @@ static int flush_journal_list(struct super_block *s,
}
journal->j_last_flush_id = jl->j_trans_id;
- /* not strictly required since we are freeing the list, but it should
+ /*
+ * not strictly required since we are freeing the list, but it should
* help find code using dead lists later on
*/
jl->j_len = 0;
- atomic_set(&(jl->j_nonzerolen), 0);
+ atomic_set(&jl->j_nonzerolen, 0);
jl->j_start = 0;
jl->j_realblock = NULL;
jl->j_commit_bh = NULL;
@@ -1592,15 +1641,17 @@ static int write_one_transaction(struct super_block *s,
cn = jl->j_realblock;
while (cn) {
- /* if the blocknr == 0, this has been cleared from the hash,
- ** skip it
+ /*
+ * if the blocknr == 0, this has been cleared from the hash,
+ * skip it
*/
if (cn->blocknr == 0) {
goto next;
}
if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
struct buffer_head *tmp_bh;
- /* we can race against journal_mark_freed when we try
+ /*
+ * we can race against journal_mark_freed when we try
* to lock_buffer(cn->bh), so we have to inc the buffer
* count, and recheck things after locking
*/
@@ -1619,7 +1670,7 @@ static int write_one_transaction(struct super_block *s,
}
put_bh(tmp_bh);
}
- next:
+next:
cn = cn->next;
cond_resched();
}
@@ -1637,15 +1688,17 @@ static int dirty_one_transaction(struct super_block *s,
jl->j_state |= LIST_DIRTY;
cn = jl->j_realblock;
while (cn) {
- /* look for a more recent transaction that logged this
- ** buffer. Only the most recent transaction with a buffer in
- ** it is allowed to send that buffer to disk
+ /*
+ * look for a more recent transaction that logged this
+ * buffer. Only the most recent transaction with a buffer in
+ * it is allowed to send that buffer to disk
*/
pjl = find_newer_jl_for_cn(cn);
if (!pjl && cn->blocknr && cn->bh
&& buffer_journal_dirty(cn->bh)) {
BUG_ON(!can_dirty(cn));
- /* if the buffer is prepared, it will either be logged
+ /*
+ * if the buffer is prepared, it will either be logged
* or restored. If restored, we need to make sure
* it actually gets marked dirty
*/
@@ -1682,7 +1735,8 @@ static int kupdate_transactions(struct super_block *s,
goto done;
}
- /* we've got j_flush_mutex held, nobody is going to delete any
+ /*
+ * we've got j_flush_mutex held, nobody is going to delete any
* of these lists out from underneath us
*/
while ((num_trans && transactions_flushed < num_trans) ||
@@ -1716,20 +1770,21 @@ static int kupdate_transactions(struct super_block *s,
write_chunk(&chunk);
}
- done:
+done:
mutex_unlock(&journal->j_flush_mutex);
return ret;
}
-/* for o_sync and fsync heavy applications, they tend to use
-** all the journa list slots with tiny transactions. These
-** trigger lots and lots of calls to update the header block, which
-** adds seeks and slows things down.
-**
-** This function tries to clear out a large chunk of the journal lists
-** at once, which makes everything faster since only the newest journal
-** list updates the header block
-*/
+/*
+ * O_SYNC- and fsync-heavy applications tend to use up
+ * all the journal list slots with tiny transactions. These
+ * trigger lots and lots of calls to update the header block, which
+ * adds seeks and slows things down.
+ *
+ * This function tries to clear out a large chunk of the journal lists
+ * at once, which makes everything faster since only the newest journal
+ * list updates the header block
+ */
static int flush_used_journal_lists(struct super_block *s,
struct reiserfs_journal_list *jl)
{
@@ -1766,9 +1821,11 @@ static int flush_used_journal_lists(struct super_block *s,
}
get_journal_list(jl);
get_journal_list(flush_jl);
- /* try to find a group of blocks we can flush across all the
- ** transactions, but only bother if we've actually spanned
- ** across multiple lists
+
+ /*
+ * try to find a group of blocks we can flush across all the
+ * transactions, but only bother if we've actually spanned
+ * across multiple lists
*/
if (flush_jl != jl) {
ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
@@ -1780,9 +1837,9 @@ static int flush_used_journal_lists(struct super_block *s,
}
/*
-** removes any nodes in table with name block and dev as bh.
-** only touchs the hnext and hprev pointers.
-*/
+ * removes any nodes in table with name block and dev as bh.
+ * only touches the hnext and hprev pointers.
+ */
void remove_journal_hash(struct super_block *sb,
struct reiserfs_journal_cnode **table,
struct reiserfs_journal_list *jl,
@@ -1811,8 +1868,12 @@ void remove_journal_hash(struct super_block *sb,
cur->blocknr = 0;
cur->sb = NULL;
cur->state = 0;
- if (cur->bh && cur->jlist) /* anybody who clears the cur->bh will also dec the nonzerolen */
- atomic_dec(&(cur->jlist->j_nonzerolen));
+ /*
+ * anybody who clears the cur->bh will also
+ * dec the nonzerolen
+ */
+ if (cur->bh && cur->jlist)
+ atomic_dec(&cur->jlist->j_nonzerolen);
cur->bh = NULL;
cur->jlist = NULL;
}
@@ -1832,17 +1893,18 @@ static void free_journal_ram(struct super_block *sb)
if (journal->j_header_bh) {
brelse(journal->j_header_bh);
}
- /* j_header_bh is on the journal dev, make sure not to release the journal
- * dev until we brelse j_header_bh
+ /*
+ * j_header_bh is on the journal dev, make sure
+ * not to release the journal dev until we brelse j_header_bh
*/
release_journal_dev(sb, journal);
vfree(journal);
}
/*
-** call on unmount. Only set error to 1 if you haven't made your way out
-** of read_super() yet. Any other caller must keep error at 0.
-*/
+ * call on unmount. Only set error to 1 if you haven't made your way out
+ * of read_super() yet. Any other caller must keep error at 0.
+ */
static int do_journal_release(struct reiserfs_transaction_handle *th,
struct super_block *sb, int error)
{
@@ -1850,21 +1912,25 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
int flushed = 0;
struct reiserfs_journal *journal = SB_JOURNAL(sb);
- /* we only want to flush out transactions if we were called with error == 0
+ /*
+ * we only want to flush out transactions if we were
+ * called with error == 0
*/
if (!error && !(sb->s_flags & MS_RDONLY)) {
/* end the current trans */
BUG_ON(!th->t_trans_id);
- do_journal_end(th, sb, 10, FLUSH_ALL);
+ do_journal_end(th, FLUSH_ALL);
- /* make sure something gets logged to force our way into the flush code */
- if (!journal_join(&myth, sb, 1)) {
+ /*
+ * make sure something gets logged to force
+ * our way into the flush code
+ */
+ if (!journal_join(&myth, sb)) {
reiserfs_prepare_for_journal(sb,
SB_BUFFER_WITH_SB(sb),
1);
- journal_mark_dirty(&myth, sb,
- SB_BUFFER_WITH_SB(sb));
- do_journal_end(&myth, sb, 1, FLUSH_ALL);
+ journal_mark_dirty(&myth, SB_BUFFER_WITH_SB(sb));
+ do_journal_end(&myth, FLUSH_ALL);
flushed = 1;
}
}
@@ -1872,17 +1938,15 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
/* this also catches errors during the do_journal_end above */
if (!error && reiserfs_is_journal_aborted(journal)) {
memset(&myth, 0, sizeof(myth));
- if (!journal_join_abort(&myth, sb, 1)) {
+ if (!journal_join_abort(&myth, sb)) {
reiserfs_prepare_for_journal(sb,
SB_BUFFER_WITH_SB(sb),
1);
- journal_mark_dirty(&myth, sb,
- SB_BUFFER_WITH_SB(sb));
- do_journal_end(&myth, sb, 1, FLUSH_ALL);
+ journal_mark_dirty(&myth, SB_BUFFER_WITH_SB(sb));
+ do_journal_end(&myth, FLUSH_ALL);
}
}
- reiserfs_mounted_fs_count--;
/* wait for all commits to finish */
cancel_delayed_work(&SB_JOURNAL(sb)->j_work);
@@ -1893,12 +1957,7 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
reiserfs_write_unlock(sb);
cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
- flush_workqueue(commit_wq);
-
- if (!reiserfs_mounted_fs_count) {
- destroy_workqueue(commit_wq);
- commit_wq = NULL;
- }
+ flush_workqueue(REISERFS_SB(sb)->commit_wq);
free_journal_ram(sb);
@@ -1907,25 +1966,24 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
return 0;
}
-/*
-** call on unmount. flush all journal trans, release all alloc'd ram
-*/
+/* call on unmount. flush all journal trans, release all alloc'd ram */
int journal_release(struct reiserfs_transaction_handle *th,
struct super_block *sb)
{
return do_journal_release(th, sb, 0);
}
-/*
-** only call from an error condition inside reiserfs_read_super!
-*/
+/* only call from an error condition inside reiserfs_read_super! */
int journal_release_error(struct reiserfs_transaction_handle *th,
struct super_block *sb)
{
return do_journal_release(th, sb, 1);
}
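As a hedged illustration of the two unmount paths (the callers sketched here are assumptions about the surrounding super.c code, not part of this patch):

        /* normal unmount, e.g. from reiserfs_put_super(): */
        struct reiserfs_transaction_handle th;
        th.t_trans_id = 0;
        journal_release(&th, sb);

        /* mount failed after journal_init() succeeded: */
        journal_release_error(NULL, sb);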
-/* compares description block with commit block. returns 1 if they differ, 0 if they are the same */
+/*
+ * compares description block with commit block.
+ * returns 1 if they differ, 0 if they are the same
+ */
static int journal_compare_desc_commit(struct super_block *sb,
struct reiserfs_journal_desc *desc,
struct reiserfs_journal_commit *commit)
@@ -1939,11 +1997,12 @@ static int journal_compare_desc_commit(struct super_block *sb,
return 0;
}
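For reference, a minimal sketch of the comparison this function performs, reconstructed from the desc/commit accessors used elsewhere in this file (the exact body lies outside the visible hunk):

        if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
            get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
            get_commit_trans_len(commit) > SB_JOURNAL(sb)->j_trans_max ||
            get_commit_trans_len(commit) <= 0)
                return 1;       /* desc and commit disagree */
        return 0;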
-/* returns 0 if it did not find a description block
-** returns -1 if it found a corrupt commit block
-** returns 1 if both desc and commit were valid
-** NOTE: only called during fs mount
-*/
+/*
+ * returns 0 if it did not find a description block
+ * returns -1 if it found a corrupt commit block
+ * returns 1 if both desc and commit were valid
+ * NOTE: only called during fs mount
+ */
static int journal_transaction_is_valid(struct super_block *sb,
struct buffer_head *d_bh,
unsigned int *oldest_invalid_trans_id,
@@ -1989,7 +2048,10 @@ static int journal_transaction_is_valid(struct super_block *sb,
}
offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
- /* ok, we have a journal description block, lets see if the transaction was valid */
+ /*
+ * ok, we have a journal description block,
+ * let's see if the transaction was valid
+ */
c_bh =
journal_bread(sb,
SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
@@ -2041,11 +2103,11 @@ static void brelse_array(struct buffer_head **heads, int num)
}
/*
-** given the start, and values for the oldest acceptable transactions,
-** this either reads in a replays a transaction, or returns because the
-** transaction is invalid, or too old.
-** NOTE: only called during fs mount
-*/
+ * given the start, and values for the oldest acceptable transactions,
+ * this either reads in and replays a transaction, or returns because the
+ * transaction is invalid, or too old.
+ * NOTE: only called during fs mount
+ */
static int journal_read_transaction(struct super_block *sb,
unsigned long cur_dblock,
unsigned long oldest_start,
@@ -2119,7 +2181,10 @@ static int journal_read_transaction(struct super_block *sb,
}
trans_id = get_desc_trans_id(desc);
- /* now we know we've got a good transaction, and it was inside the valid time ranges */
+ /*
+ * now we know we've got a good transaction, and it was
+ * inside the valid time ranges
+ */
log_blocks = kmalloc(get_desc_trans_len(desc) *
sizeof(struct buffer_head *), GFP_NOFS);
real_blocks = kmalloc(get_desc_trans_len(desc) *
@@ -2164,7 +2229,7 @@ static int journal_read_transaction(struct super_block *sb,
reiserfs_warning(sb, "journal-1204",
"REPLAY FAILURE fsck required! "
"Trying to replay onto a log block");
- abort_replay:
+abort_replay:
brelse_array(log_blocks, i);
brelse_array(real_blocks, i);
brelse(c_bh);
@@ -2226,7 +2291,10 @@ static int journal_read_transaction(struct super_block *sb,
"journal-1095: setting journal " "start to offset %ld",
cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb));
- /* init starting values for the first transaction, in case this is the last transaction to be replayed. */
+ /*
+ * init starting values for the first transaction, in case
+ * this is the last transaction to be replayed.
+ */
journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
journal->j_last_flush_trans_id = trans_id;
journal->j_trans_id = trans_id + 1;
@@ -2240,12 +2308,14 @@ static int journal_read_transaction(struct super_block *sb,
return 0;
}
-/* This function reads blocks starting from block and to max_block of bufsize
- size (but no more than BUFNR blocks at a time). This proved to improve
- mounting speed on self-rebuilding raid5 arrays at least.
- Right now it is only used from journal code. But later we might use it
- from other places.
- Note: Do not use journal_getblk/sb_getblk functions here! */
+/*
+ * This function reads blocks starting at block, up to max_block, of bufsize
+ * size each (but no more than BUFNR blocks at a time). This proved to improve
+ * mounting speed on self-rebuilding raid5 arrays at least.
+ * Right now it is only used from journal code. But later we might use it
+ * from other places.
+ * Note: Do not use journal_getblk/sb_getblk functions here!
+ */
static struct buffer_head *reiserfs_breada(struct block_device *dev,
b_blocknr_t block, int bufsize,
b_blocknr_t max_block)
@@ -2284,15 +2354,17 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
}
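A hedged sketch of the batched read-ahead idea (the variable names and the ll_rw_block() submission are illustrative; the real function also waits on and brelses the extra buffers):

        struct buffer_head *bhlist[BUFNR];
        int i, blocks = 1;

        bhlist[0] = __getblk(dev, block, bufsize);
        /* gather up to BUFNR contiguous, not-yet-uptodate buffers */
        for (i = 1; i < BUFNR && block + i < max_block; i++) {
                bhlist[i] = __getblk(dev, block + i, bufsize);
                if (buffer_uptodate(bhlist[i])) {
                        brelse(bhlist[i]);
                        break;
                }
                blocks++;
        }
        ll_rw_block(READ, blocks, bhlist);
        wait_on_buffer(bhlist[0]);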
/*
-** read and replay the log
-** on a clean unmount, the journal header's next unflushed pointer will
-** be to an invalid transaction. This tests that before finding all the
-** transactions in the log, which makes normal mount times fast.
-** After a crash, this starts with the next unflushed transaction, and
-** replays until it finds one too old, or invalid.
-** On exit, it sets things up so the first transaction will work correctly.
-** NOTE: only called during fs mount
-*/
+ * read and replay the log
+ * on a clean unmount, the journal header's next unflushed pointer will be
+ * to an invalid transaction. This tests that before finding all the
+ * transactions in the log, which makes normal mount times fast.
+ *
+ * After a crash, this starts with the next unflushed transaction, and
+ * replays until it finds one too old, or invalid.
+ *
+ * On exit, it sets things up so the first transaction will work correctly.
+ * NOTE: only called during fs mount
+ */
static int journal_read(struct super_block *sb)
{
struct reiserfs_journal *journal = SB_JOURNAL(sb);
@@ -2316,9 +2388,10 @@ static int journal_read(struct super_block *sb)
bdevname(journal->j_dev_bd, b));
start = get_seconds();
- /* step 1, read in the journal header block. Check the transaction it says
- ** is the first unflushed, and if that transaction is not valid,
- ** replay is done
+ /*
+ * step 1, read in the journal header block. Check the transaction
+ * it says is the first unflushed, and if that transaction is not
+ * valid, replay is done
*/
journal->j_header_bh = journal_bread(sb,
SB_ONDISK_JOURNAL_1st_BLOCK(sb)
@@ -2342,9 +2415,10 @@ static int journal_read(struct super_block *sb)
le32_to_cpu(jh->j_last_flush_trans_id));
valid_journal_header = 1;
- /* now, we try to read the first unflushed offset. If it is not valid,
- ** there is nothing more we can do, and it makes no sense to read
- ** through the whole log.
+ /*
+ * now, we try to read the first unflushed offset. If it
+ * is not valid, there is nothing more we can do, and it
+ * makes no sense to read through the whole log.
*/
d_bh =
journal_bread(sb,
@@ -2358,15 +2432,19 @@ static int journal_read(struct super_block *sb)
goto start_log_replay;
}
- /* ok, there are transactions that need to be replayed. start with the first log block, find
- ** all the valid transactions, and pick out the oldest.
+ /*
+ * ok, there are transactions that need to be replayed. start
+ * with the first log block, find all the valid transactions, and
+ * pick out the oldest.
*/
while (continue_replay
&& cur_dblock <
(SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
SB_ONDISK_JOURNAL_SIZE(sb))) {
- /* Note that it is required for blocksize of primary fs device and journal
- device to be the same */
+ /*
+ * Note that it is required for blocksize of primary fs
+ * device and journal device to be the same
+ */
d_bh =
reiserfs_breada(journal->j_dev_bd, cur_dblock,
sb->s_blocksize,
@@ -2413,7 +2491,7 @@ static int journal_read(struct super_block *sb)
brelse(d_bh);
}
- start_log_replay:
+start_log_replay:
cur_dblock = oldest_start;
if (oldest_trans_id) {
reiserfs_debug(sb, REISERFS_DEBUG_CODE,
@@ -2444,9 +2522,11 @@ static int journal_read(struct super_block *sb)
reiserfs_debug(sb, REISERFS_DEBUG_CODE,
"journal-1225: No valid " "transactions found");
}
- /* j_start does not get set correctly if we don't replay any transactions.
- ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
- ** copy the trans_id from the header
+ /*
+ * j_start does not get set correctly if we don't replay any
+ * transactions. if we had a valid journal_header, set j_start
+ * to the first unflushed transaction value, copy the trans_id
+ * from the header
*/
if (valid_journal_header && replay_count == 0) {
journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
@@ -2475,8 +2555,9 @@ static int journal_read(struct super_block *sb)
_update_journal_header_block(sb, journal->j_start,
journal->j_last_flush_trans_id)) {
reiserfs_write_unlock(sb);
- /* replay failed, caller must call free_journal_ram and abort
- ** the mount
+ /*
+ * replay failed, caller must call free_journal_ram and abort
+ * the mount
*/
return -1;
}
@@ -2569,7 +2650,7 @@ static int journal_init_dev(struct super_block *super,
return 0;
}
-/**
+/*
* When creating/tuning a file system user can assign some
* journal params within boundaries which depend on the ratio
* blocksize/standard_blocksize.
@@ -2587,8 +2668,7 @@ static int check_advise_trans_params(struct super_block *sb,
struct reiserfs_journal *journal)
{
if (journal->j_trans_max) {
- /* Non-default journal params.
- Do sanity check for them. */
+ /* Non-default journal params. Do sanity check for them. */
int ratio = 1;
if (sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize;
@@ -2610,10 +2690,12 @@ static int check_advise_trans_params(struct super_block *sb,
return 1;
}
} else {
- /* Default journal params.
- The file system was created by old version
- of mkreiserfs, so some fields contain zeros,
- and we need to advise proper values for them */
+ /*
+ * Default journal params.
+ * The file system was created by an old version
+ * of mkreiserfs, so some fields contain zeros,
+ * and we need to advise proper values for them
+ */
if (sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) {
reiserfs_warning(sb, "sh-464", "bad blocksize (%u)",
sb->s_blocksize);
@@ -2626,9 +2708,7 @@ static int check_advise_trans_params(struct super_block *sb,
return 0;
}
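A worked example of the ratio bound above, assuming the default constants from reiserfs.h (JOURNAL_TRANS_MAX_DEFAULT and JOURNAL_TRANS_MIN_DEFAULT are assumptions here, not shown in this hunk):

        int ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize; /* 4096/1024 = 4 */
        /* a user-set j_trans_max must stay within the scaled bounds */
        if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio ||
            journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio)
                return 1;       /* reject and advise proper values */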
-/*
-** must be called once on fs mount. calls journal_read for you
-*/
+/* must be called once on fs mount. calls journal_read for you */
int journal_init(struct super_block *sb, const char *j_dev_name,
int old_format, unsigned int commit_max_age)
{
@@ -2667,8 +2747,10 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
REISERFS_DISK_OFFSET_IN_BYTES /
sb->s_blocksize + 2);
- /* Sanity check to see is the standard journal fitting within first bitmap
- (actual for small blocksizes) */
+ /*
+ * Sanity check to see if the standard journal fits
+ * within the first bitmap (relevant for small blocksizes)
+ */
if (!SB_ONDISK_JOURNAL_DEVICE(sb) &&
(SB_JOURNAL_1st_RESERVED_BLOCK(sb) +
SB_ONDISK_JOURNAL_SIZE(sb) > sb->s_blocksize * 8)) {
@@ -2754,20 +2836,20 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
journal->j_start = 0;
journal->j_len = 0;
journal->j_len_alloc = 0;
- atomic_set(&(journal->j_wcount), 0);
- atomic_set(&(journal->j_async_throttle), 0);
+ atomic_set(&journal->j_wcount, 0);
+ atomic_set(&journal->j_async_throttle, 0);
journal->j_bcount = 0;
journal->j_trans_start_time = 0;
journal->j_last = NULL;
journal->j_first = NULL;
- init_waitqueue_head(&(journal->j_join_wait));
+ init_waitqueue_head(&journal->j_join_wait);
mutex_init(&journal->j_mutex);
mutex_init(&journal->j_flush_mutex);
journal->j_trans_id = 10;
journal->j_mount_id = 10;
journal->j_state = 0;
- atomic_set(&(journal->j_jlock), 0);
+ atomic_set(&journal->j_jlock, 0);
journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
journal->j_cnode_free_orig = journal->j_cnode_free_list;
journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
@@ -2807,23 +2889,19 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
goto free_and_return;
}
- reiserfs_mounted_fs_count++;
- if (reiserfs_mounted_fs_count <= 1)
- commit_wq = alloc_workqueue("reiserfs", WQ_MEM_RECLAIM, 0);
-
INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
journal->j_work_sb = sb;
return 0;
- free_and_return:
+free_and_return:
free_journal_ram(sb);
return 1;
}
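A hedged sketch of the mount-time call (mirroring how reiserfs_fill_super is expected to use it; the label and message are illustrative):

        if (journal_init(sb, j_dev_name, old_format, commit_max_age)) {
                /* free_and_return already released the journal RAM */
                reiserfs_warning(sb, "sh-2022",
                                 "unable to initialize journal space");
                goto error;
        }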
/*
-** test for a polite end of the current transaction. Used by file_write, and should
-** be used by delete to make sure they don't write more than can fit inside a single
-** transaction
-*/
+ * test for a polite end of the current transaction. Used by file_write,
+ * and should be used by delete to make sure they don't write more than
+ * can fit inside a single transaction
+ */
int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
int new_alloc)
{
@@ -2835,7 +2913,7 @@ int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
return 0;
if (journal->j_must_wait > 0 ||
(journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
- atomic_read(&(journal->j_jlock)) ||
+ atomic_read(&journal->j_jlock) ||
(now - journal->j_trans_start_time) > journal->j_max_trans_age ||
journal->j_cnode_free < (journal->j_trans_max * 3)) {
return 1;
@@ -2846,8 +2924,7 @@ int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
return 0;
}
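A hedged caller sketch; the restart helper named here is an assumption about the surrounding code, not something introduced by this patch:

        /* inside a long-running write or delete loop */
        if (journal_transaction_should_end(th, blocks_needed)) {
                err = restart_transaction(th, inode, blocks_needed);
                if (err)
                        goto out;
        }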
-/* this must be called inside a transaction
-*/
+/* this must be called inside a transaction */
void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
{
struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
@@ -2857,8 +2934,7 @@ void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
return;
}
-/* this must be called without a transaction started
-*/
+/* this must be called without a transaction started */
void reiserfs_allow_writes(struct super_block *s)
{
struct reiserfs_journal *journal = SB_JOURNAL(s);
@@ -2866,8 +2942,7 @@ void reiserfs_allow_writes(struct super_block *s)
wake_up(&journal->j_join_wait);
}
-/* this must be called without a transaction started
-*/
+/* this must be called without a transaction started */
void reiserfs_wait_on_write_block(struct super_block *s)
{
struct reiserfs_journal *journal = SB_JOURNAL(s);
@@ -2929,11 +3004,12 @@ static void let_transaction_grow(struct super_block *sb, unsigned int trans_id)
}
}
-/* join == true if you must join an existing transaction.
-** join == false if you can deal with waiting for others to finish
-**
-** this will block until the transaction is joinable. send the number of blocks you
-** expect to use in nblocks.
+/*
+ * join == true if you must join an existing transaction.
+ * join == false if you can deal with waiting for others to finish
+ *
+ * this will block until the transaction is joinable. send the number of
+ * blocks you expect to use in nblocks.
*/
static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
struct super_block *sb, unsigned long nblocks,
@@ -2955,7 +3031,7 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
th->t_refcount = 1;
th->t_super = sb;
- relock:
+relock:
lock_journal(sb);
if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
unlock_journal(sb);
@@ -2974,9 +3050,11 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
}
now = get_seconds();
- /* if there is no room in the journal OR
- ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
- ** we don't sleep if there aren't other writers
+ /*
+ * if there is no room in the journal OR
+ * if this transaction is too old, and we weren't called joinable,
+ * wait for it to finish before beginning. We don't sleep if there
+ * aren't other writers
*/
if ((!join && journal->j_must_wait > 0) ||
@@ -2990,7 +3068,8 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
|| (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {
old_trans_id = journal->j_trans_id;
- unlock_journal(sb); /* allow others to finish this transaction */
+ /* allow others to finish this transaction */
+ unlock_journal(sb);
if (!join && (journal->j_len_alloc + nblocks + 2) >=
journal->j_max_batch &&
@@ -3002,8 +3081,9 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
goto relock;
}
}
- /* don't mess with joining the transaction if all we have to do is
- * wait for someone else to do a commit
+ /*
+ * don't mess with joining the transaction if all we
+ * have to do is wait for someone else to do a commit
*/
if (atomic_read(&journal->j_jlock)) {
while (journal->j_trans_id == old_trans_id &&
@@ -3012,15 +3092,15 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
}
goto relock;
}
- retval = journal_join(&myth, sb, 1);
+ retval = journal_join(&myth, sb);
if (retval)
goto out_fail;
/* someone might have ended the transaction while we joined */
if (old_trans_id != journal->j_trans_id) {
- retval = do_journal_end(&myth, sb, 1, 0);
+ retval = do_journal_end(&myth, 0);
} else {
- retval = do_journal_end(&myth, sb, 1, COMMIT_NOW);
+ retval = do_journal_end(&myth, COMMIT_NOW);
}
if (retval)
@@ -3033,7 +3113,7 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
if (journal->j_trans_start_time == 0) {
journal->j_trans_start_time = get_seconds();
}
- atomic_inc(&(journal->j_wcount));
+ atomic_inc(&journal->j_wcount);
journal->j_len_alloc += nblocks;
th->t_blocks_logged = 0;
th->t_blocks_allocated = nblocks;
@@ -3042,11 +3122,13 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
INIT_LIST_HEAD(&th->t_list);
return 0;
- out_fail:
+out_fail:
memset(th, 0, sizeof(*th));
- /* Re-set th->t_super, so we can properly keep track of how many
+ /*
+ * Re-set th->t_super, so we can properly keep track of how many
* persistent transactions there are. We need to do this so if this
- * call is part of a failed restart_transaction, we can free it later */
+ * call is part of a failed restart_transaction, we can free it later
+ */
th->t_super = sb;
return retval;
}
@@ -3059,14 +3141,15 @@ struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
int ret;
struct reiserfs_transaction_handle *th;
- /* if we're nesting into an existing transaction. It will be
- ** persistent on its own
+ /*
+ * if we're nesting into an existing transaction, it will be
+ * persistent on its own
*/
if (reiserfs_transaction_running(s)) {
th = current->journal_info;
th->t_refcount++;
BUG_ON(th->t_refcount < 2);
-
+
return th;
}
th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
@@ -3087,7 +3170,7 @@ int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
struct super_block *s = th->t_super;
int ret = 0;
if (th->t_trans_id)
- ret = journal_end(th, th->t_super, th->t_blocks_allocated);
+ ret = journal_end(th);
else
ret = -EIO;
if (th->t_refcount == 0) {
@@ -3098,29 +3181,31 @@ int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
}
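A minimal usage sketch of the persistent-handle pair (error handling trimmed; jbegin_count is an illustrative block estimate):

        struct reiserfs_transaction_handle *th;
        int err;

        th = reiserfs_persistent_transaction(s, jbegin_count);
        if (!th)
                return -ENOMEM;
        /* ... journaled metadata work, journal_mark_dirty(th, bh), ... */
        err = reiserfs_end_persistent_transaction(th);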
static int journal_join(struct reiserfs_transaction_handle *th,
- struct super_block *sb, unsigned long nblocks)
+ struct super_block *sb)
{
struct reiserfs_transaction_handle *cur_th = current->journal_info;
- /* this keeps do_journal_end from NULLing out the current->journal_info
- ** pointer
+ /*
+ * this keeps do_journal_end from NULLing out the
+ * current->journal_info pointer
*/
th->t_handle_save = cur_th;
BUG_ON(cur_th && cur_th->t_refcount > 1);
- return do_journal_begin_r(th, sb, nblocks, JBEGIN_JOIN);
+ return do_journal_begin_r(th, sb, 1, JBEGIN_JOIN);
}
int journal_join_abort(struct reiserfs_transaction_handle *th,
- struct super_block *sb, unsigned long nblocks)
+ struct super_block *sb)
{
struct reiserfs_transaction_handle *cur_th = current->journal_info;
- /* this keeps do_journal_end from NULLing out the current->journal_info
- ** pointer
+ /*
+ * this keeps do_journal_end from NULLing out the
+ * current->journal_info pointer
*/
th->t_handle_save = cur_th;
BUG_ON(cur_th && cur_th->t_refcount > 1);
- return do_journal_begin_r(th, sb, nblocks, JBEGIN_ABORT);
+ return do_journal_begin_r(th, sb, 1, JBEGIN_ABORT);
}
int journal_begin(struct reiserfs_transaction_handle *th,
@@ -3142,9 +3227,10 @@ int journal_begin(struct reiserfs_transaction_handle *th,
"journal_info != 0");
return 0;
} else {
- /* we've ended up with a handle from a different filesystem.
- ** save it and restore on journal_end. This should never
- ** really happen...
+ /*
+ * we've ended up with a handle from a different
+ * filesystem. save it and restore on journal_end.
+ * This should never really happen...
*/
reiserfs_warning(sb, "clm-2100",
"nesting info a different FS");
@@ -3157,9 +3243,10 @@ int journal_begin(struct reiserfs_transaction_handle *th,
ret = do_journal_begin_r(th, sb, nblocks, JBEGIN_REG);
BUG_ON(current->journal_info != th);
- /* I guess this boils down to being the reciprocal of clm-2100 above.
- * If do_journal_begin_r fails, we need to put it back, since journal_end
- * won't be called to do it. */
+ /*
+ * I guess this boils down to being the reciprocal of clm-2100 above.
+ * If do_journal_begin_r fails, we need to put it back, since
+ * journal_end won't be called to do it.
+ */
if (ret)
current->journal_info = th->t_handle_save;
else
@@ -3169,17 +3256,19 @@ int journal_begin(struct reiserfs_transaction_handle *th,
}
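Taken together, the signature changes in this series reduce the common pattern to the following hedged sketch (jbegin_count is illustrative; the superblock and block count now come from the handle itself):

        struct reiserfs_transaction_handle th;
        int err;

        err = journal_begin(&th, sb, jbegin_count);
        if (err)
                return err;
        reiserfs_prepare_for_journal(sb, bh, 1);
        /* ... modify bh ... */
        journal_mark_dirty(&th, bh);    /* sb argument dropped */
        err = journal_end(&th);         /* sb and nblocks arguments dropped */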
/*
-** puts bh into the current transaction. If it was already there, reorders removes the
-** old pointers from the hash, and puts new ones in (to make sure replay happen in the right order).
-**
-** if it was dirty, cleans and files onto the clean list. I can't let it be dirty again until the
-** transaction is committed.
-**
-** if j_len, is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
-*/
+ * puts bh into the current transaction. If it was already there, it
+ * reorders: removes the old pointers from the hash, and puts new ones
+ * in (to make sure replay happens in the right order).
+ *
+ * if it was dirty, cleans and files onto the clean list. I can't let it
+ * be dirty again until the transaction is committed.
+ *
+ * if j_len is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
+ */
int journal_mark_dirty(struct reiserfs_transaction_handle *th,
- struct super_block *sb, struct buffer_head *bh)
+ struct buffer_head *bh)
{
+ struct super_block *sb = th->t_super;
struct reiserfs_journal *journal = SB_JOURNAL(sb);
struct reiserfs_journal_cnode *cn = NULL;
int count_already_incd = 0;
@@ -3201,9 +3290,10 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th,
return 0;
}
- /* this must be turned into a panic instead of a warning. We can't allow
- ** a dirty or journal_dirty or locked buffer to be logged, as some changes
- ** could get to disk too early. NOT GOOD.
+ /*
+ * this must be turned into a panic instead of a warning. We can't
+ * allow a dirty or journal_dirty or locked buffer to be logged, as
+ * some changes could get to disk too early. NOT GOOD.
*/
if (!prepared || buffer_dirty(bh)) {
reiserfs_warning(sb, "journal-1777",
@@ -3216,14 +3306,16 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th,
buffer_journal_dirty(bh) ? ' ' : '!');
}
- if (atomic_read(&(journal->j_wcount)) <= 0) {
+ if (atomic_read(&journal->j_wcount) <= 0) {
reiserfs_warning(sb, "journal-1409",
"returning because j_wcount was %d",
- atomic_read(&(journal->j_wcount)));
+ atomic_read(&journal->j_wcount));
return 1;
}
- /* this error means I've screwed up, and we've overflowed the transaction.
- ** Nothing can be done here, except make the FS readonly or panic.
+ /*
+ * this error means I've screwed up, and we've overflowed
+ * the transaction. Nothing can be done here, except make the
+ * FS readonly or panic.
*/
if (journal->j_len >= journal->j_trans_max) {
reiserfs_panic(th->t_super, "journal-1413",
@@ -3280,9 +3372,9 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th,
return 0;
}
-int journal_end(struct reiserfs_transaction_handle *th,
- struct super_block *sb, unsigned long nblocks)
+int journal_end(struct reiserfs_transaction_handle *th)
{
+ struct super_block *sb = th->t_super;
if (!current->journal_info && th->t_refcount > 1)
reiserfs_warning(sb, "REISER-NESTING",
"th NULL, refcount %d", th->t_refcount);
@@ -3297,8 +3389,9 @@ int journal_end(struct reiserfs_transaction_handle *th,
struct reiserfs_transaction_handle *cur_th =
current->journal_info;
- /* we aren't allowed to close a nested transaction on a different
- ** filesystem from the one in the task struct
+ /*
+ * we aren't allowed to close a nested transaction on a
+ * different filesystem from the one in the task struct
*/
BUG_ON(cur_th->t_super != th->t_super);
@@ -3308,17 +3401,18 @@ int journal_end(struct reiserfs_transaction_handle *th,
}
return 0;
} else {
- return do_journal_end(th, sb, nblocks, 0);
+ return do_journal_end(th, 0);
}
}
-/* removes from the current transaction, relsing and descrementing any counters.
-** also files the removed buffer directly onto the clean list
-**
-** called by journal_mark_freed when a block has been deleted
-**
-** returns 1 if it cleaned and relsed the buffer. 0 otherwise
-*/
+/*
+ * removes from the current transaction, releasing and decrementing any counters.
+ * also files the removed buffer directly onto the clean list
+ *
+ * called by journal_mark_freed when a block has been deleted
+ *
+ * returns 1 if it cleaned and released the buffer. 0 otherwise
+ */
static int remove_from_transaction(struct super_block *sb,
b_blocknr_t blocknr, int already_cleaned)
{
@@ -3354,7 +3448,7 @@ static int remove_from_transaction(struct super_block *sb,
clear_buffer_dirty(bh);
clear_buffer_journal_test(bh);
put_bh(bh);
- if (atomic_read(&(bh->b_count)) < 0) {
+ if (atomic_read(&bh->b_count) < 0) {
reiserfs_warning(sb, "journal-1752",
"b_count < 0");
}
@@ -3367,15 +3461,16 @@ static int remove_from_transaction(struct super_block *sb,
}
/*
-** for any cnode in a journal list, it can only be dirtied of all the
-** transactions that include it are committed to disk.
-** this checks through each transaction, and returns 1 if you are allowed to dirty,
-** and 0 if you aren't
-**
-** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
-** blocks for a given transaction on disk
-**
-*/
+ * for any cnode in a journal list, it can only be dirtied if all the
+ * transactions that include it are committed to disk.
+ * this checks through each transaction, and returns 1 if you are allowed
+ * to dirty, and 0 if you aren't
+ *
+ * it is called by dirty_journal_list, which is called after
+ * flush_commit_list has gotten all the log blocks for a given
+ * transaction on disk
+ */
static int can_dirty(struct reiserfs_journal_cnode *cn)
{
struct super_block *sb = cn->sb;
@@ -3383,9 +3478,10 @@ static int can_dirty(struct reiserfs_journal_cnode *cn)
struct reiserfs_journal_cnode *cur = cn->hprev;
int can_dirty = 1;
- /* first test hprev. These are all newer than cn, so any node here
- ** with the same block number and dev means this node can't be sent
- ** to disk right now.
+ /*
+ * first test hprev. These are all newer than cn, so any node here
+ * with the same block number and dev means this node can't be sent
+ * to disk right now.
*/
while (cur && can_dirty) {
if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
@@ -3394,13 +3490,14 @@ static int can_dirty(struct reiserfs_journal_cnode *cn)
}
cur = cur->hprev;
}
- /* then test hnext. These are all older than cn. As long as they
- ** are committed to the log, it is safe to write cn to disk
+ /*
+ * then test hnext. These are all older than cn. As long as they
+ * are committed to the log, it is safe to write cn to disk
*/
cur = cn->hnext;
while (cur && can_dirty) {
if (cur->jlist && cur->jlist->j_len > 0 &&
- atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
+ atomic_read(&cur->jlist->j_commit_left) > 0 && cur->bh &&
cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
can_dirty = 0;
}
@@ -3409,12 +3506,13 @@ static int can_dirty(struct reiserfs_journal_cnode *cn)
return can_dirty;
}
-/* syncs the commit blocks, but does not force the real buffers to disk
-** will wait until the current transaction is done/committed before returning
-*/
-int journal_end_sync(struct reiserfs_transaction_handle *th,
- struct super_block *sb, unsigned long nblocks)
+/*
+ * syncs the commit blocks, but does not force the real buffers to disk
+ * will wait until the current transaction is done/committed before returning
+ */
+int journal_end_sync(struct reiserfs_transaction_handle *th)
{
+ struct super_block *sb = th->t_super;
struct reiserfs_journal *journal = SB_JOURNAL(sb);
BUG_ON(!th->t_trans_id);
@@ -3423,14 +3521,12 @@ int journal_end_sync(struct reiserfs_transaction_handle *th,
if (journal->j_len == 0) {
reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
1);
- journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb));
+ journal_mark_dirty(th, SB_BUFFER_WITH_SB(sb));
}
- return do_journal_end(th, sb, nblocks, COMMIT_NOW | WAIT);
+ return do_journal_end(th, COMMIT_NOW | WAIT);
}
-/*
-** writeback the pending async commits to disk
-*/
+/* writeback the pending async commits to disk */
static void flush_async_commits(struct work_struct *work)
{
struct reiserfs_journal *journal =
@@ -3450,9 +3546,9 @@ static void flush_async_commits(struct work_struct *work)
}
/*
-** flushes any old transactions to disk
-** ends the current transaction if it is too old
-*/
+ * flushes any old transactions to disk
+ * ends the current transaction if it is too old
+ */
void reiserfs_flush_old_commits(struct super_block *sb)
{
time_t now;
@@ -3460,48 +3556,53 @@ void reiserfs_flush_old_commits(struct super_block *sb)
struct reiserfs_journal *journal = SB_JOURNAL(sb);
now = get_seconds();
- /* safety check so we don't flush while we are replaying the log during
+ /*
+ * safety check so we don't flush while we are replaying the log during
* mount
*/
if (list_empty(&journal->j_journal_list))
return;
- /* check the current transaction. If there are no writers, and it is
+ /*
+ * check the current transaction. If there are no writers, and it is
* too old, finish it, and force the commit blocks to disk
*/
if (atomic_read(&journal->j_wcount) <= 0 &&
journal->j_trans_start_time > 0 &&
journal->j_len > 0 &&
(now - journal->j_trans_start_time) > journal->j_max_trans_age) {
- if (!journal_join(&th, sb, 1)) {
+ if (!journal_join(&th, sb)) {
reiserfs_prepare_for_journal(sb,
SB_BUFFER_WITH_SB(sb),
1);
- journal_mark_dirty(&th, sb,
- SB_BUFFER_WITH_SB(sb));
+ journal_mark_dirty(&th, SB_BUFFER_WITH_SB(sb));
- /* we're only being called from kreiserfsd, it makes no sense to do
- ** an async commit so that kreiserfsd can do it later
+ /*
+ * we're only being called from kreiserfsd, it makes
+ * no sense to do an async commit so that kreiserfsd
+ * can do it later
*/
- do_journal_end(&th, sb, 1, COMMIT_NOW | WAIT);
+ do_journal_end(&th, COMMIT_NOW | WAIT);
}
}
}
/*
-** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
-**
-** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
-** the writers are done. By the time it wakes up, the transaction it was called has already ended, so it just
-** flushes the commit list and returns 0.
-**
-** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait.
-**
-** Note, we can't allow the journal_end to proceed while there are still writers in the log.
-*/
-static int check_journal_end(struct reiserfs_transaction_handle *th,
- struct super_block *sb, unsigned long nblocks,
- int flags)
+ * returns 0 if do_journal_end should return right away, returns 1 if
+ * do_journal_end should finish the commit
+ *
+ * if the current transaction is too old, but still has writers, this will
+ * wait on j_join_wait until all the writers are done. By the time it
+ * wakes up, the transaction it was called for has already ended, so it just
+ * flushes the commit list and returns 0.
+ *
+ * Won't batch when flush or commit_now is set. Also won't batch when
+ * others are waiting on j_join_wait.
+ *
+ * Note, we can't allow the journal_end to proceed while there are still
+ * writers in the log.
+ */
+static int check_journal_end(struct reiserfs_transaction_handle *th, int flags)
{
time_t now;
@@ -3509,6 +3610,7 @@ static int check_journal_end(struct reiserfs_transaction_handle *th,
int commit_now = flags & COMMIT_NOW;
int wait_on_commit = flags & WAIT;
struct reiserfs_journal_list *jl;
+ struct super_block *sb = th->t_super;
struct reiserfs_journal *journal = SB_JOURNAL(sb);
BUG_ON(!th->t_trans_id);
@@ -3520,23 +3622,27 @@ static int check_journal_end(struct reiserfs_transaction_handle *th,
}
journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
- if (atomic_read(&(journal->j_wcount)) > 0) { /* <= 0 is allowed. unmounting might not call begin */
- atomic_dec(&(journal->j_wcount));
- }
+ /* <= 0 is allowed. unmounting might not call begin */
+ if (atomic_read(&journal->j_wcount) > 0)
+ atomic_dec(&journal->j_wcount);
- /* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released
- ** will be dealt with by next transaction that actually writes something, but should be taken
- ** care of in this trans
+ /*
+ * BUG: deal with the case where j_len is 0, but blocks freed by
+ * earlier operations still need to be released. That will be dealt
+ * with by the next transaction that actually writes something, but
+ * it should be taken care of in this trans
*/
BUG_ON(journal->j_len == 0);
- /* if wcount > 0, and we are called to with flush or commit_now,
- ** we wait on j_join_wait. We will wake up when the last writer has
- ** finished the transaction, and started it on its way to the disk.
- ** Then, we flush the commit or journal list, and just return 0
- ** because the rest of journal end was already done for this transaction.
+ /*
+ * if wcount > 0, and we are called with flush or commit_now,
+ * we wait on j_join_wait. We will wake up when the last writer has
+ * finished the transaction, and started it on its way to the disk.
+ * Then, we flush the commit or journal list, and just return 0
+ * because the rest of journal end was already done for this
+ * transaction.
*/
- if (atomic_read(&(journal->j_wcount)) > 0) {
+ if (atomic_read(&journal->j_wcount) > 0) {
if (flush || commit_now) {
unsigned trans_id;
@@ -3544,27 +3650,30 @@ static int check_journal_end(struct reiserfs_transaction_handle *th,
trans_id = jl->j_trans_id;
if (wait_on_commit)
jl->j_state |= LIST_COMMIT_PENDING;
- atomic_set(&(journal->j_jlock), 1);
+ atomic_set(&journal->j_jlock, 1);
if (flush) {
journal->j_next_full_flush = 1;
}
unlock_journal(sb);
- /* sleep while the current transaction is still j_jlocked */
+ /*
+ * sleep while the current transaction is
+ * still j_jlocked
+ */
while (journal->j_trans_id == trans_id) {
if (atomic_read(&journal->j_jlock)) {
queue_log_writer(sb);
} else {
lock_journal(sb);
if (journal->j_trans_id == trans_id) {
- atomic_set(&(journal->j_jlock),
+ atomic_set(&journal->j_jlock,
1);
}
unlock_journal(sb);
}
}
BUG_ON(journal->j_trans_id == trans_id);
-
+
if (commit_now
&& journal_list_still_alive(sb, trans_id)
&& wait_on_commit) {
@@ -3584,7 +3693,7 @@ static int check_journal_end(struct reiserfs_transaction_handle *th,
}
/* don't batch when someone is waiting on j_join_wait */
/* don't batch when syncing the commit or flushing the whole trans */
- if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock)))
+ if (!(journal->j_must_wait > 0) && !(atomic_read(&journal->j_jlock))
&& !flush && !commit_now && (journal->j_len < journal->j_max_batch)
&& journal->j_len_alloc < journal->j_max_batch
&& journal->j_cnode_free > (journal->j_trans_max * 3)) {
@@ -3602,19 +3711,22 @@ static int check_journal_end(struct reiserfs_transaction_handle *th,
}
/*
-** Does all the work that makes deleting blocks safe.
-** when deleting a block mark BH_JNew, just remove it from the current transaction, clean it's buffer_head and move on.
-**
-** otherwise:
-** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes
-** before this transaction has finished.
-**
-** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers. That will prevent any old transactions with
-** this block from trying to flush to the real location. Since we aren't removing the cnode from the journal_list_hash,
-** the block can't be reallocated yet.
-**
-** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
-*/
+ * Does all the work that makes deleting blocks safe.
+ * when deleting a block marked BH_JNew, just remove it from the current
+ * transaction, clean its buffer_head and move on.
+ *
+ * otherwise:
+ * set a bit for the block in the journal bitmap. That will prevent it from
+ * being allocated for unformatted nodes before this transaction has finished.
+ *
+ * mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers.
+ * That will prevent any old transactions with this block from trying to flush
+ * to the real location. Since we aren't removing the cnode from the
+ * journal_list_hash, the block can't be reallocated yet.
+ *
+ * Then remove it from the current transaction, decrementing any counters and
+ * filing it on the clean list.
+ */
int journal_mark_freed(struct reiserfs_transaction_handle *th,
struct super_block *sb, b_blocknr_t blocknr)
{
@@ -3637,7 +3749,10 @@ int journal_mark_freed(struct reiserfs_transaction_handle *th,
reiserfs_clean_and_file_buffer(bh);
cleaned = remove_from_transaction(sb, blocknr, cleaned);
} else {
- /* set the bit for this block in the journal bitmap for this transaction */
+ /*
+ * set the bit for this block in the journal bitmap
+ * for this transaction
+ */
jb = journal->j_current_jl->j_list_bitmap;
if (!jb) {
reiserfs_panic(sb, "journal-1702",
@@ -3653,17 +3768,22 @@ int journal_mark_freed(struct reiserfs_transaction_handle *th,
}
cleaned = remove_from_transaction(sb, blocknr, cleaned);
- /* find all older transactions with this block, make sure they don't try to write it out */
+ /*
+ * find all older transactions with this block,
+ * make sure they don't try to write it out
+ */
cn = get_journal_hash_dev(sb, journal->j_list_hash_table,
blocknr);
while (cn) {
if (sb == cn->sb && blocknr == cn->blocknr) {
set_bit(BLOCK_FREED, &cn->state);
if (cn->bh) {
+ /*
+ * remove_from_transaction will brelse
+ * the buffer if it was in the current
+ * trans
+ */
if (!cleaned) {
- /* remove_from_transaction will brelse the buffer if it was
- ** in the current trans
- */
clear_buffer_journal_dirty(cn->
bh);
clear_buffer_dirty(cn->bh);
@@ -3672,16 +3792,19 @@ int journal_mark_freed(struct reiserfs_transaction_handle *th,
cleaned = 1;
put_bh(cn->bh);
if (atomic_read
- (&(cn->bh->b_count)) < 0) {
+ (&cn->bh->b_count) < 0) {
reiserfs_warning(sb,
"journal-2138",
"cn->bh->b_count < 0");
}
}
- if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */
- atomic_dec(&
- (cn->jlist->
- j_nonzerolen));
+ /*
+ * since we are clearing the bh,
+ * we MUST dec nonzerolen
+ */
+ if (cn->jlist) {
+ atomic_dec(&cn->jlist->
+ j_nonzerolen);
}
cn->bh = NULL;
}
@@ -3714,10 +3837,16 @@ static int __commit_trans_jl(struct inode *inode, unsigned long id,
struct reiserfs_journal *journal = SB_JOURNAL(sb);
int ret = 0;
- /* is it from the current transaction, or from an unknown transaction? */
+ /*
+ * is it from the current transaction,
+ * or from an unknown transaction?
+ */
if (id == journal->j_trans_id) {
jl = journal->j_current_jl;
- /* try to let other writers come in and grow this transaction */
+ /*
+ * try to let other writers come in and
+ * grow this transaction
+ */
let_transaction_grow(sb, id);
if (journal->j_trans_id != id) {
goto flush_commit_only;
@@ -3731,21 +3860,22 @@ static int __commit_trans_jl(struct inode *inode, unsigned long id,
if (journal->j_trans_id != id) {
reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
1);
- journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));
- ret = journal_end(&th, sb, 1);
+ journal_mark_dirty(&th, SB_BUFFER_WITH_SB(sb));
+ ret = journal_end(&th);
goto flush_commit_only;
}
- ret = journal_end_sync(&th, sb, 1);
+ ret = journal_end_sync(&th);
if (!ret)
ret = 1;
} else {
- /* this gets tricky, we have to make sure the journal list in
+ /*
+ * this gets tricky, we have to make sure the journal list in
* the inode still exists. We know the list is still around
* if we've got a larger transaction id than the oldest list
*/
- flush_commit_only:
+flush_commit_only:
if (journal_list_still_alive(inode->i_sb, id)) {
/*
* we only set ret to 1 when we know for sure
@@ -3768,7 +3898,8 @@ int reiserfs_commit_for_inode(struct inode *inode)
unsigned int id = REISERFS_I(inode)->i_trans_id;
struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;
- /* for the whole inode, assume unset id means it was
+ /*
+ * for the whole inode, assume unset id means it was
* changed in the current transaction. More conservative
*/
if (!id || !jl) {
@@ -3806,12 +3937,11 @@ void reiserfs_restore_prepared_buffer(struct super_block *sb,
extern struct tree_balance *cur_tb;
/*
-** before we can change a metadata block, we have to make sure it won't
-** be written to disk while we are altering it. So, we must:
-** clean it
-** wait on it.
-**
-*/
+ * before we can change a metadata block, we have to make sure it won't
+ * be written to disk while we are altering it. So, we must:
+ * clean it
+ * wait on it.
+ */
int reiserfs_prepare_for_journal(struct super_block *sb,
struct buffer_head *bh, int wait)
{
@@ -3832,19 +3962,18 @@ int reiserfs_prepare_for_journal(struct super_block *sb,
}
/*
-** long and ugly. If flush, will not return until all commit
-** blocks and all real buffers in the trans are on disk.
-** If no_async, won't return until all commit blocks are on disk.
-**
-** keep reading, there are comments as you go along
-**
-** If the journal is aborted, we just clean up. Things like flushing
-** journal lists, etc just won't happen.
-*/
-static int do_journal_end(struct reiserfs_transaction_handle *th,
- struct super_block *sb, unsigned long nblocks,
- int flags)
+ * long and ugly. If flush, will not return until all commit
+ * blocks and all real buffers in the trans are on disk.
+ * If no_async, won't return until all commit blocks are on disk.
+ *
+ * keep reading, there are comments as you go along
+ *
+ * If the journal is aborted, we just clean up. Things like flushing
+ * journal lists, etc just won't happen.
+ */
+static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
{
+ struct super_block *sb = th->t_super;
struct reiserfs_journal *journal = SB_JOURNAL(sb);
struct reiserfs_journal_cnode *cn, *next, *jl_cn;
struct reiserfs_journal_cnode *last_cn = NULL;
@@ -3866,9 +3995,12 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
BUG_ON(th->t_refcount > 1);
BUG_ON(!th->t_trans_id);
+ BUG_ON(!th->t_super);
- /* protect flush_older_commits from doing mistakes if the
- transaction ID counter gets overflowed. */
+ /*
+ * protect flush_older_commits from doing mistakes if the
+ * transaction ID counter gets overflowed.
+ */
if (th->t_trans_id == ~0U)
flags |= FLUSH_ALL | COMMIT_NOW | WAIT;
flush = flags & FLUSH_ALL;
@@ -3879,7 +4011,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
if (journal->j_len == 0) {
reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
1);
- journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb));
+ journal_mark_dirty(th, SB_BUFFER_WITH_SB(sb));
}
lock_journal(sb);
@@ -3892,10 +4024,12 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
wait_on_commit = 1;
}
- /* check_journal_end locks the journal, and unlocks if it does not return 1
- ** it tells us if we should continue with the journal_end, or just return
+ /*
+ * check_journal_end locks the journal, and unlocks if it does
+ * not return 1. It tells us if we should continue with the
+ * journal_end, or just return
*/
- if (!check_journal_end(th, sb, nblocks, flags)) {
+ if (!check_journal_end(th, flags)) {
reiserfs_schedule_old_flush(sb);
wake_queued_writers(sb);
reiserfs_async_progress_wait(sb);
@@ -3908,19 +4042,23 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
}
/*
- ** j must wait means we have to flush the log blocks, and the real blocks for
- ** this transaction
+ * j_must_wait means we have to flush the log blocks, and the
+ * real blocks for this transaction
*/
if (journal->j_must_wait > 0) {
flush = 1;
}
#ifdef REISERFS_PREALLOCATE
- /* quota ops might need to nest, setup the journal_info pointer for them
- * and raise the refcount so that it is > 0. */
+ /*
+ * quota ops might need to nest, setup the journal_info pointer
+ * for them and raise the refcount so that it is > 0.
+ */
current->journal_info = th;
th->t_refcount++;
- reiserfs_discard_all_prealloc(th); /* it should not involve new blocks into
- * the transaction */
+
+ /* it should not involve new blocks into the transaction */
+ reiserfs_discard_all_prealloc(th);
+
th->t_refcount--;
current->journal_info = th->t_handle_save;
#endif
@@ -3936,7 +4074,10 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
set_desc_trans_id(desc, journal->j_trans_id);
- /* setup commit block. Don't write (keep it clean too) this one until after everyone else is written */
+ /*
+ * setup commit block. Don't write (keep it clean too) this one
+ * until after everyone else is written
+ */
c_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
((journal->j_start + journal->j_len +
1) % SB_ONDISK_JOURNAL_SIZE(sb)));
@@ -3948,7 +4089,8 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
/* init this journal list */
jl = journal->j_current_jl;
- /* we lock the commit before doing anything because
+ /*
+ * we lock the commit before doing anything because
* we want to make sure nobody tries to run flush_commit_list until
* the new transaction is fully setup, and we've already flushed the
* ordered bh list
@@ -3968,9 +4110,10 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
atomic_set(&jl->j_commit_left, journal->j_len + 2);
jl->j_realblock = NULL;
- /* The ENTIRE FOR LOOP MUST not cause schedule to occur.
- ** for each real block, add it to the journal list hash,
- ** copy into real block index array in the commit or desc block
+ /*
+ * The ENTIRE FOR LOOP MUST not cause schedule to occur.
+ * for each real block, add it to the journal list hash,
+ * copy into real block index array in the commit or desc block
*/
trans_half = journal_trans_half(sb->s_blocksize);
for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
@@ -3989,9 +4132,10 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
last_cn->next = jl_cn;
}
last_cn = jl_cn;
- /* make sure the block we are trying to log is not a block
- of journal or reserved area */
-
+ /*
+ * make sure the block we are trying to log
+ * is not a block of journal or reserved area
+ */
if (is_block_in_log_or_reserved_area
(sb, cn->bh->b_blocknr)) {
reiserfs_panic(sb, "journal-2332",
@@ -4021,19 +4165,26 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
set_desc_trans_id(desc, journal->j_trans_id);
set_commit_trans_len(commit, journal->j_len);
- /* special check in case all buffers in the journal were marked for not logging */
+ /*
+ * special check in case all buffers in the journal
+ * were marked for not logging
+ */
BUG_ON(journal->j_len == 0);
- /* we're about to dirty all the log blocks, mark the description block
+ /*
+ * we're about to dirty all the log blocks, mark the description block
* dirty now too. Don't mark the commit block dirty until all the
* others are on disk
*/
mark_buffer_dirty(d_bh);
- /* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
+ /*
+ * first data block is j_start + 1, so add one to
+ * cur_write_start wherever you use it
+ */
cur_write_start = journal->j_start;
cn = journal->j_first;
- jindex = 1; /* start at one so we don't get the desc again */
+ jindex = 1; /* start at one so we don't get the desc again */
while (cn) {
clear_buffer_journal_new(cn->bh);
/* copy all the real blocks into log area. dirty log blocks */
@@ -4059,7 +4210,10 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
set_buffer_journal_dirty(cn->bh);
clear_buffer_journaled(cn->bh);
} else {
- /* JDirty cleared sometime during transaction. don't log this one */
+ /*
+ * JDirty cleared sometime during transaction.
+ * don't log this one
+ */
reiserfs_warning(sb, "journal-2048",
"BAD, buffer in journal hash, "
"but not JDirty!");
@@ -4071,9 +4225,10 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
reiserfs_cond_resched(sb);
}
- /* we are done with both the c_bh and d_bh, but
- ** c_bh must be written after all other commit blocks,
- ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
+ /*
+ * we are done with both the c_bh and d_bh, but
+ * c_bh must be written after all other commit blocks,
+ * so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
*/
journal->j_current_jl = alloc_journal_list(sb);
@@ -4088,7 +4243,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
journal->j_start =
(journal->j_start + journal->j_len +
2) % SB_ONDISK_JOURNAL_SIZE(sb);
- atomic_set(&(journal->j_wcount), 0);
+ atomic_set(&journal->j_wcount, 0);
journal->j_bcount = 0;
journal->j_last = NULL;
journal->j_first = NULL;
@@ -4104,15 +4259,18 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
journal->j_next_async_flush = 0;
init_journal_hash(sb);
- // make sure reiserfs_add_jh sees the new current_jl before we
- // write out the tails
+ /*
+ * make sure reiserfs_add_jh sees the new current_jl before we
+ * write out the tails
+ */
smp_mb();
- /* tail conversion targets have to hit the disk before we end the
+ /*
+ * tail conversion targets have to hit the disk before we end the
* transaction. Otherwise a later transaction might repack the tail
- * before this transaction commits, leaving the data block unflushed and
- * clean, if we crash before the later transaction commits, the data block
- * is lost.
+	 * before this transaction commits, leaving the data block unflushed
+	 * and clean. If we crash before the later transaction commits, the
+	 * data block is lost.
*/
if (!list_empty(&jl->j_tail_bh_list)) {
depth = reiserfs_write_unlock_nested(sb);
@@ -4123,24 +4281,27 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
BUG_ON(!list_empty(&jl->j_tail_bh_list));
mutex_unlock(&jl->j_commit_mutex);
- /* honor the flush wishes from the caller, simple commits can
- ** be done outside the journal lock, they are done below
- **
- ** if we don't flush the commit list right now, we put it into
- ** the work queue so the people waiting on the async progress work
- ** queue don't wait for this proc to flush journal lists and such.
+ /*
+ * honor the flush wishes from the caller, simple commits can
+ * be done outside the journal lock, they are done below
+ *
+ * if we don't flush the commit list right now, we put it into
+ * the work queue so the people waiting on the async progress work
+ * queue don't wait for this proc to flush journal lists and such.
*/
if (flush) {
flush_commit_list(sb, jl, 1);
flush_journal_list(sb, jl, 1);
} else if (!(jl->j_state & LIST_COMMIT_PENDING))
- queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);
+ queue_delayed_work(REISERFS_SB(sb)->commit_wq,
+ &journal->j_work, HZ / 10);
- /* if the next transaction has any chance of wrapping, flush
- ** transactions that might get overwritten. If any journal lists are very
- ** old flush them as well.
+ /*
+ * if the next transaction has any chance of wrapping, flush
+ * transactions that might get overwritten. If any journal lists
+	 * are very old, flush them as well.
*/
- first_jl:
+first_jl:
list_for_each_safe(entry, safe, &journal->j_journal_list) {
temp_jl = JOURNAL_LIST_ENTRY(entry);
if (journal->j_start <= temp_jl->j_start) {
@@ -4151,8 +4312,10 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
} else if ((journal->j_start +
journal->j_trans_max + 1) <
SB_ONDISK_JOURNAL_SIZE(sb)) {
- /* if we don't cross into the next transaction and we don't
- * wrap, there is no way we can overlap any later transactions
+ /*
+ * if we don't cross into the next
+ * transaction and we don't wrap, there is
+ * no way we can overlap any later transactions
* break now
*/
break;
@@ -4166,10 +4329,12 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
flush_used_journal_lists(sb, temp_jl);
goto first_jl;
} else {
- /* we don't overlap anything from out start to the end of the
- * log, and our wrapped portion doesn't overlap anything at
- * the start of the log. We can break
- */
+ /*
+			 * we don't overlap anything from our start
+ * to the end of the log, and our wrapped
+ * portion doesn't overlap anything at
+ * the start of the log. We can break
+ */
break;
}
}
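/*
 * [Editor's aside -- illustration only, not part of the patch.]
 * The first_jl loop above asks whether the next transaction, which may
 * wrap around the ring, could overwrite an older journal list.  A
 * simplified model of that predicate (not the exact kernel code; all
 * numbers are made up):
 */
#include <stdio.h>

static int may_overlap(unsigned long next_start, unsigned long trans_max,
		       unsigned long jsize, unsigned long old_start)
{
	unsigned long next_end = next_start + trans_max + 1;

	if (next_start <= old_start)		/* old list ahead of us */
		return next_end >= old_start;
	if (next_end < jsize)			/* no wrap: out of reach */
		return 0;
	return (next_end % jsize) >= old_start;	/* wrapped tail check */
}

int main(void)
{
	/* ring of 8192 blocks, next transaction at most 1024 blocks */
	printf("%d\n", may_overlap(100, 1024, 8192, 512));  /* 1: in range */
	printf("%d\n", may_overlap(4000, 1024, 8192, 512)); /* 0: no wrap */
	printf("%d\n", may_overlap(8000, 1024, 8192, 512)); /* 1: wraps */
	return 0;
}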
@@ -4183,23 +4348,25 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
"could not get a list bitmap");
}
- atomic_set(&(journal->j_jlock), 0);
+ atomic_set(&journal->j_jlock, 0);
unlock_journal(sb);
/* wake up any body waiting to join. */
clear_bit(J_WRITERS_QUEUED, &journal->j_state);
- wake_up(&(journal->j_join_wait));
+ wake_up(&journal->j_join_wait);
if (!flush && wait_on_commit &&
journal_list_still_alive(sb, commit_trans_id)) {
flush_commit_list(sb, jl, 1);
}
- out:
+out:
reiserfs_check_lock_depth(sb, "journal end2");
memset(th, 0, sizeof(*th));
- /* Re-set th->t_super, so we can properly keep track of how many
+ /*
+ * Re-set th->t_super, so we can properly keep track of how many
* persistent transactions there are. We need to do this so if this
- * call is part of a failed restart_transaction, we can free it later */
+ * call is part of a failed restart_transaction, we can free it later
+ */
th->t_super = sb;
return journal->j_errno;
diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c
index 79e5a8b4c22..d6744c8b24e 100644
--- a/fs/reiserfs/lbalance.c
+++ b/fs/reiserfs/lbalance.c
@@ -8,46 +8,42 @@
#include "reiserfs.h"
#include <linux/buffer_head.h>
-/* these are used in do_balance.c */
-
-/* leaf_move_items
- leaf_shift_left
- leaf_shift_right
- leaf_delete_items
- leaf_insert_into_buf
- leaf_paste_in_buffer
- leaf_cut_from_buffer
- leaf_paste_entries
- */
-
-/* copy copy_count entries from source directory item to dest buffer (creating new item if needed) */
+/*
+ * copy copy_count entries from source directory item to dest buffer
+ * (creating new item if needed)
+ */
static void leaf_copy_dir_entries(struct buffer_info *dest_bi,
struct buffer_head *source, int last_first,
int item_num, int from, int copy_count)
{
struct buffer_head *dest = dest_bi->bi_bh;
- int item_num_in_dest; /* either the number of target item,
- or if we must create a new item,
- the number of the item we will
- create it next to */
+ /*
+ * either the number of target item, or if we must create a
+ * new item, the number of the item we will create it next to
+ */
+ int item_num_in_dest;
+
struct item_head *ih;
struct reiserfs_de_head *deh;
int copy_records_len; /* length of all records in item to be copied */
char *records;
- ih = B_N_PITEM_HEAD(source, item_num);
+ ih = item_head(source, item_num);
RFALSE(!is_direntry_le_ih(ih), "vs-10000: item must be directory item");
- /* length of all record to be copied and first byte of the last of them */
+ /*
+	 * length of all records to be copied and first byte of
+ * the last of them
+ */
deh = B_I_DEH(source, ih);
if (copy_count) {
- copy_records_len = (from ? deh_location(&(deh[from - 1])) :
+ copy_records_len = (from ? deh_location(&deh[from - 1]) :
ih_item_len(ih)) -
- deh_location(&(deh[from + copy_count - 1]));
+ deh_location(&deh[from + copy_count - 1]);
records =
source->b_data + ih_location(ih) +
- deh_location(&(deh[from + copy_count - 1]));
+ deh_location(&deh[from + copy_count - 1]);
} else {
copy_records_len = 0;
records = NULL;
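/*
 * [Editor's aside -- illustration only, not part of the patch.]
 * Directory items keep the entry heads at the front and the records at
 * the tail, so deh_location decreases as the entry index grows.  Worked
 * example of the copy_records_len computation above (locations made up):
 */
#include <stdio.h>

int main(void)
{
	int item_len = 120;			/* ih_item_len(ih) */
	int loc[4] = { 104, 88, 72, 56 };	/* deh_location per entry */
	int from = 1, copy_count = 2;

	/* bytes used by the records of entries from..from+copy_count-1 */
	int copy_records_len = (from ? loc[from - 1] : item_len)
				- loc[from + copy_count - 1];

	printf("copy_records_len = %d\n", copy_records_len);	/* 32 */
	return 0;
}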
@@ -59,12 +55,15 @@ static void leaf_copy_dir_entries(struct buffer_info *dest_bi,
LAST_TO_FIRST) ? ((B_NR_ITEMS(dest)) ? 0 : -1) : (B_NR_ITEMS(dest)
- 1);
- /* if there are no items in dest or the first/last item in dest is not item of the same directory */
+ /*
+ * if there are no items in dest or the first/last item in
+	 * dest is not an item of the same directory
+ */
if ((item_num_in_dest == -1) ||
(last_first == FIRST_TO_LAST && le_ih_k_offset(ih) == DOT_OFFSET) ||
(last_first == LAST_TO_FIRST
&& comp_short_le_keys /*COMP_SHORT_KEYS */ (&ih->ih_key,
- B_N_PKEY(dest,
+ leaf_key(dest,
item_num_in_dest))))
{
/* create new item in dest */
@@ -80,16 +79,22 @@ static void leaf_copy_dir_entries(struct buffer_info *dest_bi,
if (last_first == LAST_TO_FIRST) {
/* form key by the following way */
- if (from < I_ENTRY_COUNT(ih)) {
+ if (from < ih_entry_count(ih)) {
set_le_ih_k_offset(&new_ih,
- deh_offset(&(deh[from])));
- /*memcpy (&new_ih.ih_key.k_offset, &deh[from].deh_offset, SHORT_KEY_SIZE); */
+ deh_offset(&deh[from]));
} else {
- /* no entries will be copied to this item in this function */
+ /*
+ * no entries will be copied to this
+ * item in this function
+ */
set_le_ih_k_offset(&new_ih, U32_MAX);
- /* this item is not yet valid, but we want I_IS_DIRECTORY_ITEM to return 1 for it, so we -1 */
+ /*
+ * this item is not yet valid, but we
+ * want I_IS_DIRECTORY_ITEM to return 1
+ * for it, so we -1
+ */
}
- set_le_key_k_type(KEY_FORMAT_3_5, &(new_ih.ih_key),
+ set_le_key_k_type(KEY_FORMAT_3_5, &new_ih.ih_key,
TYPE_DIRENTRY);
}
@@ -113,36 +118,44 @@ static void leaf_copy_dir_entries(struct buffer_info *dest_bi,
leaf_paste_entries(dest_bi, item_num_in_dest,
(last_first ==
- FIRST_TO_LAST) ? I_ENTRY_COUNT(B_N_PITEM_HEAD(dest,
+ FIRST_TO_LAST) ? ih_entry_count(item_head(dest,
item_num_in_dest))
: 0, copy_count, deh + from, records,
DEH_SIZE * copy_count + copy_records_len);
}
-/* Copy the first (if last_first == FIRST_TO_LAST) or last (last_first == LAST_TO_FIRST) item or
- part of it or nothing (see the return 0 below) from SOURCE to the end
- (if last_first) or beginning (!last_first) of the DEST */
+/*
+ * Copy the first (if last_first == FIRST_TO_LAST) or last
+ * (last_first == LAST_TO_FIRST) item or part of it or nothing
+ * (see the return 0 below) from SOURCE to the end (if last_first)
+ * or beginning (!last_first) of the DEST
+ */
/* returns 1 if anything was copied, else 0 */
static int leaf_copy_boundary_item(struct buffer_info *dest_bi,
struct buffer_head *src, int last_first,
int bytes_or_entries)
{
struct buffer_head *dest = dest_bi->bi_bh;
- int dest_nr_item, src_nr_item; /* number of items in the source and destination buffers */
+ /* number of items in the source and destination buffers */
+ int dest_nr_item, src_nr_item;
struct item_head *ih;
struct item_head *dih;
dest_nr_item = B_NR_ITEMS(dest);
+ /*
+ * if ( DEST is empty or first item of SOURCE and last item of
+ * DEST are the items of different objects or of different types )
+ * then there is no need to treat this item differently from the
+ * other items that we copy, so we return
+ */
if (last_first == FIRST_TO_LAST) {
- /* if ( DEST is empty or first item of SOURCE and last item of DEST are the items of different objects
- or of different types ) then there is no need to treat this item differently from the other items
- that we copy, so we return */
- ih = B_N_PITEM_HEAD(src, 0);
- dih = B_N_PITEM_HEAD(dest, dest_nr_item - 1);
+ ih = item_head(src, 0);
+ dih = item_head(dest, dest_nr_item - 1);
+
+ /* there is nothing to merge */
if (!dest_nr_item
- || (!op_is_left_mergeable(&(ih->ih_key), src->b_size)))
- /* there is nothing to merge */
+ || (!op_is_left_mergeable(&ih->ih_key, src->b_size)))
return 0;
RFALSE(!ih_item_len(ih),
@@ -157,8 +170,11 @@ static int leaf_copy_boundary_item(struct buffer_info *dest_bi,
return 1;
}
- /* copy part of the body of the first item of SOURCE to the end of the body of the last item of the DEST
- part defined by 'bytes_or_entries'; if bytes_or_entries == -1 copy whole body; don't create new item header
+ /*
+ * copy part of the body of the first item of SOURCE
+	 * to the end of the body of the last item of the DEST;
+	 * part defined by 'bytes_or_entries'; if bytes_or_entries
+ * == -1 copy whole body; don't create new item header
*/
if (bytes_or_entries == -1)
bytes_or_entries = ih_item_len(ih);
@@ -176,11 +192,13 @@ static int leaf_copy_boundary_item(struct buffer_info *dest_bi,
}
#endif
- /* merge first item (or its part) of src buffer with the last
- item of dest buffer. Both are of the same file */
+ /*
+ * merge first item (or its part) of src buffer with the last
+ * item of dest buffer. Both are of the same file
+ */
leaf_paste_in_buffer(dest_bi,
dest_nr_item - 1, ih_item_len(dih),
- bytes_or_entries, B_I_PITEM(src, ih), 0);
+ bytes_or_entries, ih_item_body(src, ih), 0);
if (is_indirect_le_ih(dih)) {
RFALSE(get_ih_free_space(dih),
@@ -195,19 +213,23 @@ static int leaf_copy_boundary_item(struct buffer_info *dest_bi,
/* copy boundary item to right (last_first == LAST_TO_FIRST) */
- /* ( DEST is empty or last item of SOURCE and first item of DEST
- are the items of different object or of different types )
+ /*
+ * (DEST is empty or last item of SOURCE and first item of DEST
+ * are the items of different object or of different types)
*/
src_nr_item = B_NR_ITEMS(src);
- ih = B_N_PITEM_HEAD(src, src_nr_item - 1);
- dih = B_N_PITEM_HEAD(dest, 0);
+ ih = item_head(src, src_nr_item - 1);
+ dih = item_head(dest, 0);
- if (!dest_nr_item || !op_is_left_mergeable(&(dih->ih_key), src->b_size))
+ if (!dest_nr_item || !op_is_left_mergeable(&dih->ih_key, src->b_size))
return 0;
if (is_direntry_le_ih(ih)) {
+ /*
+ * bytes_or_entries = entries number in last
+ * item body of SOURCE
+ */
if (bytes_or_entries == -1)
- /* bytes_or_entries = entries number in last item body of SOURCE */
bytes_or_entries = ih_entry_count(ih);
leaf_copy_dir_entries(dest_bi, src, LAST_TO_FIRST,
@@ -217,9 +239,11 @@ static int leaf_copy_boundary_item(struct buffer_info *dest_bi,
return 1;
}
- /* copy part of the body of the last item of SOURCE to the begin of the body of the first item of the DEST;
- part defined by 'bytes_or_entries'; if byte_or_entriess == -1 copy whole body; change first item key of the DEST;
- don't create new item header
+ /*
+ * copy part of the body of the last item of SOURCE to the
+	 * beginning of the body of the first item of the DEST; part defined
+	 * by 'bytes_or_entries'; if bytes_or_entries == -1 copy whole body;
+ * change first item key of the DEST; don't create new item header
*/
RFALSE(is_indirect_le_ih(ih) && get_ih_free_space(ih),
@@ -270,15 +294,18 @@ static int leaf_copy_boundary_item(struct buffer_info *dest_bi,
}
leaf_paste_in_buffer(dest_bi, 0, 0, bytes_or_entries,
- B_I_PITEM(src,
+ ih_item_body(src,
ih) + ih_item_len(ih) - bytes_or_entries,
0);
return 1;
}
-/* copy cpy_mun items from buffer src to buffer dest
- * last_first == FIRST_TO_LAST means, that we copy cpy_num items beginning from first-th item in src to tail of dest
- * last_first == LAST_TO_FIRST means, that we copy cpy_num items beginning from first-th item in src to head of dest
+/*
+ * copy cpy_num items from buffer src to buffer dest
+ * last_first == FIRST_TO_LAST means, that we copy cpy_num items beginning
+ * from first-th item in src to tail of dest
+ * last_first == LAST_TO_FIRST means, that we copy cpy_num items beginning
+ * from first-th item in src to head of dest
*/
static void leaf_copy_items_entirely(struct buffer_info *dest_bi,
struct buffer_head *src, int last_first,
@@ -311,11 +338,14 @@ static void leaf_copy_items_entirely(struct buffer_info *dest_bi,
nr = blkh_nr_item(blkh);
free_space = blkh_free_space(blkh);
- /* we will insert items before 0-th or nr-th item in dest buffer. It depends of last_first parameter */
+ /*
+ * we will insert items before 0-th or nr-th item in dest buffer.
+	 * It depends on the last_first parameter
+ */
dest_before = (last_first == LAST_TO_FIRST) ? 0 : nr;
/* location of head of first new item */
- ih = B_N_PITEM_HEAD(dest, dest_before);
+ ih = item_head(dest, dest_before);
RFALSE(blkh_free_space(blkh) < cpy_num * IH_SIZE,
"vs-10140: not enough free space for headers %d (needed %d)",
@@ -325,7 +355,7 @@ static void leaf_copy_items_entirely(struct buffer_info *dest_bi,
memmove(ih + cpy_num, ih, (nr - dest_before) * IH_SIZE);
/* copy item headers */
- memcpy(ih, B_N_PITEM_HEAD(src, first), cpy_num * IH_SIZE);
+ memcpy(ih, item_head(src, first), cpy_num * IH_SIZE);
free_space -= (IH_SIZE * cpy_num);
set_blkh_free_space(blkh, free_space);
@@ -338,8 +368,8 @@ static void leaf_copy_items_entirely(struct buffer_info *dest_bi,
}
/* prepare space for items */
- last_loc = ih_location(&(ih[nr + cpy_num - 1 - dest_before]));
- last_inserted_loc = ih_location(&(ih[cpy_num - 1]));
+ last_loc = ih_location(&ih[nr + cpy_num - 1 - dest_before]);
+ last_inserted_loc = ih_location(&ih[cpy_num - 1]);
/* check free space */
RFALSE(free_space < j - last_inserted_loc,
@@ -352,7 +382,8 @@ static void leaf_copy_items_entirely(struct buffer_info *dest_bi,
/* copy items */
memcpy(dest->b_data + last_inserted_loc,
- B_N_PITEM(src, (first + cpy_num - 1)), j - last_inserted_loc);
+ item_body(src, (first + cpy_num - 1)),
+ j - last_inserted_loc);
/* sizes, item number */
set_blkh_nr_item(blkh, nr + cpy_num);
@@ -376,8 +407,10 @@ static void leaf_copy_items_entirely(struct buffer_info *dest_bi,
}
}
-/* This function splits the (liquid) item into two items (useful when
- shifting part of an item into another node.) */
+/*
+ * This function splits the (liquid) item into two items (useful when
+ * shifting part of an item into another node.)
+ */
static void leaf_item_bottle(struct buffer_info *dest_bi,
struct buffer_head *src, int last_first,
int item_num, int cpy_bytes)
@@ -389,17 +422,22 @@ static void leaf_item_bottle(struct buffer_info *dest_bi,
"vs-10170: bytes == - 1 means: do not split item");
if (last_first == FIRST_TO_LAST) {
- /* if ( if item in position item_num in buffer SOURCE is directory item ) */
- ih = B_N_PITEM_HEAD(src, item_num);
+ /*
+		 * if ( item in position item_num in buffer SOURCE
+ * is directory item )
+ */
+ ih = item_head(src, item_num);
if (is_direntry_le_ih(ih))
leaf_copy_dir_entries(dest_bi, src, FIRST_TO_LAST,
item_num, 0, cpy_bytes);
else {
struct item_head n_ih;
- /* copy part of the body of the item number 'item_num' of SOURCE to the end of the DEST
- part defined by 'cpy_bytes'; create new item header; change old item_header (????);
- n_ih = new item_header;
+ /*
+ * copy part of the body of the item number 'item_num'
+ * of SOURCE to the end of the DEST part defined by
+ * 'cpy_bytes'; create new item header; change old
+ * item_header (????); n_ih = new item_header;
*/
memcpy(&n_ih, ih, IH_SIZE);
put_ih_item_len(&n_ih, cpy_bytes);
@@ -411,30 +449,36 @@ static void leaf_item_bottle(struct buffer_info *dest_bi,
set_ih_free_space(&n_ih, 0);
}
- RFALSE(op_is_left_mergeable(&(ih->ih_key), src->b_size),
+ RFALSE(op_is_left_mergeable(&ih->ih_key, src->b_size),
"vs-10190: bad mergeability of item %h", ih);
n_ih.ih_version = ih->ih_version; /* JDM Endian safe, both le */
leaf_insert_into_buf(dest_bi, B_NR_ITEMS(dest), &n_ih,
- B_N_PITEM(src, item_num), 0);
+ item_body(src, item_num), 0);
}
} else {
- /* if ( if item in position item_num in buffer SOURCE is directory item ) */
- ih = B_N_PITEM_HEAD(src, item_num);
+ /*
+		 * if ( item in position item_num in buffer
+ * SOURCE is directory item )
+ */
+ ih = item_head(src, item_num);
if (is_direntry_le_ih(ih))
leaf_copy_dir_entries(dest_bi, src, LAST_TO_FIRST,
item_num,
- I_ENTRY_COUNT(ih) - cpy_bytes,
+ ih_entry_count(ih) - cpy_bytes,
cpy_bytes);
else {
struct item_head n_ih;
- /* copy part of the body of the item number 'item_num' of SOURCE to the begin of the DEST
- part defined by 'cpy_bytes'; create new item header;
- n_ih = new item_header;
+ /*
+ * copy part of the body of the item number 'item_num'
+ * of SOURCE to the begin of the DEST part defined by
+ * 'cpy_bytes'; create new item header;
+ * n_ih = new item_header;
*/
memcpy(&n_ih, ih, SHORT_KEY_SIZE);
- n_ih.ih_version = ih->ih_version; /* JDM Endian safe, both le */
+ /* Endian safe, both le */
+ n_ih.ih_version = ih->ih_version;
if (is_direct_le_ih(ih)) {
set_le_ih_k_offset(&n_ih,
@@ -458,20 +502,22 @@ static void leaf_item_bottle(struct buffer_info *dest_bi,
/* set item length */
put_ih_item_len(&n_ih, cpy_bytes);
- n_ih.ih_version = ih->ih_version; /* JDM Endian safe, both le */
+ /* Endian safe, both le */
+ n_ih.ih_version = ih->ih_version;
leaf_insert_into_buf(dest_bi, 0, &n_ih,
- B_N_PITEM(src,
- item_num) +
- ih_item_len(ih) - cpy_bytes, 0);
+ item_body(src, item_num) +
+ ih_item_len(ih) - cpy_bytes, 0);
}
}
}
-/* If cpy_bytes equals minus one than copy cpy_num whole items from SOURCE to DEST.
- If cpy_bytes not equal to minus one than copy cpy_num-1 whole items from SOURCE to DEST.
- From last item copy cpy_num bytes for regular item and cpy_num directory entries for
- directory item. */
+/*
+ * If cpy_bytes equals minus one then copy cpy_num whole items from SOURCE
+ * to DEST. If cpy_bytes is not equal to minus one then copy cpy_num-1 whole
+ * items from SOURCE to DEST. From the last item copy cpy_bytes bytes for a
+ * regular item and cpy_bytes directory entries for a directory item.
+ */
static int leaf_copy_items(struct buffer_info *dest_bi, struct buffer_head *src,
int last_first, int cpy_num, int cpy_bytes)
{
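/*
 * [Editor's aside -- illustration only, not part of the patch.]
 * The cpy_bytes convention documented above, restated as a tiny
 * dispatcher; whole_items() and partial_item() are hypothetical
 * stand-ins for leaf_copy_items_entirely() and leaf_item_bottle():
 */
#include <stdio.h>

static void whole_items(int first, int count)
{
	printf("copy items %d..%d entirely\n", first, first + count - 1);
}

static void partial_item(int num, int bytes)
{
	printf("copy %d bytes (or entries) of item %d\n", bytes, num);
}

static void copy_items(int pos, int cpy_num, int cpy_bytes)
{
	if (cpy_bytes == -1) {			/* whole items only */
		whole_items(pos, cpy_num);
	} else {				/* last one is partial */
		whole_items(pos, cpy_num - 1);
		partial_item(pos + cpy_num - 1, cpy_bytes);
	}
}

int main(void)
{
	copy_items(3, 4, -1);	/* items 3..6 entirely */
	copy_items(3, 4, 100);	/* items 3..5 entirely, 100 bytes of 6 */
	return 0;
}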
@@ -498,22 +544,34 @@ static int leaf_copy_items(struct buffer_info *dest_bi, struct buffer_head *src,
else
bytes = -1;
- /* copy the first item or it part or nothing to the end of the DEST (i = leaf_copy_boundary_item(DEST,SOURCE,0,bytes)) */
+ /*
+		 * copy the first item or its part or nothing to the end of
+ * the DEST (i = leaf_copy_boundary_item(DEST,SOURCE,0,bytes))
+ */
i = leaf_copy_boundary_item(dest_bi, src, FIRST_TO_LAST, bytes);
cpy_num -= i;
if (cpy_num == 0)
return i;
pos += i;
if (cpy_bytes == -1)
- /* copy first cpy_num items starting from position 'pos' of SOURCE to end of DEST */
+ /*
+ * copy first cpy_num items starting from position
+ * 'pos' of SOURCE to end of DEST
+ */
leaf_copy_items_entirely(dest_bi, src, FIRST_TO_LAST,
pos, cpy_num);
else {
- /* copy first cpy_num-1 items starting from position 'pos-1' of the SOURCE to the end of the DEST */
+ /*
+ * copy first cpy_num-1 items starting from position
+ * 'pos-1' of the SOURCE to the end of the DEST
+ */
leaf_copy_items_entirely(dest_bi, src, FIRST_TO_LAST,
pos, cpy_num - 1);
- /* copy part of the item which number is cpy_num+pos-1 to the end of the DEST */
+ /*
+ * copy part of the item which number is
+ * cpy_num+pos-1 to the end of the DEST
+ */
leaf_item_bottle(dest_bi, src, FIRST_TO_LAST,
cpy_num + pos - 1, cpy_bytes);
}
@@ -525,7 +583,11 @@ static int leaf_copy_items(struct buffer_info *dest_bi, struct buffer_head *src,
else
bytes = -1;
- /* copy the last item or it part or nothing to the begin of the DEST (i = leaf_copy_boundary_item(DEST,SOURCE,1,bytes)); */
+ /*
+		 * copy the last item or its part or nothing to the
+		 * beginning of the DEST
+ * (i = leaf_copy_boundary_item(DEST,SOURCE,1,bytes));
+ */
i = leaf_copy_boundary_item(dest_bi, src, LAST_TO_FIRST, bytes);
cpy_num -= i;
@@ -534,15 +596,24 @@ static int leaf_copy_items(struct buffer_info *dest_bi, struct buffer_head *src,
pos = src_nr_item - cpy_num - i;
if (cpy_bytes == -1) {
- /* starting from position 'pos' copy last cpy_num items of SOURCE to begin of DEST */
+ /*
+ * starting from position 'pos' copy last cpy_num
+			 * items of SOURCE to the beginning of DEST
+ */
leaf_copy_items_entirely(dest_bi, src, LAST_TO_FIRST,
pos, cpy_num);
} else {
- /* copy last cpy_num-1 items starting from position 'pos+1' of the SOURCE to the begin of the DEST; */
+ /*
+ * copy last cpy_num-1 items starting from position
+			 * 'pos+1' of the SOURCE to the beginning of the DEST;
+ */
leaf_copy_items_entirely(dest_bi, src, LAST_TO_FIRST,
pos + 1, cpy_num - 1);
- /* copy part of the item which number is pos to the begin of the DEST */
+ /*
+			 * copy part of the item which number is pos to
+			 * the beginning of the DEST
+ */
leaf_item_bottle(dest_bi, src, LAST_TO_FIRST, pos,
cpy_bytes);
}
@@ -550,9 +621,11 @@ static int leaf_copy_items(struct buffer_info *dest_bi, struct buffer_head *src,
return i;
}
-/* there are types of coping: from S[0] to L[0], from S[0] to R[0],
- from R[0] to L[0]. for each of these we have to define parent and
- positions of destination and source buffers */
+/*
+ * there are three types of copying: from S[0] to L[0], from S[0] to R[0],
+ * from R[0] to L[0]. for each of these we have to define parent and
+ * positions of destination and source buffers
+ */
static void leaf_define_dest_src_infos(int shift_mode, struct tree_balance *tb,
struct buffer_info *dest_bi,
struct buffer_info *src_bi,
@@ -568,7 +641,9 @@ static void leaf_define_dest_src_infos(int shift_mode, struct tree_balance *tb,
src_bi->tb = tb;
src_bi->bi_bh = PATH_PLAST_BUFFER(tb->tb_path);
src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
- src_bi->bi_position = PATH_H_B_ITEM_ORDER(tb->tb_path, 0); /* src->b_item_order */
+
+ /* src->b_item_order */
+ src_bi->bi_position = PATH_H_B_ITEM_ORDER(tb->tb_path, 0);
dest_bi->tb = tb;
dest_bi->bi_bh = tb->L[0];
dest_bi->bi_parent = tb->FL[0];
@@ -633,8 +708,10 @@ static void leaf_define_dest_src_infos(int shift_mode, struct tree_balance *tb,
shift_mode, src_bi->bi_bh, dest_bi->bi_bh);
}
-/* copy mov_num items and mov_bytes of the (mov_num-1)th item to
- neighbor. Delete them from source */
+/*
+ * copy mov_num items and mov_bytes of the (mov_num-1)th item to
+ * neighbor. Delete them from source
+ */
int leaf_move_items(int shift_mode, struct tree_balance *tb, int mov_num,
int mov_bytes, struct buffer_head *Snew)
{
@@ -657,18 +734,24 @@ int leaf_move_items(int shift_mode, struct tree_balance *tb, int mov_num,
return ret_value;
}
-/* Shift shift_num items (and shift_bytes of last shifted item if shift_bytes != -1)
- from S[0] to L[0] and replace the delimiting key */
+/*
+ * Shift shift_num items (and shift_bytes of last shifted item if
+ * shift_bytes != -1) from S[0] to L[0] and replace the delimiting key
+ */
int leaf_shift_left(struct tree_balance *tb, int shift_num, int shift_bytes)
{
struct buffer_head *S0 = PATH_PLAST_BUFFER(tb->tb_path);
int i;
- /* move shift_num (and shift_bytes bytes) items from S[0] to left neighbor L[0] */
+ /*
+ * move shift_num (and shift_bytes bytes) items from S[0]
+ * to left neighbor L[0]
+ */
i = leaf_move_items(LEAF_FROM_S_TO_L, tb, shift_num, shift_bytes, NULL);
if (shift_num) {
- if (B_NR_ITEMS(S0) == 0) { /* number of items in S[0] == 0 */
+ /* number of items in S[0] == 0 */
+ if (B_NR_ITEMS(S0) == 0) {
RFALSE(shift_bytes != -1,
"vs-10270: S0 is empty now, but shift_bytes != -1 (%d)",
@@ -691,10 +774,10 @@ int leaf_shift_left(struct tree_balance *tb, int shift_num, int shift_bytes)
replace_key(tb, tb->CFL[0], tb->lkey[0], S0, 0);
RFALSE((shift_bytes != -1 &&
- !(is_direntry_le_ih(B_N_PITEM_HEAD(S0, 0))
- && !I_ENTRY_COUNT(B_N_PITEM_HEAD(S0, 0)))) &&
+ !(is_direntry_le_ih(item_head(S0, 0))
+ && !ih_entry_count(item_head(S0, 0)))) &&
(!op_is_left_mergeable
- (B_N_PKEY(S0, 0), S0->b_size)),
+ (leaf_key(S0, 0), S0->b_size)),
"vs-10280: item must be mergeable");
}
}
@@ -704,13 +787,18 @@ int leaf_shift_left(struct tree_balance *tb, int shift_num, int shift_bytes)
/* CLEANING STOPPED HERE */
-/* Shift shift_num (shift_bytes) items from S[0] to the right neighbor, and replace the delimiting key */
+/*
+ * Shift shift_num (shift_bytes) items from S[0] to the right neighbor,
+ * and replace the delimiting key
+ */
int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes)
{
- // struct buffer_head * S0 = PATH_PLAST_BUFFER (tb->tb_path);
int ret_value;
- /* move shift_num (and shift_bytes) items from S[0] to right neighbor R[0] */
+ /*
+ * move shift_num (and shift_bytes) items from S[0] to
+ * right neighbor R[0]
+ */
ret_value =
leaf_move_items(LEAF_FROM_S_TO_R, tb, shift_num, shift_bytes, NULL);
@@ -725,12 +813,16 @@ int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes)
static void leaf_delete_items_entirely(struct buffer_info *bi,
int first, int del_num);
-/* If del_bytes == -1, starting from position 'first' delete del_num items in whole in buffer CUR.
- If not.
- If last_first == 0. Starting from position 'first' delete del_num-1 items in whole. Delete part of body of
- the first item. Part defined by del_bytes. Don't delete first item header
- If last_first == 1. Starting from position 'first+1' delete del_num-1 items in whole. Delete part of body of
- the last item . Part defined by del_bytes. Don't delete last item header.
+/*
+ * If del_bytes == -1, starting from position 'first' delete del_num
+ * items in whole in buffer CUR.
+ * If not.
+ * If last_first == 0. Starting from position 'first' delete del_num-1
+ * items in whole. Delete part of body of the first item. Part defined by
+ * del_bytes. Don't delete first item header
+ * If last_first == 1. Starting from position 'first+1' delete del_num-1
+ * items in whole. Delete part of body of the last item. Part defined by
+ * del_bytes. Don't delete last item header.
*/
void leaf_delete_items(struct buffer_info *cur_bi, int last_first,
int first, int del_num, int del_bytes)
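/*
 * [Editor's aside -- illustration only, not part of the patch.]
 * The del_bytes/last_first convention documented above as a tiny model;
 * the helpers are hypothetical stand-ins, and 0/1 mirror FIRST_TO_LAST/
 * LAST_TO_FIRST as used in the comment:
 */
#include <stdio.h>

static void del_entirely(int first, int count)
{
	printf("delete items %d..%d entirely\n", first, first + count - 1);
}

static void cut_part(const char *which, int bytes)
{
	printf("cut %d bytes of the %s item, keep its header\n",
	       bytes, which);
}

static void delete_items(int last_first, int first, int del_num,
			 int del_bytes)
{
	if (del_bytes == -1) {
		del_entirely(first, del_num);
	} else if (last_first == 0) {		/* FIRST_TO_LAST */
		del_entirely(first, del_num - 1);
		cut_part("first", del_bytes);
	} else {				/* LAST_TO_FIRST */
		del_entirely(first + 1, del_num - 1);
		cut_part("last", del_bytes);
	}
}

int main(void)
{
	delete_items(0, 2, 3, -1);	/* items 2..4 entirely */
	delete_items(0, 2, 3, 64);	/* 2..3 entirely + 64 bytes */
	delete_items(1, 2, 3, 64);	/* 3..4 entirely + 64 bytes */
	return 0;
}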
@@ -761,32 +853,43 @@ void leaf_delete_items(struct buffer_info *cur_bi, int last_first,
leaf_delete_items_entirely(cur_bi, first, del_num);
else {
if (last_first == FIRST_TO_LAST) {
- /* delete del_num-1 items beginning from item in position first */
+ /*
+ * delete del_num-1 items beginning from
+ * item in position first
+ */
leaf_delete_items_entirely(cur_bi, first, del_num - 1);
- /* delete the part of the first item of the bh
- do not delete item header
+ /*
+			 * delete the part of the first item of the bh;
+ * do not delete item header
*/
leaf_cut_from_buffer(cur_bi, 0, 0, del_bytes);
} else {
struct item_head *ih;
int len;
- /* delete del_num-1 items beginning from item in position first+1 */
+ /*
+ * delete del_num-1 items beginning from
+ * item in position first+1
+ */
leaf_delete_items_entirely(cur_bi, first + 1,
del_num - 1);
- ih = B_N_PITEM_HEAD(bh, B_NR_ITEMS(bh) - 1);
+ ih = item_head(bh, B_NR_ITEMS(bh) - 1);
if (is_direntry_le_ih(ih))
/* the last item is directory */
- /* len = numbers of directory entries in this item */
+ /*
+				 * len = number of directory entries
+ * in this item
+ */
len = ih_entry_count(ih);
else
/* len = body len of item */
len = ih_item_len(ih);
- /* delete the part of the last item of the bh
- do not delete item header
+ /*
+			 * delete the part of the last item of the bh;
+ * do not delete item header
*/
leaf_cut_from_buffer(cur_bi, B_NR_ITEMS(bh) - 1,
len - del_bytes, del_bytes);
@@ -820,10 +923,10 @@ void leaf_insert_into_buf(struct buffer_info *bi, int before,
zeros_number, ih_item_len(inserted_item_ih));
/* get item new item must be inserted before */
- ih = B_N_PITEM_HEAD(bh, before);
+ ih = item_head(bh, before);
/* prepare space for the body of new item */
- last_loc = nr ? ih_location(&(ih[nr - before - 1])) : bh->b_size;
+ last_loc = nr ? ih_location(&ih[nr - before - 1]) : bh->b_size;
unmoved_loc = before ? ih_location(ih - 1) : bh->b_size;
memmove(bh->b_data + last_loc - ih_item_len(inserted_item_ih),
@@ -846,8 +949,8 @@ void leaf_insert_into_buf(struct buffer_info *bi, int before,
/* change locations */
for (i = before; i < nr + 1; i++) {
- unmoved_loc -= ih_item_len(&(ih[i - before]));
- put_ih_location(&(ih[i - before]), unmoved_loc);
+ unmoved_loc -= ih_item_len(&ih[i - before]);
+ put_ih_location(&ih[i - before], unmoved_loc);
}
/* sizes, free space, item number */
@@ -867,8 +970,10 @@ void leaf_insert_into_buf(struct buffer_info *bi, int before,
}
}
-/* paste paste_size bytes to affected_item_num-th item.
- When item is a directory, this only prepare space for new entries */
+/*
+ * paste paste_size bytes to affected_item_num-th item.
+ * When item is a directory, this only prepares space for new entries
+ */
void leaf_paste_in_buffer(struct buffer_info *bi, int affected_item_num,
int pos_in_item, int paste_size,
const char *body, int zeros_number)
@@ -902,9 +1007,9 @@ void leaf_paste_in_buffer(struct buffer_info *bi, int affected_item_num,
#endif /* CONFIG_REISERFS_CHECK */
/* item to be appended */
- ih = B_N_PITEM_HEAD(bh, affected_item_num);
+ ih = item_head(bh, affected_item_num);
- last_loc = ih_location(&(ih[nr - affected_item_num - 1]));
+ last_loc = ih_location(&ih[nr - affected_item_num - 1]);
unmoved_loc = affected_item_num ? ih_location(ih - 1) : bh->b_size;
/* prepare space */
@@ -913,8 +1018,8 @@ void leaf_paste_in_buffer(struct buffer_info *bi, int affected_item_num,
/* change locations */
for (i = affected_item_num; i < nr; i++)
- put_ih_location(&(ih[i - affected_item_num]),
- ih_location(&(ih[i - affected_item_num])) -
+ put_ih_location(&ih[i - affected_item_num],
+ ih_location(&ih[i - affected_item_num]) -
paste_size);
if (body) {
@@ -957,10 +1062,12 @@ void leaf_paste_in_buffer(struct buffer_info *bi, int affected_item_num,
}
}
-/* cuts DEL_COUNT entries beginning from FROM-th entry. Directory item
- does not have free space, so it moves DEHs and remaining records as
- necessary. Return value is size of removed part of directory item
- in bytes. */
+/*
+ * cuts DEL_COUNT entries beginning from FROM-th entry. Directory item
+ * does not have free space, so it moves DEHs and remaining records as
+ * necessary. Return value is size of removed part of directory item
+ * in bytes.
+ */
static int leaf_cut_entries(struct buffer_head *bh,
struct item_head *ih, int from, int del_count)
{
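/*
 * [Editor's aside -- illustration only, not part of the patch.]
 * The function's return value is the total shrinkage of the directory
 * item: del_count entry heads plus their records.  Worked example,
 * assuming DEH_SIZE is 16 bytes as in reiserfs (locations made up):
 */
#include <stdio.h>

#define DEH_SIZE 16

int main(void)
{
	int item_len = 120;
	int loc[4] = { 104, 88, 72, 56 };	/* deh_location per entry */
	int from = 1, del_count = 2;

	int prev_record_offset = from ? loc[from - 1] : item_len;
	int cut_records_len = prev_record_offset
				- loc[from + del_count - 1];

	/* 2 heads (32) + 2 records (32) */
	printf("removed %d bytes\n", DEH_SIZE * del_count + cut_records_len);
	return 0;
}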
@@ -971,12 +1078,14 @@ static int leaf_cut_entries(struct buffer_head *bh,
int cut_records_len; /* length of all removed records */
int i;
- /* make sure, that item is directory and there are enough entries to
- remove */
+ /*
+	 * make sure that the item is a directory and there are enough entries
+	 * to remove
+ * remove
+ */
RFALSE(!is_direntry_le_ih(ih), "10180: item is not directory item");
- RFALSE(I_ENTRY_COUNT(ih) < from + del_count,
+ RFALSE(ih_entry_count(ih) < from + del_count,
"10185: item contains not enough entries: entry_count = %d, from = %d, to delete = %d",
- I_ENTRY_COUNT(ih), from, del_count);
+ ih_entry_count(ih), from, del_count);
if (del_count == 0)
return 0;
@@ -987,22 +1096,24 @@ static int leaf_cut_entries(struct buffer_head *bh,
/* entry head array */
deh = B_I_DEH(bh, ih);
- /* first byte of remaining entries, those are BEFORE cut entries
- (prev_record) and length of all removed records (cut_records_len) */
+ /*
+ * first byte of remaining entries, those are BEFORE cut entries
+ * (prev_record) and length of all removed records (cut_records_len)
+ */
prev_record_offset =
- (from ? deh_location(&(deh[from - 1])) : ih_item_len(ih));
+ (from ? deh_location(&deh[from - 1]) : ih_item_len(ih));
cut_records_len = prev_record_offset /*from_record */ -
- deh_location(&(deh[from + del_count - 1]));
+ deh_location(&deh[from + del_count - 1]);
prev_record = item + prev_record_offset;
/* adjust locations of remaining entries */
- for (i = I_ENTRY_COUNT(ih) - 1; i > from + del_count - 1; i--)
- put_deh_location(&(deh[i]),
+ for (i = ih_entry_count(ih) - 1; i > from + del_count - 1; i--)
+ put_deh_location(&deh[i],
deh_location(&deh[i]) -
(DEH_SIZE * del_count));
for (i = 0; i < from; i++)
- put_deh_location(&(deh[i]),
+ put_deh_location(&deh[i],
deh_location(&deh[i]) - (DEH_SIZE * del_count +
cut_records_len));
@@ -1021,14 +1132,15 @@ static int leaf_cut_entries(struct buffer_head *bh,
return DEH_SIZE * del_count + cut_records_len;
}
-/* when cut item is part of regular file
- pos_in_item - first byte that must be cut
- cut_size - number of bytes to be cut beginning from pos_in_item
-
- when cut item is part of directory
- pos_in_item - number of first deleted entry
- cut_size - count of deleted entries
- */
+/*
+ * when cut item is part of regular file
+ * pos_in_item - first byte that must be cut
+ * cut_size - number of bytes to be cut beginning from pos_in_item
+ *
+ * when cut item is part of directory
+ * pos_in_item - number of first deleted entry
+ * cut_size - count of deleted entries
+ */
void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num,
int pos_in_item, int cut_size)
{
@@ -1043,7 +1155,7 @@ void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num,
nr = blkh_nr_item(blkh);
/* item head of truncated item */
- ih = B_N_PITEM_HEAD(bh, cut_item_num);
+ ih = item_head(bh, cut_item_num);
if (is_direntry_le_ih(ih)) {
/* first cut entry () */
@@ -1055,7 +1167,6 @@ void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num,
cut_item_num);
/* change item key by key of first entry in the item */
set_le_ih_k_offset(ih, deh_offset(B_I_DEH(bh, ih)));
- /*memcpy (&ih->ih_key.k_offset, &(B_I_DEH (bh, ih)->deh_offset), SHORT_KEY_SIZE); */
}
} else {
/* item is direct or indirect */
@@ -1089,7 +1200,7 @@ void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num,
}
/* location of the last item */
- last_loc = ih_location(&(ih[nr - cut_item_num - 1]));
+ last_loc = ih_location(&ih[nr - cut_item_num - 1]);
/* location of the item, which is remaining at the same place */
unmoved_loc = cut_item_num ? ih_location(ih - 1) : bh->b_size;
@@ -1108,7 +1219,7 @@ void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num,
/* change locations */
for (i = cut_item_num; i < nr; i++)
- put_ih_location(&(ih[i - cut_item_num]),
+ put_ih_location(&ih[i - cut_item_num],
ih_location(&ih[i - cut_item_num]) + cut_size);
/* size, free space */
@@ -1156,14 +1267,14 @@ static void leaf_delete_items_entirely(struct buffer_info *bi,
return;
}
- ih = B_N_PITEM_HEAD(bh, first);
+ ih = item_head(bh, first);
/* location of unmovable item */
j = (first == 0) ? bh->b_size : ih_location(ih - 1);
/* delete items */
- last_loc = ih_location(&(ih[nr - 1 - first]));
- last_removed_loc = ih_location(&(ih[del_num - 1]));
+ last_loc = ih_location(&ih[nr - 1 - first]);
+ last_removed_loc = ih_location(&ih[del_num - 1]);
memmove(bh->b_data + last_loc + j - last_removed_loc,
bh->b_data + last_loc, last_removed_loc - last_loc);
@@ -1173,8 +1284,8 @@ static void leaf_delete_items_entirely(struct buffer_info *bi,
/* change item location */
for (i = first; i < nr - del_num; i++)
- put_ih_location(&(ih[i - first]),
- ih_location(&(ih[i - first])) + (j -
+ put_ih_location(&ih[i - first],
+ ih_location(&ih[i - first]) + (j -
last_removed_loc));
/* sizes, item number */
@@ -1195,7 +1306,10 @@ static void leaf_delete_items_entirely(struct buffer_info *bi,
}
}
-/* paste new_entry_count entries (new_dehs, records) into position before to item_num-th item */
+/*
+ * paste new_entry_count entries (new_dehs, records) into position
+ * before to item_num-th item
+ */
void leaf_paste_entries(struct buffer_info *bi,
int item_num,
int before,
@@ -1213,13 +1327,16 @@ void leaf_paste_entries(struct buffer_info *bi,
if (new_entry_count == 0)
return;
- ih = B_N_PITEM_HEAD(bh, item_num);
+ ih = item_head(bh, item_num);
- /* make sure, that item is directory, and there are enough records in it */
+ /*
+	 * make sure that the item is a directory, and there are enough
+ * records in it
+ */
RFALSE(!is_direntry_le_ih(ih), "10225: item is not directory item");
- RFALSE(I_ENTRY_COUNT(ih) < before,
+ RFALSE(ih_entry_count(ih) < before,
"10230: there are no entry we paste entries before. entry_count = %d, before = %d",
- I_ENTRY_COUNT(ih), before);
+ ih_entry_count(ih), before);
/* first byte of dest item */
item = bh->b_data + ih_location(ih);
@@ -1230,21 +1347,21 @@ void leaf_paste_entries(struct buffer_info *bi,
/* new records will be pasted at this point */
insert_point =
item +
- (before ? deh_location(&(deh[before - 1]))
+ (before ? deh_location(&deh[before - 1])
: (ih_item_len(ih) - paste_size));
/* adjust locations of records that will be AFTER new records */
- for (i = I_ENTRY_COUNT(ih) - 1; i >= before; i--)
- put_deh_location(&(deh[i]),
- deh_location(&(deh[i])) +
+ for (i = ih_entry_count(ih) - 1; i >= before; i--)
+ put_deh_location(&deh[i],
+ deh_location(&deh[i]) +
(DEH_SIZE * new_entry_count));
/* adjust locations of records that will be BEFORE new records */
for (i = 0; i < before; i++)
- put_deh_location(&(deh[i]),
- deh_location(&(deh[i])) + paste_size);
+ put_deh_location(&deh[i],
+ deh_location(&deh[i]) + paste_size);
- old_entry_num = I_ENTRY_COUNT(ih);
+ old_entry_num = ih_entry_count(ih);
put_ih_entry_count(ih, ih_entry_count(ih) + new_entry_count);
/* prepare space for pasted records */
@@ -1266,10 +1383,10 @@ void leaf_paste_entries(struct buffer_info *bi,
/* set locations of new records */
for (i = 0; i < new_entry_count; i++) {
- put_deh_location(&(deh[i]),
- deh_location(&(deh[i])) +
+ put_deh_location(&deh[i],
+ deh_location(&deh[i]) +
(-deh_location
- (&(new_dehs[new_entry_count - 1])) +
+ (&new_dehs[new_entry_count - 1]) +
insert_point + DEH_SIZE * new_entry_count -
item));
}
@@ -1277,28 +1394,26 @@ void leaf_paste_entries(struct buffer_info *bi,
/* change item key if necessary (when we paste before 0-th entry */
if (!before) {
set_le_ih_k_offset(ih, deh_offset(new_dehs));
-/* memcpy (&ih->ih_key.k_offset,
- &new_dehs->deh_offset, SHORT_KEY_SIZE);*/
}
#ifdef CONFIG_REISERFS_CHECK
{
int prev, next;
/* check record locations */
deh = B_I_DEH(bh, ih);
- for (i = 0; i < I_ENTRY_COUNT(ih); i++) {
+ for (i = 0; i < ih_entry_count(ih); i++) {
next =
(i <
- I_ENTRY_COUNT(ih) -
- 1) ? deh_location(&(deh[i + 1])) : 0;
- prev = (i != 0) ? deh_location(&(deh[i - 1])) : 0;
+ ih_entry_count(ih) -
+ 1) ? deh_location(&deh[i + 1]) : 0;
+ prev = (i != 0) ? deh_location(&deh[i - 1]) : 0;
- if (prev && prev <= deh_location(&(deh[i])))
+ if (prev && prev <= deh_location(&deh[i]))
reiserfs_error(sb_from_bi(bi), "vs-10240",
"directory item (%h) "
"corrupted (prev %a, "
"cur(%d) %a)",
ih, deh + i - 1, i, deh + i);
- if (next && next >= deh_location(&(deh[i])))
+ if (next && next >= deh_location(&deh[i]))
reiserfs_error(sb_from_bi(bi), "vs-10250",
"directory item (%h) "
"corrupted (cur(%d) %a, "
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index e825f8b63e6..cd11358b10c 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -22,8 +22,10 @@
#define INC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) { inc_nlink(i); if (i->i_nlink >= REISERFS_LINK_MAX) set_nlink(i, 1); }
#define DEC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) drop_nlink(i);
-// directory item contains array of entry headers. This performs
-// binary search through that array
+/*
+ * directory item contains array of entry headers. This performs
+ * binary search through that array
+ */
static int bin_search_in_dir_item(struct reiserfs_dir_entry *de, loff_t off)
{
struct item_head *ih = de->de_ih;
@@ -31,7 +33,7 @@ static int bin_search_in_dir_item(struct reiserfs_dir_entry *de, loff_t off)
int rbound, lbound, j;
lbound = 0;
- rbound = I_ENTRY_COUNT(ih) - 1;
+ rbound = ih_entry_count(ih) - 1;
for (j = (rbound + lbound) / 2; lbound <= rbound;
j = (rbound + lbound) / 2) {
@@ -43,7 +45,7 @@ static int bin_search_in_dir_item(struct reiserfs_dir_entry *de, loff_t off)
lbound = j + 1;
continue;
}
- // this is not name found, but matched third key component
+ /* this is not name found, but matched third key component */
de->de_entry_num = j;
return NAME_FOUND;
}
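/*
 * [Editor's aside -- illustration only, not part of the patch.]
 * bin_search_in_dir_item above is a plain binary search over the
 * ascending deh_offset array.  The same loop shape over plain ints:
 */
#include <stdio.h>

static int bin_search(const unsigned *offs, int count, unsigned off)
{
	int lbound = 0, rbound = count - 1, j;

	for (j = (rbound + lbound) / 2; lbound <= rbound;
	     j = (rbound + lbound) / 2) {
		if (off < offs[j]) {
			rbound = j - 1;
			continue;
		}
		if (off > offs[j]) {
			lbound = j + 1;
			continue;
		}
		return j;		/* NAME_FOUND analogue */
	}
	return -1;			/* NAME_NOT_FOUND analogue */
}

int main(void)
{
	unsigned offs[5] = { 1, 2, 300, 4000, 50000 };

	printf("%d\n", bin_search(offs, 5, 4000));	/* 3 */
	printf("%d\n", bin_search(offs, 5, 7));		/* -1 */
	return 0;
}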
@@ -52,17 +54,21 @@ static int bin_search_in_dir_item(struct reiserfs_dir_entry *de, loff_t off)
return NAME_NOT_FOUND;
}
-// comment? maybe something like set de to point to what the path points to?
+/*
+ * set de to point to what the path points to
+ */
static inline void set_de_item_location(struct reiserfs_dir_entry *de,
struct treepath *path)
{
de->de_bh = get_last_bh(path);
- de->de_ih = get_ih(path);
+ de->de_ih = tp_item_head(path);
de->de_deh = B_I_DEH(de->de_bh, de->de_ih);
de->de_item_num = PATH_LAST_POSITION(path);
}
-// de_bh, de_ih, de_deh (points to first element of array), de_item_num is set
+/*
+ * de_bh, de_ih, de_deh (points to first element of array), de_item_num is set
+ */
inline void set_de_name_and_namelen(struct reiserfs_dir_entry *de)
{
struct reiserfs_de_head *deh = de->de_deh + de->de_entry_num;
@@ -71,17 +77,17 @@ inline void set_de_name_and_namelen(struct reiserfs_dir_entry *de)
de->de_entrylen = entry_length(de->de_bh, de->de_ih, de->de_entry_num);
de->de_namelen = de->de_entrylen - (de_with_sd(deh) ? SD_SIZE : 0);
- de->de_name = B_I_PITEM(de->de_bh, de->de_ih) + deh_location(deh);
+ de->de_name = ih_item_body(de->de_bh, de->de_ih) + deh_location(deh);
if (de->de_name[de->de_namelen - 1] == 0)
de->de_namelen = strlen(de->de_name);
}
-// what entry points to
+/* what entry points to */
static inline void set_de_object_key(struct reiserfs_dir_entry *de)
{
BUG_ON(de->de_entry_num >= ih_entry_count(de->de_ih));
- de->de_dir_id = deh_dir_id(&(de->de_deh[de->de_entry_num]));
- de->de_objectid = deh_objectid(&(de->de_deh[de->de_entry_num]));
+ de->de_dir_id = deh_dir_id(&de->de_deh[de->de_entry_num]);
+ de->de_objectid = deh_objectid(&de->de_deh[de->de_entry_num]);
}
static inline void store_de_entry_key(struct reiserfs_dir_entry *de)
@@ -96,21 +102,20 @@ static inline void store_de_entry_key(struct reiserfs_dir_entry *de)
le32_to_cpu(de->de_ih->ih_key.k_dir_id);
de->de_entry_key.on_disk_key.k_objectid =
le32_to_cpu(de->de_ih->ih_key.k_objectid);
- set_cpu_key_k_offset(&(de->de_entry_key), deh_offset(deh));
- set_cpu_key_k_type(&(de->de_entry_key), TYPE_DIRENTRY);
+ set_cpu_key_k_offset(&de->de_entry_key, deh_offset(deh));
+ set_cpu_key_k_type(&de->de_entry_key, TYPE_DIRENTRY);
}
-/* We assign a key to each directory item, and place multiple entries
-in a single directory item. A directory item has a key equal to the
-key of the first directory entry in it.
-
-This function first calls search_by_key, then, if item whose first
-entry matches is not found it looks for the entry inside directory
-item found by search_by_key. Fills the path to the entry, and to the
-entry position in the item
-
-*/
-
+/*
+ * We assign a key to each directory item, and place multiple entries in a
+ * single directory item. A directory item has a key equal to the key of
+ * the first directory entry in it.
+ *
+ * This function first calls search_by_key, then, if an item whose first
+ * entry matches is not found, it looks for the entry inside the directory
+ * item found by search_by_key. Fills the path to the entry, and to the
+ * entry position in the item.
+ */
/* The function is NOT SCHEDULE-SAFE! */
int search_by_entry_key(struct super_block *sb, const struct cpu_key *key,
struct treepath *path, struct reiserfs_dir_entry *de)
@@ -144,7 +149,7 @@ int search_by_entry_key(struct super_block *sb, const struct cpu_key *key,
#ifdef CONFIG_REISERFS_CHECK
if (!is_direntry_le_ih(de->de_ih) ||
- COMP_SHORT_KEYS(&(de->de_ih->ih_key), key)) {
+ COMP_SHORT_KEYS(&de->de_ih->ih_key, key)) {
print_block(de->de_bh, 0, -1, -1);
reiserfs_panic(sb, "vs-7005", "found item %h is not directory "
"item or does not belong to the same directory "
@@ -152,12 +157,17 @@ int search_by_entry_key(struct super_block *sb, const struct cpu_key *key,
}
#endif /* CONFIG_REISERFS_CHECK */
- /* binary search in directory item by third componen t of the
- key. sets de->de_entry_num of de */
+ /*
+ * binary search in directory item by third component of the
+ * key. sets de->de_entry_num of de
+ */
retval = bin_search_in_dir_item(de, cpu_key_k_offset(key));
path->pos_in_item = de->de_entry_num;
if (retval != NAME_NOT_FOUND) {
- // ugly, but rename needs de_bh, de_deh, de_name, de_namelen, de_objectid set
+ /*
+ * ugly, but rename needs de_bh, de_deh, de_name,
+ * de_namelen, de_objectid set
+ */
set_de_name_and_namelen(de);
set_de_object_key(de);
}
@@ -166,11 +176,12 @@ int search_by_entry_key(struct super_block *sb, const struct cpu_key *key,
/* Keyed 32-bit hash function using TEA in a Davis-Meyer function */
-/* The third component is hashed, and you can choose from more than
- one hash function. Per directory hashes are not yet implemented
- but are thought about. This function should be moved to hashes.c
- Jedi, please do so. -Hans */
-
+/*
+ * The third component is hashed, and you can choose from more than
+ * one hash function. Per directory hashes are not yet implemented
+ * but are thought about. This function should be moved to hashes.c
+ * Jedi, please do so. -Hans
+ */
static __u32 get_third_component(struct super_block *s,
const char *name, int len)
{
@@ -183,11 +194,13 @@ static __u32 get_third_component(struct super_block *s,
res = REISERFS_SB(s)->s_hash_function(name, len);
- // take bits from 7-th to 30-th including both bounds
+ /* take bits from 7-th to 30-th including both bounds */
res = GET_HASH_VALUE(res);
if (res == 0)
- // needed to have no names before "." and ".." those have hash
- // value == 0 and generation conters 1 and 2 accordingly
+ /*
+		 * needed to have no names before "." and "..", those have hash
+		 * value == 0 and generation counters 1 and 2 accordingly
+ */
res = 128;
return res + MAX_GENERATION_NUMBER;
}
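/*
 * [Editor's aside -- illustration only, not part of the patch.]
 * "Bits 7..30" above means the directory offset packs a 24-bit hash in
 * bits 7-30 and a 7-bit generation number in bits 0-6.  The masks below
 * assume the reiserfs definitions GET_HASH_VALUE(v) == (v & 0x7fffff80)
 * and MAX_GENERATION_NUMBER == 127:
 */
#include <stdio.h>

#define HASH_MASK	0x7fffff80u	/* bits 7..30 */
#define GEN_MASK	0x0000007fu	/* bits 0..6 */
#define MAX_GEN		127u

int main(void)
{
	unsigned raw = 0xdeadbeef;	/* raw hash of the name */
	unsigned res = raw & HASH_MASK;

	if (res == 0)			/* keep 0 for "." and ".." */
		res = 128;

	unsigned offset = res + MAX_GEN;

	printf("hash=%#x gen=%u\n", offset & HASH_MASK, offset & GEN_MASK);
	return 0;
}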
@@ -208,7 +221,7 @@ static int reiserfs_match(struct reiserfs_dir_entry *de,
/* de's de_bh, de_ih, de_deh, de_item_num, de_entry_num are set already */
- /* used when hash collisions exist */
+/* used when hash collisions exist */
static int linear_search_in_dir_item(struct cpu_key *key,
struct reiserfs_dir_entry *de,
@@ -220,7 +233,7 @@ static int linear_search_in_dir_item(struct cpu_key *key,
i = de->de_entry_num;
- if (i == I_ENTRY_COUNT(de->de_ih) ||
+ if (i == ih_entry_count(de->de_ih) ||
GET_HASH_VALUE(deh_offset(deh + i)) !=
GET_HASH_VALUE(cpu_key_k_offset(key))) {
i--;
@@ -232,43 +245,50 @@ static int linear_search_in_dir_item(struct cpu_key *key,
deh += i;
for (; i >= 0; i--, deh--) {
+ /* hash value does not match, no need to check whole name */
if (GET_HASH_VALUE(deh_offset(deh)) !=
GET_HASH_VALUE(cpu_key_k_offset(key))) {
- // hash value does not match, no need to check whole name
return NAME_NOT_FOUND;
}
- /* mark, that this generation number is used */
+ /* mark that this generation number is used */
if (de->de_gen_number_bit_string)
set_bit(GET_GENERATION_NUMBER(deh_offset(deh)),
de->de_gen_number_bit_string);
- // calculate pointer to name and namelen
+ /* calculate pointer to name and namelen */
de->de_entry_num = i;
set_de_name_and_namelen(de);
+ /*
+ * de's de_name, de_namelen, de_recordlen are set.
+ * Fill the rest.
+ */
if ((retval =
reiserfs_match(de, name, namelen)) != NAME_NOT_FOUND) {
- // de's de_name, de_namelen, de_recordlen are set. Fill the rest:
- // key of pointed object
+ /* key of pointed object */
set_de_object_key(de);
store_de_entry_key(de);
- // retval can be NAME_FOUND or NAME_FOUND_INVISIBLE
+ /* retval can be NAME_FOUND or NAME_FOUND_INVISIBLE */
return retval;
}
}
if (GET_GENERATION_NUMBER(le_ih_k_offset(de->de_ih)) == 0)
- /* we have reached left most entry in the node. In common we
- have to go to the left neighbor, but if generation counter
- is 0 already, we know for sure, that there is no name with
- the same hash value */
- // FIXME: this work correctly only because hash value can not
- // be 0. Btw, in case of Yura's hash it is probably possible,
- // so, this is a bug
+ /*
+		 * we have reached the leftmost entry in the node. Normally we
+ * have to go to the left neighbor, but if generation counter
+ * is 0 already, we know for sure, that there is no name with
+ * the same hash value
+ */
+ /*
+		 * FIXME: this works correctly only because hash value can not
+ * be 0. Btw, in case of Yura's hash it is probably possible,
+ * so, this is a bug
+ */
return NAME_NOT_FOUND;
RFALSE(de->de_item_num,
@@ -277,8 +297,10 @@ static int linear_search_in_dir_item(struct cpu_key *key,
return GOTO_PREVIOUS_ITEM;
}
-// may return NAME_FOUND, NAME_FOUND_INVISIBLE, NAME_NOT_FOUND
-// FIXME: should add something like IOERROR
+/*
+ * may return NAME_FOUND, NAME_FOUND_INVISIBLE, NAME_NOT_FOUND
+ * FIXME: should add something like IOERROR
+ */
static int reiserfs_find_entry(struct inode *dir, const char *name, int namelen,
struct treepath *path_to_entry,
struct reiserfs_dir_entry *de)
@@ -307,13 +329,19 @@ static int reiserfs_find_entry(struct inode *dir, const char *name, int namelen,
retval =
linear_search_in_dir_item(&key_to_search, de, name,
namelen);
+ /*
+		 * there is no need to scan the directory anymore.
+		 * Given entry was found or does not exist
+ */
if (retval != GOTO_PREVIOUS_ITEM) {
- /* there is no need to scan directory anymore. Given entry found or does not exist */
path_to_entry->pos_in_item = de->de_entry_num;
return retval;
}
- /* there is left neighboring item of this directory and given entry can be there */
+ /*
+		 * there is a left neighboring item of this directory
+		 * and the given entry can be there
+ */
set_cpu_key_k_offset(&key_to_search,
le_ih_k_offset(de->de_ih) - 1);
pathrelse(path_to_entry);
@@ -341,14 +369,16 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
pathrelse(&path_to_entry);
if (retval == NAME_FOUND) {
inode = reiserfs_iget(dir->i_sb,
- (struct cpu_key *)&(de.de_dir_id));
+ (struct cpu_key *)&de.de_dir_id);
if (!inode || IS_ERR(inode)) {
reiserfs_write_unlock(dir->i_sb);
return ERR_PTR(-EACCES);
}
- /* Propagate the private flag so we know we're
- * in the priv tree */
+ /*
+ * Propagate the private flag so we know we're
+ * in the priv tree
+ */
if (IS_PRIVATE(dir))
inode->i_flags |= S_PRIVATE;
}
@@ -361,9 +391,9 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
}
/*
-** looks up the dentry of the parent directory for child.
-** taken from ext2_get_parent
-*/
+ * looks up the dentry of the parent directory for child.
+ * taken from ext2_get_parent
+ */
struct dentry *reiserfs_get_parent(struct dentry *child)
{
int retval;
@@ -384,7 +414,7 @@ struct dentry *reiserfs_get_parent(struct dentry *child)
reiserfs_write_unlock(dir->i_sb);
return ERR_PTR(-ENOENT);
}
- inode = reiserfs_iget(dir->i_sb, (struct cpu_key *)&(de.de_dir_id));
+ inode = reiserfs_iget(dir->i_sb, (struct cpu_key *)&de.de_dir_id);
reiserfs_write_unlock(dir->i_sb);
return d_obtain_alias(inode);
@@ -406,8 +436,13 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th,
struct reiserfs_dir_entry de;
DECLARE_BITMAP(bit_string, MAX_GENERATION_NUMBER + 1);
int gen_number;
- char small_buf[32 + DEH_SIZE]; /* 48 bytes now and we avoid kmalloc
- if we create file with short name */
+
+ /*
+ * 48 bytes now and we avoid kmalloc if we
+ * create file with short name
+ */
+ char small_buf[32 + DEH_SIZE];
+
char *buffer;
int buflen, paste_size;
int retval;
@@ -439,21 +474,30 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th,
(get_inode_sd_version(dir) ==
STAT_DATA_V1) ? (DEH_SIZE + namelen) : buflen;
- /* fill buffer : directory entry head, name[, dir objectid | , stat data | ,stat data, dir objectid ] */
+ /*
+	 * fill buffer: directory entry head, name[, dir objectid | ,
+	 * stat data | , stat data, dir objectid ]
+ */
deh = (struct reiserfs_de_head *)buffer;
deh->deh_location = 0; /* JDM Endian safe if 0 */
put_deh_offset(deh, cpu_key_k_offset(&entry_key));
deh->deh_state = 0; /* JDM Endian safe if 0 */
/* put key (ino analog) to de */
- deh->deh_dir_id = INODE_PKEY(inode)->k_dir_id; /* safe: k_dir_id is le */
- deh->deh_objectid = INODE_PKEY(inode)->k_objectid; /* safe: k_objectid is le */
+
+ /* safe: k_dir_id is le */
+ deh->deh_dir_id = INODE_PKEY(inode)->k_dir_id;
+ /* safe: k_objectid is le */
+ deh->deh_objectid = INODE_PKEY(inode)->k_objectid;
/* copy name */
memcpy((char *)(deh + 1), name, namelen);
/* pad with 0s to the 4 byte boundary */
padd_item((char *)(deh + 1), ROUND_UP(namelen), namelen);
- /* entry is ready to be pasted into tree, set 'visibility' and 'stat data in entry' attributes */
+ /*
+ * entry is ready to be pasted into tree, set 'visibility'
+ * and 'stat data in entry' attributes
+ */
mark_de_without_sd(deh);
visible ? mark_de_visible(deh) : mark_de_hidden(deh);
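To make the layout being assembled here concrete, a sketch of the entry body (DEH_SIZE and ROUND_UP are the macros used above; their actual values are whatever the headers define, not assumed here):

	/*
	 *  buffer:  +-------------------------+-----------+-----------------+
	 *           | struct reiserfs_de_head | name      | 0-padding up to |
	 *           | (DEH_SIZE bytes)        | (namelen) | 4-byte boundary |
	 *           +-------------------------+-----------+-----------------+
	 *
	 * e.g. for a 3-byte name the body occupies DEH_SIZE + ROUND_UP(3) =
	 * DEH_SIZE + 4 bytes, which is why small_buf above can avoid kmalloc
	 * for short names.
	 */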
@@ -499,7 +543,8 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th,
/* update max-hash-collisions counter in reiserfs_sb_info */
PROC_INFO_MAX(th->t_super, max_hash_collisions, gen_number);
- if (gen_number != 0) { /* we need to re-search for the insertion point */
+ /* we need to re-search for the insertion point */
+ if (gen_number != 0) {
if (search_by_entry_key(dir->i_sb, &entry_key, &path, &de) !=
NAME_NOT_FOUND) {
reiserfs_warning(dir->i_sb, "vs-7032",
@@ -527,18 +572,19 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th,
dir->i_size += paste_size;
dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
if (!S_ISDIR(inode->i_mode) && visible)
- // reiserfs_mkdir or reiserfs_rename will do that by itself
+ /* reiserfs_mkdir or reiserfs_rename will do that by itself */
reiserfs_update_sd(th, dir);
reiserfs_check_path(&path);
return 0;
}
-/* quota utility function, call if you've had to abort after calling
-** new_inode_init, and have not called reiserfs_new_inode yet.
-** This should only be called on inodes that do not have stat data
-** inserted into the tree yet.
-*/
+/*
+ * quota utility function, call if you've had to abort after calling
+ * new_inode_init, and have not called reiserfs_new_inode yet.
+ * This should only be called on inodes that do not have stat data
+ * inserted into the tree yet.
+ */
static int drop_new_inode(struct inode *inode)
{
dquot_drop(inode);
@@ -548,18 +594,23 @@ static int drop_new_inode(struct inode *inode)
return 0;
}
-/* utility function that does setup for reiserfs_new_inode.
-** dquot_initialize needs lots of credits so it's better to have it
-** outside of a transaction, so we had to pull some bits of
-** reiserfs_new_inode out into this func.
-*/
+/*
+ * utility function that does setup for reiserfs_new_inode.
+ * dquot_initialize needs lots of credits so it's better to have it
+ * outside of a transaction, so we had to pull some bits of
+ * reiserfs_new_inode out into this func.
+ */
static int new_inode_init(struct inode *inode, struct inode *dir, umode_t mode)
{
- /* Make inode invalid - just in case we are going to drop it before
- * the initialization happens */
+ /*
+ * Make inode invalid - just in case we are going to drop it before
+ * the initialization happens
+ */
INODE_PKEY(inode)->k_objectid = 0;
- /* the quota init calls have to know who to charge the quota to, so
- ** we have to set uid and gid here
+
+ /*
+ * the quota init calls have to know who to charge the quota to, so
+ * we have to set uid and gid here
*/
inode_init_owner(inode, dir, mode);
dquot_initialize(inode);
@@ -571,7 +622,10 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mod
{
int retval;
struct inode *inode;
- /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
+ /*
+ * We need blocks for transaction + (user+group)*(quotas
+ * for new inode + update of quota for directory owner)
+ */
int jbegin_count =
JOURNAL_PER_BALANCE_CNT * 2 +
2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) +
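As a worked reading of that reservation (the numbers below are purely illustrative, not the macros' real values):

	/*
	 * jbegin_count = JOURNAL_PER_BALANCE_CNT * 2
	 *              + 2 * (REISERFS_QUOTA_INIT_BLOCKS(sb)
	 *                     + REISERFS_QUOTA_TRANS_BLOCKS(sb))
	 *
	 * with illustrative values JOURNAL_PER_BALANCE_CNT = 12,
	 * QUOTA_INIT = 10, QUOTA_TRANS = 2 this reserves
	 * 12 * 2 + 2 * (10 + 2) = 48 journal blocks; the factor of 2 on the
	 * quota term is the "(user+group)" in the comment above.
	 */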
@@ -618,7 +672,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mod
int err;
drop_nlink(inode);
reiserfs_update_sd(&th, inode);
- err = journal_end(&th, dir->i_sb, jbegin_count);
+ err = journal_end(&th);
if (err)
retval = err;
unlock_new_inode(inode);
@@ -630,9 +684,9 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mod
unlock_new_inode(inode);
d_instantiate(dentry, inode);
- retval = journal_end(&th, dir->i_sb, jbegin_count);
+ retval = journal_end(&th);
- out_failed:
+out_failed:
reiserfs_write_unlock(dir->i_sb);
return retval;
}
@@ -644,7 +698,10 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode
struct inode *inode;
struct reiserfs_transaction_handle th;
struct reiserfs_security_handle security;
- /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
+ /*
+ * We need blocks for transaction + (user+group)*(quotas
+ * for new inode + update of quota for directory owner)
+ */
int jbegin_count =
JOURNAL_PER_BALANCE_CNT * 3 +
2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) +
@@ -685,7 +742,7 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode
inode->i_op = &reiserfs_special_inode_operations;
init_special_inode(inode, inode->i_mode, rdev);
- //FIXME: needed for block and char devices only
+ /* FIXME: needed for block and char devices only */
reiserfs_update_sd(&th, inode);
reiserfs_update_inode_transaction(inode);
@@ -698,7 +755,7 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode
int err;
drop_nlink(inode);
reiserfs_update_sd(&th, inode);
- err = journal_end(&th, dir->i_sb, jbegin_count);
+ err = journal_end(&th);
if (err)
retval = err;
unlock_new_inode(inode);
@@ -708,9 +765,9 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode
unlock_new_inode(inode);
d_instantiate(dentry, inode);
- retval = journal_end(&th, dir->i_sb, jbegin_count);
+ retval = journal_end(&th);
- out_failed:
+out_failed:
reiserfs_write_unlock(dir->i_sb);
return retval;
}
@@ -721,7 +778,10 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
struct inode *inode;
struct reiserfs_transaction_handle th;
struct reiserfs_security_handle security;
- /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
+ /*
+ * We need blocks for transaction + (user+group)*(quotas
+ * for new inode + update of quota for directory owner)
+ */
int jbegin_count =
JOURNAL_PER_BALANCE_CNT * 3 +
2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) +
@@ -730,7 +790,10 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
dquot_initialize(dir);
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
- /* set flag that new packing locality created and new blocks for the content * of that directory are not displaced yet */
+ /*
+ * set flag that new packing locality created and new blocks
+ * for the content of that directory are not displaced yet
+ */
REISERFS_I(dir)->new_packing_locality = 1;
#endif
mode = S_IFDIR | mode;
@@ -754,8 +817,9 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
goto out_failed;
}
- /* inc the link count now, so another writer doesn't overflow it while
- ** we sleep later on.
+ /*
+ * inc the link count now, so another writer doesn't overflow
+ * it while we sleep later on.
*/
INC_DIR_INODE_NLINK(dir)
@@ -774,7 +838,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
inode->i_op = &reiserfs_dir_inode_operations;
inode->i_fop = &reiserfs_dir_operations;
- // note, _this_ add_entry will not update dir's stat data
+ /* note, _this_ add_entry will not update dir's stat data */
retval =
reiserfs_add_entry(&th, dir, dentry->d_name.name,
dentry->d_name.len, inode, 1 /*visible */ );
@@ -783,19 +847,19 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
clear_nlink(inode);
DEC_DIR_INODE_NLINK(dir);
reiserfs_update_sd(&th, inode);
- err = journal_end(&th, dir->i_sb, jbegin_count);
+ err = journal_end(&th);
if (err)
retval = err;
unlock_new_inode(inode);
iput(inode);
goto out_failed;
}
- // the above add_entry did not update dir's stat data
+ /* the above add_entry did not update dir's stat data */
reiserfs_update_sd(&th, dir);
unlock_new_inode(inode);
d_instantiate(dentry, inode);
- retval = journal_end(&th, dir->i_sb, jbegin_count);
+ retval = journal_end(&th);
out_failed:
reiserfs_write_unlock(dir->i_sb);
return retval;
@@ -803,10 +867,11 @@ out_failed:
static inline int reiserfs_empty_dir(struct inode *inode)
{
- /* we can cheat because an old format dir cannot have
- ** EMPTY_DIR_SIZE, and a new format dir cannot have
- ** EMPTY_DIR_SIZE_V1. So, if the inode is either size,
- ** regardless of disk format version, the directory is empty.
+ /*
+ * we can cheat because an old format dir cannot have
+ * EMPTY_DIR_SIZE, and a new format dir cannot have
+ * EMPTY_DIR_SIZE_V1. So, if the inode is either size,
+ * regardless of disk format version, the directory is empty.
*/
if (inode->i_size != EMPTY_DIR_SIZE &&
inode->i_size != EMPTY_DIR_SIZE_V1) {
@@ -824,10 +889,12 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry)
INITIALIZE_PATH(path);
struct reiserfs_dir_entry de;
- /* we will be doing 2 balancings and update 2 stat data, we change quotas
- * of the owner of the directory and of the owner of the parent directory.
- * The quota structure is possibly deleted only on last iput => outside
- * of this transaction */
+ /*
+ * we will be doing 2 balancings and update 2 stat data, we
+ * change quotas of the owner of the directory and of the owner
+ * of the parent directory. The quota structure is possibly
+ * deleted only on last iput => outside of this transaction
+ */
jbegin_count =
JOURNAL_PER_BALANCE_CNT * 2 + 2 +
4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
@@ -856,8 +923,9 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry)
reiserfs_update_inode_transaction(dir);
if (de.de_objectid != inode->i_ino) {
- // FIXME: compare key of an object and a key found in the
- // entry
+ /*
+ * FIXME: compare key of an object and a key found in the entry
+ */
retval = -EIO;
goto end_rmdir;
}
@@ -867,7 +935,8 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry)
}
/* cut entry from dir directory */
- retval = reiserfs_cut_from_item(&th, &path, &(de.de_entry_key), dir, NULL, /* page */
+ retval = reiserfs_cut_from_item(&th, &path, &de.de_entry_key,
+ dir, NULL, /* page */
0 /*new file size - not used here */ );
if (retval < 0)
goto end_rmdir;
@@ -888,18 +957,20 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry)
/* prevent empty directory from getting lost */
add_save_link(&th, inode, 0 /* not truncate */ );
- retval = journal_end(&th, dir->i_sb, jbegin_count);
+ retval = journal_end(&th);
reiserfs_check_path(&path);
- out_rmdir:
+out_rmdir:
reiserfs_write_unlock(dir->i_sb);
return retval;
- end_rmdir:
- /* we must release path, because we did not call
- reiserfs_cut_from_item, or reiserfs_cut_from_item does not
- release path if operation was not complete */
+end_rmdir:
+ /*
+ * we must release path, because we did not call
+ * reiserfs_cut_from_item, or reiserfs_cut_from_item does not
+ * release path if operation was not complete
+ */
pathrelse(&path);
- err = journal_end(&th, dir->i_sb, jbegin_count);
+ err = journal_end(&th);
reiserfs_write_unlock(dir->i_sb);
return err ? err : retval;
}
@@ -918,10 +989,13 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
inode = dentry->d_inode;
- /* in this transaction we can be doing at max two balancings and update
- * two stat datas, we change quotas of the owner of the directory and of
- * the owner of the parent directory. The quota structure is possibly
- * deleted only on iput => outside of this transaction */
+ /*
+ * in this transaction we can be doing at max two balancings and
+ * update two stat datas, we change quotas of the owner of the
+ * directory and of the owner of the parent directory. The quota
+ * structure is possibly deleted only on iput => outside of
+ * this transaction
+ */
jbegin_count =
JOURNAL_PER_BALANCE_CNT * 2 + 2 +
4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
@@ -946,8 +1020,9 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
reiserfs_update_inode_transaction(dir);
if (de.de_objectid != inode->i_ino) {
- // FIXME: compare key of an object and a key found in the
- // entry
+ /*
+ * FIXME: compare key of an object and a key found in the entry
+ */
retval = -EIO;
goto end_unlink;
}
@@ -968,7 +1043,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
savelink = inode->i_nlink;
retval =
- reiserfs_cut_from_item(&th, &path, &(de.de_entry_key), dir, NULL,
+ reiserfs_cut_from_item(&th, &path, &de.de_entry_key, dir, NULL,
0);
if (retval < 0) {
inc_nlink(inode);
@@ -985,18 +1060,18 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
/* prevent file from getting lost */
add_save_link(&th, inode, 0 /* not truncate */ );
- retval = journal_end(&th, dir->i_sb, jbegin_count);
+ retval = journal_end(&th);
reiserfs_check_path(&path);
reiserfs_write_unlock(dir->i_sb);
return retval;
- end_unlink:
+end_unlink:
pathrelse(&path);
- err = journal_end(&th, dir->i_sb, jbegin_count);
+ err = journal_end(&th);
reiserfs_check_path(&path);
if (err)
retval = err;
- out_unlink:
+out_unlink:
reiserfs_write_unlock(dir->i_sb);
return retval;
}
@@ -1011,7 +1086,10 @@ static int reiserfs_symlink(struct inode *parent_dir,
struct reiserfs_transaction_handle th;
struct reiserfs_security_handle security;
int mode = S_IFLNK | S_IRWXUGO;
- /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
+ /*
+ * We need blocks for transaction + (user+group)*(quotas for
+ * new inode + update of quota for directory owner)
+ */
int jbegin_count =
JOURNAL_PER_BALANCE_CNT * 3 +
2 * (REISERFS_QUOTA_INIT_BLOCKS(parent_dir->i_sb) +
@@ -1070,17 +1148,13 @@ static int reiserfs_symlink(struct inode *parent_dir,
inode->i_op = &reiserfs_symlink_inode_operations;
inode->i_mapping->a_ops = &reiserfs_address_space_operations;
- // must be sure this inode is written with this transaction
- //
- //reiserfs_update_sd (&th, inode, READ_BLOCKS);
-
retval = reiserfs_add_entry(&th, parent_dir, dentry->d_name.name,
dentry->d_name.len, inode, 1 /*visible */ );
if (retval) {
int err;
drop_nlink(inode);
reiserfs_update_sd(&th, inode);
- err = journal_end(&th, parent_dir->i_sb, jbegin_count);
+ err = journal_end(&th);
if (err)
retval = err;
unlock_new_inode(inode);
@@ -1090,8 +1164,8 @@ static int reiserfs_symlink(struct inode *parent_dir,
unlock_new_inode(inode);
d_instantiate(dentry, inode);
- retval = journal_end(&th, parent_dir->i_sb, jbegin_count);
- out_failed:
+ retval = journal_end(&th);
+out_failed:
reiserfs_write_unlock(parent_dir->i_sb);
return retval;
}
@@ -1102,7 +1176,10 @@ static int reiserfs_link(struct dentry *old_dentry, struct inode *dir,
int retval;
struct inode *inode = old_dentry->d_inode;
struct reiserfs_transaction_handle th;
- /* We need blocks for transaction + update of quotas for the owners of the directory */
+ /*
+ * We need blocks for transaction + update of quotas for
+ * the owners of the directory
+ */
int jbegin_count =
JOURNAL_PER_BALANCE_CNT * 3 +
2 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
@@ -1111,7 +1188,7 @@ static int reiserfs_link(struct dentry *old_dentry, struct inode *dir,
reiserfs_write_lock(dir->i_sb);
if (inode->i_nlink >= REISERFS_LINK_MAX) {
- //FIXME: sd_nlink is 32 bit for new files
+ /* FIXME: sd_nlink is 32 bit for new files */
reiserfs_write_unlock(dir->i_sb);
return -EMLINK;
}
@@ -1137,7 +1214,7 @@ static int reiserfs_link(struct dentry *old_dentry, struct inode *dir,
if (retval) {
int err;
drop_nlink(inode);
- err = journal_end(&th, dir->i_sb, jbegin_count);
+ err = journal_end(&th);
reiserfs_write_unlock(dir->i_sb);
return err ? err : retval;
}
@@ -1147,7 +1224,7 @@ static int reiserfs_link(struct dentry *old_dentry, struct inode *dir,
ihold(inode);
d_instantiate(dentry, inode);
- retval = journal_end(&th, dir->i_sb, jbegin_count);
+ retval = journal_end(&th);
reiserfs_write_unlock(dir->i_sb);
return retval;
}
@@ -1158,9 +1235,9 @@ static int de_still_valid(const char *name, int len,
{
struct reiserfs_dir_entry tmp = *de;
- // recalculate pointer to name and name length
+ /* recalculate pointer to name and name length */
set_de_name_and_namelen(&tmp);
- // FIXME: could check more
+ /* FIXME: could check more */
if (tmp.de_namelen != len || memcmp(name, de->de_name, len))
return 0;
return 1;
@@ -1217,14 +1294,16 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
unsigned long savelink = 1;
struct timespec ctime;
- /* three balancings: (1) old name removal, (2) new name insertion
- and (3) maybe "save" link insertion
- stat data updates: (1) old directory,
- (2) new directory and (3) maybe old object stat data (when it is
- directory) and (4) maybe stat data of object to which new entry
- pointed initially and (5) maybe block containing ".." of
- renamed directory
- quota updates: two parent directories */
+ /*
+ * three balancings: (1) old name removal, (2) new name insertion
+ * and (3) maybe "save" link insertion
+ * stat data updates: (1) old directory,
+ * (2) new directory and (3) maybe old object stat data (when it is
+ * directory) and (4) maybe stat data of object to which new entry
+ * pointed initially and (5) maybe block containing ".." of
+ * renamed directory
+ * quota updates: two parent directories
+ */
jbegin_count =
JOURNAL_PER_BALANCE_CNT * 3 + 5 +
4 * REISERFS_QUOTA_TRANS_BLOCKS(old_dir->i_sb);
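Reading the reservation against the enumeration above (a sketch of the accounting, not a claim about the author's exact intent):

	/*
	 * jbegin_count = JOURNAL_PER_BALANCE_CNT * 3  <- balancings (1)-(3)
	 *              + 5                            <- stat data updates (1)-(5)
	 *              + 4 * REISERFS_QUOTA_TRANS_BLOCKS(old_dir->i_sb)
	 *                                             <- quota: two parent dirs
	 */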
@@ -1235,8 +1314,10 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
old_inode = old_dentry->d_inode;
new_dentry_inode = new_dentry->d_inode;
- // make sure, that oldname still exists and points to an object we
- // are going to rename
+ /*
+ * make sure that oldname still exists and points to an object we
+ * are going to rename
+ */
old_de.de_gen_number_bit_string = NULL;
reiserfs_write_lock(old_dir->i_sb);
retval =
@@ -1256,10 +1337,11 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
old_inode_mode = old_inode->i_mode;
if (S_ISDIR(old_inode_mode)) {
- // make sure, that directory being renamed has correct ".."
- // and that its new parent directory has not too many links
- // already
-
+ /*
+ * make sure that the directory being renamed has a correct ".."
+ * and that its new parent directory does not already have too
+ * many links
+ */
if (new_dentry_inode) {
if (!reiserfs_empty_dir(new_dentry_inode)) {
reiserfs_write_unlock(old_dir->i_sb);
@@ -1267,8 +1349,9 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
}
- /* directory is renamed, its parent directory will be changed,
- ** so find ".." entry
+ /*
+ * directory is renamed, its parent directory will be changed,
+ * so find ".." entry
*/
dot_dot_de.de_gen_number_bit_string = NULL;
retval =
@@ -1303,7 +1386,7 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
"new entry is found, new inode == 0");
}
} else if (retval) {
- int err = journal_end(&th, old_dir->i_sb, jbegin_count);
+ int err = journal_end(&th);
reiserfs_write_unlock(old_dir->i_sb);
return err ? err : retval;
}
@@ -1311,8 +1394,9 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
reiserfs_update_inode_transaction(old_dir);
reiserfs_update_inode_transaction(new_dir);
- /* this makes it so an fsync on an open fd for the old name will
- ** commit the rename operation
+ /*
+ * this makes it so an fsync on an open fd for the old name will
+ * commit the rename operation
*/
reiserfs_update_inode_transaction(old_inode);
@@ -1320,38 +1404,45 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
reiserfs_update_inode_transaction(new_dentry_inode);
while (1) {
- // look for old name using corresponding entry key (found by reiserfs_find_entry)
+ /*
+ * look for old name using corresponding entry key
+ * (found by reiserfs_find_entry)
+ */
if ((retval =
search_by_entry_key(new_dir->i_sb, &old_de.de_entry_key,
&old_entry_path,
&old_de)) != NAME_FOUND) {
pathrelse(&old_entry_path);
- journal_end(&th, old_dir->i_sb, jbegin_count);
+ journal_end(&th);
reiserfs_write_unlock(old_dir->i_sb);
return -EIO;
}
- copy_item_head(&old_entry_ih, get_ih(&old_entry_path));
+ copy_item_head(&old_entry_ih, tp_item_head(&old_entry_path));
reiserfs_prepare_for_journal(old_inode->i_sb, old_de.de_bh, 1);
- // look for new name by reiserfs_find_entry
+ /* look for new name by reiserfs_find_entry */
new_de.de_gen_number_bit_string = NULL;
retval =
reiserfs_find_entry(new_dir, new_dentry->d_name.name,
new_dentry->d_name.len, &new_entry_path,
&new_de);
- // reiserfs_add_entry should not return IO_ERROR, because it is called with essentially same parameters from
- // reiserfs_add_entry above, and we'll catch any i/o errors before we get here.
+ /*
+ * reiserfs_find_entry should not return IO_ERROR,
+ * because it is called with essentially the same parameters from
+ * reiserfs_add_entry above, and we'll catch any i/o errors
+ * before we get here.
+ */
if (retval != NAME_FOUND_INVISIBLE && retval != NAME_FOUND) {
pathrelse(&new_entry_path);
pathrelse(&old_entry_path);
- journal_end(&th, old_dir->i_sb, jbegin_count);
+ journal_end(&th);
reiserfs_write_unlock(old_dir->i_sb);
return -EIO;
}
- copy_item_head(&new_entry_ih, get_ih(&new_entry_path));
+ copy_item_head(&new_entry_ih, tp_item_head(&new_entry_path));
reiserfs_prepare_for_journal(old_inode->i_sb, new_de.de_bh, 1);
@@ -1364,28 +1455,32 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
pathrelse(&dot_dot_entry_path);
pathrelse(&new_entry_path);
pathrelse(&old_entry_path);
- journal_end(&th, old_dir->i_sb, jbegin_count);
+ journal_end(&th);
reiserfs_write_unlock(old_dir->i_sb);
return -EIO;
}
copy_item_head(&dot_dot_ih,
- get_ih(&dot_dot_entry_path));
- // node containing ".." gets into transaction
+ tp_item_head(&dot_dot_entry_path));
+ /* node containing ".." gets into transaction */
reiserfs_prepare_for_journal(old_inode->i_sb,
dot_dot_de.de_bh, 1);
}
- /* we should check seals here, not do
- this stuff, yes? Then, having
- gathered everything into RAM we
- should lock the buffers, yes? -Hans */
- /* probably. our rename needs to hold more
- ** than one path at once. The seals would
- ** have to be written to deal with multi-path
- ** issues -chris
+ /*
+ * we should check seals here, not do
+ * this stuff, yes? Then, having
+ * gathered everything into RAM we
+ * should lock the buffers, yes? -Hans
+ */
+ /*
+ * probably. our rename needs to hold more
+ * than one path at once. The seals would
+ * have to be written to deal with multi-path
+ * issues -chris
*/
- /* sanity checking before doing the rename - avoid races many
- ** of the above checks could have scheduled. We have to be
- ** sure our items haven't been shifted by another process.
+ /*
+ * sanity checking before doing the rename - avoid races many
+ * of the above checks could have scheduled. We have to be
+ * sure our items haven't been shifted by another process.
*/
if (item_moved(&new_entry_ih, &new_entry_path) ||
!entry_points_to_object(new_dentry->d_name.name,
@@ -1430,24 +1525,28 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
break;
}
- /* ok, all the changes can be done in one fell swoop when we
- have claimed all the buffers needed. */
+ /*
+ * ok, all the changes can be done in one fell swoop when we
+ * have claimed all the buffers needed.
+ */
mark_de_visible(new_de.de_deh + new_de.de_entry_num);
set_ino_in_dir_entry(&new_de, INODE_PKEY(old_inode));
- journal_mark_dirty(&th, old_dir->i_sb, new_de.de_bh);
+ journal_mark_dirty(&th, new_de.de_bh);
mark_de_hidden(old_de.de_deh + old_de.de_entry_num);
- journal_mark_dirty(&th, old_dir->i_sb, old_de.de_bh);
+ journal_mark_dirty(&th, old_de.de_bh);
ctime = CURRENT_TIME_SEC;
old_dir->i_ctime = old_dir->i_mtime = ctime;
new_dir->i_ctime = new_dir->i_mtime = ctime;
- /* thanks to Alex Adriaanse <alex_a@caltech.edu> for patch which adds ctime update of
- renamed object */
+ /*
+ * thanks to Alex Adriaanse <alex_a@caltech.edu> for the patch
+ * which adds the ctime update of the renamed object
+ */
old_inode->i_ctime = ctime;
if (new_dentry_inode) {
- // adjust link number of the victim
+ /* adjust link number of the victim */
if (S_ISDIR(new_dentry_inode->i_mode)) {
clear_nlink(new_dentry_inode);
} else {
@@ -1460,25 +1559,32 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (S_ISDIR(old_inode_mode)) {
/* adjust ".." of renamed directory */
set_ino_in_dir_entry(&dot_dot_de, INODE_PKEY(new_dir));
- journal_mark_dirty(&th, new_dir->i_sb, dot_dot_de.de_bh);
+ journal_mark_dirty(&th, dot_dot_de.de_bh);
+ /*
+ * there (in new_dir) was no directory, so it got a new link
+ * (".." of renamed directory)
+ */
if (!new_dentry_inode)
- /* there (in new_dir) was no directory, so it got new link
- (".." of renamed directory) */
INC_DIR_INODE_NLINK(new_dir);
/* old directory lost one link - ".. " of renamed directory */
DEC_DIR_INODE_NLINK(old_dir);
}
- // looks like in 2.3.99pre3 brelse is atomic. so we can use pathrelse
+ /*
+ * looks like in 2.3.99pre3 brelse is atomic.
+ * so we can use pathrelse
+ */
pathrelse(&new_entry_path);
pathrelse(&dot_dot_entry_path);
- // FIXME: this reiserfs_cut_from_item's return value may screw up
- // anybody, but it will panic if will not be able to find the
- // entry. This needs one more clean up
+ /*
+ * FIXME: this reiserfs_cut_from_item's return value may screw up
+ * anybody, but it will panic if it is not able to find the
+ * entry. This needs one more cleanup
+ */
if (reiserfs_cut_from_item
- (&th, &old_entry_path, &(old_de.de_entry_key), old_dir, NULL,
+ (&th, &old_entry_path, &old_de.de_entry_key, old_dir, NULL,
0) < 0)
reiserfs_error(old_dir->i_sb, "vs-7060",
"couldn't not cut old name. Fsck later?");
@@ -1496,16 +1602,13 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
reiserfs_update_sd(&th, new_dentry_inode);
}
- retval = journal_end(&th, old_dir->i_sb, jbegin_count);
+ retval = journal_end(&th);
reiserfs_write_unlock(old_dir->i_sb);
return retval;
}
-/*
- * directories can handle most operations...
- */
+/* directories can handle most operations... */
const struct inode_operations reiserfs_dir_inode_operations = {
- //&reiserfs_dir_operations, /* default_file_ops */
.create = reiserfs_create,
.lookup = reiserfs_lookup,
.link = reiserfs_link,
diff --git a/fs/reiserfs/objectid.c b/fs/reiserfs/objectid.c
index f732d6a5251..99a5d5dae46 100644
--- a/fs/reiserfs/objectid.c
+++ b/fs/reiserfs/objectid.c
@@ -7,7 +7,7 @@
#include <linux/time.h>
#include "reiserfs.h"
-// find where objectid map starts
+/* find where objectid map starts */
#define objectid_map(s,rs) (old_format_only (s) ? \
(__le32 *)((struct reiserfs_super_block_v1 *)(rs) + 1) :\
(__le32 *)((rs) + 1))
@@ -20,7 +20,7 @@ static void check_objectid_map(struct super_block *s, __le32 * map)
reiserfs_panic(s, "vs-15010", "map corrupted: %lx",
(long unsigned int)le32_to_cpu(map[0]));
- // FIXME: add something else here
+ /* FIXME: add something else here */
}
#else
@@ -29,19 +29,21 @@ static void check_objectid_map(struct super_block *s, __le32 * map)
}
#endif
-/* When we allocate objectids we allocate the first unused objectid.
- Each sequence of objectids in use (the odd sequences) is followed
- by a sequence of objectids not in use (the even sequences). We
- only need to record the last objectid in each of these sequences
- (both the odd and even sequences) in order to fully define the
- boundaries of the sequences. A consequence of allocating the first
- objectid not in use is that under most conditions this scheme is
- extremely compact. The exception is immediately after a sequence
- of operations which deletes a large number of objects of
- non-sequential objectids, and even then it will become compact
- again as soon as more objects are created. Note that many
- interesting optimizations of layout could result from complicating
- objectid assignment, but we have deferred making them for now. */
+/*
+ * When we allocate objectids we allocate the first unused objectid.
+ * Each sequence of objectids in use (the odd sequences) is followed
+ * by a sequence of objectids not in use (the even sequences). We
+ * only need to record the last objectid in each of these sequences
+ * (both the odd and even sequences) in order to fully define the
+ * boundaries of the sequences. A consequence of allocating the first
+ * objectid not in use is that under most conditions this scheme is
+ * extremely compact. The exception is immediately after a sequence
+ * of operations which deletes a large number of objects of
+ * non-sequential objectids, and even then it will become compact
+ * again as soon as more objects are created. Note that many
+ * interesting optimizations of layout could result from complicating
+ * objectid assignment, but we have deferred making them for now.
+ */
/* get unique object identifier */
__u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th)
@@ -64,26 +66,30 @@ __u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th)
return 0;
}
- /* This incrementation allocates the first unused objectid. That
- is to say, the first entry on the objectid map is the first
- unused objectid, and by incrementing it we use it. See below
- where we check to see if we eliminated a sequence of unused
- objectids.... */
+ /*
+ * This incrementation allocates the first unused objectid. That
+ * is to say, the first entry on the objectid map is the first
+ * unused objectid, and by incrementing it we use it. See below
+ * where we check to see if we eliminated a sequence of unused
+ * objectids....
+ */
map[1] = cpu_to_le32(unused_objectid + 1);
- /* Now we check to see if we eliminated the last remaining member of
- the first even sequence (and can eliminate the sequence by
- eliminating its last objectid from oids), and can collapse the
- first two odd sequences into one sequence. If so, then the net
- result is to eliminate a pair of objectids from oids. We do this
- by shifting the entire map to the left. */
+ /*
+ * Now we check to see if we eliminated the last remaining member of
+ * the first even sequence (and can eliminate the sequence by
+ * eliminating its last objectid from oids), and can collapse the
+ * first two odd sequences into one sequence. If so, then the net
+ * result is to eliminate a pair of objectids from oids. We do this
+ * by shifting the entire map to the left.
+ */
if (sb_oid_cursize(rs) > 2 && map[1] == map[2]) {
memmove(map + 1, map + 3,
(sb_oid_cursize(rs) - 3) * sizeof(__u32));
set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);
}
- journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));
+ journal_mark_dirty(th, SB_BUFFER_WITH_SB(s));
return unused_objectid;
}
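A worked example of the sequence encoding above (map contents illustrative):

	/*
	 *   map[] = { 1, 5, 8, 20 }, cursize = 4
	 *
	 * reads as: ids 1..4 in use, 5..7 free, 8..19 in use, 20.. free.
	 * reiserfs_get_unused_objectid() returns 5 and bumps map[1] to 6;
	 * after two more allocations map[1] reaches 8 == map[2], the free
	 * gap is exhausted, and the memmove above collapses the map to
	 * { 1, 20 } with cursize = 2.
	 */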
@@ -97,30 +103,33 @@ void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
int i = 0;
BUG_ON(!th->t_trans_id);
- //return;
+ /* return; */
check_objectid_map(s, map);
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
- journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));
-
- /* start at the beginning of the objectid map (i = 0) and go to
- the end of it (i = disk_sb->s_oid_cursize). Linear search is
- what we use, though it is possible that binary search would be
- more efficient after performing lots of deletions (which is
- when oids is large.) We only check even i's. */
+ journal_mark_dirty(th, SB_BUFFER_WITH_SB(s));
+
+ /*
+ * start at the beginning of the objectid map (i = 0) and go to
+ * the end of it (i = disk_sb->s_oid_cursize). Linear search is
+ * what we use, though it is possible that binary search would be
+ * more efficient after performing lots of deletions (which is
+ * when oids is large.) We only check even i's.
+ */
while (i < sb_oid_cursize(rs)) {
if (objectid_to_release == le32_to_cpu(map[i])) {
/* This incrementation unallocates the objectid. */
- //map[i]++;
le32_add_cpu(&map[i], 1);
- /* Did we unallocate the last member of an odd sequence, and can shrink oids? */
+ /*
+ * Did we unallocate the last member of an
+ * odd sequence, and can we shrink oids?
+ */
if (map[i] == map[i + 1]) {
/* shrink objectid map */
memmove(map + i, map + i + 2,
(sb_oid_cursize(rs) - i -
2) * sizeof(__u32));
- //disk_sb->s_oid_cursize -= 2;
set_sb_oid_cursize(rs, sb_oid_cursize(rs) - 2);
RFALSE(sb_oid_cursize(rs) < 2 ||
@@ -135,14 +144,19 @@ void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
objectid_to_release < le32_to_cpu(map[i + 1])) {
/* size of objectid map is not changed */
if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) {
- //objectid_map[i+1]--;
le32_add_cpu(&map[i + 1], -1);
return;
}
- /* JDM comparing two little-endian values for equality -- safe */
+ /*
+ * JDM comparing two little-endian values for
+ * equality -- safe
+ */
+ /*
+ * objectid map must be expanded, but
+ * there is no space
+ */
if (sb_oid_cursize(rs) == sb_oid_maxsize(rs)) {
- /* objectid map must be expanded, but there is no space */
PROC_INFO_INC(s, leaked_oid);
return;
}
@@ -178,8 +192,9 @@ int reiserfs_convert_objectid_map_v1(struct super_block *s)
new_objectid_map = (__le32 *) (disk_sb + 1);
if (cur_size > new_size) {
- /* mark everyone used that was listed as free at the end of the objectid
- ** map
+ /*
+ * mark as used everything that was listed as free at
+ * the end of the objectid map
*/
objectid_map[new_size - 1] = objectid_map[cur_size - 1];
set_sb_oid_cursize(disk_sb, new_size);
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 54944d5a4a6..c9b47e91baf 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -172,18 +172,19 @@ static char *is_there_reiserfs_struct(char *fmt, int *what)
return k;
}
-/* debugging reiserfs we used to print out a lot of different
- variables, like keys, item headers, buffer heads etc. Values of
- most fields matter. So it took a long time just to write
- appropriative printk. With this reiserfs_warning you can use format
- specification for complex structures like you used to do with
- printfs for integers, doubles and pointers. For instance, to print
- out key structure you have to write just:
- reiserfs_warning ("bad key %k", key);
- instead of
- printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid,
- key->k_offset, key->k_uniqueness);
-*/
+/*
+ * When debugging reiserfs we used to print out a lot of different
+ * variables, like keys, item headers, buffer heads etc. Values of
+ * most fields matter. So it took a long time just to write an
+ * appropriate printk. With this reiserfs_warning you can use format
+ * specification for complex structures like you used to do with
+ * printfs for integers, doubles and pointers. For instance, to print
+ * out key structure you have to write just:
+ * reiserfs_warning ("bad key %k", key);
+ * instead of
+ * printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid,
+ * key->k_offset, key->k_uniqueness);
+ */
static DEFINE_SPINLOCK(error_lock);
static void prepare_error_buf(const char *fmt, va_list args)
{
@@ -243,15 +244,16 @@ static void prepare_error_buf(const char *fmt, va_list args)
}
-/* in addition to usual conversion specifiers this accepts reiserfs
- specific conversion specifiers:
- %k to print little endian key,
- %K to print cpu key,
- %h to print item_head,
- %t to print directory entry
- %z to print block head (arg must be struct buffer_head *
- %b to print buffer_head
-*/
+/*
+ * in addition to usual conversion specifiers this accepts reiserfs
+ * specific conversion specifiers:
+ * %k to print little endian key,
+ * %K to print cpu key,
+ * %h to print item_head,
+ * %t to print directory entry,
+ * %z to print block head (arg must be struct buffer_head *),
+ * %b to print buffer_head
+ */
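For example, combining the extended specifiers with the usual ones (the error id "vs-XXXX" below is a placeholder, not one from this patch):

	reiserfs_warning(sb, "vs-XXXX", "bad key %k in item %h", key, ih);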
#define do_reiserfs_warning(fmt)\
{\
@@ -304,50 +306,52 @@ void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...)
#endif
}
-/* The format:
-
- maintainer-errorid: [function-name:] message
-
- where errorid is unique to the maintainer and function-name is
- optional, is recommended, so that anyone can easily find the bug
- with a simple grep for the short to type string
- maintainer-errorid. Don't bother with reusing errorids, there are
- lots of numbers out there.
-
- Example:
-
- reiserfs_panic(
- p_sb, "reiser-29: reiserfs_new_blocknrs: "
- "one of search_start or rn(%d) is equal to MAX_B_NUM,"
- "which means that we are optimizing location based on the bogus location of a temp buffer (%p).",
- rn, bh
- );
-
- Regular panic()s sometimes clear the screen before the message can
- be read, thus the need for the while loop.
-
- Numbering scheme for panic used by Vladimir and Anatoly( Hans completely ignores this scheme, and considers it
- pointless complexity):
-
- panics in reiserfs.h have numbers from 1000 to 1999
- super.c 2000 to 2999
- preserve.c (unused) 3000 to 3999
- bitmap.c 4000 to 4999
- stree.c 5000 to 5999
- prints.c 6000 to 6999
- namei.c 7000 to 7999
- fix_nodes.c 8000 to 8999
- dir.c 9000 to 9999
- lbalance.c 10000 to 10999
- ibalance.c 11000 to 11999 not ready
- do_balan.c 12000 to 12999
- inode.c 13000 to 13999
- file.c 14000 to 14999
- objectid.c 15000 - 15999
- buffer.c 16000 - 16999
- symlink.c 17000 - 17999
-
- . */
+/*
+ * The format:
+ *
+ * maintainer-errorid: [function-name:] message
+ *
+ * where errorid is unique to the maintainer and function-name is
+ * optional, is recommended, so that anyone can easily find the bug
+ * with a simple grep for the short-to-type string
+ * maintainer-errorid. Don't bother with reusing errorids, there are
+ * lots of numbers out there.
+ *
+ * Example:
+ *
+ * reiserfs_panic(
+ * p_sb, "reiser-29: reiserfs_new_blocknrs: "
+ * "one of search_start or rn(%d) is equal to MAX_B_NUM,"
+ * "which means that we are optimizing location based on the "
+ * "bogus location of a temp buffer (%p).",
+ * rn, bh
+ * );
+ *
+ * Regular panic()s sometimes clear the screen before the message can
+ * be read, thus the need for the while loop.
+ *
+ * Numbering scheme for panic used by Vladimir and Anatoly (Hans
+ * completely ignores this scheme, and considers it pointless
+ * complexity):
+ *
+ * panics in reiserfs_fs.h have numbers from 1000 to 1999
+ * super.c 2000 to 2999
+ * preserve.c (unused) 3000 to 3999
+ * bitmap.c 4000 to 4999
+ * stree.c 5000 to 5999
+ * prints.c 6000 to 6999
+ * namei.c 7000 to 7999
+ * fix_nodes.c 8000 to 8999
+ * dir.c 9000 to 9999
+ * lbalance.c 10000 to 10999
+ * ibalance.c 11000 to 11999 not ready
+ * do_balan.c 12000 to 12999
+ * inode.c 13000 to 13999
+ * file.c 14000 to 14999
+ * objectid.c 15000 - 15999
+ * buffer.c 16000 - 16999
+ * symlink.c 17000 - 17999
+ */
void __reiserfs_panic(struct super_block *sb, const char *id,
const char *function, const char *fmt, ...)
@@ -411,9 +415,11 @@ void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...)
reiserfs_abort_journal(sb, errno);
}
-/* this prints internal nodes (4 keys/items in line) (dc_number,
- dc_size)[k_dirid, k_objectid, k_offset, k_uniqueness](dc_number,
- dc_size)...*/
+/*
+ * this prints internal nodes (4 keys/items in line) (dc_number,
+ * dc_size)[k_dirid, k_objectid, k_offset, k_uniqueness](dc_number,
+ * dc_size)...
+ */
static int print_internal(struct buffer_head *bh, int first, int last)
{
struct reiserfs_key *key;
@@ -439,7 +445,7 @@ static int print_internal(struct buffer_head *bh, int first, int last)
dc = B_N_CHILD(bh, from);
reiserfs_printk("PTR %d: %y ", from, dc);
- for (i = from, key = B_N_PDELIM_KEY(bh, from), dc++; i < to;
+ for (i = from, key = internal_key(bh, from), dc++; i < to;
i++, key++, dc++) {
reiserfs_printk("KEY %d: %k PTR %d: %y ", i, key, i + 1, dc);
if (i && i % 4 == 0)
@@ -463,7 +469,7 @@ static int print_leaf(struct buffer_head *bh, int print_mode, int first,
check_leaf(bh);
blkh = B_BLK_HEAD(bh);
- ih = B_N_PITEM_HEAD(bh, 0);
+ ih = item_head(bh, 0);
nr = blkh_nr_item(blkh);
printk
@@ -496,7 +502,7 @@ static int print_leaf(struct buffer_head *bh, int print_mode, int first,
("-------------------------------------------------------------------------------\n");
reiserfs_printk("|%2d| %h |\n", i, ih);
if (print_mode & PRINT_LEAF_ITEMS)
- op_print_item(ih, B_I_PITEM(bh, ih));
+ op_print_item(ih, ih_item_body(bh, ih));
}
printk
@@ -543,9 +549,11 @@ static int print_super_block(struct buffer_head *bh)
printk("Block count %u\n", sb_block_count(rs));
printk("Blocksize %d\n", sb_blocksize(rs));
printk("Free blocks %u\n", sb_free_blocks(rs));
- // FIXME: this would be confusing if
- // someone stores reiserfs super block in some data block ;)
+ /*
+ * FIXME: this would be confusing if
+ * someone stores reiserfs super block in some data block ;)
// skipped = (bh->b_blocknr * bh->b_size) / sb_blocksize(rs);
+ */
skipped = bh->b_blocknr;
data_blocks = sb_block_count(rs) - skipped - 1 - sb_bmap_nr(rs) -
(!is_reiserfs_jr(rs) ? sb_jp_journal_size(rs) +
@@ -581,8 +589,8 @@ static int print_desc_block(struct buffer_head *bh)
return 0;
}
-
-void print_block(struct buffer_head *bh, ...) //int print_mode, int first, int last)
+/* ..., int print_mode, int first, int last) */
+void print_block(struct buffer_head *bh, ...)
{
va_list args;
int mode, first, last;
@@ -644,11 +652,11 @@ void store_print_tb(struct tree_balance *tb)
"* %d * %3lld(%2d) * %3lld(%2d) * %3lld(%2d) * %5lld * %5lld * %5lld * %5lld * %5lld *\n",
h,
(tbSh) ? (long long)(tbSh->b_blocknr) : (-1LL),
- (tbSh) ? atomic_read(&(tbSh->b_count)) : -1,
+ (tbSh) ? atomic_read(&tbSh->b_count) : -1,
(tb->L[h]) ? (long long)(tb->L[h]->b_blocknr) : (-1LL),
- (tb->L[h]) ? atomic_read(&(tb->L[h]->b_count)) : -1,
+ (tb->L[h]) ? atomic_read(&tb->L[h]->b_count) : -1,
(tb->R[h]) ? (long long)(tb->R[h]->b_blocknr) : (-1LL),
- (tb->R[h]) ? atomic_read(&(tb->R[h]->b_count)) : -1,
+ (tb->R[h]) ? atomic_read(&tb->R[h]->b_count) : -1,
(tbFh) ? (long long)(tbFh->b_blocknr) : (-1LL),
(tb->FL[h]) ? (long long)(tb->FL[h]->
b_blocknr) : (-1LL),
@@ -665,9 +673,9 @@ void store_print_tb(struct tree_balance *tb)
"* h * size * ln * lb * rn * rb * blkn * s0 * s1 * s1b * s2 * s2b * curb * lk * rk *\n"
"* 0 * %4d * %2d * %2d * %2d * %2d * %4d * %2d * %2d * %3d * %2d * %3d * %4d * %2d * %2d *\n",
tb->insert_size[0], tb->lnum[0], tb->lbytes, tb->rnum[0],
- tb->rbytes, tb->blknum[0], tb->s0num, tb->s1num, tb->s1bytes,
- tb->s2num, tb->s2bytes, tb->cur_blknum, tb->lkey[0],
- tb->rkey[0]);
+ tb->rbytes, tb->blknum[0], tb->s0num, tb->snum[0],
+ tb->sbytes[0], tb->snum[1], tb->sbytes[1],
+ tb->cur_blknum, tb->lkey[0], tb->rkey[0]);
/* this prints balance parameters for non-leaf levels */
h = 0;
@@ -690,7 +698,7 @@ void store_print_tb(struct tree_balance *tb)
"%p (%llu %d)%s", tb->FEB[i],
tb->FEB[i] ? (unsigned long long)tb->FEB[i]->
b_blocknr : 0ULL,
- tb->FEB[i] ? atomic_read(&(tb->FEB[i]->b_count)) : 0,
+ tb->FEB[i] ? atomic_read(&tb->FEB[i]->b_count) : 0,
(i == ARRAY_SIZE(tb->FEB) - 1) ? "\n" : ", ");
sprintf(print_tb_buf + strlen(print_tb_buf),
@@ -744,8 +752,8 @@ void check_leaf(struct buffer_head *bh)
if (!bh)
return;
check_leaf_block_head(bh);
- for (i = 0, ih = B_N_PITEM_HEAD(bh, 0); i < B_NR_ITEMS(bh); i++, ih++)
- op_check_item(ih, B_I_PITEM(bh, ih));
+ for (i = 0, ih = item_head(bh, 0); i < B_NR_ITEMS(bh); i++, ih++)
+ op_check_item(ih, ih_item_body(bh, ih));
}
void check_internal(struct buffer_head *bh)
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index 83d4eac8059..bf53888c7f5 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -1,5 +1,6 @@
/*
- * Copyright 1996, 1997, 1998 Hans Reiser, see reiserfs/README for licensing and copyright details
+ * Copyright 1996, 1997, 1998 Hans Reiser, see reiserfs/README for
+ * licensing and copyright details
*/
#include <linux/reiserfs_fs.h>
@@ -23,52 +24,73 @@
struct reiserfs_journal_list;
-/** bitmasks for i_flags field in reiserfs-specific part of inode */
+/* bitmasks for i_flags field in reiserfs-specific part of inode */
typedef enum {
- /** this says what format of key do all items (but stat data) of
- an object have. If this is set, that format is 3.6 otherwise
- - 3.5 */
+ /*
+ * this says what format of key all items (but stat data) of
+ * an object have. If this is set, that format is 3.6, otherwise 3.5
+ */
i_item_key_version_mask = 0x0001,
- /** If this is unset, object has 3.5 stat data, otherwise, it has
- 3.6 stat data with 64bit size, 32bit nlink etc. */
+
+ /*
+ * If this is unset, object has 3.5 stat data, otherwise,
+ * it has 3.6 stat data with 64bit size, 32bit nlink etc.
+ */
i_stat_data_version_mask = 0x0002,
- /** file might need tail packing on close */
+
+ /* file might need tail packing on close */
i_pack_on_close_mask = 0x0004,
- /** don't pack tail of file */
+
+ /* don't pack tail of file */
i_nopack_mask = 0x0008,
- /** If those is set, "safe link" was created for this file during
- truncate or unlink. Safe link is used to avoid leakage of disk
- space on crash with some files open, but unlinked. */
+
+ /*
+ * If either of these is set, "safe link" was created for this
+ * file during truncate or unlink. Safe link is used to avoid
+ * leakage of disk space on crash with some files open, but unlinked.
+ */
i_link_saved_unlink_mask = 0x0010,
i_link_saved_truncate_mask = 0x0020,
+
i_has_xattr_dir = 0x0040,
i_data_log = 0x0080,
} reiserfs_inode_flags;
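A minimal sketch of how these transient masks would be tested and set (REISERFS_I() as used elsewhere in the tree; the helpers themselves are hypothetical):

	static inline void reiserfs_mark_nopack(struct inode *inode)
	{
		/* hypothetical helper: suppress tail packing for this file */
		REISERFS_I(inode)->i_flags |= i_nopack_mask;
	}

	static inline int reiserfs_needs_tail_pack(struct inode *inode)
	{
		/* hypothetical helper: flagged for packing on close? */
		return REISERFS_I(inode)->i_flags & i_pack_on_close_mask;
	}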
struct reiserfs_inode_info {
__u32 i_key[4]; /* key is still 4 32 bit integers */
- /** transient inode flags that are never stored on disk. Bitmasks
- for this field are defined above. */
+
+ /*
+ * transient inode flags that are never stored on disk. Bitmasks
+ * for this field are defined above.
+ */
__u32 i_flags;
- __u32 i_first_direct_byte; // offset of first byte stored in direct item.
+ /* offset of first byte stored in direct item. */
+ __u32 i_first_direct_byte;
/* copy of persistent inode flags read from sd_attrs. */
__u32 i_attrs;
- int i_prealloc_block; /* first unused block of a sequence of unused blocks */
+ /* first unused block of a sequence of unused blocks */
+ int i_prealloc_block;
int i_prealloc_count; /* length of that sequence */
- struct list_head i_prealloc_list; /* per-transaction list of inodes which
- * have preallocated blocks */
- unsigned new_packing_locality:1; /* new_packig_locality is created; new blocks
- * for the contents of this directory should be
- * displaced */
+ /* per-transaction list of inodes which have preallocated blocks */
+ struct list_head i_prealloc_list;
- /* we use these for fsync or O_SYNC to decide which transaction
- ** needs to be committed in order for this inode to be properly
- ** flushed */
+ /*
+ * new_packing_locality is created; new blocks for the contents
+ * of this directory should be displaced
+ */
+ unsigned new_packing_locality:1;
+
+ /*
+ * we use these for fsync or O_SYNC to decide which transaction
+ * needs to be committed in order for this inode to be properly
+ * flushed
+ */
unsigned int i_trans_id;
+
struct reiserfs_journal_list *i_jl;
atomic_t openers;
struct mutex tailpack;
@@ -82,9 +104,10 @@ typedef enum {
reiserfs_attrs_cleared = 0x00000001,
} reiserfs_super_block_flags;
-/* struct reiserfs_super_block accessors/mutators
- * since this is a disk structure, it will always be in
- * little endian format. */
+/*
+ * struct reiserfs_super_block accessors/mutators since this is a disk
+ * structure, it will always be in little endian format.
+ */
#define sb_block_count(sbp) (le32_to_cpu((sbp)->s_v1.s_block_count))
#define set_sb_block_count(sbp,v) ((sbp)->s_v1.s_block_count = cpu_to_le32(v))
#define sb_free_blocks(sbp) (le32_to_cpu((sbp)->s_v1.s_free_blocks))
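Usage sketch (rs is a struct reiserfs_super_block * as in print_super_block earlier; 'grow' is an assumed local):

	/* all arithmetic happens in CPU byte order; the structure itself
	 * stays little endian on every host */
	set_sb_block_count(rs, sb_block_count(rs) + grow);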
@@ -152,48 +175,61 @@ typedef enum {
/* LOGGING -- */
-/* These all interelate for performance.
-**
-** If the journal block count is smaller than n transactions, you lose speed.
-** I don't know what n is yet, I'm guessing 8-16.
-**
-** typical transaction size depends on the application, how often fsync is
-** called, and how many metadata blocks you dirty in a 30 second period.
-** The more small files (<16k) you use, the larger your transactions will
-** be.
-**
-** If your journal fills faster than dirty buffers get flushed to disk, it must flush them before allowing the journal
-** to wrap, which slows things down. If you need high speed meta data updates, the journal should be big enough
-** to prevent wrapping before dirty meta blocks get to disk.
-**
-** If the batch max is smaller than the transaction max, you'll waste space at the end of the journal
-** because journal_end sets the next transaction to start at 0 if the next transaction has any chance of wrapping.
-**
-** The large the batch max age, the better the speed, and the more meta data changes you'll lose after a crash.
-**
-*/
+/*
+ * These all interrelate for performance.
+ *
+ * If the journal block count is smaller than n transactions, you lose speed.
+ * I don't know what n is yet, I'm guessing 8-16.
+ *
+ * typical transaction size depends on the application, how often fsync is
+ * called, and how many metadata blocks you dirty in a 30 second period.
+ * The more small files (<16k) you use, the larger your transactions will
+ * be.
+ *
+ * If your journal fills faster than dirty buffers get flushed to disk, it
+ * must flush them before allowing the journal to wrap, which slows things
+ * down. If you need high speed meta data updates, the journal should be
+ * big enough to prevent wrapping before dirty meta blocks get to disk.
+ *
+ * If the batch max is smaller than the transaction max, you'll waste space
+ * at the end of the journal because journal_end sets the next transaction
+ * to start at 0 if the next transaction has any chance of wrapping.
+ *
+ * The larger the batch max age, the better the speed, and the more meta
+ * data changes you'll lose after a crash.
+ */
/* don't mess with these for a while */
- /* we have a node size define somewhere in reiserfs_fs.h. -Hans */
+/* we have a node size define somewhere in reiserfs_fs.h. -Hans */
#define JOURNAL_BLOCK_SIZE 4096 /* BUG gotta get rid of this */
#define JOURNAL_MAX_CNODE 1500 /* max cnodes to allocate. */
#define JOURNAL_HASH_SIZE 8192
-#define JOURNAL_NUM_BITMAPS 5 /* number of copies of the bitmaps to have floating. Must be >= 2 */
-
-/* One of these for every block in every transaction
-** Each one is in two hash tables. First, a hash of the current transaction, and after journal_end, a
-** hash of all the in memory transactions.
-** next and prev are used by the current transaction (journal_hash).
-** hnext and hprev are used by journal_list_hash. If a block is in more than one transaction, the journal_list_hash
-** links it in multiple times. This allows flush_journal_list to remove just the cnode belonging
-** to a given transaction.
-*/
+
+/* number of copies of the bitmaps to have floating. Must be >= 2 */
+#define JOURNAL_NUM_BITMAPS 5
+
+/*
+ * One of these for every block in every transaction.
+ * Each one is in two hash tables. First, a hash of the current transaction,
+ * and after journal_end, a hash of all the in memory transactions.
+ * next and prev are used by the current transaction (journal_hash).
+ * hnext and hprev are used by journal_list_hash. If a block is in more
+ * than one transaction, the journal_list_hash links it in multiple times.
+ * This allows flush_journal_list to remove just the cnode belonging to a
+ * given transaction.
+ */
struct reiserfs_journal_cnode {
struct buffer_head *bh; /* real buffer head */
struct super_block *sb; /* dev of real buffer head */
- __u32 blocknr; /* block number of real buffer head, == 0 when buffer on disk */
+
+ /* block number of real buffer head, == 0 when buffer on disk */
+ __u32 blocknr;
+
unsigned long state;
- struct reiserfs_journal_list *jlist; /* journal list this cnode lives in */
+
+ /* journal list this cnode lives in */
+ struct reiserfs_journal_list *jlist;
+
struct reiserfs_journal_cnode *next; /* next in transaction list */
struct reiserfs_journal_cnode *prev; /* prev in transaction list */
struct reiserfs_journal_cnode *hprev; /* prev in hash list */
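A sketch of walking the journal_list_hash side of the double membership described above (the bucket argument stands in for a hash chain head; the helper is hypothetical):

	static struct reiserfs_journal_cnode *
	find_in_list_hash(struct reiserfs_journal_cnode *bucket, __u32 blocknr)
	{
		struct reiserfs_journal_cnode *cn;

		/* hnext/hprev link the journal_list_hash chain; next/prev
		 * belong to the current transaction's journal_hash */
		for (cn = bucket; cn != NULL; cn = cn->hnext)
			if (cn->blocknr == blocknr)
				return cn;
		return NULL;
	}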
@@ -212,18 +248,22 @@ struct reiserfs_list_bitmap {
};
/*
-** one of these for each transaction. The most important part here is the j_realblock.
-** this list of cnodes is used to hash all the blocks in all the commits, to mark all the
-** real buffer heads dirty once all the commits hit the disk,
-** and to make sure every real block in a transaction is on disk before allowing the log area
-** to be overwritten */
+ * one of these for each transaction. The most important part here is the
+ * j_realblock. This list of cnodes is used to hash all the blocks in all
+ * the commits, to mark all the real buffer heads dirty once all the commits
+ * hit the disk, and to make sure every real block in a transaction is on
+ * disk before allowing the log area to be overwritten
+ */
struct reiserfs_journal_list {
unsigned long j_start;
unsigned long j_state;
unsigned long j_len;
atomic_t j_nonzerolen;
atomic_t j_commit_left;
- atomic_t j_older_commits_done; /* all commits older than this on disk */
+
+ /* all commits older than this on disk */
+ atomic_t j_older_commits_done;
+
struct mutex j_commit_mutex;
unsigned int j_trans_id;
time_t j_timestamp;
@@ -234,11 +274,15 @@ struct reiserfs_journal_list {
/* time ordered list of all active transactions */
struct list_head j_list;
- /* time ordered list of all transactions we haven't tried to flush yet */
+ /*
+ * time ordered list of all transactions we haven't tried
+ * to flush yet
+ */
struct list_head j_working_list;
/* list of tail conversion targets in need of flush before commit */
struct list_head j_tail_bh_list;
+
/* list of data=ordered buffers in need of flush before commit */
struct list_head j_bh_list;
int j_refcount;
@@ -246,46 +290,83 @@ struct reiserfs_journal_list {
struct reiserfs_journal {
struct buffer_head **j_ap_blocks; /* journal blocks on disk */
- struct reiserfs_journal_cnode *j_last; /* newest journal block */
- struct reiserfs_journal_cnode *j_first; /* oldest journal block. start here for traverse */
+ /* newest journal block */
+ struct reiserfs_journal_cnode *j_last;
+
+ /* oldest journal block. start here for traverse */
+ struct reiserfs_journal_cnode *j_first;
struct block_device *j_dev_bd;
fmode_t j_dev_mode;
- int j_1st_reserved_block; /* first block on s_dev of reserved area journal */
+
+ /* first block on s_dev of reserved area journal */
+ int j_1st_reserved_block;
unsigned long j_state;
unsigned int j_trans_id;
unsigned long j_mount_id;
- unsigned long j_start; /* start of current waiting commit (index into j_ap_blocks) */
+
+ /* start of current waiting commit (index into j_ap_blocks) */
+ unsigned long j_start;
unsigned long j_len; /* length of current waiting commit */
- unsigned long j_len_alloc; /* number of buffers requested by journal_begin() */
+
+ /* number of buffers requested by journal_begin() */
+ unsigned long j_len_alloc;
+
atomic_t j_wcount; /* count of writers for current commit */
- unsigned long j_bcount; /* batch count. allows turning X transactions into 1 */
- unsigned long j_first_unflushed_offset; /* first unflushed transactions offset */
- unsigned j_last_flush_trans_id; /* last fully flushed journal timestamp */
+
+ /* batch count. allows turning X transactions into 1 */
+ unsigned long j_bcount;
+
+ /* first unflushed transactions offset */
+ unsigned long j_first_unflushed_offset;
+
+ /* last fully flushed journal timestamp */
+ unsigned j_last_flush_trans_id;
+
struct buffer_head *j_header_bh;
time_t j_trans_start_time; /* time this transaction started */
struct mutex j_mutex;
struct mutex j_flush_mutex;
- wait_queue_head_t j_join_wait; /* wait for current transaction to finish before starting new one */
- atomic_t j_jlock; /* lock for j_join_wait */
+
+ /* wait for current transaction to finish before starting new one */
+ wait_queue_head_t j_join_wait;
+
+ atomic_t j_jlock; /* lock for j_join_wait */
int j_list_bitmap_index; /* number of next list bitmap to use */
- int j_must_wait; /* no more journal begins allowed. MUST sleep on j_join_wait */
- int j_next_full_flush; /* next journal_end will flush all journal list */
- int j_next_async_flush; /* next journal_end will flush all async commits */
+
+ /* no more journal begins allowed. MUST sleep on j_join_wait */
+ int j_must_wait;
+
+ /* next journal_end will flush all journal list */
+ int j_next_full_flush;
+
+ /* next journal_end will flush all async commits */
+ int j_next_async_flush;
int j_cnode_used; /* number of cnodes on the used list */
int j_cnode_free; /* number of cnodes on the free list */
- unsigned int j_trans_max; /* max number of blocks in a transaction. */
- unsigned int j_max_batch; /* max number of blocks to batch into a trans */
- unsigned int j_max_commit_age; /* in seconds, how old can an async commit be */
- unsigned int j_max_trans_age; /* in seconds, how old can a transaction be */
- unsigned int j_default_max_commit_age; /* the default for the max commit age */
+ /* max number of blocks in a transaction. */
+ unsigned int j_trans_max;
+
+ /* max number of blocks to batch into a trans */
+ unsigned int j_max_batch;
+
+ /* in seconds, how old can an async commit be */
+ unsigned int j_max_commit_age;
+
+ /* in seconds, how old can a transaction be */
+ unsigned int j_max_trans_age;
+
+ /* the default for the max commit age */
+ unsigned int j_default_max_commit_age;
struct reiserfs_journal_cnode *j_cnode_free_list;
- struct reiserfs_journal_cnode *j_cnode_free_orig; /* orig pointer returned from vmalloc */
+
+ /* orig pointer returned from vmalloc */
+ struct reiserfs_journal_cnode *j_cnode_free_orig;
struct reiserfs_journal_list *j_current_jl;
int j_free_bitmap_nodes;
@@ -306,14 +387,21 @@ struct reiserfs_journal {
/* list of all active transactions */
struct list_head j_journal_list;
+
/* lists that haven't been touched by writeback attempts */
struct list_head j_working_list;
- struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS]; /* array of bitmaps to record the deleted blocks */
- struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE]; /* hash table for real buffer heads in current trans */
- struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all
- the transactions */
- struct list_head j_prealloc_list; /* list of inodes which have preallocated blocks */
+ /* hash table for real buffer heads in current trans */
+ struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE];
+
+ /* hash table for all the real buffer heads in all the transactions */
+ struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE];
+
+ /* array of bitmaps to record the deleted blocks */
+ struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS];
+
+ /* list of inodes which have preallocated blocks */
+ struct list_head j_prealloc_list;
int j_persistent_trans;
unsigned long j_max_trans_size;
unsigned long j_max_batch_size;
@@ -328,11 +416,12 @@ struct reiserfs_journal {
enum journal_state_bits {
J_WRITERS_BLOCKED = 1, /* set when new writers not allowed */
- J_WRITERS_QUEUED, /* set when log is full due to too many writers */
- J_ABORTED, /* set when log is aborted */
+ J_WRITERS_QUEUED, /* set when log is full due to too many writers */
+ J_ABORTED, /* set when log is aborted */
};
-#define JOURNAL_DESC_MAGIC "ReIsErLB" /* ick. magic string to find desc blocks in the journal */
+/* ick. magic string to find desc blocks in the journal */
+#define JOURNAL_DESC_MAGIC "ReIsErLB"
typedef __u32(*hashf_t) (const signed char *, int);
@@ -364,7 +453,10 @@ typedef struct reiserfs_proc_info_data {
stat_cnt_t leaked_oid;
stat_cnt_t leaves_removable;
- /* balances per level. Use explicit 5 as MAX_HEIGHT is not visible yet. */
+ /*
+ * balances per level.
+ * Use explicit 5 as MAX_HEIGHT is not visible yet.
+ */
stat_cnt_t balance_at[5]; /* XXX */
/* sbk == search_by_key */
stat_cnt_t sbk_read_at[5]; /* XXX */
@@ -416,47 +508,75 @@ typedef struct reiserfs_proc_info_data {
/* reiserfs union of in-core super block data */
struct reiserfs_sb_info {
- struct buffer_head *s_sbh; /* Buffer containing the super block */
- /* both the comment and the choice of
- name are unclear for s_rs -Hans */
- struct reiserfs_super_block *s_rs; /* Pointer to the super block in the buffer */
+ /* Buffer containing the super block */
+ struct buffer_head *s_sbh;
+
+ /* Pointer to the on-disk super block in the buffer */
+ struct reiserfs_super_block *s_rs;
struct reiserfs_bitmap_info *s_ap_bitmap;
- struct reiserfs_journal *s_journal; /* pointer to journal information */
+
+ /* pointer to journal information */
+ struct reiserfs_journal *s_journal;
+
unsigned short s_mount_state; /* reiserfs state (valid, invalid) */
/* Serialize writers access, replace the old bkl */
struct mutex lock;
+
/* Owner of the lock (can be recursive) */
struct task_struct *lock_owner;
+
/* Depth of the lock, start from -1 like the bkl */
int lock_depth;
+ struct workqueue_struct *commit_wq;
+
/* Comment? -Hans */
void (*end_io_handler) (struct buffer_head *, int);
- hashf_t s_hash_function; /* pointer to function which is used
- to sort names in directory. Set on
- mount */
- unsigned long s_mount_opt; /* reiserfs's mount options are set
- here (currently - NOTAIL, NOLOG,
- REPLAYONLY) */
-
- struct { /* This is a structure that describes block allocator options */
- unsigned long bits; /* Bitfield for enable/disable kind of options */
- unsigned long large_file_size; /* size started from which we consider file to be a large one(in blocks) */
+
+ /*
+ * pointer to function which is used to sort names in directory.
+ * Set on mount
+ */
+ hashf_t s_hash_function;
+
+ /* reiserfs's mount options are set here */
+ unsigned long s_mount_opt;
+
+ /* This is a structure that describes block allocator options */
+ struct {
+ /* Bitfield for enable/disable kind of options */
+ unsigned long bits;
+
+ /*
+	 * size (in blocks) starting from which we
+	 * consider a file to be a large one
+ */
+ unsigned long large_file_size;
+
int border; /* percentage of disk, border takes */
- int preallocmin; /* Minimal file size (in blocks) starting from which we do preallocations */
- int preallocsize; /* Number of blocks we try to prealloc when file
- reaches preallocmin size (in blocks) or
- prealloc_list is empty. */
+
+ /*
+ * Minimal file size (in blocks) starting
+ * from which we do preallocations
+ */
+ int preallocmin;
+
+ /*
+ * Number of blocks we try to prealloc when file
+ * reaches preallocmin size (in blocks) or prealloc_list
+	 * is empty.
+ */
+ int preallocsize;
} s_alloc_options;
/* Comment? -Hans */
wait_queue_head_t s_wait;
- /* To be obsoleted soon by per buffer seals.. -Hans */
- atomic_t s_generation_counter; // increased by one every time the
- // tree gets re-balanced
- unsigned long s_properties; /* File system properties. Currently holds
- on-disk FS format */
+ /* increased by one every time the tree gets re-balanced */
+ atomic_t s_generation_counter;
+
+ /* File system properties. Currently holds on-disk FS format */
+ unsigned long s_properties;
/* session statistics */
int s_disk_reads;
@@ -469,14 +589,23 @@ struct reiserfs_sb_info {
int s_bmaps_without_search;
int s_direct2indirect;
int s_indirect2direct;
- /* set up when it's ok for reiserfs_read_inode2() to read from
- disk inode with nlink==0. Currently this is only used during
- finish_unfinished() processing at mount time */
+
+ /*
+ * set up when it's ok for reiserfs_read_inode2() to read from
+ * disk inode with nlink==0. Currently this is only used during
+ * finish_unfinished() processing at mount time
+ */
int s_is_unlinked_ok;
+
reiserfs_proc_info_data_t s_proc_info_data;
struct proc_dir_entry *procdir;
- int reserved_blocks; /* amount of blocks reserved for further allocations */
- spinlock_t bitmap_lock; /* this lock on now only used to protect reserved_blocks variable */
+
+ /* amount of blocks reserved for further allocations */
+ int reserved_blocks;
+
+	/* this lock is now only used to protect the reserved_blocks variable */
+ spinlock_t bitmap_lock;
struct dentry *priv_root; /* root of /.reiserfs_priv */
struct dentry *xattr_root; /* root of /.reiserfs_priv/xattrs */
int j_errno;
@@ -492,14 +621,13 @@ struct reiserfs_sb_info {
char *s_jdev; /* Stored jdev for mount option showing */
#ifdef CONFIG_REISERFS_CHECK
- struct tree_balance *cur_tb; /*
- * Detects whether more than one
- * copy of tb exists per superblock
- * as a means of checking whether
- * do_balance is executing concurrently
- * against another tree reader/writer
- * on a same mount point.
- */
+ /*
+ * Detects whether more than one copy of tb exists per superblock
+ * as a means of checking whether do_balance is executing
+ * concurrently against another tree reader/writer on a same
+ * mount point.
+ */
+ struct tree_balance *cur_tb;
#endif
};
@@ -508,25 +636,36 @@ struct reiserfs_sb_info {
#define REISERFS_3_6 1
#define REISERFS_OLD_FORMAT 2
-enum reiserfs_mount_options {
/* Mount options */
- REISERFS_LARGETAIL, /* large tails will be created in a session */
- REISERFS_SMALLTAIL, /* small (for files less than block size) tails will be created in a session */
- REPLAYONLY, /* replay journal and return 0. Use by fsck */
- REISERFS_CONVERT, /* -o conv: causes conversion of old
- format super block to the new
- format. If not specified - old
- partition will be dealt with in a
- manner of 3.5.x */
-
-/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting
-** reiserfs disks from 3.5.19 or earlier. 99% of the time, this option
-** is not required. If the normal autodection code can't determine which
-** hash to use (because both hashes had the same value for a file)
-** use this option to force a specific hash. It won't allow you to override
-** the existing hash on the FS, so if you have a tea hash disk, and mount
-** with -o hash=rupasov, the mount will fail.
-*/
+enum reiserfs_mount_options {
+ /* large tails will be created in a session */
+ REISERFS_LARGETAIL,
+ /*
+ * small (for files less than block size) tails will
+ * be created in a session
+ */
+ REISERFS_SMALLTAIL,
+
+	/* replay journal and return 0. Used by fsck */
+ REPLAYONLY,
+
+ /*
+ * -o conv: causes conversion of old format super block to the
+	 * new format. If not specified, the old partition will be
+	 * dealt with in the manner of 3.5.x
+ */
+ REISERFS_CONVERT,
+
+ /*
+ * -o hash={tea, rupasov, r5, detect} is meant for properly mounting
+ * reiserfs disks from 3.5.19 or earlier. 99% of the time, this
+	 * option is not required. If the normal autodetection code can't
+ * determine which hash to use (because both hashes had the same
+ * value for a file) use this option to force a specific hash.
+ * It won't allow you to override the existing hash on the FS, so
+ * if you have a tea hash disk, and mount with -o hash=rupasov,
+ * the mount will fail.
+ */
FORCE_TEA_HASH, /* try to force tea hash on mount */
FORCE_RUPASOV_HASH, /* try to force rupasov hash on mount */
FORCE_R5_HASH, /* try to force rupasov hash on mount */
@@ -536,9 +675,11 @@ enum reiserfs_mount_options {
REISERFS_DATA_ORDERED,
REISERFS_DATA_WRITEBACK,
-/* used for testing experimental features, makes benchmarking new
- features with and without more convenient, should never be used by
- users in any code shipped to users (ideally) */
+ /*
+ * used for testing experimental features, makes benchmarking new
+ * features with and without more convenient, should never be used by
+ * users in any code shipped to users (ideally)
+ */
REISERFS_NO_BORDER,
REISERFS_NO_UNHASHED_RELOCATION,
@@ -705,28 +846,28 @@ static inline void reiserfs_cond_resched(struct super_block *s)
struct fid;
-/* in reading the #defines, it may help to understand that they employ
- the following abbreviations:
-
- B = Buffer
- I = Item header
- H = Height within the tree (should be changed to LEV)
- N = Number of the item in the node
- STAT = stat data
- DEH = Directory Entry Header
- EC = Entry Count
- E = Entry number
- UL = Unsigned Long
- BLKH = BLocK Header
- UNFM = UNForMatted node
- DC = Disk Child
- P = Path
-
- These #defines are named by concatenating these abbreviations,
- where first comes the arguments, and last comes the return value,
- of the macro.
-
-*/
+/*
+ * in reading the #defines, it may help to understand that they employ
+ * the following abbreviations:
+ *
+ * B = Buffer
+ * I = Item header
+ * H = Height within the tree (should be changed to LEV)
+ * N = Number of the item in the node
+ * STAT = stat data
+ * DEH = Directory Entry Header
+ * EC = Entry Count
+ * E = Entry number
+ * UL = Unsigned Long
+ * BLKH = BLocK Header
+ * UNFM = UNForMatted node
+ * DC = Disk Child
+ * P = Path
+ *
+ * These #defines are named by concatenating these abbreviations,
+ * where first comes the arguments, and last comes the return value,
+ * of the macro.
+ */
#define USE_INODE_GENERATION_COUNTER
@@ -737,14 +878,17 @@ struct fid;
/* n must be power of 2 */
#define _ROUND_UP(x,n) (((x)+(n)-1u) & ~((n)-1u))
-// to be ok for alpha and others we have to align structures to 8 byte
-// boundary.
-// FIXME: do not change 4 by anything else: there is code which relies on that
+/*
+ * to be ok for alpha and others we have to align structures to 8 byte
+ * boundary.
+ * FIXME: do not replace 4 with anything else: there is code which relies on that
+ */
#define ROUND_UP(x) _ROUND_UP(x,8LL)
-/* debug levels. Right now, CONFIG_REISERFS_CHECK means print all debug
-** messages.
-*/
+/*
+ * debug levels. Right now, CONFIG_REISERFS_CHECK means print all debug
+ * messages.
+ */
#define REISERFS_DEBUG_CODE 5 /* extra messages to help find/debug errors */
void __reiserfs_warning(struct super_block *s, const char *id,
@@ -753,7 +897,7 @@ void __reiserfs_warning(struct super_block *s, const char *id,
__reiserfs_warning(s, id, __func__, fmt, ##args)
/* assertions handling */
-/** always check a condition and panic if it's false. */
+/* always check a condition and panic if it's false. */
#define __RASSERT(cond, scond, format, args...) \
do { \
if (!(cond)) \
@@ -776,35 +920,48 @@ do { \
* Disk Data Structures
*/
-/***************************************************************************/
-/* SUPER BLOCK */
-/***************************************************************************/
+/***************************************************************************
+ * SUPER BLOCK *
+ ***************************************************************************/
/*
- * Structure of super block on disk, a version of which in RAM is often accessed as REISERFS_SB(s)->s_rs
- * the version in RAM is part of a larger structure containing fields never written to disk.
+ * Structure of super block on disk, a version of which in RAM is often
+ * accessed as REISERFS_SB(s)->s_rs. The version in RAM is part of a larger
+ * structure containing fields never written to disk.
*/
-#define UNSET_HASH 0 // read_super will guess about, what hash names
- // in directories were sorted with
+#define UNSET_HASH 0 /* Detect hash on disk */
#define TEA_HASH 1
#define YURA_HASH 2
#define R5_HASH 3
#define DEFAULT_HASH R5_HASH
struct journal_params {
- __le32 jp_journal_1st_block; /* where does journal start from on its
- * device */
- __le32 jp_journal_dev; /* journal device st_rdev */
- __le32 jp_journal_size; /* size of the journal */
- __le32 jp_journal_trans_max; /* max number of blocks in a transaction. */
- __le32 jp_journal_magic; /* random value made on fs creation (this
- * was sb_journal_block_count) */
- __le32 jp_journal_max_batch; /* max number of blocks to batch into a
- * trans */
- __le32 jp_journal_max_commit_age; /* in seconds, how old can an async
- * commit be */
- __le32 jp_journal_max_trans_age; /* in seconds, how old can a transaction
- * be */
+	/* where does journal start from on its device */
+ __le32 jp_journal_1st_block;
+
+ /* journal device st_rdev */
+ __le32 jp_journal_dev;
+
+ /* size of the journal */
+ __le32 jp_journal_size;
+
+ /* max number of blocks in a transaction. */
+ __le32 jp_journal_trans_max;
+
+ /*
+ * random value made on fs creation
+ * (this was sb_journal_block_count)
+ */
+ __le32 jp_journal_magic;
+
+ /* max number of blocks to batch into a trans */
+ __le32 jp_journal_max_batch;
+
+ /* in seconds, how old can an async commit be */
+ __le32 jp_journal_max_commit_age;
+
+ /* in seconds, how old can a transaction be */
+ __le32 jp_journal_max_trans_age;
};
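
All journal parameters above are stored little-endian on disk; a minimal
sketch of reading them into CPU form (the helper below is illustrative,
not part of this patch):

	static void show_journal_params(const struct journal_params *jp)
	{
		/* every on-disk field is __le32, so convert before use */
		pr_info("journal: 1st block %u, size %u, trans_max %u\n",
			le32_to_cpu(jp->jp_journal_1st_block),
			le32_to_cpu(jp->jp_journal_size),
			le32_to_cpu(jp->jp_journal_trans_max));
	}
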
/* this is the super from 3.5.X, where X >= 10 */
@@ -814,26 +971,48 @@ struct reiserfs_super_block_v1 {
__le32 s_root_block; /* root block number */
struct journal_params s_journal;
__le16 s_blocksize; /* block size */
- __le16 s_oid_maxsize; /* max size of object id array, see
- * get_objectid() commentary */
+
+ /* max size of object id array, see get_objectid() commentary */
+ __le16 s_oid_maxsize;
__le16 s_oid_cursize; /* current size of object id array */
- __le16 s_umount_state; /* this is set to 1 when filesystem was
- * umounted, to 2 - when not */
- char s_magic[10]; /* reiserfs magic string indicates that
- * file system is reiserfs:
- * "ReIsErFs" or "ReIsEr2Fs" or "ReIsEr3Fs" */
- __le16 s_fs_state; /* it is set to used by fsck to mark which
- * phase of rebuilding is done */
- __le32 s_hash_function_code; /* indicate, what hash function is being use
- * to sort names in a directory*/
+
+	/* set to 1 when the filesystem was cleanly umounted, to 2 when not */
+ __le16 s_umount_state;
+
+ /*
+ * reiserfs magic string indicates that file system is reiserfs:
+ * "ReIsErFs" or "ReIsEr2Fs" or "ReIsEr3Fs"
+ */
+ char s_magic[10];
+
+ /*
+	 * it is used by fsck to mark which
+ * phase of rebuilding is done
+ */
+	__le16 s_fs_state;
+
+	/*
+	 * indicates which hash function is being used
+ * to sort names in a directory
+ */
+ __le32 s_hash_function_code;
__le16 s_tree_height; /* height of disk tree */
- __le16 s_bmap_nr; /* amount of bitmap blocks needed to address
- * each block of file system */
- __le16 s_version; /* this field is only reliable on filesystem
- * with non-standard journal */
- __le16 s_reserved_for_journal; /* size in blocks of journal area on main
- * device, we need to keep after
- * making fs with non-standard journal */
+
+ /*
+ * amount of bitmap blocks needed to address
+ * each block of file system
+ */
+ __le16 s_bmap_nr;
+
+ /*
+ * this field is only reliable on filesystem with non-standard journal
+ */
+ __le16 s_version;
+
+ /*
+ * size in blocks of journal area on main device, we need to
+ * keep after making fs with non-standard journal
+ */
+ __le16 s_reserved_for_journal;
} __attribute__ ((__packed__));
#define SB_SIZE_V1 (sizeof(struct reiserfs_super_block_v1))
@@ -842,17 +1021,21 @@ struct reiserfs_super_block_v1 {
struct reiserfs_super_block {
struct reiserfs_super_block_v1 s_v1;
__le32 s_inode_generation;
- __le32 s_flags; /* Right now used only by inode-attributes, if enabled */
+
+ /* Right now used only by inode-attributes, if enabled */
+ __le32 s_flags;
+
unsigned char s_uuid[16]; /* filesystem unique identifier */
unsigned char s_label[16]; /* filesystem volume label */
__le16 s_mnt_count; /* Count of mounts since last fsck */
__le16 s_max_mnt_count; /* Maximum mounts before check */
__le32 s_lastcheck; /* Timestamp of last fsck */
__le32 s_check_interval; /* Interval between checks */
- char s_unused[76]; /* zero filled by mkreiserfs and
- * reiserfs_convert_objectid_map_v1()
- * so any additions must be updated
- * there as well. */
+
+ /*
+ * zero filled by mkreiserfs and reiserfs_convert_objectid_map_v1()
+	 * so any additions must be updated there as well.
+	 */
+ char s_unused[76];
} __attribute__ ((__packed__));
#define SB_SIZE (sizeof(struct reiserfs_super_block))
@@ -860,7 +1043,7 @@ struct reiserfs_super_block {
#define REISERFS_VERSION_1 0
#define REISERFS_VERSION_2 2
-// on-disk super block fields converted to cpu form
+/* on-disk super block fields converted to cpu form */
#define SB_DISK_SUPER_BLOCK(s) (REISERFS_SB(s)->s_rs)
#define SB_V1_DISK_SUPER_BLOCK(s) (&(SB_DISK_SUPER_BLOCK(s)->s_v1))
#define SB_BLOCKSIZE(s) \
@@ -915,11 +1098,13 @@ int is_reiserfs_3_5(struct reiserfs_super_block *rs);
int is_reiserfs_3_6(struct reiserfs_super_block *rs);
int is_reiserfs_jr(struct reiserfs_super_block *rs);
-/* ReiserFS leaves the first 64k unused, so that partition labels have
- enough space. If someone wants to write a fancy bootloader that
- needs more than 64k, let us know, and this will be increased in size.
- This number must be larger than than the largest block size on any
- platform, or code will break. -Hans */
+/*
+ * ReiserFS leaves the first 64k unused, so that partition labels have
+ * enough space. If someone wants to write a fancy bootloader that
+ * needs more than 64k, let us know, and this will be increased in size.
+ * This number must be larger than the largest block size on any
+ * platform, or code will break. -Hans
+ */
#define REISERFS_DISK_OFFSET_IN_BYTES (64 * 1024)
#define REISERFS_FIRST_BLOCK unused_define
#define REISERFS_JOURNAL_OFFSET_IN_BYTES REISERFS_DISK_OFFSET_IN_BYTES
@@ -944,8 +1129,7 @@ struct unfm_nodeinfo {
unsigned short unfm_freespace;
};
-/* there are two formats of keys: 3.5 and 3.6
- */
+/* there are two formats of keys: 3.5 and 3.6 */
#define KEY_FORMAT_3_5 0
#define KEY_FORMAT_3_6 1
@@ -963,8 +1147,10 @@ static inline struct reiserfs_sb_info *REISERFS_SB(const struct super_block *sb)
return sb->s_fs_info;
}
-/* Don't trust REISERFS_SB(sb)->s_bmap_nr, it's a u16
- * which overflows on large file systems. */
+/*
+ * Don't trust REISERFS_SB(sb)->s_bmap_nr, it's a u16
+ * which overflows on large file systems.
+ */
static inline __u32 reiserfs_bmap_count(struct super_block *sb)
{
return (SB_BLOCK_COUNT(sb) - 1) / (sb->s_blocksize * 8) + 1;
@@ -975,8 +1161,10 @@ static inline int bmap_would_wrap(unsigned bmap_nr)
return bmap_nr > ((1LL << 16) - 1);
}
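
A worked example of the overflow that motivates recomputing the count
(numbers assume 4 KiB blocks; values are illustrative only):

	/* one bitmap block covers blocksize * 8 fs blocks */
	unsigned long long blocks = 1ULL << 31;	/* 8 TiB fs at 4 KiB blocks */
	unsigned int blocksize = 4096;
	__u32 bmaps = (blocks - 1) / (blocksize * 8) + 1;	/* 65536 > 0xffff */
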
-/** this says about version of key of all items (but stat data) the
- object consists of */
+/*
+ * this tells the key version of all items (but stat data) the
+ * object consists of
+ */
#define get_inode_item_key_version( inode ) \
((REISERFS_I(inode)->i_flags & i_item_key_version_mask) ? KEY_FORMAT_3_6 : KEY_FORMAT_3_5)
@@ -995,16 +1183,18 @@ static inline int bmap_would_wrap(unsigned bmap_nr)
else \
REISERFS_I(inode)->i_flags &= ~i_stat_data_version_mask; })
-/* This is an aggressive tail suppression policy, I am hoping it
- improves our benchmarks. The principle behind it is that percentage
- space saving is what matters, not absolute space saving. This is
- non-intuitive, but it helps to understand it if you consider that the
- cost to access 4 blocks is not much more than the cost to access 1
- block, if you have to do a seek and rotate. A tail risks a
- non-linear disk access that is significant as a percentage of total
- time cost for a 4 block file and saves an amount of space that is
- less significant as a percentage of space, or so goes the hypothesis.
- -Hans */
+/*
+ * This is an aggressive tail suppression policy, I am hoping it
+ * improves our benchmarks. The principle behind it is that percentage
+ * space saving is what matters, not absolute space saving. This is
+ * non-intuitive, but it helps to understand it if you consider that the
+ * cost to access 4 blocks is not much more than the cost to access 1
+ * block, if you have to do a seek and rotate. A tail risks a
+ * non-linear disk access that is significant as a percentage of total
+ * time cost for a 4 block file and saves an amount of space that is
+ * less significant as a percentage of space, or so goes the hypothesis.
+ * -Hans
+ */
#define STORE_TAIL_IN_UNFM_S1(n_file_size,n_tail_size,n_block_size) \
(\
(!(n_tail_size)) || \
@@ -1018,10 +1208,11 @@ static inline int bmap_would_wrap(unsigned bmap_nr)
( (n_tail_size) >= (MAX_DIRECT_ITEM_LEN(n_block_size) * 3)/4) ) ) \
)
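
To put rough numbers on the policy (illustrative values, assuming 4 KiB
blocks): a 3 KiB tail on a 16 KiB file saves under a fifth of the space
while risking one whole extra non-linear access:

	unsigned int file_size = 16384, tail_size = 3072;
	unsigned int saved_pct = tail_size * 100 / file_size;	/* 18% space */
	/* vs. potentially 1 extra seek out of ~5 total blocks accessed */
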
-/* Another strategy for tails, this one means only create a tail if all the
- file would fit into one DIRECT item.
- Primary intention for this one is to increase performance by decreasing
- seeking.
+/*
+ * Another strategy for tails, this one means only create a tail if all the
+ * file would fit into one DIRECT item.
+ * Primary intention for this one is to increase performance by decreasing
+ * seeking.
*/
#define STORE_TAIL_IN_UNFM_S2(n_file_size,n_tail_size,n_block_size) \
(\
@@ -1035,23 +1226,21 @@ static inline int bmap_would_wrap(unsigned bmap_nr)
#define REISERFS_VALID_FS 1
#define REISERFS_ERROR_FS 2
-//
-// there are 5 item types currently
-//
+/* there are 5 item types currently */
#define TYPE_STAT_DATA 0
#define TYPE_INDIRECT 1
#define TYPE_DIRECT 2
#define TYPE_DIRENTRY 3
#define TYPE_MAXTYPE 3
-#define TYPE_ANY 15 // FIXME: comment is required
+#define TYPE_ANY 15 /* FIXME: comment is required */
-/***************************************************************************/
-/* KEY & ITEM HEAD */
-/***************************************************************************/
+/***************************************************************************
+ * KEY & ITEM HEAD *
+ ***************************************************************************/
-//
-// directories use this key as well as old files
-//
+/* directories use this key as well as old files */
struct offset_v1 {
__le32 k_offset;
__le32 k_uniqueness;
@@ -1084,11 +1273,14 @@ static inline void set_offset_v2_k_offset(struct offset_v2 *v2, loff_t offset)
v2->v = (v2->v & cpu_to_le64(15ULL << 60)) | cpu_to_le64(offset);
}
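
The helpers above treat the v2 offset as a single le64 with the 4-bit
key type in bits 60..63 and the 60-bit offset below it; a sketch of the
packing (values illustrative):

	/* pack type TYPE_INDIRECT (1) with offset 4096 into one word */
	__le64 v = cpu_to_le64(((__u64)TYPE_INDIRECT << 60) | 4096ULL);
	/* set_offset_v2_k_offset() preserves the top nibble: 15ULL << 60 */
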
-/* Key of an item determines its location in the S+tree, and
- is composed of 4 components */
+/*
+ * Key of an item determines its location in the S+tree, and
+ * is composed of 4 components
+ */
struct reiserfs_key {
- __le32 k_dir_id; /* packing locality: by default parent
- directory object id */
+ /* packing locality: by default parent directory object id */
+ __le32 k_dir_id;
+
__le32 k_objectid; /* object identifier */
union {
struct offset_v1 k_offset_v1;
@@ -1097,8 +1289,8 @@ struct reiserfs_key {
} __attribute__ ((__packed__));
struct in_core_key {
- __u32 k_dir_id; /* packing locality: by default parent
- directory object id */
+ /* packing locality: by default parent directory object id */
+ __u32 k_dir_id;
__u32 k_objectid; /* object identifier */
__u64 k_offset;
__u8 k_type;
@@ -1107,14 +1299,16 @@ struct in_core_key {
struct cpu_key {
struct in_core_key on_disk_key;
int version;
- int key_length; /* 3 in all cases but direct2indirect and
- indirect2direct conversion */
+ /* 3 in all cases but direct2indirect and indirect2direct conversion */
+ int key_length;
};
-/* Our function for comparing keys can compare keys of different
- lengths. It takes as a parameter the length of the keys it is to
- compare. These defines are used in determining what is to be passed
- to it as that parameter. */
+/*
+ * Our function for comparing keys can compare keys of different
+ * lengths. It takes as a parameter the length of the keys it is to
+ * compare. These defines are used in determining what is to be passed
+ * to it as that parameter.
+ */
#define REISERFS_FULL_KEY_LEN 4
#define REISERFS_SHORT_KEY_LEN 2
@@ -1143,40 +1337,52 @@ struct cpu_key {
#define POSITION_FOUND 1
#define POSITION_NOT_FOUND 0
-// return values for reiserfs_find_entry and search_by_entry_key
+/* return values for reiserfs_find_entry and search_by_entry_key */
#define NAME_FOUND 1
#define NAME_NOT_FOUND 0
#define GOTO_PREVIOUS_ITEM 2
#define NAME_FOUND_INVISIBLE 3
-/* Everything in the filesystem is stored as a set of items. The
- item head contains the key of the item, its free space (for
- indirect items) and specifies the location of the item itself
- within the block. */
+/*
+ * Everything in the filesystem is stored as a set of items. The
+ * item head contains the key of the item, its free space (for
+ * indirect items) and specifies the location of the item itself
+ * within the block.
+ */
struct item_head {
- /* Everything in the tree is found by searching for it based on
- * its key.*/
+ /*
+ * Everything in the tree is found by searching for it based on
+ * its key.
+ */
struct reiserfs_key ih_key;
union {
- /* The free space in the last unformatted node of an
- indirect item if this is an indirect item. This
- equals 0xFFFF iff this is a direct item or stat data
- item. Note that the key, not this field, is used to
- determine the item type, and thus which field this
- union contains. */
+ /*
+ * The free space in the last unformatted node of an
+ * indirect item if this is an indirect item. This
+ * equals 0xFFFF iff this is a direct item or stat data
+ * item. Note that the key, not this field, is used to
+ * determine the item type, and thus which field this
+ * union contains.
+ */
__le16 ih_free_space_reserved;
- /* Iff this is a directory item, this field equals the
- number of directory entries in the directory item. */
+
+ /*
+ * Iff this is a directory item, this field equals the
+ * number of directory entries in the directory item.
+ */
__le16 ih_entry_count;
} __attribute__ ((__packed__)) u;
__le16 ih_item_len; /* total size of the item body */
- __le16 ih_item_location; /* an offset to the item body
- * within the block */
- __le16 ih_version; /* 0 for all old items, 2 for new
- ones. Highest bit is set by fsck
- temporary, cleaned after all
- done */
+
+ /* an offset to the item body within the block */
+ __le16 ih_item_location;
+
+ /*
+	 * 0 for all old items, 2 for new ones. Highest bit is set
+	 * temporarily by fsck and cleared when it is done
+ */
+ __le16 ih_version;
} __attribute__ ((__packed__));
/* size of item header */
#define IH_SIZE (sizeof(struct item_head))
@@ -1198,27 +1404,24 @@ struct item_head {
#define get_ih_free_space(ih) (ih_version (ih) == KEY_FORMAT_3_6 ? 0 : ih_free_space (ih))
#define set_ih_free_space(ih,val) put_ih_free_space((ih), ((ih_version(ih) == KEY_FORMAT_3_6) ? 0 : (val)))
-/* these operate on indirect items, where you've got an array of ints
-** at a possibly unaligned location. These are a noop on ia32
-**
-** p is the array of __u32, i is the index into the array, v is the value
-** to store there.
-*/
+/*
+ * these operate on indirect items, where you've got an array of ints
+ * at a possibly unaligned location. These are a noop on ia32
+ *
+ * p is the array of __u32, i is the index into the array, v is the value
+ * to store there.
+ */
#define get_block_num(p, i) get_unaligned_le32((p) + (i))
#define put_block_num(p, i, v) put_unaligned_le32((v), (p) + (i))
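
A usage sketch for these accessors (variable names illustrative, bh and
ih assumed in scope): walking the block numbers of an indirect item,
whose body may sit at an unaligned address:

	__u32 *unp = (__u32 *)ih_item_body(bh, ih);
	int i, n = ih_item_len(ih) / UNFM_P_SIZE;	/* pointers per item */

	for (i = 0; i < n; i++)
		pr_debug("unfm block %u\n", get_block_num(unp, i));
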
-//
-// in old version uniqueness field shows key type
-//
+/* in the old version the uniqueness field shows the key type */
#define V1_SD_UNIQUENESS 0
#define V1_INDIRECT_UNIQUENESS 0xfffffffe
#define V1_DIRECT_UNIQUENESS 0xffffffff
#define V1_DIRENTRY_UNIQUENESS 500
-#define V1_ANY_UNIQUENESS 555 // FIXME: comment is required
+#define V1_ANY_UNIQUENESS 555 /* FIXME: comment is required */
-//
-// here are conversion routines
-//
+/* here are conversion routines */
static inline int uniqueness2type(__u32 uniqueness) CONSTF;
static inline int uniqueness2type(__u32 uniqueness)
{
@@ -1255,11 +1458,11 @@ static inline __u32 type2uniqueness(int type)
}
}
-//
-// key is pointer to on disk key which is stored in le, result is cpu,
-// there is no way to get version of object from key, so, provide
-// version to these defines
-//
+/*
+ * key is pointer to on disk key which is stored in le, result is cpu,
+ * there is no way to get version of object from key, so, provide
+ * version to these defines
+ */
static inline loff_t le_key_k_offset(int version,
const struct reiserfs_key *key)
{
@@ -1275,9 +1478,11 @@ static inline loff_t le_ih_k_offset(const struct item_head *ih)
static inline loff_t le_key_k_type(int version, const struct reiserfs_key *key)
{
- return (version == KEY_FORMAT_3_5) ?
- uniqueness2type(le32_to_cpu(key->u.k_offset_v1.k_uniqueness)) :
- offset_v2_k_type(&(key->u.k_offset_v2));
+ if (version == KEY_FORMAT_3_5) {
+ loff_t val = le32_to_cpu(key->u.k_offset_v1.k_uniqueness);
+ return uniqueness2type(val);
+	}
+	return offset_v2_k_type(&(key->u.k_offset_v2));
}
static inline loff_t le_ih_k_type(const struct item_head *ih)
@@ -1288,8 +1493,22 @@ static inline loff_t le_ih_k_type(const struct item_head *ih)
static inline void set_le_key_k_offset(int version, struct reiserfs_key *key,
loff_t offset)
{
- (version == KEY_FORMAT_3_5) ? (void)(key->u.k_offset_v1.k_offset = cpu_to_le32(offset)) : /* jdm check */
- (void)(set_offset_v2_k_offset(&(key->u.k_offset_v2), offset));
+ if (version == KEY_FORMAT_3_5)
+ key->u.k_offset_v1.k_offset = cpu_to_le32(offset);
+ else
+ set_offset_v2_k_offset(&key->u.k_offset_v2, offset);
+}
+
+static inline void add_le_key_k_offset(int version, struct reiserfs_key *key,
+ loff_t offset)
+{
+ set_le_key_k_offset(version, key,
+ le_key_k_offset(version, key) + offset);
+}
+
+static inline void add_le_ih_k_offset(struct item_head *ih, loff_t offset)
+{
+ add_le_key_k_offset(ih_version(ih), &(ih->ih_key), offset);
}
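
Equivalent before/after usage of the new helper (a sketch; version, key
and shift are assumed to be in scope):

	/* open-coded read-modify-write */
	set_le_key_k_offset(version, key, le_key_k_offset(version, key) + shift);

	/* same effect with the helper added above */
	add_le_key_k_offset(version, key, shift);
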
static inline void set_le_ih_k_offset(struct item_head *ih, loff_t offset)
@@ -1300,10 +1519,11 @@ static inline void set_le_ih_k_offset(struct item_head *ih, loff_t offset)
static inline void set_le_key_k_type(int version, struct reiserfs_key *key,
int type)
{
- (version == KEY_FORMAT_3_5) ?
- (void)(key->u.k_offset_v1.k_uniqueness =
- cpu_to_le32(type2uniqueness(type)))
- : (void)(set_offset_v2_k_type(&(key->u.k_offset_v2), type));
+ if (version == KEY_FORMAT_3_5) {
+ type = type2uniqueness(type);
+ key->u.k_offset_v1.k_uniqueness = cpu_to_le32(type);
+	} else {
+		set_offset_v2_k_type(&key->u.k_offset_v2, type);
+	}
}
static inline void set_le_ih_k_type(struct item_head *ih, int type)
@@ -1331,9 +1551,7 @@ static inline int is_statdata_le_key(int version, struct reiserfs_key *key)
return le_key_k_type(version, key) == TYPE_STAT_DATA;
}
-//
-// item header has version.
-//
+/* item header has version. */
static inline int is_direntry_le_ih(struct item_head *ih)
{
return is_direntry_le_key(ih_version(ih), &ih->ih_key);
@@ -1354,9 +1572,7 @@ static inline int is_statdata_le_ih(struct item_head *ih)
return is_statdata_le_key(ih_version(ih), &ih->ih_key);
}
-//
-// key is pointer to cpu key, result is cpu
-//
+/* key is pointer to cpu key, result is cpu */
static inline loff_t cpu_key_k_offset(const struct cpu_key *key)
{
return key->on_disk_key.k_offset;
@@ -1407,7 +1623,7 @@ static inline void cpu_key_k_offset_dec(struct cpu_key *key)
extern struct reiserfs_key root_key;
-/*
+/*
* Picture represents a leaf of the S+tree
* ______________________________________________________
* | | Array of | | |
@@ -1416,15 +1632,19 @@ extern struct reiserfs_key root_key;
* |______|_______________|___________________|___________|
*/
-/* Header of a disk block. More precisely, header of a formatted leaf
- or internal node, and not the header of an unformatted node. */
+/*
+ * Header of a disk block. More precisely, header of a formatted leaf
+ * or internal node, and not the header of an unformatted node.
+ */
struct block_head {
__le16 blk_level; /* Level of a block in the tree. */
__le16 blk_nr_item; /* Number of keys/items in a block. */
__le16 blk_free_space; /* Block free space in bytes. */
__le16 blk_reserved;
/* dump this in v4/planA */
- struct reiserfs_key blk_right_delim_key; /* kept only for compatibility */
+
+ /* kept only for compatibility */
+ struct reiserfs_key blk_right_delim_key;
};
#define BLKH_SIZE (sizeof(struct block_head))
@@ -1439,18 +1659,20 @@ struct block_head {
#define blkh_right_delim_key(p_blkh) ((p_blkh)->blk_right_delim_key)
#define set_blkh_right_delim_key(p_blkh,val) ((p_blkh)->blk_right_delim_key = val)
+/* values for blk_level field of the struct block_head */
+
/*
- * values for blk_level field of the struct block_head
+ * When node gets removed from the tree its blk_level is set to FREE_LEVEL.
+ * It is then used to see whether the node is still in the tree
*/
-
-#define FREE_LEVEL 0 /* when node gets removed from the tree its
- blk_level is set to FREE_LEVEL. It is then
- used to see whether the node is still in the
- tree */
+#define FREE_LEVEL 0
#define DISK_LEAF_NODE_LEVEL 1 /* Leaf node level. */
-/* Given the buffer head of a formatted node, resolve to the block head of that node. */
+/*
+ * Given the buffer head of a formatted node, resolve to the
+ * block head of that node.
+ */
#define B_BLK_HEAD(bh) ((struct block_head *)((bh)->b_data))
/* Number of items that are in buffer. */
#define B_NR_ITEMS(bh) (blkh_nr_item(B_BLK_HEAD(bh)))
@@ -1471,14 +1693,14 @@ struct block_head {
#define B_IS_KEYS_LEVEL(bh) (B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL \
&& B_LEVEL(bh) <= MAX_HEIGHT)
-/***************************************************************************/
-/* STAT DATA */
-/***************************************************************************/
+/***************************************************************************
+ * STAT DATA *
+ ***************************************************************************/
-//
-// old stat data is 32 bytes long. We are going to distinguish new one by
-// different size
-//
+/*
+ * old stat data is 32 bytes long. We are going to distinguish the new
+ * one by its different size
+ */
struct stat_data_v1 {
__le16 sd_mode; /* file type, permissions */
__le16 sd_nlink; /* number of hard links */
@@ -1487,20 +1709,25 @@ struct stat_data_v1 {
__le32 sd_size; /* file size */
__le32 sd_atime; /* time of last access */
__le32 sd_mtime; /* time file was last modified */
- __le32 sd_ctime; /* time inode (stat data) was last changed (except changes to sd_atime and sd_mtime) */
+
+ /*
+ * time inode (stat data) was last changed
+ * (except changes to sd_atime and sd_mtime)
+ */
+ __le32 sd_ctime;
union {
__le32 sd_rdev;
__le32 sd_blocks; /* number of blocks file uses */
} __attribute__ ((__packed__)) u;
- __le32 sd_first_direct_byte; /* first byte of file which is stored
- in a direct item: except that if it
- equals 1 it is a symlink and if it
- equals ~(__u32)0 there is no
- direct item. The existence of this
- field really grates on me. Let's
- replace it with a macro based on
- sd_size and our tail suppression
- policy. Someday. -Hans */
+
+ /*
+ * first byte of file which is stored in a direct item: except that if
+ * it equals 1 it is a symlink and if it equals ~(__u32)0 there is no
+ * direct item. The existence of this field really grates on me.
+ * Let's replace it with a macro based on sd_size and our tail
+ * suppression policy. Someday. -Hans
+ */
+ __le32 sd_first_direct_byte;
} __attribute__ ((__packed__));
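
A small illustrative helper decoding sd_first_direct_byte per the
comment above (not part of this patch):

	static int sd_v1_has_tail(const struct stat_data_v1 *sd)
	{
		__u32 fdb = le32_to_cpu(sd->sd_first_direct_byte);

		/* 1 => symlink body; ~0 => no direct item; else tail offset */
		return fdb != 1 && fdb != ~(__u32)0;
	}
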
#define SD_V1_SIZE (sizeof(struct stat_data_v1))
@@ -1532,8 +1759,10 @@ struct stat_data_v1 {
/* inode flags stored in sd_attrs (nee sd_reserved) */
-/* we want common flags to have the same values as in ext2,
- so chattr(1) will work without problems */
+/*
+ * we want common flags to have the same values as in ext2,
+ * so chattr(1) will work without problems
+ */
#define REISERFS_IMMUTABLE_FL FS_IMMUTABLE_FL
#define REISERFS_APPEND_FL FS_APPEND_FL
#define REISERFS_SYNC_FL FS_SYNC_FL
@@ -1553,8 +1782,10 @@ struct stat_data_v1 {
REISERFS_COMPR_FL | \
REISERFS_NOTAIL_FL )
-/* Stat Data on disk (reiserfs version of UFS disk inode minus the
- address blocks) */
+/*
+ * Stat Data on disk (reiserfs version of UFS disk inode minus the
+ * address blocks)
+ */
struct stat_data {
__le16 sd_mode; /* file type, permissions */
__le16 sd_attrs; /* persistent inode flags */
@@ -1564,25 +1795,20 @@ struct stat_data {
__le32 sd_gid; /* group */
__le32 sd_atime; /* time of last access */
__le32 sd_mtime; /* time file was last modified */
- __le32 sd_ctime; /* time inode (stat data) was last changed (except changes to sd_atime and sd_mtime) */
+
+ /*
+ * time inode (stat data) was last changed
+ * (except changes to sd_atime and sd_mtime)
+ */
+ __le32 sd_ctime;
__le32 sd_blocks;
union {
__le32 sd_rdev;
__le32 sd_generation;
- //__le32 sd_first_direct_byte;
- /* first byte of file which is stored in a
- direct item: except that if it equals 1
- it is a symlink and if it equals
- ~(__u32)0 there is no direct item. The
- existence of this field really grates
- on me. Let's replace it with a macro
- based on sd_size and our tail
- suppression policy? */
} __attribute__ ((__packed__)) u;
} __attribute__ ((__packed__));
-//
-// this is 44 bytes long
-//
+
+/* this is 44 bytes long */
#define SD_SIZE (sizeof(struct stat_data))
#define SD_V2_SIZE SD_SIZE
#define stat_data_v2(ih) (ih_version (ih) == KEY_FORMAT_3_6)
@@ -1613,48 +1839,61 @@ struct stat_data {
#define sd_v2_attrs(sdp) (le16_to_cpu((sdp)->sd_attrs))
#define set_sd_v2_attrs(sdp,v) ((sdp)->sd_attrs = cpu_to_le16(v))
-/***************************************************************************/
-/* DIRECTORY STRUCTURE */
-/***************************************************************************/
-/*
- Picture represents the structure of directory items
- ________________________________________________
- | Array of | | | | | |
- | directory |N-1| N-2 | .... | 1st |0th|
- | entry headers | | | | | |
- |_______________|___|_____|________|_______|___|
- <---- directory entries ------>
-
- First directory item has k_offset component 1. We store "." and ".."
- in one item, always, we never split "." and ".." into differing
- items. This makes, among other things, the code for removing
- directories simpler. */
+/***************************************************************************
+ * DIRECTORY STRUCTURE *
+ ***************************************************************************/
+/*
+ * Picture represents the structure of directory items
+ * ________________________________________________
+ * | Array of | | | | | |
+ * | directory |N-1| N-2 | .... | 1st |0th|
+ * | entry headers | | | | | |
+ * |_______________|___|_____|________|_______|___|
+ * <---- directory entries ------>
+ *
+ * First directory item has k_offset component 1. We store "." and ".."
+ * in one item, always, we never split "." and ".." into differing
+ * items. This makes, among other things, the code for removing
+ * directories simpler.
+ */
#define SD_OFFSET 0
#define SD_UNIQUENESS 0
#define DOT_OFFSET 1
#define DOT_DOT_OFFSET 2
#define DIRENTRY_UNIQUENESS 500
-/* */
#define FIRST_ITEM_OFFSET 1
/*
- Q: How to get key of object pointed to by entry from entry?
-
- A: Each directory entry has its header. This header has deh_dir_id and deh_objectid fields, those are key
- of object, entry points to */
+ * Q: How to get key of object pointed to by entry from entry?
+ *
+ * A: Each directory entry has its header. This header has deh_dir_id
+ * and deh_objectid fields; those form the key of the object the entry
+ * points to
+ */
-/* NOT IMPLEMENTED:
- Directory will someday contain stat data of object */
+/*
+ * NOT IMPLEMENTED:
+ * Directory will someday contain stat data of object
+ */
struct reiserfs_de_head {
__le32 deh_offset; /* third component of the directory entry key */
- __le32 deh_dir_id; /* objectid of the parent directory of the object, that is referenced
- by directory entry */
- __le32 deh_objectid; /* objectid of the object, that is referenced by directory entry */
+
+ /*
+	 * objectid of the parent directory of the object referenced
+	 * by the directory entry
+ */
+ __le32 deh_dir_id;
+
+	/* objectid of the object referenced by the directory entry */
+ __le32 deh_objectid;
__le16 deh_location; /* offset of name in the whole item */
- __le16 deh_state; /* whether 1) entry contains stat data (for future), and 2) whether
- entry is hidden (unlinked) */
+
+ /*
+ * whether 1) entry contains stat data (for future), and
+ * 2) whether entry is hidden (unlinked)
+ */
+	 * 2) entry is hidden (unlinked)
} __attribute__ ((__packed__));
#define DEH_SIZE sizeof(struct reiserfs_de_head)
#define deh_offset(p_deh) (le32_to_cpu((p_deh)->deh_offset))
@@ -1684,9 +1923,11 @@ struct reiserfs_de_head {
# define ADDR_UNALIGNED_BITS (3)
#endif
-/* These are only used to manipulate deh_state.
+/*
+ * These are only used to manipulate deh_state.
* Because of this, we'll use the ext2_ bit routines,
- * since they are little endian */
+ * since they are little endian
+ */
#ifdef ADDR_UNALIGNED_BITS
# define aligned_address(addr) ((void *)((long)(addr) & ~((1UL << ADDR_UNALIGNED_BITS) - 1)))
@@ -1721,46 +1962,16 @@ extern void make_empty_dir_item_v1(char *body, __le32 dirid, __le32 objid,
extern void make_empty_dir_item(char *body, __le32 dirid, __le32 objid,
__le32 par_dirid, __le32 par_objid);
-/* array of the entry headers */
- /* get item body */
-#define B_I_PITEM(bh,ih) ( (bh)->b_data + ih_location(ih) )
-#define B_I_DEH(bh,ih) ((struct reiserfs_de_head *)(B_I_PITEM(bh,ih)))
-
-/* length of the directory entry in directory item. This define
- calculates length of i-th directory entry using directory entry
- locations from dir entry head. When it calculates length of 0-th
- directory entry, it uses length of whole item in place of entry
- location of the non-existent following entry in the calculation.
- See picture above.*/
-/*
-#define I_DEH_N_ENTRY_LENGTH(ih,deh,i) \
-((i) ? (deh_location((deh)-1) - deh_location((deh))) : (ih_item_len((ih)) - deh_location((deh))))
-*/
-static inline int entry_length(const struct buffer_head *bh,
- const struct item_head *ih, int pos_in_item)
-{
- struct reiserfs_de_head *deh;
-
- deh = B_I_DEH(bh, ih) + pos_in_item;
- if (pos_in_item)
- return deh_location(deh - 1) - deh_location(deh);
-
- return ih_item_len(ih) - deh_location(deh);
-}
-
-/* number of entries in the directory item, depends on ENTRY_COUNT being at the start of directory dynamic data. */
-#define I_ENTRY_COUNT(ih) (ih_entry_count((ih)))
-
-/* name by bh, ih and entry_num */
-#define B_I_E_NAME(bh,ih,entry_num) ((char *)(bh->b_data + ih_location(ih) + deh_location(B_I_DEH(bh,ih)+(entry_num))))
-
-// two entries per block (at least)
+/* two entries per block (at least) */
#define REISERFS_MAX_NAME(block_size) 255
-/* this structure is used for operations on directory entries. It is
- not a disk structure. */
-/* When reiserfs_find_entry or search_by_entry_key find directory
- entry, they return filled reiserfs_dir_entry structure */
+/*
+ * this structure is used for operations on directory entries. It is
+ * not a disk structure.
+ *
+ * When reiserfs_find_entry or search_by_entry_key find directory
+ * entry, they return filled reiserfs_dir_entry structure
+ */
struct reiserfs_dir_entry {
struct buffer_head *de_bh;
int de_item_num;
@@ -1778,10 +1989,14 @@ struct reiserfs_dir_entry {
struct cpu_key de_entry_key;
};
-/* these defines are useful when a particular member of a reiserfs_dir_entry is needed */
+/*
+ * these defines are useful when a particular member of
+ * a reiserfs_dir_entry is needed
+ */
/* pointer to file name, stored in entry */
-#define B_I_DEH_ENTRY_FILE_NAME(bh,ih,deh) (B_I_PITEM (bh, ih) + deh_location(deh))
+#define B_I_DEH_ENTRY_FILE_NAME(bh, ih, deh) \
+ (ih_item_body(bh, ih) + deh_location(deh))
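
Sketch of looking up the i-th name in a directory item with these
accessors (bh, ih and i are assumed to be in scope):

	struct reiserfs_de_head *deh = B_I_DEH(bh, ih) + i;
	char *name = B_I_DEH_ENTRY_FILE_NAME(bh, ih, deh);
	int name_len = entry_length(bh, ih, i);	/* may include padding */
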
/* length of name */
#define I_DEH_N_ENTRY_FILE_NAME_LENGTH(ih,deh,entry_num) \
@@ -1804,11 +2019,13 @@ struct reiserfs_dir_entry {
* |______|_______________|___________________|___________|
*/
-/***************************************************************************/
-/* DISK CHILD */
-/***************************************************************************/
-/* Disk child pointer: The pointer from an internal node of the tree
- to a node that is on disk. */
+/***************************************************************************
+ * DISK CHILD *
+ ***************************************************************************/
+/*
+ * Disk child pointer:
+ * The pointer from an internal node of the tree to a node that is on disk.
+ */
struct disk_child {
__le32 dc_block_number; /* Disk child's block number. */
__le16 dc_size; /* Disk child's used space. */
@@ -1841,47 +2058,66 @@ struct disk_child {
#define MAX_NR_KEY(bh) ( (MAX_CHILD_SIZE(bh)-DC_SIZE)/(KEY_SIZE+DC_SIZE) )
#define MIN_NR_KEY(bh) (MAX_NR_KEY(bh)/2)
-/***************************************************************************/
-/* PATH STRUCTURES AND DEFINES */
-/***************************************************************************/
+/***************************************************************************
+ * PATH STRUCTURES AND DEFINES *
+ ***************************************************************************/
-/* Search_by_key fills up the path from the root to the leaf as it descends the tree looking for the
- key. It uses reiserfs_bread to try to find buffers in the cache given their block number. If it
- does not find them in the cache it reads them from disk. For each node search_by_key finds using
- reiserfs_bread it then uses bin_search to look through that node. bin_search will find the
- position of the block_number of the next node if it is looking through an internal node. If it
- is looking through a leaf node bin_search will find the position of the item which has key either
- equal to given key, or which is the maximal key less than the given key. */
+/*
+ * search_by_key fills up the path from the root to the leaf as it descends
+ * the tree looking for the key. It uses reiserfs_bread to try to find
+ * buffers in the cache given their block number. If it does not find
+ * them in the cache it reads them from disk. For each node search_by_key
+ * finds using reiserfs_bread it then uses bin_search to look through that
+ * node. bin_search will find the position of the block_number of the next
+ * node if it is looking through an internal node. If it is looking through
+ * a leaf node bin_search will find the position of the item which has key
+ * either equal to given key, or which is the maximal key less than the
+ * given key.
+ */
struct path_element {
- struct buffer_head *pe_buffer; /* Pointer to the buffer at the path in the tree. */
- int pe_position; /* Position in the tree node which is placed in the */
- /* buffer above. */
+ /* Pointer to the buffer at the path in the tree. */
+ struct buffer_head *pe_buffer;
+ /* Position in the tree node which is placed in the buffer above. */
+ int pe_position;
};
-#define MAX_HEIGHT 5 /* maximal height of a tree. don't change this without changing JOURNAL_PER_BALANCE_CNT */
-#define EXTENDED_MAX_HEIGHT 7 /* Must be equals MAX_HEIGHT + FIRST_PATH_ELEMENT_OFFSET */
-#define FIRST_PATH_ELEMENT_OFFSET 2 /* Must be equal to at least 2. */
-
-#define ILLEGAL_PATH_ELEMENT_OFFSET 1 /* Must be equal to FIRST_PATH_ELEMENT_OFFSET - 1 */
-#define MAX_FEB_SIZE 6 /* this MUST be MAX_HEIGHT + 1. See about FEB below */
-
-/* We need to keep track of who the ancestors of nodes are. When we
- perform a search we record which nodes were visited while
- descending the tree looking for the node we searched for. This list
- of nodes is called the path. This information is used while
- performing balancing. Note that this path information may become
- invalid, and this means we must check it when using it to see if it
- is still valid. You'll need to read search_by_key and the comments
- in it, especially about decrement_counters_in_path(), to understand
- this structure.
-
-Paths make the code so much harder to work with and debug.... An
-enormous number of bugs are due to them, and trying to write or modify
-code that uses them just makes my head hurt. They are based on an
-excessive effort to avoid disturbing the precious VFS code.:-( The
-gods only know how we are going to SMP the code that uses them.
-znodes are the way! */
+/*
+ * maximal height of a tree. don't change this without
+ * changing JOURNAL_PER_BALANCE_CNT
+ */
+#define MAX_HEIGHT 5
+
+/* Must equal MAX_HEIGHT + FIRST_PATH_ELEMENT_OFFSET */
+#define EXTENDED_MAX_HEIGHT 7
+
+/* Must be equal to at least 2. */
+#define FIRST_PATH_ELEMENT_OFFSET 2
+
+/* Must be equal to FIRST_PATH_ELEMENT_OFFSET - 1 */
+#define ILLEGAL_PATH_ELEMENT_OFFSET 1
+
+/* this MUST be MAX_HEIGHT + 1. See about FEB below */
+#define MAX_FEB_SIZE 6
+
+/*
+ * We need to keep track of who the ancestors of nodes are. When we
+ * perform a search we record which nodes were visited while
+ * descending the tree looking for the node we searched for. This list
+ * of nodes is called the path. This information is used while
+ * performing balancing. Note that this path information may become
+ * invalid, and this means we must check it when using it to see if it
+ * is still valid. You'll need to read search_by_key and the comments
+ * in it, especially about decrement_counters_in_path(), to understand
+ * this structure.
+ *
+ * Paths make the code so much harder to work with and debug.... An
+ * enormous number of bugs are due to them, and trying to write or modify
+ * code that uses them just makes my head hurt. They are based on an
+ * excessive effort to avoid disturbing the precious VFS code. :-( The
+ * gods only know how we are going to SMP the code that uses them.
+ * znodes are the way!
+ */
#define PATH_READA 0x1 /* do read ahead */
#define PATH_READA_BACK 0x2 /* read backwards */
@@ -1889,7 +2125,8 @@ znodes are the way! */
struct treepath {
int path_length; /* Length of the array above. */
int reada;
- struct path_element path_elements[EXTENDED_MAX_HEIGHT]; /* Array of the path elements. */
+ /* Array of the path elements. */
+ struct path_element path_elements[EXTENDED_MAX_HEIGHT];
int pos_in_item;
};
@@ -1908,41 +2145,124 @@ struct treepath var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,}
#define PATH_OFFSET_POSITION(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_position)
#define PATH_PLAST_BUFFER(path) (PATH_OFFSET_PBUFFER((path), (path)->path_length))
- /* you know, to the person who didn't
- write this the macro name does not
- at first suggest what it does.
- Maybe POSITION_FROM_PATH_END? Or
- maybe we should just focus on
- dumping paths... -Hans */
+
+/*
+ * you know, to the person who didn't write this the macro name does not
+ * at first suggest what it does. Maybe POSITION_FROM_PATH_END? Or
+ * maybe we should just focus on dumping paths... -Hans
+ */
#define PATH_LAST_POSITION(path) (PATH_OFFSET_POSITION((path), (path)->path_length))
-#define PATH_PITEM_HEAD(path) B_N_PITEM_HEAD(PATH_PLAST_BUFFER(path), PATH_LAST_POSITION(path))
+/*
+ * in do_balance leaf has h == 0 in contrast with path structure,
+ * where root has level == 0. That is why we need these defines
+ */
+
+/* tb->S[h] */
+#define PATH_H_PBUFFER(path, h) \
+ PATH_OFFSET_PBUFFER(path, path->path_length - (h))
+
+/* tb->F[h] or tb->S[0]->b_parent */
+#define PATH_H_PPARENT(path, h) PATH_H_PBUFFER(path, (h) + 1)
+
+#define PATH_H_POSITION(path, h) \
+ PATH_OFFSET_POSITION(path, path->path_length - (h))
-/* in do_balance leaf has h == 0 in contrast with path structure,
- where root has level == 0. That is why we need these defines */
-#define PATH_H_PBUFFER(path, h) PATH_OFFSET_PBUFFER (path, path->path_length - (h)) /* tb->S[h] */
-#define PATH_H_PPARENT(path, h) PATH_H_PBUFFER (path, (h) + 1) /* tb->F[h] or tb->S[0]->b_parent */
-#define PATH_H_POSITION(path, h) PATH_OFFSET_POSITION (path, path->path_length - (h))
-#define PATH_H_B_ITEM_ORDER(path, h) PATH_H_POSITION(path, h + 1) /* tb->S[h]->b_item_order */
+/* tb->S[h]->b_item_order */
+#define PATH_H_B_ITEM_ORDER(path, h) PATH_H_POSITION(path, h + 1)
#define PATH_H_PATH_OFFSET(path, n_h) ((path)->path_length - (n_h))
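
An illustrative consequence of the h-vs-level flip described above:
with h == 0 the macros resolve to the leaf and its parent:

	struct buffer_head *leaf   = PATH_H_PBUFFER(path, 0);	/* == PATH_PLAST_BUFFER(path) */
	struct buffer_head *parent = PATH_H_PPARENT(path, 0);	/* one path element up */
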
+static inline void *reiserfs_node_data(const struct buffer_head *bh)
+{
+ return bh->b_data + sizeof(struct block_head);
+}
+
+/* get key from internal node */
+static inline struct reiserfs_key *internal_key(struct buffer_head *bh,
+ int item_num)
+{
+ struct reiserfs_key *key = reiserfs_node_data(bh);
+
+ return &key[item_num];
+}
+
+/* get the item header from leaf node */
+static inline struct item_head *item_head(const struct buffer_head *bh,
+ int item_num)
+{
+ struct item_head *ih = reiserfs_node_data(bh);
+
+ return &ih[item_num];
+}
+
+/* get the key from leaf node */
+static inline struct reiserfs_key *leaf_key(const struct buffer_head *bh,
+ int item_num)
+{
+ return &item_head(bh, item_num)->ih_key;
+}
+
+static inline void *ih_item_body(const struct buffer_head *bh,
+ const struct item_head *ih)
+{
+ return bh->b_data + ih_location(ih);
+}
+
+/* get item body from leaf node */
+static inline void *item_body(const struct buffer_head *bh, int item_num)
+{
+ return ih_item_body(bh, item_head(bh, item_num));
+}
+
+static inline struct item_head *tp_item_head(const struct treepath *path)
+{
+ return item_head(PATH_PLAST_BUFFER(path), PATH_LAST_POSITION(path));
+}
+
+static inline void *tp_item_body(const struct treepath *path)
+{
+ return item_body(PATH_PLAST_BUFFER(path), PATH_LAST_POSITION(path));
+}
+
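
Usage sketch: the new inlines replace the old macro spellings removed
just below:

	struct item_head *ih = tp_item_head(path);	/* was get_ih(path)   */
	void *body           = tp_item_body(path);	/* was get_item(path) */
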
#define get_last_bh(path) PATH_PLAST_BUFFER(path)
-#define get_ih(path) PATH_PITEM_HEAD(path)
#define get_item_pos(path) PATH_LAST_POSITION(path)
-#define get_item(path) ((void *)B_N_PITEM(PATH_PLAST_BUFFER(path), PATH_LAST_POSITION (path)))
#define item_moved(ih,path) comp_items(ih, path)
#define path_changed(ih,path) comp_items (ih, path)
-/***************************************************************************/
-/* MISC */
-/***************************************************************************/
+/* array of the directory entry headers at the start of the item body */
+#define B_I_DEH(bh, ih) ((struct reiserfs_de_head *)(ih_item_body(bh, ih)))
+
+/*
+ * length of the directory entry in a directory item. This function
+ * calculates the length of the i-th directory entry using the
+ * directory entry locations from the dir entry head. When it
+ * calculates the length of the 0-th directory entry, it uses the
+ * length of the whole item in place of the entry location of the
+ * non-existent following entry in the calculation.
+ * See picture above.
+ */
+static inline int entry_length(const struct buffer_head *bh,
+ const struct item_head *ih, int pos_in_item)
+{
+ struct reiserfs_de_head *deh;
+
+ deh = B_I_DEH(bh, ih) + pos_in_item;
+ if (pos_in_item)
+ return deh_location(deh - 1) - deh_location(deh);
+
+ return ih_item_len(ih) - deh_location(deh);
+}
+
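A sketch of entry_length() in use (not part of the patch), assuming the
existing ih_entry_count() accessor; total_entry_bytes() is hypothetical.
Summing entry_length() over every entry yields the name bytes of the item,
i.e. ih_item_len(ih) minus the space occupied by the deh array.

	/* hypothetical: total bytes occupied by entry bodies (names) */
	static int total_entry_bytes(const struct buffer_head *bh,
				     const struct item_head *ih)
	{
		int i, total = 0;

		for (i = 0; i < ih_entry_count(ih); i++)
			total += entry_length(bh, ih, i);

		return total;
	}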
+/***************************************************************************
+ * MISC *
+ ***************************************************************************/
/* Size of pointer to the unformatted node. */
#define UNFM_P_SIZE (sizeof(unp_t))
#define UNFM_P_SHIFT 2
-// in in-core inode key is stored on le form
+/* in the in-core inode the key is stored in little-endian form */
#define INODE_PKEY(inode) ((struct reiserfs_key *)(REISERFS_I(inode)->i_key))
#define MAX_UL_INT 0xffffffff
@@ -1958,7 +2278,6 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
return (loff_t) ((~(__u64) 0) >> 4);
}
-/*#define MAX_KEY_UNIQUENESS MAX_UL_INT*/
#define MAX_KEY_OBJECTID MAX_UL_INT
#define MAX_B_NUM MAX_UL_INT
@@ -1967,9 +2286,12 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
/* the purpose is to detect overflow of an unsigned short */
#define REISERFS_LINK_MAX (MAX_US_INT - 1000)
-/* The following defines are used in reiserfs_insert_item and reiserfs_append_item */
-#define REISERFS_KERNEL_MEM 0 /* reiserfs kernel memory mode */
-#define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
+/*
+ * The following defines are used in reiserfs_insert_item
+ * and reiserfs_append_item
+ */
+#define REISERFS_KERNEL_MEM 0 /* kernel memory mode */
+#define REISERFS_USER_MEM 1 /* user memory mode */
#define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
#define get_generation(s) atomic_read (&fs_generation(s))
@@ -1981,46 +2303,65 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
__fs_changed(gen, s); \
})
-/***************************************************************************/
-/* FIXATE NODES */
-/***************************************************************************/
+/***************************************************************************
+ * FIXATE NODES *
+ ***************************************************************************/
#define VI_TYPE_LEFT_MERGEABLE 1
#define VI_TYPE_RIGHT_MERGEABLE 2
-/* To make any changes in the tree we always first find node, that
- contains item to be changed/deleted or place to insert a new
- item. We call this node S. To do balancing we need to decide what
- we will shift to left/right neighbor, or to a new node, where new
- item will be etc. To make this analysis simpler we build virtual
- node. Virtual node is an array of items, that will replace items of
- node S. (For instance if we are going to delete an item, virtual
- node does not contain it). Virtual node keeps information about
- item sizes and types, mergeability of first and last items, sizes
- of all entries in directory item. We use this array of items when
- calculating what we can shift to neighbors and how many nodes we
- have to have if we do not any shiftings, if we shift to left/right
- neighbor or to both. */
+/*
+ * To make any changes in the tree we always first find the node that
+ * contains the item to be changed/deleted, or the place to insert a
+ * new item. We call this node S. To do balancing we need to decide
+ * what we will shift to the left/right neighbor, or to a new node
+ * where the new item will go, etc. To make this analysis simpler we
+ * build a virtual node. The virtual node is an array of items that
+ * will replace the items of node S. (For instance, if we are going to
+ * delete an item, the virtual node does not contain it.) The virtual
+ * node keeps information about item sizes and types, mergeability of
+ * the first and last items, and sizes of all entries in a directory
+ * item. We use this array of items when calculating what we can shift
+ * to neighbors and how many nodes we need if we do no shifting, if we
+ * shift to the left/right neighbor, or to both.
+ */
struct virtual_item {
- int vi_index; // index in the array of item operations
- unsigned short vi_type; // left/right mergeability
- unsigned short vi_item_len; /* length of item that it will have after balancing */
+ int vi_index; /* index in the array of item operations */
+ unsigned short vi_type; /* left/right mergeability */
+
+ /* length of item that it will have after balancing */
+ unsigned short vi_item_len;
+
struct item_head *vi_ih;
- const char *vi_item; // body of item (old or new)
- const void *vi_new_data; // 0 always but paste mode
- void *vi_uarea; // item specific area
+ const char *vi_item; /* body of item (old or new) */
+	const void *vi_new_data;	/* NULL except in paste mode */
+ void *vi_uarea; /* item specific area */
};
struct virtual_node {
- char *vn_free_ptr; /* this is a pointer to the free space in the buffer */
+ /* this is a pointer to the free space in the buffer */
+ char *vn_free_ptr;
+
unsigned short vn_nr_item; /* number of items in virtual node */
- short vn_size; /* size of node , that node would have if it has unlimited size and no balancing is performed */
- short vn_mode; /* mode of balancing (paste, insert, delete, cut) */
+
+ /*
+	 * size the node would have if it had unlimited
+	 * size and no balancing were performed
+ */
+ short vn_size;
+
+ /* mode of balancing (paste, insert, delete, cut) */
+ short vn_mode;
+
short vn_affected_item_num;
short vn_pos_in_item;
- struct item_head *vn_ins_ih; /* item header of inserted item, 0 for other modes */
+
+ /* item header of inserted item, 0 for other modes */
+ struct item_head *vn_ins_ih;
const void *vn_data;
- struct virtual_item *vn_vi; /* array of items (including a new one, excluding item to be deleted) */
+
+ /* array of items (including a new one, excluding item to be deleted) */
+ struct virtual_item *vn_vi;
};
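A sketch (not from the patch) of how these fields combine;
vn_total_item_len() is hypothetical.

	/* hypothetical: size the virtual items would occupy after balancing */
	static int vn_total_item_len(const struct virtual_node *vn)
	{
		int i, total = 0;

		for (i = 0; i < vn->vn_nr_item; i++)
			total += vn->vn_vi[i].vi_item_len;

		return total;
	}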
/* used by directory items when creating virtual nodes */
@@ -2030,22 +2371,25 @@ struct direntry_uarea {
__u16 entry_sizes[1];
} __attribute__ ((__packed__));
-/***************************************************************************/
-/* TREE BALANCE */
-/***************************************************************************/
+/***************************************************************************
+ * TREE BALANCE *
+ ***************************************************************************/
-/* This temporary structure is used in tree balance algorithms, and
- constructed as we go to the extent that its various parts are
- needed. It contains arrays of nodes that can potentially be
- involved in the balancing of node S, and parameters that define how
- each of the nodes must be balanced. Note that in these algorithms
- for balancing the worst case is to need to balance the current node
- S and the left and right neighbors and all of their parents plus
- create a new node. We implement S1 balancing for the leaf nodes
- and S0 balancing for the internal nodes (S1 and S0 are defined in
- our papers.)*/
+/*
+ * This temporary structure is used in tree balance algorithms, and
+ * constructed as we go to the extent that its various parts are
+ * needed. It contains arrays of nodes that can potentially be
+ * involved in the balancing of node S, and parameters that define how
+ * each of the nodes must be balanced. Note that in these algorithms
+ * for balancing the worst case is to need to balance the current node
+ * S and the left and right neighbors and all of their parents plus
+ * create a new node. We implement S1 balancing for the leaf nodes
+ * and S0 balancing for the internal nodes (S1 and S0 are defined in
+ * our papers.)
+ */
-#define MAX_FREE_BLOCK 7 /* size of the array of buffers to free at end of do_balance */
+/* size of the array of buffers to free at end of do_balance */
+#define MAX_FREE_BLOCK 7
/* maximum number of FEB blocknrs on a single level */
#define MAX_AMOUNT_NEEDED 2
@@ -2057,64 +2401,144 @@ struct tree_balance {
struct super_block *tb_sb;
struct reiserfs_transaction_handle *transaction_handle;
struct treepath *tb_path;
- struct buffer_head *L[MAX_HEIGHT]; /* array of left neighbors of nodes in the path */
- struct buffer_head *R[MAX_HEIGHT]; /* array of right neighbors of nodes in the path */
- struct buffer_head *FL[MAX_HEIGHT]; /* array of fathers of the left neighbors */
- struct buffer_head *FR[MAX_HEIGHT]; /* array of fathers of the right neighbors */
- struct buffer_head *CFL[MAX_HEIGHT]; /* array of common parents of center node and its left neighbor */
- struct buffer_head *CFR[MAX_HEIGHT]; /* array of common parents of center node and its right neighbor */
-
- struct buffer_head *FEB[MAX_FEB_SIZE]; /* array of empty buffers. Number of buffers in array equals
- cur_blknum. */
+
+ /* array of left neighbors of nodes in the path */
+ struct buffer_head *L[MAX_HEIGHT];
+
+ /* array of right neighbors of nodes in the path */
+ struct buffer_head *R[MAX_HEIGHT];
+
+ /* array of fathers of the left neighbors */
+ struct buffer_head *FL[MAX_HEIGHT];
+
+ /* array of fathers of the right neighbors */
+	struct buffer_head *FR[MAX_HEIGHT];
+
+	/* array of common parents of center node and its left neighbor */
+ struct buffer_head *CFL[MAX_HEIGHT];
+
+ /* array of common parents of center node and its right neighbor */
+ struct buffer_head *CFR[MAX_HEIGHT];
+
+ /*
+ * array of empty buffers. Number of buffers in array equals
+ * cur_blknum.
+ */
+ struct buffer_head *FEB[MAX_FEB_SIZE];
struct buffer_head *used[MAX_FEB_SIZE];
struct buffer_head *thrown[MAX_FEB_SIZE];
- int lnum[MAX_HEIGHT]; /* array of number of items which must be
- shifted to the left in order to balance the
- current node; for leaves includes item that
- will be partially shifted; for internal
- nodes, it is the number of child pointers
- rather than items. It includes the new item
- being created. The code sometimes subtracts
- one to get the number of wholly shifted
- items for other purposes. */
- int rnum[MAX_HEIGHT]; /* substitute right for left in comment above */
- int lkey[MAX_HEIGHT]; /* array indexed by height h mapping the key delimiting L[h] and
- S[h] to its item number within the node CFL[h] */
- int rkey[MAX_HEIGHT]; /* substitute r for l in comment above */
- int insert_size[MAX_HEIGHT]; /* the number of bytes by we are trying to add or remove from
- S[h]. A negative value means removing. */
- int blknum[MAX_HEIGHT]; /* number of nodes that will replace node S[h] after
- balancing on the level h of the tree. If 0 then S is
- being deleted, if 1 then S is remaining and no new nodes
- are being created, if 2 or 3 then 1 or 2 new nodes is
- being created */
+
+ /*
+ * array of number of items which must be shifted to the left in
+ * order to balance the current node; for leaves includes item that
+ * will be partially shifted; for internal nodes, it is the number
+ * of child pointers rather than items. It includes the new item
+ * being created. The code sometimes subtracts one to get the
+ * number of wholly shifted items for other purposes.
+ */
+ int lnum[MAX_HEIGHT];
+
+ /* substitute right for left in comment above */
+ int rnum[MAX_HEIGHT];
+
+ /*
+ * array indexed by height h mapping the key delimiting L[h] and
+ * S[h] to its item number within the node CFL[h]
+ */
+ int lkey[MAX_HEIGHT];
+
+ /* substitute r for l in comment above */
+ int rkey[MAX_HEIGHT];
+
+ /*
+	 * the number of bytes by which we are trying to add to or
+	 * remove from S[h]. A negative value means removing.
+ */
+ int insert_size[MAX_HEIGHT];
+
+ /*
+ * number of nodes that will replace node S[h] after balancing
+ * on the level h of the tree. If 0 then S is being deleted,
+ * if 1 then S is remaining and no new nodes are being created,
+	 * if 2 or 3 then 1 or 2 new nodes are being created
+ */
+ int blknum[MAX_HEIGHT];
/* fields that are used only for balancing leaves of the tree */
- int cur_blknum; /* number of empty blocks having been already allocated */
- int s0num; /* number of items that fall into left most node when S[0] splits */
- int s1num; /* number of items that fall into first new node when S[0] splits */
- int s2num; /* number of items that fall into second new node when S[0] splits */
- int lbytes; /* number of bytes which can flow to the left neighbor from the left */
- /* most liquid item that cannot be shifted from S[0] entirely */
- /* if -1 then nothing will be partially shifted */
- int rbytes; /* number of bytes which will flow to the right neighbor from the right */
- /* most liquid item that cannot be shifted from S[0] entirely */
- /* if -1 then nothing will be partially shifted */
- int s1bytes; /* number of bytes which flow to the first new node when S[0] splits */
- /* note: if S[0] splits into 3 nodes, then items do not need to be cut */
- int s2bytes;
- struct buffer_head *buf_to_free[MAX_FREE_BLOCK]; /* buffers which are to be freed after do_balance finishes by unfix_nodes */
- char *vn_buf; /* kmalloced memory. Used to create
- virtual node and keep map of
- dirtied bitmap blocks */
+
+ /* number of empty blocks having been already allocated */
+ int cur_blknum;
+
+ /* number of items that fall into left most node when S[0] splits */
+ int s0num;
+
+	/*
+	 * number of bytes which can flow to the left neighbor from the
+	 * leftmost liquid item that cannot be shifted from S[0] entirely;
+	 * if -1 then nothing will be partially shifted
+	 */
+ int lbytes;
+
+	/*
+	 * number of bytes which will flow to the right neighbor from the
+	 * rightmost liquid item that cannot be shifted from S[0] entirely;
+	 * if -1 then nothing will be partially shifted
+	 */
+ int rbytes;
+
+ /*
+ * index into the array of item headers in
+ * S[0] of the affected item
+ */
+ int item_pos;
+
+ /* new nodes allocated to hold what could not fit into S */
+ struct buffer_head *S_new[2];
+
+ /*
+ * number of items that will be placed into nodes in S_new
+ * when S[0] splits
+ */
+ int snum[2];
+
+ /*
+ * number of bytes which flow to nodes in S_new when S[0] splits
+ * note: if S[0] splits into 3 nodes, then items do not need to be cut
+ */
+ int sbytes[2];
+
+ int pos_in_item;
+ int zeroes_num;
+
+ /*
+ * buffers which are to be freed after do_balance finishes
+ * by unfix_nodes
+ */
+ struct buffer_head *buf_to_free[MAX_FREE_BLOCK];
+
+ /*
+ * kmalloced memory. Used to create virtual node and keep
+ * map of dirtied bitmap blocks
+ */
+ char *vn_buf;
+
int vn_buf_size; /* size of the vn_buf */
- struct virtual_node *tb_vn; /* VN starts after bitmap of bitmap blocks */
- int fs_gen; /* saved value of `reiserfs_generation' counter
- see FILESYSTEM_CHANGED() macro in reiserfs_fs.h */
+ /* VN starts after bitmap of bitmap blocks */
+ struct virtual_node *tb_vn;
+
+ /*
+	 * saved value of `reiserfs_generation' counter; see
+ * FILESYSTEM_CHANGED() macro in reiserfs_fs.h
+ */
+ int fs_gen;
+
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
- struct in_core_key key; /* key pointer, to pass to block allocator or
- another low-level subsystem */
+ /*
+ * key pointer, to pass to block allocator or
+ * another low-level subsystem
+ */
+ struct in_core_key key;
#endif
};
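The blknum[] convention above, restated as a hypothetical helper (not part
of the patch):

	/* hypothetical: new nodes needed at level h, per the blknum comment */
	static int new_nodes_needed(const struct tree_balance *tb, int h)
	{
		if (tb->blknum[h] == 0)		/* S[h] is being deleted */
			return 0;
		return tb->blknum[h] - 1;	/* 1 keeps S[h]; 2/3 add 1/2 */
	}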
@@ -2122,20 +2546,24 @@ struct tree_balance {
/* When inserting an item. */
#define M_INSERT 'i'
-/* When inserting into (directories only) or appending onto an already
- existent item. */
+/*
+ * When inserting into (directories only) or appending onto an already
+ * existent item.
+ */
#define M_PASTE 'p'
/* When deleting an item. */
#define M_DELETE 'd'
/* When truncating an item or removing an entry from a (directory) item. */
-#define M_CUT 'c'
+#define M_CUT 'c'
/* used when balancing on leaf level skipped (in reiserfsck) */
#define M_INTERNAL 'n'
-/* When further balancing is not needed, then do_balance does not need
- to be called. */
-#define M_SKIP_BALANCING 's'
+/*
+ * When further balancing is not needed, then do_balance does not need
+ * to be called.
+ */
+#define M_SKIP_BALANCING 's'
#define M_CONVERT 'v'
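A hypothetical debugging helper (not in the patch) naming the balance modes
defined above:

	/* hypothetical: printable name for a balance mode character */
	static const char *balance_mode_name(int mode)
	{
		switch (mode) {
		case M_INSERT:		return "insert";
		case M_PASTE:		return "paste";
		case M_DELETE:		return "delete";
		case M_CUT:		return "cut";
		case M_INTERNAL:	return "internal";
		case M_SKIP_BALANCING:	return "skip balancing";
		case M_CONVERT:		return "convert";
		default:		return "unknown";
		}
	}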
/* modes of leaf_move_items */
@@ -2148,8 +2576,10 @@ struct tree_balance {
#define FIRST_TO_LAST 0
#define LAST_TO_FIRST 1
-/* used in do_balance for passing parent of node information that has
- been gotten from tb struct */
+/*
+ * used in do_balance for passing parent of node information that has
+ * been gotten from tb struct
+ */
struct buffer_info {
struct tree_balance *tb;
struct buffer_head *bi_bh;
@@ -2167,20 +2597,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
return bi ? sb_from_tb(bi->tb) : NULL;
}
-/* there are 4 types of items: stat data, directory item, indirect, direct.
-+-------------------+------------+--------------+------------+
-| | k_offset | k_uniqueness | mergeable? |
-+-------------------+------------+--------------+------------+
-| stat data | 0 | 0 | no |
-+-------------------+------------+--------------+------------+
-| 1st directory item| DOT_OFFSET |DIRENTRY_UNIQUENESS| no |
-| non 1st directory | hash value | | yes |
-| item | | | |
-+-------------------+------------+--------------+------------+
-| indirect item | offset + 1 |TYPE_INDIRECT | if this is not the first indirect item of the object
-+-------------------+------------+--------------+------------+
-| direct item | offset + 1 |TYPE_DIRECT | if not this is not the first direct item of the object
-+-------------------+------------+--------------+------------+
+/*
+ * there are 4 types of items: stat data, directory item, indirect, direct.
+ * +-------------------+------------+--------------+------------+
+ * | | k_offset | k_uniqueness | mergeable? |
+ * +-------------------+------------+--------------+------------+
+ * | stat data | 0 | 0 | no |
+ * +-------------------+------------+--------------+------------+
+ * | 1st directory item| DOT_OFFSET | DIRENTRY_ .. | no |
+ * | non 1st directory | hash value | UNIQUENESS | yes |
+ * | item | | | |
+ * +-------------------+------------+--------------+------------+
+ * | indirect item | offset + 1 |TYPE_INDIRECT | [1] |
+ * +-------------------+------------+--------------+------------+
+ * | direct item | offset + 1 |TYPE_DIRECT | [2] |
+ * +-------------------+------------+--------------+------------+
+ *
+ * [1] if this is not the first indirect item of the object
+ * [2] if this is not the first direct item of the object
*/
struct item_operations {
@@ -2219,49 +2653,43 @@ extern struct item_operations *item_ops[TYPE_ANY + 1];
/* number of blocks pointed to by the indirect item */
#define I_UNFM_NUM(ih) (ih_item_len(ih) / UNFM_P_SIZE)
-/* the used space within the unformatted node corresponding to pos within the item pointed to by ih */
+/*
+ * the used space within the unformatted node corresponding
+ * to pos within the item pointed to by ih
+ */
#define I_POS_UNFM_SIZE(ih,pos,size) (((pos) == I_UNFM_NUM(ih) - 1 ) ? (size) - ih_free_space(ih) : (size))
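A sketch (not in the patch) tying I_UNFM_NUM() and the I_POS_UNFM_SIZE rule
together: every pointer covers a full block except possibly the last, which
is short by ih_free_space(ih). indirect_item_bytes() is hypothetical.

	/* hypothetical: bytes covered by an indirect item */
	static inline int indirect_item_bytes(const struct item_head *ih,
					      int blocksize)
	{
		/* the last unformatted node may be only partially used */
		return I_UNFM_NUM(ih) * blocksize - ih_free_space(ih);
	}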
-/* number of bytes contained by the direct item or the unformatted nodes the indirect item points to */
-
-/* get the item header */
-#define B_N_PITEM_HEAD(bh,item_num) ( (struct item_head * )((bh)->b_data + BLKH_SIZE) + (item_num) )
-
-/* get key */
-#define B_N_PDELIM_KEY(bh,item_num) ( (struct reiserfs_key * )((bh)->b_data + BLKH_SIZE) + (item_num) )
-
-/* get the key */
-#define B_N_PKEY(bh,item_num) ( &(B_N_PITEM_HEAD(bh,item_num)->ih_key) )
-
-/* get item body */
-#define B_N_PITEM(bh,item_num) ( (bh)->b_data + ih_location(B_N_PITEM_HEAD((bh),(item_num))))
-
-/* get the stat data by the buffer header and the item order */
-#define B_N_STAT_DATA(bh,nr) \
-( (struct stat_data *)((bh)->b_data + ih_location(B_N_PITEM_HEAD((bh),(nr))) ) )
+/*
+ * number of bytes contained by the direct item or the
+ * unformatted nodes the indirect item points to
+ */
- /* following defines use reiserfs buffer header and item header */
+/* following defines use reiserfs buffer header and item header */
/* get stat-data */
#define B_I_STAT_DATA(bh, ih) ( (struct stat_data * )((bh)->b_data + ih_location(ih)) )
-// this is 3976 for size==4096
+/* this is 3976 for size==4096 */
#define MAX_DIRECT_ITEM_LEN(size) ((size) - BLKH_SIZE - 2*IH_SIZE - SD_SIZE - UNFM_P_SIZE)
-/* indirect items consist of entries which contain blocknrs, pos
- indicates which entry, and B_I_POS_UNFM_POINTER resolves to the
- blocknr contained by the entry pos points to */
-#define B_I_POS_UNFM_POINTER(bh,ih,pos) le32_to_cpu(*(((unp_t *)B_I_PITEM(bh,ih)) + (pos)))
-#define PUT_B_I_POS_UNFM_POINTER(bh,ih,pos, val) do {*(((unp_t *)B_I_PITEM(bh,ih)) + (pos)) = cpu_to_le32(val); } while (0)
+/*
+ * indirect items consist of entries which contain blocknrs, pos
+ * indicates which entry, and B_I_POS_UNFM_POINTER resolves to the
+ * blocknr contained by the entry pos points to
+ */
+#define B_I_POS_UNFM_POINTER(bh, ih, pos) \
+ le32_to_cpu(*(((unp_t *)ih_item_body(bh, ih)) + (pos)))
+#define PUT_B_I_POS_UNFM_POINTER(bh, ih, pos, val) \
+ (*(((unp_t *)ih_item_body(bh, ih)) + (pos)) = cpu_to_le32(val))
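A hypothetical caller of the reworked macros, for illustration only (a real
caller would also log the change through the journal):

	/* hypothetical: read and rewrite the pos-th pointer of an indirect item */
	static void remap_unfm_block(struct buffer_head *bh, struct item_head *ih,
				     int pos, b_blocknr_t new_blocknr)
	{
		if (B_I_POS_UNFM_POINTER(bh, ih, pos) != new_blocknr)
			PUT_B_I_POS_UNFM_POINTER(bh, ih, pos, new_blocknr);
	}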
struct reiserfs_iget_args {
__u32 objectid;
__u32 dirid;
};
-/***************************************************************************/
-/* FUNCTION DECLARATIONS */
-/***************************************************************************/
+/***************************************************************************
+ * FUNCTION DECLARATIONS *
+ ***************************************************************************/
#define get_journal_desc_magic(bh) (bh->b_data + bh->b_size - 12)
@@ -2273,7 +2701,10 @@ struct reiserfs_iget_args {
/* first block written in a commit. */
struct reiserfs_journal_desc {
__le32 j_trans_id; /* id of commit */
- __le32 j_len; /* length of commit. len +1 is the commit block */
+
+ /* length of commit. len +1 is the commit block */
+ __le32 j_len;
+
__le32 j_mount_id; /* mount id of this trans */
__le32 j_realblock[1]; /* real locations for each block */
};
@@ -2300,22 +2731,35 @@ struct reiserfs_journal_commit {
#define set_commit_trans_id(c,val) do { (c)->j_trans_id = cpu_to_le32 (val); } while (0)
#define set_commit_trans_len(c,val) do { (c)->j_len = cpu_to_le32 (val); } while (0)
-/* this header block gets written whenever a transaction is considered fully flushed, and is more recent than the
-** last fully flushed transaction. fully flushed means all the log blocks and all the real blocks are on disk,
-** and this transaction does not need to be replayed.
-*/
+/*
+ * this header block gets written whenever a transaction is considered
+ * fully flushed, and is more recent than the last fully flushed transaction.
+ * fully flushed means all the log blocks and all the real blocks are on
+ * disk, and this transaction does not need to be replayed.
+ */
struct reiserfs_journal_header {
- __le32 j_last_flush_trans_id; /* id of last fully flushed transaction */
- __le32 j_first_unflushed_offset; /* offset in the log of where to start replay after a crash */
+ /* id of last fully flushed transaction */
+ __le32 j_last_flush_trans_id;
+
+ /* offset in the log of where to start replay after a crash */
+ __le32 j_first_unflushed_offset;
+
__le32 j_mount_id;
/* 12 */ struct journal_params jh_journal;
};
/* biggest tunable defines are right here */
#define JOURNAL_BLOCK_COUNT 8192 /* number of blocks in the journal */
-#define JOURNAL_TRANS_MAX_DEFAULT 1024 /* biggest possible single transaction, don't change for now (8/3/99) */
+
+/* biggest possible single transaction, don't change for now (8/3/99) */
+#define JOURNAL_TRANS_MAX_DEFAULT 1024
#define JOURNAL_TRANS_MIN_DEFAULT 256
-#define JOURNAL_MAX_BATCH_DEFAULT 900 /* max blocks to batch into one transaction, don't make this any bigger than 900 */
+
+/*
+ * max blocks to batch into one transaction,
+ * don't make this any bigger than 900
+ */
+#define JOURNAL_MAX_BATCH_DEFAULT 900
#define JOURNAL_MIN_RATIO 2
#define JOURNAL_MAX_COMMIT_AGE 30
#define JOURNAL_MAX_TRANS_AGE 30
@@ -2340,16 +2784,18 @@ struct reiserfs_journal_header {
#define REISERFS_QUOTA_DEL_BLOCKS(s) 0
#endif
-/* both of these can be as low as 1, or as high as you want. The min is the
-** number of 4k bitmap nodes preallocated on mount. New nodes are allocated
-** as needed, and released when transactions are committed. On release, if
-** the current number of nodes is > max, the node is freed, otherwise,
-** it is put on a free list for faster use later.
+/*
+ * both of these can be as low as 1, or as high as you want. The min is the
+ * number of 4k bitmap nodes preallocated on mount. New nodes are allocated
+ * as needed, and released when transactions are committed. On release, if
+ * the current number of nodes is > max, the node is freed, otherwise,
+ * it is put on a free list for faster use later.
*/
#define REISERFS_MIN_BITMAP_NODES 10
#define REISERFS_MAX_BITMAP_NODES 100
-#define JBH_HASH_SHIFT 13 /* these are based on journal hash size of 8192 */
+/* these are based on journal hash size of 8192 */
+#define JBH_HASH_SHIFT 13
#define JBH_HASH_MASK 8191
#define _jhashfn(sb,block) \
@@ -2357,7 +2803,7 @@ struct reiserfs_journal_header {
(((block)<<(JBH_HASH_SHIFT - 6)) ^ ((block) >> 13) ^ ((block) << (JBH_HASH_SHIFT - 12))))
#define journal_hash(t,sb,block) ((t)[_jhashfn((sb),(block)) & JBH_HASH_MASK])
-// We need these to make journal.c code more readable
+/* We need these to make journal.c code more readable */
#define journal_find_get_block(s, block) __find_get_block(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
#define journal_getblk(s, block) __getblk(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
#define journal_bread(s, block) __bread(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
@@ -2365,12 +2811,14 @@ struct reiserfs_journal_header {
enum reiserfs_bh_state_bits {
BH_JDirty = BH_PrivateStart, /* buffer is in current transaction */
BH_JDirty_wait,
- BH_JNew, /* disk block was taken off free list before
- * being in a finished transaction, or
- * written to disk. Can be reused immed. */
+ /*
+ * disk block was taken off free list before being in a
+	 * finished transaction, or written to disk. Can be reused
+	 * immediately.
+ */
+ BH_JNew,
BH_JPrepared,
BH_JRestore_dirty,
- BH_JTest, // debugging only will go away
+	BH_JTest,		/* debugging only, will go away */
};
BUFFER_FNS(JDirty, journaled);
@@ -2386,27 +2834,36 @@ TAS_BUFFER_FNS(JRestore_dirty, journal_restore_dirty);
BUFFER_FNS(JTest, journal_test);
TAS_BUFFER_FNS(JTest, journal_test);
-/*
-** transaction handle which is passed around for all journal calls
-*/
+/* transaction handle which is passed around for all journal calls */
struct reiserfs_transaction_handle {
- struct super_block *t_super; /* super for this FS when journal_begin was
- called. saves calls to reiserfs_get_super
- also used by nested transactions to make
- sure they are nesting on the right FS
- _must_ be first in the handle
- */
+ /*
+	 * super for this FS when journal_begin was called; saves calls to
+	 * reiserfs_get_super. Also used by nested transactions to make
+	 * sure they are nesting on the right FS. _must_ be first
+	 * in the handle.
+ */
+ struct super_block *t_super;
+
int t_refcount;
int t_blocks_logged; /* number of blocks this writer has logged */
int t_blocks_allocated; /* number of blocks this writer allocated */
- unsigned int t_trans_id; /* sanity check, equals the current trans id */
+
+ /* sanity check, equals the current trans id */
+ unsigned int t_trans_id;
+
void *t_handle_save; /* save existing current->journal_info */
- unsigned displace_new_blocks:1; /* if new block allocation occurres, that block
- should be displaced from others */
+
+ /*
+	 * if new block allocation occurs, that block
+ * should be displaced from others
+ */
+ unsigned displace_new_blocks:1;
+
struct list_head t_list;
};
-/* used to keep track of ordered and tail writes, attached to the buffer
+/*
+ * used to keep track of ordered and tail writes, attached to the buffer
* head through b_journal_head.
*/
struct reiserfs_jh {
@@ -2419,7 +2876,7 @@ void reiserfs_free_jh(struct buffer_head *bh);
int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh);
int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh);
int journal_mark_dirty(struct reiserfs_transaction_handle *,
- struct super_block *, struct buffer_head *bh);
+ struct buffer_head *bh);
static inline int reiserfs_file_data_log(struct inode *inode)
{
@@ -2469,10 +2926,8 @@ int journal_init(struct super_block *, const char *j_dev_name, int old_format,
int journal_release(struct reiserfs_transaction_handle *, struct super_block *);
int journal_release_error(struct reiserfs_transaction_handle *,
struct super_block *);
-int journal_end(struct reiserfs_transaction_handle *, struct super_block *,
- unsigned long);
-int journal_end_sync(struct reiserfs_transaction_handle *, struct super_block *,
- unsigned long);
+int journal_end(struct reiserfs_transaction_handle *);
+int journal_end_sync(struct reiserfs_transaction_handle *);
int journal_mark_freed(struct reiserfs_transaction_handle *,
struct super_block *, b_blocknr_t blocknr);
int journal_transaction_should_end(struct reiserfs_transaction_handle *, int);
@@ -2481,7 +2936,7 @@ int reiserfs_in_journal(struct super_block *sb, unsigned int bmap_nr,
int journal_begin(struct reiserfs_transaction_handle *,
struct super_block *sb, unsigned long);
int journal_join_abort(struct reiserfs_transaction_handle *,
- struct super_block *sb, unsigned long);
+ struct super_block *sb);
void reiserfs_abort_journal(struct super_block *sb, int errno);
void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...);
int reiserfs_allocate_list_bitmaps(struct super_block *s,
@@ -2503,20 +2958,18 @@ int B_IS_IN_TREE(const struct buffer_head *);
extern void copy_item_head(struct item_head *to,
const struct item_head *from);
-// first key is in cpu form, second - le
+/* first key is in cpu form, second - le */
extern int comp_short_keys(const struct reiserfs_key *le_key,
const struct cpu_key *cpu_key);
extern void le_key2cpu_key(struct cpu_key *to, const struct reiserfs_key *from);
-// both are in le form
+/* both are in le form */
extern int comp_le_keys(const struct reiserfs_key *,
const struct reiserfs_key *);
extern int comp_short_le_keys(const struct reiserfs_key *,
const struct reiserfs_key *);
-//
-// get key version from on disk key - kludge
-//
+/* get key version from on-disk key - kludge */
static inline int le_key_version(const struct reiserfs_key *key)
{
int type;
@@ -2593,12 +3046,12 @@ void padd_item(char *item, int total_length, int length);
/* inode.c */
/* args for the create parameter of reiserfs_get_block */
-#define GET_BLOCK_NO_CREATE 0 /* don't create new blocks or convert tails */
-#define GET_BLOCK_CREATE 1 /* add anything you need to find block */
-#define GET_BLOCK_NO_HOLE 2 /* return -ENOENT for file holes */
-#define GET_BLOCK_READ_DIRECT 4 /* read the tail if indirect item not found */
-#define GET_BLOCK_NO_IMUX 8 /* i_mutex is not held, don't preallocate */
-#define GET_BLOCK_NO_DANGLE 16 /* don't leave any transactions running */
+#define GET_BLOCK_NO_CREATE 0 /* don't create new blocks or convert tails */
+#define GET_BLOCK_CREATE 1 /* add anything you need to find block */
+#define GET_BLOCK_NO_HOLE 2 /* return -ENOENT for file holes */
+#define GET_BLOCK_READ_DIRECT 4 /* read the tail if indirect item not found */
+#define GET_BLOCK_NO_IMUX 8 /* i_mutex is not held, don't preallocate */
+#define GET_BLOCK_NO_DANGLE 16 /* don't leave any transactions running */
void reiserfs_read_locked_inode(struct inode *inode,
struct reiserfs_iget_args *args);
@@ -2797,25 +3250,49 @@ struct buffer_head *get_FEB(struct tree_balance *);
/* bitmap.c */
-/* structure contains hints for block allocator, and it is a container for
- * arguments, such as node, search path, transaction_handle, etc. */
+/*
+ * This structure contains hints for the block allocator; it is a container
+ * for arguments such as node, search path, transaction_handle, etc.
+ */
struct __reiserfs_blocknr_hint {
- struct inode *inode; /* inode passed to allocator, if we allocate unf. nodes */
+ /* inode passed to allocator, if we allocate unf. nodes */
+ struct inode *inode;
+
sector_t block; /* file offset, in blocks */
struct in_core_key key;
- struct treepath *path; /* search path, used by allocator to deternine search_start by
- * various ways */
- struct reiserfs_transaction_handle *th; /* transaction handle is needed to log super blocks and
- * bitmap blocks changes */
+
+ /*
+	 * search path, used by the allocator to determine search_start by
+ * various ways
+ */
+ struct treepath *path;
+
+ /*
+ * transaction handle is needed to log super blocks
+ * and bitmap blocks changes
+ */
+ struct reiserfs_transaction_handle *th;
+
b_blocknr_t beg, end;
- b_blocknr_t search_start; /* a field used to transfer search start value (block number)
- * between different block allocator procedures
- * (determine_search_start() and others) */
- int prealloc_size; /* is set in determine_prealloc_size() function, used by underlayed
- * function that do actual allocation */
-
- unsigned formatted_node:1; /* the allocator uses different polices for getting disk space for
- * formatted/unformatted blocks with/without preallocation */
+
+ /*
+ * a field used to transfer search start value (block number)
+ * between different block allocator procedures
+ * (determine_search_start() and others)
+ */
+ b_blocknr_t search_start;
+
+ /*
+	 * set in the determine_prealloc_size() function; used by the
+	 * underlying functions that do the actual allocation
+ */
+ int prealloc_size;
+
+ /*
+	 * the allocator uses different policies for getting disk
+ * space for formatted/unformatted blocks with/without preallocation
+ */
+ unsigned formatted_node:1;
unsigned preallocate:1;
};
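A hypothetical sketch of filling the hint before an allocation request; the
field meanings come from the comments above, but the helper itself is
invented for illustration and assumes the usual kernel headers are in scope.

	/* hypothetical: prepare a hint for allocating near a file position */
	static void init_blocknr_hint(struct __reiserfs_blocknr_hint *hint,
				      struct inode *inode, sector_t block,
				      struct treepath *path,
				      struct reiserfs_transaction_handle *th)
	{
		memset(hint, 0, sizeof(*hint));
		hint->inode = inode;	/* we are allocating unf. nodes */
		hint->block = block;	/* file offset, in blocks */
		hint->path = path;	/* lets the allocator derive search_start */
		hint->th = th;		/* needed to log bitmap changes */
		hint->preallocate = 1;	/* assumption: caller wants preallocation */
	}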
@@ -2909,13 +3386,15 @@ __u32 r5_hash(const signed char *msg, int len);
#define reiserfs_test_le_bit test_bit_le
#define reiserfs_find_next_zero_le_bit find_next_zero_bit_le
-/* sometimes reiserfs_truncate may require to allocate few new blocks
- to perform indirect2direct conversion. People probably used to
- think, that truncate should work without problems on a filesystem
- without free disk space. They may complain that they can not
- truncate due to lack of free disk space. This spare space allows us
- to not worry about it. 500 is probably too much, but it should be
- absolutely safe */
+/*
+ * sometimes reiserfs_truncate may need to allocate a few new blocks to
+ * perform an indirect2direct conversion. People are probably used to
+ * thinking that truncate should work without problems on a filesystem
+ * with no free disk space, and may complain when they cannot truncate
+ * due to lack of free space. This spare space allows us not to worry
+ * about it. 500 is probably too much, but it should be absolutely safe.
+ */
#define SPARE_SPACE 500
/* prototypes from ioctl.c */
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
index a4ef5cd606e..6052d323bc9 100644
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -53,8 +53,10 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
}
bforget(bh);
- /* old disk layout detection; those partitions can be mounted, but
- * cannot be resized */
+ /*
+ * old disk layout detection; those partitions can be mounted, but
+ * cannot be resized
+ */
if (SB_BUFFER_WITH_SB(s)->b_blocknr * SB_BUFFER_WITH_SB(s)->b_size
!= REISERFS_DISK_OFFSET_IN_BYTES) {
printk
@@ -86,12 +88,14 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
return -ENOMEM;
}
- /* the new journal bitmaps are zero filled, now we copy in the bitmap
- ** node pointers from the old journal bitmap structs, and then
- ** transfer the new data structures into the journal struct.
- **
- ** using the copy_size var below allows this code to work for
- ** both shrinking and expanding the FS.
+ /*
+	 * the new journal bitmaps are zero filled, now we copy in
+ * the bitmap node pointers from the old journal bitmap
+ * structs, and then transfer the new data structures
+ * into the journal struct.
+ *
+ * using the copy_size var below allows this code to work for
+ * both shrinking and expanding the FS.
*/
copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr;
copy_size =
@@ -101,36 +105,45 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
jb = SB_JOURNAL(s)->j_list_bitmap + i;
memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size);
- /* just in case vfree schedules on us, copy the new
- ** pointer into the journal struct before freeing the
- ** old one
+ /*
+ * just in case vfree schedules on us, copy the new
+ * pointer into the journal struct before freeing the
+ * old one
*/
node_tmp = jb->bitmaps;
jb->bitmaps = jbitmap[i].bitmaps;
vfree(node_tmp);
}
- /* allocate additional bitmap blocks, reallocate array of bitmap
- * block pointers */
+ /*
+ * allocate additional bitmap blocks, reallocate
+ * array of bitmap block pointers
+ */
bitmap =
vzalloc(sizeof(struct reiserfs_bitmap_info) * bmap_nr_new);
if (!bitmap) {
- /* Journal bitmaps are still supersized, but the memory isn't
- * leaked, so I guess it's ok */
+ /*
+ * Journal bitmaps are still supersized, but the
+ * memory isn't leaked, so I guess it's ok
+ */
printk("reiserfs_resize: unable to allocate memory.\n");
return -ENOMEM;
}
for (i = 0; i < bmap_nr; i++)
bitmap[i] = old_bitmap[i];
- /* This doesn't go through the journal, but it doesn't have to.
- * The changes are still atomic: We're synced up when the journal
- * transaction begins, and the new bitmaps don't matter if the
- * transaction fails. */
+ /*
+ * This doesn't go through the journal, but it doesn't have to.
+ * The changes are still atomic: We're synced up when the
+ * journal transaction begins, and the new bitmaps don't
+ * matter if the transaction fails.
+ */
for (i = bmap_nr; i < bmap_nr_new; i++) {
int depth;
- /* don't use read_bitmap_block since it will cache
- * the uninitialized bitmap */
+ /*
+ * don't use read_bitmap_block since it will cache
+ * the uninitialized bitmap
+ */
depth = reiserfs_write_unlock_nested(s);
bh = sb_bread(s, i * s->s_blocksize * 8);
reiserfs_write_lock_nested(s, depth);
@@ -147,7 +160,7 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
depth = reiserfs_write_unlock_nested(s);
sync_dirty_buffer(bh);
reiserfs_write_lock_nested(s, depth);
- // update bitmap_info stuff
+ /* update bitmap_info stuff */
bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
brelse(bh);
}
@@ -156,9 +169,11 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
vfree(old_bitmap);
}
- /* begin transaction, if there was an error, it's fine. Yes, we have
+ /*
+	 * begin a transaction; if there was an error, it's fine. Yes, we have
* incorrect bitmaps now, but none of it is ever going to touch the
- * disk anyway. */
+ * disk anyway.
+ */
err = journal_begin(&th, s, 10);
if (err)
return err;
@@ -167,7 +182,7 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
info = SB_AP_BITMAP(s) + bmap_nr - 1;
bh = reiserfs_read_bitmap_block(s, bmap_nr - 1);
if (!bh) {
- int jerr = journal_end(&th, s, 10);
+ int jerr = journal_end(&th);
if (jerr)
return jerr;
return -EIO;
@@ -178,14 +193,14 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
reiserfs_clear_le_bit(i, bh->b_data);
info->free_count += s->s_blocksize * 8 - block_r;
- journal_mark_dirty(&th, s, bh);
+ journal_mark_dirty(&th, bh);
brelse(bh);
/* Correct new last bitmap block - It may not be full */
info = SB_AP_BITMAP(s) + bmap_nr_new - 1;
bh = reiserfs_read_bitmap_block(s, bmap_nr_new - 1);
if (!bh) {
- int jerr = journal_end(&th, s, 10);
+ int jerr = journal_end(&th);
if (jerr)
return jerr;
return -EIO;
@@ -194,7 +209,7 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
reiserfs_prepare_for_journal(s, bh, 1);
for (i = block_r_new; i < s->s_blocksize * 8; i++)
reiserfs_set_le_bit(i, bh->b_data);
- journal_mark_dirty(&th, s, bh);
+ journal_mark_dirty(&th, bh);
brelse(bh);
info->free_count -= s->s_blocksize * 8 - block_r_new;
@@ -207,8 +222,8 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
PUT_SB_BLOCK_COUNT(s, block_count_new);
PUT_SB_BMAP_NR(s, bmap_would_wrap(bmap_nr_new) ? : bmap_nr_new);
- journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
+ journal_mark_dirty(&th, SB_BUFFER_WITH_SB(s));
SB_JOURNAL(s)->j_must_wait = 1;
- return journal_end(&th, s, 10);
+ return journal_end(&th);
}
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 615cd9ab794..dd44468edc2 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -8,46 +8,6 @@
* Pereslavl-Zalessky Russia
*/
-/*
- * This file contains functions dealing with S+tree
- *
- * B_IS_IN_TREE
- * copy_item_head
- * comp_short_keys
- * comp_keys
- * comp_short_le_keys
- * le_key2cpu_key
- * comp_le_keys
- * bin_search
- * get_lkey
- * get_rkey
- * key_in_buffer
- * decrement_bcount
- * reiserfs_check_path
- * pathrelse_and_restore
- * pathrelse
- * search_by_key_reada
- * search_by_key
- * search_for_position_by_key
- * comp_items
- * prepare_for_direct_item
- * prepare_for_direntry_item
- * prepare_for_delete_or_cut
- * calc_deleted_bytes_number
- * init_tb_struct
- * padd_item
- * reiserfs_delete_item
- * reiserfs_delete_solid_item
- * reiserfs_delete_object
- * maybe_indirect_to_direct
- * indirect_to_direct_roll_back
- * reiserfs_cut_from_item
- * truncate_directory
- * reiserfs_do_truncate
- * reiserfs_paste_into_item
- * reiserfs_insert_item
- */
-
#include <linux/time.h>
#include <linux/string.h>
#include <linux/pagemap.h>
@@ -65,21 +25,21 @@ inline int B_IS_IN_TREE(const struct buffer_head *bh)
return (B_LEVEL(bh) != FREE_LEVEL);
}
-//
-// to gets item head in le form
-//
+/* to get item head in le form */
inline void copy_item_head(struct item_head *to,
const struct item_head *from)
{
memcpy(to, from, IH_SIZE);
}
-/* k1 is pointer to on-disk structure which is stored in little-endian
- form. k2 is pointer to cpu variable. For key of items of the same
- object this returns 0.
- Returns: -1 if key1 < key2
- 0 if key1 == key2
- 1 if key1 > key2 */
+/*
+ * k1 is pointer to on-disk structure which is stored in little-endian
+ * form. k2 is pointer to cpu variable. For key of items of the same
+ * object this returns 0.
+ * Returns: -1 if key1 < key2
+ * 0 if key1 == key2
+ * 1 if key1 > key2
+ */
inline int comp_short_keys(const struct reiserfs_key *le_key,
const struct cpu_key *cpu_key)
{
@@ -97,11 +57,13 @@ inline int comp_short_keys(const struct reiserfs_key *le_key,
return 0;
}
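The comparators here share a -1/0/1 contract, so callers can branch on the
sign of the result; a trivial hypothetical wrapper:

	/* hypothetical: does le_key sort strictly before cpu_key on the
	 * short (dir_id, objectid) fields? */
	static inline int le_key_before(const struct reiserfs_key *le_key,
					const struct cpu_key *cpu_key)
	{
		return comp_short_keys(le_key, cpu_key) < 0;
	}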
-/* k1 is pointer to on-disk structure which is stored in little-endian
- form. k2 is pointer to cpu variable.
- Compare keys using all 4 key fields.
- Returns: -1 if key1 < key2 0
- if key1 = key2 1 if key1 > key2 */
+/*
+ * k1 is a pointer to an on-disk structure which is stored in
+ * little-endian form. k2 is a pointer to a cpu variable.
+ * Compare keys using all 4 key fields.
+ * Returns: -1 if key1 < key2
+ *           0 if key1 == key2
+ *           1 if key1 > key2
+ */
static inline int comp_keys(const struct reiserfs_key *le_key,
const struct cpu_key *cpu_key)
{
@@ -155,15 +117,17 @@ inline void le_key2cpu_key(struct cpu_key *to, const struct reiserfs_key *from)
to->on_disk_key.k_dir_id = le32_to_cpu(from->k_dir_id);
to->on_disk_key.k_objectid = le32_to_cpu(from->k_objectid);
- // find out version of the key
+ /* find out version of the key */
version = le_key_version(from);
to->version = version;
to->on_disk_key.k_offset = le_key_k_offset(version, from);
to->on_disk_key.k_type = le_key_k_type(version, from);
}
-// this does not say which one is bigger, it only returns 1 if keys
-// are not equal, 0 otherwise
+/*
+ * this does not say which one is bigger, it only returns 1 if keys
+ * are not equal, 0 otherwise
+ */
inline int comp_le_keys(const struct reiserfs_key *k1,
const struct reiserfs_key *k2)
{
@@ -177,24 +141,27 @@ inline int comp_le_keys(const struct reiserfs_key *k1,
* *pos = number of the searched element if found, else the *
* number of the first element that is larger than key. *
**************************************************************************/
-/* For those not familiar with binary search: lbound is the leftmost item that it
- could be, rbound the rightmost item that it could be. We examine the item
- halfway between lbound and rbound, and that tells us either that we can increase
- lbound, or decrease rbound, or that we have found it, or if lbound <= rbound that
- there are no possible items, and we have not found it. With each examination we
- cut the number of possible items it could be by one more than half rounded down,
- or we find it. */
+/*
+ * For those not familiar with binary search: lbound is the leftmost item
+ * that it could be, rbound the rightmost item that it could be. We examine
+ * the item halfway between lbound and rbound, and that tells us either
+ * that we can increase lbound, or decrease rbound, or that we have found it,
+ * or, once rbound < lbound, that there are no possible items, and we have not
+ * found it. With each examination we cut the number of possible items it
+ * could be by one more than half rounded down, or we find it.
+ */
static inline int bin_search(const void *key, /* Key to search for. */
const void *base, /* First item in the array. */
int num, /* Number of items in the array. */
- int width, /* Item size in the array.
- searched. Lest the reader be
- confused, note that this is crafted
- as a general function, and when it
- is applied specifically to the array
- of item headers in a node, width
- is actually the item header size not
- the item size. */
+ /*
+			      * Item size in the array. Lest the reader be
+			      * confused, note that this is crafted as a
+			      * general function, and when it is applied
+			      * specifically to the array of item headers in
+			      * a node, width is actually the item header
+			      * size, not the item size.
+ */
+ int width,
int *pos /* Number of the searched for element. */
)
{
@@ -216,8 +183,10 @@ static inline int bin_search(const void *key, /* Key to search for. */
return ITEM_FOUND; /* Key found in the array. */
}
- /* bin_search did not find given key, it returns position of key,
- that is minimal and greater than the given one. */
+ /*
+ * bin_search did not find given key, it returns position of key,
+ * that is minimal and greater than the given one.
+ */
*pos = lbound;
return ITEM_NOT_FOUND;
}
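The hunk elides the loop body, so here is a self-contained model of the
lbound/rbound scheme the comment describes, over plain ints rather than keys
(an illustration, not the kernel code):

	/* model: returns 1 and sets *pos on a hit; returns 0 with *pos at
	 * the first element greater than key otherwise */
	static int bin_search_model(int key, const int *base, int num, int *pos)
	{
		int lbound = 0, rbound = num - 1, j;

		while (lbound <= rbound) {
			j = (lbound + rbound) / 2;
			if (base[j] < key)
				lbound = j + 1;
			else if (base[j] > key)
				rbound = j - 1;
			else {
				*pos = j;
				return 1;	/* cf. ITEM_FOUND */
			}
		}
		*pos = lbound;
		return 0;		/* cf. ITEM_NOT_FOUND */
	}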
@@ -234,10 +203,14 @@ static const struct reiserfs_key MAX_KEY = {
cpu_to_le32(0xffffffff)},}
};
-/* Get delimiting key of the buffer by looking for it in the buffers in the path, starting from the bottom
- of the path, and going upwards. We must check the path's validity at each step. If the key is not in
- the path, there is no delimiting key in the tree (buffer is first or last buffer in tree), and in this
- case we return a special key, either MIN_KEY or MAX_KEY. */
+/*
+ * Get delimiting key of the buffer by looking for it in the buffers in the
+ * path, starting from the bottom of the path, and going upwards. We must
+ * check the path's validity at each step. If the key is not in the path,
+ * there is no delimiting key in the tree (buffer is first or last buffer
+ * in tree), and in this case we return a special key, either MIN_KEY or
+ * MAX_KEY.
+ */
static inline const struct reiserfs_key *get_lkey(const struct treepath *chk_path,
const struct super_block *sb)
{
@@ -270,9 +243,12 @@ static inline const struct reiserfs_key *get_lkey(const struct treepath *chk_pat
PATH_OFFSET_PBUFFER(chk_path,
path_offset + 1)->b_blocknr)
return &MAX_KEY;
- /* Return delimiting key if position in the parent is not equal to zero. */
+ /*
+ * Return delimiting key if position in the parent
+ * is not equal to zero.
+ */
if (position)
- return B_N_PDELIM_KEY(parent, position - 1);
+ return internal_key(parent, position - 1);
}
/* Return MIN_KEY if we are in the root of the buffer tree. */
if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)->
@@ -308,15 +284,23 @@ inline const struct reiserfs_key *get_rkey(const struct treepath *chk_path,
path_offset)) >
B_NR_ITEMS(parent))
return &MIN_KEY;
- /* Check whether parent at the path really points to the child. */
+ /*
+ * Check whether parent at the path really points
+ * to the child.
+ */
if (B_N_CHILD_NUM(parent, position) !=
PATH_OFFSET_PBUFFER(chk_path,
path_offset + 1)->b_blocknr)
return &MIN_KEY;
- /* Return delimiting key if position in the parent is not the last one. */
+
+ /*
+ * Return delimiting key if position in the parent
+ * is not the last one.
+ */
if (position != B_NR_ITEMS(parent))
- return B_N_PDELIM_KEY(parent, position);
+ return internal_key(parent, position);
}
+
/* Return MAX_KEY if we are in the root of the buffer tree. */
if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)->
b_blocknr == SB_ROOT_BLOCK(sb))
@@ -324,13 +308,20 @@ inline const struct reiserfs_key *get_rkey(const struct treepath *chk_path,
return &MIN_KEY;
}
-/* Check whether a key is contained in the tree rooted from a buffer at a path. */
-/* This works by looking at the left and right delimiting keys for the buffer in the last path_element in
- the path. These delimiting keys are stored at least one level above that buffer in the tree. If the
- buffer is the first or last node in the tree order then one of the delimiting keys may be absent, and in
- this case get_lkey and get_rkey return a special key which is MIN_KEY or MAX_KEY. */
-static inline int key_in_buffer(struct treepath *chk_path, /* Path which should be checked. */
- const struct cpu_key *key, /* Key which should be checked. */
+/*
+ * Check whether a key is contained in the tree rooted from a buffer at a path.
+ * This works by looking at the left and right delimiting keys for the buffer
+ * in the last path_element in the path. These delimiting keys are stored
+ * at least one level above that buffer in the tree. If the buffer is the
+ * first or last node in the tree order then one of the delimiting keys may
+ * be absent, and in this case get_lkey and get_rkey return a special key
+ * which is MIN_KEY or MAX_KEY.
+ */
+static inline int key_in_buffer(
+ /* Path which should be checked. */
+ struct treepath *chk_path,
+ /* Key which should be checked. */
+ const struct cpu_key *key,
struct super_block *sb
)
{
@@ -359,9 +350,11 @@ int reiserfs_check_path(struct treepath *p)
return 0;
}
-/* Drop the reference to each buffer in a path and restore
+/*
+ * Drop the reference to each buffer in a path and restore
* dirty bits clean when preparing the buffer for the log.
- * This version should only be called from fix_nodes() */
+ * This version should only be called from fix_nodes()
+ */
void pathrelse_and_restore(struct super_block *sb,
struct treepath *search_path)
{
@@ -418,14 +411,17 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
}
ih = (struct item_head *)(buf + BLKH_SIZE) + nr - 1;
used_space = BLKH_SIZE + IH_SIZE * nr + (blocksize - ih_location(ih));
+
+	/* free space does not match the calculated amount of used space */
if (used_space != blocksize - blkh_free_space(blkh)) {
- /* free space does not match to calculated amount of use space */
reiserfs_warning(NULL, "reiserfs-5082",
"free space seems wrong: %z", bh);
return 0;
}
- // FIXME: it is_leaf will hit performance too much - we may have
- // return 1 here
+	/*
+	 * FIXME: this is_leaf check will hit performance too much -
+	 * we may just return 1 here
+	 */
/* check tables of item heads */
ih = (struct item_head *)(buf + BLKH_SIZE);
@@ -460,7 +456,7 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
prev_location = ih_location(ih);
}
- // one may imagine much more checks
+ /* one may imagine many more checks */
return 1;
}
@@ -481,8 +477,8 @@ static int is_internal(char *buf, int blocksize, struct buffer_head *bh)
}
nr = blkh_nr_item(blkh);
+	/*
+	 * for an internal node that is not the root we might check
+	 * the minimum number of keys
+	 */
if (nr > (blocksize - BLKH_SIZE - DC_SIZE) / (KEY_SIZE + DC_SIZE)) {
- /* for internal which is not root we might check min number of keys */
reiserfs_warning(NULL, "reiserfs-5088",
"number of key seems wrong: %z", bh);
return 0;
@@ -494,12 +490,15 @@ static int is_internal(char *buf, int blocksize, struct buffer_head *bh)
"free space seems wrong: %z", bh);
return 0;
}
- // one may imagine much more checks
+
+ /* one may imagine many more checks */
return 1;
}
-// make sure that bh contains formatted node of reiserfs tree of
-// 'level'-th level
+/*
+ * make sure that bh contains a formatted node of the reiserfs
+ * tree at the 'level'-th level
+ */
static int is_tree_node(struct buffer_head *bh, int level)
{
if (B_LEVEL(bh) != level) {
@@ -546,7 +545,8 @@ static int search_by_key_reada(struct super_block *s,
for (j = 0; j < i; j++) {
/*
* note, this needs attention if we are getting rid of the BKL
- * you have to make sure the prepared bit isn't set on this buffer
+ * you have to make sure the prepared bit isn't set on this
+ * buffer
*/
if (!buffer_uptodate(bh[j])) {
if (depth == -1)
@@ -558,39 +558,34 @@ static int search_by_key_reada(struct super_block *s,
return depth;
}
-/**************************************************************************
- * Algorithm SearchByKey *
- * look for item in the Disk S+Tree by its key *
- * Input: sb - super block *
- * key - pointer to the key to search *
- * Output: ITEM_FOUND, ITEM_NOT_FOUND or IO_ERROR *
- * search_path - path from the root to the needed leaf *
- **************************************************************************/
-
-/* This function fills up the path from the root to the leaf as it
- descends the tree looking for the key. It uses reiserfs_bread to
- try to find buffers in the cache given their block number. If it
- does not find them in the cache it reads them from disk. For each
- node search_by_key finds using reiserfs_bread it then uses
- bin_search to look through that node. bin_search will find the
- position of the block_number of the next node if it is looking
- through an internal node. If it is looking through a leaf node
- bin_search will find the position of the item which has key either
- equal to given key, or which is the maximal key less than the given
- key. search_by_key returns a path that must be checked for the
- correctness of the top of the path but need not be checked for the
- correctness of the bottom of the path */
-/* The function is NOT SCHEDULE-SAFE! */
-int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to search. */
- struct treepath *search_path,/* This structure was
- allocated and initialized
- by the calling
- function. It is filled up
- by this function. */
- int stop_level /* How far down the tree to search. To
- stop at leaf level - set to
- DISK_LEAF_NODE_LEVEL */
- )
+/*
+ * This function fills up the path from the root to the leaf as it
+ * descends the tree looking for the key. It uses reiserfs_bread to
+ * try to find buffers in the cache given their block number. If it
+ * does not find them in the cache it reads them from disk. For each
+ * node that search_by_key finds using reiserfs_bread, it then uses
+ * bin_search to look through that node. bin_search will find the
+ * position of the block_number of the next node if it is looking
+ * through an internal node. If it is looking through a leaf node
+ * bin_search will find the position of the item which has key either
+ * equal to the given key, or which is the maximal key less than the
+ * given key. search_by_key returns a path that must be checked for
+ * the correctness of the top of the path but need not be checked for
+ * the correctness of the bottom of the path.
+ */
+/*
+ * search_by_key - search for key (and item) in stree
+ * @sb: superblock
+ * @key: pointer to key to search for
+ * @search_path: Allocated and initialized struct treepath; returned filled
+ *		 in on success.
+ * @stop_level: How far down the tree to search; use DISK_LEAF_NODE_LEVEL to
+ *		stop at the leaf level.
+ *
+ * The function is NOT SCHEDULE-SAFE!
+ */
+int search_by_key(struct super_block *sb, const struct cpu_key *key,
+ struct treepath *search_path, int stop_level)
{
b_blocknr_t block_number;
int expected_level;
@@ -609,17 +604,22 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
PROC_INFO_INC(sb, search_by_key);
- /* As we add each node to a path we increase its count. This means that
- we must be careful to release all nodes in a path before we either
- discard the path struct or re-use the path struct, as we do here. */
+ /*
+ * As we add each node to a path we increase its count. This means
+ * that we must be careful to release all nodes in a path before we
+ * either discard the path struct or re-use the path struct, as we
+ * do here.
+ */
pathrelse(search_path);
right_neighbor_of_leaf_node = 0;
- /* With each iteration of this loop we search through the items in the
- current node, and calculate the next current node(next path element)
- for the next iteration of this loop.. */
+ /*
+ * With each iteration of this loop we search through the items in the
+ * current node, and calculate the next current node (next path element)
+ * for the next iteration of this loop.
+ */
block_number = SB_ROOT_BLOCK(sb);
expected_level = -1;
while (1) {
@@ -639,8 +639,10 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
++search_path->path_length);
fs_gen = get_generation(sb);
- /* Read the next tree node, and set the last element in the path to
- have a pointer to it. */
+ /*
+ * Read the next tree node, and set the last element
+ * in the path to have a pointer to it.
+ */
if ((bh = last_element->pe_buffer =
sb_getblk(sb, block_number))) {
@@ -666,7 +668,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
if (!buffer_uptodate(bh))
goto io_error;
} else {
- io_error:
+io_error:
search_path->path_length--;
pathrelse(search_path);
return IO_ERROR;
@@ -676,9 +678,12 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
expected_level = SB_TREE_HEIGHT(sb);
expected_level--;
- /* It is possible that schedule occurred. We must check whether the key
- to search is still in the tree rooted from the current buffer. If
- not then repeat search from the root. */
+ /*
+ * It is possible that schedule occurred. We must check
+ * whether the key to search is still in the tree rooted
+ * from the current buffer. If not then repeat search
+ * from the root.
+ */
if (fs_changed(fs_gen, sb) &&
(!B_IS_IN_TREE(bh) ||
B_LEVEL(bh) != expected_level ||
@@ -689,8 +694,10 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
sbk_restarted[expected_level - 1]);
pathrelse(search_path);
- /* Get the root block number so that we can repeat the search
- starting from the root. */
+ /*
+ * Get the root block number so that we can
+ * repeat the search starting from the root.
+ */
block_number = SB_ROOT_BLOCK(sb);
expected_level = -1;
right_neighbor_of_leaf_node = 0;
@@ -699,9 +706,11 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
continue;
}
- /* only check that the key is in the buffer if key is not
- equal to the MAX_KEY. Latter case is only possible in
- "finish_unfinished()" processing during mount. */
+ /*
+ * Only check that the key is in the buffer if the key is not
+ * equal to MAX_KEY. The latter case is only possible in
+ * "finish_unfinished()" processing during mount.
+ */
RFALSE(comp_keys(&MAX_KEY, key) &&
!key_in_buffer(search_path, key, sb),
"PAP-5130: key is not in the buffer");
@@ -713,8 +722,10 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
}
#endif
- // make sure, that the node contents look like a node of
- // certain level
+ /*
+ * make sure that the node contents look like a node of
+ * the expected level
+ */
if (!is_tree_node(bh, expected_level)) {
reiserfs_error(sb, "vs-5150",
"invalid format found in block %ld. "
@@ -732,32 +743,42 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
"vs-5152: tree level (%d) is less than stop level (%d)",
node_level, stop_level);
- retval = bin_search(key, B_N_PITEM_HEAD(bh, 0),
+ retval = bin_search(key, item_head(bh, 0),
B_NR_ITEMS(bh),
(node_level ==
DISK_LEAF_NODE_LEVEL) ? IH_SIZE :
KEY_SIZE,
- &(last_element->pe_position));
+ &last_element->pe_position);
if (node_level == stop_level) {
return retval;
}
/* we are not in the stop level */
+ /*
+ * item has been found, so we choose the pointer which
+ * is to the right of the found one
+ */
if (retval == ITEM_FOUND)
- /* item has been found, so we choose the pointer which is to the right of the found one */
last_element->pe_position++;
- /* if item was not found we choose the position which is to
- the left of the found item. This requires no code,
- bin_search did it already. */
+ /*
+ * if item was not found we choose the position which is to
+ * the left of the found item. This requires no code,
+ * bin_search did it already.
+ */
- /* So we have chosen a position in the current node which is
- an internal node. Now we calculate child block number by
- position in the node. */
+ /*
+ * So we have chosen a position in the current node which is
+ * an internal node. Now we calculate child block number by
+ * position in the node.
+ */
block_number =
B_N_CHILD_NUM(bh, last_element->pe_position);
- /* if we are going to read leaf nodes, try for read ahead as well */
+ /*
+ * if we are going to read leaf nodes, try for read
+ * ahead as well
+ */
if ((search_path->reada & PATH_READA) &&
node_level == DISK_LEAF_NODE_LEVEL + 1) {
int pos = last_element->pe_position;
@@ -779,7 +800,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
/*
* check to make sure we're in the same object
*/
- le_key = B_N_PDELIM_KEY(bh, pos);
+ le_key = internal_key(bh, pos);
if (le32_to_cpu(le_key->k_objectid) !=
key->on_disk_key.k_objectid) {
break;
@@ -789,26 +810,28 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
}
}
-/* Form the path to an item and position in this item which contains
- file byte defined by key. If there is no such item
- corresponding to the key, we point the path to the item with
- maximal key less than key, and *pos_in_item is set to one
- past the last entry/byte in the item. If searching for entry in a
- directory item, and it is not found, *pos_in_item is set to one
- entry more than the entry with maximal key which is less than the
- sought key.
-
- Note that if there is no entry in this same node which is one more,
- then we point to an imaginary entry. for direct items, the
- position is in units of bytes, for indirect items the position is
- in units of blocknr entries, for directory items the position is in
- units of directory entries. */
-
+/*
+ * Form the path to an item and the position in this item which contains
+ * the file byte defined by key. If there is no such item
+ * corresponding to the key, we point the path to the item with
+ * maximal key less than key, and *pos_in_item is set to one
+ * past the last entry/byte in the item. If searching for an entry in a
+ * directory item, and it is not found, *pos_in_item is set to one
+ * entry more than the entry with maximal key which is less than the
+ * sought key.
+ *
+ * Note that if there is no entry in this same node which is one more,
+ * then we point to an imaginary entry. For direct items, the
+ * position is in units of bytes, for indirect items the position is
+ * in units of blocknr entries, for directory items the position is in
+ * units of directory entries.
+ */
/* The function is NOT SCHEDULE-SAFE! */
-int search_for_position_by_key(struct super_block *sb, /* Pointer to the super block. */
- const struct cpu_key *p_cpu_key, /* Key to search (cpu variable) */
- struct treepath *search_path /* Filled up by this function. */
- )
+int search_for_position_by_key(struct super_block *sb,
+ /* Key to search (cpu variable) */
+ const struct cpu_key *p_cpu_key,
+ /* Filled up by this function. */
+ struct treepath *search_path)
{
struct item_head *p_le_ih; /* pointer to on-disk structure */
int blk_size;
@@ -830,7 +853,7 @@ int search_for_position_by_key(struct super_block *sb, /* Pointer to the super b
if (retval == ITEM_FOUND) {
RFALSE(!ih_item_len
- (B_N_PITEM_HEAD
+ (item_head
(PATH_PLAST_BUFFER(search_path),
PATH_LAST_POSITION(search_path))),
"PAP-5165: item length equals zero");
@@ -844,14 +867,14 @@ int search_for_position_by_key(struct super_block *sb, /* Pointer to the super b
/* Item is not found. Set path to the previous item. */
p_le_ih =
- B_N_PITEM_HEAD(PATH_PLAST_BUFFER(search_path),
+ item_head(PATH_PLAST_BUFFER(search_path),
--PATH_LAST_POSITION(search_path));
blk_size = sb->s_blocksize;
- if (comp_short_keys(&(p_le_ih->ih_key), p_cpu_key)) {
+ if (comp_short_keys(&p_le_ih->ih_key, p_cpu_key))
return FILE_NOT_FOUND;
- }
- // FIXME: quite ugly this far
+
+ /* FIXME: quite ugly this far */
item_offset = le_ih_k_offset(p_le_ih);
offset = cpu_key_k_offset(p_cpu_key);
@@ -866,8 +889,10 @@ int search_for_position_by_key(struct super_block *sb, /* Pointer to the super b
return POSITION_FOUND;
}
- /* Needed byte is not contained in the item pointed to by the
- path. Set pos_in_item out of the item. */
+ /*
+ * Needed byte is not contained in the item pointed to by the
+ * path. Set pos_in_item out of the item.
+ */
if (is_indirect_le_ih(p_le_ih))
pos_in_item(search_path) =
ih_item_len(p_le_ih) / UNFM_P_SIZE;
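
(Reader's note, not part of the patch: a sketch of the pos_in_item contract
described above; 'inode' and 'byte_offset' are assumed, and the key type and
length follow the make_cpu_key() calls visible elsewhere in this diff.)

    INITIALIZE_PATH(path);
    struct cpu_key pos_key;

    make_cpu_key(&pos_key, inode, byte_offset, TYPE_DIRECT, 3);
    if (search_for_position_by_key(inode->i_sb, &pos_key, &path) ==
        POSITION_FOUND) {
            /*
             * pos_in_item(&path) indexes into the found item: bytes for
             * direct items, blocknr entries for indirect items, entries
             * for directory items.
             */
    }
    pathrelse(&path);
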
@@ -892,19 +917,17 @@ int comp_items(const struct item_head *stored_ih, const struct treepath *path)
return 1;
/* we need only to know, whether it is the same item */
- ih = get_ih(path);
+ ih = tp_item_head(path);
return memcmp(stored_ih, ih, IH_SIZE);
}
-/* unformatted nodes are not logged anymore, ever. This is safe
-** now
-*/
+/* unformatted nodes are not logged anymore, ever. This is safe now */
#define held_by_others(bh) (atomic_read(&(bh)->b_count) > 1)
-// block can not be forgotten as it is in I/O or held by someone
+/* block can not be forgotten as it is in I/O or held by someone */
#define block_in_use(bh) (buffer_locked(bh) || (held_by_others(bh)))
-// prepare for delete or cut of direct item
+/* prepare for delete or cut of direct item */
static inline int prepare_for_direct_item(struct treepath *path,
struct item_head *le_ih,
struct inode *inode,
@@ -917,9 +940,8 @@ static inline int prepare_for_direct_item(struct treepath *path,
*cut_size = -(IH_SIZE + ih_item_len(le_ih));
return M_DELETE;
}
- // new file gets truncated
+ /* new file gets truncated */
if (get_inode_item_key_version(inode) == KEY_FORMAT_3_6) {
- //
round_len = ROUND_UP(new_file_length);
/* this was new_file_length < le_ih ... */
if (round_len < le_ih_k_offset(le_ih)) {
@@ -933,12 +955,13 @@ static inline int prepare_for_direct_item(struct treepath *path,
return M_CUT; /* Cut from this item. */
}
- // old file: items may have any length
+ /* old file: items may have any length */
if (new_file_length < le_ih_k_offset(le_ih)) {
*cut_size = -(IH_SIZE + ih_item_len(le_ih));
return M_DELETE; /* Delete this item. */
}
+
/* Calculate first position and size for cutting from item. */
*cut_size = -(ih_item_len(le_ih) -
(pos_in_item(path) =
@@ -957,12 +980,15 @@ static inline int prepare_for_direntry_item(struct treepath *path,
RFALSE(ih_entry_count(le_ih) != 2,
"PAP-5220: incorrect empty directory item (%h)", le_ih);
*cut_size = -(IH_SIZE + ih_item_len(le_ih));
- return M_DELETE; /* Delete the directory item containing "." and ".." entry. */
+ /* Delete the directory item containing "." and ".." entry. */
+ return M_DELETE;
}
if (ih_entry_count(le_ih) == 1) {
- /* Delete the directory item such as there is one record only
- in this item */
+ /*
+ * Delete the directory item, as there is only one record
+ * in this item
+ */
*cut_size = -(IH_SIZE + ih_item_len(le_ih));
return M_DELETE;
}
@@ -976,18 +1002,34 @@ static inline int prepare_for_direntry_item(struct treepath *path,
#define JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD (2 * JOURNAL_PER_BALANCE_CNT + 1)
-/* If the path points to a directory or direct item, calculate mode and the size cut, for balance.
- If the path points to an indirect item, remove some number of its unformatted nodes.
- In case of file truncate calculate whether this item must be deleted/truncated or last
- unformatted node of this item will be converted to a direct item.
- This function returns a determination of what balance mode the calling function should employ. */
-static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, struct inode *inode, struct treepath *path, const struct cpu_key *item_key, int *removed, /* Number of unformatted nodes which were removed
- from end of the file. */
- int *cut_size, unsigned long long new_file_length /* MAX_KEY_OFFSET in case of delete. */
+/*
+ * If the path points to a directory or direct item, calculate mode
+ * and the size cut, for balance.
+ * If the path points to an indirect item, remove some number of its
+ * unformatted nodes.
+ * In case of file truncate, calculate whether this item must be
+ * deleted/truncated or the last unformatted node of this item will be
+ * converted to a direct item.
+ * This function returns a determination of what balance mode the
+ * calling function should employ.
+ */
+static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th,
+ struct inode *inode,
+ struct treepath *path,
+ const struct cpu_key *item_key,
+ /*
+ * Number of unformatted nodes
+ * which were removed from end
+ * of the file.
+ */
+ int *removed,
+ int *cut_size,
+ /* MAX_KEY_OFFSET in case of delete. */
+ unsigned long long new_file_length
)
{
struct super_block *sb = inode->i_sb;
- struct item_head *p_le_ih = PATH_PITEM_HEAD(path);
+ struct item_head *p_le_ih = tp_item_head(path);
struct buffer_head *bh = PATH_PLAST_BUFFER(path);
BUG_ON(!th->t_trans_id);
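
(Reader's note, not part of the patch: a rough sketch of the balance-mode
dispatch that callers such as reiserfs_cut_from_item() perform on this
function's result; 'balance' is a hypothetical tree_balance local.)

    char mode;
    int removed = 0, cut_size = 0;
    struct tree_balance balance;

    mode = prepare_for_delete_or_cut(th, inode, path, item_key,
                                     &removed, &cut_size, new_file_size);
    if (mode == M_CONVERT) {
            /* nothing was cut: pack the tail back into a direct item */
    } else {
            /* M_DELETE or M_CUT: set up balancing by cut_size bytes */
            init_tb_struct(th, &balance, inode->i_sb, path, cut_size);
    }
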
@@ -1023,8 +1065,10 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st
int pos = 0;
if ( new_file_length == max_reiserfs_offset (inode) ) {
- /* prepare_for_delete_or_cut() is called by
- * reiserfs_delete_item() */
+ /*
+ * prepare_for_delete_or_cut() is called by
+ * reiserfs_delete_item()
+ */
new_file_length = 0;
delete = 1;
}
@@ -1033,27 +1077,30 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st
need_re_search = 0;
*cut_size = 0;
bh = PATH_PLAST_BUFFER(path);
- copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
+ copy_item_head(&s_ih, tp_item_head(path));
pos = I_UNFM_NUM(&s_ih);
while (le_ih_k_offset (&s_ih) + (pos - 1) * blk_size > new_file_length) {
__le32 *unfm;
__u32 block;
- /* Each unformatted block deletion may involve one additional
- * bitmap block into the transaction, thereby the initial
- * journal space reservation might not be enough. */
+ /*
+ * Each unformatted block deletion may bring
+ * one additional bitmap block into the transaction,
+ * thereby the initial journal space reservation
+ * might not be enough.
+ */
if (!delete && (*cut_size) != 0 &&
reiserfs_transaction_free_space(th) < JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD)
break;
- unfm = (__le32 *)B_I_PITEM(bh, &s_ih) + pos - 1;
+ unfm = (__le32 *)ih_item_body(bh, &s_ih) + pos - 1;
block = get_block_num(unfm, 0);
if (block != 0) {
reiserfs_prepare_for_journal(sb, bh, 1);
put_block_num(unfm, 0, 0);
- journal_mark_dirty(th, sb, bh);
+ journal_mark_dirty(th, bh);
reiserfs_free_block(th, inode, block, 1);
}
@@ -1074,17 +1121,21 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st
break;
}
}
- /* a trick. If the buffer has been logged, this will do nothing. If
- ** we've broken the loop without logging it, it will restore the
- ** buffer */
+ /*
+ * A trick: if the buffer has been logged, this will
+ * do nothing. If we've broken the loop without logging
+ * it, it will restore the buffer
+ */
reiserfs_restore_prepared_buffer(sb, bh);
} while (need_re_search &&
search_for_position_by_key(sb, item_key, path) == POSITION_FOUND);
pos_in_item(path) = pos * UNFM_P_SIZE;
if (*cut_size == 0) {
- /* Nothing were cut. maybe convert last unformatted node to the
- * direct item? */
+ /*
+ * Nothing was cut. Maybe convert the last unformatted node to a
+ * direct item?
+ */
result = M_CONVERT;
}
return result;
@@ -1095,7 +1146,7 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st
static int calc_deleted_bytes_number(struct tree_balance *tb, char mode)
{
int del_size;
- struct item_head *p_le_ih = PATH_PITEM_HEAD(tb->tb_path);
+ struct item_head *p_le_ih = tp_item_head(tb->tb_path);
if (is_statdata_le_ih(p_le_ih))
return 0;
@@ -1104,9 +1155,11 @@ static int calc_deleted_bytes_number(struct tree_balance *tb, char mode)
(mode ==
M_DELETE) ? ih_item_len(p_le_ih) : -tb->insert_size[0];
if (is_direntry_le_ih(p_le_ih)) {
- /* return EMPTY_DIR_SIZE; We delete emty directoris only.
- * we can't use EMPTY_DIR_SIZE, as old format dirs have a different
- * empty size. ick. FIXME, is this right? */
+ /*
+ * return EMPTY_DIR_SIZE; We delete empty directories only.
+ * We can't use EMPTY_DIR_SIZE, as old format dirs have a
+ * different empty size. ick. FIXME, is this right?
+ */
return del_size;
}
@@ -1169,7 +1222,8 @@ char head2type(struct item_head *ih)
}
#endif
-/* Delete object item.
+/*
+ * Delete object item.
* th - active transaction handle
* path - path to the deleted item
* item_key - key to search for the deleted item
@@ -1212,7 +1266,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
RFALSE(mode != M_DELETE, "PAP-5320: mode must be M_DELETE");
- copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
+ copy_item_head(&s_ih, tp_item_head(path));
s_del_balance.insert_size[0] = del_size;
ret_value = fix_nodes(M_DELETE, &s_del_balance, NULL, NULL);
@@ -1221,7 +1275,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
PROC_INFO_INC(sb, delete_item_restarted);
- // file system changed, repeat search
+ /* file system changed, repeat search */
ret_value =
search_for_position_by_key(sb, item_key, path);
if (ret_value == IO_ERROR)
@@ -1238,16 +1292,18 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
unfix_nodes(&s_del_balance);
return 0;
}
- // reiserfs_delete_item returns item length when success
+
+ /* reiserfs_delete_item returns item length when success */
ret_value = calc_deleted_bytes_number(&s_del_balance, M_DELETE);
- q_ih = get_ih(path);
+ q_ih = tp_item_head(path);
quota_cut_bytes = ih_item_len(q_ih);
- /* hack so the quota code doesn't have to guess if the file
- ** has a tail. On tail insert, we allocate quota for 1 unformatted node.
- ** We test the offset because the tail might have been
- ** split into multiple items, and we only want to decrement for
- ** the unfm node once
+ /*
+ * hack so the quota code doesn't have to guess if the file has a
+ * tail. On tail insert, we allocate quota for 1 unformatted node.
+ * We test the offset because the tail might have been
+ * split into multiple items, and we only want to decrement for
+ * the unfm node once
*/
if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(q_ih)) {
if ((le_ih_k_offset(q_ih) & (sb->s_blocksize - 1)) == 1) {
@@ -1261,33 +1317,38 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
int off;
char *data;
- /* We are in direct2indirect conversion, so move tail contents
- to the unformatted node */
- /* note, we do the copy before preparing the buffer because we
- ** don't care about the contents of the unformatted node yet.
- ** the only thing we really care about is the direct item's data
- ** is in the unformatted node.
- **
- ** Otherwise, we would have to call reiserfs_prepare_for_journal on
- ** the unformatted node, which might schedule, meaning we'd have to
- ** loop all the way back up to the start of the while loop.
- **
- ** The unformatted node must be dirtied later on. We can't be
- ** sure here if the entire tail has been deleted yet.
- **
- ** un_bh is from the page cache (all unformatted nodes are
- ** from the page cache) and might be a highmem page. So, we
- ** can't use un_bh->b_data.
- ** -clm
+ /*
+ * We are in direct2indirect conversion, so move tail contents
+ * to the unformatted node
+ */
+ /*
+ * Note: we do the copy before preparing the buffer because we
+ * don't care about the contents of the unformatted node yet.
+ * The only thing we really care about is that the direct item's
+ * data is in the unformatted node.
+ *
+ * Otherwise, we would have to call
+ * reiserfs_prepare_for_journal on the unformatted node,
+ * which might schedule, meaning we'd have to loop all the
+ * way back up to the start of the while loop.
+ *
+ * The unformatted node must be dirtied later on. We can't be
+ * sure here if the entire tail has been deleted yet.
+ *
+ * un_bh is from the page cache (all unformatted nodes are
+ * from the page cache) and might be a highmem page. So, we
+ * can't use un_bh->b_data.
+ * -clm
*/
data = kmap_atomic(un_bh->b_page);
off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
memcpy(data + off,
- B_I_PITEM(PATH_PLAST_BUFFER(path), &s_ih),
+ ih_item_body(PATH_PLAST_BUFFER(path), &s_ih),
ret_value);
kunmap_atomic(data);
}
+
/* Perform balancing after all resources have been collected at once. */
do_balance(&s_del_balance, NULL, NULL, M_DELETE);
@@ -1304,20 +1365,21 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
return ret_value;
}
-/* Summary Of Mechanisms For Handling Collisions Between Processes:
-
- deletion of the body of the object is performed by iput(), with the
- result that if multiple processes are operating on a file, the
- deletion of the body of the file is deferred until the last process
- that has an open inode performs its iput().
-
- writes and truncates are protected from collisions by use of
- semaphores.
-
- creates, linking, and mknod are protected from collisions with other
- processes by making the reiserfs_add_entry() the last step in the
- creation, and then rolling back all changes if there was a collision.
- - Hans
+/*
+ * Summary Of Mechanisms For Handling Collisions Between Processes:
+ *
+ * deletion of the body of the object is performed by iput(), with the
+ * result that if multiple processes are operating on a file, the
+ * deletion of the body of the file is deferred until the last process
+ * that has an open inode performs its iput().
+ *
+ * writes and truncates are protected from collisions by use of
+ * semaphores.
+ *
+ * creates, linking, and mknod are protected from collisions with other
+ * processes by making the reiserfs_add_entry() the last step in the
+ * creation, and then rolling back all changes if there was a collision.
+ * - Hans
*/
/* this deletes item which never gets split */
@@ -1347,7 +1409,11 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
}
if (retval != ITEM_FOUND) {
pathrelse(&path);
- // No need for a warning, if there is just no free space to insert '..' item into the newly-created subdir
+ /*
+ * No need for a warning if there is just no free
+ * space to insert the '..' item into the
+ * newly-created subdir
+ */
if (!
((unsigned long long)
GET_HASH_VALUE(le_key_k_offset
@@ -1362,11 +1428,11 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
}
if (!tb_init) {
tb_init = 1;
- item_len = ih_item_len(PATH_PITEM_HEAD(&path));
+ item_len = ih_item_len(tp_item_head(&path));
init_tb_struct(th, &tb, th->t_super, &path,
-(IH_SIZE + item_len));
}
- quota_cut_bytes = ih_item_len(PATH_PITEM_HEAD(&path));
+ quota_cut_bytes = ih_item_len(tp_item_head(&path));
retval = fix_nodes(M_DELETE, &tb, NULL, NULL);
if (retval == REPEAT_SEARCH) {
@@ -1376,7 +1442,11 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
if (retval == CARRY_ON) {
do_balance(&tb, NULL, NULL, M_DELETE);
- if (inode) { /* Should we count quota for item? (we don't count quotas for save-links) */
+ /*
+ * Should we count quota for item? (we don't
+ * count quotas for save-links)
+ */
+ if (inode) {
int depth;
#ifdef REISERQUOTA_DEBUG
reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
@@ -1391,7 +1461,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
}
break;
}
- // IO_ERROR, NO_DISK_SPACE, etc
+
+ /* IO_ERROR, NO_DISK_SPACE, etc */
reiserfs_warning(th->t_super, "vs-5360",
"could not delete %K due to fix_nodes failure",
&cpu_key);
@@ -1447,11 +1518,13 @@ static void unmap_buffers(struct page *page, loff_t pos)
do {
next = bh->b_this_page;
- /* we want to unmap the buffers that contain the tail, and
- ** all the buffers after it (since the tail must be at the
- ** end of the file). We don't want to unmap file data
- ** before the tail, since it might be dirty and waiting to
- ** reach disk
+ /*
+ * we want to unmap the buffers that contain
+ * the tail, and all the buffers after it
+ * (since the tail must be at the end of the
+ * file). We don't want to unmap file data
+ * before the tail, since it might be dirty
+ * and waiting to reach disk
*/
cur_index += bh->b_size;
if (cur_index > tail_index) {
@@ -1476,9 +1549,10 @@ static int maybe_indirect_to_direct(struct reiserfs_transaction_handle *th,
BUG_ON(!th->t_trans_id);
BUG_ON(new_file_size != inode->i_size);
- /* the page being sent in could be NULL if there was an i/o error
- ** reading in the last block. The user will hit problems trying to
- ** read the file, but for now we just skip the indirect2direct
+ /*
+ * the page being sent in could be NULL if there was an i/o error
+ * reading in the last block. The user will hit problems trying to
+ * read the file, but for now we just skip the indirect2direct
+ * conversion
*/
if (atomic_read(&inode->i_count) > 1 ||
!tail_has_to_be_packed(inode) ||
@@ -1490,17 +1564,18 @@ static int maybe_indirect_to_direct(struct reiserfs_transaction_handle *th,
pathrelse(path);
return cut_bytes;
}
+
/* Perform the conversion to a direct_item. */
- /* return indirect_to_direct(inode, path, item_key,
- new_file_size, mode); */
return indirect2direct(th, inode, page, path, item_key,
new_file_size, mode);
}
-/* we did indirect_to_direct conversion. And we have inserted direct
- item successesfully, but there were no disk space to cut unfm
- pointer being converted. Therefore we have to delete inserted
- direct item(s) */
+/*
+ * we did an indirect_to_direct conversion and inserted the direct
+ * item successfully, but there was no disk space to cut the unfm
+ * pointer being converted. Therefore we have to delete the inserted
+ * direct item(s)
+ */
static void indirect_to_direct_roll_back(struct reiserfs_transaction_handle *th,
struct inode *inode, struct treepath *path)
{
@@ -1509,7 +1584,7 @@ static void indirect_to_direct_roll_back(struct reiserfs_transaction_handle *th,
int removed;
BUG_ON(!th->t_trans_id);
- make_cpu_key(&tail_key, inode, inode->i_size + 1, TYPE_DIRECT, 4); // !!!!
+ make_cpu_key(&tail_key, inode, inode->i_size + 1, TYPE_DIRECT, 4);
tail_key.key_length = 4;
tail_len =
@@ -1521,7 +1596,7 @@ static void indirect_to_direct_roll_back(struct reiserfs_transaction_handle *th,
reiserfs_panic(inode->i_sb, "vs-5615",
"found invalid item");
RFALSE(path->pos_in_item !=
- ih_item_len(PATH_PITEM_HEAD(path)) - 1,
+ ih_item_len(tp_item_head(path)) - 1,
"vs-5616: appended bytes found");
PATH_LAST_POSITION(path)--;
@@ -1539,7 +1614,6 @@ static void indirect_to_direct_roll_back(struct reiserfs_transaction_handle *th,
reiserfs_warning(inode->i_sb, "reiserfs-5091", "indirect_to_direct "
"conversion has been rolled back due to "
"lack of disk space");
- //mark_file_without_tail (inode);
mark_inode_dirty(inode);
}
@@ -1551,15 +1625,18 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
struct page *page, loff_t new_file_size)
{
struct super_block *sb = inode->i_sb;
- /* Every function which is going to call do_balance must first
- create a tree_balance structure. Then it must fill up this
- structure by using the init_tb_struct and fix_nodes functions.
- After that we can make tree balancing. */
+ /*
+ * Every function which is going to call do_balance must first
+ * create a tree_balance structure. Then it must fill up this
+ * structure by using the init_tb_struct and fix_nodes functions.
+ * After that we can perform tree balancing.
+ */
struct tree_balance s_cut_balance;
struct item_head *p_le_ih;
- int cut_size = 0, /* Amount to be cut. */
- ret_value = CARRY_ON, removed = 0, /* Number of the removed unformatted nodes. */
- is_inode_locked = 0;
+ int cut_size = 0; /* Amount to be cut. */
+ int ret_value = CARRY_ON;
+ int removed = 0; /* Number of the removed unformatted nodes. */
+ int is_inode_locked = 0;
char mode; /* Mode of the balance. */
int retval2 = -1;
int quota_cut_bytes;
@@ -1571,21 +1648,27 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
cut_size);
- /* Repeat this loop until we either cut the item without needing
- to balance, or we fix_nodes without schedule occurring */
+ /*
+ * Repeat this loop until we either cut the item without needing
+ * to balance, or we fix_nodes without schedule occurring
+ */
while (1) {
- /* Determine the balance mode, position of the first byte to
- be cut, and size to be cut. In case of the indirect item
- free unformatted nodes which are pointed to by the cut
- pointers. */
+ /*
+ * Determine the balance mode, position of the first byte to
+ * be cut, and size to be cut. In case of the indirect item
+ * free unformatted nodes which are pointed to by the cut
+ * pointers.
+ */
mode =
prepare_for_delete_or_cut(th, inode, path,
item_key, &removed,
&cut_size, new_file_size);
if (mode == M_CONVERT) {
- /* convert last unformatted node to direct item or leave
- tail in the unformatted node */
+ /*
+ * convert last unformatted node to direct item or
+ * leave tail in the unformatted node
+ */
RFALSE(ret_value != CARRY_ON,
"PAP-5570: can not convert twice");
@@ -1599,15 +1682,20 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
is_inode_locked = 1;
- /* removing of last unformatted node will change value we
- have to return to truncate. Save it */
+ /*
+ * removing of last unformatted node will
+ * change value we have to return to truncate.
+ * Save it
+ */
retval2 = ret_value;
- /*retval2 = sb->s_blocksize - (new_file_size & (sb->s_blocksize - 1)); */
- /* So, we have performed the first part of the conversion:
- inserting the new direct item. Now we are removing the
- last unformatted node pointer. Set key to search for
- it. */
+ /*
+ * So, we have performed the first part of the
+ * conversion: inserting the new direct item.
+ * Now we are removing the last unformatted
+ * node pointer. Set key to search for it.
+ */
set_cpu_key_k_type(item_key, TYPE_INDIRECT);
item_key->key_length = 4;
new_file_size -=
@@ -1650,11 +1738,13 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
return (ret_value == IO_ERROR) ? -EIO : -ENOENT;
} /* while */
- // check fix_nodes results (IO_ERROR or NO_DISK_SPACE)
+ /* check fix_nodes results (IO_ERROR or NO_DISK_SPACE) */
if (ret_value != CARRY_ON) {
if (is_inode_locked) {
- // FIXME: this seems to be not needed: we are always able
- // to cut item
+ /*
+ * FIXME: this seems to be not needed: we are always
+ * able to cut item
+ */
indirect_to_direct_roll_back(th, inode, path);
}
if (ret_value == NO_DISK_SPACE)
@@ -1671,22 +1761,23 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
/* Calculate number of bytes that need to be cut from the item. */
quota_cut_bytes =
(mode ==
- M_DELETE) ? ih_item_len(get_ih(path)) : -s_cut_balance.
+ M_DELETE) ? ih_item_len(tp_item_head(path)) : -s_cut_balance.
insert_size[0];
if (retval2 == -1)
ret_value = calc_deleted_bytes_number(&s_cut_balance, mode);
else
ret_value = retval2;
- /* For direct items, we only change the quota when deleting the last
- ** item.
+ /*
+ * For direct items, we only change the quota when deleting the last
+ * item.
*/
- p_le_ih = PATH_PITEM_HEAD(s_cut_balance.tb_path);
+ p_le_ih = tp_item_head(s_cut_balance.tb_path);
if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(p_le_ih)) {
if (mode == M_DELETE &&
(le_ih_k_offset(p_le_ih) & (sb->s_blocksize - 1)) ==
1) {
- // FIXME: this is to keep 3.5 happy
+ /* FIXME: this is to keep 3.5 happy */
REISERFS_I(inode)->i_first_direct_byte = U32_MAX;
quota_cut_bytes = sb->s_blocksize + UNFM_P_SIZE;
} else {
@@ -1696,10 +1787,12 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
#ifdef CONFIG_REISERFS_CHECK
if (is_inode_locked) {
struct item_head *le_ih =
- PATH_PITEM_HEAD(s_cut_balance.tb_path);
- /* we are going to complete indirect2direct conversion. Make
- sure, that we exactly remove last unformatted node pointer
- of the item */
+ tp_item_head(s_cut_balance.tb_path);
+ /*
+ * we are going to complete the indirect2direct conversion.
+ * Make sure that we remove exactly the last unformatted node
+ * pointer of the item
+ */
if (!is_indirect_le_ih(le_ih))
reiserfs_panic(sb, "vs-5652",
"item must be indirect %h", le_ih);
@@ -1717,17 +1810,20 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
"(CUT, insert_size==%d)",
le_ih, s_cut_balance.insert_size[0]);
}
- /* it would be useful to make sure, that right neighboring
- item is direct item of this file */
+ /*
+ * it would be useful to make sure that the right neighboring
+ * item is a direct item of this file
+ */
}
#endif
do_balance(&s_cut_balance, NULL, NULL, mode);
if (is_inode_locked) {
- /* we've done an indirect->direct conversion. when the data block
- ** was freed, it was removed from the list of blocks that must
- ** be flushed before the transaction commits, make sure to
- ** unmap and invalidate it
+ /*
+ * we've done an indirect->direct conversion. when the
+ * data block was freed, it was removed from the list of
+ * blocks that must be flushed before the transaction
+ * commits, make sure to unmap and invalidate it
*/
unmap_buffers(page, tail_pos);
REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
@@ -1758,20 +1854,25 @@ static void truncate_directory(struct reiserfs_transaction_handle *th,
set_le_key_k_type(KEY_FORMAT_3_5, INODE_PKEY(inode), TYPE_STAT_DATA);
}
-/* Truncate file to the new size. Note, this must be called with a transaction
- already started */
+/*
+ * Truncate file to the new size. Note, this must be called with a
+ * transaction already started
+ */
int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
- struct inode *inode, /* ->i_size contains new size */
+ struct inode *inode, /* ->i_size contains new size */
struct page *page, /* up to date for last block */
- int update_timestamps /* when it is called by
- file_release to convert
- the tail - no timestamps
- should be updated */
+ /*
+ * when it is called by file_release to convert
+ * the tail - no timestamps should be updated
+ */
+ int update_timestamps
)
{
INITIALIZE_PATH(s_search_path); /* Path to the current object item. */
struct item_head *p_le_ih; /* Pointer to an item header. */
- struct cpu_key s_item_key; /* Key to search for a previous file item. */
+
+ /* Key to search for a previous file item. */
+ struct cpu_key s_item_key;
loff_t file_size, /* Old file size. */
new_file_size; /* New file size. */
int deleted; /* Number of deleted or truncated bytes. */
@@ -1784,8 +1885,8 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
|| S_ISLNK(inode->i_mode)))
return 0;
+ /* deletion of directory - no need to update timestamps */
if (S_ISDIR(inode->i_mode)) {
- // deletion of directory - no need to update timestamps
truncate_directory(th, inode);
return 0;
}
@@ -1793,7 +1894,7 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
/* Get new file size. */
new_file_size = inode->i_size;
- // FIXME: note, that key type is unimportant here
+ /* FIXME: note, that key type is unimportant here */
make_cpu_key(&s_item_key, inode, max_reiserfs_offset(inode),
TYPE_DIRECT, 3);
@@ -1819,7 +1920,7 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
s_search_path.pos_in_item--;
/* Get real file size (total length of all file items) */
- p_le_ih = PATH_PITEM_HEAD(&s_search_path);
+ p_le_ih = tp_item_head(&s_search_path);
if (is_statdata_le_ih(p_le_ih))
file_size = 0;
else {
@@ -1827,9 +1928,11 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
int bytes =
op_bytes_number(p_le_ih, inode->i_sb->s_blocksize);
- /* this may mismatch with real file size: if last direct item
- had no padding zeros and last unformatted node had no free
- space, this file would have this file size */
+ /*
+ * this may mismatch with the real file size: if the last direct
+ * item had no padding zeros and the last unformatted node had no
+ * free space, this file would have this file size
+ */
file_size = offset + bytes - 1;
}
/*
@@ -1867,18 +1970,20 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
set_cpu_key_k_offset(&s_item_key, file_size);
- /* While there are bytes to truncate and previous file item is presented in the tree. */
+ /*
+ * While there are bytes to truncate and the previous
+ * file item is present in the tree.
+ */
/*
- ** This loop could take a really long time, and could log
- ** many more blocks than a transaction can hold. So, we do a polite
- ** journal end here, and if the transaction needs ending, we make
- ** sure the file is consistent before ending the current trans
- ** and starting a new one
+ * This loop could take a really long time, and could log
+ * many more blocks than a transaction can hold. So, we do
+ * a polite journal end here, and if the transaction needs
+ * ending, we make sure the file is consistent before ending
+ * the current trans and starting a new one
*/
if (journal_transaction_should_end(th, 0) ||
reiserfs_transaction_free_space(th) <= JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD) {
- int orig_len_alloc = th->t_blocks_allocated;
pathrelse(&s_search_path);
if (update_timestamps) {
@@ -1887,7 +1992,7 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
}
reiserfs_update_sd(th, inode);
- err = journal_end(th, inode->i_sb, orig_len_alloc);
+ err = journal_end(th);
if (err)
goto out;
err = journal_begin(th, inode->i_sb,
@@ -1904,25 +2009,25 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
"PAP-5680: truncate did not finish: new_file_size %Ld, current %Ld, oid %d",
new_file_size, file_size, s_item_key.on_disk_key.k_objectid);
- update_and_out:
+update_and_out:
if (update_timestamps) {
- // this is truncate, not file closing
+ /* this is truncate, not file closing */
inode->i_mtime = CURRENT_TIME_SEC;
inode->i_ctime = CURRENT_TIME_SEC;
}
reiserfs_update_sd(th, inode);
- out:
+out:
pathrelse(&s_search_path);
return err;
}
#ifdef CONFIG_REISERFS_CHECK
-// this makes sure, that we __append__, not overwrite or add holes
+/* this makes sure, that we __append__, not overwrite or add holes */
static void check_research_for_paste(struct treepath *path,
const struct cpu_key *key)
{
- struct item_head *found_ih = get_ih(path);
+ struct item_head *found_ih = tp_item_head(path);
if (is_direct_le_ih(found_ih)) {
if (le_ih_k_offset(found_ih) +
@@ -1952,13 +2057,22 @@ static void check_research_for_paste(struct treepath *path,
}
#endif /* config reiserfs check */
-/* Paste bytes to the existing item. Returns bytes number pasted into the item. */
-int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct treepath *search_path, /* Path to the pasted item. */
- const struct cpu_key *key, /* Key to search for the needed item. */
- struct inode *inode, /* Inode item belongs to */
- const char *body, /* Pointer to the bytes to paste. */
+/*
+ * Paste bytes to the existing item.
+ * Returns the number of bytes pasted into the item.
+ */
+int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th,
+ /* Path to the pasted item. */
+ struct treepath *search_path,
+ /* Key to search for the needed item. */
+ const struct cpu_key *key,
+ /* Inode item belongs to */
+ struct inode *inode,
+ /* Pointer to the bytes to paste. */
+ const char *body,
+ /* Size of pasted bytes. */
int pasted_size)
-{ /* Size of pasted bytes. */
+{
struct super_block *sb = inode->i_sb;
struct tree_balance s_paste_balance;
int retval;
@@ -1973,7 +2087,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
"reiserquota paste_into_item(): allocating %u id=%u type=%c",
pasted_size, inode->i_uid,
- key2type(&(key->on_disk_key)));
+ key2type(&key->on_disk_key));
#endif
depth = reiserfs_write_unlock_nested(sb);
@@ -1997,7 +2111,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
while ((retval =
fix_nodes(M_PASTE, &s_paste_balance, NULL,
body)) == REPEAT_SEARCH) {
- search_again:
+search_again:
/* file system changed while we were in the fix_nodes */
PROC_INFO_INC(th->t_super, paste_into_item_restarted);
retval =
@@ -2019,21 +2133,23 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
#endif
}
- /* Perform balancing after all resources are collected by fix_nodes, and
- accessing them will not risk triggering schedule. */
+ /*
+ * Perform balancing after all resources are collected by fix_nodes,
+ * and accessing them will not risk triggering schedule.
+ */
if (retval == CARRY_ON) {
do_balance(&s_paste_balance, NULL /*ih */ , body, M_PASTE);
return 0;
}
retval = (retval == NO_DISK_SPACE) ? -ENOSPC : -EIO;
- error_out:
+error_out:
/* this also releases the path */
unfix_nodes(&s_paste_balance);
#ifdef REISERQUOTA_DEBUG
reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
"reiserquota paste_into_item(): freeing %u id=%u type=%c",
pasted_size, inode->i_uid,
- key2type(&(key->on_disk_key)));
+ key2type(&key->on_disk_key));
#endif
depth = reiserfs_write_unlock_nested(sb);
dquot_free_space_nodirty(inode, pasted_size);
@@ -2041,7 +2157,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
return retval;
}
-/* Insert new item into the buffer at the path.
+/*
+ * Insert new item into the buffer at the path.
* th - active transaction handle
* path - path to the inserted item
* ih - pointer to the item header to insert
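
(Reader's note, not part of the patch: a hedged sketch of the usual insert
flow; the caller searches first and inserts on ITEM_NOT_FOUND. The parameter
order is an assumption based on the comment above and the calls visible in
this diff.)

    retval = search_item(th->t_super, key, path);
    if (retval == ITEM_NOT_FOUND)
            retval = reiserfs_insert_item(th, path, key, ih, inode, body);
    else
            pathrelse(path);    /* the item already exists */
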
@@ -2064,8 +2181,10 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
fs_gen = get_generation(inode->i_sb);
quota_bytes = ih_item_len(ih);
- /* hack so the quota code doesn't have to guess if the file has
- ** a tail, links are always tails, so there's no guessing needed
+ /*
+ * hack so the quota code doesn't have to guess
+ * if the file has a tail, links are always tails,
+ * so there's no guessing needed
*/
if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(ih))
quota_bytes = inode->i_sb->s_blocksize + UNFM_P_SIZE;
@@ -2074,8 +2193,10 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
"reiserquota insert_item(): allocating %u id=%u type=%c",
quota_bytes, inode->i_uid, head2type(ih));
#endif
- /* We can't dirty inode here. It would be immediately written but
- * appropriate stat item isn't inserted yet... */
+ /*
+ * We can't dirty inode here. It would be immediately
+ * written but appropriate stat item isn't inserted yet...
+ */
depth = reiserfs_write_unlock_nested(inode->i_sb);
retval = dquot_alloc_space_nodirty(inode, quota_bytes);
reiserfs_write_lock_nested(inode->i_sb, depth);
@@ -2089,7 +2210,10 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
s_ins_balance.key = key->on_disk_key;
#endif
- /* DQUOT_* can schedule, must check to be sure calling fix_nodes is safe */
+ /*
+ * DQUOT_* can schedule, must check to be sure calling
+ * fix_nodes is safe
+ */
if (inode && fs_changed(fs_gen, inode->i_sb)) {
goto search_again;
}
@@ -2097,7 +2221,7 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
while ((retval =
fix_nodes(M_INSERT, &s_ins_balance, ih,
body)) == REPEAT_SEARCH) {
- search_again:
+search_again:
/* file system changed while we were in the fix_nodes */
PROC_INFO_INC(th->t_super, insert_item_restarted);
retval = search_item(th->t_super, key, path);
@@ -2121,7 +2245,7 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
}
retval = (retval == NO_DISK_SPACE) ? -ENOSPC : -EIO;
- error_out:
+error_out:
/* also releases the path */
unfix_nodes(&s_ins_balance);
#ifdef REISERQUOTA_DEBUG
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 9fb20426005..a392cef6acc 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -74,7 +74,7 @@ static int reiserfs_sync_fs(struct super_block *s, int wait)
dquot_writeback_dquots(s, -1);
reiserfs_write_lock(s);
if (!journal_begin(&th, s, 1))
- if (!journal_end_sync(&th, s, 1))
+ if (!journal_end_sync(&th))
reiserfs_flush_old_commits(s);
reiserfs_write_unlock(s);
return 0;
@@ -136,9 +136,9 @@ static int reiserfs_freeze(struct super_block *s)
} else {
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s),
1);
- journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
+ journal_mark_dirty(&th, SB_BUFFER_WITH_SB(s));
reiserfs_block_writes(&th);
- journal_end_sync(&th, s, 1);
+ journal_end_sync(&th);
}
}
reiserfs_write_unlock(s);
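
(Reader's note, not part of the patch: these hunks reflect the journal-API
change threaded through this series. The transaction handle already knows
its superblock, so journal_end(), journal_end_sync() and journal_mark_dirty()
drop their redundant arguments; a sketch of the new calling convention, using
only calls that appear in this diff.)

    struct reiserfs_transaction_handle th;

    if (!journal_begin(&th, s, 1)) {    /* the handle remembers 's' */
            reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
            journal_mark_dirty(&th, SB_BUFFER_WITH_SB(s));
            journal_end_sync(&th);      /* no sb or block-count arguments */
    }
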
@@ -153,13 +153,15 @@ static int reiserfs_unfreeze(struct super_block *s)
extern const struct in_core_key MAX_IN_CORE_KEY;
-/* this is used to delete "save link" when there are no items of a
- file it points to. It can either happen if unlink is completed but
- "save unlink" removal, or if file has both unlink and truncate
- pending and as unlink completes first (because key of "save link"
- protecting unlink is bigger that a key lf "save link" which
- protects truncate), so there left no items to make truncate
- completion on */
+/*
+ * this is used to delete the "save link" when there are no items of a
+ * file it points to. It can happen either if unlink completed but the
+ * "save link" removal did not, or if the file had both unlink and
+ * truncate pending and unlink completed first (because the key of the
+ * "save link" protecting unlink is bigger than the key of the "save
+ * link" which protects truncate), so there are no items left to
+ * complete the truncate on
+ */
static int remove_save_link_only(struct super_block *s,
struct reiserfs_key *key, int oid_free)
{
@@ -176,7 +178,7 @@ static int remove_save_link_only(struct super_block *s,
/* removals are protected by direct items */
reiserfs_release_objectid(&th, le32_to_cpu(key->k_objectid));
- return journal_end(&th, s, JOURNAL_PER_BALANCE_CNT);
+ return journal_end(&th);
}
#ifdef CONFIG_QUOTA
@@ -258,7 +260,7 @@ static int finish_unfinished(struct super_block *s)
break;
}
item_pos--;
- ih = B_N_PITEM_HEAD(bh, item_pos);
+ ih = item_head(bh, item_pos);
if (le32_to_cpu(ih->ih_key.k_dir_id) != MAX_KEY_OBJECTID)
/* there are no "save" links anymore */
@@ -271,7 +273,7 @@ static int finish_unfinished(struct super_block *s)
truncate = 0;
/* reiserfs_iget needs k_dirid and k_objectid only */
- item = B_I_PITEM(bh, ih);
+ item = ih_item_body(bh, ih);
obj_key.on_disk_key.k_dir_id = le32_to_cpu(*(__le32 *) item);
obj_key.on_disk_key.k_objectid =
le32_to_cpu(ih->ih_key.k_objectid);
@@ -282,8 +284,10 @@ static int finish_unfinished(struct super_block *s)
inode = reiserfs_iget(s, &obj_key);
if (!inode) {
- /* the unlink almost completed, it just did not manage to remove
- "save" link and release objectid */
+ /*
+ * the unlink almost completed, it just did not
+ * manage to remove the "save" link and release the objectid
+ */
reiserfs_warning(s, "vs-2180", "iget failed for %K",
&obj_key);
retval = remove_save_link_only(s, &save_link_key, 1);
@@ -303,10 +307,13 @@ static int finish_unfinished(struct super_block *s)
reiserfs_write_lock_nested(inode->i_sb, depth);
if (truncate && S_ISDIR(inode->i_mode)) {
- /* We got a truncate request for a dir which is impossible.
- The only imaginable way is to execute unfinished truncate request
- then boot into old kernel, remove the file and create dir with
- the same key. */
+ /*
+ * We got a truncate request for a dir which
+ * is impossible. The only imaginable way is to
+ * execute unfinished truncate request then boot
+ * into old kernel, remove the file and create dir
+ * with the same key.
+ */
reiserfs_warning(s, "green-2101",
"impossible truncate on a "
"directory %k. Please report",
@@ -320,14 +327,16 @@ static int finish_unfinished(struct super_block *s)
if (truncate) {
REISERFS_I(inode)->i_flags |=
i_link_saved_truncate_mask;
- /* not completed truncate found. New size was committed together
- with "save" link */
+ /*
+ * an uncompleted truncate was found. The new size was
+ * committed together with the "save" link
+ */
reiserfs_info(s, "Truncating %k to %Ld ..",
INODE_PKEY(inode), inode->i_size);
- reiserfs_truncate_file(inode,
- 0
- /*don't update modification time */
- );
+
+ /* don't update modification time */
+ reiserfs_truncate_file(inode, 0);
+
retval = remove_save_link(inode, truncate);
} else {
REISERFS_I(inode)->i_flags |= i_link_saved_unlink_mask;
@@ -373,10 +382,12 @@ static int finish_unfinished(struct super_block *s)
return retval;
}
-/* to protect file being unlinked from getting lost we "safe" link files
- being unlinked. This link will be deleted in the same transaction with last
- item of file. mounting the filesystem we scan all these links and remove
- files which almost got lost */
+/*
+ * to protect a file being unlinked from getting lost we "save"-link the
+ * files being unlinked. This link will be deleted in the same transaction
+ * with the last item of the file. When mounting the filesystem we scan all
+ * these links and remove files which almost got lost
+ */
void add_save_link(struct reiserfs_transaction_handle *th,
struct inode *inode, int truncate)
{
@@ -495,7 +506,7 @@ int remove_save_link(struct inode *inode, int truncate)
} else
REISERFS_I(inode)->i_flags &= ~i_link_saved_truncate_mask;
- return journal_end(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT);
+ return journal_end(&th);
}
static void reiserfs_kill_sb(struct super_block *s)
@@ -530,19 +541,23 @@ static void reiserfs_put_super(struct super_block *s)
reiserfs_write_lock(s);
- /* change file system state to current state if it was mounted with read-write permissions */
+ /*
+ * change file system state to current state if it was mounted
+ * with read-write permissions
+ */
if (!(s->s_flags & MS_RDONLY)) {
if (!journal_begin(&th, s, 10)) {
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s),
1);
set_sb_umount_state(SB_DISK_SUPER_BLOCK(s),
REISERFS_SB(s)->s_mount_state);
- journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
+ journal_mark_dirty(&th, SB_BUFFER_WITH_SB(s));
}
}
- /* note, journal_release checks for readonly mount, and can decide not
- ** to do a journal_end
+ /*
+ * note, journal_release checks for readonly mount, and can
+ * decide not to do a journal_end
*/
journal_release(&th, s);
@@ -559,6 +574,7 @@ static void reiserfs_put_super(struct super_block *s)
reiserfs_write_unlock(s);
mutex_destroy(&REISERFS_SB(s)->lock);
+ destroy_workqueue(REISERFS_SB(s)->commit_wq);
kfree(s->s_fs_info);
s->s_fs_info = NULL;
}
@@ -634,15 +650,16 @@ static void reiserfs_dirty_inode(struct inode *inode, int flags)
}
reiserfs_write_lock(inode->i_sb);
- /* this is really only used for atime updates, so they don't have
- ** to be included in O_SYNC or fsync
+ /*
+ * this is really only used for atime updates, so they don't have
+ * to be included in O_SYNC or fsync
*/
err = journal_begin(&th, inode->i_sb, 1);
if (err)
goto out;
reiserfs_update_sd(&th, inode);
- journal_end(&th, inode->i_sb, 1);
+ journal_end(&th);
out:
reiserfs_write_unlock(inode->i_sb);
@@ -788,31 +805,53 @@ static const struct export_operations reiserfs_export_ops = {
.get_parent = reiserfs_get_parent,
};
-/* this struct is used in reiserfs_getopt () for containing the value for those
- mount options that have values rather than being toggles. */
+/*
+ * this struct is used in reiserfs_getopt () for containing the value for
+ * those mount options that have values rather than being toggles.
+ */
typedef struct {
char *value;
- int setmask; /* bitmask which is to set on mount_options bitmask when this
- value is found, 0 is no bits are to be changed. */
- int clrmask; /* bitmask which is to clear on mount_options bitmask when this
- value is found, 0 is no bits are to be changed. This is
- applied BEFORE setmask */
+ /*
+ * bitmask which is to be set on the mount_options bitmask
+ * when this value is found; 0 if no bits are to be changed.
+ */
+ int setmask;
+ /*
+ * bitmask which is to be cleared on the mount_options bitmask
+ * when this value is found; 0 if no bits are to be changed.
+ * This is applied BEFORE setmask
+ */
+ int clrmask;
} arg_desc_t;
/* Set this bit in arg_required to allow empty arguments */
#define REISERFS_OPT_ALLOWEMPTY 31
-/* this struct is used in reiserfs_getopt() for describing the set of reiserfs
- mount options */
+/*
+ * this struct is used in reiserfs_getopt() for describing the
+ * set of reiserfs mount options
+ */
typedef struct {
char *option_name;
- int arg_required; /* 0 if argument is not required, not 0 otherwise */
- const arg_desc_t *values; /* list of values accepted by an option */
- int setmask; /* bitmask which is to set on mount_options bitmask when this
- value is found, 0 is no bits are to be changed. */
- int clrmask; /* bitmask which is to clear on mount_options bitmask when this
- value is found, 0 is no bits are to be changed. This is
- applied BEFORE setmask */
+
+ /* 0 if argument is not required, not 0 otherwise */
+ int arg_required;
+
+ /* list of values accepted by an option */
+ const arg_desc_t *values;
+
+ /*
+ * bitmask which is to be set on the mount_options bitmask
+ * when this value is found; 0 if no bits are to be changed.
+ */
+ int setmask;
+
+ /*
+ * bitmask which is to be cleared on the mount_options bitmask
+ * when this value is found; 0 if no bits are to be changed.
+ * This is applied BEFORE setmask
+ */
+ int clrmask;
} opt_desc_t;
/* possible values for -o data= */
@@ -833,8 +872,10 @@ static const arg_desc_t barrier_mode[] = {
{.value = NULL}
};
-/* possible values for "-o block-allocator=" and bits which are to be set in
- s_mount_opt of reiserfs specific part of in-core super block */
+/*
+ * possible values for "-o block-allocator=" and bits which are to be set in
+ * s_mount_opt of reiserfs specific part of in-core super block
+ */
static const arg_desc_t balloc[] = {
{"noborder", 1 << REISERFS_NO_BORDER, 0},
{"border", 0, 1 << REISERFS_NO_BORDER},
@@ -864,21 +905,25 @@ static const arg_desc_t error_actions[] = {
{NULL, 0, 0},
};
-/* proceed only one option from a list *cur - string containing of mount options
- opts - array of options which are accepted
- opt_arg - if option is found and requires an argument and if it is specifed
- in the input - pointer to the argument is stored here
- bit_flags - if option requires to set a certain bit - it is set here
- return -1 if unknown option is found, opt->arg_required otherwise */
+/*
+ * process only one option from a list
+ * *cur - string containing the mount options
+ * opts - array of options which are accepted
+ * opt_arg - if option is found and requires an argument and if it is
+ * specified in the input - pointer to the argument is stored here
+ * bit_flags - if option requires to set a certain bit - it is set here
+ * return -1 if unknown option is found, opt->arg_required otherwise
+ */
static int reiserfs_getopt(struct super_block *s, char **cur, opt_desc_t * opts,
char **opt_arg, unsigned long *bit_flags)
{
char *p;
- /* foo=bar,
- ^ ^ ^
- | | +-- option_end
- | +-- arg_start
- +-- option_start
+ /*
+ * foo=bar,
+ * ^ ^ ^
+ * | | +-- option_end
+ * | +-- arg_start
+ * +-- option_start
*/
const opt_desc_t *opt;
const arg_desc_t *arg;
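
(Reader's note, not part of the patch: the loop that drives this helper
appears further down in reiserfs_parse_options(); schematically it looks
like this, with the per-option handling elided.)

    for (pos = options; pos;) {
            c = reiserfs_getopt(s, &pos, opts, &arg, mount_options);
            if (c == -1)
                    return 0;   /* unknown option rejects the whole string */
            /* option letter is in 'c', its argument (if any) in 'arg' */
    }
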
@@ -893,9 +938,12 @@ static int reiserfs_getopt(struct super_block *s, char **cur, opt_desc_t * opts,
}
if (!strncmp(p, "alloc=", 6)) {
- /* Ugly special case, probably we should redo options parser so that
- it can understand several arguments for some options, also so that
- it can fill several bitfields with option values. */
+ /*
+ * Ugly special case, probably we should redo options
+ * parser so that it can understand several arguments for
+ * some options, also so that it can fill several bitfields
+ * with option values.
+ */
if (reiserfs_parse_alloc_options(s, p + 6)) {
return -1;
} else {
@@ -958,7 +1006,10 @@ static int reiserfs_getopt(struct super_block *s, char **cur, opt_desc_t * opts,
return -1;
}
- /* move to the argument, or to next option if argument is not required */
+ /*
+ * move to the argument, or to next option if argument is not
+ * required
+ */
p++;
if (opt->arg_required
@@ -995,12 +1046,20 @@ static int reiserfs_getopt(struct super_block *s, char **cur, opt_desc_t * opts,
}
/* returns 0 if something is wrong in option string, 1 - otherwise */
-static int reiserfs_parse_options(struct super_block *s, char *options, /* string given via mount's -o */
+static int reiserfs_parse_options(struct super_block *s,
+
+ /* string given via mount's -o */
+ char *options,
+
+ /*
+ * after the parsing phase, contains the
+ * collection of bitflags defining what
+ * mount options were selected.
+ */
unsigned long *mount_options,
- /* after the parsing phase, contains the
- collection of bitflags defining what
- mount options were selected. */
- unsigned long *blocks, /* strtol-ed from NNN of resize=NNN */
+
+ /* strtol-ed from NNN of resize=NNN */
+ unsigned long *blocks,
char **jdev_name,
unsigned int *commit_max_age,
char **qf_names,
@@ -1010,7 +1069,10 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
char *arg = NULL;
char *pos;
opt_desc_t opts[] = {
- /* Compatibility stuff, so that -o notail for old setups still work */
+ /*
+ * Compatibility stuff, so that -o notail for old
+ * setups still work
+ */
{"tails",.arg_required = 't',.values = tails},
{"notail",.clrmask =
(1 << REISERFS_LARGETAIL) | (1 << REISERFS_SMALLTAIL)},
@@ -1055,8 +1117,10 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
*blocks = 0;
if (!options || !*options)
- /* use default configuration: create tails, journaling on, no
- conversion to newest format */
+ /*
+ * use default configuration: create tails, journaling on, no
+ * conversion to newest format
+ */
return 1;
for (pos = options; pos;) {
@@ -1109,7 +1173,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
if (c == 'j') {
if (arg && *arg && jdev_name) {
- if (*jdev_name) { //Hm, already assigned?
+ /* Hm, already assigned? */
+ if (*jdev_name) {
reiserfs_warning(s, "super-6510",
"journal device was "
"already specified to "
@@ -1362,8 +1427,10 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
safe_mask |= 1 << REISERFS_USRQUOTA;
safe_mask |= 1 << REISERFS_GRPQUOTA;
- /* Update the bitmask, taking care to keep
- * the bits we're not allowed to change here */
+ /*
+ * Update the bitmask, taking care to keep
+ * the bits we're not allowed to change here
+ */
REISERFS_SB(s)->s_mount_opt =
(REISERFS_SB(s)->
s_mount_opt & ~safe_mask) | (mount_options & safe_mask);
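
The update above is the standard mask-merge idiom: bits outside safe_mask keep their old value, bits inside it come from the new options. A toy illustration with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long old_opt = 0xF0F0;		/* current s_mount_opt (made up) */
	unsigned long new_opt = 0x0FFF;		/* options from remount (made up) */
	unsigned long safe_mask = 0x00FF;	/* bits we may change */
	unsigned long merged;

	/* keep bits outside safe_mask, take bits inside it from new_opt */
	merged = (old_opt & ~safe_mask) | (new_opt & safe_mask);
	printf("merged = 0x%04lx\n", merged);	/* prints 0xf0ff */
	return 0;
}
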
@@ -1410,7 +1477,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
/* Mounting a rw partition read-only. */
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
set_sb_umount_state(rs, REISERFS_SB(s)->s_mount_state);
- journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
+ journal_mark_dirty(&th, SB_BUFFER_WITH_SB(s));
} else {
/* remount read-write */
if (!(s->s_flags & MS_RDONLY)) {
@@ -1427,7 +1494,9 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
handle_data_mode(s, mount_options);
handle_barrier_mode(s, mount_options);
REISERFS_SB(s)->s_mount_state = sb_umount_state(rs);
- s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */
+
+ /* now it is safe to call journal_begin */
+ s->s_flags &= ~MS_RDONLY;
err = journal_begin(&th, s, 10);
if (err)
goto out_err_unlock;
@@ -1440,12 +1509,12 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
if (!old_format_only(s))
set_sb_mnt_count(rs, sb_mnt_count(rs) + 1);
/* mark_buffer_dirty (SB_BUFFER_WITH_SB (s), 1); */
- journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
+ journal_mark_dirty(&th, SB_BUFFER_WITH_SB(s));
REISERFS_SB(s)->s_mount_state = REISERFS_VALID_FS;
}
/* this will force a full flush of all journal lists */
SB_JOURNAL(s)->j_must_wait = 1;
- err = journal_end(&th, s, 10);
+ err = journal_end(&th);
if (err)
goto out_err_unlock;
@@ -1489,9 +1558,9 @@ static int read_super_block(struct super_block *s, int offset)
brelse(bh);
return 1;
}
- //
- // ok, reiserfs signature (old or new) found in at the given offset
- //
+ /*
+	 * ok, reiserfs signature (old or new) found at the given offset
+ */
fs_blocksize = sb_blocksize(rs);
brelse(bh);
sb_set_blocksize(s, fs_blocksize);
@@ -1529,9 +1598,11 @@ static int read_super_block(struct super_block *s, int offset)
SB_BUFFER_WITH_SB(s) = bh;
SB_DISK_SUPER_BLOCK(s) = rs;
+ /*
+	 * magic is that of a non-standard journal filesystem; look at
+	 * s_version to find which format is in use
+ */
if (is_reiserfs_jr(rs)) {
- /* magic is of non-standard journal filesystem, look at s_version to
- find which format is in use */
if (sb_version(rs) == REISERFS_VERSION_2)
reiserfs_info(s, "found reiserfs format \"3.6\""
" with non-standard journal\n");
@@ -1545,8 +1616,10 @@ static int read_super_block(struct super_block *s, int offset)
return 1;
}
} else
- /* s_version of standard format may contain incorrect information,
- so we just look at the magic string */
+ /*
+ * s_version of standard format may contain incorrect
+ * information, so we just look at the magic string
+ */
reiserfs_info(s,
"found reiserfs format \"%s\" with standard journal\n",
is_reiserfs_3_5(rs) ? "3.5" : "3.6");
@@ -1558,8 +1631,9 @@ static int read_super_block(struct super_block *s, int offset)
s->dq_op = &reiserfs_quota_operations;
#endif
- /* new format is limited by the 32 bit wide i_blocks field, want to
- ** be one full block below that.
+ /*
+ * new format is limited by the 32 bit wide i_blocks field, want to
+ * be one full block below that.
*/
s->s_maxbytes = (512LL << 32) - s->s_blocksize;
return 0;
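
To make the comment's limit concrete: i_blocks is a 32-bit count of 512-byte units, so the hard ceiling is 512 * 2^32 bytes = 2 TiB, and s_maxbytes stays one full block below it. A quick stand-alone check, assuming a 4k block size:

#include <stdio.h>

int main(void)
{
	unsigned long long blocksize = 4096;	/* assumed block size */
	unsigned long long maxbytes = (512ULL << 32) - blocksize;

	/* 512-byte units * 2^32 = 2 TiB ceiling, minus one full block */
	printf("s_maxbytes = %llu bytes (%llu GiB)\n",
	       maxbytes, maxbytes >> 30);
	return 0;
}
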
@@ -1568,7 +1642,7 @@ static int read_super_block(struct super_block *s, int offset)
/* after journal replay, reread all bitmap and super blocks */
static int reread_meta_blocks(struct super_block *s)
{
- ll_rw_block(READ, 1, &(SB_BUFFER_WITH_SB(s)));
+ ll_rw_block(READ, 1, &SB_BUFFER_WITH_SB(s));
wait_on_buffer(SB_BUFFER_WITH_SB(s));
if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
reiserfs_warning(s, "reiserfs-2504", "error reading the super");
@@ -1578,14 +1652,15 @@ static int reread_meta_blocks(struct super_block *s)
return 0;
}
-/////////////////////////////////////////////////////
-// hash detection stuff
+/* hash detection stuff */
-// if root directory is empty - we set default - Yura's - hash and
-// warn about it
-// FIXME: we look for only one name in a directory. If tea and yura
-// bith have the same value - we ask user to send report to the
-// mailing list
+/*
+ * if root directory is empty - we set default - Yura's - hash and
+ * warn about it
+ * FIXME: we look for only one name in a directory. If tea and yura
+ * both have the same value - we ask user to send report to the
+ * mailing list
+ */
static __u32 find_hash_out(struct super_block *s)
{
int retval;
@@ -1593,92 +1668,83 @@ static __u32 find_hash_out(struct super_block *s)
struct cpu_key key;
INITIALIZE_PATH(path);
struct reiserfs_dir_entry de;
+ struct reiserfs_de_head *deh;
__u32 hash = DEFAULT_HASH;
+ __u32 deh_hashval, teahash, r5hash, yurahash;
inode = s->s_root->d_inode;
- do { // Some serious "goto"-hater was there ;)
- u32 teahash, r5hash, yurahash;
+ make_cpu_key(&key, inode, ~0, TYPE_DIRENTRY, 3);
+ retval = search_by_entry_key(s, &key, &path, &de);
+ if (retval == IO_ERROR) {
+ pathrelse(&path);
+ return UNSET_HASH;
+ }
+ if (retval == NAME_NOT_FOUND)
+ de.de_entry_num--;
+
+ set_de_name_and_namelen(&de);
+ deh = de.de_deh + de.de_entry_num;
- make_cpu_key(&key, inode, ~0, TYPE_DIRENTRY, 3);
- retval = search_by_entry_key(s, &key, &path, &de);
- if (retval == IO_ERROR) {
- pathrelse(&path);
- return UNSET_HASH;
- }
- if (retval == NAME_NOT_FOUND)
- de.de_entry_num--;
- set_de_name_and_namelen(&de);
- if (deh_offset(&(de.de_deh[de.de_entry_num])) == DOT_DOT_OFFSET) {
- /* allow override in this case */
- if (reiserfs_rupasov_hash(s)) {
- hash = YURA_HASH;
- }
- reiserfs_info(s, "FS seems to be empty, autodetect "
- "is using the default hash\n");
- break;
- }
- r5hash = GET_HASH_VALUE(r5_hash(de.de_name, de.de_namelen));
- teahash = GET_HASH_VALUE(keyed_hash(de.de_name, de.de_namelen));
- yurahash = GET_HASH_VALUE(yura_hash(de.de_name, de.de_namelen));
- if (((teahash == r5hash)
- &&
- (GET_HASH_VALUE(deh_offset(&(de.de_deh[de.de_entry_num])))
- == r5hash)) || ((teahash == yurahash)
- && (yurahash ==
- GET_HASH_VALUE(deh_offset
- (&
- (de.
- de_deh[de.
- de_entry_num])))))
- || ((r5hash == yurahash)
- && (yurahash ==
- GET_HASH_VALUE(deh_offset
- (&(de.de_deh[de.de_entry_num])))))) {
- reiserfs_warning(s, "reiserfs-2506", "Unable to "
- "automatically detect hash function. "
- "Please mount with -o "
- "hash={tea,rupasov,r5}");
- hash = UNSET_HASH;
- break;
- }
- if (GET_HASH_VALUE(deh_offset(&(de.de_deh[de.de_entry_num]))) ==
- yurahash)
+ if (deh_offset(deh) == DOT_DOT_OFFSET) {
+ /* allow override in this case */
+ if (reiserfs_rupasov_hash(s))
hash = YURA_HASH;
- else if (GET_HASH_VALUE
- (deh_offset(&(de.de_deh[de.de_entry_num]))) == teahash)
- hash = TEA_HASH;
- else if (GET_HASH_VALUE
- (deh_offset(&(de.de_deh[de.de_entry_num]))) == r5hash)
- hash = R5_HASH;
- else {
- reiserfs_warning(s, "reiserfs-2506",
- "Unrecognised hash function");
- hash = UNSET_HASH;
- }
- } while (0);
+ reiserfs_info(s, "FS seems to be empty, autodetect is using the default hash\n");
+ goto out;
+ }
+
+ deh_hashval = GET_HASH_VALUE(deh_offset(deh));
+ r5hash = GET_HASH_VALUE(r5_hash(de.de_name, de.de_namelen));
+ teahash = GET_HASH_VALUE(keyed_hash(de.de_name, de.de_namelen));
+ yurahash = GET_HASH_VALUE(yura_hash(de.de_name, de.de_namelen));
+
+ if ((teahash == r5hash && deh_hashval == r5hash) ||
+ (teahash == yurahash && deh_hashval == yurahash) ||
+ (r5hash == yurahash && deh_hashval == yurahash)) {
+ reiserfs_warning(s, "reiserfs-2506",
+ "Unable to automatically detect hash "
+ "function. Please mount with -o "
+ "hash={tea,rupasov,r5}");
+ hash = UNSET_HASH;
+ goto out;
+ }
+ if (deh_hashval == yurahash)
+ hash = YURA_HASH;
+ else if (deh_hashval == teahash)
+ hash = TEA_HASH;
+ else if (deh_hashval == r5hash)
+ hash = R5_HASH;
+ else {
+ reiserfs_warning(s, "reiserfs-2506",
+ "Unrecognised hash function");
+ hash = UNSET_HASH;
+ }
+out:
pathrelse(&path);
return hash;
}
-// finds out which hash names are sorted with
+/* finds out which hash names are sorted with */
static int what_hash(struct super_block *s)
{
__u32 code;
code = sb_hash_function_code(SB_DISK_SUPER_BLOCK(s));
- /* reiserfs_hash_detect() == true if any of the hash mount options
- ** were used. We must check them to make sure the user isn't
- ** using a bad hash value
+ /*
+ * reiserfs_hash_detect() == true if any of the hash mount options
+ * were used. We must check them to make sure the user isn't
+ * using a bad hash value
*/
if (code == UNSET_HASH || reiserfs_hash_detect(s))
code = find_hash_out(s);
if (code != UNSET_HASH && reiserfs_hash_detect(s)) {
- /* detection has found the hash, and we must check against the
- ** mount options
+ /*
+ * detection has found the hash, and we must check against the
+ * mount options
*/
if (reiserfs_rupasov_hash(s) && code != YURA_HASH) {
reiserfs_warning(s, "reiserfs-2507",
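
The shape of find_hash_out()'s detection can be tried in user space: hash one sample name with every candidate and compare against the value stored on disk, giving up when candidates collide. A sketch with two stand-in hashes (the real candidates are the tea, r5 and yura functions from hashes.c):

#include <stdio.h>

/* Stand-in hashes; the real candidates live in fs/reiserfs/hashes.c. */
static unsigned hash_a(const char *s)
{
	unsigned h = 0;

	while (*s)
		h = h * 31 + (unsigned char)*s++;
	return h;
}

static unsigned hash_b(const char *s)
{
	unsigned h = 5381;

	while (*s)
		h = (h << 5) + h + (unsigned char)*s++;
	return h;
}

static const char *detect(const char *name, unsigned stored)
{
	unsigned a = hash_a(name), b = hash_b(name);

	/* two candidates agreeing with the stored value is ambiguous */
	if (a == b && stored == a)
		return "ambiguous - ask the user";
	if (stored == a)
		return "hash A";
	if (stored == b)
		return "hash B";
	return "unrecognised";
}

int main(void)
{
	const char *name = "lost+found";	/* sample directory entry */

	printf("%s\n", detect(name, hash_a(name)));
	return 0;
}
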
@@ -1700,7 +1766,10 @@ static int what_hash(struct super_block *s)
code = UNSET_HASH;
}
} else {
- /* find_hash_out was not called or could not determine the hash */
+ /*
+ * find_hash_out was not called or
+ * could not determine the hash
+ */
if (reiserfs_rupasov_hash(s)) {
code = YURA_HASH;
} else if (reiserfs_tea_hash(s)) {
@@ -1710,8 +1779,9 @@ static int what_hash(struct super_block *s)
}
}
- /* if we are mounted RW, and we have a new valid hash code, update
- ** the super
+ /*
+ * if we are mounted RW, and we have a new valid hash code, update
+ * the super
*/
if (code != UNSET_HASH &&
!(s->s_flags & MS_RDONLY) &&
@@ -1721,7 +1791,7 @@ static int what_hash(struct super_block *s)
return code;
}
-// return pointer to appropriate function
+/* return pointer to appropriate function */
static hashf_t hash_function(struct super_block *s)
{
switch (what_hash(s)) {
@@ -1738,7 +1808,7 @@ static hashf_t hash_function(struct super_block *s)
return NULL;
}
-// this is used to set up correct value for old partitions
+/* this is used to set up correct value for old partitions */
static int function2code(hashf_t func)
{
if (func == keyed_hash)
@@ -1748,7 +1818,7 @@ static int function2code(hashf_t func)
if (func == r5_hash)
return R5_HASH;
- BUG(); // should never happen
+ BUG(); /* should never happen */
return 0;
}
@@ -1783,8 +1853,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
- /* no preallocation minimum, be smart in
- reiserfs_file_write instead */
+ /* no preallocation minimum, be smart in reiserfs_file_write instead */
sbi->s_alloc_options.preallocmin = 0;
/* Preallocate by 16 blocks (17-1) at once */
sbi->s_alloc_options.preallocsize = 17;
@@ -1796,9 +1865,17 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
mutex_init(&sbi->lock);
sbi->lock_depth = -1;
+ sbi->commit_wq = alloc_workqueue("reiserfs/%s", WQ_MEM_RECLAIM, 0,
+ s->s_id);
+ if (!sbi->commit_wq) {
+ SWARN(silent, s, "", "Cannot allocate commit workqueue");
+ errval = -ENOMEM;
+ goto error_unlocked;
+ }
+
jdev_name = NULL;
if (reiserfs_parse_options
- (s, (char *)data, &(sbi->s_mount_opt), &blocks, &jdev_name,
+ (s, (char *)data, &sbi->s_mount_opt, &blocks, &jdev_name,
&commit_max_age, qf_names, &qfmt) == 0) {
goto error_unlocked;
}
@@ -1819,10 +1896,17 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
goto error_unlocked;
}
- /* try old format (undistributed bitmap, super block in 8-th 1k block of a device) */
+ /*
+ * try old format (undistributed bitmap, super block in 8-th 1k
+ * block of a device)
+ */
if (!read_super_block(s, REISERFS_OLD_DISK_OFFSET_IN_BYTES))
old_format = 1;
- /* try new format (64-th 1k block), which can contain reiserfs super block */
+
+ /*
+ * try new format (64-th 1k block), which can contain reiserfs
+ * super block
+ */
else if (read_super_block(s, REISERFS_DISK_OFFSET_IN_BYTES)) {
SWARN(silent, s, "sh-2021", "can not find reiserfs on %s",
s->s_id);
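
The probe order above, in miniature: try the old-format location first, then the new one. Offsets follow the comments (8th and 64th 1k block); the has_super_at() probe is hypothetical, the real read_super_block() reads the block and checks the magic string:

#include <stdio.h>

/* Offsets per the comments above: 8th and 64th 1k block of the device. */
#define OLD_DISK_OFFSET (8 * 1024)
#define NEW_DISK_OFFSET (64 * 1024)

/* Hypothetical probe; the real code reads the block and checks the magic. */
static int has_super_at(long offset)
{
	return offset == NEW_DISK_OFFSET;	/* pretend: new-format fs */
}

int main(void)
{
	if (has_super_at(OLD_DISK_OFFSET))
		printf("old format superblock at %d\n", OLD_DISK_OFFSET);
	else if (has_super_at(NEW_DISK_OFFSET))
		printf("new format superblock at %d\n", NEW_DISK_OFFSET);
	else
		printf("no reiserfs superblock found\n");
	return 0;
}
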
@@ -1830,9 +1914,11 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
}
rs = SB_DISK_SUPER_BLOCK(s);
- /* Let's do basic sanity check to verify that underlying device is not
- smaller than the filesystem. If the check fails then abort and scream,
- because bad stuff will happen otherwise. */
+ /*
+ * Let's do basic sanity check to verify that underlying device is not
+ * smaller than the filesystem. If the check fails then abort and
+ * scream, because bad stuff will happen otherwise.
+ */
if (s->s_bdev && s->s_bdev->bd_inode
&& i_size_read(s->s_bdev->bd_inode) <
sb_block_count(rs) * sb_blocksize(rs)) {
@@ -1876,15 +1962,16 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
printk("reiserfs: using flush barriers\n");
}
- // set_device_ro(s->s_dev, 1) ;
if (journal_init(s, jdev_name, old_format, commit_max_age)) {
SWARN(silent, s, "sh-2022",
"unable to initialize journal space");
goto error_unlocked;
} else {
- jinit_done = 1; /* once this is set, journal_release must be called
- ** if we error out of the mount
- */
+ /*
+ * once this is set, journal_release must be called
+ * if we error out of the mount
+ */
+ jinit_done = 1;
}
if (reread_meta_blocks(s)) {
@@ -1905,7 +1992,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
args.dirid = REISERFS_ROOT_PARENT_OBJECTID;
root_inode =
iget5_locked(s, REISERFS_ROOT_OBJECTID, reiserfs_find_actor,
- reiserfs_init_locked_inode, (void *)(&args));
+ reiserfs_init_locked_inode, (void *)&args);
if (!root_inode) {
SWARN(silent, s, "jmacd-10", "get root inode failed");
goto error_unlocked;
@@ -1929,7 +2016,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
s->s_root = d_make_root(root_inode);
if (!s->s_root)
goto error;
- // define and initialize hash function
+ /* define and initialize hash function */
sbi->s_hash_function = hash_function(s);
if (sbi->s_hash_function == NULL) {
dput(s->s_root);
@@ -1939,11 +2026,11 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
if (is_reiserfs_3_5(rs)
|| (is_reiserfs_jr(rs) && SB_VERSION(s) == REISERFS_VERSION_1))
- set_bit(REISERFS_3_5, &(sbi->s_properties));
+ set_bit(REISERFS_3_5, &sbi->s_properties);
else if (old_format)
- set_bit(REISERFS_OLD_FORMAT, &(sbi->s_properties));
+ set_bit(REISERFS_OLD_FORMAT, &sbi->s_properties);
else
- set_bit(REISERFS_3_6, &(sbi->s_properties));
+ set_bit(REISERFS_3_6, &sbi->s_properties);
if (!(s->s_flags & MS_RDONLY)) {
@@ -1958,10 +2045,12 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
set_sb_umount_state(rs, REISERFS_ERROR_FS);
set_sb_fs_state(rs, 0);
- /* Clear out s_bmap_nr if it would wrap. We can handle this
+ /*
+ * Clear out s_bmap_nr if it would wrap. We can handle this
* case, but older revisions can't. This will cause the
* file system to fail mount on those older implementations,
- * avoiding corruption. -jeffm */
+ * avoiding corruption. -jeffm
+ */
if (bmap_would_wrap(reiserfs_bmap_count(s)) &&
sb_bmap_nr(rs) != 0) {
reiserfs_warning(s, "super-2030", "This file system "
@@ -1974,8 +2063,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
}
if (old_format_only(s)) {
- /* filesystem of format 3.5 either with standard or non-standard
- journal */
+ /*
+ * filesystem of format 3.5 either with standard
+ * or non-standard journal
+ */
if (convert_reiserfs(s)) {
/* and -o conv is given */
if (!silent)
@@ -1983,8 +2074,11 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
"converting 3.5 filesystem to the 3.6 format");
if (is_reiserfs_3_5(rs))
- /* put magic string of 3.6 format. 2.2 will not be able to
- mount this filesystem anymore */
+ /*
+ * put magic string of 3.6 format.
+ * 2.2 will not be able to
+ * mount this filesystem anymore
+ */
memcpy(rs->s_v1.s_magic,
reiserfs_3_6_magic_string,
sizeof
@@ -1992,8 +2086,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
set_sb_version(rs, REISERFS_VERSION_2);
reiserfs_convert_objectid_map_v1(s);
- set_bit(REISERFS_3_6, &(sbi->s_properties));
- clear_bit(REISERFS_3_5, &(sbi->s_properties));
+ set_bit(REISERFS_3_6, &sbi->s_properties);
+ clear_bit(REISERFS_3_5, &sbi->s_properties);
} else if (!silent) {
reiserfs_info(s, "using 3.5.x disk format\n");
}
@@ -2001,8 +2095,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
set_sb_mnt_count(rs, sb_mnt_count(rs) + 1);
- journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
- errval = journal_end(&th, s, 1);
+ journal_mark_dirty(&th, SB_BUFFER_WITH_SB(s));
+ errval = journal_end(&th);
if (errval) {
dput(s->s_root);
s->s_root = NULL;
@@ -2018,7 +2112,9 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
}
reiserfs_write_lock(s);
- /* look for files which were to be removed in previous session */
+ /*
+ * look for files which were to be removed in previous session
+ */
finish_unfinished(s);
} else {
if (old_format_only(s) && !silent) {
@@ -2034,7 +2130,9 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
}
reiserfs_write_lock(s);
}
- // mark hash in super block: it could be unset. overwrite should be ok
+ /*
+ * mark hash in super block: it could be unset. overwrite should be ok
+ */
set_sb_hash_function_code(rs, function2code(sbi->s_hash_function));
handle_attrs(s);
@@ -2111,9 +2209,7 @@ static int reiserfs_write_dquot(struct dquot *dquot)
depth = reiserfs_write_unlock_nested(dquot->dq_sb);
ret = dquot_commit(dquot);
reiserfs_write_lock_nested(dquot->dq_sb, depth);
- err =
- journal_end(&th, dquot->dq_sb,
- REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
+ err = journal_end(&th);
if (!ret && err)
ret = err;
out:
@@ -2136,9 +2232,7 @@ static int reiserfs_acquire_dquot(struct dquot *dquot)
depth = reiserfs_write_unlock_nested(dquot->dq_sb);
ret = dquot_acquire(dquot);
reiserfs_write_lock_nested(dquot->dq_sb, depth);
- err =
- journal_end(&th, dquot->dq_sb,
- REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
+ err = journal_end(&th);
if (!ret && err)
ret = err;
out:
@@ -2163,9 +2257,7 @@ static int reiserfs_release_dquot(struct dquot *dquot)
}
ret = dquot_release(dquot);
reiserfs_write_lock(dquot->dq_sb);
- err =
- journal_end(&th, dquot->dq_sb,
- REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
+ err = journal_end(&th);
if (!ret && err)
ret = err;
reiserfs_write_unlock(dquot->dq_sb);
@@ -2198,7 +2290,7 @@ static int reiserfs_write_info(struct super_block *sb, int type)
depth = reiserfs_write_unlock_nested(sb);
ret = dquot_commit_info(sb, type);
reiserfs_write_lock_nested(sb, depth);
- err = journal_end(&th, sb, 2);
+ err = journal_end(&th);
if (!ret && err)
ret = err;
out:
@@ -2238,7 +2330,10 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
goto out;
}
inode = path->dentry->d_inode;
- /* We must not pack tails for quota files on reiserfs for quota IO to work */
+ /*
+ * We must not pack tails for quota files on reiserfs for quota
+ * IO to work
+ */
if (!(REISERFS_I(inode)->i_flags & i_nopack_mask)) {
err = reiserfs_unpack(inode, NULL);
if (err) {
@@ -2268,7 +2363,7 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
err = journal_begin(&th, sb, 1);
if (err)
goto out;
- err = journal_end_sync(&th, sb, 1);
+ err = journal_end_sync(&th);
if (err)
goto out;
}
@@ -2279,10 +2374,12 @@ out:
return err;
}
-/* Read data from quotafile - avoid pagecache and such because we cannot afford
+/*
+ * Read data from quotafile - avoid pagecache and such because we cannot afford
* acquiring the locks... As quota files are never truncated and quota code
* itself serializes the operations (and no one else should touch the files)
- * we don't have to be afraid of races */
+ * we don't have to be afraid of races
+ */
static ssize_t reiserfs_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off)
{
@@ -2303,7 +2400,10 @@ static ssize_t reiserfs_quota_read(struct super_block *sb, int type, char *data,
sb->s_blocksize - offset <
toread ? sb->s_blocksize - offset : toread;
tmp_bh.b_state = 0;
- /* Quota files are without tails so we can safely use this function */
+ /*
+ * Quota files are without tails so we can safely
+ * use this function
+ */
reiserfs_write_lock(sb);
err = reiserfs_get_block(inode, blk, &tmp_bh, 0);
reiserfs_write_unlock(sb);
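
The surrounding loop splits an arbitrary (off, len) request into at most block-sized chunks, with the first chunk shortened when off is not block-aligned. The same arithmetic in stand-alone form, with made-up sizes:

#include <stdio.h>

int main(void)
{
	size_t blocksize = 4096;	/* assumed sb->s_blocksize */
	size_t off = 1000, len = 10000;	/* made-up request */
	size_t toread = len;

	while (toread > 0) {
		size_t offset = off % blocksize;	/* start within block */
		size_t chunk = blocksize - offset < toread ?
			       blocksize - offset : toread;

		printf("block %zu: %zu bytes from in-block offset %zu\n",
		       off / blocksize, chunk, offset);
		off += chunk;
		toread -= chunk;
	}
	return 0;
}
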
@@ -2326,8 +2426,10 @@ static ssize_t reiserfs_quota_read(struct super_block *sb, int type, char *data,
return len;
}
-/* Write to quotafile (we know the transaction is already started and has
- * enough credits) */
+/*
+ * Write to quotafile (we know the transaction is already started and has
+ * enough credits)
+ */
static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off)
{
@@ -2368,7 +2470,7 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
unlock_buffer(bh);
reiserfs_write_lock(sb);
reiserfs_prepare_for_journal(sb, bh, 1);
- journal_mark_dirty(current->journal_info, sb, bh);
+ journal_mark_dirty(current->journal_info, bh);
if (!journal_quota)
reiserfs_add_ordered_list(inode, bh);
reiserfs_write_unlock(sb);
@@ -2402,18 +2504,18 @@ static int __init init_reiserfs_fs(void)
{
int ret;
- if ((ret = init_inodecache())) {
+ ret = init_inodecache();
+ if (ret)
return ret;
- }
reiserfs_proc_info_global_init();
ret = register_filesystem(&reiserfs_fs_type);
+ if (ret)
+ goto out;
- if (ret == 0) {
- return 0;
- }
-
+ return 0;
+out:
reiserfs_proc_info_global_done();
destroy_inodecache();
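
The init_reiserfs_fs() change above converts the nested-if style into the kernel's usual goto-unwind shape; the pattern in miniature (setup_a/setup_b/teardown_a are hypothetical stand-ins):

#include <stdio.h>

/* Hypothetical setup steps standing in for init_inodecache() and friends. */
static int setup_a(void) { return 0; }
static int setup_b(void) { return -1; }	/* pretend this step fails */
static void teardown_a(void) { printf("unwound a\n"); }

static int init_sketch(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;
	ret = setup_b();
	if (ret)
		goto out_a;	/* unwind completed steps in reverse order */
	return 0;
out_a:
	teardown_a();
	return ret;
}

int main(void)
{
	printf("init_sketch() = %d\n", init_sketch());
	return 0;
}
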
diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c
index 5e2624d12f7..f41e19b4bb4 100644
--- a/fs/reiserfs/tail_conversion.c
+++ b/fs/reiserfs/tail_conversion.c
@@ -1,5 +1,6 @@
/*
- * Copyright 1999 Hans Reiser, see reiserfs/README for licensing and copyright details
+ * Copyright 1999 Hans Reiser, see reiserfs/README for licensing and copyright
+ * details
*/
#include <linux/time.h>
@@ -7,29 +8,41 @@
#include <linux/buffer_head.h>
#include "reiserfs.h"
-/* access to tail : when one is going to read tail it must make sure, that is not running.
- direct2indirect and indirect2direct can not run concurrently */
+/*
+ * access to tail: when one is going to read the tail it must make sure
+ * that a conversion is not running. direct2indirect and indirect2direct
+ * can not run concurrently
+ */
-/* Converts direct items to an unformatted node. Panics if file has no
- tail. -ENOSPC if no disk space for conversion */
-/* path points to first direct item of the file regarless of how many of
- them are there */
+/*
+ * Converts direct items to an unformatted node. Panics if file has no
+ * tail. -ENOSPC if no disk space for conversion
+ */
+/*
+ * path points to first direct item of the file regardless of how many of
+ * them are there
+ */
int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
struct treepath *path, struct buffer_head *unbh,
loff_t tail_offset)
{
struct super_block *sb = inode->i_sb;
struct buffer_head *up_to_date_bh;
- struct item_head *p_le_ih = PATH_PITEM_HEAD(path);
+ struct item_head *p_le_ih = tp_item_head(path);
unsigned long total_tail = 0;
- struct cpu_key end_key; /* Key to search for the last byte of the
- converted item. */
- struct item_head ind_ih; /* new indirect item to be inserted or
- key of unfm pointer to be pasted */
- int blk_size, retval; /* returned value for reiserfs_insert_item and clones */
- unp_t unfm_ptr; /* Handle on an unformatted node
- that will be inserted in the
- tree. */
+
+ /* Key to search for the last byte of the converted item. */
+ struct cpu_key end_key;
+
+ /*
+ * new indirect item to be inserted or key
+ * of unfm pointer to be pasted
+ */
+ struct item_head ind_ih;
+ int blk_size;
+ /* returned value for reiserfs_insert_item and clones */
+ int retval;
+ /* Handle on an unformatted node that will be inserted in the tree. */
+ unp_t unfm_ptr;
BUG_ON(!th->t_trans_id);
@@ -37,8 +50,10 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
blk_size = sb->s_blocksize;
- /* and key to search for append or insert pointer to the new
- unformatted node. */
+ /*
+ * and key to search for append or insert pointer to the new
+ * unformatted node.
+ */
copy_item_head(&ind_ih, p_le_ih);
set_le_ih_k_offset(&ind_ih, tail_offset);
set_le_ih_k_type(&ind_ih, TYPE_INDIRECT);
@@ -55,7 +70,7 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
return -EIO;
}
- p_le_ih = PATH_PITEM_HEAD(path);
+ p_le_ih = tp_item_head(path);
unfm_ptr = cpu_to_le32(unbh->b_blocknr);
@@ -76,36 +91,43 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
if (retval) {
return retval;
}
- // note: from here there are two keys which have matching first
- // three key components. They only differ by the fourth one.
+ /*
+ * note: from here there are two keys which have matching first
+ * three key components. They only differ by the fourth one.
+ */
/* Set the key to search for the direct items of the file */
make_cpu_key(&end_key, inode, max_reiserfs_offset(inode), TYPE_DIRECT,
4);
- /* Move bytes from the direct items to the new unformatted node
- and delete them. */
+ /*
+ * Move bytes from the direct items to the new unformatted node
+ * and delete them.
+ */
while (1) {
int tail_size;
- /* end_key.k_offset is set so, that we will always have found
- last item of the file */
+ /*
+		 * end_key.k_offset is set so that we will always have
+		 * found the last item of the file
+ */
if (search_for_position_by_key(sb, &end_key, path) ==
POSITION_FOUND)
reiserfs_panic(sb, "PAP-14050",
"direct item (%K) not found", &end_key);
- p_le_ih = PATH_PITEM_HEAD(path);
+ p_le_ih = tp_item_head(path);
RFALSE(!is_direct_le_ih(p_le_ih),
"vs-14055: direct item expected(%K), found %h",
&end_key, p_le_ih);
tail_size = (le_ih_k_offset(p_le_ih) & (blk_size - 1))
+ ih_item_len(p_le_ih) - 1;
- /* we only send the unbh pointer if the buffer is not up to date.
- ** this avoids overwriting good data from writepage() with old data
- ** from the disk or buffer cache
- ** Special case: unbh->b_page will be NULL if we are coming through
- ** DIRECT_IO handler here.
+ /*
+ * we only send the unbh pointer if the buffer is not
+ * up to date. this avoids overwriting good data from
+ * writepage() with old data from the disk or buffer cache
+ * Special case: unbh->b_page will be NULL if we are coming
+ * through DIRECT_IO handler here.
*/
if (!unbh->b_page || buffer_uptodate(unbh)
|| PageUptodate(unbh->b_page)) {
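
The tail_size computation a few lines up uses the power-of-two mask trick: for power-of-two blk_size, offset & (blk_size - 1) is the offset within the block. For example:

#include <stdio.h>

int main(void)
{
	unsigned long blk_size = 4096;	/* must be a power of two */
	unsigned long offsets[] = { 1, 4096, 5000, 12288 };
	int i;

	for (i = 0; i < 4; i++)
		printf("offset %5lu -> in-block %4lu\n",
		       offsets[i], offsets[i] & (blk_size - 1));
	return 0;
}
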
@@ -117,13 +139,15 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
up_to_date_bh);
total_tail += retval;
+
+ /* done: file does not have direct items anymore */
if (tail_size == retval)
- // done: file does not have direct items anymore
break;
}
- /* if we've copied bytes from disk into the page, we need to zero
- ** out the unused part of the block (it was not up to date before)
+ /*
+ * if we've copied bytes from disk into the page, we need to zero
+ * out the unused part of the block (it was not up to date before)
*/
if (up_to_date_bh) {
unsigned pgoff =
@@ -146,9 +170,11 @@ void reiserfs_unmap_buffer(struct buffer_head *bh)
BUG();
}
clear_buffer_dirty(bh);
- /* Remove the buffer from whatever list it belongs to. We are mostly
- interested in removing it from per-sb j_dirty_buffers list, to avoid
- BUG() on attempt to write not mapped buffer */
+ /*
+ * Remove the buffer from whatever list it belongs to. We are mostly
+ * interested in removing it from per-sb j_dirty_buffers list, to avoid
+ * BUG() on attempt to write not mapped buffer
+ */
if ((!list_empty(&bh->b_assoc_buffers) || bh->b_private) && bh->b_page) {
struct inode *inode = bh->b_page->mapping->host;
struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
@@ -164,12 +190,14 @@ void reiserfs_unmap_buffer(struct buffer_head *bh)
unlock_buffer(bh);
}
-/* this first locks inode (neither reads nor sync are permitted),
- reads tail through page cache, insert direct item. When direct item
- inserted successfully inode is left locked. Return value is always
- what we expect from it (number of cut bytes). But when tail remains
- in the unformatted node, we set mode to SKIP_BALANCING and unlock
- inode */
+/*
+ * this first locks inode (neither reads nor sync are permitted),
+ * reads tail through page cache, insert direct item. When direct item
+ * inserted successfully inode is left locked. Return value is always
+ * what we expect from it (number of cut bytes). But when tail remains
+ * in the unformatted node, we set mode to SKIP_BALANCING and unlock
+ * inode
+ */
int indirect2direct(struct reiserfs_transaction_handle *th,
struct inode *inode, struct page *page,
struct treepath *path, /* path to the indirect item. */
@@ -194,7 +222,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th,
*mode = M_SKIP_BALANCING;
/* store item head path points to. */
- copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
+ copy_item_head(&s_ih, tp_item_head(path));
tail_len = (n_new_file_size & (block_size - 1));
if (get_inode_sd_version(inode) == STAT_DATA_V2)
@@ -207,9 +235,11 @@ int indirect2direct(struct reiserfs_transaction_handle *th,
1) * sb->s_blocksize;
pos1 = pos;
- // we are protected by i_mutex. The tail can not disapper, not
- // append can be done either
- // we are in truncate or packing tail in file_release
+ /*
+	 * we are protected by i_mutex. The tail can not disappear, nor
+	 * can an append be done either;
+	 * we are in truncate or packing tail in file_release
+ */
tail = (char *)kmap(page); /* this can schedule */
@@ -220,7 +250,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th,
reiserfs_panic(sb, "PAP-5520",
"item to be converted %K does not exist",
item_key);
- copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
+ copy_item_head(&s_ih, tp_item_head(path));
#ifdef CONFIG_REISERFS_CHECK
pos = le_ih_k_offset(&s_ih) - 1 +
(ih_item_len(&s_ih) / UNFM_P_SIZE -
@@ -236,9 +266,10 @@ int indirect2direct(struct reiserfs_transaction_handle *th,
pos1 + 1, TYPE_DIRECT, round_tail_len,
0xffff /*ih_free_space */ );
- /* we want a pointer to the first byte of the tail in the page.
- ** the page was locked and this part of the page was up to date when
- ** indirect2direct was called, so we know the bytes are still valid
+ /*
+ * we want a pointer to the first byte of the tail in the page.
+ * the page was locked and this part of the page was up to date when
+ * indirect2direct was called, so we know the bytes are still valid
*/
tail = tail + (pos & (PAGE_CACHE_SIZE - 1));
@@ -250,12 +281,14 @@ int indirect2direct(struct reiserfs_transaction_handle *th,
/* Insert tail as new direct item in the tree */
if (reiserfs_insert_item(th, path, &key, &s_ih, inode,
tail ? tail : NULL) < 0) {
- /* No disk memory. So we can not convert last unformatted node
- to the direct item. In this case we used to adjust
- indirect items's ih_free_space. Now ih_free_space is not
- used, it would be ideal to write zeros to corresponding
- unformatted node. For now i_size is considered as guard for
- going out of file size */
+ /*
+ * No disk memory. So we can not convert last unformatted node
+ * to the direct item. In this case we used to adjust
+	 * indirect item's ih_free_space. Now ih_free_space is not
+ * used, it would be ideal to write zeros to corresponding
+ * unformatted node. For now i_size is considered as guard for
+ * going out of file size
+ */
kunmap(page);
return block_size - round_tail_len;
}
@@ -264,12 +297,16 @@ int indirect2direct(struct reiserfs_transaction_handle *th,
/* make sure to get the i_blocks changes from reiserfs_insert_item */
reiserfs_update_sd(th, inode);
- // note: we have now the same as in above direct2indirect
- // conversion: there are two keys which have matching first three
- // key components. They only differ by the fouhth one.
+ /*
+ * note: we have now the same as in above direct2indirect
+ * conversion: there are two keys which have matching first three
+ * key components. They only differ by the fourth one.
+ */
- /* We have inserted new direct item and must remove last
- unformatted node. */
+ /*
+ * We have inserted new direct item and must remove last
+ * unformatted node.
+ */
*mode = M_CUT;
/* we store position of first direct item in the in-core inode */
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 5cdfbd638b5..ca416d099e7 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -56,9 +56,11 @@
#define XAROOT_NAME "xattrs"
-/* Helpers for inode ops. We do this so that we don't have all the VFS
+/*
+ * Helpers for inode ops. We do this so that we don't have all the VFS
* overhead and also for proper i_mutex annotation.
- * dir->i_mutex must be held for all of them. */
+ * dir->i_mutex must be held for all of them.
+ */
#ifdef CONFIG_REISERFS_FS_XATTR
static int xattr_create(struct inode *dir, struct dentry *dentry, int mode)
{
@@ -73,10 +75,12 @@ static int xattr_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
return dir->i_op->mkdir(dir, dentry, mode);
}
-/* We use I_MUTEX_CHILD here to silence lockdep. It's safe because xattr
+/*
+ * We use I_MUTEX_CHILD here to silence lockdep. It's safe because xattr
* mutation ops aren't called during rename or splace, which are the
* only other users of I_MUTEX_CHILD. It violates the ordering, but that's
- * better than allocating another subclass just for this code. */
+ * better than allocating another subclass just for this code.
+ */
static int xattr_unlink(struct inode *dir, struct dentry *dentry)
{
int error;
@@ -166,9 +170,11 @@ static struct dentry *open_xa_dir(const struct inode *inode, int flags)
return xadir;
}
-/* The following are side effects of other operations that aren't explicitly
+/*
+ * The following are side effects of other operations that aren't explicitly
* modifying extended attributes. This includes operations such as permissions
- * or ownership changes, object deletions, etc. */
+ * or ownership changes, object deletions, etc.
+ */
struct reiserfs_dentry_buf {
struct dir_context ctx;
struct dentry *xadir;
@@ -267,11 +273,13 @@ static int reiserfs_for_each_xattr(struct inode *inode,
cleanup_dentry_buf(&buf);
if (!err) {
- /* We start a transaction here to avoid a ABBA situation
+ /*
+	 * We start a transaction here to avoid an ABBA situation
* between the xattr root's i_mutex and the journal lock.
* This doesn't incur much additional overhead since the
* new transaction will just nest inside the
- * outer transaction. */
+ * outer transaction.
+ */
int blocks = JOURNAL_PER_BALANCE_CNT * 2 + 2 +
4 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
struct reiserfs_transaction_handle th;
@@ -284,7 +292,7 @@ static int reiserfs_for_each_xattr(struct inode *inode,
I_MUTEX_XATTR);
err = action(dir, data);
reiserfs_write_lock(inode->i_sb);
- jerror = journal_end(&th, inode->i_sb, blocks);
+ jerror = journal_end(&th);
reiserfs_write_unlock(inode->i_sb);
mutex_unlock(&dir->d_parent->d_inode->i_mutex);
err = jerror ?: err;
@@ -349,9 +357,11 @@ int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs)
}
#ifdef CONFIG_REISERFS_FS_XATTR
-/* Returns a dentry corresponding to a specific extended attribute file
+/*
+ * Returns a dentry corresponding to a specific extended attribute file
* for the inode. If flags allow, the file is created. Otherwise, a
- * valid or negative dentry, or an error is returned. */
+ * valid or negative dentry, or an error is returned.
+ */
static struct dentry *xattr_lookup(struct inode *inode, const char *name,
int flags)
{
@@ -400,8 +410,10 @@ static struct page *reiserfs_get_page(struct inode *dir, size_t n)
{
struct address_space *mapping = dir->i_mapping;
struct page *page;
- /* We can deadlock if we try to free dentries,
- and an unlink/rmdir has just occurred - GFP_NOFS avoids this */
+ /*
+ * We can deadlock if we try to free dentries,
+ * and an unlink/rmdir has just occurred - GFP_NOFS avoids this
+ */
mapping_set_gfp_mask(mapping, GFP_NOFS);
page = read_mapping_page(mapping, n >> PAGE_CACHE_SHIFT, NULL);
if (!IS_ERR(page)) {
@@ -411,7 +423,7 @@ static struct page *reiserfs_get_page(struct inode *dir, size_t n)
}
return page;
- fail:
+fail:
reiserfs_put_page(page);
return ERR_PTR(-EIO);
}
@@ -589,7 +601,7 @@ int reiserfs_xattr_set(struct inode *inode, const char *name,
buffer, buffer_size, flags);
reiserfs_write_lock(inode->i_sb);
- error2 = journal_end(&th, inode->i_sb, jbegin_count);
+ error2 = journal_end(&th);
reiserfs_write_unlock(inode->i_sb);
if (error == 0)
error = error2;
@@ -615,8 +627,10 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer,
if (name == NULL)
return -EINVAL;
- /* We can't have xattrs attached to v1 items since they don't have
- * generation numbers */
+ /*
+ * We can't have xattrs attached to v1 items since they don't have
+ * generation numbers
+ */
if (get_inode_sd_version(inode) == STAT_DATA_V1)
return -EOPNOTSUPP;
@@ -913,12 +927,16 @@ static const struct xattr_handler *reiserfs_xattr_handlers[] = {
static int xattr_mount_check(struct super_block *s)
{
- /* We need generation numbers to ensure that the oid mapping is correct
- * v3.5 filesystems don't have them. */
+ /*
+	 * We need generation numbers to ensure that the oid mapping is correct;
+ * v3.5 filesystems don't have them.
+ */
if (old_format_only(s)) {
if (reiserfs_xattrs_optional(s)) {
- /* Old format filesystem, but optional xattrs have
- * been enabled. Error out. */
+ /*
+ * Old format filesystem, but optional xattrs have
+ * been enabled. Error out.
+ */
reiserfs_warning(s, "jdm-2005",
"xattrs/ACLs not supported "
"on pre-v3.6 format filesystems. "
@@ -972,9 +990,11 @@ int reiserfs_lookup_privroot(struct super_block *s)
return err;
}
-/* We need to take a copy of the mount flags since things like
+/*
+ * We need to take a copy of the mount flags since things like
* MS_RDONLY don't get set until *after* we're called.
- * mount_flags != mount_options */
+ * mount_flags != mount_options
+ */
int reiserfs_xattr_init(struct super_block *s, int mount_flags)
{
int err = 0;
@@ -1007,8 +1027,8 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
error:
if (err) {
- clear_bit(REISERFS_XATTRS_USER, &(REISERFS_SB(s)->s_mount_opt));
- clear_bit(REISERFS_POSIXACL, &(REISERFS_SB(s)->s_mount_opt));
+ clear_bit(REISERFS_XATTRS_USER, &REISERFS_SB(s)->s_mount_opt);
+ clear_bit(REISERFS_POSIXACL, &REISERFS_SB(s)->s_mount_opt);
}
/* The super_block MS_POSIXACL must mirror the (no)acl mount option. */
diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h
index f59626c5d33..857ec7e3016 100644
--- a/fs/reiserfs/xattr.h
+++ b/fs/reiserfs/xattr.h
@@ -61,7 +61,8 @@ static inline loff_t reiserfs_xattr_nblocks(struct inode *inode, loff_t size)
return ret;
}
-/* We may have to create up to 3 objects: xattr root, xattr dir, xattr file.
+/*
+ * We may have to create up to 3 objects: xattr root, xattr dir, xattr file.
* Let's try to be smart about it.
* xattr root: We cache it. If it's not cached, we may need to create it.
* xattr dir: If anything has been loaded for this inode, we can set a flag
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index a6ce532402d..44503e29379 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -25,8 +25,10 @@ reiserfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
int size = acl ? posix_acl_xattr_size(acl->a_count) : 0;
- /* Pessimism: We can't assume that anything from the xattr root up
- * has been created. */
+ /*
+ * Pessimism: We can't assume that anything from the xattr root up
+ * has been created.
+ */
jcreate_blocks = reiserfs_xattr_jcreate_nblocks(inode) +
reiserfs_xattr_nblocks(inode, size) * 2;
@@ -37,7 +39,7 @@ reiserfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
if (error == 0) {
error = __reiserfs_set_acl(&th, inode, type, acl);
reiserfs_write_lock(inode->i_sb);
- error2 = journal_end(&th, inode->i_sb, jcreate_blocks);
+ error2 = journal_end(&th);
reiserfs_write_unlock(inode->i_sb);
if (error2)
error = error2;
@@ -111,7 +113,7 @@ static struct posix_acl *reiserfs_posix_acl_from_disk(const void *value, size_t
goto fail;
return acl;
- fail:
+fail:
posix_acl_release(acl);
return ERR_PTR(-EINVAL);
}
@@ -164,7 +166,7 @@ static void *reiserfs_posix_acl_to_disk(const struct posix_acl *acl, size_t * si
}
return (char *)ext_acl;
- fail:
+fail:
kfree(ext_acl);
return ERR_PTR(-EINVAL);
}
@@ -208,8 +210,10 @@ struct posix_acl *reiserfs_get_acl(struct inode *inode, int type)
retval = reiserfs_xattr_get(inode, name, value, size);
if (retval == -ENODATA || retval == -ENOSYS) {
- /* This shouldn't actually happen as it should have
- been caught above.. but just in case */
+ /*
+ * This shouldn't actually happen as it should have
+	 * been caught above, but just in case
+ */
acl = NULL;
} else if (retval < 0) {
acl = ERR_PTR(retval);
@@ -290,8 +294,10 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
return error;
}
-/* dir->i_mutex: locked,
- * inode is new and not released into the wild yet */
+/*
+ * dir->i_mutex: locked,
+ * inode is new and not released into the wild yet
+ */
int
reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
struct inode *dir, struct dentry *dentry,
@@ -304,14 +310,18 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
if (S_ISLNK(inode->i_mode))
return 0;
- /* ACLs can only be used on "new" objects, so if it's an old object
- * there is nothing to inherit from */
+ /*
+ * ACLs can only be used on "new" objects, so if it's an old object
+ * there is nothing to inherit from
+ */
if (get_inode_sd_version(dir) == STAT_DATA_V1)
goto apply_umask;
- /* Don't apply ACLs to objects in the .reiserfs_priv tree.. This
+ /*
+	 * Don't apply ACLs to objects in the .reiserfs_priv tree. This
* would be useless since permissions are ignored, and a pain because
- * it introduces locking cycles */
+ * it introduces locking cycles
+ */
if (IS_PRIVATE(dir)) {
inode->i_flags |= S_PRIVATE;
goto apply_umask;
@@ -335,7 +345,7 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
return err;
- apply_umask:
+apply_umask:
/* no ACL, apply umask */
inode->i_mode &= ~current_umask();
return err;