author		Jaegeuk Kim <jaegeuk.kim@samsung.com>	2013-06-27 09:59:40 +0900
committer	Jaegeuk Kim <jaegeuk.kim@samsung.com>	2013-07-02 08:48:15 +0900
commit		763bfe1bc575dcce56dc5c570dc005d94911705f (patch)
tree		4588fcb84b11e97a33dc3b75908dc6b0fccc735b /fs
parent		6cc4af56066d8e9c62584cf61c6ce50fd0ab139a (diff)
f2fs: remove reusing any prefree segments
This patch removes check_prefree_segments(), which was initially designed to
enhance performance by narrowing the range of LBA usage across the whole block
device.

When allocating a new segment, f2fs previously tried to find a suitable prefree
segment and, if it found one, reused that segment for further data or node
block allocation. However, this turned out to be the wrong approach, since
prefree segments contain data or node blocks that the roll-forward mechanism
needs after a sudden power-off.

Let's assume the following scenario.

/* write 8MB with fsync */
for (i = 0; i < 2048; i++) {
	offset = i * 4096;
	write(fd, offset, 4KB);
	fsync(fd);
}

In this case, the naive segment allocation sequence will be like:
 data segment: x, x+1, x+2, x+3
 node segment: y, y+1, y+2, y+3.

But, if we reuse prefree segments, the sequence can become:
 data segment: x, x+1, y, y+1
 node segment: y, y+1, y+2, y+3.
This happens because y, y+1, and y+2 become prefree segments one by one, and
they are then reused for data allocation.

After running this workload, consider how to recover the latest inode with its
data. If prefree segments such as y or y+1 have been reused, the old node
blocks are lost, so f2fs cannot even start roll-forward recovery. Therefore,
reusing prefree segments should be removed.

Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
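For reference, the pseudo-loop above can be written as a small self-contained
userspace program. This is only an illustrative sketch, not part of the patch:
the mount point /mnt/f2fs/testfile is an assumption, and pwrite() stands in
for the pseudo call write(fd, offset, 4KB).

/*
 * Hypothetical reproducer for the fsync-heavy workload sketched above.
 * The mount point and file name are assumptions for illustration only.
 */
#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	off_t offset;
	int i;
	int fd = open("/mnt/f2fs/testfile", O_CREAT | O_WRONLY | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(buf, 0xab, sizeof(buf));

	/* write 8MB with fsync: 2048 x 4KB writes, one fsync per write */
	for (i = 0; i < 2048; i++) {
		offset = (off_t)i * 4096;
		if (pwrite(fd, buf, sizeof(buf), offset) != (ssize_t)sizeof(buf)) {
			perror("pwrite");
			return 1;
		}
		if (fsync(fd)) {
			perror("fsync");
			return 1;
		}
	}
	close(fd);
	return 0;
}

Run on an f2fs mount, this produces the 2048 write-plus-fsync pattern whose
data/node segment sequences are discussed above.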
Diffstat (limited to 'fs')
-rw-r--r--	fs/f2fs/segment.c	56
1 file changed, 1 insertion(+), 55 deletions(-)
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 3ac4d29f0cd..a86d125a988 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -309,56 +309,6 @@ static void write_sum_page(struct f2fs_sb_info *sbi,
 	f2fs_put_page(page, 1);
 }
 
-static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi, int type)
-{
-	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-	unsigned long *prefree_segmap = dirty_i->dirty_segmap[PRE];
-	unsigned int segno;
-	unsigned int ofs = 0;
-
-	/*
-	 * If there is not enough reserved sections,
-	 * we should not reuse prefree segments.
-	 */
-	if (has_not_enough_free_secs(sbi, 0))
-		return NULL_SEGNO;
-
-	/*
-	 * NODE page should not reuse prefree segment,
-	 * since those information is used for SPOR.
-	 */
-	if (IS_NODESEG(type))
-		return NULL_SEGNO;
-next:
-	segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs);
-	ofs += sbi->segs_per_sec;
-
-	if (segno < TOTAL_SEGS(sbi)) {
-		int i;
-
-		/* skip intermediate segments in a section */
-		if (segno % sbi->segs_per_sec)
-			goto next;
-
-		/* skip if the section is currently used */
-		if (sec_usage_check(sbi, GET_SECNO(sbi, segno)))
-			goto next;
-
-		/* skip if whole section is not prefree */
-		for (i = 1; i < sbi->segs_per_sec; i++)
-			if (!test_bit(segno + i, prefree_segmap))
-				goto next;
-
-		/* skip if whole section was not free at the last checkpoint */
-		for (i = 0; i < sbi->segs_per_sec; i++)
-			if (get_seg_entry(sbi, segno + i)->ckpt_valid_blocks)
-				goto next;
-
-		return segno;
-	}
-	return NULL_SEGNO;
-}
-
 static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
 {
 	struct curseg_info *curseg = CURSEG_I(sbi, type);
@@ -597,11 +547,7 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
 		goto out;
 	}
 
-	curseg->next_segno = check_prefree_segments(sbi, type);
-
-	if (curseg->next_segno != NULL_SEGNO)
-		change_curseg(sbi, type, false);
-	else if (type == CURSEG_WARM_NODE)
+	if (type == CURSEG_WARM_NODE)
 		new_curseg(sbi, type, false);
 	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
 		new_curseg(sbi, type, false);