From 351df4b201157351c7d26bf12c3eeb9dbce98854 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Fri, 2 Nov 2012 17:09:16 +0900 Subject: f2fs: add segment operations This adds specific functions not only to manage dirty/free segments, SIT pages, a cache for SIT entries, and summary entries, but also to allocate free blocks and write three types of pages: data, node, and meta. - F2FS maintains three types of bitmaps in memory, which indicate free, prefree, and dirty segments respectively. - The key information of an SIT entry consists of a segment number, the number of valid blocks in the segment, a bitmap to identify there-in valid or invalid blocks. - An SIT page is composed of a certain range of SIT entries, which is maintained by the address space of meta_inode. - To cache SIT entries, a simple array is used. The index for the array is the segment number. - A summary entry for data contains the parent node information. A summary entry for node contains its node offset from the inode. - F2FS manages information about six active logs and those summary entries in memory. Whenever one of them is changed, its summary entries are flushed to its SIT page maintained by the address space of meta_inode. - This patch adds a default block allocation function which supports heap-based allocation policy. - This patch adds core functions to write data, node, and meta pages. Since LFS basically produces a series of sequential writes, F2FS merges sequential bios with a single one as much as possible to reduce the IO scheduling overhead. Signed-off-by: Jaegeuk Kim --- fs/f2fs/segment.c | 1798 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1798 insertions(+) create mode 100644 fs/f2fs/segment.c (limited to 'fs/f2fs/segment.c') diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c new file mode 100644 index 00000000000..ed7c079cfc7 --- /dev/null +++ b/fs/f2fs/segment.c @@ -0,0 +1,1798 @@ +/** + * fs/f2fs/segment.c + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include + +#include "f2fs.h" +#include "segment.h" +#include "node.h" + +static int need_to_flush(struct f2fs_sb_info *sbi) +{ + unsigned int pages_per_sec = (1 << sbi->log_blocks_per_seg) * + sbi->segs_per_sec; + int node_secs = ((get_pages(sbi, F2FS_DIRTY_NODES) + pages_per_sec - 1) + >> sbi->log_blocks_per_seg) / sbi->segs_per_sec; + int dent_secs = ((get_pages(sbi, F2FS_DIRTY_DENTS) + pages_per_sec - 1) + >> sbi->log_blocks_per_seg) / sbi->segs_per_sec; + + if (sbi->por_doing) + return 0; + + if (free_sections(sbi) <= (node_secs + 2 * dent_secs + + reserved_sections(sbi))) + return 1; + return 0; +} + +/** + * This function balances dirty node and dentry pages. + * In addition, it controls garbage collection. + */ +void f2fs_balance_fs(struct f2fs_sb_info *sbi) +{ + struct writeback_control wbc = { + .sync_mode = WB_SYNC_ALL, + .nr_to_write = LONG_MAX, + .for_reclaim = 0, + }; + + if (sbi->por_doing) + return; + + /* + * We should do checkpoint when there are so many dirty node pages + * with enough free segments. After then, we should do GC. 
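+ * (Reading the code below: need_to_flush() estimates whether the dirty
+ * node and dentry pages need more sections than are free beyond the
+ * reserved ones; if so, dirty dir inodes and node pages are written
+ * back, and f2fs_gc() runs afterwards if free sections are still short.)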
+ */ + if (need_to_flush(sbi)) { + sync_dirty_dir_inodes(sbi); + sync_node_pages(sbi, 0, &wbc); + } + + if (has_not_enough_free_secs(sbi)) { + mutex_lock(&sbi->gc_mutex); + f2fs_gc(sbi, 1); + } +} + +static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, + enum dirty_type dirty_type) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + + /* need not be added */ + if (IS_CURSEG(sbi, segno)) + return; + + if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type])) + dirty_i->nr_dirty[dirty_type]++; + + if (dirty_type == DIRTY) { + struct seg_entry *sentry = get_seg_entry(sbi, segno); + dirty_type = sentry->type; + if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type])) + dirty_i->nr_dirty[dirty_type]++; + } +} + +static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, + enum dirty_type dirty_type) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + + if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type])) + dirty_i->nr_dirty[dirty_type]--; + + if (dirty_type == DIRTY) { + struct seg_entry *sentry = get_seg_entry(sbi, segno); + dirty_type = sentry->type; + if (test_and_clear_bit(segno, + dirty_i->dirty_segmap[dirty_type])) + dirty_i->nr_dirty[dirty_type]--; + clear_bit(segno, dirty_i->victim_segmap[FG_GC]); + clear_bit(segno, dirty_i->victim_segmap[BG_GC]); + } +} + +/** + * Should not occur error such as -ENOMEM. + * Adding dirty entry into seglist is not critical operation. + * If a given segment is one of current working segments, it won't be added. + */ +void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + unsigned short valid_blocks; + + if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno)) + return; + + mutex_lock(&dirty_i->seglist_lock); + + valid_blocks = get_valid_blocks(sbi, segno, 0); + + if (valid_blocks == 0) { + __locate_dirty_segment(sbi, segno, PRE); + __remove_dirty_segment(sbi, segno, DIRTY); + } else if (valid_blocks < sbi->blocks_per_seg) { + __locate_dirty_segment(sbi, segno, DIRTY); + } else { + /* Recovery routine with SSR needs this */ + __remove_dirty_segment(sbi, segno, DIRTY); + } + + mutex_unlock(&dirty_i->seglist_lock); + return; +} + +/** + * Should call clear_prefree_segments after checkpoint is done. 
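+ * (set_prefree_as_free_segments() below only flips prefree segments to
+ * free in the free segmap; the PRE bits are cleared, and optional
+ * discards issued, by clear_prefree_segments() once the checkpoint has
+ * committed the freed state.)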
+ */ +static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + unsigned int segno, offset = 0; + unsigned int total_segs = TOTAL_SEGS(sbi); + + mutex_lock(&dirty_i->seglist_lock); + while (1) { + segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs, + offset); + if (segno >= total_segs) + break; + __set_test_and_free(sbi, segno); + offset = segno + 1; + } + mutex_unlock(&dirty_i->seglist_lock); +} + +void clear_prefree_segments(struct f2fs_sb_info *sbi) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + unsigned int segno, offset = 0; + unsigned int total_segs = TOTAL_SEGS(sbi); + + mutex_lock(&dirty_i->seglist_lock); + while (1) { + segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs, + offset); + if (segno >= total_segs) + break; + + offset = segno + 1; + if (test_and_clear_bit(segno, dirty_i->dirty_segmap[PRE])) + dirty_i->nr_dirty[PRE]--; + + /* Let's use trim */ + if (test_opt(sbi, DISCARD)) + blkdev_issue_discard(sbi->sb->s_bdev, + START_BLOCK(sbi, segno) << + sbi->log_sectors_per_block, + 1 << (sbi->log_sectors_per_block + + sbi->log_blocks_per_seg), + GFP_NOFS, 0); + } + mutex_unlock(&dirty_i->seglist_lock); +} + +static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno) +{ + struct sit_info *sit_i = SIT_I(sbi); + if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) + sit_i->dirty_sentries++; +} + +static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type, + unsigned int segno, int modified) +{ + struct seg_entry *se = get_seg_entry(sbi, segno); + se->type = type; + if (modified) + __mark_sit_entry_dirty(sbi, segno); +} + +static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) +{ + struct seg_entry *se; + unsigned int segno, offset; + long int new_vblocks; + + segno = GET_SEGNO(sbi, blkaddr); + + se = get_seg_entry(sbi, segno); + new_vblocks = se->valid_blocks + del; + offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1); + + BUG_ON((new_vblocks >> (sizeof(unsigned short) << 3) || + (new_vblocks > sbi->blocks_per_seg))); + + se->valid_blocks = new_vblocks; + se->mtime = get_mtime(sbi); + SIT_I(sbi)->max_mtime = se->mtime; + + /* Update valid block bitmap */ + if (del > 0) { + if (f2fs_set_bit(offset, se->cur_valid_map)) + BUG(); + } else { + if (!f2fs_clear_bit(offset, se->cur_valid_map)) + BUG(); + } + if (!f2fs_test_bit(offset, se->ckpt_valid_map)) + se->ckpt_valid_blocks += del; + + __mark_sit_entry_dirty(sbi, segno); + + /* update total number of valid blocks to be written in ckpt area */ + SIT_I(sbi)->written_valid_blocks += del; + + if (sbi->segs_per_sec > 1) + get_sec_entry(sbi, segno)->valid_blocks += del; +} + +static void refresh_sit_entry(struct f2fs_sb_info *sbi, + block_t old_blkaddr, block_t new_blkaddr) +{ + update_sit_entry(sbi, new_blkaddr, 1); + if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) + update_sit_entry(sbi, old_blkaddr, -1); +} + +void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) +{ + unsigned int segno = GET_SEGNO(sbi, addr); + struct sit_info *sit_i = SIT_I(sbi); + + BUG_ON(addr == NULL_ADDR); + if (addr == NEW_ADDR) + return; + + /* add it into sit main buffer */ + mutex_lock(&sit_i->sentry_lock); + + update_sit_entry(sbi, addr, -1); + + /* add it into dirty seglist */ + locate_dirty_segment(sbi, segno); + + mutex_unlock(&sit_i->sentry_lock); +} + +/** + * This function should be resided under the curseg_mutex lock + */ +static void __add_sum_entry(struct 
f2fs_sb_info *sbi, int type, + struct f2fs_summary *sum, unsigned short offset) +{ + struct curseg_info *curseg = CURSEG_I(sbi, type); + void *addr = curseg->sum_blk; + addr += offset * sizeof(struct f2fs_summary); + memcpy(addr, sum, sizeof(struct f2fs_summary)); + return; +} + +/** + * Calculate the number of current summary pages for writing + */ +int npages_for_summary_flush(struct f2fs_sb_info *sbi) +{ + int total_size_bytes = 0; + int valid_sum_count = 0; + int i, sum_space; + + for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { + if (sbi->ckpt->alloc_type[i] == SSR) + valid_sum_count += sbi->blocks_per_seg; + else + valid_sum_count += curseg_blkoff(sbi, i); + } + + total_size_bytes = valid_sum_count * (SUMMARY_SIZE + 1) + + sizeof(struct nat_journal) + 2 + + sizeof(struct sit_journal) + 2; + sum_space = PAGE_CACHE_SIZE - SUM_FOOTER_SIZE; + if (total_size_bytes < sum_space) + return 1; + else if (total_size_bytes < 2 * sum_space) + return 2; + return 3; +} + +/** + * Caller should put this summary page + */ +struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) +{ + return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno)); +} + +static void write_sum_page(struct f2fs_sb_info *sbi, + struct f2fs_summary_block *sum_blk, block_t blk_addr) +{ + struct page *page = grab_meta_page(sbi, blk_addr); + void *kaddr = page_address(page); + memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE); + set_page_dirty(page); + f2fs_put_page(page, 1); +} + +static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi, + int ofs_unit, int type) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + unsigned long *prefree_segmap = dirty_i->dirty_segmap[PRE]; + unsigned int segno, next_segno, i; + int ofs = 0; + + /* + * If there is not enough reserved sections, + * we should not reuse prefree segments. + */ + if (has_not_enough_free_secs(sbi)) + return NULL_SEGNO; + + /* + * NODE page should not reuse prefree segment, + * since those information is used for SPOR. 
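+ * (SPOR stands for sudden-power-off recovery: node blocks sitting in
+ * prefree segments may still be needed to recover the file system until
+ * the next checkpoint completes, so they must not be overwritten here.)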
+ */ + if (IS_NODESEG(type)) + return NULL_SEGNO; +next: + segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs++); + ofs = ((segno / ofs_unit) * ofs_unit) + ofs_unit; + if (segno < TOTAL_SEGS(sbi)) { + /* skip intermediate segments in a section */ + if (segno % ofs_unit) + goto next; + + /* skip if whole section is not prefree */ + next_segno = find_next_zero_bit(prefree_segmap, + TOTAL_SEGS(sbi), segno + 1); + if (next_segno - segno < ofs_unit) + goto next; + + /* skip if whole section was not free at the last checkpoint */ + for (i = 0; i < ofs_unit; i++) + if (get_seg_entry(sbi, segno)->ckpt_valid_blocks) + goto next; + return segno; + } + return NULL_SEGNO; +} + +/** + * Find a new segment from the free segments bitmap to right order + * This function should be returned with success, otherwise BUG + */ +static void get_new_segment(struct f2fs_sb_info *sbi, + unsigned int *newseg, bool new_sec, int dir) +{ + struct free_segmap_info *free_i = FREE_I(sbi); + unsigned int total_secs = sbi->total_sections; + unsigned int segno, secno, zoneno; + unsigned int total_zones = sbi->total_sections / sbi->secs_per_zone; + unsigned int hint = *newseg / sbi->segs_per_sec; + unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg); + unsigned int left_start = hint; + bool init = true; + int go_left = 0; + int i; + + write_lock(&free_i->segmap_lock); + + if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) { + segno = find_next_zero_bit(free_i->free_segmap, + TOTAL_SEGS(sbi), *newseg + 1); + if (segno < TOTAL_SEGS(sbi)) + goto got_it; + } +find_other_zone: + secno = find_next_zero_bit(free_i->free_secmap, total_secs, hint); + if (secno >= total_secs) { + if (dir == ALLOC_RIGHT) { + secno = find_next_zero_bit(free_i->free_secmap, + total_secs, 0); + BUG_ON(secno >= total_secs); + } else { + go_left = 1; + left_start = hint - 1; + } + } + if (go_left == 0) + goto skip_left; + + while (test_bit(left_start, free_i->free_secmap)) { + if (left_start > 0) { + left_start--; + continue; + } + left_start = find_next_zero_bit(free_i->free_secmap, + total_secs, 0); + BUG_ON(left_start >= total_secs); + break; + } + secno = left_start; +skip_left: + hint = secno; + segno = secno * sbi->segs_per_sec; + zoneno = secno / sbi->secs_per_zone; + + /* give up on finding another zone */ + if (!init) + goto got_it; + if (sbi->secs_per_zone == 1) + goto got_it; + if (zoneno == old_zoneno) + goto got_it; + if (dir == ALLOC_LEFT) { + if (!go_left && zoneno + 1 >= total_zones) + goto got_it; + if (go_left && zoneno == 0) + goto got_it; + } + for (i = 0; i < NR_CURSEG_TYPE; i++) + if (CURSEG_I(sbi, i)->zone == zoneno) + break; + + if (i < NR_CURSEG_TYPE) { + /* zone is in user, try another */ + if (go_left) + hint = zoneno * sbi->secs_per_zone - 1; + else if (zoneno + 1 >= total_zones) + hint = 0; + else + hint = (zoneno + 1) * sbi->secs_per_zone; + init = false; + goto find_other_zone; + } +got_it: + /* set it as dirty segment in free segmap */ + BUG_ON(test_bit(segno, free_i->free_segmap)); + __set_inuse(sbi, segno); + *newseg = segno; + write_unlock(&free_i->segmap_lock); +} + +static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified) +{ + struct curseg_info *curseg = CURSEG_I(sbi, type); + struct summary_footer *sum_footer; + + curseg->segno = curseg->next_segno; + curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno); + curseg->next_blkoff = 0; + curseg->next_segno = NULL_SEGNO; + + sum_footer = &(curseg->sum_blk->footer); + memset(sum_footer, 0, sizeof(struct summary_footer)); + if 
(IS_DATASEG(type)) + SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA); + if (IS_NODESEG(type)) + SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE); + __set_sit_entry_type(sbi, type, curseg->segno, modified); +} + +/** + * Allocate a current working segment. + * This function always allocates a free segment in LFS manner. + */ +static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec) +{ + struct curseg_info *curseg = CURSEG_I(sbi, type); + unsigned int segno = curseg->segno; + int dir = ALLOC_LEFT; + + write_sum_page(sbi, curseg->sum_blk, + GET_SUM_BLOCK(sbi, curseg->segno)); + if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA) + dir = ALLOC_RIGHT; + + if (test_opt(sbi, NOHEAP)) + dir = ALLOC_RIGHT; + + get_new_segment(sbi, &segno, new_sec, dir); + curseg->next_segno = segno; + reset_curseg(sbi, type, 1); + curseg->alloc_type = LFS; +} + +static void __next_free_blkoff(struct f2fs_sb_info *sbi, + struct curseg_info *seg, block_t start) +{ + struct seg_entry *se = get_seg_entry(sbi, seg->segno); + block_t ofs; + for (ofs = start; ofs < sbi->blocks_per_seg; ofs++) { + if (!f2fs_test_bit(ofs, se->ckpt_valid_map) + && !f2fs_test_bit(ofs, se->cur_valid_map)) + break; + } + seg->next_blkoff = ofs; +} + +/** + * If a segment is written by LFS manner, next block offset is just obtained + * by increasing the current block offset. However, if a segment is written by + * SSR manner, next block offset obtained by calling __next_free_blkoff + */ +static void __refresh_next_blkoff(struct f2fs_sb_info *sbi, + struct curseg_info *seg) +{ + if (seg->alloc_type == SSR) + __next_free_blkoff(sbi, seg, seg->next_blkoff + 1); + else + seg->next_blkoff++; +} + +/** + * This function always allocates a used segment (from dirty seglist) by SSR + * manner, so it should recover the existing segment information of valid blocks + */ +static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + struct curseg_info *curseg = CURSEG_I(sbi, type); + unsigned int new_segno = curseg->next_segno; + struct f2fs_summary_block *sum_node; + struct page *sum_page; + + write_sum_page(sbi, curseg->sum_blk, + GET_SUM_BLOCK(sbi, curseg->segno)); + __set_test_and_inuse(sbi, new_segno); + + mutex_lock(&dirty_i->seglist_lock); + __remove_dirty_segment(sbi, new_segno, PRE); + __remove_dirty_segment(sbi, new_segno, DIRTY); + mutex_unlock(&dirty_i->seglist_lock); + + reset_curseg(sbi, type, 1); + curseg->alloc_type = SSR; + __next_free_blkoff(sbi, curseg, 0); + + if (reuse) { + sum_page = get_sum_page(sbi, new_segno); + sum_node = (struct f2fs_summary_block *)page_address(sum_page); + memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE); + f2fs_put_page(sum_page, 1); + } +} + +/* + * flush out current segment and replace it with new segment + * This function should be returned with success, otherwise BUG + */ +static void allocate_segment_by_default(struct f2fs_sb_info *sbi, + int type, bool force) +{ + struct curseg_info *curseg = CURSEG_I(sbi, type); + unsigned int ofs_unit; + + if (force) { + new_curseg(sbi, type, true); + goto out; + } + + ofs_unit = need_SSR(sbi) ? 
1 : sbi->segs_per_sec; + curseg->next_segno = check_prefree_segments(sbi, ofs_unit, type); + + if (curseg->next_segno != NULL_SEGNO) + change_curseg(sbi, type, false); + else if (type == CURSEG_WARM_NODE) + new_curseg(sbi, type, false); + else if (need_SSR(sbi) && get_ssr_segment(sbi, type)) + change_curseg(sbi, type, true); + else + new_curseg(sbi, type, false); +out: + sbi->segment_count[curseg->alloc_type]++; +} + +void allocate_new_segments(struct f2fs_sb_info *sbi) +{ + struct curseg_info *curseg; + unsigned int old_curseg; + int i; + + for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { + curseg = CURSEG_I(sbi, i); + old_curseg = curseg->segno; + SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true); + locate_dirty_segment(sbi, old_curseg); + } +} + +static const struct segment_allocation default_salloc_ops = { + .allocate_segment = allocate_segment_by_default, +}; + +static void f2fs_end_io_write(struct bio *bio, int err) +{ + const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; + struct bio_private *p = bio->bi_private; + + do { + struct page *page = bvec->bv_page; + + if (--bvec >= bio->bi_io_vec) + prefetchw(&bvec->bv_page->flags); + if (!uptodate) { + SetPageError(page); + if (page->mapping) + set_bit(AS_EIO, &page->mapping->flags); + p->sbi->ckpt->ckpt_flags |= CP_ERROR_FLAG; + set_page_dirty(page); + } + end_page_writeback(page); + dec_page_count(p->sbi, F2FS_WRITEBACK); + } while (bvec >= bio->bi_io_vec); + + if (p->is_sync) + complete(p->wait); + kfree(p); + bio_put(bio); +} + +struct bio *f2fs_bio_alloc(struct block_device *bdev, sector_t first_sector, + int nr_vecs, gfp_t gfp_flags) +{ + struct bio *bio; +repeat: + /* allocate new bio */ + bio = bio_alloc(gfp_flags, nr_vecs); + + if (bio == NULL && (current->flags & PF_MEMALLOC)) { + while (!bio && (nr_vecs /= 2)) + bio = bio_alloc(gfp_flags, nr_vecs); + } + if (bio) { + bio->bi_bdev = bdev; + bio->bi_sector = first_sector; +retry: + bio->bi_private = kmalloc(sizeof(struct bio_private), + GFP_NOFS | __GFP_HIGH); + if (!bio->bi_private) { + cond_resched(); + goto retry; + } + } + if (bio == NULL) { + cond_resched(); + goto repeat; + } + return bio; +} + +static void do_submit_bio(struct f2fs_sb_info *sbi, + enum page_type type, bool sync) +{ + int rw = sync ? WRITE_SYNC : WRITE; + enum page_type btype = type > META ? 
META : type; + + if (type >= META_FLUSH) + rw = WRITE_FLUSH_FUA; + + if (sbi->bio[btype]) { + struct bio_private *p = sbi->bio[btype]->bi_private; + p->sbi = sbi; + sbi->bio[btype]->bi_end_io = f2fs_end_io_write; + if (type == META_FLUSH) { + DECLARE_COMPLETION_ONSTACK(wait); + p->is_sync = true; + p->wait = &wait; + submit_bio(rw, sbi->bio[btype]); + wait_for_completion(&wait); + } else { + p->is_sync = false; + submit_bio(rw, sbi->bio[btype]); + } + sbi->bio[btype] = NULL; + } +} + +void f2fs_submit_bio(struct f2fs_sb_info *sbi, enum page_type type, bool sync) +{ + down_write(&sbi->bio_sem); + do_submit_bio(sbi, type, sync); + up_write(&sbi->bio_sem); +} + +static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page, + block_t blk_addr, enum page_type type) +{ + struct block_device *bdev = sbi->sb->s_bdev; + + verify_block_addr(sbi, blk_addr); + + down_write(&sbi->bio_sem); + + inc_page_count(sbi, F2FS_WRITEBACK); + + if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1) + do_submit_bio(sbi, type, false); +alloc_new: + if (sbi->bio[type] == NULL) + sbi->bio[type] = f2fs_bio_alloc(bdev, + blk_addr << (sbi->log_blocksize - 9), + bio_get_nr_vecs(bdev), GFP_NOFS | __GFP_HIGH); + + if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) < + PAGE_CACHE_SIZE) { + do_submit_bio(sbi, type, false); + goto alloc_new; + } + + sbi->last_block_in_bio[type] = blk_addr; + + up_write(&sbi->bio_sem); +} + +static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type) +{ + struct curseg_info *curseg = CURSEG_I(sbi, type); + if (curseg->next_blkoff < sbi->blocks_per_seg) + return true; + return false; +} + +static int __get_segment_type_2(struct page *page, enum page_type p_type) +{ + if (p_type == DATA) + return CURSEG_HOT_DATA; + else + return CURSEG_HOT_NODE; +} + +static int __get_segment_type_4(struct page *page, enum page_type p_type) +{ + if (p_type == DATA) { + struct inode *inode = page->mapping->host; + + if (S_ISDIR(inode->i_mode)) + return CURSEG_HOT_DATA; + else + return CURSEG_COLD_DATA; + } else { + if (IS_DNODE(page) && !is_cold_node(page)) + return CURSEG_HOT_NODE; + else + return CURSEG_COLD_NODE; + } +} + +static int __get_segment_type_6(struct page *page, enum page_type p_type) +{ + if (p_type == DATA) { + struct inode *inode = page->mapping->host; + + if (S_ISDIR(inode->i_mode)) + return CURSEG_HOT_DATA; + else if (is_cold_data(page) || is_cold_file(inode)) + return CURSEG_COLD_DATA; + else + return CURSEG_WARM_DATA; + } else { + if (IS_DNODE(page)) + return is_cold_node(page) ? 
CURSEG_WARM_NODE : + CURSEG_HOT_NODE; + else + return CURSEG_COLD_NODE; + } +} + +static int __get_segment_type(struct page *page, enum page_type p_type) +{ + struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb); + switch (sbi->active_logs) { + case 2: + return __get_segment_type_2(page, p_type); + case 4: + return __get_segment_type_4(page, p_type); + case 6: + return __get_segment_type_6(page, p_type); + default: + BUG(); + } +} + +static void do_write_page(struct f2fs_sb_info *sbi, struct page *page, + block_t old_blkaddr, block_t *new_blkaddr, + struct f2fs_summary *sum, enum page_type p_type) +{ + struct sit_info *sit_i = SIT_I(sbi); + struct curseg_info *curseg; + unsigned int old_cursegno; + int type; + + type = __get_segment_type(page, p_type); + curseg = CURSEG_I(sbi, type); + + mutex_lock(&curseg->curseg_mutex); + + *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); + old_cursegno = curseg->segno; + + /* + * __add_sum_entry should be resided under the curseg_mutex + * because, this function updates a summary entry in the + * current summary block. + */ + __add_sum_entry(sbi, type, sum, curseg->next_blkoff); + + mutex_lock(&sit_i->sentry_lock); + __refresh_next_blkoff(sbi, curseg); + sbi->block_count[curseg->alloc_type]++; + + /* + * SIT information should be updated before segment allocation, + * since SSR needs latest valid block information. + */ + refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr); + + if (!__has_curseg_space(sbi, type)) + sit_i->s_ops->allocate_segment(sbi, type, false); + + locate_dirty_segment(sbi, old_cursegno); + locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); + mutex_unlock(&sit_i->sentry_lock); + + if (p_type == NODE) + fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg)); + + /* writeout dirty page into bdev */ + submit_write_page(sbi, page, *new_blkaddr, p_type); + + mutex_unlock(&curseg->curseg_mutex); +} + +int write_meta_page(struct f2fs_sb_info *sbi, struct page *page, + struct writeback_control *wbc) +{ + if (wbc->for_reclaim) + return AOP_WRITEPAGE_ACTIVATE; + + set_page_writeback(page); + submit_write_page(sbi, page, page->index, META); + return 0; +} + +void write_node_page(struct f2fs_sb_info *sbi, struct page *page, + unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr) +{ + struct f2fs_summary sum; + set_summary(&sum, nid, 0, 0); + do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, NODE); +} + +void write_data_page(struct inode *inode, struct page *page, + struct dnode_of_data *dn, block_t old_blkaddr, + block_t *new_blkaddr) +{ + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); + struct f2fs_summary sum; + struct node_info ni; + + BUG_ON(old_blkaddr == NULL_ADDR); + get_node_info(sbi, dn->nid, &ni); + set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); + + do_write_page(sbi, page, old_blkaddr, + new_blkaddr, &sum, DATA); +} + +void rewrite_data_page(struct f2fs_sb_info *sbi, struct page *page, + block_t old_blk_addr) +{ + submit_write_page(sbi, page, old_blk_addr, DATA); +} + +void recover_data_page(struct f2fs_sb_info *sbi, + struct page *page, struct f2fs_summary *sum, + block_t old_blkaddr, block_t new_blkaddr) +{ + struct sit_info *sit_i = SIT_I(sbi); + struct curseg_info *curseg; + unsigned int segno, old_cursegno; + struct seg_entry *se; + int type; + + segno = GET_SEGNO(sbi, new_blkaddr); + se = get_seg_entry(sbi, segno); + type = se->type; + + if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) { + if (old_blkaddr == NULL_ADDR) + type = CURSEG_COLD_DATA; + else + type = CURSEG_WARM_DATA; + 
} + curseg = CURSEG_I(sbi, type); + + mutex_lock(&curseg->curseg_mutex); + mutex_lock(&sit_i->sentry_lock); + + old_cursegno = curseg->segno; + + /* change the current segment */ + if (segno != curseg->segno) { + curseg->next_segno = segno; + change_curseg(sbi, type, true); + } + + curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) & + (sbi->blocks_per_seg - 1); + __add_sum_entry(sbi, type, sum, curseg->next_blkoff); + + refresh_sit_entry(sbi, old_blkaddr, new_blkaddr); + + locate_dirty_segment(sbi, old_cursegno); + locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); + + mutex_unlock(&sit_i->sentry_lock); + mutex_unlock(&curseg->curseg_mutex); +} + +void rewrite_node_page(struct f2fs_sb_info *sbi, + struct page *page, struct f2fs_summary *sum, + block_t old_blkaddr, block_t new_blkaddr) +{ + struct sit_info *sit_i = SIT_I(sbi); + int type = CURSEG_WARM_NODE; + struct curseg_info *curseg; + unsigned int segno, old_cursegno; + block_t next_blkaddr = next_blkaddr_of_node(page); + unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr); + + curseg = CURSEG_I(sbi, type); + + mutex_lock(&curseg->curseg_mutex); + mutex_lock(&sit_i->sentry_lock); + + segno = GET_SEGNO(sbi, new_blkaddr); + old_cursegno = curseg->segno; + + /* change the current segment */ + if (segno != curseg->segno) { + curseg->next_segno = segno; + change_curseg(sbi, type, true); + } + curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) & + (sbi->blocks_per_seg - 1); + __add_sum_entry(sbi, type, sum, curseg->next_blkoff); + + /* change the current log to the next block addr in advance */ + if (next_segno != segno) { + curseg->next_segno = next_segno; + change_curseg(sbi, type, true); + } + curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) & + (sbi->blocks_per_seg - 1); + + /* rewrite node page */ + set_page_writeback(page); + submit_write_page(sbi, page, new_blkaddr, NODE); + f2fs_submit_bio(sbi, NODE, true); + refresh_sit_entry(sbi, old_blkaddr, new_blkaddr); + + locate_dirty_segment(sbi, old_cursegno); + locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); + + mutex_unlock(&sit_i->sentry_lock); + mutex_unlock(&curseg->curseg_mutex); +} + +static int read_compacted_summaries(struct f2fs_sb_info *sbi) +{ + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + struct curseg_info *seg_i; + unsigned char *kaddr; + struct page *page; + block_t start; + int i, j, offset; + + start = start_sum_block(sbi); + + page = get_meta_page(sbi, start++); + kaddr = (unsigned char *)page_address(page); + + /* Step 1: restore nat cache */ + seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); + memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE); + + /* Step 2: restore sit cache */ + seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); + memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE, + SUM_JOURNAL_SIZE); + offset = 2 * SUM_JOURNAL_SIZE; + + /* Step 3: restore summary entries */ + for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { + unsigned short blk_off; + unsigned int segno; + + seg_i = CURSEG_I(sbi, i); + segno = le32_to_cpu(ckpt->cur_data_segno[i]); + blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]); + seg_i->next_segno = segno; + reset_curseg(sbi, i, 0); + seg_i->alloc_type = ckpt->alloc_type[i]; + seg_i->next_blkoff = blk_off; + + if (seg_i->alloc_type == SSR) + blk_off = sbi->blocks_per_seg; + + for (j = 0; j < blk_off; j++) { + struct f2fs_summary *s; + s = (struct f2fs_summary *)(kaddr + offset); + seg_i->sum_blk->entries[j] = *s; + offset += SUMMARY_SIZE; + if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE - + 
SUM_FOOTER_SIZE) + continue; + + f2fs_put_page(page, 1); + page = NULL; + + page = get_meta_page(sbi, start++); + kaddr = (unsigned char *)page_address(page); + offset = 0; + } + } + f2fs_put_page(page, 1); + return 0; +} + +static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) +{ + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + struct f2fs_summary_block *sum; + struct curseg_info *curseg; + struct page *new; + unsigned short blk_off; + unsigned int segno = 0; + block_t blk_addr = 0; + + /* get segment number and block addr */ + if (IS_DATASEG(type)) { + segno = le32_to_cpu(ckpt->cur_data_segno[type]); + blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type - + CURSEG_HOT_DATA]); + if (ckpt->ckpt_flags & CP_UMOUNT_FLAG) + blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type); + else + blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type); + } else { + segno = le32_to_cpu(ckpt->cur_node_segno[type - + CURSEG_HOT_NODE]); + blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type - + CURSEG_HOT_NODE]); + if (ckpt->ckpt_flags & CP_UMOUNT_FLAG) + blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE, + type - CURSEG_HOT_NODE); + else + blk_addr = GET_SUM_BLOCK(sbi, segno); + } + + new = get_meta_page(sbi, blk_addr); + sum = (struct f2fs_summary_block *)page_address(new); + + if (IS_NODESEG(type)) { + if (ckpt->ckpt_flags & CP_UMOUNT_FLAG) { + struct f2fs_summary *ns = &sum->entries[0]; + int i; + for (i = 0; i < sbi->blocks_per_seg; i++, ns++) { + ns->version = 0; + ns->ofs_in_node = 0; + } + } else { + if (restore_node_summary(sbi, segno, sum)) { + f2fs_put_page(new, 1); + return -EINVAL; + } + } + } + + /* set uncompleted segment to curseg */ + curseg = CURSEG_I(sbi, type); + mutex_lock(&curseg->curseg_mutex); + memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE); + curseg->next_segno = segno; + reset_curseg(sbi, type, 0); + curseg->alloc_type = ckpt->alloc_type[type]; + curseg->next_blkoff = blk_off; + mutex_unlock(&curseg->curseg_mutex); + f2fs_put_page(new, 1); + return 0; +} + +static int restore_curseg_summaries(struct f2fs_sb_info *sbi) +{ + int type = CURSEG_HOT_DATA; + + if (sbi->ckpt->ckpt_flags & CP_COMPACT_SUM_FLAG) { + /* restore for compacted data summary */ + if (read_compacted_summaries(sbi)) + return -EINVAL; + type = CURSEG_HOT_NODE; + } + + for (; type <= CURSEG_COLD_NODE; type++) + if (read_normal_summaries(sbi, type)) + return -EINVAL; + return 0; +} + +static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) +{ + struct page *page; + unsigned char *kaddr; + struct f2fs_summary *summary; + struct curseg_info *seg_i; + int written_size = 0; + int i, j; + + page = grab_meta_page(sbi, blkaddr++); + kaddr = (unsigned char *)page_address(page); + + /* Step 1: write nat cache */ + seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); + memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE); + written_size += SUM_JOURNAL_SIZE; + + /* Step 2: write sit cache */ + seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); + memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits, + SUM_JOURNAL_SIZE); + written_size += SUM_JOURNAL_SIZE; + + set_page_dirty(page); + + /* Step 3: write summary entries */ + for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { + unsigned short blkoff; + seg_i = CURSEG_I(sbi, i); + if (sbi->ckpt->alloc_type[i] == SSR) + blkoff = sbi->blocks_per_seg; + else + blkoff = curseg_blkoff(sbi, i); + + for (j = 0; j < blkoff; j++) { + if (!page) { + page = grab_meta_page(sbi, blkaddr++); + kaddr = (unsigned char *)page_address(page); + written_size = 0; + } + summary = (struct 
f2fs_summary *)(kaddr + written_size); + *summary = seg_i->sum_blk->entries[j]; + written_size += SUMMARY_SIZE; + set_page_dirty(page); + + if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE - + SUM_FOOTER_SIZE) + continue; + + f2fs_put_page(page, 1); + page = NULL; + } + } + if (page) + f2fs_put_page(page, 1); +} + +static void write_normal_summaries(struct f2fs_sb_info *sbi, + block_t blkaddr, int type) +{ + int i, end; + if (IS_DATASEG(type)) + end = type + NR_CURSEG_DATA_TYPE; + else + end = type + NR_CURSEG_NODE_TYPE; + + for (i = type; i < end; i++) { + struct curseg_info *sum = CURSEG_I(sbi, i); + mutex_lock(&sum->curseg_mutex); + write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type)); + mutex_unlock(&sum->curseg_mutex); + } +} + +void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk) +{ + if (sbi->ckpt->ckpt_flags & CP_COMPACT_SUM_FLAG) + write_compacted_summaries(sbi, start_blk); + else + write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA); +} + +void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk) +{ + if (sbi->ckpt->ckpt_flags & CP_UMOUNT_FLAG) + write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE); + return; +} + +int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type, + unsigned int val, int alloc) +{ + int i; + + if (type == NAT_JOURNAL) { + for (i = 0; i < nats_in_cursum(sum); i++) { + if (le32_to_cpu(nid_in_journal(sum, i)) == val) + return i; + } + if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) + return update_nats_in_cursum(sum, 1); + } else if (type == SIT_JOURNAL) { + for (i = 0; i < sits_in_cursum(sum); i++) + if (le32_to_cpu(segno_in_journal(sum, i)) == val) + return i; + if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES) + return update_sits_in_cursum(sum, 1); + } + return -1; +} + +static struct page *get_current_sit_page(struct f2fs_sb_info *sbi, + unsigned int segno) +{ + struct sit_info *sit_i = SIT_I(sbi); + unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno); + block_t blk_addr = sit_i->sit_base_addr + offset; + + check_seg_range(sbi, segno); + + /* calculate sit block address */ + if (f2fs_test_bit(offset, sit_i->sit_bitmap)) + blk_addr += sit_i->sit_blocks; + + return get_meta_page(sbi, blk_addr); +} + +static struct page *get_next_sit_page(struct f2fs_sb_info *sbi, + unsigned int start) +{ + struct sit_info *sit_i = SIT_I(sbi); + struct page *src_page, *dst_page; + pgoff_t src_off, dst_off; + void *src_addr, *dst_addr; + + src_off = current_sit_addr(sbi, start); + dst_off = next_sit_addr(sbi, src_off); + + /* get current sit block page without lock */ + src_page = get_meta_page(sbi, src_off); + dst_page = grab_meta_page(sbi, dst_off); + BUG_ON(PageDirty(src_page)); + + src_addr = page_address(src_page); + dst_addr = page_address(dst_page); + memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE); + + set_page_dirty(dst_page); + f2fs_put_page(src_page, 1); + + set_to_next_sit(sit_i, start); + + return dst_page; +} + +static bool flush_sits_in_journal(struct f2fs_sb_info *sbi) +{ + struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); + struct f2fs_summary_block *sum = curseg->sum_blk; + int i; + + /* + * If the journal area in the current summary is full of sit entries, + * all the sit entries will be flushed. Otherwise the sit entries + * are not able to replace with newly hot sit entries. 
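+ * (Concretely: once sits_in_cursum() reaches SIT_JOURNAL_ENTRIES, each
+ * journaled segment is marked dirty so it is written to its SIT block,
+ * and the journal count is reset to accept new hot entries.)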
+ */ + if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) { + for (i = sits_in_cursum(sum) - 1; i >= 0; i--) { + unsigned int segno; + segno = le32_to_cpu(segno_in_journal(sum, i)); + __mark_sit_entry_dirty(sbi, segno); + } + update_sits_in_cursum(sum, -sits_in_cursum(sum)); + return 1; + } + return 0; +} + +/** + * CP calls this function, which flushes SIT entries including sit_journal, + * and moves prefree segs to free segs. + */ +void flush_sit_entries(struct f2fs_sb_info *sbi) +{ + struct sit_info *sit_i = SIT_I(sbi); + unsigned long *bitmap = sit_i->dirty_sentries_bitmap; + struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); + struct f2fs_summary_block *sum = curseg->sum_blk; + unsigned long nsegs = TOTAL_SEGS(sbi); + struct page *page = NULL; + struct f2fs_sit_block *raw_sit = NULL; + unsigned int start = 0, end = 0; + unsigned int segno = -1; + bool flushed; + + mutex_lock(&curseg->curseg_mutex); + mutex_lock(&sit_i->sentry_lock); + + /* + * "flushed" indicates whether sit entries in journal are flushed + * to the SIT area or not. + */ + flushed = flush_sits_in_journal(sbi); + + while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) { + struct seg_entry *se = get_seg_entry(sbi, segno); + int sit_offset, offset; + + sit_offset = SIT_ENTRY_OFFSET(sit_i, segno); + + if (flushed) + goto to_sit_page; + + offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1); + if (offset >= 0) { + segno_in_journal(sum, offset) = cpu_to_le32(segno); + seg_info_to_raw_sit(se, &sit_in_journal(sum, offset)); + goto flush_done; + } +to_sit_page: + if (!page || (start > segno) || (segno > end)) { + if (page) { + f2fs_put_page(page, 1); + page = NULL; + } + + start = START_SEGNO(sit_i, segno); + end = start + SIT_ENTRY_PER_BLOCK - 1; + + /* read sit block that will be updated */ + page = get_next_sit_page(sbi, start); + raw_sit = page_address(page); + } + + /* udpate entry in SIT block */ + seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]); +flush_done: + __clear_bit(segno, bitmap); + sit_i->dirty_sentries--; + } + mutex_unlock(&sit_i->sentry_lock); + mutex_unlock(&curseg->curseg_mutex); + + /* writeout last modified SIT block */ + f2fs_put_page(page, 1); + + set_prefree_as_free_segments(sbi); +} + +static int build_sit_info(struct f2fs_sb_info *sbi) +{ + struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + struct sit_info *sit_i; + unsigned int sit_segs, start; + char *src_bitmap, *dst_bitmap; + unsigned int bitmap_size; + + /* allocate memory for SIT information */ + sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL); + if (!sit_i) + return -ENOMEM; + + SM_I(sbi)->sit_info = sit_i; + + sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry)); + if (!sit_i->sentries) + return -ENOMEM; + + bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi)); + sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL); + if (!sit_i->dirty_sentries_bitmap) + return -ENOMEM; + + for (start = 0; start < TOTAL_SEGS(sbi); start++) { + sit_i->sentries[start].cur_valid_map + = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); + sit_i->sentries[start].ckpt_valid_map + = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); + if (!sit_i->sentries[start].cur_valid_map + || !sit_i->sentries[start].ckpt_valid_map) + return -ENOMEM; + } + + if (sbi->segs_per_sec > 1) { + sit_i->sec_entries = vzalloc(sbi->total_sections * + sizeof(struct sec_entry)); + if (!sit_i->sec_entries) + return -ENOMEM; + } + + /* get information related with SIT */ + sit_segs = 
le32_to_cpu(raw_super->segment_count_sit) >> 1; + + /* setup SIT bitmap from ckeckpoint pack */ + bitmap_size = __bitmap_size(sbi, SIT_BITMAP); + src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP); + + dst_bitmap = kzalloc(bitmap_size, GFP_KERNEL); + if (!dst_bitmap) + return -ENOMEM; + memcpy(dst_bitmap, src_bitmap, bitmap_size); + + /* init SIT information */ + sit_i->s_ops = &default_salloc_ops; + + sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr); + sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg; + sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count); + sit_i->sit_bitmap = dst_bitmap; + sit_i->bitmap_size = bitmap_size; + sit_i->dirty_sentries = 0; + sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK; + sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time); + sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec; + mutex_init(&sit_i->sentry_lock); + return 0; +} + +static int build_free_segmap(struct f2fs_sb_info *sbi) +{ + struct f2fs_sm_info *sm_info = SM_I(sbi); + struct free_segmap_info *free_i; + unsigned int bitmap_size, sec_bitmap_size; + + /* allocate memory for free segmap information */ + free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL); + if (!free_i) + return -ENOMEM; + + SM_I(sbi)->free_info = free_i; + + bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi)); + free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL); + if (!free_i->free_segmap) + return -ENOMEM; + + sec_bitmap_size = f2fs_bitmap_size(sbi->total_sections); + free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL); + if (!free_i->free_secmap) + return -ENOMEM; + + /* set all segments as dirty temporarily */ + memset(free_i->free_segmap, 0xff, bitmap_size); + memset(free_i->free_secmap, 0xff, sec_bitmap_size); + + /* init free segmap information */ + free_i->start_segno = + (unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr); + free_i->free_segments = 0; + free_i->free_sections = 0; + rwlock_init(&free_i->segmap_lock); + return 0; +} + +static int build_curseg(struct f2fs_sb_info *sbi) +{ + struct curseg_info *array = NULL; + int i; + + array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL); + if (!array) + return -ENOMEM; + + SM_I(sbi)->curseg_array = array; + + for (i = 0; i < NR_CURSEG_TYPE; i++) { + mutex_init(&array[i].curseg_mutex); + array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); + if (!array[i].sum_blk) + return -ENOMEM; + array[i].segno = NULL_SEGNO; + array[i].next_blkoff = 0; + } + return restore_curseg_summaries(sbi); +} + +static void build_sit_entries(struct f2fs_sb_info *sbi) +{ + struct sit_info *sit_i = SIT_I(sbi); + struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); + struct f2fs_summary_block *sum = curseg->sum_blk; + unsigned int start; + + for (start = 0; start < TOTAL_SEGS(sbi); start++) { + struct seg_entry *se = &sit_i->sentries[start]; + struct f2fs_sit_block *sit_blk; + struct f2fs_sit_entry sit; + struct page *page; + int i; + + mutex_lock(&curseg->curseg_mutex); + for (i = 0; i < sits_in_cursum(sum); i++) { + if (le32_to_cpu(segno_in_journal(sum, i)) == start) { + sit = sit_in_journal(sum, i); + mutex_unlock(&curseg->curseg_mutex); + goto got_it; + } + } + mutex_unlock(&curseg->curseg_mutex); + page = get_current_sit_page(sbi, start); + sit_blk = (struct f2fs_sit_block *)page_address(page); + sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)]; + f2fs_put_page(page, 1); +got_it: + check_block_count(sbi, start, &sit); + seg_info_from_raw_sit(se, &sit); + if (sbi->segs_per_sec > 1) { + struct sec_entry *e = 
get_sec_entry(sbi, start); + e->valid_blocks += se->valid_blocks; + } + } +} + +static void init_free_segmap(struct f2fs_sb_info *sbi) +{ + unsigned int start; + int type; + + for (start = 0; start < TOTAL_SEGS(sbi); start++) { + struct seg_entry *sentry = get_seg_entry(sbi, start); + if (!sentry->valid_blocks) + __set_free(sbi, start); + } + + /* set use the current segments */ + for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) { + struct curseg_info *curseg_t = CURSEG_I(sbi, type); + __set_test_and_inuse(sbi, curseg_t->segno); + } +} + +static void init_dirty_segmap(struct f2fs_sb_info *sbi) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + struct free_segmap_info *free_i = FREE_I(sbi); + unsigned int segno = 0, offset = 0; + unsigned short valid_blocks; + + while (segno < TOTAL_SEGS(sbi)) { + /* find dirty segment based on free segmap */ + segno = find_next_inuse(free_i, TOTAL_SEGS(sbi), offset); + if (segno >= TOTAL_SEGS(sbi)) + break; + offset = segno + 1; + valid_blocks = get_valid_blocks(sbi, segno, 0); + if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks) + continue; + mutex_lock(&dirty_i->seglist_lock); + __locate_dirty_segment(sbi, segno, DIRTY); + mutex_unlock(&dirty_i->seglist_lock); + } +} + +static int init_victim_segmap(struct f2fs_sb_info *sbi) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi)); + + dirty_i->victim_segmap[FG_GC] = kzalloc(bitmap_size, GFP_KERNEL); + dirty_i->victim_segmap[BG_GC] = kzalloc(bitmap_size, GFP_KERNEL); + if (!dirty_i->victim_segmap[FG_GC] || !dirty_i->victim_segmap[BG_GC]) + return -ENOMEM; + return 0; +} + +static int build_dirty_segmap(struct f2fs_sb_info *sbi) +{ + struct dirty_seglist_info *dirty_i; + unsigned int bitmap_size, i; + + /* allocate memory for dirty segments list information */ + dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL); + if (!dirty_i) + return -ENOMEM; + + SM_I(sbi)->dirty_info = dirty_i; + mutex_init(&dirty_i->seglist_lock); + + bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi)); + + for (i = 0; i < NR_DIRTY_TYPE; i++) { + dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL); + dirty_i->nr_dirty[i] = 0; + if (!dirty_i->dirty_segmap[i]) + return -ENOMEM; + } + + init_dirty_segmap(sbi); + return init_victim_segmap(sbi); +} + +/** + * Update min, max modified time for cost-benefit GC algorithm + */ +static void init_min_max_mtime(struct f2fs_sb_info *sbi) +{ + struct sit_info *sit_i = SIT_I(sbi); + unsigned int segno; + + mutex_lock(&sit_i->sentry_lock); + + sit_i->min_mtime = LLONG_MAX; + + for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) { + unsigned int i; + unsigned long long mtime = 0; + + for (i = 0; i < sbi->segs_per_sec; i++) + mtime += get_seg_entry(sbi, segno + i)->mtime; + + mtime = div_u64(mtime, sbi->segs_per_sec); + + if (sit_i->min_mtime > mtime) + sit_i->min_mtime = mtime; + } + sit_i->max_mtime = get_mtime(sbi); + mutex_unlock(&sit_i->sentry_lock); +} + +int build_segment_manager(struct f2fs_sb_info *sbi) +{ + struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + struct f2fs_sm_info *sm_info = NULL; + int err; + + sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL); + if (!sm_info) + return -ENOMEM; + + /* init sm info */ + sbi->sm_info = sm_info; + INIT_LIST_HEAD(&sm_info->wblist_head); + spin_lock_init(&sm_info->wblist_lock); + sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); + 
sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); + sm_info->segment_count = le32_to_cpu(raw_super->segment_count); + sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); + sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); + sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main); + sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); + + err = build_sit_info(sbi); + if (err) + return err; + err = build_free_segmap(sbi); + if (err) + return err; + err = build_curseg(sbi); + if (err) + return err; + + /* reinit free segmap based on SIT */ + build_sit_entries(sbi); + + init_free_segmap(sbi); + err = build_dirty_segmap(sbi); + if (err) + return err; + + init_min_max_mtime(sbi); + return 0; +} + +static void discard_dirty_segmap(struct f2fs_sb_info *sbi, + enum dirty_type dirty_type) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + + mutex_lock(&dirty_i->seglist_lock); + kfree(dirty_i->dirty_segmap[dirty_type]); + dirty_i->nr_dirty[dirty_type] = 0; + mutex_unlock(&dirty_i->seglist_lock); +} + +void reset_victim_segmap(struct f2fs_sb_info *sbi) +{ + unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi)); + memset(DIRTY_I(sbi)->victim_segmap[FG_GC], 0, bitmap_size); +} + +static void destroy_victim_segmap(struct f2fs_sb_info *sbi) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + + kfree(dirty_i->victim_segmap[FG_GC]); + kfree(dirty_i->victim_segmap[BG_GC]); +} + +static void destroy_dirty_segmap(struct f2fs_sb_info *sbi) +{ + struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + int i; + + if (!dirty_i) + return; + + /* discard pre-free/dirty segments list */ + for (i = 0; i < NR_DIRTY_TYPE; i++) + discard_dirty_segmap(sbi, i); + + destroy_victim_segmap(sbi); + SM_I(sbi)->dirty_info = NULL; + kfree(dirty_i); +} + +static void destroy_curseg(struct f2fs_sb_info *sbi) +{ + struct curseg_info *array = SM_I(sbi)->curseg_array; + int i; + + if (!array) + return; + SM_I(sbi)->curseg_array = NULL; + for (i = 0; i < NR_CURSEG_TYPE; i++) + kfree(array[i].sum_blk); + kfree(array); +} + +static void destroy_free_segmap(struct f2fs_sb_info *sbi) +{ + struct free_segmap_info *free_i = SM_I(sbi)->free_info; + if (!free_i) + return; + SM_I(sbi)->free_info = NULL; + kfree(free_i->free_segmap); + kfree(free_i->free_secmap); + kfree(free_i); +} + +static void destroy_sit_info(struct f2fs_sb_info *sbi) +{ + struct sit_info *sit_i = SIT_I(sbi); + unsigned int start; + + if (!sit_i) + return; + + if (sit_i->sentries) { + for (start = 0; start < TOTAL_SEGS(sbi); start++) { + kfree(sit_i->sentries[start].cur_valid_map); + kfree(sit_i->sentries[start].ckpt_valid_map); + } + } + vfree(sit_i->sentries); + vfree(sit_i->sec_entries); + kfree(sit_i->dirty_sentries_bitmap); + + SM_I(sbi)->sit_info = NULL; + kfree(sit_i->sit_bitmap); + kfree(sit_i); +} + +void destroy_segment_manager(struct f2fs_sb_info *sbi) +{ + struct f2fs_sm_info *sm_info = SM_I(sbi); + destroy_dirty_segmap(sbi); + destroy_curseg(sbi); + destroy_free_segmap(sbi); + destroy_sit_info(sbi); + sbi->sm_info = NULL; + kfree(sm_info); +} -- cgit v1.2.3-70-g09d2 From 25ca923b2a766b9c93b63777ead351137533a623 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 28 Nov 2012 16:12:41 +0900 Subject: f2fs: fix endian conversion bugs reported by sparse This patch should resolve the bugs reported by the sparse tool. Initial reports were written by "kbuild test robot" managed by fengguang.wu. 
In my local machines, I've tested also by running: > make C=2 CF="-D__CHECK_ENDIAN__" Accordingly, I've found lots of warnings and bugs related to the endian conversion. And I've fixed all at this moment. Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 32 +++++++++++++++++--------------- fs/f2fs/data.c | 2 +- fs/f2fs/debug.c | 2 +- fs/f2fs/dir.c | 8 ++++---- fs/f2fs/f2fs.h | 25 +++++++++++++++++++++++-- fs/f2fs/hash.c | 3 +-- fs/f2fs/node.c | 4 ++-- fs/f2fs/node.h | 2 +- fs/f2fs/recovery.c | 2 +- fs/f2fs/segment.c | 14 +++++++------- fs/f2fs/super.c | 8 ++++---- include/linux/f2fs_fs.h | 6 +++--- 12 files changed, 65 insertions(+), 43 deletions(-) (limited to 'fs/f2fs/segment.c') diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index ab743f92ee0..7c18f8efaad 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -268,7 +268,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi) { block_t start_blk, orphan_blkaddr, i, j; - if (!(F2FS_CKPT(sbi)->ckpt_flags & CP_ORPHAN_PRESENT_FLAG)) + if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG)) return 0; sbi->por_doing = 1; @@ -287,7 +287,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi) f2fs_put_page(page, 1); } /* clear Orphan Flag */ - F2FS_CKPT(sbi)->ckpt_flags &= (~CP_ORPHAN_PRESENT_FLAG); + clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG); sbi->por_doing = 0; return 0; } @@ -376,7 +376,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, pre_version = le64_to_cpu(cp_block->checkpoint_ver); /* Read the 2nd cp block in this CP pack */ - cp_addr += le64_to_cpu(cp_block->cp_pack_total_block_count) - 1; + cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1; cp_page_2 = get_meta_page(sbi, cp_addr); cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2); @@ -605,8 +605,8 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) block_t start_blk; struct page *cp_page; unsigned int data_sum_blocks, orphan_blocks; + unsigned int crc32 = 0; void *kaddr; - __u32 crc32 = 0; int i; /* Flush all the NAT/SIT pages */ @@ -646,33 +646,35 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) /* 2 cp + n data seg summary + orphan inode blocks */ data_sum_blocks = npages_for_summary_flush(sbi); if (data_sum_blocks < 3) - ckpt->ckpt_flags |= CP_COMPACT_SUM_FLAG; + set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); else - ckpt->ckpt_flags &= (~CP_COMPACT_SUM_FLAG); + clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1) / F2FS_ORPHANS_PER_BLOCK; - ckpt->cp_pack_start_sum = 1 + orphan_blocks; - ckpt->cp_pack_total_block_count = 2 + data_sum_blocks + orphan_blocks; + ckpt->cp_pack_start_sum = cpu_to_le32(1 + orphan_blocks); if (is_umount) { - ckpt->ckpt_flags |= CP_UMOUNT_FLAG; - ckpt->cp_pack_total_block_count += NR_CURSEG_NODE_TYPE; + set_ckpt_flags(ckpt, CP_UMOUNT_FLAG); + ckpt->cp_pack_total_block_count = cpu_to_le32(2 + + data_sum_blocks + orphan_blocks + NR_CURSEG_NODE_TYPE); } else { - ckpt->ckpt_flags &= (~CP_UMOUNT_FLAG); + clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG); + ckpt->cp_pack_total_block_count = cpu_to_le32(2 + + data_sum_blocks + orphan_blocks); } if (sbi->n_orphans) - ckpt->ckpt_flags |= CP_ORPHAN_PRESENT_FLAG; + set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); else - ckpt->ckpt_flags &= (~CP_ORPHAN_PRESENT_FLAG); + clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); /* update SIT/NAT bitmap */ get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP)); get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP)); 
crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset)); - *(__u32 *)((unsigned char *)ckpt + + *(__le32 *)((unsigned char *)ckpt + le32_to_cpu(ckpt->checksum_offset)) = cpu_to_le32(crc32); @@ -716,7 +718,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) sbi->alloc_valid_block_count = 0; /* Here, we only have one bio having CP pack */ - if (sbi->ckpt->ckpt_flags & CP_ERROR_FLAG) + if (is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) sbi->sb->s_flags |= MS_RDONLY; else sync_meta_pages(sbi, META_FLUSH, LONG_MAX); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index c2fd0a80db1..5635cc5a9d4 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -545,7 +545,7 @@ redirty_out: #define MAX_DESIRED_PAGES_WP 4096 -int f2fs_write_data_pages(struct address_space *mapping, +static int f2fs_write_data_pages(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index a56181c1b28..fb62960a1dc 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -27,7 +27,7 @@ static LIST_HEAD(f2fs_stat_list); static struct dentry *debugfs_root; -void update_general_status(struct f2fs_sb_info *sbi) +static void update_general_status(struct f2fs_sb_info *sbi) { struct f2fs_stat_info *si = sbi->stat_info; int i; diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 5975568d03d..5ec7a06120e 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -80,7 +80,7 @@ static bool early_match_name(const char *name, int namelen, if (le16_to_cpu(de->name_len) != namelen) return false; - if (le32_to_cpu(de->hash_code) != namehash) + if (de->hash_code != namehash) return false; return true; @@ -143,7 +143,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, nbucket = dir_buckets(level); nblock = bucket_blocks(level); - bidx = dir_block_index(level, namehash % nbucket); + bidx = dir_block_index(level, le32_to_cpu(namehash) % nbucket); end_block = bidx + nblock; for (; bidx < end_block; bidx++) { @@ -406,7 +406,7 @@ start: nbucket = dir_buckets(level); nblock = bucket_blocks(level); - bidx = dir_block_index(level, (dentry_hash % nbucket)); + bidx = dir_block_index(level, (le32_to_cpu(dentry_hash) % nbucket)); for (block = bidx; block <= (bidx + nblock - 1); block++) { mutex_lock_op(sbi, DENTRY_OPS); @@ -437,7 +437,7 @@ add_dentry: wait_on_page_writeback(dentry_page); de = &dentry_blk->dentry[bit_pos]; - de->hash_code = cpu_to_le32(dentry_hash); + de->hash_code = dentry_hash; de->name_len = cpu_to_le16(namelen); memcpy(dentry_blk->filename[bit_pos], name, namelen); de->ino = cpu_to_le32(inode->i_ino); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index d3f5a70e2a4..8d7fde1bda1 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -463,6 +463,26 @@ static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi) sbi->s_dirty = 0; } +static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) +{ + unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); + return ckpt_flags & f; +} + +static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) +{ + unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); + ckpt_flags |= f; + cp->ckpt_flags = cpu_to_le32(ckpt_flags); +} + +static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) +{ + unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); + ckpt_flags &= (~f); + cp->ckpt_flags = cpu_to_le32(ckpt_flags); +} + static inline void mutex_lock_op(struct f2fs_sb_info *sbi, enum lock_type t) { mutex_lock_nested(&sbi->fs_lock[t], t); @@ 
-577,7 +597,8 @@ static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); - int offset = (flag == NAT_BITMAP) ? ckpt->sit_ver_bitmap_bytesize : 0; + int offset = (flag == NAT_BITMAP) ? + le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; return &ckpt->sit_nat_version_bitmap + offset; } @@ -587,7 +608,7 @@ static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); unsigned long long ckpt_version = le64_to_cpu(ckpt->checkpoint_ver); - start_addr = le64_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); + start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); /* * odd numbered checkpoint should at cp segment 0 diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c index 098a1963d7c..beb155e8d06 100644 --- a/fs/f2fs/hash.c +++ b/fs/f2fs/hash.c @@ -92,7 +92,6 @@ f2fs_hash_t f2fs_dentry_hash(const char *name, int len) hash = buf[0]; minor_hash = buf[1]; - f2fs_hash = hash; - f2fs_hash &= ~F2FS_HASH_COL_BIT; + f2fs_hash = cpu_to_le32(hash & ~F2FS_HASH_COL_BIT); return f2fs_hash; } diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 216f04dc117..5d421fe2257 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1445,8 +1445,8 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i); dst->i.i_size = 0; - dst->i.i_blocks = 1; - dst->i.i_links = 1; + dst->i.i_blocks = cpu_to_le64(1); + dst->i.i_links = cpu_to_le32(1); dst->i.i_xattr_nid = 0; new_ni = old_ni; diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index 5d525ed312b..0ab92d64305 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -177,7 +177,7 @@ static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr) void *kaddr = page_address(page); struct f2fs_node *rn = (struct f2fs_node *)kaddr; rn->footer.cp_ver = ckpt->checkpoint_ver; - rn->footer.next_blkaddr = blkaddr; + rn->footer.next_blkaddr = cpu_to_le32(blkaddr); } static inline nid_t ino_of_node(struct page *node_page) diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 7a43df0b72c..222a7bb9221 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -81,7 +81,7 @@ static int recover_inode(struct inode *inode, struct page *node_page) struct f2fs_node *raw_node = (struct f2fs_node *)kaddr; struct f2fs_inode *raw_inode = &(raw_node->i); - inode->i_mode = le32_to_cpu(raw_inode->i_mode); + inode->i_mode = le16_to_cpu(raw_inode->i_mode); i_size_write(inode, le64_to_cpu(raw_inode->i_size)); inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime); inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index ed7c079cfc7..d973c56e8bd 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -630,7 +630,7 @@ static void f2fs_end_io_write(struct bio *bio, int err) SetPageError(page); if (page->mapping) set_bit(AS_EIO, &page->mapping->flags); - p->sbi->ckpt->ckpt_flags |= CP_ERROR_FLAG; + set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG); set_page_dirty(page); } end_page_writeback(page); @@ -1067,7 +1067,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) segno = le32_to_cpu(ckpt->cur_data_segno[type]); blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type - CURSEG_HOT_DATA]); - if (ckpt->ckpt_flags & CP_UMOUNT_FLAG) + if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type); else blk_addr = sum_blk_addr(sbi, 
NR_CURSEG_DATA_TYPE, type); @@ -1076,7 +1076,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) CURSEG_HOT_NODE]); blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type - CURSEG_HOT_NODE]); - if (ckpt->ckpt_flags & CP_UMOUNT_FLAG) + if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE, type - CURSEG_HOT_NODE); else @@ -1087,7 +1087,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) sum = (struct f2fs_summary_block *)page_address(new); if (IS_NODESEG(type)) { - if (ckpt->ckpt_flags & CP_UMOUNT_FLAG) { + if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) { struct f2fs_summary *ns = &sum->entries[0]; int i; for (i = 0; i < sbi->blocks_per_seg; i++, ns++) { @@ -1119,7 +1119,7 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi) { int type = CURSEG_HOT_DATA; - if (sbi->ckpt->ckpt_flags & CP_COMPACT_SUM_FLAG) { + if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) { /* restore for compacted data summary */ if (read_compacted_summaries(sbi)) return -EINVAL; @@ -1208,7 +1208,7 @@ static void write_normal_summaries(struct f2fs_sb_info *sbi, void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk) { - if (sbi->ckpt->ckpt_flags & CP_COMPACT_SUM_FLAG) + if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) write_compacted_summaries(sbi, start_blk); else write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA); @@ -1216,7 +1216,7 @@ void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk) void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk) { - if (sbi->ckpt->ckpt_flags & CP_UMOUNT_FLAG) + if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE); return; } diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 8661c93538a..878bf382f84 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -89,7 +89,7 @@ static void f2fs_i_callback(struct rcu_head *head) kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode)); } -void f2fs_destroy_inode(struct inode *inode) +static void f2fs_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, f2fs_i_callback); } @@ -445,7 +445,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) if (sanity_check_raw_super(raw_super)) goto free_sb_buf; - sb->s_maxbytes = max_file_size(raw_super->log_blocksize); + sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize)); sb->s_max_links = F2FS_LINK_MAX; get_random_bytes(&sbi->s_next_generation, sizeof(u32)); @@ -527,7 +527,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) /* if there are nt orphan nodes free them */ err = -EINVAL; - if (!(sbi->ckpt->ckpt_flags & CP_UMOUNT_FLAG) && + if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) && recover_orphan_inodes(sbi)) goto free_node_inode; @@ -547,7 +547,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) } /* recover fsynced data */ - if (!(sbi->ckpt->ckpt_flags & CP_UMOUNT_FLAG) && + if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) && !test_opt(sbi, DISABLE_ROLL_FORWARD)) recover_fsync_data(sbi); diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 1429ece7caa..c2fbbc35c1e 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -272,8 +272,8 @@ struct f2fs_sit_block { * ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node) */ #define ENTRIES_IN_SUM 512 -#define SUMMARY_SIZE (sizeof(struct f2fs_summary)) -#define SUM_FOOTER_SIZE (sizeof(struct 
summary_footer)) +#define SUMMARY_SIZE (7) /* sizeof(struct summary) */ +#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */ #define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM) /* a summary entry for a 4KB-sized block in a segment */ @@ -297,7 +297,7 @@ struct summary_footer { __u32 check_sum; /* summary checksum */ } __packed; -#define SUM_JOURNAL_SIZE (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE -\ +#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\ SUM_ENTRY_SIZE) #define NAT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\ sizeof(struct nat_journal_entry)) -- cgit v1.2.3-70-g09d2 From 0a8165d7c2cf1395059db20ab07665baf3758fcd Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Thu, 29 Nov 2012 13:28:09 +0900 Subject: f2fs: adjust kernel coding style As pointed out by Randy Dunlap, this patch removes all usage of "/**" for comment blocks. Instead, just use "/*". Signed-off-by: Jaegeuk Kim --- fs/f2fs/acl.c | 2 +- fs/f2fs/acl.h | 2 +- fs/f2fs/checkpoint.c | 10 +++++----- fs/f2fs/data.c | 12 ++++++------ fs/f2fs/debug.c | 6 +++--- fs/f2fs/dir.c | 4 ++-- fs/f2fs/f2fs.h | 2 +- fs/f2fs/file.c | 2 +- fs/f2fs/gc.c | 10 +++++----- fs/f2fs/gc.h | 4 ++-- fs/f2fs/hash.c | 2 +- fs/f2fs/inode.c | 4 ++-- fs/f2fs/namei.c | 4 ++-- fs/f2fs/node.c | 22 +++++++++++----------- fs/f2fs/node.h | 2 +- fs/f2fs/recovery.c | 2 +- fs/f2fs/segment.c | 26 +++++++++++++------------- fs/f2fs/segment.h | 2 +- fs/f2fs/super.c | 2 +- fs/f2fs/xattr.c | 2 +- fs/f2fs/xattr.h | 4 ++-- 21 files changed, 63 insertions(+), 63 deletions(-) (limited to 'fs/f2fs/segment.c') diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c index dff2a2bfa75..1ac9a4b24f6 100644 --- a/fs/f2fs/acl.c +++ b/fs/f2fs/acl.c @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/acl.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h index c97675e18fe..80f43067441 100644 --- a/fs/f2fs/acl.h +++ b/fs/f2fs/acl.h @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/acl.h * * Copyright (c) 2012 Samsung Electronics Co., Ltd. diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 7c18f8efaad..6ef36c37e2b 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/checkpoint.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. @@ -24,7 +24,7 @@ static struct kmem_cache *orphan_entry_slab; static struct kmem_cache *inode_entry_slab; -/** +/* * We guarantee no failure on the returned page. */ struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) @@ -44,7 +44,7 @@ repeat: return page; } -/** +/* * We guarantee no failure on the returned page. */ struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) @@ -543,7 +543,7 @@ retry: goto retry; } -/** +/* * Freeze all the FS-operations for checkpoint. */ void block_operations(struct f2fs_sb_info *sbi) @@ -727,7 +727,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) F2FS_RESET_SB_DIRT(sbi); } -/** +/* * We guarantee that this checkpoint procedure should not fail. */ void write_checkpoint(struct f2fs_sb_info *sbi, bool blocked, bool is_umount) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 5635cc5a9d4..444c2a6fbaa 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/data.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. 
@@ -21,7 +21,7 @@ #include "node.h" #include "segment.h" -/** +/* * Lock ordering for the change of data block address: * ->data_page * ->node_page @@ -207,7 +207,7 @@ struct page *find_data_page(struct inode *inode, pgoff_t index) return page; } -/** +/* * If it tries to access a hole, return an error. * Because, the callers, functions in dir.c and GC, should be able to know * whether this page exists or not. @@ -247,7 +247,7 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index) return page; } -/** +/* * Caller ensures that this data page is never allocated. * A new zero-filled data page is allocated in the page cache. */ @@ -322,7 +322,7 @@ static void read_end_io(struct bio *bio, int err) bio_put(bio); } -/** +/* * Fill the locked page with data located in the block address. * Read operation is synchronous, and caller must unlock the page. */ @@ -367,7 +367,7 @@ int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page, return 0; } -/** +/* * This function should be used by the data read flow only where it * does not check the "create" flag that indicates block allocation. * The reason for this special functionality is to exploit VFS readahead diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index fb62960a1dc..0e0380a588a 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -1,4 +1,4 @@ -/** +/* * f2fs debugging statistics * * Copyright (c) 2012 Samsung Electronics Co., Ltd. @@ -78,7 +78,7 @@ static void update_general_status(struct f2fs_sb_info *sbi) } } -/** +/* * This function calculates BDF of every segments */ static void update_sit_info(struct f2fs_sb_info *sbi) @@ -113,7 +113,7 @@ static void update_sit_info(struct f2fs_sb_info *sbi) si->avg_vblocks = 0; } -/** +/* * This function calculates memory footprint. */ static void update_mem_info(struct f2fs_sb_info *sbi) diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 5ec7a06120e..089eb676689 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/dir.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. @@ -453,7 +453,7 @@ fail: return err; } -/** +/* * It only removes the dentry from the dentry page,corresponding name * entry in name page does not need to be touched during deletion. */ diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 8d7fde1bda1..8c3f1ef6ace 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/f2fs.h * * Copyright (c) 2012 Samsung Electronics Co., Ltd. diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index f5ae36d19f4..c1a108ffbfc 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/file.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 46774ce3ae0..3271be42c0b 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/gc.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. @@ -213,7 +213,7 @@ static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno, return get_cb_cost(sbi, segno); } -/** +/* * This function is called from two pathes. * One is garbage collection and the other is SSR segment selection. * When it is called during GC, it just gets a victim segment @@ -359,7 +359,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi, return ret ? GC_OK : GC_NEXT; } -/** +/* * This function compares node address got in summary with that in NAT. * On validity, copy that node with cold status, otherwise (invalid node) * ignore that. 
@@ -425,7 +425,7 @@ next_step: return GC_DONE; } -/** +/* * Calculate start block index that this node page contains */ block_t start_bidx_of_node(unsigned int node_ofs) @@ -516,7 +516,7 @@ out: f2fs_put_page(page, 1); } -/** +/* * This function tries to get parent node of victim data block, and identifies * data block validity. If the block is valid, copy that with cold status and * modify parent node. diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h index cf42a554ca0..b026d9354cc 100644 --- a/fs/f2fs/gc.h +++ b/fs/f2fs/gc.h @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/gc.h * * Copyright (c) 2012 Samsung Electronics Co., Ltd. @@ -42,7 +42,7 @@ struct inode_entry { struct inode *inode; }; -/** +/* * inline functions */ static inline block_t free_user_blocks(struct f2fs_sb_info *sbi) diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c index beb155e8d06..a60f04200f8 100644 --- a/fs/f2fs/hash.c +++ b/fs/f2fs/hash.c @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/hash.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 94f13d2815e..aa4ef4f48ff 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/inode.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. @@ -235,7 +235,7 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) return 0; } -/** +/* * Called at the last iput() if i_nlink is zero */ void f2fs_evict_inode(struct inode *inode) diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index aec362f6f0b..63efd77fab9 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/namei.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. @@ -96,7 +96,7 @@ static int is_multimedia_file(const unsigned char *s, const char *sub) return ret; } -/** +/* * Set multimedia files as cold files for hot/cold data separation */ static inline void set_cold_file(struct f2fs_sb_info *sbi, struct inode *inode, diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 5d421fe2257..25d303646da 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/node.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. @@ -81,7 +81,7 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) return dst_page; } -/** +/* * Readahead NAT pages */ static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid) @@ -251,7 +251,7 @@ static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) return nr_shrink; } -/** +/* * This function returns always success */ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni) @@ -302,7 +302,7 @@ cache: cache_nat_entry(NM_I(sbi), nid, &ne); } -/** +/* * The maximum depth is four. * Offset[0] will have raw inode offset. */ @@ -649,7 +649,7 @@ fail: return err; } -/** +/* * All the block addresses of data and nodes should be nullified. */ int truncate_inode_blocks(struct inode *inode, pgoff_t from) @@ -860,7 +860,7 @@ static int read_node_page(struct page *page, int type) return f2fs_readpage(sbi, page, ni.blk_addr, type); } -/** +/* * Readahead a node page */ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid) @@ -910,7 +910,7 @@ struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid) return page; } -/** +/* * Return a locked page for the desired node page. * And, readahead MAX_RA_NODE number of node pages. 
*/ @@ -1186,7 +1186,7 @@ static int f2fs_release_node_page(struct page *page, gfp_t wait) return 0; } -/** +/* * Structure of the f2fs node operations */ const struct address_space_operations f2fs_node_aops = { @@ -1386,7 +1386,7 @@ retry: return true; } -/** +/* * alloc_nid() should be called prior to this function. */ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid) @@ -1403,7 +1403,7 @@ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid) spin_unlock(&nm_i->free_nid_list_lock); } -/** +/* * alloc_nid() should be called prior to this function. */ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) @@ -1545,7 +1545,7 @@ retry: return true; } -/** +/* * This function is called during the checkpointing process. */ void flush_nat_entries(struct f2fs_sb_info *sbi) diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index 0ab92d64305..afdb130f782 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/node.h * * Copyright (c) 2012 Samsung Electronics Co., Ltd. diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 222a7bb9221..b07e9b6ef37 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/recovery.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index d973c56e8bd..a177eb387d3 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/segment.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. @@ -36,7 +36,7 @@ static int need_to_flush(struct f2fs_sb_info *sbi) return 0; } -/** +/* * This function balances dirty node and dentry pages. * In addition, it controls garbage collection. */ @@ -105,7 +105,7 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, } } -/** +/* * Should not occur error such as -ENOMEM. * Adding dirty entry into seglist is not critical operation. * If a given segment is one of current working segments, it won't be added. @@ -136,7 +136,7 @@ void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) return; } -/** +/* * Should call clear_prefree_segments after checkpoint is done. */ static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) @@ -269,7 +269,7 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) mutex_unlock(&sit_i->sentry_lock); } -/** +/* * This function should be resided under the curseg_mutex lock */ static void __add_sum_entry(struct f2fs_sb_info *sbi, int type, @@ -282,7 +282,7 @@ static void __add_sum_entry(struct f2fs_sb_info *sbi, int type, return; } -/** +/* * Calculate the number of current summary pages for writing */ int npages_for_summary_flush(struct f2fs_sb_info *sbi) @@ -309,7 +309,7 @@ int npages_for_summary_flush(struct f2fs_sb_info *sbi) return 3; } -/** +/* * Caller should put this summary page */ struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) @@ -371,7 +371,7 @@ next: return NULL_SEGNO; } -/** +/* * Find a new segment from the free segments bitmap to right order * This function should be returned with success, otherwise BUG */ @@ -483,7 +483,7 @@ static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified) __set_sit_entry_type(sbi, type, curseg->segno, modified); } -/** +/* * Allocate a current working segment. * This function always allocates a free segment in LFS manner. 
*/ @@ -520,7 +520,7 @@ static void __next_free_blkoff(struct f2fs_sb_info *sbi, seg->next_blkoff = ofs; } -/** +/* * If a segment is written by LFS manner, next block offset is just obtained * by increasing the current block offset. However, if a segment is written by * SSR manner, next block offset obtained by calling __next_free_blkoff @@ -534,7 +534,7 @@ static void __refresh_next_blkoff(struct f2fs_sb_info *sbi, seg->next_blkoff++; } -/** +/* * This function always allocates a used segment (from dirty seglist) by SSR * manner, so it should recover the existing segment information of valid blocks */ @@ -1310,7 +1310,7 @@ static bool flush_sits_in_journal(struct f2fs_sb_info *sbi) return 0; } -/** +/* * CP calls this function, which flushes SIT entries including sit_journal, * and moves prefree segs to free segs. */ @@ -1624,7 +1624,7 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi) return init_victim_segmap(sbi); } -/** +/* * Update min, max modified time for cost-benefit GC algorithm */ static void init_min_max_mtime(struct f2fs_sb_info *sbi) diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index e380a8ef13f..2c445f8947c 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/segment.h * * Copyright (c) 2012 Samsung Electronics Co., Ltd. diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 878bf382f84..4360600c81e 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/super.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index aca50fe163f..5324d1e9d16 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/xattr.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h index 29b0a08e1e1..49c9558305e 100644 --- a/fs/f2fs/xattr.h +++ b/fs/f2fs/xattr.h @@ -1,4 +1,4 @@ -/** +/* * fs/f2fs/xattr.h * * Copyright (c) 2012 Samsung Electronics Co., Ltd. @@ -77,7 +77,7 @@ struct f2fs_xattr_entry { #define MAX_VALUE_LEN (MIN_OFFSET - sizeof(struct f2fs_xattr_header) - \ sizeof(struct f2fs_xattr_entry)) -/** +/* * On-disk structure of f2fs_xattr * We use only 1 block for xattr. * -- cgit v1.2.3-70-g09d2 From 1042d60f917d78ef1a6eaea297a1020484d4bf74 Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Sat, 1 Dec 2012 10:56:13 +0900 Subject: f2fs: remove unneeded initialization No need to initialize "struct f2fs_gc_kthread *gc_th = NULL", as gc_th = NULL, will be taken care by the return values of kmalloc(). And fix codes in other places. 
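The point is simply that the "= NULL" is dead code: the pointer is unconditionally overwritten by the kmalloc() return value on the very next line, and the NULL check already covers the allocation-failure case. A minimal sketch of the resulting pattern (illustrative only; the error value is assumed):

	struct f2fs_gc_kthread *gc_th;	/* no "= NULL" needed */

	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th)
		return -ENOMEM;	/* allocation failure is handled here */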
Signed-off-by: Namjae Jeon Signed-off-by: Amit Sahrawat --- fs/f2fs/gc.c | 2 +- fs/f2fs/segment.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs/f2fs/segment.c') diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 3271be42c0b..644aa380827 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -89,7 +89,7 @@ static int gc_thread_func(void *data) int start_gc_thread(struct f2fs_sb_info *sbi) { - struct f2fs_gc_kthread *gc_th = NULL; + struct f2fs_gc_kthread *gc_th; gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL); if (!gc_th) diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index a177eb387d3..969df1a30d1 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1489,7 +1489,7 @@ static int build_free_segmap(struct f2fs_sb_info *sbi) static int build_curseg(struct f2fs_sb_info *sbi) { - struct curseg_info *array = NULL; + struct curseg_info *array; int i; array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL); @@ -1656,7 +1656,7 @@ int build_segment_manager(struct f2fs_sb_info *sbi) { struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); - struct f2fs_sm_info *sm_info = NULL; + struct f2fs_sm_info *sm_info; int err; sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL); -- cgit v1.2.3-70-g09d2 From c212991a6bc3ba120d41205a294c5b89f05f1535 Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Sat, 8 Dec 2012 14:53:40 +0900 Subject: f2fs: rewrite f2fs_bio_alloc to make it simpler Since, GFP_NOFS(__GFP_WAIT) is used for allocation requests of bio in f2fs. So, there is no chance of returning NULL from the BIO allocation. Making the bio allocation routine for f2fs simpler. Signed-off-by: Namjae Jeon Signed-off-by: Amit Sahrawat --- fs/f2fs/segment.c | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) (limited to 'fs/f2fs/segment.c') diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 969df1a30d1..8894b399770 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -647,28 +647,18 @@ struct bio *f2fs_bio_alloc(struct block_device *bdev, sector_t first_sector, int nr_vecs, gfp_t gfp_flags) { struct bio *bio; -repeat: + /* allocate new bio */ bio = bio_alloc(gfp_flags, nr_vecs); - if (bio == NULL && (current->flags & PF_MEMALLOC)) { - while (!bio && (nr_vecs /= 2)) - bio = bio_alloc(gfp_flags, nr_vecs); - } - if (bio) { - bio->bi_bdev = bdev; - bio->bi_sector = first_sector; + bio->bi_bdev = bdev; + bio->bi_sector = first_sector; retry: - bio->bi_private = kmalloc(sizeof(struct bio_private), - GFP_NOFS | __GFP_HIGH); - if (!bio->bi_private) { - cond_resched(); - goto retry; - } - } - if (bio == NULL) { + bio->bi_private = kmalloc(sizeof(struct bio_private), + GFP_NOFS | __GFP_HIGH); + if (!bio->bi_private) { cond_resched(); - goto repeat; + goto retry; } return bio; } -- cgit v1.2.3-70-g09d2 From 3cd8a23948b29301f8f67b8d70c5c18fabbc05e1 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Mon, 10 Dec 2012 09:26:05 +0900 Subject: f2fs: cleanup the f2fs_bio_alloc routine Do cleanup more for better code readability. - Change the parameter set of f2fs_bio_alloc() This function should allocate a bio only since it is not something like f2fs_bio_init(). Instead, the caller should initialize the allocated bio. - Introduce SECTOR_FROM_BLOCK This macro translates a block address to its sector address. 
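As a rough worked example of the new macro (assuming the common 4KB block size, i.e. log_blocksize == 12, and 512-byte sectors, i.e. F2FS_LOG_SECTOR_SIZE == 9), the translation is just a left shift by 3:

	/* SECTOR_FROM_BLOCK(sbi, 100) == 100 << (12 - 9) == 800 */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);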
Signed-off-by: Jaegeuk Kim Reviewed-by: Namjae Jeon --- fs/f2fs/data.c | 5 +++-- fs/f2fs/f2fs.h | 2 +- fs/f2fs/segment.c | 33 ++++++++++++++++++--------------- fs/f2fs/segment.h | 3 +++ 4 files changed, 25 insertions(+), 18 deletions(-) (limited to 'fs/f2fs/segment.c') diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 444c2a6fbaa..655aeabc1dd 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -343,11 +343,12 @@ int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page, down_read(&sbi->bio_sem); /* Allocate a new bio */ - bio = f2fs_bio_alloc(bdev, blk_addr << (sbi->log_blocksize - 9), - 1, GFP_NOFS | __GFP_HIGH); + bio = f2fs_bio_alloc(bdev, 1); /* Initialize the bio */ + bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); bio->bi_end_io = read_end_io; + if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { kfree(bio->bi_private); bio_put(bio); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 8c3f1ef6ace..2bce3a62c0b 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -924,7 +924,7 @@ void clear_prefree_segments(struct f2fs_sb_info *); int npages_for_summary_flush(struct f2fs_sb_info *); void allocate_new_segments(struct f2fs_sb_info *); struct page *get_sum_page(struct f2fs_sb_info *, unsigned int); -struct bio *f2fs_bio_alloc(struct block_device *, sector_t, int, gfp_t); +struct bio *f2fs_bio_alloc(struct block_device *, int); void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync); int write_meta_page(struct f2fs_sb_info *, struct page *, struct writeback_control *); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 8894b399770..1b26e4ea101 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -643,23 +643,21 @@ static void f2fs_end_io_write(struct bio *bio, int err) bio_put(bio); } -struct bio *f2fs_bio_alloc(struct block_device *bdev, sector_t first_sector, - int nr_vecs, gfp_t gfp_flags) +struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages) { struct bio *bio; - - /* allocate new bio */ - bio = bio_alloc(gfp_flags, nr_vecs); - - bio->bi_bdev = bdev; - bio->bi_sector = first_sector; + struct bio_private *priv; retry: - bio->bi_private = kmalloc(sizeof(struct bio_private), - GFP_NOFS | __GFP_HIGH); - if (!bio->bi_private) { + priv = kmalloc(sizeof(struct bio_private), GFP_NOFS); + if (!priv) { cond_resched(); goto retry; } + + /* No failure on bio allocation */ + bio = bio_alloc(GFP_NOIO, npages); + bio->bi_bdev = bdev; + bio->bi_private = priv; return bio; } @@ -711,10 +709,15 @@ static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page, if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1) do_submit_bio(sbi, type, false); alloc_new: - if (sbi->bio[type] == NULL) - sbi->bio[type] = f2fs_bio_alloc(bdev, - blk_addr << (sbi->log_blocksize - 9), - bio_get_nr_vecs(bdev), GFP_NOFS | __GFP_HIGH); + if (sbi->bio[type] == NULL) { + sbi->bio[type] = f2fs_bio_alloc(bdev, bio_get_nr_vecs(bdev)); + sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); + /* + * The end_io will be assigned at the sumbission phase. + * Until then, let bio_add_page() merge consecutive IOs as much + * as possible. 
+ */ + } if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 2c445f8947c..0948405af6f 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -82,6 +82,9 @@ (BITS_TO_LONGS(nr) * sizeof(unsigned long)) #define TOTAL_SEGS(sbi) (SM_I(sbi)->main_segments) +#define SECTOR_FROM_BLOCK(sbi, blk_addr) \ + (blk_addr << ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE)) + /* during checkpoint, bio_private is used to synchronize the last bio */ struct bio_private { struct f2fs_sb_info *sbi; -- cgit v1.2.3-70-g09d2 From dfb7c0ceab57fee7618f4c9c31c5a89254e8530a Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 12 Dec 2012 09:47:39 +0900 Subject: f2fs: remove set_page_dirty for atomic f2fs_end_io_write We should guarantee not to do *scheduling while atomic*. I found, in atomic f2fs_end_io_write(), there is a set_page_dirty() call to deal with IO errors. But, set_page_dirty() calls: -> f2fs_set_data_page_dirty() -> set_dirty_dir_page() -> cond_resched() which results in scheduling. In order to avoid this, I'd like to remove simply set_page_dirty(), since the page is already marked as ERROR and f2fs will be operated as the read-only mode as well. So, there is no recovery issue with this. Signed-off-by: Jaegeuk Kim --- fs/f2fs/segment.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs/f2fs/segment.c') diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 1b26e4ea101..8bc1b6fdcf7 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -631,7 +631,6 @@ static void f2fs_end_io_write(struct bio *bio, int err) if (page->mapping) set_bit(AS_EIO, &page->mapping->flags); set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG); - set_page_dirty(page); } end_page_writeback(page); dec_page_count(p->sbi, F2FS_WRITEBACK); -- cgit v1.2.3-70-g09d2 From 690e4a3ead5f88fc95f7650816d1376aa2e79db5 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 19 Dec 2012 22:19:30 +0100 Subject: f2fs: add missing #include MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit m68k allmodconfig: fs/f2fs/data.c: In function ‘read_end_io’: fs/f2fs/data.c:311: error: implicit declaration of function ‘prefetchw’ fs/f2fs/segment.c: In function ‘f2fs_end_io_write’: fs/f2fs/segment.c:628: error: implicit declaration of function ‘prefetchw’ Signed-off-by: Geert Uytterhoeven Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 1 + fs/f2fs/segment.c | 1 + 2 files changed, 2 insertions(+) (limited to 'fs/f2fs/segment.c') diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 655aeabc1dd..3aa5ce7cab8 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "f2fs.h" #include "node.h" diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 8bc1b6fdcf7..ca7b5ffb09d 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include "f2fs.h" -- cgit v1.2.3-70-g09d2 From 12a67146e35ba1d04ac4a5430eaaa8790158d60e Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Fri, 21 Dec 2012 11:47:05 +0900 Subject: f2fs: return a default value for non-void function This patch resolves a build warning reported by kbuild test robot. 
" fs/f2fs/segment.c: In function '__get_segment_type': fs/f2fs/segment.c:806:1: warning: control reaches end of non-void function [-Wreturn-type] " Signed-off-by: Jaegeuk Kim --- fs/f2fs/segment.c | 7 +++---- fs/f2fs/super.c | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) (limited to 'fs/f2fs/segment.c') diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index ca7b5ffb09d..fe2cc0bdc11 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -791,11 +791,10 @@ static int __get_segment_type(struct page *page, enum page_type p_type) return __get_segment_type_2(page, p_type); case 4: return __get_segment_type_4(page, p_type); - case 6: - return __get_segment_type_6(page, p_type); - default: - BUG(); } + /* NR_CURSEG_TYPE(6) logs by default */ + BUG_ON(sbi->active_logs != NR_CURSEG_TYPE); + return __get_segment_type_6(page, p_type); } static void do_write_page(struct f2fs_sb_info *sbi, struct page *page, diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 50240d28ca2..cf0ffb80065 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -302,7 +302,7 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options) case Opt_active_logs: if (args->from && match_int(args, &arg)) return -EINVAL; - if (arg != 2 && arg != 4 && arg != 6) + if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE) return -EINVAL; sbi->active_logs = arg; break; -- cgit v1.2.3-70-g09d2 From 029cd28c1f739bbfc5105035696d5f1f4e45d161 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Fri, 21 Dec 2012 17:20:21 +0900 Subject: f2fs: fix equation of has_not_enough_free_secs() Practically, has_not_enough_free_secs() should calculate with the numbers of current node and directory data blocks together. Actually the equation was implemented in need_to_flush(). So, this patch removes need_flush() and moves the equation into has_not_enough_free_secs(). Signed-off-by: Jaegeuk Kim --- fs/f2fs/segment.c | 36 ++---------------------------------- fs/f2fs/segment.h | 15 ++++++++++++++- 2 files changed, 16 insertions(+), 35 deletions(-) (limited to 'fs/f2fs/segment.c') diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index fe2cc0bdc11..66f5e82ec32 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -19,48 +19,16 @@ #include "segment.h" #include "node.h" -static int need_to_flush(struct f2fs_sb_info *sbi) -{ - unsigned int pages_per_sec = (1 << sbi->log_blocks_per_seg) * - sbi->segs_per_sec; - int node_secs = ((get_pages(sbi, F2FS_DIRTY_NODES) + pages_per_sec - 1) - >> sbi->log_blocks_per_seg) / sbi->segs_per_sec; - int dent_secs = ((get_pages(sbi, F2FS_DIRTY_DENTS) + pages_per_sec - 1) - >> sbi->log_blocks_per_seg) / sbi->segs_per_sec; - - if (sbi->por_doing) - return 0; - - if (free_sections(sbi) <= (node_secs + 2 * dent_secs + - reserved_sections(sbi))) - return 1; - return 0; -} - /* * This function balances dirty node and dentry pages. * In addition, it controls garbage collection. */ void f2fs_balance_fs(struct f2fs_sb_info *sbi) { - struct writeback_control wbc = { - .sync_mode = WB_SYNC_ALL, - .nr_to_write = LONG_MAX, - .for_reclaim = 0, - }; - - if (sbi->por_doing) - return; - /* - * We should do checkpoint when there are so many dirty node pages - * with enough free segments. After then, we should do GC. + * We should do GC or end up with checkpoint, if there are so many dirty + * dir/node pages without enough free segments. 
*/ - if (need_to_flush(sbi)) { - sync_dirty_dir_inodes(sbi); - sync_node_pages(sbi, 0, &wbc); - } - if (has_not_enough_free_secs(sbi)) { mutex_lock(&sbi->gc_mutex); f2fs_gc(sbi, 1); diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 0948405af6f..66a288a52fd 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -459,7 +459,20 @@ static inline int get_ssr_segment(struct f2fs_sb_info *sbi, int type) static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi) { - return free_sections(sbi) <= reserved_sections(sbi); + unsigned int pages_per_sec = (1 << sbi->log_blocks_per_seg) * + sbi->segs_per_sec; + int node_secs = ((get_pages(sbi, F2FS_DIRTY_NODES) + pages_per_sec - 1) + >> sbi->log_blocks_per_seg) / sbi->segs_per_sec; + int dent_secs = ((get_pages(sbi, F2FS_DIRTY_DENTS) + pages_per_sec - 1) + >> sbi->log_blocks_per_seg) / sbi->segs_per_sec; + + if (sbi->por_doing) + return false; + + if (free_sections(sbi) <= (node_secs + 2 * dent_secs + + reserved_sections(sbi))) + return true; + return false; } static inline int utilization(struct f2fs_sb_info *sbi) -- cgit v1.2.3-70-g09d2 From 344324f10fad05e40b1047c5e09ebbc77e43c24f Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Sat, 22 Dec 2012 12:09:58 +0900 Subject: f2fs: remove unneeded initialization of nr_dirty in dirty_seglist_info Since, the memory for the object of dirty_seglist_info is allocated using kzalloc - which returns zeroed out memory. So, there is no need to initialize the nr_dirty values with zeroes. Signed-off-by: Namjae Jeon Signed-off-by: Amit Sahrawat Signed-off-by: Jaegeuk Kim --- fs/f2fs/segment.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs/f2fs/segment.c') diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 66f5e82ec32..de6240922b0 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1575,7 +1575,6 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi) for (i = 0; i < NR_DIRTY_TYPE; i++) { dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL); - dirty_i->nr_dirty[i] = 0; if (!dirty_i->dirty_segmap[i]) return -ENOMEM; } -- cgit v1.2.3-70-g09d2