author    Jan Kara <jack@suse.cz>  2007-05-06 14:49:25 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-07 12:12:52 -0700
commit    ec0f16372277052a29a6c17527c6cae5e898b3fd (patch)
tree      35636edac6ed01baf301f3aca96f090caae82c9d
parent    b813e931b4c8235bb42e301096ea97dbdee3e8fe (diff)
readahead: improve heuristic detecting sequential reads
Introduce ra.offset and store in it the offset where the previous read ended. This way we can detect whether reads are really sequential (and thus we should not mark the page as accessed repeatedly) or whether they are random and just happen to be in the same page (and the page should really be marked accessed again).

Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: WU Fengguang <wfg@mail.ustc.edu.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/fs.h  1
-rw-r--r--  mm/filemap.c        9
-rw-r--r--  mm/readahead.c      3
3 files changed, 10 insertions(+), 3 deletions(-)
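To make the heuristic above concrete, here is a minimal userspace sketch (not the kernel code itself) of the decision the patch adds to do_generic_mapping_read(): a page is re-marked accessed unless the read resumes exactly where the previous one ended. The struct and helper names below are illustrative only.

#include <stdio.h>

/* Illustrative stand-in for the two pieces of state the heuristic uses. */
struct read_state {
	unsigned long prev_index;	/* page index touched by the previous read */
	unsigned int prev_offset;	/* offset in that page where it ended */
};

/*
 * A strictly sequential read continues at the same page and the same
 * offset where the previous read stopped; only then is the repeated
 * mark_page_accessed() call skipped.
 */
static int should_mark_accessed(const struct read_state *rs,
				unsigned long index, unsigned int offset)
{
	return rs->prev_index != index || rs->prev_offset != offset;
}

int main(void)
{
	struct read_state rs = { .prev_index = 7, .prev_offset = 512 };

	/* Sequential continuation inside page 7: not marked again. */
	printf("%d\n", should_mark_accessed(&rs, 7, 512));	/* prints 0 */
	/* Random read that merely lands in the same page: marked again. */
	printf("%d\n", should_mark_accessed(&rs, 7, 2048));	/* prints 1 */
	return 0;
}

Before this patch the check was prev_index != index alone, so a random read that happened to fall within the previously read page was never re-marked accessed, even though, as the commit message argues, it should be.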
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7c0077f06e2..0949e243b8b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -702,6 +702,7 @@ struct file_ra_state {
 	unsigned long ra_pages;		/* Maximum readahead window */
 	unsigned long mmap_hit;		/* Cache hit stat for mmap accesses */
 	unsigned long mmap_miss;	/* Cache miss stat for mmap accesses */
+	unsigned int offset;		/* Offset where last read() ended in a page */
 };
 #define RA_FLAG_MISS 0x01	/* a cache miss occured against this file */
 #define RA_FLAG_INCACHE 0x02	/* file is already in cache */
diff --git a/mm/filemap.c b/mm/filemap.c
index cbea95a2528..07f5b77114a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -868,6 +868,7 @@ void do_generic_mapping_read(struct address_space *mapping,
 	unsigned long last_index;
 	unsigned long next_index;
 	unsigned long prev_index;
+	unsigned int prev_offset;
 	loff_t isize;
 	struct page *cached_page;
 	int error;
@@ -877,6 +878,7 @@ void do_generic_mapping_read(struct address_space *mapping,
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	next_index = index;
 	prev_index = ra.prev_page;
+	prev_offset = ra.offset;
 	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
 	offset = *ppos & ~PAGE_CACHE_MASK;
@@ -924,10 +926,10 @@ page_ok:
 			flush_dcache_page(page);

 		/*
-		 * When (part of) the same page is read multiple times
-		 * in succession, only mark it as accessed the first time.
+		 * When a sequential read accesses a page several times,
+		 * only mark it as accessed the first time.
 		 */
-		if (prev_index != index)
+		if (prev_index != index || offset != prev_offset)
 			mark_page_accessed(page);
 		prev_index = index;
@@ -945,6 +947,7 @@ page_ok:
 		offset += ret;
 		index += offset >> PAGE_CACHE_SHIFT;
 		offset &= ~PAGE_CACHE_MASK;
+		prev_offset = ra.offset = offset;

 		page_cache_release(page);
 		if (ret == nr && desc->count)
diff --git a/mm/readahead.c b/mm/readahead.c
index 93d9ee692fd..0a6fed9d365 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -207,6 +207,8 @@ out:
  * If page_cache_readahead sees that it is again being called for
  * a page which it just looked at, it can return immediately without
  * making any state changes.
+ * offset:      Offset in the prev_page where the last read ended - used for
+ *              detection of sequential file reading.
  * ahead_start,
  * ahead_size:  Together, these form the "ahead window".
  * ra_pages:    The externally controlled max readahead for this fd.
@@ -473,6 +475,7 @@ page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
 	/* Note that prev_page == -1 if it is a first read */
 	sequential = (offset == ra->prev_page + 1);
 	ra->prev_page = offset;
+	ra->offset = 0;

 	max = get_max_readahead(ra);
 	newsize = min(req_size, max);
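As a follow-up illustration, a hedged sketch of how the two bookkeeping layers cooperate after this patch: page_cache_readahead() judges sequentiality at page granularity (offset == ra->prev_page + 1) and now clears ra->offset when it moves to a new page, while do_generic_mapping_read() later records the in-page offset at which the copy to userspace ended. The function and struct names below are illustrative, not the kernel's.

#include <stdio.h>

/* Illustrative mirror of the file_ra_state fields involved here. */
struct ra_sketch {
	unsigned long prev_page;	/* last page index readahead inspected */
	unsigned int offset;		/* where the last read() ended inside it */
};

/* Page-granularity sequentiality check, in the spirit of page_cache_readahead(). */
static int readahead_is_sequential(struct ra_sketch *ra, unsigned long page)
{
	int sequential = (page == ra->prev_page + 1);

	ra->prev_page = page;
	ra->offset = 0;	/* prev_page changed, so the old in-page offset no longer applies */
	return sequential;
}

/* Byte-granularity bookkeeping done after data is copied, in the spirit of
 * do_generic_mapping_read(). */
static void record_read_end(struct ra_sketch *ra, unsigned int end_offset)
{
	ra->offset = end_offset;
}

int main(void)
{
	struct ra_sketch ra = { .prev_page = 3, .offset = 0 };

	printf("%d\n", readahead_is_sequential(&ra, 4));	/* prints 1: next page */
	record_read_end(&ra, 1024);
	printf("%d\n", readahead_is_sequential(&ra, 9));	/* prints 0: a seek */
	return 0;
}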