author	Rusty Russell <rusty@rustcorp.com.au>	2007-07-19 01:48:08 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-19 10:04:44 -0700
commit	cf914a7d656e62b9dd3e0dffe4f62b953ae6048d (patch)
tree	baf7e79de006ca80eac426d2d1be4c52f5f19624 /mm
parent	fe3cba17c49471e99d3421e675fc8b3deaaf0b70 (diff)
readahead: split ondemand readahead interface into two functions
Split ondemand readahead interface into two functions.  I think this makes
it a little clearer for non-readahead experts (like Rusty).

Internally they both call ondemand_readahead(), but the page argument is
changed to an obvious boolean flag.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
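The matching declarations live in include/linux/mm.h, which falls outside this
'mm'-limited diffstat; judging from the definitions in mm/readahead.c below,
the two new entry points would read roughly as follows (a sketch for
orientation, not the actual header hunk):

	void page_cache_sync_readahead(struct address_space *mapping,
				       struct file_ra_state *ra, struct file *filp,
				       pgoff_t offset, unsigned long req_size);

	void page_cache_async_readahead(struct address_space *mapping,
					struct file_ra_state *ra, struct file *filp,
					struct page *page, pgoff_t offset,
					unsigned long req_size);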
Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	10
-rw-r--r--	mm/readahead.c	97
2 files changed, 64 insertions(+), 43 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 5eb0a6b9d60..49a6fe375d0 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -894,15 +894,15 @@ void do_generic_mapping_read(struct address_space *mapping,
find_page:
page = find_get_page(mapping, index);
if (!page) {
- page_cache_readahead_ondemand(mapping,
- &ra, filp, page,
+ page_cache_sync_readahead(mapping,
+ &ra, filp,
index, last_index - index);
page = find_get_page(mapping, index);
if (unlikely(page == NULL))
goto no_cached_page;
}
if (PageReadahead(page)) {
- page_cache_readahead_ondemand(mapping,
+ page_cache_async_readahead(mapping,
&ra, filp, page,
index, last_index - index);
}
@@ -1348,14 +1348,14 @@ retry_find:
*/
if (VM_SequentialReadHint(vma)) {
if (!page) {
- page_cache_readahead_ondemand(mapping, ra, file, page,
+ page_cache_sync_readahead(mapping, ra, file,
vmf->pgoff, 1);
page = find_lock_page(mapping, vmf->pgoff);
if (!page)
goto no_cached_page;
}
if (PageReadahead(page)) {
- page_cache_readahead_ondemand(mapping, ra, file, page,
+ page_cache_async_readahead(mapping, ra, file, page,
vmf->pgoff, 1);
}
}
diff --git a/mm/readahead.c b/mm/readahead.c
index 205a4a43151..3d262bb738a 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -359,7 +359,7 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
static unsigned long
ondemand_readahead(struct address_space *mapping,
struct file_ra_state *ra, struct file *filp,
- struct page *page, pgoff_t offset,
+ bool hit_readahead_marker, pgoff_t offset,
unsigned long req_size)
{
unsigned long max; /* max readahead pages */
@@ -387,7 +387,7 @@ ondemand_readahead(struct address_space *mapping,
* Standalone, small read.
* Read as is, and do not pollute the readahead state.
*/
- if (!page && !sequential) {
+ if (!hit_readahead_marker && !sequential) {
return __do_page_cache_readahead(mapping, filp,
offset, req_size, 0);
}
@@ -408,7 +408,7 @@ ondemand_readahead(struct address_space *mapping,
* E.g. interleaved reads.
* Not knowing its readahead pos/size, bet on the minimal possible one.
*/
- if (page) {
+ if (hit_readahead_marker) {
ra_index++;
ra_size = min(4 * ra_size, max);
}
@@ -421,50 +421,71 @@ fill_ra:
}
/**
- * page_cache_readahead_ondemand - generic file readahead
+ * page_cache_sync_readahead - generic file readahead
* @mapping: address_space which holds the pagecache and I/O vectors
* @ra: file_ra_state which holds the readahead state
* @filp: passed on to ->readpage() and ->readpages()
- * @page: the page at @offset, or NULL if non-present
- * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
+ * @offset: start offset into @mapping, in pagecache page-sized units
* @req_size: hint: total size of the read which the caller is performing in
- * PAGE_CACHE_SIZE units
+ * pagecache pages
*
- * page_cache_readahead_ondemand() is the entry point of readahead logic.
- * This function should be called when it is time to perform readahead:
- * 1) @page == NULL
- * A cache miss happened, time for synchronous readahead.
- * 2) @page != NULL && PageReadahead(@page)
- * A look-ahead hit occured, time for asynchronous readahead.
+ * page_cache_sync_readahead() should be called when a cache miss happened:
+ * it will submit the read. The readahead logic may decide to piggyback more
+ * pages onto the read request if access patterns suggest it will improve
+ * performance.
*/
-unsigned long
-page_cache_readahead_ondemand(struct address_space *mapping,
- struct file_ra_state *ra, struct file *filp,
- struct page *page, pgoff_t offset,
- unsigned long req_size)
+void page_cache_sync_readahead(struct address_space *mapping,
+ struct file_ra_state *ra, struct file *filp,
+ pgoff_t offset, unsigned long req_size)
{
/* no read-ahead */
if (!ra->ra_pages)
- return 0;
-
- if (page) {
- /*
- * It can be PG_reclaim.
- */
- if (PageWriteback(page))
- return 0;
-
- ClearPageReadahead(page);
-
- /*
- * Defer asynchronous read-ahead on IO congestion.
- */
- if (bdi_read_congested(mapping->backing_dev_info))
- return 0;
- }
+ return;
+
+ /* do read-ahead */
+ ondemand_readahead(mapping, ra, filp, false, offset, req_size);
+}
+EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
+
+/**
+ * page_cache_async_readahead - file readahead for marked pages
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @filp: passed on to ->readpage() and ->readpages()
+ * @page: the page at @offset which has the PG_readahead flag set
+ * @offset: start offset into @mapping, in pagecache page-sized units
+ * @req_size: hint: total size of the read which the caller is performing in
+ * pagecache pages
+ *
+ * page_cache_async_readahead() should be called when a page is used which
+ * has the PG_readahead flag: this is a marker to suggest that the application
+ * has used up enough of the readahead window that we should start pulling in
+ * more pages. */
+void
+page_cache_async_readahead(struct address_space *mapping,
+ struct file_ra_state *ra, struct file *filp,
+ struct page *page, pgoff_t offset,
+ unsigned long req_size)
+{
+ /* no read-ahead */
+ if (!ra->ra_pages)
+ return;
+
+ /*
+ * Same bit is used for PG_readahead and PG_reclaim.
+ */
+ if (PageWriteback(page))
+ return;
+
+ ClearPageReadahead(page);
+
+ /*
+ * Defer asynchronous read-ahead on IO congestion.
+ */
+ if (bdi_read_congested(mapping->backing_dev_info))
+ return;
/* do read-ahead */
- return ondemand_readahead(mapping, ra, filp, page,
- offset, req_size);
+ ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
-EXPORT_SYMBOL_GPL(page_cache_readahead_ondemand);
+EXPORT_SYMBOL_GPL(page_cache_async_readahead);
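For reference, the caller-side pattern that results from this split, condensed
from the do_generic_mapping_read() hunk above (a sketch for orientation, not a
literal excerpt of the new mm/filemap.c):

	page = find_get_page(mapping, index);
	if (!page) {
		/* Cache miss: start synchronous readahead, then retry. */
		page_cache_sync_readahead(mapping, &ra, filp,
					  index, last_index - index);
		page = find_get_page(mapping, index);
		if (unlikely(page == NULL))
			goto no_cached_page;
	}
	if (PageReadahead(page)) {
		/* PG_readahead marker hit: extend the window asynchronously. */
		page_cache_async_readahead(mapping, &ra, filp, page,
					   index, last_index - index);
	}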