author     Hugh Dickins <hughd@google.com>                  2013-02-22 16:35:06 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-02-23 17:50:19 -0800
commit     8aafa6a485ae77ce4a49eb1280f3d2c6074a03fb (patch)
tree       b267e3bc9602e2a7975b7973036beaa79c0ab148
parent     ee0ea59cf9ea95369d686bdc4b3d8c027e2b99cd (diff)
ksm: get_ksm_page locked
In some places where get_ksm_page() is used, we need the page to be locked.

When KSM migration is fully enabled, we shall want that to make sure that the page just acquired cannot be migrated beneath us (raised page count is only effective when there is serialization to make sure migration notices).  Whereas when navigating through the stable tree, we certainly do not want to lock each node (raised page count is enough to guarantee the memcmps, even if page is migrated to another node).

Since we're about to add another use case, add the locked argument to get_ksm_page() now.

Hmm, what's that rcu_read_lock() about?  Complete misunderstanding, I really got the wrong end of the stick on that!  There's a configuration in which page_cache_get_speculative() can do something cheaper than get_page_unless_zero(), relying on its caller's rcu_read_lock() to have disabled preemption for it.  There's no need for rcu_read_lock() around get_page_unless_zero() (and mapping checks) here.  Cut out that silliness before making this any harder to understand.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Petr Holasek <pholasek@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Izik Eidus <izik.eidus@ravellosystems.com>
Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
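The two modes this establishes, condensed into a short sketch (get_ksm_page(), unlock_page() and put_page() are the real mm/ksm.c and kernel functions; the surrounding caller is illustrative only):

/* Stable-tree walk: the raised page count alone keeps the memcmps
 * valid, even if the page migrates to another node meanwhile. */
tree_page = get_ksm_page(stable_node, false);

/* About to modify the stable node: take the page lock too, so the
 * page cannot be migrated between our lookup and our update. */
page = get_ksm_page(stable_node, true);
if (page) {
	/* ... modify under page lock ... */
	unlock_page(page);
	put_page(page);
}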
-rw-r--r--  mm/ksm.c  23
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 70daa35266e..e02430fb26f 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -514,15 +514,14 @@ static void remove_node_from_stable_tree(struct stable_node *stable_node)
* but this is different - made simpler by ksm_thread_mutex being held, but
* interesting for assuming that no other use of the struct page could ever
* put our expected_mapping into page->mapping (or a field of the union which
- * coincides with page->mapping). The RCU calls are not for KSM at all, but
- * to keep the page_count protocol described with page_cache_get_speculative.
+ * coincides with page->mapping).
*
* Note: it is possible that get_ksm_page() will return NULL one moment,
* then page the next, if the page is in between page_freeze_refs() and
* page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
* is on its way to being freed; but it is an anomaly to bear in mind.
*/
-static struct page *get_ksm_page(struct stable_node *stable_node)
+static struct page *get_ksm_page(struct stable_node *stable_node, bool locked)
{
struct page *page;
void *expected_mapping;
@@ -530,7 +529,6 @@ static struct page *get_ksm_page(struct stable_node *stable_node)
page = pfn_to_page(stable_node->kpfn);
expected_mapping = (void *)stable_node +
(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
- rcu_read_lock();
if (page->mapping != expected_mapping)
goto stale;
if (!get_page_unless_zero(page))
@@ -539,10 +537,16 @@ static struct page *get_ksm_page(struct stable_node *stable_node)
put_page(page);
goto stale;
}
- rcu_read_unlock();
+ if (locked) {
+ lock_page(page);
+ if (page->mapping != expected_mapping) {
+ unlock_page(page);
+ put_page(page);
+ goto stale;
+ }
+ }
return page;
stale:
- rcu_read_unlock();
remove_node_from_stable_tree(stable_node);
return NULL;
}
@@ -558,11 +562,10 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
struct page *page;

stable_node = rmap_item->head;
- page = get_ksm_page(stable_node);
+ page = get_ksm_page(stable_node, true);
if (!page)
goto out;

- lock_page(page);
hlist_del(&rmap_item->hlist);
unlock_page(page);
put_page(page);
@@ -1042,7 +1045,7 @@ static struct page *stable_tree_search(struct page *page)

cond_resched();
stable_node = rb_entry(node, struct stable_node, node);
- tree_page = get_ksm_page(stable_node);
+ tree_page = get_ksm_page(stable_node, false);
if (!tree_page)
return NULL;

@@ -1086,7 +1089,7 @@ static struct stable_node *stable_tree_insert(struct page *kpage)

cond_resched();
stable_node = rb_entry(*new, struct stable_node, node);
- tree_page = get_ksm_page(stable_node);
+ tree_page = get_ksm_page(stable_node, false);
if (!tree_page)
return NULL;
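A note on the page_count protocol the deleted comment alluded to: get_page_unless_zero() is just an atomic increment-if-nonzero on the page count, so it needs no rcu_read_lock() of its own.  Roughly, simplified from the include/linux/mm.h of that era:

static inline int get_page_unless_zero(struct page *page)
{
	/* Take a reference only if someone else still holds one;
	 * fails (returns 0) once the count has been frozen to zero,
	 * i.e. between page_freeze_refs() and page_unfreeze_refs(). */
	return atomic_inc_not_zero(&page->_count);
}

It is only page_cache_get_speculative() that, in one non-SMP configuration, substitutes a cheaper unconditional increment and relies on the caller's rcu_read_lock() having disabled preemption — the protocol the removed rcu_read_lock()/rcu_read_unlock() pair was mistakenly imitating.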