Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r-- | mm/page-writeback.c | 17
1 files changed, 12 insertions, 5 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 886ea0d5a13..e6248271801 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -918,6 +918,8 @@ int clear_page_dirty_for_io(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
+	BUG_ON(!PageLocked(page));
+
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		/*
 		 * Yes, Virginia, this is indeed insane.
@@ -943,14 +945,19 @@ int clear_page_dirty_for_io(struct page *page)
 		 * We basically use the page "master dirty bit"
 		 * as a serialization point for all the different
 		 * threads doing their things.
-		 *
-		 * FIXME! We still have a race here: if somebody
-		 * adds the page back to the page tables in
-		 * between the "page_mkclean()" and the "TestClearPageDirty()",
-		 * we might have it mapped without the dirty bit set.
 		 */
 		if (page_mkclean(page))
 			set_page_dirty(page);
+		/*
+		 * We carefully synchronise fault handlers against
+		 * installing a dirty pte and marking the page dirty
+		 * at this point. We do this by having them hold the
+		 * page lock at some point after installing their
+		 * pte, but before marking the page dirty.
+		 * Pages are always locked coming in here, so we get
+		 * the desired exclusion. See mm/memory.c:do_wp_page()
+		 * for more comments.
+		 */
 		if (TestClearPageDirty(page)) {
 			dec_zone_page_state(page, NR_FILE_DIRTY);
 			return 1;
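The new comment claims that the fault path's ordering (install the pte, then take the page lock, only then mark the page dirty), combined with clear_page_dirty_for_io() running with the page locked, closes the race the removed FIXME described. Below is a minimal userspace sketch, not kernel code, that models that protocol: a pthread mutex stands in for the page lock, and two booleans stand in for "a dirty pte maps the page" and the master page dirty bit. All names here (fake_page, fault_make_writable, writeback_clean) are invented for illustration, and the fault path is simplified to hold the lock across the dirtying step, whereas the real do_wp_page() only needs to have held the page lock at some point between installing the pte and marking the page dirty.

/*
 * Minimal userspace model of the ordering described above -- not kernel
 * code.  Names and structure are invented for this sketch.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	pthread_mutex_t lock;	/* models the page lock (PG_locked)     */
	bool pte_dirty;		/* models a writable, dirty pte mapping  */
	bool page_dirty;	/* models the "master" page dirty bit    */
};

/* Fault-handler side: pte first, then the lock, then the dirty bit. */
static void fault_make_writable(struct fake_page *p)
{
	p->pte_dirty = true;		/* install the (dirty) pte first */
	pthread_mutex_lock(&p->lock);	/* then take the page lock       */
	p->page_dirty = true;		/* and only then dirty the page  */
	pthread_mutex_unlock(&p->lock);
}

/*
 * Writeback side, shaped like clear_page_dirty_for_io().  The real caller
 * arrives with the page already locked, which is what the new
 * BUG_ON(!PageLocked(page)) asserts; here the function takes the lock
 * itself so the demo stays self-contained.
 */
static int writeback_clean(struct fake_page *p)
{
	int was_dirty;

	pthread_mutex_lock(&p->lock);
	if (p->pte_dirty) {		/* stands in for page_mkclean()   */
		p->pte_dirty = false;
		p->page_dirty = true;	/* stands in for set_page_dirty() */
	}
	was_dirty = p->page_dirty;	/* stands in for TestClearPageDirty() */
	p->page_dirty = false;
	pthread_mutex_unlock(&p->lock);
	return was_dirty;
}

int main(void)
{
	struct fake_page p = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.pte_dirty = false,
		.page_dirty = false,
	};

	fault_make_writable(&p);
	printf("first clean pass saw dirty:  %d\n", writeback_clean(&p)); /* 1 */
	printf("second clean pass saw dirty: %d\n", writeback_clean(&p)); /* 0 */
	return 0;
}

In this simplified model a concurrent fault cannot mark the page dirty while the writeback side holds the lock between its mkclean and test-and-clear steps; it blocks and re-dirties the page afterwards, so the dirty bit is never silently lost, which is the exclusion the removed FIXME worried about. Build with cc -pthread.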