author	Hugh Dickins <hugh@veritas.com>	2005-10-29 18:16:23 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-29 21:40:40 -0700
commit	c74df32c724a1652ad8399b4891bb02c9d43743a (patch)
tree	5a79d56fdcf7dc2053a277dbf6db7c3b339e9659 /fs
parent	1bb3630e89cb8a7b3d3807629c20c5bad88290ff (diff)
[PATCH] mm: ptd_alloc take ptlock
Second step in pushing down the page_table_lock.  Remove the temporary bridging hack from __pud_alloc, __pmd_alloc, __pte_alloc: expect callers not to hold page_table_lock, whether it's on init_mm or a user mm; take page_table_lock internally to check if a racing task already allocated.

Convert their callers from common code.  But avoid coming back to change them again later: instead of moving the spin_lock(&mm->page_table_lock) down, switch over to the new macros pte_alloc_map_lock and pte_unmap_unlock, which encapsulate the mapping+locking and unlocking+unmapping together, and in the end may use alternatives to the mm page_table_lock itself.

These callers all hold mmap_sem (some exclusively, some not), so at no level can a page table be whipped away from beneath them; and pte_alloc uses the "atomic" pmd_present to test whether it needs to allocate.  It appears that on all arches we can safely descend without page_table_lock.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
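[Editor's note: for orientation, the following is a simplified sketch of roughly what the two macros named above encapsulate at this point in the series, reconstructed from the include/linux/mm.h definitions rather than quoted from this patch.  The pte_offset_map_lock helper shown here comes from the same series; the lock returned is still the per-mm page_table_lock at this stage, which is exactly the point the message makes about "alternatives ... in the end".]

/*
 * Sketch, not verbatim kernel source: map the pte page, take the
 * page-table lock, and hand the lock back to the caller via *ptlp.
 * At this point in the series the lock is still mm->page_table_lock.
 */
#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = &(mm)->page_table_lock;	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

/*
 * Allocate the pte page if pmd_present says it is missing, then map
 * and lock as above; NULL if the allocation fails.
 */
#define pte_alloc_map_lock(mm, pmd, address, ptlp)			\
	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address)) ? \
		NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

/* Undo the above in reverse order: drop the lock, then unmap. */
#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)

Callers such as install_arg_page() below then pair one pte_alloc_map_lock() with one pte_unmap_unlock() on every path, instead of juggling pte_alloc_map/pte_unmap and spin_lock/spin_unlock separately.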
Diffstat (limited to 'fs')
-rw-r--r--	fs/exec.c	14
1 files changed, 5 insertions, 9 deletions
diff --git a/fs/exec.c b/fs/exec.c
index 9bb55c8cf22..ba73797eb4c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -309,25 +309,24 @@ void install_arg_page(struct vm_area_struct *vma,
 	pud_t * pud;
 	pmd_t * pmd;
 	pte_t * pte;
+	spinlock_t *ptl;
 
 	if (unlikely(anon_vma_prepare(vma)))
-		goto out_sig;
+		goto out;
 
 	flush_dcache_page(page);
 	pgd = pgd_offset(mm, address);
-
-	spin_lock(&mm->page_table_lock);
 	pud = pud_alloc(mm, pgd, address);
 	if (!pud)
 		goto out;
 	pmd = pmd_alloc(mm, pud, address);
 	if (!pmd)
 		goto out;
-	pte = pte_alloc_map(mm, pmd, address);
+	pte = pte_alloc_map_lock(mm, pmd, address, &ptl);
 	if (!pte)
 		goto out;
 	if (!pte_none(*pte)) {
-		pte_unmap(pte);
+		pte_unmap_unlock(pte, ptl);
 		goto out;
 	}
 	inc_mm_counter(mm, anon_rss);
@@ -335,14 +334,11 @@ void install_arg_page(struct vm_area_struct *vma,
 	set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
 					page, vma->vm_page_prot))));
 	page_add_anon_rmap(page, vma, address);
-	pte_unmap(pte);
-	spin_unlock(&mm->page_table_lock);
+	pte_unmap_unlock(pte, ptl);
 
 	/* no need for flush_tlb */
 	return;
 out:
-	spin_unlock(&mm->page_table_lock);
-out_sig:
 	__free_page(page);
 	force_sig(SIGKILL, current);
 }