| author | Borislav Petkov <bp@suse.de> | 2013-10-31 17:25:05 +0100 |
|---|---|---|
| committer | Matt Fleming <matt.fleming@intel.com> | 2013-11-02 11:09:28 +0000 |
| commit | 0bb8aeee7b73b21e09d3ea12f2120d974f70b669 | |
| tree | e25ffc5c37466f056d7818e0f73caed0ec32c9d3 | /arch/x86/mm/pageattr.c |
| parent | c6b6f363f7b24aa448994e3a65c4d5b3116acfcc | |
x86/mm/pageattr: Add a PUD error unwinding path
In case we encounter an error during the mapping of a region, we want to
unwind what we've established so far, in exactly the same way we did the
mapping. This is the PUD part, kept deliberately small for easier review.
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
Diffstat (limited to 'arch/x86/mm/pageattr.c')
-rw-r--r-- | arch/x86/mm/pageattr.c | 60 |
1 file changed, 58 insertions, 2 deletions
```diff
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 2a1308a8c07..1cbdbbc35b4 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -666,6 +666,51 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	return 0;
 }
 
+#define unmap_pmd_range(pud, start, pre_end) do {} while (0)
+
+static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
+{
+	pud_t *pud = pud_offset(pgd, start);
+
+	/*
+	 * Not on a GB page boundary?
+	 */
+	if (start & (PUD_SIZE - 1)) {
+		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
+		unsigned long pre_end = min_t(unsigned long, end, next_page);
+
+		unmap_pmd_range(pud, start, pre_end);
+
+		start = pre_end;
+		pud++;
+	}
+
+	/*
+	 * Try to unmap in 1G chunks?
+	 */
+	while (end - start >= PUD_SIZE) {
+
+		if (pud_large(*pud))
+			pud_clear(pud);
+		else
+			unmap_pmd_range(pud, start, start + PUD_SIZE);
+
+		start += PUD_SIZE;
+		pud++;
+	}
+
+	/*
+	 * 2M leftovers?
+	 */
+	if (start < end)
+		unmap_pmd_range(pud, start, end);
+
+	/*
+	 * No need to try to free the PUD page because we'll free it in
+	 * populate_pgd's error path
+	 */
+}
+
 static int alloc_pte_page(pmd_t *pmd)
 {
 	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
@@ -883,9 +928,20 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 	pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);
 
 	ret = populate_pud(cpa, addr, pgd_entry, pgprot);
-	if (ret < 0)
-		return ret;
+	if (ret < 0) {
+		unmap_pud_range(pgd_entry, addr,
+				addr + (cpa->numpages << PAGE_SHIFT));
 
+		if (allocd_pgd) {
+			/*
+			 * If I allocated this PUD page, I can just as well
+			 * free it in this error path.
+			 */
+			pgd_clear(pgd_entry);
+			free_page((unsigned long)pud);
+		}
+		return ret;
+	}
 	cpa->numpages = ret;
 	return 0;
 }
```
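The new helper walks the [start, end) range in the classic head/body/tail shape: an unaligned head is peeled off up to the next PUD boundary and handed down to the (for now stubbed-out) PMD-level helper, whole 1G entries are cleared in the middle, and any leftover tail goes back down to the PMD level as well. Below is a minimal, standalone userspace sketch of that range walk, assuming a hypothetical CHUNK_SIZE in place of PUD_SIZE and printf() in place of the real unmap calls; walk_range() is an invented name and this is an illustration of the pattern, not kernel code.

```c
#include <stdio.h>

#define CHUNK_SIZE 0x1000UL		/* hypothetical stand-in for PUD_SIZE */
#define CHUNK_MASK (~(CHUNK_SIZE - 1))	/* hypothetical stand-in for PUD_MASK */

/* Sketch of the head/body/tail walk unmap_pud_range() performs. */
static void walk_range(unsigned long start, unsigned long end)
{
	/* Head: not on a chunk boundary? Peel off up to the next one. */
	if (start & (CHUNK_SIZE - 1)) {
		unsigned long next_page = (start + CHUNK_SIZE) & CHUNK_MASK;
		unsigned long pre_end = end < next_page ? end : next_page;

		printf("partial head: %#lx-%#lx\n", start, pre_end);
		start = pre_end;
	}

	/* Body: clear full, aligned chunks. */
	while (end - start >= CHUNK_SIZE) {
		printf("full chunk:   %#lx-%#lx\n", start, start + CHUNK_SIZE);
		start += CHUNK_SIZE;
	}

	/* Tail: whatever is left over below the last boundary. */
	if (start < end)
		printf("partial tail: %#lx-%#lx\n", start, end);
}

int main(void)
{
	walk_range(0x0800, 0x5400);	/* unaligned at both ends */
	return 0;
}
```

Fed an unaligned range such as [0x800, 0x5400), this prints one partial head, four full chunks, and one partial tail, which are exactly the three cases unmap_pud_range() distinguishes with its if/while/if sequence.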