author     Arjan van de Ven <arjan@linux.intel.com>    2008-01-30 13:34:07 +0100
committer  Ingo Molnar <mingo@elte.hu>                 2008-01-30 13:34:07 +0100
commit     488fd99588bf23da951b524a806e44feaa1aa366
tree       a4ff5c8ddf932920fc2700e261f92c08b293d040
parent     5398f9854f60d670e8ef1ea08c0e0310f253eeb1
x86: fix pageattr-selftest
In Ingo's testing, he found a bug in the CPA selftest code: the test called change_page_attr_addr() on a range of memory, part of which was read-only and part of which was writable, when the only thing it wanted to change was the global bit.

What actually happened was that the selftest took the permissions of the first page, and change_page_attr_addr() then applied those permissions to the entire range. In the rodata case this left the pages after .rodata read-only as well, which made the kernel rather unhappy in many interesting ways.

This is just another example of how dangerous the cpa API is (was); this patch changes the test to use the incremental clear/set APIs instead, and changes the clear/set implementation to work one page at a time.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
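The sketch below is a minimal user-space model of the two behaviours, not kernel code: the flag values, the fake "page table" array and the helper names are all invented for illustration. It only shows why stamping the first page's permissions onto a mixed read-only/writable range corrupts it, while a per-page read-modify-write of a single bit does not.

/*
 * Minimal user-space sketch, NOT kernel code: FAKE_PAGE_RW, FAKE_PAGE_GLOBAL
 * and the helpers below are invented purely to illustrate the bug described
 * above and the per-page fix.
 */
#include <stdio.h>
#include <string.h>

#define FAKE_PAGE_RW		0x002UL
#define FAKE_PAGE_GLOBAL	0x100UL
#define NPAGES			4

/* Page 0 models .rodata (read-only); the following pages are writable. */
static const unsigned long initial[NPAGES] = {
	FAKE_PAGE_GLOBAL,
	FAKE_PAGE_RW | FAKE_PAGE_GLOBAL,
	FAKE_PAGE_RW | FAKE_PAGE_GLOBAL,
	FAKE_PAGE_RW | FAKE_PAGE_GLOBAL,
};

/* Old pattern: sample the first page's flags, stamp them on the whole range. */
static void clear_global_whole_range(unsigned long *pg)
{
	unsigned long prot = pg[0] & ~FAKE_PAGE_GLOBAL;
	int i;

	for (i = 0; i < NPAGES; i++)
		pg[i] = prot;			/* pages 1..3 silently lose RW */
}

/* New pattern: re-read every page and flip only the requested bit. */
static void clear_global_per_page(unsigned long *pg)
{
	int i;

	for (i = 0; i < NPAGES; i++)
		pg[i] &= ~FAKE_PAGE_GLOBAL;	/* RW/RO state is preserved */
}

static void dump(const char *tag, const unsigned long *pg)
{
	int i;

	printf("%-10s", tag);
	for (i = 0; i < NPAGES; i++)
		printf(" %s", (pg[i] & FAKE_PAGE_RW) ? "rw" : "ro");
	printf("\n");
}

int main(void)
{
	unsigned long broken[NPAGES], fixed[NPAGES];

	memcpy(broken, initial, sizeof(initial));
	memcpy(fixed, initial, sizeof(initial));

	clear_global_whole_range(broken);	/* behaviour before the patch */
	clear_global_per_page(fixed);		/* behaviour after the patch  */

	dump("range:", broken);			/* ro ro ro ro */
	dump("per-page:", fixed);		/* ro rw rw rw */
	return 0;
}

The per-page version mirrors what the reworked change_page_attr_set()/change_page_attr_clear() below now do: look up each PTE, modify only the requested bits of that page's current protections, and advance one PAGE_SIZE at a time.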
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/mm/pageattr-test.c |  8
-rw-r--r--  arch/x86/mm/pageattr.c      | 96
2 files changed, 58 insertions, 46 deletions
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index 6a41a0f0c14..fe73905d075 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -162,8 +162,8 @@ static __init int exercise_pageattr(void)
 			continue;
 		}
-		err = change_page_attr_addr(addr[i], len[i],
-			pte_pgprot(pte_clrhuge(pte_clrglobal(pte0))));
+		err = change_page_attr_clear(addr[i], len[i],
+					     __pgprot(_PAGE_GLOBAL));
 		if (err < 0) {
 			printk(KERN_ERR "CPA %d failed %d\n", i, err);
 			failed++;
@@ -197,8 +197,8 @@ static __init int exercise_pageattr(void)
 			failed++;
 			continue;
 		}
-		err = change_page_attr_addr(addr[i], len[i],
-			pte_pgprot(pte_mkglobal(*pte)));
+		err = change_page_attr_set(addr[i], len[i],
+					   __pgprot(_PAGE_GLOBAL));
 		if (err < 0) {
 			printk(KERN_ERR "CPA reverting failed: %d\n", err);
 			failed++;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index a2d747c0614..23f0aa3d01c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -228,7 +228,6 @@ repeat:
 /**
  * change_page_attr_addr - Change page table attributes in linear mapping
  * @address: Virtual address in linear mapping.
- * @numpages: Number of pages to change
  * @prot: New page table attribute (PAGE_*)
  *
  * Change page attributes of a page in the direct mapping. This is a variant
@@ -240,10 +239,10 @@ repeat:
  * Modules and drivers should use the set_memory_* APIs instead.
  */
-static int change_page_attr_addr(unsigned long address, int numpages,
-				 pgprot_t prot)
+static int change_page_attr_addr(unsigned long address, pgprot_t prot)
 {
-	int err = 0, kernel_map = 0, i;
+	int err = 0, kernel_map = 0;
+	unsigned long pfn = __pa(address) >> PAGE_SHIFT;
 #ifdef CONFIG_X86_64
 	if (address >= __START_KERNEL_map &&
@@ -254,30 +253,27 @@ static int change_page_attr_addr(unsigned long address, int numpages,
 	}
 #endif
-	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
-		unsigned long pfn = __pa(address) >> PAGE_SHIFT;
+	if (!kernel_map || pte_present(pfn_pte(0, prot))) {
+		err = __change_page_attr(address, pfn, prot);
+		if (err)
+			return err;
+	}
-		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
-			err = __change_page_attr(address, pfn, prot);
-			if (err)
-				break;
-		}
 #ifdef CONFIG_X86_64
-		/*
-		 * Handle kernel mapping too which aliases part of
-		 * lowmem:
-		 */
-		if (__pa(address) < KERNEL_TEXT_SIZE) {
-			unsigned long addr2;
-			pgprot_t prot2;
-
-			addr2 = __START_KERNEL_map + __pa(address);
-			/* Make sure the kernel mappings stay executable */
-			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
-			err = __change_page_attr(addr2, pfn, prot2);
-		}
-#endif
+	/*
+	 * Handle kernel mapping too which aliases part of
+	 * lowmem:
+	 */
+	if (__pa(address) < KERNEL_TEXT_SIZE) {
+		unsigned long addr2;
+		pgprot_t prot2;
+
+		addr2 = __START_KERNEL_map + __pa(address);
+		/* Make sure the kernel mappings stay executable */
+		prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
+		err = __change_page_attr(addr2, pfn, prot2);
 	}
+#endif
 	return err;
 }
@@ -307,16 +303,24 @@ static int change_page_attr_set(unsigned long addr, int numpages,
 	pgprot_t current_prot;
 	int level;
 	pte_t *pte;
+	int i, ret;
-	pte = lookup_address(addr, &level);
-	if (pte)
-		current_prot = pte_pgprot(*pte);
-	else
-		pgprot_val(current_prot) = 0;
+	for (i = 0; i < numpages ; i++) {
-	pgprot_val(prot) = pgprot_val(current_prot) | pgprot_val(prot);
+		pte = lookup_address(addr, &level);
+		if (pte)
+			current_prot = pte_pgprot(*pte);
+		else
+			pgprot_val(current_prot) = 0;
-	return change_page_attr_addr(addr, numpages, prot);
+		pgprot_val(prot) = pgprot_val(current_prot) | pgprot_val(prot);
+
+		ret = change_page_attr_addr(addr, prot);
+		if (ret)
+			return ret;
+		addr += PAGE_SIZE;
+	}
+	return 0;
 }
 /**
@@ -344,16 +348,24 @@ static int change_page_attr_clear(unsigned long addr, int numpages,
 	pgprot_t current_prot;
 	int level;
 	pte_t *pte;
-
-	pte = lookup_address(addr, &level);
-	if (pte)
-		current_prot = pte_pgprot(*pte);
-	else
-		pgprot_val(current_prot) = 0;
-
-	pgprot_val(prot) = pgprot_val(current_prot) & ~pgprot_val(prot);
-
-	return change_page_attr_addr(addr, numpages, prot);
+	int i, ret;
+
+	for (i = 0; i < numpages; i++) {
+		pte = lookup_address(addr, &level);
+		if (pte)
+			current_prot = pte_pgprot(*pte);
+		else
+			pgprot_val(current_prot) = 0;
+
+		pgprot_val(prot) =
+			pgprot_val(current_prot) & ~pgprot_val(prot);
+
+		ret = change_page_attr_addr(addr, prot);
+		if (ret)
+			return ret;
+		addr += PAGE_SIZE;
+	}
+	return 0;
 }
 int set_memory_uc(unsigned long addr, int numpages)