author     Jiri Kosina <jkosina@suse.cz>   2013-01-29 10:48:30 +0100
committer  Jiri Kosina <jkosina@suse.cz>   2013-01-29 10:48:30 +0100
commit     617677295b53a40d0e54aac4cbbc216ffbc755dd (patch)
tree       51b9e87213243ed5efff252c8e8d8fec4eebc588 /arch/s390/mm
parent     5c8d1b68e01a144813e38795fe6dbe7ebb506131 (diff)
parent     6abb7c25775b7fb2225ad0508236d63ca710e65f (diff)
Merge branch 'master' into for-next
Conflicts:
	drivers/devfreq/exynos4_bus.c

Sync with Linus' tree to be able to apply patches that are against newer code (mvneta).
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/Makefile            12
-rw-r--r--  arch/s390/mm/dump_pagetables.c    7
-rw-r--r--  arch/s390/mm/fault.c             33
-rw-r--r--  arch/s390/mm/gup.c                7
-rw-r--r--  arch/s390/mm/init.c              29
-rw-r--r--  arch/s390/mm/pageattr.c          82
-rw-r--r--  arch/s390/mm/pgtable.c           16
-rw-r--r--  arch/s390/mm/vmem.c              46
8 files changed, 158 insertions, 74 deletions
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 1bea6d1f55a..640bea12303 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -2,9 +2,9 @@
# Makefile for the linux s390-specific parts of the memory manager.
#
-obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \
- page-states.o gup.o extable.o
-obj-$(CONFIG_CMM) += cmm.o
-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_DEBUG_SET_MODULE_RONX) += pageattr.o
-obj-$(CONFIG_S390_PTDUMP) += dump_pagetables.o
+obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o
+obj-y += page-states.o gup.o extable.o pageattr.o
+
+obj-$(CONFIG_CMM) += cmm.o
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_S390_PTDUMP) += dump_pagetables.o
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index cbc6668acb8..04e4892247d 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -150,6 +150,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
static void walk_pud_level(struct seq_file *m, struct pg_state *st,
pgd_t *pgd, unsigned long addr)
{
+ unsigned int prot;
pud_t *pud;
int i;
@@ -157,7 +158,11 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
st->current_address = addr;
pud = pud_offset(pgd, addr);
if (!pud_none(*pud))
- walk_pmd_level(m, st, pud, addr);
+ if (pud_large(*pud)) {
+ prot = pud_val(*pud) & _PAGE_RO;
+ note_page(m, st, prot, 2);
+ } else
+ walk_pmd_level(m, st, pud, addr);
else
note_page(m, st, _PAGE_INVALID, 2);
addr += PUD_SIZE;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 04ad4001a28..2fb9e63b8fc 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -49,15 +49,19 @@
#define VM_FAULT_BADCONTEXT 0x010000
#define VM_FAULT_BADMAP 0x020000
#define VM_FAULT_BADACCESS 0x040000
-#define VM_FAULT_SIGNAL 0x080000
+#define VM_FAULT_SIGNAL 0x080000
-static unsigned long store_indication;
+static unsigned long store_indication __read_mostly;
-void fault_init(void)
+#ifdef CONFIG_64BIT
+static int __init fault_init(void)
{
- if (test_facility(2) && test_facility(75))
+ if (test_facility(75))
store_indication = 0xc00;
+ return 0;
}
+early_initcall(fault_init);
+#endif
static inline int notify_page_fault(struct pt_regs *regs)
{
@@ -273,10 +277,16 @@ static inline int do_exception(struct pt_regs *regs, int access)
unsigned int flags;
int fault;
+ tsk = current;
+ /*
+ * The instruction that caused the program check has
+ * been nullified. Don't signal single step via SIGTRAP.
+ */
+ clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
+
if (notify_page_fault(regs))
return 0;
- tsk = current;
mm = tsk->mm;
trans_exc_code = regs->int_parm_long;
@@ -372,11 +382,6 @@ retry:
goto retry;
}
}
- /*
- * The instruction that caused the program check will
- * be repeated. Don't signal single step via SIGTRAP.
- */
- clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
fault = 0;
out_up:
up_read(&mm->mmap_sem);
@@ -423,6 +428,12 @@ void __kprobes do_asce_exception(struct pt_regs *regs)
struct vm_area_struct *vma;
unsigned long trans_exc_code;
+ /*
+ * The instruction that caused the program check has
+ * been nullified. Don't signal single step via SIGTRAP.
+ */
+ clear_tsk_thread_flag(current, TIF_PER_TRAP);
+
trans_exc_code = regs->int_parm_long;
if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
goto no_context;
@@ -558,7 +569,7 @@ static void pfault_interrupt(struct ext_code ext_code,
subcode = ext_code.subcode;
if ((subcode & 0xff00) != __SUBCODE_MASK)
return;
- kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
+ inc_irq_stat(IRQEXT_PFL);
/* Get the token (= pid of the affected task). */
pid = sizeof(void *) == 4 ? param32 : param64;
rcu_read_lock();
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 60acb93a468..1f5315d1215 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -126,7 +126,7 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
*/
if (pmd_none(pmd) || pmd_trans_splitting(pmd))
return 0;
- if (unlikely(pmd_huge(pmd))) {
+ if (unlikely(pmd_large(pmd))) {
if (!gup_huge_pmd(pmdp, pmd, addr, next,
write, pages, nr))
return 0;
@@ -180,8 +180,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
- (void __user *)start, len)))
+ if ((end < start) || (end > TASK_SIZE))
return 0;
local_irq_save(flags);
@@ -229,7 +228,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
- if (end < start)
+ if ((end < start) || (end > TASK_SIZE))
goto slow_irqon;
/*
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 81e596c65de..ae672f41c46 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -125,7 +125,6 @@ void __init paging_init(void)
max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init_nodes(max_zone_pfns);
- fault_init();
}
void __init mem_init(void)
@@ -159,34 +158,6 @@ void __init mem_init(void)
PFN_ALIGN((unsigned long)&_eshared) - 1);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- unsigned long address;
- int i;
-
- for (i = 0; i < numpages; i++) {
- address = page_to_phys(page + i);
- pgd = pgd_offset_k(address);
- pud = pud_offset(pgd, address);
- pmd = pmd_offset(pud, address);
- pte = pte_offset_kernel(pmd, address);
- if (!enable) {
- __ptep_ipte(address, pte);
- pte_val(*pte) = _PAGE_TYPE_EMPTY;
- continue;
- }
- *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
- /* Flush cpu write queue. */
- mb();
- }
-}
-#endif
-
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
unsigned long addr = begin;
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 00be01c4b4f..29ccee3651f 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -2,11 +2,46 @@
* Copyright IBM Corp. 2011
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
+#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/hugetlb.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
+#include <asm/page.h>
+
+void storage_key_init_range(unsigned long start, unsigned long end)
+{
+ unsigned long boundary, function, size;
+
+ while (start < end) {
+ if (MACHINE_HAS_EDAT2) {
+ /* set storage keys for a 2GB frame */
+ function = 0x22000 | PAGE_DEFAULT_KEY;
+ size = 1UL << 31;
+ boundary = (start + size) & ~(size - 1);
+ if (boundary <= end) {
+ do {
+ start = pfmf(function, start);
+ } while (start < boundary);
+ continue;
+ }
+ }
+ if (MACHINE_HAS_EDAT1) {
+ /* set storage keys for a 1MB frame */
+ function = 0x21000 | PAGE_DEFAULT_KEY;
+ size = 1UL << 20;
+ boundary = (start + size) & ~(size - 1);
+ if (boundary <= end) {
+ do {
+ start = pfmf(function, start);
+ } while (start < boundary);
+ continue;
+ }
+ }
+ page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
+ start += PAGE_SIZE;
+ }
+}
static pte_t *walk_page_table(unsigned long addr)
{
@@ -19,7 +54,7 @@ static pte_t *walk_page_table(unsigned long addr)
if (pgd_none(*pgdp))
return NULL;
pudp = pud_offset(pgdp, addr);
- if (pud_none(*pudp))
+ if (pud_none(*pudp) || pud_large(*pudp))
return NULL;
pmdp = pmd_offset(pudp, addr);
if (pmd_none(*pmdp) || pmd_large(*pmdp))
@@ -70,3 +105,46 @@ int set_memory_x(unsigned long addr, int numpages)
{
return 0;
}
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ unsigned long address;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ int i;
+
+ for (i = 0; i < numpages; i++) {
+ address = page_to_phys(page + i);
+ pgd = pgd_offset_k(address);
+ pud = pud_offset(pgd, address);
+ pmd = pmd_offset(pud, address);
+ pte = pte_offset_kernel(pmd, address);
+ if (!enable) {
+ __ptep_ipte(address, pte);
+ pte_val(*pte) = _PAGE_TYPE_EMPTY;
+ continue;
+ }
+ *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
+ }
+}
+
+#ifdef CONFIG_HIBERNATION
+bool kernel_page_present(struct page *page)
+{
+ unsigned long addr;
+ int cc;
+
+ addr = page_to_phys(page);
+ asm volatile(
+ " lra %1,0(%1)\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (cc), "+a" (addr) : : "cc");
+ return cc == 0;
+}
+#endif /* CONFIG_HIBERNATION */
+
+#endif /* CONFIG_DEBUG_PAGEALLOC */
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index c8188a18af0..ae44d2a3431 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -881,22 +881,6 @@ int s390_enable_sie(void)
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
-#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
-bool kernel_page_present(struct page *page)
-{
- unsigned long addr;
- int cc;
-
- addr = page_to_phys(page);
- asm volatile(
- " lra %1,0(%1)\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (cc), "+a" (addr) : : "cc");
- return cc == 0;
-}
-#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 387c7c60b5b..6ed1426d27c 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -89,6 +89,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
int ret = -ENOMEM;
while (address < end) {
+ pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
pu_dir = vmem_pud_alloc();
@@ -96,18 +97,24 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
goto out;
pgd_populate(&init_mm, pg_dir, pu_dir);
}
-
pu_dir = pud_offset(pg_dir, address);
+#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+ if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
+ !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
+ pte_val(pte) |= _REGION3_ENTRY_LARGE;
+ pte_val(pte) |= _REGION_ENTRY_TYPE_R3;
+ pud_val(*pu_dir) = pte_val(pte);
+ address += PUD_SIZE;
+ continue;
+ }
+#endif
if (pud_none(*pu_dir)) {
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
goto out;
pud_populate(&init_mm, pu_dir, pm_dir);
}
-
- pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
pm_dir = pmd_offset(pu_dir, address);
-
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
!(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
@@ -160,6 +167,11 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
address += PUD_SIZE;
continue;
}
+ if (pud_large(*pu_dir)) {
+ pud_clear(pu_dir);
+ address += PUD_SIZE;
+ continue;
+ }
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
address += PMD_SIZE;
@@ -193,7 +205,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
start_addr = (unsigned long) start;
end_addr = (unsigned long) (start + nr);
- for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
+ for (address = start_addr; address < end_addr;) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
pu_dir = vmem_pud_alloc();
@@ -212,10 +224,33 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
+#ifdef CONFIG_64BIT
+ /* Use 1MB frames for vmemmap if available. We always
+ * use large frames even if they are only partially
+ * used.
+ * Otherwise we would have also page tables since
+ * vmemmap_populate gets called for each section
+ * separately. */
+ if (MACHINE_HAS_EDAT1) {
+ void *new_page;
+
+ new_page = vmemmap_alloc_block(PMD_SIZE, node);
+ if (!new_page)
+ goto out;
+ pte = mk_pte_phys(__pa(new_page), PAGE_RW);
+ pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
+ pmd_val(*pm_dir) = pte_val(pte);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+#endif
pt_dir = vmem_pte_alloc(address);
if (!pt_dir)
goto out;
pmd_populate(&init_mm, pm_dir, pt_dir);
+ } else if (pmd_large(*pm_dir)) {
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
}
pt_dir = pte_offset_kernel(pm_dir, address);
@@ -228,6 +263,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
*pt_dir = pte;
}
+ address += PAGE_SIZE;
}
memset(start, 0, nr * sizeof(struct page));
ret = 0;