Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/fault.c      6
-rw-r--r--  arch/s390/mm/maccess.c   56
-rw-r--r--  arch/s390/mm/mmap.c       1
-rw-r--r--  arch/s390/mm/pageattr.c   1
-rw-r--r--  arch/s390/mm/pgtable.c   83
-rw-r--r--  arch/s390/mm/vmem.c       6
6 files changed, 144 insertions(+), 9 deletions(-)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 9564fc779b2..1766def5bc3 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -307,7 +307,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
#ifdef CONFIG_PGSTE
if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) {
- address = gmap_fault(address,
+ address = __gmap_fault(address,
(struct gmap *) S390_lowcore.gmap);
if (address == -EFAULT) {
fault = VM_FAULT_BADMAP;
@@ -393,7 +393,7 @@ void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code,
int fault;
/* Protection exception is suppressing, decrement psw address. */
- regs->psw.addr -= (pgm_int_code >> 16);
+ regs->psw.addr = __rewind_psw(regs->psw, pgm_int_code >> 16);
/*
* Check for low-address protection. This needs to be treated
* as a special case because the translation exception code
@@ -454,7 +454,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
struct pt_regs regs;
int access, fault;
- regs.psw.mask = psw_kernel_bits;
+ regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
if (!irqs_disabled())
regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
regs.psw.addr = (unsigned long) __builtin_return_address(0);
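
The fault.c hunks do two things: the protection-exception path now rewinds the PSW address with __rewind_psw() instead of an open-coded subtraction, and __handle_fault() fabricates its pt_regs with the DAT and machine-check bits set in the PSW mask. The plain subtraction was fragile because the instruction address must wrap within the current addressing mode. A minimal sketch of a rewind helper under that constraint (the PSW_MASK_EA/PSW_MASK_BA mode selection below is an assumption, not necessarily the kernel's exact definition):

	/*
	 * Illustrative only: subtract the instruction length and wrap
	 * the result to the width of the current addressing mode
	 * (24-, 31- or 64-bit, selected by the EA/BA bits in the mask).
	 */
	static unsigned long rewind_psw_sketch(psw_t psw, unsigned long ilc)
	{
		unsigned long mask;

		mask = (psw.mask & PSW_MASK_EA) ? -1UL :
		       (psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
						  (1UL << 24) - 1;
		return (psw.addr - ilc) & mask;
	}
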
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 5dbbaa6e594..1cb8427bedf 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
+#include <linux/gfp.h>
#include <asm/system.h>
/*
@@ -60,6 +61,9 @@ long probe_kernel_write(void *dst, const void *src, size_t size)
return copied < 0 ? -EFAULT : 0;
}
+/*
+ * Copy memory in real mode (kernel to kernel)
+ */
int memcpy_real(void *dest, void *src, size_t count)
{
register unsigned long _dest asm("2") = (unsigned long) dest;
@@ -101,3 +105,55 @@ void copy_to_absolute_zero(void *dest, void *src, size_t count)
__ctl_load(cr0, 0, 0);
preempt_enable();
}
+
+/*
+ * Copy memory from kernel (real) to user (virtual)
+ */
+int copy_to_user_real(void __user *dest, void *src, size_t count)
+{
+ int offs = 0, size, rc;
+ char *buf;
+
+ buf = (char *) __get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ rc = -EFAULT;
+ while (offs < count) {
+ size = min(PAGE_SIZE, count - offs);
+ if (memcpy_real(buf, src + offs, size))
+ goto out;
+ if (copy_to_user(dest + offs, buf, size))
+ goto out;
+ offs += size;
+ }
+ rc = 0;
+out:
+ free_page((unsigned long) buf);
+ return rc;
+}
+
+/*
+ * Copy memory from user (virtual) to kernel (real)
+ */
+int copy_from_user_real(void *dest, void __user *src, size_t count)
+{
+ int offs = 0, size, rc;
+ char *buf;
+
+ buf = (char *) __get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ rc = -EFAULT;
+ while (offs < count) {
+ size = min(PAGE_SIZE, count - offs);
+ if (copy_from_user(buf, src + offs, size))
+ goto out;
+ if (memcpy_real(dest + offs, buf, size))
+ goto out;
+ offs += size;
+ }
+ rc = 0;
+out:
+ free_page((unsigned long) buf);
+ return rc;
+}
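
Both new helpers bounce the data through a scratch kernel page because memcpy_real() runs with DAT disabled and therefore cannot dereference user (virtual) addresses directly. A hedged usage sketch with hypothetical names — for instance, a kdump read path exposing old-kernel memory to user space:

	/*
	 * Hypothetical caller: copy count bytes of real (unmapped)
	 * memory at src_real into a user buffer. Like the helper, it
	 * returns 0 on success and -EFAULT/-ENOMEM on failure.
	 */
	static int read_real_range(char __user *ubuf, unsigned long src_real,
				   size_t count)
	{
		return copy_to_user_real(ubuf, (void *) src_real, count);
	}
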
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index c9a9f7f1818..f09c74881b7 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -26,6 +26,7 @@
#include <linux/personality.h>
#include <linux/mm.h>
+#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <asm/pgalloc.h>
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index d013ed39743..b36537a5f43 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -5,6 +5,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
+#include <asm/cacheflush.h>
#include <asm/pgtable.h>
static void change_page_attr(unsigned long addr, int numpages,
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5d56c2b95b1..301c84d3b54 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2007,2009
+ * Copyright IBM Corp. 2007,2011
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
@@ -222,6 +222,7 @@ void gmap_free(struct gmap *gmap)
/* Free all segment & region tables. */
down_read(&gmap->mm->mmap_sem);
+ spin_lock(&gmap->mm->page_table_lock);
list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
table = (unsigned long *) page_to_phys(page);
if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
@@ -230,6 +231,7 @@ void gmap_free(struct gmap *gmap)
gmap_unlink_segment(gmap, table);
__free_pages(page, ALLOC_ORDER);
}
+ spin_unlock(&gmap->mm->page_table_lock);
up_read(&gmap->mm->mmap_sem);
list_del(&gmap->list);
kfree(gmap);
@@ -256,6 +258,9 @@ void gmap_disable(struct gmap *gmap)
}
EXPORT_SYMBOL_GPL(gmap_disable);
+/*
+ * gmap_alloc_table is assumed to be called with mmap_sem held
+ */
static int gmap_alloc_table(struct gmap *gmap,
unsigned long *table, unsigned long init)
{
@@ -267,14 +272,12 @@ static int gmap_alloc_table(struct gmap *gmap,
return -ENOMEM;
new = (unsigned long *) page_to_phys(page);
crst_table_init(new, init);
- down_read(&gmap->mm->mmap_sem);
if (*table & _REGION_ENTRY_INV) {
list_add(&page->lru, &gmap->crst_list);
*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
(*table & _REGION_ENTRY_TYPE_MASK);
} else
__free_pages(page, ALLOC_ORDER);
- up_read(&gmap->mm->mmap_sem);
return 0;
}
@@ -299,6 +302,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
flush = 0;
down_read(&gmap->mm->mmap_sem);
+ spin_lock(&gmap->mm->page_table_lock);
for (off = 0; off < len; off += PMD_SIZE) {
/* Walk the guest addr space page table */
table = gmap->table + (((to + off) >> 53) & 0x7ff);
@@ -320,6 +324,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
*table = _SEGMENT_ENTRY_INV;
}
out:
+ spin_unlock(&gmap->mm->page_table_lock);
up_read(&gmap->mm->mmap_sem);
if (flush)
gmap_flush_tlb(gmap);
@@ -350,6 +355,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
flush = 0;
down_read(&gmap->mm->mmap_sem);
+ spin_lock(&gmap->mm->page_table_lock);
for (off = 0; off < len; off += PMD_SIZE) {
/* Walk the gmap address space page table */
table = gmap->table + (((to + off) >> 53) & 0x7ff);
@@ -373,19 +379,24 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
flush |= gmap_unlink_segment(gmap, table);
*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
}
+ spin_unlock(&gmap->mm->page_table_lock);
up_read(&gmap->mm->mmap_sem);
if (flush)
gmap_flush_tlb(gmap);
return 0;
out_unmap:
+ spin_unlock(&gmap->mm->page_table_lock);
up_read(&gmap->mm->mmap_sem);
gmap_unmap_segment(gmap, to, len);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
-unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
+/*
+ * this function is assumed to be called with mmap_sem held
+ */
+unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
unsigned long *table, vmaddr, segment;
struct mm_struct *mm;
@@ -445,16 +456,75 @@ unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
page = pmd_page(*pmd);
mp = (struct gmap_pgtable *) page->index;
rmap->entry = table;
+ spin_lock(&mm->page_table_lock);
list_add(&rmap->list, &mp->mapper);
+ spin_unlock(&mm->page_table_lock);
/* Set gmap segment table entry to page table. */
*table = pmd_val(*pmd) & PAGE_MASK;
return vmaddr | (address & ~PMD_MASK);
}
return -EFAULT;
+}
+unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
+{
+ unsigned long rc;
+
+ down_read(&gmap->mm->mmap_sem);
+ rc = __gmap_fault(address, gmap);
+ up_read(&gmap->mm->mmap_sem);
+
+ return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
+void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
+{
+
+ unsigned long *table, address, size;
+ struct vm_area_struct *vma;
+ struct gmap_pgtable *mp;
+ struct page *page;
+
+ down_read(&gmap->mm->mmap_sem);
+ address = from;
+ while (address < to) {
+ /* Walk the gmap address space page table */
+ table = gmap->table + ((address >> 53) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INV)) {
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 42) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INV)) {
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 31) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INV)) {
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 20) & 0x7ff);
+ if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+ page = pfn_to_page(*table >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ vma = find_vma(gmap->mm, mp->vmaddr);
+ size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
+ zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
+ size, NULL);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ }
+ up_read(&gmap->mm->mmap_sem);
+}
+EXPORT_SYMBOL_GPL(gmap_discard);
+
void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
struct gmap_rmap *rmap, *next;
@@ -662,8 +732,9 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
void __tlb_remove_table(void *_table)
{
- void *table = (void *)((unsigned long) _table & PAGE_MASK);
- unsigned type = (unsigned long) _table & ~PAGE_MASK;
+ const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
+ void *table = (void *)((unsigned long) _table & ~mask);
+ unsigned type = (unsigned long) _table & mask;
if (type)
__page_table_free_rcu(table, type);
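
The __tlb_remove_table() change narrows the type field to the bits the page-table fragment allocator actually uses: (FRAG_MASK << 4) | FRAG_MASK instead of all of ~PAGE_MASK, so that sub-page table fragments and their pending-free state can be packed into the low bits of the table pointer. A sketch of the encode/decode pair this implies (FRAG_MASK is taken as 0x03 here, which is an assumption):

	#define FRAG_MASK_SKETCH  0x03UL
	#define TYPE_MASK_SKETCH  ((FRAG_MASK_SKETCH << 4) | FRAG_MASK_SKETCH)

	/* pack the fragment/type bits into an aligned table address */
	static void *encode_table_sketch(void *table, unsigned long type)
	{
		return (void *)((unsigned long) table | (type & TYPE_MASK_SKETCH));
	}

	/* recover both halves, mirroring __tlb_remove_table() above */
	static void decode_table_sketch(void *_table, void **table,
					unsigned long *type)
	{
		*table = (void *)((unsigned long) _table & ~TYPE_MASK_SKETCH);
		*type = (unsigned long) _table & TYPE_MASK_SKETCH;
	}
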
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 781ff516956..4799383e2df 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -335,6 +335,9 @@ void __init vmem_map_init(void)
ro_start = ((unsigned long)&_stext) & PAGE_MASK;
ro_end = PFN_ALIGN((unsigned long)&_eshared);
for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+ if (memory_chunk[i].type == CHUNK_CRASHK ||
+ memory_chunk[i].type == CHUNK_OLDMEM)
+ continue;
start = memory_chunk[i].addr;
end = memory_chunk[i].addr + memory_chunk[i].size;
if (start >= ro_end || end <= ro_start)
@@ -368,6 +371,9 @@ static int __init vmem_convert_memory_chunk(void)
for (i = 0; i < MEMORY_CHUNKS; i++) {
if (!memory_chunk[i].size)
continue;
+ if (memory_chunk[i].type == CHUNK_CRASHK ||
+ memory_chunk[i].type == CHUNK_OLDMEM)
+ continue;
seg = kzalloc(sizeof(*seg), GFP_KERNEL);
if (!seg)
panic("Out of memory...\n");
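
Both vmem.c hunks apply the same filter: chunks reserved for the crash kernel (CHUNK_CRASHK) or belonging to the previous, crashed kernel (CHUNK_OLDMEM) must stay out of the identity mapping and the converted segment list. A small predicate would make the shared condition explicit; this is a sketch only, the patch itself keeps the check open-coded:

	/* sketch: true if this chunk may enter the 1:1 mapping */
	static bool chunk_mappable_sketch(const struct mem_chunk *chunk)
	{
		return chunk->size != 0 &&
		       chunk->type != CHUNK_CRASHK &&
		       chunk->type != CHUNK_OLDMEM;
	}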