Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/Makefile       |  1
-rw-r--r--  arch/s390/mm/init.c         |  3
-rw-r--r--  arch/s390/mm/page-states.c  | 79
-rw-r--r--  arch/s390/mm/vmem.c         | 19
4 files changed, 89 insertions, 13 deletions
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index fb988a48a75..2a745813454 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -5,3 +5,4 @@
obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_PAGE_STATES) += page-states.o
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index fa31de6ae97..29f3a63806b 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -126,6 +126,9 @@ void __init mem_init(void)
/* clear the zero-page */
memset(empty_zero_page, 0, PAGE_SIZE);
+ /* Setup guest page hinting */
+ cmma_init();
+
/* this will put all low memory onto the freelists */
totalram_pages += free_all_bootmem();
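
Note: mem_init() calls cmma_init() before free_all_bootmem(), so the ESSA facility probe has already run by the time the bootmem pages are handed to the buddy allocator and start flowing through arch_free_page(). The declarations that let this call compile live in headers outside arch/s390/mm and are therefore not visible in this path-limited diff; the following is a hypothetical sketch of that header side, assuming the standard HAVE_ARCH_* hook convention from include/linux/gfp.h (names and placement are assumptions, not taken from the patch):

#ifdef CONFIG_PAGE_STATES
/* Hypothetical declarations; the real ones live in arch headers
 * outside the arch/s390/mm subtree shown in this diff. */
struct page;
void __init cmma_init(void);
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
/* Prevent include/linux/gfp.h from supplying its empty inline stubs. */
#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE
#endif
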
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
new file mode 100644
index 00000000000..fc0ad73ffd9
--- /dev/null
+++ b/arch/s390/mm/page-states.c
@@ -0,0 +1,79 @@
+/*
+ * arch/s390/mm/page-states.c
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Guest page hinting for unused pages.
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#define ESSA_SET_STABLE 1
+#define ESSA_SET_UNUSED 2
+
+static int cmma_flag;
+
+static int __init cmma(char *str)
+{
+ char *parm;
+ parm = strstrip(str);
+ if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
+ cmma_flag = 1;
+ return 1;
+ }
+ cmma_flag = 0;
+ if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
+ return 1;
+ return 0;
+}
+
+__setup("cmma=", cmma);
+
+void __init cmma_init(void)
+{
+ register unsigned long tmp asm("0") = 0;
+ register int rc asm("1") = -EOPNOTSUPP;
+
+ if (!cmma_flag)
+ return;
+ asm volatile(
+ " .insn rrf,0xb9ab0000,%1,%1,0,0\n"
+ "0: la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "+&d" (rc), "+&d" (tmp));
+ if (rc)
+ cmma_flag = 0;
+}
+
+void arch_free_page(struct page *page, int order)
+{
+ int i, rc;
+
+ if (!cmma_flag)
+ return;
+ for (i = 0; i < (1 << order); i++)
+ asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
+ : "=&d" (rc)
+ : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT),
+ "i" (ESSA_SET_UNUSED));
+}
+
+void arch_alloc_page(struct page *page, int order)
+{
+ int i, rc;
+
+ if (!cmma_flag)
+ return;
+ for (i = 0; i < (1 << order); i++)
+ asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
+ : "=&d" (rc)
+ : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT),
+ "i" (ESSA_SET_STABLE));
+}
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index beccacf907f..ea2804808f3 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -27,19 +27,12 @@ struct memory_segment {
static LIST_HEAD(mem_segs);
-static void __ref *vmem_alloc_pages(unsigned int order)
-{
- if (slab_is_available())
- return (void *)__get_free_pages(GFP_KERNEL, order);
- return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
-}
-
-static inline pud_t *vmem_pud_alloc(void)
+static pud_t *vmem_pud_alloc(void)
{
pud_t *pud = NULL;
#ifdef CONFIG_64BIT
- pud = vmem_alloc_pages(2);
+ pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
if (!pud)
return NULL;
clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -47,12 +40,12 @@ static inline pud_t *vmem_pud_alloc(void)
return pud;
}
-static inline pmd_t *vmem_pmd_alloc(void)
+static pmd_t *vmem_pmd_alloc(void)
{
pmd_t *pmd = NULL;
#ifdef CONFIG_64BIT
- pmd = vmem_alloc_pages(2);
+ pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
if (!pmd)
return NULL;
clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -60,7 +53,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
return pmd;
}
-static pte_t __init_refok *vmem_pte_alloc(void)
+static pte_t __ref *vmem_pte_alloc(void)
{
pte_t *pte;
@@ -214,7 +207,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
if (pte_none(*pt_dir)) {
unsigned long new_page;
- new_page =__pa(vmem_alloc_pages(0));
+ new_page =__pa(vmemmap_alloc_block(PAGE_SIZE, 0));
if (!new_page)
goto out;
pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
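
The vmem.c side drops the file-local vmem_alloc_pages() helper in favour of the common vmemmap_alloc_block() from mm/sparse-vmemmap.c, which implements the same "page allocator once available, otherwise bootmem" fallback. A condensed sketch of that helper as it looked in this era (error handling and details trimmed), to show it is a drop-in replacement for the removed code:

/* mm/sparse-vmemmap.c (condensed sketch, not part of this diff) */
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* Once the slab allocator is up, use the page allocator ... */
	if (slab_is_available()) {
		struct page *page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO, get_order(size));
		return page ? page_address(page) : NULL;
	}
	/* ... otherwise fall back to bootmem, as the old helper did. */
	return __earlyonly_bootmem_alloc(node, size, size,
					 __pa(MAX_DMA_ADDRESS));
}
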