Diffstat (limited to 'arch/s390/include/asm/pgtable.h')
-rw-r--r--	arch/s390/include/asm/pgtable.h	30
1 file changed, 23 insertions(+), 7 deletions(-)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 979fe3dc078..dd647c919a6 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -119,13 +119,12 @@ static inline int is_zero_pfn(unsigned long pfn)
#ifndef __ASSEMBLY__
/*
- * The vmalloc area will always be on the topmost area of the kernel
- * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
- * which should be enough for any sane case.
- * By putting vmalloc at the top, we maximise the gap between physical
- * memory and vmalloc to catch misplaced memory accesses. As a side
- * effect, this also makes sure that 64 bit module code cannot be used
- * as system call address.
+ * The vmalloc and module areas will always sit at the top of the kernel
+ * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
+ * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
+ * modules will reside. That makes sure that inter-module branches always
+ * happen without trampolines and, in addition, the placement within a 2GB
+ * frame is branch prediction unit friendly.
*/
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
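The new comment describes a fixed split: modules occupy the top 2GB
(MODULES_LEN) of the reserved range, with vmalloc directly below. A minimal
sketch of how boot code could derive the boundaries from that description;
setup_kernel_map_split() and its vmax/vmalloc_size parameters are assumed
for illustration and are not part of this patch:

	/*
	 * Sketch only: derive the boundaries described in the comment.
	 * vmax (top of the kernel mapping) and vmalloc_size (taken from
	 * the 96MB/128GB reserve) are assumed inputs.
	 */
	static void setup_kernel_map_split(unsigned long vmax,
					   unsigned long vmalloc_size)
	{
		MODULES_END   = vmax;			/* topmost kernel address */
		MODULES_VADDR = MODULES_END - MODULES_LEN; /* 2GB module window */
		VMALLOC_END   = MODULES_VADDR;		/* vmalloc ends where modules start */
		VMALLOC_START = VMALLOC_END - vmalloc_size;
	}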
@@ -133,6 +132,14 @@ extern struct page *vmemmap;
#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
+#ifdef CONFIG_64BIT
+extern unsigned long MODULES_VADDR;
+extern unsigned long MODULES_END;
+#define MODULES_VADDR MODULES_VADDR
+#define MODULES_END MODULES_END
+#define MODULES_LEN (1UL << 31)
+#endif
+
/*
* A 31 bit pagetable entry of S390 has the following format:
* | PFRA | | OS |
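The self-referential defines in the hunk above look odd but serve a purpose:
MODULES_VADDR and MODULES_END are variables set at boot, yet the defines make
the names visible to the preprocessor, so generic code can feature-test them
with #ifdef. A hedged sketch of such a consumer; in_module_area() is
hypothetical and not part of this patch:

	/*
	 * Sketch only: the #define lets generic code probe for a
	 * dedicated module area at preprocessing time, even though the
	 * underlying symbols are runtime variables.
	 */
	extern unsigned long MODULES_VADDR;
	extern unsigned long MODULES_END;
	#define MODULES_VADDR MODULES_VADDR
	#define MODULES_END   MODULES_END

	static inline int in_module_area(unsigned long addr)
	{
	#ifdef MODULES_VADDR	/* succeeds thanks to the define above */
		return addr >= MODULES_VADDR && addr < MODULES_END;
	#else
		return 0;	/* no dedicated module area on this config */
	#endif
	}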
@@ -507,6 +514,15 @@ static inline int pmd_none(pmd_t pmd)
return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}
+static inline int pmd_large(pmd_t pmd)
+{
+#ifdef CONFIG_64BIT
+ return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
+#else
+ return 0;
+#endif
+}
+
static inline int pmd_bad(pmd_t pmd)
{
unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
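The new pmd_large() helper lets page-table walkers detect a segment mapped by
a single large (1MB) entry, which has no pte table underneath to descend
into. A hypothetical walker fragment; pmd_leaf_or_invalid() is illustrative
only, and it tests pmd_large() before pmd_bad() on the assumption that the
pmd_bad() mask shown above does not exempt the large bit:

	/*
	 * Sketch only: decide whether a walker can stop at this pmd.
	 * A large entry maps its whole segment directly, so there is
	 * nothing below it to walk.
	 */
	static int pmd_leaf_or_invalid(pmd_t pmd)
	{
		if (pmd_large(pmd))
			return 1;	/* segment mapped by one large entry */
		if (pmd_none(pmd) || pmd_bad(pmd))
			return 1;	/* nothing valid to descend into */
		return 0;		/* a pte table is present below */
	}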