Diffstat (limited to 'include')
-rw-r--r--  include/asm-sh/cache.h               |   8
-rw-r--r--  include/asm-sh/cacheflush.h          |   1
-rw-r--r--  include/asm-sh/cpu-sh3/cacheflush.h  |   8
-rw-r--r--  include/asm-sh/cpu-sh4/cacheflush.h  |  29
-rw-r--r--  include/asm-sh/page.h                |  12
-rw-r--r--  include/asm-sh/pgalloc.h             |  37
-rw-r--r--  include/asm-sh/pgtable.h             |  80
7 files changed, 73 insertions(+), 102 deletions(-)
diff --git a/include/asm-sh/cache.h b/include/asm-sh/cache.h
index 33f13367054..e3a180cf506 100644
--- a/include/asm-sh/cache.h
+++ b/include/asm-sh/cache.h
@@ -10,7 +10,6 @@
#ifdef __KERNEL__
#include <asm/cpu/cache.h>
-#include <asm/cpu/cacheflush.h>
#define SH_CACHE_VALID 1
#define SH_CACHE_UPDATED 2
@@ -49,12 +48,5 @@ struct cache_info {
unsigned long flags;
};
-/* Flush (write-back only) a region (smaller than a page) */
-extern void __flush_wback_region(void *start, int size);
-/* Flush (write-back & invalidate) a region (smaller than a page) */
-extern void __flush_purge_region(void *start, int size);
-/* Flush (invalidate only) a region (smaller than a page) */
-extern void __flush_invalidate_region(void *start, int size);
-
#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHE_H */
diff --git a/include/asm-sh/cacheflush.h b/include/asm-sh/cacheflush.h
index 9dfb33edb00..92930b4a40d 100644
--- a/include/asm-sh/cacheflush.h
+++ b/include/asm-sh/cacheflush.h
@@ -2,6 +2,7 @@
#define __ASM_SH_CACHEFLUSH_H
#ifdef __KERNEL__
+#include <linux/mm.h>
#include <asm/cpu/cacheflush.h>
/* Flush (write-back only) a region (smaller than a page) */
diff --git a/include/asm-sh/cpu-sh3/cacheflush.h b/include/asm-sh/cpu-sh3/cacheflush.h
index f51aed00c68..db0cb071ea8 100644
--- a/include/asm-sh/cpu-sh3/cacheflush.h
+++ b/include/asm-sh/cpu-sh3/cacheflush.h
@@ -10,7 +10,7 @@
#ifndef __ASM_CPU_SH3_CACHEFLUSH_H
#define __ASM_CPU_SH3_CACHEFLUSH_H
-/*
+/*
* Cache flushing:
*
* - flush_cache_all() flushes entire cache
@@ -35,10 +35,6 @@
/* 32KB cache, 4kb PAGE sizes need to check bit 12 */
#define CACHE_ALIAS 0x00001000
-struct page;
-struct mm_struct;
-struct vm_area_struct;
-
extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
@@ -79,8 +75,6 @@ extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
#define p3_cache_init() do { } while (0)
-#define HAVE_ARCH_UNMAPPED_AREA
-
#endif
#endif /* __ASM_CPU_SH3_CACHEFLUSH_H */
diff --git a/include/asm-sh/cpu-sh4/cacheflush.h b/include/asm-sh/cpu-sh4/cacheflush.h
index ea58c4c5944..a95fc951aff 100644
--- a/include/asm-sh/cpu-sh4/cacheflush.h
+++ b/include/asm-sh/cpu-sh4/cacheflush.h
@@ -16,30 +16,26 @@
* caching; in which case they're only semi-broken),
* so we need them.
*/
-struct page;
-struct mm_struct;
-struct vm_area_struct;
-
-extern void flush_cache_all(void);
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
-extern void flush_dcache_page(struct page *pg);
+void flush_cache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn);
+void flush_dcache_page(struct page *pg);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_cache_sigtramp(unsigned long addr);
-extern void flush_icache_user_range(struct vm_area_struct *vma,
- struct page *page, unsigned long addr,
- int len);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_cache_sigtramp(unsigned long addr);
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len);
#define flush_icache_page(vma,pg) do { } while (0)
/* Initialization of P3 area for copy_user_page */
-extern void p3_cache_init(void);
+void p3_cache_init(void);
#define PG_mapped PG_arch_1
@@ -57,4 +53,3 @@ static inline int remap_area_pages(unsigned long addr, unsigned long phys_addr,
}
#endif /* CONFIG_MMU */
#endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
-
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index 4811d410d12..51d7281a546 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -41,7 +41,8 @@ extern void (*copy_page)(void *to, void *from);
extern void clear_page_slow(void *to);
extern void copy_page_slow(void *to, void *from);
-#if defined(CONFIG_SH7705_CACHE_32KB) && defined(CONFIG_MMU)
+#if defined(CONFIG_MMU) && (defined(CONFIG_CPU_SH4) || \
+ defined(CONFIG_SH7705_CACHE_32KB))
struct page;
extern void clear_user_page(void *to, unsigned long address, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
@@ -50,29 +51,20 @@ extern void __copy_user_page(void *to, void *from, void *orig_to);
#elif defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH3) || !defined(CONFIG_MMU)
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#elif defined(CONFIG_CPU_SH4)
-struct page;
-extern void clear_user_page(void *to, unsigned long address, struct page *pg);
-extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
-extern void __clear_user_page(void *to, void *orig_to);
-extern void __copy_user_page(void *to, void *from, void *orig_to);
#endif
/*
* These are used to make use of C type-checking..
*/
typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define pte_val(x) ((x).pte)
-#define pmd_val(x) ((x).pmd)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) } )
-#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
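The page.h hunk above drops the pmd_t wrapper because the pmd level is folded away by the reworked pgtable.h further down, which switches to asm-generic/pgtable-nopmd.h. As a minimal standalone sketch, not part of the patch, this is roughly what the remaining one-member structs buy: distinct types for the page-table words, so the compiler rejects accidental mixing of pte and pgd values.

/* Standalone sketch (not kernel code): the type-checking idea behind the
 * one-member structs kept in page.h. pmd_t no longer appears here because
 * the pmd level is folded into the pgd via asm-generic/pgtable-nopmd.h. */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;

#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })

static pte_t make_pte(unsigned long raw)
{
	return __pte(raw);		/* only explicit construction works */
}

int main(void)
{
	pte_t pte = make_pte(0x1000);
	/* pgd_t pgd = make_pte(0x1000);  would not compile: distinct types */
	return (int)(pte_val(pte) & 0);
}
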
diff --git a/include/asm-sh/pgalloc.h b/include/asm-sh/pgalloc.h
index f4f233f7a4f..e841465ab4d 100644
--- a/include/asm-sh/pgalloc.h
+++ b/include/asm-sh/pgalloc.h
@@ -1,15 +1,6 @@
#ifndef __ASM_SH_PGALLOC_H
#define __ASM_SH_PGALLOC_H
-#include <linux/threads.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-
-#define pgd_quicklist ((unsigned long *)0)
-#define pmd_quicklist ((unsigned long *)0)
-#define pte_quicklist ((unsigned long *)0)
-#define pgtable_cache_size 0L
-
#define pmd_populate_kernel(mm, pmd, pte) \
set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
@@ -24,38 +15,24 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
*/
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
- pgd_t *pgd = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL);
-
- if (pgd)
- memset(pgd, 0, pgd_size);
-
- return pgd;
+ return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
}
static inline void pgd_free(pgd_t *pgd)
{
- kfree(pgd);
+ free_page((unsigned long)pgd);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- pte_t *pte;
-
- pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
-
- return pte;
+ return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
}
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
- struct page *pte;
-
- pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-
- return pte;
+ return alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
}
static inline void pte_free_kernel(pte_t *pte)
@@ -75,14 +52,8 @@ static inline void pte_free(struct page *pte)
* inside the pgd, so has no extra memory associated with it.
*/
-#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x) do { } while (0)
#define __pmd_free_tlb(tlb,x) do { } while (0)
-#define pgd_populate(mm, pmd, pte) BUG()
#define check_pgt_cache() do { } while (0)
-#ifdef CONFIG_CPU_SH4
-#define PG_mapped PG_arch_1
-#endif
-
#endif /* __ASM_SH_PGALLOC_H */
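In the pgalloc.h hunk above, pgd_alloc() stops kmalloc'ing a partial table and takes one zeroed page instead. A minimal standalone sketch of the size reasoning, assuming 32-bit SH where sizeof(pgd_t) is 4 bytes and pages are 4 KB (neither value is stated in the patch itself): the full pgd with PTRS_PER_PGD entries is exactly one page, so __get_free_page() yields a naturally aligned table, and __GFP_ZERO makes the old explicit memset() unnecessary.

#include <stdio.h>

/* Standalone sketch (not kernel code): the full pgd is exactly one page.
 * PTRS_PER_PGD is taken from the patched pgtable.h; sizeof(pgd_t) == 4
 * and 4 KB pages are assumptions for 32-bit SH. */
int main(void)
{
	unsigned long ptrs_per_pgd = 1024;	/* PTRS_PER_PGD */
	unsigned long pgd_entry_size = 4;	/* sizeof(pgd_t) on 32-bit */
	unsigned long page_size = 4096;		/* 4 KB page */

	printf("pgd table: %lu bytes, one page: %lu bytes\n",
	       ptrs_per_pgd * pgd_entry_size, page_size);
	return 0;
}
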
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index 40d41a78041..9728b58f7c1 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -1,42 +1,42 @@
-#ifndef __ASM_SH_PGTABLE_H
-#define __ASM_SH_PGTABLE_H
-
-#include <asm-generic/4level-fixup.h>
-
/*
+ * This file contains the functions and defines necessary to modify and
+ * use the SuperH page table tree.
+ *
* Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2002, 2003, 2004 Paul Mundt
+ * Copyright (C) 2002 - 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
*/
+#ifndef __ASM_SH_PGTABLE_H
+#define __ASM_SH_PGTABLE_H
-#include <asm/pgtable-2level.h>
+#include <asm-generic/pgtable-nopmd.h>
+#include <asm/page.h>
+
+#define PTRS_PER_PGD 1024
-/*
- * This file contains the functions and defines necessary to modify and use
- * the SuperH page table tree.
- */
#ifndef __ASSEMBLY__
-#include <asm/processor.h>
#include <asm/addrspace.h>
#include <asm/fixmap.h>
-#include <linux/threads.h>
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
/*
- * Basically we have the same two-level (which is the logical three level
- * Linux page table layout folded) page tables as the i386.
- */
-
-/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern unsigned long empty_zero_page[1024];
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */
+/* traditional two-level paging structure */
+#define PGDIR_SHIFT 22
+#define PTRS_PER_PMD 1
+#define PTRS_PER_PTE 1024
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
@@ -47,7 +47,6 @@ extern unsigned long empty_zero_page[1024];
#define PTE_PHYS_MASK 0x1ffff000
-#ifndef __ASSEMBLY__
/*
* First 1MB map is used by fixed purpose.
* Currently only 4-enty (16kB) is used (see arch/sh/mm/cache.c)
@@ -65,7 +64,7 @@ extern unsigned long empty_zero_page[1024];
#define _PAGE_SZ1 0x080 /* SZ1-bit : Size of page (on SH-4) */
#define _PAGE_PRESENT 0x100 /* V-bit : page is valid */
#define _PAGE_PROTNONE 0x200 /* software: if not present */
-#define _PAGE_ACCESSED 0x400 /* software: page referenced */
+#define _PAGE_ACCESSED 0x400 /* software: page referenced */
#define _PAGE_U0_SHARED 0x800 /* software: page is shared in user space */
#define _PAGE_FILE _PAGE_WT /* software: pagecache or swap? */
@@ -83,7 +82,6 @@ extern unsigned long empty_zero_page[1024];
#define _PAGE_PCC_ATR8 0x60000000 /* Attribute Memory space, 8 bit bus */
#define _PAGE_PCC_ATR16 0x60000001 /* Attribute Memory space, 6 bit bus */
-
/* Mask which drop software flags
* We also drop WT bit since it is used for _PAGE_FILE
* bit in this implementation.
@@ -115,6 +113,8 @@ extern unsigned long empty_zero_page[1024];
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_SHARED)
+#ifndef __ASSEMBLY__
+
#ifdef CONFIG_MMU
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_FLAGS_HARD)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_SHARED | _PAGE_FLAGS_HARD)
@@ -137,12 +137,13 @@ extern unsigned long empty_zero_page[1024];
#define PAGE_KERNEL_PCC __pgprot(0)
#endif
+#endif /* __ASSEMBLY__ */
+
/*
* As i386 and MIPS, SuperH can't do page protection for execute, and
* considers that the same as a read. Also, write permissions imply
- * read permissions. This is the closest we can get..
+ * read permissions. This is the closest we can get..
*/
-
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
@@ -161,6 +162,26 @@ extern unsigned long empty_zero_page[1024];
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
+#ifndef __ASSEMBLY__
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+/*
+ * (pmds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+#define pte_pfn(x) ((unsigned long)(((x).pte >> PAGE_SHIFT)))
+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
#define pte_none(x) (!pte_val(x))
#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
@@ -171,7 +192,7 @@ extern unsigned long empty_zero_page[1024];
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-#define pte_page(x) phys_to_page(pte_val(x)&PTE_PHYS_MASK)
+#define pte_page(x) phys_to_page(pte_val(x)&PTE_PHYS_MASK)
/*
* The following only work if pte_present() is true.
@@ -248,6 +269,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t pte);
@@ -272,8 +298,6 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
typedef pte_t *pte_addr_t;
-#endif /* !__ASSEMBLY__ */
-
#define kern_addr_valid(addr) (1)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
@@ -301,5 +325,7 @@ extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t
#include <asm-generic/pgtable.h>
+#endif /* !__ASSEMBLY__ */
+
#endif /* __ASM_SH_PAGE_H */
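
For reference, a minimal standalone sketch (not part of the patch) of how a virtual address splits under the two-level layout declared in pgtable.h above: PGDIR_SHIFT of 22 means each pgd entry covers 4 MB, and with 4 KB pages (PAGE_SHIFT of 12, an assumption not shown in this hunk) the remaining 10 bits index the 1024-entry pte table.

#include <stdio.h>

/* Standalone sketch (not kernel code): address decomposition under the
 * two-level layout from the patched pgtable.h. PGDIR_SHIFT, PTRS_PER_PGD
 * and PTRS_PER_PTE are copied from the patch; PAGE_SHIFT = 12 is the
 * usual 4 KB-page assumption. */
#define PAGE_SHIFT	12
#define PGDIR_SHIFT	22
#define PTRS_PER_PGD	1024
#define PTRS_PER_PTE	1024

static void decompose(unsigned long vaddr)
{
	unsigned long pgd_index = (vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
	unsigned long pte_index = (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	unsigned long offset    = vaddr & ((1UL << PAGE_SHIFT) - 1);

	printf("%#010lx -> pgd %4lu, pte %4lu, offset %#05lx\n",
	       vaddr, pgd_index, pte_index, offset);
}

int main(void)
{
	decompose(0x12345678UL);
	decompose(0xc0001000UL);
	return 0;
}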