Diffstat (limited to 'include/asm-powerpc/pgtable-ppc32.h')
-rw-r--r-- | include/asm-powerpc/pgtable-ppc32.h | 67
1 file changed, 0 insertions, 67 deletions
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index 7fb730c62f8..86a54a4a8a2 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -6,11 +6,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/sched.h>
 #include <linux/threads.h>
-#include <asm/processor.h>		/* For TASK_SIZE */
-#include <asm/mmu.h>
-#include <asm/page.h>
 #include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */
-struct mm_struct;
 
 extern unsigned long va_to_phys(unsigned long address);
 extern pte_t *va_to_pte(unsigned long address);
@@ -488,14 +484,6 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 #define pfn_pte(pfn, prot)	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
 				pgprot_val(prot))
 #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
 #endif /* __ASSEMBLY__ */
 
 #define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
@@ -512,9 +500,7 @@ extern unsigned long empty_zero_page[1024];
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
-static inline int pte_read(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
 static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
-static inline int pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }
 static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
@@ -522,21 +508,13 @@ static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
 static inline void pte_uncache(pte_t pte)       { pte_val(pte) |= _PAGE_NO_CACHE; }
 static inline void pte_cache(pte_t pte)         { pte_val(pte) &= ~_PAGE_NO_CACHE; }
 
-static inline pte_t pte_rdprotect(pte_t pte) {
-	pte_val(pte) &= ~_PAGE_USER; return pte; }
 static inline pte_t pte_wrprotect(pte_t pte) {
 	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
-static inline pte_t pte_exprotect(pte_t pte) {
-	pte_val(pte) &= ~_PAGE_EXEC; return pte; }
 static inline pte_t pte_mkclean(pte_t pte) {
 	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
 static inline pte_t pte_mkold(pte_t pte) {
 	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
 
-static inline pte_t pte_mkread(pte_t pte) {
-	pte_val(pte) |= _PAGE_USER; return pte; }
-static inline pte_t pte_mkexec(pte_t pte) {
-	pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
 static inline pte_t pte_mkwrite(pte_t pte) {
 	pte_val(pte) |= _PAGE_RW; return pte; }
 static inline pte_t pte_mkdirty(pte_t pte) {
@@ -643,13 +621,6 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon
 #define ptep_test_and_clear_young(__vma, __addr, __ptep) \
 	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
 
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,
-					    unsigned long addr, pte_t *ptep)
-{
-	return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
-}
-
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 				       pte_t *ptep)
@@ -734,10 +705,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
 #define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
 
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
-extern void paging_init(void);
-
 /*
  * Encode and decode a swap entry.
  * Note that the bits we use in a PTE for representing a swap entry
@@ -755,40 +722,6 @@ extern void paging_init(void);
 #define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
 #define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })
 
-/* CONFIG_APUS */
-/* For virtual address to physical address conversion */
-extern void cache_clear(__u32 addr, int length);
-extern void cache_push(__u32 addr, int length);
-extern int mm_end_of_chunk (unsigned long addr, int len);
-extern unsigned long iopa(unsigned long addr);
-extern unsigned long mm_ptov(unsigned long addr) __attribute_const__;
-
-/* Values for nocacheflag and cmode */
-/* These are not used by the APUS kernel_map, but prevents
-   compilation errors. */
-#define	KERNELMAP_FULL_CACHING		0
-#define	KERNELMAP_NOCACHE_SER		1
-#define	KERNELMAP_NOCACHE_NONSER	2
-#define	KERNELMAP_NO_COPYBACK		3
-
-/*
- * Map some physical address range into the kernel address space.
- */
-extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
-				int nocacheflag, unsigned long *memavailp );
-
-/*
- * Set cache mode of (kernel space) address range.
- */
-extern void kernel_set_cachemode (unsigned long address, unsigned long size,
-				   unsigned int cmode);
-
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-#define kern_addr_valid(addr)	(1)
-
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
-		remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /*
  * No page table caches to initialise
  */
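Context for the removals above: the pte_* helpers deleted here (pte_read, pte_exec, pte_rdprotect, pte_mkread, pte_mkexec, ...) all follow the same pattern as the ones that remain. A query helper tests a bit in the raw PTE word; a modifier helper takes the pte_t by value, flips a bit, and returns the modified copy for the caller to write back. The following is a minimal, standalone userspace sketch of that pattern only; the pte word width and the _PAGE_* bit values are placeholders chosen for illustration, not the real ppc32 layout defined by the sub-arch headers.

/* Standalone illustration of the pte accessor pattern seen in the diff.
 * Bit positions and the PTE word width are invented for this example. */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t pte_basic_t;               /* placeholder PTE word type */
typedef struct { pte_basic_t pte; } pte_t;  /* same struct-wrapper idiom */

#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })

/* Example software bits -- NOT the real ppc32 values. */
#define _PAGE_USER	0x001
#define _PAGE_RW	0x002
#define _PAGE_ACCESSED	0x010

/* Query helpers: test a bit in the PTE word. */
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }

/* Modifier helpers: operate on the by-value copy and return it; the caller
 * stores the result back into the page table entry. */
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_RW; return pte; }

int main(void)
{
	pte_t pte = __pte(_PAGE_USER | _PAGE_ACCESSED);

	pte = pte_mkwrite(pte);
	printf("writable: %s, young: %s\n",
	       pte_write(pte) ? "yes" : "no", pte_young(pte) ? "yes" : "no");

	pte = pte_wrprotect(pte);
	printf("writable: %s\n", pte_write(pte) ? "yes" : "no");
	return 0;
}

Because the helpers work on a by-value copy, they compose naturally (e.g. pte_wrprotect(pte_mkold(pte))) and never touch the live page table themselves; that is left to the set_pte/pte_update paths shown elsewhere in the header.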