Diffstat (limited to 'include/asm-ppc64')
-rw-r--r--  include/asm-ppc64/abs_addr.h        73
-rw-r--r--  include/asm-ppc64/btext.h           27
-rw-r--r--  include/asm-ppc64/cache.h           36
-rw-r--r--  include/asm-ppc64/cacheflush.h      48
-rw-r--r--  include/asm-ppc64/compat.h          205
-rw-r--r--  include/asm-ppc64/current.h         16
-rw-r--r--  include/asm-ppc64/delay.h           48
-rw-r--r--  include/asm-ppc64/dma-mapping.h     136
-rw-r--r--  include/asm-ppc64/eeh.h             384
-rw-r--r--  include/asm-ppc64/floppy.h          106
-rw-r--r--  include/asm-ppc64/hvcall.h          173
-rw-r--r--  include/asm-ppc64/hvconsole.h       49
-rw-r--r--  include/asm-ppc64/hvcserver.h       57
-rw-r--r--  include/asm-ppc64/imalloc.h         26
-rw-r--r--  include/asm-ppc64/io.h              458
-rw-r--r--  include/asm-ppc64/lppaca.h          132
-rw-r--r--  include/asm-ppc64/mmu.h             391
-rw-r--r--  include/asm-ppc64/mmu_context.h     85
-rw-r--r--  include/asm-ppc64/mmzone.h          105
-rw-r--r--  include/asm-ppc64/nvram.h           116
-rw-r--r--  include/asm-ppc64/paca.h            121
-rw-r--r--  include/asm-ppc64/page.h            333
-rw-r--r--  include/asm-ppc64/pci-bridge.h      146
-rw-r--r--  include/asm-ppc64/pci.h             193
-rw-r--r--  include/asm-ppc64/pgalloc.h         151
-rw-r--r--  include/asm-ppc64/pgtable-4k.h      91
-rw-r--r--  include/asm-ppc64/pgtable-64k.h     90
-rw-r--r--  include/asm-ppc64/pgtable.h         512
-rw-r--r--  include/asm-ppc64/prom.h            218
-rw-r--r--  include/asm-ppc64/ptrace-common.h   164
-rw-r--r--  include/asm-ppc64/serial.h          23
-rw-r--r--  include/asm-ppc64/signal.h          132
-rw-r--r--  include/asm-ppc64/spinlock.h        241
-rw-r--r--  include/asm-ppc64/system.h          308
-rw-r--r--  include/asm-ppc64/systemcfg.h       64
-rw-r--r--  include/asm-ppc64/tce.h             64
-rw-r--r--  include/asm-ppc64/udbg.h            31
-rw-r--r--  include/asm-ppc64/vdso.h            83
38 files changed, 0 insertions, 5636 deletions
diff --git a/include/asm-ppc64/abs_addr.h b/include/asm-ppc64/abs_addr.h
deleted file mode 100644
index dc3fc3fefef..00000000000
--- a/include/asm-ppc64/abs_addr.h
+++ /dev/null
@@ -1,73 +0,0 @@
-#ifndef _ABS_ADDR_H
-#define _ABS_ADDR_H
-
-#include <linux/config.h>
-
-/*
- * c 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/types.h>
-#include <asm/page.h>
-#include <asm/prom.h>
-#include <asm/lmb.h>
-#include <asm/firmware.h>
-
-struct mschunks_map {
- unsigned long num_chunks;
- unsigned long chunk_size;
- unsigned long chunk_shift;
- unsigned long chunk_mask;
- u32 *mapping;
-};
-
-extern struct mschunks_map mschunks_map;
-
-/* Chunks are 256 KB */
-#define MSCHUNKS_CHUNK_SHIFT (18)
-#define MSCHUNKS_CHUNK_SIZE (1UL << MSCHUNKS_CHUNK_SHIFT)
-#define MSCHUNKS_OFFSET_MASK (MSCHUNKS_CHUNK_SIZE - 1)
-
-static inline unsigned long chunk_to_addr(unsigned long chunk)
-{
- return chunk << MSCHUNKS_CHUNK_SHIFT;
-}
-
-static inline unsigned long addr_to_chunk(unsigned long addr)
-{
- return addr >> MSCHUNKS_CHUNK_SHIFT;
-}
-
-static inline unsigned long phys_to_abs(unsigned long pa)
-{
- unsigned long chunk;
-
- /* This is a no-op on non-iSeries */
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- return pa;
-
- chunk = addr_to_chunk(pa);
-
- if (chunk < mschunks_map.num_chunks)
- chunk = mschunks_map.mapping[chunk];
-
- return chunk_to_addr(chunk) + (pa & MSCHUNKS_OFFSET_MASK);
-}
-
-/* Convenience macros */
-#define virt_to_abs(va) phys_to_abs(__pa(va))
-#define abs_to_virt(aa) __va(aa)
-
-/*
- * Converts Virtual Address to Real Address for
- * Legacy iSeries Hypervisor calls
- */
-#define iseries_hv_addr(virtaddr) \
- (0x8000000000000000 | virt_to_abs(virtaddr))
-
-#endif /* _ABS_ADDR_H */
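
A quick illustration of the interface removed above: a minimal sketch of how a caller might compose these helpers to hand a kernel buffer's absolute address to firmware. The wrapper function is hypothetical.

static unsigned long example_buf_to_abs(void *buf)
{
	/* virt_to_abs() expands to phys_to_abs(__pa(buf)); on anything other
	 * than legacy iSeries, phys_to_abs() is effectively an identity map. */
	return virt_to_abs(buf);
}
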
diff --git a/include/asm-ppc64/btext.h b/include/asm-ppc64/btext.h
deleted file mode 100644
index 71cce36bc63..00000000000
--- a/include/asm-ppc64/btext.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Definitions for using the procedures in btext.c.
- *
- * Benjamin Herrenschmidt <benh@kernel.crashing.org>
- */
-#ifndef __PPC_BTEXT_H
-#define __PPC_BTEXT_H
-#ifdef __KERNEL__
-
-extern void btext_clearscreen(void);
-extern void btext_flushscreen(void);
-
-extern int boot_text_mapped;
-
-extern int btext_initialize(struct device_node *np);
-
-extern void map_boot_text(void);
-extern void init_boot_display(void);
-extern void btext_update_display(unsigned long phys, int width, int height,
- int depth, int pitch);
-
-extern void btext_drawchar(char c);
-extern void btext_drawstring(const char *str);
-extern void btext_drawhex(unsigned long v);
-
-#endif /* __KERNEL__ */
-#endif /* __PPC_BTEXT_H */
diff --git a/include/asm-ppc64/cache.h b/include/asm-ppc64/cache.h
deleted file mode 100644
index 92140a7efbd..00000000000
--- a/include/asm-ppc64/cache.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef __ARCH_PPC64_CACHE_H
-#define __ARCH_PPC64_CACHE_H
-
-#include <asm/types.h>
-
-/* bytes per L1 cache line */
-#define L1_CACHE_SHIFT 7
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-
-#define SMP_CACHE_BYTES L1_CACHE_BYTES
-#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
-
-#ifndef __ASSEMBLY__
-
-struct ppc64_caches {
- u32 dsize; /* L1 d-cache size */
- u32 dline_size; /* L1 d-cache line size */
- u32 log_dline_size;
- u32 dlines_per_page;
- u32 isize; /* L1 i-cache size */
- u32 iline_size; /* L1 i-cache line size */
- u32 log_iline_size;
- u32 ilines_per_page;
-};
-
-extern struct ppc64_caches ppc64_caches;
-
-#endif
-
-#endif
diff --git a/include/asm-ppc64/cacheflush.h b/include/asm-ppc64/cacheflush.h
deleted file mode 100644
index ffbc08be8e5..00000000000
--- a/include/asm-ppc64/cacheflush.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#ifndef _PPC64_CACHEFLUSH_H
-#define _PPC64_CACHEFLUSH_H
-
-#include <linux/mm.h>
-#include <asm/cputable.h>
-
-/*
- * No cache flushing is required when address mappings are
- * changed, because the caches on PowerPCs are physically
- * addressed.
- */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define flush_icache_page(vma, page) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
-extern void flush_dcache_page(struct page *page);
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-
-extern void __flush_icache_range(unsigned long, unsigned long);
-extern void flush_icache_user_range(struct vm_area_struct *vma,
- struct page *page, unsigned long addr,
- int len);
-
-extern void flush_dcache_range(unsigned long start, unsigned long stop);
-extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
-extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
-} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-
-extern void __flush_dcache_icache(void *page_va);
-
-static inline void flush_icache_range(unsigned long start, unsigned long stop)
-{
- if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
- __flush_icache_range(start, stop);
-}
-
-#endif /* _PPC64_CACHEFLUSH_H */
diff --git a/include/asm-ppc64/compat.h b/include/asm-ppc64/compat.h
deleted file mode 100644
index 6ec62cd2d1d..00000000000
--- a/include/asm-ppc64/compat.h
+++ /dev/null
@@ -1,205 +0,0 @@
-#ifndef _ASM_PPC64_COMPAT_H
-#define _ASM_PPC64_COMPAT_H
-/*
- * Architecture specific compatibility types
- */
-#include <linux/types.h>
-#include <linux/sched.h>
-
-#define COMPAT_USER_HZ 100
-
-typedef u32 compat_size_t;
-typedef s32 compat_ssize_t;
-typedef s32 compat_time_t;
-typedef s32 compat_clock_t;
-typedef s32 compat_pid_t;
-typedef u32 __compat_uid_t;
-typedef u32 __compat_gid_t;
-typedef u32 __compat_uid32_t;
-typedef u32 __compat_gid32_t;
-typedef u32 compat_mode_t;
-typedef u32 compat_ino_t;
-typedef u32 compat_dev_t;
-typedef s32 compat_off_t;
-typedef s64 compat_loff_t;
-typedef s16 compat_nlink_t;
-typedef u16 compat_ipc_pid_t;
-typedef s32 compat_daddr_t;
-typedef u32 compat_caddr_t;
-typedef __kernel_fsid_t compat_fsid_t;
-typedef s32 compat_key_t;
-typedef s32 compat_timer_t;
-
-typedef s32 compat_int_t;
-typedef s32 compat_long_t;
-typedef u32 compat_uint_t;
-typedef u32 compat_ulong_t;
-
-struct compat_timespec {
- compat_time_t tv_sec;
- s32 tv_nsec;
-};
-
-struct compat_timeval {
- compat_time_t tv_sec;
- s32 tv_usec;
-};
-
-struct compat_stat {
- compat_dev_t st_dev;
- compat_ino_t st_ino;
- compat_mode_t st_mode;
- compat_nlink_t st_nlink;
- __compat_uid32_t st_uid;
- __compat_gid32_t st_gid;
- compat_dev_t st_rdev;
- compat_off_t st_size;
- compat_off_t st_blksize;
- compat_off_t st_blocks;
- compat_time_t st_atime;
- u32 st_atime_nsec;
- compat_time_t st_mtime;
- u32 st_mtime_nsec;
- compat_time_t st_ctime;
- u32 st_ctime_nsec;
- u32 __unused4[2];
-};
-
-struct compat_flock {
- short l_type;
- short l_whence;
- compat_off_t l_start;
- compat_off_t l_len;
- compat_pid_t l_pid;
-};
-
-#define F_GETLK64 12 /* using 'struct flock64' */
-#define F_SETLK64 13
-#define F_SETLKW64 14
-
-struct compat_flock64 {
- short l_type;
- short l_whence;
- compat_loff_t l_start;
- compat_loff_t l_len;
- compat_pid_t l_pid;
-};
-
-struct compat_statfs {
- int f_type;
- int f_bsize;
- int f_blocks;
- int f_bfree;
- int f_bavail;
- int f_files;
- int f_ffree;
- compat_fsid_t f_fsid;
- int f_namelen; /* SunOS ignores this field. */
- int f_frsize;
- int f_spare[5];
-};
-
-#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
-#define COMPAT_RLIM_INFINITY 0xffffffff
-
-typedef u32 compat_old_sigset_t;
-
-#define _COMPAT_NSIG 64
-#define _COMPAT_NSIG_BPW 32
-
-typedef u32 compat_sigset_word;
-
-#define COMPAT_OFF_T_MAX 0x7fffffff
-#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
-
-/*
- * A pointer passed in from user mode. This should not
- * be used for syscall parameters, just declare them
- * as pointers because the syscall entry code will have
- * appropriately converted them already.
- */
-typedef u32 compat_uptr_t;
-
-static inline void __user *compat_ptr(compat_uptr_t uptr)
-{
- return (void __user *)(unsigned long)uptr;
-}
-
-static inline void __user *compat_alloc_user_space(long len)
-{
- struct pt_regs *regs = current->thread.regs;
- unsigned long usp = regs->gpr[1];
-
- /*
- * We can't access below the stack pointer in the 32bit ABI and
- * can access 288 bytes in the 64bit ABI
- */
- if (!(test_thread_flag(TIF_32BIT)))
- usp -= 288;
-
- return (void __user *) (usp - len);
-}
-
-/*
- * ipc64_perm is actually 32/64bit clean but since the compat layer refers to
- * it we may as well define it.
- */
-struct compat_ipc64_perm {
- compat_key_t key;
- __compat_uid_t uid;
- __compat_gid_t gid;
- __compat_uid_t cuid;
- __compat_gid_t cgid;
- compat_mode_t mode;
- unsigned int seq;
- unsigned int __pad2;
- unsigned long __unused1; /* yes they really are 64bit pads */
- unsigned long __unused2;
-};
-
-struct compat_semid64_ds {
- struct compat_ipc64_perm sem_perm;
- unsigned int __unused1;
- compat_time_t sem_otime;
- unsigned int __unused2;
- compat_time_t sem_ctime;
- compat_ulong_t sem_nsems;
- compat_ulong_t __unused3;
- compat_ulong_t __unused4;
-};
-
-struct compat_msqid64_ds {
- struct compat_ipc64_perm msg_perm;
- unsigned int __unused1;
- compat_time_t msg_stime;
- unsigned int __unused2;
- compat_time_t msg_rtime;
- unsigned int __unused3;
- compat_time_t msg_ctime;
- compat_ulong_t msg_cbytes;
- compat_ulong_t msg_qnum;
- compat_ulong_t msg_qbytes;
- compat_pid_t msg_lspid;
- compat_pid_t msg_lrpid;
- compat_ulong_t __unused4;
- compat_ulong_t __unused5;
-};
-
-struct compat_shmid64_ds {
- struct compat_ipc64_perm shm_perm;
- unsigned int __unused1;
- compat_time_t shm_atime;
- unsigned int __unused2;
- compat_time_t shm_dtime;
- unsigned int __unused3;
- compat_time_t shm_ctime;
- unsigned int __unused4;
- compat_size_t shm_segsz;
- compat_pid_t shm_cpid;
- compat_pid_t shm_lpid;
- compat_ulong_t shm_nattch;
- compat_ulong_t __unused5;
- compat_ulong_t __unused6;
-};
-
-#endif /* _ASM_PPC64_COMPAT_H */
diff --git a/include/asm-ppc64/current.h b/include/asm-ppc64/current.h
deleted file mode 100644
index 52ddc60c8b6..00000000000
--- a/include/asm-ppc64/current.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _PPC64_CURRENT_H
-#define _PPC64_CURRENT_H
-
-#include <asm/paca.h>
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#define get_current() (get_paca()->__current)
-#define current get_current()
-
-#endif /* !(_PPC64_CURRENT_H) */
diff --git a/include/asm-ppc64/delay.h b/include/asm-ppc64/delay.h
deleted file mode 100644
index 05f198cf73d..00000000000
--- a/include/asm-ppc64/delay.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#ifndef _PPC64_DELAY_H
-#define _PPC64_DELAY_H
-
-/*
- * Copyright 1996, Paul Mackerras.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * PPC64 Support added by Dave Engebretsen, Todd Inglett, Mike Corrigan,
- * Anton Blanchard.
- */
-
-extern unsigned long tb_ticks_per_usec;
-
-/* define these here to prevent circular dependencies */
-#define __HMT_low() asm volatile("or 1,1,1")
-#define __HMT_medium() asm volatile("or 2,2,2")
-#define __barrier() asm volatile("":::"memory")
-
-static inline unsigned long __get_tb(void)
-{
- unsigned long rval;
-
- asm volatile("mftb %0" : "=r" (rval));
- return rval;
-}
-
-static inline void __delay(unsigned long loops)
-{
- unsigned long start = __get_tb();
-
- while((__get_tb()-start) < loops)
- __HMT_low();
- __HMT_medium();
- __barrier();
-}
-
-static inline void udelay(unsigned long usecs)
-{
- unsigned long loops = tb_ticks_per_usec * usecs;
-
- __delay(loops);
-}
-
-#endif /* _PPC64_DELAY_H */
diff --git a/include/asm-ppc64/dma-mapping.h b/include/asm-ppc64/dma-mapping.h
deleted file mode 100644
index fb68fa23bea..00000000000
--- a/include/asm-ppc64/dma-mapping.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/* Copyright (C) 2004 IBM
- *
- * Implements the generic device dma API for ppc64. Handles
- * the pci and vio busses
- */
-
-#ifndef _ASM_DMA_MAPPING_H
-#define _ASM_DMA_MAPPING_H
-
-#include <linux/types.h>
-#include <linux/cache.h>
-/* need struct page definitions */
-#include <linux/mm.h>
-#include <asm/scatterlist.h>
-#include <asm/bug.h>
-
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
-extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle);
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size, enum dma_data_direction direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction direction);
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
- return (dma_addr == DMA_ERROR_CODE);
-}
-
-/* Now for the API extensions over the pci_ one */
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d) (1)
-
-static inline int
-dma_get_cache_alignment(void)
-{
- /* no easy way to get cache size on all processors, so return
- * the maximum possible, to be safe */
- return (1 << L1_CACHE_SHIFT_MAX);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-static inline void
-dma_cache_sync(void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-/*
- * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
- */
-struct dma_mapping_ops {
- void * (*alloc_coherent)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
- void (*free_coherent)(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
- dma_addr_t (*map_single)(struct device *dev, void *ptr,
- size_t size, enum dma_data_direction direction);
- void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction);
- int (*map_sg)(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction);
- void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction);
- int (*dma_supported)(struct device *dev, u64 mask);
- int (*dac_dma_supported)(struct device *dev, u64 mask);
-};
-
-#endif /* _ASM_DMA_MAPPING_H */
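
A minimal sketch of the streaming-DMA pattern this header declares, assuming a hypothetical driver that already has a device pointer, buffer and length in hand.

static int example_dma_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(handle))
		return -EIO;
	/* ... program the device with "handle" and run the transfer ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}

Note that in this version of the API dma_mapping_error() takes only the returned handle, as declared above.
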
diff --git a/include/asm-ppc64/eeh.h b/include/asm-ppc64/eeh.h
deleted file mode 100644
index 40c8eb57493..00000000000
--- a/include/asm-ppc64/eeh.h
+++ /dev/null
@@ -1,384 +0,0 @@
-/*
- * eeh.h
- * Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _PPC64_EEH_H
-#define _PPC64_EEH_H
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/string.h>
-
-struct pci_dev;
-struct device_node;
-struct device_node;
-struct notifier_block;
-
-#ifdef CONFIG_EEH
-
-/* Values for eeh_mode bits in device_node */
-#define EEH_MODE_SUPPORTED (1<<0)
-#define EEH_MODE_NOCHECK (1<<1)
-#define EEH_MODE_ISOLATED (1<<2)
-
-void __init eeh_init(void);
-unsigned long eeh_check_failure(const volatile void __iomem *token,
- unsigned long val);
-int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev);
-void __init pci_addr_cache_build(void);
-
-/**
- * eeh_add_device_early
- * eeh_add_device_late
- *
- * Perform eeh initialization for devices added after boot.
- * Call eeh_add_device_early before doing any i/o to the
- * device (including config space i/o). Call eeh_add_device_late
- * to finish the eeh setup for this device.
- */
-void eeh_add_device_early(struct device_node *);
-void eeh_add_device_late(struct pci_dev *);
-
-/**
- * eeh_remove_device - undo EEH setup for the indicated pci device
- * @dev: pci device to be removed
- *
- * This routine should be called when a device is removed from a running
- * system (e.g. by hotplug or dlpar).
- */
-void eeh_remove_device(struct pci_dev *);
-
-#define EEH_DISABLE 0
-#define EEH_ENABLE 1
-#define EEH_RELEASE_LOADSTORE 2
-#define EEH_RELEASE_DMA 3
-
-/**
- * Notifier event flags.
- */
-#define EEH_NOTIFY_FREEZE 1
-
-/** EEH event -- structure holding pci slot data that describes
- * a change in the isolation status of a PCI slot. A pointer
- * to this struct is passed as the data pointer in a notify callback.
- */
-struct eeh_event {
- struct list_head list;
- struct pci_dev *dev;
- struct device_node *dn;
- int reset_state;
-};
-
-/** Register to find out about EEH events. */
-int eeh_register_notifier(struct notifier_block *nb);
-int eeh_unregister_notifier(struct notifier_block *nb);
-
-/**
- * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
- *
- * If this macro yields TRUE, the caller relays to eeh_check_failure()
- * which does further tests out of line.
- */
-#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0)
-
-/*
- * Reads from a device which has been isolated by EEH will return
- * all 1s. This macro gives an all-1s value of the given size (in
- * bytes: 1, 2, or 4) for comparing with the result of a read.
- */
-#define EEH_IO_ERROR_VALUE(size) (~0U >> ((4 - (size)) * 8))
-
-#else /* !CONFIG_EEH */
-static inline void eeh_init(void) { }
-
-static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
-{
- return val;
-}
-
-static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
-{
- return 0;
-}
-
-static inline void pci_addr_cache_build(void) { }
-
-static inline void eeh_add_device_early(struct device_node *dn) { }
-
-static inline void eeh_add_device_late(struct pci_dev *dev) { }
-
-static inline void eeh_remove_device(struct pci_dev *dev) { }
-
-#define EEH_POSSIBLE_ERROR(val, type) (0)
-#define EEH_IO_ERROR_VALUE(size) (-1UL)
-#endif /* CONFIG_EEH */
-
-/*
- * MMIO read/write operations with EEH support.
- */
-static inline u8 eeh_readb(const volatile void __iomem *addr)
-{
- u8 val = in_8(addr);
- if (EEH_POSSIBLE_ERROR(val, u8))
- return eeh_check_failure(addr, val);
- return val;
-}
-static inline void eeh_writeb(u8 val, volatile void __iomem *addr)
-{
- out_8(addr, val);
-}
-
-static inline u16 eeh_readw(const volatile void __iomem *addr)
-{
- u16 val = in_le16(addr);
- if (EEH_POSSIBLE_ERROR(val, u16))
- return eeh_check_failure(addr, val);
- return val;
-}
-static inline void eeh_writew(u16 val, volatile void __iomem *addr)
-{
- out_le16(addr, val);
-}
-static inline u16 eeh_raw_readw(const volatile void __iomem *addr)
-{
- u16 val = in_be16(addr);
- if (EEH_POSSIBLE_ERROR(val, u16))
- return eeh_check_failure(addr, val);
- return val;
-}
-static inline void eeh_raw_writew(u16 val, volatile void __iomem *addr) {
- volatile u16 __iomem *vaddr = (volatile u16 __iomem *) addr;
- out_be16(vaddr, val);
-}
-
-static inline u32 eeh_readl(const volatile void __iomem *addr)
-{
- u32 val = in_le32(addr);
- if (EEH_POSSIBLE_ERROR(val, u32))
- return eeh_check_failure(addr, val);
- return val;
-}
-static inline void eeh_writel(u32 val, volatile void __iomem *addr)
-{
- out_le32(addr, val);
-}
-static inline u32 eeh_raw_readl(const volatile void __iomem *addr)
-{
- u32 val = in_be32(addr);
- if (EEH_POSSIBLE_ERROR(val, u32))
- return eeh_check_failure(addr, val);
- return val;
-}
-static inline void eeh_raw_writel(u32 val, volatile void __iomem *addr)
-{
- out_be32(addr, val);
-}
-
-static inline u64 eeh_readq(const volatile void __iomem *addr)
-{
- u64 val = in_le64(addr);
- if (EEH_POSSIBLE_ERROR(val, u64))
- return eeh_check_failure(addr, val);
- return val;
-}
-static inline void eeh_writeq(u64 val, volatile void __iomem *addr)
-{
- out_le64(addr, val);
-}
-static inline u64 eeh_raw_readq(const volatile void __iomem *addr)
-{
- u64 val = in_be64(addr);
- if (EEH_POSSIBLE_ERROR(val, u64))
- return eeh_check_failure(addr, val);
- return val;
-}
-static inline void eeh_raw_writeq(u64 val, volatile void __iomem *addr)
-{
- out_be64(addr, val);
-}
-
-#define EEH_CHECK_ALIGN(v,a) \
- ((((unsigned long)(v)) & ((a) - 1)) == 0)
-
-static inline void eeh_memset_io(volatile void __iomem *addr, int c,
- unsigned long n)
-{
- void *p = (void __force *)addr;
- u32 lc = c;
- lc |= lc << 8;
- lc |= lc << 16;
-
- while(n && !EEH_CHECK_ALIGN(p, 4)) {
- *((volatile u8 *)p) = c;
- p++;
- n--;
- }
- while(n >= 4) {
- *((volatile u32 *)p) = lc;
- p += 4;
- n -= 4;
- }
- while(n) {
- *((volatile u8 *)p) = c;
- p++;
- n--;
- }
- __asm__ __volatile__ ("sync" : : : "memory");
-}
-static inline void eeh_memcpy_fromio(void *dest, const volatile void __iomem *src,
- unsigned long n)
-{
- void *vsrc = (void __force *) src;
- void *destsave = dest;
- unsigned long nsave = n;
-
- while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) {
- *((u8 *)dest) = *((volatile u8 *)vsrc);
- __asm__ __volatile__ ("eieio" : : : "memory");
- vsrc++;
- dest++;
- n--;
- }
- while(n > 4) {
- *((u32 *)dest) = *((volatile u32 *)vsrc);
- __asm__ __volatile__ ("eieio" : : : "memory");
- vsrc += 4;
- dest += 4;
- n -= 4;
- }
- while(n) {
- *((u8 *)dest) = *((volatile u8 *)vsrc);
- __asm__ __volatile__ ("eieio" : : : "memory");
- vsrc++;
- dest++;
- n--;
- }
- __asm__ __volatile__ ("sync" : : : "memory");
-
- /* Look for ffff's here at dest[n]. Assume that at least 4 bytes
- * were copied. Check all four bytes.
- */
- if ((nsave >= 4) &&
- (EEH_POSSIBLE_ERROR((*((u32 *) destsave+nsave-4)), u32))) {
- eeh_check_failure(src, (*((u32 *) destsave+nsave-4)));
- }
-}
-
-static inline void eeh_memcpy_toio(volatile void __iomem *dest, const void *src,
- unsigned long n)
-{
- void *vdest = (void __force *) dest;
-
- while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) {
- *((volatile u8 *)vdest) = *((u8 *)src);
- src++;
- vdest++;
- n--;
- }
- while(n > 4) {
- *((volatile u32 *)vdest) = *((volatile u32 *)src);
- src += 4;
- vdest += 4;
- n-=4;
- }
- while(n) {
- *((volatile u8 *)vdest) = *((u8 *)src);
- src++;
- vdest++;
- n--;
- }
- __asm__ __volatile__ ("sync" : : : "memory");
-}
-
-#undef EEH_CHECK_ALIGN
-
-static inline u8 eeh_inb(unsigned long port)
-{
- u8 val;
- if (!_IO_IS_VALID(port))
- return ~0;
- val = in_8((u8 __iomem *)(port+pci_io_base));
- if (EEH_POSSIBLE_ERROR(val, u8))
- return eeh_check_failure((void __iomem *)(port), val);
- return val;
-}
-
-static inline void eeh_outb(u8 val, unsigned long port)
-{
- if (_IO_IS_VALID(port))
- out_8((u8 __iomem *)(port+pci_io_base), val);
-}
-
-static inline u16 eeh_inw(unsigned long port)
-{
- u16 val;
- if (!_IO_IS_VALID(port))
- return ~0;
- val = in_le16((u16 __iomem *)(port+pci_io_base));
- if (EEH_POSSIBLE_ERROR(val, u16))
- return eeh_check_failure((void __iomem *)(port), val);
- return val;
-}
-
-static inline void eeh_outw(u16 val, unsigned long port)
-{
- if (_IO_IS_VALID(port))
- out_le16((u16 __iomem *)(port+pci_io_base), val);
-}
-
-static inline u32 eeh_inl(unsigned long port)
-{
- u32 val;
- if (!_IO_IS_VALID(port))
- return ~0;
- val = in_le32((u32 __iomem *)(port+pci_io_base));
- if (EEH_POSSIBLE_ERROR(val, u32))
- return eeh_check_failure((void __iomem *)(port), val);
- return val;
-}
-
-static inline void eeh_outl(u32 val, unsigned long port)
-{
- if (_IO_IS_VALID(port))
- out_le32((u32 __iomem *)(port+pci_io_base), val);
-}
-
-/* in-string eeh macros */
-static inline void eeh_insb(unsigned long port, void * buf, int ns)
-{
- _insb((u8 __iomem *)(port+pci_io_base), buf, ns);
- if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8))
- eeh_check_failure((void __iomem *)(port), *(u8*)buf);
-}
-
-static inline void eeh_insw_ns(unsigned long port, void * buf, int ns)
-{
- _insw_ns((u16 __iomem *)(port+pci_io_base), buf, ns);
- if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16))
- eeh_check_failure((void __iomem *)(port), *(u16*)buf);
-}
-
-static inline void eeh_insl_ns(unsigned long port, void * buf, int nl)
-{
- _insl_ns((u32 __iomem *)(port+pci_io_base), buf, nl);
- if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32))
- eeh_check_failure((void __iomem *)(port), *(u32*)buf);
-}
-
-#endif /* _PPC64_EEH_H */
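
A minimal sketch of the ordering described in the eeh_add_device_early/late comment above, assuming a hypothetical hotplug path that already has both the device node and the pci_dev.

static void example_eeh_hotplug(struct device_node *dn, struct pci_dev *dev)
{
	eeh_add_device_early(dn);	/* before any I/O, incl. config space */
	/* ... probe and configure the device here ... */
	eeh_add_device_late(dev);	/* finish EEH setup once the pci_dev exists */
}

On removal, eeh_remove_device() undoes this setup, per the comment above.
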
diff --git a/include/asm-ppc64/floppy.h b/include/asm-ppc64/floppy.h
deleted file mode 100644
index 5c497b588e5..00000000000
--- a/include/asm-ppc64/floppy.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Architecture specific parts of the Floppy driver
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1995
- */
-#ifndef __ASM_PPC64_FLOPPY_H
-#define __ASM_PPC64_FLOPPY_H
-
-#include <linux/config.h>
-#include <asm/machdep.h>
-
-#define fd_inb(port) inb_p(port)
-#define fd_outb(value,port) outb_p(value,port)
-
-#define fd_enable_dma() enable_dma(FLOPPY_DMA)
-#define fd_disable_dma() disable_dma(FLOPPY_DMA)
-#define fd_request_dma() request_dma(FLOPPY_DMA,"floppy")
-#define fd_free_dma() free_dma(FLOPPY_DMA)
-#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA)
-#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA,mode)
-#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count)
-#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
-#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
-#define fd_cacheflush(addr,size) /* nothing */
-#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt, \
- SA_INTERRUPT|SA_SAMPLE_RANDOM, \
- "floppy", NULL)
-#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL);
-
-#ifdef CONFIG_PCI
-
-#include <linux/pci.h>
-
-#define fd_dma_setup(addr,size,mode,io) ppc64_fd_dma_setup(addr,size,mode,io)
-
-static __inline__ int
-ppc64_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
-{
- static unsigned long prev_size;
- static dma_addr_t bus_addr = 0;
- static char *prev_addr;
- static int prev_dir;
- int dir;
-
- dir = (mode == DMA_MODE_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE;
-
- if (bus_addr
- && (addr != prev_addr || size != prev_size || dir != prev_dir)) {
- /* different from last time -- unmap prev */
- pci_unmap_single(NULL, bus_addr, prev_size, prev_dir);
- bus_addr = 0;
- }
-
- if (!bus_addr) /* need to map it */ {
- bus_addr = pci_map_single(NULL, addr, size, dir);
- }
-
- /* remember this one as prev */
- prev_addr = addr;
- prev_size = size;
- prev_dir = dir;
-
- fd_clear_dma_ff();
- fd_cacheflush(addr, size);
- fd_set_dma_mode(mode);
- set_dma_addr(FLOPPY_DMA, bus_addr);
- fd_set_dma_count(size);
- virtual_dma_port = io;
- fd_enable_dma();
-
- return 0;
-}
-
-#endif /* CONFIG_PCI */
-
-__inline__ void virtual_dma_init(void)
-{
- /* Nothing to do on PowerPC */
-}
-
-static int FDC1 = 0x3f0;
-static int FDC2 = -1;
-
-/*
- * Again, the CMOS information is not available
- */
-#define FLOPPY0_TYPE 6
-#define FLOPPY1_TYPE 0
-
-#define N_FDC 2 /* Don't change this! */
-#define N_DRIVE 8
-
-#define FLOPPY_MOTOR_MASK 0xf0
-
-/*
- * The PowerPC has no problems with floppy DMA crossing 64k borders.
- */
-#define CROSS_64KB(a,s) (0)
-
-#define EXTRA_FLOPPY_PARAMS
-
-#endif /* __ASM_PPC64_FLOPPY_H */
diff --git a/include/asm-ppc64/hvcall.h b/include/asm-ppc64/hvcall.h
deleted file mode 100644
index ab7c3cf2488..00000000000
--- a/include/asm-ppc64/hvcall.h
+++ /dev/null
@@ -1,173 +0,0 @@
-#ifndef _PPC64_HVCALL_H
-#define _PPC64_HVCALL_H
-
-#define HVSC .long 0x44000022
-
-#define H_Success 0
-#define H_Busy 1 /* Hardware busy -- retry later */
-#define H_Constrained 4 /* Resource request constrained to max allowed */
-#define H_LongBusyStartRange 9900 /* Start of long busy range */
-#define H_LongBusyOrder1msec 9900 /* Long busy, hint that 1msec is a good time to retry */
-#define H_LongBusyOrder10msec 9901 /* Long busy, hint that 10msec is a good time to retry */
-#define H_LongBusyOrder100msec 9902 /* Long busy, hint that 100msec is a good time to retry */
-#define H_LongBusyOrder1sec 9903 /* Long busy, hint that 1sec is a good time to retry */
-#define H_LongBusyOrder10sec 9904 /* Long busy, hint that 10sec is a good time to retry */
-#define H_LongBusyOrder100sec 9905 /* Long busy, hint that 100sec is a good time to retry */
-#define H_LongBusyEndRange 9905 /* End of long busy range */
-#define H_Hardware -1 /* Hardware error */
-#define H_Function -2 /* Function not supported */
-#define H_Privilege -3 /* Caller not privileged */
-#define H_Parameter -4 /* Parameter invalid, out-of-range or conflicting */
-#define H_Bad_Mode -5 /* Illegal msr value */
-#define H_PTEG_Full -6 /* PTEG is full */
-#define H_Not_Found -7 /* PTE was not found */
-#define H_Reserved_DABR -8 /* DABR address is reserved by the hypervisor on this processor */
-#define H_NoMem -9
-#define H_Authority -10
-#define H_Permission -11
-#define H_Dropped -12
-#define H_SourceParm -13
-#define H_DestParm -14
-#define H_RemoteParm -15
-#define H_Resource -16
-
-/* Long Busy is a condition that can be returned by the firmware
- * when a call cannot be completed now, but the identical call
- * should be retried later. This prevents calls blocking in the
- * firmware for long periods of time. Annoyingly the firmware can return
- * a range of return codes, hinting at how long we should wait before
- * retrying. If you don't care for the hint, the macro below is a good
- * way to check for the long_busy return codes
- */
-#define H_isLongBusy(x) ((x >= H_LongBusyStartRange) && (x <= H_LongBusyEndRange))
-
-/* Flags */
-#define H_LARGE_PAGE (1UL<<(63-16))
-#define H_EXACT (1UL<<(63-24)) /* Use exact PTE or return H_PTEG_FULL */
-#define H_R_XLATE (1UL<<(63-25)) /* include a valid logical page num in the pte if the valid bit is set */
-#define H_READ_4 (1UL<<(63-26)) /* Return 4 PTEs */
-#define H_AVPN (1UL<<(63-32)) /* An avpn is provided as a sanity test */
-#define H_ANDCOND (1UL<<(63-33))
-#define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */
-#define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages) */
-#define H_ZERO_PAGE (1UL<<(63-48)) /* zero the page before mapping (ignored for IO pages) */
-#define H_COPY_PAGE (1UL<<(63-49))
-#define H_N (1UL<<(63-61))
-#define H_PP1 (1UL<<(63-62))
-#define H_PP2 (1UL<<(63-63))
-
-/* DABRX flags */
-#define H_DABRX_HYPERVISOR (1UL<<(63-61))
-#define H_DABRX_KERNEL (1UL<<(63-62))
-#define H_DABRX_USER (1UL<<(63-63))
-
-/* pSeries hypervisor opcodes */
-#define H_REMOVE 0x04
-#define H_ENTER 0x08
-#define H_READ 0x0c
-#define H_CLEAR_MOD 0x10
-#define H_CLEAR_REF 0x14
-#define H_PROTECT 0x18
-#define H_GET_TCE 0x1c
-#define H_PUT_TCE 0x20
-#define H_SET_SPRG0 0x24
-#define H_SET_DABR 0x28
-#define H_PAGE_INIT 0x2c
-#define H_SET_ASR 0x30
-#define H_ASR_ON 0x34
-#define H_ASR_OFF 0x38
-#define H_LOGICAL_CI_LOAD 0x3c
-#define H_LOGICAL_CI_STORE 0x40
-#define H_LOGICAL_CACHE_LOAD 0x44
-#define H_LOGICAL_CACHE_STORE 0x48
-#define H_LOGICAL_ICBI 0x4c
-#define H_LOGICAL_DCBF 0x50
-#define H_GET_TERM_CHAR 0x54
-#define H_PUT_TERM_CHAR 0x58
-#define H_REAL_TO_LOGICAL 0x5c
-#define H_HYPERVISOR_DATA 0x60
-#define H_EOI 0x64
-#define H_CPPR 0x68
-#define H_IPI 0x6c
-#define H_IPOLL 0x70
-#define H_XIRR 0x74
-#define H_PERFMON 0x7c
-#define H_MIGRATE_DMA 0x78
-#define H_REGISTER_VPA 0xDC
-#define H_CEDE 0xE0
-#define H_CONFER 0xE4
-#define H_PROD 0xE8
-#define H_GET_PPP 0xEC
-#define H_SET_PPP 0xF0
-#define H_PURR 0xF4
-#define H_PIC 0xF8
-#define H_REG_CRQ 0xFC
-#define H_FREE_CRQ 0x100
-#define H_VIO_SIGNAL 0x104
-#define H_SEND_CRQ 0x108
-#define H_COPY_RDMA 0x110
-#define H_SET_XDABR 0x134
-#define H_STUFF_TCE 0x138
-#define H_PUT_TCE_INDIRECT 0x13C
-#define H_VTERM_PARTNER_INFO 0x150
-#define H_REGISTER_VTERM 0x154
-#define H_FREE_VTERM 0x158
-#define H_POLL_PENDING 0x1D8
-
-#ifndef __ASSEMBLY__
-
-/* plpar_hcall() -- Generic call interface using above opcodes
- *
- * The actual call interface is a hypervisor call instruction with
- * the opcode in R3 and input args in R4-R7.
- * Status is returned in R3 with variable output values in R4-R11.
- * Only H_PTE_READ with H_READ_4 uses R6-R11 so we ignore it for now
- * and return only two out args which MUST ALWAYS BE PROVIDED.
- */
-long plpar_hcall(unsigned long opcode,
- unsigned long arg1,
- unsigned long arg2,
- unsigned long arg3,
- unsigned long arg4,
- unsigned long *out1,
- unsigned long *out2,
- unsigned long *out3);
-
-/* Same as plpar_hcall but for those opcodes that return no values
- * other than status. Slightly more efficient.
- */
-long plpar_hcall_norets(unsigned long opcode, ...);
-
-/*
- * Special hcall interface for ibmveth support.
- * Takes 8 input parms. Returns a rc and stores the
- * R4 return value in *out1.
- */
-long plpar_hcall_8arg_2ret(unsigned long opcode,
- unsigned long arg1,
- unsigned long arg2,
- unsigned long arg3,
- unsigned long arg4,
- unsigned long arg5,
- unsigned long arg6,
- unsigned long arg7,
- unsigned long arg8,
- unsigned long *out1);
-
-/* plpar_hcall_4out()
- *
- * same as plpar_hcall except with 4 output arguments.
- *
- */
-long plpar_hcall_4out(unsigned long opcode,
- unsigned long arg1,
- unsigned long arg2,
- unsigned long arg3,
- unsigned long arg4,
- unsigned long *out1,
- unsigned long *out2,
- unsigned long *out3,
- unsigned long *out4);
-
-#endif /* __ASSEMBLY__ */
-#endif /* _PPC64_HVCALL_H */
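
A minimal sketch of the retry convention the long-busy comment above describes, assuming a hypothetical single-argument hcall wrapper; a real caller would scale the delay to the specific H_LongBusyOrder* hint rather than the fixed mdelay() used here.

static long example_hcall(unsigned long opcode, unsigned long arg)
{
	long rc;

	for (;;) {
		rc = plpar_hcall_norets(opcode, arg);
		if (rc == H_Busy)
			continue;		/* plain busy: just retry */
		if (!H_isLongBusy(rc))
			return rc;
		mdelay(1);			/* crude stand-in for the hinted wait */
	}
}
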
diff --git a/include/asm-ppc64/hvconsole.h b/include/asm-ppc64/hvconsole.h
deleted file mode 100644
index 6da93ce74dc..00000000000
--- a/include/asm-ppc64/hvconsole.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * hvconsole.h
- * Copyright (C) 2004 Ryan S Arnold, IBM Corporation
- *
- * LPAR console support.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _PPC64_HVCONSOLE_H
-#define _PPC64_HVCONSOLE_H
-
-/*
- * This is the max number of console adapters that can/will be found as
- * console devices on first stage console init. Any number beyond this range
- * can't be used as a console device but is still a valid tty device.
- */
-#define MAX_NR_HVC_CONSOLES 16
-
-/* implemented by a low level driver */
-struct hv_ops {
- int (*get_chars)(uint32_t vtermno, char *buf, int count);
- int (*put_chars)(uint32_t vtermno, const char *buf, int count);
-};
-extern int hvc_get_chars(uint32_t vtermno, char *buf, int count);
-extern int hvc_put_chars(uint32_t vtermno, const char *buf, int count);
-
-struct hvc_struct;
-
-/* Register a vterm and a slot index for use as a console (console_init) */
-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
-/* register a vterm for hvc tty operation (module_init or hotplug add) */
-extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int irq,
- struct hv_ops *ops);
-/* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
-extern int __devexit hvc_remove(struct hvc_struct *hp);
-#endif /* _PPC64_HVCONSOLE_H */
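
A minimal sketch of how a low-level backend wires the get/put routines above into the hvc framework; the ops structure and init function names are made up.

static struct hv_ops example_hv_ops = {
	.get_chars = hvc_get_chars,
	.put_chars = hvc_put_chars,
};

static int __init example_hvc_console_init(void)
{
	/* make vterm 0 usable as a boot console in slot 0 */
	return hvc_instantiate(0, 0, &example_hv_ops);
}

A full backend would later call hvc_alloc() with the vterm number, its irq and the same ops to get a tty-capable hvc_struct.
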
diff --git a/include/asm-ppc64/hvcserver.h b/include/asm-ppc64/hvcserver.h
deleted file mode 100644
index aecba966579..00000000000
--- a/include/asm-ppc64/hvcserver.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * hvcserver.h
- * Copyright (C) 2004 Ryan S Arnold, IBM Corporation
- *
- * PPC64 virtual I/O console server support.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _PPC64_HVCSERVER_H
-#define _PPC64_HVCSERVER_H
-
-#include <linux/list.h>
-
-/* Converged Location Code length */
-#define HVCS_CLC_LENGTH 79
-
-/**
- * hvcs_partner_info - an element in a list of partner info
- * @node: list_head denoting this partner_info struct's position in the list of
- * partner info.
- * @unit_address: The partner unit address of this entry.
- * @partition_ID: The partner partition ID of this entry.
- * @location_code: The converged location code of this entry + 1 char for the
- * null-term.
- *
- * This structure outlines the format in which partner info is presented to a caller
- * of the hvcs partner info fetching functions. These are strung together into
- * a list using linux kernel lists.
- */
-struct hvcs_partner_info {
- struct list_head node;
- uint32_t unit_address;
- uint32_t partition_ID;
- char location_code[HVCS_CLC_LENGTH + 1]; /* CLC + 1 null-term char */
-};
-
-extern int hvcs_free_partner_info(struct list_head *head);
-extern int hvcs_get_partner_info(uint32_t unit_address,
- struct list_head *head, unsigned long *pi_buff);
-extern int hvcs_register_connection(uint32_t unit_address,
- uint32_t p_partition_ID, uint32_t p_unit_address);
-extern int hvcs_free_connection(uint32_t unit_address);
-
-#endif /* _PPC64_HVCSERVER_H */
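
A minimal sketch of a consumer of the partner-info calls above: fetch the list, walk it via the embedded list_head, then free it. The printk text and pi_buff handling are placeholders.

static void example_dump_partners(uint32_t unit_address, unsigned long *pi_buff)
{
	struct hvcs_partner_info *pi;
	struct list_head head;

	INIT_LIST_HEAD(&head);
	if (hvcs_get_partner_info(unit_address, &head, pi_buff))
		return;
	list_for_each_entry(pi, &head, node)
		printk("partner 0x%x (partition %u) at %s\n",
		       pi->unit_address, pi->partition_ID, pi->location_code);
	hvcs_free_partner_info(&head);
}
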
diff --git a/include/asm-ppc64/imalloc.h b/include/asm-ppc64/imalloc.h
deleted file mode 100644
index 42adf7033a8..00000000000
--- a/include/asm-ppc64/imalloc.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef _PPC64_IMALLOC_H
-#define _PPC64_IMALLOC_H
-
-/*
- * Define the address range of the imalloc VM area.
- */
-#define PHBS_IO_BASE VMALLOC_END
-#define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */
-#define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE)
-
-
-/* imalloc region types */
-#define IM_REGION_UNUSED 0x1
-#define IM_REGION_SUBSET 0x2
-#define IM_REGION_EXISTS 0x4
-#define IM_REGION_OVERLAP 0x8
-#define IM_REGION_SUPERSET 0x10
-
-extern struct vm_struct * im_get_free_area(unsigned long size);
-extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
- int region_type);
-extern void im_free(void *addr);
-
-extern unsigned long ioremap_bot;
-
-#endif /* _PPC64_IMALLOC_H */
diff --git a/include/asm-ppc64/io.h b/include/asm-ppc64/io.h
deleted file mode 100644
index 77fc07c3c6b..00000000000
--- a/include/asm-ppc64/io.h
+++ /dev/null
@@ -1,458 +0,0 @@
-#ifndef _PPC64_IO_H
-#define _PPC64_IO_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/compiler.h>
-#include <asm/page.h>
-#include <asm/byteorder.h>
-#ifdef CONFIG_PPC_ISERIES
-#include <asm/iseries/iseries_io.h>
-#endif
-#include <asm/synch.h>
-#include <asm/delay.h>
-
-#include <asm-generic/iomap.h>
-
-#define __ide_mm_insw(p, a, c) _insw_ns((volatile u16 __iomem *)(p), (a), (c))
-#define __ide_mm_insl(p, a, c) _insl_ns((volatile u32 __iomem *)(p), (a), (c))
-#define __ide_mm_outsw(p, a, c) _outsw_ns((volatile u16 __iomem *)(p), (a), (c))
-#define __ide_mm_outsl(p, a, c) _outsl_ns((volatile u32 __iomem *)(p), (a), (c))
-
-
-#define SIO_CONFIG_RA 0x398
-#define SIO_CONFIG_RD 0x399
-
-#define SLOW_DOWN_IO
-
-extern unsigned long isa_io_base;
-extern unsigned long pci_io_base;
-extern unsigned long io_page_mask;
-
-#define MAX_ISA_PORT 0x10000
-
-#define _IO_IS_VALID(port) ((port) >= MAX_ISA_PORT || (1 << (port>>PAGE_SHIFT)) \
- & io_page_mask)
-
-#ifdef CONFIG_PPC_ISERIES
-/* __raw_* accessors aren't supported on iSeries */
-#define __raw_readb(addr) { BUG(); 0; }
-#define __raw_readw(addr) { BUG(); 0; }
-#define __raw_readl(addr) { BUG(); 0; }
-#define __raw_readq(addr) { BUG(); 0; }
-#define __raw_writeb(v, addr) { BUG(); 0; }
-#define __raw_writew(v, addr) { BUG(); 0; }
-#define __raw_writel(v, addr) { BUG(); 0; }
-#define __raw_writeq(v, addr) { BUG(); 0; }
-#define readb(addr) iSeries_Read_Byte(addr)
-#define readw(addr) iSeries_Read_Word(addr)
-#define readl(addr) iSeries_Read_Long(addr)
-#define writeb(data, addr) iSeries_Write_Byte((data),(addr))
-#define writew(data, addr) iSeries_Write_Word((data),(addr))
-#define writel(data, addr) iSeries_Write_Long((data),(addr))
-#define memset_io(a,b,c) iSeries_memset_io((a),(b),(c))
-#define memcpy_fromio(a,b,c) iSeries_memcpy_fromio((a), (b), (c))
-#define memcpy_toio(a,b,c) iSeries_memcpy_toio((a), (b), (c))
-
-#define inb(addr) readb(((void __iomem *)(long)(addr)))
-#define inw(addr) readw(((void __iomem *)(long)(addr)))
-#define inl(addr) readl(((void __iomem *)(long)(addr)))
-#define outb(data,addr) writeb(data,((void __iomem *)(long)(addr)))
-#define outw(data,addr) writew(data,((void __iomem *)(long)(addr)))
-#define outl(data,addr) writel(data,((void __iomem *)(long)(addr)))
-/*
- * The *_ns versions below don't do byte-swapping.
- * Neither do the standard versions now, these are just here
- * for older code.
- */
-#define insw_ns(port, buf, ns) _insw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns))
-#define insl_ns(port, buf, nl) _insl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl))
-#else
-
-static inline unsigned char __raw_readb(const volatile void __iomem *addr)
-{
- return *(volatile unsigned char __force *)addr;
-}
-static inline unsigned short __raw_readw(const volatile void __iomem *addr)
-{
- return *(volatile unsigned short __force *)addr;
-}
-static inline unsigned int __raw_readl(const volatile void __iomem *addr)
-{
- return *(volatile unsigned int __force *)addr;
-}
-static inline unsigned long __raw_readq(const volatile void __iomem *addr)
-{
- return *(volatile unsigned long __force *)addr;
-}
-static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr)
-{
- *(volatile unsigned char __force *)addr = v;
-}
-static inline void __raw_writew(unsigned short v, volatile void __iomem *addr)
-{
- *(volatile unsigned short __force *)addr = v;
-}
-static inline void __raw_writel(unsigned int v, volatile void __iomem *addr)
-{
- *(volatile unsigned int __force *)addr = v;
-}
-static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
-{
- *(volatile unsigned long __force *)addr = v;
-}
-#define readb(addr) eeh_readb(addr)
-#define readw(addr) eeh_readw(addr)
-#define readl(addr) eeh_readl(addr)
-#define readq(addr) eeh_readq(addr)
-#define writeb(data, addr) eeh_writeb((data), (addr))
-#define writew(data, addr) eeh_writew((data), (addr))
-#define writel(data, addr) eeh_writel((data), (addr))
-#define writeq(data, addr) eeh_writeq((data), (addr))
-#define memset_io(a,b,c) eeh_memset_io((a),(b),(c))
-#define memcpy_fromio(a,b,c) eeh_memcpy_fromio((a),(b),(c))
-#define memcpy_toio(a,b,c) eeh_memcpy_toio((a),(b),(c))
-#define inb(port) eeh_inb((unsigned long)port)
-#define outb(val, port) eeh_outb(val, (unsigned long)port)
-#define inw(port) eeh_inw((unsigned long)port)
-#define outw(val, port) eeh_outw(val, (unsigned long)port)
-#define inl(port) eeh_inl((unsigned long)port)
-#define outl(val, port) eeh_outl(val, (unsigned long)port)
-
-/*
- * The insw/outsw/insl/outsl macros don't do byte-swapping.
- * They are only used in practice for transferring buffers which
- * are arrays of bytes, and byte-swapping is not appropriate in
- * that case. - paulus */
-#define insb(port, buf, ns) eeh_insb((port), (buf), (ns))
-#define insw(port, buf, ns) eeh_insw_ns((port), (buf), (ns))
-#define insl(port, buf, nl) eeh_insl_ns((port), (buf), (nl))
-#define insw_ns(port, buf, ns) eeh_insw_ns((port), (buf), (ns))
-#define insl_ns(port, buf, nl) eeh_insl_ns((port), (buf), (nl))
-
-#define outsb(port, buf, ns) _outsb((u8 __iomem *)((port)+pci_io_base), (buf), (ns))
-#define outsw(port, buf, ns) _outsw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns))
-#define outsl(port, buf, nl) _outsl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl))
-
-#endif
-
-#define readb_relaxed(addr) readb(addr)
-#define readw_relaxed(addr) readw(addr)
-#define readl_relaxed(addr) readl(addr)
-#define readq_relaxed(addr) readq(addr)
-
-extern void _insb(volatile u8 __iomem *port, void *buf, int ns);
-extern void _outsb(volatile u8 __iomem *port, const void *buf, int ns);
-extern void _insw(volatile u16 __iomem *port, void *buf, int ns);
-extern void _outsw(volatile u16 __iomem *port, const void *buf, int ns);
-extern void _insl(volatile u32 __iomem *port, void *buf, int nl);
-extern void _outsl(volatile u32 __iomem *port, const void *buf, int nl);
-extern void _insw_ns(volatile u16 __iomem *port, void *buf, int ns);
-extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, int ns);
-extern void _insl_ns(volatile u32 __iomem *port, void *buf, int nl);
-extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, int nl);
-
-#define mmiowb()
-
-/*
- * output pause versions need a delay at least for the
- * w83c105 ide controller in a p610.
- */
-#define inb_p(port) inb(port)
-#define outb_p(val, port) (udelay(1), outb((val), (port)))
-#define inw_p(port) inw(port)
-#define outw_p(val, port) (udelay(1), outw((val), (port)))
-#define inl_p(port) inl(port)
-#define outl_p(val, port) (udelay(1), outl((val), (port)))
-
-/*
- * The *_ns versions below don't do byte-swapping.
- * Neither do the standard versions now, these are just here
- * for older code.
- */
-#define outsw_ns(port, buf, ns) _outsw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns))
-#define outsl_ns(port, buf, nl) _outsl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl))
-
-
-#define IO_SPACE_LIMIT ~(0UL)
-
-
-#ifdef __KERNEL__
-extern int __ioremap_explicit(unsigned long p_addr, unsigned long v_addr,
- unsigned long size, unsigned long flags);
-extern void __iomem *__ioremap(unsigned long address, unsigned long size,
- unsigned long flags);
-
-/**
- * ioremap - map bus memory into CPU space
- * @address: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- */
-extern void __iomem *ioremap(unsigned long address, unsigned long size);
-
-#define ioremap_nocache(addr, size) ioremap((addr), (size))
-extern int iounmap_explicit(volatile void __iomem *addr, unsigned long size);
-extern void iounmap(volatile void __iomem *addr);
-extern void __iomem * reserve_phb_iospace(unsigned long size);
-
-/**
- * virt_to_phys - map virtual addresses to physical
- * @address: address to remap
- *
- * The returned physical address is the physical (CPU) mapping for
- * the memory address given. It is only valid to use this function on
- * addresses directly mapped or allocated via kmalloc.
- *
- * This function does not give bus mappings for DMA transfers. In
- * almost all conceivable cases a device driver should not be using
- * this function
- */
-static inline unsigned long virt_to_phys(volatile void * address)
-{
- return __pa((unsigned long)address);
-}
-
-/**
- * phys_to_virt - map physical address to virtual
- * @address: address to remap
- *
- * The returned virtual address is a current CPU mapping for
- * the memory address given. It is only valid to use this function on
- * addresses that have a kernel mapping
- *
- * This function does not handle bus mappings for DMA transfers. In
- * almost all conceivable cases a device driver should not be using
- * this function
- */
-static inline void * phys_to_virt(unsigned long address)
-{
- return (void *)__va(address);
-}
-
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-
-/* We do NOT want virtual merging, it would put too much pressure on
- * our iommu allocator. Instead, we want drivers to be smart enough
- * to coalesce sglists that happen to have been mapped in a contiguous
- * way by the iommu
- */
-#define BIO_VMERGE_BOUNDARY 0
-
-#endif /* __KERNEL__ */
-
-static inline void iosync(void)
-{
- __asm__ __volatile__ ("sync" : : : "memory");
-}
-
-/* Enforce in-order execution of data I/O.
- * No distinction between read/write on PPC; use eieio for all three.
- */
-#define iobarrier_rw() eieio()
-#define iobarrier_r() eieio()
-#define iobarrier_w() eieio()
-
-/*
- * 8, 16 and 32 bit, big and little endian I/O operations, with barrier.
- * These routines do not perform EEH-related I/O address translation,
- * and should not be used directly by device drivers. Use inb/readb
- * instead.
- */
-static inline int in_8(const volatile unsigned char __iomem *addr)
-{
- int ret;
-
- __asm__ __volatile__("lbz%U1%X1 %0,%1; twi 0,%0,0; isync"
- : "=r" (ret) : "m" (*addr));
- return ret;
-}
-
-static inline void out_8(volatile unsigned char __iomem *addr, int val)
-{
- __asm__ __volatile__("stb%U0%X0 %1,%0; sync"
- : "=m" (*addr) : "r" (val));
-}
-
-static inline int in_le16(const volatile unsigned short __iomem *addr)
-{
- int ret;
-
- __asm__ __volatile__("lhbrx %0,0,%1; twi 0,%0,0; isync"
- : "=r" (ret) : "r" (addr), "m" (*addr));
- return ret;
-}
-
-static inline int in_be16(const volatile unsigned short __iomem *addr)
-{
- int ret;
-
- __asm__ __volatile__("lhz%U1%X1 %0,%1; twi 0,%0,0; isync"
- : "=r" (ret) : "m" (*addr));
- return ret;
-}
-
-static inline void out_le16(volatile unsigned short __iomem *addr, int val)
-{
- __asm__ __volatile__("sthbrx %1,0,%2; sync"
- : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
-static inline void out_be16(volatile unsigned short __iomem *addr, int val)
-{
- __asm__ __volatile__("sth%U0%X0 %1,%0; sync"
- : "=m" (*addr) : "r" (val));
-}
-
-static inline unsigned in_le32(const volatile unsigned __iomem *addr)
-{
- unsigned ret;
-
- __asm__ __volatile__("lwbrx %0,0,%1; twi 0,%0,0; isync"
- : "=r" (ret) : "r" (addr), "m" (*addr));
- return ret;
-}
-
-static inline unsigned in_be32(const volatile unsigned __iomem *addr)
-{
- unsigned ret;
-
- __asm__ __volatile__("lwz%U1%X1 %0,%1; twi 0,%0,0; isync"
- : "=r" (ret) : "m" (*addr));
- return ret;
-}
-
-static inline void out_le32(volatile unsigned __iomem *addr, int val)
-{
- __asm__ __volatile__("stwbrx %1,0,%2; sync" : "=m" (*addr)
- : "r" (val), "r" (addr));
-}
-
-static inline void out_be32(volatile unsigned __iomem *addr, int val)
-{
- __asm__ __volatile__("stw%U0%X0 %1,%0; sync"
- : "=m" (*addr) : "r" (val));
-}
-
-static inline unsigned long in_le64(const volatile unsigned long __iomem *addr)
-{
- unsigned long tmp, ret;
-
- __asm__ __volatile__(
- "ld %1,0(%2)\n"
- "twi 0,%1,0\n"
- "isync\n"
- "rldimi %0,%1,5*8,1*8\n"
- "rldimi %0,%1,3*8,2*8\n"
- "rldimi %0,%1,1*8,3*8\n"
- "rldimi %0,%1,7*8,4*8\n"
- "rldicl %1,%1,32,0\n"
- "rlwimi %0,%1,8,8,31\n"
- "rlwimi %0,%1,24,16,23\n"
- : "=r" (ret) , "=r" (tmp) : "b" (addr) , "m" (*addr));
- return ret;
-}
-
-static inline unsigned long in_be64(const volatile unsigned long __iomem *addr)
-{
- unsigned long ret;
-
- __asm__ __volatile__("ld%U1%X1 %0,%1; twi 0,%0,0; isync"
- : "=r" (ret) : "m" (*addr));
- return ret;
-}
-
-static inline void out_le64(volatile unsigned long __iomem *addr, unsigned long val)
-{
- unsigned long tmp;
-
- __asm__ __volatile__(
- "rldimi %0,%1,5*8,1*8\n"
- "rldimi %0,%1,3*8,2*8\n"
- "rldimi %0,%1,1*8,3*8\n"
- "rldimi %0,%1,7*8,4*8\n"
- "rldicl %1,%1,32,0\n"
- "rlwimi %0,%1,8,8,31\n"
- "rlwimi %0,%1,24,16,23\n"
- "std %0,0(%3)\n"
- "sync"
- : "=&r" (tmp) , "=&r" (val) : "1" (val) , "b" (addr) , "m" (*addr));
-}
-
-static inline void out_be64(volatile unsigned long __iomem *addr, unsigned long val)
-{
- __asm__ __volatile__("std%U0%X0 %1,%0; sync" : "=m" (*addr) : "r" (val));
-}
-
-#ifndef CONFIG_PPC_ISERIES
-#include <asm/eeh.h>
-#endif
-
-#ifdef __KERNEL__
-
-/**
- * check_signature - find BIOS signatures
- * @io_addr: mmio address to check
- * @signature: signature block
- * @length: length of signature
- *
- * Perform a signature comparison with the mmio address io_addr. This
- * address should have been obtained by ioremap.
- * Returns 1 on a match.
- */
-static inline int check_signature(const volatile void __iomem * io_addr,
- const unsigned char *signature, int length)
-{
- int retval = 0;
-#ifndef CONFIG_PPC_ISERIES
- do {
- if (readb(io_addr) != *signature)
- goto out;
- io_addr++;
- signature++;
- length--;
- } while (length);
- retval = 1;
-out:
-#endif
- return retval;
-}
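
A hedged usage sketch follows; the physical address, signature bytes, and function name are invented purely to show the calling pattern of map, compare, unmap:

static int example_has_rom_signature(void)
{
        static const unsigned char sig[] = { 'R', 'O', 'M', 'x' }; /* made-up signature */
        void __iomem *p = ioremap(0xfffc0000UL, sizeof(sig));      /* made-up address */
        int found = 0;

        if (p) {
                found = check_signature(p, sig, sizeof(sig));
                iounmap(p);
        }
        return found;
}
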
-
-/* Nothing to do */
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-
-/* Check for the existence of legacy devices */
-extern int check_legacy_ioport(unsigned long base_port);
-
-
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p) __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
-#endif /* __KERNEL__ */
-
-#endif /* _PPC64_IO_H */
diff --git a/include/asm-ppc64/lppaca.h b/include/asm-ppc64/lppaca.h
deleted file mode 100644
index 9e2a6c0649a..00000000000
--- a/include/asm-ppc64/lppaca.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * lppaca.h
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ASM_LPPACA_H
-#define _ASM_LPPACA_H
-
-//=============================================================================
-//
-// This control block contains the data that is shared between the
-// hypervisor (PLIC) and the OS.
-//
-//
-//----------------------------------------------------------------------------
-#include <asm/types.h>
-
-struct lppaca
-{
-//=============================================================================
-// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
-// NOTE: The xDynXyz fields are dynamically changed by
-// PLIC when preparing to bring a processor online or when dispatching a
-// virtual processor!
-//=============================================================================
- u32 desc; // Eye catcher 0xD397D781 x00-x03
- u16 size; // Size of this struct x04-x05
- u16 reserved1; // Reserved x06-x07
- u16 reserved2:14; // Reserved x08-x09
- u8 shared_proc:1; // Shared processor indicator ...
- u8 secondary_thread:1; // Secondary thread indicator ...
- volatile u8 dyn_proc_status:8; // Dynamic Status of this proc x0A-x0A
- u8 secondary_thread_count; // Secondary thread count x0B-x0B
- volatile u16 dyn_hv_phys_proc_index;// Dynamic HV Physical Proc Index x0C-x0D
- volatile u16 dyn_hv_log_proc_index;// Dynamic HV Logical Proc Index x0E-x0F
- u32 decr_val; // Value for Decr programming x10-x13
- u32 pmc_val; // Value for PMC regs x14-x17
- volatile u32 dyn_hw_node_id; // Dynamic Hardware Node id x18-x1B
- volatile u32 dyn_hw_proc_id; // Dynamic Hardware Proc Id x1C-x1F
- volatile u32 dyn_pir; // Dynamic ProcIdReg value x20-x23
- u32 dsei_data; // DSEI data x24-x27
- u64 sprg3; // SPRG3 value x28-x2F
- u8 reserved3[80]; // Reserved x30-x7F
-
-//=============================================================================
-// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
-//=============================================================================
- // This Dword contains a byte for each type of interrupt that can occur.
- // The IPI is a count while the others are just a binary 1 or 0.
- union {
- u64 any_int;
- struct {
- u16 reserved; // Reserved - cleared by #mpasmbl
- u8 xirr_int; // Indicates xXirrValue is valid or Immed IO
- u8 ipi_cnt; // IPI Count
- u8 decr_int; // DECR interrupt occurred
- u8 pdc_int; // PDC interrupt occurred
- u8 quantum_int; // Interrupt quantum reached
- u8 old_plic_deferred_ext_int; // Old PLIC has a deferred XIRR pending
- } fields;
- } int_dword;
-
- // Whenever any fields in this Dword are set then PLIC will defer the
- // processing of external interrupts. Note that PLIC will store the
- // XIRR directly into the xXirrValue field so that another XIRR will
- // not be presented until this one clears. The layout of the low
- // 4-bytes of this Dword is up to SLIC - PLIC just checks whether the
- // entire Dword is zero or not. A non-zero value in the low order
- // 2-bytes will result in SLIC being granted the highest thread
- // priority upon return. A 0 will return to SLIC as medium priority.
- u64 plic_defer_ints_area; // Entire Dword
-
- // Used to pass the real SRR0/1 from PLIC to SLIC as well as to
- // pass the target SRR0/1 from SLIC to PLIC on a SetAsrAndRfid.
- u64 saved_srr0; // Saved SRR0 x10-x17
- u64 saved_srr1; // Saved SRR1 x18-x1F
-
- // Used to pass parms from the OS to PLIC for SetAsrAndRfid
- u64 saved_gpr3; // Saved GPR3 x20-x27
- u64 saved_gpr4; // Saved GPR4 x28-x2F
- u64 saved_gpr5; // Saved GPR5 x30-x37
-
- u8 reserved4; // Reserved x38-x38
- u8 cpuctls_task_attrs; // Task attributes for cpuctls x39-x39
- u8 fpregs_in_use; // FP regs in use x3A-x3A
- u8 pmcregs_in_use; // PMC regs in use x3B-x3B
- volatile u32 saved_decr; // Saved Decr Value x3C-x3F
- volatile u64 emulated_time_base;// Emulated TB for this thread x40-x47
- volatile u64 cur_plic_latency; // Unaccounted PLIC latency x48-x4F
- u64 tot_plic_latency; // Accumulated PLIC latency x50-x57
- u64 wait_state_cycles; // Wait cycles for this proc x58-x5F
- u64 end_of_quantum; // TB at end of quantum x60-x67
- u64 pdc_saved_sprg1; // Saved SPRG1 for PMC int x68-x6F
- u64 pdc_saved_srr0; // Saved SRR0 for PMC int x70-x77
- volatile u32 virtual_decr; // Virtual DECR for shared procs x78-x7B
- u16 slb_count; // # of SLBs to maintain x7C-x7D
- u8 idle; // Indicate OS is idle x7E
- u8 vmxregs_in_use; // VMX registers in use x7F
-
-
-//=============================================================================
-// CACHE_LINE_3 0x0100 - 0x017F: This line is shared with other processors
-//=============================================================================
- // This is the yield_count. An "odd" value (low bit on) means that
- // the processor is yielded (either because of an OS yield or a PLIC
- // preempt). An even value implies that the processor is currently
- // executing.
- // NOTE: This value will ALWAYS be zero for dedicated processors and
- // will NEVER be zero for shared processors (ie, initialized to a 1).
- volatile u32 yield_count; // PLIC increments each dispatch x00-x03
- u8 reserved6[124]; // Reserved x04-x7F
-
-//=============================================================================
-// CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data
-//=============================================================================
- u8 pmc_save_area[256]; // PMC interrupt Area x00-xFF
-};
-
-#endif /* _ASM_LPPACA_H */
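
The odd/even yield_count convention described for cache line 3 lends itself to a helper like the one below. This is only a sketch of how a consumer of struct lppaca might test it; the function name is invented and nothing like it is declared by this header:

static inline int lppaca_vcpu_yielded(const volatile struct lppaca *lp)
{
        /* Odd count => the virtual processor is currently yielded/preempted. */
        return lp->yield_count & 1;
}
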
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
deleted file mode 100644
index 4c18a5cb69f..00000000000
--- a/include/asm-ppc64/mmu.h
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- * PowerPC memory management structures
- *
- * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
- * PPC64 rework.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _PPC64_MMU_H_
-#define _PPC64_MMU_H_
-
-#include <linux/config.h>
-#include <asm/ppc_asm.h> /* for ASM_CONST */
-#include <asm/page.h>
-
-/*
- * Segment table
- */
-
-#define STE_ESID_V 0x80
-#define STE_ESID_KS 0x20
-#define STE_ESID_KP 0x10
-#define STE_ESID_N 0x08
-
-#define STE_VSID_SHIFT 12
-
-/* Location of cpu0's segment table */
-#define STAB0_PAGE 0x6
-#define STAB0_PHYS_ADDR (STAB0_PAGE<<12)
-
-#ifndef __ASSEMBLY__
-extern char initial_stab[];
-#endif /* ! __ASSEMBLY */
-
-/*
- * SLB
- */
-
-#define SLB_NUM_BOLTED 3
-#define SLB_CACHE_ENTRIES 8
-
-/* Bits in the SLB ESID word */
-#define SLB_ESID_V ASM_CONST(0x0000000008000000) /* valid */
-
-/* Bits in the SLB VSID word */
-#define SLB_VSID_SHIFT 12
-#define SLB_VSID_B ASM_CONST(0xc000000000000000)
-#define SLB_VSID_B_256M ASM_CONST(0x0000000000000000)
-#define SLB_VSID_B_1T ASM_CONST(0x4000000000000000)
-#define SLB_VSID_KS ASM_CONST(0x0000000000000800)
-#define SLB_VSID_KP ASM_CONST(0x0000000000000400)
-#define SLB_VSID_N ASM_CONST(0x0000000000000200) /* no-execute */
-#define SLB_VSID_L ASM_CONST(0x0000000000000100)
-#define SLB_VSID_C ASM_CONST(0x0000000000000080) /* class */
-#define SLB_VSID_LP ASM_CONST(0x0000000000000030)
-#define SLB_VSID_LP_00 ASM_CONST(0x0000000000000000)
-#define SLB_VSID_LP_01 ASM_CONST(0x0000000000000010)
-#define SLB_VSID_LP_10 ASM_CONST(0x0000000000000020)
-#define SLB_VSID_LP_11 ASM_CONST(0x0000000000000030)
-#define SLB_VSID_LLP (SLB_VSID_L|SLB_VSID_LP)
-
-#define SLB_VSID_KERNEL (SLB_VSID_KP)
-#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
-
-#define SLBIE_C (0x08000000)
-
-/*
- * Hash table
- */
-
-#define HPTES_PER_GROUP 8
-
-#define HPTE_V_AVPN_SHIFT 7
-#define HPTE_V_AVPN ASM_CONST(0xffffffffffffff80)
-#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
-#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & HPTE_V_AVPN))
-#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010)
-#define HPTE_V_LOCK ASM_CONST(0x0000000000000008)
-#define HPTE_V_LARGE ASM_CONST(0x0000000000000004)
-#define HPTE_V_SECONDARY ASM_CONST(0x0000000000000002)
-#define HPTE_V_VALID ASM_CONST(0x0000000000000001)
-
-#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
-#define HPTE_R_TS ASM_CONST(0x4000000000000000)
-#define HPTE_R_RPN_SHIFT 12
-#define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000)
-#define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff)
-#define HPTE_R_PP ASM_CONST(0x0000000000000003)
-#define HPTE_R_N ASM_CONST(0x0000000000000004)
-
-/* Values for PP (assumes Ks=0, Kp=1) */
-/* pp0 will always be 0 for linux */
-#define PP_RWXX 0 /* Supervisor read/write, User none */
-#define PP_RWRX 1 /* Supervisor read/write, User read */
-#define PP_RWRW 2 /* Supervisor read/write, User read/write */
-#define PP_RXRX 3 /* Supervisor read, User read */
-
-#ifndef __ASSEMBLY__
-
-typedef struct {
- unsigned long v;
- unsigned long r;
-} hpte_t;
-
-extern hpte_t *htab_address;
-extern unsigned long htab_hash_mask;
-
-/*
- * Page size definition
- *
- * shift : is the "PAGE_SHIFT" value for that page size
- * sllp : is a bit mask with the value of SLB L || LP to be or'ed
- * directly to a slbmte "vsid" value
- * penc : is the HPTE encoding mask for the "LP" field:
- *
- */
-struct mmu_psize_def
-{
- unsigned int shift; /* number of bits */
- unsigned int penc; /* HPTE encoding */
- unsigned int tlbiel; /* tlbiel supported for that page size */
- unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */
- unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */
-};
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * The kernel uses the constants below to index into the page size array.
- * Fixed constants are used here because they are better for the
- * performance of the low-level hash refill handlers.
- *
- * An unsupported page size has its "shift" field set to 0.
- *
- * Any new page size being implemented can get a new entry in here. Whether
- * the kernel will use it or not is a different matter though. The actual page
- * size used by hugetlbfs is not defined here and may be made variable.
- */
-
-#define MMU_PAGE_4K 0 /* 4K */
-#define MMU_PAGE_64K 1 /* 64K */
-#define MMU_PAGE_64K_AP 2 /* 64K Admixed (in a 4K segment) */
-#define MMU_PAGE_1M 3 /* 1M */
-#define MMU_PAGE_16M 4 /* 16M */
-#define MMU_PAGE_16G 5 /* 16G */
-#define MMU_PAGE_COUNT 6
-
-#ifndef __ASSEMBLY__
-
-/*
- * The current system page sizes
- */
-extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
-extern int mmu_linear_psize;
-extern int mmu_virtual_psize;
-
-#ifdef CONFIG_HUGETLB_PAGE
-/*
- * The page size index of the huge pages for use by hugetlbfs
- */
-extern int mmu_huge_psize;
-
-#endif /* CONFIG_HUGETLB_PAGE */
-
-/*
- * This function sets the AVPN and L fields of the HPTE appropriately
- * for the page size
- */
-static inline unsigned long hpte_encode_v(unsigned long va, int psize)
-{
- unsigned long v;
-
- v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
- v <<= HPTE_V_AVPN_SHIFT;
- if (psize != MMU_PAGE_4K)
- v |= HPTE_V_LARGE;
- return v;
-}
-
-/*
- * This function sets the ARPN and LP fields of the HPTE appropriately
- * for the page size. We assume the pa is already "clean", that is,
- * properly aligned for the requested page size.
- */
-static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
-{
- /* A 4K page needs no special encoding */
- if (psize == MMU_PAGE_4K)
- return pa & HPTE_R_RPN;
- else {
- unsigned int penc = mmu_psize_defs[psize].penc;
- unsigned int shift = mmu_psize_defs[psize].shift;
- return (pa & ~((1ul << shift) - 1)) | (penc << 12);
- }
-}
-
-/*
- * This hashes a virtual address; only 256MB segments are handled for now.
- */
-
-static inline unsigned long hpt_hash(unsigned long va, unsigned int shift)
-{
- return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift);
-}
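
The same arithmetic can be exercised stand-alone. In the sketch below the virtual address, the 4k page shift and the hash-table mask are arbitrary example values (and a 64-bit unsigned long is assumed), not anything taken from a real system:

#include <stdio.h>

static unsigned long demo_hpt_hash(unsigned long va, unsigned int shift)
{
        return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift);
}

int main(void)
{
        unsigned long va = 0x123456789abc000UL;         /* arbitrary example VA */
        unsigned long htab_hash_mask = (1UL << 27) - 1; /* assumed hash table size */
        unsigned long hash = demo_hpt_hash(va, 12);     /* 12 == 4k page shift */

        printf("primary hash group = 0x%lx\n", hash & htab_hash_mask);
        return 0;
}
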
-
-extern int __hash_page_4K(unsigned long ea, unsigned long access,
- unsigned long vsid, pte_t *ptep, unsigned long trap,
- unsigned int local);
-extern int __hash_page_64K(unsigned long ea, unsigned long access,
- unsigned long vsid, pte_t *ptep, unsigned long trap,
- unsigned int local);
-struct mm_struct;
-extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
- unsigned long ea, unsigned long vsid, int local);
-
-extern void htab_finish_init(void);
-extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
- unsigned long pstart, unsigned long mode,
- int psize);
-
-extern void hpte_init_native(void);
-extern void hpte_init_lpar(void);
-extern void hpte_init_iSeries(void);
-
-extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
- unsigned long va, unsigned long prpn,
- unsigned long rflags,
- unsigned long vflags, int psize);
-
-extern long native_hpte_insert(unsigned long hpte_group,
- unsigned long va, unsigned long prpn,
- unsigned long rflags,
- unsigned long vflags, int psize);
-
-extern long iSeries_hpte_insert(unsigned long hpte_group,
- unsigned long va, unsigned long prpn,
- unsigned long rflags,
- unsigned long vflags, int psize);
-
-extern void stabs_alloc(void);
-extern void slb_initialize(void);
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * VSID allocation
- *
- * We first generate a 36-bit "proto-VSID". For kernel addresses this
- * is equal to the ESID, for user addresses it is:
- * (context << 15) | (esid & 0x7fff)
- *
- * The two forms are distinguishable because the top bit is 0 for user
- * addresses, whereas the top two bits are 1 for kernel addresses.
- * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
- * now.
- *
- * The proto-VSIDs are then scrambled into real VSIDs with the
- * multiplicative hash:
- *
- * VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
- * where VSID_MULTIPLIER = 200730139 (the 28-bit prime defined below)
- * VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
- *
- * This scramble is only well defined for proto-VSIDs below
- * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
- * reserved. VSID_MULTIPLIER is prime, so in particular it is
- * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
- * Because the modulus is 2^n-1 we can compute it efficiently without
- * a divide or extra multiply (see below).
- *
- * This scheme has several advantages over older methods:
- *
- * - We have VSIDs allocated for every kernel address
- * (i.e. everything above 0xC000000000000000), except the very top
- * segment, which simplifies several things.
- *
- * - We allow for 15 significant bits of ESID and 20 bits of
- * context for user addresses. i.e. 8T (43 bits) of address space for
- * up to 1M contexts (although the page table structure and context
- * allocation will need changes to take advantage of this).
- *
- * - The scramble function gives robust scattering in the hash
- * table (at least based on some initial results). The previous
- * method was more susceptible to pathological cases giving excessive
- * hash collisions.
- */
-/*
- * WARNING - If you change these you must make sure the asm
- * implementations in slb_allocate (slb_low.S), do_stab_bolted
- * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
- *
- * You'll also need to change the precomputed VSID values in head.S
- * which are used by the iSeries firmware.
- */
-
-#define VSID_MULTIPLIER ASM_CONST(200730139) /* 28-bit prime */
-#define VSID_BITS 36
-#define VSID_MODULUS ((1UL<<VSID_BITS)-1)
-
-#define CONTEXT_BITS 19
-#define USER_ESID_BITS 16
-
-#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
-
-/*
- * This macro generates asm code to compute the VSID scramble
- * function. Used in slb_allocate() and do_stab_bolted. The function
- * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
- *
- * rt = register containing the proto-VSID and into which the
- * VSID will be stored
- * rx = scratch register (clobbered)
- *
- * - rt and rx must be different registers
- * - The answer will end up in the low 36 bits of rt. The higher
- * bits may contain other garbage, so you may need to mask the
- * result.
- */
-#define ASM_VSID_SCRAMBLE(rt, rx) \
- lis rx,VSID_MULTIPLIER@h; \
- ori rx,rx,VSID_MULTIPLIER@l; \
- mulld rt,rt,rx; /* rt = rt * MULTIPLIER */ \
- \
- srdi rx,rt,VSID_BITS; \
- clrldi rt,rt,(64-VSID_BITS); \
- add rt,rt,rx; /* add high and low bits */ \
- /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
- * 2^36-1+2^28-1. That in particular means that if r3 >= \
- * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \
- * the bit clear, r3 already has the answer we want, if it \
- * doesn't, the answer is the low 36 bits of r3+1. So in all \
- * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
- addi rx,rt,1; \
- srdi rx,rx,VSID_BITS; /* extract 2^36 bit */ \
- add rt,rt,rx
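
A quick stand-alone check (user-space C, not kernel code) that the fold-based reduction used here and in vsid_scramble() below agrees with a direct modulo by 2^36-1. The constants mirror the definitions above; the sampling stride is arbitrary:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_VSID_MULTIPLIER 200730139ULL
#define DEMO_VSID_BITS       36
#define DEMO_VSID_MODULUS    ((1ULL << DEMO_VSID_BITS) - 1)

static uint64_t scramble_fold(uint64_t protovsid)
{
        uint64_t x = protovsid * DEMO_VSID_MULTIPLIER;

        /* Fold the high part back in: 2^36 == 1 (mod 2^36-1). */
        x = (x >> DEMO_VSID_BITS) + (x & DEMO_VSID_MODULUS);
        return (x + ((x + 1) >> DEMO_VSID_BITS)) & DEMO_VSID_MODULUS;
}

int main(void)
{
        uint64_t proto;

        for (proto = 0; proto < DEMO_VSID_MODULUS; proto += 0x1234567ULL)
                assert(scramble_fold(proto) ==
                       (proto * DEMO_VSID_MULTIPLIER) % DEMO_VSID_MODULUS);

        printf("fold reduction matches the direct modulus\n");
        return 0;
}
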
-
-
-#ifndef __ASSEMBLY__
-
-typedef unsigned long mm_context_id_t;
-
-typedef struct {
- mm_context_id_t id;
-#ifdef CONFIG_HUGETLB_PAGE
- u16 low_htlb_areas, high_htlb_areas;
-#endif
-} mm_context_t;
-
-
-static inline unsigned long vsid_scramble(unsigned long protovsid)
-{
-#if 0
- /* The code below is equivalent to this function for arguments
- * < 2^VSID_BITS, which is all this should ever be called
- * with. However gcc is not clever enough to compute the
- * modulus (2^n-1) without a second multiply. */
- return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
-#else /* 1 */
- unsigned long x;
-
- x = protovsid * VSID_MULTIPLIER;
- x = (x >> VSID_BITS) + (x & VSID_MODULUS);
- return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
-#endif /* 1 */
-}
-
-/* This is only valid for addresses >= KERNELBASE */
-static inline unsigned long get_kernel_vsid(unsigned long ea)
-{
- return vsid_scramble(ea >> SID_SHIFT);
-}
-
-/* This is only valid for user addresses (which are below 2^41) */
-static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
-{
- return vsid_scramble((context << USER_ESID_BITS)
- | (ea >> SID_SHIFT));
-}
-
-#define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
-#define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea))
-
-#endif /* __ASSEMBLY */
-
-#endif /* _PPC64_MMU_H_ */
diff --git a/include/asm-ppc64/mmu_context.h b/include/asm-ppc64/mmu_context.h
deleted file mode 100644
index 4f512e9fa6b..00000000000
--- a/include/asm-ppc64/mmu_context.h
+++ /dev/null
@@ -1,85 +0,0 @@
-#ifndef __PPC64_MMU_CONTEXT_H
-#define __PPC64_MMU_CONTEXT_H
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <asm/mmu.h>
-#include <asm/cputable.h>
-
-/*
- * Copyright (C) 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/*
- * When entering a kernel thread there is no valid user segment; mark
- * paca->pgdir NULL so that an SLB miss on a user address will fault
- */
-static inline void enter_lazy_tlb(struct mm_struct *mm,
- struct task_struct *tsk)
-{
-#ifdef CONFIG_PPC_64K_PAGES
- get_paca()->pgdir = NULL;
-#endif /* CONFIG_PPC_64K_PAGES */
-}
-
-#define NO_CONTEXT 0
-#define MAX_CONTEXT (0x100000-1)
-
-extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-extern void destroy_context(struct mm_struct *mm);
-
-extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
-extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
-
-/*
- * switch_mm is the entry point called from the architecture independent
- * code in kernel/sched.c
- */
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
-{
- if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
- cpu_set(smp_processor_id(), next->cpu_vm_mask);
-
- /* No need to flush userspace segments if the mm doesn't change */
-#ifdef CONFIG_PPC_64K_PAGES
- if (prev == next && get_paca()->pgdir == next->pgd)
- return;
-#else
- if (prev == next)
- return;
-#endif /* CONFIG_PPC_64K_PAGES */
-
-#ifdef CONFIG_ALTIVEC
- if (cpu_has_feature(CPU_FTR_ALTIVEC))
- asm volatile ("dssall");
-#endif /* CONFIG_ALTIVEC */
-
- if (cpu_has_feature(CPU_FTR_SLB))
- switch_slb(tsk, next);
- else
- switch_stab(tsk, next);
-}
-
-#define deactivate_mm(tsk,mm) do { } while (0)
-
-/*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
- */
-static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- switch_mm(prev, next, current);
- local_irq_restore(flags);
-}
-
-#endif /* __PPC64_MMU_CONTEXT_H */
diff --git a/include/asm-ppc64/mmzone.h b/include/asm-ppc64/mmzone.h
deleted file mode 100644
index 80a708e7093..00000000000
--- a/include/asm-ppc64/mmzone.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
- *
- * PowerPC64 port:
- * Copyright (C) 2002 Anton Blanchard, IBM Corp.
- */
-#ifndef _ASM_MMZONE_H_
-#define _ASM_MMZONE_H_
-
-#include <linux/config.h>
-#include <asm/smp.h>
-
-/* generic non-linear memory support:
- *
- * 1) we will not split memory into more chunks than will fit into the
- * flags field of the struct page
- */
-
-
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-
-extern struct pglist_data *node_data[];
-/*
- * Return a pointer to the node data for node n.
- */
-#define NODE_DATA(nid) (node_data[nid])
-
-/*
- * Following are specific to this numa platform.
- */
-
-extern int numa_cpu_lookup_table[];
-extern char *numa_memory_lookup_table;
-extern cpumask_t numa_cpumask_lookup_table[];
-extern int nr_cpus_in_node[];
-
-/* 16MB regions */
-#define MEMORY_INCREMENT_SHIFT 24
-#define MEMORY_INCREMENT (1UL << MEMORY_INCREMENT_SHIFT)
-
-/* NUMA debugging, will not work on a DLPAR machine */
-#undef DEBUG_NUMA
-
-static inline int pa_to_nid(unsigned long pa)
-{
- int nid;
-
- nid = numa_memory_lookup_table[pa >> MEMORY_INCREMENT_SHIFT];
-
-#ifdef DEBUG_NUMA
- /* the physical address passed in is not in the map for the system */
- if (nid == -1) {
- printk("bad address: %lx\n", pa);
- BUG();
- }
-#endif
-
- return nid;
-}
-
-#define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn)
-
-/*
- * Following are macros that each numa implementation must define.
- */
-
-#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn)
-
-#ifdef CONFIG_DISCONTIGMEM
-
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr))
-
-#define pfn_to_nid(pfn) pa_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
-
-/* Written this way to avoid evaluating arguments twice */
-#define discontigmem_pfn_to_page(pfn) \
-({ \
- unsigned long __tmp = pfn; \
- (NODE_DATA(pfn_to_nid(__tmp))->node_mem_map + \
- node_localnr(__tmp, pfn_to_nid(__tmp))); \
-})
-
-#define discontigmem_page_to_pfn(p) \
-({ \
- struct page *__tmp = p; \
- (((__tmp) - page_zone(__tmp)->zone_mem_map) + \
- page_zone(__tmp)->zone_start_pfn); \
-})
-
-/* XXX fix for discontiguous physical memory */
-#define discontigmem_pfn_valid(pfn) ((pfn) < num_physpages)
-
-#endif /* CONFIG_DISCONTIGMEM */
-
-#endif /* CONFIG_NEED_MULTIPLE_NODES */
-
-#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
-#define early_pfn_to_nid(pfn) pa_to_nid(((unsigned long)pfn) << PAGE_SHIFT)
-#endif
-
-#endif /* _ASM_MMZONE_H_ */
diff --git a/include/asm-ppc64/nvram.h b/include/asm-ppc64/nvram.h
deleted file mode 100644
index def47d720d3..00000000000
--- a/include/asm-ppc64/nvram.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * PreP compliant NVRAM access
- * This needs to be updated for PPC64
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _PPC64_NVRAM_H
-#define _PPC64_NVRAM_H
-
-#define NVRW_CNT 0x20
-#define NVRAM_HEADER_LEN 16 /* sizeof(struct nvram_header) */
-#define NVRAM_BLOCK_LEN 16
-#define NVRAM_MAX_REQ (2080/NVRAM_BLOCK_LEN)
-#define NVRAM_MIN_REQ (1056/NVRAM_BLOCK_LEN)
-
-#define NVRAM_AS0 0x74
-#define NVRAM_AS1 0x75
-#define NVRAM_DATA 0x77
-
-
-/* RTC Offsets */
-
-#define MOTO_RTC_SECONDS 0x1FF9
-#define MOTO_RTC_MINUTES 0x1FFA
-#define MOTO_RTC_HOURS 0x1FFB
-#define MOTO_RTC_DAY_OF_WEEK 0x1FFC
-#define MOTO_RTC_DAY_OF_MONTH 0x1FFD
-#define MOTO_RTC_MONTH 0x1FFE
-#define MOTO_RTC_YEAR 0x1FFF
-#define MOTO_RTC_CONTROLA 0x1FF8
-#define MOTO_RTC_CONTROLB 0x1FF9
-
-#define NVRAM_SIG_SP 0x02 /* support processor */
-#define NVRAM_SIG_OF 0x50 /* open firmware config */
-#define NVRAM_SIG_FW 0x51 /* general firmware */
-#define NVRAM_SIG_HW 0x52 /* hardware (VPD) */
-#define NVRAM_SIG_FLIP 0x5a /* Apple flip/flop header */
-#define NVRAM_SIG_APPL 0x5f /* Apple "system" (???) */
-#define NVRAM_SIG_SYS 0x70 /* system env vars */
-#define NVRAM_SIG_CFG 0x71 /* config data */
-#define NVRAM_SIG_ELOG 0x72 /* error log */
-#define NVRAM_SIG_VEND 0x7e /* vendor defined */
-#define NVRAM_SIG_FREE 0x7f /* Free space */
-#define NVRAM_SIG_OS 0xa0 /* OS defined */
-#define NVRAM_SIG_PANIC 0xa1 /* Apple OSX "panic" */
-
-/* If you change this size, also change the size of NVNAME_LEN */
-struct nvram_header {
- unsigned char signature;
- unsigned char checksum;
- unsigned short length;
- char name[12];
-};
-
-struct nvram_partition {
- struct list_head partition;
- struct nvram_header header;
- unsigned int index;
-};
-
-
-extern int nvram_write_error_log(char * buff, int length, unsigned int err_type);
-extern int nvram_read_error_log(char * buff, int length, unsigned int * err_type);
-extern int nvram_clear_error_log(void);
-extern struct nvram_partition *nvram_find_partition(int sig, const char *name);
-
-extern int pSeries_nvram_init(void);
-extern int pmac_nvram_init(void);
-extern int mmio_nvram_init(void);
-
-/* PowerMac specific nvram stuffs */
-
-enum {
- pmac_nvram_OF, /* Open Firmware partition */
- pmac_nvram_XPRAM, /* MacOS XPRAM partition */
- pmac_nvram_NR /* MacOS Name Registry partition */
-};
-
-/* Return partition offset in nvram */
-extern int pmac_get_partition(int partition);
-
-/* Direct access to XPRAM on PowerMacs */
-extern u8 pmac_xpram_read(int xpaddr);
-extern void pmac_xpram_write(int xpaddr, u8 data);
-
-/* Synchronize NVRAM */
-extern int nvram_sync(void);
-
-/* Some offsets in XPRAM */
-#define PMAC_XPRAM_MACHINE_LOC 0xe4
-#define PMAC_XPRAM_SOUND_VOLUME 0x08
-
-/* Machine location structure in PowerMac XPRAM */
-struct pmac_machine_location {
- unsigned int latitude; /* 2+30 bit Fractional number */
- unsigned int longitude; /* 2+30 bit Fractional number */
- unsigned int delta; /* mix of GMT delta and DLS */
-};
-
-/*
- * /dev/nvram ioctls
- *
- * Note that PMAC_NVRAM_GET_OFFSET is still supported, but is
- * definitely obsolete. Do not use it if you can avoid it
- */
-
-#define OBSOLETE_PMAC_NVRAM_GET_OFFSET \
- _IOWR('p', 0x40, int)
-
-#define IOC_NVRAM_GET_OFFSET _IOWR('p', 0x42, int) /* Get NVRAM partition offset */
-
-#endif /* _PPC64_NVRAM_H */
diff --git a/include/asm-ppc64/paca.h b/include/asm-ppc64/paca.h
deleted file mode 100644
index bccacd6aa93..00000000000
--- a/include/asm-ppc64/paca.h
+++ /dev/null
@@ -1,121 +0,0 @@
-#ifndef _PPC64_PACA_H
-#define _PPC64_PACA_H
-
-/*
- * include/asm-ppc64/paca.h
- *
- * This control block defines the PACA which defines the processor
- * specific data for each logical processor on the system.
- * There are some pointers defined that are utilized by PLIC.
- *
- * C 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <asm/types.h>
-#include <asm/lppaca.h>
-#include <asm/iseries/it_lp_reg_save.h>
-#include <asm/mmu.h>
-
-register struct paca_struct *local_paca asm("r13");
-#define get_paca() local_paca
-
-struct task_struct;
-
-/*
- * Defines the layout of the paca.
- *
- * This structure is not directly accessed by firmware or the service
- * processor except for the first two pointers that point to the
- * lppaca area and the ItLpRegSave area for this CPU. Both the
- * lppaca and ItLpRegSave objects are currently contained within the
- * PACA but they do not need to be.
- */
-struct paca_struct {
- /*
- * Because hw_cpu_id, unlike other paca fields, is accessed
- * routinely from other CPUs (from the IRQ code), we stick to
- * read-only (after boot) fields in the first cacheline to
- * avoid cacheline bouncing.
- */
-
- /*
- * MAGIC: These first two pointers can't be moved - they're
- * accessed by the firmware
- */
- struct lppaca *lppaca_ptr; /* Pointer to LpPaca for PLIC */
- struct ItLpRegSave *reg_save_ptr; /* Pointer to LpRegSave for PLIC */
-
- /*
- * MAGIC: the spinlock functions in arch/ppc64/lib/locks.c
- * load lock_token and paca_index with a single lwz
- * instruction. They must travel together and be properly
- * aligned.
- */
- u16 lock_token; /* Constant 0x8000, used in locks */
- u16 paca_index; /* Logical processor number */
-
- u32 default_decr; /* Default decrementer value */
- u64 kernel_toc; /* Kernel TOC address */
- u64 stab_real; /* Absolute address of segment table */
- u64 stab_addr; /* Virtual address of segment table */
- void *emergency_sp; /* pointer to emergency stack */
- s16 hw_cpu_id; /* Physical processor number */
- u8 cpu_start; /* At startup, processor spins until */
- /* this becomes non-zero. */
-
- /*
- * Now, starting in cacheline 2, the exception save areas
- */
- /* used for most interrupts/exceptions */
- u64 exgen[10] __attribute__((aligned(0x80)));
- u64 exmc[10]; /* used for machine checks */
- u64 exslb[10]; /* used for SLB/segment table misses
- * on the linear mapping */
-#ifdef CONFIG_PPC_64K_PAGES
- pgd_t *pgdir;
-#endif /* CONFIG_PPC_64K_PAGES */
-
- mm_context_t context;
- u16 slb_cache[SLB_CACHE_ENTRIES];
- u16 slb_cache_ptr;
-
- /*
- * then miscellaneous read-write fields
- */
- struct task_struct *__current; /* Pointer to current */
- u64 kstack; /* Saved Kernel stack addr */
- u64 stab_rr; /* stab/slb round-robin counter */
- u64 next_jiffy_update_tb; /* TB value for next jiffy update */
- u64 saved_r1; /* r1 save for RTAS calls */
- u64 saved_msr; /* MSR saved here by enter_rtas */
- u8 proc_enabled; /* irq soft-enable flag */
-
- /* not yet used */
- u64 exdsi[8]; /* used for linear mapping hash table misses */
-
- /*
- * iSeries structure which the hypervisor knows about -
- * this structure should not cross a page boundary.
- * The vpa_init/register_vpa call is now known to fail if the
- * lppaca structure crosses a page boundary.
- * The lppaca is also used on POWER5 pSeries boxes.
- * The lppaca is 640 bytes long, and cannot readily change
- * since the hypervisor knows its layout, so a 1kB
- * alignment will suffice to ensure that it doesn't
- * cross a page boundary.
- */
- struct lppaca lppaca __attribute__((__aligned__(0x400)));
-#ifdef CONFIG_PPC_ISERIES
- struct ItLpRegSave reg_save;
-#endif
-};
-
-extern struct paca_struct paca[];
-
-#endif /* _PPC64_PACA_H */
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
deleted file mode 100644
index 82ce187e5be..00000000000
--- a/include/asm-ppc64/page.h
+++ /dev/null
@@ -1,333 +0,0 @@
-#ifndef _PPC64_PAGE_H
-#define _PPC64_PAGE_H
-
-/*
- * Copyright (C) 2001 PPC64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <asm/ppc_asm.h> /* for ASM_CONST */
-
-/*
- * We support either a 4k or a 64k software page size. When using 64k pages,
- * however, whether we really support 64k pages in HW or not is irrelevant
- * to these definitions. We always define HW_PAGE_SHIFT to 12, because the
- * use of 64k pages remains a Linux kernel internal: every notion of a
- * page number shared with the firmware, TCEs, iommu, etc. still assumes
- * a page size of 4096.
- */
-#ifdef CONFIG_PPC_64K_PAGES
-#define PAGE_SHIFT 16
-#else
-#define PAGE_SHIFT 12
-#endif
-
-#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
-
-/* HW_PAGE_SHIFT is always 4k pages */
-#define HW_PAGE_SHIFT 12
-#define HW_PAGE_SIZE (ASM_CONST(1) << HW_PAGE_SHIFT)
-#define HW_PAGE_MASK (~(HW_PAGE_SIZE-1))
-
-/* PAGE_FACTOR is the difference in bits between PAGE_SHIFT and
- * HW_PAGE_SHIFT, i.e. between the Linux page size and 4k hardware pages
- */
-#define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT)
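
A small stand-alone illustration of what PAGE_FACTOR is for: converting a Linux page number into the 4k hardware page numbers that firmware, TCEs and the iommu expect. The 64k PAGE_SHIFT and the sample pfn are assumptions made for the example:

#include <stdio.h>

#define DEMO_HW_PAGE_SHIFT 12
#define DEMO_PAGE_SHIFT    16   /* as with CONFIG_PPC_64K_PAGES */
#define DEMO_PAGE_FACTOR   (DEMO_PAGE_SHIFT - DEMO_HW_PAGE_SHIFT)

int main(void)
{
        unsigned long linux_pfn = 0x42;                        /* arbitrary */
        unsigned long hw_pfn = linux_pfn << DEMO_PAGE_FACTOR;  /* first 4k subpage */

        printf("linux pfn 0x%lx -> hw pfn 0x%lx (%d hw pages per linux page)\n",
               linux_pfn, hw_pfn, 1 << DEMO_PAGE_FACTOR);
        return 0;
}
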
-
-/* Segment size */
-#define SID_SHIFT 28
-#define SID_MASK 0xfffffffffUL
-#define ESID_MASK 0xfffffffff0000000UL
-#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
-
-/* Large pages size */
-
-#ifndef __ASSEMBLY__
-extern unsigned int HPAGE_SHIFT;
-#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
-#define HPAGE_MASK (~(HPAGE_SIZE - 1))
-#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-#endif /* __ASSEMBLY__ */
-
-#ifdef CONFIG_HUGETLB_PAGE
-
-
-#define HTLB_AREA_SHIFT 40
-#define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT)
-#define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT)
-
-#define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \
- - (1U << GET_ESID(addr))) & 0xffff)
-#define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
- - (1U << GET_HTLB_AREA(addr))) & 0xffff)
-
-#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
-#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-#define ARCH_HAS_SETCLEAR_HUGE_PTE
-
-#define touches_hugepage_low_range(mm, addr, len) \
- (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)
-#define touches_hugepage_high_range(mm, addr, len) \
- (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)
-
-#define __within_hugepage_low_range(addr, len, segmask) \
- ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
-#define within_hugepage_low_range(addr, len) \
- __within_hugepage_low_range((addr), (len), \
- current->mm->context.low_htlb_areas)
-#define __within_hugepage_high_range(addr, len, zonemask) \
- ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask))
-#define within_hugepage_high_range(addr, len) \
- __within_hugepage_high_range((addr), (len), \
- current->mm->context.high_htlb_areas)
-
-#define is_hugepage_only_range(mm, addr, len) \
- (touches_hugepage_high_range((mm), (addr), (len)) || \
- touches_hugepage_low_range((mm), (addr), (len)))
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-
-#define in_hugepage_area(context, addr) \
- (cpu_has_feature(CPU_FTR_16M_PAGE) && \
- ( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \
- ( ((addr) < 0x100000000L) && \
- ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) )
-
-#else /* !CONFIG_HUGETLB_PAGE */
-
-#define in_hugepage_area(mm, addr) 0
-
-#endif /* !CONFIG_HUGETLB_PAGE */
-
-/* align addr on a size boundary - adjust address up/down if needed */
-#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
-#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
-
-/* align addr on a size boundary - adjust address up if needed */
-#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
-
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
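
A quick stand-alone check of the alignment helpers above; the macros are copied under demo names and the addresses are arbitrary:

#include <assert.h>

#define DEMO_ALIGN_UP(addr, size)   (((addr) + ((size) - 1)) & (~((size) - 1)))
#define DEMO_ALIGN_DOWN(addr, size) ((addr) & (~((size) - 1)))

int main(void)
{
        assert(DEMO_ALIGN_UP(0x1234UL, 0x1000UL)   == 0x2000UL);
        assert(DEMO_ALIGN_DOWN(0x1234UL, 0x1000UL) == 0x1000UL);
        assert(DEMO_ALIGN_UP(0x2000UL, 0x1000UL)   == 0x2000UL); /* already aligned */
        return 0;
}
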
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-#include <asm/cache.h>
-
-#undef STRICT_MM_TYPECHECKS
-
-#define REGION_SIZE 4UL
-#define REGION_SHIFT 60UL
-#define REGION_MASK (((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
-
-static __inline__ void clear_page(void *addr)
-{
- unsigned long lines, line_size;
-
- line_size = ppc64_caches.dline_size;
- lines = ppc64_caches.dlines_per_page;
-
- __asm__ __volatile__(
- "mtctr %1 # clear_page\n\
-1: dcbz 0,%0\n\
- add %0,%0,%3\n\
- bdnz+ 1b"
- : "=r" (addr)
- : "r" (lines), "0" (addr), "r" (line_size)
- : "ctr", "memory");
-}
-
-extern void copy_4K_page(void *to, void *from);
-
-#ifdef CONFIG_PPC_64K_PAGES
-static inline void copy_page(void *to, void *from)
-{
- unsigned int i;
- for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
- copy_4K_page(to, from);
- to += 4096;
- from += 4096;
- }
-}
-#else /* CONFIG_PPC_64K_PAGES */
-static inline void copy_page(void *to, void *from)
-{
- copy_4K_page(to, from);
-}
-#endif /* CONFIG_PPC_64K_PAGES */
-
-struct page;
-extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
-extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p);
-
-#ifdef STRICT_MM_TYPECHECKS
-/*
- * These are used to make use of C type-checking.
- * Entries in the pte table are 64b, while entries in the pgd & pmd are 32b.
- */
-
-/* PTE level */
-typedef struct { unsigned long pte; } pte_t;
-#define pte_val(x) ((x).pte)
-#define __pte(x) ((pte_t) { (x) })
-
-/* 64k pages additionally define a bigger "real PTE" type that gathers
- * the "second half" part of the PTE for pseudo 64k pages
- */
-#ifdef CONFIG_PPC_64K_PAGES
-typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
-#else
-typedef struct { pte_t pte; } real_pte_t;
-#endif
-
-/* PMD level */
-typedef struct { unsigned long pmd; } pmd_t;
-#define pmd_val(x) ((x).pmd)
-#define __pmd(x) ((pmd_t) { (x) })
-
-/* The PUD level exists only with 4k pages */
-#ifndef CONFIG_PPC_64K_PAGES
-typedef struct { unsigned long pud; } pud_t;
-#define pud_val(x) ((x).pud)
-#define __pud(x) ((pud_t) { (x) })
-#endif
-
-/* PGD level */
-typedef struct { unsigned long pgd; } pgd_t;
-#define pgd_val(x) ((x).pgd)
-#define __pgd(x) ((pgd_t) { (x) })
-
-/* Page protection bits */
-typedef struct { unsigned long pgprot; } pgprot_t;
-#define pgprot_val(x) ((x).pgprot)
-#define __pgprot(x) ((pgprot_t) { (x) })
-
-#else
-
-/*
- * .. while these make it easier on the compiler
- */
-
-typedef unsigned long pte_t;
-#define pte_val(x) (x)
-#define __pte(x) (x)
-
-#ifdef CONFIG_PPC_64K_PAGES
-typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
-#else
-typedef unsigned long real_pte_t;
-#endif
-
-
-typedef unsigned long pmd_t;
-#define pmd_val(x) (x)
-#define __pmd(x) (x)
-
-#ifndef CONFIG_PPC_64K_PAGES
-typedef unsigned long pud_t;
-#define pud_val(x) (x)
-#define __pud(x) (x)
-#endif
-
-typedef unsigned long pgd_t;
-#define pgd_val(x) (x)
-#define pgprot_val(x) (x)
-
-typedef unsigned long pgprot_t;
-#define __pgd(x) (x)
-#define __pgprot(x) (x)
-
-#endif
-
-#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
-
-extern int page_is_ram(unsigned long pfn);
-
-extern u64 ppc64_pft_size; /* Log 2 of page table size */
-
-/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
-#define __HAVE_ARCH_GATE_AREA 1
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef MODULE
-#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
-#else
-#define __page_aligned \
- __attribute__((__aligned__(PAGE_SIZE), \
- __section__(".data.page_aligned")))
-#endif
-
-
-/* This must match the -Ttext linker address */
-/* Note: tophys & tovirt make assumptions about how */
-/* KERNELBASE is defined for performance reasons. */
-/* When KERNELBASE moves, those macros may have */
-/* to change! */
-#define PAGE_OFFSET ASM_CONST(0xC000000000000000)
-#define KERNELBASE PAGE_OFFSET
-#define VMALLOCBASE ASM_CONST(0xD000000000000000)
-
-#define VMALLOC_REGION_ID (VMALLOCBASE >> REGION_SHIFT)
-#define KERNEL_REGION_ID (KERNELBASE >> REGION_SHIFT)
-#define USER_REGION_ID (0UL)
-#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
-
-#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
-
-#ifdef CONFIG_DISCONTIGMEM
-#define page_to_pfn(page) discontigmem_page_to_pfn(page)
-#define pfn_to_page(pfn) discontigmem_pfn_to_page(pfn)
-#define pfn_valid(pfn) discontigmem_pfn_valid(pfn)
-#endif
-#ifdef CONFIG_FLATMEM
-#define pfn_to_page(pfn) (mem_map + (pfn))
-#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
-#define pfn_valid(pfn) ((pfn) < max_mapnr)
-#endif
-
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-
-/*
- * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
- * and needs to be executable. This means the whole heap ends
- * up being executable.
- */
-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_DATA_DEFAULT_FLAGS \
- (test_thread_flag(TIF_32BIT) ? \
- VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
-
-/*
- * This is the default if a program doesn't have a PT_GNU_STACK
- * program header entry. The PPC64 ELF ABI has a non-executable stack
- * by default, so in the absence of a PT_GNU_STACK program header
- * we turn execute permission off.
- */
-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_STACK_DEFAULT_FLAGS \
- (test_thread_flag(TIF_32BIT) ? \
- VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
-
-#endif /* __KERNEL__ */
-
-#include <asm-generic/page.h>
-
-#endif /* _PPC64_PAGE_H */
diff --git a/include/asm-ppc64/pci-bridge.h b/include/asm-ppc64/pci-bridge.h
deleted file mode 100644
index 60cf8c838af..00000000000
--- a/include/asm-ppc64/pci-bridge.h
+++ /dev/null
@@ -1,146 +0,0 @@
-#ifdef __KERNEL__
-#ifndef _ASM_PCI_BRIDGE_H
-#define _ASM_PCI_BRIDGE_H
-
-#include <linux/config.h>
-#include <linux/pci.h>
-#include <linux/list.h>
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/*
- * Structure of a PCI controller (host bridge)
- */
-struct pci_controller {
- struct pci_bus *bus;
- char is_dynamic;
- void *arch_data;
- struct list_head list_node;
-
- int first_busno;
- int last_busno;
-
- void __iomem *io_base_virt;
- unsigned long io_base_phys;
-
- /* Some machines have a non-1:1 mapping of
- * the PCI memory space in the CPU bus space
- */
- unsigned long pci_mem_offset;
- unsigned long pci_io_size;
-
- struct pci_ops *ops;
- volatile unsigned int __iomem *cfg_addr;
- volatile void __iomem *cfg_data;
-
- /* Currently, we limit ourselves to 1 IO range and 3 mem
- * ranges since the common pci_bus structure can't handle more
- */
- struct resource io_resource;
- struct resource mem_resources[3];
- int global_number;
- int local_number;
- unsigned long buid;
- unsigned long dma_window_base_cur;
- unsigned long dma_window_size;
-};
-
-/*
- * PCI stuff, for nodes representing PCI devices, pointed to
- * by device_node->data.
- */
-struct pci_controller;
-struct iommu_table;
-
-struct pci_dn {
- int busno; /* for pci devices */
- int bussubno; /* for pci devices */
- int devfn; /* for pci devices */
- int eeh_mode; /* See eeh.h for possible EEH_MODEs */
- int eeh_config_addr;
- int eeh_capable; /* from firmware */
- int eeh_check_count; /* # times driver ignored error */
- int eeh_freeze_count; /* # times this device froze up. */
- int eeh_is_bridge; /* device is pci-to-pci bridge */
-
- int pci_ext_config_space; /* for pci devices */
- struct pci_controller *phb; /* for pci devices */
- struct iommu_table *iommu_table; /* for phb's or bridges */
- struct pci_dev *pcidev; /* back-pointer to the pci device */
- struct device_node *node; /* back-pointer to the device_node */
-#ifdef CONFIG_PPC_ISERIES
- struct list_head Device_List;
- int Irq; /* Assigned IRQ */
- int Flags; /* Possible flags(disable/bist)*/
- u8 LogicalSlot; /* Hv Slot Index for Tces */
-#endif
- u32 config_space[16]; /* saved PCI config space */
-};
-
-/* Get the pointer to a device_node's pci_dn */
-#define PCI_DN(dn) ((struct pci_dn *) (dn)->data)
-
-struct device_node *fetch_dev_dn(struct pci_dev *dev);
-
-/* Get a device_node from a pci_dev. This code must be fast except
- * in the case where the sysdata is incorrect and needs to be fixed
- * up (this will only happen once).
- * In this case the sysdata will have been inherited from a PCI host
- * bridge or a PCI-PCI bridge further up the tree, so it will point
- * to a valid struct pci_dn, just not the one we want.
- */
-static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev)
-{
- struct device_node *dn = dev->sysdata;
- struct pci_dn *pdn = dn->data;
-
- if (pdn && pdn->devfn == dev->devfn && pdn->busno == dev->bus->number)
- return dn; /* fast path. sysdata is good */
- return fetch_dev_dn(dev);
-}
-
-static inline int pci_device_from_OF_node(struct device_node *np,
- u8 *bus, u8 *devfn)
-{
- if (!PCI_DN(np))
- return -ENODEV;
- *bus = PCI_DN(np)->busno;
- *devfn = PCI_DN(np)->devfn;
- return 0;
-}
-
-static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
-{
- if (bus->self)
- return pci_device_to_OF_node(bus->self);
- else
- return bus->sysdata; /* Must be root bus (PHB) */
-}
-
-extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
- struct device_node *dev, int primary);
-
-extern int pcibios_remove_root_bus(struct pci_controller *phb);
-
-extern void phbs_remap_io(void);
-
-static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus)
-{
- struct device_node *busdn = bus->sysdata;
-
- BUG_ON(busdn == NULL);
- return PCI_DN(busdn)->phb;
-}
-
-/* Return values for ppc_md.pci_probe_mode function */
-#define PCI_PROBE_NONE -1 /* Don't look at this bus at all */
-#define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */
-#define PCI_PROBE_DEVTREE 1 /* Instantiate from device tree */
-
-#endif
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc64/pci.h b/include/asm-ppc64/pci.h
deleted file mode 100644
index fafdf885a3c..00000000000
--- a/include/asm-ppc64/pci.h
+++ /dev/null
@@ -1,193 +0,0 @@
-#ifndef __PPC64_PCI_H
-#define __PPC64_PCI_H
-#ifdef __KERNEL__
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/machdep.h>
-#include <asm/scatterlist.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-
-#include <asm-generic/pci-dma-compat.h>
-
-#define PCIBIOS_MIN_IO 0x1000
-#define PCIBIOS_MIN_MEM 0x10000000
-
-struct pci_dev;
-
-#ifdef CONFIG_PPC_ISERIES
-#define pcibios_scan_all_fns(a, b) 0
-#else
-extern int pcibios_scan_all_fns(struct pci_bus *bus, int devfn);
-#endif
-
-static inline void pcibios_set_master(struct pci_dev *dev)
-{
- /* No special bus mastering setup handling */
-}
-
-static inline void pcibios_penalize_isa_irq(int irq, int active)
-{
- /* We don't do dynamic PCI IRQ allocation */
-}
-
-#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- if (ppc_md.pci_get_legacy_ide_irq)
- return ppc_md.pci_get_legacy_ide_irq(dev, channel);
- return channel ? 15 : 14;
-}
-
-#define HAVE_ARCH_PCI_MWI 1
-static inline int pcibios_prep_mwi(struct pci_dev *dev)
-{
- /*
- * We would like to avoid touching the cacheline size or MWI bit,
- * but we can't do that with the current pcibios_prep_mwi
- * interface. pSeries firmware sets the cacheline size (which is not
- * the cpu cacheline size in all cases) and hardware treats MWI
- * the same as memory write. So we don't touch the cacheline size
- * here and allow the generic code to set the MWI bit.
- */
- return 0;
-}
-
-extern unsigned int pcibios_assign_all_busses(void);
-
-extern struct dma_mapping_ops pci_dma_ops;
-
-/* DAC DMA is not supported by default, but we let
- * the platform override this
- */
-static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask)
-{
- if (pci_dma_ops.dac_dma_supported)
- return pci_dma_ops.dac_dma_supported(&hwdev->dev, mask);
- return 0;
-}
-
-#ifdef CONFIG_PCI
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
- enum pci_dma_burst_strategy *strat,
- unsigned long *strategy_parameter)
-{
- unsigned long cacheline_size;
- u8 byte;
-
- pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
- if (byte == 0)
- cacheline_size = 1024;
- else
- cacheline_size = (int) byte * 4;
-
- *strat = PCI_DMA_BURST_MULTIPLE;
- *strategy_parameter = cacheline_size;
-}
-#endif
-
-extern int pci_domain_nr(struct pci_bus *bus);
-
-/* Decide whether to display the domain number in /proc */
-extern int pci_proc_domain(struct pci_bus *bus);
-
-struct vm_area_struct;
-/* Map a range of PCI memory or I/O space for a device into user space */
-int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
-
-/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
-#define HAVE_PCI_MMAP 1
-
-/* pci_unmap_{single,page} is not a nop, thus... */
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
- dma_addr_t ADDR_NAME;
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
- __u32 LEN_NAME;
-#define pci_unmap_addr(PTR, ADDR_NAME) \
- ((PTR)->ADDR_NAME)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
- (((PTR)->ADDR_NAME) = (VAL))
-#define pci_unmap_len(PTR, LEN_NAME) \
- ((PTR)->LEN_NAME)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
- (((PTR)->LEN_NAME) = (VAL))
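
A hedged sketch of the usual way these macros are consumed: the driver embeds the unmap cookie in its own descriptor state so the buffer can be unmapped later from a different code path. The structure and function names are invented for the example; pci_map_single()/pci_unmap_single() come from the pci-dma-compat layer included by this header:

struct demo_rx_desc {
        void *buf;
        DECLARE_PCI_UNMAP_ADDR(mapping)
        DECLARE_PCI_UNMAP_LEN(len)
};

static void demo_map_rx(struct pci_dev *pdev, struct demo_rx_desc *d,
                        void *buf, size_t size)
{
        dma_addr_t dma = pci_map_single(pdev, buf, size, PCI_DMA_FROMDEVICE);

        d->buf = buf;
        pci_unmap_addr_set(d, mapping, dma);
        pci_unmap_len_set(d, len, size);
}

static void demo_unmap_rx(struct pci_dev *pdev, struct demo_rx_desc *d)
{
        pci_unmap_single(pdev, pci_unmap_addr(d, mapping),
                         pci_unmap_len(d, len), PCI_DMA_FROMDEVICE);
}
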
-
-/* The PCI address space does equal the physical memory
- * address space. The networking and block device layers use
- * this boolean for bounce buffer decisions.
- */
-#define PCI_DMA_BUS_IS_PHYS (0)
-
-extern void
-pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
- struct resource *res);
-
-extern void
-pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
- struct pci_bus_region *region);
-
-static inline struct resource *
-pcibios_select_root(struct pci_dev *pdev, struct resource *res)
-{
- struct resource *root = NULL;
-
- if (res->flags & IORESOURCE_IO)
- root = &ioport_resource;
- if (res->flags & IORESOURCE_MEM)
- root = &iomem_resource;
-
- return root;
-}
-
-extern int
-unmap_bus_range(struct pci_bus *bus);
-
-extern int
-remap_bus_range(struct pci_bus *bus);
-
-extern void
-pcibios_fixup_device_resources(struct pci_dev *dev, struct pci_bus *bus);
-
-extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
-
-extern struct pci_dev *of_create_pci_dev(struct device_node *node,
- struct pci_bus *bus, int devfn);
-
-extern void of_scan_pci_bridge(struct device_node *node,
- struct pci_dev *dev);
-
-extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
-
-extern int pci_read_irq_line(struct pci_dev *dev);
-
-extern void pcibios_add_platform_entries(struct pci_dev *dev);
-
-struct file;
-extern pgprot_t pci_phys_mem_access_prot(struct file *file,
- unsigned long pfn,
- unsigned long size,
- pgprot_t prot);
-
-#ifdef CONFIG_PPC_MULTIPLATFORM
-#define HAVE_ARCH_PCI_RESOURCE_TO_USER
-extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
- const struct resource *rsrc,
- u64 *start, u64 *end);
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
-
-#endif /* __KERNEL__ */
-
-#endif /* __PPC64_PCI_H */
diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-ppc64/pgalloc.h
deleted file mode 100644
index 98da0e4262b..00000000000
--- a/include/asm-ppc64/pgalloc.h
+++ /dev/null
@@ -1,151 +0,0 @@
-#ifndef _PPC64_PGALLOC_H
-#define _PPC64_PGALLOC_H
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/cpumask.h>
-#include <linux/percpu.h>
-
-extern kmem_cache_t *pgtable_cache[];
-
-#ifdef CONFIG_PPC_64K_PAGES
-#define PTE_CACHE_NUM 0
-#define PMD_CACHE_NUM 0
-#define PGD_CACHE_NUM 1
-#else
-#define PTE_CACHE_NUM 0
-#define PMD_CACHE_NUM 1
-#define PUD_CACHE_NUM 1
-#define PGD_CACHE_NUM 0
-#endif
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
- return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
-}
-
-static inline void pgd_free(pgd_t *pgd)
-{
- kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
-}
-
-#ifndef CONFIG_PPC_64K_PAGES
-
-#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
-
-static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
- GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline void pud_free(pud_t *pud)
-{
- kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
-}
-
-static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-{
- pud_set(pud, (unsigned long)pmd);
-}
-
-#define pmd_populate(mm, pmd, pte_page) \
- pmd_populate_kernel(mm, pmd, page_address(pte_page))
-#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
-
-
-#else /* CONFIG_PPC_64K_PAGES */
-
-#define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
-
-static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
- pte_t *pte)
-{
- pmd_set(pmd, (unsigned long)pte);
-}
-
-#define pmd_populate(mm, pmd, pte_page) \
- pmd_populate_kernel(mm, pmd, page_address(pte_page))
-
-#endif /* CONFIG_PPC_64K_PAGES */
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
- GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline void pmd_free(pmd_t *pmd)
-{
- kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
-}
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
-{
- return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
- GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
-{
- return virt_to_page(pte_alloc_one_kernel(mm, address));
-}
-
-static inline void pte_free_kernel(pte_t *pte)
-{
- kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
-}
-
-static inline void pte_free(struct page *ptepage)
-{
- pte_free_kernel(page_address(ptepage));
-}
-
-#define PGF_CACHENUM_MASK 0xf
-
-typedef struct pgtable_free {
- unsigned long val;
-} pgtable_free_t;
-
-static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
- unsigned long mask)
-{
- BUG_ON(cachenum > PGF_CACHENUM_MASK);
-
- return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
-}
-
-static inline void pgtable_free(pgtable_free_t pgf)
-{
- void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
- int cachenum = pgf.val & PGF_CACHENUM_MASK;
-
- kmem_cache_free(pgtable_cache[cachenum], p);
-}
-
-extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
-
-#define __pte_free_tlb(tlb, ptepage) \
- pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
- PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
-#define __pmd_free_tlb(tlb, pmd) \
- pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
- PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
-#ifndef CONFIG_PPC_64K_PAGES
-#define __pud_free_tlb(tlb, pud) \
- pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
- PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
-#endif /* CONFIG_PPC_64K_PAGES */
-
-#define check_pgt_cache() do { } while (0)
-
-#endif /* _PPC64_PGALLOC_H */
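The pgtable_free_t encoding above relies on page-table pages coming from size-aligned caches, so their low bits are zero and can carry the cache index. A minimal user-space sketch of that trick (example_pack()/example_unpack() are illustrative names):

#include <assert.h>
#include <stdint.h>

static uintptr_t example_pack(void *table, unsigned int cachenum)
{
        /* table is cache-aligned, so its low 4 bits are free (PGF_CACHENUM_MASK) */
        assert(cachenum <= 0xf && ((uintptr_t)table & 0xf) == 0);
        return (uintptr_t)table | cachenum;
}

static void example_unpack(uintptr_t val, void **table, unsigned int *cachenum)
{
        *cachenum = val & 0xf;
        *table = (void *)(val & ~(uintptr_t)0xf);
}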
diff --git a/include/asm-ppc64/pgtable-4k.h b/include/asm-ppc64/pgtable-4k.h
deleted file mode 100644
index e9590c06ad9..00000000000
--- a/include/asm-ppc64/pgtable-4k.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Entries per page directory level. The PTE level must use a 64b record
- * for each page table entry. The PMD and PGD level use a 32b record for
- * each entry by assuming that each entry is page aligned.
- */
-#define PTE_INDEX_SIZE 9
-#define PMD_INDEX_SIZE 7
-#define PUD_INDEX_SIZE 7
-#define PGD_INDEX_SIZE 9
-
-#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
-#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
-#define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE)
-#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
-
-#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
-#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
-#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
-#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
-
-/* PMD_SHIFT determines what a second-level page table entry can map */
-#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-
-/* With 4k base page size, hugepage PTEs go at the PMD level */
-#define MIN_HUGEPTE_SHIFT PMD_SHIFT
-
-/* PUD_SHIFT determines what a third-level page table entry can map */
-#define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
-#define PUD_SIZE (1UL << PUD_SHIFT)
-#define PUD_MASK (~(PUD_SIZE-1))
-
-/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
-#define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE)
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-
-/* PTE bits */
-#define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */
-#define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */
-#define _PAGE_F_SECOND _PAGE_SECONDARY
-#define _PAGE_F_GIX _PAGE_GROUP_IX
-
-/* PTE flags to conserve for HPTE identification */
-#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
- _PAGE_SECONDARY | _PAGE_GROUP_IX)
-
-/* PAGE_MASK gives the right answer below, but only by accident */
-/* It should be preserving the high 48 bits and then specifically */
-/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
- _PAGE_HPTEFLAGS)
-
-/* Bits to mask out from a PMD to get to the PTE page */
-#define PMD_MASKED_BITS 0
-/* Bits to mask out from a PUD to get to the PMD page */
-#define PUD_MASKED_BITS 0
-/* Bits to mask out from a PGD to get to the PUD page */
-#define PGD_MASKED_BITS 0
-
-/* shift to put page number into pte */
-#define PTE_RPN_SHIFT (17)
-
-#define __real_pte(e,p) ((real_pte_t)(e))
-#define __rpte_to_pte(r) (r)
-#define __rpte_to_hidx(r,index) (pte_val((r)) >> 12)
-
-#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
- do { \
- index = 0; \
- shift = mmu_psize_defs[psize].shift; \
-
-#define pte_iterate_hashed_end() } while(0)
-
-/*
- * 4-level page tables related bits
- */
-
-#define pgd_none(pgd) (!pgd_val(pgd))
-#define pgd_bad(pgd) (pgd_val(pgd) == 0)
-#define pgd_present(pgd) (pgd_val(pgd) != 0)
-#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0)
-#define pgd_page(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS)
-
-#define pud_offset(pgdp, addr) \
- (((pud_t *) pgd_page(*(pgdp))) + \
- (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
-
-#define pud_ERROR(e) \
- printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
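Taking PAGE_SHIFT = 12 from the 4k page.h (an assumption, not defined in this file), the index sizes above work out as follows; this is derived arithmetic, not extra definitions:

/*  PMD_SHIFT   = 12 + 9             = 21  -> each PMD entry maps   2 MB
 *  PUD_SHIFT   = 21 + 7             = 28  -> each PUD entry maps 256 MB
 *  PGDIR_SHIFT = 28 + 7             = 35  -> each PGD entry maps  32 GB
 *  mapped EA   = 12 + 9 + 7 + 7 + 9 = 44 bits, i.e. 16 TB of address space
 */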
diff --git a/include/asm-ppc64/pgtable-64k.h b/include/asm-ppc64/pgtable-64k.h
deleted file mode 100644
index 154f1840ece..00000000000
--- a/include/asm-ppc64/pgtable-64k.h
+++ /dev/null
@@ -1,90 +0,0 @@
-#include <asm-generic/pgtable-nopud.h>
-
-
-#define PTE_INDEX_SIZE 12
-#define PMD_INDEX_SIZE 12
-#define PUD_INDEX_SIZE 0
-#define PGD_INDEX_SIZE 4
-
-#define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE)
-#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
-#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
-
-#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
-#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
-#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
-
-/* With 64k base page size, hugepage PTEs go at the PTE level */
-#define MIN_HUGEPTE_SHIFT PAGE_SHIFT
-
-/* PMD_SHIFT determines what a second-level page table entry can map */
-#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-
-/* Additional PTE bits (don't change without checking asm in hash_low.S) */
-#define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */
-#define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */
-#define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */
-#define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */
-#define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */
-
-/* PTE flags to conserve for HPTE identification */
-#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_HPTE_SUB |\
- _PAGE_COMBO)
-
-/* Shift to put page number into pte.
- *
- * That gives us a max RPN of 32 bits, which means a max of 48 bits
- * of addressable physical space.
- * We could get 3 more bits here by setting PTE_RPN_SHIFT to 29 but
- * 32 makes PTEs more readable for debugging for now :)
- */
-#define PTE_RPN_SHIFT (32)
-#define PTE_RPN_MAX (1UL << (64 - PTE_RPN_SHIFT))
-#define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1))
-
-/* _PAGE_CHG_MASK masks the bits that are to be preserved across
- * pgprot changes
- */
-#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
- _PAGE_ACCESSED)
-
-/* Bits to mask out from a PMD to get to the PTE page */
-#define PMD_MASKED_BITS 0x1ff
-/* Bits to mask out from a PGD/PUD to get to the PMD page */
-#define PUD_MASKED_BITS 0x1ff
-
-#ifndef __ASSEMBLY__
-
-/* Manipulate "rpte" values */
-#define __real_pte(e,p) ((real_pte_t) { \
- (e), pte_val(*((p) + PTRS_PER_PTE)) })
-#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
- (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
-#define __rpte_to_pte(r) ((r).pte)
-#define __rpte_sub_valid(rpte, index) \
- (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
-
-
-/* Trick: we set __end to va + 64k, which happens to work for
- * a 16M page as well, since we want only one iteration
- */
-#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
- do { \
- unsigned long __end = va + PAGE_SIZE; \
- unsigned __split = (psize == MMU_PAGE_4K || \
- psize == MMU_PAGE_64K_AP); \
- shift = mmu_psize_defs[psize].shift; \
- for (index = 0; va < __end; index++, va += (1 << shift)) { \
- if (!__split || __rpte_sub_valid(rpte, index)) do { \
-
-#define pte_iterate_hashed_end() } while(0); } } while(0)
-
-
-#endif /* __ASSEMBLY__ */
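For comparison with the 4k layout, and taking PAGE_SHIFT = 16 for 64 KB pages (an assumption from page.h, not this file), the arithmetic works out as:

/*  PMD_SHIFT   = 16 + 12           = 28  -> each PMD entry maps 256 MB
 *  PGDIR_SHIFT = 28 + 12           = 40  -> each PGD entry maps   1 TB
 *  mapped EA   = 16 + 12 + 12 + 4  = 44 bits, i.e. 16 TB, as with 4k pages
 *  PTE_RPN_SHIFT = 32              -> 32-bit RPN * 64 KB pages = 48-bit physical
 */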
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
deleted file mode 100644
index a9783ba7fe9..00000000000
--- a/include/asm-ppc64/pgtable.h
+++ /dev/null
@@ -1,512 +0,0 @@
-#ifndef _PPC64_PGTABLE_H
-#define _PPC64_PGTABLE_H
-
-/*
- * This file contains the functions and defines necessary to modify and use
- * the ppc64 hashed page table.
- */
-
-#ifndef __ASSEMBLY__
-#include <linux/config.h>
-#include <linux/stddef.h>
-#include <asm/processor.h> /* For TASK_SIZE */
-#include <asm/mmu.h>
-#include <asm/page.h>
-#include <asm/tlbflush.h>
-struct mm_struct;
-#endif /* __ASSEMBLY__ */
-
-#ifdef CONFIG_PPC_64K_PAGES
-#include <asm/pgtable-64k.h>
-#else
-#include <asm/pgtable-4k.h>
-#endif
-
-#define FIRST_USER_ADDRESS 0
-
-/*
- * Size of EA range mapped by our pagetables.
- */
-#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
- PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
-#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE)
-
-#if TASK_SIZE_USER64 > PGTABLE_RANGE
-#error TASK_SIZE_USER64 exceeds pagetable range
-#endif
-
-#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
-#error TASK_SIZE_USER64 exceeds user VSID range
-#endif
-
-/*
- * Define the address range of the vmalloc VM area.
- */
-#define VMALLOC_START (0xD000000000000000ul)
-#define VMALLOC_SIZE (0x80000000000UL)
-#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
-
-/*
- * Common bits in a linux-style PTE. These match the bits in the
- * (hardware-defined) PowerPC PTE as closely as possible. Additional
- * bits may be defined in pgtable-*.h
- */
-#define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */
-#define _PAGE_USER 0x0002 /* matches one of the PP bits */
-#define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */
-#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */
-#define _PAGE_GUARDED 0x0008
-#define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */
-#define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */
-#define _PAGE_DIRTY 0x0080 /* C: page changed */
-#define _PAGE_ACCESSED 0x0100 /* R: page referenced */
-#define _PAGE_RW 0x0200 /* software: user write access allowed */
-#define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */
-#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */
-
-#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
-
-#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY)
-
-/* __pgprot defined in asm-ppc64/page.h */
-#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
-
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE)
-#define PAGE_KERNEL_CI __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
- _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
-#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC)
-
-#define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
-#define HAVE_PAGE_AGP
-
-/* PTEIDX nibble */
-#define _PTEIDX_SECONDARY 0x8
-#define _PTEIDX_GROUP_IX 0x7
-
-
-/*
- * POWER4 and newer have per page execute protection, older chips can only
- * do this on a segment (256MB) basis.
- *
- * Also, write permissions imply read permissions.
- * This is the closest we can get..
- *
- * Note due to the way vm flags are laid out, the bits are XWR
- */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_X
-#define __P101 PAGE_READONLY_X
-#define __P110 PAGE_COPY_X
-#define __P111 PAGE_COPY_X
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_X
-#define __S101 PAGE_READONLY_X
-#define __S110 PAGE_SHARED_X
-#define __S111 PAGE_SHARED_X
-
-#ifndef __ASSEMBLY__
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-#endif /* __ASSEMBLY__ */
-
-#ifdef CONFIG_HUGETLB_PAGE
-
-#define HAVE_ARCH_UNMAPPED_AREA
-#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-
-#endif
-
-#ifndef __ASSEMBLY__
-
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- *
- * mk_pte takes a (struct page *) as input
- */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
-static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
-{
- pte_t pte;
-
- pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot);
- return pte;
-}
-
-#define pte_modify(_pte, newprot) \
- (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
-
-#define pte_none(pte) ((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
-#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
-
-/* pte_clear moved to later in this file */
-
-#define pte_pfn(x) ((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT)))
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-
-#define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval))
-#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_bad(pmd) (pmd_val(pmd) == 0)
-#define pmd_present(pmd) (pmd_val(pmd) != 0)
-#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
-#define pmd_page_kernel(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
-#define pmd_page(pmd) virt_to_page(pmd_page_kernel(pmd))
-
-#define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval))
-#define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) ((pud_val(pud)) == 0)
-#define pud_present(pud) (pud_val(pud) != 0)
-#define pud_clear(pudp) (pud_val(*(pudp)) = 0)
-#define pud_page(pud) (pud_val(pud) & ~PUD_MASKED_BITS)
-
-#define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
-
-/*
- * Find an entry in a page-table-directory. We combine the address region
- * (the high order N bits) and the pgd portion of the address.
- */
-/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)
-
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-#define pmd_offset(pudp,addr) \
- (((pmd_t *) pud_page(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
-
-#define pte_offset_kernel(dir,addr) \
- (((pte_t *) pmd_page_kernel(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte) do { } while(0)
-#define pte_unmap_nested(pte) do { } while(0)
-
-/* to find an entry in a kernel page-table-directory */
-/* This now only contains the vmalloc pages */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER;}
-static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
-static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC;}
-static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
-static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
-
-static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
-static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
-
-static inline pte_t pte_rdprotect(pte_t pte) {
- pte_val(pte) &= ~_PAGE_USER; return pte; }
-static inline pte_t pte_exprotect(pte_t pte) {
- pte_val(pte) &= ~_PAGE_EXEC; return pte; }
-static inline pte_t pte_wrprotect(pte_t pte) {
- pte_val(pte) &= ~(_PAGE_RW); return pte; }
-static inline pte_t pte_mkclean(pte_t pte) {
- pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
-static inline pte_t pte_mkold(pte_t pte) {
- pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkread(pte_t pte) {
- pte_val(pte) |= _PAGE_USER; return pte; }
-static inline pte_t pte_mkexec(pte_t pte) {
- pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) {
- pte_val(pte) |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) {
- pte_val(pte) |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) {
- pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) {
- return pte; }
-
-/* Atomic PTE updates */
-static inline unsigned long pte_update(pte_t *p, unsigned long clr)
-{
- unsigned long old, tmp;
-
- __asm__ __volatile__(
- "1: ldarx %0,0,%3 # pte_update\n\
- andi. %1,%0,%6\n\
- bne- 1b \n\
- andc %1,%0,%4 \n\
- stdcx. %1,0,%3 \n\
- bne- 1b"
- : "=&r" (old), "=&r" (tmp), "=m" (*p)
- : "r" (p), "r" (clr), "m" (*p), "i" (_PAGE_BUSY)
- : "cc" );
- return old;
-}
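In effect, pte_update() is an atomic fetch-and-clear that also spins while _PAGE_BUSY is set. A rough user-space analogue using GCC atomic builtins, for illustration only (example_pte_update() is not kernel code and ignores the hash-flush side of things):

#include <stdint.h>

static unsigned long example_pte_update(unsigned long *p, unsigned long clr)
{
        unsigned long old, new;

        for (;;) {
                old = __atomic_load_n(p, __ATOMIC_RELAXED);
                if (old & 0x0800UL)             /* _PAGE_BUSY: hash code owns it */
                        continue;               /* spin until it is released */
                new = old & ~clr;               /* clear the requested bits */
                if (__atomic_compare_exchange_n(p, &old, new, 0,
                                                __ATOMIC_RELAXED, __ATOMIC_RELAXED))
                        return old;             /* caller inspects e.g. _PAGE_HASHPTE */
        }
}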
-
-/* PTE updating functions. hpte_update() puts the PTE in the flush
- * batch; it does not trigger the hash flush immediately, so you
- * need to call flush_tlb_pending() to do that.
- * Pass -1 for "normal" size (4K or 64K)
- */
-extern void hpte_update(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, unsigned long pte, int huge);
-
-static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- unsigned long old;
-
- if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
- return 0;
- old = pte_update(ptep, _PAGE_ACCESSED);
- if (old & _PAGE_HASHPTE) {
- hpte_update(mm, addr, ptep, old, 0);
- flush_tlb_pending();
- }
- return (old & _PAGE_ACCESSED) != 0;
-}
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
-({ \
- int __r; \
- __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
- __r; \
-})
-
-/*
- * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the
- * moment we always flush but we need to fix hpte_update and test if the
- * optimisation is worth it.
- */
-static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- unsigned long old;
-
- if ((pte_val(*ptep) & _PAGE_DIRTY) == 0)
- return 0;
- old = pte_update(ptep, _PAGE_DIRTY);
- if (old & _PAGE_HASHPTE)
- hpte_update(mm, addr, ptep, old, 0);
- return (old & _PAGE_DIRTY) != 0;
-}
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define ptep_test_and_clear_dirty(__vma, __addr, __ptep) \
-({ \
- int __r; \
- __r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \
- __r; \
-})
-
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
- unsigned long old;
-
- if ((pte_val(*ptep) & _PAGE_RW) == 0)
- return;
- old = pte_update(ptep, _PAGE_RW);
- if (old & _PAGE_HASHPTE)
- hpte_update(mm, addr, ptep, old, 0);
-}
-
-/*
- * We currently remove entries from the hashtable regardless of whether
- * the entry was young or dirty. The generic routines only flush if the
- * entry was young or dirty which is not good enough.
- *
- * We should be more intelligent about this but for the moment we override
- * these functions and force a tlb flush unconditionally
- */
-#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define ptep_clear_flush_young(__vma, __address, __ptep) \
-({ \
- int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
- __ptep); \
- __young; \
-})
-
-#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
-({ \
- int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \
- __ptep); \
- flush_tlb_page(__vma, __address); \
- __dirty; \
-})
-
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- unsigned long old = pte_update(ptep, ~0UL);
-
- if (old & _PAGE_HASHPTE)
- hpte_update(mm, addr, ptep, old, 0);
- return __pte(old);
-}
-
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
- pte_t * ptep)
-{
- unsigned long old = pte_update(ptep, ~0UL);
-
- if (old & _PAGE_HASHPTE)
- hpte_update(mm, addr, ptep, old, 0);
-}
-
-/*
- * set_pte stores a linux PTE into the linux page table.
- */
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- if (pte_present(*ptep)) {
- pte_clear(mm, addr, ptep);
- flush_tlb_pending();
- }
- pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
-
-#ifdef CONFIG_PPC_64K_PAGES
- if (mmu_virtual_psize != MMU_PAGE_64K)
- pte = __pte(pte_val(pte) | _PAGE_COMBO);
-#endif /* CONFIG_PPC_64K_PAGES */
-
- *ptep = pte;
-}
-
-/* Set the dirty and/or accessed bits atomically in a linux PTE, this
- * function doesn't need to flush the hash entry
- */
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
-{
- unsigned long bits = pte_val(entry) &
- (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
- unsigned long old, tmp;
-
- __asm__ __volatile__(
- "1: ldarx %0,0,%4\n\
- andi. %1,%0,%6\n\
- bne- 1b \n\
- or %0,%3,%0\n\
- stdcx. %0,0,%4\n\
- bne- 1b"
- :"=&r" (old), "=&r" (tmp), "=m" (*ptep)
- :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
- :"cc");
-}
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
- do { \
- __ptep_set_access_flags(__ptep, __entry, __dirty); \
- flush_tlb_page_nohash(__vma, __address); \
- } while(0)
-
-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
-
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t vma_prot);
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
-#define __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
-
-#define pte_ERROR(e) \
- printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
-#define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
-#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-
-extern pgd_t swapper_pg_dir[];
-
-extern void paging_init(void);
-
-#ifdef CONFIG_HUGETLB_PAGE
-#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
- free_pgd_range(tlb, addr, end, floor, ceiling)
-#endif
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to put a corresponding HPTE into the hash table
- * ahead of time, instead of waiting for the inevitable extra
- * hash-table miss exception.
- */
-struct vm_area_struct;
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-
-/* Encode and de-code a swap entry */
-#define __swp_type(entry) (((entry).val >> 1) & 0x3f)
-#define __swp_offset(entry) ((entry).val >> 8)
-#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
-#define __pte_to_swp_entry(pte) ((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_RPN_SHIFT })
-#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_RPN_SHIFT)
-#define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
-#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT)
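A worked example of what these macros pack: the type occupies bits 1-6, the offset starts at bit 8, and the whole value is shifted up by PTE_RPN_SHIFT when stored in a PTE so that _PAGE_PRESENT and the other low software bits stay clear. The numbers below are arbitrary and the check is plain user-space C:

#include <assert.h>

int main(void)
{
        unsigned long type = 0x2a, offset = 0x1234;
        unsigned long val = (type << 1) | (offset << 8);        /* __swp_entry() */

        assert(((val >> 1) & 0x3f) == type);                    /* __swp_type() */
        assert((val >> 8) == offset);                           /* __swp_offset() */
        return 0;
}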
-
-/*
- * kern_addr_valid is intended to indicate whether an address is a valid
- * kernel address. Most 32-bit archs define it as always true (like this)
- * but most 64-bit archs actually perform a test. What should we do here?
- * The only use is in fs/ncpfs/dir.c
- */
-#define kern_addr_valid(addr) (1)
-
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
-
-void pgtable_cache_init(void);
-
-/*
- * find_linux_pte returns the address of a linux pte for a given
- * effective address and directory. If not found, it returns zero.
- */
-static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
-{
- pgd_t *pg;
- pud_t *pu;
- pmd_t *pm;
- pte_t *pt = NULL;
-
- pg = pgdir + pgd_index(ea);
- if (!pgd_none(*pg)) {
- pu = pud_offset(pg, ea);
- if (!pud_none(*pu)) {
- pm = pmd_offset(pu, ea);
- if (pmd_present(*pm))
- pt = pte_offset_kernel(pm, ea);
- }
- }
- return pt;
-}
-
-#include <asm-generic/pgtable.h>
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _PPC64_PGTABLE_H */
diff --git a/include/asm-ppc64/prom.h b/include/asm-ppc64/prom.h
deleted file mode 100644
index 76bb0266d67..00000000000
--- a/include/asm-ppc64/prom.h
+++ /dev/null
@@ -1,218 +0,0 @@
-#ifndef _PPC64_PROM_H
-#define _PPC64_PROM_H
-
-/*
- * Definitions for talking to the Open Firmware PROM on
- * Power Macintosh computers.
- *
- * Copyright (C) 1996 Paul Mackerras.
- *
- * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/config.h>
-#include <linux/proc_fs.h>
-#include <asm/atomic.h>
-
-#define PTRRELOC(x) ((typeof(x))((unsigned long)(x) - offset))
-#define PTRUNRELOC(x) ((typeof(x))((unsigned long)(x) + offset))
-#define RELOC(x) (*PTRRELOC(&(x)))
-
-/* Definitions used by the flattened device tree */
-#define OF_DT_HEADER 0xd00dfeed /* marker */
-#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
-#define OF_DT_END_NODE 0x2 /* End node */
-#define OF_DT_PROP 0x3 /* Property: name off, size,
- * content */
-#define OF_DT_NOP 0x4 /* nop */
-#define OF_DT_END 0x9
-
-#define OF_DT_VERSION 0x10
-
-/*
- * This is what gets passed to the kernel by prom_init or kexec
- *
- * The dt struct contains the device tree structure, full paths and
- * property contents. The dt strings contain a separate block with just
- * the strings for the property names, and is fully page aligned and
- * self contained in a page, so that it can be kept around by the kernel,
- * each property name appears only once in this page (cheap compression)
- *
- * The mem_rsvmap contains a map of reserved ranges of physical memory;
- * passing it here instead of in the device-tree itself greatly simplifies
- * things for everybody. It's just a list of u64 pairs (base/size) that
- * ends when size is 0.
- */
-struct boot_param_header
-{
- u32 magic; /* magic word OF_DT_HEADER */
- u32 totalsize; /* total size of DT block */
- u32 off_dt_struct; /* offset to structure */
- u32 off_dt_strings; /* offset to strings */
- u32 off_mem_rsvmap; /* offset to memory reserve map */
- u32 version; /* format version */
- u32 last_comp_version; /* last compatible version */
- /* version 2 fields below */
- u32 boot_cpuid_phys; /* Physical CPU id we're booting on */
- /* version 3 fields below */
- u32 dt_strings_size; /* size of the DT strings block */
-};
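A minimal sketch of how a consumer might sanity-check such a blob; the fields are big-endian in memory, so a user-space reader byte-swaps them. The struct and function below are illustrative stand-ins, not kernel code, and the version test is only an assumption of the usual rule:

#include <stdint.h>
#include <arpa/inet.h>

struct example_fdt_header {
        uint32_t magic, totalsize, off_dt_struct, off_dt_strings;
        uint32_t off_mem_rsvmap, version, last_comp_version;
};

static int example_fdt_looks_valid(const void *blob)
{
        const struct example_fdt_header *h = blob;

        if (ntohl(h->magic) != 0xd00dfeed)              /* OF_DT_HEADER */
                return 0;
        /* parseable if the producer's oldest compatible format is not
         * newer than the one we understand (OF_DT_VERSION = 0x10) */
        return ntohl(h->last_comp_version) <= 0x10;
}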
-
-
-
-typedef u32 phandle;
-typedef u32 ihandle;
-
-struct address_range {
- unsigned long space;
- unsigned long address;
- unsigned long size;
-};
-
-struct interrupt_info {
- int line;
- int sense; /* +ve/-ve logic, edge or level, etc. */
-};
-
-struct pci_address {
- u32 a_hi;
- u32 a_mid;
- u32 a_lo;
-};
-
-struct isa_address {
- u32 a_hi;
- u32 a_lo;
-};
-
-struct isa_range {
- struct isa_address isa_addr;
- struct pci_address pci_addr;
- unsigned int size;
-};
-
-struct reg_property {
- unsigned long address;
- unsigned long size;
-};
-
-struct reg_property32 {
- unsigned int address;
- unsigned int size;
-};
-
-struct reg_property64 {
- unsigned long address;
- unsigned long size;
-};
-
-struct property {
- char *name;
- int length;
- unsigned char *value;
- struct property *next;
-};
-
-struct device_node {
- char *name;
- char *type;
- phandle node;
- phandle linux_phandle;
- int n_addrs;
- struct address_range *addrs;
- int n_intrs;
- struct interrupt_info *intrs;
- char *full_name;
-
- struct property *properties;
- struct device_node *parent;
- struct device_node *child;
- struct device_node *sibling;
- struct device_node *next; /* next device of same type */
- struct device_node *allnext; /* next in list of all nodes */
- struct proc_dir_entry *pde; /* this node's proc directory */
- struct kref kref;
- unsigned long _flags;
- void *data;
-#ifdef CONFIG_PPC_ISERIES
- struct list_head Device_List;
-#endif
-};
-
-extern struct device_node *of_chosen;
-
-/* flag descriptions */
-#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
-
-#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
-#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
-
-/*
- * Until 32-bit ppc can add proc_dir_entries to its device_node
- * definition, we cannot refer to pde, name_link, and addr_link
- * in arch-independent code.
- */
-#define HAVE_ARCH_DEVTREE_FIXUPS
-
-static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
-{
- dn->pde = de;
-}
-
-
-/* OBSOLETE: Old style node lookup */
-extern struct device_node *find_devices(const char *name);
-extern struct device_node *find_type_devices(const char *type);
-extern struct device_node *find_path_device(const char *path);
-extern struct device_node *find_compatible_devices(const char *type,
- const char *compat);
-extern struct device_node *find_all_nodes(void);
-
-/* New style node lookup */
-extern struct device_node *of_find_node_by_name(struct device_node *from,
- const char *name);
-extern struct device_node *of_find_node_by_type(struct device_node *from,
- const char *type);
-extern struct device_node *of_find_compatible_node(struct device_node *from,
- const char *type, const char *compat);
-extern struct device_node *of_find_node_by_path(const char *path);
-extern struct device_node *of_find_node_by_phandle(phandle handle);
-extern struct device_node *of_find_all_nodes(struct device_node *prev);
-extern struct device_node *of_get_parent(const struct device_node *node);
-extern struct device_node *of_get_next_child(const struct device_node *node,
- struct device_node *prev);
-extern struct device_node *of_node_get(struct device_node *node);
-extern void of_node_put(struct device_node *node);
-
-/* For scanning the flat device-tree at boot time */
-int __init of_scan_flat_dt(int (*it)(unsigned long node,
- const char *uname, int depth,
- void *data),
- void *data);
-void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
- unsigned long *size);
-
-/* For updating the device tree at runtime */
-extern void of_attach_node(struct device_node *);
-extern void of_detach_node(const struct device_node *);
-
-/* Other Prototypes */
-extern unsigned long prom_init(unsigned long, unsigned long, unsigned long,
- unsigned long, unsigned long);
-extern void finish_device_tree(void);
-extern int device_is_compatible(struct device_node *device, const char *);
-extern int machine_is_compatible(const char *compat);
-extern unsigned char *get_property(struct device_node *node, const char *name,
- int *lenp);
-extern void print_properties(struct device_node *node);
-extern int prom_n_addr_cells(struct device_node* np);
-extern int prom_n_size_cells(struct device_node* np);
-extern int prom_n_intr_cells(struct device_node* np);
-extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
-extern int prom_add_property(struct device_node* np, struct property* prop);
-
-#endif /* _PPC64_PROM_H */
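A hypothetical use of the lookup helpers above: find the first "cpu" node and read one of its properties. The property name, its be32 layout and the example_* helper name are assumptions for illustration; error handling is minimal.

static unsigned int example_cpu_clock_hz(void)
{
        struct device_node *np;
        unsigned int *prop, hz = 0;
        int len;

        np = of_find_node_by_type(NULL, "cpu");
        if (np) {
                prop = (unsigned int *)get_property(np, "clock-frequency", &len);
                if (prop && len >= (int)sizeof(*prop))
                        hz = *prop;
                of_node_put(np);        /* drop the reference taken by the lookup */
        }
        return hz;
}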
diff --git a/include/asm-ppc64/ptrace-common.h b/include/asm-ppc64/ptrace-common.h
deleted file mode 100644
index b1babb72967..00000000000
--- a/include/asm-ppc64/ptrace-common.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * linux/arch/ppc64/kernel/ptrace-common.h
- *
- * Copyright (c) 2002 Stephen Rothwell, IBM Corporation
- * Extracted from ptrace.c and ptrace32.c
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file README.legal in the main directory of
- * this archive for more details.
- */
-
-#ifndef _PPC64_PTRACE_COMMON_H
-#define _PPC64_PTRACE_COMMON_H
-
-#include <linux/config.h>
-#include <asm/system.h>
-
-/*
- * Set of msr bits that gdb can change on behalf of a process.
- */
-#define MSR_DEBUGCHANGE (MSR_FE0 | MSR_SE | MSR_BE | MSR_FE1)
-
-/*
- * Get contents of register REGNO in task TASK.
- */
-static inline unsigned long get_reg(struct task_struct *task, int regno)
-{
- unsigned long tmp = 0;
-
- /*
- * Put the correct FP bits in; they might be wrong as a result
- * of our lazy FP restore.
- */
- if (regno == PT_MSR) {
- tmp = ((unsigned long *)task->thread.regs)[PT_MSR];
- tmp |= task->thread.fpexc_mode;
- } else if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
- tmp = ((unsigned long *)task->thread.regs)[regno];
- }
-
- return tmp;
-}
-
-/*
- * Write contents of register REGNO in task TASK.
- */
-static inline int put_reg(struct task_struct *task, int regno,
- unsigned long data)
-{
- if (regno < PT_SOFTE) {
- if (regno == PT_MSR)
- data = (data & MSR_DEBUGCHANGE)
- | (task->thread.regs->msr & ~MSR_DEBUGCHANGE);
- ((unsigned long *)task->thread.regs)[regno] = data;
- return 0;
- }
- return -EIO;
-}
-
-static inline void set_single_step(struct task_struct *task)
-{
- struct pt_regs *regs = task->thread.regs;
- if (regs != NULL)
- regs->msr |= MSR_SE;
- set_ti_thread_flag(task->thread_info, TIF_SINGLESTEP);
-}
-
-static inline void clear_single_step(struct task_struct *task)
-{
- struct pt_regs *regs = task->thread.regs;
- if (regs != NULL)
- regs->msr &= ~MSR_SE;
- clear_ti_thread_flag(task->thread_info, TIF_SINGLESTEP);
-}
-
-#ifdef CONFIG_ALTIVEC
-/*
- * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
- * The transfer totals 34 quadwords. Quadwords 0-31 contain the
- * corresponding vector registers. Quadword 32 contains the vscr as the
- * last word (offset 12) within that quadword. Quadword 33 contains the
- * vrsave as the first word (offset 0) within the quadword.
- *
- * This definition of the VMX state is compatible with the current PPC32
- * ptrace interface. This allows signal handling and ptrace to use the
- * same structures. This also simplifies the implementation of a bi-arch
- * (combined 32- and 64-bit) gdb.
- */
-
-/*
- * Get contents of AltiVec register state in task TASK
- */
-static inline int get_vrregs(unsigned long __user *data,
- struct task_struct *task)
-{
- unsigned long regsize;
-
- /* copy AltiVec registers VR[0] .. VR[31] */
- regsize = 32 * sizeof(vector128);
- if (copy_to_user(data, task->thread.vr, regsize))
- return -EFAULT;
- data += (regsize / sizeof(unsigned long));
-
- /* copy VSCR */
- regsize = 1 * sizeof(vector128);
- if (copy_to_user(data, &task->thread.vscr, regsize))
- return -EFAULT;
- data += (regsize / sizeof(unsigned long));
-
- /* copy VRSAVE */
- if (put_user(task->thread.vrsave, (u32 __user *)data))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * Write contents of AltiVec register state into task TASK.
- */
-static inline int set_vrregs(struct task_struct *task,
- unsigned long __user *data)
-{
- unsigned long regsize;
-
- /* copy AltiVec registers VR[0] .. VR[31] */
- regsize = 32 * sizeof(vector128);
- if (copy_from_user(task->thread.vr, data, regsize))
- return -EFAULT;
- data += (regsize / sizeof(unsigned long));
-
- /* copy VSCR */
- regsize = 1 * sizeof(vector128);
- if (copy_from_user(&task->thread.vscr, data, regsize))
- return -EFAULT;
- data += (regsize / sizeof(unsigned long));
-
- /* copy VRSAVE */
- if (get_user(task->thread.vrsave, (u32 __user *)data))
- return -EFAULT;
-
- return 0;
-}
-#endif
-
-static inline int ptrace_set_debugreg(struct task_struct *task,
- unsigned long addr, unsigned long data)
-{
- /* We only support one DABR and no IABRs at the moment */
- if (addr > 0)
- return -EINVAL;
-
- /* The bottom 3 bits are flags */
- if ((data & ~0x7UL) >= TASK_SIZE)
- return -EIO;
-
- /* Ensure translation is on */
- if (data && !(data & DABR_TRANSLATION))
- return -EIO;
-
- task->thread.dabr = data;
- return 0;
-}
-
-#endif /* _PPC64_PTRACE_COMMON_H */
diff --git a/include/asm-ppc64/serial.h b/include/asm-ppc64/serial.h
deleted file mode 100644
index d6bcb79b7d7..00000000000
--- a/include/asm-ppc64/serial.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * include/asm-ppc64/serial.h
- */
-#ifndef _PPC64_SERIAL_H
-#define _PPC64_SERIAL_H
-
-/*
- * This assumes you have a 1.8432 MHz clock for your UART.
- *
- * It'd be nice if someone built a serial card with a 24.576 MHz
- * clock, since the 16550A is capable of handling a top speed of 1.5
- * megabits/second; but this requires the faster clock.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/* Default baud base if not found in device-tree */
-#define BASE_BAUD ( 1843200 / 16 )
-
-#endif /* _PPC64_SERIAL_H */
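For reference, the arithmetic behind the constant (standard 16550 behaviour, not something defined in this header):

/* 1843200 / 16 = 115200, the top standard rate; a 16550 divisor-latch
 * value of 12 then gives 115200 / 12 = 9600 baud. */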
diff --git a/include/asm-ppc64/signal.h b/include/asm-ppc64/signal.h
deleted file mode 100644
index 432df7dd355..00000000000
--- a/include/asm-ppc64/signal.h
+++ /dev/null
@@ -1,132 +0,0 @@
-#ifndef _ASMPPC64_SIGNAL_H
-#define _ASMPPC64_SIGNAL_H
-
-#include <linux/types.h>
-#include <linux/compiler.h>
-#include <asm/siginfo.h>
-
-/* Avoid too many header ordering problems. */
-struct siginfo;
-
-#define _NSIG 64
-#define _NSIG_BPW 64
-#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
-
-typedef unsigned long old_sigset_t; /* at least 32 bits */
-
-typedef struct {
- unsigned long sig[_NSIG_WORDS];
-} sigset_t;
-
-#define SIGHUP 1
-#define SIGINT 2
-#define SIGQUIT 3
-#define SIGILL 4
-#define SIGTRAP 5
-#define SIGABRT 6
-#define SIGIOT 6
-#define SIGBUS 7
-#define SIGFPE 8
-#define SIGKILL 9
-#define SIGUSR1 10
-#define SIGSEGV 11
-#define SIGUSR2 12
-#define SIGPIPE 13
-#define SIGALRM 14
-#define SIGTERM 15
-#define SIGSTKFLT 16
-#define SIGCHLD 17
-#define SIGCONT 18
-#define SIGSTOP 19
-#define SIGTSTP 20
-#define SIGTTIN 21
-#define SIGTTOU 22
-#define SIGURG 23
-#define SIGXCPU 24
-#define SIGXFSZ 25
-#define SIGVTALRM 26
-#define SIGPROF 27
-#define SIGWINCH 28
-#define SIGIO 29
-#define SIGPOLL SIGIO
-/*
-#define SIGLOST 29
-*/
-#define SIGPWR 30
-#define SIGSYS 31
-#define SIGUNUSED 31
-
-/* These should not be considered constants from userland. */
-#define SIGRTMIN 32
-#define SIGRTMAX _NSIG
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK is not currently supported, but will allow sigaltstack(2).
- * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
- * SA_RESTART flag to get restarting signals (which were the default long ago).
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001u
-#define SA_NOCLDWAIT 0x00000002u
-#define SA_SIGINFO 0x00000004u
-#define SA_ONSTACK 0x08000000u
-#define SA_RESTART 0x10000000u
-#define SA_NODEFER 0x40000000u
-#define SA_RESETHAND 0x80000000u
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-#define SA_INTERRUPT 0x20000000u /* dummy -- ignored */
-
-#define SA_RESTORER 0x04000000u
-
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
-#define MINSIGSTKSZ 2048
-#define SIGSTKSZ 8192
-
-#include <asm-generic/signal.h>
-
-struct old_sigaction {
- __sighandler_t sa_handler;
- old_sigset_t sa_mask;
- unsigned long sa_flags;
- __sigrestore_t sa_restorer;
-};
-
-struct sigaction {
- __sighandler_t sa_handler;
- unsigned long sa_flags;
- __sigrestore_t sa_restorer;
- sigset_t sa_mask; /* mask last for extensibility */
-};
-
-struct k_sigaction {
- struct sigaction sa;
-};
-
-typedef struct sigaltstack {
- void __user *ss_sp;
- int ss_flags;
- size_t ss_size;
-} stack_t;
-
-struct pt_regs;
-struct timespec;
-extern int do_signal(sigset_t *oldset, struct pt_regs *regs);
-extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
-#define ptrace_signal_deliver(regs, cookie) do { } while (0)
-
-#endif /* _ASMPPC64_SIGNAL_H */
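Since _NSIG and _NSIG_BPW are both 64, sigset_t here is a single 64-bit word. A sketch of the usual word/bit arithmetic (example_sigaddset() is an illustrative helper, not part of this header):

static inline void example_sigaddset(sigset_t *set, int signo)
{
        /* signals are numbered from 1, bits from 0 */
        set->sig[(signo - 1) / _NSIG_BPW] |= 1UL << ((signo - 1) % _NSIG_BPW);
}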
diff --git a/include/asm-ppc64/spinlock.h b/include/asm-ppc64/spinlock.h
deleted file mode 100644
index 7d84fb5e39f..00000000000
--- a/include/asm-ppc64/spinlock.h
+++ /dev/null
@@ -1,241 +0,0 @@
-#ifndef __ASM_SPINLOCK_H
-#define __ASM_SPINLOCK_H
-
-/*
- * Simple spin lock operations.
- *
- * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
- * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
- * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
- * Rework to support virtual processors
- *
- * An int is used, as a full 64b word is not necessary.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * (the type definitions are in asm/spinlock_types.h)
- */
-#include <linux/config.h>
-#include <asm/paca.h>
-#include <asm/hvcall.h>
-#include <asm/iseries/hv_call.h>
-
-#define __raw_spin_is_locked(x) ((x)->slock != 0)
-
-/*
- * This returns the old value in the lock, so we succeeded
- * in getting the lock if the return value is 0.
- */
-static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
-{
- unsigned long tmp, tmp2;
-
- __asm__ __volatile__(
-" lwz %1,%3(13) # __spin_trylock\n\
-1: lwarx %0,0,%2\n\
- cmpwi 0,%0,0\n\
- bne- 2f\n\
- stwcx. %1,0,%2\n\
- bne- 1b\n\
- isync\n\
-2:" : "=&r" (tmp), "=&r" (tmp2)
- : "r" (&lock->slock), "i" (offsetof(struct paca_struct, lock_token))
- : "cr0", "memory");
-
- return tmp;
-}
-
-static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
-{
- return __spin_trylock(lock) == 0;
-}
-
-/*
- * On a system with shared processors (that is, where a physical
- * processor is multiplexed between several virtual processors),
- * there is no point spinning on a lock if the holder of the lock
- * isn't currently scheduled on a physical processor. Instead
- * we detect this situation and ask the hypervisor to give the
- * rest of our timeslice to the lock holder.
- *
- * So that we can tell which virtual processor is holding a lock,
- * we put 0x80000000 | smp_processor_id() in the lock when it is
- * held. Conveniently, we have a word in the paca that holds this
- * value.
- */
-
-#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
-/* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc)
-extern void __spin_yield(raw_spinlock_t *lock);
-extern void __rw_yield(raw_rwlock_t *lock);
-#else /* SPLPAR || ISERIES */
-#define __spin_yield(x) barrier()
-#define __rw_yield(x) barrier()
-#define SHARED_PROCESSOR 0
-#endif
-
-static void __inline__ __raw_spin_lock(raw_spinlock_t *lock)
-{
- while (1) {
- if (likely(__spin_trylock(lock) == 0))
- break;
- do {
- HMT_low();
- if (SHARED_PROCESSOR)
- __spin_yield(lock);
- } while (unlikely(lock->slock != 0));
- HMT_medium();
- }
-}
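The same acquire structure in portable user-space form, for illustration only: try to install a token, and while the lock looks held do cheap read-only spins with a yield hook. Here example_spin_lock(), the token argument and sched_yield() stand in for __spin_trylock(), the paca lock token and __spin_yield().

#include <sched.h>
#include <stdatomic.h>

static void example_spin_lock(atomic_uint *lock, unsigned int token)
{
        for (;;) {
                unsigned int expected = 0;
                if (atomic_compare_exchange_strong(lock, &expected, token))
                        return;                 /* our token is stored: lock owned */
                while (atomic_load_explicit(lock, memory_order_relaxed) != 0)
                        sched_yield();          /* give the holder a chance to run */
        }
}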
-
-static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
-{
- unsigned long flags_dis;
-
- while (1) {
- if (likely(__spin_trylock(lock) == 0))
- break;
- local_save_flags(flags_dis);
- local_irq_restore(flags);
- do {
- HMT_low();
- if (SHARED_PROCESSOR)
- __spin_yield(lock);
- } while (unlikely(lock->slock != 0));
- HMT_medium();
- local_irq_restore(flags_dis);
- }
-}
-
-static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
-{
- __asm__ __volatile__("lwsync # __raw_spin_unlock": : :"memory");
- lock->slock = 0;
-}
-
-extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
-
-/*
- * Read-write spinlocks, allowing multiple readers
- * but only one writer.
- *
- * NOTE! it is quite common to have readers in interrupts
- * but no interrupt writers. For those circumstances we
- * can "mix" irq-safe locks - any writer needs to get a
- * irq-safe write-lock, but readers can get non-irqsafe
- * read-locks.
- */
-
-#define __raw_read_can_lock(rw) ((rw)->lock >= 0)
-#define __raw_write_can_lock(rw) (!(rw)->lock)
-
-/*
- * This returns the old value in the lock + 1,
- * so we got a read lock if the return value is > 0.
- */
-static long __inline__ __read_trylock(raw_rwlock_t *rw)
-{
- long tmp;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%1 # read_trylock\n\
- extsw %0,%0\n\
- addic. %0,%0,1\n\
- ble- 2f\n\
- stwcx. %0,0,%1\n\
- bne- 1b\n\
- isync\n\
-2:" : "=&r" (tmp)
- : "r" (&rw->lock)
- : "cr0", "xer", "memory");
-
- return tmp;
-}
-
-/*
- * This returns the old value in the lock,
- * so we got the write lock if the return value is 0.
- */
-static __inline__ long __write_trylock(raw_rwlock_t *rw)
-{
- long tmp, tmp2;
-
- __asm__ __volatile__(
-" lwz %1,%3(13) # write_trylock\n\
-1: lwarx %0,0,%2\n\
- cmpwi 0,%0,0\n\
- bne- 2f\n\
- stwcx. %1,0,%2\n\
- bne- 1b\n\
- isync\n\
-2:" : "=&r" (tmp), "=&r" (tmp2)
- : "r" (&rw->lock), "i" (offsetof(struct paca_struct, lock_token))
- : "cr0", "memory");
-
- return tmp;
-}
-
-static void __inline__ __raw_read_lock(raw_rwlock_t *rw)
-{
- while (1) {
- if (likely(__read_trylock(rw) > 0))
- break;
- do {
- HMT_low();
- if (SHARED_PROCESSOR)
- __rw_yield(rw);
- } while (unlikely(rw->lock < 0));
- HMT_medium();
- }
-}
-
-static void __inline__ __raw_write_lock(raw_rwlock_t *rw)
-{
- while (1) {
- if (likely(__write_trylock(rw) == 0))
- break;
- do {
- HMT_low();
- if (SHARED_PROCESSOR)
- __rw_yield(rw);
- } while (unlikely(rw->lock != 0));
- HMT_medium();
- }
-}
-
-static int __inline__ __raw_read_trylock(raw_rwlock_t *rw)
-{
- return __read_trylock(rw) > 0;
-}
-
-static int __inline__ __raw_write_trylock(raw_rwlock_t *rw)
-{
- return __write_trylock(rw) == 0;
-}
-
-static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
-{
- long tmp;
-
- __asm__ __volatile__(
- "eieio # read_unlock\n\
-1: lwarx %0,0,%1\n\
- addic %0,%0,-1\n\
- stwcx. %0,0,%1\n\
- bne- 1b"
- : "=&r"(tmp)
- : "r"(&rw->lock)
- : "cr0", "memory");
-}
-
-static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
-{
- __asm__ __volatile__("lwsync # write_unlock": : :"memory");
- rw->lock = 0;
-}
-
-#endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
deleted file mode 100644
index 0cdd66c9f4b..00000000000
--- a/include/asm-ppc64/system.h
+++ /dev/null
@@ -1,308 +0,0 @@
-#ifndef __PPC64_SYSTEM_H
-#define __PPC64_SYSTEM_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/config.h>
-#include <linux/compiler.h>
-#include <asm/page.h>
-#include <asm/processor.h>
-#include <asm/hw_irq.h>
-#include <asm/synch.h>
-
-/*
- * Memory barrier.
- * The sync instruction guarantees that all memory accesses initiated
- * by this processor have been performed (with respect to all other
- * mechanisms that access memory). The eieio instruction is a barrier
- * providing an ordering (separately) for (a) cacheable stores and (b)
- * loads and stores to non-cacheable memory (e.g. I/O devices).
- *
- * mb() prevents loads and stores being reordered across this point.
- * rmb() prevents loads being reordered across this point.
- * wmb() prevents stores being reordered across this point.
- * read_barrier_depends() prevents data-dependent loads being reordered
- * across this point (nop on PPC).
- *
- * We have to use the sync instructions for mb(), since lwsync doesn't
- * order loads with respect to previous stores. Lwsync is fine for
- * rmb(), though.
- * For wmb(), we use sync since wmb is used in drivers to order
- * stores to system memory with respect to writes to the device.
- * However, smp_wmb() can be a lighter-weight eieio barrier on
- * SMP since it is only used to order updates to system memory.
- */
-#define mb() __asm__ __volatile__ ("sync" : : : "memory")
-#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
-#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
-#define read_barrier_depends() do { } while(0)
-
-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
-#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() eieio()
-#define smp_read_barrier_depends() read_barrier_depends()
-#else
-#define smp_mb() __asm__ __volatile__("": : :"memory")
-#define smp_rmb() __asm__ __volatile__("": : :"memory")
-#define smp_wmb() __asm__ __volatile__("": : :"memory")
-#define smp_read_barrier_depends() do { } while(0)
-#endif /* CONFIG_SMP */
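The canonical pattern these barriers exist for, as a sketch; data, flag and the example_* names are placeholders, not kernel code:

static int data;
static volatile int flag;

static void example_producer(int value)
{
        data = value;
        wmb();                  /* order the data store before the flag store */
        flag = 1;
}

static int example_consumer(void)
{
        while (!flag)
                ;               /* wait until the producer has published */
        rmb();                  /* order the flag load before the data load */
        return data;
}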
-
-#ifdef __KERNEL__
-struct task_struct;
-struct pt_regs;
-
-#ifdef CONFIG_DEBUGGER
-
-extern int (*__debugger)(struct pt_regs *regs);
-extern int (*__debugger_ipi)(struct pt_regs *regs);
-extern int (*__debugger_bpt)(struct pt_regs *regs);
-extern int (*__debugger_sstep)(struct pt_regs *regs);
-extern int (*__debugger_iabr_match)(struct pt_regs *regs);
-extern int (*__debugger_dabr_match)(struct pt_regs *regs);
-extern int (*__debugger_fault_handler)(struct pt_regs *regs);
-
-#define DEBUGGER_BOILERPLATE(__NAME) \
-static inline int __NAME(struct pt_regs *regs) \
-{ \
- if (unlikely(__ ## __NAME)) \
- return __ ## __NAME(regs); \
- return 0; \
-}
-
-DEBUGGER_BOILERPLATE(debugger)
-DEBUGGER_BOILERPLATE(debugger_ipi)
-DEBUGGER_BOILERPLATE(debugger_bpt)
-DEBUGGER_BOILERPLATE(debugger_sstep)
-DEBUGGER_BOILERPLATE(debugger_iabr_match)
-DEBUGGER_BOILERPLATE(debugger_dabr_match)
-DEBUGGER_BOILERPLATE(debugger_fault_handler)
-
-#ifdef CONFIG_XMON
-extern void xmon_init(int enable);
-#endif
-
-#else
-static inline int debugger(struct pt_regs *regs) { return 0; }
-static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
-static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
-static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
-static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
-static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
-static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
-#endif
-
-extern int set_dabr(unsigned long dabr);
-extern void _exception(int signr, struct pt_regs *regs, int code,
- unsigned long addr);
-extern int fix_alignment(struct pt_regs *regs);
-extern void bad_page_fault(struct pt_regs *regs, unsigned long address,
- int sig);
-extern void show_regs(struct pt_regs * regs);
-extern void low_hash_fault(struct pt_regs *regs, unsigned long address);
-extern int die(const char *str, struct pt_regs *regs, long err);
-
-extern int _get_PVR(void);
-extern void giveup_fpu(struct task_struct *);
-extern void disable_kernel_fp(void);
-extern void flush_fp_to_thread(struct task_struct *);
-extern void enable_kernel_fp(void);
-extern void giveup_altivec(struct task_struct *);
-extern void disable_kernel_altivec(void);
-extern void enable_kernel_altivec(void);
-extern int emulate_altivec(struct pt_regs *);
-extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
-extern void cvt_df(double *from, float *to, struct thread_struct *thread);
-
-#ifdef CONFIG_ALTIVEC
-extern void flush_altivec_to_thread(struct task_struct *);
-#else
-static inline void flush_altivec_to_thread(struct task_struct *t)
-{
-}
-#endif
-
-static inline void flush_spe_to_thread(struct task_struct *t)
-{
-}
-
-extern int mem_init_done; /* set on boot once kmalloc can be called */
-extern unsigned long memory_limit;
-
-/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
-extern unsigned char e2a(unsigned char);
-
-extern struct task_struct *__switch_to(struct task_struct *,
- struct task_struct *);
-#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
-
-struct thread_struct;
-extern struct task_struct * _switch(struct thread_struct *prev,
- struct thread_struct *next);
-
-extern int powersave_nap; /* set if nap mode can be used in idle loop */
-
-/*
- * Atomic exchange
- *
- * Changes the memory location '*ptr' to be val and returns
- * the previous value stored there.
- *
- * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64
- * is more like most of the other architectures.
- */
-static __inline__ unsigned long
-__xchg_u32(volatile unsigned int *m, unsigned long val)
-{
- unsigned long dummy;
-
- __asm__ __volatile__(
- EIEIO_ON_SMP
-"1: lwarx %0,0,%3 # __xchg_u32\n\
- stwcx. %2,0,%3\n\
-2: bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (dummy), "=m" (*m)
- : "r" (val), "r" (m)
- : "cc", "memory");
-
- return (dummy);
-}
-
-static __inline__ unsigned long
-__xchg_u64(volatile long *m, unsigned long val)
-{
- unsigned long dummy;
-
- __asm__ __volatile__(
- EIEIO_ON_SMP
-"1: ldarx %0,0,%3 # __xchg_u64\n\
- stdcx. %2,0,%3\n\
-2: bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (dummy), "=m" (*m)
- : "r" (val), "r" (m)
- : "cc", "memory");
-
- return (dummy);
-}
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid xchg().
- */
-extern void __xchg_called_with_bad_pointer(void);
-
-static __inline__ unsigned long
-__xchg(volatile void *ptr, unsigned long x, unsigned int size)
-{
- switch (size) {
- case 4:
- return __xchg_u32(ptr, x);
- case 8:
- return __xchg_u64(ptr, x);
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
-
-#define xchg(ptr,x) \
- ({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
- })
-
-#define tas(ptr) (xchg((ptr),1))
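tas() is the classic test-and-set primitive; a hypothetical busy flag built on it (names are illustrative, and a real lock would also need the barriers discussed above):

static volatile unsigned int example_busy;

static void example_acquire(void)
{
        while (tas(&example_busy) != 0)
                ;               /* old value 0 means we were the one who set it */
}

static void example_release(void)
{
        example_busy = 0;
}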
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-static __inline__ unsigned long
-__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
-{
- unsigned int prev;
-
- __asm__ __volatile__ (
- EIEIO_ON_SMP
-"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
- cmpw 0,%0,%3\n\
- bne- 2f\n\
- stwcx. %4,0,%2\n\
- bne- 1b"
- ISYNC_ON_SMP
- "\n\
-2:"
- : "=&r" (prev), "=m" (*p)
- : "r" (p), "r" (old), "r" (new), "m" (*p)
- : "cc", "memory");
-
- return prev;
-}
-
-static __inline__ unsigned long
-__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
-{
- unsigned long prev;
-
- __asm__ __volatile__ (
- EIEIO_ON_SMP
-"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
- cmpd 0,%0,%3\n\
- bne- 2f\n\
- stdcx. %4,0,%2\n\
- bne- 1b"
- ISYNC_ON_SMP
- "\n\
-2:"
- : "=&r" (prev), "=m" (*p)
- : "r" (p), "r" (old), "r" (new), "m" (*p)
- : "cc", "memory");
-
- return prev;
-}
-
-/* This function doesn't exist, so you'll get a linker error
- if something tries to do an invalid cmpxchg(). */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static __inline__ unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
- unsigned int size)
-{
- switch (size) {
- case 4:
- return __cmpxchg_u32(ptr, old, new);
- case 8:
- return __cmpxchg_u64(ptr, old, new);
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
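The usual retry loop built on cmpxchg(): recompute from the old value until no other CPU raced with the update (example_add_return() and its counter argument are illustrative):

static unsigned long example_add_return(volatile unsigned long *counter,
                                        unsigned long n)
{
        unsigned long old, new;

        do {
                old = *counter;
                new = old + n;
        } while (cmpxchg(counter, old, new) != old);    /* retry if we raced */

        return new;
}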
-
-/*
- * We handle most unaligned accesses in hardware. On the other hand,
- * unaligned DMA can be very expensive on some ppc64 IO chips (it does
- * powers of 2 writes until it reaches sufficient alignment).
- *
- * Based on this we disable the IP header alignment in network drivers.
- */
-#define NET_IP_ALIGN 0
-
-#define arch_align_stack(x) (x)
-
-extern unsigned long reloc_offset(void);
-
-#endif /* __KERNEL__ */
-#endif
diff --git a/include/asm-ppc64/systemcfg.h b/include/asm-ppc64/systemcfg.h
deleted file mode 100644
index 9b86b53129a..00000000000
--- a/include/asm-ppc64/systemcfg.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#ifndef _SYSTEMCFG_H
-#define _SYSTEMCFG_H
-
-/*
- * Copyright (C) 2002 Peter Bergner <bergner@vnet.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/* Change Activity:
- * 2002/09/30 : bergner : Created
- * End Change Activity
- */
-
-/*
- * If the major version changes we are incompatible.
- * Minor version changes are a hint.
- */
-#define SYSTEMCFG_MAJOR 1
-#define SYSTEMCFG_MINOR 1
-
-#ifndef __ASSEMBLY__
-
-#include <linux/unistd.h>
-
-#define SYSCALL_MAP_SIZE ((__NR_syscalls + 31) / 32)
-
-struct systemcfg {
- __u8 eye_catcher[16]; /* Eyecatcher: SYSTEMCFG:PPC64 0x00 */
- struct { /* Systemcfg version numbers */
- __u32 major; /* Major number 0x10 */
- __u32 minor; /* Minor number 0x14 */
- } version;
-
- __u32 platform; /* Platform flags 0x18 */
- __u32 processor; /* Processor type 0x1C */
- __u64 processorCount; /* # of physical processors 0x20 */
- __u64 physicalMemorySize; /* Size of real memory(B) 0x28 */
- __u64 tb_orig_stamp; /* Timebase at boot 0x30 */
- __u64 tb_ticks_per_sec; /* Timebase ticks / sec 0x38 */
- __u64 tb_to_xs; /* Inverse of TB to 2^20 0x40 */
- __u64 stamp_xsec; /* 0x48 */
- __u64 tb_update_count; /* Timebase atomicity ctr 0x50 */
- __u32 tz_minuteswest; /* Minutes west of Greenwich 0x58 */
- __u32 tz_dsttime; /* Type of dst correction 0x5C */
- /* next four are no longer used except to be exported to /proc */
- __u32 dcache_size; /* L1 d-cache size 0x60 */
- __u32 dcache_line_size; /* L1 d-cache line size 0x64 */
- __u32 icache_size; /* L1 i-cache size 0x68 */
- __u32 icache_line_size; /* L1 i-cache line size 0x6C */
- __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of available syscalls 0x70 */
- __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of available syscalls */
-};
-
-#ifdef __KERNEL__
-extern struct systemcfg *systemcfg;
-#endif
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _SYSTEMCFG_H */
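
A minimal sketch, using only what the header above declares, of how a consumer of the systemcfg page might apply its major/minor versioning rule before trusting the layout (systemcfg_usable is a made-up helper):

#include <asm/systemcfg.h>

static int systemcfg_usable(void)
{
        /* A major-version change means the layout is incompatible;
         * a newer minor version only hints that fields were added. */
        if (systemcfg->version.major != SYSTEMCFG_MAJOR)
                return 0;
        return systemcfg->tb_ticks_per_sec != 0;
}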
diff --git a/include/asm-ppc64/tce.h b/include/asm-ppc64/tce.h
deleted file mode 100644
index d40b6b42ab3..00000000000
--- a/include/asm-ppc64/tce.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
- * Rewrite, cleanup:
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _ASM_TCE_H
-#define _ASM_TCE_H
-
-/*
- * Tces come in two formats, one for the virtual bus and a different
- * format for PCI
- */
-#define TCE_VB 0
-#define TCE_PCI 1
-
-/* TCE page size is 4096 bytes (1 << 12) */
-
-#define TCE_SHIFT 12
-#define TCE_PAGE_SIZE (1 << TCE_SHIFT)
-#define TCE_PAGE_FACTOR (PAGE_SHIFT - TCE_SHIFT)
-
-
-/* tce_entry
- * Used by pSeries (SMP) and iSeries/pSeries LPAR, but there it's
- * abstracted so layout is irrelevant.
- */
-union tce_entry {
- unsigned long te_word;
- struct {
- unsigned int tb_cacheBits :6; /* Cache hash bits - not used */
- unsigned int tb_rsvd :6;
- unsigned long tb_rpn :40; /* Real page number */
- unsigned int tb_valid :1; /* Tce is valid (vb only) */
- unsigned int tb_allio :1; /* Tce is valid for all lps (vb only) */
- unsigned int tb_lpindex :8; /* LpIndex for user of TCE (vb only) */
- unsigned int tb_pciwr :1; /* Write allowed (pci only) */
- unsigned int tb_rdwr :1; /* Read allowed (pci), Write allowed (vb) */
- } te_bits;
-#define te_cacheBits te_bits.tb_cacheBits
-#define te_rpn te_bits.tb_rpn
-#define te_valid te_bits.tb_valid
-#define te_allio te_bits.tb_allio
-#define te_lpindex te_bits.tb_lpindex
-#define te_pciwr te_bits.tb_pciwr
-#define te_rdwr te_bits.tb_rdwr
-};
-
-
-#endif
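
A hedged sketch of packing a PCI-format, read/write TCE through the bitfield view of union tce_entry; make_pci_tce is a made-up helper, not the kernel's actual TCE-build routine:

#include <asm/tce.h>

static unsigned long make_pci_tce(unsigned long paddr)
{
        union tce_entry te;

        te.te_word  = 0;
        te.te_rpn   = paddr >> TCE_SHIFT;       /* real page number */
        te.te_rdwr  = 1;                        /* device reads allowed */
        te.te_pciwr = 1;                        /* device writes allowed */
        return te.te_word;
}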
diff --git a/include/asm-ppc64/udbg.h b/include/asm-ppc64/udbg.h
deleted file mode 100644
index e3b92799185..00000000000
--- a/include/asm-ppc64/udbg.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __UDBG_HDR
-#define __UDBG_HDR
-
-#include <linux/compiler.h>
-#include <linux/init.h>
-
-/*
- * c 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-extern void (*udbg_putc)(unsigned char c);
-extern unsigned char (*udbg_getc)(void);
-extern int (*udbg_getc_poll)(void);
-
-extern void udbg_puts(const char *s);
-extern int udbg_write(const char *s, int n);
-extern int udbg_read(char *buf, int buflen);
-
-extern void register_early_udbg_console(void);
-extern void udbg_printf(const char *fmt, ...);
-
-extern void udbg_init_uart(void __iomem *comport, unsigned int speed);
-
-struct device_node;
-extern void udbg_init_scc(struct device_node *np);
-#endif
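
A sketch of how a platform setup file might hook these function pointers up to its own polled UART before the normal console exists; my_uart_putc and my_uart_getc are hypothetical stubs standing in for board-specific code:

#include <linux/init.h>
#include <asm/udbg.h>

static void my_uart_putc(unsigned char c)
{
        /* board-specific polled transmit would go here */
}

static unsigned char my_uart_getc(void)
{
        /* board-specific polled receive would go here */
        return 0;
}

static void __init my_board_udbg_init(void)
{
        udbg_putc = my_uart_putc;
        udbg_getc = my_uart_getc;
        register_early_udbg_console();
        udbg_printf("early debug console initialised\n");
}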
diff --git a/include/asm-ppc64/vdso.h b/include/asm-ppc64/vdso.h
deleted file mode 100644
index 85d8a7be25c..00000000000
--- a/include/asm-ppc64/vdso.h
+++ /dev/null
@@ -1,83 +0,0 @@
-#ifndef __PPC64_VDSO_H__
-#define __PPC64_VDSO_H__
-
-#ifdef __KERNEL__
-
-/* Default link addresses for the vDSOs */
-#define VDSO32_LBASE 0x100000
-#define VDSO64_LBASE 0x100000
-
-/* Default map addresses */
-#define VDSO32_MBASE VDSO32_LBASE
-#define VDSO64_MBASE VDSO64_LBASE
-
-#define VDSO_VERSION_STRING LINUX_2.6.12
-
-/* Define if 64 bits VDSO has procedure descriptors */
-#undef VDS64_HAS_DESCRIPTORS
-
-#ifndef __ASSEMBLY__
-
-extern unsigned int vdso64_pages;
-extern unsigned int vdso32_pages;
-
-/* Offsets relative to thread->vdso_base */
-extern unsigned long vdso64_rt_sigtramp;
-extern unsigned long vdso32_sigtramp;
-extern unsigned long vdso32_rt_sigtramp;
-
-extern void vdso_init(void);
-
-#else /* __ASSEMBLY__ */
-
-#ifdef __VDSO64__
-#ifdef VDS64_HAS_DESCRIPTORS
-#define V_FUNCTION_BEGIN(name) \
- .globl name; \
- .section ".opd","a"; \
- .align 3; \
- name: \
- .quad .name,.TOC.@tocbase,0; \
- .previous; \
- .globl .name; \
- .type .name,@function; \
- .name: \
-
-#define V_FUNCTION_END(name) \
- .size .name,.-.name;
-
-#define V_LOCAL_FUNC(name) (.name)
-
-#else /* VDS64_HAS_DESCRIPTORS */
-
-#define V_FUNCTION_BEGIN(name) \
- .globl name; \
- name: \
-
-#define V_FUNCTION_END(name) \
- .size name,.-name;
-
-#define V_LOCAL_FUNC(name) (name)
-
-#endif /* VDS64_HAS_DESCRIPTORS */
-#endif /* __VDSO64__ */
-
-#ifdef __VDSO32__
-
-#define V_FUNCTION_BEGIN(name) \
- .globl name; \
- .type name,@function; \
- name: \
-
-#define V_FUNCTION_END(name) \
- .size name,.-name;
-
-#define V_LOCAL_FUNC(name) (name)
-
-#endif /* __VDSO32__ */
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-
-#endif /* __PPC64_VDSO_H__ */
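
Lastly, a hedged sketch of how the C-side symbols above might be used to locate the 32-bit RT signal trampoline, assuming the thread->vdso_base field that the header's comment refers to holds the per-thread map address (rt32_sigtramp_addr is a made-up helper):

#include <asm/processor.h>
#include <asm/vdso.h>

static unsigned long rt32_sigtramp_addr(struct thread_struct *thread)
{
        if (!vdso32_pages)
                return 0;       /* no 32-bit vDSO was mapped */
        return thread->vdso_base + vdso32_rt_sigtramp;
}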