Diffstat (limited to 'include')
-rw-r--r--  include/asm-arm/arch-ixp4xx/io.h | 9
-rw-r--r--  include/asm-arm/hardware/amba_serial.h | 5
-rw-r--r--  include/asm-arm/io.h | 21
-rw-r--r--  include/asm-arm/uaccess.h | 8
-rw-r--r--  include/asm-parisc/irq.h | 5
-rw-r--r--  include/asm-parisc/smp.h | 7
-rw-r--r--  include/asm-parisc/spinlock.h | 19
-rw-r--r--  include/asm-parisc/tlbflush.h | 16
-rw-r--r--  include/asm-powerpc/dma-mapping.h (renamed from include/asm-ppc/dma-mapping.h) | 138
-rw-r--r--  include/asm-powerpc/io.h (renamed from include/asm-ppc64/io.h) | 12
-rw-r--r--  include/asm-powerpc/mmu.h (renamed from include/asm-ppc64/mmu.h) | 14
-rw-r--r--  include/asm-powerpc/mmu_context.h (renamed from include/asm-ppc64/mmu_context.h) | 12
-rw-r--r--  include/asm-powerpc/mmzone.h (renamed from include/asm-ppc64/mmzone.h) | 0
-rw-r--r--  include/asm-powerpc/pci-bridge.h (renamed from include/asm-ppc64/pci-bridge.h) | 12
-rw-r--r--  include/asm-powerpc/pci.h (renamed from include/asm-ppc64/pci.h) | 110
-rw-r--r--  include/asm-powerpc/pgalloc.h (renamed from include/asm-ppc64/pgalloc.h) | 11
-rw-r--r--  include/asm-powerpc/pgtable-4k.h (renamed from include/asm-ppc64/pgtable-4k.h) | 0
-rw-r--r--  include/asm-powerpc/pgtable-64k.h (renamed from include/asm-ppc64/pgtable-64k.h) | 0
-rw-r--r--  include/asm-powerpc/pgtable.h (renamed from include/asm-ppc64/pgtable.h) | 20
-rw-r--r--  include/asm-powerpc/ppc-pci.h | 2
-rw-r--r--  include/asm-powerpc/spinlock.h (renamed from include/asm-ppc64/spinlock.h) | 72
-rw-r--r--  include/asm-ppc/io.h | 17
-rw-r--r--  include/asm-ppc64/dma-mapping.h | 136
-rw-r--r--  include/asm-ppc64/imalloc.h | 26
-rw-r--r--  include/asm-ppc64/ptrace-common.h | 164
-rw-r--r--  include/asm-x86_64/msr.h | 2
-rw-r--r--  include/linux/cciss_ioctl.h | 2
-rw-r--r--  include/linux/hdreg.h | 6
-rw-r--r--  include/linux/ide.h | 30
-rw-r--r--  include/linux/mm.h | 2
-rw-r--r--  include/linux/netfilter_ipv4/ipt_sctp.h | 12
-rw-r--r--  include/linux/pci_ids.h | 3
-rw-r--r--  include/linux/skbuff.h | 7
-rw-r--r--  include/linux/uinput.h | 13
-rw-r--r--  include/net/ipv6.h | 2
35 files changed, 374 insertions, 541 deletions
diff --git a/include/asm-arm/arch-ixp4xx/io.h b/include/asm-arm/arch-ixp4xx/io.h
index 688f7f90d93..942b622455b 100644
--- a/include/asm-arm/arch-ixp4xx/io.h
+++ b/include/asm-arm/arch-ixp4xx/io.h
@@ -59,11 +59,10 @@ extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
* fallback to the default.
*/
static inline void __iomem *
-__ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags, unsigned long align)
+__ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags)
{
- extern void __iomem * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
if((addr < 0x48000000) || (addr > 0x4fffffff))
- return __ioremap(addr, size, flags, align);
+ return __ioremap(addr, size, flags);
return (void *)addr;
}
@@ -71,13 +70,11 @@ __ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags, unsigned
static inline void
__ixp4xx_iounmap(void __iomem *addr)
{
- extern void __iounmap(void __iomem *addr);
-
if ((u32)addr >= VMALLOC_START)
__iounmap(addr);
}
-#define __arch_ioremap(a, s, f, x) __ixp4xx_ioremap(a, s, f, x)
+#define __arch_ioremap(a, s, f) __ixp4xx_ioremap(a, s, f)
#define __arch_iounmap(a) __ixp4xx_iounmap(a)
#define writeb(v, p) __ixp4xx_writeb(v, p)
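A minimal sketch of how a driver sees this change: the dropped alignment argument never appears at the ioremap()/iounmap() level, so callers are unaffected; only the __ioremap()/__arch_ioremap() plumbing loses a parameter. The base address and mapping size below are made-up values for illustration.

#include <linux/errno.h>
#include <asm/io.h>

#define EXAMPLE_PHYS_BASE	0x50000000UL	/* hypothetical device, outside the IXP4xx PCI window */

static void __iomem *example_base;

static int example_map(void)
{
	/* expands to the 3-argument __ixp4xx_ioremap() on IXP4xx, __ioremap() elsewhere */
	example_base = ioremap(EXAMPLE_PHYS_BASE, 0x1000);
	if (!example_base)
		return -ENOMEM;
	return 0;
}

static void example_unmap(void)
{
	iounmap(example_base);
}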
diff --git a/include/asm-arm/hardware/amba_serial.h b/include/asm-arm/hardware/amba_serial.h
index 71770aa6389..dc726ffcceb 100644
--- a/include/asm-arm/hardware/amba_serial.h
+++ b/include/asm-arm/hardware/amba_serial.h
@@ -50,6 +50,11 @@
#define UART011_ICR 0x44 /* Interrupt clear register. */
#define UART011_DMACR 0x48 /* DMA control register. */
+#define UART011_DR_OE (1 << 11)
+#define UART011_DR_BE (1 << 10)
+#define UART011_DR_PE (1 << 9)
+#define UART011_DR_FE (1 << 8)
+
#define UART01x_RSR_OE 0x08
#define UART01x_RSR_BE 0x04
#define UART01x_RSR_PE 0x02
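The new UART011_DR_* masks mirror the error flags the PL011 reports in the upper bits of its data register. A hedged sketch of how a receive path might use them (the helper and its return codes are invented for illustration):

#include <linux/types.h>
#include <asm/hardware/amba_serial.h>

static int example_decode_rx(unsigned int dr, unsigned char *ch)
{
	*ch = dr & 0xff;			/* received character lives in bits 7:0 */
	if (dr & UART011_DR_BE)
		return -1;			/* break condition */
	if (dr & UART011_DR_OE)
		return -2;			/* FIFO overrun */
	if (dr & (UART011_DR_PE | UART011_DR_FE))
		return -3;			/* parity or framing error */
	return 0;
}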
diff --git a/include/asm-arm/io.h b/include/asm-arm/io.h
index 2e6799632f1..ae69db4a101 100644
--- a/include/asm-arm/io.h
+++ b/include/asm-arm/io.h
@@ -55,6 +55,12 @@ extern void __raw_readsl(void __iomem *addr, void *data, int longlen);
#define __raw_readl(a) (__chk_io_ptr(a), *(volatile unsigned int __force *)(a))
/*
+ * Architecture ioremap implementation.
+ */
+extern void __iomem * __ioremap(unsigned long, size_t, unsigned long);
+extern void __iounmap(void __iomem *addr);
+
+/*
* Bad read/write accesses...
*/
extern void __readwrite_bug(const char *fn);
@@ -256,18 +262,15 @@ out:
* ioremap takes a PCI memory address, as specified in
* Documentation/IO-mapping.txt.
*/
-extern void __iomem * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
-extern void __iounmap(void __iomem *addr);
-
#ifndef __arch_ioremap
-#define ioremap(cookie,size) __ioremap(cookie,size,0,1)
-#define ioremap_nocache(cookie,size) __ioremap(cookie,size,0,1)
-#define ioremap_cached(cookie,size) __ioremap(cookie,size,L_PTE_CACHEABLE,1)
+#define ioremap(cookie,size) __ioremap(cookie,size,0)
+#define ioremap_nocache(cookie,size) __ioremap(cookie,size,0)
+#define ioremap_cached(cookie,size) __ioremap(cookie,size,L_PTE_CACHEABLE)
#define iounmap(cookie) __iounmap(cookie)
#else
-#define ioremap(cookie,size) __arch_ioremap((cookie),(size),0,1)
-#define ioremap_nocache(cookie,size) __arch_ioremap((cookie),(size),0,1)
-#define ioremap_cached(cookie,size) __arch_ioremap((cookie),(size),L_PTE_CACHEABLE,1)
+#define ioremap(cookie,size) __arch_ioremap((cookie),(size),0)
+#define ioremap_nocache(cookie,size) __arch_ioremap((cookie),(size),0)
+#define ioremap_cached(cookie,size) __arch_ioremap((cookie),(size),L_PTE_CACHEABLE)
#define iounmap(cookie) __arch_iounmap(cookie)
#endif
diff --git a/include/asm-arm/uaccess.h b/include/asm-arm/uaccess.h
index a2fdad0138b..064f0f5e8e2 100644
--- a/include/asm-arm/uaccess.h
+++ b/include/asm-arm/uaccess.h
@@ -100,7 +100,6 @@ static inline void set_fs (mm_segment_t fs)
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
-extern int __get_user_8(void *);
extern int __get_user_bad(void);
#define __get_user_x(__r2,__p,__e,__s,__i...) \
@@ -114,7 +113,7 @@ extern int __get_user_bad(void);
#define get_user(x,p) \
({ \
const register typeof(*(p)) __user *__p asm("r0") = (p);\
- register typeof(*(p)) __r2 asm("r2"); \
+ register unsigned int __r2 asm("r2"); \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
case 1: \
@@ -126,12 +125,9 @@ extern int __get_user_bad(void);
case 4: \
__get_user_x(__r2, __p, __e, 4, "lr"); \
break; \
- case 8: \
- __get_user_x(__r2, __p, __e, 8, "lr"); \
- break; \
default: __e = __get_user_bad(); break; \
} \
- x = __r2; \
+ x = (typeof(*(p))) __r2; \
__e; \
})
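With the 8-byte case gone, get_user() on ARM only handles 1-, 2- and 4-byte objects; wider reads must go through copy_from_user(). A small sketch of both patterns (the helper names are made up):

#include <linux/errno.h>
#include <asm/uaccess.h>

static int example_read_u32(unsigned int __user *uptr, unsigned int *val)
{
	return get_user(*val, uptr) ? -EFAULT : 0;
}

static int example_read_u64(unsigned long long __user *uptr, unsigned long long *val)
{
	/* no 8-byte get_user() any more, so copy the value explicitly */
	return copy_from_user(val, uptr, sizeof(*val)) ? -EFAULT : 0;
}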
diff --git a/include/asm-parisc/irq.h b/include/asm-parisc/irq.h
index f876bdf2205..b0a30e2c981 100644
--- a/include/asm-parisc/irq.h
+++ b/include/asm-parisc/irq.h
@@ -8,6 +8,7 @@
#define _ASM_PARISC_IRQ_H
#include <linux/config.h>
+#include <linux/cpumask.h>
#include <asm/types.h>
#define NO_IRQ (-1)
@@ -49,10 +50,10 @@ extern int txn_alloc_irq(unsigned int nbits);
extern int txn_claim_irq(int);
extern unsigned int txn_alloc_data(unsigned int);
extern unsigned long txn_alloc_addr(unsigned int);
+extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
-
-extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
+extern int cpu_check_affinity(unsigned int irq, cpumask_t *dest);
/* soft power switch support (power.c) */
extern struct tasklet_struct power_tasklet;
diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h
index 9413f67a540..dbdbd2e9fdf 100644
--- a/include/asm-parisc/smp.h
+++ b/include/asm-parisc/smp.h
@@ -29,6 +29,7 @@ extern cpumask_t cpu_online_map;
#define cpu_logical_map(cpu) (cpu)
extern void smp_send_reschedule(int cpu);
+extern void smp_send_all_nop(void);
#endif /* !ASSEMBLY */
@@ -53,7 +54,11 @@ extern unsigned long cpu_present_mask;
#define raw_smp_processor_id() (current_thread_info()->cpu)
-#endif /* CONFIG_SMP */
+#else /* CONFIG_SMP */
+
+static inline void smp_send_all_nop(void) { return; }
+
+#endif
#define NO_PROC_ID 0xFF /* No processor magic marker */
#define ANY_PROC_ID 0xFF /* Any processor magic marker */
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index 7c3f406a746..16c2ac075fc 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -11,18 +11,25 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *x)
return *a == 0;
}
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
#define __raw_spin_unlock_wait(x) \
do { cpu_relax(); } while (__raw_spin_is_locked(x))
-static inline void __raw_spin_lock(raw_spinlock_t *x)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+ unsigned long flags)
{
volatile unsigned int *a;
mb();
a = __ldcw_align(x);
while (__ldcw(a) == 0)
- while (*a == 0);
+ while (*a == 0)
+ if (flags & PSW_SM_I) {
+ local_irq_enable();
+ cpu_relax();
+ local_irq_disable();
+ } else
+ cpu_relax();
mb();
}
@@ -60,26 +67,20 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
- unsigned long flags;
- local_irq_save(flags);
__raw_spin_lock(&rw->lock);
rw->counter++;
__raw_spin_unlock(&rw->lock);
- local_irq_restore(flags);
}
static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
- unsigned long flags;
- local_irq_save(flags);
__raw_spin_lock(&rw->lock);
rw->counter--;
__raw_spin_unlock(&rw->lock);
- local_irq_restore(flags);
}
/* write_lock is less trivial. We optimistically grab the lock and check
diff --git a/include/asm-parisc/tlbflush.h b/include/asm-parisc/tlbflush.h
index e97aa8d1eff..c9ec39c6fc6 100644
--- a/include/asm-parisc/tlbflush.h
+++ b/include/asm-parisc/tlbflush.h
@@ -12,21 +12,15 @@
* N class systems, only one PxTLB inter processor broadcast can be
* active at any one time on the Merced bus. This tlb purge
* synchronisation is fairly lightweight and harmless so we activate
- * it on all SMP systems not just the N class. */
-#ifdef CONFIG_SMP
+ * it on all SMP systems not just the N class. We also need to have
+ * preemption disabled on uniprocessor machines, and spin_lock does that
+ * nicely.
+ */
extern spinlock_t pa_tlb_lock;
#define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
-#else
-
-#define purge_tlb_start(x) do { } while(0)
-#define purge_tlb_end(x) do { } while (0)
-
-#endif
-
-
extern void flush_tlb_all(void);
/*
@@ -88,7 +82,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
flush_tlb_all();
else {
- preempt_disable();
mtsp(vma->vm_mm->context,1);
purge_tlb_start();
if (split_tlb) {
@@ -102,7 +95,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
pdtlb(start);
start += PAGE_SIZE;
}
- preempt_enable();
}
purge_tlb_end();
}
diff --git a/include/asm-ppc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 6e963511443..59a80163f75 100644
--- a/include/asm-ppc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -1,15 +1,22 @@
/*
- * This is based on both include/asm-sh/dma-mapping.h and
- * include/asm-ppc/pci.h
+ * Copyright (C) 2004 IBM
+ *
+ * Implements the generic device dma API for powerpc.
+ * the pci and vio busses
*/
-#ifndef __ASM_PPC_DMA_MAPPING_H
-#define __ASM_PPC_DMA_MAPPING_H
+#ifndef _ASM_DMA_MAPPING_H
+#define _ASM_DMA_MAPPING_H
#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
+#include <asm/bug.h>
+
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
@@ -24,22 +31,12 @@ extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
size_t size, int direction);
-#define dma_cache_inv(_start,_size) \
- invalidate_dcache_range(_start, (_start + _size))
-#define dma_cache_wback(_start,_size) \
- clean_dcache_range(_start, (_start + _size))
-#define dma_cache_wback_inv(_start,_size) \
- flush_dcache_range(_start, (_start + _size))
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
* Cache coherent cores.
*/
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-
#define __dma_alloc_coherent(gfp, size, handle) NULL
#define __dma_free_coherent(size, addr) do { } while (0)
#define __dma_sync(addr, size, rw) do { } while (0)
@@ -47,6 +44,30 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
#endif /* ! CONFIG_NOT_COHERENT_CACHE */
+#ifdef CONFIG_PPC64
+
+extern int dma_supported(struct device *dev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
+extern void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle);
+extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+ size_t size, enum dma_data_direction direction);
+extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction);
+extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction);
+extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size, enum dma_data_direction direction);
+extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction);
+extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nhwentries, enum dma_data_direction direction);
+
+#else /* CONFIG_PPC64 */
+
#define dma_supported(dev, mask) (1)
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
@@ -144,29 +165,27 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
/* We don't do anything here. */
#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0)
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction)
+#endif /* CONFIG_PPC64 */
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
-
__dma_sync(bus_to_virt(dma_handle), size, direction);
}
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction)
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
-
__dma_sync(bus_to_virt(dma_handle), size, direction);
}
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
{
int i;
@@ -176,9 +195,9 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
{
int i;
@@ -188,6 +207,15 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}
+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+#ifdef CONFIG_PPC64
+ return (dma_addr == DMA_ERROR_CODE);
+#else
+ return 0;
+#endif
+}
+
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
@@ -198,40 +226,60 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
static inline int dma_get_cache_alignment(void)
{
+#ifdef CONFIG_PPC64
+ /* no easy way to get cache size on all processors, so return
+ * the maximum possible, to be safe */
+ return (1 << L1_CACHE_SHIFT_MAX);
+#else
/*
* Each processor family will define its own L1_CACHE_SHIFT,
* L1_CACHE_BYTES wraps to this, so this is always safe.
*/
return L1_CACHE_BYTES;
+#endif
}
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction direction)
{
/* just sync everything for now */
dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction direction)
{
/* just sync everything for now */
dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}
static inline void dma_cache_sync(void *vaddr, size_t size,
- enum dma_data_direction direction)
+ enum dma_data_direction direction)
{
+ BUG_ON(direction == DMA_NONE);
__dma_sync(vaddr, size, (int)direction);
}
-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
- return 0;
-}
-
-#endif /* __ASM_PPC_DMA_MAPPING_H */
+/*
+ * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
+ */
+struct dma_mapping_ops {
+ void * (*alloc_coherent)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+ void (*free_coherent)(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+ dma_addr_t (*map_single)(struct device *dev, void *ptr,
+ size_t size, enum dma_data_direction direction);
+ void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction);
+ int (*map_sg)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction);
+ void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction);
+ int (*dma_supported)(struct device *dev, u64 mask);
+ int (*dac_dma_supported)(struct device *dev, u64 mask);
+};
+
+#endif /* _ASM_DMA_MAPPING_H */
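After the merge the same calling sequence works on both 32-bit and 64-bit powerpc; only dma_mapping_error() can report a failure, and only on the 64-bit (IOMMU) path where DMA_ERROR_CODE applies. A hedged sketch of the common pattern (device, buffer and error handling are illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(*handle))
		return -EIO;
	return 0;
}

static void example_unmap_buffer(struct device *dev, dma_addr_t handle, size_t len)
{
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}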
diff --git a/include/asm-ppc64/io.h b/include/asm-powerpc/io.h
index 77fc07c3c6b..48938d84d05 100644
--- a/include/asm-ppc64/io.h
+++ b/include/asm-powerpc/io.h
@@ -1,5 +1,5 @@
-#ifndef _PPC64_IO_H
-#define _PPC64_IO_H
+#ifndef _ASM_POWERPC_IO_H
+#define _ASM_POWERPC_IO_H
/*
* This program is free software; you can redistribute it and/or
@@ -8,7 +8,10 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h>
+#ifndef CONFIG_PPC64
+#include <asm-ppc/io.h>
+#else
+
#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/byteorder.h>
@@ -455,4 +458,5 @@ extern int check_legacy_ioport(unsigned long base_port);
#endif /* __KERNEL__ */
-#endif /* _PPC64_IO_H */
+#endif /* CONFIG_PPC64 */
+#endif /* _ASM_POWERPC_IO_H */
diff --git a/include/asm-ppc64/mmu.h b/include/asm-powerpc/mmu.h
index 1a7e0afa2dc..c1b4bbabbe9 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -1,3 +1,10 @@
+#ifndef _ASM_POWERPC_MMU_H_
+#define _ASM_POWERPC_MMU_H_
+
+#ifndef CONFIG_PPC64
+#include <asm-ppc/mmu.h>
+#else
+
/*
* PowerPC memory management structures
*
@@ -10,10 +17,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#ifndef _PPC64_MMU_H_
-#define _PPC64_MMU_H_
-
-#include <linux/config.h>
#include <asm/asm-compat.h>
#include <asm/page.h>
@@ -392,4 +395,5 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
#endif /* __ASSEMBLY */
-#endif /* _PPC64_MMU_H_ */
+#endif /* CONFIG_PPC64 */
+#endif /* _ASM_POWERPC_MMU_H_ */
diff --git a/include/asm-ppc64/mmu_context.h b/include/asm-powerpc/mmu_context.h
index 4f512e9fa6b..ea6798c7d5f 100644
--- a/include/asm-ppc64/mmu_context.h
+++ b/include/asm-powerpc/mmu_context.h
@@ -1,7 +1,10 @@
-#ifndef __PPC64_MMU_CONTEXT_H
-#define __PPC64_MMU_CONTEXT_H
+#ifndef __ASM_POWERPC_MMU_CONTEXT_H
+#define __ASM_POWERPC_MMU_CONTEXT_H
+
+#ifndef CONFIG_PPC64
+#include <asm-ppc/mmu_context.h>
+#else
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/mmu.h>
@@ -82,4 +85,5 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
local_irq_restore(flags);
}
-#endif /* __PPC64_MMU_CONTEXT_H */
+#endif /* CONFIG_PPC64 */
+#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
diff --git a/include/asm-ppc64/mmzone.h b/include/asm-powerpc/mmzone.h
index 54958d6cae0..54958d6cae0 100644
--- a/include/asm-ppc64/mmzone.h
+++ b/include/asm-powerpc/mmzone.h
diff --git a/include/asm-ppc64/pci-bridge.h b/include/asm-powerpc/pci-bridge.h
index cf04327a597..223ec7bd81d 100644
--- a/include/asm-ppc64/pci-bridge.h
+++ b/include/asm-powerpc/pci-bridge.h
@@ -1,8 +1,10 @@
-#ifdef __KERNEL__
-#ifndef _ASM_PCI_BRIDGE_H
-#define _ASM_PCI_BRIDGE_H
+#ifndef _ASM_POWERPC_PCI_BRIDGE_H
+#define _ASM_POWERPC_PCI_BRIDGE_H
+
+#ifndef CONFIG_PPC64
+#include <asm-ppc/pci-bridge.h>
+#else
-#include <linux/config.h>
#include <linux/pci.h>
#include <linux/list.h>
@@ -147,5 +149,5 @@ extern void pcibios_free_controller(struct pci_controller *phb);
#define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */
#define PCI_PROBE_DEVTREE 1 /* Instantiate from device tree */
+#endif /* CONFIG_PPC64 */
#endif
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc64/pci.h b/include/asm-powerpc/pci.h
index fafdf885a3c..d5934a076bd 100644
--- a/include/asm-ppc64/pci.h
+++ b/include/asm-powerpc/pci.h
@@ -1,5 +1,5 @@
-#ifndef __PPC64_PCI_H
-#define __PPC64_PCI_H
+#ifndef __ASM_POWERPC_PCI_H
+#define __ASM_POWERPC_PCI_H
#ifdef __KERNEL__
/*
@@ -18,6 +18,7 @@
#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/prom.h>
+#include <asm/pci-bridge.h>
#include <asm-generic/pci-dma-compat.h>
@@ -26,11 +27,21 @@
struct pci_dev;
-#ifdef CONFIG_PPC_ISERIES
+/* Values for the `which' argument to sys_pciconfig_iobase syscall. */
+#define IOBASE_BRIDGE_NUMBER 0
+#define IOBASE_MEMORY 1
+#define IOBASE_IO 2
+#define IOBASE_ISA_IO 3
+#define IOBASE_ISA_MEM 4
+
+/*
+ * Set this to 1 if you want the kernel to re-assign all PCI
+ * bus numbers
+ */
+extern int pci_assign_all_buses;
+#define pcibios_assign_all_busses() (pci_assign_all_buses)
+
#define pcibios_scan_all_fns(a, b) 0
-#else
-extern int pcibios_scan_all_fns(struct pci_bus *bus, int devfn);
-#endif
static inline void pcibios_set_master(struct pci_dev *dev)
{
@@ -50,6 +61,7 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
return channel ? 15 : 14;
}
+#ifdef CONFIG_PPC64
#define HAVE_ARCH_PCI_MWI 1
static inline int pcibios_prep_mwi(struct pci_dev *dev)
{
@@ -64,12 +76,10 @@ static inline int pcibios_prep_mwi(struct pci_dev *dev)
return 0;
}
-extern unsigned int pcibios_assign_all_busses(void);
-
extern struct dma_mapping_ops pci_dma_ops;
/* For DAC DMA, we currently don't support it by default, but
- * we let the platform override this
+ * we let 64-bit platforms override this.
*/
static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask)
{
@@ -102,6 +112,35 @@ extern int pci_domain_nr(struct pci_bus *bus);
/* Decide whether to display the domain number in /proc */
extern int pci_proc_domain(struct pci_bus *bus);
+#else /* 32-bit */
+
+#ifdef CONFIG_PCI
+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+ enum pci_dma_burst_strategy *strat,
+ unsigned long *strategy_parameter)
+{
+ *strat = PCI_DMA_BURST_INFINITY;
+ *strategy_parameter = ~0UL;
+}
+#endif
+
+/*
+ * At present there are very few 32-bit PPC machines that can have
+ * memory above the 4GB point, and we don't support that.
+ */
+#define pci_dac_dma_supported(pci_dev, mask) (0)
+
+/* Return the index of the PCI controller for device PDEV. */
+#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
+
+/* Set the name of the bus as it appears in /proc/bus/pci */
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PPC64 */
+
struct vm_area_struct;
/* Map a range of PCI memory or I/O space for a device into user space */
int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
@@ -110,6 +149,7 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP 1
+#ifdef CONFIG_PPC64
/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
dma_addr_t ADDR_NAME;
@@ -124,22 +164,40 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
(((PTR)->LEN_NAME) = (VAL))
-/* The PCI address space does equal the physical memory
- * address space. The networking and block device layers use
+/* The PCI address space does not equal the physical memory address
+ * space (we have an IOMMU). The IDE and SCSI device layers use
* this boolean for bounce buffer decisions.
*/
#define PCI_DMA_BUS_IS_PHYS (0)
+
+#else /* 32-bit */
+
+/* The PCI address space does equal the physical memory
+ * address space (no IOMMU). The IDE and SCSI device layers use
+ * this boolean for bounce buffer decisions.
+ */
+#define PCI_DMA_BUS_IS_PHYS (1)
+
+/* pci_unmap_{page,single} is a nop so... */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME) (0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME) (0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
+
+#endif /* CONFIG_PPC64 */
-extern void
-pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+extern void pcibios_resource_to_bus(struct pci_dev *dev,
+ struct pci_bus_region *region,
struct resource *res);
-extern void
-pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+extern void pcibios_bus_to_resource(struct pci_dev *dev,
+ struct resource *res,
struct pci_bus_region *region);
-static inline struct resource *
-pcibios_select_root(struct pci_dev *pdev, struct resource *res)
+static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
+ struct resource *res)
{
struct resource *root = NULL;
@@ -151,14 +209,12 @@ pcibios_select_root(struct pci_dev *pdev, struct resource *res)
return root;
}
-extern int
-unmap_bus_range(struct pci_bus *bus);
+extern int unmap_bus_range(struct pci_bus *bus);
-extern int
-remap_bus_range(struct pci_bus *bus);
+extern int remap_bus_range(struct pci_bus *bus);
-extern void
-pcibios_fixup_device_resources(struct pci_dev *dev, struct pci_bus *bus);
+extern void pcibios_fixup_device_resources(struct pci_dev *dev,
+ struct pci_bus *bus);
extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
@@ -180,14 +236,12 @@ extern pgprot_t pci_phys_mem_access_prot(struct file *file,
unsigned long size,
pgprot_t prot);
-#ifdef CONFIG_PPC_MULTIPLATFORM
+#if defined(CONFIG_PPC_MULTIPLATFORM) || defined(CONFIG_PPC32)
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
u64 *start, u64 *end);
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
+#endif /* CONFIG_PPC_MULTIPLATFORM || CONFIG_PPC32 */
#endif /* __KERNEL__ */
-
-#endif /* __PPC64_PCI_H */
+#endif /* __ASM_POWERPC_PCI_H */
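An illustrative (not-from-this-patch) driver fragment showing why the unmap-state macros are now provided on both sides: on 64-bit they expand to real fields consumed at unmap time, on 32-bit they compile away to nothing, so one source builds everywhere.

#include <linux/pci.h>

struct example_rx_desc {
	void *buf;
	DECLARE_PCI_UNMAP_ADDR(mapping)
	DECLARE_PCI_UNMAP_LEN(len)
};

static void example_unmap_rx(struct pci_dev *pdev, struct example_rx_desc *d)
{
	pci_unmap_single(pdev, pci_unmap_addr(d, mapping),
			 pci_unmap_len(d, len), PCI_DMA_FROMDEVICE);
}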
diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-powerpc/pgalloc.h
index dcf3622d194..bfc2113b363 100644
--- a/include/asm-ppc64/pgalloc.h
+++ b/include/asm-powerpc/pgalloc.h
@@ -1,5 +1,9 @@
-#ifndef _PPC64_PGALLOC_H
-#define _PPC64_PGALLOC_H
+#ifndef _ASM_POWERPC_PGALLOC_H
+#define _ASM_POWERPC_PGALLOC_H
+
+#ifndef CONFIG_PPC64
+#include <asm-ppc/pgalloc.h>
+#else
#include <linux/mm.h>
#include <linux/slab.h>
@@ -148,4 +152,5 @@ extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
#define check_pgt_cache() do { } while (0)
-#endif /* _PPC64_PGALLOC_H */
+#endif /* CONFIG_PPC64 */
+#endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/include/asm-ppc64/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index e9590c06ad9..e9590c06ad9 100644
--- a/include/asm-ppc64/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
diff --git a/include/asm-ppc64/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index 154f1840ece..154f1840ece 100644
--- a/include/asm-ppc64/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-powerpc/pgtable.h
index a9783ba7fe9..0303f57366c 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -1,5 +1,9 @@
-#ifndef _PPC64_PGTABLE_H
-#define _PPC64_PGTABLE_H
+#ifndef _ASM_POWERPC_PGTABLE_H
+#define _ASM_POWERPC_PGTABLE_H
+
+#ifndef CONFIG_PPC64
+#include <asm-ppc/pgtable.h>
+#else
/*
* This file contains the functions and defines necessary to modify and use
@@ -47,6 +51,13 @@ struct mm_struct;
#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
/*
+ * Define the address range of the imalloc VM area.
+ */
+#define PHBS_IO_BASE VMALLOC_END
+#define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */
+#define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE)
+
+/*
* Common bits in a linux-style PTE. These match the bits in the
* (hardware-defined) PowerPC PTE as closely as possible. Additional
* bits may be defined in pgtable-*.h
@@ -69,7 +80,7 @@ struct mm_struct;
#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY)
-/* __pgprot defined in asm-ppc64/page.h */
+/* __pgprot defined in asm-powerpc/page.h */
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
@@ -509,4 +520,5 @@ void pgtable_cache_init(void);
#endif /* __ASSEMBLY__ */
-#endif /* _PPC64_PGTABLE_H */
+#endif /* CONFIG_PPC64 */
+#endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h
index 2e36e5a7f4f..36cdc869e58 100644
--- a/include/asm-powerpc/ppc-pci.h
+++ b/include/asm-powerpc/ppc-pci.h
@@ -48,8 +48,6 @@ extern void pSeries_final_fixup(void);
extern void pSeries_irq_bus_setup(struct pci_bus *bus);
extern unsigned long pci_probe_only;
-extern unsigned long pci_assign_all_buses;
-extern int pci_read_irq_line(struct pci_dev *pci_dev);
/* ---- EEH internal-use-only related routines ---- */
#ifdef CONFIG_EEH
diff --git a/include/asm-ppc64/spinlock.h b/include/asm-powerpc/spinlock.h
index 7d84fb5e39f..caa4b14e0e9 100644
--- a/include/asm-ppc64/spinlock.h
+++ b/include/asm-powerpc/spinlock.h
@@ -18,31 +18,41 @@
*
* (the type definitions are in asm/spinlock_types.h)
*/
-#include <linux/config.h>
+#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>
+#endif
+#include <asm/asm-compat.h>
+#include <asm/synch.h>
#define __raw_spin_is_locked(x) ((x)->slock != 0)
+#ifdef CONFIG_PPC64
+/* use 0x800000yy when locked, where yy == CPU number */
+#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
+#else
+#define LOCK_TOKEN 1
+#endif
+
/*
* This returns the old value in the lock, so we succeeded
* in getting the lock if the return value is 0.
*/
static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
{
- unsigned long tmp, tmp2;
+ unsigned long tmp, token;
+ token = LOCK_TOKEN;
__asm__ __volatile__(
-" lwz %1,%3(13) # __spin_trylock\n\
-1: lwarx %0,0,%2\n\
+"1: lwarx %0,0,%2 # __spin_trylock\n\
cmpwi 0,%0,0\n\
bne- 2f\n\
stwcx. %1,0,%2\n\
bne- 1b\n\
isync\n\
-2:" : "=&r" (tmp), "=&r" (tmp2)
- : "r" (&lock->slock), "i" (offsetof(struct paca_struct, lock_token))
+2:" : "=&r" (tmp)
+ : "r" (token), "r" (&lock->slock)
: "cr0", "memory");
return tmp;
@@ -113,11 +123,17 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long
static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
- __asm__ __volatile__("lwsync # __raw_spin_unlock": : :"memory");
+ __asm__ __volatile__(SYNC_ON_SMP" # __raw_spin_unlock"
+ : : :"memory");
lock->slock = 0;
}
+#ifdef CONFIG_PPC64
extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
+#else
+#define __raw_spin_unlock_wait(lock) \
+ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#endif
/*
* Read-write spinlocks, allowing multiple readers
@@ -133,6 +149,14 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
#define __raw_read_can_lock(rw) ((rw)->lock >= 0)
#define __raw_write_can_lock(rw) (!(rw)->lock)
+#ifdef CONFIG_PPC64
+#define __DO_SIGN_EXTEND "extsw %0,%0\n"
+#define WRLOCK_TOKEN LOCK_TOKEN /* it's negative */
+#else
+#define __DO_SIGN_EXTEND
+#define WRLOCK_TOKEN (-1)
+#endif
+
/*
* This returns the old value in the lock + 1,
* so we got a read lock if the return value is > 0.
@@ -142,11 +166,12 @@ static long __inline__ __read_trylock(raw_rwlock_t *rw)
long tmp;
__asm__ __volatile__(
-"1: lwarx %0,0,%1 # read_trylock\n\
- extsw %0,%0\n\
- addic. %0,%0,1\n\
- ble- 2f\n\
- stwcx. %0,0,%1\n\
+"1: lwarx %0,0,%1 # read_trylock\n"
+ __DO_SIGN_EXTEND
+" addic. %0,%0,1\n\
+ ble- 2f\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1\n\
bne- 1b\n\
isync\n\
2:" : "=&r" (tmp)
@@ -162,18 +187,19 @@ static long __inline__ __read_trylock(raw_rwlock_t *rw)
*/
static __inline__ long __write_trylock(raw_rwlock_t *rw)
{
- long tmp, tmp2;
+ long tmp, token;
+ token = WRLOCK_TOKEN;
__asm__ __volatile__(
-" lwz %1,%3(13) # write_trylock\n\
-1: lwarx %0,0,%2\n\
+"1: lwarx %0,0,%2 # write_trylock\n\
cmpwi 0,%0,0\n\
- bne- 2f\n\
- stwcx. %1,0,%2\n\
+ bne- 2f\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %1,0,%2\n\
bne- 1b\n\
isync\n\
-2:" : "=&r" (tmp), "=&r" (tmp2)
- : "r" (&rw->lock), "i" (offsetof(struct paca_struct, lock_token))
+2:" : "=&r" (tmp)
+ : "r" (token), "r" (&rw->lock)
: "cr0", "memory");
return tmp;
@@ -224,8 +250,9 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
__asm__ __volatile__(
"eieio # read_unlock\n\
1: lwarx %0,0,%1\n\
- addic %0,%0,-1\n\
- stwcx. %0,0,%1\n\
+ addic %0,%0,-1\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1\n\
bne- 1b"
: "=&r"(tmp)
: "r"(&rw->lock)
@@ -234,7 +261,8 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
- __asm__ __volatile__("lwsync # write_unlock": : :"memory");
+ __asm__ __volatile__(SYNC_ON_SMP" # write_unlock"
+ : : :"memory");
rw->lock = 0;
}
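A C-level restatement of what the unified trylock now stores (purely a sketch meant to sit alongside the header above; the real operation is the atomic lwarx/stwcx. loop): the written value is LOCK_TOKEN, which is 1 on 32-bit and 0x800000yy (yy = CPU number) on 64-bit.

/* not real locking code: ignores atomicity, shown only for the token logic */
static inline unsigned long example_spin_trylock_semantics(raw_spinlock_t *lock)
{
	unsigned long old = lock->slock;	/* lwarx  */

	if (old == 0)
		lock->slock = LOCK_TOKEN;	/* stwcx. (may fail and retry) */
	return old;				/* zero means we took the lock */
}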
diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h
index 2bfdf9c9845..84ac6e258ee 100644
--- a/include/asm-ppc/io.h
+++ b/include/asm-ppc/io.h
@@ -545,6 +545,23 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
#include <asm/mpc8260_pci9.h>
#endif
+#ifdef CONFIG_NOT_COHERENT_CACHE
+
+#define dma_cache_inv(_start,_size) \
+ invalidate_dcache_range(_start, (_start + _size))
+#define dma_cache_wback(_start,_size) \
+ clean_dcache_range(_start, (_start + _size))
+#define dma_cache_wback_inv(_start,_size) \
+ flush_dcache_range(_start, (_start + _size))
+
+#else
+
+#define dma_cache_inv(_start,_size) do { } while (0)
+#define dma_cache_wback(_start,_size) do { } while (0)
+#define dma_cache_wback_inv(_start,_size) do { } while (0)
+
+#endif
+
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
diff --git a/include/asm-ppc64/dma-mapping.h b/include/asm-ppc64/dma-mapping.h
deleted file mode 100644
index fb68fa23bea..00000000000
--- a/include/asm-ppc64/dma-mapping.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/* Copyright (C) 2004 IBM
- *
- * Implements the generic device dma API for ppc64. Handles
- * the pci and vio busses
- */
-
-#ifndef _ASM_DMA_MAPPING_H
-#define _ASM_DMA_MAPPING_H
-
-#include <linux/types.h>
-#include <linux/cache.h>
-/* need struct page definitions */
-#include <linux/mm.h>
-#include <asm/scatterlist.h>
-#include <asm/bug.h>
-
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
-extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle);
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size, enum dma_data_direction direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction direction);
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
- return (dma_addr == DMA_ERROR_CODE);
-}
-
-/* Now for the API extensions over the pci_ one */
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d) (1)
-
-static inline int
-dma_get_cache_alignment(void)
-{
- /* no easy way to get cache size on all processors, so return
- * the maximum possible, to be safe */
- return (1 << L1_CACHE_SHIFT_MAX);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-static inline void
-dma_cache_sync(void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-/*
- * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
- */
-struct dma_mapping_ops {
- void * (*alloc_coherent)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
- void (*free_coherent)(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
- dma_addr_t (*map_single)(struct device *dev, void *ptr,
- size_t size, enum dma_data_direction direction);
- void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction);
- int (*map_sg)(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction);
- void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction);
- int (*dma_supported)(struct device *dev, u64 mask);
- int (*dac_dma_supported)(struct device *dev, u64 mask);
-};
-
-#endif /* _ASM_DMA_MAPPING_H */
diff --git a/include/asm-ppc64/imalloc.h b/include/asm-ppc64/imalloc.h
deleted file mode 100644
index 42adf7033a8..00000000000
--- a/include/asm-ppc64/imalloc.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef _PPC64_IMALLOC_H
-#define _PPC64_IMALLOC_H
-
-/*
- * Define the address range of the imalloc VM area.
- */
-#define PHBS_IO_BASE VMALLOC_END
-#define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */
-#define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE)
-
-
-/* imalloc region types */
-#define IM_REGION_UNUSED 0x1
-#define IM_REGION_SUBSET 0x2
-#define IM_REGION_EXISTS 0x4
-#define IM_REGION_OVERLAP 0x8
-#define IM_REGION_SUPERSET 0x10
-
-extern struct vm_struct * im_get_free_area(unsigned long size);
-extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
- int region_type);
-extern void im_free(void *addr);
-
-extern unsigned long ioremap_bot;
-
-#endif /* _PPC64_IMALLOC_H */
diff --git a/include/asm-ppc64/ptrace-common.h b/include/asm-ppc64/ptrace-common.h
deleted file mode 100644
index b1babb72967..00000000000
--- a/include/asm-ppc64/ptrace-common.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * linux/arch/ppc64/kernel/ptrace-common.h
- *
- * Copyright (c) 2002 Stephen Rothwell, IBM Coproration
- * Extracted from ptrace.c and ptrace32.c
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file README.legal in the main directory of
- * this archive for more details.
- */
-
-#ifndef _PPC64_PTRACE_COMMON_H
-#define _PPC64_PTRACE_COMMON_H
-
-#include <linux/config.h>
-#include <asm/system.h>
-
-/*
- * Set of msr bits that gdb can change on behalf of a process.
- */
-#define MSR_DEBUGCHANGE (MSR_FE0 | MSR_SE | MSR_BE | MSR_FE1)
-
-/*
- * Get contents of register REGNO in task TASK.
- */
-static inline unsigned long get_reg(struct task_struct *task, int regno)
-{
- unsigned long tmp = 0;
-
- /*
- * Put the correct FP bits in, they might be wrong as a result
- * of our lazy FP restore.
- */
- if (regno == PT_MSR) {
- tmp = ((unsigned long *)task->thread.regs)[PT_MSR];
- tmp |= task->thread.fpexc_mode;
- } else if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
- tmp = ((unsigned long *)task->thread.regs)[regno];
- }
-
- return tmp;
-}
-
-/*
- * Write contents of register REGNO in task TASK.
- */
-static inline int put_reg(struct task_struct *task, int regno,
- unsigned long data)
-{
- if (regno < PT_SOFTE) {
- if (regno == PT_MSR)
- data = (data & MSR_DEBUGCHANGE)
- | (task->thread.regs->msr & ~MSR_DEBUGCHANGE);
- ((unsigned long *)task->thread.regs)[regno] = data;
- return 0;
- }
- return -EIO;
-}
-
-static inline void set_single_step(struct task_struct *task)
-{
- struct pt_regs *regs = task->thread.regs;
- if (regs != NULL)
- regs->msr |= MSR_SE;
- set_ti_thread_flag(task->thread_info, TIF_SINGLESTEP);
-}
-
-static inline void clear_single_step(struct task_struct *task)
-{
- struct pt_regs *regs = task->thread.regs;
- if (regs != NULL)
- regs->msr &= ~MSR_SE;
- clear_ti_thread_flag(task->thread_info, TIF_SINGLESTEP);
-}
-
-#ifdef CONFIG_ALTIVEC
-/*
- * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
- * The transfer totals 34 quadword. Quadwords 0-31 contain the
- * corresponding vector registers. Quadword 32 contains the vscr as the
- * last word (offset 12) within that quadword. Quadword 33 contains the
- * vrsave as the first word (offset 0) within the quadword.
- *
- * This definition of the VMX state is compatible with the current PPC32
- * ptrace interface. This allows signal handling and ptrace to use the
- * same structures. This also simplifies the implementation of a bi-arch
- * (combined (32- and 64-bit) gdb.
- */
-
-/*
- * Get contents of AltiVec register state in task TASK
- */
-static inline int get_vrregs(unsigned long __user *data,
- struct task_struct *task)
-{
- unsigned long regsize;
-
- /* copy AltiVec registers VR[0] .. VR[31] */
- regsize = 32 * sizeof(vector128);
- if (copy_to_user(data, task->thread.vr, regsize))
- return -EFAULT;
- data += (regsize / sizeof(unsigned long));
-
- /* copy VSCR */
- regsize = 1 * sizeof(vector128);
- if (copy_to_user(data, &task->thread.vscr, regsize))
- return -EFAULT;
- data += (regsize / sizeof(unsigned long));
-
- /* copy VRSAVE */
- if (put_user(task->thread.vrsave, (u32 __user *)data))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * Write contents of AltiVec register state into task TASK.
- */
-static inline int set_vrregs(struct task_struct *task,
- unsigned long __user *data)
-{
- unsigned long regsize;
-
- /* copy AltiVec registers VR[0] .. VR[31] */
- regsize = 32 * sizeof(vector128);
- if (copy_from_user(task->thread.vr, data, regsize))
- return -EFAULT;
- data += (regsize / sizeof(unsigned long));
-
- /* copy VSCR */
- regsize = 1 * sizeof(vector128);
- if (copy_from_user(&task->thread.vscr, data, regsize))
- return -EFAULT;
- data += (regsize / sizeof(unsigned long));
-
- /* copy VRSAVE */
- if (get_user(task->thread.vrsave, (u32 __user *)data))
- return -EFAULT;
-
- return 0;
-}
-#endif
-
-static inline int ptrace_set_debugreg(struct task_struct *task,
- unsigned long addr, unsigned long data)
-{
- /* We only support one DABR and no IABRS at the moment */
- if (addr > 0)
- return -EINVAL;
-
- /* The bottom 3 bits are flags */
- if ((data & ~0x7UL) >= TASK_SIZE)
- return -EIO;
-
- /* Ensure translation is on */
- if (data && !(data & DABR_TRANSLATION))
- return -EIO;
-
- task->thread.dabr = data;
- return 0;
-}
-
-#endif /* _PPC64_PTRACE_COMMON_H */
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 24dc39651bc..10f8b51cec8 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -56,7 +56,7 @@
".section __ex_table,\"a\"\n" \
" .align 8\n" \
" .quad 1b,3b\n" \
- ".previous":"=&bDS" (ret__), "=a"(a), "=d"(b)\
+ ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\
:"c"(msr), "i"(-EIO), "0"(0)); \
ret__; })
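The constraint fix matters because rdmsr_safe() takes pointers: with "=a"(a) the asm wrote into the local pointer values instead of through them. A hedged sketch of a caller (helper name invented):

#include <linux/errno.h>
#include <asm/msr.h>

static int example_read_msr(unsigned int msr, unsigned int *lo, unsigned int *hi)
{
	/* returns 0 on success, -EIO if the MSR access faulted */
	return rdmsr_safe(msr, lo, hi);
}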
diff --git a/include/linux/cciss_ioctl.h b/include/linux/cciss_ioctl.h
index 424d5e622b4..6e27f42e3a5 100644
--- a/include/linux/cciss_ioctl.h
+++ b/include/linux/cciss_ioctl.h
@@ -10,8 +10,8 @@
typedef struct _cciss_pci_info_struct
{
unsigned char bus;
- unsigned short domain;
unsigned char dev_fn;
+ unsigned short domain;
__u32 board_id;
} cciss_pci_info_struct;
diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h
index b5d660089de..2b54eac738e 100644
--- a/include/linux/hdreg.h
+++ b/include/linux/hdreg.h
@@ -80,10 +80,12 @@
/*
* Define standard taskfile in/out register
*/
-#define IDE_TASKFILE_STD_OUT_FLAGS 0xFE
#define IDE_TASKFILE_STD_IN_FLAGS 0xFE
-#define IDE_HOB_STD_OUT_FLAGS 0x3C
#define IDE_HOB_STD_IN_FLAGS 0x3C
+#ifndef __KERNEL__
+#define IDE_TASKFILE_STD_OUT_FLAGS 0xFE
+#define IDE_HOB_STD_OUT_FLAGS 0x3C
+#endif
typedef unsigned char task_ioreg_t;
typedef unsigned long sata_ioreg_t;
diff --git a/include/linux/ide.h b/include/linux/ide.h
index ac8b25fa650..a39c3c59789 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1089,9 +1089,11 @@ enum {
/*
* Subdrivers support.
+ *
+ * The gendriver.owner field should be set to the module owner of this driver.
+ * The gendriver.name field should be set to the name of this driver
*/
typedef struct ide_driver_s {
- struct module *owner;
const char *version;
u8 media;
unsigned supports_dsc_overlap : 1;
@@ -1199,37 +1201,11 @@ extern u64 ide_get_error_location(ide_drive_t *, char *);
*/
typedef enum {
ide_wait, /* insert rq at end of list, and wait for it */
- ide_next, /* insert rq immediately after current request */
ide_preempt, /* insert rq in front of current request */
ide_head_wait, /* insert rq in front of current request and wait for it */
ide_end /* insert rq at end of list, but don't wait for it */
} ide_action_t;
-/*
- * This function issues a special IDE device request
- * onto the request queue.
- *
- * If action is ide_wait, then the rq is queued at the end of the
- * request queue, and the function sleeps until it has been processed.
- * This is for use when invoked from an ioctl handler.
- *
- * If action is ide_preempt, then the rq is queued at the head of
- * the request queue, displacing the currently-being-processed
- * request and this function returns immediately without waiting
- * for the new rq to be completed. This is VERY DANGEROUS, and is
- * intended for careful use by the ATAPI tape/cdrom driver code.
- *
- * If action is ide_next, then the rq is queued immediately after
- * the currently-being-processed-request (if any), and the function
- * returns without waiting for the new rq to be completed. As above,
- * This is VERY DANGEROUS, and is intended for careful use by the
- * ATAPI tape/cdrom driver code.
- *
- * If action is ide_end, then the rq is queued at the end of the
- * request queue, and the function returns immediately without waiting
- * for the new rq to be completed. This is again intended for careful
- * use by the ATAPI tape/cdrom driver code.
- */
extern int ide_do_drive_cmd(ide_drive_t *, struct request *, ide_action_t);
/*
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1013a42d10b..0986d19be0b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -940,7 +940,9 @@ unsigned long max_sane_readahead(unsigned long nr);
/* Do stack extension */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+#ifdef CONFIG_IA64
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+#endif
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
diff --git a/include/linux/netfilter_ipv4/ipt_sctp.h b/include/linux/netfilter_ipv4/ipt_sctp.h
index e93a9ec99fc..80b3dbacd19 100644
--- a/include/linux/netfilter_ipv4/ipt_sctp.h
+++ b/include/linux/netfilter_ipv4/ipt_sctp.h
@@ -7,8 +7,6 @@
#define IPT_SCTP_VALID_FLAGS 0x07
-#define ELEMCOUNT(x) (sizeof(x)/sizeof(x[0]))
-
struct ipt_sctp_flag_info {
u_int8_t chunktype;
@@ -59,21 +57,21 @@ struct ipt_sctp_info {
#define SCTP_CHUNKMAP_RESET(chunkmap) \
do { \
int i; \
- for (i = 0; i < ELEMCOUNT(chunkmap); i++) \
+ for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \
chunkmap[i] = 0; \
} while (0)
#define SCTP_CHUNKMAP_SET_ALL(chunkmap) \
do { \
int i; \
- for (i = 0; i < ELEMCOUNT(chunkmap); i++) \
+ for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \
chunkmap[i] = ~0; \
} while (0)
#define SCTP_CHUNKMAP_COPY(destmap, srcmap) \
do { \
int i; \
- for (i = 0; i < ELEMCOUNT(chunkmap); i++) \
+ for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \
destmap[i] = srcmap[i]; \
} while (0)
@@ -81,7 +79,7 @@ struct ipt_sctp_info {
({ \
int i; \
int flag = 1; \
- for (i = 0; i < ELEMCOUNT(chunkmap); i++) { \
+ for (i = 0; i < ARRAY_SIZE(chunkmap); i++) { \
if (chunkmap[i]) { \
flag = 0; \
break; \
@@ -94,7 +92,7 @@ struct ipt_sctp_info {
({ \
int i; \
int flag = 1; \
- for (i = 0; i < ELEMCOUNT(chunkmap); i++) { \
+ for (i = 0; i < ARRAY_SIZE(chunkmap); i++) { \
if (chunkmap[i] != ~0) { \
flag = 0; \
break; \
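ARRAY_SIZE() replaces the private ELEMCOUNT() helper; in kernel code it comes from <linux/kernel.h>, so callers are unchanged. A hedged sketch of driving the chunk-map macros (assuming the chunkmap field of struct ipt_sctp_info, which is not shown in this hunk):

#include <linux/kernel.h>
#include <linux/netfilter_ipv4/ipt_sctp.h>

static void example_match_init_chunks(struct ipt_sctp_info *info)
{
	SCTP_CHUNKMAP_RESET(info->chunkmap);	/* clear every bit */
	SCTP_CHUNKMAP_SET(info->chunkmap, 1);	/* chunk type 1 == INIT */
}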
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 7b387faedb4..1e737e269db 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -620,6 +620,7 @@
#define PCI_DEVICE_ID_SI_961 0x0961
#define PCI_DEVICE_ID_SI_962 0x0962
#define PCI_DEVICE_ID_SI_963 0x0963
+#define PCI_DEVICE_ID_SI_965 0x0965
#define PCI_DEVICE_ID_SI_5511 0x5511
#define PCI_DEVICE_ID_SI_5513 0x5513
#define PCI_DEVICE_ID_SI_5518 0x5518
@@ -1198,6 +1199,7 @@
#define PCI_DEVICE_ID_VIA_3269_0 0x0269
#define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282
#define PCI_DEVICE_ID_VIA_8363_0 0x0305
+#define PCI_DEVICE_ID_VIA_P4M800CE 0x0314
#define PCI_DEVICE_ID_VIA_8371_0 0x0391
#define PCI_DEVICE_ID_VIA_8501_0 0x0501
#define PCI_DEVICE_ID_VIA_82C561 0x0561
@@ -1234,6 +1236,7 @@
#define PCI_DEVICE_ID_VIA_8703_51_0 0x3148
#define PCI_DEVICE_ID_VIA_8237_SATA 0x3149
#define PCI_DEVICE_ID_VIA_XN266 0x3156
+#define PCI_DEVICE_ID_VIA_6410 0x3164
#define PCI_DEVICE_ID_VIA_8754C_0 0x3168
#define PCI_DEVICE_ID_VIA_8235 0x3177
#define PCI_DEVICE_ID_VIA_8385_0 0x3188
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0a8ea8b3581..8c5d6001a92 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -206,6 +206,7 @@ enum {
* @nfct: Associated connection, if any
* @ipvs_property: skbuff is owned by ipvs
* @nfctinfo: Relationship of this skb to the connection
+ * @nfct_reasm: netfilter conntrack re-assembly pointer
* @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
* @tc_index: Traffic control index
* @tc_verd: traffic control verdict
@@ -264,16 +265,14 @@ struct sk_buff {
nohdr:1,
nfctinfo:3;
__u8 pkt_type:3,
- fclone:2;
+ fclone:2,
+ ipvs_property:1;
__be16 protocol;
void (*destructor)(struct sk_buff *skb);
#ifdef CONFIG_NETFILTER
__u32 nfmark;
struct nf_conntrack *nfct;
-#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
- __u8 ipvs_property:1;
-#endif
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct sk_buff *nfct_reasm;
#endif
diff --git a/include/linux/uinput.h b/include/linux/uinput.h
index 84876077027..0ff7ca68e5c 100644
--- a/include/linux/uinput.h
+++ b/include/linux/uinput.h
@@ -34,8 +34,7 @@
#define UINPUT_BUFFER_SIZE 16
#define UINPUT_NUM_REQUESTS 16
-/* state flags => bit index for {set|clear|test}_bit ops */
-#define UIST_CREATED 0
+enum uinput_state { UIST_NEW_DEVICE, UIST_SETUP_COMPLETE, UIST_CREATED };
struct uinput_request {
int id;
@@ -52,11 +51,12 @@ struct uinput_request {
struct uinput_device {
struct input_dev *dev;
- unsigned long state;
+ struct semaphore sem;
+ enum uinput_state state;
wait_queue_head_t waitq;
- unsigned char ready,
- head,
- tail;
+ unsigned char ready;
+ unsigned char head;
+ unsigned char tail;
struct input_event buff[UINPUT_BUFFER_SIZE];
struct uinput_request *requests[UINPUT_NUM_REQUESTS];
@@ -91,6 +91,7 @@ struct uinput_ff_erase {
#define UI_SET_SNDBIT _IOW(UINPUT_IOCTL_BASE, 106, int)
#define UI_SET_FFBIT _IOW(UINPUT_IOCTL_BASE, 107, int)
#define UI_SET_PHYS _IOW(UINPUT_IOCTL_BASE, 108, char*)
+#define UI_SET_SWBIT _IOW(UINPUT_IOCTL_BASE, 109, int)
#define UI_BEGIN_FF_UPLOAD _IOWR(UINPUT_IOCTL_BASE, 200, struct uinput_ff_upload)
#define UI_END_FF_UPLOAD _IOW(UINPUT_IOCTL_BASE, 201, struct uinput_ff_upload)
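A hedged user-space sketch of the new UI_SET_SWBIT ioctl (the device node path and choice of switch bit are illustrative; the EV_SW/SW_* constants live in <linux/input.h>):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/input.h>
#include <linux/uinput.h>

static int example_open_switch_device(void)
{
	int fd = open("/dev/uinput", O_WRONLY);	/* on some systems /dev/input/uinput */

	if (fd < 0)
		return -1;
	ioctl(fd, UI_SET_EVBIT, EV_SW);		/* device will report switch events */
	ioctl(fd, UI_SET_SWBIT, 0);		/* enable switch bit 0 */
	return fd;
}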
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 6addb4d464d..0a2ad51cff8 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -237,6 +237,8 @@ extern struct ipv6_txoptions * ipv6_renew_options(struct sock *sk, struct ipv6_t
int newtype,
struct ipv6_opt_hdr __user *newopt,
int newoptlen);
+struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
+ struct ipv6_txoptions *opt);
extern int ip6_frag_nqueues;
extern atomic_t ip6_frag_mem;