Diffstat (limited to 'arch/parisc')
-rw-r--r--   arch/parisc/Kconfig                     |   1
-rw-r--r--   arch/parisc/Makefile                    |   2
-rw-r--r--   arch/parisc/include/asm/atomic.h        |  11
-rw-r--r--   arch/parisc/include/asm/byteorder.h     |  76
-rw-r--r--   arch/parisc/include/asm/checksum.h      |   2
-rw-r--r--   arch/parisc/include/asm/dma-mapping.h   |   2
-rw-r--r--   arch/parisc/include/asm/io.h            |  12
-rw-r--r--   arch/parisc/include/asm/mmu_context.h   |  13
-rw-r--r--   arch/parisc/include/asm/module.h        |   6
-rw-r--r--   arch/parisc/include/asm/processor.h     |   4
-rw-r--r--   arch/parisc/include/asm/smp.h           |   2
-rw-r--r--   arch/parisc/include/asm/swab.h          |  66
-rw-r--r--   arch/parisc/include/asm/uaccess.h       |   2
-rw-r--r--   arch/parisc/kernel/drivers.c            |  40
-rw-r--r--   arch/parisc/kernel/hpmc.S               |   8
-rw-r--r--   arch/parisc/kernel/init_task.c          |   1
-rw-r--r--   arch/parisc/kernel/irq.c                |  17
-rw-r--r--   arch/parisc/kernel/module.c             | 216
-rw-r--r--   arch/parisc/kernel/pci-dma.c            |   2
-rw-r--r--   arch/parisc/kernel/pdc_cons.c           |   2
-rw-r--r--   arch/parisc/kernel/perf.c               |   4
-rw-r--r--   arch/parisc/kernel/processor.c          |  68
-rw-r--r--   arch/parisc/kernel/setup.c              |  11
-rw-r--r--   arch/parisc/kernel/smp.c                |  47
-rw-r--r--   arch/parisc/kernel/time.c               |   4
-rw-r--r--   arch/parisc/kernel/topology.c           |   4
-rw-r--r--   arch/parisc/kernel/traps.c              |   9
-rw-r--r--   arch/parisc/kernel/unwind.c             |   2
-rw-r--r--   arch/parisc/lib/iomap.c                 |   2
-rw-r--r--   arch/parisc/lib/memcpy.c                |   2
-rw-r--r--   arch/parisc/mm/fault.c                  |  58
31 files changed, 379 insertions, 317 deletions
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 644a70b1b04..aacf11d3372 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -11,6 +11,7 @@ config PARISC
select HAVE_OPROFILE
select RTC_CLASS
select RTC_DRV_PARISC
+ select INIT_ALL_POSSIBLE
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
in many of their workstations & servers (HP9000 700 and 800 series,
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 5ddad7bd60a..0d428278356 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -77,7 +77,7 @@ libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name`
drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/
-PALO := $(shell if which palo; then : ; \
+PALO := $(shell if (which palo 2>&1); then : ; \
elif [ -x /sbin/palo ]; then echo /sbin/palo; \
fi)
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 57fcc4a5ebb..edbfe25c5fc 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -155,14 +155,11 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
-/* Note that we need not lock read accesses - aligned word writes/reads
- * are atomic, so a reader never sees unconsistent values.
- *
- * Cache-line alignment would conflict with, for example, linux/module.h
+/*
+ * Note that we need not lock read accesses - aligned word writes/reads
+ * are atomic, so a reader never sees inconsistent values.
*/
-typedef struct { volatile int counter; } atomic_t;
-
/* It's possible to reduce all atomic operations to either
* __atomic_add_return, atomic_set and atomic_read (the latter
* is there only for consistency).
@@ -260,8 +257,6 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
#ifdef CONFIG_64BIT
-typedef struct { volatile s64 counter; } atomic64_t;
-
#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
static __inline__ int
diff --git a/arch/parisc/include/asm/byteorder.h b/arch/parisc/include/asm/byteorder.h
index db148313de5..58af2c5f5d6 100644
--- a/arch/parisc/include/asm/byteorder.h
+++ b/arch/parisc/include/asm/byteorder.h
@@ -1,82 +1,6 @@
#ifndef _PARISC_BYTEORDER_H
#define _PARISC_BYTEORDER_H
-#include <asm/types.h>
-#include <linux/compiler.h>
-
-#ifdef __GNUC__
-
-static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
-{
- __asm__("dep %0, 15, 8, %0\n\t" /* deposit 00ab -> 0bab */
- "shd %%r0, %0, 8, %0" /* shift 000000ab -> 00ba */
- : "=r" (x)
- : "0" (x));
- return x;
-}
-
-static __inline__ __attribute_const__ __u32 ___arch__swab24(__u32 x)
-{
- __asm__("shd %0, %0, 8, %0\n\t" /* shift xabcxabc -> cxab */
- "dep %0, 15, 8, %0\n\t" /* deposit cxab -> cbab */
- "shd %%r0, %0, 8, %0" /* shift 0000cbab -> 0cba */
- : "=r" (x)
- : "0" (x));
- return x;
-}
-
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
-{
- unsigned int temp;
- __asm__("shd %0, %0, 16, %1\n\t" /* shift abcdabcd -> cdab */
- "dep %1, 15, 8, %1\n\t" /* deposit cdab -> cbab */
- "shd %0, %1, 8, %0" /* shift abcdcbab -> dcba */
- : "=r" (x), "=&r" (temp)
- : "0" (x));
- return x;
-}
-
-
-#if BITS_PER_LONG > 32
-/*
-** From "PA-RISC 2.0 Architecture", HP Professional Books.
-** See Appendix I page 8 , "Endian Byte Swapping".
-**
-** Pretty cool algorithm: (* == zero'd bits)
-** PERMH 01234567 -> 67452301 into %0
-** HSHL 67452301 -> 7*5*3*1* into %1
-** HSHR 67452301 -> *6*4*2*0 into %0
-** OR %0 | %1 -> 76543210 into %0 (all done!)
-*/
-static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) {
- __u64 temp;
- __asm__("permh,3210 %0, %0\n\t"
- "hshl %0, 8, %1\n\t"
- "hshr,u %0, 8, %0\n\t"
- "or %1, %0, %0"
- : "=r" (x), "=&r" (temp)
- : "0" (x));
- return x;
-}
-#define __arch__swab64(x) ___arch__swab64(x)
-#define __BYTEORDER_HAS_U64__
-#elif !defined(__STRICT_ANSI__)
-static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
-{
- __u32 t1 = ___arch__swab32((__u32) x);
- __u32 t2 = ___arch__swab32((__u32) (x >> 32));
- return (((__u64) t1 << 32) | t2);
-}
-#define __arch__swab64(x) ___arch__swab64(x)
-#define __BYTEORDER_HAS_U64__
-#endif
-
-#define __arch__swab16(x) ___arch__swab16(x)
-#define __arch__swab24(x) ___arch__swab24(x)
-#define __arch__swab32(x) ___arch__swab32(x)
-
-#endif /* __GNUC__ */
-
#include <linux/byteorder/big_endian.h>
#endif /* _PARISC_BYTEORDER_H */
diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
index e9639ccc3fc..c84b2fcb18a 100644
--- a/arch/parisc/include/asm/checksum.h
+++ b/arch/parisc/include/asm/checksum.h
@@ -182,7 +182,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
#endif
: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len)
: "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto)
- : "r19", "r20", "r21", "r22");
+ : "r19", "r20", "r21", "r22", "memory");
return csum_fold(sum);
}
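
The one-line change above adds "memory" to the clobber list of the csum_ipv6_magic() inline assembly: the asm loads the address words through the saddr/daddr pointers, so the compiler must not reorder surrounding accesses or keep stale values cached across the statement. A minimal stand-alone sketch of the idiom (the asm body here is purely illustrative, not taken from this patch):

#include <stdint.h>

/* An asm block that dereferences a pointer operand needs a "memory"
 * clobber; without it GCC may reorder or elide surrounding loads and
 * stores because it assumes the asm touches no memory. */
static inline uint32_t load_word(const uint32_t *p)
{
	uint32_t val;

	__asm__ volatile("ldw 0(%1), %0"	/* PA-RISC load word */
			 : "=r" (val)
			 : "r" (p)
			 : "memory");
	return val;
}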
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 53af696f23d..da694338090 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -5,7 +5,7 @@
#include <asm/cacheflush.h>
#include <asm/scatterlist.h>
-/* See Documentation/DMA-mapping.txt */
+/* See Documentation/PCI/PCI-DMA-mapping.txt */
struct hppa_dma_ops {
int (*dma_supported)(struct device *dev, u64 mask);
void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 55ddb184210..d3031d1f9d0 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -4,12 +4,6 @@
#include <linux/types.h>
#include <asm/pgtable.h>
-extern unsigned long parisc_vmerge_boundary;
-extern unsigned long parisc_vmerge_max_size;
-
-#define BIO_VMERGE_BOUNDARY parisc_vmerge_boundary
-#define BIO_VMERGE_MAX_SIZE parisc_vmerge_max_size
-
#define virt_to_phys(a) ((unsigned long)__pa(a))
#define phys_to_virt(a) __va(a)
#define virt_to_bus virt_to_phys
@@ -182,9 +176,9 @@ static inline void __raw_writeq(unsigned long long b, volatile void __iomem *add
/* readb can never be const, so use __fswab instead of le*_to_cpu */
#define readb(addr) __raw_readb(addr)
-#define readw(addr) __fswab16(__raw_readw(addr))
-#define readl(addr) __fswab32(__raw_readl(addr))
-#define readq(addr) __fswab64(__raw_readq(addr))
+#define readw(addr) le16_to_cpu(__raw_readw(addr))
+#define readl(addr) le32_to_cpu(__raw_readl(addr))
+#define readq(addr) le64_to_cpu(__raw_readq(addr))
#define writeb(b, addr) __raw_writeb(b, addr)
#define writew(b, addr) __raw_writew(cpu_to_le16(b), addr)
#define writel(b, addr) __raw_writel(cpu_to_le32(b), addr)
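
Besides dropping the BIO_VMERGE_* knobs, this converts readw/readl/readq from the removed __fswab helpers to the generic le16_to_cpu/le32_to_cpu/le64_to_cpu, matching the cpu_to_le* already used on the write side. On this big-endian machine the helpers still reduce to a byte swap of the little-endian bus value. A rough user-space sketch of what le32_to_cpu amounts to on a big-endian host, for illustration only:

#include <stdint.h>

/* Illustrative stand-in for le32_to_cpu on a big-endian CPU: a value
 * read from a little-endian device register is byte-swapped before
 * the CPU interprets it. */
static inline uint32_t le32_to_cpu_sketch(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}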
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index 85856c74ad1..354b2aca990 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -34,16 +34,21 @@ destroy_context(struct mm_struct *mm)
mm->context = 0;
}
-static inline void load_context(mm_context_t context)
+static inline unsigned long __space_to_prot(mm_context_t context)
{
- mtsp(context, 3);
#if SPACEID_SHIFT == 0
- mtctl(context << 1,8);
+ return context << 1;
#else
- mtctl(context >> (SPACEID_SHIFT - 1),8);
+ return context >> (SPACEID_SHIFT - 1);
#endif
}
+static inline void load_context(mm_context_t context)
+{
+ mtsp(context, 3);
+ mtctl(__space_to_prot(context), 8);
+}
+
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
diff --git a/arch/parisc/include/asm/module.h b/arch/parisc/include/asm/module.h
index c2cb49e934c..1f4123427ea 100644
--- a/arch/parisc/include/asm/module.h
+++ b/arch/parisc/include/asm/module.h
@@ -23,8 +23,10 @@ struct mod_arch_specific
{
unsigned long got_offset, got_count, got_max;
unsigned long fdesc_offset, fdesc_count, fdesc_max;
- unsigned long stub_offset, stub_count, stub_max;
- unsigned long init_stub_offset, init_stub_count, init_stub_max;
+ struct {
+ unsigned long stub_offset;
+ unsigned int stub_entries;
+ } *section;
int unwind_section;
struct unwind_table *unwind;
};
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 3c9d34844c8..9d64df8754b 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -17,6 +17,7 @@
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/system.h>
+#include <asm/percpu.h>
#endif /* __ASSEMBLY__ */
#define KERNEL_STACK_SIZE (4*PAGE_SIZE)
@@ -109,8 +110,7 @@ struct cpuinfo_parisc {
};
extern struct system_cpuinfo_parisc boot_cpu_data;
-extern struct cpuinfo_parisc cpu_data[NR_CPUS];
-#define current_cpu_data cpu_data[smp_processor_id()]
+DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data);
#define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF)
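
cpu_data moves from a fixed cpu_data[NR_CPUS] array to the per-CPU infrastructure, so every former cpu_data[n] access in the hunks below becomes per_cpu(cpu_data, n) (and the current_cpu_data macro goes away). A user-space sketch of the resulting access pattern, using hypothetical stand-ins for the kernel's DEFINE_PER_CPU/per_cpu macros (the real ones place each copy in a per-CPU data section, not a flat array):

#include <stdio.h>

#define NR_CPUS 4
/* Hypothetical stand-ins, for shape only. */
#define DEFINE_PER_CPU(type, name)	type name[NR_CPUS]
#define per_cpu(name, cpu)		(name[cpu])

struct cpuinfo_sketch {
	unsigned long txn_addr;
	unsigned long loops_per_jiffy;
};

static DEFINE_PER_CPU(struct cpuinfo_sketch, cpu_data);

int main(void)
{
	unsigned long cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		struct cpuinfo_sketch *p = &per_cpu(cpu_data, cpu);

		p->loops_per_jiffy = 250000 * (cpu + 1);	/* made-up */
		printf("cpu %lu: lpj=%lu\n", cpu, p->loops_per_jiffy);
	}
	return 0;
}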
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h
index 409e698f436..6ef4b7867b1 100644
--- a/arch/parisc/include/asm/smp.h
+++ b/arch/parisc/include/asm/smp.h
@@ -16,8 +16,6 @@
#include <linux/cpumask.h>
typedef unsigned long address_t;
-extern cpumask_t cpu_online_map;
-
/*
* Private routines/data
diff --git a/arch/parisc/include/asm/swab.h b/arch/parisc/include/asm/swab.h
new file mode 100644
index 00000000000..3ff16c5a335
--- /dev/null
+++ b/arch/parisc/include/asm/swab.h
@@ -0,0 +1,66 @@
+#ifndef _PARISC_SWAB_H
+#define _PARISC_SWAB_H
+
+#include <asm/types.h>
+#include <linux/compiler.h>
+
+#define __SWAB_64_THRU_32__
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+ __asm__("dep %0, 15, 8, %0\n\t" /* deposit 00ab -> 0bab */
+ "shd %%r0, %0, 8, %0" /* shift 000000ab -> 00ba */
+ : "=r" (x)
+ : "0" (x));
+ return x;
+}
+#define __arch_swab16 __arch_swab16
+
+static inline __attribute_const__ __u32 __arch_swab24(__u32 x)
+{
+ __asm__("shd %0, %0, 8, %0\n\t" /* shift xabcxabc -> cxab */
+ "dep %0, 15, 8, %0\n\t" /* deposit cxab -> cbab */
+ "shd %%r0, %0, 8, %0" /* shift 0000cbab -> 0cba */
+ : "=r" (x)
+ : "0" (x));
+ return x;
+}
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+ unsigned int temp;
+ __asm__("shd %0, %0, 16, %1\n\t" /* shift abcdabcd -> cdab */
+ "dep %1, 15, 8, %1\n\t" /* deposit cdab -> cbab */
+ "shd %0, %1, 8, %0" /* shift abcdcbab -> dcba */
+ : "=r" (x), "=&r" (temp)
+ : "0" (x));
+ return x;
+}
+#define __arch_swab32 __arch_swab32
+
+#if BITS_PER_LONG > 32
+/*
+** From "PA-RISC 2.0 Architecture", HP Professional Books.
+** See Appendix I page 8 , "Endian Byte Swapping".
+**
+** Pretty cool algorithm: (* == zero'd bits)
+** PERMH 01234567 -> 67452301 into %0
+** HSHL 67452301 -> 7*5*3*1* into %1
+** HSHR 67452301 -> *6*4*2*0 into %0
+** OR %0 | %1 -> 76543210 into %0 (all done!)
+*/
+static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
+{
+ __u64 temp;
+ __asm__("permh,3210 %0, %0\n\t"
+ "hshl %0, 8, %1\n\t"
+ "hshr,u %0, 8, %0\n\t"
+ "or %1, %0, %0"
+ : "=r" (x), "=&r" (temp)
+ : "0" (x));
+ return x;
+}
+#define __arch_swab64 __arch_swab64
+#endif /* BITS_PER_LONG > 32 */
+
+#endif /* _PARISC_SWAB_H */
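
The swab helpers move essentially verbatim from byteorder.h into the new asm/swab.h and pick up the current naming convention (__arch_swab16/32/64 plus the matching #define markers). The added __SWAB_64_THRU_32__ tells the generic byteorder code that, when no 64-bit helper is available (the 32-bit kernel case), a 64-bit swap should be composed from two 32-bit swaps, which is what the old ___arch__swab64 fallback did by hand. A portable C sketch of that composition, with nothing PA-RISC specific:

#include <stdint.h>

static inline uint32_t swab32_sketch(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}

/* 64-bit swap "thru 32": swap each half, then exchange the halves. */
static inline uint64_t swab64_thru_32_sketch(uint64_t x)
{
	uint32_t lo = swab32_sketch((uint32_t)x);
	uint32_t hi = swab32_sketch((uint32_t)(x >> 32));

	return ((uint64_t)lo << 32) | hi;
}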
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 4878b9501f2..1c6dbb6f6e5 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -241,4 +241,6 @@ unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned lo
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
+int fixup_exception(struct pt_regs *regs);
+
#endif /* __PARISC_UACCESS_H */
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 884b7ce16a3..994bcd98090 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -549,6 +549,38 @@ static int parisc_generic_match(struct device *dev, struct device_driver *drv)
return match_device(to_parisc_driver(drv), to_parisc_device(dev));
}
+static ssize_t make_modalias(struct device *dev, char *buf)
+{
+ const struct parisc_device *padev = to_parisc_device(dev);
+ const struct parisc_device_id *id = &padev->id;
+
+ return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
+ (u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
+ (u32)id->sversion);
+}
+
+static int parisc_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct parisc_device *padev;
+ char modalias[40];
+
+ if (!dev)
+ return -ENODEV;
+
+ padev = to_parisc_device(dev);
+ if (!padev)
+ return -ENODEV;
+
+ if (add_uevent_var(env, "PARISC_NAME=%s", padev->name))
+ return -ENOMEM;
+
+ make_modalias(dev, modalias);
+ if (add_uevent_var(env, "MODALIAS=%s", modalias))
+ return -ENOMEM;
+
+ return 0;
+}
+
#define pa_dev_attr(name, field, format_string) \
static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
@@ -566,12 +598,7 @@ pa_dev_attr_id(sversion, "0x%05x\n");
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct parisc_device *padev = to_parisc_device(dev);
- struct parisc_device_id *id = &padev->id;
-
- return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
- (u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
- (u32)id->sversion);
+ return make_modalias(dev, buf);
}
static struct device_attribute parisc_device_attrs[] = {
@@ -587,6 +614,7 @@ static struct device_attribute parisc_device_attrs[] = {
struct bus_type parisc_bus_type = {
.name = "parisc",
.match = parisc_generic_match,
+ .uevent = parisc_uevent,
.dev_attrs = parisc_device_attrs,
.probe = parisc_driver_probe,
.remove = parisc_driver_remove,
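
The new uevent callback and the existing modalias attribute now share make_modalias(), so hotplug events and sysfs report the same parisc:t..hv....rev..sv........ string that module aliases match against. A stand-alone sketch of the formatting with made-up ID values (struct layout and numbers are purely illustrative):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for struct parisc_device_id. */
struct sample_id {
	uint8_t  hw_type;
	uint8_t  hversion_rev;
	uint16_t hversion;
	uint32_t sversion;
};

int main(void)
{
	struct sample_id id = { 0x00, 0x00, 0x0481, 0x00000081 };
	char modalias[40];

	snprintf(modalias, sizeof(modalias), "parisc:t%02Xhv%04Xrev%02Xsv%08X",
		 id.hw_type, id.hversion, id.hversion_rev, id.sversion);
	printf("MODALIAS=%s\n", modalias);	/* parisc:t00hv0481rev00sv00000081 */
	return 0;
}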
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index 2cbf13b3ef1..5595a2f3118 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -80,6 +80,7 @@ END(hpmc_pim_data)
.import intr_save, code
ENTRY(os_hpmc)
+.os_hpmc:
/*
* registers modified:
@@ -295,5 +296,10 @@ os_hpmc_6:
b .
nop
ENDPROC(os_hpmc)
-ENTRY(os_hpmc_end) /* this label used to compute os_hpmc checksum */
+.os_hpmc_end:
nop
+.data
+.align 4
+ .export os_hpmc_size
+os_hpmc_size:
+ .word .os_hpmc_end-.os_hpmc
diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c
index f5941c08655..1e25a45d64c 100644
--- a/arch/parisc/kernel/init_task.c
+++ b/arch/parisc/kernel/init_task.c
@@ -34,7 +34,6 @@
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
-static struct fs_struct init_fs = INIT_FS;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 23ef950df00..ac2c822928c 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -131,12 +131,12 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
return 0;
}
-static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
+static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
{
- if (cpu_check_affinity(irq, &dest))
+ if (cpu_check_affinity(irq, dest))
return;
- irq_desc[irq].affinity = dest;
+ irq_desc[irq].affinity = *dest;
}
#endif
@@ -298,7 +298,7 @@ unsigned long txn_affinity_addr(unsigned int irq, int cpu)
irq_desc[irq].affinity = cpumask_of_cpu(cpu);
#endif
- return cpu_data[cpu].txn_addr;
+ return per_cpu(cpu_data, cpu).txn_addr;
}
@@ -309,8 +309,9 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
next_cpu++; /* assign to "next" CPU we want this bugger on */
/* validate entry */
- while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr ||
- !cpu_online(next_cpu)))
+ while ((next_cpu < NR_CPUS) &&
+ (!per_cpu(cpu_data, next_cpu).txn_addr ||
+ !cpu_online(next_cpu)))
next_cpu++;
if (next_cpu >= NR_CPUS)
@@ -359,7 +360,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
irq, smp_processor_id(), cpu);
gsc_writel(irq + CPU_IRQ_BASE,
- cpu_data[cpu].hpa);
+ per_cpu(cpu_data, cpu).hpa);
goto set_out;
}
#endif
@@ -421,5 +422,5 @@ void __init init_IRQ(void)
void ack_bad_irq(unsigned int irq)
{
- printk("unexpected IRQ %d\n", irq);
+ printk(KERN_WARNING "unexpected IRQ %d\n", irq);
}
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 44138c3e6ea..9013243cecc 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -6,6 +6,7 @@
*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
* Copyright (C) 2003 Randolph Chung <tausq at debian . org>
+ * Copyright (C) 2008 Helge Deller <deller@gmx.de>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -24,6 +25,19 @@
*
*
* Notes:
+ * - PLT stub handling
+ * On 32bit (and sometimes 64bit) and with big kernel modules like xfs or
+ * ipv6 the relocation types R_PARISC_PCREL17F and R_PARISC_PCREL22F may
+ * fail to reach their PLT stub if we only create one big stub array for
+ * all sections at the beginning of the core or init section.
+ * Instead we now insert individual PLT stub entries directly in front
+ * of the code sections where the stubs are actually called.
+ * This reduces the distance between the PCREL location and the stub entry
+ * so that the relocations can be fulfilled.
+ * While calculating the final layout of the kernel module in memory, the
+ * kernel module loader calls arch_mod_section_prepend() to request the
+ * amount of memory to be reserved in front of each individual section.
+ *
* - SEGREL32 handling
* We are not doing SEGREL32 handling correctly. According to the ABI, we
* should do a value offset, like this:
@@ -58,9 +72,13 @@
#define DEBUGP(fmt...)
#endif
+#define RELOC_REACHABLE(val, bits) \
+ (( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 ) || \
+ ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) ? \
+ 0 : 1)
+
#define CHECK_RELOC(val, bits) \
- if ( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 ) || \
- ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) { \
+ if (!RELOC_REACHABLE(val, bits)) { \
printk(KERN_ERR "module %s relocation of symbol %s is out of range (0x%lx in %d bits)\n", \
me->name, strtab + sym->st_name, (unsigned long)val, bits); \
return -ENOEXEC; \
@@ -92,13 +110,6 @@ static inline int in_local(struct module *me, void *loc)
return in_init(me, loc) || in_core(me, loc);
}
-static inline int in_local_section(struct module *me, void *loc, void *dot)
-{
- return (in_init(me, loc) && in_init(me, dot)) ||
- (in_core(me, loc) && in_core(me, dot));
-}
-
-
#ifndef CONFIG_64BIT
struct got_entry {
Elf32_Addr addr;
@@ -258,23 +269,42 @@ static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
+ kfree(mod->arch.section);
+ mod->arch.section = NULL;
+
vfree(module_region);
/* FIXME: If module_region == mod->init_region, trim exception
table entries. */
}
+/* Additional bytes needed in front of individual sections */
+unsigned int arch_mod_section_prepend(struct module *mod,
+ unsigned int section)
+{
+ /* size needed for all stubs of this section (including
+ * one additional for correct alignment of the stubs) */
+ return (mod->arch.section[section].stub_entries + 1)
+ * sizeof(struct stub_entry);
+}
+
#define CONST
int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
CONST Elf_Shdr *sechdrs,
CONST char *secstrings,
struct module *me)
{
- unsigned long gots = 0, fdescs = 0, stubs = 0, init_stubs = 0;
+ unsigned long gots = 0, fdescs = 0, len;
unsigned int i;
+ len = hdr->e_shnum * sizeof(me->arch.section[0]);
+ me->arch.section = kzalloc(len, GFP_KERNEL);
+ if (!me->arch.section)
+ return -ENOMEM;
+
for (i = 1; i < hdr->e_shnum; i++) {
- const Elf_Rela *rels = (void *)hdr + sechdrs[i].sh_offset;
+ const Elf_Rela *rels = (void *)sechdrs[i].sh_addr;
unsigned long nrels = sechdrs[i].sh_size / sizeof(*rels);
+ unsigned int count, s;
if (strncmp(secstrings + sechdrs[i].sh_name,
".PARISC.unwind", 14) == 0)
@@ -290,11 +320,23 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
*/
gots += count_gots(rels, nrels);
fdescs += count_fdescs(rels, nrels);
- if(strncmp(secstrings + sechdrs[i].sh_name,
- ".rela.init", 10) == 0)
- init_stubs += count_stubs(rels, nrels);
- else
- stubs += count_stubs(rels, nrels);
+
+ /* XXX: By sorting the relocs and finding duplicate entries
+ * we could reduce the number of necessary stubs and save
+ * some memory. */
+ count = count_stubs(rels, nrels);
+ if (!count)
+ continue;
+
+ /* so we need relocation stubs. reserve necessary memory. */
+ /* sh_info gives the section for which we need to add stubs. */
+ s = sechdrs[i].sh_info;
+
+ /* each code section should only have one relocation section */
+ WARN_ON(me->arch.section[s].stub_entries);
+
+ /* store number of stubs we need for this section */
+ me->arch.section[s].stub_entries += count;
}
/* align things a bit */
@@ -306,18 +348,8 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
me->arch.fdesc_offset = me->core_size;
me->core_size += fdescs * sizeof(Elf_Fdesc);
- me->core_size = ALIGN(me->core_size, 16);
- me->arch.stub_offset = me->core_size;
- me->core_size += stubs * sizeof(struct stub_entry);
-
- me->init_size = ALIGN(me->init_size, 16);
- me->arch.init_stub_offset = me->init_size;
- me->init_size += init_stubs * sizeof(struct stub_entry);
-
me->arch.got_max = gots;
me->arch.fdesc_max = fdescs;
- me->arch.stub_max = stubs;
- me->arch.init_stub_max = init_stubs;
return 0;
}
@@ -380,23 +412,27 @@ enum elf_stub_type {
};
static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
- enum elf_stub_type stub_type, int init_section)
+ enum elf_stub_type stub_type, Elf_Addr loc0, unsigned int targetsec)
{
- unsigned long i;
struct stub_entry *stub;
- if(init_section) {
- i = me->arch.init_stub_count++;
- BUG_ON(me->arch.init_stub_count > me->arch.init_stub_max);
- stub = me->module_init + me->arch.init_stub_offset +
- i * sizeof(struct stub_entry);
- } else {
- i = me->arch.stub_count++;
- BUG_ON(me->arch.stub_count > me->arch.stub_max);
- stub = me->module_core + me->arch.stub_offset +
- i * sizeof(struct stub_entry);
+ /* initialize stub_offset to point in front of the section */
+ if (!me->arch.section[targetsec].stub_offset) {
+ loc0 -= (me->arch.section[targetsec].stub_entries + 1) *
+ sizeof(struct stub_entry);
+ /* get correct alignment for the stubs */
+ loc0 = ALIGN(loc0, sizeof(struct stub_entry));
+ me->arch.section[targetsec].stub_offset = loc0;
}
+ /* get address of stub entry */
+ stub = (void *) me->arch.section[targetsec].stub_offset;
+ me->arch.section[targetsec].stub_offset += sizeof(struct stub_entry);
+
+ /* do not write outside available stub area */
+ BUG_ON(0 == me->arch.section[targetsec].stub_entries--);
+
+
#ifndef CONFIG_64BIT
/* for 32-bit the stub looks like this:
* ldil L'XXX,%r1
@@ -489,15 +525,19 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
Elf32_Addr val;
Elf32_Sword addend;
Elf32_Addr dot;
+ Elf_Addr loc0;
+ unsigned int targetsec = sechdrs[relsec].sh_info;
//unsigned long dp = (unsigned long)$global$;
register unsigned long dp asm ("r27");
DEBUGP("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
+ targetsec);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
- loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ loc = (void *)sechdrs[targetsec].sh_addr
+ rel[i].r_offset;
+ /* This is the start of the target section */
+ loc0 = sechdrs[targetsec].sh_addr;
/* This is the symbol it is referring to */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
@@ -569,19 +609,32 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
break;
case R_PARISC_PCREL17F:
/* 17-bit PC relative address */
- val = get_stub(me, val, addend, ELF_STUB_GOT, in_init(me, loc));
+ /* calculate direct call offset */
+ val += addend;
val = (val - dot - 8)/4;
- CHECK_RELOC(val, 17)
+ if (!RELOC_REACHABLE(val, 17)) {
+ /* direct distance too far, create
+ * stub entry instead */
+ val = get_stub(me, sym->st_value, addend,
+ ELF_STUB_DIRECT, loc0, targetsec);
+ val = (val - dot - 8)/4;
+ CHECK_RELOC(val, 17);
+ }
*loc = (*loc & ~0x1f1ffd) | reassemble_17(val);
break;
case R_PARISC_PCREL22F:
/* 22-bit PC relative address; only defined for pa20 */
- val = get_stub(me, val, addend, ELF_STUB_GOT, in_init(me, loc));
- DEBUGP("STUB FOR %s loc %lx+%lx at %lx\n",
- strtab + sym->st_name, (unsigned long)loc, addend,
- val)
+ /* calculate direct call offset */
+ val += addend;
val = (val - dot - 8)/4;
- CHECK_RELOC(val, 22);
+ if (!RELOC_REACHABLE(val, 22)) {
+ /* direct distance too far, create
+ * stub entry instead */
+ val = get_stub(me, sym->st_value, addend,
+ ELF_STUB_DIRECT, loc0, targetsec);
+ val = (val - dot - 8)/4;
+ CHECK_RELOC(val, 22);
+ }
*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
break;
@@ -610,13 +663,17 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
Elf64_Addr val;
Elf64_Sxword addend;
Elf64_Addr dot;
+ Elf_Addr loc0;
+ unsigned int targetsec = sechdrs[relsec].sh_info;
DEBUGP("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
+ targetsec);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
- loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ loc = (void *)sechdrs[targetsec].sh_addr
+ rel[i].r_offset;
+ /* This is the start of the target section */
+ loc0 = sechdrs[targetsec].sh_addr;
/* This is the symbol it is referring to */
sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+ ELF64_R_SYM(rel[i].r_info);
@@ -672,42 +729,40 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
DEBUGP("PCREL22F Symbol %s loc %p val %lx\n",
strtab + sym->st_name,
loc, val);
+ val += addend;
/* can we reach it locally? */
- if(!in_local_section(me, (void *)val, (void *)dot)) {
-
- if (in_local(me, (void *)val))
- /* this is the case where the
- * symbol is local to the
- * module, but in a different
- * section, so stub the jump
- * in case it's more than 22
- * bits away */
- val = get_stub(me, val, addend, ELF_STUB_DIRECT,
- in_init(me, loc));
- else if (strncmp(strtab + sym->st_name, "$$", 2)
+ if (in_local(me, (void *)val)) {
+ /* this is the case where the symbol is local
+ * to the module, but in a different section,
+ * so stub the jump in case it's more than 22
+ * bits away */
+ val = (val - dot - 8)/4;
+ if (!RELOC_REACHABLE(val, 22)) {
+ /* direct distance too far, create
+ * stub entry instead */
+ val = get_stub(me, sym->st_value,
+ addend, ELF_STUB_DIRECT,
+ loc0, targetsec);
+ } else {
+ /* Ok, we can reach it directly. */
+ val = sym->st_value;
+ val += addend;
+ }
+ } else {
+ val = sym->st_value;
+ if (strncmp(strtab + sym->st_name, "$$", 2)
== 0)
val = get_stub(me, val, addend, ELF_STUB_MILLI,
- in_init(me, loc));
+ loc0, targetsec);
else
val = get_stub(me, val, addend, ELF_STUB_GOT,
- in_init(me, loc));
+ loc0, targetsec);
}
DEBUGP("STUB FOR %s loc %lx, val %lx+%lx at %lx\n",
strtab + sym->st_name, loc, sym->st_value,
addend, val);
- /* FIXME: local symbols work as long as the
- * core and init pieces aren't separated too
- * far. If this is ever broken, you will trip
- * the check below. The way to fix it would
- * be to generate local stubs to go between init
- * and core */
- if((Elf64_Sxword)(val - dot - 8) > 0x800000 -1 ||
- (Elf64_Sxword)(val - dot - 8) < -0x800000) {
- printk(KERN_ERR "Module %s, symbol %s is out of range for PCREL22F relocation\n",
- me->name, strtab + sym->st_name);
- return -ENOEXEC;
- }
val = (val - dot - 8)/4;
+ CHECK_RELOC(val, 22);
*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
break;
case R_PARISC_DIR64:
@@ -794,12 +849,8 @@ int module_finalize(const Elf_Ehdr *hdr,
addr = (u32 *)entry->addr;
printk("INSNS: %x %x %x %x\n",
addr[0], addr[1], addr[2], addr[3]);
- printk("stubs used %ld, stubs max %ld\n"
- "init_stubs used %ld, init stubs max %ld\n"
- "got entries used %ld, gots max %ld\n"
+ printk("got entries used %ld, gots max %ld\n"
"fdescs used %ld, fdescs max %ld\n",
- me->arch.stub_count, me->arch.stub_max,
- me->arch.init_stub_count, me->arch.init_stub_max,
me->arch.got_count, me->arch.got_max,
me->arch.fdesc_count, me->arch.fdesc_max);
#endif
@@ -829,7 +880,10 @@ int module_finalize(const Elf_Ehdr *hdr,
me->name, me->arch.got_count, MAX_GOTS);
return -EINVAL;
}
-
+
+ kfree(me->arch.section);
+ me->arch.section = NULL;
+
/* no symbol table */
if(symhdr == NULL)
return 0;
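
The heart of the module.c rework is the split of the old CHECK_RELOC() into RELOC_REACHABLE() plus the error path: apply_relocate_add() first tests whether a direct R_PARISC_PCREL17F/PCREL22F branch reaches its target and only falls back to a stub reserved in front of the target section when it does not. A self-contained sketch of that range test on the (target - dot - 8)/4 word offset, with made-up addresses:

#include <stdio.h>
#include <stdint.h>

/* Same test as the kernel macro: 'val' is the unsigned word offset;
 * the >> (bits + 2) on the negative side accounts for the two high
 * bits cleared by the unsigned division by 4. */
#define RELOC_REACHABLE(val, bits) \
	(( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 ) ||	\
	   ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) !=		\
	     (((__typeof__(val))(~0))>>((bits)+2)))) ? 0 : 1)

int main(void)
{
	uint32_t dot  = 0x00100000;	/* address of the branch (made up)  */
	uint32_t near = 0x00110000;	/* +64 KiB: within the 17-bit range */
	uint32_t far  = 0x00900000;	/* +8 MiB: needs a PLT stub         */

	uint32_t v_near = (near - dot - 8) / 4;
	uint32_t v_far  = (far  - dot - 8) / 4;

	printf("near reachable: %d\n", RELOC_REACHABLE(v_near, 17));	/* 1 */
	printf("far  reachable: %d\n", RELOC_REACHABLE(v_far, 17));	/* 0 */
	return 0;
}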
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index ccd61b9567a..df47895db82 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -2,7 +2,7 @@
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
-** See Documentation/DMA-mapping.txt for interface definitions.
+** See Documentation/PCI/PCI-DMA-mapping.txt for interface definitions.
**
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index ccb68090781..1ff366cb968 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -52,7 +52,7 @@
#include <linux/tty.h>
#include <asm/pdc.h> /* for iodc_call() proto and friends */
-static spinlock_t pdc_console_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(pdc_console_lock);
static void pdc_console_write(struct console *co, const char *s, unsigned count)
{
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index f696f57faa1..75099efb3bf 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -541,9 +541,9 @@ static int __init perf_init(void)
spin_lock_init(&perf_lock);
/* TODO: this only lets us access the first cpu.. what to do for SMP? */
- cpu_device = cpu_data[0].dev;
+ cpu_device = per_cpu(cpu_data, 0).dev;
printk("Performance monitoring counters enabled for %s\n",
- cpu_data[0].dev->name);
+ per_cpu(cpu_data, 0).dev->name);
return 0;
}
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 370086fb833..ecb609342fe 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -3,7 +3,7 @@
* Initial setup-routines for HP 9000 based hardware.
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
- * Modifications for PA-RISC (C) 1999 Helge Deller <deller@gmx.de>
+ * Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de>
* Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
* Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
* Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
@@ -46,7 +46,7 @@
struct system_cpuinfo_parisc boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);
-struct cpuinfo_parisc cpu_data[NR_CPUS] __read_mostly;
+DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
extern int update_cr16_clocksource(void); /* from time.c */
@@ -69,6 +69,23 @@ extern int update_cr16_clocksource(void); /* from time.c */
*/
/**
+ * init_cpu_profiler - enable/setup per cpu profiling hooks.
+ * @cpunum: The processor instance.
+ *
+ * FIXME: doesn't do much yet...
+ */
+static void __cpuinit
+init_percpu_prof(unsigned long cpunum)
+{
+ struct cpuinfo_parisc *p;
+
+ p = &per_cpu(cpu_data, cpunum);
+ p->prof_counter = 1;
+ p->prof_multiplier = 1;
+}
+
+
+/**
* processor_probe - Determine if processor driver should claim this device.
* @dev: The device which has been found.
*
@@ -147,7 +164,7 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
}
#endif
- p = &cpu_data[cpuid];
+ p = &per_cpu(cpu_data, cpuid);
boot_cpu_data.cpu_count++;
/* initialize counters - CPU 0 gets it_value set in time_init() */
@@ -162,12 +179,9 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
#ifdef CONFIG_SMP
/*
** FIXME: review if any other initialization is clobbered
- ** for boot_cpu by the above memset().
+ ** for boot_cpu by the above memset().
*/
-
- /* stolen from init_percpu_prof() */
- cpu_data[cpuid].prof_counter = 1;
- cpu_data[cpuid].prof_multiplier = 1;
+ init_percpu_prof(cpuid);
#endif
/*
@@ -261,19 +275,6 @@ void __init collect_boot_cpu_data(void)
}
-/**
- * init_cpu_profiler - enable/setup per cpu profiling hooks.
- * @cpunum: The processor instance.
- *
- * FIXME: doesn't do much yet...
- */
-static inline void __init
-init_percpu_prof(int cpunum)
-{
- cpu_data[cpunum].prof_counter = 1;
- cpu_data[cpunum].prof_multiplier = 1;
-}
-
/**
* init_per_cpu - Handle individual processor initializations.
@@ -293,7 +294,7 @@ init_percpu_prof(int cpunum)
*
* o Enable CPU profiling hooks.
*/
-int __init init_per_cpu(int cpunum)
+int __cpuinit init_per_cpu(int cpunum)
{
int ret;
struct pdc_coproc_cfg coproc_cfg;
@@ -307,8 +308,8 @@ int __init init_per_cpu(int cpunum)
/* FWIW, FP rev/model is a more accurate way to determine
** CPU type. CPU rev/model has some ambiguous cases.
*/
- cpu_data[cpunum].fp_rev = coproc_cfg.revision;
- cpu_data[cpunum].fp_model = coproc_cfg.model;
+ per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
+ per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
cpunum, coproc_cfg.revision, coproc_cfg.model);
@@ -344,16 +345,17 @@ int __init init_per_cpu(int cpunum)
int
show_cpuinfo (struct seq_file *m, void *v)
{
- int n;
+ unsigned long cpu;
- for(n=0; n<boot_cpu_data.cpu_count; n++) {
+ for_each_online_cpu(cpu) {
+ const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
#ifdef CONFIG_SMP
- if (0 == cpu_data[n].hpa)
+ if (0 == cpuinfo->hpa)
continue;
#endif
- seq_printf(m, "processor\t: %d\n"
+ seq_printf(m, "processor\t: %lu\n"
"cpu family\t: PA-RISC %s\n",
- n, boot_cpu_data.family_name);
+ cpu, boot_cpu_data.family_name);
seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name );
@@ -365,8 +367,8 @@ show_cpuinfo (struct seq_file *m, void *v)
seq_printf(m, "model\t\t: %s\n"
"model name\t: %s\n",
boot_cpu_data.pdc.sys_model_name,
- cpu_data[n].dev ?
- cpu_data[n].dev->name : "Unknown" );
+ cpuinfo->dev ?
+ cpuinfo->dev->name : "Unknown");
seq_printf(m, "hversion\t: 0x%08x\n"
"sversion\t: 0x%08x\n",
@@ -377,8 +379,8 @@ show_cpuinfo (struct seq_file *m, void *v)
show_cache_info(m);
seq_printf(m, "bogomips\t: %lu.%02lu\n",
- cpu_data[n].loops_per_jiffy / (500000 / HZ),
- (cpu_data[n].loops_per_jiffy / (5000 / HZ)) % 100);
+ cpuinfo->loops_per_jiffy / (500000 / HZ),
+ (cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100);
seq_printf(m, "software id\t: %ld\n\n",
boot_cpu_data.pdc.model.sw_id);
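
show_cpuinfo() now walks for_each_online_cpu() and reads the per-CPU cpuinfo, but the bogomips arithmetic is unchanged: the integer part is loops_per_jiffy / (500000/HZ) and the two fractional digits are (loops_per_jiffy / (5000/HZ)) % 100. A small worked example, assuming HZ=100 and a made-up loops_per_jiffy:

#include <stdio.h>

int main(void)
{
	unsigned long hz  = 100;	/* assumed tick rate, illustration only */
	unsigned long lpj = 497664;	/* made-up loops_per_jiffy              */

	/* Same arithmetic as the seq_printf() in show_cpuinfo(). */
	printf("bogomips\t: %lu.%02lu\n",
	       lpj / (500000 / hz),
	       (lpj / (5000 / hz)) % 100);	/* prints 99.53 */
	return 0;
}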
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 7d27853ff8c..82131ca8e05 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -58,11 +58,6 @@ int parisc_bus_is_phys __read_mostly = 1; /* Assume no IOMMU is present */
EXPORT_SYMBOL(parisc_bus_is_phys);
#endif
-/* This sets the vmerge boundary and size, it's here because it has to
- * be available on all platforms (zero means no-virtual merging) */
-unsigned long parisc_vmerge_boundary = 0;
-unsigned long parisc_vmerge_max_size = 0;
-
void __init setup_cmdline(char **cmdline_p)
{
extern unsigned int boot_args[];
@@ -321,7 +316,7 @@ static int __init parisc_init(void)
processor_init();
printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n",
- boot_cpu_data.cpu_count,
+ num_present_cpus(),
boot_cpu_data.cpu_name,
boot_cpu_data.cpu_hz / 1000000,
boot_cpu_data.cpu_hz % 1000000 );
@@ -387,8 +382,8 @@ void start_parisc(void)
if (ret >= 0 && coproc_cfg.ccr_functional) {
mtctl(coproc_cfg.ccr_functional, 10);
- cpu_data[cpunum].fp_rev = coproc_cfg.revision;
- cpu_data[cpunum].fp_model = coproc_cfg.model;
+ per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
+ per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
asm volatile ("fstd %fr0,8(%sp)");
} else {
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index d47f3975c9c..9995d7ed581 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -56,31 +56,17 @@ static int smp_debug_lvl = 0;
if (lvl >= smp_debug_lvl) \
printk(printargs);
#else
-#define smp_debug(lvl, ...)
+#define smp_debug(lvl, ...) do { } while(0)
#endif /* DEBUG_SMP */
DEFINE_SPINLOCK(smp_lock);
volatile struct task_struct *smp_init_current_idle_task;
-static volatile int cpu_now_booting __read_mostly = 0; /* track which CPU is booting */
+/* track which CPU is booting */
+static volatile int cpu_now_booting __cpuinitdata;
-static int parisc_max_cpus __read_mostly = 1;
-
-/* online cpus are ones that we've managed to bring up completely
- * possible cpus are all valid cpu
- * present cpus are all detected cpu
- *
- * On startup we bring up the "possible" cpus. Since we discover
- * CPUs later, we add them as hotplug, so the possible cpu mask is
- * empty in the beginning.
- */
-
-cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; /* Bitmap of online CPUs */
-cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; /* Bitmap of Present CPUs */
-
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
+static int parisc_max_cpus __cpuinitdata = 1;
DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
@@ -138,7 +124,7 @@ irqreturn_t
ipi_interrupt(int irq, void *dev_id)
{
int this_cpu = smp_processor_id();
- struct cpuinfo_parisc *p = &cpu_data[this_cpu];
+ struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
unsigned long ops;
unsigned long flags;
@@ -217,13 +203,13 @@ ipi_interrupt(int irq, void *dev_id)
static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
- struct cpuinfo_parisc *p = &cpu_data[cpu];
+ struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
spinlock_t *lock = &per_cpu(ipi_lock, cpu);
unsigned long flags;
spin_lock_irqsave(lock, flags);
p->pending_ipi |= 1 << op;
- gsc_writel(IPI_IRQ - CPU_IRQ_BASE, cpu_data[cpu].hpa);
+ gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
spin_unlock_irqrestore(lock, flags);
}
@@ -239,10 +225,7 @@ send_IPI_mask(cpumask_t mask, enum ipi_message_type op)
static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
- if (dest_cpu == NO_PROC_ID) {
- BUG();
- return;
- }
+ BUG_ON(dest_cpu == NO_PROC_ID);
ipi_send(dest_cpu, op);
}
@@ -324,8 +307,7 @@ smp_cpu_init(int cpunum)
/* Initialise the idle task for this CPU */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
- if(current->mm)
- BUG();
+ BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
init_IRQ(); /* make sure no IRQs are enabled or pending */
@@ -360,6 +342,7 @@ void __init smp_callin(void)
*/
int __cpuinit smp_boot_one_cpu(int cpuid)
{
+ const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
struct task_struct *idle;
long timeout;
@@ -391,7 +374,7 @@ int __cpuinit smp_boot_one_cpu(int cpuid)
smp_init_current_idle_task = idle ;
mb();
- printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa);
+ printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);
/*
** This gets PDC to release the CPU from a very tight loop.
@@ -402,7 +385,7 @@ int __cpuinit smp_boot_one_cpu(int cpuid)
** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
** contents of memory are valid."
*/
- gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, cpu_data[cpuid].hpa);
+ gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
mb();
/*
@@ -434,12 +417,12 @@ alive:
return 0;
}
-void __devinit smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
{
- int bootstrap_processor=cpu_data[0].cpuid; /* CPU ID of BSP */
+ int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;
/* Setup BSP mappings */
- printk("SMP: bootstrap CPU ID is %d\n",bootstrap_processor);
+ printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
cpu_set(bootstrap_processor, cpu_online_map);
cpu_set(bootstrap_processor, cpu_present_map);
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 4d09203bc69..9d46c43a415 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -60,7 +60,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
unsigned long cycles_elapsed, ticks_elapsed;
unsigned long cycles_remainder;
unsigned int cpu = smp_processor_id();
- struct cpuinfo_parisc *cpuinfo = &cpu_data[cpu];
+ struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
/* gcc can optimize for "read-only" case with a local clocktick */
unsigned long cpt = clocktick;
@@ -213,7 +213,7 @@ void __init start_cpu_itimer(void)
mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */
- cpu_data[cpu].it_value = next_tick;
+ per_cpu(cpu_data, cpu).it_value = next_tick;
}
struct platform_device rtc_parisc_dev = {
diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c
index d71cb018a21..f5159381fdd 100644
--- a/arch/parisc/kernel/topology.c
+++ b/arch/parisc/kernel/topology.c
@@ -22,14 +22,14 @@
#include <linux/cpu.h>
#include <linux/cache.h>
-static struct cpu cpu_devices[NR_CPUS] __read_mostly;
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
static int __init topology_init(void)
{
int num;
for_each_present_cpu(num) {
- register_cpu(&cpu_devices[num], num);
+ register_cpu(&per_cpu(cpu_devices, num), num);
}
return 0;
}
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 4c771cd580e..ba658d2086f 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -745,6 +745,10 @@ void handle_interruption(int code, struct pt_regs *regs)
/* Fall Through */
case 27:
/* Data memory protection ID trap */
+ if (code == 27 && !user_mode(regs) &&
+ fixup_exception(regs))
+ return;
+
die_if_kernel("Protection id trap", regs, code);
si.si_code = SEGV_MAPERR;
si.si_signo = SIGSEGV;
@@ -821,8 +825,8 @@ void handle_interruption(int code, struct pt_regs *regs)
int __init check_ivt(void *iva)
{
+ extern u32 os_hpmc_size;
extern const u32 os_hpmc[];
- extern const u32 os_hpmc_end[];
int i;
u32 check = 0;
@@ -839,8 +843,7 @@ int __init check_ivt(void *iva)
*ivap++ = 0;
/* Compute Checksum for HPMC handler */
-
- length = os_hpmc_end - os_hpmc;
+ length = os_hpmc_size;
ivap[7] = length;
hpmcp = (u32 *)os_hpmc;
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 6773c582e45..69dad5a850a 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -372,7 +372,7 @@ void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct
struct pt_regs *r = &t->thread.regs;
struct pt_regs *r2;
- r2 = kmalloc(sizeof(struct pt_regs), GFP_KERNEL);
+ r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
if (!r2)
return;
*r2 = *r;
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
index 9abed07db7f..5069e8b2ca7 100644
--- a/arch/parisc/lib/iomap.c
+++ b/arch/parisc/lib/iomap.c
@@ -261,7 +261,7 @@ static const struct iomap_ops iomem_ops = {
iomem_write32r,
};
-const struct iomap_ops *iomap_ops[8] = {
+static const struct iomap_ops *iomap_ops[8] = {
[0] = &ioport_ops,
[7] = &iomem_ops
};
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index 2d68431fc22..bbda909c866 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -275,7 +275,7 @@ handle_store_error:
/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
-unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
{
register unsigned long src, dst, t1, t2, t3;
register unsigned char *pcs, *pcd;
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index b2e3e9a8cec..92c7fa4ecc3 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -139,13 +139,41 @@ parisc_acctyp(unsigned long code, unsigned int inst)
}
#endif
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fix;
+
+ fix = search_exception_tables(regs->iaoq[0]);
+ if (fix) {
+ struct exception_data *d;
+ d = &__get_cpu_var(exception_data);
+ d->fault_ip = regs->iaoq[0];
+ d->fault_space = regs->isr;
+ d->fault_addr = regs->ior;
+
+ regs->iaoq[0] = ((fix->fixup) & ~3);
+ /*
+ * NOTE: In some cases the faulting instruction
+ * may be in the delay slot of a branch. We
+ * don't want to take the branch, so we don't
+ * increment iaoq[1], instead we set it to be
+ * iaoq[0]+4, and clear the B bit in the PSW
+ */
+ regs->iaoq[1] = regs->iaoq[0] + 4;
+ regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
+
+ return 1;
+ }
+
+ return 0;
+}
+
void do_page_fault(struct pt_regs *regs, unsigned long code,
unsigned long address)
{
struct vm_area_struct *vma, *prev_vma;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
- const struct exception_table_entry *fix;
unsigned long acc_type;
int fault;
@@ -229,32 +257,8 @@ bad_area:
no_context:
- if (!user_mode(regs)) {
- fix = search_exception_tables(regs->iaoq[0]);
-
- if (fix) {
- struct exception_data *d;
-
- d = &__get_cpu_var(exception_data);
- d->fault_ip = regs->iaoq[0];
- d->fault_space = regs->isr;
- d->fault_addr = regs->ior;
-
- regs->iaoq[0] = ((fix->fixup) & ~3);
-
- /*
- * NOTE: In some cases the faulting instruction
- * may be in the delay slot of a branch. We
- * don't want to take the branch, so we don't
- * increment iaoq[1], instead we set it to be
- * iaoq[0]+4, and clear the B bit in the PSW
- */
-
- regs->iaoq[1] = regs->iaoq[0] + 4;
- regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
-
- return;
- }
+ if (!user_mode(regs) && fixup_exception(regs)) {
+ return;
}
parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
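
The fixup logic that used to live inline in do_page_fault() is now the standalone fixup_exception(), so the protection-id trap path in traps.c can reuse it. The mechanism itself is unchanged: look the faulting address up in the exception table, jump to the entry's fixup, and set iaoq[1] = iaoq[0] + 4 with PSW_B cleared so a fault in a branch delay slot does not take the branch. A condensed user-space sketch of the table lookup, with made-up addresses and a linear search standing in for search_exception_tables():

#include <stdio.h>

/* Simplified stand-in for the kernel's exception table: each entry
 * pairs the address of an instruction that may fault with the address
 * of its fixup code. */
struct ex_entry {
	unsigned long insn;
	unsigned long fixup;
};

static const struct ex_entry table[] = {	/* made-up addresses */
	{ 0x10001000UL, 0x10004000UL },
	{ 0x10001008UL, 0x10004010UL },
};

static const struct ex_entry *search_ex_table(unsigned long ip)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].insn == ip)
			return &table[i];
	return NULL;
}

int main(void)
{
	const struct ex_entry *fix = search_ex_table(0x10001008UL);

	if (fix)
		printf("resume at 0x%lx, next 0x%lx\n",
		       fix->fixup & ~3UL, (fix->fixup & ~3UL) + 4);
	return 0;
}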