author		Jiri Kosina <jkosina@suse.cz>	2013-12-19 15:08:03 +0100
committer	Jiri Kosina <jkosina@suse.cz>	2013-12-19 15:08:32 +0100
commit		e23c34bb41da65f354fb7eee04300c56ee48f60c (patch)
tree		549fbe449d55273b81ef104a9755109bf4ae7817 /arch/parisc/lib
parent		b481c2cb3534c85dca625973b33eba15f9af3e4c (diff)
parent		319e2e3f63c348a9b66db4667efa73178e18b17d (diff)
Merge branch 'master' into for-next
Sync with Linus' tree to be able to apply fixes on top of newer things in tree (efi-stub).

Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Diffstat (limited to 'arch/parisc/lib')
-rw-r--r--	arch/parisc/lib/Makefile	  2
-rw-r--r--	arch/parisc/lib/delay.c		 73
-rw-r--r--	arch/parisc/lib/lusercopy.S	 10
-rw-r--r--	arch/parisc/lib/memcpy.c	 19
4 files changed, 93 insertions(+), 11 deletions(-)
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile
index 5651536ac73..8fa92b8d839 100644
--- a/arch/parisc/lib/Makefile
+++ b/arch/parisc/lib/Makefile
@@ -3,6 +3,6 @@
#
lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
-	   ucmpdi2.o
+	   ucmpdi2.o delay.o
obj-y := iomap.o
diff --git a/arch/parisc/lib/delay.c b/arch/parisc/lib/delay.c
new file mode 100644
index 00000000000..ec9255f27a8
--- /dev/null
+++ b/arch/parisc/lib/delay.c
@@ -0,0 +1,73 @@
+/*
+ * Precise Delay Loops for parisc
+ *
+ * based on code by:
+ * Copyright (C) 1993 Linus Torvalds
+ * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ * Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
+ *
+ * parisc implementation:
+ * Copyright (C) 2013 Helge Deller <deller@gmx.de>
+ */
+
+
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/init.h>
+
+#include <asm/processor.h>
+#include <asm/delay.h>
+
+#include <asm/special_insns.h> /* for mfctl() */
+#include <asm/processor.h> /* for boot_cpu_data */
+
+/* CR16 based delay: */
+static void __cr16_delay(unsigned long __loops)
+{
+	/*
+	 * Note: Due to unsigned math, cr16 rollovers shouldn't be
+	 * a problem here. However, on 32 bit, we need to make sure
+	 * we don't pass in too big a value. The current default
+	 * value of MAX_UDELAY_MS should help prevent this.
+	 */
+	u32 bclock, now, loops = __loops;
+	int cpu;
+
+	preempt_disable();
+	cpu = smp_processor_id();
+	bclock = mfctl(16);
+	for (;;) {
+		now = mfctl(16);
+		if ((now - bclock) >= loops)
+			break;
+
+		/* Allow RT tasks to run */
+		preempt_enable();
+		asm volatile("	nop\n");
+		barrier();
+		preempt_disable();
+
+		/*
+		 * It is possible that we moved to another CPU, and
+		 * since CR16's are per-cpu we need to calculate
+		 * that. The delay must guarantee that we wait "at
+		 * least" the amount of time. Being moved to another
+		 * CPU could make the wait longer but we just need to
+		 * make sure we waited long enough. Rebalance the
+		 * counter for this CPU.
+		 */
+		if (unlikely(cpu != smp_processor_id())) {
+			loops -= (now - bclock);
+			cpu = smp_processor_id();
+			bclock = mfctl(16);
+		}
+	}
+	preempt_enable();
+}
+
+
+void __udelay(unsigned long usecs)
+{
+	__cr16_delay(usecs * ((unsigned long)boot_cpu_data.cpu_hz / 1000000UL));
+}
+EXPORT_SYMBOL(__udelay);
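
The conversion in __udelay() above scales microseconds to CR16 clock ticks using the CPU frequency read from boot_cpu_data.cpu_hz. A minimal userspace sketch of that same arithmetic, assuming a hypothetical fixed 250 MHz clock in place of boot_cpu_data, could look like:

#include <stdio.h>

/* Hypothetical clock rate; the kernel code reads boot_cpu_data.cpu_hz instead. */
#define CPU_HZ 250000000UL	/* 250 MHz */

/* Same arithmetic as __udelay(): ticks = usecs * (cpu_hz / 1000000) */
static unsigned long usecs_to_cr16_ticks(unsigned long usecs)
{
	return usecs * (CPU_HZ / 1000000UL);
}

int main(void)
{
	/* At 250 MHz: 100 us -> 25000 CR16 ticks */
	printf("%lu\n", usecs_to_cr16_ticks(100));
	return 0;
}

At 250 MHz this yields 250 ticks per microsecond, so a 100 us delay spins in __cr16_delay() until CR16 has advanced by 25000 ticks.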
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index 6f2d9355efe..a512f07d4fe 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -88,9 +88,7 @@ ENDPROC(lclear_user)
	ldo	1(%r25),%r25
	.previous
-	.section __ex_table,"aw"
-	ASM_ULONG_INSN 1b,2b
-	.previous
+	ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
	.procend
@@ -129,10 +127,8 @@ ENDPROC(lstrnlen_user)
	copy	%r24,%r26	/* reset r26 so 0 is returned on fault */
	.previous
-	.section __ex_table,"aw"
-	ASM_ULONG_INSN 1b,3b
-	ASM_ULONG_INSN 2b,3b
-	.previous
+	ASM_EXCEPTIONTABLE_ENTRY(1b,3b)
+	ASM_EXCEPTIONTABLE_ENTRY(2b,3b)
	.procend
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index ac4370b1ca4..413dc176929 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -56,7 +56,7 @@
#ifdef __KERNEL__
#include <linux/module.h>
#include <linux/compiler.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define s_space "%%sr1"
#define d_space "%%sr2"
#else
@@ -161,7 +161,7 @@ static inline void prefetch_dst(const void *addr)
/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
* per loop. This code is derived from glibc.
*/
-static inline unsigned long copy_dstaligned(unsigned long dst,
+static noinline unsigned long copy_dstaligned(unsigned long dst,
				unsigned long src, unsigned long len)
{
/* gcc complains that a2 and a3 may be uninitialized, but actually
@@ -276,7 +276,7 @@ handle_store_error:
/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
* In case of an access fault the faulty address can be read from the per_cpu
* exception data struct. */
-static unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
+static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
				unsigned long len)
{
register unsigned long src, dst, t1, t2, t3;
@@ -524,4 +524,17 @@ EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(copy_in_user);
EXPORT_SYMBOL(memcpy);
+
+long probe_kernel_read(void *dst, const void *src, size_t size)
+{
+	unsigned long addr = (unsigned long)src;
+
+	if (addr < PAGE_SIZE)
+		return -EFAULT;
+
+	/* check for I/O space F_EXTEND(0xfff00000) access as well? */
+
+	return __probe_kernel_read(dst, src, size);
+}
+
#endif
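
For context on the probe_kernel_read() override added to memcpy.c above: it rejects addresses in the NULL page up front and otherwise falls back to the generic __probe_kernel_read(), which already survives faulting accesses. A minimal, hypothetical in-kernel caller (not part of this patch; peek_kernel_word is an illustrative name) might be:

#include <linux/uaccess.h>
#include <linux/kernel.h>

/* Hypothetical helper: safely read one word of possibly-bad kernel memory.
 * Returns 0 and fills *val on success, or a negative error (e.g. -EFAULT)
 * if the source address faults or, with the parisc override above, lies
 * in the NULL page.
 */
static long peek_kernel_word(const void *addr, unsigned long *val)
{
	return probe_kernel_read(val, addr, sizeof(*val));
}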