Diffstat (limited to 'arch/microblaze/kernel')
-rw-r--r--  arch/microblaze/kernel/Makefile                 |    2
-rw-r--r--  arch/microblaze/kernel/asm-offsets.c            |    1
-rw-r--r--  arch/microblaze/kernel/cpu/cache.c              |  211
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo.c            |    1
-rw-r--r--  arch/microblaze/kernel/dma.c                    |  157
-rw-r--r--  arch/microblaze/kernel/entry.S                  |  116
-rw-r--r--  arch/microblaze/kernel/ftrace.c                 |   12
-rw-r--r--  arch/microblaze/kernel/head.S                   |   21
-rw-r--r--  arch/microblaze/kernel/hw_exception_handler.S   |  112
-rw-r--r--  arch/microblaze/kernel/irq.c                    |   15
-rw-r--r--  arch/microblaze/kernel/misc.S                   |   15
-rw-r--r--  arch/microblaze/kernel/module.c                 |    1
-rw-r--r--  arch/microblaze/kernel/of_platform.c            |    1
-rw-r--r--  arch/microblaze/kernel/process.c                |   10
-rw-r--r--  arch/microblaze/kernel/ptrace.c                 |    1
-rw-r--r--  arch/microblaze/kernel/setup.c                  |   69
-rw-r--r--  arch/microblaze/kernel/sys_microblaze.c         |    1
-rw-r--r--  arch/microblaze/kernel/traps.c                  |    6
18 files changed, 539 insertions(+), 213 deletions(-)
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index b07594eccf9..e51bc152082 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -14,7 +14,7 @@ endif
extra-y := head.o vmlinux.lds
-obj-y += exceptions.o \
+obj-y += dma.o exceptions.o \
hw_exception_handler.o init_task.o intc.o irq.o of_device.o \
of_platform.o process.o prom.o prom_parse.o ptrace.o \
setup.o signal.o sys_microblaze.o timer.o traps.o reset.o
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
index 7bc7b68f97d..0071260a672 100644
--- a/arch/microblaze/kernel/asm-offsets.c
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -90,6 +90,7 @@ int main(int argc, char *argv[])
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context));
+ DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count));
BLANK();
/* struct cpu_context */
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index 2a56bccce4e..f04d8a86dea 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -15,25 +15,6 @@
#include <asm/cpuinfo.h>
#include <asm/pvr.h>
-static inline void __invalidate_flush_icache(unsigned int addr)
-{
- __asm__ __volatile__ ("wic %0, r0;" \
- : : "r" (addr));
-}
-
-static inline void __flush_dcache(unsigned int addr)
-{
- __asm__ __volatile__ ("wdc.flush %0, r0;" \
- : : "r" (addr));
-}
-
-static inline void __invalidate_dcache(unsigned int baseaddr,
- unsigned int offset)
-{
- __asm__ __volatile__ ("wdc.clear %0, %1;" \
- : : "r" (baseaddr), "r" (offset));
-}
-
static inline void __enable_icache_msr(void)
{
__asm__ __volatile__ (" msrset r0, %0; \
@@ -148,9 +129,9 @@ do { \
int step = -line_length; \
BUG_ON(step >= 0); \
\
- __asm__ __volatile__ (" 1: " #op " r0, %0; \
- bgtid %0, 1b; \
- addk %0, %0, %1; \
+ __asm__ __volatile__ (" 1: " #op " r0, %0; \
+ bgtid %0, 1b; \
+ addk %0, %0, %1; \
" : : "r" (len), "r" (step) \
: "memory"); \
} while (0);
@@ -162,9 +143,9 @@ do { \
int count = end - start; \
BUG_ON(count <= 0); \
\
- __asm__ __volatile__ (" 1: " #op " %0, %1; \
- bgtid %1, 1b; \
- addk %1, %1, %2; \
+ __asm__ __volatile__ (" 1: " #op " %0, %1; \
+ bgtid %1, 1b; \
+ addk %1, %1, %2; \
" : : "r" (start), "r" (count), \
"r" (step) : "memory"); \
} while (0);
@@ -175,7 +156,7 @@ do { \
int volatile temp; \
BUG_ON(end - start <= 0); \
\
- __asm__ __volatile__ (" 1: " #op " %1, r0; \
+ __asm__ __volatile__ (" 1: " #op " %1, r0; \
cmpu %0, %1, %2; \
bgtid %0, 1b; \
addk %1, %1, %3; \
@@ -183,10 +164,14 @@ do { \
"r" (line_length) : "memory"); \
} while (0);
+#define ASM_LOOP
+
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
unsigned long flags;
-
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
@@ -196,8 +181,13 @@ static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
local_irq_save(flags);
__disable_icache_msr();
+#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
-
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (i));
+#endif
__enable_icache_msr();
local_irq_restore(flags);
}
@@ -206,7 +196,9 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
unsigned long end)
{
unsigned long flags;
-
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
@@ -216,7 +208,13 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
local_irq_save(flags);
__disable_icache_nomsr();
+#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (i));
+#endif
__enable_icache_nomsr();
local_irq_restore(flags);
@@ -225,25 +223,41 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
static void __flush_icache_range_noirq(unsigned long start,
unsigned long end)
{
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
cpuinfo.icache_line_length, cpuinfo.icache_size);
+#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (i));
+#endif
}
static void __flush_icache_all_msr_irq(void)
{
unsigned long flags;
-
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s\n", __func__);
local_irq_save(flags);
__disable_icache_msr();
-
+#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
-
+#else
+ for (i = 0; i < cpuinfo.icache_size;
+ i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (i));
+#endif
__enable_icache_msr();
local_irq_restore(flags);
}
@@ -251,35 +265,59 @@ static void __flush_icache_all_msr_irq(void)
static void __flush_icache_all_nomsr_irq(void)
{
unsigned long flags;
-
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s\n", __func__);
local_irq_save(flags);
__disable_icache_nomsr();
-
+#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
-
+#else
+ for (i = 0; i < cpuinfo.icache_size;
+ i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (i));
+#endif
__enable_icache_nomsr();
local_irq_restore(flags);
}
static void __flush_icache_all_noirq(void)
{
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+#else
+ for (i = 0; i < cpuinfo.icache_size;
+ i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (i));
+#endif
}
static void __invalidate_dcache_all_msr_irq(void)
{
unsigned long flags;
-
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s\n", __func__);
local_irq_save(flags);
__disable_dcache_msr();
-
+#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
-
+#else
+ for (i = 0; i < cpuinfo.dcache_size;
+ i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
__enable_dcache_msr();
local_irq_restore(flags);
}
@@ -287,60 +325,107 @@ static void __invalidate_dcache_all_msr_irq(void)
static void __invalidate_dcache_all_nomsr_irq(void)
{
unsigned long flags;
-
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s\n", __func__);
local_irq_save(flags);
__disable_dcache_nomsr();
-
+#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
-
+#else
+ for (i = 0; i < cpuinfo.dcache_size;
+ i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
__enable_dcache_nomsr();
local_irq_restore(flags);
}
static void __invalidate_dcache_all_noirq_wt(void)
{
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
+#else
+ for (i = 0; i < cpuinfo.dcache_size;
+ i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
}
/* FIXME this is weird - should be only wdc but not work
* MS: I am getting bus errors and other weird things */
static void __invalidate_dcache_all_wb(void)
{
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
wdc.clear)
+#else
+ for (i = 0; i < cpuinfo.dcache_size;
+ i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc.clear %0, r0;" \
+ : : "r" (i));
+#endif
}
static void __invalidate_dcache_range_wb(unsigned long start,
unsigned long end)
{
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+#ifdef ASM_LOOP
CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wdc.clear %0, r0;" \
+ : : "r" (i));
+#endif
}
static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
unsigned long end)
{
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
}
static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
unsigned long end)
{
unsigned long flags;
-
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
@@ -349,7 +434,13 @@ static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
local_irq_save(flags);
__disable_dcache_msr();
+#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
__enable_dcache_msr();
local_irq_restore(flags);
@@ -359,7 +450,9 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
unsigned long end)
{
unsigned long flags;
-
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
@@ -369,7 +462,13 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
local_irq_save(flags);
__disable_dcache_nomsr();
+#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
__enable_dcache_nomsr();
local_irq_restore(flags);
@@ -377,19 +476,38 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
static void __flush_dcache_all_wb(void)
{
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
wdc.flush);
+#else
+ for (i = 0; i < cpuinfo.dcache_size;
+ i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc.flush %0, r0;" \
+ : : "r" (i));
+#endif
}
static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+#ifdef ASM_LOOP
CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wdc.flush %0, r0;" \
+ : : "r" (i));
+#endif
}
/* struct for wb caches and for wt caches */
@@ -493,7 +611,7 @@ const struct scache wt_nomsr_noirq = {
#define CPUVER_7_20_A 0x0c
#define CPUVER_7_20_D 0x0f
-#define INFO(s) printk(KERN_INFO "cache: " s " \n");
+#define INFO(s) printk(KERN_INFO "cache: " s "\n");
void microblaze_cache_init(void)
{
@@ -532,4 +650,9 @@ void microblaze_cache_init(void)
}
}
}
+ invalidate_dcache();
+ enable_dcache();
+
+ invalidate_icache();
+ enable_icache();
}
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 991d71311b0..255ef880351 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -9,7 +9,6 @@
*/
#include <linux/init.h>
-#include <linux/slab.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
new file mode 100644
index 00000000000..ce72dd4967c
--- /dev/null
+++ b/arch/microblaze/kernel/dma.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2009-2010 PetaLogix
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * Provide default implementations of the DMA mapping callbacks for
+ * directly mapped busses.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/dma-debug.h>
+#include <asm/bug.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Generic direct DMA implementation
+ *
+ * This implementation supports a per-device offset that can be applied if
+ * the address at which memory is visible to devices is not 0. Platform code
+ * can set archdata.dma_data to an unsigned long holding the offset. By
+ * default the offset is PCI_DRAM_OFFSET.
+ */
+static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
+ size_t size, enum dma_data_direction direction)
+{
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ flush_dcache_range(paddr + offset, paddr + offset + size);
+ break;
+ case DMA_FROM_DEVICE:
+ invalidate_dcache_range(paddr + offset, paddr + offset + size);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static unsigned long get_dma_direct_offset(struct device *dev)
+{
+ if (likely(dev))
+ return (unsigned long)dev->archdata.dma_data;
+
+ return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */
+}
+
+#define NOT_COHERENT_CACHE
+
+static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+#ifdef NOT_COHERENT_CACHE
+ return consistent_alloc(flag, size, dma_handle);
+#else
+ void *ret;
+ struct page *page;
+ int node = dev_to_node(dev);
+
+ /* ignore region specifiers */
+ flag &= ~(__GFP_HIGHMEM);
+
+ page = alloc_pages_node(node, flag, get_order(size));
+ if (page == NULL)
+ return NULL;
+ ret = page_address(page);
+ memset(ret, 0, size);
+ *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);
+
+ return ret;
+#endif
+}
+
+static void dma_direct_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+#ifdef NOT_COHERENT_CACHE
+ consistent_free(vaddr);
+#else
+ free_pages((unsigned long)vaddr, get_order(size));
+#endif
+}
+
+static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ /* FIXME this part of code is untested */
+ for_each_sg(sgl, sg, nents, i) {
+ sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
+ sg->dma_length = sg->length;
+ __dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
+ sg->length, direction);
+ }
+
+ return nents;
+}
+
+static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+}
+
+static int dma_direct_dma_supported(struct device *dev, u64 mask)
+{
+ return 1;
+}
+
+static inline dma_addr_t dma_direct_map_page(struct device *dev,
+ struct page *page,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ __dma_sync_page(page_to_phys(page), offset, size, direction);
+ return page_to_phys(page) + offset + get_dma_direct_offset(dev);
+}
+
+static inline void dma_direct_unmap_page(struct device *dev,
+ dma_addr_t dma_address,
+ size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+/* There is not necessary to do cache cleanup
+ *
+ * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and
+ * dma_address is physical address
+ */
+ __dma_sync_page(dma_address, 0 , size, direction);
+}
+
+struct dma_map_ops dma_direct_ops = {
+ .alloc_coherent = dma_direct_alloc_coherent,
+ .free_coherent = dma_direct_free_coherent,
+ .map_sg = dma_direct_map_sg,
+ .unmap_sg = dma_direct_unmap_sg,
+ .dma_supported = dma_direct_dma_supported,
+ .map_page = dma_direct_map_page,
+ .unmap_page = dma_direct_unmap_page,
+};
+EXPORT_SYMBOL(dma_direct_ops);
+
+/* Number of entries preallocated for DMA-API debugging */
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init dma_init(void)
+{
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+ return 0;
+}
+fs_initcall(dma_init);
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 3bad4ff4947..c0ede25c5b9 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -305,7 +305,7 @@ C_ENTRY(_user_exception):
swi r11, r1, PTO+PT_R1; /* Store user SP. */
addi r11, r0, 1;
swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
-2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
/* Save away the syscall number. */
swi r12, r1, PTO+PT_R0;
tovirt(r1,r1)
@@ -322,8 +322,7 @@ C_ENTRY(_user_exception):
rtid r11, 0
nop
3:
- add r11, r0, CURRENT_TASK /* Get current task ptr into r11 */
- lwi r11, r11, TS_THREAD_INFO /* get thread info */
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
lwi r11, r11, TI_FLAGS /* get flags in thread info */
andi r11, r11, _TIF_WORK_SYSCALL_MASK
beqi r11, 4f
@@ -382,60 +381,50 @@ C_ENTRY(ret_from_trap):
/* See if returning to kernel mode, if so, skip resched &c. */
bnei r11, 2f;
+ swi r3, r1, PTO + PT_R3
+ swi r4, r1, PTO + PT_R4
+
/* We're returning to user mode, so check for various conditions that
* trigger rescheduling. */
- # FIXME: Restructure all these flag checks.
- add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+ /* FIXME: Restructure all these flag checks. */
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_WORK_SYSCALL_MASK
beqi r11, 1f
- swi r3, r1, PTO + PT_R3
- swi r4, r1, PTO + PT_R4
brlid r15, do_syscall_trace_leave
addik r5, r1, PTO + PT_R0
- lwi r3, r1, PTO + PT_R3
- lwi r4, r1, PTO + PT_R4
1:
-
/* We're returning to user mode, so check for various conditions that
* trigger rescheduling. */
- /* Get current task ptr into r11 */
- add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+ /* get thread info from current task */
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO;
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f;
- swi r3, r1, PTO + PT_R3; /* store syscall result */
- swi r4, r1, PTO + PT_R4;
bralid r15, schedule; /* Call scheduler */
nop; /* delay slot */
- lwi r3, r1, PTO + PT_R3; /* restore syscall result */
- lwi r4, r1, PTO + PT_R4;
/* Maybe handle a signal */
-5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+5: /* get thread info from current task*/
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO;
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqi r11, 1f; /* Signals to handle, handle them */
- swi r3, r1, PTO + PT_R3; /* store syscall result */
- swi r4, r1, PTO + PT_R4;
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
- add r6, r0, r0; /* Arg 2: sigset_t *oldset */
addi r7, r0, 1; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
- nop;
+ add r6, r0, r0; /* Arg 2: sigset_t *oldset */
+
+/* Finally, return to user state. */
+1:
lwi r3, r1, PTO + PT_R3; /* restore syscall result */
lwi r4, r1, PTO + PT_R4;
-/* Finally, return to user state. */
-1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
- add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
+ swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
+ swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
RESTORE_REGS;
@@ -565,7 +554,7 @@ C_ENTRY(sys_rt_sigreturn_wrapper):
swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
addi r11, r0, 1; \
swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
-2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
+2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \
/* Save away the syscall number. */ \
swi r0, r1, PTO+PT_R0; \
tovirt(r1,r1)
@@ -673,9 +662,7 @@ C_ENTRY(ret_from_exc):
/* We're returning to user mode, so check for various conditions that
trigger rescheduling. */
- /* Get current task ptr into r11 */
- add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f;
@@ -685,8 +672,7 @@ C_ENTRY(ret_from_exc):
nop; /* delay slot */
/* Maybe handle a signal */
-5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqi r11, 1f; /* Signals to handle, handle them */
@@ -705,15 +691,13 @@ C_ENTRY(ret_from_exc):
* store return registers separately because this macros is use
* for others exceptions */
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
- add r6, r0, r0; /* Arg 2: sigset_t *oldset */
addi r7, r0, 0; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
- nop;
+ add r6, r0, r0; /* Arg 2: sigset_t *oldset */
/* Finally, return to user state. */
1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
- add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
+ swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
@@ -802,7 +786,7 @@ C_ENTRY(_interrupt):
swi r11, r0, TOPHYS(PER_CPU(KM));
2:
- lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+ lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
swi r0, r1, PTO + PT_R0;
tovirt(r1,r1)
la r5, r1, PTO;
@@ -817,8 +801,7 @@ ret_from_irq:
lwi r11, r1, PTO + PT_MODE;
bnei r11, 2f;
- add r11, r0, CURRENT_TASK;
- lwi r11, r11, TS_THREAD_INFO;
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO;
lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f
@@ -826,8 +809,7 @@ ret_from_irq:
nop; /* delay slot */
/* Maybe handle a signal */
-5: add r11, r0, CURRENT_TASK;
- lwi r11, r11, TS_THREAD_INFO; /* MS: get thread info */
+5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqid r11, no_intr_resched
@@ -842,8 +824,7 @@ no_intr_resched:
/* Disable interrupts, we are now committed to the state restore */
disable_irq
swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
- add r11, r0, CURRENT_TASK;
- swi r11, r0, PER_CPU(CURRENT_SAVE);
+ swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
VM_OFF;
tophys(r1,r1);
lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
@@ -853,7 +834,28 @@ no_intr_resched:
lwi r1, r1, PT_R1 - PT_SIZE;
bri 6f;
/* MS: Return to kernel state. */
-2: VM_OFF /* MS: turn off MMU */
+2:
+#ifdef CONFIG_PREEMPT
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO;
+ /* MS: get preempt_count from thread info */
+ lwi r5, r11, TI_PREEMPT_COUNT;
+ bgti r5, restore;
+
+ lwi r5, r11, TI_FLAGS; /* get flags in thread info */
+ andi r5, r5, _TIF_NEED_RESCHED;
+ beqi r5, restore /* if zero jump over */
+
+preempt:
+ /* interrupts are off that's why I am calling preempt_chedule_irq */
+ bralid r15, preempt_schedule_irq
+ nop
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
+ lwi r5, r11, TI_FLAGS; /* get flags in thread info */
+ andi r5, r5, _TIF_NEED_RESCHED;
+ bnei r5, preempt /* if non zero jump to resched */
+restore:
+#endif
+ VM_OFF /* MS: turn off MMU */
tophys(r1,r1)
lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
lwi r4, r1, PTO + PT_R4;
@@ -915,7 +917,7 @@ C_ENTRY(_debug_exception):
swi r11, r1, PTO+PT_R1; /* Store user SP. */
addi r11, r0, 1;
swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
-2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
/* Save away the syscall number. */
swi r0, r1, PTO+PT_R0;
tovirt(r1,r1)
@@ -935,8 +937,7 @@ dbtrap_call: rtbd r11, 0;
bnei r11, 2f;
/* Get current task ptr into r11 */
- add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f;
@@ -949,8 +950,7 @@ dbtrap_call: rtbd r11, 0;
/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
/* Maybe handle a signal */
-5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqi r11, 1f; /* Signals to handle, handle them */
@@ -966,16 +966,14 @@ dbtrap_call: rtbd r11, 0;
(in a possibly modified form) after do_signal returns. */
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
- add r6, r0, r0; /* Arg 2: sigset_t *oldset */
addi r7, r0, 0; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
- nop;
+ add r6, r0, r0; /* Arg 2: sigset_t *oldset */
/* Finally, return to user state. */
1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
- add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
+ swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
@@ -1007,7 +1005,7 @@ DBTRAP_return: /* Make global symbol for debugging */
ENTRY(_switch_to)
/* prepare return value */
- addk r3, r0, r31
+ addk r3, r0, CURRENT_TASK
/* save registers in cpu_context */
/* use r11 and r12, volatile registers, as temp register */
@@ -1051,10 +1049,10 @@ ENTRY(_switch_to)
nop
swi r12, r11, CC_FSR
- /* update r31, the current */
- lwi r31, r6, TI_TASK/* give me pointer to task which will be next */
+ /* update r31, the current-give me pointer to task which will be next */
+ lwi CURRENT_TASK, r6, TI_TASK
/* stored it to current_save too */
- swi r31, r0, PER_CPU(CURRENT_SAVE)
+ swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
/* get new process' cpu context and restore */
/* give me start where start context of next task */
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c
index 388b31ca65a..515feb40455 100644
--- a/arch/microblaze/kernel/ftrace.c
+++ b/arch/microblaze/kernel/ftrace.c
@@ -151,13 +151,10 @@ int ftrace_make_nop(struct module *mod,
return ret;
}
-static int ret_addr; /* initialized as 0 by default */
-
/* I believe that first is called ftrace_make_nop before this function */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
int ret;
- ret_addr = addr; /* saving where the barrier jump is */
pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n",
__func__, (unsigned int)addr, (unsigned int)rec->ip, imm);
ret = ftrace_modify_code(rec->ip, imm);
@@ -194,12 +191,9 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
ret = ftrace_modify_code(ip, upper);
ret += ftrace_modify_code(ip + 4, lower);
- /* We just need to remove the rtsd r15, 8 by NOP */
- BUG_ON(!ret_addr);
- if (ret_addr)
- ret += ftrace_modify_code(ret_addr, MICROBLAZE_NOP);
- else
- ret = 1; /* fault */
+ /* We just need to replace the rtsd r15, 8 with NOP */
+ ret += ftrace_modify_code((unsigned long)&ftrace_caller,
+ MICROBLAZE_NOP);
/* All changes are done - lets do caches consistent */
flush_icache();
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 30916193fcc..da6a5f5dc76 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -51,6 +51,12 @@ swapper_pg_dir:
.text
ENTRY(_start)
+#if CONFIG_KERNEL_BASE_ADDR == 0
+ brai TOPHYS(real_start)
+ .org 0x100
+real_start:
+#endif
+
mfs r1, rmsr
andi r1, r1, ~2
mts rmsr, r1
@@ -99,8 +105,8 @@ no_fdt_arg:
tophys(r4,r4) /* convert to phys address */
ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
_copy_command_line:
- lbu r7, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */
- sb r7, r4, r6 /* addr[r4+r6]= r7*/
+ lbu r2, r5, r6 /* r2=r5+r6 - r5 contain pointer to command line */
+ sb r2, r4, r6 /* addr[r4+r6]= r2*/
addik r6, r6, 1 /* increment counting */
bgtid r3, _copy_command_line /* loop for all entries */
addik r3, r3, -1 /* descrement loop */
@@ -128,7 +134,7 @@ _copy_bram:
* virtual to physical.
*/
nop
- addik r3, r0, 63 /* Invalidate all TLB entries */
+ addik r3, r0, MICROBLAZE_TLB_SIZE -1 /* Invalidate all TLB entries */
_invalidate:
mts rtlbx, r3
mts rtlbhi, r0 /* flush: ensure V is clear */
@@ -136,6 +142,11 @@ _invalidate:
addik r3, r3, -1
/* sync */
+ /* Setup the kernel PID */
+ mts rpid,r0 /* Load the kernel PID */
+ nop
+ bri 4
+
/*
* We should still be executing code at physical address area
* RAM_BASEADDR at this point. However, kernel code is at
@@ -146,10 +157,6 @@ _invalidate:
addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
tophys(r4,r3) /* Load the kernel physical address */
- mts rpid,r0 /* Load the kernel PID */
- nop
- bri 4
-
/*
* Configure and load two entries into TLB slots 0 and 1.
* In case we are pinning TLBs, these are reserved in by the
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 2b86c03aa84..995a2123635 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -313,13 +313,13 @@ _hw_exception_handler:
mfs r5, rmsr;
nop
swi r5, r1, 0;
- mfs r3, resr
+ mfs r4, resr
nop
- mfs r4, rear;
+ mfs r3, rear;
nop
#ifndef CONFIG_MMU
- andi r5, r3, 0x1000; /* Check ESR[DS] */
+ andi r5, r4, 0x1000; /* Check ESR[DS] */
beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */
mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
nop
@@ -327,13 +327,14 @@ not_in_delay_slot:
swi r17, r1, PT_R17
#endif
- andi r5, r3, 0x1F; /* Extract ESR[EXC] */
+ andi r5, r4, 0x1F; /* Extract ESR[EXC] */
#ifdef CONFIG_MMU
/* Calculate exception vector offset = r5 << 2 */
addk r6, r5, r5; /* << 1 */
addk r6, r6, r6; /* << 2 */
+#ifdef DEBUG
/* counting which exception happen */
lwi r5, r0, 0x200 + TOPHYS(r0_ram)
addi r5, r5, 1
@@ -341,6 +342,7 @@ not_in_delay_slot:
lwi r5, r6, 0x200 + TOPHYS(r0_ram)
addi r5, r5, 1
swi r5, r6, 0x200 + TOPHYS(r0_ram)
+#endif
/* end */
/* Load the HW Exception vector */
lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
@@ -376,7 +378,7 @@ handle_other_ex: /* Handle Other exceptions here */
swi r18, r1, PT_R18
or r5, r1, r0
- andi r6, r3, 0x1F; /* Load ESR[EC] */
+ andi r6, r4, 0x1F; /* Load ESR[EC] */
lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */
swi r7, r1, PT_MODE
mfs r7, rfsr
@@ -426,11 +428,11 @@ handle_other_ex: /* Handle Other exceptions here */
*/
handle_unaligned_ex:
/* Working registers already saved: R3, R4, R5, R6
- * R3 = ESR
- * R4 = EAR
+ * R4 = ESR
+ * R3 = EAR
*/
#ifdef CONFIG_MMU
- andi r6, r3, 0x1000 /* Check ESR[DS] */
+ andi r6, r4, 0x1000 /* Check ESR[DS] */
beqi r6, _no_delayslot /* Branch if ESR[DS] not set */
mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
nop
@@ -439,7 +441,7 @@ _no_delayslot:
RESTORE_STATE;
bri unaligned_data_trap
#endif
- andi r6, r3, 0x3E0; /* Mask and extract the register operand */
+ andi r6, r4, 0x3E0; /* Mask and extract the register operand */
srl r6, r6; /* r6 >> 5 */
srl r6, r6;
srl r6, r6;
@@ -448,33 +450,33 @@ _no_delayslot:
/* Store the register operand in a temporary location */
sbi r6, r0, TOPHYS(ex_reg_op);
- andi r6, r3, 0x400; /* Extract ESR[S] */
+ andi r6, r4, 0x400; /* Extract ESR[S] */
bnei r6, ex_sw;
ex_lw:
- andi r6, r3, 0x800; /* Extract ESR[W] */
+ andi r6, r4, 0x800; /* Extract ESR[W] */
beqi r6, ex_lhw;
- lbui r5, r4, 0; /* Exception address in r4 */
+ lbui r5, r3, 0; /* Exception address in r3 */
/* Load a word, byte-by-byte from destination address
and save it in tmp space */
sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
- lbui r5, r4, 1;
+ lbui r5, r3, 1;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
- lbui r5, r4, 2;
+ lbui r5, r3, 2;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_2);
- lbui r5, r4, 3;
+ lbui r5, r3, 3;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_3);
- /* Get the destination register value into r3 */
- lwi r3, r0, TOPHYS(ex_tmp_data_loc_0);
+ /* Get the destination register value into r4 */
+ lwi r4, r0, TOPHYS(ex_tmp_data_loc_0);
bri ex_lw_tail;
ex_lhw:
- lbui r5, r4, 0; /* Exception address in r4 */
+ lbui r5, r3, 0; /* Exception address in r3 */
/* Load a half-word, byte-by-byte from destination
address and save it in tmp space */
sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
- lbui r5, r4, 1;
+ lbui r5, r3, 1;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
- /* Get the destination register value into r3 */
- lhui r3, r0, TOPHYS(ex_tmp_data_loc_0);
+ /* Get the destination register value into r4 */
+ lhui r4, r0, TOPHYS(ex_tmp_data_loc_0);
ex_lw_tail:
/* Get the destination register number into r5 */
lbui r5, r0, TOPHYS(ex_reg_op);
@@ -502,25 +504,25 @@ ex_sw_tail:
andi r6, r6, 0x800; /* Extract ESR[W] */
beqi r6, ex_shw;
/* Get the word - delay slot */
- swi r3, r0, TOPHYS(ex_tmp_data_loc_0);
+ swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
/* Store the word, byte-by-byte into destination address */
- lbui r3, r0, TOPHYS(ex_tmp_data_loc_0);
- sbi r3, r4, 0;
- lbui r3, r0, TOPHYS(ex_tmp_data_loc_1);
- sbi r3, r4, 1;
- lbui r3, r0, TOPHYS(ex_tmp_data_loc_2);
- sbi r3, r4, 2;
- lbui r3, r0, TOPHYS(ex_tmp_data_loc_3);
- sbi r3, r4, 3;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_0);
+ sbi r4, r3, 0;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_1);
+ sbi r4, r3, 1;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
+ sbi r4, r3, 2;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
+ sbi r4, r3, 3;
bri ex_handler_done;
ex_shw:
/* Store the lower half-word, byte-by-byte into destination address */
- swi r3, r0, TOPHYS(ex_tmp_data_loc_0);
- lbui r3, r0, TOPHYS(ex_tmp_data_loc_2);
- sbi r3, r4, 0;
- lbui r3, r0, TOPHYS(ex_tmp_data_loc_3);
- sbi r3, r4, 1;
+ swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
+ sbi r4, r3, 0;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
+ sbi r4, r3, 1;
ex_sw_end: /* Exception handling of store word, ends. */
ex_handler_done:
@@ -560,21 +562,16 @@ ex_handler_done:
*/
mfs r11, rpid
nop
- bri 4
- mfs r3, rear /* Get faulting address */
- nop
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
- ori r4, r0, CONFIG_KERNEL_START
- cmpu r4, r3, r4
- bgti r4, ex3
+ ori r5, r0, CONFIG_KERNEL_START
+ cmpu r5, r3, r5
+ bgti r5, ex3
/* First, check if it was a zone fault (which means a user
* tried to access a kernel or read-protected page - always
* a SEGV). All other faults here must be stores, so no
* need to check ESR_S as well. */
- mfs r4, resr
- nop
andi r4, r4, 0x800 /* ESR_Z - zone protection */
bnei r4, ex2
@@ -589,8 +586,6 @@ ex_handler_done:
* tried to access a kernel or read-protected page - always
* a SEGV). All other faults here must be stores, so no
* need to check ESR_S as well. */
- mfs r4, resr
- nop
andi r4, r4, 0x800 /* ESR_Z */
bnei r4, ex2
/* get current task address */
@@ -665,8 +660,6 @@ ex_handler_done:
* R3 = ESR
*/
- mfs r3, rear /* Get faulting address */
- nop
RESTORE_STATE;
bri page_fault_instr_trap
@@ -677,18 +670,15 @@ ex_handler_done:
*/
handle_data_tlb_miss_exception:
/* Working registers already saved: R3, R4, R5, R6
- * R3 = ESR
+ * R3 = EAR, R4 = ESR
*/
mfs r11, rpid
nop
- bri 4
- mfs r3, rear /* Get faulting address */
- nop
/* If we are faulting a kernel address, we have to use the
* kernel page tables. */
- ori r4, r0, CONFIG_KERNEL_START
- cmpu r4, r3, r4
+ ori r6, r0, CONFIG_KERNEL_START
+ cmpu r4, r3, r6
bgti r4, ex5
ori r4, r0, swapper_pg_dir
mts rpid, r0 /* TLB will have 0 TID */
@@ -731,9 +721,8 @@ ex_handler_done:
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
+ brid finish_tlb_load
andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
-
- bri finish_tlb_load
ex7:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
@@ -754,9 +743,6 @@ ex_handler_done:
*/
mfs r11, rpid
nop
- bri 4
- mfs r3, rear /* Get faulting address */
- nop
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
@@ -792,7 +778,7 @@ ex_handler_done:
lwi r4, r5, 0 /* Get Linux PTE */
andi r6, r4, _PAGE_PRESENT
- beqi r6, ex7
+ beqi r6, ex10
ori r4, r4, _PAGE_ACCESSED
swi r4, r5, 0
@@ -805,9 +791,8 @@ ex_handler_done:
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
+ brid finish_tlb_load
andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
-
- bri finish_tlb_load
ex10:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
@@ -837,9 +822,9 @@ ex_handler_done:
andi r5, r5, (MICROBLAZE_TLB_SIZE-1)
ori r6, r0, 1
cmp r31, r5, r6
- blti r31, sem
+ blti r31, ex12
addik r5, r6, 1
- sem:
+ ex12:
/* MS: save back current TLB index */
swi r5, r0, TOPHYS(tlb_index)
@@ -859,7 +844,6 @@ ex_handler_done:
nop
/* Done...restore registers and get out of here. */
- ex12:
mts rpid, r11
nop
bri 4
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index 0f06034d1fe..6f39e2c001f 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -93,3 +93,18 @@ skip:
}
return 0;
}
+
+/* MS: There is no any advance mapping mechanism. We are using simple 32bit
+ intc without any cascades or any connection that's why mapping is 1:1 */
+unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq)
+{
+ return hwirq;
+}
+EXPORT_SYMBOL_GPL(irq_create_mapping);
+
+unsigned int irq_create_of_mapping(struct device_node *controller,
+ u32 *intspec, unsigned int intsize)
+{
+ return intspec[0];
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S
index df16c6287a8..7cf86498326 100644
--- a/arch/microblaze/kernel/misc.S
+++ b/arch/microblaze/kernel/misc.S
@@ -26,9 +26,10 @@
* We avoid flushing the pinned 0, 1 and possibly 2 entries.
*/
.globl _tlbia;
+.type _tlbia, @function
.align 4;
_tlbia:
- addik r12, r0, 63 /* flush all entries (63 - 3) */
+ addik r12, r0, MICROBLAZE_TLB_SIZE - 1 /* flush all entries (63 - 3) */
/* isync */
_tlbia_1:
mts rtlbx, r12
@@ -41,11 +42,13 @@ _tlbia_1:
/* sync */
rtsd r15, 8
nop
+ .size _tlbia, . - _tlbia
/*
* Flush MMU TLB for a particular address (in r5)
*/
.globl _tlbie;
+.type _tlbie, @function
.align 4;
_tlbie:
mts rtlbsx, r5 /* look up the address in TLB */
@@ -59,17 +62,20 @@ _tlbie_1:
rtsd r15, 8
nop
+ .size _tlbie, . - _tlbie
+
/*
* Allocate TLB entry for early console
*/
.globl early_console_reg_tlb_alloc;
+.type early_console_reg_tlb_alloc, @function
.align 4;
early_console_reg_tlb_alloc:
/*
* Load a TLB entry for the UART, so that microblaze_progress() can use
* the UARTs nice and early. We use a 4k real==virtual mapping.
*/
- ori r4, r0, 63
+ ori r4, r0, MICROBLAZE_TLB_SIZE - 1
mts rtlbx, r4 /* TLB slot 2 */
or r4,r5,r0
@@ -86,6 +92,8 @@ early_console_reg_tlb_alloc:
rtsd r15, 8
nop
+ .size early_console_reg_tlb_alloc, . - early_console_reg_tlb_alloc
+
/*
* Copy a whole page (4096 bytes).
*/
@@ -104,6 +112,7 @@ early_console_reg_tlb_alloc:
#define DCACHE_LINE_BYTES (4 * 4)
.globl copy_page;
+.type copy_page, @function
.align 4;
copy_page:
ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1
@@ -118,3 +127,5 @@ _copy_page_loop:
addik r11, r11, -1
rtsd r15, 8
nop
+
+ .size copy_page, . - copy_page
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c
index 5a45b1adfef..cbecf110dc3 100644
--- a/arch/microblaze/kernel/module.c
+++ b/arch/microblaze/kernel/module.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
-#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/string.h>
diff --git a/arch/microblaze/kernel/of_platform.c b/arch/microblaze/kernel/of_platform.c
index 1c6d684996d..0dc755286d3 100644
--- a/arch/microblaze/kernel/of_platform.c
+++ b/arch/microblaze/kernel/of_platform.c
@@ -17,7 +17,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 812f1bf06c9..09bed44dfcd 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -15,6 +15,7 @@
#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/pgalloc.h>
+#include <asm/uaccess.h> /* for USER_DS macros */
#include <asm/cacheflush.h>
void show_regs(struct pt_regs *regs)
@@ -74,7 +75,10 @@ __setup("hlt", hlt_setup);
void default_idle(void)
{
- if (!hlt_counter) {
+ if (likely(hlt_counter)) {
+ while (!need_resched())
+ cpu_relax();
+ } else {
clear_thread_flag(TIF_POLLING_NRFLAG);
smp_mb__after_clear_bit();
local_irq_disable();
@@ -82,9 +86,7 @@ void default_idle(void)
cpu_sleep();
local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG);
- } else
- while (!need_resched())
- cpu_relax();
+ }
}
void cpu_idle(void)
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c
index 6d6349a145f..a4a7770c614 100644
--- a/arch/microblaze/kernel/ptrace.c
+++ b/arch/microblaze/kernel/ptrace.c
@@ -75,7 +75,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
int rval;
unsigned long val = 0;
- unsigned long copied;
switch (request) {
/* Read/write the word at location ADDR in the registers. */
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index bb8c4b9ccb8..17c98dbcec8 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -22,7 +22,10 @@
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/param.h>
+#include <linux/pci.h>
#include <linux/cache.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/entry.h>
#include <asm/cpuinfo.h>
@@ -54,14 +57,10 @@ void __init setup_arch(char **cmdline_p)
microblaze_cache_init();
- invalidate_dcache();
- enable_dcache();
-
- invalidate_icache();
- enable_icache();
-
setup_memory();
+ xilinx_pci_init();
+
#if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER)
printk(KERN_NOTICE "Self modified code enable\n");
#endif
@@ -93,6 +92,12 @@ inline unsigned get_romfs_len(unsigned *addr)
}
#endif /* CONFIG_MTD_UCLINUX_EBSS */
+#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
+#define eprintk early_printk
+#else
+#define eprintk printk
+#endif
+
void __init machine_early_init(const char *cmdline, unsigned int ram,
unsigned int fdt, unsigned int msr)
{
@@ -140,32 +145,32 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
setup_early_printk(NULL);
#endif
- early_printk("Ramdisk addr 0x%08x, ", ram);
+ eprintk("Ramdisk addr 0x%08x, ", ram);
if (fdt)
- early_printk("FDT at 0x%08x\n", fdt);
+ eprintk("FDT at 0x%08x\n", fdt);
else
- early_printk("Compiled-in FDT at 0x%08x\n",
+ eprintk("Compiled-in FDT at 0x%08x\n",
(unsigned int)_fdt_start);
#ifdef CONFIG_MTD_UCLINUX
- early_printk("Found romfs @ 0x%08x (0x%08x)\n",
+ eprintk("Found romfs @ 0x%08x (0x%08x)\n",
romfs_base, romfs_size);
- early_printk("#### klimit %p ####\n", old_klimit);
+ eprintk("#### klimit %p ####\n", old_klimit);
BUG_ON(romfs_size < 0); /* What else can we do? */
- early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
+ eprintk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
romfs_size, romfs_base, (unsigned)&_ebss);
- early_printk("New klimit: 0x%08x\n", (unsigned)klimit);
+ eprintk("New klimit: 0x%08x\n", (unsigned)klimit);
#endif
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
if (msr)
- early_printk("!!!Your kernel has setup MSR instruction but "
+ eprintk("!!!Your kernel has setup MSR instruction but "
"CPU don't have it %d\n", msr);
#else
if (!msr)
- early_printk("!!!Your kernel not setup MSR instruction but "
+ eprintk("!!!Your kernel not setup MSR instruction but "
"CPU have it %d\n", msr);
#endif
@@ -188,3 +193,37 @@ static int microblaze_debugfs_init(void)
}
arch_initcall(microblaze_debugfs_init);
#endif
+
+static int dflt_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ /* We are only intereted in device addition */
+ if (action != BUS_NOTIFY_ADD_DEVICE)
+ return 0;
+
+ set_dma_ops(dev, &dma_direct_ops);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block dflt_plat_bus_notifier = {
+ .notifier_call = dflt_bus_notify,
+ .priority = INT_MAX,
+};
+
+static struct notifier_block dflt_of_bus_notifier = {
+ .notifier_call = dflt_bus_notify,
+ .priority = INT_MAX,
+};
+
+static int __init setup_bus_notifier(void)
+{
+ bus_register_notifier(&platform_bus_type, &dflt_plat_bus_notifier);
+ bus_register_notifier(&of_platform_bus_type, &dflt_of_bus_notifier);
+
+ return 0;
+}
+
+arch_initcall(setup_bus_notifier);
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index 9f3c205fb75..f4e00b7f125 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -30,6 +30,7 @@
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/unistd.h>
+#include <linux/slab.h>
#include <asm/syscalls.h>
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
index eaaaf805f31..5e4570ef515 100644
--- a/arch/microblaze/kernel/traps.c
+++ b/arch/microblaze/kernel/traps.c
@@ -22,13 +22,11 @@ void trap_init(void)
__enable_hw_exceptions();
}
-static int kstack_depth_to_print = 24;
+static unsigned long kstack_depth_to_print = 24;
static int __init kstack_setup(char *s)
{
- kstack_depth_to_print = strict_strtoul(s, 0, NULL);
-
- return 1;
+ return !strict_strtoul(s, 0, &kstack_depth_to_print);
}
__setup("kstack=", kstack_setup);