Diffstat (limited to 'arch/microblaze/kernel')
-rw-r--r--  arch/microblaze/kernel/Makefile                |   2
-rw-r--r--  arch/microblaze/kernel/asm-offsets.c           |   1
-rw-r--r--  arch/microblaze/kernel/cpu/cache.c             | 234
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo.c           |   1
-rw-r--r--  arch/microblaze/kernel/dma.c                   | 157
-rw-r--r--  arch/microblaze/kernel/entry-nommu.S           |  10
-rw-r--r--  arch/microblaze/kernel/entry.S                 | 116
-rw-r--r--  arch/microblaze/kernel/ftrace.c                |  12
-rw-r--r--  arch/microblaze/kernel/head.S                  |  21
-rw-r--r--  arch/microblaze/kernel/hw_exception_handler.S  | 112
-rw-r--r--  arch/microblaze/kernel/irq.c                   |  15
-rw-r--r--  arch/microblaze/kernel/misc.S                  |  15
-rw-r--r--  arch/microblaze/kernel/module.c                |   1
-rw-r--r--  arch/microblaze/kernel/of_platform.c           |   3
-rw-r--r--  arch/microblaze/kernel/process.c               |  10
-rw-r--r--  arch/microblaze/kernel/prom.c                  | 990
-rw-r--r--  arch/microblaze/kernel/prom_parse.c            |   2
-rw-r--r--  arch/microblaze/kernel/ptrace.c                |  65
-rw-r--r--  arch/microblaze/kernel/setup.c                 |  68
-rw-r--r--  arch/microblaze/kernel/sys_microblaze.c        |   1
-rw-r--r--  arch/microblaze/kernel/syscall_table.S         |   2
-rw-r--r--  arch/microblaze/kernel/traps.c                 |   6
22 files changed, 568 insertions, 1276 deletions
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index b07594eccf9..e51bc152082 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -14,7 +14,7 @@ endif
extra-y := head.o vmlinux.lds
-obj-y += exceptions.o \
+obj-y += dma.o exceptions.o \
hw_exception_handler.o init_task.o intc.o irq.o of_device.o \
of_platform.o process.o prom.o prom_parse.o ptrace.o \
setup.o signal.o sys_microblaze.o timer.o traps.o reset.o
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
index 7bc7b68f97d..0071260a672 100644
--- a/arch/microblaze/kernel/asm-offsets.c
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -90,6 +90,7 @@ int main(int argc, char *argv[])
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context));
+ DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count));
BLANK();
/* struct cpu_context */
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index d9d63831cc2..f04d8a86dea 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -15,25 +15,6 @@
#include <asm/cpuinfo.h>
#include <asm/pvr.h>
-static inline void __invalidate_flush_icache(unsigned int addr)
-{
- __asm__ __volatile__ ("wic %0, r0;" \
- : : "r" (addr));
-}
-
-static inline void __flush_dcache(unsigned int addr)
-{
- __asm__ __volatile__ ("wdc.flush %0, r0;" \
- : : "r" (addr));
-}
-
-static inline void __invalidate_dcache(unsigned int baseaddr,
- unsigned int offset)
-{
- __asm__ __volatile__ ("wdc.clear %0, %1;" \
- : : "r" (baseaddr), "r" (offset));
-}
-
static inline void __enable_icache_msr(void)
{
__asm__ __volatile__ (" msrset r0, %0; \
@@ -148,9 +129,9 @@ do { \
int step = -line_length; \
BUG_ON(step >= 0); \
\
- __asm__ __volatile__ (" 1: " #op " r0, %0; \
- bgtid %0, 1b; \
- addk %0, %0, %1; \
+ __asm__ __volatile__ (" 1: " #op " r0, %0; \
+ bgtid %0, 1b; \
+ addk %0, %0, %1; \
" : : "r" (len), "r" (step) \
: "memory"); \
} while (0);
@@ -162,9 +143,9 @@ do { \
int count = end - start; \
BUG_ON(count <= 0); \
\
- __asm__ __volatile__ (" 1: " #op " %0, %1; \
- bgtid %1, 1b; \
- addk %1, %1, %2; \
+ __asm__ __volatile__ (" 1: " #op " %0, %1; \
+ bgtid %1, 1b; \
+ addk %1, %1, %2; \
" : : "r" (start), "r" (count), \
"r" (step) : "memory"); \
} while (0);
@@ -172,22 +153,25 @@ do { \
/* It is used only first parameter for OP - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
do { \
- int step = -line_length; \
- int count = end - start; \
- BUG_ON(count <= 0); \
+ int volatile temp; \
+ BUG_ON(end - start <= 0); \
\
- __asm__ __volatile__ (" 1: addk %0, %0, %1; \
- " #op " %0, r0; \
- bgtid %1, 1b; \
- addk %1, %1, %2; \
- " : : "r" (start), "r" (count), \
- "r" (step) : "memory"); \
+ __asm__ __volatile__ (" 1: " #op " %1, r0; \
+ cmpu %0, %1, %2; \
+ bgtid %0, 1b; \
+ addk %1, %1, %3; \
+ " : : "r" (temp), "r" (start), "r" (end),\
+ "r" (line_length) : "memory"); \
} while (0);
+#define ASM_LOOP
+
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
unsigned long flags;
-
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
@@ -197,8 +181,13 @@ static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
local_irq_save(flags);
__disable_icache_msr();
+#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
-
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (i));
+#endif
__enable_icache_msr();
local_irq_restore(flags);
}
@@ -207,7 +196,9 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
unsigned long end)
{
unsigned long flags;
-
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
@@ -217,7 +208,13 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
local_irq_save(flags);
__disable_icache_nomsr();
+#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (i));
+#endif
__enable_icache_nomsr();
local_irq_restore(flags);
@@ -226,25 +223,41 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
static void __flush_icache_range_noirq(unsigned long start,
unsigned long end)
{
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
cpuinfo.icache_line_length, cpuinfo.icache_size);
+#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (i));
+#endif
}
static void __flush_icache_all_msr_irq(void)
{
unsigned long flags;
-
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s\n", __func__);
local_irq_save(flags);
__disable_icache_msr();
-
+#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
-
+#else
+ for (i = 0; i < cpuinfo.icache_size;
+ i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (i));
+#endif
__enable_icache_msr();
local_irq_restore(flags);
}
@@ -252,35 +265,59 @@ static void __flush_icache_all_msr_irq(void)
static void __flush_icache_all_nomsr_irq(void)
{
unsigned long flags;
-
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s\n", __func__);
local_irq_save(flags);
__disable_icache_nomsr();
-
+#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
-
+#else
+ for (i = 0; i < cpuinfo.icache_size;
+ i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (i));
+#endif
__enable_icache_nomsr();
local_irq_restore(flags);
}
static void __flush_icache_all_noirq(void)
{
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+#else
+ for (i = 0; i < cpuinfo.icache_size;
+ i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (i));
+#endif
}
static void __invalidate_dcache_all_msr_irq(void)
{
unsigned long flags;
-
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s\n", __func__);
local_irq_save(flags);
__disable_dcache_msr();
-
+#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
-
+#else
+ for (i = 0; i < cpuinfo.dcache_size;
+ i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
__enable_dcache_msr();
local_irq_restore(flags);
}
@@ -288,70 +325,107 @@ static void __invalidate_dcache_all_msr_irq(void)
static void __invalidate_dcache_all_nomsr_irq(void)
{
unsigned long flags;
-
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s\n", __func__);
local_irq_save(flags);
__disable_dcache_nomsr();
-
+#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
-
+#else
+ for (i = 0; i < cpuinfo.dcache_size;
+ i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
__enable_dcache_nomsr();
local_irq_restore(flags);
}
static void __invalidate_dcache_all_noirq_wt(void)
{
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
+#else
+ for (i = 0; i < cpuinfo.dcache_size;
+ i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
}
/* FIXME this is weird - should be only wdc but not work
* MS: I am getting bus errors and other weird things */
static void __invalidate_dcache_all_wb(void)
{
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
wdc.clear)
-
-#if 0
- unsigned int i;
-
- pr_debug("%s\n", __func__);
-
- /* Just loop through cache size and invalidate it */
- for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length)
- __invalidate_dcache(0, i);
+#else
+ for (i = 0; i < cpuinfo.dcache_size;
+ i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc.clear %0, r0;" \
+ : : "r" (i));
#endif
}
static void __invalidate_dcache_range_wb(unsigned long start,
unsigned long end)
{
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+#ifdef ASM_LOOP
CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wdc.clear %0, r0;" \
+ : : "r" (i));
+#endif
}
static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
unsigned long end)
{
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
}
static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
unsigned long end)
{
unsigned long flags;
-
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
@@ -360,7 +434,13 @@ static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
local_irq_save(flags);
__disable_dcache_msr();
+#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
__enable_dcache_msr();
local_irq_restore(flags);
@@ -370,7 +450,9 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
unsigned long end)
{
unsigned long flags;
-
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
@@ -380,7 +462,13 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
local_irq_save(flags);
__disable_dcache_nomsr();
+#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
__enable_dcache_nomsr();
local_irq_restore(flags);
@@ -388,19 +476,38 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
static void __flush_dcache_all_wb(void)
{
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
wdc.flush);
+#else
+ for (i = 0; i < cpuinfo.dcache_size;
+ i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc.flush %0, r0;" \
+ : : "r" (i));
+#endif
}
static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
+#ifndef ASM_LOOP
+ int i;
+#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+#ifdef ASM_LOOP
CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wdc.flush %0, r0;" \
+ : : "r" (i));
+#endif
}
/* struct for wb caches and for wt caches */
@@ -504,7 +611,7 @@ const struct scache wt_nomsr_noirq = {
#define CPUVER_7_20_A 0x0c
#define CPUVER_7_20_D 0x0f
-#define INFO(s) printk(KERN_INFO "cache: " s " \n");
+#define INFO(s) printk(KERN_INFO "cache: " s "\n");
void microblaze_cache_init(void)
{
@@ -543,4 +650,9 @@ void microblaze_cache_init(void)
}
}
}
+ invalidate_dcache();
+ enable_dcache();
+
+ invalidate_icache();
+ enable_icache();
}
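
The cache.c change above does two things: it rewrites CACHE_RANGE_LOOP_1 so the loop bound is the end address itself rather than a separate down-counted byte total, and it introduces an ASM_LOOP switch with plain C fallback loops that issue one wic/wdc per cache line. (Note that several of the dcache fallbacks step by cpuinfo.icache_line_length, which looks like a copy-paste slip carried in the patch.) A minimal sketch of that fallback pattern, using a hypothetical CACHE_OP_RANGE helper that is not part of the patch:

/* Hedged sketch only: the patch open-codes one such loop per call site
 * (wic, wdc, wdc.flush, wdc.clear) instead of sharing a macro like this. */
#define CACHE_OP_RANGE(start, end, line_length, op)			\
do {									\
	unsigned long _addr;						\
	for (_addr = (start); _addr < (end); _addr += (line_length))	\
		__asm__ __volatile__ (#op " %0, r0;"			\
					: : "r" (_addr));		\
} while (0)

/* e.g. CACHE_OP_RANGE(start, end, cpuinfo.icache_line_length, wic); */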
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 991d71311b0..255ef880351 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -9,7 +9,6 @@
*/
#include <linux/init.h>
-#include <linux/slab.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
new file mode 100644
index 00000000000..ce72dd4967c
--- /dev/null
+++ b/arch/microblaze/kernel/dma.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2009-2010 PetaLogix
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * Provide default implementations of the DMA mapping callbacks for
+ * directly mapped busses.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/dma-debug.h>
+#include <asm/bug.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Generic direct DMA implementation
+ *
+ * This implementation supports a per-device offset that can be applied if
+ * the address at which memory is visible to devices is not 0. Platform code
+ * can set archdata.dma_data to an unsigned long holding the offset. By
+ * default the offset is PCI_DRAM_OFFSET.
+ */
+static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
+ size_t size, enum dma_data_direction direction)
+{
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ flush_dcache_range(paddr + offset, paddr + offset + size);
+ break;
+ case DMA_FROM_DEVICE:
+ invalidate_dcache_range(paddr + offset, paddr + offset + size);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static unsigned long get_dma_direct_offset(struct device *dev)
+{
+ if (likely(dev))
+ return (unsigned long)dev->archdata.dma_data;
+
+ return PCI_DRAM_OFFSET; /* FIXME Not sure if this is correct */
+}
+
+#define NOT_COHERENT_CACHE
+
+static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+#ifdef NOT_COHERENT_CACHE
+ return consistent_alloc(flag, size, dma_handle);
+#else
+ void *ret;
+ struct page *page;
+ int node = dev_to_node(dev);
+
+ /* ignore region specifiers */
+ flag &= ~(__GFP_HIGHMEM);
+
+ page = alloc_pages_node(node, flag, get_order(size));
+ if (page == NULL)
+ return NULL;
+ ret = page_address(page);
+ memset(ret, 0, size);
+ *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);
+
+ return ret;
+#endif
+}
+
+static void dma_direct_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+#ifdef NOT_COHERENT_CACHE
+ consistent_free(vaddr);
+#else
+ free_pages((unsigned long)vaddr, get_order(size));
+#endif
+}
+
+static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ /* FIXME this part of code is untested */
+ for_each_sg(sgl, sg, nents, i) {
+ sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
+ sg->dma_length = sg->length;
+ __dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
+ sg->length, direction);
+ }
+
+ return nents;
+}
+
+static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+}
+
+static int dma_direct_dma_supported(struct device *dev, u64 mask)
+{
+ return 1;
+}
+
+static inline dma_addr_t dma_direct_map_page(struct device *dev,
+ struct page *page,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ __dma_sync_page(page_to_phys(page), offset, size, direction);
+ return page_to_phys(page) + offset + get_dma_direct_offset(dev);
+}
+
+static inline void dma_direct_unmap_page(struct device *dev,
+ dma_addr_t dma_address,
+ size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+/* It is not necessary to do cache cleanup here.
+ *
+ * phys_to_virt is here because __dma_sync_page() uses __virt_to_phys and
+ * dma_address is a physical address
+ */
+ __dma_sync_page(dma_address, 0 , size, direction);
+}
+
+struct dma_map_ops dma_direct_ops = {
+ .alloc_coherent = dma_direct_alloc_coherent,
+ .free_coherent = dma_direct_free_coherent,
+ .map_sg = dma_direct_map_sg,
+ .unmap_sg = dma_direct_unmap_sg,
+ .dma_supported = dma_direct_dma_supported,
+ .map_page = dma_direct_map_page,
+ .unmap_page = dma_direct_unmap_page,
+};
+EXPORT_SYMBOL(dma_direct_ops);
+
+/* Number of entries preallocated for DMA-API debugging */
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init dma_init(void)
+{
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+ return 0;
+}
+fs_initcall(dma_init);
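
The new dma.c computes bus addresses by adding a per-device offset to the physical address, and keeps the dcache coherent by hand: flush before a device reads the buffer (DMA_TO_DEVICE), invalidate before the CPU reads what a device wrote (DMA_FROM_DEVICE). As an illustration only, the following spells out what dma_direct_map_page() does for the DMA_TO_DEVICE case using the helpers defined in the file; example_map_tx() itself is not part of the patch:

/* Illustration: the TX half of dma_direct_map_page(), step by step. */
static dma_addr_t example_map_tx(struct device *dev, struct page *page,
				 unsigned long offset, size_t len)
{
	unsigned long paddr = page_to_phys(page);

	/* CPU filled the buffer: push dirty dcache lines out to memory */
	flush_dcache_range(paddr + offset, paddr + offset + len);

	/* bus address seen by the device = physical address + per-device offset */
	return paddr + offset + get_dma_direct_offset(dev);
}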
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
index 95b0855802d..391d6197fc3 100644
--- a/arch/microblaze/kernel/entry-nommu.S
+++ b/arch/microblaze/kernel/entry-nommu.S
@@ -122,7 +122,7 @@ ENTRY(_interrupt)
ret_from_intr:
lwi r11, r1, PT_MODE
- bneid r11, 3f
+ bneid r11, no_intr_resched
lwi r6, r31, TS_THREAD_INFO /* get thread info */
lwi r19, r6, TI_FLAGS /* get flags in thread info */
@@ -133,16 +133,18 @@ ret_from_intr:
bralid r15, schedule
nop
1: andi r11, r19, _TIF_SIGPENDING
- beqid r11, no_intr_reshed
+ beqid r11, no_intr_resched
addk r5, r1, r0
addk r7, r0, r0
bralid r15, do_signal
addk r6, r0, r0
-no_intr_reshed:
+no_intr_resched:
+ /* Disable interrupts, we are now committed to the state restore */
+ disable_irq
+
/* save mode indicator */
lwi r11, r1, PT_MODE
-3:
swi r11, r0, PER_CPU(KM)
/* save r31 */
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 3bad4ff4947..c0ede25c5b9 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -305,7 +305,7 @@ C_ENTRY(_user_exception):
swi r11, r1, PTO+PT_R1; /* Store user SP. */
addi r11, r0, 1;
swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
-2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
/* Save away the syscall number. */
swi r12, r1, PTO+PT_R0;
tovirt(r1,r1)
@@ -322,8 +322,7 @@ C_ENTRY(_user_exception):
rtid r11, 0
nop
3:
- add r11, r0, CURRENT_TASK /* Get current task ptr into r11 */
- lwi r11, r11, TS_THREAD_INFO /* get thread info */
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
lwi r11, r11, TI_FLAGS /* get flags in thread info */
andi r11, r11, _TIF_WORK_SYSCALL_MASK
beqi r11, 4f
@@ -382,60 +381,50 @@ C_ENTRY(ret_from_trap):
/* See if returning to kernel mode, if so, skip resched &c. */
bnei r11, 2f;
+ swi r3, r1, PTO + PT_R3
+ swi r4, r1, PTO + PT_R4
+
/* We're returning to user mode, so check for various conditions that
* trigger rescheduling. */
- # FIXME: Restructure all these flag checks.
- add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+ /* FIXME: Restructure all these flag checks. */
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_WORK_SYSCALL_MASK
beqi r11, 1f
- swi r3, r1, PTO + PT_R3
- swi r4, r1, PTO + PT_R4
brlid r15, do_syscall_trace_leave
addik r5, r1, PTO + PT_R0
- lwi r3, r1, PTO + PT_R3
- lwi r4, r1, PTO + PT_R4
1:
-
/* We're returning to user mode, so check for various conditions that
* trigger rescheduling. */
- /* Get current task ptr into r11 */
- add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+ /* get thread info from current task */
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO;
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f;
- swi r3, r1, PTO + PT_R3; /* store syscall result */
- swi r4, r1, PTO + PT_R4;
bralid r15, schedule; /* Call scheduler */
nop; /* delay slot */
- lwi r3, r1, PTO + PT_R3; /* restore syscall result */
- lwi r4, r1, PTO + PT_R4;
/* Maybe handle a signal */
-5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+5: /* get thread info from current task*/
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO;
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqi r11, 1f; /* Signals to handle, handle them */
- swi r3, r1, PTO + PT_R3; /* store syscall result */
- swi r4, r1, PTO + PT_R4;
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
- add r6, r0, r0; /* Arg 2: sigset_t *oldset */
addi r7, r0, 1; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
- nop;
+ add r6, r0, r0; /* Arg 2: sigset_t *oldset */
+
+/* Finally, return to user state. */
+1:
lwi r3, r1, PTO + PT_R3; /* restore syscall result */
lwi r4, r1, PTO + PT_R4;
-/* Finally, return to user state. */
-1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
- add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
+ swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
+ swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
RESTORE_REGS;
@@ -565,7 +554,7 @@ C_ENTRY(sys_rt_sigreturn_wrapper):
swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
addi r11, r0, 1; \
swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
-2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
+2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \
/* Save away the syscall number. */ \
swi r0, r1, PTO+PT_R0; \
tovirt(r1,r1)
@@ -673,9 +662,7 @@ C_ENTRY(ret_from_exc):
/* We're returning to user mode, so check for various conditions that
trigger rescheduling. */
- /* Get current task ptr into r11 */
- add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f;
@@ -685,8 +672,7 @@ C_ENTRY(ret_from_exc):
nop; /* delay slot */
/* Maybe handle a signal */
-5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqi r11, 1f; /* Signals to handle, handle them */
@@ -705,15 +691,13 @@ C_ENTRY(ret_from_exc):
* store return registers separately because this macros is use
* for others exceptions */
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
- add r6, r0, r0; /* Arg 2: sigset_t *oldset */
addi r7, r0, 0; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
- nop;
+ add r6, r0, r0; /* Arg 2: sigset_t *oldset */
/* Finally, return to user state. */
1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
- add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
+ swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
@@ -802,7 +786,7 @@ C_ENTRY(_interrupt):
swi r11, r0, TOPHYS(PER_CPU(KM));
2:
- lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+ lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
swi r0, r1, PTO + PT_R0;
tovirt(r1,r1)
la r5, r1, PTO;
@@ -817,8 +801,7 @@ ret_from_irq:
lwi r11, r1, PTO + PT_MODE;
bnei r11, 2f;
- add r11, r0, CURRENT_TASK;
- lwi r11, r11, TS_THREAD_INFO;
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO;
lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f
@@ -826,8 +809,7 @@ ret_from_irq:
nop; /* delay slot */
/* Maybe handle a signal */
-5: add r11, r0, CURRENT_TASK;
- lwi r11, r11, TS_THREAD_INFO; /* MS: get thread info */
+5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqid r11, no_intr_resched
@@ -842,8 +824,7 @@ no_intr_resched:
/* Disable interrupts, we are now committed to the state restore */
disable_irq
swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
- add r11, r0, CURRENT_TASK;
- swi r11, r0, PER_CPU(CURRENT_SAVE);
+ swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
VM_OFF;
tophys(r1,r1);
lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
@@ -853,7 +834,28 @@ no_intr_resched:
lwi r1, r1, PT_R1 - PT_SIZE;
bri 6f;
/* MS: Return to kernel state. */
-2: VM_OFF /* MS: turn off MMU */
+2:
+#ifdef CONFIG_PREEMPT
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO;
+ /* MS: get preempt_count from thread info */
+ lwi r5, r11, TI_PREEMPT_COUNT;
+ bgti r5, restore;
+
+ lwi r5, r11, TI_FLAGS; /* get flags in thread info */
+ andi r5, r5, _TIF_NEED_RESCHED;
+ beqi r5, restore /* if zero jump over */
+
+preempt:
+ /* interrupts are off, that's why we call preempt_schedule_irq */
+ bralid r15, preempt_schedule_irq
+ nop
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
+ lwi r5, r11, TI_FLAGS; /* get flags in thread info */
+ andi r5, r5, _TIF_NEED_RESCHED;
+ bnei r5, preempt /* if non zero jump to resched */
+restore:
+#endif
+ VM_OFF /* MS: turn off MMU */
tophys(r1,r1)
lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
lwi r4, r1, PTO + PT_R4;
@@ -915,7 +917,7 @@ C_ENTRY(_debug_exception):
swi r11, r1, PTO+PT_R1; /* Store user SP. */
addi r11, r0, 1;
swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
-2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
/* Save away the syscall number. */
swi r0, r1, PTO+PT_R0;
tovirt(r1,r1)
@@ -935,8 +937,7 @@ dbtrap_call: rtbd r11, 0;
bnei r11, 2f;
/* Get current task ptr into r11 */
- add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f;
@@ -949,8 +950,7 @@ dbtrap_call: rtbd r11, 0;
/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
/* Maybe handle a signal */
-5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqi r11, 1f; /* Signals to handle, handle them */
@@ -966,16 +966,14 @@ dbtrap_call: rtbd r11, 0;
(in a possibly modified form) after do_signal returns. */
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
- add r6, r0, r0; /* Arg 2: sigset_t *oldset */
addi r7, r0, 0; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
- nop;
+ add r6, r0, r0; /* Arg 2: sigset_t *oldset */
/* Finally, return to user state. */
1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
- add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
- swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
+ swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
@@ -1007,7 +1005,7 @@ DBTRAP_return: /* Make global symbol for debugging */
ENTRY(_switch_to)
/* prepare return value */
- addk r3, r0, r31
+ addk r3, r0, CURRENT_TASK
/* save registers in cpu_context */
/* use r11 and r12, volatile registers, as temp register */
@@ -1051,10 +1049,10 @@ ENTRY(_switch_to)
nop
swi r12, r11, CC_FSR
- /* update r31, the current */
- lwi r31, r6, TI_TASK/* give me pointer to task which will be next */
+ /* update r31, the current task - get pointer to the task which will run next */
+ lwi CURRENT_TASK, r6, TI_TASK
/* stored it to current_save too */
- swi r31, r0, PER_CPU(CURRENT_SAVE)
+ swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
/* get new process' cpu context and restore */
/* give me start where start context of next task */
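
The CONFIG_PREEMPT hunk added to ret_from_irq is the usual kernel-preemption check on return to kernel mode: skip straight to the register restore if preempt_count is non-zero, otherwise keep calling preempt_schedule_irq() with interrupts still disabled until TIF_NEED_RESCHED stays clear. Roughly, in C (a sketch of what the assembly does, not code from the patch):

/* C-level sketch of the new preempt/restore logic in ret_from_irq. */
static void irq_return_to_kernel(struct thread_info *ti)
{
	if (ti->preempt_count)			/* TI_PREEMPT_COUNT > 0: preemption disabled */
		return;				/* fall through to 'restore:' */

	while (ti->flags & _TIF_NEED_RESCHED)	/* the asm branches back to 'preempt:' */
		preempt_schedule_irq();		/* interrupts are off here */
}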
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c
index 388b31ca65a..515feb40455 100644
--- a/arch/microblaze/kernel/ftrace.c
+++ b/arch/microblaze/kernel/ftrace.c
@@ -151,13 +151,10 @@ int ftrace_make_nop(struct module *mod,
return ret;
}
-static int ret_addr; /* initialized as 0 by default */
-
/* I believe that first is called ftrace_make_nop before this function */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
int ret;
- ret_addr = addr; /* saving where the barrier jump is */
pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n",
__func__, (unsigned int)addr, (unsigned int)rec->ip, imm);
ret = ftrace_modify_code(rec->ip, imm);
@@ -194,12 +191,9 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
ret = ftrace_modify_code(ip, upper);
ret += ftrace_modify_code(ip + 4, lower);
- /* We just need to remove the rtsd r15, 8 by NOP */
- BUG_ON(!ret_addr);
- if (ret_addr)
- ret += ftrace_modify_code(ret_addr, MICROBLAZE_NOP);
- else
- ret = 1; /* fault */
+ /* We just need to replace the rtsd r15, 8 with NOP */
+ ret += ftrace_modify_code((unsigned long)&ftrace_caller,
+ MICROBLAZE_NOP);
/* All changes are done - lets do caches consistent */
flush_icache();
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 30916193fcc..da6a5f5dc76 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -51,6 +51,12 @@ swapper_pg_dir:
.text
ENTRY(_start)
+#if CONFIG_KERNEL_BASE_ADDR == 0
+ brai TOPHYS(real_start)
+ .org 0x100
+real_start:
+#endif
+
mfs r1, rmsr
andi r1, r1, ~2
mts rmsr, r1
@@ -99,8 +105,8 @@ no_fdt_arg:
tophys(r4,r4) /* convert to phys address */
ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
_copy_command_line:
- lbu r7, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */
- sb r7, r4, r6 /* addr[r4+r6]= r7*/
+ lbu r2, r5, r6 /* r2=r5+r6 - r5 contain pointer to command line */
+ sb r2, r4, r6 /* addr[r4+r6]= r2*/
addik r6, r6, 1 /* increment counting */
bgtid r3, _copy_command_line /* loop for all entries */
addik r3, r3, -1 /* descrement loop */
@@ -128,7 +134,7 @@ _copy_bram:
* virtual to physical.
*/
nop
- addik r3, r0, 63 /* Invalidate all TLB entries */
+ addik r3, r0, MICROBLAZE_TLB_SIZE -1 /* Invalidate all TLB entries */
_invalidate:
mts rtlbx, r3
mts rtlbhi, r0 /* flush: ensure V is clear */
@@ -136,6 +142,11 @@ _invalidate:
addik r3, r3, -1
/* sync */
+ /* Setup the kernel PID */
+ mts rpid,r0 /* Load the kernel PID */
+ nop
+ bri 4
+
/*
* We should still be executing code at physical address area
* RAM_BASEADDR at this point. However, kernel code is at
@@ -146,10 +157,6 @@ _invalidate:
addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
tophys(r4,r3) /* Load the kernel physical address */
- mts rpid,r0 /* Load the kernel PID */
- nop
- bri 4
-
/*
* Configure and load two entries into TLB slots 0 and 1.
* In case we are pinning TLBs, these are reserved in by the
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 2b86c03aa84..995a2123635 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -313,13 +313,13 @@ _hw_exception_handler:
mfs r5, rmsr;
nop
swi r5, r1, 0;
- mfs r3, resr
+ mfs r4, resr
nop
- mfs r4, rear;
+ mfs r3, rear;
nop
#ifndef CONFIG_MMU
- andi r5, r3, 0x1000; /* Check ESR[DS] */
+ andi r5, r4, 0x1000; /* Check ESR[DS] */
beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */
mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
nop
@@ -327,13 +327,14 @@ not_in_delay_slot:
swi r17, r1, PT_R17
#endif
- andi r5, r3, 0x1F; /* Extract ESR[EXC] */
+ andi r5, r4, 0x1F; /* Extract ESR[EXC] */
#ifdef CONFIG_MMU
/* Calculate exception vector offset = r5 << 2 */
addk r6, r5, r5; /* << 1 */
addk r6, r6, r6; /* << 2 */
+#ifdef DEBUG
/* counting which exception happen */
lwi r5, r0, 0x200 + TOPHYS(r0_ram)
addi r5, r5, 1
@@ -341,6 +342,7 @@ not_in_delay_slot:
lwi r5, r6, 0x200 + TOPHYS(r0_ram)
addi r5, r5, 1
swi r5, r6, 0x200 + TOPHYS(r0_ram)
+#endif
/* end */
/* Load the HW Exception vector */
lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
@@ -376,7 +378,7 @@ handle_other_ex: /* Handle Other exceptions here */
swi r18, r1, PT_R18
or r5, r1, r0
- andi r6, r3, 0x1F; /* Load ESR[EC] */
+ andi r6, r4, 0x1F; /* Load ESR[EC] */
lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */
swi r7, r1, PT_MODE
mfs r7, rfsr
@@ -426,11 +428,11 @@ handle_other_ex: /* Handle Other exceptions here */
*/
handle_unaligned_ex:
/* Working registers already saved: R3, R4, R5, R6
- * R3 = ESR
- * R4 = EAR
+ * R4 = ESR
+ * R3 = EAR
*/
#ifdef CONFIG_MMU
- andi r6, r3, 0x1000 /* Check ESR[DS] */
+ andi r6, r4, 0x1000 /* Check ESR[DS] */
beqi r6, _no_delayslot /* Branch if ESR[DS] not set */
mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
nop
@@ -439,7 +441,7 @@ _no_delayslot:
RESTORE_STATE;
bri unaligned_data_trap
#endif
- andi r6, r3, 0x3E0; /* Mask and extract the register operand */
+ andi r6, r4, 0x3E0; /* Mask and extract the register operand */
srl r6, r6; /* r6 >> 5 */
srl r6, r6;
srl r6, r6;
@@ -448,33 +450,33 @@ _no_delayslot:
/* Store the register operand in a temporary location */
sbi r6, r0, TOPHYS(ex_reg_op);
- andi r6, r3, 0x400; /* Extract ESR[S] */
+ andi r6, r4, 0x400; /* Extract ESR[S] */
bnei r6, ex_sw;
ex_lw:
- andi r6, r3, 0x800; /* Extract ESR[W] */
+ andi r6, r4, 0x800; /* Extract ESR[W] */
beqi r6, ex_lhw;
- lbui r5, r4, 0; /* Exception address in r4 */
+ lbui r5, r3, 0; /* Exception address in r3 */
/* Load a word, byte-by-byte from destination address
and save it in tmp space */
sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
- lbui r5, r4, 1;
+ lbui r5, r3, 1;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
- lbui r5, r4, 2;
+ lbui r5, r3, 2;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_2);
- lbui r5, r4, 3;
+ lbui r5, r3, 3;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_3);
- /* Get the destination register value into r3 */
- lwi r3, r0, TOPHYS(ex_tmp_data_loc_0);
+ /* Get the destination register value into r4 */
+ lwi r4, r0, TOPHYS(ex_tmp_data_loc_0);
bri ex_lw_tail;
ex_lhw:
- lbui r5, r4, 0; /* Exception address in r4 */
+ lbui r5, r3, 0; /* Exception address in r3 */
/* Load a half-word, byte-by-byte from destination
address and save it in tmp space */
sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
- lbui r5, r4, 1;
+ lbui r5, r3, 1;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
- /* Get the destination register value into r3 */
- lhui r3, r0, TOPHYS(ex_tmp_data_loc_0);
+ /* Get the destination register value into r4 */
+ lhui r4, r0, TOPHYS(ex_tmp_data_loc_0);
ex_lw_tail:
/* Get the destination register number into r5 */
lbui r5, r0, TOPHYS(ex_reg_op);
@@ -502,25 +504,25 @@ ex_sw_tail:
andi r6, r6, 0x800; /* Extract ESR[W] */
beqi r6, ex_shw;
/* Get the word - delay slot */
- swi r3, r0, TOPHYS(ex_tmp_data_loc_0);
+ swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
/* Store the word, byte-by-byte into destination address */
- lbui r3, r0, TOPHYS(ex_tmp_data_loc_0);
- sbi r3, r4, 0;
- lbui r3, r0, TOPHYS(ex_tmp_data_loc_1);
- sbi r3, r4, 1;
- lbui r3, r0, TOPHYS(ex_tmp_data_loc_2);
- sbi r3, r4, 2;
- lbui r3, r0, TOPHYS(ex_tmp_data_loc_3);
- sbi r3, r4, 3;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_0);
+ sbi r4, r3, 0;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_1);
+ sbi r4, r3, 1;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
+ sbi r4, r3, 2;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
+ sbi r4, r3, 3;
bri ex_handler_done;
ex_shw:
/* Store the lower half-word, byte-by-byte into destination address */
- swi r3, r0, TOPHYS(ex_tmp_data_loc_0);
- lbui r3, r0, TOPHYS(ex_tmp_data_loc_2);
- sbi r3, r4, 0;
- lbui r3, r0, TOPHYS(ex_tmp_data_loc_3);
- sbi r3, r4, 1;
+ swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
+ sbi r4, r3, 0;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
+ sbi r4, r3, 1;
ex_sw_end: /* Exception handling of store word, ends. */
ex_handler_done:
@@ -560,21 +562,16 @@ ex_handler_done:
*/
mfs r11, rpid
nop
- bri 4
- mfs r3, rear /* Get faulting address */
- nop
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
- ori r4, r0, CONFIG_KERNEL_START
- cmpu r4, r3, r4
- bgti r4, ex3
+ ori r5, r0, CONFIG_KERNEL_START
+ cmpu r5, r3, r5
+ bgti r5, ex3
/* First, check if it was a zone fault (which means a user
* tried to access a kernel or read-protected page - always
* a SEGV). All other faults here must be stores, so no
* need to check ESR_S as well. */
- mfs r4, resr
- nop
andi r4, r4, 0x800 /* ESR_Z - zone protection */
bnei r4, ex2
@@ -589,8 +586,6 @@ ex_handler_done:
* tried to access a kernel or read-protected page - always
* a SEGV). All other faults here must be stores, so no
* need to check ESR_S as well. */
- mfs r4, resr
- nop
andi r4, r4, 0x800 /* ESR_Z */
bnei r4, ex2
/* get current task address */
@@ -665,8 +660,6 @@ ex_handler_done:
* R3 = ESR
*/
- mfs r3, rear /* Get faulting address */
- nop
RESTORE_STATE;
bri page_fault_instr_trap
@@ -677,18 +670,15 @@ ex_handler_done:
*/
handle_data_tlb_miss_exception:
/* Working registers already saved: R3, R4, R5, R6
- * R3 = ESR
+ * R3 = EAR, R4 = ESR
*/
mfs r11, rpid
nop
- bri 4
- mfs r3, rear /* Get faulting address */
- nop
/* If we are faulting a kernel address, we have to use the
* kernel page tables. */
- ori r4, r0, CONFIG_KERNEL_START
- cmpu r4, r3, r4
+ ori r6, r0, CONFIG_KERNEL_START
+ cmpu r4, r3, r6
bgti r4, ex5
ori r4, r0, swapper_pg_dir
mts rpid, r0 /* TLB will have 0 TID */
@@ -731,9 +721,8 @@ ex_handler_done:
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
+ brid finish_tlb_load
andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
-
- bri finish_tlb_load
ex7:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
@@ -754,9 +743,6 @@ ex_handler_done:
*/
mfs r11, rpid
nop
- bri 4
- mfs r3, rear /* Get faulting address */
- nop
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
@@ -792,7 +778,7 @@ ex_handler_done:
lwi r4, r5, 0 /* Get Linux PTE */
andi r6, r4, _PAGE_PRESENT
- beqi r6, ex7
+ beqi r6, ex10
ori r4, r4, _PAGE_ACCESSED
swi r4, r5, 0
@@ -805,9 +791,8 @@ ex_handler_done:
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
+ brid finish_tlb_load
andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
-
- bri finish_tlb_load
ex10:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
@@ -837,9 +822,9 @@ ex_handler_done:
andi r5, r5, (MICROBLAZE_TLB_SIZE-1)
ori r6, r0, 1
cmp r31, r5, r6
- blti r31, sem
+ blti r31, ex12
addik r5, r6, 1
- sem:
+ ex12:
/* MS: save back current TLB index */
swi r5, r0, TOPHYS(tlb_index)
@@ -859,7 +844,6 @@ ex_handler_done:
nop
/* Done...restore registers and get out of here. */
- ex12:
mts rpid, r11
nop
bri 4
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index 0f06034d1fe..6f39e2c001f 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -93,3 +93,18 @@ skip:
}
return 0;
}
+
+/* MS: There is no advanced mapping mechanism. We are using a simple 32-bit
+ intc without any cascades or connections, which is why the mapping is 1:1 */
+unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq)
+{
+ return hwirq;
+}
+EXPORT_SYMBOL_GPL(irq_create_mapping);
+
+unsigned int irq_create_of_mapping(struct device_node *controller,
+ u32 *intspec, unsigned int intsize)
+{
+ return intspec[0];
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
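
Because the mapping helpers above are 1:1, the hwirq number taken from a device-tree "interrupts" property is used directly as the Linux IRQ number. A hypothetical consumer, none of which appears in the patch (the hwirq value 3 and the function name are made up; needs <linux/interrupt.h> and <linux/of.h>):

/* Hypothetical driver snippet: under the 1:1 scheme, irq == intspec[0]. */
static int example_request_dt_irq(struct device_node *np,
				  irq_handler_t handler, void *priv)
{
	u32 intspec = 3;	/* assumed hwirq from the "interrupts" property */
	unsigned int irq = irq_create_of_mapping(np, &intspec, 1);

	return request_irq(irq, handler, 0, "example", priv);
}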
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S
index df16c6287a8..7cf86498326 100644
--- a/arch/microblaze/kernel/misc.S
+++ b/arch/microblaze/kernel/misc.S
@@ -26,9 +26,10 @@
* We avoid flushing the pinned 0, 1 and possibly 2 entries.
*/
.globl _tlbia;
+.type _tlbia, @function
.align 4;
_tlbia:
- addik r12, r0, 63 /* flush all entries (63 - 3) */
+ addik r12, r0, MICROBLAZE_TLB_SIZE - 1 /* flush all entries (63 - 3) */
/* isync */
_tlbia_1:
mts rtlbx, r12
@@ -41,11 +42,13 @@ _tlbia_1:
/* sync */
rtsd r15, 8
nop
+ .size _tlbia, . - _tlbia
/*
* Flush MMU TLB for a particular address (in r5)
*/
.globl _tlbie;
+.type _tlbie, @function
.align 4;
_tlbie:
mts rtlbsx, r5 /* look up the address in TLB */
@@ -59,17 +62,20 @@ _tlbie_1:
rtsd r15, 8
nop
+ .size _tlbie, . - _tlbie
+
/*
* Allocate TLB entry for early console
*/
.globl early_console_reg_tlb_alloc;
+.type early_console_reg_tlb_alloc, @function
.align 4;
early_console_reg_tlb_alloc:
/*
* Load a TLB entry for the UART, so that microblaze_progress() can use
* the UARTs nice and early. We use a 4k real==virtual mapping.
*/
- ori r4, r0, 63
+ ori r4, r0, MICROBLAZE_TLB_SIZE - 1
mts rtlbx, r4 /* TLB slot 2 */
or r4,r5,r0
@@ -86,6 +92,8 @@ early_console_reg_tlb_alloc:
rtsd r15, 8
nop
+ .size early_console_reg_tlb_alloc, . - early_console_reg_tlb_alloc
+
/*
* Copy a whole page (4096 bytes).
*/
@@ -104,6 +112,7 @@ early_console_reg_tlb_alloc:
#define DCACHE_LINE_BYTES (4 * 4)
.globl copy_page;
+.type copy_page, @function
.align 4;
copy_page:
ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1
@@ -118,3 +127,5 @@ _copy_page_loop:
addik r11, r11, -1
rtsd r15, 8
nop
+
+ .size copy_page, . - copy_page
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c
index 5a45b1adfef..cbecf110dc3 100644
--- a/arch/microblaze/kernel/module.c
+++ b/arch/microblaze/kernel/module.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
-#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/string.h>
diff --git a/arch/microblaze/kernel/of_platform.c b/arch/microblaze/kernel/of_platform.c
index acf4574d0f1..0dc755286d3 100644
--- a/arch/microblaze/kernel/of_platform.c
+++ b/arch/microblaze/kernel/of_platform.c
@@ -17,7 +17,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -185,7 +184,7 @@ EXPORT_SYMBOL(of_find_device_by_node);
static int of_dev_phandle_match(struct device *dev, void *data)
{
phandle *ph = data;
- return to_of_device(dev)->node->linux_phandle == *ph;
+ return to_of_device(dev)->node->phandle == *ph;
}
struct of_device *of_find_device_by_phandle(phandle ph)
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 812f1bf06c9..09bed44dfcd 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -15,6 +15,7 @@
#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/pgalloc.h>
+#include <asm/uaccess.h> /* for USER_DS macros */
#include <asm/cacheflush.h>
void show_regs(struct pt_regs *regs)
@@ -74,7 +75,10 @@ __setup("hlt", hlt_setup);
void default_idle(void)
{
- if (!hlt_counter) {
+ if (likely(hlt_counter)) {
+ while (!need_resched())
+ cpu_relax();
+ } else {
clear_thread_flag(TIF_POLLING_NRFLAG);
smp_mb__after_clear_bit();
local_irq_disable();
@@ -82,9 +86,7 @@ void default_idle(void)
cpu_sleep();
local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG);
- } else
- while (!need_resched())
- cpu_relax();
+ }
}
void cpu_idle(void)
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index b817df172aa..a15ef6d67ca 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -42,698 +42,21 @@
#include <asm/sections.h>
#include <asm/pci-bridge.h>
-static int __initdata dt_root_addr_cells;
-static int __initdata dt_root_size_cells;
-
-typedef u32 cell_t;
-
-static struct boot_param_header *initial_boot_params;
-
-/* export that to outside world */
-struct device_node *of_chosen;
-
-static inline char *find_flat_dt_string(u32 offset)
-{
- return ((char *)initial_boot_params) +
- initial_boot_params->off_dt_strings + offset;
-}
-
-/**
- * This function is used to scan the flattened device-tree, it is
- * used to extract the memory informations at boot before we can
- * unflatten the tree
- */
-int __init of_scan_flat_dt(int (*it)(unsigned long node,
- const char *uname, int depth,
- void *data),
- void *data)
-{
- unsigned long p = ((unsigned long)initial_boot_params) +
- initial_boot_params->off_dt_struct;
- int rc = 0;
- int depth = -1;
-
- do {
- u32 tag = *((u32 *)p);
- char *pathp;
-
- p += 4;
- if (tag == OF_DT_END_NODE) {
- depth--;
- continue;
- }
- if (tag == OF_DT_NOP)
- continue;
- if (tag == OF_DT_END)
- break;
- if (tag == OF_DT_PROP) {
- u32 sz = *((u32 *)p);
- p += 8;
- if (initial_boot_params->version < 0x10)
- p = _ALIGN(p, sz >= 8 ? 8 : 4);
- p += sz;
- p = _ALIGN(p, 4);
- continue;
- }
- if (tag != OF_DT_BEGIN_NODE) {
- printk(KERN_WARNING "Invalid tag %x scanning flattened"
- " device tree !\n", tag);
- return -EINVAL;
- }
- depth++;
- pathp = (char *)p;
- p = _ALIGN(p + strlen(pathp) + 1, 4);
- if ((*pathp) == '/') {
- char *lp, *np;
- for (lp = NULL, np = pathp; *np; np++)
- if ((*np) == '/')
- lp = np+1;
- if (lp != NULL)
- pathp = lp;
- }
- rc = it(p, pathp, depth, data);
- if (rc != 0)
- break;
- } while (1);
-
- return rc;
-}
-
-unsigned long __init of_get_flat_dt_root(void)
-{
- unsigned long p = ((unsigned long)initial_boot_params) +
- initial_boot_params->off_dt_struct;
-
- while (*((u32 *)p) == OF_DT_NOP)
- p += 4;
- BUG_ON(*((u32 *)p) != OF_DT_BEGIN_NODE);
- p += 4;
- return _ALIGN(p + strlen((char *)p) + 1, 4);
-}
-
-/**
- * This function can be used within scan_flattened_dt callback to get
- * access to properties
- */
-void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
- unsigned long *size)
-{
- unsigned long p = node;
-
- do {
- u32 tag = *((u32 *)p);
- u32 sz, noff;
- const char *nstr;
-
- p += 4;
- if (tag == OF_DT_NOP)
- continue;
- if (tag != OF_DT_PROP)
- return NULL;
-
- sz = *((u32 *)p);
- noff = *((u32 *)(p + 4));
- p += 8;
- if (initial_boot_params->version < 0x10)
- p = _ALIGN(p, sz >= 8 ? 8 : 4);
-
- nstr = find_flat_dt_string(noff);
- if (nstr == NULL) {
- printk(KERN_WARNING "Can't find property index"
- " name !\n");
- return NULL;
- }
- if (strcmp(name, nstr) == 0) {
- if (size)
- *size = sz;
- return (void *)p;
- }
- p += sz;
- p = _ALIGN(p, 4);
- } while (1);
-}
-
-int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
-{
- const char *cp;
- unsigned long cplen, l;
-
- cp = of_get_flat_dt_prop(node, "compatible", &cplen);
- if (cp == NULL)
- return 0;
- while (cplen > 0) {
- if (strncasecmp(cp, compat, strlen(compat)) == 0)
- return 1;
- l = strlen(cp) + 1;
- cp += l;
- cplen -= l;
- }
-
- return 0;
-}
-
-static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
- unsigned long align)
-{
- void *res;
-
- *mem = _ALIGN(*mem, align);
- res = (void *)*mem;
- *mem += size;
-
- return res;
-}
-
-static unsigned long __init unflatten_dt_node(unsigned long mem,
- unsigned long *p,
- struct device_node *dad,
- struct device_node ***allnextpp,
- unsigned long fpsize)
-{
- struct device_node *np;
- struct property *pp, **prev_pp = NULL;
- char *pathp;
- u32 tag;
- unsigned int l, allocl;
- int has_name = 0;
- int new_format = 0;
-
- tag = *((u32 *)(*p));
- if (tag != OF_DT_BEGIN_NODE) {
- printk("Weird tag at start of node: %x\n", tag);
- return mem;
- }
- *p += 4;
- pathp = (char *)*p;
- l = allocl = strlen(pathp) + 1;
- *p = _ALIGN(*p + l, 4);
-
- /* version 0x10 has a more compact unit name here instead of the full
- * path. we accumulate the full path size using "fpsize", we'll rebuild
- * it later. We detect this because the first character of the name is
- * not '/'.
- */
- if ((*pathp) != '/') {
- new_format = 1;
- if (fpsize == 0) {
- /* root node: special case. fpsize accounts for path
- * plus terminating zero. root node only has '/', so
- * fpsize should be 2, but we want to avoid the first
- * level nodes to have two '/' so we use fpsize 1 here
- */
- fpsize = 1;
- allocl = 2;
- } else {
- /* account for '/' and path size minus terminal 0
- * already in 'l'
- */
- fpsize += l;
- allocl = fpsize;
- }
- }
-
- np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
- __alignof__(struct device_node));
- if (allnextpp) {
- memset(np, 0, sizeof(*np));
- np->full_name = ((char *)np) + sizeof(struct device_node);
- if (new_format) {
- char *p2 = np->full_name;
- /* rebuild full path for new format */
- if (dad && dad->parent) {
- strcpy(p2, dad->full_name);
-#ifdef DEBUG
- if ((strlen(p2) + l + 1) != allocl) {
- pr_debug("%s: p: %d, l: %d, a: %d\n",
- pathp, (int)strlen(p2),
- l, allocl);
- }
-#endif
- p2 += strlen(p2);
- }
- *(p2++) = '/';
- memcpy(p2, pathp, l);
- } else
- memcpy(np->full_name, pathp, l);
- prev_pp = &np->properties;
- **allnextpp = np;
- *allnextpp = &np->allnext;
- if (dad != NULL) {
- np->parent = dad;
- /* we temporarily use the next field as `last_child'*/
- if (dad->next == NULL)
- dad->child = np;
- else
- dad->next->sibling = np;
- dad->next = np;
- }
- kref_init(&np->kref);
- }
- while (1) {
- u32 sz, noff;
- char *pname;
-
- tag = *((u32 *)(*p));
- if (tag == OF_DT_NOP) {
- *p += 4;
- continue;
- }
- if (tag != OF_DT_PROP)
- break;
- *p += 4;
- sz = *((u32 *)(*p));
- noff = *((u32 *)((*p) + 4));
- *p += 8;
- if (initial_boot_params->version < 0x10)
- *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
-
- pname = find_flat_dt_string(noff);
- if (pname == NULL) {
- printk(KERN_INFO
- "Can't find property name in list !\n");
- break;
- }
- if (strcmp(pname, "name") == 0)
- has_name = 1;
- l = strlen(pname) + 1;
- pp = unflatten_dt_alloc(&mem, sizeof(struct property),
- __alignof__(struct property));
- if (allnextpp) {
- if (strcmp(pname, "linux,phandle") == 0) {
- np->node = *((u32 *)*p);
- if (np->linux_phandle == 0)
- np->linux_phandle = np->node;
- }
- if (strcmp(pname, "ibm,phandle") == 0)
- np->linux_phandle = *((u32 *)*p);
- pp->name = pname;
- pp->length = sz;
- pp->value = (void *)*p;
- *prev_pp = pp;
- prev_pp = &pp->next;
- }
- *p = _ALIGN((*p) + sz, 4);
- }
- /* with version 0x10 we may not have the name property, recreate
- * it here from the unit name if absent
- */
- if (!has_name) {
- char *p1 = pathp, *ps = pathp, *pa = NULL;
- int sz;
-
- while (*p1) {
- if ((*p1) == '@')
- pa = p1;
- if ((*p1) == '/')
- ps = p1 + 1;
- p1++;
- }
- if (pa < ps)
- pa = p1;
- sz = (pa - ps) + 1;
- pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
- __alignof__(struct property));
- if (allnextpp) {
- pp->name = "name";
- pp->length = sz;
- pp->value = pp + 1;
- *prev_pp = pp;
- prev_pp = &pp->next;
- memcpy(pp->value, ps, sz - 1);
- ((char *)pp->value)[sz - 1] = 0;
- pr_debug("fixed up name for %s -> %s\n", pathp,
- (char *)pp->value);
- }
- }
- if (allnextpp) {
- *prev_pp = NULL;
- np->name = of_get_property(np, "name", NULL);
- np->type = of_get_property(np, "device_type", NULL);
-
- if (!np->name)
- np->name = "<NULL>";
- if (!np->type)
- np->type = "<NULL>";
- }
- while (tag == OF_DT_BEGIN_NODE) {
- mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
- tag = *((u32 *)(*p));
- }
- if (tag != OF_DT_END_NODE) {
- printk(KERN_INFO "Weird tag at end of node: %x\n", tag);
- return mem;
- }
- *p += 4;
- return mem;
-}
-
-/**
- * unflattens the device-tree passed by the firmware, creating the
- * tree of struct device_node. It also fills the "name" and "type"
- * pointers of the nodes so the normal device-tree walking functions
- * can be used (this used to be done by finish_device_tree)
- */
-void __init unflatten_device_tree(void)
-{
- unsigned long start, mem, size;
- struct device_node **allnextp = &allnodes;
-
- pr_debug(" -> unflatten_device_tree()\n");
-
- /* First pass, scan for size */
- start = ((unsigned long)initial_boot_params) +
- initial_boot_params->off_dt_struct;
- size = unflatten_dt_node(0, &start, NULL, NULL, 0);
- size = (size | 3) + 1;
-
- pr_debug(" size is %lx, allocating...\n", size);
-
- /* Allocate memory for the expanded device tree */
- mem = lmb_alloc(size + 4, __alignof__(struct device_node));
- mem = (unsigned long) __va(mem);
-
- ((u32 *)mem)[size / 4] = 0xdeadbeef;
-
- pr_debug(" unflattening %lx...\n", mem);
-
- /* Second pass, do actual unflattening */
- start = ((unsigned long)initial_boot_params) +
- initial_boot_params->off_dt_struct;
- unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
- if (*((u32 *)start) != OF_DT_END)
- printk(KERN_WARNING "Weird tag at end of tree: %08x\n",
- *((u32 *)start));
- if (((u32 *)mem)[size / 4] != 0xdeadbeef)
- printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
- ((u32 *)mem)[size / 4]);
- *allnextp = NULL;
-
- /* Get pointer to OF "/chosen" node for use everywhere */
- of_chosen = of_find_node_by_path("/chosen");
- if (of_chosen == NULL)
- of_chosen = of_find_node_by_path("/chosen@0");
-
- pr_debug(" <- unflatten_device_tree()\n");
-}
-
-#define early_init_dt_scan_drconf_memory(node) 0
-
-static int __init early_init_dt_scan_cpus(unsigned long node,
- const char *uname, int depth,
- void *data)
-{
- static int logical_cpuid;
- char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- const u32 *intserv;
- int i, nthreads;
- int found = 0;
-
- /* We are scanning "cpu" nodes only */
- if (type == NULL || strcmp(type, "cpu") != 0)
- return 0;
-
- /* Get physical cpuid */
- intserv = of_get_flat_dt_prop(node, "reg", NULL);
- nthreads = 1;
-
- /*
- * Now see if any of these threads match our boot cpu.
- * NOTE: This must match the parsing done in smp_setup_cpu_maps.
- */
- for (i = 0; i < nthreads; i++) {
- /*
- * version 2 of the kexec param format adds the phys cpuid of
- * booted proc.
- */
- if (initial_boot_params && initial_boot_params->version >= 2) {
- if (intserv[i] ==
- initial_boot_params->boot_cpuid_phys) {
- found = 1;
- break;
- }
- } else {
- /*
- * Check if it's the boot-cpu, set it's hw index now,
- * unfortunately this format did not support booting
- * off secondary threads.
- */
- if (of_get_flat_dt_prop(node,
- "linux,boot-cpu", NULL) != NULL) {
- found = 1;
- break;
- }
- }
-
-#ifdef CONFIG_SMP
- /* logical cpu id is always 0 on UP kernels */
- logical_cpuid++;
-#endif
- }
-
- if (found) {
- pr_debug("boot cpu: logical %d physical %d\n", logical_cpuid,
- intserv[i]);
- boot_cpuid = logical_cpuid;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-static void __init early_init_dt_check_for_initrd(unsigned long node)
-{
- unsigned long l;
- u32 *prop;
-
- pr_debug("Looking for initrd properties... ");
-
- prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l);
- if (prop) {
- initrd_start = (unsigned long)
- __va((u32)of_read_ulong(prop, l/4));
-
- prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l);
- if (prop) {
- initrd_end = (unsigned long)
- __va((u32)of_read_ulong(prop, 1/4));
- initrd_below_start_ok = 1;
- } else {
- initrd_start = 0;
- }
- }
-
- pr_debug("initrd_start=0x%lx initrd_end=0x%lx\n",
- initrd_start, initrd_end);
-}
-#else
-static inline void early_init_dt_check_for_initrd(unsigned long node)
-{
-}
-#endif /* CONFIG_BLK_DEV_INITRD */
-
-static int __init early_init_dt_scan_chosen(unsigned long node,
- const char *uname, int depth, void *data)
-{
- unsigned long l;
- char *p;
-
- pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
-
- if (depth != 1 ||
- (strcmp(uname, "chosen") != 0 &&
- strcmp(uname, "chosen@0") != 0))
- return 0;
-
-#ifdef CONFIG_KEXEC
- lprop = (u64 *)of_get_flat_dt_prop(node,
- "linux,crashkernel-base", NULL);
- if (lprop)
- crashk_res.start = *lprop;
-
- lprop = (u64 *)of_get_flat_dt_prop(node,
- "linux,crashkernel-size", NULL);
- if (lprop)
- crashk_res.end = crashk_res.start + *lprop - 1;
-#endif
-
- early_init_dt_check_for_initrd(node);
-
-	/* Retrieve command line */
- p = of_get_flat_dt_prop(node, "bootargs", &l);
- if (p != NULL && l > 0)
- strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
-
-#ifdef CONFIG_CMDLINE
-#ifndef CONFIG_CMDLINE_FORCE
- if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
-#endif
- strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#endif /* CONFIG_CMDLINE */
-
- pr_debug("Command line is: %s\n", cmd_line);
-
- /* break now */
- return 1;
-}
-
-static int __init early_init_dt_scan_root(unsigned long node,
- const char *uname, int depth, void *data)
-{
- u32 *prop;
-
- if (depth != 0)
- return 0;
-
- prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
- dt_root_size_cells = (prop == NULL) ? 1 : *prop;
- pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);
-
- prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
- dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
- pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);
-
- /* break now */
- return 1;
-}
-
-static u64 __init dt_mem_next_cell(int s, cell_t **cellp)
+void __init early_init_dt_scan_chosen_arch(unsigned long node)
{
- cell_t *p = *cellp;
-
- *cellp = p + s;
- return of_read_number(p, s);
+ /* No Microblaze specific code here */
}
-static int __init early_init_dt_scan_memory(unsigned long node,
- const char *uname, int depth, void *data)
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
- char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- cell_t *reg, *endp;
- unsigned long l;
-
- /* Look for the ibm,dynamic-reconfiguration-memory node */
-/* if (depth == 1 &&
- strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
- return early_init_dt_scan_drconf_memory(node);
-*/
- /* We are scanning "memory" nodes only */
- if (type == NULL) {
- /*
- * The longtrail doesn't have a device_type on the
- * /memory node, so look for the node called /memory@0.
- */
- if (depth != 1 || strcmp(uname, "memory@0") != 0)
- return 0;
- } else if (strcmp(type, "memory") != 0)
- return 0;
-
- reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
- if (reg == NULL)
- reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
- if (reg == NULL)
- return 0;
-
- endp = reg + (l / sizeof(cell_t));
-
- pr_debug("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
- uname, l, reg[0], reg[1], reg[2], reg[3]);
-
- while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
- u64 base, size;
-
- base = dt_mem_next_cell(dt_root_addr_cells, &reg);
- size = dt_mem_next_cell(dt_root_size_cells, &reg);
-
- if (size == 0)
- continue;
- pr_debug(" - %llx , %llx\n", (unsigned long long)base,
- (unsigned long long)size);
-
- lmb_add(base, size);
- }
- return 0;
+ lmb_add(base, size);
}
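/*
 * Illustrative sketch, not part of this patch: the generic flat-tree code
 * that replaces the scanner removed above walks a "reg" (or
 * "linux,usable-memory") property as (base, size) cell pairs and hands each
 * region to the early_init_dt_add_memory_arch() hook defined here.  A
 * minimal decoder, assuming the root #address-cells/#size-cells values are
 * already known; the function name is made up for the example.
 */
static void __init example_add_memory_regions(const u32 *reg, unsigned long len,
					      int addr_cells, int size_cells)
{
	const u32 *end = reg + len / sizeof(u32);

	while (end - reg >= addr_cells + size_cells) {
		u64 base = of_read_number(reg, addr_cells);
		u64 size = of_read_number(reg + addr_cells, size_cells);

		reg += addr_cells + size_cells;
		if (size)
			early_init_dt_add_memory_arch(base, size);
	}
}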
-#ifdef CONFIG_PHYP_DUMP
-/**
- * phyp_dump_calculate_reserve_size() - reserve variable boot area 5% or arg
- *
- * Function to find the largest size we need to reserve
- * during early boot process.
- *
- * It either returns the boot parameter value, if one was supplied, or
- * the larger of 256MB and 5% of RAM rounded down to a multiple of 256MB.
- *
- */
-static inline unsigned long phyp_dump_calculate_reserve_size(void)
+u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
- unsigned long tmp;
-
- if (phyp_dump_info->reserve_bootvar)
- return phyp_dump_info->reserve_bootvar;
-
- /* divide by 20 to get 5% of value */
- tmp = lmb_end_of_DRAM();
- do_div(tmp, 20);
-
- /* round it down in multiples of 256 */
- tmp = tmp & ~0x0FFFFFFFUL;
-
- return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END);
+ return lmb_alloc(size, align);
}
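/*
 * Worked example, illustrative only: for 32GB of DRAM the removed helper
 * above computes
 *
 *	32GB / 20	= 0x66666666	(~1.6GB, i.e. 5%)
 *	& ~0x0FFFFFFFUL	= 0x60000000	(rounded down to a 256MB multiple)
 *
 * and returns max(0x60000000, PHYP_DUMP_RMR_END).  None of this applies on
 * Microblaze, which is why the new early_init_dt_alloc_memory_arch() hook
 * simply forwards to lmb_alloc().
 */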
-/**
- * phyp_dump_reserve_mem() - reserve all not-yet-dumped memory
- *
- * This routine may reserve memory regions in the kernel only
- * if the system is supported and a dump was taken in last
- * boot instance or if the hardware is supported and the
- * scratch area needs to be set up. In other instances it returns
- * without reserving anything. The memory in case of dump being
- * active is freed when the dump is collected (by userland tools).
- */
-static void __init phyp_dump_reserve_mem(void)
-{
- unsigned long base, size;
- unsigned long variable_reserve_size;
-
- if (!phyp_dump_info->phyp_dump_configured) {
- printk(KERN_ERR "Phyp-dump not supported on this hardware\n");
- return;
- }
-
- if (!phyp_dump_info->phyp_dump_at_boot) {
- printk(KERN_INFO "Phyp-dump disabled at boot time\n");
- return;
- }
-
- variable_reserve_size = phyp_dump_calculate_reserve_size();
-
- if (phyp_dump_info->phyp_dump_is_active) {
-		/* Reserve *everything* above RMR. Area freed by userland tools */
- base = variable_reserve_size;
- size = lmb_end_of_DRAM() - base;
-
- /* XXX crashed_ram_end is wrong, since it may be beyond
- * the memory_limit, it will need to be adjusted. */
- lmb_reserve(base, size);
-
- phyp_dump_info->init_reserve_start = base;
- phyp_dump_info->init_reserve_size = size;
- } else {
- size = phyp_dump_info->cpu_state_size +
- phyp_dump_info->hpte_region_size +
- variable_reserve_size;
- base = lmb_end_of_DRAM() - size;
- lmb_reserve(base, size);
- phyp_dump_info->init_reserve_start = base;
- phyp_dump_info->init_reserve_size = size;
- }
-}
-#else
-static inline void __init phyp_dump_reserve_mem(void) {}
-#endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */
-
#ifdef CONFIG_EARLY_PRINTK
 /* MS this is a Microblaze-specific function */
static int __init early_init_dt_scan_serial(unsigned long node,
@@ -775,11 +98,6 @@ void __init early_init_devtree(void *params)
/* Setup flat device-tree pointer */
initial_boot_params = params;
-#ifdef CONFIG_PHYP_DUMP
-	/* scan tree to see if dump occurred during last boot */
- of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
-#endif
-
 	/* Retrieve various information from the /chosen node of the
* device-tree, including the platform type, initrd location and
* size, TCE reserve, and more ...
@@ -799,33 +117,18 @@ void __init early_init_devtree(void *params)
pr_debug("Phys. mem: %lx\n", (unsigned long) lmb_phys_mem_size());
- pr_debug("Scanning CPUs ...\n");
-
-	/* Retrieve CPU-related information from the flat tree
- * (altivec support, boot CPU ID, ...)
- */
- of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
-
pr_debug(" <- early_init_devtree()\n");
}
-/**
- * Indicates whether the root node has a given value in its
- * compatible property.
- */
-int machine_is_compatible(const char *compat)
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+ unsigned long end)
{
- struct device_node *root;
- int rc = 0;
-
- root = of_find_node_by_path("/");
- if (root) {
- rc = of_device_is_compatible(root, compat);
- of_node_put(root);
- }
- return rc;
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(end);
+ initrd_below_start_ok = 1;
}
-EXPORT_SYMBOL(machine_is_compatible);
+#endif
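/*
 * Illustrative sketch, not part of this patch: the generic flat-tree code
 * is expected to read "linux,initrd-start"/"linux,initrd-end" from /chosen
 * and pass the physical range to the arch hook above, much like the removed
 * early_init_dt_check_for_initrd() did.  The exact generic call site is an
 * assumption; the helper name is made up.
 */
static void __init example_check_for_initrd(unsigned long node)
{
	unsigned long start, end, l;
	u32 *prop;

	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l);
	if (!prop)
		return;
	start = of_read_ulong(prop, l / 4);

	prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l);
	if (!prop)
		return;
	end = of_read_ulong(prop, l / 4);

	early_init_dt_setup_initrd_arch(start, end);
}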
/*******
*
@@ -838,273 +141,6 @@ EXPORT_SYMBOL(machine_is_compatible);
*
*******/
-/**
- * of_find_node_by_phandle - Find a node given a phandle
- * @handle: phandle of the node to find
- *
- * Returns a node pointer with refcount incremented, use
- * of_node_put() on it when done.
- */
-struct device_node *of_find_node_by_phandle(phandle handle)
-{
- struct device_node *np;
-
- read_lock(&devtree_lock);
- for (np = allnodes; np != NULL; np = np->allnext)
- if (np->linux_phandle == handle)
- break;
- of_node_get(np);
- read_unlock(&devtree_lock);
- return np;
-}
-EXPORT_SYMBOL(of_find_node_by_phandle);
-
-/**
- * of_node_get - Increment refcount of a node
- * @node: Node to inc refcount, NULL is supported to
- * simplify writing of callers
- *
- * Returns node.
- */
-struct device_node *of_node_get(struct device_node *node)
-{
- if (node)
- kref_get(&node->kref);
- return node;
-}
-EXPORT_SYMBOL(of_node_get);
-
-static inline struct device_node *kref_to_device_node(struct kref *kref)
-{
- return container_of(kref, struct device_node, kref);
-}
-
-/**
- * of_node_release - release a dynamically allocated node
- * @kref: kref element of the node to be released
- *
- * In of_node_put() this function is passed to kref_put()
- * as the destructor.
- */
-static void of_node_release(struct kref *kref)
-{
- struct device_node *node = kref_to_device_node(kref);
- struct property *prop = node->properties;
-
- /* We should never be releasing nodes that haven't been detached. */
- if (!of_node_check_flag(node, OF_DETACHED)) {
- printk(KERN_INFO "WARNING: Bad of_node_put() on %s\n",
- node->full_name);
- dump_stack();
- kref_init(&node->kref);
- return;
- }
-
- if (!of_node_check_flag(node, OF_DYNAMIC))
- return;
-
- while (prop) {
- struct property *next = prop->next;
- kfree(prop->name);
- kfree(prop->value);
- kfree(prop);
- prop = next;
-
- if (!prop) {
- prop = node->deadprops;
- node->deadprops = NULL;
- }
- }
- kfree(node->full_name);
- kfree(node->data);
- kfree(node);
-}
-
-/**
- * of_node_put - Decrement refcount of a node
- * @node: Node to dec refcount, NULL is supported to
- * simplify writing of callers
- *
- */
-void of_node_put(struct device_node *node)
-{
- if (node)
- kref_put(&node->kref, of_node_release);
-}
-EXPORT_SYMBOL(of_node_put);
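/*
 * Illustrative sketch, not part of this patch: the contract the removed
 * helpers implement.  Lookup functions return a node with its refcount
 * already raised, so every successful lookup must be balanced by an
 * of_node_put() once the caller is done with the node.
 */
static void example_node_refcounting(void)
{
	struct device_node *np = of_find_node_by_path("/chosen");

	if (np) {
		/* ... use np and its properties here ... */
		of_node_put(np);	/* drop the reference taken by the lookup */
	}
}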
-
-/*
- * Plug a device node into the tree and global list.
- */
-void of_attach_node(struct device_node *np)
-{
- unsigned long flags;
-
- write_lock_irqsave(&devtree_lock, flags);
- np->sibling = np->parent->child;
- np->allnext = allnodes;
- np->parent->child = np;
- allnodes = np;
- write_unlock_irqrestore(&devtree_lock, flags);
-}
-
-/*
- * "Unplug" a node from the device tree. The caller must hold
- * a reference to the node. The memory associated with the node
- * is not freed until its refcount goes to zero.
- */
-void of_detach_node(struct device_node *np)
-{
- struct device_node *parent;
- unsigned long flags;
-
- write_lock_irqsave(&devtree_lock, flags);
-
- parent = np->parent;
- if (!parent)
- goto out_unlock;
-
- if (allnodes == np)
- allnodes = np->allnext;
- else {
- struct device_node *prev;
- for (prev = allnodes;
- prev->allnext != np;
- prev = prev->allnext)
- ;
- prev->allnext = np->allnext;
- }
-
- if (parent->child == np)
- parent->child = np->sibling;
- else {
- struct device_node *prevsib;
- for (prevsib = np->parent->child;
- prevsib->sibling != np;
- prevsib = prevsib->sibling)
- ;
- prevsib->sibling = np->sibling;
- }
-
- of_node_set_flag(np, OF_DETACHED);
-
-out_unlock:
- write_unlock_irqrestore(&devtree_lock, flags);
-}
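/*
 * Illustrative sketch, not part of this patch: as the comment above notes,
 * of_detach_node() only unlinks the node; its storage is reclaimed when the
 * last reference is dropped.  Removing a dynamically allocated node would
 * therefore look roughly like this:
 */
static void example_remove_dynamic_node(struct device_node *np)
{
	of_detach_node(np);	/* unlink from the tree and the global list */
	of_node_put(np);	/* freed once the refcount reaches zero */
}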
-
-/*
- * Add a property to a node
- */
-int prom_add_property(struct device_node *np, struct property *prop)
-{
- struct property **next;
- unsigned long flags;
-
- prop->next = NULL;
- write_lock_irqsave(&devtree_lock, flags);
- next = &np->properties;
- while (*next) {
- if (strcmp(prop->name, (*next)->name) == 0) {
- /* duplicate ! don't insert it */
- write_unlock_irqrestore(&devtree_lock, flags);
- return -1;
- }
- next = &(*next)->next;
- }
- *next = prop;
- write_unlock_irqrestore(&devtree_lock, flags);
-
-#ifdef CONFIG_PROC_DEVICETREE
- /* try to add to proc as well if it was initialized */
- if (np->pde)
- proc_device_tree_add_prop(np->pde, prop);
-#endif /* CONFIG_PROC_DEVICETREE */
-
- return 0;
-}
-
-/*
- * Remove a property from a node. Note that we don't actually
- * remove it, since we have given out who-knows-how-many pointers
- * to the data using get-property. Instead we just move the property
- * to the "dead properties" list, so it won't be found any more.
- */
-int prom_remove_property(struct device_node *np, struct property *prop)
-{
- struct property **next;
- unsigned long flags;
- int found = 0;
-
- write_lock_irqsave(&devtree_lock, flags);
- next = &np->properties;
- while (*next) {
- if (*next == prop) {
- /* found the node */
- *next = prop->next;
- prop->next = np->deadprops;
- np->deadprops = prop;
- found = 1;
- break;
- }
- next = &(*next)->next;
- }
- write_unlock_irqrestore(&devtree_lock, flags);
-
- if (!found)
- return -ENODEV;
-
-#ifdef CONFIG_PROC_DEVICETREE
- /* try to remove the proc node as well */
- if (np->pde)
- proc_device_tree_remove_prop(np->pde, prop);
-#endif /* CONFIG_PROC_DEVICETREE */
-
- return 0;
-}
-
-/*
- * Update a property in a node. Note that we don't actually
- * remove it, since we have given out who-knows-how-many pointers
- * to the data using get-property. Instead we just move the property
- * to the "dead properties" list, and add the new property to the
- * property list
- */
-int prom_update_property(struct device_node *np,
- struct property *newprop,
- struct property *oldprop)
-{
- struct property **next;
- unsigned long flags;
- int found = 0;
-
- write_lock_irqsave(&devtree_lock, flags);
- next = &np->properties;
- while (*next) {
- if (*next == oldprop) {
- /* found the node */
- newprop->next = oldprop->next;
- *next = newprop;
- oldprop->next = np->deadprops;
- np->deadprops = oldprop;
- found = 1;
- break;
- }
- next = &(*next)->next;
- }
- write_unlock_irqrestore(&devtree_lock, flags);
-
- if (!found)
- return -ENODEV;
-
-#ifdef CONFIG_PROC_DEVICETREE
- /* try to add to proc as well if it was initialized */
- if (np->pde)
- proc_device_tree_update_prop(np->pde, newprop, oldprop);
-#endif /* CONFIG_PROC_DEVICETREE */
-
- return 0;
-}
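/*
 * Illustrative sketch, not part of this patch: replacing a property with
 * the helper above.  The property name and value are made up and error
 * handling is trimmed; the old property ends up on np->deadprops, so any
 * pointers previously handed out remain valid.
 */
static int example_replace_status(struct device_node *np,
				  struct property *oldprop)
{
	struct property *newprop = kzalloc(sizeof(*newprop), GFP_KERNEL);

	if (!newprop)
		return -ENOMEM;

	newprop->name = kstrdup("status", GFP_KERNEL);
	newprop->value = kstrdup("disabled", GFP_KERNEL);
	newprop->length = sizeof("disabled");

	return prom_update_property(np, newprop, oldprop);
}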
-
#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
static struct debugfs_blob_wrapper flat_dt_blob;
diff --git a/arch/microblaze/kernel/prom_parse.c b/arch/microblaze/kernel/prom_parse.c
index ae0352ecd5a..bf7e6c27e31 100644
--- a/arch/microblaze/kernel/prom_parse.c
+++ b/arch/microblaze/kernel/prom_parse.c
@@ -256,7 +256,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
if (ppdev == NULL) {
struct pci_controller *host;
host = pci_bus_to_host(pdev->bus);
- ppnode = host ? host->arch_data : NULL;
+ ppnode = host ? host->dn : NULL;
 		/* No node for host bridge? Give up */
if (ppnode == NULL)
return -EINVAL;
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c
index 4b3ac32754d..a4a7770c614 100644
--- a/arch/microblaze/kernel/ptrace.c
+++ b/arch/microblaze/kernel/ptrace.c
@@ -75,29 +75,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
int rval;
unsigned long val = 0;
- unsigned long copied;
switch (request) {
- case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA:
- pr_debug("PEEKTEXT/PEEKDATA at %08lX\n", addr);
- copied = access_process_vm(child, addr, &val, sizeof(val), 0);
- rval = -EIO;
- if (copied != sizeof(val))
- break;
- rval = put_user(val, (unsigned long *)data);
- break;
-
- case PTRACE_POKETEXT: /* write the word at location addr. */
- case PTRACE_POKEDATA:
- pr_debug("POKETEXT/POKEDATA to %08lX\n", addr);
- rval = 0;
- if (access_process_vm(child, addr, &data, sizeof(data), 1)
- == sizeof(data))
- break;
- rval = -EIO;
- break;
-
/* Read/write the word at location ADDR in the registers. */
case PTRACE_PEEKUSR:
case PTRACE_POKEUSR:
@@ -130,50 +109,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
if (rval == 0 && request == PTRACE_PEEKUSR)
rval = put_user(val, (unsigned long *)data);
break;
- /* Continue and stop at next (return from) syscall */
- case PTRACE_SYSCALL:
- pr_debug("PTRACE_SYSCALL\n");
- case PTRACE_SINGLESTEP:
- pr_debug("PTRACE_SINGLESTEP\n");
- /* Restart after a signal. */
- case PTRACE_CONT:
- pr_debug("PTRACE_CONT\n");
- rval = -EIO;
- if (!valid_signal(data))
- break;
-
- if (request == PTRACE_SYSCALL)
- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- else
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-
- child->exit_code = data;
- pr_debug("wakeup_process\n");
- wake_up_process(child);
- rval = 0;
- break;
-
- /*
- * make the child exit. Best I can do is send it a sigkill.
- * perhaps it should be put in the status that it wants to
- * exit.
- */
- case PTRACE_KILL:
- pr_debug("PTRACE_KILL\n");
- rval = 0;
- if (child->exit_state == EXIT_ZOMBIE) /* already dead */
- break;
- child->exit_code = SIGKILL;
- wake_up_process(child);
- break;
-
- case PTRACE_DETACH: /* detach a process that was attached. */
- pr_debug("PTRACE_DETACH\n");
- rval = ptrace_detach(child, data);
- break;
default:
- /* rval = ptrace_request(child, request, addr, data); noMMU */
- rval = -EIO;
+ rval = ptrace_request(child, request, addr, data);
}
return rval;
}
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 5372b24ad04..17c98dbcec8 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -22,7 +22,10 @@
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/param.h>
+#include <linux/pci.h>
#include <linux/cache.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/entry.h>
#include <asm/cpuinfo.h>
@@ -54,13 +57,10 @@ void __init setup_arch(char **cmdline_p)
microblaze_cache_init();
- enable_dcache();
-
- invalidate_icache();
- enable_icache();
-
setup_memory();
+ xilinx_pci_init();
+
#if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER)
printk(KERN_NOTICE "Self modified code enable\n");
#endif
@@ -92,6 +92,12 @@ inline unsigned get_romfs_len(unsigned *addr)
}
#endif /* CONFIG_MTD_UCLINUX_EBSS */
+#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
+#define eprintk early_printk
+#else
+#define eprintk printk
+#endif
+
void __init machine_early_init(const char *cmdline, unsigned int ram,
unsigned int fdt, unsigned int msr)
{
@@ -139,32 +145,32 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
setup_early_printk(NULL);
#endif
- early_printk("Ramdisk addr 0x%08x, ", ram);
+ eprintk("Ramdisk addr 0x%08x, ", ram);
if (fdt)
- early_printk("FDT at 0x%08x\n", fdt);
+ eprintk("FDT at 0x%08x\n", fdt);
else
- early_printk("Compiled-in FDT at 0x%08x\n",
+ eprintk("Compiled-in FDT at 0x%08x\n",
(unsigned int)_fdt_start);
#ifdef CONFIG_MTD_UCLINUX
- early_printk("Found romfs @ 0x%08x (0x%08x)\n",
+ eprintk("Found romfs @ 0x%08x (0x%08x)\n",
romfs_base, romfs_size);
- early_printk("#### klimit %p ####\n", old_klimit);
+ eprintk("#### klimit %p ####\n", old_klimit);
BUG_ON(romfs_size < 0); /* What else can we do? */
- early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
+ eprintk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
romfs_size, romfs_base, (unsigned)&_ebss);
- early_printk("New klimit: 0x%08x\n", (unsigned)klimit);
+ eprintk("New klimit: 0x%08x\n", (unsigned)klimit);
#endif
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
if (msr)
- early_printk("!!!Your kernel has setup MSR instruction but "
+		eprintk("!!!Your kernel was built with MSR instruction support but "
				"the CPU does not have it %d\n", msr);
#else
if (!msr)
- early_printk("!!!Your kernel not setup MSR instruction but "
+		eprintk("!!!Your kernel was not built with MSR instruction support but "
				"the CPU does have it %d\n", msr);
#endif
@@ -187,3 +193,37 @@ static int microblaze_debugfs_init(void)
}
arch_initcall(microblaze_debugfs_init);
#endif
+
+static int dflt_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+	/* We are only interested in device addition */
+ if (action != BUS_NOTIFY_ADD_DEVICE)
+ return 0;
+
+ set_dma_ops(dev, &dma_direct_ops);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block dflt_plat_bus_notifier = {
+ .notifier_call = dflt_bus_notify,
+ .priority = INT_MAX,
+};
+
+static struct notifier_block dflt_of_bus_notifier = {
+ .notifier_call = dflt_bus_notify,
+ .priority = INT_MAX,
+};
+
+static int __init setup_bus_notifier(void)
+{
+ bus_register_notifier(&platform_bus_type, &dflt_plat_bus_notifier);
+ bus_register_notifier(&of_platform_bus_type, &dflt_of_bus_notifier);
+
+ return 0;
+}
+
+arch_initcall(setup_bus_notifier);
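/*
 * Illustrative sketch, not part of this patch: with the notifiers above in
 * place, every device added to the platform or OF bus is given
 * dma_direct_ops, so an ordinary driver can use the streaming DMA API with
 * no arch-specific setup.  The device and kmalloc'ed buffer are assumed to
 * come from the caller; the function name is made up.
 */
static int example_map_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -EIO;

	/* ... hand 'handle' to the hardware and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}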
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index 9f3c205fb75..f4e00b7f125 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -30,6 +30,7 @@
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/unistd.h>
+#include <linux/slab.h>
#include <asm/syscalls.h>
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 4088be7d4e2..03376dc814c 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -366,7 +366,7 @@ ENTRY(sys_call_table)
.long sys_shutdown
.long sys_sendmsg /* 360 */
.long sys_recvmsg
- .long sys_ni_syscall
+ .long sys_accept4
.long sys_ni_syscall
.long sys_ni_syscall
.long sys_rt_tgsigqueueinfo /* 365 */
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
index eaaaf805f31..5e4570ef515 100644
--- a/arch/microblaze/kernel/traps.c
+++ b/arch/microblaze/kernel/traps.c
@@ -22,13 +22,11 @@ void trap_init(void)
__enable_hw_exceptions();
}
-static int kstack_depth_to_print = 24;
+static unsigned long kstack_depth_to_print = 24;
static int __init kstack_setup(char *s)
{
- kstack_depth_to_print = strict_strtoul(s, 0, NULL);
-
- return 1;
+ return !strict_strtoul(s, 0, &kstack_depth_to_print);
}
__setup("kstack=", kstack_setup);