author      Paul Mundt <lethal@linux-sh.org>    2009-08-23 18:02:59 +0900
committer   Paul Mundt <lethal@linux-sh.org>    2009-08-23 18:02:59 +0900
commit      0858d9c0c591ce9baa1baf72eaf6f67823f3bc25 (patch)
tree        1ea697a8e34ff1784572db22bcb5c9da7bbfb40c /arch/sh/kernel
parent      9d7302299ee96ca954fe4ab8ca640333b6e19ad0 (diff)
parent      963e04cafbf001ec431025a46ec246ae6d89daba (diff)
Merge branch 'sh/hwblk' into sh/pm-runtime
Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--   arch/sh/kernel/Makefile_32                      9
-rw-r--r--   arch/sh/kernel/Makefile_64                      3
-rw-r--r--   arch/sh/kernel/asm-offsets.c                    1
-rw-r--r--   arch/sh/kernel/cpu/Makefile                     2
-rw-r--r--   arch/sh/kernel/cpu/hwblk.c                    155
-rw-r--r--   arch/sh/kernel/cpu/init.c                      34
-rw-r--r--   arch/sh/kernel/cpu/sh2/entry.S                  3
-rw-r--r--   arch/sh/kernel/cpu/sh2a/entry.S                 3
-rw-r--r--   arch/sh/kernel/cpu/sh3/entry.S                  1
-rw-r--r--   arch/sh/kernel/cpu/sh4a/Makefile                6
-rw-r--r--   arch/sh/kernel/cpu/sh4a/clock-sh7722.c         63
-rw-r--r--   arch/sh/kernel/cpu/sh4a/clock-sh7723.c        113
-rw-r--r--   arch/sh/kernel/cpu/sh4a/clock-sh7724.c        122
-rw-r--r--   arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c        106
-rw-r--r--   arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c        117
-rw-r--r--   arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c        121
-rw-r--r--   arch/sh/kernel/cpu/sh4a/setup-sh7366.c          2
-rw-r--r--   arch/sh/kernel/cpu/sh4a/setup-sh7722.c         39
-rw-r--r--   arch/sh/kernel/cpu/sh4a/setup-sh7723.c         42
-rw-r--r--   arch/sh/kernel/cpu/sh4a/setup-sh7724.c         43
-rw-r--r--   arch/sh/kernel/cpu/shmobile/Makefile            1
-rw-r--r--   arch/sh/kernel/cpu/shmobile/cpuidle.c         102
-rw-r--r--   arch/sh/kernel/cpu/shmobile/pm.c               26
-rw-r--r--   arch/sh/kernel/dumpstack.c                    123
-rw-r--r--   arch/sh/kernel/dwarf.c                        902
-rw-r--r--   arch/sh/kernel/early_printk.c                   5
-rw-r--r--   arch/sh/kernel/entry-common.S                  89
-rw-r--r--   arch/sh/kernel/ftrace.c                       190
-rw-r--r--   arch/sh/kernel/io_trapped.c                     7
-rw-r--r--   arch/sh/kernel/irq.c                           21
-rw-r--r--   arch/sh/kernel/process_32.c                     5
-rw-r--r--   arch/sh/kernel/ptrace_32.c                      8
-rw-r--r--   arch/sh/kernel/setup.c                         71
-rw-r--r--   arch/sh/kernel/sh_ksyms_32.c                    4
-rw-r--r--   arch/sh/kernel/stacktrace.c                    98
-rw-r--r--   arch/sh/kernel/time.c                          29
-rw-r--r--   arch/sh/kernel/traps_32.c                      24
-rw-r--r--   arch/sh/kernel/unwinder.c                     162
-rw-r--r--   arch/sh/kernel/vmlinux.lds.S                   90
39 files changed, 2533 insertions, 409 deletions
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
index 9411e3e31e6..f2245ebf0b3 100644
--- a/arch/sh/kernel/Makefile_32
+++ b/arch/sh/kernel/Makefile_32
@@ -9,10 +9,10 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = -pg
endif
-obj-y := debugtraps.o idle.o io.o io_generic.o irq.o \
+obj-y := debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o \
machvec.o process_32.o ptrace_32.o setup.o signal_32.o \
- sys_sh.o sys_sh32.o syscalls_32.o time.o topology.o \
- traps.o traps_32.o
+ sys_sh.o sys_sh32.o syscalls_32.o time.o topology.o \
+ traps.o traps_32.o unwinder.o
obj-y += cpu/
obj-$(CONFIG_VSYSCALL) += vsyscall/
@@ -29,8 +29,11 @@ obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_GENERIC_GPIO) += gpio.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
+obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_DUMP_CODE) += disassemble.o
obj-$(CONFIG_HIBERNATION) += swsusp.o
+obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
index 67b9f6c6326..639ee514266 100644
--- a/arch/sh/kernel/Makefile_64
+++ b/arch/sh/kernel/Makefile_64
@@ -2,7 +2,7 @@ extra-y := head_64.o init_task.o vmlinux.lds
obj-y := debugtraps.o idle.o io.o io_generic.o irq.o machvec.o process_64.o \
ptrace_64.o setup.o signal_64.o sys_sh.o sys_sh64.o \
- syscalls_64.o time.o topology.o traps.o traps_64.o
+ syscalls_64.o time.o topology.o traps.o traps_64.o unwinder.o
obj-y += cpu/
obj-$(CONFIG_SMP) += smp.o
@@ -13,6 +13,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
obj-$(CONFIG_GENERIC_GPIO) += gpio.o
+obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o
diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c
index 99aceb28ee2..d218e808294 100644
--- a/arch/sh/kernel/asm-offsets.c
+++ b/arch/sh/kernel/asm-offsets.c
@@ -26,6 +26,7 @@ int main(void)
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block));
+ DEFINE(TI_SIZE, sizeof(struct thread_info));
#ifdef CONFIG_HIBERNATION
DEFINE(PBE_ADDRESS, offsetof(struct pbe, address));
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index eecad7cbd61..3d6b9312dc4 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -19,4 +19,4 @@ obj-$(CONFIG_UBC_WAKEUP) += ubc.o
obj-$(CONFIG_SH_ADC) += adc.o
obj-$(CONFIG_SH_CLK_CPG) += clock-cpg.o
-obj-y += irq/ init.o clock.o
+obj-y += irq/ init.o clock.o hwblk.o
diff --git a/arch/sh/kernel/cpu/hwblk.c b/arch/sh/kernel/cpu/hwblk.c
new file mode 100644
index 00000000000..c0ad7d46e78
--- /dev/null
+++ b/arch/sh/kernel/cpu/hwblk.c
@@ -0,0 +1,155 @@
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <asm/suspend.h>
+#include <asm/hwblk.h>
+#include <asm/clock.h>
+
+static DEFINE_SPINLOCK(hwblk_lock);
+
+static void hwblk_area_mod_cnt(struct hwblk_info *info,
+ int area, int counter, int value, int goal)
+{
+ struct hwblk_area *hap = info->areas + area;
+
+ hap->cnt[counter] += value;
+
+ if (hap->cnt[counter] != goal)
+ return;
+
+ if (hap->flags & HWBLK_AREA_FLAG_PARENT)
+ hwblk_area_mod_cnt(info, hap->parent, counter, value, goal);
+}
+
+
+static int __hwblk_mod_cnt(struct hwblk_info *info, int hwblk,
+ int counter, int value, int goal)
+{
+ struct hwblk *hp = info->hwblks + hwblk;
+
+ hp->cnt[counter] += value;
+ if (hp->cnt[counter] == goal)
+ hwblk_area_mod_cnt(info, hp->area, counter, value, goal);
+
+ return hp->cnt[counter];
+}
+
+static void hwblk_mod_cnt(struct hwblk_info *info, int hwblk,
+ int counter, int value, int goal)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwblk_lock, flags);
+ __hwblk_mod_cnt(info, hwblk, counter, value, goal);
+ spin_unlock_irqrestore(&hwblk_lock, flags);
+}
+
+void hwblk_cnt_inc(struct hwblk_info *info, int hwblk, int counter)
+{
+ hwblk_mod_cnt(info, hwblk, counter, 1, 1);
+}
+
+void hwblk_cnt_dec(struct hwblk_info *info, int hwblk, int counter)
+{
+ hwblk_mod_cnt(info, hwblk, counter, -1, 0);
+}
+
+void hwblk_enable(struct hwblk_info *info, int hwblk)
+{
+ struct hwblk *hp = info->hwblks + hwblk;
+ unsigned long tmp;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&hwblk_lock, flags);
+
+ ret = __hwblk_mod_cnt(info, hwblk, HWBLK_CNT_USAGE, 1, 1);
+ if (ret == 1) {
+ tmp = __raw_readl(hp->mstp);
+ tmp &= ~(1 << hp->bit);
+ __raw_writel(tmp, hp->mstp);
+ }
+
+ spin_unlock_irqrestore(&hwblk_lock, flags);
+}
+
+void hwblk_disable(struct hwblk_info *info, int hwblk)
+{
+ struct hwblk *hp = info->hwblks + hwblk;
+ unsigned long tmp;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&hwblk_lock, flags);
+
+ ret = __hwblk_mod_cnt(info, hwblk, HWBLK_CNT_USAGE, -1, 0);
+ if (ret == 0) {
+ tmp = __raw_readl(hp->mstp);
+ tmp |= 1 << hp->bit;
+ __raw_writel(tmp, hp->mstp);
+ }
+
+ spin_unlock_irqrestore(&hwblk_lock, flags);
+}
+
+struct hwblk_info *hwblk_info;
+
+int __init hwblk_register(struct hwblk_info *info)
+{
+ hwblk_info = info;
+ return 0;
+}
+
+int __init __weak arch_hwblk_init(void)
+{
+ return 0;
+}
+
+int __weak arch_hwblk_sleep_mode(void)
+{
+ return SUSP_SH_SLEEP;
+}
+
+int __init hwblk_init(void)
+{
+ return arch_hwblk_init();
+}
+
+/* allow clocks to enable and disable hardware blocks */
+static int sh_hwblk_clk_enable(struct clk *clk)
+{
+ if (!hwblk_info)
+ return -ENOENT;
+
+ hwblk_enable(hwblk_info, clk->arch_flags);
+ return 0;
+}
+
+static void sh_hwblk_clk_disable(struct clk *clk)
+{
+ if (hwblk_info)
+ hwblk_disable(hwblk_info, clk->arch_flags);
+}
+
+static struct clk_ops sh_hwblk_clk_ops = {
+ .enable = sh_hwblk_clk_enable,
+ .disable = sh_hwblk_clk_disable,
+ .recalc = followparent_recalc,
+};
+
+int __init sh_hwblk_clk_register(struct clk *clks, int nr)
+{
+ struct clk *clkp;
+ int ret = 0;
+ int k;
+
+ for (k = 0; !ret && (k < nr); k++) {
+ clkp = clks + k;
+ clkp->ops = &sh_hwblk_clk_ops;
+ ret |= clk_register(clkp);
+ }
+
+ return ret;
+}
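
Note on the new hwblk code above: each hardware block keeps a usage counter, and a change is only propagated up to the block's power area when the counter crosses its goal (0 -> 1 on enable, 1 -> 0 on disable), so the area counters end up tracking how many blocks inside an area are live. A minimal sketch of how a driver reaches this path through the ordinary clock API follows; the probe function and the "vpu0" clock choice are illustrative only, while clk_get()/clk_enable()/clk_disable()/clk_put() are the standard kernel clk calls and the rest follows the ops installed by sh_hwblk_clk_register():

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical driver probe: enabling a hwblk-backed clock such as
 * "vpu0" lands in sh_hwblk_clk_enable(), which calls hwblk_enable().
 * On the 0 -> 1 usage transition the module stop bit is cleared
 * (MSTPCR2 bit 1 on SH7722, per the tables added below) and the owning
 * power area's counter is bumped, so the idle code knows the area is busy. */
static int example_probe(struct platform_device *pdev)
{
	struct clk *clk = clk_get(&pdev->dev, "vpu0");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);	/* -> sh_hwblk_clk_enable() -> hwblk_enable() */

	/* ... use the hardware block ... */

	clk_disable(clk);	/* usage back to 0, MSTP bit set again */
	clk_put(clk);
	return 0;
}
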
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index ad85421099c..d40b9db5be0 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -3,7 +3,7 @@
*
* CPU init code
*
- * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2002 - 2009 Paul Mundt
* Copyright (C) 2003 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -62,6 +62,37 @@ static void __init speculative_execution_init(void)
#define speculative_execution_init() do { } while (0)
#endif
+#ifdef CONFIG_CPU_SH4A
+#define EXPMASK 0xff2f0004
+#define EXPMASK_RTEDS (1 << 0)
+#define EXPMASK_BRDSSLP (1 << 1)
+#define EXPMASK_MMCAW (1 << 4)
+
+static void __init expmask_init(void)
+{
+ unsigned long expmask = __raw_readl(EXPMASK);
+
+ /*
+ * Future proofing.
+ *
+ * Disable support for slottable sleep instruction
+ * and non-nop instructions in the rte delay slot.
+ */
+ expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP);
+
+ /*
+ * Enable associative writes to the memory-mapped cache array
+ * until the cache flush ops have been rewritten.
+ */
+ expmask |= EXPMASK_MMCAW;
+
+ __raw_writel(expmask, EXPMASK);
+ ctrl_barrier();
+}
+#else
+#define expmask_init() do { } while (0)
+#endif
+
/* 2nd-level cache init */
void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void)
{
@@ -321,4 +352,5 @@ asmlinkage void __init sh_cpu_init(void)
#endif
speculative_execution_init();
+ expmask_init();
}
diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S
index becc54c4569..c8a4331d9b8 100644
--- a/arch/sh/kernel/cpu/sh2/entry.S
+++ b/arch/sh/kernel/cpu/sh2/entry.S
@@ -227,8 +227,9 @@ ENTRY(sh_bios_handler)
mov.l @r15+, r14
add #8,r15
lds.l @r15+, pr
+ mov.l @r15+,r15
rte
- mov.l @r15+,r15
+ nop
.align 2
1: .long gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */
diff --git a/arch/sh/kernel/cpu/sh2a/entry.S b/arch/sh/kernel/cpu/sh2a/entry.S
index ab3903eeda5..222742ddc0d 100644
--- a/arch/sh/kernel/cpu/sh2a/entry.S
+++ b/arch/sh/kernel/cpu/sh2a/entry.S
@@ -176,8 +176,9 @@ ENTRY(sh_bios_handler)
movml.l @r15+,r14
add #8,r15
lds.l @r15+, pr
+ mov.l @r15+,r15
rte
- mov.l @r15+,r15
+ nop
.align 2
1: .long gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 3cb531f233f..67ad6467c69 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -137,6 +137,7 @@ ENTRY(tlb_protection_violation_store)
mov #1, r5
call_dpf:
+ setup_frame_reg
mov.l 1f, r0
mov r5, r8
mov.l @r0, r6
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index ebdd391d5f4..12cddf4c721 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -25,9 +25,9 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o
clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o
clock-$(CONFIG_CPU_SUBTYPE_SH7786) := clock-sh7786.o
clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o
-clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o
-clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o
-clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o hwblk-sh7722.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o hwblk-sh7723.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o hwblk-sh7724.o
clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7366.o
clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
index 40f859354f7..5b1bbbe63b1 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
@@ -22,6 +22,8 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/clock.h>
+#include <asm/hwblk.h>
+#include <cpu/sh7722.h>
/* SH7722 registers */
#define FRQCR 0xa4150000
@@ -30,9 +32,6 @@
#define SCLKBCR 0xa415000c
#define IRDACLKCR 0xa4150018
#define PLLCR 0xa4150024
-#define MSTPCR0 0xa4150030
-#define MSTPCR1 0xa4150034
-#define MSTPCR2 0xa4150038
#define DLLFRQ 0xa4150050
/* Fixed 32 KHz root clock for RTC and Power Management purposes */
@@ -140,35 +139,37 @@ struct clk div6_clks[] = {
SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0),
};
-#define MSTP(_str, _parent, _reg, _bit, _flags) \
- SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _flags)
+#define R_CLK &r_clk
+#define P_CLK &div4_clks[DIV4_P]
+#define B_CLK &div4_clks[DIV4_B]
+#define U_CLK &div4_clks[DIV4_U]
static struct clk mstp_clks[] = {
- MSTP("uram0", &div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
- MSTP("xymem0", &div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
- MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0),
- MSTP("cmt0", &r_clk, MSTPCR0, 14, 0),
- MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0),
- MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0),
- MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 7, 0),
- MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 6, 0),
- MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 5, 0),
-
- MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0),
- MSTP("rtc0", &r_clk, MSTPCR1, 8, 0),
-
- MSTP("sdhi0", &div4_clks[DIV4_P], MSTPCR2, 18, 0),
- MSTP("keysc0", &r_clk, MSTPCR2, 14, 0),
- MSTP("usbf0", &div4_clks[DIV4_P], MSTPCR2, 11, 0),
- MSTP("2dg0", &div4_clks[DIV4_B], MSTPCR2, 9, 0),
- MSTP("siu0", &div4_clks[DIV4_B], MSTPCR2, 8, 0),
- MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0),
- MSTP("jpu0", &div4_clks[DIV4_B], MSTPCR2, 6, CLK_ENABLE_ON_INIT),
- MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0),
- MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0),
- MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT),
- MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT),
- MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0),
+ SH_HWBLK_CLK("uram0", -1, U_CLK, HWBLK_URAM, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("xymem0", -1, B_CLK, HWBLK_XYMEM, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU, 0),
+ SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0),
+ SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0),
+ SH_HWBLK_CLK("flctl0", -1, P_CLK, HWBLK_FLCTL, 0),
+ SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0),
+ SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0),
+ SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0),
+
+ SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC, 0),
+ SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0),
+
+ SH_HWBLK_CLK("sdhi0", -1, P_CLK, HWBLK_SDHI, 0),
+ SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0),
+ SH_HWBLK_CLK("usbf0", -1, P_CLK, HWBLK_USBF, 0),
+ SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0),
+ SH_HWBLK_CLK("siu0", -1, B_CLK, HWBLK_SIU, 0),
+ SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0),
+ SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU, 0),
+ SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU, 0),
+ SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("lcdc0", -1, P_CLK, HWBLK_LCDC, 0),
};
int __init arch_clk_init(void)
@@ -191,7 +192,7 @@ int __init arch_clk_init(void)
ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks));
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
index e67c2678b8a..e5c63911403 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
@@ -22,6 +22,8 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/clock.h>
+#include <asm/hwblk.h>
+#include <cpu/sh7723.h>
/* SH7723 registers */
#define FRQCR 0xa4150000
@@ -30,9 +32,6 @@
#define SCLKBCR 0xa415000c
#define IRDACLKCR 0xa4150018
#define PLLCR 0xa4150024
-#define MSTPCR0 0xa4150030
-#define MSTPCR1 0xa4150034
-#define MSTPCR2 0xa4150038
#define DLLFRQ 0xa4150050
/* Fixed 32 KHz root clock for RTC and Power Management purposes */
@@ -140,60 +139,64 @@ struct clk div6_clks[] = {
SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0),
};
-#define MSTP(_str, _parent, _reg, _bit, _force_on, _need_cpg, _need_ram) \
- SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _force_on * CLK_ENABLE_ON_INIT)
+#define R_CLK (&r_clk)
+#define P_CLK (&div4_clks[DIV4_P])
+#define B_CLK (&div4_clks[DIV4_B])
+#define U_CLK (&div4_clks[DIV4_U])
+#define I_CLK (&div4_clks[DIV4_I])
+#define SH_CLK (&div4_clks[DIV4_SH])
static struct clk mstp_clks[] = {
/* See page 60 of Datasheet V1.0: Overview -> Block Diagram */
- MSTP("tlb0", &div4_clks[DIV4_I], MSTPCR0, 31, 1, 1, 0),
- MSTP("ic0", &div4_clks[DIV4_I], MSTPCR0, 30, 1, 1, 0),
- MSTP("oc0", &div4_clks[DIV4_I], MSTPCR0, 29, 1, 1, 0),
- MSTP("l2c0", &div4_clks[DIV4_SH], MSTPCR0, 28, 1, 1, 0),
- MSTP("ilmem0", &div4_clks[DIV4_I], MSTPCR0, 27, 1, 1, 0),
- MSTP("fpu0", &div4_clks[DIV4_I], MSTPCR0, 24, 1, 1, 0),
- MSTP("intc0", &div4_clks[DIV4_I], MSTPCR0, 22, 1, 1, 0),
- MSTP("dmac0", &div4_clks[DIV4_B], MSTPCR0, 21, 0, 1, 1),
- MSTP("sh0", &div4_clks[DIV4_SH], MSTPCR0, 20, 0, 1, 0),
- MSTP("hudi0", &div4_clks[DIV4_P], MSTPCR0, 19, 0, 1, 0),
- MSTP("ubc0", &div4_clks[DIV4_I], MSTPCR0, 17, 0, 1, 0),
- MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0, 1, 0),
- MSTP("cmt0", &r_clk, MSTPCR0, 14, 0, 0, 0),
- MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0, 0, 0),
- MSTP("dmac1", &div4_clks[DIV4_B], MSTPCR0, 12, 0, 1, 1),
- MSTP("tmu1", &div4_clks[DIV4_P], MSTPCR0, 11, 0, 1, 0),
- MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0, 1, 0),
- MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 9, 0, 1, 0),
- MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 8, 0, 1, 0),
- MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 7, 0, 1, 0),
- MSTP("scif3", &div4_clks[DIV4_B], MSTPCR0, 6, 0, 1, 0),
- MSTP("scif4", &div4_clks[DIV4_B], MSTPCR0, 5, 0, 1, 0),
- MSTP("scif5", &div4_clks[DIV4_B], MSTPCR0, 4, 0, 1, 0),
- MSTP("msiof0", &div4_clks[DIV4_B], MSTPCR0, 2, 0, 1, 0),
- MSTP("msiof1", &div4_clks[DIV4_B], MSTPCR0, 1, 0, 1, 0),
- MSTP("meram0", &div4_clks[DIV4_SH], MSTPCR0, 0, 1, 1, 0),
-
- MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0, 1, 0),
- MSTP("rtc0", &r_clk, MSTPCR1, 8, 0, 0, 0),
-
- MSTP("atapi0", &div4_clks[DIV4_SH], MSTPCR2, 28, 0, 1, 0),
- MSTP("adc0", &div4_clks[DIV4_P], MSTPCR2, 27, 0, 1, 0),
- MSTP("tpu0", &div4_clks[DIV4_B], MSTPCR2, 25, 0, 1, 0),
- MSTP("irda0", &div4_clks[DIV4_P], MSTPCR2, 24, 0, 1, 0),
- MSTP("tsif0", &div4_clks[DIV4_B], MSTPCR2, 22, 0, 1, 0),
- MSTP("icb0", &div4_clks[DIV4_B], MSTPCR2, 21, 0, 1, 1),
- MSTP("sdhi0", &div4_clks[DIV4_B], MSTPCR2, 18, 0, 1, 0),
- MSTP("sdhi1", &div4_clks[DIV4_B], MSTPCR2, 17, 0, 1, 0),
- MSTP("keysc0", &r_clk, MSTPCR2, 14, 0, 0, 0),
- MSTP("usb0", &div4_clks[DIV4_B], MSTPCR2, 11, 0, 1, 0),
- MSTP("2dg0", &div4_clks[DIV4_B], MSTPCR2, 10, 0, 1, 1),
- MSTP("siu0", &div4_clks[DIV4_B], MSTPCR2, 8, 0, 1, 0),
- MSTP("veu1", &div4_clks[DIV4_B], MSTPCR2, 6, 1, 1, 1),
- MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0, 1, 1),
- MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0, 1, 1),
- MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0, 1, 1),
- MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, 1, 1, 1),
- MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, 1, 1, 1),
- MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0, 1, 1),
+ SH_HWBLK_CLK("tlb0", -1, I_CLK, HWBLK_TLB, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("ic0", -1, I_CLK, HWBLK_IC, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("oc0", -1, I_CLK, HWBLK_OC, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("l2c0", -1, SH_CLK, HWBLK_L2C, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("ilmem0", -1, I_CLK, HWBLK_ILMEM, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("fpu0", -1, I_CLK, HWBLK_FPU, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("intc0", -1, I_CLK, HWBLK_INTC, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("dmac0", -1, B_CLK, HWBLK_DMAC0, 0),
+ SH_HWBLK_CLK("sh0", -1, SH_CLK, HWBLK_SHYWAY, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("hudi0", -1, P_CLK, HWBLK_HUDI, 0),
+ SH_HWBLK_CLK("ubc0", -1, I_CLK, HWBLK_UBC, 0),
+ SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU0, 0),
+ SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0),
+ SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0),
+ SH_HWBLK_CLK("dmac1", -1, B_CLK, HWBLK_DMAC1, 0),
+ SH_HWBLK_CLK("tmu1", -1, P_CLK, HWBLK_TMU1, 0),
+ SH_HWBLK_CLK("flctl0", -1, P_CLK, HWBLK_FLCTL, 0),
+ SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0),
+ SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0),
+ SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0),
+ SH_HWBLK_CLK("scif3", -1, B_CLK, HWBLK_SCIF3, 0),
+ SH_HWBLK_CLK("scif4", -1, B_CLK, HWBLK_SCIF4, 0),
+ SH_HWBLK_CLK("scif5", -1, B_CLK, HWBLK_SCIF5, 0),
+ SH_HWBLK_CLK("msiof0", -1, B_CLK, HWBLK_MSIOF0, 0),
+ SH_HWBLK_CLK("msiof1", -1, B_CLK, HWBLK_MSIOF1, 0),
+ SH_HWBLK_CLK("meram0", -1, SH_CLK, HWBLK_MERAM, 0),
+
+ SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC, 0),
+ SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0),
+
+ SH_HWBLK_CLK("atapi0", -1, SH_CLK, HWBLK_ATAPI, 0),
+ SH_HWBLK_CLK("adc0", -1, P_CLK, HWBLK_ADC, 0),
+ SH_HWBLK_CLK("tpu0", -1, B_CLK, HWBLK_TPU, 0),
+ SH_HWBLK_CLK("irda0", -1, P_CLK, HWBLK_IRDA, 0),
+ SH_HWBLK_CLK("tsif0", -1, B_CLK, HWBLK_TSIF, 0),
+ SH_HWBLK_CLK("icb0", -1, B_CLK, HWBLK_ICB, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("sdhi0", -1, B_CLK, HWBLK_SDHI0, 0),
+ SH_HWBLK_CLK("sdhi1", -1, B_CLK, HWBLK_SDHI1, 0),
+ SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0),
+ SH_HWBLK_CLK("usb0", -1, B_CLK, HWBLK_USB, 0),
+ SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0),
+ SH_HWBLK_CLK("siu0", -1, B_CLK, HWBLK_SIU, 0),
+ SH_HWBLK_CLK("veu1", -1, B_CLK, HWBLK_VEU2H1, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0),
+ SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU, 0),
+ SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU, 0),
+ SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU2H0, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("lcdc0", -1, B_CLK, HWBLK_LCDC, 0),
};
int __init arch_clk_init(void)
@@ -216,7 +219,7 @@ int __init arch_clk_init(void)
ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks));
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index 5d5c9b95288..34611d97378 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -22,6 +22,8 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/clock.h>
+#include <asm/hwblk.h>
+#include <cpu/sh7724.h>
/* SH7724 registers */
#define FRQCRA 0xa4150000
@@ -31,9 +33,6 @@
#define FCLKBCR 0xa415000c
#define IRDACLKCR 0xa4150018
#define PLLCR 0xa4150024
-#define MSTPCR0 0xa4150030
-#define MSTPCR1 0xa4150034
-#define MSTPCR2 0xa4150038
#define SPUCLKCR 0xa415003c
#define FLLFRQ 0xa4150050
#define LSTATS 0xa4150060
@@ -156,64 +155,67 @@ struct clk div6_clks[] = {
SH_CLK_DIV6("spu_clk", &div3_clk, SPUCLKCR, 0),
};
-#define MSTP(_str, _parent, _reg, _bit, _force_on, _need_cpg, _need_ram) \
- SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _force_on * CLK_ENABLE_ON_INIT)
+#define R_CLK (&r_clk)
+#define P_CLK (&div4_clks[DIV4_P])
+#define B_CLK (&div4_clks[DIV4_B])
+#define I_CLK (&div4_clks[DIV4_I])
+#define SH_CLK (&div4_clks[DIV4_SH])
static struct clk mstp_clks[] = {
- MSTP("tlb0", &div4_clks[DIV4_I], MSTPCR0, 31, 1, 1, 0),
- MSTP("ic0", &div4_clks[DIV4_I], MSTPCR0, 30, 1, 1, 0),
- MSTP("oc0", &div4_clks[DIV4_I], MSTPCR0, 29, 1, 1, 0),
- MSTP("rs0", &div4_clks[DIV4_B], MSTPCR0, 28, 1, 1, 0),
- MSTP("ilmem0", &div4_clks[DIV4_I], MSTPCR0, 27, 1, 1, 0),
- MSTP("l2c0", &div4_clks[DIV4_SH], MSTPCR0, 26, 1, 1, 0),
- MSTP("fpu0", &div4_clks[DIV4_I], MSTPCR0, 24, 1, 1, 0),
- MSTP("intc0", &div4_clks[DIV4_P], MSTPCR0, 22, 1, 1, 0),
- MSTP("dmac0", &div4_clks[DIV4_B], MSTPCR0, 21, 0, 1, 1),
- MSTP("sh0", &div4_clks[DIV4_SH], MSTPCR0, 20, 0, 1, 0),
- MSTP("hudi0", &div4_clks[DIV4_P], MSTPCR0, 19, 0, 1, 0),
- MSTP("ubc0", &div4_clks[DIV4_I], MSTPCR0, 17, 0, 1, 0),
- MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0, 1, 0),
- MSTP("cmt0", &r_clk, MSTPCR0, 14, 0, 0, 0),
- MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0, 0, 0),
- MSTP("dmac1", &div4_clks[DIV4_B], MSTPCR0, 12, 0, 1, 1),
- MSTP("tmu1", &div4_clks[DIV4_P], MSTPCR0, 10, 0, 1, 0),
- MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 9, 0, 1, 0),
- MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 8, 0, 1, 0),
- MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 7, 0, 1, 0),
- MSTP("scif3", &div4_clks[DIV4_B], MSTPCR0, 6, 0, 1, 0),
- MSTP("scif4", &div4_clks[DIV4_B], MSTPCR0, 5, 0, 1, 0),
- MSTP("scif5", &div4_clks[DIV4_B], MSTPCR0, 4, 0, 1, 0),
- MSTP("msiof0", &div4_clks[DIV4_B], MSTPCR0, 2, 0, 1, 0),
- MSTP("msiof1", &div4_clks[DIV4_B], MSTPCR0, 1, 0, 1, 0),
-
- MSTP("keysc0", &r_clk, MSTPCR1, 12, 0, 0, 0),
- MSTP("rtc0", &r_clk, MSTPCR1, 11, 0, 0, 0),
- MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0, 1, 0),
- MSTP("i2c1", &div4_clks[DIV4_P], MSTPCR1, 8, 0, 1, 0),
-
- MSTP("mmc0", &div4_clks[DIV4_B], MSTPCR2, 29, 0, 1, 0),
- MSTP("eth0", &div4_clks[DIV4_B], MSTPCR2, 28, 0, 1, 0),
- MSTP("atapi0", &div4_clks[DIV4_B], MSTPCR2, 26, 0, 1, 0),
- MSTP("tpu0", &div4_clks[DIV4_B], MSTPCR2, 25, 0, 1, 0),
- MSTP("irda0", &div4_clks[DIV4_P], MSTPCR2, 24, 0, 1, 0),
- MSTP("tsif0", &div4_clks[DIV4_B], MSTPCR2, 22, 0, 1, 0),
- MSTP("usb1", &div4_clks[DIV4_B], MSTPCR2, 21, 0, 1, 1),
- MSTP("usb0", &div4_clks[DIV4_B], MSTPCR2, 20, 0, 1, 1),
- MSTP("2dg0", &div4_clks[DIV4_B], MSTPCR2, 19, 0, 1, 1),
- MSTP("sdhi0", &div4_clks[DIV4_B], MSTPCR2, 18, 0, 1, 0),
- MSTP("sdhi1", &div4_clks[DIV4_B], MSTPCR2, 17, 0, 1, 0),
- MSTP("veu1", &div4_clks[DIV4_B], MSTPCR2, 15, 1, 1, 1),
- MSTP("ceu1", &div4_clks[DIV4_B], MSTPCR2, 13, 0, 1, 1),
- MSTP("beu1", &div4_clks[DIV4_B], MSTPCR2, 12, 0, 1, 1),
- MSTP("2ddmac0", &div4_clks[DIV4_SH], MSTPCR2, 10, 0, 1, 1),
- MSTP("spu0", &div4_clks[DIV4_B], MSTPCR2, 9, 0, 1, 0),
- MSTP("jpu0", &div4_clks[DIV4_B], MSTPCR2, 6, 1, 1, 1),
- MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0, 1, 1),
- MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0, 1, 1),
- MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0, 1, 1),
- MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, 1, 1, 1),
- MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, 1, 1, 1),
- MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0, 1, 1),
+ SH_HWBLK_CLK("tlb0", -1, I_CLK, HWBLK_TLB, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("ic0", -1, I_CLK, HWBLK_IC, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("oc0", -1, I_CLK, HWBLK_OC, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("rs0", -1, B_CLK, HWBLK_RSMEM, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("ilmem0", -1, I_CLK, HWBLK_ILMEM, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("l2c0", -1, SH_CLK, HWBLK_L2C, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("fpu0", -1, I_CLK, HWBLK_FPU, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("intc0", -1, P_CLK, HWBLK_INTC, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("dmac0", -1, B_CLK, HWBLK_DMAC0, 0),
+ SH_HWBLK_CLK("sh0", -1, SH_CLK, HWBLK_SHYWAY, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("hudi0", -1, P_CLK, HWBLK_HUDI, 0),
+ SH_HWBLK_CLK("ubc0", -1, I_CLK, HWBLK_UBC, 0),
+ SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU0, 0),
+ SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0),
+ SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0),
+ SH_HWBLK_CLK("dmac1", -1, B_CLK, HWBLK_DMAC1, 0),
+ SH_HWBLK_CLK("tmu1", -1, P_CLK, HWBLK_TMU1, 0),
+ SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0),
+ SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0),
+ SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0),
+ SH_HWBLK_CLK("scif3", -1, B_CLK, HWBLK_SCIF3, 0),
+ SH_HWBLK_CLK("scif4", -1, B_CLK, HWBLK_SCIF4, 0),
+ SH_HWBLK_CLK("scif5", -1, B_CLK, HWBLK_SCIF5, 0),
+ SH_HWBLK_CLK("msiof0", -1, B_CLK, HWBLK_MSIOF0, 0),
+ SH_HWBLK_CLK("msiof1", -1, B_CLK, HWBLK_MSIOF1, 0),
+
+ SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0),
+ SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0),
+ SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC0, 0),
+ SH_HWBLK_CLK("i2c1", -1, P_CLK, HWBLK_IIC1, 0),
+
+ SH_HWBLK_CLK("mmc0", -1, B_CLK, HWBLK_MMC, 0),
+ SH_HWBLK_CLK("eth0", -1, B_CLK, HWBLK_ETHER, 0),
+ SH_HWBLK_CLK("atapi0", -1, B_CLK, HWBLK_ATAPI, 0),
+ SH_HWBLK_CLK("tpu0", -1, B_CLK, HWBLK_TPU, 0),
+ SH_HWBLK_CLK("irda0", -1, P_CLK, HWBLK_IRDA, 0),
+ SH_HWBLK_CLK("tsif0", -1, B_CLK, HWBLK_TSIF, 0),
+ SH_HWBLK_CLK("usb1", -1, B_CLK, HWBLK_USB1, 0),
+ SH_HWBLK_CLK("usb0", -1, B_CLK, HWBLK_USB0, 0),
+ SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0),
+ SH_HWBLK_CLK("sdhi0", -1, B_CLK, HWBLK_SDHI0, 0),
+ SH_HWBLK_CLK("sdhi1", -1, B_CLK, HWBLK_SDHI1, 0),
+ SH_HWBLK_CLK("veu1", -1, B_CLK, HWBLK_VEU1, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("ceu1", -1, B_CLK, HWBLK_CEU1, 0),
+ SH_HWBLK_CLK("beu1", -1, B_CLK, HWBLK_BEU1, 0),
+ SH_HWBLK_CLK("2ddmac0", -1, SH_CLK, HWBLK_2DDMAC, 0),
+ SH_HWBLK_CLK("spu0", -1, B_CLK, HWBLK_SPU, 0),
+ SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0),
+ SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU0, 0),
+ SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU0, 0),
+ SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU0, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK("lcdc0", -1, B_CLK, HWBLK_LCDC, 0),
};
int __init arch_clk_init(void)
@@ -236,7 +238,7 @@ int __init arch_clk_init(void)
ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks));
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c
new file mode 100644
index 00000000000..a288b5d9234
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c
@@ -0,0 +1,106 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c
+ *
+ * SH7722 hardware block support
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/suspend.h>
+#include <asm/hwblk.h>
+#include <cpu/sh7722.h>
+
+/* SH7722 registers */
+#define MSTPCR0 0xa4150030
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+
+/* SH7722 Power Domains */
+enum { CORE_AREA, SUB_AREA, CORE_AREA_BM };
+static struct hwblk_area sh7722_hwblk_area[] = {
+ [CORE_AREA] = HWBLK_AREA(0, 0),
+ [CORE_AREA_BM] = HWBLK_AREA(HWBLK_AREA_FLAG_PARENT, CORE_AREA),
+ [SUB_AREA] = HWBLK_AREA(0, 0),
+};
+
+/* Table mapping HWBLK to Module Stop Bit and Power Domain */
+static struct hwblk sh7722_hwblk[HWBLK_NR] = {
+ [HWBLK_TLB] = HWBLK(MSTPCR0, 31, CORE_AREA),
+ [HWBLK_IC] = HWBLK(MSTPCR0, 30, CORE_AREA),
+ [HWBLK_OC] = HWBLK(MSTPCR0, 29, CORE_AREA),
+ [HWBLK_URAM] = HWBLK(MSTPCR0, 28, CORE_AREA),
+ [HWBLK_XYMEM] = HWBLK(MSTPCR0, 26, CORE_AREA),
+ [HWBLK_INTC] = HWBLK(MSTPCR0, 22, CORE_AREA),
+ [HWBLK_DMAC] = HWBLK(MSTPCR0, 21, CORE_AREA_BM),
+ [HWBLK_SHYWAY] = HWBLK(MSTPCR0, 20, CORE_AREA),
+ [HWBLK_HUDI] = HWBLK(MSTPCR0, 19, CORE_AREA),
+ [HWBLK_UBC] = HWBLK(MSTPCR0, 17, CORE_AREA),
+ [HWBLK_TMU] = HWBLK(MSTPCR0, 15, CORE_AREA),
+ [HWBLK_CMT] = HWBLK(MSTPCR0, 14, SUB_AREA),
+ [HWBLK_RWDT] = HWBLK(MSTPCR0, 13, SUB_AREA),
+ [HWBLK_FLCTL] = HWBLK(MSTPCR0, 10, CORE_AREA),
+ [HWBLK_SCIF0] = HWBLK(MSTPCR0, 7, CORE_AREA),
+ [HWBLK_SCIF1] = HWBLK(MSTPCR0, 6, CORE_AREA),
+ [HWBLK_SCIF2] = HWBLK(MSTPCR0, 5, CORE_AREA),
+ [HWBLK_SIO] = HWBLK(MSTPCR0, 3, CORE_AREA),
+ [HWBLK_SIOF0] = HWBLK(MSTPCR0, 2, CORE_AREA),
+ [HWBLK_SIOF1] = HWBLK(MSTPCR0, 1, CORE_AREA),
+
+ [HWBLK_IIC] = HWBLK(MSTPCR1, 9, CORE_AREA),
+ [HWBLK_RTC] = HWBLK(MSTPCR1, 8, SUB_AREA),
+
+ [HWBLK_TPU] = HWBLK(MSTPCR2, 25, CORE_AREA),
+ [HWBLK_IRDA] = HWBLK(MSTPCR2, 24, CORE_AREA),
+ [HWBLK_SDHI] = HWBLK(MSTPCR2, 18, CORE_AREA),
+ [HWBLK_SIM] = HWBLK(MSTPCR2, 16, CORE_AREA),
+ [HWBLK_KEYSC] = HWBLK(MSTPCR2, 14, SUB_AREA),
+ [HWBLK_TSIF] = HWBLK(MSTPCR2, 13, SUB_AREA),
+ [HWBLK_USBF] = HWBLK(MSTPCR2, 11, CORE_AREA),
+ [HWBLK_2DG] = HWBLK(MSTPCR2, 9, CORE_AREA_BM),
+ [HWBLK_SIU] = HWBLK(MSTPCR2, 8, CORE_AREA),
+ [HWBLK_JPU] = HWBLK(MSTPCR2, 6, CORE_AREA_BM),
+ [HWBLK_VOU] = HWBLK(MSTPCR2, 5, CORE_AREA_BM),
+ [HWBLK_BEU] = HWBLK(MSTPCR2, 4, CORE_AREA_BM),
+ [HWBLK_CEU] = HWBLK(MSTPCR2, 3, CORE_AREA_BM),
+ [HWBLK_VEU] = HWBLK(MSTPCR2, 2, CORE_AREA_BM),
+ [HWBLK_VPU] = HWBLK(MSTPCR2, 1, CORE_AREA_BM),
+ [HWBLK_LCDC] = HWBLK(MSTPCR2, 0, CORE_AREA_BM),
+};
+
+static struct hwblk_info sh7722_hwblk_info = {
+ .areas = sh7722_hwblk_area,
+ .nr_areas = ARRAY_SIZE(sh7722_hwblk_area),
+ .hwblks = sh7722_hwblk,
+ .nr_hwblks = ARRAY_SIZE(sh7722_hwblk),
+};
+
+int arch_hwblk_sleep_mode(void)
+{
+ if (!sh7722_hwblk_area[CORE_AREA].cnt[HWBLK_CNT_USAGE])
+ return SUSP_SH_STANDBY | SUSP_SH_SF;
+
+ if (!sh7722_hwblk_area[CORE_AREA_BM].cnt[HWBLK_CNT_USAGE])
+ return SUSP_SH_SLEEP | SUSP_SH_SF;
+
+ return SUSP_SH_SLEEP;
+}
+
+int __init arch_hwblk_init(void)
+{
+ return hwblk_register(&sh7722_hwblk_info);
+}
diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c
new file mode 100644
index 00000000000..a7f4684d203
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c
@@ -0,0 +1,117 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c
+ *
+ * SH7723 hardware block support
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/suspend.h>
+#include <asm/hwblk.h>
+#include <cpu/sh7723.h>
+
+/* SH7723 registers */
+#define MSTPCR0 0xa4150030
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+
+/* SH7723 Power Domains */
+enum { CORE_AREA, SUB_AREA, CORE_AREA_BM };
+static struct hwblk_area sh7723_hwblk_area[] = {
+ [CORE_AREA] = HWBLK_AREA(0, 0),
+ [CORE_AREA_BM] = HWBLK_AREA(HWBLK_AREA_FLAG_PARENT, CORE_AREA),
+ [SUB_AREA] = HWBLK_AREA(0, 0),
+};
+
+/* Table mapping HWBLK to Module Stop Bit and Power Domain */
+static struct hwblk sh7723_hwblk[HWBLK_NR] = {
+ [HWBLK_TLB] = HWBLK(MSTPCR0, 31, CORE_AREA),
+ [HWBLK_IC] = HWBLK(MSTPCR0, 30, CORE_AREA),
+ [HWBLK_OC] = HWBLK(MSTPCR0, 29, CORE_AREA),
+ [HWBLK_L2C] = HWBLK(MSTPCR0, 28, CORE_AREA),
+ [HWBLK_ILMEM] = HWBLK(MSTPCR0, 27, CORE_AREA),
+ [HWBLK_FPU] = HWBLK(MSTPCR0, 24, CORE_AREA),
+ [HWBLK_INTC] = HWBLK(MSTPCR0, 22, CORE_AREA),
+ [HWBLK_DMAC0] = HWBLK(MSTPCR0, 21, CORE_AREA_BM),
+ [HWBLK_SHYWAY] = HWBLK(MSTPCR0, 20, CORE_AREA),
+ [HWBLK_HUDI] = HWBLK(MSTPCR0, 19, CORE_AREA),
+ [HWBLK_DBG] = HWBLK(MSTPCR0, 18, CORE_AREA),
+ [HWBLK_UBC] = HWBLK(MSTPCR0, 17, CORE_AREA),
+ [HWBLK_SUBC] = HWBLK(MSTPCR0, 16, CORE_AREA),
+ [HWBLK_TMU0] = HWBLK(MSTPCR0, 15, CORE_AREA),
+ [HWBLK_CMT] = HWBLK(MSTPCR0, 14, SUB_AREA),
+ [HWBLK_RWDT] = HWBLK(MSTPCR0, 13, SUB_AREA),
+ [HWBLK_DMAC1] = HWBLK(MSTPCR0, 12, CORE_AREA_BM),
+ [HWBLK_TMU1] = HWBLK(MSTPCR0, 11, CORE_AREA),
+ [HWBLK_FLCTL] = HWBLK(MSTPCR0, 10, CORE_AREA),
+ [HWBLK_SCIF0] = HWBLK(MSTPCR0, 9, CORE_AREA),
+ [HWBLK_SCIF1] = HWBLK(MSTPCR0, 8, CORE_AREA),
+ [HWBLK_SCIF2] = HWBLK(MSTPCR0, 7, CORE_AREA),
+ [HWBLK_SCIF3] = HWBLK(MSTPCR0, 6, CORE_AREA),
+ [HWBLK_SCIF4] = HWBLK(MSTPCR0, 5, CORE_AREA),
+ [HWBLK_SCIF5] = HWBLK(MSTPCR0, 4, CORE_AREA),
+ [HWBLK_MSIOF0] = HWBLK(MSTPCR0, 2, CORE_AREA),
+ [HWBLK_MSIOF1] = HWBLK(MSTPCR0, 1, CORE_AREA),
+ [HWBLK_MERAM] = HWBLK(MSTPCR0, 0, CORE_AREA),
+
+ [HWBLK_IIC] = HWBLK(MSTPCR1, 9, CORE_AREA),
+ [HWBLK_RTC] = HWBLK(MSTPCR1, 8, SUB_AREA),
+
+ [HWBLK_ATAPI] = HWBLK(MSTPCR2, 28, CORE_AREA_BM),
+ [HWBLK_ADC] = HWBLK(MSTPCR2, 27, CORE_AREA),
+ [HWBLK_TPU] = HWBLK(MSTPCR2, 25, CORE_AREA),
+ [HWBLK_IRDA] = HWBLK(MSTPCR2, 24, CORE_AREA),
+ [HWBLK_TSIF] = HWBLK(MSTPCR2, 22, CORE_AREA),
+ [HWBLK_ICB] = HWBLK(MSTPCR2, 21, CORE_AREA_BM),
+ [HWBLK_SDHI0] = HWBLK(MSTPCR2, 18, CORE_AREA),
+ [HWBLK_SDHI1] = HWBLK(MSTPCR2, 17, CORE_AREA),
+ [HWBLK_KEYSC] = HWBLK(MSTPCR2, 14, SUB_AREA),
+ [HWBLK_USB] = HWBLK(MSTPCR2, 11, CORE_AREA),
+ [HWBLK_2DG] = HWBLK(MSTPCR2, 10, CORE_AREA_BM),
+ [HWBLK_SIU] = HWBLK(MSTPCR2, 8, CORE_AREA),
+ [HWBLK_VEU2H1] = HWBLK(MSTPCR2, 6, CORE_AREA_BM),
+ [HWBLK_VOU] = HWBLK(MSTPCR2, 5, CORE_AREA_BM),
+ [HWBLK_BEU] = HWBLK(MSTPCR2, 4, CORE_AREA_BM),
+ [HWBLK_CEU] = HWBLK(MSTPCR2, 3, CORE_AREA_BM),
+ [HWBLK_VEU2H0] = HWBLK(MSTPCR2, 2, CORE_AREA_BM),
+ [HWBLK_VPU] = HWBLK(MSTPCR2, 1, CORE_AREA_BM),
+ [HWBLK_LCDC] = HWBLK(MSTPCR2, 0, CORE_AREA_BM),
+};
+
+static struct hwblk_info sh7723_hwblk_info = {
+ .areas = sh7723_hwblk_area,
+ .nr_areas = ARRAY_SIZE(sh7723_hwblk_area),
+ .hwblks = sh7723_hwblk,
+ .nr_hwblks = ARRAY_SIZE(sh7723_hwblk),
+};
+
+int arch_hwblk_sleep_mode(void)
+{
+ if (!sh7723_hwblk_area[CORE_AREA].cnt[HWBLK_CNT_USAGE])
+ return SUSP_SH_STANDBY | SUSP_SH_SF;
+
+ if (!sh7723_hwblk_area[CORE_AREA_BM].cnt[HWBLK_CNT_USAGE])
+ return SUSP_SH_SLEEP | SUSP_SH_SF;
+
+ return SUSP_SH_SLEEP;
+}
+
+int __init arch_hwblk_init(void)
+{
+ return hwblk_register(&sh7723_hwblk_info);
+}
diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c
new file mode 100644
index 00000000000..1613ad6013c
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c
@@ -0,0 +1,121 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c
+ *
+ * SH7724 hardware block support
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/suspend.h>
+#include <asm/hwblk.h>
+#include <cpu/sh7724.h>
+
+/* SH7724 registers */
+#define MSTPCR0 0xa4150030
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+
+/* SH7724 Power Domains */
+enum { CORE_AREA, SUB_AREA, CORE_AREA_BM };
+static struct hwblk_area sh7724_hwblk_area[] = {
+ [CORE_AREA] = HWBLK_AREA(0, 0),
+ [CORE_AREA_BM] = HWBLK_AREA(HWBLK_AREA_FLAG_PARENT, CORE_AREA),
+ [SUB_AREA] = HWBLK_AREA(0, 0),
+};
+
+/* Table mapping HWBLK to Module Stop Bit and Power Domain */
+static struct hwblk sh7724_hwblk[HWBLK_NR] = {
+ [HWBLK_TLB] = HWBLK(MSTPCR0, 31, CORE_AREA),
+ [HWBLK_IC] = HWBLK(MSTPCR0, 30, CORE_AREA),
+ [HWBLK_OC] = HWBLK(MSTPCR0, 29, CORE_AREA),
+ [HWBLK_RSMEM] = HWBLK(MSTPCR0, 28, CORE_AREA),
+ [HWBLK_ILMEM] = HWBLK(MSTPCR0, 27, CORE_AREA),
+ [HWBLK_L2C] = HWBLK(MSTPCR0, 26, CORE_AREA),
+ [HWBLK_FPU] = HWBLK(MSTPCR0, 24, CORE_AREA),
+ [HWBLK_INTC] = HWBLK(MSTPCR0, 22, CORE_AREA),
+ [HWBLK_DMAC0] = HWBLK(MSTPCR0, 21, CORE_AREA_BM),
+ [HWBLK_SHYWAY] = HWBLK(MSTPCR0, 20, CORE_AREA),
+ [HWBLK_HUDI] = HWBLK(MSTPCR0, 19, CORE_AREA),
+ [HWBLK_DBG] = HWBLK(MSTPCR0, 18, CORE_AREA),
+ [HWBLK_UBC] = HWBLK(MSTPCR0, 17, CORE_AREA),
+ [HWBLK_TMU0] = HWBLK(MSTPCR0, 15, CORE_AREA),
+ [HWBLK_CMT] = HWBLK(MSTPCR0, 14, SUB_AREA),
+ [HWBLK_RWDT] = HWBLK(MSTPCR0, 13, SUB_AREA),
+ [HWBLK_DMAC1] = HWBLK(MSTPCR0, 12, CORE_AREA_BM),
+ [HWBLK_TMU1] = HWBLK(MSTPCR0, 10, CORE_AREA),
+ [HWBLK_SCIF0] = HWBLK(MSTPCR0, 9, CORE_AREA),
+ [HWBLK_SCIF1] = HWBLK(MSTPCR0, 8, CORE_AREA),
+ [HWBLK_SCIF2] = HWBLK(MSTPCR0, 7, CORE_AREA),
+ [HWBLK_SCIF3] = HWBLK(MSTPCR0, 6, CORE_AREA),
+ [HWBLK_SCIF4] = HWBLK(MSTPCR0, 5, CORE_AREA),
+ [HWBLK_SCIF5] = HWBLK(MSTPCR0, 4, CORE_AREA),
+ [HWBLK_MSIOF0] = HWBLK(MSTPCR0, 2, CORE_AREA),
+ [HWBLK_MSIOF1] = HWBLK(MSTPCR0, 1, CORE_AREA),
+
+ [HWBLK_KEYSC] = HWBLK(MSTPCR1, 12, SUB_AREA),
+ [HWBLK_RTC] = HWBLK(MSTPCR1, 11, SUB_AREA),
+ [HWBLK_IIC0] = HWBLK(MSTPCR1, 9, CORE_AREA),
+ [HWBLK_IIC1] = HWBLK(MSTPCR1, 8, CORE_AREA),
+
+ [HWBLK_MMC] = HWBLK(MSTPCR2, 29, CORE_AREA),
+ [HWBLK_ETHER] = HWBLK(MSTPCR2, 28, CORE_AREA_BM),
+ [HWBLK_ATAPI] = HWBLK(MSTPCR2, 26, CORE_AREA_BM),
+ [HWBLK_TPU] = HWBLK(MSTPCR2, 25, CORE_AREA),
+ [HWBLK_IRDA] = HWBLK(MSTPCR2, 24, CORE_AREA),
+ [HWBLK_TSIF] = HWBLK(MSTPCR2, 22, CORE_AREA),
+ [HWBLK_USB1] = HWBLK(MSTPCR2, 21, CORE_AREA),
+ [HWBLK_USB0] = HWBLK(MSTPCR2, 20, CORE_AREA),
+ [HWBLK_2DG] = HWBLK(MSTPCR2, 19, CORE_AREA_BM),
+ [HWBLK_SDHI0] = HWBLK(MSTPCR2, 18, CORE_AREA),
+ [HWBLK_SDHI1] = HWBLK(MSTPCR2, 17, CORE_AREA),
+ [HWBLK_VEU1] = HWBLK(MSTPCR2, 15, CORE_AREA_BM),
+ [HWBLK_CEU1] = HWBLK(MSTPCR2, 13, CORE_AREA_BM),
+ [HWBLK_BEU1] = HWBLK(MSTPCR2, 12, CORE_AREA_BM),
+ [HWBLK_2DDMAC] = HWBLK(MSTPCR2, 10, CORE_AREA_BM),
+ [HWBLK_SPU] = HWBLK(MSTPCR2, 9, CORE_AREA_BM),
+ [HWBLK_JPU] = HWBLK(MSTPCR2, 6, CORE_AREA_BM),
+ [HWBLK_VOU] = HWBLK(MSTPCR2, 5, CORE_AREA_BM),
+ [HWBLK_BEU0] = HWBLK(MSTPCR2, 4, CORE_AREA_BM),
+ [HWBLK_CEU0] = HWBLK(MSTPCR2, 3, CORE_AREA_BM),
+ [HWBLK_VEU0] = HWBLK(MSTPCR2, 2, CORE_AREA_BM),
+ [HWBLK_VPU] = HWBLK(MSTPCR2, 1, CORE_AREA_BM),
+ [HWBLK_LCDC] = HWBLK(MSTPCR2, 0, CORE_AREA_BM),
+};
+
+static struct hwblk_info sh7724_hwblk_info = {
+ .areas = sh7724_hwblk_area,
+ .nr_areas = ARRAY_SIZE(sh7724_hwblk_area),
+ .hwblks = sh7724_hwblk,
+ .nr_hwblks = ARRAY_SIZE(sh7724_hwblk),
+};
+
+int arch_hwblk_sleep_mode(void)
+{
+ if (!sh7724_hwblk_area[CORE_AREA].cnt[HWBLK_CNT_USAGE])
+ return SUSP_SH_STANDBY | SUSP_SH_SF;
+
+ if (!sh7724_hwblk_area[CORE_AREA_BM].cnt[HWBLK_CNT_USAGE])
+ return SUSP_SH_SLEEP | SUSP_SH_SF;
+
+ return SUSP_SH_SLEEP;
+}
+
+int __init arch_hwblk_init(void)
+{
+ return hwblk_register(&sh7724_hwblk_info);
+}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
index 1a956b1becc..4a9010bf4fd 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -40,7 +40,7 @@ static struct platform_device iic_device = {
};
static struct r8a66597_platdata r8a66597_data = {
- /* This set zero to all members */
+ .on_chip = 1,
};
static struct resource usb_host_resources[] = {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index cda76ebf87c..35097753456 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -13,9 +13,11 @@
#include <linux/serial_sci.h>
#include <linux/mm.h>
#include <linux/uio_driver.h>
+#include <linux/usb/m66592.h>
#include <linux/sh_timer.h>
#include <asm/clock.h>
#include <asm/mmzone.h>
+#include <cpu/sh7722.h>
static struct resource rtc_resources[] = {
[0] = {
@@ -45,11 +47,18 @@ static struct platform_device rtc_device = {
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_RTC,
+ },
+};
+
+static struct m66592_platdata usbf_platdata = {
+ .on_chip = 1,
};
static struct resource usbf_resources[] = {
[0] = {
- .name = "m66592_udc",
+ .name = "USBF",
.start = 0x04480000,
.end = 0x044800FF,
.flags = IORESOURCE_MEM,
@@ -67,9 +76,13 @@ static struct platform_device usbf_device = {
.dev = {
.dma_mask = NULL,
.coherent_dma_mask = 0xffffffff,
+ .platform_data = &usbf_platdata,
},
.num_resources = ARRAY_SIZE(usbf_resources),
.resource = usbf_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_USBF,
+ },
};
static struct resource iic_resources[] = {
@@ -91,6 +104,9 @@ static struct platform_device iic_device = {
.id = 0, /* "i2c0" clock */
.num_resources = ARRAY_SIZE(iic_resources),
.resource = iic_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_IIC,
+ },
};
static struct uio_info vpu_platform_data = {
@@ -119,6 +135,9 @@ static struct platform_device vpu_device = {
},
.resource = vpu_resources,
.num_resources = ARRAY_SIZE(vpu_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_VPU,
+ },
};
static struct uio_info veu_platform_data = {
@@ -147,6 +166,9 @@ static struct platform_device veu_device = {
},
.resource = veu_resources,
.num_resources = ARRAY_SIZE(veu_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_VEU,
+ },
};
static struct uio_info jpu_platform_data = {
@@ -175,6 +197,9 @@ static struct platform_device jpu_device = {
},
.resource = jpu_resources,
.num_resources = ARRAY_SIZE(jpu_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_JPU,
+ },
};
static struct sh_timer_config cmt_platform_data = {
@@ -207,6 +232,9 @@ static struct platform_device cmt_device = {
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_CMT,
+ },
};
static struct sh_timer_config tmu0_platform_data = {
@@ -238,6 +266,9 @@ static struct platform_device tmu0_device = {
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU,
+ },
};
static struct sh_timer_config tmu1_platform_data = {
@@ -269,6 +300,9 @@ static struct platform_device tmu1_device = {
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU,
+ },
};
static struct sh_timer_config tmu2_platform_data = {
@@ -299,6 +333,9 @@ static struct platform_device tmu2_device = {
},
.resource = tmu2_resources,
.num_resources = ARRAY_SIZE(tmu2_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU,
+ },
};
static struct plat_sci_port sci_platform_data[] = {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
index b45dace9539..4caa5a7ca86 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -18,6 +18,7 @@
#include <linux/io.h>
#include <asm/clock.h>
#include <asm/mmzone.h>
+#include <cpu/sh7723.h>
static struct uio_info vpu_platform_data = {
.name = "VPU5",
@@ -45,6 +46,9 @@ static struct platform_device vpu_device = {
},
.resource = vpu_resources,
.num_resources = ARRAY_SIZE(vpu_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_VPU,
+ },
};
static struct uio_info veu0_platform_data = {
@@ -73,6 +77,9 @@ static struct platform_device veu0_device = {
},
.resource = veu0_resources,
.num_resources = ARRAY_SIZE(veu0_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_VEU2H0,
+ },
};
static struct uio_info veu1_platform_data = {
@@ -101,6 +108,9 @@ static struct platform_device veu1_device = {
},
.resource = veu1_resources,
.num_resources = ARRAY_SIZE(veu1_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_VEU2H1,
+ },
};
static struct sh_timer_config cmt_platform_data = {
@@ -133,6 +143,9 @@ static struct platform_device cmt_device = {
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_CMT,
+ },
};
static struct sh_timer_config tmu0_platform_data = {
@@ -164,6 +177,9 @@ static struct platform_device tmu0_device = {
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU0,
+ },
};
static struct sh_timer_config tmu1_platform_data = {
@@ -195,6 +211,9 @@ static struct platform_device tmu1_device = {
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU0,
+ },
};
static struct sh_timer_config tmu2_platform_data = {
@@ -225,6 +244,9 @@ static struct platform_device tmu2_device = {
},
.resource = tmu2_resources,
.num_resources = ARRAY_SIZE(tmu2_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU0,
+ },
};
static struct sh_timer_config tmu3_platform_data = {
@@ -255,6 +277,9 @@ static struct platform_device tmu3_device = {
},
.resource = tmu3_resources,
.num_resources = ARRAY_SIZE(tmu3_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU1,
+ },
};
static struct sh_timer_config tmu4_platform_data = {
@@ -285,6 +310,9 @@ static struct platform_device tmu4_device = {
},
.resource = tmu4_resources,
.num_resources = ARRAY_SIZE(tmu4_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU1,
+ },
};
static struct sh_timer_config tmu5_platform_data = {
@@ -315,6 +343,9 @@ static struct platform_device tmu5_device = {
},
.resource = tmu5_resources,
.num_resources = ARRAY_SIZE(tmu5_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU1,
+ },
};
static struct plat_sci_port sci_platform_data[] = {
@@ -395,10 +426,13 @@ static struct platform_device rtc_device = {
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_RTC,
+ },
};
static struct r8a66597_platdata r8a66597_data = {
- /* This set zero to all members */
+ .on_chip = 1,
};
static struct resource sh7723_usb_host_resources[] = {
@@ -424,6 +458,9 @@ static struct platform_device sh7723_usb_host_device = {
},
.num_resources = ARRAY_SIZE(sh7723_usb_host_resources),
.resource = sh7723_usb_host_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_USB,
+ },
};
static struct resource iic_resources[] = {
@@ -445,6 +482,9 @@ static struct platform_device iic_device = {
.id = 0, /* "i2c0" clock */
.num_resources = ARRAY_SIZE(iic_resources),
.resource = iic_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_IIC,
+ },
};
static struct platform_device *sh7723_devices[] __initdata = {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index a04edaab9a2..f3851fd757e 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -22,6 +22,7 @@
#include <linux/io.h>
#include <asm/clock.h>
#include <asm/mmzone.h>
+#include <cpu/sh7724.h>
/* Serial */
static struct plat_sci_port sci_platform_data[] = {
@@ -103,6 +104,9 @@ static struct platform_device rtc_device = {
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_RTC,
+ },
};
/* I2C0 */
@@ -125,6 +129,9 @@ static struct platform_device iic0_device = {
.id = 0, /* "i2c0" clock */
.num_resources = ARRAY_SIZE(iic0_resources),
.resource = iic0_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_IIC0,
+ },
};
/* I2C1 */
@@ -147,6 +154,9 @@ static struct platform_device iic1_device = {
.id = 1, /* "i2c1" clock */
.num_resources = ARRAY_SIZE(iic1_resources),
.resource = iic1_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_IIC1,
+ },
};
/* VPU */
@@ -176,6 +186,9 @@ static struct platform_device vpu_device = {
},
.resource = vpu_resources,
.num_resources = ARRAY_SIZE(vpu_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_VPU,
+ },
};
/* VEU0 */
@@ -205,6 +218,9 @@ static struct platform_device veu0_device = {
},
.resource = veu0_resources,
.num_resources = ARRAY_SIZE(veu0_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_VEU0,
+ },
};
/* VEU1 */
@@ -234,6 +250,9 @@ static struct platform_device veu1_device = {
},
.resource = veu1_resources,
.num_resources = ARRAY_SIZE(veu1_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_VEU1,
+ },
};
static struct sh_timer_config cmt_platform_data = {
@@ -266,6 +285,9 @@ static struct platform_device cmt_device = {
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_CMT,
+ },
};
static struct sh_timer_config tmu0_platform_data = {
@@ -297,6 +319,9 @@ static struct platform_device tmu0_device = {
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU0,
+ },
};
static struct sh_timer_config tmu1_platform_data = {
@@ -328,6 +353,9 @@ static struct platform_device tmu1_device = {
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU0,
+ },
};
static struct sh_timer_config tmu2_platform_data = {
@@ -358,6 +386,9 @@ static struct platform_device tmu2_device = {
},
.resource = tmu2_resources,
.num_resources = ARRAY_SIZE(tmu2_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU0,
+ },
};
@@ -389,6 +420,9 @@ static struct platform_device tmu3_device = {
},
.resource = tmu3_resources,
.num_resources = ARRAY_SIZE(tmu3_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU1,
+ },
};
static struct sh_timer_config tmu4_platform_data = {
@@ -419,6 +453,9 @@ static struct platform_device tmu4_device = {
},
.resource = tmu4_resources,
.num_resources = ARRAY_SIZE(tmu4_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU1,
+ },
};
static struct sh_timer_config tmu5_platform_data = {
@@ -449,6 +486,9 @@ static struct platform_device tmu5_device = {
},
.resource = tmu5_resources,
.num_resources = ARRAY_SIZE(tmu5_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_TMU1,
+ },
};
/* JPU */
@@ -478,6 +518,9 @@ static struct platform_device jpu_device = {
},
.resource = jpu_resources,
.num_resources = ARRAY_SIZE(jpu_resources),
+ .archdata = {
+ .hwblk_id = HWBLK_JPU,
+ },
};
static struct platform_device *sh7724_devices[] __initdata = {
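
The .archdata.hwblk_id entries added throughout these setup files tie each platform device to one of the hardware blocks defined in the hwblk-sh77xx.c tables, which is what the pm-runtime side of this merge builds on. A hedged sketch of how that association could be consumed is below; the helper name and the choice of counter are illustrative, only the archdata field and hwblk_cnt_inc()/hwblk_info come from this patch:

#include <linux/platform_device.h>
#include <asm/hwblk.h>

/* Hypothetical helper: look up the hardware block a platform device was
 * associated with via .archdata.hwblk_id and bump its usage counter, so
 * the sleep-mode selection treats that block's power area as busy. */
static void example_mark_device_busy(struct platform_device *pdev)
{
	if (hwblk_info)
		hwblk_cnt_inc(hwblk_info, pdev->archdata.hwblk_id,
			      HWBLK_CNT_USAGE);
}
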
diff --git a/arch/sh/kernel/cpu/shmobile/Makefile b/arch/sh/kernel/cpu/shmobile/Makefile
index 08bfa7c7db2..e8a5111e848 100644
--- a/arch/sh/kernel/cpu/shmobile/Makefile
+++ b/arch/sh/kernel/cpu/shmobile/Makefile
@@ -4,3 +4,4 @@
# Power Management & Sleep mode
obj-$(CONFIG_PM) += pm.o sleep.o
+obj-$(CONFIG_CPU_IDLE) += cpuidle.o
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
new file mode 100644
index 00000000000..4afdd975cc6
--- /dev/null
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -0,0 +1,102 @@
+/*
+ * arch/sh/kernel/cpu/shmobile/cpuidle.c
+ *
+ * Cpuidle support code for SuperH Mobile
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+#include <linux/cpuidle.h>
+#include <asm/suspend.h>
+#include <asm/uaccess.h>
+#include <asm/hwblk.h>
+
+static unsigned long cpuidle_mode[] = {
+ SUSP_SH_SLEEP, /* regular sleep mode */
+ SUSP_SH_SLEEP | SUSP_SH_SF, /* sleep mode + self refresh */
+};
+
+static int cpuidle_sleep_enter(struct cpuidle_device *dev,
+ struct cpuidle_state *state)
+{
+ unsigned long allowed_mode = arch_hwblk_sleep_mode();
+ ktime_t before, after;
+ int requested_state = state - &dev->states[0];
+ int allowed_state;
+ int k;
+
+ /* convert allowed mode to allowed state */
+ for (k = ARRAY_SIZE(cpuidle_mode) - 1; k > 0; k--)
+ if (cpuidle_mode[k] == allowed_mode)
+ break;
+
+ allowed_state = k;
+
+ /* take the following into account for sleep mode selection:
+ * - allowed_state: best mode allowed by hardware (clock deps)
+ * - requested_state: best mode allowed by software (latencies)
+ */
+ k = min_t(int, allowed_state, requested_state);
+
+ dev->last_state = &dev->states[k];
+ before = ktime_get();
+ sh_mobile_call_standby(cpuidle_mode[k]);
+ after = ktime_get();
+ return ktime_to_ns(ktime_sub(after, before)) >> 10;
+}
+
+static struct cpuidle_device cpuidle_dev;
+static struct cpuidle_driver cpuidle_driver = {
+ .name = "sh_idle",
+ .owner = THIS_MODULE,
+};
+
+void sh_mobile_setup_cpuidle(void)
+{
+ struct cpuidle_device *dev = &cpuidle_dev;
+ struct cpuidle_state *state;
+ int i;
+
+ cpuidle_register_driver(&cpuidle_driver);
+
+ for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
+ dev->states[i].name[0] = '\0';
+ dev->states[i].desc[0] = '\0';
+ }
+
+ i = CPUIDLE_DRIVER_STATE_START;
+
+ state = &dev->states[i++];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
+ strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN);
+ state->exit_latency = 1;
+ state->target_residency = 1 * 2;
+ state->power_usage = 3;
+ state->flags = 0;
+ state->flags |= CPUIDLE_FLAG_SHALLOW;
+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
+ state->enter = cpuidle_sleep_enter;
+
+ dev->safe_state = state;
+
+ state = &dev->states[i++];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
+ strncpy(state->desc, "SuperH Sleep Mode [SF]", CPUIDLE_DESC_LEN);
+ state->exit_latency = 100;
+ state->target_residency = 1 * 2;
+ state->power_usage = 1;
+ state->flags = 0;
+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
+ state->enter = cpuidle_sleep_enter;
+
+ dev->state_count = i;
+
+ cpuidle_register_device(dev);
+}
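Editor's note: the state selection in cpuidle_sleep_enter() above boils down to clamping the governor's requested C-state to the deepest state the hardware-block bookkeeping currently allows. Below is a minimal stand-alone sketch of that clamp; pick_state(), mode_table and the values are illustrative stand-ins, not the real SUSP_SH_* definitions.

	#include <stdio.h>

	/* Illustrative stand-ins for the SUSP_SH_SLEEP / SUSP_SH_SF modes. */
	#define MODE_SLEEP    0x01
	#define MODE_SLEEP_SF 0x03

	static const unsigned long mode_table[] = { MODE_SLEEP, MODE_SLEEP_SF };

	/*
	 * Pick the deepest state permitted by both the hardware blocks
	 * (allowed_mode) and the governor (requested_state), mirroring
	 * the logic in cpuidle_sleep_enter().
	 */
	static int pick_state(unsigned long allowed_mode, int requested_state)
	{
		int k;

		for (k = (int)(sizeof(mode_table) / sizeof(mode_table[0])) - 1; k > 0; k--)
			if (mode_table[k] == allowed_mode)
				break;

		return requested_state < k ? requested_state : k;
	}

	int main(void)
	{
		/* Governor wants C1, but self-refresh is not currently allowed. */
		printf("%d\n", pick_state(MODE_SLEEP, 1));	/* prints 0 */
		/* Both hardware and governor allow the deeper state. */
		printf("%d\n", pick_state(MODE_SLEEP_SF, 1));	/* prints 1 */
		return 0;
	}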
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
index 8c067adf683..de078d24ce5 100644
--- a/arch/sh/kernel/cpu/shmobile/pm.c
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -1,5 +1,5 @@
/*
- * arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c
+ * arch/sh/kernel/cpu/shmobile/pm.c
*
* Power management support code for SuperH Mobile
*
@@ -32,20 +32,17 @@
*
* R-standby mode is unsupported, but will be added in the future
* U-standby mode is low priority since it needs bootloader hacks
- *
- * All modes should be tied in with cpuidle. But before that can
- * happen we need to keep track of enabled hardware blocks so we
- * can avoid entering sleep modes that stop clocks to hardware
- * blocks that are in use even though the cpu core is idle.
*/
+#define ILRAM_BASE 0xe5200000
+
extern const unsigned char sh_mobile_standby[];
extern const unsigned int sh_mobile_standby_size;
-static void sh_mobile_call_standby(unsigned long mode)
+void sh_mobile_call_standby(unsigned long mode)
{
extern void *vbr_base;
- void *onchip_mem = (void *)0xe5200000; /* ILRAM */
+ void *onchip_mem = (void *)ILRAM_BASE;
void (*standby_onchip_mem)(unsigned long) = onchip_mem;
/* Note: Wake up from sleep may generate exceptions!
@@ -55,11 +52,6 @@ static void sh_mobile_call_standby(unsigned long mode)
if (mode & SUSP_SH_SF)
asm volatile("ldc %0, vbr" : : "r" (onchip_mem) : "memory");
- /* Copy the assembly snippet to the otherwise unused ILRAM */
- memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size);
- wmb();
- ctrl_barrier();
-
/* Let assembly snippet in on-chip memory handle the rest */
standby_onchip_mem(mode);
@@ -85,7 +77,15 @@ static struct platform_suspend_ops sh_pm_ops = {
static int __init sh_pm_init(void)
{
+ void *onchip_mem = (void *)ILRAM_BASE;
+
+ /* Copy the assembly snippet to the otherwise unused ILRAM */
+ memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size);
+ wmb();
+ ctrl_barrier();
+
suspend_set_ops(&sh_pm_ops);
+ sh_mobile_setup_cpuidle();
return 0;
}
diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
new file mode 100644
index 00000000000..6f5ad151340
--- /dev/null
+++ b/arch/sh/kernel/dumpstack.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ * Copyright (C) 2009 Matt Fleming
+ */
+#include <linux/kallsyms.h>
+#include <linux/ftrace.h>
+#include <linux/debug_locks.h>
+#include <asm/unwinder.h>
+#include <asm/stacktrace.h>
+
+void printk_address(unsigned long address, int reliable)
+{
+ printk(" [<%p>] %s%pS\n", (void *) address,
+ reliable ? "" : "? ", (void *) address);
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void
+print_ftrace_graph_addr(unsigned long addr, void *data,
+ const struct stacktrace_ops *ops,
+ struct thread_info *tinfo, int *graph)
+{
+ struct task_struct *task = tinfo->task;
+ unsigned long ret_addr;
+ int index = task->curr_ret_stack;
+
+ if (addr != (unsigned long)return_to_handler)
+ return;
+
+ if (!task->ret_stack || index < *graph)
+ return;
+
+ index -= *graph;
+ ret_addr = task->ret_stack[index].ret;
+
+ ops->address(data, ret_addr, 1);
+
+ (*graph)++;
+}
+#else
+static inline void
+print_ftrace_graph_addr(unsigned long addr, void *data,
+ const struct stacktrace_ops *ops,
+ struct thread_info *tinfo, int *graph)
+{ }
+#endif
+
+void
+stack_reader_dump(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *sp, const struct stacktrace_ops *ops,
+ void *data)
+{
+ struct thread_info *context;
+ int graph = 0;
+
+ context = (struct thread_info *)
+ ((unsigned long)sp & (~(THREAD_SIZE - 1)));
+
+ while (!kstack_end(sp)) {
+ unsigned long addr = *sp++;
+
+ if (__kernel_text_address(addr)) {
+ ops->address(data, addr, 1);
+
+ print_ftrace_graph_addr(addr, data, ops,
+ context, &graph);
+ }
+ }
+}
+
+static void
+print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+ printk(data);
+ print_symbol(msg, symbol);
+ printk("\n");
+}
+
+static void print_trace_warning(void *data, char *msg)
+{
+ printk("%s%s\n", (char *)data, msg);
+}
+
+static int print_trace_stack(void *data, char *name)
+{
+ printk("%s <%s> ", (char *)data, name);
+ return 0;
+}
+
+/*
+ * Print one address/symbol entry per line.
+ */
+static void print_trace_address(void *data, unsigned long addr, int reliable)
+{
+ printk(data);
+ printk_address(addr, reliable);
+}
+
+static const struct stacktrace_ops print_trace_ops = {
+ .warning = print_trace_warning,
+ .warning_symbol = print_trace_warning_symbol,
+ .stack = print_trace_stack,
+ .address = print_trace_address,
+};
+
+void show_trace(struct task_struct *tsk, unsigned long *sp,
+ struct pt_regs *regs)
+{
+ if (regs && user_mode(regs))
+ return;
+
+ printk("\nCall trace:\n");
+
+ unwind_stack(tsk, regs, sp, &print_trace_ops, "");
+
+ printk("\n");
+
+ if (!tsk)
+ tsk = current;
+
+ debug_show_held_locks(tsk);
+}
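Editor's note: print_ftrace_graph_addr() above substitutes the real caller whenever the raw stack word is the function-graph trampoline, indexing task->ret_stack with curr_ret_stack minus the running 'graph' count. A rough user-space model of that substitution follows; the trampoline address, ret_stack contents and resolve() are made up for illustration.

	#include <stdio.h>

	#define TRAMPOLINE 0xdeadbeefUL	/* stand-in for return_to_handler */

	/* Saved real return addresses, newest at index curr_ret_stack. */
	static unsigned long ret_stack[] = { 0x8c001000, 0x8c002000, 0x8c003000 };
	static int curr_ret_stack = 2;

	/*
	 * Mirror of print_ftrace_graph_addr(): every time the raw stack
	 * word equals the trampoline, report the next saved address and
	 * advance the 'graph' cursor.
	 */
	static unsigned long resolve(unsigned long addr, int *graph)
	{
		if (addr != TRAMPOLINE)
			return addr;

		return ret_stack[curr_ret_stack - (*graph)++];
	}

	int main(void)
	{
		unsigned long raw[] = { 0x8c004000, TRAMPOLINE, TRAMPOLINE };
		int graph = 0;
		unsigned int i;

		for (i = 0; i < sizeof(raw) / sizeof(raw[0]); i++)
			printf("0x%lx\n", resolve(raw[i], &graph));

		return 0;
	}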
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
new file mode 100644
index 00000000000..c6c5764a8ab
--- /dev/null
+++ b/arch/sh/kernel/dwarf.c
@@ -0,0 +1,902 @@
+/*
+ * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * This is an implementation of a DWARF unwinder. Its main purpose is
+ * for generating stacktrace information. Based on the DWARF 3
+ * specification from http://www.dwarfstd.org.
+ *
+ * TODO:
+ * - DWARF64 doesn't work.
+ */
+
+/* #define DEBUG */
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <asm/dwarf.h>
+#include <asm/unwinder.h>
+#include <asm/sections.h>
+#include <asm/unaligned.h>
+#include <asm/dwarf.h>
+#include <asm/stacktrace.h>
+
+static LIST_HEAD(dwarf_cie_list);
+DEFINE_SPINLOCK(dwarf_cie_lock);
+
+static LIST_HEAD(dwarf_fde_list);
+DEFINE_SPINLOCK(dwarf_fde_lock);
+
+static struct dwarf_cie *cached_cie;
+
+/*
+ * Figure out whether we need to allocate some dwarf registers. If dwarf
+ * registers have already been allocated then we may need to realloc
+ * them. "reg" is a register number that we need to be able to access
+ * after this call.
+ *
+ * Register numbers start at zero, therefore we need to allocate space
+ * for "reg" + 1 registers.
+ */
+static void dwarf_frame_alloc_regs(struct dwarf_frame *frame,
+ unsigned int reg)
+{
+ struct dwarf_reg *regs;
+ unsigned int num_regs = reg + 1;
+ size_t new_size;
+ size_t old_size;
+
+ new_size = num_regs * sizeof(*regs);
+ old_size = frame->num_regs * sizeof(*regs);
+
+ /* Fast path: don't allocate any regs if we've already got enough. */
+ if (frame->num_regs >= num_regs)
+ return;
+
+ regs = kzalloc(new_size, GFP_ATOMIC);
+ if (!regs) {
+ printk(KERN_WARNING "Unable to allocate DWARF registers\n");
+ /*
+ * Let's just bomb hard here, we have no way to
+ * gracefully recover.
+ */
+ BUG();
+ }
+
+ if (frame->regs) {
+ memcpy(regs, frame->regs, old_size);
+ kfree(frame->regs);
+ }
+
+ frame->regs = regs;
+ frame->num_regs = num_regs;
+}
+
+/**
+ * dwarf_read_addr - read dwarf data
+ * @src: source address of data
+ * @dst: destination address to store the data to
+ *
+ * Read 'n' bytes from @src, where 'n' is the size of an address on
+ * the native machine, and store them at @dst. We have to be careful
+ * when reading from @src and writing to @dst, because either one can
+ * be arbitrarily aligned, hence the unaligned access helpers.
+ * Return 'n', the number of bytes read.
+ */
+static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
+{
+ u32 val = get_unaligned(src);
+ put_unaligned(val, dst);
+ return sizeof(unsigned long *);
+}
+
+/**
+ * dwarf_read_uleb128 - read unsigned LEB128 data
+ * @addr: the address where the ULEB128 data is stored
+ * @ret: address to store the result
+ *
+ * Decode an unsigned LEB128 encoded datum. The algorithm is taken
+ * from Appendix C of the DWARF 3 spec. For information on the
+ * encodings refer to section "7.6 - Variable Length Data". Return
+ * the number of bytes read.
+ */
+static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
+{
+ unsigned int result;
+ unsigned char byte;
+ int shift, count;
+
+ result = 0;
+ shift = 0;
+ count = 0;
+
+ while (1) {
+ byte = __raw_readb(addr);
+ addr++;
+ count++;
+
+ result |= (byte & 0x7f) << shift;
+ shift += 7;
+
+ if (!(byte & 0x80))
+ break;
+ }
+
+ *ret = result;
+
+ return count;
+}
+
+/**
+ * dwarf_read_leb128 - read signed LEB128 data
+ * @addr: the address of the LEB128 encoded data
+ * @ret: address to store the result
+ *
+ * Decode signed LEB128 data. The algorithm is taken from Appendix
+ * C of the DWARF 3 spec. Return the number of bytes read.
+ */
+static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
+{
+ unsigned char byte;
+ int result, shift;
+ int num_bits;
+ int count;
+
+ result = 0;
+ shift = 0;
+ count = 0;
+
+ while (1) {
+ byte = __raw_readb(addr);
+ addr++;
+ result |= (byte & 0x7f) << shift;
+ shift += 7;
+ count++;
+
+ if (!(byte & 0x80))
+ break;
+ }
+
+ /* The number of bits in a signed integer. */
+ num_bits = 8 * sizeof(result);
+
+ if ((shift < num_bits) && (byte & 0x40))
+ result |= (-1 << shift);
+
+ *ret = result;
+
+ return count;
+}
+
+/**
+ * dwarf_read_encoded_value - return the decoded value at @addr
+ * @addr: the address of the encoded value
+ * @val: where to write the decoded value
+ * @encoding: the encoding with which we can decode @addr
+ *
+ * GCC emits encoded address in the .eh_frame FDE entries. Decode
+ * the value at @addr using @encoding. The decoded value is written
+ * to @val and the number of bytes read is returned.
+ */
+static int dwarf_read_encoded_value(char *addr, unsigned long *val,
+ char encoding)
+{
+ unsigned long decoded_addr = 0;
+ int count = 0;
+
+ switch (encoding & 0x70) {
+ case DW_EH_PE_absptr:
+ break;
+ case DW_EH_PE_pcrel:
+ decoded_addr = (unsigned long)addr;
+ break;
+ default:
+ pr_debug("encoding=0x%x\n", (encoding & 0x70));
+ BUG();
+ }
+
+ if ((encoding & 0x07) == 0x00)
+ encoding |= DW_EH_PE_udata4;
+
+ switch (encoding & 0x0f) {
+ case DW_EH_PE_sdata4:
+ case DW_EH_PE_udata4:
+ count += 4;
+ decoded_addr += get_unaligned((u32 *)addr);
+ __raw_writel(decoded_addr, val);
+ break;
+ default:
+ pr_debug("encoding=0x%x\n", encoding);
+ BUG();
+ }
+
+ return count;
+}
+
+/**
+ * dwarf_entry_len - return the length of an FDE or CIE
+ * @addr: the address of the entry
+ * @len: the length of the entry
+ *
+ * Read the initial_length field of the entry and store the size of
+ * the entry in @len. We return the number of bytes read. Return a
+ * count of 0 on error.
+ */
+static inline int dwarf_entry_len(char *addr, unsigned long *len)
+{
+ u32 initial_len;
+ int count;
+
+ initial_len = get_unaligned((u32 *)addr);
+ count = 4;
+
+ /*
+ * An initial length field value in the range DW_EXT_LO -
+ * DW_LEN_EXT_HI indicates an extension, and should not be
+ * interpreted as a length. The only extension that we currently
+ * understand is the use of DWARF64 addresses.
+ */
+ if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
+ /*
+ * The 64-bit length field immediately follows the
+ * compulsory 32-bit length field.
+ */
+ if (initial_len == DW_EXT_DWARF64) {
+ *len = get_unaligned((u64 *)(addr + 4));
+ count = 12;
+ } else {
+ printk(KERN_WARNING "Unknown DWARF extension\n");
+ count = 0;
+ }
+ } else
+ *len = initial_len;
+
+ return count;
+}
+
+/**
+ * dwarf_lookup_cie - locate the cie
+ * @cie_ptr: pointer to help with lookup
+ */
+static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
+{
+ struct dwarf_cie *cie, *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+ /*
+ * We've cached the last CIE we looked up because chances are
+ * that the FDE wants this CIE.
+ */
+ if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
+ cie = cached_cie;
+ goto out;
+ }
+
+ list_for_each_entry_safe(cie, n, &dwarf_cie_list, link) {
+ if (cie->cie_pointer == cie_ptr) {
+ cached_cie = cie;
+ break;
+ }
+ }
+
+ /* Couldn't find the entry in the list. */
+ if (&cie->link == &dwarf_cie_list)
+ cie = NULL;
+out:
+ spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+ return cie;
+}
+
+/**
+ * dwarf_lookup_fde - locate the FDE that covers pc
+ * @pc: the program counter
+ */
+struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
+{
+ unsigned long flags;
+ struct dwarf_fde *fde, *n;
+
+ spin_lock_irqsave(&dwarf_fde_lock, flags);
+ list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) {
+ unsigned long start, end;
+
+ start = fde->initial_location;
+ end = fde->initial_location + fde->address_range;
+
+ if (pc >= start && pc < end)
+ break;
+ }
+
+ /* Couldn't find the entry in the list. */
+ if (&fde->link == &dwarf_fde_list)
+ fde = NULL;
+
+ spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+
+ return fde;
+}
+
+/**
+ * dwarf_cfa_execute_insns - execute instructions to calculate a CFA
+ * @insn_start: address of the first instruction
+ * @insn_end: address of the last instruction
+ * @cie: the CIE for this function
+ * @fde: the FDE for this function
+ * @frame: the instructions calculate the CFA for this frame
+ * @pc: the program counter of the address we're interested in
+ * @define_ra: keep executing insns until the return addr reg is defined?
+ *
+ * Execute the Call Frame instruction sequence starting at
+ * @insn_start and ending at @insn_end. The instructions describe
+ * how to calculate the Canonical Frame Address of a stackframe.
+ * Store the results in @frame.
+ */
+static int dwarf_cfa_execute_insns(unsigned char *insn_start,
+ unsigned char *insn_end,
+ struct dwarf_cie *cie,
+ struct dwarf_fde *fde,
+ struct dwarf_frame *frame,
+ unsigned long pc,
+ bool define_ra)
+{
+ unsigned char insn;
+ unsigned char *current_insn;
+ unsigned int count, delta, reg, expr_len, offset;
+ bool seen_ra_reg;
+
+ current_insn = insn_start;
+
+ /*
+ * If we're executing instructions for the dwarf_unwind_stack()
+ * FDE we need to keep executing instructions until the value of
+ * DWARF_ARCH_RA_REG is defined. See the comment in
+ * dwarf_unwind_stack() for more details.
+ */
+ if (define_ra)
+ seen_ra_reg = false;
+ else
+ seen_ra_reg = true;
+
+ while (current_insn < insn_end && (frame->pc <= pc || !seen_ra_reg)) {
+ insn = __raw_readb(current_insn++);
+
+ if (!seen_ra_reg) {
+ if (frame->num_regs >= DWARF_ARCH_RA_REG &&
+ frame->regs[DWARF_ARCH_RA_REG].flags)
+ seen_ra_reg = true;
+ }
+
+ /*
+ * Firstly, handle the opcodes that embed their operands
+ * in the instructions.
+ */
+ switch (DW_CFA_opcode(insn)) {
+ case DW_CFA_advance_loc:
+ delta = DW_CFA_operand(insn);
+ delta *= cie->code_alignment_factor;
+ frame->pc += delta;
+ continue;
+ /* NOTREACHED */
+ case DW_CFA_offset:
+ reg = DW_CFA_operand(insn);
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ dwarf_frame_alloc_regs(frame, reg);
+ frame->regs[reg].addr = offset;
+ frame->regs[reg].flags |= DWARF_REG_OFFSET;
+ continue;
+ /* NOTREACHED */
+ case DW_CFA_restore:
+ reg = DW_CFA_operand(insn);
+ continue;
+ /* NOTREACHED */
+ }
+
+ /*
+ * Secondly, handle the opcodes that don't embed their
+ * operands in the instruction.
+ */
+ switch (insn) {
+ case DW_CFA_nop:
+ continue;
+ case DW_CFA_advance_loc1:
+ delta = *current_insn++;
+ frame->pc += delta * cie->code_alignment_factor;
+ break;
+ case DW_CFA_advance_loc2:
+ delta = get_unaligned((u16 *)current_insn);
+ current_insn += 2;
+ frame->pc += delta * cie->code_alignment_factor;
+ break;
+ case DW_CFA_advance_loc4:
+ delta = get_unaligned((u32 *)current_insn);
+ current_insn += 4;
+ frame->pc += delta * cie->code_alignment_factor;
+ break;
+ case DW_CFA_offset_extended:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ break;
+ case DW_CFA_restore_extended:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ break;
+ case DW_CFA_undefined:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ break;
+ case DW_CFA_def_cfa:
+ count = dwarf_read_uleb128(current_insn,
+ &frame->cfa_register);
+ current_insn += count;
+ count = dwarf_read_uleb128(current_insn,
+ &frame->cfa_offset);
+ current_insn += count;
+
+ frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
+ break;
+ case DW_CFA_def_cfa_register:
+ count = dwarf_read_uleb128(current_insn,
+ &frame->cfa_register);
+ current_insn += count;
+ frame->cfa_offset = 0;
+ frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
+ break;
+ case DW_CFA_def_cfa_offset:
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ frame->cfa_offset = offset;
+ break;
+ case DW_CFA_def_cfa_expression:
+ count = dwarf_read_uleb128(current_insn, &expr_len);
+ current_insn += count;
+
+ frame->cfa_expr = current_insn;
+ frame->cfa_expr_len = expr_len;
+ current_insn += expr_len;
+
+ frame->flags |= DWARF_FRAME_CFA_REG_EXP;
+ break;
+ case DW_CFA_offset_extended_sf:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_leb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ dwarf_frame_alloc_regs(frame, reg);
+ frame->regs[reg].flags |= DWARF_REG_OFFSET;
+ frame->regs[reg].addr = offset;
+ break;
+ case DW_CFA_val_offset:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_leb128(current_insn, &offset);
+ offset *= cie->data_alignment_factor;
+ frame->regs[reg].flags |= DWARF_REG_OFFSET;
+ frame->regs[reg].addr = offset;
+ break;
+ default:
+ pr_debug("unhandled DWARF instruction 0x%x\n", insn);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * dwarf_unwind_stack - recursively unwind the stack
+ * @pc: address of the function to unwind
+ * @prev: struct dwarf_frame of the previous stackframe on the callstack
+ *
+ * Return a struct dwarf_frame representing the most recent frame
+ * on the callstack. Each of the lower (older) stack frames are
+ * linked via the "prev" member.
+ */
+struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
+ struct dwarf_frame *prev)
+{
+ struct dwarf_frame *frame;
+ struct dwarf_cie *cie;
+ struct dwarf_fde *fde;
+ unsigned long addr;
+ int i, offset;
+ bool define_ra = false;
+
+ /*
+ * If this is the first invocation of this recursive function we
+ * need to get the contents of a physical register to get the CFA
+ * in order to begin the virtual unwinding of the stack.
+ *
+ * Setting "define_ra" to true indicates that we want
+ * dwarf_cfa_execute_insns() to continue executing instructions
+ * until we know how to calculate the value of DWARF_ARCH_RA_REG
+ * (which we need in order to kick off the whole unwinding
+ * process).
+ *
+ * NOTE: the return address is guaranteed to be setup by the
+ * time this function makes its first function call.
+ */
+ if (!pc && !prev) {
+ pc = (unsigned long)&dwarf_unwind_stack;
+ define_ra = true;
+ }
+
+ frame = kzalloc(sizeof(*frame), GFP_ATOMIC);
+ if (!frame)
+ return NULL;
+
+ frame->prev = prev;
+
+ fde = dwarf_lookup_fde(pc);
+ if (!fde) {
+ /*
+ * This is our normal exit path - the one that stops the
+ * recursion. There's two reasons why we might exit
+ * here,
+ *
+ * a) pc has no associated DWARF frame info and so
+ * we don't know how to unwind this frame. This is
+ * usually the case when we're trying to unwind a
+ * frame that was called from some assembly code
+ * that has no DWARF info, e.g. syscalls.
+ *
+ * b) the debug info for pc is bogus. There's
+ * really no way to distinguish this case from the
+ * case above, which sucks because we could print a
+ * warning here.
+ */
+ return NULL;
+ }
+
+ cie = dwarf_lookup_cie(fde->cie_pointer);
+
+ frame->pc = fde->initial_location;
+
+ /* CIE initial instructions */
+ dwarf_cfa_execute_insns(cie->initial_instructions,
+ cie->instructions_end, cie, fde,
+ frame, pc, false);
+
+ /* FDE instructions */
+ dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
+ fde, frame, pc, define_ra);
+
+ /* Calculate the CFA */
+ switch (frame->flags) {
+ case DWARF_FRAME_CFA_REG_OFFSET:
+ if (prev) {
+ BUG_ON(!prev->regs[frame->cfa_register].flags);
+
+ addr = prev->cfa;
+ addr += prev->regs[frame->cfa_register].addr;
+ frame->cfa = __raw_readl(addr);
+
+ } else {
+ /*
+ * Again, this is the first invocation of this
+ * recursive function. We need to physically
+ * read the contents of a register in order to
+ * get the Canonical Frame Address for this
+ * function.
+ */
+ frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
+ }
+
+ frame->cfa += frame->cfa_offset;
+ break;
+ default:
+ BUG();
+ }
+
+ /* If we haven't seen the return address reg, we're screwed. */
+ BUG_ON(!frame->regs[DWARF_ARCH_RA_REG].flags);
+
+ for (i = 0; i < frame->num_regs; i++) {
+ struct dwarf_reg *reg = &frame->regs[i];
+
+ if (!reg->flags)
+ continue;
+
+ offset = reg->addr;
+ offset += frame->cfa;
+ }
+
+ addr = frame->cfa + frame->regs[DWARF_ARCH_RA_REG].addr;
+ frame->return_addr = __raw_readl(addr);
+
+ frame->next = dwarf_unwind_stack(frame->return_addr, frame);
+ return frame;
+}
+
+static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
+ unsigned char *end)
+{
+ struct dwarf_cie *cie;
+ unsigned long flags;
+ int count;
+
+ cie = kzalloc(sizeof(*cie), GFP_KERNEL);
+ if (!cie)
+ return -ENOMEM;
+
+ cie->length = len;
+
+ /*
+ * Record the offset into the .eh_frame section
+ * for this CIE. It allows this CIE to be
+ * quickly and easily looked up from the
+ * corresponding FDE.
+ */
+ cie->cie_pointer = (unsigned long)entry;
+
+ cie->version = *(char *)p++;
+ BUG_ON(cie->version != 1);
+
+ cie->augmentation = p;
+ p += strlen(cie->augmentation) + 1;
+
+ count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
+ p += count;
+
+ count = dwarf_read_leb128(p, &cie->data_alignment_factor);
+ p += count;
+
+ /*
+ * Which column in the rule table contains the
+ * return address?
+ */
+ if (cie->version == 1) {
+ cie->return_address_reg = __raw_readb(p);
+ p++;
+ } else {
+ count = dwarf_read_uleb128(p, &cie->return_address_reg);
+ p += count;
+ }
+
+ if (cie->augmentation[0] == 'z') {
+ unsigned int length, count;
+ cie->flags |= DWARF_CIE_Z_AUGMENTATION;
+
+ count = dwarf_read_uleb128(p, &length);
+ p += count;
+
+ BUG_ON((unsigned char *)p > end);
+
+ cie->initial_instructions = p + length;
+ cie->augmentation++;
+ }
+
+ while (*cie->augmentation) {
+ /*
+ * "L" indicates a byte showing how the
+ * LSDA pointer is encoded. Skip it.
+ */
+ if (*cie->augmentation == 'L') {
+ p++;
+ cie->augmentation++;
+ } else if (*cie->augmentation == 'R') {
+ /*
+ * "R" indicates a byte showing
+ * how FDE addresses are
+ * encoded.
+ */
+ cie->encoding = *(char *)p++;
+ cie->augmentation++;
+ } else if (*cie->augmentation == 'P') {
+ /*
+ * "R" indicates a personality
+ * routine in the CIE
+ * augmentation.
+ */
+ BUG();
+ } else if (*cie->augmentation == 'S') {
+ BUG();
+ } else {
+ /*
+ * Unknown augmentation. Assume
+ * 'z' augmentation.
+ */
+ p = cie->initial_instructions;
+ BUG_ON(!p);
+ break;
+ }
+ }
+
+ cie->initial_instructions = p;
+ cie->instructions_end = end;
+
+ /* Add to list */
+ spin_lock_irqsave(&dwarf_cie_lock, flags);
+ list_add_tail(&cie->link, &dwarf_cie_list);
+ spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+ return 0;
+}
+
+static int dwarf_parse_fde(void *entry, u32 entry_type,
+ void *start, unsigned long len)
+{
+ struct dwarf_fde *fde;
+ struct dwarf_cie *cie;
+ unsigned long flags;
+ int count;
+ void *p = start;
+
+ fde = kzalloc(sizeof(*fde), GFP_KERNEL);
+ if (!fde)
+ return -ENOMEM;
+
+ fde->length = len;
+
+ /*
+ * In a .eh_frame section the CIE pointer is the delta from
+ * this field back to its CIE; store the absolute CIE address.
+ */
+ fde->cie_pointer = (unsigned long)(p - entry_type - 4);
+
+ cie = dwarf_lookup_cie(fde->cie_pointer);
+ fde->cie = cie;
+
+ if (cie->encoding)
+ count = dwarf_read_encoded_value(p, &fde->initial_location,
+ cie->encoding);
+ else
+ count = dwarf_read_addr(p, &fde->initial_location);
+
+ p += count;
+
+ if (cie->encoding)
+ count = dwarf_read_encoded_value(p, &fde->address_range,
+ cie->encoding & 0x0f);
+ else
+ count = dwarf_read_addr(p, &fde->address_range);
+
+ p += count;
+
+ if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
+ unsigned int length;
+ count = dwarf_read_uleb128(p, &length);
+ p += count + length;
+ }
+
+ /* Call frame instructions. */
+ fde->instructions = p;
+ fde->end = start + len;
+
+ /* Add to list. */
+ spin_lock_irqsave(&dwarf_fde_lock, flags);
+ list_add_tail(&fde->link, &dwarf_fde_list);
+ spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+
+ return 0;
+}
+
+static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *sp,
+ const struct stacktrace_ops *ops, void *data)
+{
+ struct dwarf_frame *frame;
+
+ frame = dwarf_unwind_stack(0, NULL);
+
+ while (frame && frame->return_addr) {
+ ops->address(data, frame->return_addr, 1);
+ frame = frame->next;
+ }
+}
+
+static struct unwinder dwarf_unwinder = {
+ .name = "dwarf-unwinder",
+ .dump = dwarf_unwinder_dump,
+ .rating = 150,
+};
+
+static void dwarf_unwinder_cleanup(void)
+{
+ struct dwarf_cie *cie, *m;
+ struct dwarf_fde *fde, *n;
+ unsigned long flags;
+
+ /*
+ * Deallocate all the memory allocated for the DWARF unwinder.
+ * Traverse all the FDE/CIE lists and remove and free all the
+ * memory associated with those data structures.
+ */
+ spin_lock_irqsave(&dwarf_cie_lock, flags);
+ list_for_each_entry_safe(cie, m, &dwarf_cie_list, link)
+ kfree(cie);
+ spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+ spin_lock_irqsave(&dwarf_fde_lock, flags);
+ list_for_each_entry_safe(fde, n, &dwarf_fde_list, link)
+ kfree(fde);
+ spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+}
+
+/**
+ * dwarf_unwinder_init - initialise the dwarf unwinder
+ *
+ * Build the data structures describing the .eh_frame section to
+ * make it easier to lookup CIE and FDE entries. Because the
+ * .eh_frame section is packed as tightly as possible it is not
+ * easy to lookup the FDE for a given PC, so we build a list of FDE
+ * and CIE entries that make it easier.
+ */
+void dwarf_unwinder_init(void)
+{
+ u32 entry_type;
+ void *p, *entry;
+ int count, err;
+ unsigned long len;
+ unsigned int c_entries, f_entries;
+ unsigned char *end;
+ INIT_LIST_HEAD(&dwarf_cie_list);
+ INIT_LIST_HEAD(&dwarf_fde_list);
+
+ c_entries = 0;
+ f_entries = 0;
+ entry = &__start_eh_frame;
+
+ while ((char *)entry < __stop_eh_frame) {
+ p = entry;
+
+ count = dwarf_entry_len(p, &len);
+ if (count == 0) {
+ /*
+ * We read a bogus length field value. There is
+ * nothing we can do here apart from disabling
+ * the DWARF unwinder. We can't even skip this
+ * entry and move to the next one because 'len'
+ * tells us where our next entry is.
+ */
+ goto out;
+ } else
+ p += count;
+
+ /* initial length does not include itself */
+ end = p + len;
+
+ entry_type = get_unaligned((u32 *)p);
+ p += 4;
+
+ if (entry_type == DW_EH_FRAME_CIE) {
+ err = dwarf_parse_cie(entry, p, len, end);
+ if (err < 0)
+ goto out;
+ else
+ c_entries++;
+ } else {
+ err = dwarf_parse_fde(entry, entry_type, p, len);
+ if (err < 0)
+ goto out;
+ else
+ f_entries++;
+ }
+
+ entry = (char *)entry + len + 4;
+ }
+
+ printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
+ c_entries, f_entries);
+
+ err = unwinder_register(&dwarf_unwinder);
+ if (err)
+ goto out;
+
+ return;
+
+out:
+ printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
+ dwarf_unwinder_cleanup();
+}
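Editor's note: the dwarf_read_uleb128()/dwarf_read_leb128() helpers above implement the variable-length integer encoding from section 7.6 of the DWARF 3 spec. The following self-contained sketch of the unsigned decoder uses the worked example from the spec (the bytes 0xe5 0x8e 0x26 decode to 624485); uleb128_decode() is an illustrative name, not part of the patch.

	#include <stdio.h>

	/*
	 * Decode an unsigned LEB128 value from an ordinary buffer; the
	 * loop is the same 7-bits-per-byte accumulation used by
	 * dwarf_read_uleb128(). Returns the number of bytes consumed.
	 */
	static int uleb128_decode(const unsigned char *p, unsigned int *out)
	{
		unsigned int result = 0;
		int shift = 0, count = 0;
		unsigned char byte;

		do {
			byte = p[count++];
			result |= (unsigned int)(byte & 0x7f) << shift;
			shift += 7;
		} while (byte & 0x80);

		*out = result;
		return count;
	}

	int main(void)
	{
		const unsigned char buf[] = { 0xe5, 0x8e, 0x26 };
		unsigned int value;
		int n = uleb128_decode(buf, &value);

		printf("decoded %u from %d bytes\n", value, n);	/* 624485 from 3 bytes */
		return 0;
	}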
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
index a952dcf9999..81a46145ffa 100644
--- a/arch/sh/kernel/early_printk.c
+++ b/arch/sh/kernel/early_printk.c
@@ -134,7 +134,7 @@ static void scif_sercon_init(char *s)
sci_out(&scif_port, SCFCR, 0x0030); /* TTRG=b'11 */
sci_out(&scif_port, SCSCR, 0x0030); /* TE, RE */
}
-#elif defined(CONFIG_CPU_SH4)
+#elif defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3)
#define DEFAULT_BAUD 115200
/*
* Simple SCIF init, primarily aimed at SH7750 and other similar SH-4
@@ -220,8 +220,7 @@ static int __init setup_early_printk(char *buf)
early_console = &scif_console;
#if !defined(CONFIG_SH_STANDARD_BIOS)
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3)
scif_sercon_init(buf + 6);
#endif
#endif
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index d62359cfbbe..e63178fefb9 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -43,9 +43,10 @@
* syscall #
*
*/
+#include <asm/dwarf.h>
#if defined(CONFIG_PREEMPT)
-# define preempt_stop() cli
+# define preempt_stop() cli ; TRACE_IRQS_OFF
#else
# define preempt_stop()
# define resume_kernel __restore_all
@@ -55,11 +56,7 @@
.align 2
ENTRY(exception_error)
!
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 2f, r0
- jsr @r0
- nop
-#endif
+ TRACE_IRQS_ON
sti
mov.l 1f, r0
jmp @r0
@@ -67,22 +64,28 @@ ENTRY(exception_error)
.align 2
1: .long do_exception_error
-#ifdef CONFIG_TRACE_IRQFLAGS
-2: .long trace_hardirqs_on
-#endif
.align 2
ret_from_exception:
+ CFI_STARTPROC simple
+ CFI_DEF_CFA r14, 0
+ CFI_REL_OFFSET 17, 64
+ CFI_REL_OFFSET 15, 0
+ CFI_REL_OFFSET 14, 56
preempt_stop()
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 4f, r0
- jsr @r0
- nop
-#endif
ENTRY(ret_from_irq)
!
mov #OFF_SR, r0
mov.l @(r0,r15), r0 ! get status register
+
+ shlr2 r0
+ and #0x3c, r0
+ cmp/eq #0x3c, r0
+ bt 9f
+ TRACE_IRQS_ON
+9:
+ mov #OFF_SR, r0
+ mov.l @(r0,r15), r0 ! get status register
shll r0
shll r0 ! kernel space?
get_current_thread_info r8, r0
@@ -125,13 +128,9 @@ noresched:
ENTRY(resume_userspace)
! r8: current_thread_info
cli
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 5f, r0
- jsr @r0
- nop
-#endif
+ TRACE_IRQS_OFF
mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
- tst #_TIF_WORK_MASK, r0
+ tst #(_TIF_WORK_MASK & 0xff), r0
bt/s __restore_all
tst #_TIF_NEED_RESCHED, r0
@@ -156,14 +155,10 @@ work_resched:
jsr @r1 ! schedule
nop
cli
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 5f, r0
- jsr @r0
- nop
-#endif
+ TRACE_IRQS_OFF
!
mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
- tst #_TIF_WORK_MASK, r0
+ tst #(_TIF_WORK_MASK & 0xff), r0
bt __restore_all
bra work_pending
tst #_TIF_NEED_RESCHED, r0
@@ -172,23 +167,15 @@ work_resched:
1: .long schedule
2: .long do_notify_resume
3: .long resume_userspace
-#ifdef CONFIG_TRACE_IRQFLAGS
-4: .long trace_hardirqs_on
-5: .long trace_hardirqs_off
-#endif
.align 2
syscall_exit_work:
! r0: current_thread_info->flags
! r8: current_thread_info
- tst #_TIF_WORK_SYSCALL_MASK, r0
+ tst #(_TIF_WORK_SYSCALL_MASK & 0xff), r0
bt/s work_pending
tst #_TIF_NEED_RESCHED, r0
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 5f, r0
- jsr @r0
- nop
-#endif
+ TRACE_IRQS_ON
sti
mov r15, r4
mov.l 8f, r0 ! do_syscall_trace_leave
@@ -259,6 +246,7 @@ debug_trap:
nop
bra __restore_all
nop
+ CFI_ENDPROC
.align 2
1: .long debug_trap_table
@@ -304,6 +292,7 @@ ret_from_fork:
* system calls and debug traps through their respective jump tables.
*/
ENTRY(system_call)
+ setup_frame_reg
#if !defined(CONFIG_CPU_SH2)
mov.l 1f, r9
mov.l @r9, r8 ! Read from TRA (Trap Address) Register
@@ -321,18 +310,18 @@ ENTRY(system_call)
bt/s debug_trap ! it's a debug trap..
nop
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 5f, r10
- jsr @r10
- nop
-#endif
+ TRACE_IRQS_ON
sti
!
get_current_thread_info r8, r10
mov.l @(TI_FLAGS,r8), r8
- mov #_TIF_WORK_SYSCALL_MASK, r10
+ mov #(_TIF_WORK_SYSCALL_MASK & 0xff), r10
+ mov #(_TIF_WORK_SYSCALL_MASK >> 8), r9
tst r10, r8
+ shll8 r9
+ bf syscall_trace_entry
+ tst r9, r8
bf syscall_trace_entry
!
mov.l 2f, r8 ! Number of syscalls
@@ -351,15 +340,15 @@ syscall_call:
!
syscall_exit:
cli
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 6f, r0
- jsr @r0
- nop
-#endif
+ TRACE_IRQS_OFF
!
get_current_thread_info r8, r0
mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
- tst #_TIF_ALLWORK_MASK, r0
+ tst #(_TIF_ALLWORK_MASK & 0xff), r0
+ mov #(_TIF_ALLWORK_MASK >> 8), r1
+ bf syscall_exit_work
+ shlr8 r0
+ tst r0, r1
bf syscall_exit_work
bra __restore_all
nop
@@ -369,9 +358,5 @@ syscall_exit:
#endif
2: .long NR_syscalls
3: .long sys_call_table
-#ifdef CONFIG_TRACE_IRQFLAGS
-5: .long trace_hardirqs_on
-6: .long trace_hardirqs_off
-#endif
7: .long do_syscall_trace_enter
8: .long do_syscall_trace_leave
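Editor's note: the thread-flag tests above are split into two byte-sized compares because the SH `tst #imm, r0` form only takes an 8-bit immediate, while masks such as _TIF_ALLWORK_MASK span more than one byte. In C the two-step test is just an ordinary mask check; the sketch below demonstrates the equivalence with an illustrative mask value, not the real flag definitions.

	#include <stdio.h>

	#define WORK_MASK 0x01ff	/* illustrative mask wider than 8 bits */

	/* Single-shot check, as written on a machine with wide immediates. */
	static int work_pending_wide(unsigned int flags)
	{
		return (flags & WORK_MASK) != 0;
	}

	/*
	 * Two byte-sized checks, matching the low-byte tst followed by the
	 * shifted high-byte tst sequence used in entry-common.S.
	 */
	static int work_pending_split(unsigned int flags)
	{
		if (flags & (WORK_MASK & 0xff))
			return 1;
		return ((flags >> 8) & (WORK_MASK >> 8)) != 0;
	}

	int main(void)
	{
		unsigned int f;

		for (f = 0; f < 0x400; f++)
			if (work_pending_wide(f) != work_pending_split(f))
				printf("mismatch at 0x%x\n", f);

		printf("checked 0x400 flag values\n");
		return 0;
	}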
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 066f37dc32a..6647dfcb781 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -16,9 +16,13 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/kernel.h>
#include <asm/ftrace.h>
#include <asm/cacheflush.h>
+#include <asm/unistd.h>
+#include <trace/syscall.h>
+#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];
static unsigned char ftrace_nop[4];
@@ -131,3 +135,189 @@ int __init ftrace_dyn_arch_init(void *data)
return 0;
}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+static int ftrace_mod(unsigned long ip, unsigned long old_addr,
+ unsigned long new_addr)
+{
+ unsigned char code[MCOUNT_INSN_SIZE];
+
+ if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ if (old_addr != __raw_readl((unsigned long *)code))
+ return -EINVAL;
+
+ __raw_writel(new_addr, ip);
+ return 0;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ unsigned long ip, old_addr, new_addr;
+
+ ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+ old_addr = (unsigned long)(&skip_trace);
+ new_addr = (unsigned long)(&ftrace_graph_caller);
+
+ return ftrace_mod(ip, old_addr, new_addr);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ unsigned long ip, old_addr, new_addr;
+
+ ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+ old_addr = (unsigned long)(&ftrace_graph_caller);
+ new_addr = (unsigned long)(&skip_trace);
+
+ return ftrace_mod(ip, old_addr, new_addr);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in the current thread info.
+ *
+ * This is the main routine for the function graph tracer. The function
+ * graph tracer essentially works like this:
+ *
+ * parent is the stack address containing self_addr's return address.
+ * We pull the real return address out of parent and store it in
+ * current's ret_stack. Then, we replace the return address on the stack
+ * with the address of return_to_handler. self_addr is the function that
+ * called mcount.
+ *
+ * When self_addr returns, it will jump to return_to_handler which calls
+ * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
+ * return address off of current's ret_stack and jump to it.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+ unsigned long old;
+ int faulted, err;
+ struct ftrace_graph_ent trace;
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * Protect against fault, even if it shouldn't
+ * happen. This tool is too intrusive to
+ * forgo such a protection.
+ */
+ __asm__ __volatile__(
+ "1: \n\t"
+ "mov.l @%2, %0 \n\t"
+ "2: \n\t"
+ "mov.l %3, @%2 \n\t"
+ "mov #0, %1 \n\t"
+ "3: \n\t"
+ ".section .fixup, \"ax\" \n\t"
+ "4: \n\t"
+ "mov.l 5f, %0 \n\t"
+ "jmp @%0 \n\t"
+ " mov #1, %1 \n\t"
+ ".balign 4 \n\t"
+ "5: .long 3b \n\t"
+ ".previous \n\t"
+ ".section __ex_table,\"a\" \n\t"
+ ".long 1b, 4b \n\t"
+ ".long 2b, 4b \n\t"
+ ".previous \n\t"
+ : "=&r" (old), "=r" (faulted)
+ : "r" (parent), "r" (return_hooker)
+ );
+
+ if (unlikely(faulted)) {
+ ftrace_graph_stop();
+ WARN_ON(1);
+ return;
+ }
+
+ err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
+ if (err == -EBUSY) {
+ __raw_writel(old, parent);
+ return;
+ }
+
+ trace.func = self_addr;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
+ __raw_writel(old, parent);
+ }
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+extern unsigned long __start_syscalls_metadata[];
+extern unsigned long __stop_syscalls_metadata[];
+extern unsigned long *sys_call_table;
+
+static struct syscall_metadata **syscalls_metadata;
+
+static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
+{
+ struct syscall_metadata *start;
+ struct syscall_metadata *stop;
+ char str[KSYM_SYMBOL_LEN];
+
+
+ start = (struct syscall_metadata *)__start_syscalls_metadata;
+ stop = (struct syscall_metadata *)__stop_syscalls_metadata;
+ kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);
+
+ for ( ; start < stop; start++) {
+ if (start->name && !strcmp(start->name, str))
+ return start;
+ }
+
+ return NULL;
+}
+
+#define FTRACE_SYSCALL_MAX (NR_syscalls - 1)
+
+struct syscall_metadata *syscall_nr_to_meta(int nr)
+{
+ if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0)
+ return NULL;
+
+ return syscalls_metadata[nr];
+}
+
+void arch_init_ftrace_syscalls(void)
+{
+ int i;
+ struct syscall_metadata *meta;
+ unsigned long **psys_syscall_table = &sys_call_table;
+ static atomic_t refs;
+
+ if (atomic_inc_return(&refs) != 1)
+ goto end;
+
+ syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
+ FTRACE_SYSCALL_MAX, GFP_KERNEL);
+ if (!syscalls_metadata) {
+ WARN_ON(1);
+ return;
+ }
+
+ for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
+ meta = find_syscall_meta(psys_syscall_table[i]);
+ syscalls_metadata[i] = meta;
+ }
+ return;
+
+ /* Paranoid: avoid overflow */
+end:
+ atomic_dec(&refs);
+}
+#endif /* CONFIG_FTRACE_SYSCALLS */
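Editor's note: prepare_ftrace_return() above swaps the saved return address for return_to_handler and stashes the real one on the task's ret_stack, so the graph tracer can time the function on exit. A toy model of that push/pop pairing follows, without the fault handling or tracepoints; the hook address, shadow stack and function names are invented for illustration.

	#include <stdio.h>

	#define RETURN_HOOK 0xffffffffUL	/* stand-in for return_to_handler */
	#define MAX_DEPTH 16

	static unsigned long shadow[MAX_DEPTH];	/* plays the role of ret_stack */
	static int depth;

	/* Entry side: remember the real caller, divert the return to the hook. */
	static int hook_return(unsigned long *parent)
	{
		if (depth >= MAX_DEPTH)
			return -1;

		shadow[depth++] = *parent;
		*parent = RETURN_HOOK;
		return 0;
	}

	/* Exit side: the hook asks for the real return address back. */
	static unsigned long pop_return(void)
	{
		return shadow[--depth];
	}

	int main(void)
	{
		unsigned long saved_ra = 0x8c001234;

		hook_return(&saved_ra);
		printf("patched return: 0x%lx\n", saved_ra);
		printf("real return:    0x%lx\n", pop_return());
		return 0;
	}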
diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c
index 77dfecb6437..e27a19e1f46 100644
--- a/arch/sh/kernel/io_trapped.c
+++ b/arch/sh/kernel/io_trapped.c
@@ -112,14 +112,15 @@ void __iomem *match_trapped_io_handler(struct list_head *list,
struct trapped_io *tiop;
struct resource *res;
int k, len;
+ unsigned long flags;
- spin_lock_irq(&trapped_lock);
+ spin_lock_irqsave(&trapped_lock, flags);
list_for_each_entry(tiop, list, list) {
voffs = 0;
for (k = 0; k < tiop->num_resources; k++) {
res = tiop->resource + k;
if (res->start == offset) {
- spin_unlock_irq(&trapped_lock);
+ spin_unlock_irqrestore(&trapped_lock, flags);
return tiop->virt_base + voffs;
}
@@ -127,7 +128,7 @@ void __iomem *match_trapped_io_handler(struct list_head *list,
voffs += roundup(len, PAGE_SIZE);
}
}
- spin_unlock_irq(&trapped_lock);
+ spin_unlock_irqrestore(&trapped_lock, flags);
return NULL;
}
EXPORT_SYMBOL_GPL(match_trapped_io_handler);
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 3d09062f468..2bb43dc74f2 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -14,6 +14,7 @@
#include <asm/processor.h>
#include <asm/machvec.h>
#include <asm/uaccess.h>
+#include <asm/dwarf.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>
@@ -114,23 +115,6 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs)
#endif
irq_enter();
-
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
- /* Debugging check for stack overflow: is there less than 1KB free? */
- {
- long sp;
-
- __asm__ __volatile__ ("and r15, %0" :
- "=r" (sp) : "0" (THREAD_SIZE - 1));
-
- if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
- printk("do_IRQ: stack overflow: %ld\n",
- sp - sizeof(struct thread_info));
- dump_stack();
- }
- }
-#endif
-
irq = irq_demux(intc_evt2irq(irq));
#ifdef CONFIG_IRQSTACKS
@@ -278,6 +262,9 @@ void __init init_IRQ(void)
sh_mv.mv_init_irq();
irq_ctx_init(smp_processor_id());
+
+ /* This needs to be early, but not too early.. */
+ dwarf_unwinder_init();
}
#ifdef CONFIG_SPARSE_IRQ
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 92d7740faab..9fee977f176 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -23,6 +23,7 @@
#include <linux/tick.h>
#include <linux/reboot.h>
#include <linux/fs.h>
+#include <linux/ftrace.h>
#include <linux/preempt.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
@@ -264,8 +265,8 @@ static void ubc_set_tracing(int asid, unsigned long pc)
* switch_to(x,y) should switch tasks from x to y.
*
*/
-struct task_struct *__switch_to(struct task_struct *prev,
- struct task_struct *next)
+__notrace_funcgraph struct task_struct *
+__switch_to(struct task_struct *prev, struct task_struct *next)
{
#if defined(CONFIG_SH_FPU)
unlazy_fpu(prev, task_pt_regs(prev));
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 3392e835a37..c198eceaee9 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -34,6 +34,8 @@
#include <asm/syscalls.h>
#include <asm/fpu.h>
+#include <trace/syscall.h>
+
/*
* This routine will get a word off of the process kernel stack.
*/
@@ -459,6 +461,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
*/
ret = -1L;
+ if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
+ ftrace_syscall_enter(regs);
+
if (unlikely(current->audit_context))
audit_syscall_entry(audit_arch(), regs->regs[3],
regs->regs[4], regs->regs[5],
@@ -475,6 +480,9 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
regs->regs[0]);
+ if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
+ ftrace_syscall_exit(regs);
+
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index dd38338553e..ceb409bf774 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -30,6 +30,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/lmb.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/page.h>
@@ -233,39 +234,45 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
void __init setup_bootmem_allocator(unsigned long free_pfn)
{
unsigned long bootmap_size;
+ unsigned long bootmap_pages, bootmem_paddr;
+ u64 total_pages = (lmb_end_of_DRAM() - __MEMORY_START) >> PAGE_SHIFT;
+ int i;
+
+ bootmap_pages = bootmem_bootmap_pages(total_pages);
+
+ bootmem_paddr = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
/*
* Find a proper area for the bootmem bitmap. After this
* bootstrap step all allocations (until the page allocator
* is intact) must be done via bootmem_alloc().
*/
- bootmap_size = init_bootmem_node(NODE_DATA(0), free_pfn,
+ bootmap_size = init_bootmem_node(NODE_DATA(0),
+ bootmem_paddr >> PAGE_SHIFT,
min_low_pfn, max_low_pfn);
- __add_active_range(0, min_low_pfn, max_low_pfn);
- register_bootmem_low_pages();
-
- node_set_online(0);
+ /* Add active regions with valid PFNs. */
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ unsigned long start_pfn, end_pfn;
+ start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
+ end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+ __add_active_range(0, start_pfn, end_pfn);
+ }
/*
- * Reserve the kernel text and
- * Reserve the bootmem bitmap. We do this in two steps (first step
- * was init_bootmem()), because this catches the (definitely buggy)
- * case of us accidentally initializing the bootmem allocator with
- * an invalid RAM area.
+ * Add all physical memory to the bootmem map and mark each
+ * area as present.
*/
- reserve_bootmem(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
- (PFN_PHYS(free_pfn) + bootmap_size + PAGE_SIZE - 1) -
- (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET),
- BOOTMEM_DEFAULT);
+ register_bootmem_low_pages();
- /*
- * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
- */
- if (CONFIG_ZERO_PAGE_OFFSET != 0)
- reserve_bootmem(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET,
+ /* Reserve the sections we're already using. */
+ for (i = 0; i < lmb.reserved.cnt; i++)
+ reserve_bootmem(lmb.reserved.region[i].base,
+ lmb_size_bytes(&lmb.reserved, i),
BOOTMEM_DEFAULT);
+ node_set_online(0);
+
sparse_memory_present_with_active_regions(0);
#ifdef CONFIG_BLK_DEV_INITRD
@@ -296,12 +303,37 @@ void __init setup_bootmem_allocator(unsigned long free_pfn)
static void __init setup_memory(void)
{
unsigned long start_pfn;
+ u64 base = min_low_pfn << PAGE_SHIFT;
+ u64 size = (max_low_pfn << PAGE_SHIFT) - base;
/*
* Partially used pages are not usable - thus
* we are rounding upwards:
*/
start_pfn = PFN_UP(__pa(_end));
+
+ lmb_add(base, size);
+
+ /*
+ * Reserve the kernel text and
+ * Reserve the bootmem bitmap. We do this in two steps (first step
+ * was init_bootmem()), because this catches the (definitely buggy)
+ * case of us accidentally initializing the bootmem allocator with
+ * an invalid RAM area.
+ */
+ lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
+ (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
+ (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
+
+ /*
+ * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+ */
+ if (CONFIG_ZERO_PAGE_OFFSET != 0)
+ lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+
+ lmb_analyze();
+ lmb_dump_all();
+
setup_bootmem_allocator(start_pfn);
}
#else
@@ -402,6 +434,7 @@ void __init setup_arch(char **cmdline_p)
nodes_clear(node_online_map);
/* Setup bootmem with available RAM */
+ lmb_init();
setup_memory();
sparse_init();
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index fcc5de31f83..cec610888e2 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -106,8 +106,8 @@ EXPORT_SYMBOL(flush_dcache_page);
EXPORT_SYMBOL(clear_user_page);
#endif
-#ifdef CONFIG_FUNCTION_TRACER
-EXPORT_SYMBOL(mcount);
+#ifdef CONFIG_MCOUNT
+DECLARE_EXPORT(mcount);
#endif
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c
index 1a2a5eb76e4..c2e45c48409 100644
--- a/arch/sh/kernel/stacktrace.c
+++ b/arch/sh/kernel/stacktrace.c
@@ -13,47 +13,93 @@
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/module.h>
+#include <asm/unwinder.h>
#include <asm/ptrace.h>
+#include <asm/stacktrace.h>
+
+static void save_stack_warning(void *data, char *msg)
+{
+}
+
+static void
+save_stack_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+}
+
+static int save_stack_stack(void *data, char *name)
+{
+ return 0;
+}
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
+static void save_stack_address(void *data, unsigned long addr, int reliable)
+{
+ struct stack_trace *trace = data;
+
+ if (!reliable)
+ return;
+
+ if (trace->skip > 0) {
+ trace->skip--;
+ return;
+ }
+
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = addr;
+}
+
+static const struct stacktrace_ops save_stack_ops = {
+ .warning = save_stack_warning,
+ .warning_symbol = save_stack_warning_symbol,
+ .stack = save_stack_stack,
+ .address = save_stack_address,
+};
+
void save_stack_trace(struct stack_trace *trace)
{
unsigned long *sp = (unsigned long *)current_stack_pointer;
- while (!kstack_end(sp)) {
- unsigned long addr = *sp++;
-
- if (__kernel_text_address(addr)) {
- if (trace->skip > 0)
- trace->skip--;
- else
- trace->entries[trace->nr_entries++] = addr;
- if (trace->nr_entries >= trace->max_entries)
- break;
- }
- }
+ unwind_stack(current, NULL, sp, &save_stack_ops, trace);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
+static void
+save_stack_address_nosched(void *data, unsigned long addr, int reliable)
+{
+ struct stack_trace *trace = (struct stack_trace *)data;
+
+ if (!reliable)
+ return;
+
+ if (in_sched_functions(addr))
+ return;
+
+ if (trace->skip > 0) {
+ trace->skip--;
+ return;
+ }
+
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = addr;
+}
+
+static const struct stacktrace_ops save_stack_ops_nosched = {
+ .warning = save_stack_warning,
+ .warning_symbol = save_stack_warning_symbol,
+ .stack = save_stack_stack,
+ .address = save_stack_address_nosched,
+};
+
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
unsigned long *sp = (unsigned long *)tsk->thread.sp;
- while (!kstack_end(sp)) {
- unsigned long addr = *sp++;
-
- if (__kernel_text_address(addr)) {
- if (in_sched_functions(addr))
- break;
- if (trace->skip > 0)
- trace->skip--;
- else
- trace->entries[trace->nr_entries++] = addr;
- if (trace->nr_entries >= trace->max_entries)
- break;
- }
- }
+ unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
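Editor's note: the save_stack_address() callbacks above only record reliable addresses, honour trace->skip and stop at trace->max_entries. The cut-down model below shows that filtering; the struct definition is a pared-down stand-in with the same field names, not the kernel's struct stack_trace.

	#include <stdio.h>

	#define MAX_ENTRIES 4

	struct stack_trace {	/* illustrative stand-in, not the kernel struct */
		unsigned int nr_entries, max_entries, skip;
		unsigned long entries[MAX_ENTRIES];
	};

	/*
	 * Mirrors save_stack_address(): drop 'skip' leading frames, then
	 * record addresses until the buffer is full.
	 */
	static void record(struct stack_trace *trace, unsigned long addr)
	{
		if (trace->skip > 0) {
			trace->skip--;
			return;
		}
		if (trace->nr_entries < trace->max_entries)
			trace->entries[trace->nr_entries++] = addr;
	}

	int main(void)
	{
		struct stack_trace trace = { .max_entries = MAX_ENTRIES, .skip = 1 };
		unsigned long frames[] = { 0x8c001000, 0x8c002000, 0x8c003000 };
		unsigned int i;

		for (i = 0; i < sizeof(frames) / sizeof(frames[0]); i++)
			record(&trace, frames[i]);

		for (i = 0; i < trace.nr_entries; i++)
			printf("[%u] 0x%lx\n", i, trace.entries[i]);
		return 0;
	}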
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index 9b352a1e3fb..7f95f479060 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -21,6 +21,7 @@
#include <linux/smp.h>
#include <linux/rtc.h>
#include <asm/clock.h>
+#include <asm/hwblk.h>
#include <asm/rtc.h>
/* Dummy RTC ops */
@@ -91,11 +92,27 @@ module_init(rtc_generic_init);
void (*board_time_init)(void);
+static void __init sh_late_time_init(void)
+{
+ /*
+ * Make sure all compiled-in early timers register themselves.
+ *
+ * Run probe() for two "earlytimer" devices, these will be the
+ * clockevents and clocksource devices respectively. In the event
+ * that only a clockevents device is available, we -ENODEV on the
+ * clocksource and the jiffies clocksource is used transparently
+ * instead. No error handling is necessary here.
+ */
+ early_platform_driver_register_all("earlytimer");
+ early_platform_driver_probe("earlytimer", 2, 0);
+}
+
void __init time_init(void)
{
if (board_time_init)
board_time_init();
+ hwblk_init();
clk_init();
rtc_sh_get_time(&xtime);
@@ -106,15 +123,5 @@ void __init time_init(void)
local_timer_setup(smp_processor_id());
#endif
- /*
- * Make sure all compiled-in early timers register themselves.
- *
- * Run probe() for two "earlytimer" devices, these will be the
- * clockevents and clocksource devices respectively. In the event
- * that only a clockevents device is available, we -ENODEV on the
- * clocksource and the jiffies clocksource is used transparently
- * instead. No error handling is necessary here.
- */
- early_platform_driver_register_all("earlytimer");
- early_platform_driver_probe("earlytimer", 2, 0);
+ late_time_init = sh_late_time_init;
}
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 2b772776fcd..563426487c6 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -858,30 +858,6 @@ void __init trap_init(void)
per_cpu_trap_init();
}
-void show_trace(struct task_struct *tsk, unsigned long *sp,
- struct pt_regs *regs)
-{
- unsigned long addr;
-
- if (regs && user_mode(regs))
- return;
-
- printk("\nCall trace:\n");
-
- while (!kstack_end(sp)) {
- addr = *sp++;
- if (kernel_text_address(addr))
- print_ip_sym(addr);
- }
-
- printk("\n");
-
- if (!tsk)
- tsk = current;
-
- debug_show_held_locks(tsk);
-}
-
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
unsigned long stack;
diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c
new file mode 100644
index 00000000000..2b30fa28b44
--- /dev/null
+++ b/arch/sh/kernel/unwinder.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2009 Matt Fleming
+ *
+ * Based, in part, on kernel/time/clocksource.c.
+ *
+ * This file provides arbitration code for stack unwinders.
+ *
+ * Multiple stack unwinders can be available on a system, usually with
+ * the most accurate unwinder being the currently active one.
+ */
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <asm/unwinder.h>
+#include <asm/atomic.h>
+
+/*
+ * This is the most basic stack unwinder an architecture can
+ * provide. For architectures without reliable frame pointers, e.g.
+ * RISC CPUs, it can be implemented by looking through the stack for
+ * addresses that lie within the kernel text section.
+ *
+ * Other CPUs, e.g. x86, can use their frame pointer register to
+ * construct more accurate stack traces.
+ */
+static struct list_head unwinder_list;
+static struct unwinder stack_reader = {
+ .name = "stack-reader",
+ .dump = stack_reader_dump,
+ .rating = 50,
+ .list = {
+ .next = &unwinder_list,
+ .prev = &unwinder_list,
+ },
+};
+
+/*
+ * "curr_unwinder" points to the stack unwinder currently in use. This
+ * is the unwinder with the highest rating.
+ *
+ * "unwinder_list" is a linked-list of all available unwinders, sorted
+ * by rating.
+ *
+ * All modifications of "curr_unwinder" and "unwinder_list" must be
+ * performed whilst holding "unwinder_lock".
+ */
+static struct unwinder *curr_unwinder = &stack_reader;
+
+static struct list_head unwinder_list = {
+ .next = &stack_reader.list,
+ .prev = &stack_reader.list,
+};
+
+static DEFINE_SPINLOCK(unwinder_lock);
+
+static atomic_t unwinder_running = ATOMIC_INIT(0);
+
+/**
+ * select_unwinder - Select the best registered stack unwinder.
+ *
+ * Private function. Must hold unwinder_lock when called.
+ *
+ * Select the stack unwinder with the best rating. This is useful for
+ * setting up curr_unwinder.
+ */
+static struct unwinder *select_unwinder(void)
+{
+ struct unwinder *best;
+
+ if (list_empty(&unwinder_list))
+ return NULL;
+
+ best = list_entry(unwinder_list.next, struct unwinder, list);
+ if (best == curr_unwinder)
+ return NULL;
+
+ return best;
+}
+
+/*
+ * Enqueue the stack unwinder sorted by rating.
+ */
+static int unwinder_enqueue(struct unwinder *ops)
+{
+ struct list_head *tmp, *entry = &unwinder_list;
+
+ list_for_each(tmp, &unwinder_list) {
+ struct unwinder *o;
+
+ o = list_entry(tmp, struct unwinder, list);
+ if (o == ops)
+ return -EBUSY;
+ /* Keep track of where to insert */
+ if (o->rating >= ops->rating)
+ entry = tmp;
+ }
+ list_add(&ops->list, entry);
+
+ return 0;
+}
+
+/**
+ * unwinder_register - Used to install new stack unwinder
+ * @u: unwinder to be registered
+ *
+ * Install the new stack unwinder on the unwinder list, which is sorted
+ * by rating.
+ *
+ * Returns -EBUSY if registration fails, zero otherwise.
+ */
+int unwinder_register(struct unwinder *u)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&unwinder_lock, flags);
+ ret = unwinder_enqueue(u);
+ if (!ret)
+ curr_unwinder = select_unwinder();
+ spin_unlock_irqrestore(&unwinder_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Unwind the call stack and pass information to the stacktrace_ops
+ * functions. Also handle the case where we need to switch to a new
+ * stack dumper because the current one faulted unexpectedly.
+ */
+void unwind_stack(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *sp, const struct stacktrace_ops *ops,
+ void *data)
+{
+ unsigned long flags;
+
+ /*
+ * The problem with unwinders with high ratings is that they are
+ * inherently more complicated than the simple ones with lower
+ * ratings. We are therefore more likely to fault in the
+ * complicated ones, e.g. hitting BUG()s. If we fault in the
+ * code for the current stack unwinder we try to downgrade to
+ * one with a lower rating.
+ *
+ * Hopefully this will give us a semi-reliable stacktrace so we
+ * can diagnose why curr_unwinder->dump() faulted.
+ */
+ if (atomic_inc_return(&unwinder_running) != 1) {
+ spin_lock_irqsave(&unwinder_lock, flags);
+
+ if (!list_is_singular(&unwinder_list)) {
+ list_del(&curr_unwinder->list);
+ curr_unwinder = select_unwinder();
+ }
+
+ spin_unlock_irqrestore(&unwinder_lock, flags);
+ atomic_dec(&unwinder_running);
+ }
+
+ curr_unwinder->dump(task, regs, sp, ops, data);
+
+ atomic_dec(&unwinder_running);
+}
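Editor's note: unwinder_enqueue() keeps the registered unwinders sorted by rating so that select_unwinder() can simply take the head of the list, and unwind_stack() drops down to the next-best entry if the current one faults. The small array-based sketch below shows the rating-ordered registration; the array, MAX_UNWINDERS and register_unwinder() are illustrative and stand in for the kernel's linked list.

	#include <stdio.h>

	struct unwinder {
		const char *name;
		int rating;
	};

	#define MAX_UNWINDERS 4

	static struct unwinder registered[MAX_UNWINDERS];
	static int nr_unwinders;

	/*
	 * Insert sorted by descending rating; registered[0] plays the role
	 * of curr_unwinder (what select_unwinder() would return).
	 */
	static void register_unwinder(struct unwinder u)
	{
		int i;

		if (nr_unwinders == MAX_UNWINDERS)
			return;

		i = nr_unwinders++;
		while (i > 0 && registered[i - 1].rating < u.rating) {
			registered[i] = registered[i - 1];
			i--;
		}
		registered[i] = u;
	}

	int main(void)
	{
		register_unwinder((struct unwinder){ "stack-reader", 50 });
		register_unwinder((struct unwinder){ "dwarf-unwinder", 150 });

		printf("current unwinder: %s\n", registered[0].name);
		return 0;
	}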
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index f53c76acaed..1b7d9d541e0 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -12,7 +12,7 @@ OUTPUT_ARCH(sh)
#include <asm/thread_info.h>
#include <asm/cache.h>
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
ENTRY(_start)
SECTIONS
@@ -50,12 +50,7 @@ SECTIONS
_etext = .; /* End of text section */
} = 0x0009
- . = ALIGN(16); /* Exception table */
- __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- }
+ EXCEPTION_TABLE(16)
NOTES
RO_DATA(PAGE_SIZE)
@@ -71,69 +66,16 @@ SECTIONS
__uncached_end = .;
}
- . = ALIGN(THREAD_SIZE);
- .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */
- *(.data.init_task)
-
- . = ALIGN(L1_CACHE_BYTES);
- *(.data.cacheline_aligned)
-
- . = ALIGN(L1_CACHE_BYTES);
- *(.data.read_mostly)
-
- . = ALIGN(PAGE_SIZE);
- *(.data.page_aligned)
-
- __nosave_begin = .;
- *(.data.nosave)
- . = ALIGN(PAGE_SIZE);
- __nosave_end = .;
-
- DATA_DATA
- CONSTRUCTORS
- }
+ RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
_edata = .; /* End of data section */
- . = ALIGN(PAGE_SIZE); /* Init code and data */
- .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
- __init_begin = .;
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
-
- .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { INIT_DATA }
-
- . = ALIGN(16);
- .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
- }
-
- .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
- }
-
- .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
- __con_initcall_start = .;
- *(.con_initcall.init)
- __con_initcall_end = .;
- }
-
- SECURITY_INIT
+ DWARF_EH_FRAME
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(PAGE_SIZE);
- .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
- __initramfs_start = .;
- *(.init.ramfs)
- __initramfs_end = .;
- }
-#endif
+ . = ALIGN(PAGE_SIZE); /* Init code and data */
+ __init_begin = .;
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
. = ALIGN(4);
.machvec.init : AT(ADDR(.machvec.init) - LOAD_OFFSET) {
@@ -152,16 +94,10 @@ SECTIONS
.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA }
. = ALIGN(PAGE_SIZE);
- .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
- __init_end = .;
- __bss_start = .; /* BSS */
- *(.bss.page_aligned)
- *(.bss)
- *(COMMON)
- . = ALIGN(4);
- _ebss = .; /* uClinux MTD sucks */
- _end = . ;
- }
+ __init_end = .;
+ BSS_SECTION(0, PAGE_SIZE, 4)
+ _ebss = .; /* uClinux MTD sucks */
+ _end = . ;
/*
* When something in the kernel is NOT compiled as a module, the
@@ -170,7 +106,7 @@ SECTIONS
* it's a module.
*/
/DISCARD/ : {
- *(.exitcall.exit)
+ EXIT_CALL
}
STABS_DEBUG