From 849620fab413355eff48232eac5a8c53c57615c5 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Thu, 14 May 2009 17:10:52 +0200 Subject: Revert "oprofile: discover counters for op ppro too" This reverts commit 59512900baab03c5629f2ff5efad1d5d4e682ece. arch_perfmon_setup_counters() is actually never called for ppro, so there is no code that changes the numbers in op_ppro_spec. The patch as it is has no effect. Cc: Andi Kleen Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_ppro.c | 8 +++----- arch/x86/oprofile/op_x86_model.h | 2 +- 2 files changed, 4 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 10131fbdaad..2a123990a84 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -213,9 +213,9 @@ static void ppro_shutdown(struct op_msrs const * const msrs) } -struct op_x86_model_spec op_ppro_spec = { - .num_counters = 2, /* can be overriden */ - .num_controls = 2, /* dito */ +struct op_x86_model_spec const op_ppro_spec = { + .num_counters = 2, + .num_controls = 2, .fill_in_addresses = &ppro_fill_in_addresses, .setup_ctrs = &ppro_setup_ctrs, .check_ctrs = &ppro_check_ctrs, @@ -251,8 +251,6 @@ void arch_perfmon_setup_counters(void) op_arch_perfmon_spec.num_counters = num_counters; op_arch_perfmon_spec.num_controls = num_counters; - op_ppro_spec.num_counters = num_counters; - op_ppro_spec.num_controls = num_counters; } struct op_x86_model_spec op_arch_perfmon_spec = { diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index 825e79064d6..2317149c94f 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h @@ -45,7 +45,7 @@ struct op_x86_model_spec { void (*shutdown)(struct op_msrs const * const msrs); }; -extern struct op_x86_model_spec op_ppro_spec; +extern struct op_x86_model_spec const op_ppro_spec; extern struct op_x86_model_spec const op_p4_spec; extern struct op_x86_model_spec const op_p4_ht2_spec; extern struct op_x86_model_spec const op_amd_spec; -- cgit v1.2.3-70-g09d2 From e419294ed3c98cccc145202e4fe165bfd8099d63 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Sun, 12 Oct 2008 15:12:34 -0400 Subject: x86/oprofile: moving arch_perfmon counter setup to op_x86_model_spec.init The function arch_perfmon_init() in nmi_int.c is model specific. This patch moves it to op_model_ppro.c by using the init function pointer in struct op_x86_model_spec. 
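For reference, a short sketch of how the init hook is expected to be invoked from the generic code (an assumed shape of the nmi_int.c call site, shown only to illustrate the dispatch; not part of the diff below):

	/* in op_nmi_init(), once a model has been selected: */
	if (model->init) {
		int ret = model->init(ops);
		if (ret)
			return ret;
	}
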
Cc: Andi Kleen Signed-off-by: Robert Richter --- arch/x86/oprofile/nmi_int.c | 21 +++++++++------------ arch/x86/oprofile/op_model_ppro.c | 9 ++++++++- arch/x86/oprofile/op_x86_model.h | 2 -- 3 files changed, 17 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 3b285e656e2..dd8515301fb 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -427,7 +427,7 @@ static int __init ppro_init(char **cpu_type) *cpu_type = "i386/core_2"; break; case 26: - arch_perfmon_setup_counters(); + model = &op_arch_perfmon_spec; *cpu_type = "i386/core_i7"; break; case 28: @@ -442,16 +442,6 @@ static int __init ppro_init(char **cpu_type) return 1; } -static int __init arch_perfmon_init(char **cpu_type) -{ - if (!cpu_has_arch_perfmon) - return 0; - *cpu_type = "i386/arch_perfmon"; - model = &op_arch_perfmon_spec; - arch_perfmon_setup_counters(); - return 1; -} - /* in order to get sysfs right */ static int using_nmi; @@ -509,8 +499,15 @@ int __init op_nmi_init(struct oprofile_operations *ops) break; } - if (!cpu_type && !arch_perfmon_init(&cpu_type)) + if (cpu_type) + break; + + if (!cpu_has_arch_perfmon) return -ENODEV; + + /* use arch perfmon as fallback */ + cpu_type = "i386/arch_perfmon"; + model = &op_arch_perfmon_spec; break; default: diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 2a123990a84..ae581196688 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -233,7 +233,7 @@ struct op_x86_model_spec const op_ppro_spec = { * the specific CPU. */ -void arch_perfmon_setup_counters(void) +static void arch_perfmon_setup_counters(void) { union cpuid10_eax eax; @@ -253,7 +253,14 @@ void arch_perfmon_setup_counters(void) op_arch_perfmon_spec.num_controls = num_counters; } +static int arch_perfmon_init(struct oprofile_operations *ignore) +{ + arch_perfmon_setup_counters(); + return 0; +} + struct op_x86_model_spec op_arch_perfmon_spec = { + .init = &arch_perfmon_init, /* num_counters/num_controls filled in at runtime */ .fill_in_addresses = &ppro_fill_in_addresses, /* user space does the cpuid check for available events */ diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index 2317149c94f..ed27783bb0d 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h @@ -51,6 +51,4 @@ extern struct op_x86_model_spec const op_p4_ht2_spec; extern struct op_x86_model_spec const op_amd_spec; extern struct op_x86_model_spec op_arch_perfmon_spec; -extern void arch_perfmon_setup_counters(void); - #endif /* OP_X86_MODEL_H */ -- cgit v1.2.3-70-g09d2 From 06552ccc36abeb12e37efc16c384dc7f30794f85 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Thu, 28 May 2009 02:12:36 +0200 Subject: x86/oprofile: minor style changes in struct op_x86_model_spec Some vertical alignments. Variables are now located in the beginning of the struct. Signed-off-by: Robert Richter --- arch/x86/oprofile/op_x86_model.h | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index ed27783bb0d..bd8157d12ff 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h @@ -32,17 +32,17 @@ struct pt_regs; * various x86 CPU models' perfctr support. 
*/ struct op_x86_model_spec { - int (*init)(struct oprofile_operations *ops); - void (*exit)(void); - unsigned int num_counters; - unsigned int num_controls; - void (*fill_in_addresses)(struct op_msrs * const msrs); - void (*setup_ctrs)(struct op_msrs const * const msrs); - int (*check_ctrs)(struct pt_regs * const regs, - struct op_msrs const * const msrs); - void (*start)(struct op_msrs const * const msrs); - void (*stop)(struct op_msrs const * const msrs); - void (*shutdown)(struct op_msrs const * const msrs); + unsigned int num_counters; + unsigned int num_controls; + int (*init)(struct oprofile_operations *ops); + void (*exit)(void); + void (*fill_in_addresses)(struct op_msrs * const msrs); + void (*setup_ctrs)(struct op_msrs const * const msrs); + int (*check_ctrs)(struct pt_regs * const regs, + struct op_msrs const * const msrs); + void (*start)(struct op_msrs const * const msrs); + void (*stop)(struct op_msrs const * const msrs); + void (*shutdown)(struct op_msrs const * const msrs); }; extern struct op_x86_model_spec const op_ppro_spec; -- cgit v1.2.3-70-g09d2 From 9063759540daac40cc1f402f83a3be6b489f8583 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Tue, 10 Mar 2009 19:15:57 +0100 Subject: x86/oprofile: remove #ifdefs in ibs functions IBS code is moved to separate functions. This allows the removal of #ifdefs in functions. Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 80 +++++++++++++++++++++++----------------- 1 file changed, 46 insertions(+), 34 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 8fdf06e4edf..b54c0880b7d 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -220,6 +220,50 @@ op_amd_handle_ibs(struct pt_regs * const regs, return 1; } +static inline void op_amd_start_ibs(void) +{ + unsigned int low, high; + if (has_ibs && ibs_config.fetch_enabled) { + low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; + high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */ + + IBS_FETCH_HIGH_ENABLE; + wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); + } + + if (has_ibs && ibs_config.op_enabled) { + low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) + + ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */ + + IBS_OP_LOW_ENABLE; + high = 0; + wrmsr(MSR_AMD64_IBSOPCTL, low, high); + } +} + +static void op_amd_stop_ibs(void) +{ + unsigned int low, high; + if (has_ibs && ibs_config.fetch_enabled) { + /* clear max count and enable */ + low = 0; + high = 0; + wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); + } + + if (has_ibs && ibs_config.op_enabled) { + /* clear max count and enable */ + low = 0; + high = 0; + wrmsr(MSR_AMD64_IBSOPCTL, low, high); + } +} + +#else + +static inline int op_amd_handle_ibs(struct pt_regs * const regs, + struct op_msrs const * const msrs) { } +static inline void op_amd_start_ibs(void) { } +static inline void op_amd_stop_ibs(void) { } + #endif static int op_amd_check_ctrs(struct pt_regs * const regs, @@ -238,9 +282,7 @@ static int op_amd_check_ctrs(struct pt_regs * const regs, } } -#ifdef CONFIG_OPROFILE_IBS op_amd_handle_ibs(regs, msrs); -#endif /* See op_model_ppro.c */ return 1; @@ -258,25 +300,9 @@ static void op_amd_start(struct op_msrs const * const msrs) } } -#ifdef CONFIG_OPROFILE_IBS - if (has_ibs && ibs_config.fetch_enabled) { - low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; - high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */ - + IBS_FETCH_HIGH_ENABLE; - wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); - } - - if (has_ibs && 
ibs_config.op_enabled) { - low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) - + ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */ - + IBS_OP_LOW_ENABLE; - high = 0; - wrmsr(MSR_AMD64_IBSOPCTL, low, high); - } -#endif + op_amd_start_ibs(); } - static void op_amd_stop(struct op_msrs const * const msrs) { unsigned int low, high; @@ -294,21 +320,7 @@ static void op_amd_stop(struct op_msrs const * const msrs) CTRL_WRITE(low, high, msrs, i); } -#ifdef CONFIG_OPROFILE_IBS - if (has_ibs && ibs_config.fetch_enabled) { - /* clear max count and enable */ - low = 0; - high = 0; - wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); - } - - if (has_ibs && ibs_config.op_enabled) { - /* clear max count and enable */ - low = 0; - high = 0; - wrmsr(MSR_AMD64_IBSOPCTL, low, high); - } -#endif + op_amd_stop_ibs(); } static void op_amd_shutdown(struct op_msrs const * const msrs) -- cgit v1.2.3-70-g09d2 From d20f24c66011f8a397bca6c5d1a6a7c7e612d2d7 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Sun, 11 Jan 2009 13:01:16 +0100 Subject: x86/oprofile: simplify AMD cpu init code Signed-off-by: Robert Richter --- arch/x86/oprofile/nmi_int.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index dd8515301fb..ae0ab03959b 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -460,27 +460,26 @@ int __init op_nmi_init(struct oprofile_operations *ops) /* Needs to be at least an Athlon (or hammer in 32bit mode) */ switch (family) { - default: - return -ENODEV; case 6: - model = &op_amd_spec; cpu_type = "i386/athlon"; break; case 0xf: - model = &op_amd_spec; - /* Actually it could be i386/hammer too, but give - user space an consistent name. */ + /* + * Actually it could be i386/hammer too, but + * give user space an consistent name. + */ cpu_type = "x86-64/hammer"; break; case 0x10: - model = &op_amd_spec; cpu_type = "x86-64/family10"; break; case 0x11: - model = &op_amd_spec; cpu_type = "x86-64/family11h"; break; + default: + return -ENODEV; } + model = &op_amd_spec; break; case X86_VENDOR_INTEL: -- cgit v1.2.3-70-g09d2 From ff9faa8b676e195476b86f03fe58db0f01bda8f3 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Fri, 22 May 2009 15:36:29 +0200 Subject: x86/oprofile: move common macros to op_x86_model.h There are duplicate macro implementations in model specific code. This patch moves all common macros to op_x86_model.h. Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 8 -------- arch/x86/oprofile/op_model_p4.c | 2 -- arch/x86/oprofile/op_model_ppro.c | 8 -------- arch/x86/oprofile/op_x86_model.h | 9 +++++++++ 4 files changed, 9 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index b54c0880b7d..4b9254a67e6 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -26,22 +26,14 @@ #define NUM_COUNTERS 4 #define NUM_CONTROLS 4 -#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) #define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0) #define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) -#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 
1 : 0) #define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0) #define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0) -#define CTRL_SET_ACTIVE(n) (n |= (1<<22)) -#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) #define CTRL_CLEAR_LO(x) (x &= (1<<21)) #define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0) -#define CTRL_SET_ENABLE(val) (val |= 1<<20) -#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16)) -#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17)) -#define CTRL_SET_UM(val, m) (val |= (m << 8)) #define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff)) #define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf)) #define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9)) diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index 819b131fd75..420c15e7123 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c @@ -366,8 +366,6 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { #define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) #define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) -#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) -#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) #define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h)); } while (0) #define CTR_WRITE(l, i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1); } while (0) #define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000)) diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index ae581196688..a922a1a815c 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -26,19 +26,11 @@ static int num_counters = 2; static int counter_width = 32; -#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) #define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1)))) -#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) #define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) #define CTRL_WRITE(l, h, msrs, c) do {wrmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) -#define CTRL_SET_ACTIVE(n) (n |= (1<<22)) -#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) #define CTRL_CLEAR(x) (x &= (1<<21)) -#define CTRL_SET_ENABLE(val) (val |= 1<<20) -#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16)) -#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17)) -#define CTRL_SET_UM(val, m) (val |= (m << 8)) #define CTRL_SET_EVENT(val, e) (val |= e) static u64 *reset_value; diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index bd8157d12ff..c80ec7d0999 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h @@ -11,6 +11,15 @@ #ifndef OP_X86_MODEL_H #define OP_X86_MODEL_H +#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) +#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 
1 : 0) +#define CTRL_SET_ACTIVE(n) (n |= (1<<22)) +#define CTRL_SET_ENABLE(val) (val |= 1<<20) +#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) +#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17)) +#define CTRL_SET_UM(val, m) (val |= (m << 8)) +#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16)) + struct op_saved_msr { unsigned int high; unsigned int low; -- cgit v1.2.3-70-g09d2 From d2731a4387ad6c6bca07abfe9ed41d450fb6d665 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Fri, 22 May 2009 19:47:38 +0200 Subject: x86/oprofile: remove MSR macros for AMD cpus The macros CTRL_READ() and CTRL_WRITE() make the code hard to read and maintain. This patch replaces them by rdmsr()/wrmsr() functions and simplifies the code. Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 4b9254a67e6..c6181c265ae 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -26,12 +26,7 @@ #define NUM_COUNTERS 4 #define NUM_CONTROLS 4 -#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) -#define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0) #define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) - -#define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0) -#define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0) #define CTRL_CLEAR_LO(x) (x &= (1<<21)) #define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0) #define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff)) @@ -101,17 +96,17 @@ static void op_amd_setup_ctrs(struct op_msrs const * const msrs) for (i = 0 ; i < NUM_CONTROLS; ++i) { if (unlikely(!CTRL_IS_RESERVED(msrs, i))) continue; - CTRL_READ(low, high, msrs, i); + rdmsr(msrs->controls[i].addr, low, high); CTRL_CLEAR_LO(low); CTRL_CLEAR_HI(high); - CTRL_WRITE(low, high, msrs, i); + wrmsr(msrs->controls[i].addr, low, high); } /* avoid a false detection of ctr overflows in NMI handler */ for (i = 0; i < NUM_COUNTERS; ++i) { if (unlikely(!CTR_IS_RESERVED(msrs, i))) continue; - CTR_WRITE(1, msrs, i); + wrmsr(msrs->counters[i].addr, -1, -1); } /* enable active counters */ @@ -119,9 +114,9 @@ static void op_amd_setup_ctrs(struct op_msrs const * const msrs) if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { reset_value[i] = counter_config[i].count; - CTR_WRITE(counter_config[i].count, msrs, i); + wrmsr(msrs->counters[i].addr, -(unsigned int)counter_config[i].count, -1); - CTRL_READ(low, high, msrs, i); + rdmsr(msrs->controls[i].addr, low, high); CTRL_CLEAR_LO(low); CTRL_CLEAR_HI(high); CTRL_SET_ENABLE(low); @@ -133,7 +128,7 @@ static void op_amd_setup_ctrs(struct op_msrs const * const msrs) CTRL_SET_HOST_ONLY(high, 0); CTRL_SET_GUEST_ONLY(high, 0); - CTRL_WRITE(low, high, msrs, i); + wrmsr(msrs->controls[i].addr, low, high); } else { reset_value[i] = 0; } @@ -267,10 +262,10 @@ static int op_amd_check_ctrs(struct pt_regs * const regs, for (i = 0 ; i < NUM_COUNTERS; ++i) { if (!reset_value[i]) continue; - CTR_READ(low, high, msrs, i); + rdmsr(msrs->counters[i].addr, low, high); if (CTR_OVERFLOWED(low)) { oprofile_add_sample(regs, i); - CTR_WRITE(reset_value[i], msrs, i); + wrmsr(msrs->counters[i].addr, -(unsigned int)reset_value[i], -1); } } @@ -286,9 +281,9 @@ static void op_amd_start(struct op_msrs const * const msrs) int i; for (i = 0 ; i 
< NUM_COUNTERS ; ++i) { if (reset_value[i]) { - CTRL_READ(low, high, msrs, i); + rdmsr(msrs->controls[i].addr, low, high); CTRL_SET_ACTIVE(low); - CTRL_WRITE(low, high, msrs, i); + wrmsr(msrs->controls[i].addr, low, high); } } @@ -307,9 +302,9 @@ static void op_amd_stop(struct op_msrs const * const msrs) for (i = 0 ; i < NUM_COUNTERS ; ++i) { if (!reset_value[i]) continue; - CTRL_READ(low, high, msrs, i); + rdmsr(msrs->controls[i].addr, low, high); CTRL_SET_INACTIVE(low); - CTRL_WRITE(low, high, msrs, i); + wrmsr(msrs->controls[i].addr, low, high); } op_amd_stop_ibs(); -- cgit v1.2.3-70-g09d2 From 74c9a5c341bb1f6cbb5095b07c77230f19682ce8 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Fri, 22 May 2009 19:47:38 +0200 Subject: x86/oprofile: remove MSR macros for ppro cpus The macros CTRL_READ() and CTRL_WRITE() make the code hard to read and maintain. This patch replaces them by rdmsr()/wrmsr() functions and simplifies the code. Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_ppro.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index a922a1a815c..6c5d288c566 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -27,9 +27,6 @@ static int num_counters = 2; static int counter_width = 32; #define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1)))) - -#define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) -#define CTRL_WRITE(l, h, msrs, c) do {wrmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) #define CTRL_CLEAR(x) (x &= (1<<21)) #define CTRL_SET_EVENT(val, e) (val |= e) @@ -88,9 +85,9 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs) for (i = 0 ; i < num_counters; ++i) { if (unlikely(!CTRL_IS_RESERVED(msrs, i))) continue; - CTRL_READ(low, high, msrs, i); + rdmsr(msrs->controls[i].addr, low, high); CTRL_CLEAR(low); - CTRL_WRITE(low, high, msrs, i); + wrmsr(msrs->controls[i].addr, low, high); } /* avoid a false detection of ctr overflows in NMI handler */ @@ -107,14 +104,14 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs) wrmsrl(msrs->counters[i].addr, -reset_value[i]); - CTRL_READ(low, high, msrs, i); + rdmsr(msrs->controls[i].addr, low, high); CTRL_CLEAR(low); CTRL_SET_ENABLE(low); CTRL_SET_USR(low, counter_config[i].user); CTRL_SET_KERN(low, counter_config[i].kernel); CTRL_SET_UM(low, counter_config[i].unit_mask); CTRL_SET_EVENT(low, counter_config[i].event); - CTRL_WRITE(low, high, msrs, i); + wrmsr(msrs->controls[i].addr, low, high); } else { reset_value[i] = 0; } @@ -162,9 +159,9 @@ static void ppro_start(struct op_msrs const * const msrs) return; for (i = 0; i < num_counters; ++i) { if (reset_value[i]) { - CTRL_READ(low, high, msrs, i); + rdmsr(msrs->controls[i].addr, low, high); CTRL_SET_ACTIVE(low); - CTRL_WRITE(low, high, msrs, i); + wrmsr(msrs->controls[i].addr, low, high); } } } @@ -180,9 +177,9 @@ static void ppro_stop(struct op_msrs const * const msrs) for (i = 0; i < num_counters; ++i) { if (!reset_value[i]) continue; - CTRL_READ(low, high, msrs, i); + rdmsr(msrs->controls[i].addr, low, high); CTRL_SET_INACTIVE(low); - CTRL_WRITE(low, high, msrs, i); + wrmsr(msrs->controls[i].addr, low, high); } } -- cgit v1.2.3-70-g09d2 From 1131a478245b00664ae2dbc0f68db987b51fa806 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Mon, 25 May 2009 20:23:23 +0200 Subject: x86/oprofile: remove MSR macros for p4 cpus The macros CTRL_READ() 
and CTRL_WRITE() make the code hard to read and maintain. This patch replaces them by rdmsr()/wrmsr() functions and simplifies the code. Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_p4.c | 39 +++++++++++++++++++-------------------- 1 file changed, 19 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index 420c15e7123..365d8a9c03d 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c @@ -350,8 +350,6 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { #define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1)) #define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25)) #define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9)) -#define ESCR_READ(escr, high, ev, i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0) -#define ESCR_WRITE(escr, high, ev, i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0) #define CCCR_RESERVED_BITS 0x38030FFF #define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS) @@ -361,13 +359,9 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { #define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27)) #define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12)) #define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12)) -#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0) -#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0) #define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) #define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) -#define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h)); } while (0) -#define CTR_WRITE(l, i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1); } while (0) #define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000)) @@ -513,7 +507,7 @@ static void pmc_setup_one_p4_counter(unsigned int ctr) if (ev->bindings[i].virt_counter & counter_bit) { /* modify ESCR */ - ESCR_READ(escr, high, ev, i); + rdmsr(ev->bindings[i].escr_address, escr, high); ESCR_CLEAR(escr); if (stag == 0) { ESCR_SET_USR_0(escr, counter_config[ctr].user); @@ -524,10 +518,11 @@ static void pmc_setup_one_p4_counter(unsigned int ctr) } ESCR_SET_EVENT_SELECT(escr, ev->event_select); ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask); - ESCR_WRITE(escr, high, ev, i); + wrmsr(ev->bindings[i].escr_address, escr, high); /* modify CCCR */ - CCCR_READ(cccr, high, VIRT_CTR(stag, ctr)); + rdmsr(p4_counters[VIRT_CTR(stag, ctr)].cccr_address, + cccr, high); CCCR_CLEAR(cccr); CCCR_SET_REQUIRED_BITS(cccr); CCCR_SET_ESCR_SELECT(cccr, ev->escr_select); @@ -535,7 +530,8 @@ static void pmc_setup_one_p4_counter(unsigned int ctr) CCCR_SET_PMI_OVF_0(cccr); else CCCR_SET_PMI_OVF_1(cccr); - CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr)); + wrmsr(p4_counters[VIRT_CTR(stag, ctr)].cccr_address, + cccr, high); return; } } @@ -582,7 +578,8 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs) if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs, i))) { reset_value[i] = counter_config[i].count; pmc_setup_one_p4_counter(i); - CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i)); + wrmsr(p4_counters[VIRT_CTR(stag, i)].counter_address, + -(u32)counter_config[i].count, -1); } else { reset_value[i] = 0; } @@ -622,14 +619,16 @@ static int p4_check_ctrs(struct pt_regs * const regs, real = VIRT_CTR(stag, i); - CCCR_READ(low, high, real); - CTR_READ(ctr, high, real); + 
rdmsr(p4_counters[real].cccr_address, low, high); + rdmsr(p4_counters[real].counter_address, ctr, high); if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) { oprofile_add_sample(regs, i); - CTR_WRITE(reset_value[i], real); + wrmsr(p4_counters[real].counter_address, + -(u32)reset_value[i], -1); CCCR_CLEAR_OVF(low); - CCCR_WRITE(low, high, real); - CTR_WRITE(reset_value[i], real); + wrmsr(p4_counters[real].cccr_address, low, high); + wrmsr(p4_counters[real].counter_address, + -(u32)reset_value[i], -1); } } @@ -651,9 +650,9 @@ static void p4_start(struct op_msrs const * const msrs) for (i = 0; i < num_counters; ++i) { if (!reset_value[i]) continue; - CCCR_READ(low, high, VIRT_CTR(stag, i)); + rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); CCCR_SET_ENABLE(low); - CCCR_WRITE(low, high, VIRT_CTR(stag, i)); + wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); } } @@ -668,9 +667,9 @@ static void p4_stop(struct op_msrs const * const msrs) for (i = 0; i < num_counters; ++i) { if (!reset_value[i]) continue; - CCCR_READ(low, high, VIRT_CTR(stag, i)); + rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); CCCR_SET_DISABLE(low); - CCCR_WRITE(low, high, VIRT_CTR(stag, i)); + wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); } } -- cgit v1.2.3-70-g09d2 From ec064c093e254f4433afb17dcef7f964c76436af Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Mon, 25 May 2009 15:05:50 +0200 Subject: x86/oprofile: fix and cleanup CTRL_SET_* macros This patch fixes missing braces around macro parameters. Macro definitions from intel_arch_perfmon.h are used where possible. Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_ppro.c | 1 - arch/x86/oprofile/op_x86_model.h | 18 ++++++++++-------- 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 6c5d288c566..61ee8f64053 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -18,7 +18,6 @@ #include #include #include -#include #include "op_x86_model.h" #include "op_counter.h" diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index c80ec7d0999..a207b1c46e2 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h @@ -11,14 +11,16 @@ #ifndef OP_X86_MODEL_H #define OP_X86_MODEL_H -#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) -#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) -#define CTRL_SET_ACTIVE(n) (n |= (1<<22)) -#define CTRL_SET_ENABLE(val) (val |= 1<<20) -#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) -#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17)) -#define CTRL_SET_UM(val, m) (val |= (m << 8)) -#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16)) +#include + +#define CTR_IS_RESERVED(msrs, c) ((msrs)->counters[(c)].addr ? 1 : 0) +#define CTRL_IS_RESERVED(msrs, c) ((msrs)->controls[(c)].addr ? 1 : 0) +#define CTRL_SET_ACTIVE(val) ((val) |= ARCH_PERFMON_EVENTSEL0_ENABLE) +#define CTRL_SET_ENABLE(val) ((val) |= ARCH_PERFMON_EVENTSEL_INT) +#define CTRL_SET_INACTIVE(val) ((val) &= ~ARCH_PERFMON_EVENTSEL0_ENABLE) +#define CTRL_SET_KERN(val, k) ((val) |= ((k) ? ARCH_PERFMON_EVENTSEL_OS : 0)) +#define CTRL_SET_USR(val, u) ((val) |= ((u) ? 
ARCH_PERFMON_EVENTSEL_USR : 0)) +#define CTRL_SET_UM(val, m) ((val) |= ((m) << 8)) struct op_saved_msr { unsigned int high; -- cgit v1.2.3-70-g09d2 From 9c59354b48ce9cf28048b02fea73dd0236f876ea Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Mon, 25 May 2009 18:16:43 +0200 Subject: x86/oprofile: remove unused macros for AMD virtualization profiling The use of the macros has no effect. oprofilefs has to be extended first to support these features. Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index c6181c265ae..aaa7ffaed6b 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -31,8 +31,6 @@ #define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0) #define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff)) #define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf)) -#define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9)) -#define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8)) static unsigned long reset_value[NUM_COUNTERS]; @@ -125,9 +123,6 @@ static void op_amd_setup_ctrs(struct op_msrs const * const msrs) CTRL_SET_UM(low, counter_config[i].unit_mask); CTRL_SET_EVENT_LOW(low, counter_config[i].event); CTRL_SET_EVENT_HIGH(high, counter_config[i].event); - CTRL_SET_HOST_ONLY(high, 0); - CTRL_SET_GUEST_ONLY(high, 0); - wrmsr(msrs->controls[i].addr, low, high); } else { reset_value[i] = 0; } -- cgit v1.2.3-70-g09d2 From ef8828ddf828174785421af67c281144d4b8e796 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Mon, 25 May 2009 19:31:44 +0200 Subject: x86/oprofile: pass the model to setup_ctrs() functions In follow-on patches the setup_ctrs() functions will need data that describes the model. This patch extends the function argument list to pass a pointer to the model to these functions.
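As an illustration of where this leads, the model pointer lets an implementation consult per-model data instead of hard-coded macros. A minimal sketch using names introduced by later patches in this series (model->reserved, the 64-bit MSR accessors); not part of this patch itself:

	static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
				      struct op_msrs const * const msrs)
	{
		u64 val;
		int i;

		/* clear all counter control registers */
		for (i = 0; i < NUM_CONTROLS; ++i) {
			if (unlikely(!msrs->controls[i].addr))
				continue;
			rdmsrl(msrs->controls[i].addr, val);
			val &= model->reserved;	/* keep only reserved bits */
			wrmsrl(msrs->controls[i].addr, val);
		}
	}
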
Signed-off-by: Robert Richter --- arch/x86/oprofile/nmi_int.c | 2 +- arch/x86/oprofile/op_model_amd.c | 3 ++- arch/x86/oprofile/op_model_p4.c | 3 ++- arch/x86/oprofile/op_model_ppro.c | 3 ++- arch/x86/oprofile/op_x86_model.h | 3 ++- 5 files changed, 9 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index ae0ab03959b..c31f87bbf43 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -125,7 +125,7 @@ static void nmi_cpu_setup(void *dummy) int cpu = smp_processor_id(); struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); spin_lock(&oprofilefs_lock); - model->setup_ctrs(msrs); + model->setup_ctrs(model, msrs); spin_unlock(&oprofilefs_lock); per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC); apic_write(APIC_LVTPC, APIC_DM_NMI); diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index aaa7ffaed6b..86e0a01ba12 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -85,7 +85,8 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs) } -static void op_amd_setup_ctrs(struct op_msrs const * const msrs) +static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, + struct op_msrs const * const msrs) { unsigned int low, high; int i; diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index 365d8a9c03d..05ba0287b1f 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c @@ -542,7 +542,8 @@ static void pmc_setup_one_p4_counter(unsigned int ctr) } -static void p4_setup_ctrs(struct op_msrs const * const msrs) +static void p4_setup_ctrs(struct op_x86_model_spec const *model, + struct op_msrs const * const msrs) { unsigned int i; unsigned int low, high; diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 61ee8f64053..40b44ee521d 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -51,7 +51,8 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs) } -static void ppro_setup_ctrs(struct op_msrs const * const msrs) +static void ppro_setup_ctrs(struct op_x86_model_spec const *model, + struct op_msrs const * const msrs) { unsigned int low, high; int i; diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index a207b1c46e2..6161c7f0e7f 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h @@ -48,7 +48,8 @@ struct op_x86_model_spec { int (*init)(struct oprofile_operations *ops); void (*exit)(void); void (*fill_in_addresses)(struct op_msrs * const msrs); - void (*setup_ctrs)(struct op_msrs const * const msrs); + void (*setup_ctrs)(struct op_x86_model_spec const *model, + struct op_msrs const * const msrs); int (*check_ctrs)(struct pt_regs * const regs, struct op_msrs const * const msrs); void (*start)(struct op_msrs const * const msrs); -- cgit v1.2.3-70-g09d2 From 3370d358569755625aba4d9a846a040ce691d9ed Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Mon, 25 May 2009 15:10:32 +0200 Subject: x86/oprofile: replace macros to calculate control register This patch introduces op_x86_get_ctrl() to calculate the value of the performance control register. This is generic code usable for all models. The event and reserved masks are model specific and stored in struct op_x86_model_spec. 64 bit MSR functions are used now. The patch removes many hard to read macros used for ctrl calculation. 
The function op_x86_get_ctrl() is common code and the first step to further merge performance counter implementations for x86 models. Signed-off-by: Robert Richter --- arch/x86/oprofile/nmi_int.c | 20 +++++++++++++++++++ arch/x86/oprofile/op_model_amd.c | 41 +++++++++++++++------------------------ arch/x86/oprofile/op_model_ppro.c | 29 +++++++++++++-------------- arch/x86/oprofile/op_x86_model.h | 15 ++++++++++---- 4 files changed, 60 insertions(+), 45 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index c31f87bbf43..388ee15e0e4 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -31,6 +31,26 @@ static DEFINE_PER_CPU(unsigned long, saved_lvtpc); /* 0 == registered but off, 1 == registered and on */ static int nmi_enabled = 0; +/* common functions */ + +u64 op_x86_get_ctrl(struct op_x86_model_spec const *model, + struct op_counter_config *counter_config) +{ + u64 val = 0; + u16 event = (u16)counter_config->event; + + val |= ARCH_PERFMON_EVENTSEL_INT; + val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0; + val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0; + val |= (counter_config->unit_mask & 0xFF) << 8; + event &= model->event_mask ? model->event_mask : 0xFF; + val |= event & 0xFF; + val |= (event & 0x0F00) << 24; + + return val; +} + + static int profile_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) { diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 86e0a01ba12..2406ab86360 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -25,12 +25,11 @@ #define NUM_COUNTERS 4 #define NUM_CONTROLS 4 +#define OP_EVENT_MASK 0x0FFF + +#define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21)) #define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) -#define CTRL_CLEAR_LO(x) (x &= (1<<21)) -#define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0) -#define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff)) -#define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf)) static unsigned long reset_value[NUM_COUNTERS]; @@ -84,21 +83,19 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs) } } - static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, struct op_msrs const * const msrs) { - unsigned int low, high; + u64 val; int i; /* clear all counters */ for (i = 0 ; i < NUM_CONTROLS; ++i) { if (unlikely(!CTRL_IS_RESERVED(msrs, i))) continue; - rdmsr(msrs->controls[i].addr, low, high); - CTRL_CLEAR_LO(low); - CTRL_CLEAR_HI(high); - wrmsr(msrs->controls[i].addr, low, high); + rdmsrl(msrs->controls[i].addr, val); + val &= model->reserved; + wrmsrl(msrs->controls[i].addr, val); } /* avoid a false detection of ctr overflows in NMI handler */ @@ -112,19 +109,11 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, for (i = 0; i < NUM_COUNTERS; ++i) { if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { reset_value[i] = counter_config[i].count; - wrmsr(msrs->counters[i].addr, -(unsigned int)counter_config[i].count, -1); - - rdmsr(msrs->controls[i].addr, low, high); - CTRL_CLEAR_LO(low); - CTRL_CLEAR_HI(high); - CTRL_SET_ENABLE(low); - CTRL_SET_USR(low, counter_config[i].user); - CTRL_SET_KERN(low, counter_config[i].kernel); - CTRL_SET_UM(low, counter_config[i].unit_mask); - CTRL_SET_EVENT_LOW(low, counter_config[i].event); - CTRL_SET_EVENT_HIGH(high, counter_config[i].event); - wrmsr(msrs->controls[i].addr, low, high); + rdmsrl(msrs->controls[i].addr, val); + val &= 
model->reserved; + val |= op_x86_get_ctrl(model, &counter_config[i]); + wrmsrl(msrs->controls[i].addr, val); } else { reset_value[i] = 0; } @@ -486,14 +475,16 @@ static void op_amd_exit(void) {} #endif /* CONFIG_OPROFILE_IBS */ struct op_x86_model_spec const op_amd_spec = { - .init = op_amd_init, - .exit = op_amd_exit, .num_counters = NUM_COUNTERS, .num_controls = NUM_CONTROLS, + .reserved = MSR_AMD_EVENTSEL_RESERVED, + .event_mask = OP_EVENT_MASK, + .init = op_amd_init, + .exit = op_amd_exit, .fill_in_addresses = &op_amd_fill_in_addresses, .setup_ctrs = &op_amd_setup_ctrs, .check_ctrs = &op_amd_check_ctrs, .start = &op_amd_start, .stop = &op_amd_stop, - .shutdown = &op_amd_shutdown + .shutdown = &op_amd_shutdown, }; diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 40b44ee521d..3092f998baf 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -10,6 +10,7 @@ * @author Philippe Elie * @author Graydon Hoare * @author Andi Kleen + * @author Robert Richter */ #include @@ -26,8 +27,8 @@ static int num_counters = 2; static int counter_width = 32; #define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1)))) -#define CTRL_CLEAR(x) (x &= (1<<21)) -#define CTRL_SET_EVENT(val, e) (val |= e) + +#define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21)) static u64 *reset_value; @@ -54,7 +55,7 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs) static void ppro_setup_ctrs(struct op_x86_model_spec const *model, struct op_msrs const * const msrs) { - unsigned int low, high; + u64 val; int i; if (!reset_value) { @@ -85,9 +86,9 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model, for (i = 0 ; i < num_counters; ++i) { if (unlikely(!CTRL_IS_RESERVED(msrs, i))) continue; - rdmsr(msrs->controls[i].addr, low, high); - CTRL_CLEAR(low); - wrmsr(msrs->controls[i].addr, low, high); + rdmsrl(msrs->controls[i].addr, val); + val &= model->reserved; + wrmsrl(msrs->controls[i].addr, val); } /* avoid a false detection of ctr overflows in NMI handler */ @@ -101,17 +102,11 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model, for (i = 0; i < num_counters; ++i) { if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { reset_value[i] = counter_config[i].count; - wrmsrl(msrs->counters[i].addr, -reset_value[i]); - - rdmsr(msrs->controls[i].addr, low, high); - CTRL_CLEAR(low); - CTRL_SET_ENABLE(low); - CTRL_SET_USR(low, counter_config[i].user); - CTRL_SET_KERN(low, counter_config[i].kernel); - CTRL_SET_UM(low, counter_config[i].unit_mask); - CTRL_SET_EVENT(low, counter_config[i].event); - wrmsr(msrs->controls[i].addr, low, high); + rdmsrl(msrs->controls[i].addr, val); + val &= model->reserved; + val |= op_x86_get_ctrl(model, &counter_config[i]); + wrmsrl(msrs->controls[i].addr, val); } else { reset_value[i] = 0; } @@ -205,6 +200,7 @@ static void ppro_shutdown(struct op_msrs const * const msrs) struct op_x86_model_spec const op_ppro_spec = { .num_counters = 2, .num_controls = 2, + .reserved = MSR_PPRO_EVENTSEL_RESERVED, .fill_in_addresses = &ppro_fill_in_addresses, .setup_ctrs = &ppro_setup_ctrs, .check_ctrs = &ppro_check_ctrs, @@ -249,6 +245,7 @@ static int arch_perfmon_init(struct oprofile_operations *ignore) } struct op_x86_model_spec op_arch_perfmon_spec = { + .reserved = MSR_PPRO_EVENTSEL_RESERVED, .init = &arch_perfmon_init, /* num_counters/num_controls filled in at runtime */ .fill_in_addresses = &ppro_fill_in_addresses, diff --git a/arch/x86/oprofile/op_x86_model.h 
b/arch/x86/oprofile/op_x86_model.h index 6161c7f0e7f..3220d4ce632 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h @@ -6,21 +6,19 @@ * @remark Read the file COPYING * * @author Graydon Hoare + * @author Robert Richter */ #ifndef OP_X86_MODEL_H #define OP_X86_MODEL_H +#include #include #define CTR_IS_RESERVED(msrs, c) ((msrs)->counters[(c)].addr ? 1 : 0) #define CTRL_IS_RESERVED(msrs, c) ((msrs)->controls[(c)].addr ? 1 : 0) #define CTRL_SET_ACTIVE(val) ((val) |= ARCH_PERFMON_EVENTSEL0_ENABLE) -#define CTRL_SET_ENABLE(val) ((val) |= ARCH_PERFMON_EVENTSEL_INT) #define CTRL_SET_INACTIVE(val) ((val) &= ~ARCH_PERFMON_EVENTSEL0_ENABLE) -#define CTRL_SET_KERN(val, k) ((val) |= ((k) ? ARCH_PERFMON_EVENTSEL_OS : 0)) -#define CTRL_SET_USR(val, u) ((val) |= ((u) ? ARCH_PERFMON_EVENTSEL_USR : 0)) -#define CTRL_SET_UM(val, m) ((val) |= ((m) << 8)) struct op_saved_msr { unsigned int high; @@ -39,12 +37,16 @@ struct op_msrs { struct pt_regs; +struct oprofile_operations; + /* The model vtable abstracts the differences between * various x86 CPU models' perfctr support. */ struct op_x86_model_spec { unsigned int num_counters; unsigned int num_controls; + u64 reserved; + u16 event_mask; int (*init)(struct oprofile_operations *ops); void (*exit)(void); void (*fill_in_addresses)(struct op_msrs * const msrs); @@ -57,6 +59,11 @@ struct op_x86_model_spec { void (*shutdown)(struct op_msrs const * const msrs); }; +struct op_counter_config; + +extern u64 op_x86_get_ctrl(struct op_x86_model_spec const *model, + struct op_counter_config *counter_config); + extern struct op_x86_model_spec const op_ppro_spec; extern struct op_x86_model_spec const op_p4_spec; extern struct op_x86_model_spec const op_p4_ht2_spec; -- cgit v1.2.3-70-g09d2 From 42399adb239d4f1413899cc618ecf640779e79df Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Mon, 25 May 2009 17:59:06 +0200 Subject: x86/oprofile: replace CTR_OVERFLOWED macros The patch replaces all CTR_OVERFLOWED macros. 64 bit MSR functions and 64 bit counter values are used now. Thus, it will be easier to later extend the models to use more than 32 bit width counters. 
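Background for the overflow test: oprofile arms each counter with the negated sample count, so the counter counts upward and wraps past zero after that many events; on a 32-bit counter the top bit is set while counting and clears on overflow, which is what OP_CTR_OVERFLOW (1ULL<<31) checks. A conceptual sketch, written with the 64-bit accessors this series converges on:

	/* count = 100000 arms the counter at -100000 (bit 31 set);
	 * bit 31 clears once the counter wraps past zero */
	wrmsrl(msrs->counters[i].addr, -(s64)counter_config[i].count);

	/* NMI handler: a cleared bit means the counter overflowed */
	rdmsrl(msrs->counters[i].addr, val);
	if (!(val & OP_CTR_OVERFLOW))
		oprofile_add_sample(regs, i);
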
Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 16 ++++++++-------- arch/x86/oprofile/op_model_p4.c | 6 +++--- arch/x86/oprofile/op_model_ppro.c | 10 ++++------ 3 files changed, 15 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 2406ab86360..b5d678fbf03 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -26,11 +26,10 @@ #define NUM_COUNTERS 4 #define NUM_CONTROLS 4 #define OP_EVENT_MASK 0x0FFF +#define OP_CTR_OVERFLOW (1ULL<<31) #define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21)) -#define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) - static unsigned long reset_value[NUM_COUNTERS]; #ifdef CONFIG_OPROFILE_IBS @@ -241,17 +240,18 @@ static inline void op_amd_stop_ibs(void) { } static int op_amd_check_ctrs(struct pt_regs * const regs, struct op_msrs const * const msrs) { - unsigned int low, high; + u64 val; int i; for (i = 0 ; i < NUM_COUNTERS; ++i) { if (!reset_value[i]) continue; - rdmsr(msrs->counters[i].addr, low, high); - if (CTR_OVERFLOWED(low)) { - oprofile_add_sample(regs, i); - wrmsr(msrs->counters[i].addr, -(unsigned int)reset_value[i], -1); - } + rdmsrl(msrs->counters[i].addr, val); + /* bit is clear if overflowed: */ + if (val & OP_CTR_OVERFLOW) + continue; + oprofile_add_sample(regs, i); + wrmsr(msrs->counters[i].addr, -(unsigned int)reset_value[i], -1); } op_amd_handle_ibs(regs, msrs); diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index 05ba0287b1f..ac4ca28b9ed 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c @@ -32,6 +32,8 @@ #define NUM_CCCRS_HT2 9 #define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2) +#define OP_CTR_OVERFLOW (1ULL<<31) + static unsigned int num_counters = NUM_COUNTERS_NON_HT; static unsigned int num_controls = NUM_CONTROLS_NON_HT; @@ -362,8 +364,6 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = { #define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) #define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) -#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000)) - /* this assigns a "stagger" to the current CPU, which is used throughout the code in this module as an extra array offset, to select the "even" @@ -622,7 +622,7 @@ static int p4_check_ctrs(struct pt_regs * const regs, rdmsr(p4_counters[real].cccr_address, low, high); rdmsr(p4_counters[real].counter_address, ctr, high); - if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) { + if (CCCR_OVF_P(low) || !(ctr & OP_CTR_OVERFLOW)) { oprofile_add_sample(regs, i); wrmsr(p4_counters[real].counter_address, -(u32)reset_value[i], -1); diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 3092f998baf..82db396dc3e 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -26,8 +26,6 @@ static int num_counters = 2; static int counter_width = 32; -#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1)))) - #define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21)) static u64 *reset_value; @@ -124,10 +122,10 @@ static int ppro_check_ctrs(struct pt_regs * const regs, if (!reset_value[i]) continue; rdmsrl(msrs->counters[i].addr, val); - if (CTR_OVERFLOWED(val)) { - oprofile_add_sample(regs, i); - wrmsrl(msrs->counters[i].addr, -reset_value[i]); - } + if (val & (1ULL << (counter_width - 1))) + continue; + oprofile_add_sample(regs, i); + wrmsrl(msrs->counters[i].addr, -reset_value[i]); } /* Only P6 based Pentium M need to re-unmask the 
apic vector but it -- cgit v1.2.3-70-g09d2 From dea3766ca052a4f572b16a23a322553c064d75af Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Mon, 25 May 2009 18:11:52 +0200 Subject: x86/oprofile: replace CTRL_SET_*ACTIVE macros The patch replaces all CTRL_SET_*ACTIVE macros. 64 bit MSR functions and 64 bit counter values are used now. The code uses bit masks from . Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 16 ++++++++-------- arch/x86/oprofile/op_model_ppro.c | 16 ++++++++-------- arch/x86/oprofile/op_x86_model.h | 2 -- 3 files changed, 16 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index b5d678fbf03..4ac9d283e8d 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -262,13 +262,13 @@ static int op_amd_check_ctrs(struct pt_regs * const regs, static void op_amd_start(struct op_msrs const * const msrs) { - unsigned int low, high; + u64 val; int i; for (i = 0 ; i < NUM_COUNTERS ; ++i) { if (reset_value[i]) { - rdmsr(msrs->controls[i].addr, low, high); - CTRL_SET_ACTIVE(low); - wrmsr(msrs->controls[i].addr, low, high); + rdmsrl(msrs->controls[i].addr, val); + val |= ARCH_PERFMON_EVENTSEL0_ENABLE; + wrmsrl(msrs->controls[i].addr, val); } } @@ -277,7 +277,7 @@ static void op_amd_start(struct op_msrs const * const msrs) static void op_amd_stop(struct op_msrs const * const msrs) { - unsigned int low, high; + u64 val; int i; /* @@ -287,9 +287,9 @@ static void op_amd_stop(struct op_msrs const * const msrs) for (i = 0 ; i < NUM_COUNTERS ; ++i) { if (!reset_value[i]) continue; - rdmsr(msrs->controls[i].addr, low, high); - CTRL_SET_INACTIVE(low); - wrmsr(msrs->controls[i].addr, low, high); + rdmsrl(msrs->controls[i].addr, val); + val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; + wrmsrl(msrs->controls[i].addr, val); } op_amd_stop_ibs(); diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 82db396dc3e..566b43f0b6c 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -145,16 +145,16 @@ static int ppro_check_ctrs(struct pt_regs * const regs, static void ppro_start(struct op_msrs const * const msrs) { - unsigned int low, high; + u64 val; int i; if (!reset_value) return; for (i = 0; i < num_counters; ++i) { if (reset_value[i]) { - rdmsr(msrs->controls[i].addr, low, high); - CTRL_SET_ACTIVE(low); - wrmsr(msrs->controls[i].addr, low, high); + rdmsrl(msrs->controls[i].addr, val); + val |= ARCH_PERFMON_EVENTSEL0_ENABLE; + wrmsrl(msrs->controls[i].addr, val); } } } @@ -162,7 +162,7 @@ static void ppro_start(struct op_msrs const * const msrs) static void ppro_stop(struct op_msrs const * const msrs) { - unsigned int low, high; + u64 val; int i; if (!reset_value) @@ -170,9 +170,9 @@ static void ppro_stop(struct op_msrs const * const msrs) for (i = 0; i < num_counters; ++i) { if (!reset_value[i]) continue; - rdmsr(msrs->controls[i].addr, low, high); - CTRL_SET_INACTIVE(low); - wrmsr(msrs->controls[i].addr, low, high); + rdmsrl(msrs->controls[i].addr, val); + val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; + wrmsrl(msrs->controls[i].addr, val); } } diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index 3220d4ce632..1c4577795a9 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h @@ -17,8 +17,6 @@ #define CTR_IS_RESERVED(msrs, c) ((msrs)->counters[(c)].addr ? 1 : 0) #define CTRL_IS_RESERVED(msrs, c) ((msrs)->controls[(c)].addr ? 
1 : 0) -#define CTRL_SET_ACTIVE(val) ((val) |= ARCH_PERFMON_EVENTSEL0_ENABLE) -#define CTRL_SET_INACTIVE(val) ((val) &= ~ARCH_PERFMON_EVENTSEL0_ENABLE) struct op_saved_msr { unsigned int high; -- cgit v1.2.3-70-g09d2 From 217d3cfb959756cb493fc03106c0253baa420ce8 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Thu, 4 Jun 2009 02:36:44 +0200 Subject: x86/oprofile: replace CTR*_IS_RESERVED macros The patch replaces all CTR*_IS_RESERVED macros. Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 10 +++++----- arch/x86/oprofile/op_model_p4.c | 10 +++++----- arch/x86/oprofile/op_model_ppro.c | 10 +++++----- arch/x86/oprofile/op_x86_model.h | 3 --- 4 files changed, 15 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 4ac9d283e8d..c5c5eec2fa7 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -90,7 +90,7 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, /* clear all counters */ for (i = 0 ; i < NUM_CONTROLS; ++i) { - if (unlikely(!CTRL_IS_RESERVED(msrs, i))) + if (unlikely(!msrs->controls[i].addr)) continue; rdmsrl(msrs->controls[i].addr, val); val &= model->reserved; @@ -99,14 +99,14 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, /* avoid a false detection of ctr overflows in NMI handler */ for (i = 0; i < NUM_COUNTERS; ++i) { - if (unlikely(!CTR_IS_RESERVED(msrs, i))) + if (unlikely(!msrs->counters[i].addr)) continue; wrmsr(msrs->counters[i].addr, -1, -1); } /* enable active counters */ for (i = 0; i < NUM_COUNTERS; ++i) { - if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { + if (counter_config[i].enabled && msrs->counters[i].addr) { reset_value[i] = counter_config[i].count; wrmsr(msrs->counters[i].addr, -(unsigned int)counter_config[i].count, -1); rdmsrl(msrs->controls[i].addr, val); @@ -300,11 +300,11 @@ static void op_amd_shutdown(struct op_msrs const * const msrs) int i; for (i = 0 ; i < NUM_COUNTERS ; ++i) { - if (CTR_IS_RESERVED(msrs, i)) + if (msrs->counters[i].addr) release_perfctr_nmi(MSR_K7_PERFCTR0 + i); } for (i = 0 ; i < NUM_CONTROLS ; ++i) { - if (CTRL_IS_RESERVED(msrs, i)) + if (msrs->controls[i].addr) release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); } } diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index ac4ca28b9ed..9db0ca9af76 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c @@ -559,7 +559,7 @@ static void p4_setup_ctrs(struct op_x86_model_spec const *model, /* clear the cccrs we will use */ for (i = 0 ; i < num_counters ; i++) { - if (unlikely(!CTRL_IS_RESERVED(msrs, i))) + if (unlikely(!msrs->controls[i].addr)) continue; rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); CCCR_CLEAR(low); @@ -569,14 +569,14 @@ static void p4_setup_ctrs(struct op_x86_model_spec const *model, /* clear all escrs (including those outside our concern) */ for (i = num_counters; i < num_controls; i++) { - if (unlikely(!CTRL_IS_RESERVED(msrs, i))) + if (unlikely(!msrs->controls[i].addr)) continue; wrmsr(msrs->controls[i].addr, 0, 0); } /* setup all counters */ for (i = 0 ; i < num_counters ; ++i) { - if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs, i))) { + if (counter_config[i].enabled && msrs->controls[i].addr) { reset_value[i] = counter_config[i].count; pmc_setup_one_p4_counter(i); wrmsr(p4_counters[VIRT_CTR(stag, i)].counter_address, @@ -679,7 +679,7 @@ static void p4_shutdown(struct op_msrs const * const msrs) 
int i; for (i = 0 ; i < num_counters ; ++i) { - if (CTR_IS_RESERVED(msrs, i)) + if (msrs->counters[i].addr) release_perfctr_nmi(msrs->counters[i].addr); } /* @@ -688,7 +688,7 @@ static void p4_shutdown(struct op_msrs const * const msrs) * This saves a few bits. */ for (i = num_counters ; i < num_controls ; ++i) { - if (CTRL_IS_RESERVED(msrs, i)) + if (msrs->controls[i].addr) release_evntsel_nmi(msrs->controls[i].addr); } } diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 566b43f0b6c..0a261a5c696 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c @@ -82,7 +82,7 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model, /* clear all counters */ for (i = 0 ; i < num_counters; ++i) { - if (unlikely(!CTRL_IS_RESERVED(msrs, i))) + if (unlikely(!msrs->controls[i].addr)) continue; rdmsrl(msrs->controls[i].addr, val); val &= model->reserved; @@ -91,14 +91,14 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model, /* avoid a false detection of ctr overflows in NMI handler */ for (i = 0; i < num_counters; ++i) { - if (unlikely(!CTR_IS_RESERVED(msrs, i))) + if (unlikely(!msrs->counters[i].addr)) continue; wrmsrl(msrs->counters[i].addr, -1LL); } /* enable active counters */ for (i = 0; i < num_counters; ++i) { - if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { + if (counter_config[i].enabled && msrs->counters[i].addr) { reset_value[i] = counter_config[i].count; wrmsrl(msrs->counters[i].addr, -reset_value[i]); rdmsrl(msrs->controls[i].addr, val); @@ -181,11 +181,11 @@ static void ppro_shutdown(struct op_msrs const * const msrs) int i; for (i = 0 ; i < num_counters ; ++i) { - if (CTR_IS_RESERVED(msrs, i)) + if (msrs->counters[i].addr) release_perfctr_nmi(MSR_P6_PERFCTR0 + i); } for (i = 0 ; i < num_counters ; ++i) { - if (CTRL_IS_RESERVED(msrs, i)) + if (msrs->controls[i].addr) release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); } if (reset_value) { diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index 1c4577795a9..69f1eb46e1b 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h @@ -15,9 +15,6 @@ #include #include -#define CTR_IS_RESERVED(msrs, c) ((msrs)->counters[(c)].addr ? 1 : 0) -#define CTRL_IS_RESERVED(msrs, c) ((msrs)->controls[(c)].addr ? 1 : 0) - struct op_saved_msr { unsigned int high; unsigned int low; -- cgit v1.2.3-70-g09d2 From bbc5986d2db427fdd61b6116ff8b9ed988e663a8 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Mon, 25 May 2009 17:38:19 +0200 Subject: x86/oprofile: use 64 bit wrmsr functions This patch replaces some wrmsr() functions with wrmsrl(). 
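For reference, the 64-bit accessors carry the same value as the low/high pair forms, so the conversion is mechanical; in particular, the old wrmsr(addr, -(u32)count, -1) writes the same bit pattern as wrmsrl(addr, -(s64)count) for counts below 2^32. A short sketch (msr stands for any MSR address):

	unsigned int lo, hi;
	u64 val;

	rdmsr(msr, lo, hi);		/* old: two 32-bit halves */
	val = ((u64)hi << 32) | lo;	/* the value rdmsrl() returns */
	rdmsrl(msr, val);		/* new: one 64-bit read */
	wrmsrl(msr, val);		/* replaces wrmsr(msr, lo, hi) */
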
Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 7 ++++--- arch/x86/oprofile/op_model_p4.c | 12 ++++++------ 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index c5c5eec2fa7..9bf90176241 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -101,14 +101,15 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, for (i = 0; i < NUM_COUNTERS; ++i) { if (unlikely(!msrs->counters[i].addr)) continue; - wrmsr(msrs->counters[i].addr, -1, -1); + wrmsrl(msrs->counters[i].addr, -1LL); } /* enable active counters */ for (i = 0; i < NUM_COUNTERS; ++i) { if (counter_config[i].enabled && msrs->counters[i].addr) { reset_value[i] = counter_config[i].count; - wrmsr(msrs->counters[i].addr, -(unsigned int)counter_config[i].count, -1); + wrmsrl(msrs->counters[i].addr, + -(s64)counter_config[i].count); rdmsrl(msrs->controls[i].addr, val); val &= model->reserved; val |= op_x86_get_ctrl(model, &counter_config[i]); @@ -251,7 +252,7 @@ static int op_amd_check_ctrs(struct pt_regs * const regs, if (val & OP_CTR_OVERFLOW) continue; oprofile_add_sample(regs, i); - wrmsr(msrs->counters[i].addr, -(unsigned int)reset_value[i], -1); + wrmsrl(msrs->counters[i].addr, -(s64)reset_value[i]); } op_amd_handle_ibs(regs, msrs); diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index 9db0ca9af76..f01e53b118f 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c @@ -579,8 +579,8 @@ static void p4_setup_ctrs(struct op_x86_model_spec const *model, if (counter_config[i].enabled && msrs->controls[i].addr) { reset_value[i] = counter_config[i].count; pmc_setup_one_p4_counter(i); - wrmsr(p4_counters[VIRT_CTR(stag, i)].counter_address, - -(u32)counter_config[i].count, -1); + wrmsrl(p4_counters[VIRT_CTR(stag, i)].counter_address, + -(s64)counter_config[i].count); } else { reset_value[i] = 0; } @@ -624,12 +624,12 @@ static int p4_check_ctrs(struct pt_regs * const regs, rdmsr(p4_counters[real].counter_address, ctr, high); if (CCCR_OVF_P(low) || !(ctr & OP_CTR_OVERFLOW)) { oprofile_add_sample(regs, i); - wrmsr(p4_counters[real].counter_address, - -(u32)reset_value[i], -1); + wrmsrl(p4_counters[real].counter_address, + -(s64)reset_value[i]); CCCR_CLEAR_OVF(low); wrmsr(p4_counters[real].cccr_address, low, high); - wrmsr(p4_counters[real].counter_address, - -(u32)reset_value[i], -1); + wrmsrl(p4_counters[real].counter_address, + -(s64)reset_value[i]); } } -- cgit v1.2.3-70-g09d2 From 95e74e62c1540b1115fe8cec5b592f22960f2bb2 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 3 Jun 2009 19:09:27 +0200 Subject: x86/oprofile: use 64 bit values to save MSR states This patch removes struct op_saved_msr and replaces it with a u64 variable. This simplifies the code and makes it possible to use the 64 bit MSR functions.
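Sketched against the diff below, the save path collapses from a struct holding two halves to a single rdmsrl() into a u64 field:

	/* before: two 32 bit halves per saved MSR */
	struct op_saved_msr {
		unsigned int high;
		unsigned int low;
	};
	rdmsr(counters[i].addr, counters[i].saved.low, counters[i].saved.high);

	/* after: one 64 bit field (in struct op_msr), one call */
	u64 saved;
	rdmsrl(counters[i].addr, counters[i].saved);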
Signed-off-by: Robert Richter --- arch/x86/oprofile/nmi_int.c | 28 ++++++++-------------------- arch/x86/oprofile/op_x86_model.h | 9 ++------- 2 files changed, 10 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 388ee15e0e4..3b84b789de0 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -78,19 +78,13 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs) unsigned int i; for (i = 0; i < nr_ctrs; ++i) { - if (counters[i].addr) { - rdmsr(counters[i].addr, - counters[i].saved.low, - counters[i].saved.high); - } + if (counters[i].addr) + rdmsrl(counters[i].addr, counters[i].saved); } for (i = 0; i < nr_ctrls; ++i) { - if (controls[i].addr) { - rdmsr(controls[i].addr, - controls[i].saved.low, - controls[i].saved.high); - } + if (controls[i].addr) + rdmsrl(controls[i].addr, controls[i].saved); } } @@ -204,19 +198,13 @@ static void nmi_restore_registers(struct op_msrs *msrs) unsigned int i; for (i = 0; i < nr_ctrls; ++i) { - if (controls[i].addr) { - wrmsr(controls[i].addr, - controls[i].saved.low, - controls[i].saved.high); - } + if (controls[i].addr) + wrmsrl(controls[i].addr, controls[i].saved); } for (i = 0; i < nr_ctrs; ++i) { - if (counters[i].addr) { - wrmsr(counters[i].addr, - counters[i].saved.low, - counters[i].saved.high); - } + if (counters[i].addr) + wrmsrl(counters[i].addr, counters[i].saved); } } diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h index 69f1eb46e1b..fda52b4c1b9 100644 --- a/arch/x86/oprofile/op_x86_model.h +++ b/arch/x86/oprofile/op_x86_model.h @@ -15,14 +15,9 @@ #include #include -struct op_saved_msr { - unsigned int high; - unsigned int low; -}; - struct op_msr { - unsigned long addr; - struct op_saved_msr saved; + unsigned long addr; + u64 saved; }; struct op_msrs { -- cgit v1.2.3-70-g09d2 From 1a245c45343651a87ff63afc5ddeb8e24d731835 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Fri, 5 Jun 2009 15:54:24 +0200 Subject: x86/oprofile: remove some local variables in MSR save/restore functions The patch removes some local variables in these functions. 
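In short, the loop bounds now come straight from the model instead of being cached in const locals (a sketch of the change in the diff below):

	/* before */
	unsigned int const nr_ctrs = model->num_counters;
	for (i = 0; i < nr_ctrs; ++i) {
		if (counters[i].addr)
			rdmsrl(counters[i].addr, counters[i].saved);
	}

	/* after */
	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			rdmsrl(counters[i].addr, counters[i].saved);
	}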
Signed-off-by: Robert Richter --- arch/x86/oprofile/nmi_int.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 3b84b789de0..80b63d5db50 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -71,18 +71,16 @@ static int profile_exceptions_notify(struct notifier_block *self, static void nmi_cpu_save_registers(struct op_msrs *msrs) { - unsigned int const nr_ctrs = model->num_counters; - unsigned int const nr_ctrls = model->num_controls; struct op_msr *counters = msrs->counters; struct op_msr *controls = msrs->controls; unsigned int i; - for (i = 0; i < nr_ctrs; ++i) { + for (i = 0; i < model->num_counters; ++i) { if (counters[i].addr) rdmsrl(counters[i].addr, counters[i].saved); } - for (i = 0; i < nr_ctrls; ++i) { + for (i = 0; i < model->num_controls; ++i) { if (controls[i].addr) rdmsrl(controls[i].addr, controls[i].saved); } @@ -191,18 +189,16 @@ static int nmi_setup(void) static void nmi_restore_registers(struct op_msrs *msrs) { - unsigned int const nr_ctrs = model->num_counters; - unsigned int const nr_ctrls = model->num_controls; struct op_msr *counters = msrs->counters; struct op_msr *controls = msrs->controls; unsigned int i; - for (i = 0; i < nr_ctrls; ++i) { + for (i = 0; i < model->num_controls; ++i) { if (controls[i].addr) wrmsrl(controls[i].addr, controls[i].saved); } - for (i = 0; i < nr_ctrs; ++i) { + for (i = 0; i < model->num_counters; ++i) { if (counters[i].addr) wrmsrl(counters[i].addr, counters[i].saved); } -- cgit v1.2.3-70-g09d2 From c572ae4efd1b0a5cc76c5e6aae05c1b182b6a69c Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 3 Jun 2009 20:10:39 +0200 Subject: x86/oprofile: use 64 bit values in IBS functions The IBS code internally uses 32 bit values (a low and a high value) to represent a 64 bit value. This patch changes the code to use 64 bit values throughout, so that the 64 bit MSR functions can be used. No functional changes.
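For example (a sketch; handle_fetch_sample() is only a placeholder for the real sampling path in the diff below), the valid-bit test no longer needs the high word:

	/* before: valid bit tested in the high 32 bit word */
	u32 low, high;
	rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
	if (high & IBS_FETCH_HIGH_VALID_BIT)
		handle_fetch_sample();

	/* after: one 64 bit value, bit masks defined as 1ULL shifts */
	u64 ctl;
	rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
	if (ctl & IBS_FETCH_VAL)
		handle_fetch_sample();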
Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 131 ++++++++++++++++++--------------------- 1 file changed, 61 insertions(+), 70 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 9bf90176241..6493ef7ae9a 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -35,16 +35,18 @@ static unsigned long reset_value[NUM_COUNTERS]; #ifdef CONFIG_OPROFILE_IBS /* IbsFetchCtl bits/masks */ -#define IBS_FETCH_HIGH_VALID_BIT (1UL << 17) /* bit 49 */ -#define IBS_FETCH_HIGH_ENABLE (1UL << 16) /* bit 48 */ -#define IBS_FETCH_LOW_MAX_CNT_MASK 0x0000FFFFUL /* MaxCnt mask */ +#define IBS_FETCH_RAND_EN (1ULL<<57) +#define IBS_FETCH_VAL (1ULL<<49) +#define IBS_FETCH_ENABLE (1ULL<<48) +#define IBS_FETCH_CNT_MASK 0xFFFF0000ULL /*IbsOpCtl bits */ -#define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */ -#define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */ +#define IBS_OP_CNT_CTL (1ULL<<19) +#define IBS_OP_VAL (1ULL<<18) +#define IBS_OP_ENABLE (1ULL<<17) -#define IBS_FETCH_SIZE 6 -#define IBS_OP_SIZE 12 +#define IBS_FETCH_SIZE 6 +#define IBS_OP_SIZE 12 static int has_ibs; /* AMD Family10h and later */ @@ -126,66 +128,63 @@ static inline int op_amd_handle_ibs(struct pt_regs * const regs, struct op_msrs const * const msrs) { - u32 low, high; - u64 msr; + u64 val, ctl; struct op_entry entry; if (!has_ibs) return 1; if (ibs_config.fetch_enabled) { - rdmsr(MSR_AMD64_IBSFETCHCTL, low, high); - if (high & IBS_FETCH_HIGH_VALID_BIT) { - rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr); - oprofile_write_reserve(&entry, regs, msr, + rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl); + if (ctl & IBS_FETCH_VAL) { + rdmsrl(MSR_AMD64_IBSFETCHLINAD, val); + oprofile_write_reserve(&entry, regs, val, IBS_FETCH_CODE, IBS_FETCH_SIZE); - oprofile_add_data(&entry, (u32)msr); - oprofile_add_data(&entry, (u32)(msr >> 32)); - oprofile_add_data(&entry, low); - oprofile_add_data(&entry, high); - rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr); - oprofile_add_data(&entry, (u32)msr); - oprofile_add_data(&entry, (u32)(msr >> 32)); + oprofile_add_data(&entry, (u32)val); + oprofile_add_data(&entry, (u32)(val >> 32)); + oprofile_add_data(&entry, (u32)ctl); + oprofile_add_data(&entry, (u32)(ctl >> 32)); + rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val); + oprofile_add_data(&entry, (u32)val); + oprofile_add_data(&entry, (u32)(val >> 32)); oprofile_write_commit(&entry); /* reenable the IRQ */ - high &= ~IBS_FETCH_HIGH_VALID_BIT; - high |= IBS_FETCH_HIGH_ENABLE; - low &= IBS_FETCH_LOW_MAX_CNT_MASK; - wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); + ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK); + ctl |= IBS_FETCH_ENABLE; + wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl); } } if (ibs_config.op_enabled) { - rdmsr(MSR_AMD64_IBSOPCTL, low, high); - if (low & IBS_OP_LOW_VALID_BIT) { - rdmsrl(MSR_AMD64_IBSOPRIP, msr); - oprofile_write_reserve(&entry, regs, msr, + rdmsrl(MSR_AMD64_IBSOPCTL, ctl); + if (ctl & IBS_OP_VAL) { + rdmsrl(MSR_AMD64_IBSOPRIP, val); + oprofile_write_reserve(&entry, regs, val, IBS_OP_CODE, IBS_OP_SIZE); - oprofile_add_data(&entry, (u32)msr); - oprofile_add_data(&entry, (u32)(msr >> 32)); - rdmsrl(MSR_AMD64_IBSOPDATA, msr); - oprofile_add_data(&entry, (u32)msr); - oprofile_add_data(&entry, (u32)(msr >> 32)); - rdmsrl(MSR_AMD64_IBSOPDATA2, msr); - oprofile_add_data(&entry, (u32)msr); - oprofile_add_data(&entry, (u32)(msr >> 32)); - rdmsrl(MSR_AMD64_IBSOPDATA3, msr); - oprofile_add_data(&entry, (u32)msr); - oprofile_add_data(&entry, (u32)(msr >> 32)); - rdmsrl(MSR_AMD64_IBSDCLINAD, msr); 
- oprofile_add_data(&entry, (u32)msr); - oprofile_add_data(&entry, (u32)(msr >> 32)); - rdmsrl(MSR_AMD64_IBSDCPHYSAD, msr); - oprofile_add_data(&entry, (u32)msr); - oprofile_add_data(&entry, (u32)(msr >> 32)); + oprofile_add_data(&entry, (u32)val); + oprofile_add_data(&entry, (u32)(val >> 32)); + rdmsrl(MSR_AMD64_IBSOPDATA, val); + oprofile_add_data(&entry, (u32)val); + oprofile_add_data(&entry, (u32)(val >> 32)); + rdmsrl(MSR_AMD64_IBSOPDATA2, val); + oprofile_add_data(&entry, (u32)val); + oprofile_add_data(&entry, (u32)(val >> 32)); + rdmsrl(MSR_AMD64_IBSOPDATA3, val); + oprofile_add_data(&entry, (u32)val); + oprofile_add_data(&entry, (u32)(val >> 32)); + rdmsrl(MSR_AMD64_IBSDCLINAD, val); + oprofile_add_data(&entry, (u32)val); + oprofile_add_data(&entry, (u32)(val >> 32)); + rdmsrl(MSR_AMD64_IBSDCPHYSAD, val); + oprofile_add_data(&entry, (u32)val); + oprofile_add_data(&entry, (u32)(val >> 32)); oprofile_write_commit(&entry); /* reenable the IRQ */ - high = 0; - low &= ~IBS_OP_LOW_VALID_BIT; - low |= IBS_OP_LOW_ENABLE; - wrmsr(MSR_AMD64_IBSOPCTL, low, high); + ctl &= ~IBS_OP_VAL & 0xFFFFFFFF; + ctl |= IBS_OP_ENABLE; + wrmsrl(MSR_AMD64_IBSOPCTL, ctl); } } @@ -194,39 +193,31 @@ op_amd_handle_ibs(struct pt_regs * const regs, static inline void op_amd_start_ibs(void) { - unsigned int low, high; + u64 val; if (has_ibs && ibs_config.fetch_enabled) { - low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; - high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */ - + IBS_FETCH_HIGH_ENABLE; - wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); + val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; + val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0; + val |= IBS_FETCH_ENABLE; + wrmsrl(MSR_AMD64_IBSFETCHCTL, val); } if (has_ibs && ibs_config.op_enabled) { - low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) - + ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */ - + IBS_OP_LOW_ENABLE; - high = 0; - wrmsr(MSR_AMD64_IBSOPCTL, low, high); + val = (ibs_config.max_cnt_op >> 4) & 0xFFFF; + val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0; + val |= IBS_OP_ENABLE; + wrmsrl(MSR_AMD64_IBSOPCTL, val); } } static void op_amd_stop_ibs(void) { - unsigned int low, high; - if (has_ibs && ibs_config.fetch_enabled) { + if (has_ibs && ibs_config.fetch_enabled) /* clear max count and enable */ - low = 0; - high = 0; - wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); - } + wrmsrl(MSR_AMD64_IBSFETCHCTL, 0); - if (has_ibs && ibs_config.op_enabled) { + if (has_ibs && ibs_config.op_enabled) /* clear max count and enable */ - low = 0; - high = 0; - wrmsr(MSR_AMD64_IBSOPCTL, low, high); - } + wrmsrl(MSR_AMD64_IBSOPCTL, 0); } #else -- cgit v1.2.3-70-g09d2 From 51563a0e5650d0d76539625388d72d62b34c726e Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 3 Jun 2009 20:54:56 +0200 Subject: x86/oprofile: introduce oprofile_add_data64() The IBS implementation writes 64 bit register values to the cpu buffer by writing two 32 bit values using oprofile_add_data(). This patch introduces oprofile_add_data64() to write a single 64 bit value to the buffer.
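A caller such as the IBS handler then shrinks to one call per register, as the diff below shows:

	/* before: two buffer entries per 64 bit register */
	oprofile_add_data(&entry, (u32)val);
	oprofile_add_data(&entry, (u32)(val >> 32));

	/* after: the helper performs the low/high split internally */
	oprofile_add_data64(&entry, val);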
Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 27 +++++++++------------------ drivers/oprofile/cpu_buffer.c | 15 +++++++++++++++ include/linux/oprofile.h | 1 + 3 files changed, 25 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 6493ef7ae9a..cc930467575 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -140,13 +140,10 @@ op_amd_handle_ibs(struct pt_regs * const regs, rdmsrl(MSR_AMD64_IBSFETCHLINAD, val); oprofile_write_reserve(&entry, regs, val, IBS_FETCH_CODE, IBS_FETCH_SIZE); - oprofile_add_data(&entry, (u32)val); - oprofile_add_data(&entry, (u32)(val >> 32)); - oprofile_add_data(&entry, (u32)ctl); - oprofile_add_data(&entry, (u32)(ctl >> 32)); + oprofile_add_data64(&entry, val); + oprofile_add_data64(&entry, ctl); rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val); - oprofile_add_data(&entry, (u32)val); - oprofile_add_data(&entry, (u32)(val >> 32)); + oprofile_add_data64(&entry, val); oprofile_write_commit(&entry); /* reenable the IRQ */ @@ -162,23 +159,17 @@ op_amd_handle_ibs(struct pt_regs * const regs, rdmsrl(MSR_AMD64_IBSOPRIP, val); oprofile_write_reserve(&entry, regs, val, IBS_OP_CODE, IBS_OP_SIZE); - oprofile_add_data(&entry, (u32)val); - oprofile_add_data(&entry, (u32)(val >> 32)); + oprofile_add_data64(&entry, val); rdmsrl(MSR_AMD64_IBSOPDATA, val); - oprofile_add_data(&entry, (u32)val); - oprofile_add_data(&entry, (u32)(val >> 32)); + oprofile_add_data64(&entry, val); rdmsrl(MSR_AMD64_IBSOPDATA2, val); - oprofile_add_data(&entry, (u32)val); - oprofile_add_data(&entry, (u32)(val >> 32)); + oprofile_add_data64(&entry, val); rdmsrl(MSR_AMD64_IBSOPDATA3, val); - oprofile_add_data(&entry, (u32)val); - oprofile_add_data(&entry, (u32)(val >> 32)); + oprofile_add_data64(&entry, val); rdmsrl(MSR_AMD64_IBSDCLINAD, val); - oprofile_add_data(&entry, (u32)val); - oprofile_add_data(&entry, (u32)(val >> 32)); + oprofile_add_data64(&entry, val); rdmsrl(MSR_AMD64_IBSDCPHYSAD, val); - oprofile_add_data(&entry, (u32)val); - oprofile_add_data(&entry, (u32)(val >> 32)); + oprofile_add_data64(&entry, val); oprofile_write_commit(&entry); /* reenable the IRQ */ diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 50640cc5eef..a7aae24f288 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c @@ -406,6 +406,21 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val) return op_cpu_buffer_add_data(entry, val); } +int oprofile_add_data64(struct op_entry *entry, u64 val) +{ + if (!entry->event) + return 0; + if (op_cpu_buffer_get_size(entry) < 2) + /* + * the function returns 0 to indicate a too small + * buffer, even if there is some space left + */ + return 0; + if (!op_cpu_buffer_add_data(entry, (u32)val)) + return 0; + return op_cpu_buffer_add_data(entry, (u32)(val >> 32)); +} + int oprofile_write_commit(struct op_entry *entry) { if (!entry->event) diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index dbbe2dbc441..d68d2ed94f1 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h @@ -179,6 +179,7 @@ void oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs, unsigned long pc, int code, int size); int oprofile_add_data(struct op_entry *entry, unsigned long val); +int oprofile_add_data64(struct op_entry *entry, u64 val); int oprofile_write_commit(struct op_entry *entry); #endif /* OPROFILE_H */ -- cgit v1.2.3-70-g09d2 From 
802070f5474af1a49435a9528aede47bb18abd47 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Fri, 12 Jun 2009 18:32:07 +0200 Subject: x86/oprofile: fix initialization of arch_perfmon for core_i7 Commit: e419294 x86/oprofile: moving arch_perfmon counter setup to op_x86_model_spec.init introduced a bug in the initialization of core_i7, causing the model to be incorrectly set to &op_ppro_spec. This patch fixes this. Signed-off-by: Robert Richter --- arch/x86/oprofile/nmi_int.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 7826dfcc842..28ee490c1b8 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -406,6 +406,7 @@ module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0); static int __init ppro_init(char **cpu_type) { __u8 cpu_model = boot_cpu_data.x86_model; + struct op_x86_model_spec const *spec = &op_ppro_spec; /* default */ if (force_arch_perfmon && cpu_has_arch_perfmon) return 0; @@ -432,7 +433,7 @@ static int __init ppro_init(char **cpu_type) *cpu_type = "i386/core_2"; break; case 26: - model = &op_arch_perfmon_spec; + spec = &op_arch_perfmon_spec; *cpu_type = "i386/core_i7"; break; case 28: @@ -443,7 +444,7 @@ static int __init ppro_init(char **cpu_type) return 0; } - model = &op_ppro_spec; + model = spec; return 1; } -- cgit v1.2.3-70-g09d2 From c64b04fe6e0cb7c78e01751a44ef56cf20344e87 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 14 Jun 2009 00:59:50 +0530 Subject: x86, cpu: cpu/proc.c display cache alignment and address sizes for 32 bit 32 bit kernels can also access x86_cache_alignment, x86_phys_bits and x86_virt_bits, so make them available to user space just as on 64 bit. Signed-off-by: Jaswinder Singh Rajput LKML-Reference: <1244921390.11733.30.camel@ht.satnam> Signed-off-by: H. Peter Anvin --- arch/x86/kernel/cpu/proc.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index d5e30397246..f82706a3901 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -116,11 +116,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize); #endif seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size); -#ifdef CONFIG_X86_64 seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment); seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", c->x86_phys_bits, c->x86_virt_bits); -#endif seq_printf(m, "power management:"); for (i = 0; i < 32; i++) { -- cgit v1.2.3-70-g09d2 From ac5672f82c39ff2f8dce81bf3e68b1dfc41f366f Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Tue, 14 Apr 2009 14:29:44 -0700 Subject: x86/paravirt: split paravirt definitions into paravirt_types.h Split the monolithic asm/paravirt.h into separate paravirt.h (inlines and other "active" definitions), and paravirt_types.h (types, constants and other "passive" definitions). This makes it easier to use the type/constant definitions without pulling in everything else and causing circular dependency problems.
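The resulting layout, abridged to pv_info as a sketch of the split:

	/* paravirt_types.h: "passive" declarations, own include guard */
	#ifndef _ASM_X86_PARAVIRT_TYPES_H
	#define _ASM_X86_PARAVIRT_TYPES_H

	struct pv_info {
		unsigned int kernel_rpl;
		int shared_kernel_pmd;
		int paravirt_enabled;
		const char *name;
	};

	extern struct pv_info pv_info;

	#endif /* _ASM_X86_PARAVIRT_TYPES_H */

	/* paravirt.h: pulls the types in, then defines the "active" inlines */
	#include <asm/paravirt_types.h>

	static inline int paravirt_enabled(void)
	{
		return pv_info.paravirt_enabled;
	}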
[ Impact: cleanup ] Signed-off-by: Jeremy Fitzhardinge --- arch/x86/include/asm/paravirt.h | 711 +-------------------------------- arch/x86/include/asm/paravirt_types.h | 720 ++++++++++++++++++++++++++++++++++ 2 files changed, 721 insertions(+), 710 deletions(-) create mode 100644 arch/x86/include/asm/paravirt_types.h (limited to 'arch') diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 4fb37c8a083..6a07af432c8 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -7,689 +7,11 @@ #include #include -/* Bitmask of what can be clobbered: usually at least eax. */ -#define CLBR_NONE 0 -#define CLBR_EAX (1 << 0) -#define CLBR_ECX (1 << 1) -#define CLBR_EDX (1 << 2) -#define CLBR_EDI (1 << 3) - -#ifdef CONFIG_X86_32 -/* CLBR_ANY should match all regs platform has. For i386, that's just it */ -#define CLBR_ANY ((1 << 4) - 1) - -#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX) -#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX) -#define CLBR_SCRATCH (0) -#else -#define CLBR_RAX CLBR_EAX -#define CLBR_RCX CLBR_ECX -#define CLBR_RDX CLBR_EDX -#define CLBR_RDI CLBR_EDI -#define CLBR_RSI (1 << 4) -#define CLBR_R8 (1 << 5) -#define CLBR_R9 (1 << 6) -#define CLBR_R10 (1 << 7) -#define CLBR_R11 (1 << 8) - -#define CLBR_ANY ((1 << 9) - 1) - -#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \ - CLBR_RCX | CLBR_R8 | CLBR_R9) -#define CLBR_RET_REG (CLBR_RAX) -#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11) - -#include -#endif /* X86_64 */ - -#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG) +#include #ifndef __ASSEMBLY__ #include #include -#include -#include - -struct page; -struct thread_struct; -struct desc_ptr; -struct tss_struct; -struct mm_struct; -struct desc_struct; -struct task_struct; - -/* - * Wrapper type for pointers to code which uses the non-standard - * calling convention. See PV_CALL_SAVE_REGS_THUNK below. - */ -struct paravirt_callee_save { - void *func; -}; - -/* general info */ -struct pv_info { - unsigned int kernel_rpl; - int shared_kernel_pmd; - int paravirt_enabled; - const char *name; -}; - -struct pv_init_ops { - /* - * Patch may replace one of the defined code sequences with - * arbitrary code, subject to the same register constraints. - * This generally means the code is not free to clobber any - * registers other than EAX. The patch function should return - * the number of bytes of code generated, as we nop pad the - * rest in generic code. - */ - unsigned (*patch)(u8 type, u16 clobber, void *insnbuf, - unsigned long addr, unsigned len); - - /* Basic arch-specific setup */ - void (*arch_setup)(void); - char *(*memory_setup)(void); - void (*post_allocator_init)(void); - - /* Print a banner to identify the environment */ - void (*banner)(void); -}; - - -struct pv_lazy_ops { - /* Set deferred update mode, used for batching operations. 
*/ - void (*enter)(void); - void (*leave)(void); -}; - -struct pv_time_ops { - void (*time_init)(void); - - /* Set and set time of day */ - unsigned long (*get_wallclock)(void); - int (*set_wallclock)(unsigned long); - - unsigned long long (*sched_clock)(void); - unsigned long (*get_tsc_khz)(void); -}; - -struct pv_cpu_ops { - /* hooks for various privileged instructions */ - unsigned long (*get_debugreg)(int regno); - void (*set_debugreg)(int regno, unsigned long value); - - void (*clts)(void); - - unsigned long (*read_cr0)(void); - void (*write_cr0)(unsigned long); - - unsigned long (*read_cr4_safe)(void); - unsigned long (*read_cr4)(void); - void (*write_cr4)(unsigned long); - -#ifdef CONFIG_X86_64 - unsigned long (*read_cr8)(void); - void (*write_cr8)(unsigned long); -#endif - - /* Segment descriptor handling */ - void (*load_tr_desc)(void); - void (*load_gdt)(const struct desc_ptr *); - void (*load_idt)(const struct desc_ptr *); - void (*store_gdt)(struct desc_ptr *); - void (*store_idt)(struct desc_ptr *); - void (*set_ldt)(const void *desc, unsigned entries); - unsigned long (*store_tr)(void); - void (*load_tls)(struct thread_struct *t, unsigned int cpu); -#ifdef CONFIG_X86_64 - void (*load_gs_index)(unsigned int idx); -#endif - void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum, - const void *desc); - void (*write_gdt_entry)(struct desc_struct *, - int entrynum, const void *desc, int size); - void (*write_idt_entry)(gate_desc *, - int entrynum, const gate_desc *gate); - void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries); - void (*free_ldt)(struct desc_struct *ldt, unsigned entries); - - void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t); - - void (*set_iopl_mask)(unsigned mask); - - void (*wbinvd)(void); - void (*io_delay)(void); - - /* cpuid emulation, mostly so that caps bits can be disabled */ - void (*cpuid)(unsigned int *eax, unsigned int *ebx, - unsigned int *ecx, unsigned int *edx); - - /* MSR, PMC and TSR operations. - err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */ - u64 (*read_msr_amd)(unsigned int msr, int *err); - u64 (*read_msr)(unsigned int msr, int *err); - int (*write_msr)(unsigned int msr, unsigned low, unsigned high); - - u64 (*read_tsc)(void); - u64 (*read_pmc)(int counter); - unsigned long long (*read_tscp)(unsigned int *aux); - - /* - * Atomically enable interrupts and return to userspace. This - * is only ever used to return to 32-bit processes; in a - * 64-bit kernel, it's used for 32-on-64 compat processes, but - * never native 64-bit processes. (Jump, not call.) - */ - void (*irq_enable_sysexit)(void); - - /* - * Switch to usermode gs and return to 64-bit usermode using - * sysret. Only used in 64-bit kernels to return to 64-bit - * processes. Usermode register state, including %rsp, must - * already be restored. - */ - void (*usergs_sysret64)(void); - - /* - * Switch to usermode gs and return to 32-bit usermode using - * sysret. Used to return to 32-on-64 compat processes. - * Other usermode register state, including %esp, must already - * be restored. - */ - void (*usergs_sysret32)(void); - - /* Normal iret. Jump to this with the standard iret stack - frame set up. */ - void (*iret)(void); - - void (*swapgs)(void); - - void (*start_context_switch)(struct task_struct *prev); - void (*end_context_switch)(struct task_struct *next); -}; - -struct pv_irq_ops { - void (*init_IRQ)(void); - - /* - * Get/set interrupt state. 
save_fl and restore_fl are only - * expected to use X86_EFLAGS_IF; all other bits - * returned from save_fl are undefined, and may be ignored by - * restore_fl. - * - * NOTE: These functions callers expect the callee to preserve - * more registers than the standard C calling convention. - */ - struct paravirt_callee_save save_fl; - struct paravirt_callee_save restore_fl; - struct paravirt_callee_save irq_disable; - struct paravirt_callee_save irq_enable; - - void (*safe_halt)(void); - void (*halt)(void); - -#ifdef CONFIG_X86_64 - void (*adjust_exception_frame)(void); -#endif -}; - -struct pv_apic_ops { -#ifdef CONFIG_X86_LOCAL_APIC - void (*setup_boot_clock)(void); - void (*setup_secondary_clock)(void); - - void (*startup_ipi_hook)(int phys_apicid, - unsigned long start_eip, - unsigned long start_esp); -#endif -}; - -struct pv_mmu_ops { - /* - * Called before/after init_mm pagetable setup. setup_start - * may reset %cr3, and may pre-install parts of the pagetable; - * pagetable setup is expected to preserve any existing - * mapping. - */ - void (*pagetable_setup_start)(pgd_t *pgd_base); - void (*pagetable_setup_done)(pgd_t *pgd_base); - - unsigned long (*read_cr2)(void); - void (*write_cr2)(unsigned long); - - unsigned long (*read_cr3)(void); - void (*write_cr3)(unsigned long); - - /* - * Hooks for intercepting the creation/use/destruction of an - * mm_struct. - */ - void (*activate_mm)(struct mm_struct *prev, - struct mm_struct *next); - void (*dup_mmap)(struct mm_struct *oldmm, - struct mm_struct *mm); - void (*exit_mmap)(struct mm_struct *mm); - - - /* TLB operations */ - void (*flush_tlb_user)(void); - void (*flush_tlb_kernel)(void); - void (*flush_tlb_single)(unsigned long addr); - void (*flush_tlb_others)(const struct cpumask *cpus, - struct mm_struct *mm, - unsigned long va); - - /* Hooks for allocating and freeing a pagetable top-level */ - int (*pgd_alloc)(struct mm_struct *mm); - void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd); - - /* - * Hooks for allocating/releasing pagetable pages when they're - * attached to a pagetable - */ - void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn); - void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn); - void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count); - void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn); - void (*release_pte)(unsigned long pfn); - void (*release_pmd)(unsigned long pfn); - void (*release_pud)(unsigned long pfn); - - /* Pagetable manipulation functions */ - void (*set_pte)(pte_t *ptep, pte_t pteval); - void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pteval); - void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); - void (*pte_update)(struct mm_struct *mm, unsigned long addr, - pte_t *ptep); - void (*pte_update_defer)(struct mm_struct *mm, - unsigned long addr, pte_t *ptep); - - pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr, - pte_t *ptep); - void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte); - - struct paravirt_callee_save pte_val; - struct paravirt_callee_save make_pte; - - struct paravirt_callee_save pgd_val; - struct paravirt_callee_save make_pgd; - -#if PAGETABLE_LEVELS >= 3 -#ifdef CONFIG_X86_PAE - void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); - void (*pte_clear)(struct mm_struct *mm, unsigned long addr, - pte_t *ptep); - void (*pmd_clear)(pmd_t *pmdp); - -#endif /* CONFIG_X86_PAE */ - - void (*set_pud)(pud_t *pudp, pud_t pudval); - - 
struct paravirt_callee_save pmd_val; - struct paravirt_callee_save make_pmd; - -#if PAGETABLE_LEVELS == 4 - struct paravirt_callee_save pud_val; - struct paravirt_callee_save make_pud; - - void (*set_pgd)(pgd_t *pudp, pgd_t pgdval); -#endif /* PAGETABLE_LEVELS == 4 */ -#endif /* PAGETABLE_LEVELS >= 3 */ - -#ifdef CONFIG_HIGHPTE - void *(*kmap_atomic_pte)(struct page *page, enum km_type type); -#endif - - struct pv_lazy_ops lazy_mode; - - /* dom0 ops */ - - /* Sometimes the physical address is a pfn, and sometimes its - an mfn. We can tell which is which from the index. */ - void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx, - phys_addr_t phys, pgprot_t flags); -}; - -struct raw_spinlock; -struct pv_lock_ops { - int (*spin_is_locked)(struct raw_spinlock *lock); - int (*spin_is_contended)(struct raw_spinlock *lock); - void (*spin_lock)(struct raw_spinlock *lock); - void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags); - int (*spin_trylock)(struct raw_spinlock *lock); - void (*spin_unlock)(struct raw_spinlock *lock); -}; - -/* This contains all the paravirt structures: we get a convenient - * number for each function using the offset which we use to indicate - * what to patch. */ -struct paravirt_patch_template { - struct pv_init_ops pv_init_ops; - struct pv_time_ops pv_time_ops; - struct pv_cpu_ops pv_cpu_ops; - struct pv_irq_ops pv_irq_ops; - struct pv_apic_ops pv_apic_ops; - struct pv_mmu_ops pv_mmu_ops; - struct pv_lock_ops pv_lock_ops; -}; - -extern struct pv_info pv_info; -extern struct pv_init_ops pv_init_ops; -extern struct pv_time_ops pv_time_ops; -extern struct pv_cpu_ops pv_cpu_ops; -extern struct pv_irq_ops pv_irq_ops; -extern struct pv_apic_ops pv_apic_ops; -extern struct pv_mmu_ops pv_mmu_ops; -extern struct pv_lock_ops pv_lock_ops; - -#define PARAVIRT_PATCH(x) \ - (offsetof(struct paravirt_patch_template, x) / sizeof(void *)) - -#define paravirt_type(op) \ - [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \ - [paravirt_opptr] "i" (&(op)) -#define paravirt_clobber(clobber) \ - [paravirt_clobber] "i" (clobber) - -/* - * Generate some code, and mark it as patchable by the - * apply_paravirt() alternate instruction patcher. - */ -#define _paravirt_alt(insn_string, type, clobber) \ - "771:\n\t" insn_string "\n" "772:\n" \ - ".pushsection .parainstructions,\"a\"\n" \ - _ASM_ALIGN "\n" \ - _ASM_PTR " 771b\n" \ - " .byte " type "\n" \ - " .byte 772b-771b\n" \ - " .short " clobber "\n" \ - ".popsection\n" - -/* Generate patchable code, with the default asm parameters. */ -#define paravirt_alt(insn_string) \ - _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]") - -/* Simple instruction patching code. 
*/ -#define DEF_NATIVE(ops, name, code) \ - extern const char start_##ops##_##name[], end_##ops##_##name[]; \ - asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":") - -unsigned paravirt_patch_nop(void); -unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len); -unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len); -unsigned paravirt_patch_ignore(unsigned len); -unsigned paravirt_patch_call(void *insnbuf, - const void *target, u16 tgt_clobbers, - unsigned long addr, u16 site_clobbers, - unsigned len); -unsigned paravirt_patch_jmp(void *insnbuf, const void *target, - unsigned long addr, unsigned len); -unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, - unsigned long addr, unsigned len); - -unsigned paravirt_patch_insns(void *insnbuf, unsigned len, - const char *start, const char *end); - -unsigned native_patch(u8 type, u16 clobbers, void *ibuf, - unsigned long addr, unsigned len); - -int paravirt_disable_iospace(void); - -/* - * This generates an indirect call based on the operation type number. - * The type number, computed in PARAVIRT_PATCH, is derived from the - * offset into the paravirt_patch_template structure, and can therefore be - * freely converted back into a structure offset. - */ -#define PARAVIRT_CALL "call *%c[paravirt_opptr];" - -/* - * These macros are intended to wrap calls through one of the paravirt - * ops structs, so that they can be later identified and patched at - * runtime. - * - * Normally, a call to a pv_op function is a simple indirect call: - * (pv_op_struct.operations)(args...). - * - * Unfortunately, this is a relatively slow operation for modern CPUs, - * because it cannot necessarily determine what the destination - * address is. In this case, the address is a runtime constant, so at - * the very least we can patch the call to e a simple direct call, or - * ideally, patch an inline implementation into the callsite. (Direct - * calls are essentially free, because the call and return addresses - * are completely predictable.) - * - * For i386, these macros rely on the standard gcc "regparm(3)" calling - * convention, in which the first three arguments are placed in %eax, - * %edx, %ecx (in that order), and the remaining arguments are placed - * on the stack. All caller-save registers (eax,edx,ecx) are expected - * to be modified (either clobbered or used for return values). - * X86_64, on the other hand, already specifies a register-based calling - * conventions, returning at %rax, with parameteres going on %rdi, %rsi, - * %rdx, and %rcx. Note that for this reason, x86_64 does not need any - * special handling for dealing with 4 arguments, unlike i386. - * However, x86_64 also have to clobber all caller saved registers, which - * unfortunately, are quite a bit (r8 - r11) - * - * The call instruction itself is marked by placing its start address - * and size into the .parainstructions section, so that - * apply_paravirt() in arch/i386/kernel/alternative.c can do the - * appropriate patching under the control of the backend pv_init_ops - * implementation. - * - * Unfortunately there's no way to get gcc to generate the args setup - * for the call, and then allow the call itself to be generated by an - * inline asm. Because of this, we must do the complete arg setup and - * return value handling from within these macros. This is fairly - * cumbersome. - * - * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments. 
- * It could be extended to more arguments, but there would be little - * to be gained from that. For each number of arguments, there are - * the two VCALL and CALL variants for void and non-void functions. - * - * When there is a return value, the invoker of the macro must specify - * the return type. The macro then uses sizeof() on that type to - * determine whether its a 32 or 64 bit value, and places the return - * in the right register(s) (just %eax for 32-bit, and %edx:%eax for - * 64-bit). For x86_64 machines, it just returns at %rax regardless of - * the return value size. - * - * 64-bit arguments are passed as a pair of adjacent 32-bit arguments - * i386 also passes 64-bit arguments as a pair of adjacent 32-bit arguments - * in low,high order - * - * Small structures are passed and returned in registers. The macro - * calling convention can't directly deal with this, so the wrapper - * functions must do this. - * - * These PVOP_* macros are only defined within this header. This - * means that all uses must be wrapped in inline functions. This also - * makes sure the incoming and outgoing types are always correct. - */ -#ifdef CONFIG_X86_32 -#define PVOP_VCALL_ARGS \ - unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx -#define PVOP_CALL_ARGS PVOP_VCALL_ARGS - -#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x)) -#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x)) -#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x)) - -#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \ - "=c" (__ecx) -#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS - -#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx) -#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS - -#define EXTRA_CLOBBERS -#define VEXTRA_CLOBBERS -#else /* CONFIG_X86_64 */ -#define PVOP_VCALL_ARGS \ - unsigned long __edi = __edi, __esi = __esi, \ - __edx = __edx, __ecx = __ecx -#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax - -#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x)) -#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x)) -#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x)) -#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x)) - -#define PVOP_VCALL_CLOBBERS "=D" (__edi), \ - "=S" (__esi), "=d" (__edx), \ - "=c" (__ecx) -#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) - -#define PVOP_VCALLEE_CLOBBERS "=a" (__eax) -#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS - -#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11" -#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11" -#endif /* CONFIG_X86_32 */ - -#ifdef CONFIG_PARAVIRT_DEBUG -#define PVOP_TEST_NULL(op) BUG_ON(op == NULL) -#else -#define PVOP_TEST_NULL(op) ((void)op) -#endif - -#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \ - pre, post, ...) \ - ({ \ - rettype __ret; \ - PVOP_CALL_ARGS; \ - PVOP_TEST_NULL(op); \ - /* This is 32-bit specific, but is okay in 64-bit */ \ - /* since this condition will never hold */ \ - if (sizeof(rettype) > sizeof(unsigned long)) { \ - asm volatile(pre \ - paravirt_alt(PARAVIRT_CALL) \ - post \ - : call_clbr \ - : paravirt_type(op), \ - paravirt_clobber(clbr), \ - ##__VA_ARGS__ \ - : "memory", "cc" extra_clbr); \ - __ret = (rettype)((((u64)__edx) << 32) | __eax); \ - } else { \ - asm volatile(pre \ - paravirt_alt(PARAVIRT_CALL) \ - post \ - : call_clbr \ - : paravirt_type(op), \ - paravirt_clobber(clbr), \ - ##__VA_ARGS__ \ - : "memory", "cc" extra_clbr); \ - __ret = (rettype)__eax; \ - } \ - __ret; \ - }) - -#define __PVOP_CALL(rettype, op, pre, post, ...) 
\ - ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \ - EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__) - -#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \ - ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \ - PVOP_CALLEE_CLOBBERS, , \ - pre, post, ##__VA_ARGS__) - - -#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \ - ({ \ - PVOP_VCALL_ARGS; \ - PVOP_TEST_NULL(op); \ - asm volatile(pre \ - paravirt_alt(PARAVIRT_CALL) \ - post \ - : call_clbr \ - : paravirt_type(op), \ - paravirt_clobber(clbr), \ - ##__VA_ARGS__ \ - : "memory", "cc" extra_clbr); \ - }) - -#define __PVOP_VCALL(op, pre, post, ...) \ - ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \ - VEXTRA_CLOBBERS, \ - pre, post, ##__VA_ARGS__) - -#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \ - ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \ - PVOP_VCALLEE_CLOBBERS, , \ - pre, post, ##__VA_ARGS__) - - - -#define PVOP_CALL0(rettype, op) \ - __PVOP_CALL(rettype, op, "", "") -#define PVOP_VCALL0(op) \ - __PVOP_VCALL(op, "", "") - -#define PVOP_CALLEE0(rettype, op) \ - __PVOP_CALLEESAVE(rettype, op, "", "") -#define PVOP_VCALLEE0(op) \ - __PVOP_VCALLEESAVE(op, "", "") - - -#define PVOP_CALL1(rettype, op, arg1) \ - __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1)) -#define PVOP_VCALL1(op, arg1) \ - __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1)) - -#define PVOP_CALLEE1(rettype, op, arg1) \ - __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1)) -#define PVOP_VCALLEE1(op, arg1) \ - __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1)) - - -#define PVOP_CALL2(rettype, op, arg1, arg2) \ - __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ - PVOP_CALL_ARG2(arg2)) -#define PVOP_VCALL2(op, arg1, arg2) \ - __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \ - PVOP_CALL_ARG2(arg2)) - -#define PVOP_CALLEE2(rettype, op, arg1, arg2) \ - __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ - PVOP_CALL_ARG2(arg2)) -#define PVOP_VCALLEE2(op, arg1, arg2) \ - __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \ - PVOP_CALL_ARG2(arg2)) - - -#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \ - __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \ - PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3)) -#define PVOP_VCALL3(op, arg1, arg2, arg3) \ - __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \ - PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3)) - -/* This is the only difference in x86_64. 
We can make it much simpler */ -#ifdef CONFIG_X86_32 -#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ - __PVOP_CALL(rettype, op, \ - "push %[_arg4];", "lea 4(%%esp),%%esp;", \ - PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ - PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4))) -#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ - __PVOP_VCALL(op, \ - "push %[_arg4];", "lea 4(%%esp),%%esp;", \ - "0" ((u32)(arg1)), "1" ((u32)(arg2)), \ - "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) -#else -#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ - __PVOP_CALL(rettype, op, "", "", \ - PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ - PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4)) -#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ - __PVOP_VCALL(op, "", "", \ - PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \ - PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4)) -#endif static inline int paravirt_enabled(void) { @@ -1393,20 +715,6 @@ static inline void pmd_clear(pmd_t *pmdp) } #endif /* CONFIG_X86_PAE */ -/* Lazy mode for batching updates / context switch */ -enum paravirt_lazy_mode { - PARAVIRT_LAZY_NONE, - PARAVIRT_LAZY_MMU, - PARAVIRT_LAZY_CPU, -}; - -enum paravirt_lazy_mode paravirt_get_lazy_mode(void); -void paravirt_start_context_switch(struct task_struct *prev); -void paravirt_end_context_switch(struct task_struct *next); - -void paravirt_enter_lazy_mmu(void); -void paravirt_leave_lazy_mmu(void); - #define __HAVE_ARCH_START_CONTEXT_SWITCH static inline void arch_start_context_switch(struct task_struct *prev) { @@ -1437,12 +745,6 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, pv_mmu_ops.set_fixmap(idx, phys, flags); } -void _paravirt_nop(void); -u32 _paravirt_ident_32(u32); -u64 _paravirt_ident_64(u64); - -#define paravirt_nop ((void *)_paravirt_nop) - #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) static inline int __raw_spin_is_locked(struct raw_spinlock *lock) @@ -1479,17 +781,6 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock) #endif -/* These all sit in the .parainstructions section to tell us what to patch. */ -struct paravirt_patch_site { - u8 *instr; /* original instructions */ - u8 instrtype; /* type of this instruction */ - u8 len; /* length of original instruction */ - u16 clobbers; /* what registers you may clobber */ -}; - -extern struct paravirt_patch_site __parainstructions[], - __parainstructions_end[]; - #ifdef CONFIG_X86_32 #define PV_SAVE_REGS "pushl %ecx; pushl %edx;" #define PV_RESTORE_REGS "popl %edx; popl %ecx;" diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h new file mode 100644 index 00000000000..2b3371bae29 --- /dev/null +++ b/arch/x86/include/asm/paravirt_types.h @@ -0,0 +1,720 @@ +#ifndef _ASM_X86_PARAVIRT_TYPES_H +#define _ASM_X86_PARAVIRT_TYPES_H + +/* Bitmask of what can be clobbered: usually at least eax. */ +#define CLBR_NONE 0 +#define CLBR_EAX (1 << 0) +#define CLBR_ECX (1 << 1) +#define CLBR_EDX (1 << 2) +#define CLBR_EDI (1 << 3) + +#ifdef CONFIG_X86_32 +/* CLBR_ANY should match all regs platform has. 
For i386, that's just it */ +#define CLBR_ANY ((1 << 4) - 1) + +#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX) +#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX) +#define CLBR_SCRATCH (0) +#else +#define CLBR_RAX CLBR_EAX +#define CLBR_RCX CLBR_ECX +#define CLBR_RDX CLBR_EDX +#define CLBR_RDI CLBR_EDI +#define CLBR_RSI (1 << 4) +#define CLBR_R8 (1 << 5) +#define CLBR_R9 (1 << 6) +#define CLBR_R10 (1 << 7) +#define CLBR_R11 (1 << 8) + +#define CLBR_ANY ((1 << 9) - 1) + +#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \ + CLBR_RCX | CLBR_R8 | CLBR_R9) +#define CLBR_RET_REG (CLBR_RAX) +#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11) + +#endif /* X86_64 */ + +#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG) + +#ifndef __ASSEMBLY__ + +#include +#include + +struct page; +struct thread_struct; +struct desc_ptr; +struct tss_struct; +struct mm_struct; +struct desc_struct; +struct task_struct; +struct cpumask; + +/* + * Wrapper type for pointers to code which uses the non-standard + * calling convention. See PV_CALL_SAVE_REGS_THUNK below. + */ +struct paravirt_callee_save { + void *func; +}; + +/* general info */ +struct pv_info { + unsigned int kernel_rpl; + int shared_kernel_pmd; + int paravirt_enabled; + const char *name; +}; + +struct pv_init_ops { + /* + * Patch may replace one of the defined code sequences with + * arbitrary code, subject to the same register constraints. + * This generally means the code is not free to clobber any + * registers other than EAX. The patch function should return + * the number of bytes of code generated, as we nop pad the + * rest in generic code. + */ + unsigned (*patch)(u8 type, u16 clobber, void *insnbuf, + unsigned long addr, unsigned len); + + /* Basic arch-specific setup */ + void (*arch_setup)(void); + char *(*memory_setup)(void); + void (*post_allocator_init)(void); + + /* Print a banner to identify the environment */ + void (*banner)(void); +}; + + +struct pv_lazy_ops { + /* Set deferred update mode, used for batching operations. 
*/ + void (*enter)(void); + void (*leave)(void); +}; + +struct pv_time_ops { + void (*time_init)(void); + + /* Set and set time of day */ + unsigned long (*get_wallclock)(void); + int (*set_wallclock)(unsigned long); + + unsigned long long (*sched_clock)(void); + unsigned long (*get_tsc_khz)(void); +}; + +struct pv_cpu_ops { + /* hooks for various privileged instructions */ + unsigned long (*get_debugreg)(int regno); + void (*set_debugreg)(int regno, unsigned long value); + + void (*clts)(void); + + unsigned long (*read_cr0)(void); + void (*write_cr0)(unsigned long); + + unsigned long (*read_cr4_safe)(void); + unsigned long (*read_cr4)(void); + void (*write_cr4)(unsigned long); + +#ifdef CONFIG_X86_64 + unsigned long (*read_cr8)(void); + void (*write_cr8)(unsigned long); +#endif + + /* Segment descriptor handling */ + void (*load_tr_desc)(void); + void (*load_gdt)(const struct desc_ptr *); + void (*load_idt)(const struct desc_ptr *); + void (*store_gdt)(struct desc_ptr *); + void (*store_idt)(struct desc_ptr *); + void (*set_ldt)(const void *desc, unsigned entries); + unsigned long (*store_tr)(void); + void (*load_tls)(struct thread_struct *t, unsigned int cpu); +#ifdef CONFIG_X86_64 + void (*load_gs_index)(unsigned int idx); +#endif + void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum, + const void *desc); + void (*write_gdt_entry)(struct desc_struct *, + int entrynum, const void *desc, int size); + void (*write_idt_entry)(gate_desc *, + int entrynum, const gate_desc *gate); + void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries); + void (*free_ldt)(struct desc_struct *ldt, unsigned entries); + + void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t); + + void (*set_iopl_mask)(unsigned mask); + + void (*wbinvd)(void); + void (*io_delay)(void); + + /* cpuid emulation, mostly so that caps bits can be disabled */ + void (*cpuid)(unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx); + + /* MSR, PMC and TSR operations. + err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */ + u64 (*read_msr_amd)(unsigned int msr, int *err); + u64 (*read_msr)(unsigned int msr, int *err); + int (*write_msr)(unsigned int msr, unsigned low, unsigned high); + + u64 (*read_tsc)(void); + u64 (*read_pmc)(int counter); + unsigned long long (*read_tscp)(unsigned int *aux); + + /* + * Atomically enable interrupts and return to userspace. This + * is only ever used to return to 32-bit processes; in a + * 64-bit kernel, it's used for 32-on-64 compat processes, but + * never native 64-bit processes. (Jump, not call.) + */ + void (*irq_enable_sysexit)(void); + + /* + * Switch to usermode gs and return to 64-bit usermode using + * sysret. Only used in 64-bit kernels to return to 64-bit + * processes. Usermode register state, including %rsp, must + * already be restored. + */ + void (*usergs_sysret64)(void); + + /* + * Switch to usermode gs and return to 32-bit usermode using + * sysret. Used to return to 32-on-64 compat processes. + * Other usermode register state, including %esp, must already + * be restored. + */ + void (*usergs_sysret32)(void); + + /* Normal iret. Jump to this with the standard iret stack + frame set up. */ + void (*iret)(void); + + void (*swapgs)(void); + + void (*start_context_switch)(struct task_struct *prev); + void (*end_context_switch)(struct task_struct *next); +}; + +struct pv_irq_ops { + void (*init_IRQ)(void); + + /* + * Get/set interrupt state. 
save_fl and restore_fl are only + * expected to use X86_EFLAGS_IF; all other bits + * returned from save_fl are undefined, and may be ignored by + * restore_fl. + * + * NOTE: These functions callers expect the callee to preserve + * more registers than the standard C calling convention. + */ + struct paravirt_callee_save save_fl; + struct paravirt_callee_save restore_fl; + struct paravirt_callee_save irq_disable; + struct paravirt_callee_save irq_enable; + + void (*safe_halt)(void); + void (*halt)(void); + +#ifdef CONFIG_X86_64 + void (*adjust_exception_frame)(void); +#endif +}; + +struct pv_apic_ops { +#ifdef CONFIG_X86_LOCAL_APIC + void (*setup_boot_clock)(void); + void (*setup_secondary_clock)(void); + + void (*startup_ipi_hook)(int phys_apicid, + unsigned long start_eip, + unsigned long start_esp); +#endif +}; + +struct pv_mmu_ops { + /* + * Called before/after init_mm pagetable setup. setup_start + * may reset %cr3, and may pre-install parts of the pagetable; + * pagetable setup is expected to preserve any existing + * mapping. + */ + void (*pagetable_setup_start)(pgd_t *pgd_base); + void (*pagetable_setup_done)(pgd_t *pgd_base); + + unsigned long (*read_cr2)(void); + void (*write_cr2)(unsigned long); + + unsigned long (*read_cr3)(void); + void (*write_cr3)(unsigned long); + + /* + * Hooks for intercepting the creation/use/destruction of an + * mm_struct. + */ + void (*activate_mm)(struct mm_struct *prev, + struct mm_struct *next); + void (*dup_mmap)(struct mm_struct *oldmm, + struct mm_struct *mm); + void (*exit_mmap)(struct mm_struct *mm); + + + /* TLB operations */ + void (*flush_tlb_user)(void); + void (*flush_tlb_kernel)(void); + void (*flush_tlb_single)(unsigned long addr); + void (*flush_tlb_others)(const struct cpumask *cpus, + struct mm_struct *mm, + unsigned long va); + + /* Hooks for allocating and freeing a pagetable top-level */ + int (*pgd_alloc)(struct mm_struct *mm); + void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd); + + /* + * Hooks for allocating/releasing pagetable pages when they're + * attached to a pagetable + */ + void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn); + void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn); + void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count); + void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn); + void (*release_pte)(unsigned long pfn); + void (*release_pmd)(unsigned long pfn); + void (*release_pud)(unsigned long pfn); + + /* Pagetable manipulation functions */ + void (*set_pte)(pte_t *ptep, pte_t pteval); + void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval); + void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); + void (*pte_update)(struct mm_struct *mm, unsigned long addr, + pte_t *ptep); + void (*pte_update_defer)(struct mm_struct *mm, + unsigned long addr, pte_t *ptep); + + pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr, + pte_t *ptep); + void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte); + + struct paravirt_callee_save pte_val; + struct paravirt_callee_save make_pte; + + struct paravirt_callee_save pgd_val; + struct paravirt_callee_save make_pgd; + +#if PAGETABLE_LEVELS >= 3 +#ifdef CONFIG_X86_PAE + void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); + void (*pte_clear)(struct mm_struct *mm, unsigned long addr, + pte_t *ptep); + void (*pmd_clear)(pmd_t *pmdp); + +#endif /* CONFIG_X86_PAE */ + + void (*set_pud)(pud_t *pudp, pud_t pudval); + + 
struct paravirt_callee_save pmd_val; + struct paravirt_callee_save make_pmd; + +#if PAGETABLE_LEVELS == 4 + struct paravirt_callee_save pud_val; + struct paravirt_callee_save make_pud; + + void (*set_pgd)(pgd_t *pudp, pgd_t pgdval); +#endif /* PAGETABLE_LEVELS == 4 */ +#endif /* PAGETABLE_LEVELS >= 3 */ + +#ifdef CONFIG_HIGHPTE + void *(*kmap_atomic_pte)(struct page *page, enum km_type type); +#endif + + struct pv_lazy_ops lazy_mode; + + /* dom0 ops */ + + /* Sometimes the physical address is a pfn, and sometimes its + an mfn. We can tell which is which from the index. */ + void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx, + phys_addr_t phys, pgprot_t flags); +}; + +struct raw_spinlock; +struct pv_lock_ops { + int (*spin_is_locked)(struct raw_spinlock *lock); + int (*spin_is_contended)(struct raw_spinlock *lock); + void (*spin_lock)(struct raw_spinlock *lock); + void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags); + int (*spin_trylock)(struct raw_spinlock *lock); + void (*spin_unlock)(struct raw_spinlock *lock); +}; + +/* This contains all the paravirt structures: we get a convenient + * number for each function using the offset which we use to indicate + * what to patch. */ +struct paravirt_patch_template { + struct pv_init_ops pv_init_ops; + struct pv_time_ops pv_time_ops; + struct pv_cpu_ops pv_cpu_ops; + struct pv_irq_ops pv_irq_ops; + struct pv_apic_ops pv_apic_ops; + struct pv_mmu_ops pv_mmu_ops; + struct pv_lock_ops pv_lock_ops; +}; + +extern struct pv_info pv_info; +extern struct pv_init_ops pv_init_ops; +extern struct pv_time_ops pv_time_ops; +extern struct pv_cpu_ops pv_cpu_ops; +extern struct pv_irq_ops pv_irq_ops; +extern struct pv_apic_ops pv_apic_ops; +extern struct pv_mmu_ops pv_mmu_ops; +extern struct pv_lock_ops pv_lock_ops; + +#define PARAVIRT_PATCH(x) \ + (offsetof(struct paravirt_patch_template, x) / sizeof(void *)) + +#define paravirt_type(op) \ + [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \ + [paravirt_opptr] "i" (&(op)) +#define paravirt_clobber(clobber) \ + [paravirt_clobber] "i" (clobber) + +/* + * Generate some code, and mark it as patchable by the + * apply_paravirt() alternate instruction patcher. + */ +#define _paravirt_alt(insn_string, type, clobber) \ + "771:\n\t" insn_string "\n" "772:\n" \ + ".pushsection .parainstructions,\"a\"\n" \ + _ASM_ALIGN "\n" \ + _ASM_PTR " 771b\n" \ + " .byte " type "\n" \ + " .byte 772b-771b\n" \ + " .short " clobber "\n" \ + ".popsection\n" + +/* Generate patchable code, with the default asm parameters. */ +#define paravirt_alt(insn_string) \ + _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]") + +/* Simple instruction patching code. 
*/
+#define DEF_NATIVE(ops, name, code)					\
+	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
+	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
+
+unsigned paravirt_patch_nop(void);
+unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
+unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
+unsigned paravirt_patch_ignore(unsigned len);
+unsigned paravirt_patch_call(void *insnbuf,
+			     const void *target, u16 tgt_clobbers,
+			     unsigned long addr, u16 site_clobbers,
+			     unsigned len);
+unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
+			    unsigned long addr, unsigned len);
+unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+				unsigned long addr, unsigned len);
+
+unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
+			      const char *start, const char *end);
+
+unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
+		      unsigned long addr, unsigned len);
+
+int paravirt_disable_iospace(void);
+
+/*
+ * This generates an indirect call based on the operation type number.
+ * The type number, computed in PARAVIRT_PATCH, is derived from the
+ * offset into the paravirt_patch_template structure, and can therefore be
+ * freely converted back into a structure offset.
+ */
+#define PARAVIRT_CALL	"call *%c[paravirt_opptr];"
+
+/*
+ * These macros are intended to wrap calls through one of the paravirt
+ * ops structs, so that they can be later identified and patched at
+ * runtime.
+ *
+ * Normally, a call to a pv_op function is a simple indirect call:
+ * (pv_op_struct.operations)(args...).
+ *
+ * Unfortunately, this is a relatively slow operation for modern CPUs,
+ * because it cannot necessarily determine what the destination
+ * address is.  In this case, the address is a runtime constant, so at
+ * the very least we can patch the call to be a simple direct call, or
+ * ideally, patch an inline implementation into the callsite.  (Direct
+ * calls are essentially free, because the call and return addresses
+ * are completely predictable.)
+ *
+ * For i386, these macros rely on the standard gcc "regparm(3)" calling
+ * convention, in which the first three arguments are placed in %eax,
+ * %edx, %ecx (in that order), and the remaining arguments are placed
+ * on the stack.  All caller-save registers (eax,edx,ecx) are expected
+ * to be modified (either clobbered or used for return values).
+ * X86_64, on the other hand, already specifies a register-based calling
+ * convention, returning in %rax, with parameters going in %rdi, %rsi,
+ * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
+ * special handling for dealing with 4 arguments, unlike i386.
+ * However, x86_64 also has to clobber all caller-saved registers, which
+ * unfortunately amounts to quite a few of them (r8 - r11).
+ *
+ * The call instruction itself is marked by placing its start address
+ * and size into the .parainstructions section, so that
+ * apply_paravirt() in arch/i386/kernel/alternative.c can do the
+ * appropriate patching under the control of the backend pv_init_ops
+ * implementation.
+ *
+ * Unfortunately there's no way to get gcc to generate the args setup
+ * for the call, and then allow the call itself to be generated by an
+ * inline asm.  Because of this, we must do the complete arg setup and
+ * return value handling from within these macros.  This is fairly
+ * cumbersome.
+ *
+ * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
+ * It could be extended to more arguments, but there would be little
+ * to be gained from that.  For each number of arguments, there are
+ * the two VCALL and CALL variants for void and non-void functions.
+ *
+ * When there is a return value, the invoker of the macro must specify
+ * the return type.  The macro then uses sizeof() on that type to
+ * determine whether it's a 32- or 64-bit value, and places the return
+ * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
+ * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
+ * the return value size.
+ *
+ * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
+ * arguments, in low,high order.
+ *
+ * Small structures are passed and returned in registers.  The macro
+ * calling convention can't directly deal with this, so the wrapper
+ * functions must do this.
+ *
+ * These PVOP_* macros are only defined within this header.  This
+ * means that all uses must be wrapped in inline functions.  This also
+ * makes sure the incoming and outgoing types are always correct.
+ */
+#ifdef CONFIG_X86_32
+#define PVOP_VCALL_ARGS				\
+	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
+#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS
+
+#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
+#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
+#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))
+
+#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
+					"=c" (__ecx)
+#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS
+
+#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
+#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS
+
+#define EXTRA_CLOBBERS
+#define VEXTRA_CLOBBERS
+#else  /* CONFIG_X86_64 */
+#define PVOP_VCALL_ARGS					\
+	unsigned long __edi = __edi, __esi = __esi,	\
+		__edx = __edx, __ecx = __ecx
+#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS, __eax
+
+#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
+#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
+#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
+#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))
+
+#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
+				"=S" (__esi), "=d" (__edx),		\
+				"=c" (__ecx)
+#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)
+
+#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
+#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS
+
+#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
+#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
+#endif	/* CONFIG_X86_32 */
+
+#ifdef CONFIG_PARAVIRT_DEBUG
+#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
+#else
+#define PVOP_TEST_NULL(op)	((void)op)
+#endif
+
+#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,	\
+		      pre, post, ...)					\
+	({								\
+		rettype __ret;						\
+		PVOP_CALL_ARGS;						\
+		PVOP_TEST_NULL(op);					\
+		/* This is 32-bit specific, but is okay in 64-bit */	\
+		/* since this condition will never hold */		\
+		if (sizeof(rettype) > sizeof(unsigned long)) {		\
+			asm volatile(pre				\
+				     paravirt_alt(PARAVIRT_CALL)	\
+				     post				\
+				     : call_clbr			\
+				     : paravirt_type(op),		\
+				       paravirt_clobber(clbr),		\
+				       ##__VA_ARGS__			\
+				     : "memory", "cc" extra_clbr);	\
+			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
+		} else {						\
+			asm volatile(pre				\
+				     paravirt_alt(PARAVIRT_CALL)	\
+				     post				\
+				     : call_clbr			\
+				     : paravirt_type(op),		\
+				       paravirt_clobber(clbr),		\
+				       ##__VA_ARGS__			\
+				     : "memory", "cc" extra_clbr);	\
+			__ret = (rettype)__eax;				\
+		}							\
+		__ret;							\
+	})
+
+#define __PVOP_CALL(rettype, op, pre, post, ...)
\
+	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
+		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
+
+#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
+	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
+		      PVOP_CALLEE_CLOBBERS, ,				\
+		      pre, post, ##__VA_ARGS__)
+
+
+#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)	\
+	({								\
+		PVOP_VCALL_ARGS;					\
+		PVOP_TEST_NULL(op);					\
+		asm volatile(pre					\
+			     paravirt_alt(PARAVIRT_CALL)		\
+			     post					\
+			     : call_clbr				\
+			     : paravirt_type(op),			\
+			       paravirt_clobber(clbr),			\
+			       ##__VA_ARGS__				\
+			     : "memory", "cc" extra_clbr);		\
+	})
+
+#define __PVOP_VCALL(op, pre, post, ...)				\
+	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
+		       VEXTRA_CLOBBERS,					\
+		       pre, post, ##__VA_ARGS__)
+
+#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
+	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
+		       PVOP_VCALLEE_CLOBBERS, ,				\
+		       pre, post, ##__VA_ARGS__)
+
+
+
+#define PVOP_CALL0(rettype, op)						\
+	__PVOP_CALL(rettype, op, "", "")
+#define PVOP_VCALL0(op)							\
+	__PVOP_VCALL(op, "", "")
+
+#define PVOP_CALLEE0(rettype, op)					\
+	__PVOP_CALLEESAVE(rettype, op, "", "")
+#define PVOP_VCALLEE0(op)						\
+	__PVOP_VCALLEESAVE(op, "", "")
+
+
+#define PVOP_CALL1(rettype, op, arg1)					\
+	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+#define PVOP_VCALL1(op, arg1)						\
+	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
+
+#define PVOP_CALLEE1(rettype, op, arg1)					\
+	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+#define PVOP_VCALLEE1(op, arg1)						\
+	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
+
+
+#define PVOP_CALL2(rettype, op, arg1, arg2)				\
+	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
+		    PVOP_CALL_ARG2(arg2))
+#define PVOP_VCALL2(op, arg1, arg2)					\
+	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
+		     PVOP_CALL_ARG2(arg2))
+
+#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
+	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
+			  PVOP_CALL_ARG2(arg2))
+#define PVOP_VCALLEE2(op, arg1, arg2)					\
+	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
+			   PVOP_CALL_ARG2(arg2))
+
+
+#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
+	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
+		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
+#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
+	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
+		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
+
+/* This is the only difference in x86_64.
We can make it much simpler */
+#ifdef CONFIG_X86_32
+#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
+	__PVOP_CALL(rettype, op,					\
+		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
+		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
+		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
+#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
+	__PVOP_VCALL(op,						\
+		     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
+		     "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
+		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
+#else
+#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
+	__PVOP_CALL(rettype, op, "", "",				\
+		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
+		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
+#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
+	__PVOP_VCALL(op, "", "",					\
+		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
+		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
+#endif
+
+/* Lazy mode for batching updates / context switch */
+enum paravirt_lazy_mode {
+	PARAVIRT_LAZY_NONE,
+	PARAVIRT_LAZY_MMU,
+	PARAVIRT_LAZY_CPU,
+};
+
+enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
+void paravirt_start_context_switch(struct task_struct *prev);
+void paravirt_end_context_switch(struct task_struct *next);
+
+void paravirt_enter_lazy_mmu(void);
+void paravirt_leave_lazy_mmu(void);
+
+void _paravirt_nop(void);
+u32 _paravirt_ident_32(u32);
+u64 _paravirt_ident_64(u64);
+
+#define paravirt_nop	((void *)_paravirt_nop)
+
+/* These all sit in the .parainstructions section to tell us what to patch. */
+struct paravirt_patch_site {
+	u8 *instr;		/* original instructions */
+	u8 instrtype;		/* type of this instruction */
+	u8 len;			/* length of original instruction */
+	u16 clobbers;		/* what registers you may clobber */
+};
+
+extern struct paravirt_patch_site __parainstructions[],
+	__parainstructions_end[];
+
+#endif	/* __ASSEMBLY__ */
+
+#endif	/* _ASM_X86_PARAVIRT_TYPES_H */
-- cgit v1.2.3-70-g09d2


From e6e9cac8c3417b43498b243c1f8f11780e157168 Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Fri, 24 Apr 2009 00:40:59 -0700
Subject: x86: split out core __math_state_restore

Split the core fpu state restoration out into __math_state_restore, which
assumes that cr0.TS is clear and that the fpu context has been initialized.

This will be used during context switch.  There are two reasons this is
desirable:

- There's a small clarification: when __switch_to() calls math_state_restore,
  it relies on the fact that tsk_used_math() returns true, and so will never
  do a blocking init_fpu().  __math_state_restore() does not have (or need)
  that logic, so the question never arises.

- It allows the clts() to be moved earlier in __switch_to() so it can be
  performed while cpu context updates are batched (will be done in a later
  patch; see the sketch below).
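A minimal sketch of the resulting split (the real hunks follow below; this is
only an illustration of the division of labour, with details elided):

	/* trap-time entry point: may block in init_fpu(), must clear cr0.TS */
	asmlinkage void math_state_restore(void)
	{
		...			/* lazy allocation / sanity checks */
		clts();			/* allow maths ops (or we recurse) */
		__math_state_restore();
	}

	/* core helper: assumes cr0.TS is clear and the fpu state is ready */
	void __math_state_restore(void)
	{
		...			/* restore_fpu_checking() etc. */
	}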
[ Impact: refactor code to make reuse cleaner; no functional change ] Signed-off-by: Jeremy Fitzhardinge Cc: Alok Kataria Cc: Rusty Russell --- arch/x86/include/asm/i387.h | 1 + arch/x86/kernel/traps.c | 33 +++++++++++++++++++++++---------- 2 files changed, 24 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index 175adf58dd4..2e7529295f5 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h @@ -26,6 +26,7 @@ extern void fpu_init(void); extern void mxcsr_feature_mask_init(void); extern int init_fpu(struct task_struct *child); extern asmlinkage void math_state_restore(void); +extern void __math_state_restore(void); extern void init_thread_xstate(void); extern int dump_fpu(struct pt_regs *, struct user_i387_struct *); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 5f935f0d586..71b91669ad1 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -813,6 +813,28 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void) { } +/* + * __math_state_restore assumes that cr0.TS is already clear and the + * fpu state is all ready for use. Used during context switch. + */ +void __math_state_restore(void) +{ + struct thread_info *thread = current_thread_info(); + struct task_struct *tsk = thread->task; + + /* + * Paranoid restore. send a SIGSEGV if we fail to restore the state. + */ + if (unlikely(restore_fpu_checking(tsk))) { + stts(); + force_sig(SIGSEGV, tsk); + return; + } + + thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */ + tsk->fpu_counter++; +} + /* * 'math_state_restore()' saves the current math information in the * old math state array, and gets the new ones from the current task @@ -844,17 +866,8 @@ asmlinkage void math_state_restore(void) } clts(); /* Allow maths ops (or we recurse) */ - /* - * Paranoid restore. send a SIGSEGV if we fail to restore the state. - */ - if (unlikely(restore_fpu_checking(tsk))) { - stts(); - force_sig(SIGSEGV, tsk); - return; - } - thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */ - tsk->fpu_counter++; + __math_state_restore(); } EXPORT_SYMBOL_GPL(math_state_restore); -- cgit v1.2.3-70-g09d2 From 2fcddce10f6771cfa0c56fd1e826d50d67d100b7 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 24 Apr 2009 00:45:26 -0700 Subject: x86-32: make sure clts is batched during context switch If we're preloading the fpu state during context switch, make sure the clts happens while we're batching the cpu context update, then do the actual __math_state_restore once the updates are flushed. This allows more efficient context switches when running paravirtualized, as all the hypercalls can be folded together into one. [ Impact: optimise paravirtual FPU context switch ] Signed-off-by: Jeremy Fitzhardinge Cc: Alok Kataria Cc: Rusty Russell --- arch/x86/kernel/process_32.c | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 59f4524984a..a80eddd4165 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -350,14 +350,21 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) *next = &next_p->thread; int cpu = smp_processor_id(); struct tss_struct *tss = &per_cpu(init_tss, cpu); + bool preload_fpu; /* never put a printk in __switch_to... 
printk() calls wake_up*() indirectly */ - __unlazy_fpu(prev_p); + /* + * If the task has used fpu the last 5 timeslices, just do a full + * restore of the math state immediately to avoid the trap; the + * chances of needing FPU soon are obviously high now + */ + preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5; + __unlazy_fpu(prev_p); /* we're going to use this soon, after a few expensive things */ - if (next_p->fpu_counter > 5) + if (preload_fpu) prefetch(next->xstate); /* @@ -398,6 +405,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) __switch_to_xtra(prev_p, next_p, tss); + /* If we're going to preload the fpu context, make sure clts + is run while we're batching the cpu state updates. */ + if (preload_fpu) + clts(); + /* * Leave lazy mode, flushing any hypercalls made here. * This must be done before restoring TLS segments so @@ -407,15 +419,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) */ arch_end_context_switch(next_p); - /* If the task has used fpu the last 5 timeslices, just do a full - * restore of the math state immediately to avoid the trap; the - * chances of needing FPU soon are obviously high now - * - * tsk_used_math() checks prevent calling math_state_restore(), - * which can sleep in the case of !tsk_used_math() - */ - if (tsk_used_math(next_p) && next_p->fpu_counter > 5) - math_state_restore(); + if (preload_fpu) + __math_state_restore(); /* * Restore %gs if needed (which is common) -- cgit v1.2.3-70-g09d2 From 16d9dbf0c2bd167fdd942b83592d59696c7b73bd Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 24 Apr 2009 00:50:27 -0700 Subject: x86-64: move unlazy_fpu() into lazy cpu state part of context switch Make sure that unlazy_fpu()'s stts gets batched along with the other cpu state changes during context switch. (32-bit already does this.) This makes sure it gets batched when running paravirtualized. [ Impact: optimise paravirtual FPU context switch ] Signed-off-by: Jeremy Fitzhardinge Cc: Alok Kataria Cc: Rusty Russell --- arch/x86/kernel/process_64.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index ebefb5407b9..c9b8904736d 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -419,6 +419,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) load_TLS(next, cpu); + /* Must be after DS reload */ + unlazy_fpu(prev_p); + /* * Leave lazy mode, flushing any hypercalls made here. * This must be done before restoring TLS segments so @@ -459,9 +462,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) wrmsrl(MSR_KERNEL_GS_BASE, next->gs); prev->gsindex = gsindex; - /* Must be after DS reload */ - unlazy_fpu(prev_p); - /* * Switch the PDA and FPU contexts. */ -- cgit v1.2.3-70-g09d2 From 17950c5b243f99cbabef173415ee988c52104d7e Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Fri, 24 Apr 2009 01:01:01 -0700 Subject: x86-64: move clts into batch cpu state updates when preloading fpu When a task is likely to be using the fpu, we preload its state during the context switch, rather than waiting for it to run an fpu instruction. Make sure the clts() happens while we're doing batched fpu state updates to optimise paravirtualized context switches. 
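In outline, using the names from the hunks below and from the preceding
32-bit patch, the 64-bit switch path becomes (a sketch, not the complete
function):

	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
	...
	unlazy_fpu(prev_p);			/* must be after DS reload */
	if (preload_fpu)
		clts();				/* batched with the other cpu state updates */
	arch_end_context_switch(next_p);	/* flushes the batch */
	...
	if (preload_fpu)
		__math_state_restore();		/* cr0.TS is already clear */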
[ Impact: optimise paravirtual FPU context switch ]

Signed-off-by: Jeremy Fitzhardinge
Cc: Alok Kataria
Cc: Rusty Russell
---
 arch/x86/kernel/process_64.c | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index c9b8904736d..a28279dbb07 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -386,9 +386,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 	unsigned fsindex, gsindex;
+	bool preload_fpu;
+
+	/*
+	 * If the task has used fpu the last 5 timeslices, just do a full
+	 * restore of the math state immediately to avoid the trap; the
+	 * chances of needing FPU soon are obviously high now
+	 */
+	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

 	/* we're going to use this soon, after a few expensive things */
-	if (next_p->fpu_counter > 5)
+	if (preload_fpu)
 		prefetch(next->xstate);

 	/*
@@ -422,6 +430,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/* Must be after DS reload */
 	unlazy_fpu(prev_p);

+	/* Make sure cpu is ready for new context */
+	if (preload_fpu)
+		clts();
+
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
@@ -480,15 +492,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	  task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
 		__switch_to_xtra(prev_p, next_p, tss);

-	/* If the task has used fpu the last 5 timeslices, just do a full
-	 * restore of the math state immediately to avoid the trap; the
-	 * chances of needing FPU soon are obviously high now
-	 *
-	 * tsk_used_math() checks prevent calling math_state_restore(),
-	 * which can sleep in the case of !tsk_used_math()
+	/*
+	 * Preload the FPU context, now that we've determined that the
+	 * task is likely to be using it.
 	 */
-	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
-		math_state_restore();
+	if (preload_fpu)
+		__math_state_restore();

 	return prev_p;
 }
-- cgit v1.2.3-70-g09d2


From 03b56ce54143a3a69d4fea6ff8130b1c903a47ce Mon Sep 17 00:00:00 2001
From: Jarod Wilson
Date: Thu, 18 Jun 2009 19:52:59 +0800
Subject: crypto: des_s390 - Permit weak keys unless REQ_WEAK_KEY set

Just started running FIPS CAVS test vectors through an s390x system for
giggles, and discovered that I missed patching s390's arch-specific des3
implementation with an earlier des3 patch to permit weak keys.  This change
adds the same flag tweaks as ad79cdd77fc1466e45cf923890f66bcfe7c43f12
(crypto: des3_ede - permit weak keys unless REQ_WEAK_KEY set) for s390's
des3 implementation, which now yields the expected test results.
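The change follows the same pattern as the earlier des3_ede fix: only reject
a repeated (weak) key when the caller asked for weak-key checking, and report
it as a weak key rather than a bad key schedule.  Roughly:

	if (!memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
	    (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}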
Signed-off-by: Jarod Wilson Signed-off-by: Herbert Xu --- arch/s390/crypto/des_s390.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index 4aba83b3159..2bc479ab3a6 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c @@ -250,8 +250,9 @@ static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key, const u8 *temp_key = key; u32 *flags = &tfm->crt_flags; - if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) { - *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED; + if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE)) && + (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } for (i = 0; i < 2; i++, temp_key += DES_KEY_SIZE) { @@ -411,9 +412,9 @@ static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key, if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2], - DES_KEY_SIZE))) { - - *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED; + DES_KEY_SIZE)) && + (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + *flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) { -- cgit v1.2.3-70-g09d2 From 21e70878215f620fe99ea7d7c74bc641aeec932f Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Thu, 18 Jun 2009 17:09:27 +0530 Subject: x86: oprofile/op_model_amd.c set return values for op_amd_handle_ibs() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit op_amd_handle_ibs() should return 0 when IBS is not present or not defined. Fix compilation warning: CC [M] arch/x86/oprofile/op_model_amd.o arch/x86/oprofile/op_model_amd.c: In function ‘op_amd_handle_ibs’: arch/x86/oprofile/op_model_amd.c:217: warning: no return statement in function returning non-void Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index cc930467575..e95268eb922 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -132,7 +132,7 @@ op_amd_handle_ibs(struct pt_regs * const regs, struct op_entry entry; if (!has_ibs) - return 1; + return 0; if (ibs_config.fetch_enabled) { rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl); @@ -214,7 +214,10 @@ static void op_amd_stop_ibs(void) #else static inline int op_amd_handle_ibs(struct pt_regs * const regs, - struct op_msrs const * const msrs) { } + struct op_msrs const * const msrs) +{ + return 0; +} static inline void op_amd_start_ibs(void) { } static inline void op_amd_stop_ibs(void) { } -- cgit v1.2.3-70-g09d2 From 4adc667593f83a18a8e54ce94f250fd166a272ac Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 18 Jun 2009 21:48:16 +0200 Subject: x86: add copies of some headers to convert to asm-generic Just an intermediate step to make reviewing easier. These files are identical copies of the existing headers. Signed-off-by: Arnd Bergmann LKML-Reference: Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/generic-mman.h | 20 ++++++++ arch/x86/include/asm/generic-module.h | 80 ++++++++++++++++++++++++++++++ arch/x86/include/asm/generic-scatterlist.h | 33 ++++++++++++ arch/x86/include/asm/generic-types.h | 30 +++++++++++ arch/x86/include/asm/generic-ucontext.h | 18 +++++++ 5 files changed, 181 insertions(+) create mode 100644 arch/x86/include/asm/generic-mman.h create mode 100644 arch/x86/include/asm/generic-module.h create mode 100644 arch/x86/include/asm/generic-scatterlist.h create mode 100644 arch/x86/include/asm/generic-types.h create mode 100644 arch/x86/include/asm/generic-ucontext.h (limited to 'arch') diff --git a/arch/x86/include/asm/generic-mman.h b/arch/x86/include/asm/generic-mman.h new file mode 100644 index 00000000000..751af2550ed --- /dev/null +++ b/arch/x86/include/asm/generic-mman.h @@ -0,0 +1,20 @@ +#ifndef _ASM_X86_MMAN_H +#define _ASM_X86_MMAN_H + +#include + +#define MAP_32BIT 0x40 /* only give out 32bit addresses */ + +#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ +#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ +#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ +#define MAP_LOCKED 0x2000 /* pages are locked */ +#define MAP_NORESERVE 0x4000 /* don't check for reservations */ +#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ +#define MAP_NONBLOCK 0x10000 /* do not block on IO */ +#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ + +#define MCL_CURRENT 1 /* lock all current mappings */ +#define MCL_FUTURE 2 /* lock all future mappings */ + +#endif /* _ASM_X86_MMAN_H */ diff --git a/arch/x86/include/asm/generic-module.h b/arch/x86/include/asm/generic-module.h new file mode 100644 index 00000000000..47d62743c4d --- /dev/null +++ b/arch/x86/include/asm/generic-module.h @@ -0,0 +1,80 @@ +#ifndef _ASM_X86_MODULE_H +#define _ASM_X86_MODULE_H + +/* x86_32/64 are simple */ +struct mod_arch_specific {}; + +#ifdef CONFIG_X86_32 +# define Elf_Shdr Elf32_Shdr +# define Elf_Sym Elf32_Sym +# define Elf_Ehdr Elf32_Ehdr +#else +# define Elf_Shdr Elf64_Shdr +# define Elf_Sym Elf64_Sym +# define Elf_Ehdr Elf64_Ehdr +#endif + +#ifdef CONFIG_X86_64 +/* X86_64 does not define MODULE_PROC_FAMILY */ +#elif defined CONFIG_M386 +#define MODULE_PROC_FAMILY "386 " +#elif defined CONFIG_M486 +#define MODULE_PROC_FAMILY "486 " +#elif defined CONFIG_M586 +#define MODULE_PROC_FAMILY "586 " +#elif defined CONFIG_M586TSC +#define MODULE_PROC_FAMILY "586TSC " +#elif defined CONFIG_M586MMX +#define MODULE_PROC_FAMILY "586MMX " +#elif defined CONFIG_MCORE2 +#define MODULE_PROC_FAMILY "CORE2 " +#elif defined CONFIG_M686 +#define MODULE_PROC_FAMILY "686 " +#elif defined CONFIG_MPENTIUMII +#define MODULE_PROC_FAMILY "PENTIUMII " +#elif defined CONFIG_MPENTIUMIII +#define MODULE_PROC_FAMILY "PENTIUMIII " +#elif defined CONFIG_MPENTIUMM +#define MODULE_PROC_FAMILY "PENTIUMM " +#elif defined CONFIG_MPENTIUM4 +#define MODULE_PROC_FAMILY "PENTIUM4 " +#elif defined CONFIG_MK6 +#define MODULE_PROC_FAMILY "K6 " +#elif defined CONFIG_MK7 +#define MODULE_PROC_FAMILY "K7 " +#elif defined CONFIG_MK8 +#define MODULE_PROC_FAMILY "K8 " +#elif defined CONFIG_X86_ELAN +#define MODULE_PROC_FAMILY "ELAN " +#elif defined CONFIG_MCRUSOE +#define MODULE_PROC_FAMILY "CRUSOE " +#elif defined CONFIG_MEFFICEON +#define MODULE_PROC_FAMILY "EFFICEON " +#elif defined CONFIG_MWINCHIPC6 +#define MODULE_PROC_FAMILY "WINCHIPC6 " +#elif defined CONFIG_MWINCHIP3D +#define MODULE_PROC_FAMILY "WINCHIP3D " +#elif defined 
CONFIG_MCYRIXIII +#define MODULE_PROC_FAMILY "CYRIXIII " +#elif defined CONFIG_MVIAC3_2 +#define MODULE_PROC_FAMILY "VIAC3-2 " +#elif defined CONFIG_MVIAC7 +#define MODULE_PROC_FAMILY "VIAC7 " +#elif defined CONFIG_MGEODEGX1 +#define MODULE_PROC_FAMILY "GEODEGX1 " +#elif defined CONFIG_MGEODE_LX +#define MODULE_PROC_FAMILY "GEODE " +#else +#error unknown processor family +#endif + +#ifdef CONFIG_X86_32 +# ifdef CONFIG_4KSTACKS +# define MODULE_STACKSIZE "4KSTACKS " +# else +# define MODULE_STACKSIZE "" +# endif +# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE +#endif + +#endif /* _ASM_X86_MODULE_H */ diff --git a/arch/x86/include/asm/generic-scatterlist.h b/arch/x86/include/asm/generic-scatterlist.h new file mode 100644 index 00000000000..263d397d2ee --- /dev/null +++ b/arch/x86/include/asm/generic-scatterlist.h @@ -0,0 +1,33 @@ +#ifndef _ASM_X86_SCATTERLIST_H +#define _ASM_X86_SCATTERLIST_H + +#include + +struct scatterlist { +#ifdef CONFIG_DEBUG_SG + unsigned long sg_magic; +#endif + unsigned long page_link; + unsigned int offset; + unsigned int length; + dma_addr_t dma_address; + unsigned int dma_length; +}; + +#define ARCH_HAS_SG_CHAIN +#define ISA_DMA_THRESHOLD (0x00ffffff) + +/* + * These macros should be used after a pci_map_sg call has been done + * to get bus addresses of each of the SG entries and their lengths. + * You should only work with the number of sg entries pci_map_sg + * returns. + */ +#define sg_dma_address(sg) ((sg)->dma_address) +#ifdef CONFIG_X86_32 +# define sg_dma_len(sg) ((sg)->length) +#else +# define sg_dma_len(sg) ((sg)->dma_length) +#endif + +#endif /* _ASM_X86_SCATTERLIST_H */ diff --git a/arch/x86/include/asm/generic-types.h b/arch/x86/include/asm/generic-types.h new file mode 100644 index 00000000000..09b97745772 --- /dev/null +++ b/arch/x86/include/asm/generic-types.h @@ -0,0 +1,30 @@ +#ifndef _ASM_X86_TYPES_H +#define _ASM_X86_TYPES_H + +#include + +#ifndef __ASSEMBLY__ + +typedef unsigned short umode_t; + +#endif /* __ASSEMBLY__ */ + +/* + * These aren't exported outside the kernel to avoid name space clashes + */ +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +typedef u64 dma64_addr_t; +#if defined(CONFIG_X86_64) || defined(CONFIG_HIGHMEM64G) +/* DMA addresses come in 32-bit and 64-bit flavours. */ +typedef u64 dma_addr_t; +#else +typedef u32 dma_addr_t; +#endif + +#endif /* __ASSEMBLY__ */ +#endif /* __KERNEL__ */ + +#endif /* _ASM_X86_TYPES_H */ diff --git a/arch/x86/include/asm/generic-ucontext.h b/arch/x86/include/asm/generic-ucontext.h new file mode 100644 index 00000000000..87324cf439d --- /dev/null +++ b/arch/x86/include/asm/generic-ucontext.h @@ -0,0 +1,18 @@ +#ifndef _ASM_X86_UCONTEXT_H +#define _ASM_X86_UCONTEXT_H + +#define UC_FP_XSTATE 0x1 /* indicates the presence of extended state + * information in the memory layout pointed + * by the fpstate pointer in the ucontext's + * sigcontext struct (uc_mcontext). + */ + +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + struct sigcontext uc_mcontext; + sigset_t uc_sigmask; /* mask last for extensibility */ +}; + +#endif /* _ASM_X86_UCONTEXT_H */ -- cgit v1.2.3-70-g09d2 From 7bfd124d6dae7d394e73753300594a81a022fe7d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 18 Jun 2009 21:48:17 +0200 Subject: x86: convert trivial headers to asm-generic version For these nine header files, the asm-generic version should be semantically identical to what is in x86. Change the contents to be binary identical, for better review. 
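For illustration only: once a header is binary identical, a later patch in
this series can shrink the x86 copy down to its arch-specific bits plus an
include of the generic version, roughly like this for mman.h:

	#ifndef _ASM_X86_MMAN_H
	#define _ASM_X86_MMAN_H

	#define MAP_32BIT 0x40		/* only give out 32bit addresses */

	#include <asm-generic/mman.h>

	#endif /* _ASM_X86_MMAN_H */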
Signed-off-by: Arnd Bergmann LKML-Reference: Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/ioctls.h | 40 ++++++++++++++++++++++++++++------------ arch/x86/include/asm/ipcbuf.h | 15 ++++++++++----- arch/x86/include/asm/msgbuf.h | 30 +++++++++++++++++++----------- arch/x86/include/asm/param.h | 8 +++++--- arch/x86/include/asm/shmbuf.h | 28 ++++++++++++++++++---------- arch/x86/include/asm/socket.h | 10 +++++----- arch/x86/include/asm/sockios.h | 6 +++--- arch/x86/include/asm/termbits.h | 8 ++++---- 8 files changed, 92 insertions(+), 53 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/ioctls.h b/arch/x86/include/asm/ioctls.h index 0d5b23b7b06..a799e20a769 100644 --- a/arch/x86/include/asm/ioctls.h +++ b/arch/x86/include/asm/ioctls.h @@ -1,12 +1,23 @@ -#ifndef _ASM_X86_IOCTLS_H -#define _ASM_X86_IOCTLS_H +#ifndef __ASM_GENERIC_IOCTLS_H +#define __ASM_GENERIC_IOCTLS_H -#include +#include + +/* + * These are the most common definitions for tty ioctl numbers. + * Most of them do not use the recommended _IOC(), but there is + * probably some source code out there hardcoding the number, + * so we might as well use them for all new platforms. + * + * The architectures that use different values here typically + * try to be compatible with some Unix variants for the same + * architecture. + */ /* 0x54 is just a magic number to make these relatively unique ('T') */ #define TCGETS 0x5401 -#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */ +#define TCSETS 0x5402 #define TCSETSW 0x5403 #define TCSETSF 0x5404 #define TCGETA 0x5405 @@ -43,7 +54,6 @@ #define TIOCSETD 0x5423 #define TIOCGETD 0x5424 #define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ -/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */ #define TIOCSBRK 0x5427 /* BSD compatibility */ #define TIOCCBRK 0x5428 /* BSD compatibility */ #define TIOCGSID 0x5429 /* Return the session ID of FD */ @@ -53,8 +63,7 @@ #define TCSETSF2 _IOW('T', 0x2D, struct termios2) #define TIOCGRS485 0x542E #define TIOCSRS485 0x542F -#define TIOCGPTN _IOR('T', 0x30, unsigned int) - /* Get Pty Number (of pty-mux device) */ +#define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ #define TCGETX 0x5432 /* SYS5 TCGETX compatibility */ #define TCSETX 0x5433 @@ -76,9 +85,16 @@ #define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ -#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ -#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ -#define FIOQSIZE 0x5460 + +/* + * some architectures define FIOQSIZE as 0x545E, which is used for + * TIOCGHAYESESP on others + */ +#ifndef FIOQSIZE +# define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ +# define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ +# define FIOQSIZE 0x5460 +#endif /* Used for packet mode */ #define TIOCPKT_DATA 0 @@ -89,6 +105,6 @@ #define TIOCPKT_NOSTOP 16 #define TIOCPKT_DOSTOP 32 -#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ +#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ -#endif /* _ASM_X86_IOCTLS_H */ +#endif /* __ASM_GENERIC_IOCTLS_H */ diff --git a/arch/x86/include/asm/ipcbuf.h b/arch/x86/include/asm/ipcbuf.h index ee678fd5159..888ca1c8f95 100644 --- a/arch/x86/include/asm/ipcbuf.h +++ b/arch/x86/include/asm/ipcbuf.h @@ -1,13 +1,18 @@ -#ifndef _ASM_X86_IPCBUF_H -#define _ASM_X86_IPCBUF_H 
+#ifndef __ASM_GENERIC_IPCBUF_H +#define __ASM_GENERIC_IPCBUF_H /* - * The ipc64_perm structure for x86 architecture. + * The generic ipc64_perm structure: * Note extra padding because this structure is passed back and forth * between kernel and user space. * + * ipc64_perm was originally meant to be architecture specific, but + * everyone just ended up making identical copies without specific + * optimizations, so we may just as well all use the same one. + * * Pad space is left for: - * - 32-bit mode_t and seq + * - 32-bit mode_t on architectures that only had 16 bit + * - 32-bit seq * - 2 miscellaneous 32-bit values */ @@ -25,4 +30,4 @@ struct ipc64_perm { unsigned long __unused2; }; -#endif /* _ASM_X86_IPCBUF_H */ +#endif /* __ASM_GENERIC_IPCBUF_H */ diff --git a/arch/x86/include/asm/msgbuf.h b/arch/x86/include/asm/msgbuf.h index 7e4e9481f51..aec850d9159 100644 --- a/arch/x86/include/asm/msgbuf.h +++ b/arch/x86/include/asm/msgbuf.h @@ -1,30 +1,38 @@ -#ifndef _ASM_X86_MSGBUF_H -#define _ASM_X86_MSGBUF_H +#ifndef __ASM_GENERIC_MSGBUF_H +#define __ASM_GENERIC_MSGBUF_H +#include /* - * The msqid64_ds structure for i386 architecture. + * generic msqid64_ds structure. + * * Note extra padding because this structure is passed back and forth * between kernel and user space. * - * Pad space on i386 is left for: + * msqid64_ds was originally meant to be architecture specific, but + * everyone just ended up making identical copies without specific + * optimizations, so we may just as well all use the same one. + * + * 64 bit architectures typically define a 64 bit __kernel_time_t, + * so they do not need the first three padding words. + * On big-endian systems, the padding is in the wrong place. + * + * Pad space is left for: * - 64-bit time_t to solve y2038 problem * - 2 miscellaneous 32-bit values - * - * Pad space on x8664 is left for: - * - 2 miscellaneous 64-bit values */ + struct msqid64_ds { struct ipc64_perm msg_perm; __kernel_time_t msg_stime; /* last msgsnd time */ -#ifdef __i386__ +#if __BITS_PER_LONG != 64 unsigned long __unused1; #endif __kernel_time_t msg_rtime; /* last msgrcv time */ -#ifdef __i386__ +#if __BITS_PER_LONG != 64 unsigned long __unused2; #endif __kernel_time_t msg_ctime; /* last change time */ -#ifdef __i386__ +#if __BITS_PER_LONG != 64 unsigned long __unused3; #endif unsigned long msg_cbytes; /* current number of bytes on queue */ @@ -36,4 +44,4 @@ struct msqid64_ds { unsigned long __unused5; }; -#endif /* _ASM_X86_MSGBUF_H */ +#endif /* __ASM_GENERIC_MSGBUF_H */ diff --git a/arch/x86/include/asm/param.h b/arch/x86/include/asm/param.h index 6f0d0422f4c..cdf8251bfb6 100644 --- a/arch/x86/include/asm/param.h +++ b/arch/x86/include/asm/param.h @@ -1,5 +1,5 @@ -#ifndef _ASM_X86_PARAM_H -#define _ASM_X86_PARAM_H +#ifndef __ASM_GENERIC_PARAM_H +#define __ASM_GENERIC_PARAM_H #ifdef __KERNEL__ # define HZ CONFIG_HZ /* Internal kernel timer frequency */ @@ -11,7 +11,9 @@ #define HZ 100 #endif +#ifndef EXEC_PAGESIZE #define EXEC_PAGESIZE 4096 +#endif #ifndef NOGROUP #define NOGROUP (-1) @@ -19,4 +21,4 @@ #define MAXHOSTNAMELEN 64 /* max length of hostname */ -#endif /* _ASM_X86_PARAM_H */ +#endif /* __ASM_GENERIC_PARAM_H */ diff --git a/arch/x86/include/asm/shmbuf.h b/arch/x86/include/asm/shmbuf.h index b51413b7497..5768fa60ac8 100644 --- a/arch/x86/include/asm/shmbuf.h +++ b/arch/x86/include/asm/shmbuf.h @@ -1,32 +1,40 @@ -#ifndef _ASM_X86_SHMBUF_H -#define _ASM_X86_SHMBUF_H +#ifndef __ASM_GENERIC_SHMBUF_H +#define __ASM_GENERIC_SHMBUF_H + +#include /* * The shmid64_ds 
structure for x86 architecture. * Note extra padding because this structure is passed back and forth * between kernel and user space. * - * Pad space on 32 bit is left for: + * shmid64_ds was originally meant to be architecture specific, but + * everyone just ended up making identical copies without specific + * optimizations, so we may just as well all use the same one. + * + * 64 bit architectures typically define a 64 bit __kernel_time_t, + * so they do not need the first two padding words. + * On big-endian systems, the padding is in the wrong place. + * + * + * Pad space is left for: * - 64-bit time_t to solve y2038 problem * - 2 miscellaneous 32-bit values - * - * Pad space on 64 bit is left for: - * - 2 miscellaneous 64-bit values */ struct shmid64_ds { struct ipc64_perm shm_perm; /* operation perms */ size_t shm_segsz; /* size of segment (bytes) */ __kernel_time_t shm_atime; /* last attach time */ -#ifdef __i386__ +#if __BITS_PER_LONG != 64 unsigned long __unused1; #endif __kernel_time_t shm_dtime; /* last detach time */ -#ifdef __i386__ +#if __BITS_PER_LONG != 64 unsigned long __unused2; #endif __kernel_time_t shm_ctime; /* last change time */ -#ifdef __i386__ +#if __BITS_PER_LONG != 64 unsigned long __unused3; #endif __kernel_pid_t shm_cpid; /* pid of creator */ @@ -48,4 +56,4 @@ struct shminfo64 { unsigned long __unused4; }; -#endif /* _ASM_X86_SHMBUF_H */ +#endif /* __ASM_GENERIC_SHMBUF_H */ diff --git a/arch/x86/include/asm/socket.h b/arch/x86/include/asm/socket.h index ca8bf2cd0ba..d4ae42a06a2 100644 --- a/arch/x86/include/asm/socket.h +++ b/arch/x86/include/asm/socket.h @@ -1,5 +1,5 @@ -#ifndef _ASM_X86_SOCKET_H -#define _ASM_X86_SOCKET_H +#ifndef __ASM_GENERIC_SOCKET_H +#define __ASM_GENERIC_SOCKET_H #include @@ -38,8 +38,8 @@ #define SO_BINDTODEVICE 25 /* Socket filtering */ -#define SO_ATTACH_FILTER 26 -#define SO_DETACH_FILTER 27 +#define SO_ATTACH_FILTER 26 +#define SO_DETACH_FILTER 27 #define SO_PEERNAME 28 #define SO_TIMESTAMP 29 @@ -57,4 +57,4 @@ #define SO_TIMESTAMPING 37 #define SCM_TIMESTAMPING SO_TIMESTAMPING -#endif /* _ASM_X86_SOCKET_H */ +#endif /* __ASM_GENERIC_SOCKET_H */ diff --git a/arch/x86/include/asm/sockios.h b/arch/x86/include/asm/sockios.h index 49cc72b5d3c..9a61a369b90 100644 --- a/arch/x86/include/asm/sockios.h +++ b/arch/x86/include/asm/sockios.h @@ -1,5 +1,5 @@ -#ifndef _ASM_X86_SOCKIOS_H -#define _ASM_X86_SOCKIOS_H +#ifndef __ASM_GENERIC_SOCKIOS_H +#define __ASM_GENERIC_SOCKIOS_H /* Socket-level I/O control calls. 
*/ #define FIOSETOWN 0x8901 @@ -10,4 +10,4 @@ #define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ #define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ -#endif /* _ASM_X86_SOCKIOS_H */ +#endif /* __ASM_GENERIC_SOCKIOS_H */ diff --git a/arch/x86/include/asm/termbits.h b/arch/x86/include/asm/termbits.h index af1b70ea440..1c9773d48cb 100644 --- a/arch/x86/include/asm/termbits.h +++ b/arch/x86/include/asm/termbits.h @@ -1,5 +1,5 @@ -#ifndef _ASM_X86_TERMBITS_H -#define _ASM_X86_TERMBITS_H +#ifndef __ASM_GENERIC_TERMBITS_H +#define __ASM_GENERIC_TERMBITS_H #include @@ -140,7 +140,7 @@ struct ktermios { #define HUPCL 0002000 #define CLOCAL 0004000 #define CBAUDEX 0010000 -#define BOTHER 0010000 /* non standard rate */ +#define BOTHER 0010000 #define B57600 0010001 #define B115200 0010002 #define B230400 0010003 @@ -195,4 +195,4 @@ struct ktermios { #define TCSADRAIN 1 #define TCSAFLUSH 2 -#endif /* _ASM_X86_TERMBITS_H */ +#endif /* __ASM_GENERIC_TERMBITS_H */ -- cgit v1.2.3-70-g09d2 From 06f5013aa8eb5895ced2c71d13f5114103605555 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 18 Jun 2009 21:48:18 +0200 Subject: x86: convert almost generic headers to asm-generic version In x86, mman.h, module.h, scatterlist.h, types.h and ucontext.h can use the asm-generic version by just defining the x86 specific parts locally and falling back on the generic code for the common bits. This patch illustrates the differences between the x86 and asm-generic versions by changing a file that is initially identical to the x86 version to one that is identical to the asm-generic version. Signed-off-by: Arnd Bergmann LKML-Reference: Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/generic-mman.h | 8 +-- arch/x86/include/asm/generic-module.h | 92 ++++++------------------------ arch/x86/include/asm/generic-scatterlist.h | 34 +++++++---- arch/x86/include/asm/generic-types.h | 32 +++++++---- arch/x86/include/asm/generic-ucontext.h | 12 +--- arch/x86/include/asm/mman.h | 14 +---- arch/x86/include/asm/module.h | 13 +---- arch/x86/include/asm/scatterlist.h | 27 +-------- arch/x86/include/asm/types.h | 12 +--- arch/x86/include/asm/ucontext.h | 8 +-- 10 files changed, 73 insertions(+), 179 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/generic-mman.h b/arch/x86/include/asm/generic-mman.h index 751af2550ed..7cab4de2bca 100644 --- a/arch/x86/include/asm/generic-mman.h +++ b/arch/x86/include/asm/generic-mman.h @@ -1,10 +1,8 @@ -#ifndef _ASM_X86_MMAN_H -#define _ASM_X86_MMAN_H +#ifndef __ASM_GENERIC_MMAN_H +#define __ASM_GENERIC_MMAN_H #include -#define MAP_32BIT 0x40 /* only give out 32bit addresses */ - #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ @@ -17,4 +15,4 @@ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -#endif /* _ASM_X86_MMAN_H */ +#endif /* __ASM_GENERIC_MMAN_H */ diff --git a/arch/x86/include/asm/generic-module.h b/arch/x86/include/asm/generic-module.h index 47d62743c4d..ed5b44de4c9 100644 --- a/arch/x86/include/asm/generic-module.h +++ b/arch/x86/include/asm/generic-module.h @@ -1,80 +1,22 @@ -#ifndef _ASM_X86_MODULE_H -#define _ASM_X86_MODULE_H +#ifndef __ASM_GENERIC_MODULE_H +#define __ASM_GENERIC_MODULE_H -/* x86_32/64 are simple */ -struct mod_arch_specific {}; +/* + * Many architectures just need a simple module + * loader without arch specific data. 
+ */ +struct mod_arch_specific +{ +}; -#ifdef CONFIG_X86_32 -# define Elf_Shdr Elf32_Shdr -# define Elf_Sym Elf32_Sym -# define Elf_Ehdr Elf32_Ehdr +#ifdef CONFIG_64BIT +#define Elf_Shdr Elf64_Shdr +#define Elf_Sym Elf64_Sym +#define Elf_Ehdr Elf64_Ehdr #else -# define Elf_Shdr Elf64_Shdr -# define Elf_Sym Elf64_Sym -# define Elf_Ehdr Elf64_Ehdr +#define Elf_Shdr Elf32_Shdr +#define Elf_Sym Elf32_Sym +#define Elf_Ehdr Elf32_Ehdr #endif -#ifdef CONFIG_X86_64 -/* X86_64 does not define MODULE_PROC_FAMILY */ -#elif defined CONFIG_M386 -#define MODULE_PROC_FAMILY "386 " -#elif defined CONFIG_M486 -#define MODULE_PROC_FAMILY "486 " -#elif defined CONFIG_M586 -#define MODULE_PROC_FAMILY "586 " -#elif defined CONFIG_M586TSC -#define MODULE_PROC_FAMILY "586TSC " -#elif defined CONFIG_M586MMX -#define MODULE_PROC_FAMILY "586MMX " -#elif defined CONFIG_MCORE2 -#define MODULE_PROC_FAMILY "CORE2 " -#elif defined CONFIG_M686 -#define MODULE_PROC_FAMILY "686 " -#elif defined CONFIG_MPENTIUMII -#define MODULE_PROC_FAMILY "PENTIUMII " -#elif defined CONFIG_MPENTIUMIII -#define MODULE_PROC_FAMILY "PENTIUMIII " -#elif defined CONFIG_MPENTIUMM -#define MODULE_PROC_FAMILY "PENTIUMM " -#elif defined CONFIG_MPENTIUM4 -#define MODULE_PROC_FAMILY "PENTIUM4 " -#elif defined CONFIG_MK6 -#define MODULE_PROC_FAMILY "K6 " -#elif defined CONFIG_MK7 -#define MODULE_PROC_FAMILY "K7 " -#elif defined CONFIG_MK8 -#define MODULE_PROC_FAMILY "K8 " -#elif defined CONFIG_X86_ELAN -#define MODULE_PROC_FAMILY "ELAN " -#elif defined CONFIG_MCRUSOE -#define MODULE_PROC_FAMILY "CRUSOE " -#elif defined CONFIG_MEFFICEON -#define MODULE_PROC_FAMILY "EFFICEON " -#elif defined CONFIG_MWINCHIPC6 -#define MODULE_PROC_FAMILY "WINCHIPC6 " -#elif defined CONFIG_MWINCHIP3D -#define MODULE_PROC_FAMILY "WINCHIP3D " -#elif defined CONFIG_MCYRIXIII -#define MODULE_PROC_FAMILY "CYRIXIII " -#elif defined CONFIG_MVIAC3_2 -#define MODULE_PROC_FAMILY "VIAC3-2 " -#elif defined CONFIG_MVIAC7 -#define MODULE_PROC_FAMILY "VIAC7 " -#elif defined CONFIG_MGEODEGX1 -#define MODULE_PROC_FAMILY "GEODEGX1 " -#elif defined CONFIG_MGEODE_LX -#define MODULE_PROC_FAMILY "GEODE " -#else -#error unknown processor family -#endif - -#ifdef CONFIG_X86_32 -# ifdef CONFIG_4KSTACKS -# define MODULE_STACKSIZE "4KSTACKS " -# else -# define MODULE_STACKSIZE "" -# endif -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE -#endif - -#endif /* _ASM_X86_MODULE_H */ +#endif /* __ASM_GENERIC_MODULE_H */ diff --git a/arch/x86/include/asm/generic-scatterlist.h b/arch/x86/include/asm/generic-scatterlist.h index 263d397d2ee..8b9454496a7 100644 --- a/arch/x86/include/asm/generic-scatterlist.h +++ b/arch/x86/include/asm/generic-scatterlist.h @@ -1,7 +1,7 @@ -#ifndef _ASM_X86_SCATTERLIST_H -#define _ASM_X86_SCATTERLIST_H +#ifndef __ASM_GENERIC_SCATTERLIST_H +#define __ASM_GENERIC_SCATTERLIST_H -#include +#include struct scatterlist { #ifdef CONFIG_DEBUG_SG @@ -14,20 +14,30 @@ struct scatterlist { unsigned int dma_length; }; -#define ARCH_HAS_SG_CHAIN -#define ISA_DMA_THRESHOLD (0x00ffffff) - /* - * These macros should be used after a pci_map_sg call has been done + * These macros should be used after a dma_map_sg call has been done * to get bus addresses of each of the SG entries and their lengths. * You should only work with the number of sg entries pci_map_sg - * returns. + * returns, or alternatively stop on the first sg_dma_len(sg) which + * is 0. 
*/ #define sg_dma_address(sg) ((sg)->dma_address) -#ifdef CONFIG_X86_32 -# define sg_dma_len(sg) ((sg)->length) +#ifndef sg_dma_len +/* + * Normally, you have an iommu on 64 bit machines, but not on 32 bit + * machines. Architectures that are differnt should override this. + */ +#if __BITS_PER_LONG == 64 +#define sg_dma_len(sg) ((sg)->dma_length) #else -# define sg_dma_len(sg) ((sg)->dma_length) +#define sg_dma_len(sg) ((sg)->length) +#endif /* 64 bit */ +#endif /* sg_dma_len */ + +#ifndef ISA_DMA_THRESHOLD +#define ISA_DMA_THRESHOLD (~0UL) #endif -#endif /* _ASM_X86_SCATTERLIST_H */ +#define ARCH_HAS_SG_CHAIN + +#endif /* __ASM_GENERIC_SCATTERLIST_H */ diff --git a/arch/x86/include/asm/generic-types.h b/arch/x86/include/asm/generic-types.h index 09b97745772..fba7d33ca3f 100644 --- a/arch/x86/include/asm/generic-types.h +++ b/arch/x86/include/asm/generic-types.h @@ -1,6 +1,9 @@ -#ifndef _ASM_X86_TYPES_H -#define _ASM_X86_TYPES_H - +#ifndef _ASM_GENERIC_TYPES_H +#define _ASM_GENERIC_TYPES_H +/* + * int-ll64 is used practically everywhere now, + * so use it as a reasonable default. + */ #include #ifndef __ASSEMBLY__ @@ -13,18 +16,27 @@ typedef unsigned short umode_t; * These aren't exported outside the kernel to avoid name space clashes */ #ifdef __KERNEL__ - #ifndef __ASSEMBLY__ - -typedef u64 dma64_addr_t; -#if defined(CONFIG_X86_64) || defined(CONFIG_HIGHMEM64G) -/* DMA addresses come in 32-bit and 64-bit flavours. */ +/* + * DMA addresses may be very different from physical addresses + * and pointers. i386 and powerpc may have 64 bit DMA on 32 bit + * systems, while sparc64 uses 32 bit DMA addresses for 64 bit + * physical addresses. + * This default defines dma_addr_t to have the same size as + * phys_addr_t, which is the most common way. + * Do not define the dma64_addr_t type, which never really + * worked. + */ +#ifndef dma_addr_t +#ifdef CONFIG_PHYS_ADDR_T_64BIT typedef u64 dma_addr_t; #else typedef u32 dma_addr_t; -#endif +#endif /* CONFIG_PHYS_ADDR_T_64BIT */ +#endif /* dma_addr_t */ #endif /* __ASSEMBLY__ */ + #endif /* __KERNEL__ */ -#endif /* _ASM_X86_TYPES_H */ +#endif /* _ASM_GENERIC_TYPES_H */ diff --git a/arch/x86/include/asm/generic-ucontext.h b/arch/x86/include/asm/generic-ucontext.h index 87324cf439d..ad77343e8a9 100644 --- a/arch/x86/include/asm/generic-ucontext.h +++ b/arch/x86/include/asm/generic-ucontext.h @@ -1,11 +1,5 @@ -#ifndef _ASM_X86_UCONTEXT_H -#define _ASM_X86_UCONTEXT_H - -#define UC_FP_XSTATE 0x1 /* indicates the presence of extended state - * information in the memory layout pointed - * by the fpstate pointer in the ucontext's - * sigcontext struct (uc_mcontext). 
- */ +#ifndef __ASM_GENERIC_UCONTEXT_H +#define __ASM_GENERIC_UCONTEXT_H struct ucontext { unsigned long uc_flags; @@ -15,4 +9,4 @@ struct ucontext { sigset_t uc_sigmask; /* mask last for extensibility */ }; -#endif /* _ASM_X86_UCONTEXT_H */ +#endif /* __ASM_GENERIC_UCONTEXT_H */ diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h index 751af2550ed..063d8c9e4d6 100644 --- a/arch/x86/include/asm/mman.h +++ b/arch/x86/include/asm/mman.h @@ -1,20 +1,8 @@ #ifndef _ASM_X86_MMAN_H #define _ASM_X86_MMAN_H -#include - #define MAP_32BIT 0x40 /* only give out 32bit addresses */ -#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ -#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ -#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ -#define MAP_LOCKED 0x2000 /* pages are locked */ -#define MAP_NORESERVE 0x4000 /* don't check for reservations */ -#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ -#define MAP_NONBLOCK 0x10000 /* do not block on IO */ -#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ - -#define MCL_CURRENT 1 /* lock all current mappings */ -#define MCL_FUTURE 2 /* lock all future mappings */ +#include #endif /* _ASM_X86_MMAN_H */ diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h index 47d62743c4d..4a7a192910d 100644 --- a/arch/x86/include/asm/module.h +++ b/arch/x86/include/asm/module.h @@ -1,18 +1,7 @@ #ifndef _ASM_X86_MODULE_H #define _ASM_X86_MODULE_H -/* x86_32/64 are simple */ -struct mod_arch_specific {}; - -#ifdef CONFIG_X86_32 -# define Elf_Shdr Elf32_Shdr -# define Elf_Sym Elf32_Sym -# define Elf_Ehdr Elf32_Ehdr -#else -# define Elf_Shdr Elf64_Shdr -# define Elf_Sym Elf64_Sym -# define Elf_Ehdr Elf64_Ehdr -#endif +#include #ifdef CONFIG_X86_64 /* X86_64 does not define MODULE_PROC_FAMILY */ diff --git a/arch/x86/include/asm/scatterlist.h b/arch/x86/include/asm/scatterlist.h index 263d397d2ee..2097d686471 100644 --- a/arch/x86/include/asm/scatterlist.h +++ b/arch/x86/include/asm/scatterlist.h @@ -1,33 +1,8 @@ #ifndef _ASM_X86_SCATTERLIST_H #define _ASM_X86_SCATTERLIST_H -#include - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; - unsigned int length; - dma_addr_t dma_address; - unsigned int dma_length; -}; - -#define ARCH_HAS_SG_CHAIN #define ISA_DMA_THRESHOLD (0x00ffffff) -/* - * These macros should be used after a pci_map_sg call has been done - * to get bus addresses of each of the SG entries and their lengths. - * You should only work with the number of sg entries pci_map_sg - * returns. 
- */ -#define sg_dma_address(sg) ((sg)->dma_address) -#ifdef CONFIG_X86_32 -# define sg_dma_len(sg) ((sg)->length) -#else -# define sg_dma_len(sg) ((sg)->dma_length) -#endif +#include #endif /* _ASM_X86_SCATTERLIST_H */ diff --git a/arch/x86/include/asm/types.h b/arch/x86/include/asm/types.h index 09b97745772..f2fe528e968 100644 --- a/arch/x86/include/asm/types.h +++ b/arch/x86/include/asm/types.h @@ -1,19 +1,11 @@ #ifndef _ASM_X86_TYPES_H #define _ASM_X86_TYPES_H -#include +#define dma_addr_t dma_addr_t -#ifndef __ASSEMBLY__ - -typedef unsigned short umode_t; +#include -#endif /* __ASSEMBLY__ */ - -/* - * These aren't exported outside the kernel to avoid name space clashes - */ #ifdef __KERNEL__ - #ifndef __ASSEMBLY__ typedef u64 dma64_addr_t; diff --git a/arch/x86/include/asm/ucontext.h b/arch/x86/include/asm/ucontext.h index 87324cf439d..7cfc436f86d 100644 --- a/arch/x86/include/asm/ucontext.h +++ b/arch/x86/include/asm/ucontext.h @@ -7,12 +7,6 @@ * sigcontext struct (uc_mcontext). */ -struct ucontext { - unsigned long uc_flags; - struct ucontext *uc_link; - stack_t uc_stack; - struct sigcontext uc_mcontext; - sigset_t uc_sigmask; /* mask last for extensibility */ -}; +#include #endif /* _ASM_X86_UCONTEXT_H */ -- cgit v1.2.3-70-g09d2 From 69d5ffdaad7b77b97229b55c36afb20e5bebd29e Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 18 Jun 2009 21:48:19 +0200 Subject: x86: convert termios.h to the asm-generic version This patch turned out more controversial than expected and may get dropped in the future. I'm including it for reference anyway. The user_termio_to_kernel_termios and kernel_termios_to_user_termio functions on x86 are lacking error checking from get_user and are not portable to big-endian systems, so the asm-generic header has to differ in this regard. Signed-off-by: Arnd Bergmann LKML-Reference: Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/termios.h | 86 +++++++++++++++++++++++++++++++----------- 1 file changed, 63 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/termios.h b/arch/x86/include/asm/termios.h index c4ee8056bac..d0922adc56d 100644 --- a/arch/x86/include/asm/termios.h +++ b/arch/x86/include/asm/termios.h @@ -1,5 +1,12 @@ -#ifndef _ASM_X86_TERMIOS_H -#define _ASM_X86_TERMIOS_H +#ifndef _ASM_GENERIC_TERMIOS_H +#define _ASM_GENERIC_TERMIOS_H +/* + * Most architectures have straight copies of the x86 code, with + * varying levels of bug fixes on top. Usually it's a good idea + * to use this generic version instead, but be careful to avoid + * ABI changes. + * New architectures should not provide their own version. + */ #include #include @@ -54,37 +61,57 @@ struct termio { /* * Translate a "termio" structure into a "termios". Ugh. 
*/ -#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ - unsigned short __tmp; \ - get_user(__tmp,&(termio)->x); \ - *(unsigned short *) &(termios)->x = __tmp; \ -} - static inline int user_termio_to_kernel_termios(struct ktermios *termios, - struct termio __user *termio) + const struct termio __user *termio) { - SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); - SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); - SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); - SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); - get_user(termios->c_line, &termio->c_line); - return copy_from_user(termios->c_cc, termio->c_cc, NCC); + unsigned short tmp; + + if (get_user(tmp, &termio->c_iflag) < 0) + goto fault; + termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp; + + if (get_user(tmp, &termio->c_oflag) < 0) + goto fault; + termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp; + + if (get_user(tmp, &termio->c_cflag) < 0) + goto fault; + termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp; + + if (get_user(tmp, &termio->c_lflag) < 0) + goto fault; + termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp; + + if (get_user(termios->c_line, &termio->c_line) < 0) + goto fault; + + if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0) + goto fault; + + return 0; + + fault: + return -EFAULT; } /* * Translate a "termios" structure into a "termio". Ugh. */ static inline int kernel_termios_to_user_termio(struct termio __user *termio, - struct ktermios *termios) + struct ktermios *termios) { - put_user((termios)->c_iflag, &(termio)->c_iflag); - put_user((termios)->c_oflag, &(termio)->c_oflag); - put_user((termios)->c_cflag, &(termio)->c_cflag); - put_user((termios)->c_lflag, &(termio)->c_lflag); - put_user((termios)->c_line, &(termio)->c_line); - return copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); + if (put_user(termios->c_iflag, &termio->c_iflag) < 0 || + put_user(termios->c_oflag, &termio->c_oflag) < 0 || + put_user(termios->c_cflag, &termio->c_cflag) < 0 || + put_user(termios->c_lflag, &termio->c_lflag) < 0 || + put_user(termios->c_line, &termio->c_line) < 0 || + copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0) + return -EFAULT; + + return 0; } +#ifdef TCGETS2 static inline int user_termios_to_kernel_termios(struct ktermios *k, struct termios2 __user *u) { @@ -108,7 +135,20 @@ static inline int kernel_termios_to_user_termios_1(struct termios __user *u, { return copy_to_user(u, k, sizeof(struct termios)); } +#else /* TCGETS2 */ +static inline int user_termios_to_kernel_termios(struct ktermios *k, + struct termios __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios)); +} + +static inline int kernel_termios_to_user_termios(struct termios __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios)); +} +#endif /* TCGETS2 */ #endif /* __KERNEL__ */ -#endif /* _ASM_X86_TERMIOS_H */ +#endif /* _ASM_GENERIC_TERMIOS_H */ -- cgit v1.2.3-70-g09d2 From 73a2d096fdf23aa841f7595d114a11ec85a85e4d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 18 Jun 2009 21:48:20 +0200 Subject: x86: remove all now-duplicate header files All files that have been made identical to the asm-generic version in the previous patches can now be removed, guaranteeing that this does not introduce semantic changes. Signed-off-by: Arnd Bergmann LKML-Reference: Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/generic-mman.h | 18 --- arch/x86/include/asm/generic-module.h | 22 ---- arch/x86/include/asm/generic-scatterlist.h | 43 ------- arch/x86/include/asm/generic-types.h | 42 ------ arch/x86/include/asm/generic-ucontext.h | 12 -- arch/x86/include/asm/ioctls.h | 111 +--------------- arch/x86/include/asm/ipcbuf.h | 34 +---- arch/x86/include/asm/mman.h | 2 +- arch/x86/include/asm/module.h | 2 +- arch/x86/include/asm/msgbuf.h | 48 +------ arch/x86/include/asm/param.h | 25 +--- arch/x86/include/asm/scatterlist.h | 2 +- arch/x86/include/asm/shmbuf.h | 60 +-------- arch/x86/include/asm/socket.h | 61 +-------- arch/x86/include/asm/sockios.h | 14 +- arch/x86/include/asm/termbits.h | 199 +---------------------------- arch/x86/include/asm/termios.h | 155 +--------------------- arch/x86/include/asm/types.h | 2 +- arch/x86/include/asm/ucontext.h | 2 +- 19 files changed, 14 insertions(+), 840 deletions(-) delete mode 100644 arch/x86/include/asm/generic-mman.h delete mode 100644 arch/x86/include/asm/generic-module.h delete mode 100644 arch/x86/include/asm/generic-scatterlist.h delete mode 100644 arch/x86/include/asm/generic-types.h delete mode 100644 arch/x86/include/asm/generic-ucontext.h (limited to 'arch') diff --git a/arch/x86/include/asm/generic-mman.h b/arch/x86/include/asm/generic-mman.h deleted file mode 100644 index 7cab4de2bca..00000000000 --- a/arch/x86/include/asm/generic-mman.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef __ASM_GENERIC_MMAN_H -#define __ASM_GENERIC_MMAN_H - -#include - -#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ -#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ -#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ -#define MAP_LOCKED 0x2000 /* pages are locked */ -#define MAP_NORESERVE 0x4000 /* don't check for reservations */ -#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ -#define MAP_NONBLOCK 0x10000 /* do not block on IO */ -#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ - -#define MCL_CURRENT 1 /* lock all current mappings */ -#define MCL_FUTURE 2 /* lock all future mappings */ - -#endif /* __ASM_GENERIC_MMAN_H */ diff --git a/arch/x86/include/asm/generic-module.h b/arch/x86/include/asm/generic-module.h deleted file mode 100644 index ed5b44de4c9..00000000000 --- a/arch/x86/include/asm/generic-module.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef __ASM_GENERIC_MODULE_H -#define __ASM_GENERIC_MODULE_H - -/* - * Many architectures just need a simple module - * loader without arch specific data. - */ -struct mod_arch_specific -{ -}; - -#ifdef CONFIG_64BIT -#define Elf_Shdr Elf64_Shdr -#define Elf_Sym Elf64_Sym -#define Elf_Ehdr Elf64_Ehdr -#else -#define Elf_Shdr Elf32_Shdr -#define Elf_Sym Elf32_Sym -#define Elf_Ehdr Elf32_Ehdr -#endif - -#endif /* __ASM_GENERIC_MODULE_H */ diff --git a/arch/x86/include/asm/generic-scatterlist.h b/arch/x86/include/asm/generic-scatterlist.h deleted file mode 100644 index 8b9454496a7..00000000000 --- a/arch/x86/include/asm/generic-scatterlist.h +++ /dev/null @@ -1,43 +0,0 @@ -#ifndef __ASM_GENERIC_SCATTERLIST_H -#define __ASM_GENERIC_SCATTERLIST_H - -#include - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; - unsigned int length; - dma_addr_t dma_address; - unsigned int dma_length; -}; - -/* - * These macros should be used after a dma_map_sg call has been done - * to get bus addresses of each of the SG entries and their lengths. 
- * You should only work with the number of sg entries pci_map_sg - * returns, or alternatively stop on the first sg_dma_len(sg) which - * is 0. - */ -#define sg_dma_address(sg) ((sg)->dma_address) -#ifndef sg_dma_len -/* - * Normally, you have an iommu on 64 bit machines, but not on 32 bit - * machines. Architectures that are differnt should override this. - */ -#if __BITS_PER_LONG == 64 -#define sg_dma_len(sg) ((sg)->dma_length) -#else -#define sg_dma_len(sg) ((sg)->length) -#endif /* 64 bit */ -#endif /* sg_dma_len */ - -#ifndef ISA_DMA_THRESHOLD -#define ISA_DMA_THRESHOLD (~0UL) -#endif - -#define ARCH_HAS_SG_CHAIN - -#endif /* __ASM_GENERIC_SCATTERLIST_H */ diff --git a/arch/x86/include/asm/generic-types.h b/arch/x86/include/asm/generic-types.h deleted file mode 100644 index fba7d33ca3f..00000000000 --- a/arch/x86/include/asm/generic-types.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef _ASM_GENERIC_TYPES_H -#define _ASM_GENERIC_TYPES_H -/* - * int-ll64 is used practically everywhere now, - * so use it as a reasonable default. - */ -#include - -#ifndef __ASSEMBLY__ - -typedef unsigned short umode_t; - -#endif /* __ASSEMBLY__ */ - -/* - * These aren't exported outside the kernel to avoid name space clashes - */ -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ -/* - * DMA addresses may be very different from physical addresses - * and pointers. i386 and powerpc may have 64 bit DMA on 32 bit - * systems, while sparc64 uses 32 bit DMA addresses for 64 bit - * physical addresses. - * This default defines dma_addr_t to have the same size as - * phys_addr_t, which is the most common way. - * Do not define the dma64_addr_t type, which never really - * worked. - */ -#ifndef dma_addr_t -#ifdef CONFIG_PHYS_ADDR_T_64BIT -typedef u64 dma_addr_t; -#else -typedef u32 dma_addr_t; -#endif /* CONFIG_PHYS_ADDR_T_64BIT */ -#endif /* dma_addr_t */ - -#endif /* __ASSEMBLY__ */ - -#endif /* __KERNEL__ */ - -#endif /* _ASM_GENERIC_TYPES_H */ diff --git a/arch/x86/include/asm/generic-ucontext.h b/arch/x86/include/asm/generic-ucontext.h deleted file mode 100644 index ad77343e8a9..00000000000 --- a/arch/x86/include/asm/generic-ucontext.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef __ASM_GENERIC_UCONTEXT_H -#define __ASM_GENERIC_UCONTEXT_H - -struct ucontext { - unsigned long uc_flags; - struct ucontext *uc_link; - stack_t uc_stack; - struct sigcontext uc_mcontext; - sigset_t uc_sigmask; /* mask last for extensibility */ -}; - -#endif /* __ASM_GENERIC_UCONTEXT_H */ diff --git a/arch/x86/include/asm/ioctls.h b/arch/x86/include/asm/ioctls.h index a799e20a769..ec34c760665 100644 --- a/arch/x86/include/asm/ioctls.h +++ b/arch/x86/include/asm/ioctls.h @@ -1,110 +1 @@ -#ifndef __ASM_GENERIC_IOCTLS_H -#define __ASM_GENERIC_IOCTLS_H - -#include - -/* - * These are the most common definitions for tty ioctl numbers. - * Most of them do not use the recommended _IOC(), but there is - * probably some source code out there hardcoding the number, - * so we might as well use them for all new platforms. - * - * The architectures that use different values here typically - * try to be compatible with some Unix variants for the same - * architecture. 
- */ - -/* 0x54 is just a magic number to make these relatively unique ('T') */ - -#define TCGETS 0x5401 -#define TCSETS 0x5402 -#define TCSETSW 0x5403 -#define TCSETSF 0x5404 -#define TCGETA 0x5405 -#define TCSETA 0x5406 -#define TCSETAW 0x5407 -#define TCSETAF 0x5408 -#define TCSBRK 0x5409 -#define TCXONC 0x540A -#define TCFLSH 0x540B -#define TIOCEXCL 0x540C -#define TIOCNXCL 0x540D -#define TIOCSCTTY 0x540E -#define TIOCGPGRP 0x540F -#define TIOCSPGRP 0x5410 -#define TIOCOUTQ 0x5411 -#define TIOCSTI 0x5412 -#define TIOCGWINSZ 0x5413 -#define TIOCSWINSZ 0x5414 -#define TIOCMGET 0x5415 -#define TIOCMBIS 0x5416 -#define TIOCMBIC 0x5417 -#define TIOCMSET 0x5418 -#define TIOCGSOFTCAR 0x5419 -#define TIOCSSOFTCAR 0x541A -#define FIONREAD 0x541B -#define TIOCINQ FIONREAD -#define TIOCLINUX 0x541C -#define TIOCCONS 0x541D -#define TIOCGSERIAL 0x541E -#define TIOCSSERIAL 0x541F -#define TIOCPKT 0x5420 -#define FIONBIO 0x5421 -#define TIOCNOTTY 0x5422 -#define TIOCSETD 0x5423 -#define TIOCGETD 0x5424 -#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ -#define TIOCSBRK 0x5427 /* BSD compatibility */ -#define TIOCCBRK 0x5428 /* BSD compatibility */ -#define TIOCGSID 0x5429 /* Return the session ID of FD */ -#define TCGETS2 _IOR('T', 0x2A, struct termios2) -#define TCSETS2 _IOW('T', 0x2B, struct termios2) -#define TCSETSW2 _IOW('T', 0x2C, struct termios2) -#define TCSETSF2 _IOW('T', 0x2D, struct termios2) -#define TIOCGRS485 0x542E -#define TIOCSRS485 0x542F -#define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ -#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ -#define TCGETX 0x5432 /* SYS5 TCGETX compatibility */ -#define TCSETX 0x5433 -#define TCSETXF 0x5434 -#define TCSETXW 0x5435 - -#define FIONCLEX 0x5450 -#define FIOCLEX 0x5451 -#define FIOASYNC 0x5452 -#define TIOCSERCONFIG 0x5453 -#define TIOCSERGWILD 0x5454 -#define TIOCSERSWILD 0x5455 -#define TIOCGLCKTRMIOS 0x5456 -#define TIOCSLCKTRMIOS 0x5457 -#define TIOCSERGSTRUCT 0x5458 /* For debugging only */ -#define TIOCSERGETLSR 0x5459 /* Get line status register */ -#define TIOCSERGETMULTI 0x545A /* Get multiport config */ -#define TIOCSERSETMULTI 0x545B /* Set multiport config */ - -#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ -#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ - -/* - * some architectures define FIOQSIZE as 0x545E, which is used for - * TIOCGHAYESESP on others - */ -#ifndef FIOQSIZE -# define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ -# define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ -# define FIOQSIZE 0x5460 -#endif - -/* Used for packet mode */ -#define TIOCPKT_DATA 0 -#define TIOCPKT_FLUSHREAD 1 -#define TIOCPKT_FLUSHWRITE 2 -#define TIOCPKT_STOP 4 -#define TIOCPKT_START 8 -#define TIOCPKT_NOSTOP 16 -#define TIOCPKT_DOSTOP 32 - -#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ - -#endif /* __ASM_GENERIC_IOCTLS_H */ +#include diff --git a/arch/x86/include/asm/ipcbuf.h b/arch/x86/include/asm/ipcbuf.h index 888ca1c8f95..84c7e51cb6d 100644 --- a/arch/x86/include/asm/ipcbuf.h +++ b/arch/x86/include/asm/ipcbuf.h @@ -1,33 +1 @@ -#ifndef __ASM_GENERIC_IPCBUF_H -#define __ASM_GENERIC_IPCBUF_H - -/* - * The generic ipc64_perm structure: - * Note extra padding because this structure is passed back and forth - * between kernel and user space. 
- *
- * ipc64_perm was originally meant to be architecture specific, but
- * everyone just ended up making identical copies without specific
- * optimizations, so we may just as well all use the same one.
- *
- * Pad space is left for:
- * - 32-bit mode_t on architectures that only had 16 bit
- * - 32-bit seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm {
-	__kernel_key_t key;
-	__kernel_uid32_t uid;
-	__kernel_gid32_t gid;
-	__kernel_uid32_t cuid;
-	__kernel_gid32_t cgid;
-	__kernel_mode_t mode;
-	unsigned short __pad1;
-	unsigned short seq;
-	unsigned short __pad2;
-	unsigned long __unused1;
-	unsigned long __unused2;
-};
-
-#endif /* __ASM_GENERIC_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
index 063d8c9e4d6..593e51d4643 100644
--- a/arch/x86/include/asm/mman.h
+++ b/arch/x86/include/asm/mman.h
@@ -3,6 +3,6 @@

 #define MAP_32BIT 0x40 /* only give out 32bit addresses */

-#include <asm/generic-mman.h>
+#include <asm-generic/mman.h>

 #endif /* _ASM_X86_MMAN_H */
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index 4a7a192910d..555bc12bdcd 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_X86_MODULE_H
 #define _ASM_X86_MODULE_H

-#include <asm/generic-module.h>
+#include <asm-generic/module.h>

 #ifdef CONFIG_X86_64
 /* X86_64 does not define MODULE_PROC_FAMILY */
diff --git a/arch/x86/include/asm/msgbuf.h b/arch/x86/include/asm/msgbuf.h
index aec850d9159..809134c644a 100644
--- a/arch/x86/include/asm/msgbuf.h
+++ b/arch/x86/include/asm/msgbuf.h
@@ -1,47 +1 @@
-#ifndef __ASM_GENERIC_MSGBUF_H
-#define __ASM_GENERIC_MSGBUF_H
-
-#include <asm/bitsperlong.h>
-/*
- * generic msqid64_ds structure.
- *
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * msqid64_ds was originally meant to be architecture specific, but
- * everyone just ended up making identical copies without specific
- * optimizations, so we may just as well all use the same one.
- *
- * 64 bit architectures typically define a 64 bit __kernel_time_t,
- * so they do not need the first three padding words.
- * On big-endian systems, the padding is in the wrong place.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct msqid64_ds {
-	struct ipc64_perm msg_perm;
-	__kernel_time_t msg_stime;	/* last msgsnd time */
-#if __BITS_PER_LONG != 64
-	unsigned long __unused1;
-#endif
-	__kernel_time_t msg_rtime;	/* last msgrcv time */
-#if __BITS_PER_LONG != 64
-	unsigned long __unused2;
-#endif
-	__kernel_time_t msg_ctime;	/* last change time */
-#if __BITS_PER_LONG != 64
-	unsigned long __unused3;
-#endif
-	unsigned long msg_cbytes;	/* current number of bytes on queue */
-	unsigned long msg_qnum;	/* number of messages in queue */
-	unsigned long msg_qbytes;	/* max number of bytes on queue */
-	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
-	__kernel_pid_t msg_lrpid;	/* last receive pid */
-	unsigned long __unused4;
-	unsigned long __unused5;
-};
-
-#endif /* __ASM_GENERIC_MSGBUF_H */
+#include <asm-generic/msgbuf.h>
diff --git a/arch/x86/include/asm/param.h b/arch/x86/include/asm/param.h
index cdf8251bfb6..965d4542797 100644
--- a/arch/x86/include/asm/param.h
+++ b/arch/x86/include/asm/param.h
@@ -1,24 +1 @@
-#ifndef __ASM_GENERIC_PARAM_H
-#define __ASM_GENERIC_PARAM_H
-
-#ifdef __KERNEL__
-# define HZ		CONFIG_HZ	/* Internal kernel timer frequency */
-# define USER_HZ	100		/* some user interfaces are */
-# define CLOCKS_PER_SEC	(USER_HZ)	/* in "ticks" like times() */
-#endif
-
-#ifndef HZ
-#define HZ 100
-#endif
-
-#ifndef EXEC_PAGESIZE
-#define EXEC_PAGESIZE	4096
-#endif
-
-#ifndef NOGROUP
-#define NOGROUP		(-1)
-#endif
-
-#define MAXHOSTNAMELEN	64	/* max length of hostname */
-
-#endif /* __ASM_GENERIC_PARAM_H */
+#include <asm-generic/param.h>
diff --git a/arch/x86/include/asm/scatterlist.h b/arch/x86/include/asm/scatterlist.h
index 2097d686471..75af592677e 100644
--- a/arch/x86/include/asm/scatterlist.h
+++ b/arch/x86/include/asm/scatterlist.h
@@ -3,6 +3,6 @@

 #define ISA_DMA_THRESHOLD (0x00ffffff)

-#include <asm/generic-scatterlist.h>
+#include <asm-generic/scatterlist.h>

 #endif /* _ASM_X86_SCATTERLIST_H */
diff --git a/arch/x86/include/asm/shmbuf.h b/arch/x86/include/asm/shmbuf.h
index 5768fa60ac8..83c05fc2de3 100644
--- a/arch/x86/include/asm/shmbuf.h
+++ b/arch/x86/include/asm/shmbuf.h
@@ -1,59 +1 @@
-#ifndef __ASM_GENERIC_SHMBUF_H
-#define __ASM_GENERIC_SHMBUF_H
-
-#include <asm/bitsperlong.h>
-
-/*
- * The shmid64_ds structure for x86 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * shmid64_ds was originally meant to be architecture specific, but
- * everyone just ended up making identical copies without specific
- * optimizations, so we may just as well all use the same one.
- *
- * 64 bit architectures typically define a 64 bit __kernel_time_t,
- * so they do not need the first two padding words.
- * On big-endian systems, the padding is in the wrong place.
- *
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct shmid64_ds {
-	struct ipc64_perm shm_perm;	/* operation perms */
-	size_t shm_segsz;	/* size of segment (bytes) */
-	__kernel_time_t shm_atime;	/* last attach time */
-#if __BITS_PER_LONG != 64
-	unsigned long __unused1;
-#endif
-	__kernel_time_t shm_dtime;	/* last detach time */
-#if __BITS_PER_LONG != 64
-	unsigned long __unused2;
-#endif
-	__kernel_time_t shm_ctime;	/* last change time */
-#if __BITS_PER_LONG != 64
-	unsigned long __unused3;
-#endif
-	__kernel_pid_t shm_cpid;	/* pid of creator */
-	__kernel_pid_t shm_lpid;	/* pid of last operator */
-	unsigned long shm_nattch;	/* no. of current attaches */
-	unsigned long __unused4;
-	unsigned long __unused5;
-};
-
-struct shminfo64 {
-	unsigned long shmmax;
-	unsigned long shmmin;
-	unsigned long shmmni;
-	unsigned long shmseg;
-	unsigned long shmall;
-	unsigned long __unused1;
-	unsigned long __unused2;
-	unsigned long __unused3;
-	unsigned long __unused4;
-};
-
-#endif /* __ASM_GENERIC_SHMBUF_H */
+#include <asm-generic/shmbuf.h>
diff --git a/arch/x86/include/asm/socket.h b/arch/x86/include/asm/socket.h
index d4ae42a06a2..6b71384b9d8 100644
--- a/arch/x86/include/asm/socket.h
+++ b/arch/x86/include/asm/socket.h
@@ -1,60 +1 @@
-#ifndef __ASM_GENERIC_SOCKET_H
-#define __ASM_GENERIC_SOCKET_H
-
-#include <asm/sockios.h>
-
-/* For setsockopt(2) */
-#define SOL_SOCKET 1
-
-#define SO_DEBUG 1
-#define SO_REUSEADDR 2
-#define SO_TYPE 3
-#define SO_ERROR 4
-#define SO_DONTROUTE 5
-#define SO_BROADCAST 6
-#define SO_SNDBUF 7
-#define SO_RCVBUF 8
-#define SO_SNDBUFFORCE 32
-#define SO_RCVBUFFORCE 33
-#define SO_KEEPALIVE 9
-#define SO_OOBINLINE 10
-#define SO_NO_CHECK 11
-#define SO_PRIORITY 12
-#define SO_LINGER 13
-#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
-#define SO_PASSCRED 16
-#define SO_PEERCRED 17
-#define SO_RCVLOWAT 18
-#define SO_SNDLOWAT 19
-#define SO_RCVTIMEO 20
-#define SO_SNDTIMEO 21
-
-/* Security levels - as per NRL IPv6 - don't actually do anything */
-#define SO_SECURITY_AUTHENTICATION 22
-#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
-#define SO_SECURITY_ENCRYPTION_NETWORK 24
-
-#define SO_BINDTODEVICE 25
-
-/* Socket filtering */
-#define SO_ATTACH_FILTER 26
-#define SO_DETACH_FILTER 27
-
-#define SO_PEERNAME 28
-#define SO_TIMESTAMP 29
-#define SCM_TIMESTAMP SO_TIMESTAMP
-
-#define SO_ACCEPTCONN 30
-
-#define SO_PEERSEC 31
-#define SO_PASSSEC 34
-#define SO_TIMESTAMPNS 35
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
-
-#define SO_MARK 36
-
-#define SO_TIMESTAMPING 37
-#define SCM_TIMESTAMPING SO_TIMESTAMPING
-
-#endif /* __ASM_GENERIC_SOCKET_H */
+#include <asm-generic/socket.h>
diff --git a/arch/x86/include/asm/sockios.h b/arch/x86/include/asm/sockios.h
index 9a61a369b90..def6d4746ee 100644
--- a/arch/x86/include/asm/sockios.h
+++ b/arch/x86/include/asm/sockios.h
@@ -1,13 +1 @@
-#ifndef __ASM_GENERIC_SOCKIOS_H
-#define __ASM_GENERIC_SOCKIOS_H
-
-/* Socket-level I/O control calls.
*/ -#define FIOSETOWN 0x8901 -#define SIOCSPGRP 0x8902 -#define FIOGETOWN 0x8903 -#define SIOCGPGRP 0x8904 -#define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ -#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ - -#endif /* __ASM_GENERIC_SOCKIOS_H */ +#include diff --git a/arch/x86/include/asm/termbits.h b/arch/x86/include/asm/termbits.h index 1c9773d48cb..3935b106de7 100644 --- a/arch/x86/include/asm/termbits.h +++ b/arch/x86/include/asm/termbits.h @@ -1,198 +1 @@ -#ifndef __ASM_GENERIC_TERMBITS_H -#define __ASM_GENERIC_TERMBITS_H - -#include - -typedef unsigned char cc_t; -typedef unsigned int speed_t; -typedef unsigned int tcflag_t; - -#define NCCS 19 -struct termios { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_line; /* line discipline */ - cc_t c_cc[NCCS]; /* control characters */ -}; - -struct termios2 { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_line; /* line discipline */ - cc_t c_cc[NCCS]; /* control characters */ - speed_t c_ispeed; /* input speed */ - speed_t c_ospeed; /* output speed */ -}; - -struct ktermios { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_line; /* line discipline */ - cc_t c_cc[NCCS]; /* control characters */ - speed_t c_ispeed; /* input speed */ - speed_t c_ospeed; /* output speed */ -}; - -/* c_cc characters */ -#define VINTR 0 -#define VQUIT 1 -#define VERASE 2 -#define VKILL 3 -#define VEOF 4 -#define VTIME 5 -#define VMIN 6 -#define VSWTC 7 -#define VSTART 8 -#define VSTOP 9 -#define VSUSP 10 -#define VEOL 11 -#define VREPRINT 12 -#define VDISCARD 13 -#define VWERASE 14 -#define VLNEXT 15 -#define VEOL2 16 - -/* c_iflag bits */ -#define IGNBRK 0000001 -#define BRKINT 0000002 -#define IGNPAR 0000004 -#define PARMRK 0000010 -#define INPCK 0000020 -#define ISTRIP 0000040 -#define INLCR 0000100 -#define IGNCR 0000200 -#define ICRNL 0000400 -#define IUCLC 0001000 -#define IXON 0002000 -#define IXANY 0004000 -#define IXOFF 0010000 -#define IMAXBEL 0020000 -#define IUTF8 0040000 - -/* c_oflag bits */ -#define OPOST 0000001 -#define OLCUC 0000002 -#define ONLCR 0000004 -#define OCRNL 0000010 -#define ONOCR 0000020 -#define ONLRET 0000040 -#define OFILL 0000100 -#define OFDEL 0000200 -#define NLDLY 0000400 -#define NL0 0000000 -#define NL1 0000400 -#define CRDLY 0003000 -#define CR0 0000000 -#define CR1 0001000 -#define CR2 0002000 -#define CR3 0003000 -#define TABDLY 0014000 -#define TAB0 0000000 -#define TAB1 0004000 -#define TAB2 0010000 -#define TAB3 0014000 -#define XTABS 0014000 -#define BSDLY 0020000 -#define BS0 0000000 -#define BS1 0020000 -#define VTDLY 0040000 -#define VT0 0000000 -#define VT1 0040000 -#define FFDLY 0100000 -#define FF0 0000000 -#define FF1 0100000 - -/* c_cflag bit meaning */ -#define CBAUD 0010017 -#define B0 0000000 /* hang up */ -#define B50 0000001 -#define B75 0000002 -#define B110 0000003 -#define B134 0000004 -#define B150 0000005 -#define B200 0000006 -#define B300 0000007 -#define B600 0000010 -#define B1200 0000011 -#define B1800 0000012 -#define B2400 0000013 -#define B4800 0000014 -#define B9600 0000015 -#define B19200 0000016 -#define B38400 0000017 -#define EXTA B19200 -#define 
EXTB B38400 -#define CSIZE 0000060 -#define CS5 0000000 -#define CS6 0000020 -#define CS7 0000040 -#define CS8 0000060 -#define CSTOPB 0000100 -#define CREAD 0000200 -#define PARENB 0000400 -#define PARODD 0001000 -#define HUPCL 0002000 -#define CLOCAL 0004000 -#define CBAUDEX 0010000 -#define BOTHER 0010000 -#define B57600 0010001 -#define B115200 0010002 -#define B230400 0010003 -#define B460800 0010004 -#define B500000 0010005 -#define B576000 0010006 -#define B921600 0010007 -#define B1000000 0010010 -#define B1152000 0010011 -#define B1500000 0010012 -#define B2000000 0010013 -#define B2500000 0010014 -#define B3000000 0010015 -#define B3500000 0010016 -#define B4000000 0010017 -#define CIBAUD 002003600000 /* input baud rate */ -#define CMSPAR 010000000000 /* mark or space (stick) parity */ -#define CRTSCTS 020000000000 /* flow control */ - -#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */ - -/* c_lflag bits */ -#define ISIG 0000001 -#define ICANON 0000002 -#define XCASE 0000004 -#define ECHO 0000010 -#define ECHOE 0000020 -#define ECHOK 0000040 -#define ECHONL 0000100 -#define NOFLSH 0000200 -#define TOSTOP 0000400 -#define ECHOCTL 0001000 -#define ECHOPRT 0002000 -#define ECHOKE 0004000 -#define FLUSHO 0010000 -#define PENDIN 0040000 -#define IEXTEN 0100000 - -/* tcflow() and TCXONC use these */ -#define TCOOFF 0 -#define TCOON 1 -#define TCIOFF 2 -#define TCION 3 - -/* tcflush() and TCFLSH use these */ -#define TCIFLUSH 0 -#define TCOFLUSH 1 -#define TCIOFLUSH 2 - -/* tcsetattr uses these */ -#define TCSANOW 0 -#define TCSADRAIN 1 -#define TCSAFLUSH 2 - -#endif /* __ASM_GENERIC_TERMBITS_H */ +#include diff --git a/arch/x86/include/asm/termios.h b/arch/x86/include/asm/termios.h index d0922adc56d..280d78a9d96 100644 --- a/arch/x86/include/asm/termios.h +++ b/arch/x86/include/asm/termios.h @@ -1,154 +1 @@ -#ifndef _ASM_GENERIC_TERMIOS_H -#define _ASM_GENERIC_TERMIOS_H -/* - * Most architectures have straight copies of the x86 code, with - * varying levels of bug fixes on top. Usually it's a good idea - * to use this generic version instead, but be careful to avoid - * ABI changes. - * New architectures should not provide their own version. - */ - -#include -#include - -struct winsize { - unsigned short ws_row; - unsigned short ws_col; - unsigned short ws_xpixel; - unsigned short ws_ypixel; -}; - -#define NCC 8 -struct termio { - unsigned short c_iflag; /* input mode flags */ - unsigned short c_oflag; /* output mode flags */ - unsigned short c_cflag; /* control mode flags */ - unsigned short c_lflag; /* local mode flags */ - unsigned char c_line; /* line discipline */ - unsigned char c_cc[NCC]; /* control characters */ -}; - -/* modem lines */ -#define TIOCM_LE 0x001 -#define TIOCM_DTR 0x002 -#define TIOCM_RTS 0x004 -#define TIOCM_ST 0x008 -#define TIOCM_SR 0x010 -#define TIOCM_CTS 0x020 -#define TIOCM_CAR 0x040 -#define TIOCM_RNG 0x080 -#define TIOCM_DSR 0x100 -#define TIOCM_CD TIOCM_CAR -#define TIOCM_RI TIOCM_RNG -#define TIOCM_OUT1 0x2000 -#define TIOCM_OUT2 0x4000 -#define TIOCM_LOOP 0x8000 - -/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ - -#ifdef __KERNEL__ - -#include - -/* intr=^C quit=^\ erase=del kill=^U - eof=^D vtime=\0 vmin=\1 sxtc=\0 - start=^Q stop=^S susp=^Z eol=\0 - reprint=^R discard=^U werase=^W lnext=^V - eol2=\0 -*/ -#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" - -/* - * Translate a "termio" structure into a "termios". Ugh. 
- */ -static inline int user_termio_to_kernel_termios(struct ktermios *termios, - const struct termio __user *termio) -{ - unsigned short tmp; - - if (get_user(tmp, &termio->c_iflag) < 0) - goto fault; - termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp; - - if (get_user(tmp, &termio->c_oflag) < 0) - goto fault; - termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp; - - if (get_user(tmp, &termio->c_cflag) < 0) - goto fault; - termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp; - - if (get_user(tmp, &termio->c_lflag) < 0) - goto fault; - termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp; - - if (get_user(termios->c_line, &termio->c_line) < 0) - goto fault; - - if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0) - goto fault; - - return 0; - - fault: - return -EFAULT; -} - -/* - * Translate a "termios" structure into a "termio". Ugh. - */ -static inline int kernel_termios_to_user_termio(struct termio __user *termio, - struct ktermios *termios) -{ - if (put_user(termios->c_iflag, &termio->c_iflag) < 0 || - put_user(termios->c_oflag, &termio->c_oflag) < 0 || - put_user(termios->c_cflag, &termio->c_cflag) < 0 || - put_user(termios->c_lflag, &termio->c_lflag) < 0 || - put_user(termios->c_line, &termio->c_line) < 0 || - copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0) - return -EFAULT; - - return 0; -} - -#ifdef TCGETS2 -static inline int user_termios_to_kernel_termios(struct ktermios *k, - struct termios2 __user *u) -{ - return copy_from_user(k, u, sizeof(struct termios2)); -} - -static inline int kernel_termios_to_user_termios(struct termios2 __user *u, - struct ktermios *k) -{ - return copy_to_user(u, k, sizeof(struct termios2)); -} - -static inline int user_termios_to_kernel_termios_1(struct ktermios *k, - struct termios __user *u) -{ - return copy_from_user(k, u, sizeof(struct termios)); -} - -static inline int kernel_termios_to_user_termios_1(struct termios __user *u, - struct ktermios *k) -{ - return copy_to_user(u, k, sizeof(struct termios)); -} -#else /* TCGETS2 */ -static inline int user_termios_to_kernel_termios(struct ktermios *k, - struct termios __user *u) -{ - return copy_from_user(k, u, sizeof(struct termios)); -} - -static inline int kernel_termios_to_user_termios(struct termios __user *u, - struct ktermios *k) -{ - return copy_to_user(u, k, sizeof(struct termios)); -} -#endif /* TCGETS2 */ - -#endif /* __KERNEL__ */ - -#endif /* _ASM_GENERIC_TERMIOS_H */ +#include diff --git a/arch/x86/include/asm/types.h b/arch/x86/include/asm/types.h index f2fe528e968..df1da20f453 100644 --- a/arch/x86/include/asm/types.h +++ b/arch/x86/include/asm/types.h @@ -3,7 +3,7 @@ #define dma_addr_t dma_addr_t -#include +#include #ifdef __KERNEL__ #ifndef __ASSEMBLY__ diff --git a/arch/x86/include/asm/ucontext.h b/arch/x86/include/asm/ucontext.h index 7cfc436f86d..b7c29c8017f 100644 --- a/arch/x86/include/asm/ucontext.h +++ b/arch/x86/include/asm/ucontext.h @@ -7,6 +7,6 @@ * sigcontext struct (uc_mcontext). */ -#include +#include #endif /* _ASM_X86_UCONTEXT_H */ -- cgit v1.2.3-70-g09d2 From 6bc9a3966f0395419b09b2ec90f89f7f00341b37 Mon Sep 17 00:00:00 2001 From: Chen Liqin Date: Fri, 12 Jun 2009 22:01:00 +0800 Subject: score: Add support for Sunplus S+core architecture This is the complete set of new arch Score's files for linux. Score instruction set support 16bits, 32bits and 64bits instruction, Score SOC had been used in game machine and LCD TV. 
Signed-off-by: Chen Liqin Signed-off-by: Arnd Bergmann --- arch/score/Kconfig | 141 ++++++ arch/score/Kconfig.debug | 37 ++ arch/score/Makefile | 43 ++ arch/score/boot/Makefile | 15 + arch/score/configs/spct6600_defconfig | 717 +++++++++++++++++++++++++++++ arch/score/include/asm/Kbuild | 3 + arch/score/include/asm/asmmacro.h | 161 +++++++ arch/score/include/asm/atomic.h | 6 + arch/score/include/asm/auxvec.h | 4 + arch/score/include/asm/bitops.h | 16 + arch/score/include/asm/bitsperlong.h | 6 + arch/score/include/asm/bug.h | 6 + arch/score/include/asm/bugs.h | 6 + arch/score/include/asm/byteorder.h | 6 + arch/score/include/asm/cache.h | 7 + arch/score/include/asm/cacheflush.h | 47 ++ arch/score/include/asm/checksum.h | 235 ++++++++++ arch/score/include/asm/cputime.h | 6 + arch/score/include/asm/current.h | 6 + arch/score/include/asm/delay.h | 21 + arch/score/include/asm/device.h | 6 + arch/score/include/asm/div64.h | 6 + arch/score/include/asm/dma-mapping.h | 6 + arch/score/include/asm/dma.h | 8 + arch/score/include/asm/elf.h | 99 ++++ arch/score/include/asm/emergency-restart.h | 6 + arch/score/include/asm/errno.h | 7 + arch/score/include/asm/fcntl.h | 6 + arch/score/include/asm/fixmap.h | 82 ++++ arch/score/include/asm/ftrace.h | 4 + arch/score/include/asm/futex.h | 6 + arch/score/include/asm/hardirq.h | 6 + arch/score/include/asm/hw_irq.h | 4 + arch/score/include/asm/io.h | 9 + arch/score/include/asm/ioctl.h | 6 + arch/score/include/asm/ioctls.h | 6 + arch/score/include/asm/ipcbuf.h | 6 + arch/score/include/asm/irq.h | 33 ++ arch/score/include/asm/irq_regs.h | 6 + arch/score/include/asm/irqflags.h | 111 +++++ arch/score/include/asm/kdebug.h | 6 + arch/score/include/asm/kmap_types.h | 6 + arch/score/include/asm/linkage.h | 4 + arch/score/include/asm/local.h | 6 + arch/score/include/asm/mman.h | 6 + arch/score/include/asm/mmu.h | 6 + arch/score/include/asm/mmu_context.h | 113 +++++ arch/score/include/asm/module.h | 39 ++ arch/score/include/asm/msgbuf.h | 6 + arch/score/include/asm/mutex.h | 6 + arch/score/include/asm/page.h | 92 ++++ arch/score/include/asm/param.h | 6 + arch/score/include/asm/pci.h | 4 + arch/score/include/asm/percpu.h | 6 + arch/score/include/asm/pgalloc.h | 83 ++++ arch/score/include/asm/pgtable-bits.h | 23 + arch/score/include/asm/pgtable.h | 267 +++++++++++ arch/score/include/asm/poll.h | 6 + arch/score/include/asm/posix_types.h | 6 + arch/score/include/asm/processor.h | 106 +++++ arch/score/include/asm/ptrace.h | 87 ++++ arch/score/include/asm/resource.h | 6 + arch/score/include/asm/scatterlist.h | 6 + arch/score/include/asm/scoreregs.h | 51 ++ arch/score/include/asm/sections.h | 6 + arch/score/include/asm/segment.h | 21 + arch/score/include/asm/sembuf.h | 6 + arch/score/include/asm/setup.h | 40 ++ arch/score/include/asm/shmbuf.h | 6 + arch/score/include/asm/shmparam.h | 6 + arch/score/include/asm/sigcontext.h | 22 + arch/score/include/asm/siginfo.h | 6 + arch/score/include/asm/signal.h | 6 + arch/score/include/asm/socket.h | 6 + arch/score/include/asm/sockios.h | 6 + arch/score/include/asm/stat.h | 6 + arch/score/include/asm/statfs.h | 6 + arch/score/include/asm/string.h | 8 + arch/score/include/asm/swab.h | 6 + arch/score/include/asm/syscalls.h | 9 + arch/score/include/asm/system.h | 90 ++++ arch/score/include/asm/termbits.h | 6 + arch/score/include/asm/termios.h | 6 + arch/score/include/asm/thread_info.h | 103 +++++ arch/score/include/asm/timex.h | 8 + arch/score/include/asm/tlb.h | 17 + arch/score/include/asm/tlbflush.h | 142 ++++++ 
arch/score/include/asm/topology.h | 6 + arch/score/include/asm/types.h | 6 + arch/score/include/asm/uaccess.h | 27 ++ arch/score/include/asm/unaligned.h | 6 + arch/score/include/asm/unistd.h | 8 + arch/score/include/asm/user.h | 4 + arch/score/kernel/Makefile | 10 + arch/score/kernel/asm-offsets.c | 216 +++++++++ arch/score/kernel/entry.S | 542 ++++++++++++++++++++++ arch/score/kernel/head.S | 70 +++ arch/score/kernel/init_task.c | 49 ++ arch/score/kernel/irq.c | 135 ++++++ arch/score/kernel/module.c | 164 +++++++ arch/score/kernel/process.c | 165 +++++++ arch/score/kernel/ptrace.c | 465 +++++++++++++++++++ arch/score/kernel/setup.c | 157 +++++++ arch/score/kernel/signal.c | 355 ++++++++++++++ arch/score/kernel/sys_score.c | 147 ++++++ arch/score/kernel/time.c | 99 ++++ arch/score/kernel/traps.c | 349 ++++++++++++++ arch/score/kernel/vmlinux.lds.S | 148 ++++++ arch/score/lib/Makefile | 8 + arch/score/lib/ashldi3.c | 46 ++ arch/score/lib/ashrdi3.c | 48 ++ arch/score/lib/checksum.S | 255 ++++++++++ arch/score/lib/checksum_copy.c | 52 +++ arch/score/lib/cmpdi2.c | 44 ++ arch/score/lib/libgcc.h | 37 ++ arch/score/lib/lshrdi3.c | 47 ++ arch/score/lib/string.S | 196 ++++++++ arch/score/lib/ucmpdi2.c | 38 ++ arch/score/mm/Makefile | 6 + arch/score/mm/cache.c | 308 +++++++++++++ arch/score/mm/extable.c | 38 ++ arch/score/mm/fault.c | 235 ++++++++++ arch/score/mm/init.c | 173 +++++++ arch/score/mm/pgtable.c | 60 +++ arch/score/mm/tlb-miss.S | 199 ++++++++ arch/score/mm/tlb-score.c | 251 ++++++++++ 126 files changed, 8566 insertions(+) create mode 100644 arch/score/Kconfig create mode 100644 arch/score/Kconfig.debug create mode 100644 arch/score/Makefile create mode 100644 arch/score/boot/Makefile create mode 100644 arch/score/configs/spct6600_defconfig create mode 100644 arch/score/include/asm/Kbuild create mode 100644 arch/score/include/asm/asmmacro.h create mode 100644 arch/score/include/asm/atomic.h create mode 100644 arch/score/include/asm/auxvec.h create mode 100644 arch/score/include/asm/bitops.h create mode 100644 arch/score/include/asm/bitsperlong.h create mode 100644 arch/score/include/asm/bug.h create mode 100644 arch/score/include/asm/bugs.h create mode 100644 arch/score/include/asm/byteorder.h create mode 100644 arch/score/include/asm/cache.h create mode 100644 arch/score/include/asm/cacheflush.h create mode 100644 arch/score/include/asm/checksum.h create mode 100644 arch/score/include/asm/cputime.h create mode 100644 arch/score/include/asm/current.h create mode 100644 arch/score/include/asm/delay.h create mode 100644 arch/score/include/asm/device.h create mode 100644 arch/score/include/asm/div64.h create mode 100644 arch/score/include/asm/dma-mapping.h create mode 100644 arch/score/include/asm/dma.h create mode 100644 arch/score/include/asm/elf.h create mode 100644 arch/score/include/asm/emergency-restart.h create mode 100644 arch/score/include/asm/errno.h create mode 100644 arch/score/include/asm/fcntl.h create mode 100644 arch/score/include/asm/fixmap.h create mode 100644 arch/score/include/asm/ftrace.h create mode 100644 arch/score/include/asm/futex.h create mode 100644 arch/score/include/asm/hardirq.h create mode 100644 arch/score/include/asm/hw_irq.h create mode 100644 arch/score/include/asm/io.h create mode 100644 arch/score/include/asm/ioctl.h create mode 100644 arch/score/include/asm/ioctls.h create mode 100644 arch/score/include/asm/ipcbuf.h create mode 100644 arch/score/include/asm/irq.h create mode 100644 arch/score/include/asm/irq_regs.h create mode 100644 
arch/score/include/asm/irqflags.h create mode 100644 arch/score/include/asm/kdebug.h create mode 100644 arch/score/include/asm/kmap_types.h create mode 100644 arch/score/include/asm/linkage.h create mode 100644 arch/score/include/asm/local.h create mode 100644 arch/score/include/asm/mman.h create mode 100644 arch/score/include/asm/mmu.h create mode 100644 arch/score/include/asm/mmu_context.h create mode 100644 arch/score/include/asm/module.h create mode 100644 arch/score/include/asm/msgbuf.h create mode 100644 arch/score/include/asm/mutex.h create mode 100644 arch/score/include/asm/page.h create mode 100644 arch/score/include/asm/param.h create mode 100644 arch/score/include/asm/pci.h create mode 100644 arch/score/include/asm/percpu.h create mode 100644 arch/score/include/asm/pgalloc.h create mode 100644 arch/score/include/asm/pgtable-bits.h create mode 100644 arch/score/include/asm/pgtable.h create mode 100644 arch/score/include/asm/poll.h create mode 100644 arch/score/include/asm/posix_types.h create mode 100644 arch/score/include/asm/processor.h create mode 100644 arch/score/include/asm/ptrace.h create mode 100644 arch/score/include/asm/resource.h create mode 100644 arch/score/include/asm/scatterlist.h create mode 100644 arch/score/include/asm/scoreregs.h create mode 100644 arch/score/include/asm/sections.h create mode 100644 arch/score/include/asm/segment.h create mode 100644 arch/score/include/asm/sembuf.h create mode 100644 arch/score/include/asm/setup.h create mode 100644 arch/score/include/asm/shmbuf.h create mode 100644 arch/score/include/asm/shmparam.h create mode 100644 arch/score/include/asm/sigcontext.h create mode 100644 arch/score/include/asm/siginfo.h create mode 100644 arch/score/include/asm/signal.h create mode 100644 arch/score/include/asm/socket.h create mode 100644 arch/score/include/asm/sockios.h create mode 100644 arch/score/include/asm/stat.h create mode 100644 arch/score/include/asm/statfs.h create mode 100644 arch/score/include/asm/string.h create mode 100644 arch/score/include/asm/swab.h create mode 100644 arch/score/include/asm/syscalls.h create mode 100644 arch/score/include/asm/system.h create mode 100644 arch/score/include/asm/termbits.h create mode 100644 arch/score/include/asm/termios.h create mode 100644 arch/score/include/asm/thread_info.h create mode 100644 arch/score/include/asm/timex.h create mode 100644 arch/score/include/asm/tlb.h create mode 100644 arch/score/include/asm/tlbflush.h create mode 100644 arch/score/include/asm/topology.h create mode 100644 arch/score/include/asm/types.h create mode 100644 arch/score/include/asm/uaccess.h create mode 100644 arch/score/include/asm/unaligned.h create mode 100644 arch/score/include/asm/unistd.h create mode 100644 arch/score/include/asm/user.h create mode 100644 arch/score/kernel/Makefile create mode 100644 arch/score/kernel/asm-offsets.c create mode 100644 arch/score/kernel/entry.S create mode 100644 arch/score/kernel/head.S create mode 100644 arch/score/kernel/init_task.c create mode 100644 arch/score/kernel/irq.c create mode 100644 arch/score/kernel/module.c create mode 100644 arch/score/kernel/process.c create mode 100644 arch/score/kernel/ptrace.c create mode 100644 arch/score/kernel/setup.c create mode 100644 arch/score/kernel/signal.c create mode 100644 arch/score/kernel/sys_score.c create mode 100644 arch/score/kernel/time.c create mode 100644 arch/score/kernel/traps.c create mode 100644 arch/score/kernel/vmlinux.lds.S create mode 100644 arch/score/lib/Makefile create mode 100644 
arch/score/lib/ashldi3.c create mode 100644 arch/score/lib/ashrdi3.c create mode 100644 arch/score/lib/checksum.S create mode 100644 arch/score/lib/checksum_copy.c create mode 100644 arch/score/lib/cmpdi2.c create mode 100644 arch/score/lib/libgcc.h create mode 100644 arch/score/lib/lshrdi3.c create mode 100644 arch/score/lib/string.S create mode 100644 arch/score/lib/ucmpdi2.c create mode 100644 arch/score/mm/Makefile create mode 100644 arch/score/mm/cache.c create mode 100644 arch/score/mm/extable.c create mode 100644 arch/score/mm/fault.c create mode 100644 arch/score/mm/init.c create mode 100644 arch/score/mm/pgtable.c create mode 100644 arch/score/mm/tlb-miss.S create mode 100644 arch/score/mm/tlb-score.c (limited to 'arch') diff --git a/arch/score/Kconfig b/arch/score/Kconfig new file mode 100644 index 00000000000..55d413e6dcf --- /dev/null +++ b/arch/score/Kconfig @@ -0,0 +1,141 @@ +# For a description of the syntax of this configuration file, +# see Documentation/kbuild/kconfig-language.txt. + +mainmenu "Linux/SCORE Kernel Configuration" + +menu "Machine selection" + +choice + prompt "System type" + default MACH_SPCT6600 + +config ARCH_SCORE7 + bool "SCORE7 processor" + select SYS_SUPPORTS_32BIT_KERNEL + select CPU_SCORE7 + select GENERIC_HAS_IOMAP + +config MACH_SPCT6600 + bool "SPCT6600 series based machines" + select SYS_SUPPORTS_32BIT_KERNEL + select CPU_SCORE7 + select GENERIC_HAS_IOMAP + +config SCORE_SIM + bool "Score simulator" + select SYS_SUPPORTS_32BIT_KERNEL + select CPU_SCORE7 + select GENERIC_HAS_IOMAP +endchoice + +endmenu + +config CPU_SCORE7 + bool + +config GENERIC_IOMAP + def_bool y + +config NO_DMA + bool + default y + +config RWSEM_GENERIC_SPINLOCK + def_bool y + +config GENERIC_FIND_NEXT_BIT + def_bool y + +config GENERIC_HWEIGHT + def_bool y + +config GENERIC_CALIBRATE_DELAY + def_bool y + +config GENERIC_CLOCKEVENTS + def_bool y + +config GENERIC_TIME + def_bool y + +config SCHED_NO_NO_OMIT_FRAME_POINTER + def_bool y + +config GENERIC_HARDIRQS_NO__DO_IRQ + def_bool y + +config GENERIC_SYSCALL_TABLE + def_bool y + +config SCORE_L1_CACHE_SHIFT + int + default "4" + +menu "Kernel type" + +config 32BIT + def_bool y + +config GENERIC_HARDIRQS + def_bool y + +config ARCH_FLATMEM_ENABLE + def_bool y + +config ARCH_POPULATES_NODE_MAP + def_bool y + +source "mm/Kconfig" + +config MEMORY_START + hex + default 0xa0000000 + +source "kernel/time/Kconfig" +source "kernel/Kconfig.hz" +source "kernel/Kconfig.preempt" + +endmenu + +config RWSEM_GENERIC_SPINLOCK + def_bool y + +config LOCKDEP_SUPPORT + def_bool y + +config STACKTRACE_SUPPORT + def_bool y + +source "init/Kconfig" + +config PROBE_INITRD_HEADER + bool "Probe initrd header created by addinitrd" + depends on BLK_DEV_INITRD + help + Probe initrd header at the last page of kernel image. + Say Y here if you are using arch/score/boot/addinitrd.c to + add initrd or initramfs image to the kernel image. + Otherwise, say N. 
+ +config MMU + def_bool y + +menu "Executable file formats" + +source "fs/Kconfig.binfmt" + +endmenu + +source "net/Kconfig" + +source "drivers/Kconfig" + +source "fs/Kconfig" + +source "arch/score/Kconfig.debug" + +source "security/Kconfig" + +source "crypto/Kconfig" + +source "lib/Kconfig" diff --git a/arch/score/Kconfig.debug b/arch/score/Kconfig.debug new file mode 100644 index 00000000000..451ed54ce64 --- /dev/null +++ b/arch/score/Kconfig.debug @@ -0,0 +1,37 @@ +menu "Kernel hacking" + +config TRACE_IRQFLAGS_SUPPORT + bool + default y + +source "lib/Kconfig.debug" + +config CMDLINE + string "Default kernel command string" + default "" + help + On some platforms, there is currently no way for the boot loader to + pass arguments to the kernel. For these platforms, you can supply + some command-line options at build time by entering them here. In + other cases you can specify kernel args so that you don't have + to set them up in board prom initialization routines. + +config DEBUG_STACK_USAGE + bool "Enable stack utilization instrumentation" + depends on DEBUG_KERNEL + help + Enables the display of the minimum amount of free stack which each + task has ever had available in the sysrq-T and sysrq-P debug output. + + This option will slow down process creation somewhat. + +config RUNTIME_DEBUG + bool "Enable run-time debugging" + depends on DEBUG_KERNEL + help + If you say Y here, some debugging macros will do run-time checking. + If you say N here, those macros will mostly turn to no-ops. See + include/asm-score/debug.h for debuging macros. + If unsure, say N. + +endmenu diff --git a/arch/score/Makefile b/arch/score/Makefile new file mode 100644 index 00000000000..68e0cd06d5c --- /dev/null +++ b/arch/score/Makefile @@ -0,0 +1,43 @@ +# +# arch/score/Makefile +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# + +KBUILD_DEFCONFIG := spct6600_defconfig +CROSS_COMPILE := score-linux- + +# +# CPU-dependent compiler/assembler options for optimization. +# +cflags-y += -G0 -pipe -mel -mnhwloop -D__SCOREEL__ \ + -D__linux__ -ffunction-sections -ffreestanding + +# +# Board-dependent options and extra files +# +KBUILD_AFLAGS += $(cflags-y) +KBUILD_CFLAGS += $(cflags-y) +MODFLAGS += -mlong-calls +LDFLAGS += --oformat elf32-littlescore +LDFLAGS_vmlinux += -G0 -static -nostdlib + +head-y := arch/score/kernel/head.o +libs-y += arch/score/lib/ +core-y += arch/score/kernel/ arch/score/mm/ + +boot := arch/score/boot + +vmlinux.bin: vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + +archclean: + @$(MAKE) $(clean)=$(boot) + +define archhelp + echo ' vmlinux.bin - Raw binary boot image' + echo + echo ' These will be default as apropriate for a configured platform.' +endef diff --git a/arch/score/boot/Makefile b/arch/score/boot/Makefile new file mode 100644 index 00000000000..0c5fbd0fb69 --- /dev/null +++ b/arch/score/boot/Makefile @@ -0,0 +1,15 @@ +# +# arch/score/boot/Makefile +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. 
+# + +targets := vmlinux.bin + +$(obj)/vmlinux.bin: vmlinux FORCE + $(call if_changed,objcopy) + @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' + +clean-files += vmlinux.bin diff --git a/arch/score/configs/spct6600_defconfig b/arch/score/configs/spct6600_defconfig new file mode 100644 index 00000000000..e064943b13d --- /dev/null +++ b/arch/score/configs/spct6600_defconfig @@ -0,0 +1,717 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.30-rc5 +# Fri Jun 12 18:57:07 2009 +# + +# +# Machine selection +# +# CONFIG_ARCH_SCORE7 is not set +CONFIG_MACH_SPCT6600=y +# CONFIG_SCORE_SIM is not set +CONFIG_CPU_SCORE7=y +CONFIG_GENERIC_IOMAP=y +CONFIG_NO_DMA=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_TIME=y +CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y +CONFIG_GENERIC_SYSCALL_TABLE=y +CONFIG_SCORE_L1_CACHE_SHIFT=4 + +# +# Kernel type +# +CONFIG_32BIT=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_ARCH_FLATMEM_ENABLE=y +CONFIG_ARCH_POPULATES_NODE_MAP=y +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_VIRT_TO_BUS=y +CONFIG_UNEVICTABLE_LRU=y +CONFIG_HAVE_MLOCK=y +CONFIG_HAVE_MLOCKED_PAGE_BIT=y +CONFIG_MEMORY_START=0xa0000000 +# CONFIG_NO_HZ is not set +# CONFIG_HIGH_RES_TIMERS is not set +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_HZ_100=y +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=100 +# CONFIG_SCHED_HRTICK is not set +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_BSD_PROCESS_ACCT=y +# CONFIG_BSD_PROCESS_ACCT_V3 is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_CLASSIC_RCU=y +# CONFIG_TREE_RCU is not set +# CONFIG_PREEMPT_RCU is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_PREEMPT_RCU_TRACE is not set +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=12 +# CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +# CONFIG_RELAY is not set +# CONFIG_NAMESPACES is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_EMBEDDED=y +CONFIG_SYSCTL_SYSCALL=y +# CONFIG_KALLSYMS is not set +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_HOTPLUG is not set +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_COMPAT_BRK=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +# 
CONFIG_MARKERS is not set +# CONFIG_SLOW_WORK is not set +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBD=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_AS is not set +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +# CONFIG_PROBE_INITRD_HEADER is not set +CONFIG_MMU=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_HAVE_AOUT is not set +CONFIG_BINFMT_MISC=y +CONFIG_NET=y + +# +# Networking options +# +# CONFIG_PACKET is not set +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_IP_MROUTE is not set +CONFIG_ARPD=y +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +# CONFIG_INET_TUNNEL is not set +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +# CONFIG_IPV6 is not set +# CONFIG_NETLABEL is not set +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_CONNECTOR is not set +# CONFIG_MTD is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y +# CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=1 +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_XIP is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MISC_DEVICES is not set + +# +# SCSI device support +# +# 
CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_MD is not set +CONFIG_NETDEVICES=y +CONFIG_COMPAT_NET_DEV_OPS=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set +# CONFIG_VETH is not set +# CONFIG_NET_ETHERNET is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set + +# +# Wireless LAN +# +# CONFIG_WLAN_PRE80211 is not set +# CONFIG_WLAN_80211 is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_VT_HW_CONSOLE_BINDING is not set +CONFIG_DEVKMEM=y +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_N_HDLC is not set +# CONFIG_RISCOM8 is not set +# CONFIG_SPECIALIX is not set +# CONFIG_RIO is not set +CONFIG_STALDRV=y + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_R3964 is not set +CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=8192 +# CONFIG_TCG_TPM is not set +# CONFIG_I2C is not set +# CONFIG_SPI is not set +# CONFIG_W1 is not set +# CONFIG_POWER_SUPPLY is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_THERMAL_HWMON is not set +# CONFIG_WATCHDOG is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_REGULATOR is not set + +# +# Multimedia devices +# + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +# CONFIG_DAB is not set + +# +# Graphics support +# +# CONFIG_VGASTATE is not set +# CONFIG_VIDEO_OUTPUT_CONTROL is not set +# CONFIG_FB is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +# CONFIG_SOUND is not set +# CONFIG_HID_SUPPORT is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_RTC_CLASS is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +# CONFIG_STAGING is not set + 
+# +# File systems +# +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +# CONFIG_EXT2_FS_SECURITY is not set +# CONFIG_EXT2_FS_XIP is not set +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT3_FS_XATTR=y +CONFIG_EXT3_FS_POSIX_ACL=y +# CONFIG_EXT3_FS_SECURITY is not set +# CONFIG_EXT4_FS is not set +CONFIG_JBD=y +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_FILE_LOCKING=y +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +CONFIG_DNOTIFY=y +CONFIG_INOTIFY=y +CONFIG_INOTIFY_USER=y +# CONFIG_QUOTA is not set +CONFIG_AUTOFS_FS=y +CONFIG_AUTOFS4_FS=y +# CONFIG_FUSE_FS is not set +CONFIG_GENERIC_ACL=y + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_MSDOS_FS is not set +# CONFIG_VFAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +# CONFIG_PROC_PAGE_MONITOR is not set +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFSD=y +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_EXPORTFS=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_RPCSEC_GSS_KRB5=y +# CONFIG_RPCSEC_GSS_SPKM3 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_NLS is not set +# CONFIG_DLM is not set + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +# CONFIG_PRINTK_TIME is not set +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_DEBUG_FS is not set +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_KERNEL is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +CONFIG_TRACING_SUPPORT=y + +# +# Tracers +# +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_CONTEXT_SWITCH_TRACER is not set +# CONFIG_EVENT_TRACER is not set +# CONFIG_BOOT_TRACER is not set +# CONFIG_TRACE_BRANCH_PROFILING is not set +# CONFIG_KMEMTRACE is not set +# CONFIG_WORKQUEUE_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_SAMPLES is not set +CONFIG_CMDLINE="" + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_DEBUG_PROC_KEYS=y +CONFIG_SECURITY=y 
+# CONFIG_SECURITYFS is not set +CONFIG_SECURITY_NETWORK=y +# CONFIG_SECURITY_NETWORK_XFRM is not set +# CONFIG_SECURITY_PATH is not set +CONFIG_SECURITY_FILE_CAPABILITIES=y +CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 +# CONFIG_SECURITY_TOMOYO is not set +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +# CONFIG_CRYPTO_FIPS is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_GF128MUL is not set +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=y +# CONFIG_CRYPTO_AUTHENC is not set +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +CONFIG_CRYPTO_SEQIV=y + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +# CONFIG_CRYPTO_ECB is not set +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +# CONFIG_CRYPTO_HMAC is not set +# CONFIG_CRYPTO_XCBC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=y +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +# CONFIG_CRYPTO_SHA1 is not set +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +# CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_ANUBIS is not set +# CONFIG_CRYPTO_ARC4 is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +# CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set +# CONFIG_BINARY_PRINTF is not set + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_ZLIB_INFLATE=y +CONFIG_DECOMPRESS_GZIP=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_NLATTR=y diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild new file mode 100644 index 00000000000..b367abd4620 --- /dev/null +++ b/arch/score/include/asm/Kbuild @@ -0,0 +1,3 @@ +include include/asm-generic/Kbuild.asm + +header-y += diff --git a/arch/score/include/asm/asmmacro.h b/arch/score/include/asm/asmmacro.h new file mode 100644 index 00000000000..a04a54cea25 --- /dev/null +++ b/arch/score/include/asm/asmmacro.h @@ -0,0 +1,161 @@ +#ifndef _ASM_SCORE_ASMMACRO_H +#define _ASM_SCORE_ASMMACRO_H + +#include + +#ifdef __ASSEMBLY__ + +.macro SAVE_ALL + mfcr r30, cr0 + mv r31, r0 + nop + /* if UMs == 1, change stack. 
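+    In C terms the sequence below is roughly (a sketch only; kernelsp
+    is the kernel-mode stack pointer, UM the user-mode bit saved in cr0):
+
+        sp = (cr0 & UM) ? kernelsp : sp;
+        sp -= PT_SIZE;          after which r0-r29 and the control
+                                registers are spilled into the frame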
*/
+ slli.c r30, r30, 28
+ bpl 1f
+ la r31, kernelsp
+ lw r31, [r31]
+1:
+ mv r30, r0
+ addri r0, r31, -PT_SIZE
+
+ sw r30, [r0, PT_R0]
+ .set r1
+ sw r1, [r0, PT_R1]
+ .set nor1
+ sw r2, [r0, PT_R2]
+ sw r3, [r0, PT_R3]
+ sw r4, [r0, PT_R4]
+ sw r5, [r0, PT_R5]
+ sw r6, [r0, PT_R6]
+ sw r7, [r0, PT_R7]
+
+ sw r8, [r0, PT_R8]
+ sw r9, [r0, PT_R9]
+ sw r10, [r0, PT_R10]
+ sw r11, [r0, PT_R11]
+ sw r12, [r0, PT_R12]
+ sw r13, [r0, PT_R13]
+ sw r14, [r0, PT_R14]
+ sw r15, [r0, PT_R15]
+
+ sw r16, [r0, PT_R16]
+ sw r17, [r0, PT_R17]
+ sw r18, [r0, PT_R18]
+ sw r19, [r0, PT_R19]
+ sw r20, [r0, PT_R20]
+ sw r21, [r0, PT_R21]
+ sw r22, [r0, PT_R22]
+ sw r23, [r0, PT_R23]
+
+ sw r24, [r0, PT_R24]
+ sw r25, [r0, PT_R25]
+ sw r26, [r0, PT_R26]
+ sw r27, [r0, PT_R27]
+
+ sw r28, [r0, PT_R28]
+ sw r29, [r0, PT_R29]
+ orri r28, r0, 0x1fff
+ li r31, 0x00001fff
+ xor r28, r28, r31
+
+ mfcehl r30, r31
+ sw r30, [r0, PT_CEH]
+ sw r31, [r0, PT_CEL]
+
+ mfcr r31, cr0
+ sw r31, [r0, PT_PSR]
+
+ mfcr r31, cr1
+ sw r31, [r0, PT_CONDITION]
+
+ mfcr r31, cr2
+ sw r31, [r0, PT_ECR]
+
+ mfcr r31, cr5
+ srli r31, r31, 1
+ slli r31, r31, 1
+ sw r31, [r0, PT_EPC]
+.endm
+
+.macro RESTORE_ALL_AND_RET
+ mfcr r30, cr0
+ srli r30, r30, 1
+ slli r30, r30, 1
+ mtcr r30, cr0
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ .set r1
+ ldis r1, 0x00ff
+ and r30, r30, r1
+ not r1, r1
+ lw r31, [r0, PT_PSR]
+ and r31, r31, r1
+ .set nor1
+ or r31, r31, r30
+ mtcr r31, cr0
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ lw r30, [r0, PT_CONDITION]
+ mtcr r30, cr1
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ lw r30, [r0, PT_CEH]
+ lw r31, [r0, PT_CEL]
+ mtcehl r30, r31
+
+ .set r1
+ lw r1, [r0, PT_R1]
+ .set nor1
+ lw r2, [r0, PT_R2]
+ lw r3, [r0, PT_R3]
+ lw r4, [r0, PT_R4]
+ lw r5, [r0, PT_R5]
+ lw r6, [r0, PT_R6]
+ lw r7, [r0, PT_R7]
+
+ lw r8, [r0, PT_R8]
+ lw r9, [r0, PT_R9]
+ lw r10, [r0, PT_R10]
+ lw r11, [r0, PT_R11]
+ lw r12, [r0, PT_R12]
+ lw r13, [r0, PT_R13]
+ lw r14, [r0, PT_R14]
+ lw r15, [r0, PT_R15]
+
+ lw r16, [r0, PT_R16]
+ lw r17, [r0, PT_R17]
+ lw r18, [r0, PT_R18]
+ lw r19, [r0, PT_R19]
+ lw r20, [r0, PT_R20]
+ lw r21, [r0, PT_R21]
+ lw r22, [r0, PT_R22]
+ lw r23, [r0, PT_R23]
+
+ lw r24, [r0, PT_R24]
+ lw r25, [r0, PT_R25]
+ lw r26, [r0, PT_R26]
+ lw r27, [r0, PT_R27]
+ lw r28, [r0, PT_R28]
+ lw r29, [r0, PT_R29]
+
+ lw r30, [r0, PT_EPC]
+ lw r0, [r0, PT_R0]
+ mtcr r30, cr5
+ rte
+.endm
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_SCORE_ASMMACRO_H */
diff --git a/arch/score/include/asm/atomic.h b/arch/score/include/asm/atomic.h
new file mode 100644
index 00000000000..84eb8ddf9f3
--- /dev/null
+++ b/arch/score/include/asm/atomic.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_SCORE_ATOMIC_H
+#define _ASM_SCORE_ATOMIC_H
+
+#include
+
+#endif /* _ASM_SCORE_ATOMIC_H */
diff --git a/arch/score/include/asm/auxvec.h b/arch/score/include/asm/auxvec.h
new file mode 100644
index 00000000000..f69151565ae
--- /dev/null
+++ b/arch/score/include/asm/auxvec.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_SCORE_AUXVEC_H
+#define _ASM_SCORE_AUXVEC_H
+
+#endif /* _ASM_SCORE_AUXVEC_H */
diff --git a/arch/score/include/asm/bitops.h b/arch/score/include/asm/bitops.h
new file mode 100644
index 00000000000..2763b050fca
--- /dev/null
+++ b/arch/score/include/asm/bitops.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_SCORE_BITOPS_H
+#define _ASM_SCORE_BITOPS_H
+
+#include /* swab32 */
+#include /* save_flags */
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
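+ *
+ * A sketch of the usual pairing when a bit serves as a lock (the bit
+ * number and word here are illustrative names only):
+ *
+ *	smp_mb__before_clear_bit();
+ *	clear_bit(MY_LOCK_BIT, &my_word);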
+ */ +#define smp_mb__before_clear_bit() barrier() +#define smp_mb__after_clear_bit() barrier() + +#include +#include + +#endif /* _ASM_SCORE_BITOPS_H */ diff --git a/arch/score/include/asm/bitsperlong.h b/arch/score/include/asm/bitsperlong.h new file mode 100644 index 00000000000..86ff337aa45 --- /dev/null +++ b/arch/score/include/asm/bitsperlong.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_BITSPERLONG_H +#define _ASM_SCORE_BITSPERLONG_H + +#include + +#endif /* _ASM_SCORE_BITSPERLONG_H */ diff --git a/arch/score/include/asm/bug.h b/arch/score/include/asm/bug.h new file mode 100644 index 00000000000..bb76a330bcf --- /dev/null +++ b/arch/score/include/asm/bug.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_BUG_H +#define _ASM_SCORE_BUG_H + +#include + +#endif /* _ASM_SCORE_BUG_H */ diff --git a/arch/score/include/asm/bugs.h b/arch/score/include/asm/bugs.h new file mode 100644 index 00000000000..a062e1056bb --- /dev/null +++ b/arch/score/include/asm/bugs.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_BUGS_H +#define _ASM_SCORE_BUGS_H + +#include + +#endif /* _ASM_SCORE_BUGS_H */ diff --git a/arch/score/include/asm/byteorder.h b/arch/score/include/asm/byteorder.h new file mode 100644 index 00000000000..88cbebc7921 --- /dev/null +++ b/arch/score/include/asm/byteorder.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_BYTEORDER_H +#define _ASM_SCORE_BYTEORDER_H + +#include + +#endif /* _ASM_SCORE_BYTEORDER_H */ diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h new file mode 100644 index 00000000000..ae3d59f2d2c --- /dev/null +++ b/arch/score/include/asm/cache.h @@ -0,0 +1,7 @@ +#ifndef _ASM_SCORE_CACHE_H +#define _ASM_SCORE_CACHE_H + +#define L1_CACHE_SHIFT 4 +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#endif /* _ASM_SCORE_CACHE_H */ diff --git a/arch/score/include/asm/cacheflush.h b/arch/score/include/asm/cacheflush.h new file mode 100644 index 00000000000..1c74628caf7 --- /dev/null +++ b/arch/score/include/asm/cacheflush.h @@ -0,0 +1,47 @@ +#ifndef _ASM_SCORE_CACHEFLUSH_H +#define _ASM_SCORE_CACHEFLUSH_H + +/* Keep includes the same across arches. 
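+
+   For instance, the usual pattern after writing instructions into a
+   page looks roughly like this (dst, code and len are illustrative):
+
+       memcpy(dst, code, len);
+       flush_icache_range((unsigned long)dst, (unsigned long)dst + len);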
*/ +#include + +extern void (*flush_cache_all)(void); +extern void (*flush_cache_mm)(struct mm_struct *mm); +extern void (*flush_cache_range)(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +extern void (*flush_cache_page)(struct vm_area_struct *vma, + unsigned long page, unsigned long pfn); +extern void (*flush_cache_sigtramp)(unsigned long addr); +extern void (*flush_icache_all)(void); +extern void (*flush_icache_range)(unsigned long start, unsigned long end); +extern void (*flush_data_cache_page)(unsigned long addr); + +extern void s7_flush_cache_all(void); + +#define flush_cache_dup_mm(mm) do {} while (0) +#define flush_dcache_page(page) do {} while (0) +#define flush_dcache_mmap_lock(mapping) do {} while (0) +#define flush_dcache_mmap_unlock(mapping) do {} while (0) +#define flush_cache_vmap(start, end) do {} while (0) +#define flush_cache_vunmap(start, end) do {} while (0) + +static inline void flush_icache_page(struct vm_area_struct *vma, + struct page *page) +{ + if (vma->vm_flags & VM_EXEC) { + void *v = page_address(page); + flush_icache_range((unsigned long) v, + (unsigned long) v + PAGE_SIZE); + } +} + +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ + do { \ + memcpy(dst, src, len); \ + if ((vma->vm_flags & VM_EXEC)) \ + flush_cache_page(vma, vaddr, page_to_pfn(page));\ + } while (0) + +#endif /* _ASM_SCORE_CACHEFLUSH_H */ diff --git a/arch/score/include/asm/checksum.h b/arch/score/include/asm/checksum.h new file mode 100644 index 00000000000..f909ac3144a --- /dev/null +++ b/arch/score/include/asm/checksum.h @@ -0,0 +1,235 @@ +#ifndef _ASM_SCORE_CHECKSUM_H +#define _ASM_SCORE_CHECKSUM_H + +#include +#include + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +unsigned int csum_partial(const void *buff, int len, __wsum sum); +unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len, + unsigned int sum, int *csum_err); +unsigned int csum_partial_copy(const char *src, char *dst, + int len, unsigned int sum); + +/* + * this is a new version of the above that records errors it finds in *errp, + * but continues and zeros the rest of the buffer. 
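+ *
+ * A sketch of how the routines above chain, feeding the running sum
+ * back in and folding at the end (buffer name and sizes illustrative):
+ *
+ *	sum = csum_partial(buf, 1024, 0);
+ *	sum = csum_partial(buf + 1024, 512, sum);
+ *	check = csum_fold(sum);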
+ */ + +/* + * Copy and checksum to user + */ +#define HAVE_CSUM_COPY_USER +static inline +__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, + __wsum sum, int *err_ptr) +{ + sum = csum_partial(src, len, sum); + if (copy_to_user(dst, src, len)) { + *err_ptr = -EFAULT; + return (__force __wsum) -1; /* invalid checksum */ + } + return sum; +} + + +#define csum_partial_copy_nocheck csum_partial_copy +/* + * Fold a partial checksum without adding pseudo headers + */ + +static inline __sum16 csum_fold(__wsum sum) +{ + /* the while loop is unnecessary really, it's always enough with two + iterations */ + __asm__ __volatile__( + ".set volatile\n\t" + ".set\tr1\n\t" + "slli\tr1,%0, 16\n\t" + "add\t%0,%0, r1\n\t" + "cmp.c\tr1, %0\n\t" + "srli\t%0, %0, 16\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n\t" + "1:ldi\tr30, 0xffff\n\t" + "xor\t%0, %0, r30\n\t" + "slli\t%0, %0, 16\n\t" + "srli\t%0, %0, 16\n\t" + ".set\tnor1\n\t" + ".set optimize\n\t" + : "=r" (sum) + : "0" (sum)); + return sum; +} + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + * + * By Jorge Cwik , adapted for linux by + * Arnt Gulbrandsen. + */ +static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + unsigned int sum; + unsigned long dummy; + + __asm__ __volatile__( + ".set volatile\n\t" + ".set\tnor1\n\t" + "lw\t%0, [%1]\n\t" + "subri\t%2, %2, 4\n\t" + "slli\t%2, %2, 2\n\t" + "lw\t%3, [%1, 4]\n\t" + "add\t%2, %2, %1\n\t" + "add\t%0, %0, %3\n\t" + "cmp.c\t%3, %0\n\t" + "lw\t%3, [%1, 8]\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n\t" + "1:\n\t" + "add\t%0, %0, %3\n\t" + "cmp.c\t%3, %0\n\t" + "lw\t%3, [%1, 12]\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %3\n\t" + "cmp.c\t%3, %0\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n" + + "1:\tlw\t%3, [%1, 16]\n\t" + "addi\t%1, 4\n\t" + "add\t%0, %0, %3\n\t" + "cmp.c\t%3, %0\n\t" + "bleu\t2f\n\t" + "addi\t%0, 0x1\n" + "2:cmp.c\t%2, %1\n\t" + "bne\t1b\n\t" + + ".set\tr1\n\t" + ".set optimize\n\t" + : "=&r" (sum), "=&r" (iph), "=&r" (ihl), "=&r" (dummy) + : "1" (iph), "2" (ihl)); + + return csum_fold(sum); +} + +static inline __wsum +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, + unsigned short proto, __wsum sum) +{ + unsigned long tmp = (ntohs(len) << 16) + proto * 256; + __asm__ __volatile__( + ".set volatile\n\t" + "add\t%0, %0, %2\n\t" + "cmp.c\t%2, %0\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n\t" + "1:\n\t" + "add\t%0, %0, %3\n\t" + "cmp.c\t%3, %0\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n\t" + "1:\n\t" + "add\t%0, %0, %4\n\t" + "cmp.c\t%4, %0\n\t" + "bleu\t1f\n\t" + "addi\t%0, 0x1\n\t" + "1:\n\t" + ".set optimize\n\t" + : "=r" (sum) + : "0" (daddr), "r"(saddr), + "r" (tmp), + "r" (sum)); + return sum; +} + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +static inline __sum16 +csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, + unsigned short proto, __wsum sum) +{ + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); +} + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ + +static inline unsigned short ip_compute_csum(const void *buff, int len) +{ + return csum_fold(csum_partial(buff, len, 0)); +} + +#define _HAVE_ARCH_IPV6_CSUM +static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, unsigned short proto, + __wsum sum) +{ + __asm__ __volatile__( + 
".set\tnoreorder\t\t\t# csum_ipv6_magic\n\t" + ".set\tnoat\n\t" + "addu\t%0, %5\t\t\t# proto (long in network byte order)\n\t" + "sltu\t$1, %0, %5\n\t" + "addu\t%0, $1\n\t" + "addu\t%0, %6\t\t\t# csum\n\t" + "sltu\t$1, %0, %6\n\t" + "lw\t%1, 0(%2)\t\t\t# four words source address\n\t" + "addu\t%0, $1\n\t" + "addu\t%0, %1\n\t" + "sltu\t$1, %0, %1\n\t" + "lw\t%1, 4(%2)\n\t" + "addu\t%0, $1\n\t" + "addu\t%0, %1\n\t" + "sltu\t$1, %0, %1\n\t" + "lw\t%1, 8(%2)\n\t" + "addu\t%0, $1\n\t" + "addu\t%0, %1\n\t" + "sltu\t$1, %0, %1\n\t" + "lw\t%1, 12(%2)\n\t" + "addu\t%0, $1\n\t" + "addu\t%0, %1\n\t" + "sltu\t$1, %0, %1\n\t" + "lw\t%1, 0(%3)\n\t" + "addu\t%0, $1\n\t" + "addu\t%0, %1\n\t" + "sltu\t$1, %0, %1\n\t" + "lw\t%1, 4(%3)\n\t" + "addu\t%0, $1\n\t" + "addu\t%0, %1\n\t" + "sltu\t$1, %0, %1\n\t" + "lw\t%1, 8(%3)\n\t" + "addu\t%0, $1\n\t" + "addu\t%0, %1\n\t" + "sltu\t$1, %0, %1\n\t" + "lw\t%1, 12(%3)\n\t" + "addu\t%0, $1\n\t" + "addu\t%0, %1\n\t" + "sltu\t$1, %0, %1\n\t" + "addu\t%0, $1\t\t\t# Add final carry\n\t" + ".set\tnoat\n\t" + ".set\tnoreorder" + : "=r" (sum), "=r" (proto) + : "r" (saddr), "r" (daddr), + "0" (htonl(len)), "1" (htonl(proto)), "r" (sum)); + + return csum_fold(sum); +} +#endif /* _ASM_SCORE_CHECKSUM_H */ diff --git a/arch/score/include/asm/cputime.h b/arch/score/include/asm/cputime.h new file mode 100644 index 00000000000..1fced99f0d6 --- /dev/null +++ b/arch/score/include/asm/cputime.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_CPUTIME_H +#define _ASM_SCORE_CPUTIME_H + +#include + +#endif /* _ASM_SCORE_CPUTIME_H */ diff --git a/arch/score/include/asm/current.h b/arch/score/include/asm/current.h new file mode 100644 index 00000000000..16eae9cbaf1 --- /dev/null +++ b/arch/score/include/asm/current.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_CURRENT_H +#define _ASM_SCORE_CURRENT_H + +#include + +#endif /* _ASM_SCORE_CURRENT_H */ diff --git a/arch/score/include/asm/delay.h b/arch/score/include/asm/delay.h new file mode 100644 index 00000000000..ad716f6d922 --- /dev/null +++ b/arch/score/include/asm/delay.h @@ -0,0 +1,21 @@ +#ifndef _ASM_SCORE_DELAY_H +#define _ASM_SCORE_DELAY_H + +static inline void __delay(unsigned long loops) +{ + __asm__ __volatile__ ( + "1:\tsubi\t%0,1\n\t" + "cmpz.c\t%0\n\t" + "bne\t1b\n\t" + : "=r" (loops) + : "0" (loops)); +} + +static inline void __udelay(unsigned long usecs) +{ + __delay(usecs); +} + +#define udelay(usecs) __udelay(usecs) + +#endif /* _ASM_SCORE_DELAY_H */ diff --git a/arch/score/include/asm/device.h b/arch/score/include/asm/device.h new file mode 100644 index 00000000000..2dc7cc5d5ef --- /dev/null +++ b/arch/score/include/asm/device.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_DEVICE_H +#define _ASM_SCORE_DEVICE_H + +#include + +#endif /* _ASM_SCORE_DEVICE_H */ diff --git a/arch/score/include/asm/div64.h b/arch/score/include/asm/div64.h new file mode 100644 index 00000000000..75fae19824e --- /dev/null +++ b/arch/score/include/asm/div64.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_DIV64_H +#define _ASM_SCORE_DIV64_H + +#include + +#endif /* _ASM_SCORE_DIV64_H */ diff --git a/arch/score/include/asm/dma-mapping.h b/arch/score/include/asm/dma-mapping.h new file mode 100644 index 00000000000..f9c0193c7a5 --- /dev/null +++ b/arch/score/include/asm/dma-mapping.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_DMA_MAPPING_H +#define _ASM_SCORE_DMA_MAPPING_H + +#include + +#endif /* _ASM_SCORE_DMA_MAPPING_H */ diff --git a/arch/score/include/asm/dma.h b/arch/score/include/asm/dma.h new file mode 100644 index 00000000000..9f44185298b --- /dev/null +++ b/arch/score/include/asm/dma.h 
@@ -0,0 +1,8 @@ +#ifndef _ASM_SCORE_DMA_H +#define _ASM_SCORE_DMA_H + +#include + +#define MAX_DMA_ADDRESS (0) + +#endif /* _ASM_SCORE_DMA_H */ diff --git a/arch/score/include/asm/elf.h b/arch/score/include/asm/elf.h new file mode 100644 index 00000000000..832436375ac --- /dev/null +++ b/arch/score/include/asm/elf.h @@ -0,0 +1,99 @@ +#ifndef _ASM_SCORE_ELF_H +#define _ASM_SCORE_ELF_H + +/* ELF register definitions */ +#define ELF_NGREG 45 +#define ELF_NFPREG 33 +#define EM_SCORE7 135 + +/* Relocation types. */ +#define R_SCORE_NONE 0 +#define R_SCORE_HI16 1 +#define R_SCORE_LO16 2 +#define R_SCORE_BCMP 3 +#define R_SCORE_24 4 +#define R_SCORE_PC19 5 +#define R_SCORE16_11 6 +#define R_SCORE16_PC8 7 +#define R_SCORE_ABS32 8 +#define R_SCORE_ABS16 9 +#define R_SCORE_DUMMY2 10 +#define R_SCORE_GP15 11 +#define R_SCORE_GNU_VTINHERIT 12 +#define R_SCORE_GNU_VTENTRY 13 +#define R_SCORE_GOT15 14 +#define R_SCORE_GOT_LO16 15 +#define R_SCORE_CALL15 16 +#define R_SCORE_GPREL32 17 +#define R_SCORE_REL32 18 +#define R_SCORE_DUMMY_HI16 19 +#define R_SCORE_IMM30 20 +#define R_SCORE_IMM32 21 + +typedef unsigned long elf_greg_t; +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +typedef double elf_fpreg_t; +typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + +#define elf_check_arch(x) ((x)->e_machine == EM_SCORE7) + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS32 + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_SCORE7 + +#define SET_PERSONALITY(ex) \ +do { \ + set_personality(PER_LINUX); \ +} while (0) + +struct task_struct; +struct pt_regs; + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE PAGE_SIZE + +/* This yields a mask that user programs can use to figure out what + instruction set this cpu supports. This could be done in userspace, + but it's not easy, and we've already done it here. */ + +#define ELF_HWCAP (0) + +/* This yields a string that ld.so will use to load implementation + specific libraries for optimization. This is more specific in + intent than poking at uname or /proc/cpuinfo. + + For the moment, we have only optimizations for the Intel generations, + but that could change... */ + +#define ELF_PLATFORM (NULL) + +#define ELF_PLAT_INIT(_r, load_addr) \ +do { \ + _r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0; \ + _r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0; \ + _r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0; \ + _r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0; \ + _r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0; \ + _r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0; \ + _r->regs[25] = _r->regs[26] = _r->regs[27] = _r->regs[28] = 0; \ + _r->regs[30] = _r->regs[31] = 0; \ +} while (0) + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. 
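+
+   With TASK_SIZE == 0x7fff8000 the definition below works out to
+   0x7fff8000 / 3 * 2 == 0x55550000, i.e. about two thirds of the way
+   into the user address space.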
*/
+
+#ifndef ELF_ET_DYN_BASE
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+#endif
+
+#endif /* _ASM_SCORE_ELF_H */
diff --git a/arch/score/include/asm/emergency-restart.h b/arch/score/include/asm/emergency-restart.h
new file mode 100644
index 00000000000..ca31e9803a8
--- /dev/null
+++ b/arch/score/include/asm/emergency-restart.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_SCORE_EMERGENCY_RESTART_H
+#define _ASM_SCORE_EMERGENCY_RESTART_H
+
+#include
+
+#endif /* _ASM_SCORE_EMERGENCY_RESTART_H */
diff --git a/arch/score/include/asm/errno.h b/arch/score/include/asm/errno.h
new file mode 100644
index 00000000000..7cd3e1f07c0
--- /dev/null
+++ b/arch/score/include/asm/errno.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_SCORE_ERRNO_H
+#define _ASM_SCORE_ERRNO_H
+
+#include
+#define EMAXERRNO 1024
+
+#endif /* _ASM_SCORE_ERRNO_H */
diff --git a/arch/score/include/asm/fcntl.h b/arch/score/include/asm/fcntl.h
new file mode 100644
index 00000000000..03968a3103a
--- /dev/null
+++ b/arch/score/include/asm/fcntl.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_SCORE_FCNTL_H
+#define _ASM_SCORE_FCNTL_H
+
+#include
+
+#endif /* _ASM_SCORE_FCNTL_H */
diff --git a/arch/score/include/asm/fixmap.h b/arch/score/include/asm/fixmap.h
new file mode 100644
index 00000000000..ee167669402
--- /dev/null
+++ b/arch/score/include/asm/fixmap.h
@@ -0,0 +1,82 @@
+#ifndef _ASM_SCORE_FIXMAP_H
+#define _ASM_SCORE_FIXMAP_H
+
+#include
+
+#define PHY_RAM_BASE 0x00000000
+#define PHY_IO_BASE 0x10000000
+
+#define VIRTUAL_RAM_BASE 0xa0000000
+#define VIRTUAL_IO_BASE 0xb0000000
+
+#define RAM_SPACE_SIZE 0x10000000
+#define IO_SPACE_SIZE 0x10000000
+
+/* Kernel unmapped, cached 512MB */
+#define KSEG1 0xa0000000
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * higher than 1) use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+
+/*
+ * on UP currently we will have no trace of the fixmap mechanism,
+ * no page table allocations, etc. This might change in the
+ * future, say framebuffers for the console driver(s) could be
+ * fix-mapped?
+ */
+enum fixed_addresses {
+#define FIX_N_COLOURS 8
+ FIX_CMAP_BEGIN,
+ FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS,
+ __end_of_fixed_addresses
+};
+
+/*
+ * used by vmalloc.c.
+ *
+ * Leave one empty page between vmalloc'ed areas and
+ * the start of the fixmap, and leave one page empty
+ * at the top of mem..
+ */
+#define FIXADDR_TOP ((unsigned long)(long)(int)0xfefe0000)
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x) \
+ ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)
+
+extern void __this_fixmap_does_not_exist(void);
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
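+ *
+ * For example, with FIXADDR_TOP == 0xfefe0000 and 4k pages:
+ *
+ *	fix_to_virt(FIX_CMAP_BEGIN) == 0xfefe0000 - (0 << 12) == 0xfefe0000
+ *	fix_to_virt(FIX_CMAP_END)   == 0xfefe0000 - (8 << 12) == 0xfefd8000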
+ */ +static inline unsigned long fix_to_virt(const unsigned int idx) +{ + return __fix_to_virt(idx); +} + +static inline unsigned long virt_to_fix(const unsigned long vaddr) +{ + return __virt_to_fix(vaddr); +} + +#endif /* _ASM_SCORE_FIXMAP_H */ diff --git a/arch/score/include/asm/ftrace.h b/arch/score/include/asm/ftrace.h new file mode 100644 index 00000000000..79d6f10e1f5 --- /dev/null +++ b/arch/score/include/asm/ftrace.h @@ -0,0 +1,4 @@ +#ifndef _ASM_SCORE_FTRACE_H +#define _ASM_SCORE_FTRACE_H + +#endif /* _ASM_SCORE_FTRACE_H */ diff --git a/arch/score/include/asm/futex.h b/arch/score/include/asm/futex.h new file mode 100644 index 00000000000..1dca2420f8d --- /dev/null +++ b/arch/score/include/asm/futex.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_FUTEX_H +#define _ASM_SCORE_FUTEX_H + +#include + +#endif /* _ASM_SCORE_FUTEX_H */ diff --git a/arch/score/include/asm/hardirq.h b/arch/score/include/asm/hardirq.h new file mode 100644 index 00000000000..dc932c50d3e --- /dev/null +++ b/arch/score/include/asm/hardirq.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_HARDIRQ_H +#define _ASM_SCORE_HARDIRQ_H + +#include + +#endif /* _ASM_SCORE_HARDIRQ_H */ diff --git a/arch/score/include/asm/hw_irq.h b/arch/score/include/asm/hw_irq.h new file mode 100644 index 00000000000..4caafb2b509 --- /dev/null +++ b/arch/score/include/asm/hw_irq.h @@ -0,0 +1,4 @@ +#ifndef _ASM_SCORE_HW_IRQ_H +#define _ASM_SCORE_HW_IRQ_H + +#endif /* _ASM_SCORE_HW_IRQ_H */ diff --git a/arch/score/include/asm/io.h b/arch/score/include/asm/io.h new file mode 100644 index 00000000000..fbbfd7132e3 --- /dev/null +++ b/arch/score/include/asm/io.h @@ -0,0 +1,9 @@ +#ifndef _ASM_SCORE_IO_H +#define _ASM_SCORE_IO_H + +#include + +#define virt_to_bus virt_to_phys +#define bus_to_virt phys_to_virt + +#endif /* _ASM_SCORE_IO_H */ diff --git a/arch/score/include/asm/ioctl.h b/arch/score/include/asm/ioctl.h new file mode 100644 index 00000000000..a351d2194bf --- /dev/null +++ b/arch/score/include/asm/ioctl.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_IOCTL_H +#define _ASM_SCORE_IOCTL_H + +#include + +#endif /* _ASM_SCORE_IOCTL_H */ diff --git a/arch/score/include/asm/ioctls.h b/arch/score/include/asm/ioctls.h new file mode 100644 index 00000000000..ed01d2b9aea --- /dev/null +++ b/arch/score/include/asm/ioctls.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_IOCTLS_H +#define _ASM_SCORE_IOCTLS_H + +#include + +#endif /* _ASM_SCORE_IOCTLS_H */ diff --git a/arch/score/include/asm/ipcbuf.h b/arch/score/include/asm/ipcbuf.h new file mode 100644 index 00000000000..e082ceff181 --- /dev/null +++ b/arch/score/include/asm/ipcbuf.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_IPCBUF_H +#define _ASM_SCORE_IPCBUF_H + +#include + +#endif /* _ASM_SCORE_IPCBUF_H */ diff --git a/arch/score/include/asm/irq.h b/arch/score/include/asm/irq.h new file mode 100644 index 00000000000..401f6704848 --- /dev/null +++ b/arch/score/include/asm/irq.h @@ -0,0 +1,33 @@ +#ifndef _ASM_SCORE_IRQ_H +#define _ASM_SCORE_IRQ_H + +#define EXCEPTION_VECTOR_BASE_ADDR 0xa0000000 +#define VECTOR_ADDRESS_OFFSET_MODE4 0 +#define VECTOR_ADDRESS_OFFSET_MODE16 1 + +#define DEBUG_VECTOR_SIZE (0x4) +#define DEBUG_VECTOR_BASE_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x1fc) + +#define GENERAL_VECTOR_SIZE (0x10) +#define GENERAL_VECTOR_BASE_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x200) + +#define NR_IRQS 64 +#define IRQ_VECTOR_SIZE (0x10) +#define IRQ_VECTOR_BASE_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x210) +#define IRQ_VECTOR_END_ADDR ((EXCEPTION_VECTOR_BASE_ADDR) + 0x5f0) + +#define irq_canonicalize(irq) (irq) + +#define P_INT_PNDL 0x95F50000 
+#define P_INT_PNDH 0x95F50004 +#define P_INT_PRIORITY_M 0x95F50008 +#define P_INT_PRIORITY_SG0 0x95F50010 +#define P_INT_PRIORITY_SG1 0x95F50014 +#define P_INT_PRIORITY_SG2 0x95F50018 +#define P_INT_PRIORITY_SG3 0x95F5001C +#define P_INT_MASKL 0x95F50020 +#define P_INT_MASKH 0x95F50024 + +#define IRQ_TIMER (7) /* Timer IRQ number of SPCT6600 */ + +#endif /* _ASM_SCORE_IRQ_H */ diff --git a/arch/score/include/asm/irq_regs.h b/arch/score/include/asm/irq_regs.h new file mode 100644 index 00000000000..905b7b078ba --- /dev/null +++ b/arch/score/include/asm/irq_regs.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_IRQ_REGS_H +#define _ASM_SCORE_IRQ_REGS_H + +#include + +#endif /* _ASM_SCORE_IRQ_REGS_H */ diff --git a/arch/score/include/asm/irqflags.h b/arch/score/include/asm/irqflags.h new file mode 100644 index 00000000000..92eeb33dd72 --- /dev/null +++ b/arch/score/include/asm/irqflags.h @@ -0,0 +1,111 @@ +#ifndef _ASM_SCORE_IRQFLAGS_H +#define _ASM_SCORE_IRQFLAGS_H + +#ifndef __ASSEMBLY__ + +#define raw_local_irq_save(x) \ +{ \ + __asm__ __volatile__( \ + "mfcr r8, cr0;" \ + "li r9, 0xfffffffe;" \ + "nop;" \ + "mv %0, r8;" \ + "and r8, r8, r9;" \ + "mtcr r8, cr0;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "ldi r9, 0x1;" \ + "and %0, %0, r9;" \ + : "=r" (x) \ + : \ + : "r8", "r9" \ + ); \ +} + +#define raw_local_irq_restore(x) \ +{ \ + __asm__ __volatile__( \ + "mfcr r8, cr0;" \ + "ldi r9, 0x1;" \ + "and %0, %0, r9;" \ + "or r8, r8, %0;" \ + "mtcr r8, cr0;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + : \ + : "r"(x) \ + : "r8", "r9" \ + ); \ +} + +#define raw_local_irq_enable(void) \ +{ \ + __asm__ __volatile__( \ + "mfcr\tr8,cr0;" \ + "nop;" \ + "nop;" \ + "ori\tr8,0x1;" \ + "mtcr\tr8,cr0;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + : \ + : \ + : "r8"); \ +} + +#define raw_local_irq_disable(void) \ +{ \ + __asm__ __volatile__( \ + "mfcr\tr8,cr0;" \ + "nop;" \ + "nop;" \ + "srli\tr8,r8,1;" \ + "slli\tr8,r8,1;" \ + "mtcr\tr8,cr0;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + : \ + : \ + : "r8"); \ +} + +#define raw_local_save_flags(x) \ +{ \ + __asm__ __volatile__( \ + "mfcr r8, cr0;" \ + "nop;" \ + "nop;" \ + "mv %0, r8;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "nop;" \ + "ldi r9, 0x1;" \ + "and %0, %0, r9;" \ + : "=r" (x) \ + : \ + : "r8", "r9" \ + ); \ +} + +static inline int raw_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & 1); +} + +#endif + +#endif /* _ASM_SCORE_IRQFLAGS_H */ diff --git a/arch/score/include/asm/kdebug.h b/arch/score/include/asm/kdebug.h new file mode 100644 index 00000000000..a666e513f74 --- /dev/null +++ b/arch/score/include/asm/kdebug.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_KDEBUG_H +#define _ASM_SCORE_KDEBUG_H + +#include + +#endif /* _ASM_SCORE_KDEBUG_H */ diff --git a/arch/score/include/asm/kmap_types.h b/arch/score/include/asm/kmap_types.h new file mode 100644 index 00000000000..6c46eb5077d --- /dev/null +++ b/arch/score/include/asm/kmap_types.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_KMAP_TYPES_H +#define _ASM_SCORE_KMAP_TYPES_H + +#include + +#endif /* _ASM_SCORE_KMAP_TYPES_H */ diff --git a/arch/score/include/asm/linkage.h b/arch/score/include/asm/linkage.h new file mode 100644 index 00000000000..2580fbbfe94 --- /dev/null +++ b/arch/score/include/asm/linkage.h @@ -0,0 +1,4 @@ +#ifndef _ASM_SCORE_LINKAGE_H +#define _ASM_SCORE_LINKAGE_H + +#endif /* _ASM_SCORE_LINKAGE_H */ diff --git a/arch/score/include/asm/local.h b/arch/score/include/asm/local.h new file mode 100644 index 
00000000000..7e02f13dbba --- /dev/null +++ b/arch/score/include/asm/local.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_LOCAL_H +#define _ASM_SCORE_LOCAL_H + +#include + +#endif /* _ASM_SCORE_LOCAL_H */ diff --git a/arch/score/include/asm/mman.h b/arch/score/include/asm/mman.h new file mode 100644 index 00000000000..84d85ddfed8 --- /dev/null +++ b/arch/score/include/asm/mman.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_MMAN_H +#define _ASM_SCORE_MMAN_H + +#include + +#endif /* _ASM_SCORE_MMAN_H */ diff --git a/arch/score/include/asm/mmu.h b/arch/score/include/asm/mmu.h new file mode 100644 index 00000000000..676828e4c10 --- /dev/null +++ b/arch/score/include/asm/mmu.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_MMU_H +#define _ASM_SCORE_MMU_H + +typedef unsigned long mm_context_t; + +#endif /* _ASM_SCORE_MMU_H */ diff --git a/arch/score/include/asm/mmu_context.h b/arch/score/include/asm/mmu_context.h new file mode 100644 index 00000000000..2644577c96e --- /dev/null +++ b/arch/score/include/asm/mmu_context.h @@ -0,0 +1,113 @@ +#ifndef _ASM_SCORE_MMU_CONTEXT_H +#define _ASM_SCORE_MMU_CONTEXT_H + +#include +#include +#include +#include + +#include +#include +#include + +/* + * For the fast tlb miss handlers, we keep a per cpu array of pointers + * to the current pgd for each processor. Also, the proc. id is stuffed + * into the context register. + */ +extern unsigned long asid_cache; +extern unsigned long pgd_current; + +#define TLBMISS_HANDLER_SETUP_PGD(pgd) (pgd_current = (unsigned long)(pgd)) + +#define TLBMISS_HANDLER_SETUP() \ +do { \ + write_c0_context(0); \ + TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) \ +} while (0) + +/* + * All unused by hardware upper bits will be considered + * as a software asid extension. + */ +#define ASID_VERSION_MASK 0xfffff000 +#define ASID_FIRST_VERSION 0x1000 + +/* PEVN --------- VPN ---------- --ASID--- -NA- */ +/* binary: 0000 0000 0000 0000 0000 0000 0001 0000 */ +/* binary: 0000 0000 0000 0000 0000 1111 1111 0000 */ +#define ASID_INC 0x10 +#define ASID_MASK 0xff0 + +static inline void enter_lazy_tlb(struct mm_struct *mm, + struct task_struct *tsk) +{} + +static inline void +get_new_mmu_context(struct mm_struct *mm) +{ + unsigned long asid = asid_cache + ASID_INC; + + if (!(asid & ASID_MASK)) { + local_flush_tlb_all(); /* start new asid cycle */ + if (!asid) /* fix version if needed */ + asid = ASID_FIRST_VERSION; + } + + mm->context = asid; + asid_cache = asid; +} + +/* + * Initialize the context related info for a new mm_struct + * instance. + */ +static inline int +init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + mm->context = 0; + return 0; +} + +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned long flags; + + local_irq_save(flags); + if ((next->context ^ asid_cache) & ASID_VERSION_MASK) + get_new_mmu_context(next); + + pevn_set(next->context); + TLBMISS_HANDLER_SETUP_PGD(next->pgd); + local_irq_restore(flags); +} + +/* + * Destroy context related info for an mm_struct that is about + * to be put to rest. + */ +static inline void destroy_context(struct mm_struct *mm) +{} + +static inline void +deactivate_mm(struct task_struct *task, struct mm_struct *mm) +{} + +/* + * After we have set current->mm to a new value, this activates + * the context for the new mm so we see the new mappings. 
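+ *
+ * For reference, the ASID arithmetic above: with ASID_INC == 0x10 and
+ * ASID_MASK == 0xff0 the hardware ASID wraps after 255 allocations,
+ * e.g.
+ *
+ *	asid_cache == 0x1ff0:  next == 0x2000, (0x2000 & 0xff0) == 0,
+ *	so local_flush_tlb_all() runs and a new ASID cycle begins.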
+ */ +static inline void +activate_mm(struct mm_struct *prev, struct mm_struct *next) +{ + unsigned long flags; + + local_irq_save(flags); + get_new_mmu_context(next); + pevn_set(next->context); + TLBMISS_HANDLER_SETUP_PGD(next->pgd); + local_irq_restore(flags); +} + +#endif /* _ASM_SCORE_MMU_CONTEXT_H */ diff --git a/arch/score/include/asm/module.h b/arch/score/include/asm/module.h new file mode 100644 index 00000000000..f0b5dc0bd02 --- /dev/null +++ b/arch/score/include/asm/module.h @@ -0,0 +1,39 @@ +#ifndef _ASM_SCORE_MODULE_H +#define _ASM_SCORE_MODULE_H + +#include +#include + +struct mod_arch_specific { + /* Data Bus Error exception tables */ + struct list_head dbe_list; + const struct exception_table_entry *dbe_start; + const struct exception_table_entry *dbe_end; +}; + +typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */ + +#define Elf_Shdr Elf32_Shdr +#define Elf_Sym Elf32_Sym +#define Elf_Ehdr Elf32_Ehdr +#define Elf_Addr Elf32_Addr + +/* Given an address, look for it in the exception tables. */ +#ifdef CONFIG_MODULES +const struct exception_table_entry *search_module_dbetables(unsigned long addr); +#else +static inline const struct exception_table_entry +*search_module_dbetables(unsigned long addr) +{ + return NULL; +} +#endif + +#define MODULE_PROC_FAMILY "SCORE7" +#define MODULE_KERNEL_TYPE "32BIT " +#define MODULE_KERNEL_SMTC "" + +#define MODULE_ARCH_VERMAGIC \ + MODULE_PROC_FAMILY MODULE_KERNEL_TYPE MODULE_KERNEL_SMTC + +#endif /* _ASM_SCORE_MODULE_H */ diff --git a/arch/score/include/asm/msgbuf.h b/arch/score/include/asm/msgbuf.h new file mode 100644 index 00000000000..7506721e29f --- /dev/null +++ b/arch/score/include/asm/msgbuf.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_MSGBUF_H +#define _ASM_SCORE_MSGBUF_H + +#include + +#endif /* _ASM_SCORE_MSGBUF_H */ diff --git a/arch/score/include/asm/mutex.h b/arch/score/include/asm/mutex.h new file mode 100644 index 00000000000..10d48fe4db9 --- /dev/null +++ b/arch/score/include/asm/mutex.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_MUTEX_H +#define _ASM_SCORE_MUTEX_H + +#include + +#endif /* _ASM_SCORE_MUTEX_H */ diff --git a/arch/score/include/asm/page.h b/arch/score/include/asm/page.h new file mode 100644 index 00000000000..67e98206be6 --- /dev/null +++ b/arch/score/include/asm/page.h @@ -0,0 +1,92 @@ +#ifndef _ASM_SCORE_PAGE_H +#define _ASM_SCORE_PAGE_H + +#include + +/* PAGE_SHIFT determines the page size */ +#define PAGE_SHIFT (12) +#define PAGE_SIZE (1UL << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1))) +#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1))) + +/* align addr on a size boundary - adjust address up/down if needed */ +#define _ALIGN_UP(addr, size) (((addr)+((size)-1))&(~((size)-1))) +#define _ALIGN_DOWN(addr, size) ((addr)&(~((size)-1))) + +/* align addr on a size boundary - adjust address up if needed */ +#define _ALIGN(addr, size) _ALIGN_UP(addr, size) + +/* + * PAGE_OFFSET -- the first address of the first page of memory. When not + * using MMU this corresponds to the first free page in physical memory (aligned + * on a page boundary). 
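+ *
+ * The helpers above are plain round-to-page arithmetic; with 4k pages:
+ *
+ *	PAGE_UP(0xa0001234)   == 0xa0002000
+ *	PAGE_DOWN(0xa0001234) == 0xa0001000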
+ */ +#define PAGE_OFFSET (0xA0000000UL) + +#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) +#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) + +#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE) +#define copy_user_page(vto, vfrom, vaddr, topg) \ + memcpy((vto), (vfrom), PAGE_SIZE) + +/* + * These are used to make use of C type-checking.. + */ + +typedef struct { unsigned long pte; } pte_t; /* page table entry */ +typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */ +typedef struct { unsigned long pgprot; } pgprot_t; +typedef struct page *pgtable_t; + +#define pte_val(x) ((x).pte) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) }) +#define __pgd(x) ((pgd_t) { (x) }) +#define __pgprot(x) ((pgprot_t) { (x) }) + +extern unsigned long max_low_pfn; +extern unsigned long min_low_pfn; +extern unsigned long max_pfn; + +#define __pa(vaddr) ((unsigned long) (vaddr)) +#define __va(paddr) ((void *) (paddr)) + +#define phys_to_pfn(phys) (PFN_DOWN(phys)) +#define pfn_to_phys(pfn) (PFN_PHYS(pfn)) + +#define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr)))) +#define pfn_to_virt(pfn) __va(pfn_to_phys((pfn))) + +#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr))) +#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page))) + +#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page))) +#define page_to_bus(page) (page_to_phys(page)) +#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr))) + +#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_mapnr) + +#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) + +#endif /* __ASSEMBLY__ */ + +#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr))) + +#endif /* __KERNEL__ */ + +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#include +#include + +#endif /* _ASM_SCORE_PAGE_H */ diff --git a/arch/score/include/asm/param.h b/arch/score/include/asm/param.h new file mode 100644 index 00000000000..916b8690b6a --- /dev/null +++ b/arch/score/include/asm/param.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_PARAM_H +#define _ASM_SCORE_PARAM_H + +#include + +#endif /* _ASM_SCORE_PARAM_H */ diff --git a/arch/score/include/asm/pci.h b/arch/score/include/asm/pci.h new file mode 100644 index 00000000000..3f3cfd82549 --- /dev/null +++ b/arch/score/include/asm/pci.h @@ -0,0 +1,4 @@ +#ifndef _ASM_SCORE_PCI_H +#define _ASM_SCORE_PCI_H + +#endif /* _ASM_SCORE_PCI_H */ diff --git a/arch/score/include/asm/percpu.h b/arch/score/include/asm/percpu.h new file mode 100644 index 00000000000..e7bd4e05b47 --- /dev/null +++ b/arch/score/include/asm/percpu.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_PERCPU_H +#define _ASM_SCORE_PERCPU_H + +#include + +#endif /* _ASM_SCORE_PERCPU_H */ diff --git a/arch/score/include/asm/pgalloc.h b/arch/score/include/asm/pgalloc.h new file mode 100644 index 00000000000..28dacc1cbba --- /dev/null +++ b/arch/score/include/asm/pgalloc.h @@ -0,0 +1,83 @@ +#ifndef _ASM_SCORE_PGALLOC_H +#define _ASM_SCORE_PGALLOC_H + +#include + +static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, + pte_t *pte) +{ + set_pmd(pmd, __pmd((unsigned long)pte)); +} + +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, + pgtable_t pte) +{ + set_pmd(pmd, __pmd((unsigned long)page_address(pte))); +} + +#define pmd_pgtable(pmd) pmd_page(pmd) + +static inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + pgd_t *ret, *init; + + ret = (pgd_t *) __get_free_pages(GFP_KERNEL, 
PGD_ORDER); + if (ret) { + init = pgd_offset(&init_mm, 0UL); + pgd_init((unsigned long)ret); + memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + } + + return ret; +} + +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + free_pages((unsigned long)pgd, PGD_ORDER); +} + +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, + unsigned long address) +{ + pte_t *pte; + + pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, + PTE_ORDER); + + return pte; +} + +static inline struct page *pte_alloc_one(struct mm_struct *mm, + unsigned long address) +{ + struct page *pte; + + pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); + if (pte) { + clear_highpage(pte); + pgtable_page_ctor(pte); + } + return pte; +} + +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + free_pages((unsigned long)pte, PTE_ORDER); +} + +static inline void pte_free(struct mm_struct *mm, pgtable_t pte) +{ + pgtable_page_dtor(pte); + __free_pages(pte, PTE_ORDER); +} + +#define __pte_free_tlb(tlb, pte) \ +do { \ + pgtable_page_dtor(pte); \ + tlb_remove_page((tlb), pte); \ +} while (0) + +#define check_pgt_cache() do {} while (0) + +#endif /* _ASM_SCORE_PGALLOC_H */ diff --git a/arch/score/include/asm/pgtable-bits.h b/arch/score/include/asm/pgtable-bits.h new file mode 100644 index 00000000000..ca16d357a64 --- /dev/null +++ b/arch/score/include/asm/pgtable-bits.h @@ -0,0 +1,23 @@ +#ifndef _ASM_SCORE_PGTABLE_BITS_H +#define _ASM_SCORE_PGTABLE_BITS_H + +#define _PAGE_ACCESSED (1<<5) /* implemented in software */ +#define _PAGE_READ (1<<6) /* implemented in software */ +#define _PAGE_WRITE (1<<7) /* implemented in software */ +#define _PAGE_PRESENT (1<<9) /* implemented in software */ +#define _PAGE_MODIFIED (1<<10) /* implemented in software */ +#define _PAGE_FILE (1<<10) + +#define _PAGE_GLOBAL (1<<0) +#define _PAGE_VALID (1<<1) +#define _PAGE_SILENT_READ (1<<1) /* synonym */ +#define _PAGE_DIRTY (1<<2) /* Write bit */ +#define _PAGE_SILENT_WRITE (1<<2) +#define _PAGE_CACHE (1<<3) /* cache */ +#define _CACHE_MASK (1<<3) +#define _PAGE_BUFFERABLE (1<<4) /*Fallow Spec. */ + +#define _PAGE_CHG_MASK \ + (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_CACHE) + +#endif /* _ASM_SCORE_PGTABLE_BITS_H */ diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h new file mode 100644 index 00000000000..0f7177a4220 --- /dev/null +++ b/arch/score/include/asm/pgtable.h @@ -0,0 +1,267 @@ +#ifndef _ASM_SCORE_PGTABLE_H +#define _ASM_SCORE_PGTABLE_H + +#include +#include + +#include +#include +#include + +extern void load_pgd(unsigned long pg_dir); +extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)]; + +/* PGDIR_SHIFT determines what a third-level page table entry can map */ +#define PGDIR_SHIFT 22 +#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE - 1)) + +/* + * Entries per page directory level: we use two-level, so + * we don't really have any PUD/PMD directory physically. 
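+ *
+ * With PGDIR_SHIFT == 22 and 4k pages a 32-bit virtual address splits
+ * into 10/10/12 bits; e.g. for address 0xc0100000:
+ *
+ *	pgd_index(0xc0100000)    == 0xc0100000 >> 22           == 0x300
+ *	__pte_offset(0xc0100000) == (0xc0100000 >> 12) & 0x3ff == 0x100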
+ */ +#define PGD_ORDER 0 +#define PTE_ORDER 0 + +#define PTRS_PER_PGD 1024 +#define PTRS_PER_PTE 1024 + +#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) +#define FIRST_USER_ADDRESS 0 + +#define VMALLOC_START (0xc0000000UL) + +#define PKMAP_BASE (0xfd000000UL) + +#define VMALLOC_END (FIXADDR_START - 2*PAGE_SIZE) + +#define pte_ERROR(e) \ + printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \ + __FILE__, __LINE__, pte_val(e)) +#define pgd_ERROR(e) \ + printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \ + __FILE__, __LINE__, pgd_val(e)) + +/* + * Empty pgd/pmd entries point to the invalid_pte_table. + */ +static inline int pmd_none(pmd_t pmd) +{ + return pmd_val(pmd) == (unsigned long) invalid_pte_table; +} + +#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) + +static inline int pmd_present(pmd_t pmd) +{ + return pmd_val(pmd) != (unsigned long) invalid_pte_table; +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + pmd_val(*pmdp) = ((unsigned long) invalid_pte_table); +} + +#define pte_page(x) pfn_to_page(pte_pfn(x)) +#define pte_pfn(x) ((unsigned long)((x).pte >> PAGE_SHIFT)) +#define pfn_pte(pfn, prot) \ + __pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) + +#define __pgd_offset(address) pgd_index(address) +#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) +#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) + +/* to find an entry in a kernel page-table-directory */ +#define pgd_offset_k(address) pgd_offset(&init_mm, address) +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) + +/* to find an entry in a page-table-directory */ +#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) + +/* Find an entry in the third-level page table.. */ +#define __pte_offset(address) \ + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define pte_offset(dir, address) \ + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) +#define pte_offset_kernel(dir, address) \ + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) + +#define pte_offset_map(dir, address) \ + ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) +#define pte_offset_map_nested(dir, address) \ + ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) +#define pte_unmap(pte) ((void)(pte)) +#define pte_unmap_nested(pte) ((void)(pte)) + +/* + * Bits 9(_PAGE_PRESENT) and 10(_PAGE_FILE)are taken, + * split up 30 bits of offset into this range: + */ +#define PTE_FILE_MAX_BITS 30 +#define pte_to_pgoff(_pte) \ + (((_pte).pte & 0x1ff) | (((_pte).pte >> 11) << 9)) +#define pgoff_to_pte(off) \ + ((pte_t) {((off) & 0x1ff) | (((off) >> 9) << 11) | _PAGE_FILE}) +#define __pte_to_swp_entry(pte) \ + ((swp_entry_t) { pte_val(pte)}) +#define __swp_entry_to_pte(x) ((pte_t) {(x).val}) + +#define __P000 __pgprot(0) +#define __P001 __pgprot(0) +#define __P010 __pgprot(0) +#define __P011 __pgprot(0) +#define __P100 __pgprot(0) +#define __P101 __pgprot(0) +#define __P110 __pgprot(0) +#define __P111 __pgprot(0) + +#define __S000 __pgprot(0) +#define __S001 __pgprot(0) +#define __S010 __pgprot(0) +#define __S011 __pgprot(0) +#define __S100 __pgprot(0) +#define __S101 __pgprot(0) +#define __S110 __pgprot(0) +#define __S111 __pgprot(0) + +#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd))) +#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } + +#define set_pte(pteptr, pteval) (*(pteptr) = pteval) +#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) +#define 
pte_clear(mm, addr, xp) \ + do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) + +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + +#define pgd_present(pgd) (1) /* pages are always present on non MMU */ +#define pgd_none(pgd) (0) +#define pgd_bad(pgd) (0) +#define pgd_clear(pgdp) + +#define kern_addr_valid(addr) (1) +#define pmd_offset(a, b) ((void *) 0) +#define pmd_page_vaddr(pmd) pmd_val(pmd) + +#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) +#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) + +#define pud_offset(pgd, address) ((pud_t *) pgd) + +#define PAGE_NONE __pgprot(0) /* these mean nothing to non MMU */ +#define PAGE_SHARED __pgprot(0) /* these mean nothing to non MMU */ +#define PAGE_COPY __pgprot(0) /* these mean nothing to non MMU */ +#define PAGE_READONLY __pgprot(0) /* these mean nothing to non MMU */ +#define PAGE_KERNEL __pgprot(0) /* these mean nothing to non MMU */ + +#define pgprot_noncached(x) (x) + +#define __swp_type(x) (0) +#define __swp_offset(x) (0) +#define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) }) + +#define ZERO_PAGE(vaddr) ({ BUG(); NULL; }) + +#define swapper_pg_dir ((pgd_t *) NULL) + +#define pgtable_cache_init() do {} while (0) + +#define arch_enter_lazy_cpu_mode() do {} while (0) + +static inline int pte_write(pte_t pte) +{ + return pte_val(pte) & _PAGE_WRITE; +} + +static inline int pte_dirty(pte_t pte) +{ + return pte_val(pte) & _PAGE_MODIFIED; +} + +static inline int pte_young(pte_t pte) +{ + return pte_val(pte) & _PAGE_ACCESSED; +} + +static inline int pte_file(pte_t pte) +{ + return pte_val(pte) & _PAGE_FILE; +} + +#define pte_special(pte) (0) + +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE); + return pte; +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE); + return pte; +} + +static inline pte_t pte_mkold(pte_t pte) +{ + pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ); + return pte; +} + +static inline pte_t pte_mkwrite(pte_t pte) +{ + pte_val(pte) |= _PAGE_WRITE; + if (pte_val(pte) & _PAGE_MODIFIED) + pte_val(pte) |= _PAGE_SILENT_WRITE; + return pte; +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + pte_val(pte) |= _PAGE_MODIFIED; + if (pte_val(pte) & _PAGE_WRITE) + pte_val(pte) |= _PAGE_SILENT_WRITE; + return pte; +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + pte_val(pte) |= _PAGE_ACCESSED; + if (pte_val(pte) & _PAGE_READ) + pte_val(pte) |= _PAGE_SILENT_READ; + return pte; +} + +#define set_pmd(pmdptr, pmdval) \ + do { *(pmdptr) = (pmdval); } while (0) +#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) + +extern unsigned long pgd_current; +extern void paging_init(void); + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); +} + +extern void __update_tlb(struct vm_area_struct *vma, + unsigned long address, pte_t pte); +extern void __update_cache(struct vm_area_struct *vma, + unsigned long address, pte_t pte); + +static inline void update_mmu_cache(struct vm_area_struct *vma, + unsigned long address, pte_t pte) +{ + __update_tlb(vma, address, pte); + __update_cache(vma, address, pte); +} + +#ifndef __ASSEMBLY__ +#include + +void setup_memory(void); +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_SCORE_PGTABLE_H */ diff --git a/arch/score/include/asm/poll.h b/arch/score/include/asm/poll.h new file mode 100644 index 00000000000..18532db0286 --- 
/dev/null +++ b/arch/score/include/asm/poll.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_POLL_H +#define _ASM_SCORE_POLL_H + +#include + +#endif /* _ASM_SCORE_POLL_H */ diff --git a/arch/score/include/asm/posix_types.h b/arch/score/include/asm/posix_types.h new file mode 100644 index 00000000000..b88acf80048 --- /dev/null +++ b/arch/score/include/asm/posix_types.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_POSIX_TYPES_H +#define _ASM_SCORE_POSIX_TYPES_H + +#include + +#endif /* _ASM_SCORE_POSIX_TYPES_H */ diff --git a/arch/score/include/asm/processor.h b/arch/score/include/asm/processor.h new file mode 100644 index 00000000000..7e22f216d77 --- /dev/null +++ b/arch/score/include/asm/processor.h @@ -0,0 +1,106 @@ +#ifndef _ASM_SCORE_PROCESSOR_H +#define _ASM_SCORE_PROCESSOR_H + +#include +#include + +#include + +struct task_struct; + +/* + * System setup and hardware flags.. + */ +extern void (*cpu_wait)(void); + +extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); +extern unsigned long thread_saved_pc(struct task_struct *tsk); +extern void start_thread(struct pt_regs *regs, + unsigned long pc, unsigned long sp); +extern unsigned long get_wchan(struct task_struct *p); + +/* + * Return current * instruction pointer ("program counter"). + */ +#define current_text_addr() ({ __label__ _l; _l: &&_l; }) + +#define cpu_relax() barrier() +#define release_thread(thread) do {} while (0) +#define prepare_to_copy(tsk) do {} while (0) + +/* + * User space process size: 2GB. This is hardcoded into a few places, + * so don't change it unless you know what you are doing. + */ +#define TASK_SIZE 0x7fff8000UL + +/* + * This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define TASK_UNMAPPED_BASE ((TASK_SIZE / 3) & ~(PAGE_SIZE)) + +#ifdef __KERNEL__ +#define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX TASK_SIZE +#endif + +/* + * If you change thread_struct remember to change the #defines below too! 
+ */ +struct thread_struct { + unsigned long reg0, reg2, reg3; + unsigned long reg12, reg13, reg14, reg15, reg16; + unsigned long reg17, reg18, reg19, reg20, reg21; + + unsigned long cp0_psr; + unsigned long cp0_ema; /* Last user fault */ + unsigned long cp0_badvaddr; /* Last user fault */ + unsigned long cp0_baduaddr; /* Last kernel fault accessing USEG */ + unsigned long error_code; + unsigned long trap_no; + + unsigned long mflags; + unsigned long reg29; + + unsigned long single_step; + unsigned long ss_nextcnt; + + unsigned long insn1_type; + unsigned long addr1; + unsigned long insn1; + + unsigned long insn2_type; + unsigned long addr2; + unsigned long insn2; + + mm_segment_t current_ds; +}; + +#define INIT_THREAD { \ + .reg0 = 0, \ + .reg2 = 0, \ + .reg3 = 0, \ + .reg12 = 0, \ + .reg13 = 0, \ + .reg14 = 0, \ + .reg15 = 0, \ + .reg16 = 0, \ + .reg17 = 0, \ + .reg18 = 0, \ + .reg19 = 0, \ + .reg20 = 0, \ + .reg21 = 0, \ + .cp0_psr = 0, \ + .error_code = 0, \ + .trap_no = 0, \ +} + +#define kstk_tos(tsk) \ + ((unsigned long)task_stack_page(tsk) + THREAD_SIZE - 32) +#define task_pt_regs(tsk) ((struct pt_regs *)kstk_tos(tsk) - 1) + +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29]) + +#endif /* _ASM_SCORE_PROCESSOR_H */ diff --git a/arch/score/include/asm/ptrace.h b/arch/score/include/asm/ptrace.h new file mode 100644 index 00000000000..1a4900ac49f --- /dev/null +++ b/arch/score/include/asm/ptrace.h @@ -0,0 +1,87 @@ +#ifndef _ASM_SCORE_PTRACE_H +#define _ASM_SCORE_PTRACE_H + +#define PC 32 +#define CONDITION 33 +#define ECR 34 +#define EMA 35 +#define CEH 36 +#define CEL 37 +#define COUNTER 38 +#define LDCR 39 +#define STCR 40 +#define PSR 41 + +#define SINGLESTEP16_INSN 0x7006 +#define SINGLESTEP32_INSN 0x840C8000 +#define BREAKPOINT16_INSN 0x7002 /* work on SPG300 */ +#define BREAKPOINT32_INSN 0x84048000 /* work on SPG300 */ + +/* Define instruction mask */ +#define INSN32_MASK 0x80008000 + +#define J32 0x88008000 /* 1_00010_0000000000_1_000000000000000 */ +#define J32M 0xFC008000 /* 1_11111_0000000000_1_000000000000000 */ + +#define B32 0x90008000 /* 1_00100_0000000000_1_000000000000000 */ +#define B32M 0xFC008000 +#define BL32 0x90008001 /* 1_00100_0000000000_1_000000000000001 */ +#define BL32M B32 +#define BR32 0x80008008 /* 1_00000_0000000000_1_00000000_000100_0 */ +#define BR32M 0xFFE0807E +#define BRL32 0x80008009 /* 1_00000_0000000000_1_00000000_000100_1 */ +#define BRL32M BR32M + +#define B32_SET (J32 | B32 | BL32 | BR32 | BRL32) + +#define J16 0x3000 /* 0_011_....... */ +#define J16M 0xF000 +#define B16 0x4000 /* 0_100_....... */ +#define B16M 0xF000 +#define BR16 0x0004 /* 0_000.......0100 */ +#define BR16M 0xF00F +#define B16_SET (J16 | B16 | BR16) + + +/* + * This struct defines the way the registers are stored on the stack during a + * system call/exception. As usual the registers k0/k1 aren't being saved. + */ +struct pt_regs { + unsigned long pad0[6]; + unsigned long orig_r4; + unsigned long orig_r7; + unsigned long regs[32]; + + unsigned long cel; + unsigned long ceh; + + unsigned long sr0; /* cnt */ + unsigned long sr1; /* lcr */ + unsigned long sr2; /* scr */ + + unsigned long cp0_epc; + unsigned long cp0_ema; + unsigned long cp0_psr; + unsigned long cp0_ecr; + unsigned long cp0_condition; + + long is_syscall; +}; + +#ifdef __KERNEL__ + +/* + * Does the process account for user or for system time? 
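+ *
+ * user_mode() below answers this by testing the UM bit (bit 3) of the
+ * saved PSR; a typical caller, sketched with illustrative counters:
+ *
+ *	if (user_mode(regs))
+ *		utime++;
+ *	else
+ *		stime++;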
+ */ +#define user_mode(regs) ((regs->cp0_psr & 8) == 8) + +#define instruction_pointer(regs) (0) +#define profile_pc(regs) instruction_pointer(regs) + +extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit); +extern int read_tsk_long(struct task_struct *, unsigned long, unsigned long *); +extern void clear_single_step(struct task_struct *); +#endif + +#endif /* _ASM_SCORE_PTRACE_H */ diff --git a/arch/score/include/asm/resource.h b/arch/score/include/asm/resource.h new file mode 100644 index 00000000000..9ce22bc7b47 --- /dev/null +++ b/arch/score/include/asm/resource.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_RESOURCE_H +#define _ASM_SCORE_RESOURCE_H + +#include + +#endif /* _ASM_SCORE_RESOURCE_H */ diff --git a/arch/score/include/asm/scatterlist.h b/arch/score/include/asm/scatterlist.h new file mode 100644 index 00000000000..9f533b8362c --- /dev/null +++ b/arch/score/include/asm/scatterlist.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SCATTERLIST_H +#define _ASM_SCORE_SCATTERLIST_H + +#include + +#endif /* _ASM_SCORE_SCATTERLIST_H */ diff --git a/arch/score/include/asm/scoreregs.h b/arch/score/include/asm/scoreregs.h new file mode 100644 index 00000000000..d0ad2920451 --- /dev/null +++ b/arch/score/include/asm/scoreregs.h @@ -0,0 +1,51 @@ +#ifndef _ASM_SCORE_SCOREREGS_H +#define _ASM_SCORE_SCOREREGS_H + +#include + +/* TIMER register */ +#define TIME0BASE 0x96080000 +#define P_TIMER0_CTRL (TIME0BASE + 0x00) +#define P_TIMER0_CPP_CTRL (TIME0BASE + 0x04) +#define P_TIMER0_PRELOAD (TIME0BASE + 0x08) +#define P_TIMER0_CPP_REG (TIME0BASE + 0x0C) +#define P_TIMER0_UPCNT (TIME0BASE + 0x10) + +/* Timer Controller Register */ +/* bit 0 Timer enable */ +#define TMR_DISABLE 0x0000 +#define TMR_ENABLE 0x0001 + +/* bit 1 Interrupt enable */ +#define TMR_IE_DISABLE 0x0000 +#define TMR_IE_ENABLE 0x0002 + +/* bit 2 Output enable */ +#define TMR_OE_DISABLE 0x0004 +#define TMR_OE_ENABLE 0x0000 + +/* bit4 Up/Down counting selection */ +#define TMR_UD_DOWN 0x0000 +#define TMR_UD_UP 0x0010 + +/* bit5 Up/Down counting control selection */ +#define TMR_UDS_UD 0x0000 +#define TMR_UDS_EXTUD 0x0020 + +/* bit6 Time output mode */ +#define TMR_OM_TOGGLE 0x0000 +#define TMR_OM_PILSE 0x0040 + +/* bit 8..9 External input active edge selection */ +#define TMR_ES_PE 0x0000 +#define TMR_ES_NE 0x0100 +#define TMR_ES_BOTH 0x0200 + +/* bit 10..11 Operating mode */ +#define TMR_M_FREE 0x0000 /* free running timer mode */ +#define TMR_M_PERIODIC 0x0400 /* periodic timer mode */ +#define TMR_M_FC 0x0800 /* free running counter mode */ +#define TMR_M_PC 0x0c00 /* periodic counter mode */ + +#define SYSTEM_CLOCK (27*1000000/4) /* 27 MHz */ +#endif /* _ASM_SCORE_SCOREREGS_H */ diff --git a/arch/score/include/asm/sections.h b/arch/score/include/asm/sections.h new file mode 100644 index 00000000000..9441d23af00 --- /dev/null +++ b/arch/score/include/asm/sections.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SECTIONS_H +#define _ASM_SCORE_SECTIONS_H + +#include + +#endif /* _ASM_SCORE_SECTIONS_H */ diff --git a/arch/score/include/asm/segment.h b/arch/score/include/asm/segment.h new file mode 100644 index 00000000000..e16cf6afb49 --- /dev/null +++ b/arch/score/include/asm/segment.h @@ -0,0 +1,21 @@ +#ifndef _ASM_SCORE_SEGMENT_H +#define _ASM_SCORE_SEGMENT_H + +#ifndef __ASSEMBLY__ + +typedef struct { + unsigned long seg; +} mm_segment_t; + +#define KERNEL_DS ((mm_segment_t){0}) +#define USER_DS KERNEL_DS + +# define get_ds() (KERNEL_DS) +# define get_fs() (current_thread_info()->addr_limit) +# define set_fs(x) \ + do { 
current_thread_info()->addr_limit = (x); } while (0) + +# define segment_eq(a, b) ((a).seg == (b).seg) + +# endif /* __ASSEMBLY__ */ +#endif /* _ASM_SCORE_SEGMENT_H */ diff --git a/arch/score/include/asm/sembuf.h b/arch/score/include/asm/sembuf.h new file mode 100644 index 00000000000..dae5e835ce9 --- /dev/null +++ b/arch/score/include/asm/sembuf.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SEMBUF_H +#define _ASM_SCORE_SEMBUF_H + +#include + +#endif /* _ASM_SCORE_SEMBUF_H */ diff --git a/arch/score/include/asm/setup.h b/arch/score/include/asm/setup.h new file mode 100644 index 00000000000..de89eff98da --- /dev/null +++ b/arch/score/include/asm/setup.h @@ -0,0 +1,40 @@ +#ifndef _ASM_SCORE_SETUP_H +#define _ASM_SCORE_SETUP_H + +#define COMMAND_LINE_SIZE 256 +#define MEM_SIZE 0x2000000 + +#ifdef __KERNEL__ + +extern void pagetable_init(void); +extern void pgd_init(unsigned long page); + +extern void setup_early_printk(void); +extern void cpu_cache_init(void); +extern void tlb_init(void); + +extern void handle_nmi(void); +extern void handle_adelinsn(void); +extern void handle_adedata(void); +extern void handle_ibe(void); +extern void handle_pel(void); +extern void handle_sys(void); +extern void handle_ccu(void); +extern void handle_ri(void); +extern void handle_tr(void); +extern void handle_ades(void); +extern void handle_cee(void); +extern void handle_cpe(void); +extern void handle_dve(void); +extern void handle_dbe(void); +extern void handle_reserved(void); +extern void handle_tlb_refill(void); +extern void handle_tlb_invaild(void); +extern void handle_mod(void); +extern void debug_exception_vector(void); +extern void general_exception_vector(void); +extern void interrupt_exception_vector(void); + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SCORE_SETUP_H */ diff --git a/arch/score/include/asm/shmbuf.h b/arch/score/include/asm/shmbuf.h new file mode 100644 index 00000000000..c85b2429ba2 --- /dev/null +++ b/arch/score/include/asm/shmbuf.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SHMBUF_H +#define _ASM_SCORE_SHMBUF_H + +#include + +#endif /* _ASM_SCORE_SHMBUF_H */ diff --git a/arch/score/include/asm/shmparam.h b/arch/score/include/asm/shmparam.h new file mode 100644 index 00000000000..1d60813141b --- /dev/null +++ b/arch/score/include/asm/shmparam.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SHMPARAM_H +#define _ASM_SCORE_SHMPARAM_H + +#include + +#endif /* _ASM_SCORE_SHMPARAM_H */ diff --git a/arch/score/include/asm/sigcontext.h b/arch/score/include/asm/sigcontext.h new file mode 100644 index 00000000000..5ffda39ddb9 --- /dev/null +++ b/arch/score/include/asm/sigcontext.h @@ -0,0 +1,22 @@ +#ifndef _ASM_SCORE_SIGCONTEXT_H +#define _ASM_SCORE_SIGCONTEXT_H + +/* + * Keep this struct definition in sync with the sigcontext fragment + * in arch/score/tools/offset.c + */ +struct sigcontext { + unsigned int sc_regmask; + unsigned int sc_psr; + unsigned int sc_condition; + unsigned long sc_pc; + unsigned long sc_regs[32]; + unsigned int sc_ssflags; + unsigned int sc_mdceh; + unsigned int sc_mdcel; + unsigned int sc_ecr; + unsigned long sc_ema; + unsigned long sc_sigset[4]; +}; + +#endif /* _ASM_SCORE_SIGCONTEXT_H */ diff --git a/arch/score/include/asm/siginfo.h b/arch/score/include/asm/siginfo.h new file mode 100644 index 00000000000..87ca35607a2 --- /dev/null +++ b/arch/score/include/asm/siginfo.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SIGINFO_H +#define _ASM_SCORE_SIGINFO_H + +#include + +#endif /* _ASM_SCORE_SIGINFO_H */ diff --git a/arch/score/include/asm/signal.h b/arch/score/include/asm/signal.h new file mode 100644 
index 00000000000..2605bc06b64 --- /dev/null +++ b/arch/score/include/asm/signal.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SIGNAL_H +#define _ASM_SCORE_SIGNAL_H + +#include + +#endif /* _ASM_SCORE_SIGNAL_H */ diff --git a/arch/score/include/asm/socket.h b/arch/score/include/asm/socket.h new file mode 100644 index 00000000000..612a70e385b --- /dev/null +++ b/arch/score/include/asm/socket.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SOCKET_H +#define _ASM_SCORE_SOCKET_H + +#include + +#endif /* _ASM_SCORE_SOCKET_H */ diff --git a/arch/score/include/asm/sockios.h b/arch/score/include/asm/sockios.h new file mode 100644 index 00000000000..ba825648018 --- /dev/null +++ b/arch/score/include/asm/sockios.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SOCKIOS_H +#define _ASM_SCORE_SOCKIOS_H + +#include + +#endif /* _ASM_SCORE_SOCKIOS_H */ diff --git a/arch/score/include/asm/stat.h b/arch/score/include/asm/stat.h new file mode 100644 index 00000000000..5037055500a --- /dev/null +++ b/arch/score/include/asm/stat.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_STAT_H +#define _ASM_SCORE_STAT_H + +#include + +#endif /* _ASM_SCORE_STAT_H */ diff --git a/arch/score/include/asm/statfs.h b/arch/score/include/asm/statfs.h new file mode 100644 index 00000000000..36e41004e99 --- /dev/null +++ b/arch/score/include/asm/statfs.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_STATFS_H +#define _ASM_SCORE_STATFS_H + +#include + +#endif /* _ASM_SCORE_STATFS_H */ diff --git a/arch/score/include/asm/string.h b/arch/score/include/asm/string.h new file mode 100644 index 00000000000..8a6bf5063aa --- /dev/null +++ b/arch/score/include/asm/string.h @@ -0,0 +1,8 @@ +#ifndef _ASM_SCORE_STRING_H +#define _ASM_SCORE_STRING_H + +extern void *memset(void *__s, int __c, size_t __count); +extern void *memcpy(void *__to, __const__ void *__from, size_t __n); +extern void *memmove(void *__dest, __const__ void *__src, size_t __n); + +#endif /* _ASM_SCORE_STRING_H */ diff --git a/arch/score/include/asm/swab.h b/arch/score/include/asm/swab.h new file mode 100644 index 00000000000..fadc3cc6d8a --- /dev/null +++ b/arch/score/include/asm/swab.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_SWAB_H +#define _ASM_SCORE_SWAB_H + +#include + +#endif /* _ASM_SCORE_SWAB_H */ diff --git a/arch/score/include/asm/syscalls.h b/arch/score/include/asm/syscalls.h new file mode 100644 index 00000000000..00c28e04fdf --- /dev/null +++ b/arch/score/include/asm/syscalls.h @@ -0,0 +1,9 @@ +#ifndef _ASM_SCORE_SYSCALLS_H +#define _ASM_SCORE_SYSCALLS_H + +asmlinkage long sys_clone(int flags, unsigned long stack, struct pt_regs *regs); +#define sys_clone sys_clone + +#include + +#endif /* _ASM_SCORE_SYSCALLS_H */ diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h new file mode 100644 index 00000000000..589d5c7e171 --- /dev/null +++ b/arch/score/include/asm/system.h @@ -0,0 +1,90 @@ +#ifndef _ASM_SCORE_SYSTEM_H +#define _ASM_SCORE_SYSTEM_H + +#include +#include + +struct pt_regs; +struct task_struct; + +extern void *resume(void *last, void *next, void *next_ti); + +#define switch_to(prev, next, last) \ +do { \ + (last) = resume(prev, next, task_thread_info(next)); \ +} while (0) + +#define finish_arch_switch(prev) do {} while (0) + +typedef void (*vi_handler_t)(void); +extern unsigned long arch_align_stack(unsigned long sp); + +#define mb() barrier() +#define rmb() barrier() +#define wmb() barrier() +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() + +#define read_barrier_depends() do {} while (0) +#define smp_read_barrier_depends() do {} while (0) + 
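+
+/*
+ * The xchg()/cmpxchg() implementations further down fake atomicity by
+ * masking interrupts, which is sufficient while there is only one CPU.
+ * A usage sketch ('lock' is a hypothetical variable, not part of this
+ * patch):
+ *
+ *	static unsigned long lock;
+ *
+ *	while (cmpxchg(&lock, 0, 1) != 0)
+ *		cpu_relax();		(spin until the 0 -> 1 swap wins)
+ *	... critical section ...
+ *	xchg(&lock, 0);			(release)
+ */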
+#define set_mb(var, value) do {var = value; wmb(); } while (0) + +#define __HAVE_ARCH_CMPXCHG 1 + +#include + +#ifndef __ASSEMBLY__ + +struct __xchg_dummy { unsigned long a[100]; }; +#define __xg(x) ((struct __xchg_dummy *)(x)) + +static inline +unsigned long __xchg(volatile unsigned long *m, unsigned long val) +{ + unsigned long retval; + unsigned long flags; + + local_irq_save(flags); + retval = *m; + *m = val; + local_irq_restore(flags); + return retval; +} + +#define xchg(ptr, v) \ + ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \ + (unsigned long)(v))) + +static inline unsigned long __cmpxchg(volatile unsigned long *m, + unsigned long old, unsigned long new) +{ + unsigned long retval; + unsigned long flags; + + local_irq_save(flags); + retval = *m; + if (retval == old) + *m = new; + local_irq_restore(flags); + return retval; +} + +#define cmpxchg(ptr, o, n) \ + ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \ + (unsigned long)(o), \ + (unsigned long)(n))) + +extern void __die(const char *, struct pt_regs *, const char *, + const char *, unsigned long) __attribute__((noreturn)); +extern void __die_if_kernel(const char *, struct pt_regs *, const char *, + const char *, unsigned long); + +#define die(msg, regs) \ + __die(msg, regs, __FILE__ ":", __func__, __LINE__) +#define die_if_kernel(msg, regs) \ + __die_if_kernel(msg, regs, __FILE__ ":", __func__, __LINE__) + +#endif /* !__ASSEMBLY__ */ +#endif /* _ASM_SCORE_SYSTEM_H */ diff --git a/arch/score/include/asm/termbits.h b/arch/score/include/asm/termbits.h new file mode 100644 index 00000000000..9a95c141243 --- /dev/null +++ b/arch/score/include/asm/termbits.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_TERMBITS_H +#define _ASM_SCORE_TERMBITS_H + +#include + +#endif /* _ASM_SCORE_TERMBITS_H */ diff --git a/arch/score/include/asm/termios.h b/arch/score/include/asm/termios.h new file mode 100644 index 00000000000..40984e811ad --- /dev/null +++ b/arch/score/include/asm/termios.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_TERMIOS_H +#define _ASM_SCORE_TERMIOS_H + +#include + +#endif /* _ASM_SCORE_TERMIOS_H */ diff --git a/arch/score/include/asm/thread_info.h b/arch/score/include/asm/thread_info.h new file mode 100644 index 00000000000..0af8ca074c8 --- /dev/null +++ b/arch/score/include/asm/thread_info.h @@ -0,0 +1,103 @@ +#ifndef _ASM_SCORE_THREAD_INFO_H +#define _ASM_SCORE_THREAD_INFO_H + +#ifdef __KERNEL__ + +#define KU_MASK 0x08 +#define KU_USER 0x08 +#define KU_KERN 0x00 + +#ifndef __ASSEMBLY__ + +#include + +/* + * low level task data that entry.S needs immediate access to + * - this struct should fit entirely inside of one cache line + * - this struct shares the supervisor stack pages + * - if the contents of this structure are changed, the assembly constants + * must also be changed + */ +struct thread_info { + struct task_struct *task; /* main task structure */ + struct exec_domain *exec_domain; /* execution domain */ + unsigned long flags; /* low level flags */ + unsigned long tp_value; /* thread pointer */ + __u32 cpu; /* current CPU */ + + /* 0 => preemptable, < 0 => BUG */ + int preempt_count; + + /* + * thread address space: + * 0-0xBFFFFFFF for user-thead + * 0-0xFFFFFFFF for kernel-thread + */ + mm_segment_t addr_limit; + struct restart_block restart_block; + struct pt_regs *regs; +}; + +/* + * macros/functions for gaining access to the thread information structure + * + * preempt_count needs to be 1 initially, until the scheduler is functional. 
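+ *
+ * The thread_info pointer itself lives permanently in r28 on this port
+ * (see __current_thread_info below), so reaching current state is a
+ * plain register-relative load, roughly:
+ *
+ *	struct thread_info *ti = current_thread_info();	(reads r28)
+ *	struct task_struct *tsk = ti->task;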
+ */ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .exec_domain = &default_exec_domain, \ + .cpu = 0, \ + .preempt_count = 1, \ + .addr_limit = KERNEL_DS, \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + }, \ +} + +#define init_thread_info (init_thread_union.thread_info) +#define init_stack (init_thread_union.stack) + +/* How to get the thread information struct from C. */ +register struct thread_info *__current_thread_info __asm__("r28"); +#define current_thread_info() __current_thread_info + +/* thread information allocation */ +#define THREAD_SIZE_ORDER (1) +#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) +#define THREAD_MASK (THREAD_SIZE - 1UL) +#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR + +#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) +#define free_thread_info(info) kfree(info) + +#endif /* !__ASSEMBLY__ */ + +#define PREEMPT_ACTIVE 0x10000000 + +/* + * thread information flags + * - these are process state flags that various assembly files may need to + * access + * - pending work-to-be-done flags are in LSW + * - other flags in MSW + */ +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_SIGPENDING 1 /* signal pending */ +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ +#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling + TIF_NEED_RESCHED */ +#define TIF_MEMDIE 18 + +#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) + +#endif /* _ASM_SCORE_TIMEX_H */ diff --git a/arch/score/include/asm/tlb.h b/arch/score/include/asm/tlb.h new file mode 100644 index 00000000000..46882ed524e --- /dev/null +++ b/arch/score/include/asm/tlb.h @@ -0,0 +1,17 @@ +#ifndef _ASM_SCORE_TLB_H +#define _ASM_SCORE_TLB_H + +/* + * SCORE doesn't need any special per-pte or per-vma handling, except + * we need to flush cache for area to be unmapped.
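+ * tlb_start_vma()/tlb_end_vma() therefore collapse to no-ops below, and
+ * tlb_flush() simply drops the whole context with flush_tlb_mm().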
+ */ +#define tlb_start_vma(tlb, vma) do {} while (0) +#define tlb_end_vma(tlb, vma) do {} while (0) +#define __tlb_remove_tlb_entry(tlb, ptep, address) do {} while (0) +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) + +extern void score7_FTLB_refill_Handler(void); + +#include + +#endif /* _ASM_SCORE_TLB_H */ diff --git a/arch/score/include/asm/tlbflush.h b/arch/score/include/asm/tlbflush.h new file mode 100644 index 00000000000..9cce978367d --- /dev/null +++ b/arch/score/include/asm/tlbflush.h @@ -0,0 +1,142 @@ +#ifndef _ASM_SCORE_TLBFLUSH_H +#define _ASM_SCORE_TLBFLUSH_H + +#include + +/* + * TLB flushing: + * + * - flush_tlb_all() flushes all processes TLB entries + * - flush_tlb_mm(mm) flushes the specified mm context TLB entries + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(vma, start, end) flushes a range of pages + * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages + */ +extern void local_flush_tlb_all(void); +extern void local_flush_tlb_mm(struct mm_struct *mm); +extern void local_flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +extern void local_flush_tlb_kernel_range(unsigned long start, + unsigned long end); +extern void local_flush_tlb_page(struct vm_area_struct *vma, + unsigned long page); +extern void local_flush_tlb_one(unsigned long vaddr); + +#define flush_tlb_all() local_flush_tlb_all() +#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) +#define flush_tlb_range(vma, vmaddr, end) \ + local_flush_tlb_range(vma, vmaddr, end) +#define flush_tlb_kernel_range(vmaddr, end) \ + local_flush_tlb_kernel_range(vmaddr, end) +#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) +#define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr) + +#ifndef __ASSEMBLY__ + +static inline unsigned long pevn_get(void) +{ + unsigned long val; + + __asm__ __volatile__( + "mfcr %0, cr11\n" + "nop\nnop\n" + : "=r" (val)); + + return val; +} + +static inline void pevn_set(unsigned long val) +{ + __asm__ __volatile__( + "mtcr %0, cr11\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (val)); +} + +static inline void pectx_set(unsigned long val) +{ + __asm__ __volatile__( + "mtcr %0, cr12\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (val)); +} + +static inline unsigned long pectx_get(void) +{ + unsigned long val; + __asm__ __volatile__( + "mfcr %0, cr12\n" + "nop\nnop\n" + : "=r" (val)); + return val; +} +static inline unsigned long tlblock_get(void) +{ + unsigned long val; + + __asm__ __volatile__( + "mfcr %0, cr7\n" + "nop\nnop\n" + : "=r" (val)); + return val; +} +static inline void tlblock_set(unsigned long val) +{ + __asm__ __volatile__( + "mtcr %0, cr7\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (val)); +} + +static inline void tlbpt_set(unsigned long val) +{ + __asm__ __volatile__( + "mtcr %0, cr8\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (val)); +} + +static inline long tlbpt_get(void) +{ + long val; + + __asm__ __volatile__( + "mfcr %0, cr8\n" + "nop\nnop\n" + : "=r" (val)); + + return val; +} + +static inline void peaddr_set(unsigned long val) +{ + __asm__ __volatile__( + "mtcr %0, cr9\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (val)); +} + +/* TLB operations. 
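+ * A plausible single-entry invalidate sequence (an assumption based on
+ * the MIPS-like probe/index model, not taken from this patch), run with
+ * interrupts already disabled:
+ *
+ *	pevn_set(vaddr & PAGE_MASK);	(pick the entry by virtual page)
+ *	tlb_probe();			(index lands in the TLBPT register)
+ *	if (tlbpt_get() >= 0) {		(negative means no match)
+ *		pectx_set(0);		(clear the data half)
+ *		tlb_write_indexed();	(write it back invalidated)
+ *	}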
*/ +static inline void tlb_probe(void) +{ + __asm__ __volatile__("stlb;nop;nop;nop;nop;nop"); +} + +static inline void tlb_read(void) +{ + __asm__ __volatile__("mftlb;nop;nop;nop;nop;nop"); +} + +static inline void tlb_write_indexed(void) +{ + __asm__ __volatile__("mtptlb;nop;nop;nop;nop;nop"); +} + +static inline void tlb_write_random(void) +{ + __asm__ __volatile__("mtrtlb;nop;nop;nop;nop;nop"); +} + +#endif /* Not __ASSEMBLY__ */ + +#endif /* _ASM_SCORE_TLBFLUSH_H */ diff --git a/arch/score/include/asm/topology.h b/arch/score/include/asm/topology.h new file mode 100644 index 00000000000..425fba381f8 --- /dev/null +++ b/arch/score/include/asm/topology.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_TOPOLOGY_H +#define _ASM_SCORE_TOPOLOGY_H + +#include + +#endif /* _ASM_SCORE_TOPOLOGY_H */ diff --git a/arch/score/include/asm/types.h b/arch/score/include/asm/types.h new file mode 100644 index 00000000000..2140032778e --- /dev/null +++ b/arch/score/include/asm/types.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_TYPES_H +#define _ASM_SCORE_TYPES_H + +#include + +#endif /* _ASM_SCORE_TYPES_H */ diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h new file mode 100644 index 00000000000..43ce28a1d0f --- /dev/null +++ b/arch/score/include/asm/uaccess.h @@ -0,0 +1,27 @@ +#ifndef _ASM_SCORE_UACCESS_H +#define _ASM_SCORE_UACCESS_H +/* + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +struct pt_regs; +extern int fixup_exception(struct pt_regs *regs); + +#ifndef __ASSEMBLY__ + +#define __range_ok(addr, size) \ + ((((unsigned long)(addr) >= 0x80000000) \ + || ((unsigned long)(size) > 0x80000000) \ + || (((unsigned long)(addr) + (unsigned long)(size)) > 0x80000000))) + +#define __access_ok(addr, size) \ + (__range_ok((addr), (size)) == 0) + +#include + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_SCORE_UACCESS_H */ diff --git a/arch/score/include/asm/unaligned.h b/arch/score/include/asm/unaligned.h new file mode 100644 index 00000000000..2fc06de51c6 --- /dev/null +++ b/arch/score/include/asm/unaligned.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_UNALIGNED_H +#define _ASM_SCORE_UNALIGNED_H + +#include + +#endif /* _ASM_SCORE_UNALIGNED_H */ diff --git a/arch/score/include/asm/unistd.h b/arch/score/include/asm/unistd.h new file mode 100644 index 00000000000..9aa3a159bbf --- /dev/null +++ b/arch/score/include/asm/unistd.h @@ -0,0 +1,8 @@ +#ifndef _ASM_SCORE_UNISTD_H +#define _ASM_SCORE_UNISTD_H + +#define __ARCH_HAVE_MMU + +#include + +#endif /* _ASM_SCORE_UNISTD_H */ diff --git a/arch/score/include/asm/user.h b/arch/score/include/asm/user.h new file mode 100644 index 00000000000..3cf75720fe0 --- /dev/null +++ b/arch/score/include/asm/user.h @@ -0,0 +1,4 @@ +#ifndef _ASM_SCORE_USER_H +#define _ASM_SCORE_USER_H + +#endif /* _ASM_SCORE_USER_H */ diff --git a/arch/score/kernel/Makefile b/arch/score/kernel/Makefile new file mode 100644 index 00000000000..1e5de89d5c7 --- /dev/null +++ b/arch/score/kernel/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the Linux/SCORE kernel. 
+# + +extra-y := head.o vmlinux.lds + +obj-y += entry.o init_task.o irq.o process.o ptrace.o \ + setup.o signal.o sys_score.o time.o traps.o + +obj-$(CONFIG_MODULES) += module.o diff --git a/arch/score/kernel/asm-offsets.c b/arch/score/kernel/asm-offsets.c new file mode 100644 index 00000000000..57788f44c6f --- /dev/null +++ b/arch/score/kernel/asm-offsets.c @@ -0,0 +1,216 @@ +/* + * arch/score/kernel/asm-offsets.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include + +#include + +void output_ptreg_defines(void) +{ + COMMENT("SCORE pt_regs offsets."); + OFFSET(PT_R0, pt_regs, regs[0]); + OFFSET(PT_R1, pt_regs, regs[1]); + OFFSET(PT_R2, pt_regs, regs[2]); + OFFSET(PT_R3, pt_regs, regs[3]); + OFFSET(PT_R4, pt_regs, regs[4]); + OFFSET(PT_R5, pt_regs, regs[5]); + OFFSET(PT_R6, pt_regs, regs[6]); + OFFSET(PT_R7, pt_regs, regs[7]); + OFFSET(PT_R8, pt_regs, regs[8]); + OFFSET(PT_R9, pt_regs, regs[9]); + OFFSET(PT_R10, pt_regs, regs[10]); + OFFSET(PT_R11, pt_regs, regs[11]); + OFFSET(PT_R12, pt_regs, regs[12]); + OFFSET(PT_R13, pt_regs, regs[13]); + OFFSET(PT_R14, pt_regs, regs[14]); + OFFSET(PT_R15, pt_regs, regs[15]); + OFFSET(PT_R16, pt_regs, regs[16]); + OFFSET(PT_R17, pt_regs, regs[17]); + OFFSET(PT_R18, pt_regs, regs[18]); + OFFSET(PT_R19, pt_regs, regs[19]); + OFFSET(PT_R20, pt_regs, regs[20]); + OFFSET(PT_R21, pt_regs, regs[21]); + OFFSET(PT_R22, pt_regs, regs[22]); + OFFSET(PT_R23, pt_regs, regs[23]); + OFFSET(PT_R24, pt_regs, regs[24]); + OFFSET(PT_R25, pt_regs, regs[25]); + OFFSET(PT_R26, pt_regs, regs[26]); + OFFSET(PT_R27, pt_regs, regs[27]); + OFFSET(PT_R28, pt_regs, regs[28]); + OFFSET(PT_R29, pt_regs, regs[29]); + OFFSET(PT_R30, pt_regs, regs[30]); + OFFSET(PT_R31, pt_regs, regs[31]); + + OFFSET(PT_ORIG_R4, pt_regs, orig_r4); + OFFSET(PT_ORIG_R7, pt_regs, orig_r7); + OFFSET(PT_CEL, pt_regs, cel); + OFFSET(PT_CEH, pt_regs, ceh); + OFFSET(PT_SR0, pt_regs, sr0); + OFFSET(PT_SR1, pt_regs, sr1); + OFFSET(PT_SR2, pt_regs, sr2); + OFFSET(PT_EPC, pt_regs, cp0_epc); + OFFSET(PT_EMA, pt_regs, cp0_ema); + OFFSET(PT_PSR, pt_regs, cp0_psr); + OFFSET(PT_ECR, pt_regs, cp0_ecr); + OFFSET(PT_CONDITION, pt_regs, cp0_condition); + OFFSET(PT_IS_SYSCALL, pt_regs, is_syscall); + + DEFINE(PT_SIZE, sizeof(struct pt_regs)); + BLANK(); +} + +void output_task_defines(void) +{ + COMMENT("SCORE task_struct offsets."); + OFFSET(TASK_STATE, task_struct, state); + OFFSET(TASK_THREAD_INFO, task_struct, stack); + OFFSET(TASK_FLAGS, task_struct, flags); + OFFSET(TASK_MM, task_struct, mm); + OFFSET(TASK_PID, task_struct, pid); + DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct)); + BLANK(); +} + +void output_thread_info_defines(void) +{ + COMMENT("SCORE 
thread_info offsets."); + OFFSET(TI_TASK, thread_info, task); + OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain); + OFFSET(TI_FLAGS, thread_info, flags); + OFFSET(TI_TP_VALUE, thread_info, tp_value); + OFFSET(TI_CPU, thread_info, cpu); + OFFSET(TI_PRE_COUNT, thread_info, preempt_count); + OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); + OFFSET(TI_RESTART_BLOCK, thread_info, restart_block); + OFFSET(TI_REGS, thread_info, regs); + DEFINE(KERNEL_STACK_SIZE, THREAD_SIZE); + DEFINE(KERNEL_STACK_MASK, THREAD_MASK); + BLANK(); +} + +void output_thread_defines(void) +{ + COMMENT("SCORE specific thread_struct offsets."); + OFFSET(THREAD_REG0, task_struct, thread.reg0); + OFFSET(THREAD_REG2, task_struct, thread.reg2); + OFFSET(THREAD_REG3, task_struct, thread.reg3); + OFFSET(THREAD_REG12, task_struct, thread.reg12); + OFFSET(THREAD_REG13, task_struct, thread.reg13); + OFFSET(THREAD_REG14, task_struct, thread.reg14); + OFFSET(THREAD_REG15, task_struct, thread.reg15); + OFFSET(THREAD_REG16, task_struct, thread.reg16); + OFFSET(THREAD_REG17, task_struct, thread.reg17); + OFFSET(THREAD_REG18, task_struct, thread.reg18); + OFFSET(THREAD_REG19, task_struct, thread.reg19); + OFFSET(THREAD_REG20, task_struct, thread.reg20); + OFFSET(THREAD_REG21, task_struct, thread.reg21); + OFFSET(THREAD_REG29, task_struct, thread.reg29); + + OFFSET(THREAD_PSR, task_struct, thread.cp0_psr); + OFFSET(THREAD_EMA, task_struct, thread.cp0_ema); + OFFSET(THREAD_BADUADDR, task_struct, thread.cp0_baduaddr); + OFFSET(THREAD_ECODE, task_struct, thread.error_code); + OFFSET(THREAD_TRAPNO, task_struct, thread.trap_no); + BLANK(); +} + +void output_mm_defines(void) +{ + COMMENT("Size of struct page"); + DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page)); + BLANK(); + COMMENT("Linux mm_struct offsets."); + OFFSET(MM_USERS, mm_struct, mm_users); + OFFSET(MM_PGD, mm_struct, pgd); + OFFSET(MM_CONTEXT, mm_struct, context); + BLANK(); + DEFINE(_PAGE_SIZE, PAGE_SIZE); + DEFINE(_PAGE_SHIFT, PAGE_SHIFT); + BLANK(); + DEFINE(_PGD_T_SIZE, sizeof(pgd_t)); + DEFINE(_PTE_T_SIZE, sizeof(pte_t)); + BLANK(); + DEFINE(_PGD_ORDER, PGD_ORDER); + DEFINE(_PTE_ORDER, PTE_ORDER); + BLANK(); + DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); + BLANK(); + DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD); + DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE); + BLANK(); +} + +void output_sc_defines(void) +{ + COMMENT("Linux sigcontext offsets."); + OFFSET(SC_REGS, sigcontext, sc_regs); + OFFSET(SC_MDCEH, sigcontext, sc_mdceh); + OFFSET(SC_MDCEL, sigcontext, sc_mdcel); + OFFSET(SC_PC, sigcontext, sc_pc); + OFFSET(SC_PSR, sigcontext, sc_psr); + OFFSET(SC_ECR, sigcontext, sc_ecr); + OFFSET(SC_EMA, sigcontext, sc_ema); + BLANK(); +} + +void output_signal_defined(void) +{ + COMMENT("Linux signal numbers."); + DEFINE(_SIGHUP, SIGHUP); + DEFINE(_SIGINT, SIGINT); + DEFINE(_SIGQUIT, SIGQUIT); + DEFINE(_SIGILL, SIGILL); + DEFINE(_SIGTRAP, SIGTRAP); + DEFINE(_SIGIOT, SIGIOT); + DEFINE(_SIGABRT, SIGABRT); + DEFINE(_SIGFPE, SIGFPE); + DEFINE(_SIGKILL, SIGKILL); + DEFINE(_SIGBUS, SIGBUS); + DEFINE(_SIGSEGV, SIGSEGV); + DEFINE(_SIGSYS, SIGSYS); + DEFINE(_SIGPIPE, SIGPIPE); + DEFINE(_SIGALRM, SIGALRM); + DEFINE(_SIGTERM, SIGTERM); + DEFINE(_SIGUSR1, SIGUSR1); + DEFINE(_SIGUSR2, SIGUSR2); + DEFINE(_SIGCHLD, SIGCHLD); + DEFINE(_SIGPWR, SIGPWR); + DEFINE(_SIGWINCH, SIGWINCH); + DEFINE(_SIGURG, SIGURG); + DEFINE(_SIGIO, SIGIO); + DEFINE(_SIGSTOP, SIGSTOP); + DEFINE(_SIGTSTP, SIGTSTP); + DEFINE(_SIGCONT, SIGCONT); + DEFINE(_SIGTTIN, SIGTTIN); + DEFINE(_SIGTTOU, SIGTTOU); + DEFINE(_SIGVTALRM, SIGVTALRM); + DEFINE(_SIGPROF, 
SIGPROF); + DEFINE(_SIGXCPU, SIGXCPU); + DEFINE(_SIGXFSZ, SIGXFSZ); + BLANK(); +} diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S new file mode 100644 index 00000000000..6c6b7ea58af --- /dev/null +++ b/arch/score/kernel/entry.S @@ -0,0 +1,542 @@ +/* + * arch/score/kernel/entry.S + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include + +#include +#include +#include + +/* + * disable interrupts. + */ +.macro disable_irq + mfcr r8, cr0 + srli r8, r8, 1 + slli r8, r8, 1 + mtcr r8, cr0 + nop + nop + nop + nop + nop +.endm + +/* + * enable interrupts. + */ +.macro enable_irq + mfcr r8, cr0 + ori r8, 1 + mtcr r8, cr0 + nop + nop + nop + nop + nop +.endm + +__INIT +ENTRY(debug_exception_vector) + nop! + nop! + nop! + nop! + nop! + nop! + nop! + nop! + +ENTRY(general_exception_vector) # should move to addr 0x200 + j general_exception + nop! + nop! + nop! + nop! + nop! + nop! + +ENTRY(interrupt_exception_vector) # should move to addr 0x210 + j interrupt_exception + nop! + nop! + nop! + nop! + nop! + nop! + + .section ".text", "ax" + .align 2; +general_exception: + mfcr r31, cr2 + nop + la r30, exception_handlers + andi r31, 0x1f # get ecr.exc_code + slli r31, r31, 2 + add r30, r30, r31 + lw r30, [r30] + br r30 + +interrupt_exception: + SAVE_ALL + mfcr r4, cr2 + nop + lw r16, [r28, TI_REGS] + sw r0, [r28, TI_REGS] + la r3, ret_from_irq + srli r4, r4, 18 # get ecr.ip[7:2], interrupt No. 
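+ # r4 now holds the decoded IRQ number and r0 the saved pt_regs frame;
+ # do_IRQ() in kernel/irq.c runs the generic flow handler and returns
+ # through ret_from_irq, preloaded into r3 above.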
+ mv r5, r0 + j do_IRQ + +ENTRY(handle_nmi) # NMI #1 + SAVE_ALL + mv r4, r0 + la r8, nmi_exception_handler + brl r8 + j restore_all + +ENTRY(handle_adelinsn) # AdEL-instruction #2 + SAVE_ALL + mfcr r8, cr6 + nop + nop + sw r8, [r0, PT_EMA] + mv r4, r0 + la r8, do_adelinsn + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_ibe) # BusEL-instruction #5 + SAVE_ALL + mv r4, r0 + la r8, do_be + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_pel) # P-EL #6 + SAVE_ALL + mv r4, r0 + la r8, do_pel + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_ccu) # CCU #8 + SAVE_ALL + mv r4, r0 + la r8, do_ccu + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_ri) # RI #9 + SAVE_ALL + mv r4, r0 + la r8, do_ri + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_tr) # Trap #10 + SAVE_ALL + mv r4, r0 + la r8, do_tr + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_adedata) # AdES-instruction #12 + SAVE_ALL + mfcr r8, cr6 + nop + nop + sw r8, [r0, PT_EMA] + mv r4, r0 + la r8, do_adedata + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_cee) # CeE #16 + SAVE_ALL + mv r4, r0 + la r8, do_cee + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_cpe) # CpE #17 + SAVE_ALL + mv r4, r0 + la r8, do_cpe + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_dbe) # BusEL-data #18 + SAVE_ALL + mv r4, r0 + la r8, do_be + brl r8 + mv r4, r0 + j ret_from_exception + nop + +ENTRY(handle_reserved) # others + SAVE_ALL + mv r4, r0 + la r8, do_reserved + brl r8 + mv r4, r0 + j ret_from_exception + nop + +#ifndef CONFIG_PREEMPT +#define resume_kernel restore_all +#else +#define __ret_from_irq ret_from_exception +#endif + + .align 2 +#ifndef CONFIG_PREEMPT +ENTRY(ret_from_exception) + disable_irq # preempt stop + nop + j __ret_from_irq + nop +#endif + +ENTRY(ret_from_irq) + sw r16, [r28, TI_REGS] + +ENTRY(__ret_from_irq) + lw r8, [r0, PT_PSR] # returning to kernel mode? + andri.c r8, r8, KU_USER + beq resume_kernel + +resume_userspace: + disable_irq + lw r6, [r28, TI_FLAGS] # current->work + li r8, _TIF_WORK_MASK + and.c r8, r8, r6 # ignoring syscall_trace + bne work_pending + nop + j restore_all + nop + +#ifdef CONFIG_PREEMPT +resume_kernel: + disable_irq + lw r8, [r28, TI_PRE_COUNT] + cmpz.c r8 + bne r8, restore_all +need_resched: + lw r8, [r28, TI_FLAGS] + andri.c r9, r8, _TIF_NEED_RESCHED + beq restore_all + lw r8, [r28, PT_PSR] # Interrupts off? + andri.c r8, r8, 1 + beq restore_all + bl preempt_schedule_irq + nop + j need_resched + nop +#endif + +ENTRY(ret_from_fork) + bl schedule_tail # r4=struct task_struct *prev + +ENTRY(syscall_exit) + nop + disable_irq + lw r6, [r28, TI_FLAGS] # current->work + li r8, _TIF_WORK_MASK + and.c r8, r6, r8 + bne syscall_exit_work + +ENTRY(restore_all) # restore full frame + RESTORE_ALL_AND_RET + +work_pending: + andri.c r8, r6, _TIF_NEED_RESCHED # r6 is preloaded with TI_FLAGS + beq work_notifysig +work_resched: + bl schedule + nop + disable_irq + lw r6, [r28, TI_FLAGS] + li r8, _TIF_WORK_MASK + and.c r8, r6, r8 # is there any work to be done + # other than syscall tracing? + beq restore_all + andri.c r8, r6, _TIF_NEED_RESCHED + bne work_resched + +work_notifysig: + mv r4, r0 + li r5, 0 + bl do_notify_resume # r6 already loaded + nop + j resume_userspace + nop + +ENTRY(syscall_exit_work) + li r8, _TIF_SYSCALL_TRACE + and.c r8, r8, r6 # r6 is preloaded with TI_FLAGS + beq work_pending # trace bit set? 
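+ # trace bit clear: fall back to the generic pending-work path;
+ # otherwise report the syscall exit to the tracer via
+ # do_syscall_trace(regs, 1).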
+ nop + enable_irq + mv r4, r0 + li r5, 1 + bl do_syscall_trace + nop + b resume_userspace + nop + +.macro save_context reg + sw r12, [\reg, THREAD_REG12]; + sw r13, [\reg, THREAD_REG13]; + sw r14, [\reg, THREAD_REG14]; + sw r15, [\reg, THREAD_REG15]; + sw r16, [\reg, THREAD_REG16]; + sw r17, [\reg, THREAD_REG17]; + sw r18, [\reg, THREAD_REG18]; + sw r19, [\reg, THREAD_REG19]; + sw r20, [\reg, THREAD_REG20]; + sw r21, [\reg, THREAD_REG21]; + sw r29, [\reg, THREAD_REG29]; + sw r2, [\reg, THREAD_REG2]; + sw r0, [\reg, THREAD_REG0] +.endm + +.macro restore_context reg + lw r12, [\reg, THREAD_REG12]; + lw r13, [\reg, THREAD_REG13]; + lw r14, [\reg, THREAD_REG14]; + lw r15, [\reg, THREAD_REG15]; + lw r16, [\reg, THREAD_REG16]; + lw r17, [\reg, THREAD_REG17]; + lw r18, [\reg, THREAD_REG18]; + lw r19, [\reg, THREAD_REG19]; + lw r20, [\reg, THREAD_REG20]; + lw r21, [\reg, THREAD_REG21]; + lw r29, [\reg, THREAD_REG29]; + lw r0, [\reg, THREAD_REG0]; + lw r2, [\reg, THREAD_REG2]; + lw r3, [\reg, THREAD_REG3] +.endm + +/* + * task_struct *resume(task_struct *prev, task_struct *next, + * struct thread_info *next_ti) + */ +ENTRY(resume) + mfcr r9, cr0 + nop + nop + sw r9, [r4, THREAD_PSR] + save_context r4 + sw r3, [r4, THREAD_REG3] + + mv r28, r6 + restore_context r5 + mv r8, r6 + addi r8, KERNEL_STACK_SIZE + subi r8, 32 + la r9, kernelsp; + sw r8, [r9]; + + mfcr r9, cr0 + ldis r7, 0x00ff + nop + and r9, r9, r7 + lw r6, [r5, THREAD_PSR] + not r7, r7 + and r6, r6, r7 + or r6, r6, r9 + mtcr r6, cr0 + nop; nop; nop; nop; nop + br r3 + +ENTRY(handle_sys) + SAVE_ALL + enable_irq + + sw r4, [r0, PT_ORIG_R4] #for restart syscall + sw r7, [r0, PT_ORIG_R7] #for restart syscall + sw r27, [r0, PT_IS_SYSCALL] # it from syscall + + lw r9, [r0, PT_EPC] # skip syscall on return + addi r9, 4 + sw r9, [r0, PT_EPC] + + cmpi.c r27, __NR_syscalls # check syscall number + bgtu illegal_syscall + + slli r8, r27, 3 # get syscall routine + la r11, sys_call_table + add r11, r11, r8 + lw r10, [r11] # get syscall entry + lw r11, [r11, 4] # get number of args + + cmpz.c r10 + beq illegal_syscall + + cmpi.c r11, 4 # more than 4 arguments? + bgtu stackargs + +stack_done: + lw r8, [r28, TI_FLAGS] + li r9, _TIF_SYSCALL_TRACE + and.c r8, r8, r9 + bne syscall_trace_entry + + brl r10 # Do The Real system call + + cmpi.c r4, 0 + blt 1f + ldi r8, 0 + sw r8, [r0, PT_R7] + b 2f +1: + cmpi.c r4, -EMAXERRNO-1 # -EMAXERRNO - 1=-1134 + ble 2f + ldi r8, 0x1; + sw r8, [r0, PT_R7] + neg r4, r4 +2: + sw r4, [r0, PT_R4] # save result + +syscall_return: + disable_irq + lw r6, [r28, TI_FLAGS] # current->work + li r8, _TIF_WORK_MASK + and.c r8, r6, r8 + bne syscall_return_work + j restore_all + +syscall_return_work: + j syscall_exit_work + +syscall_trace_entry: + mv r16, r10 + mv r4, r0 + li r5, 0 + bl do_syscall_trace + + mv r8, r16 + lw r4, [r0, PT_R4] # Restore argument registers + lw r5, [r0, PT_R5] + lw r6, [r0, PT_R6] + lw r7, [r0, PT_R7] + brl r8 + + li r8, -EMAXERRNO - 1 # error? 
+ sw r8, [r0, PT_R7] # set error flag + + neg r4, r4 # error + sw r4, [r0, PT_R0] # set flag for syscall + # restarting +1: sw r4, [r0, PT_R2] # result + j syscall_exit + +stackargs: + lw r8, [r0, PT_R0] + andri.c r9, r8, 3 # test whether user sp is align a word + bne bad_stack + subi r11, 5 + slli r9, r11, 2 + add.c r9, r9, r8 + + bmi bad_stack + la r9, 3f # calculate branch address + slli r11, r11, 3 + sub r9, r9, r11 + br r9 + +2: lw r9, [r8, 20] # argument 6 from usp + sw r9, [r0, 20] + +3: lw r9, [r8, 16] # argument 5 from usp + sw r9, [r0, 16] + j stack_done + + .section __ex_table,"a" + .word 2b, bad_stack + .word 3b, bad_stack + .previous + + /* + * The stackpointer for a call with more than 4 arguments is bad. + * We probably should handle this case a bit more drastic. + */ +bad_stack: + neg r27, r27 # error + sw r27, [r0, PT_ORIG_R4] + sw r27, [r0, PT_R4] + ldi r8, 1 # set error flag + sw r8, [r0, PT_R7] + j syscall_return + +illegal_syscall: + ldi r4, -ENOSYS # error + sw r4, [r0, PT_ORIG_R4] + sw r4, [r0, PT_R4] + ldi r9, 1 # set error flag + sw r9, [r0, PT_R7] + j syscall_return + +ENTRY(sys_execve) + mv r4, r0 + la r8, score_execve + br r8 + +ENTRY(sys_clone) + mv r4, r0 + la r8, score_clone + br r8 + +ENTRY(sys_rt_sigreturn) + mv r4, r0 + la r8, score_rt_sigreturn + br r8 + +ENTRY(sys_sigaltstack) + mv r4, r0 + la r8, score_sigaltstack + br r8 diff --git a/arch/score/kernel/head.S b/arch/score/kernel/head.S new file mode 100644 index 00000000000..22a7e3c7292 --- /dev/null +++ b/arch/score/kernel/head.S @@ -0,0 +1,70 @@ +/* + * arch/score/kernel/head.S + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include +#include + +#include + + .extern start_kernel + .global init_thread_union + .global kernelsp + +__INIT +ENTRY(_stext) + la r30, __bss_start /* initialize BSS segment. */ + la r31, _end + xor r8, r8, r8 + +1: cmp.c r31, r30 + beq 2f + + sw r8, [r30] /* clean memory. */ + addi r30, 4 + b 1b + +2: la r28, init_thread_union /* set kernel stack. */ + mv r0, r28 + addi r0, KERNEL_STACK_SIZE - 32 + la r30, kernelsp + sw r0, [r30] + subi r0, 4*4 + xor r30, r30, r30 + ori r30, 0x02 /* enable MMU. */ + mtcr r30, cr4 + nop + nop + nop + nop + nop + nop + nop + + /* there is no parameter */ + xor r4, r4, r4 + xor r5, r5, r5 + xor r6, r6, r6 + xor r7, r7, r7 + la r30, start_kernel /* jump to init_arch */ + br r30 diff --git a/arch/score/kernel/init_task.c b/arch/score/kernel/init_task.c new file mode 100644 index 00000000000..9eecde00932 --- /dev/null +++ b/arch/score/kernel/init_task.c @@ -0,0 +1,49 @@ +/* + * arch/score/kernel/init_task.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + +static struct signal_struct init_signals = INIT_SIGNALS(init_signals); +static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); +struct mm_struct init_mm = INIT_MM(init_mm); +EXPORT_SYMBOL(init_mm); + +/* + * Initial thread structure. + * + * We need to make sure that this is THREAD_SIZE aligned due to the + * way process stacks are handled. This is done by having a special + * "init_task" linker map entry.. + */ +union thread_union init_thread_union + __attribute__((__section__(".data.init_task"))) = + { INIT_THREAD_INFO(init_task) }; + +/* + * Initial task structure. + * + * All other task structs will be allocated on slabs in fork.c + */ +struct task_struct init_task = INIT_TASK(init_task); +EXPORT_SYMBOL(init_task); diff --git a/arch/score/kernel/irq.c b/arch/score/kernel/irq.c new file mode 100644 index 00000000000..55474e8e972 --- /dev/null +++ b/arch/score/kernel/irq.c @@ -0,0 +1,135 @@ +/* + * arch/score/kernel/irq.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include + +#include + +/* + * handles all normal device IRQs + */ +asmlinkage void do_IRQ(int irq) +{ + irq_enter(); + generic_handle_irq(irq); + irq_exit(); +} + +static void score_mask(unsigned int irq_nr) +{ + unsigned int irq_source = 63 - irq_nr; + + if (irq_source < 32) + __raw_writel((__raw_readl((void *)P_INT_MASKL) | \ + (1 << irq_source)), (void *)P_INT_MASKL); + else + __raw_writel((__raw_readl((void *)P_INT_MASKH) | \ + (1 << (irq_source - 32))), (void *)P_INT_MASKH); +} + +static void score_unmask(unsigned int irq_nr) +{ + unsigned int irq_source = 63 - irq_nr; + + if (irq_source < 32) + __raw_writel((__raw_readl((void *)P_INT_MASKL) & \ + ~(1 << irq_source)), (void *)P_INT_MASKL); + else + __raw_writel((__raw_readl((void *)P_INT_MASKH) & \ + ~(1 << (irq_source - 32))), (void *)P_INT_MASKH); +} + +struct irq_chip score_irq_chip = { + .name = "Score7-level", + .mask = score_mask, + .mask_ack = score_mask, + .unmask = score_unmask, +}; + +/* + * initialise the interrupt system + */ +void __init init_IRQ(void) +{ + int index; + unsigned long target_addr; + + for (index = 0; index < NR_IRQS; ++index) + set_irq_chip_and_handler(index, &score_irq_chip, + handle_level_irq); + + for (target_addr = IRQ_VECTOR_BASE_ADDR; + target_addr <= IRQ_VECTOR_END_ADDR; + target_addr += IRQ_VECTOR_SIZE) + memcpy((void *)target_addr, \ + interrupt_exception_vector, IRQ_VECTOR_SIZE); + + __raw_writel(0xffffffff, (void *)P_INT_MASKL); + __raw_writel(0xffffffff, (void *)P_INT_MASKH); + + __asm__ __volatile__( + "mtcr %0, cr3\n\t" + : : "r" (EXCEPTION_VECTOR_BASE_ADDR | \ + VECTOR_ADDRESS_OFFSET_MODE16)); +} + +/* + * Generic, controller-independent functions: + */ +int show_interrupts(struct seq_file *p, void *v) +{ + int i = *(loff_t *)v, cpu; + struct irqaction *action; + unsigned long flags; + + if (i == 0) { + seq_puts(p, " "); + for_each_online_cpu(cpu) + seq_printf(p, "CPU%d ", cpu); + seq_putc(p, '\n'); + } + + if (i < NR_IRQS) { + spin_lock_irqsave(&irq_desc[i].lock, flags); + action = irq_desc[i].action; + if (!action) + goto unlock; + + seq_printf(p, "%3d: ", i); + seq_printf(p, "%10u ", kstat_irqs(i)); + seq_printf(p, " %8s", irq_desc[i].chip->name ? : "-"); + seq_printf(p, " %s", action->name); + for (action = action->next; action; action = action->next) + seq_printf(p, ", %s", action->name); + + seq_putc(p, '\n'); +unlock: + spin_unlock_irqrestore(&irq_desc[i].lock, flags); + } + + return 0; +} diff --git a/arch/score/kernel/module.c b/arch/score/kernel/module.c new file mode 100644 index 00000000000..4ffce7fe156 --- /dev/null +++ b/arch/score/kernel/module.c @@ -0,0 +1,164 @@ +/* + * arch/score/kernel/module.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + +void *module_alloc(unsigned long size) +{ + return size ? vmalloc(size) : NULL; +} + +/* Free memory returned from module_alloc */ +void module_free(struct module *mod, void *module_region) +{ + vfree(module_region); +} + +int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + char *secstrings, struct module *mod) +{ + return 0; +} + +int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relindex, + struct module *me) +{ + Elf32_Shdr *symsec = sechdrs + symindex; + Elf32_Shdr *relsec = sechdrs + relindex; + Elf32_Shdr *dstsec = sechdrs + relsec->sh_info; + Elf32_Rel *rel = (void *)relsec->sh_addr; + unsigned int i; + + for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) { + unsigned long loc; + Elf32_Sym *sym; + s32 offset; + + offset = ELF32_R_SYM(rel->r_info); + if ((offset < 0) || + (offset > (symsec->sh_size / sizeof(Elf32_Sym)))) { + printk(KERN_ERR "%s: bad relocation, section %d reloc %d\n", + me->name, relindex, i); + return -ENOEXEC; + } + + sym = ((Elf32_Sym *)symsec->sh_addr) + offset; + + if ((rel->r_offset < 0) || + (rel->r_offset > dstsec->sh_size - sizeof(u32))) { + printk(KERN_ERR "%s: out of bounds relocation, " + "section %d reloc %d offset %d size %d\n", + me->name, relindex, i, rel->r_offset, + dstsec->sh_size); + return -ENOEXEC; + } + + loc = dstsec->sh_addr + rel->r_offset; + switch (ELF32_R_TYPE(rel->r_info)) { + case R_SCORE_NONE: + break; + case R_SCORE_ABS32: + *(unsigned long *)loc += sym->st_value; + break; + case R_SCORE_HI16: + break; + case R_SCORE_LO16: { + unsigned long hi16_offset, offset; + unsigned long uvalue; + unsigned long temp, temp_hi; + temp_hi = *((unsigned long *)loc - 1); + temp = *(unsigned long *)loc; + + hi16_offset = (((((temp_hi) >> 16) & 0x3) << 15) | + ((temp_hi) & 0x7fff)) >> 1; + offset = ((temp >> 16 & 0x03) << 15) | + ((temp & 0x7fff) >> 1); + offset = (hi16_offset << 16) | (offset & 0xffff); + uvalue = sym->st_value + offset; + hi16_offset = (uvalue >> 16) << 1; + + temp_hi = ((temp_hi) & (~(0x37fff))) | + (hi16_offset & 0x7fff) | + ((hi16_offset << 1) & 0x30000); + *((unsigned long *)loc - 1) = temp_hi; + + offset = (uvalue & 0xffff) << 1; + temp = (temp & (~(0x37fff))) | (offset & 0x7fff) | + ((offset << 1) & 0x30000); + *(unsigned long *)loc = temp; + break; + } + case R_SCORE_24: { + unsigned long hi16_offset, offset; + unsigned long uvalue; + unsigned long temp; + + temp = *(unsigned long *)loc; + offset = (temp & 0x03FF7FFE); + hi16_offset = (offset & 0xFFFF0000); + offset = (hi16_offset | ((offset & 0xFFFF) << 1)) >> 2; + + uvalue = (sym->st_value + offset) >> 1; + uvalue = uvalue & 0x00ffffff; + + temp = (temp & 0xfc008001) | + ((uvalue << 2) & 0x3ff0000) | + ((uvalue & 0x3fff) << 1); + *(unsigned long *)loc = temp; + break; + } + default: + printk(KERN_ERR "%s: unknown relocation: %u\n", + me->name, ELF32_R_TYPE(rel->r_info)); + return -ENOEXEC; + } + } + + return 0; +} + +int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *me) +{ + return 0; +} + +/* Given an address, look for it in the module exception tables. 
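+ * None exist for this port yet, so the lookup below always answers
+ * "not found" by returning NULL.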
*/ +const struct exception_table_entry *search_module_dbetables(unsigned long addr) +{ + return 0; +} + +/* Put in dbe list if necessary. */ +int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, + struct module *me) +{ + return 0; +} + +void module_arch_cleanup(struct module *mod) {} diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c new file mode 100644 index 00000000000..aaa3085251f --- /dev/null +++ b/arch/score/kernel/process.c @@ -0,0 +1,165 @@ +/* + * arch/score/kernel/process.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +void (*pm_power_off)(void); +EXPORT_SYMBOL(pm_power_off); + +/* If or when software machine-restart is implemented, add code here. */ +void machine_restart(char *command) {} + +/* If or when software machine-halt is implemented, add code here. */ +void machine_halt(void) {} + +/* If or when software machine-power-off is implemented, add code here. */ +void machine_power_off(void) {} + +/* + * The idle thread. There's no useful work to be + * done, so just try to conserve power and have a + * low exit latency (ie sit in a loop waiting for + * somebody to say that they'd like to reschedule) + */ +void __noreturn cpu_idle(void) +{ + /* endless idle loop with no priority at all */ + while (1) { + while (!need_resched()) + barrier(); + + preempt_enable_no_resched(); + schedule(); + preempt_disable(); + } +} + +asmlinkage void ret_from_fork(void); + +void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) +{ + unsigned long status; + + /* New thread loses kernel privileges. */ + status = regs->cp0_psr & ~(KU_MASK); + status |= KU_USER; + regs->cp0_psr = status; + regs->cp0_epc = pc; + regs->regs[0] = sp; +} + +void exit_thread(void) {} + +/* + * When a process does an "exec", machine state like FPU and debug + * registers need to be reset. This is a hook function for that. + * Currently we don't have any such state to reset, so this is empty. 
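+ * (An FPU-equipped port would clear that state here, for example
+ * memset(&current->thread.fpu, 0, sizeof(current->thread.fpu));
+ * 'fpu' is a hypothetical field; score's thread_struct has none.)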
+ */ +void flush_thread(void) {} + +/* + * set up the kernel stack and exception frames for a new process + */ +int copy_thread(unsigned long clone_flags, unsigned long usp, + unsigned long unused, + struct task_struct *p, struct pt_regs *regs) +{ + struct thread_info *ti = task_thread_info(p); + struct pt_regs *childregs = task_pt_regs(p); + + p->set_child_tid = NULL; + p->clear_child_tid = NULL; + + *childregs = *regs; + childregs->regs[7] = 0; /* Clear error flag */ + childregs->regs[4] = 0; /* Child gets zero as return value */ + regs->regs[4] = p->pid; + + if (childregs->cp0_psr & 0x8) { /* test kernel fork or user fork */ + childregs->regs[0] = usp; /* user fork */ + } else { + childregs->regs[28] = (unsigned long) ti; /* kernel fork */ + childregs->regs[0] = (unsigned long) childregs; + } + + p->thread.reg0 = (unsigned long) childregs; + p->thread.reg3 = (unsigned long) ret_from_fork; + p->thread.cp0_psr = 0; + + return 0; +} + +/* Fill in the fpu structure for a core dump. */ +int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r) +{ + return 1; +} + +static void __noreturn +kernel_thread_helper(void *unused0, int (*fn)(void *), + void *arg, void *unused1) +{ + do_exit(fn(arg)); +} + +/* + * Create a kernel thread. + */ +long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) +{ + struct pt_regs regs; + + memset(®s, 0, sizeof(regs)); + + regs.regs[6] = (unsigned long) arg; + regs.regs[5] = (unsigned long) fn; + regs.cp0_epc = (unsigned long) kernel_thread_helper; + regs.cp0_psr = (regs.cp0_psr & ~(0x1|0x4|0x8)) | \ + ((regs.cp0_psr & 0x3) << 2); + + return do_fork(flags | CLONE_VM | CLONE_UNTRACED, \ + 0, ®s, 0, NULL, NULL); +} + +unsigned long thread_saved_pc(struct task_struct *tsk) +{ + return task_pt_regs(tsk)->cp0_epc; +} + +unsigned long get_wchan(struct task_struct *task) +{ + if (!task || task == current || task->state == TASK_RUNNING) + return 0; + + if (!task_stack_page(task)) + return 0; + + return task_pt_regs(task)->cp0_epc; +} + +unsigned long arch_align_stack(unsigned long sp) +{ + return sp; +} diff --git a/arch/score/kernel/ptrace.c b/arch/score/kernel/ptrace.c new file mode 100644 index 00000000000..8fe7209355a --- /dev/null +++ b/arch/score/kernel/ptrace.c @@ -0,0 +1,465 @@ +/* + * arch/score/kernel/ptrace.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + +#include + +static int is_16bitinsn(unsigned long insn) +{ + if ((insn & INSN32_MASK) == INSN32_MASK) + return 0; + else + return 1; +} + +int +read_tsk_long(struct task_struct *child, + unsigned long addr, unsigned long *res) +{ + int copied; + + copied = access_process_vm(child, addr, res, sizeof(*res), 0); + + return copied != sizeof(*res) ? 
-EIO : 0; +} + +int +read_tsk_short(struct task_struct *child, + unsigned long addr, unsigned short *res) +{ + int copied; + + copied = access_process_vm(child, addr, res, sizeof(*res), 0); + + return copied != sizeof(*res) ? -EIO : 0; +} + +static int +write_tsk_short(struct task_struct *child, + unsigned long addr, unsigned short val) +{ + int copied; + + copied = access_process_vm(child, addr, &val, sizeof(val), 1); + + return copied != sizeof(val) ? -EIO : 0; +} + +static int +write_tsk_long(struct task_struct *child, + unsigned long addr, unsigned long val) +{ + int copied; + + copied = access_process_vm(child, addr, &val, sizeof(val), 1); + + return copied != sizeof(val) ? -EIO : 0; +} + +void set_single_step(struct task_struct *child) +{ + /* far_epc is the target of branch */ + unsigned int epc, far_epc = 0; + unsigned long epc_insn, far_epc_insn; + int ninsn_type; /* next insn type 0=16b, 1=32b */ + unsigned int tmp, tmp2; + struct pt_regs *regs = task_pt_regs(child); + child->thread.single_step = 1; + child->thread.ss_nextcnt = 1; + epc = regs->cp0_epc; + + read_tsk_long(child, epc, &epc_insn); + + if (is_16bitinsn(epc_insn)) { + if ((epc_insn & J16M) == J16) { + tmp = epc_insn & 0xFFE; + epc = (epc & 0xFFFFF000) | tmp; + } else if ((epc_insn & B16M) == B16) { + child->thread.ss_nextcnt = 2; + tmp = (epc_insn & 0xFF) << 1; + tmp = tmp << 23; + tmp = (unsigned int)((int) tmp >> 23); + far_epc = epc + tmp; + epc += 2; + } else if ((epc_insn & BR16M) == BR16) { + child->thread.ss_nextcnt = 2; + tmp = (epc_insn >> 4) & 0xF; + far_epc = regs->regs[tmp]; + epc += 2; + } else + epc += 2; + } else { + if ((epc_insn & J32M) == J32) { + tmp = epc_insn & 0x03FFFFFE; + tmp2 = tmp & 0x7FFF; + tmp = (((tmp >> 16) & 0x3FF) << 15) | tmp2; + epc = (epc & 0xFFC00000) | tmp; + } else if ((epc_insn & B32M) == B32) { + child->thread.ss_nextcnt = 2; + tmp = epc_insn & 0x03FFFFFE; /* discard LK bit */ + tmp2 = tmp & 0x3FF; + tmp = (((tmp >> 16) & 0x3FF) << 10) | tmp2; /* 20bit */ + tmp = tmp << 12; + tmp = (unsigned int)((int) tmp >> 12); + far_epc = epc + tmp; + epc += 4; + } else if ((epc_insn & BR32M) == BR32) { + child->thread.ss_nextcnt = 2; + tmp = (epc_insn >> 16) & 0x1F; + far_epc = regs->regs[tmp]; + epc += 4; + } else + epc += 4; + } + + if (child->thread.ss_nextcnt == 1) { + read_tsk_long(child, epc, &epc_insn); + + if (is_16bitinsn(epc_insn)) { + write_tsk_short(child, epc, SINGLESTEP16_INSN); + ninsn_type = 0; + } else { + write_tsk_long(child, epc, SINGLESTEP32_INSN); + ninsn_type = 1; + } + + if (ninsn_type == 0) { /* 16bits */ + child->thread.insn1_type = 0; + child->thread.addr1 = epc; + /* the insn may have 32bit data */ + child->thread.insn1 = (short)epc_insn; + } else { + child->thread.insn1_type = 1; + child->thread.addr1 = epc; + child->thread.insn1 = epc_insn; + } + } else { + /* branch! 
have two target child->thread.ss_nextcnt=2 */ + read_tsk_long(child, epc, &epc_insn); + read_tsk_long(child, far_epc, &far_epc_insn); + if (is_16bitinsn(epc_insn)) { + write_tsk_short(child, epc, SINGLESTEP16_INSN); + ninsn_type = 0; + } else { + write_tsk_long(child, epc, SINGLESTEP32_INSN); + ninsn_type = 1; + } + + if (ninsn_type == 0) { /* 16bits */ + child->thread.insn1_type = 0; + child->thread.addr1 = epc; + /* the insn may have 32bit data */ + child->thread.insn1 = (short)epc_insn; + } else { + child->thread.insn1_type = 1; + child->thread.addr1 = epc; + child->thread.insn1 = epc_insn; + } + + if (is_16bitinsn(far_epc_insn)) { + write_tsk_short(child, far_epc, SINGLESTEP16_INSN); + ninsn_type = 0; + } else { + write_tsk_long(child, far_epc, SINGLESTEP32_INSN); + ninsn_type = 1; + } + + if (ninsn_type == 0) { /* 16bits */ + child->thread.insn2_type = 0; + child->thread.addr2 = far_epc; + /* the insn may have 32bit data */ + child->thread.insn2 = (short)far_epc_insn; + } else { + child->thread.insn2_type = 1; + child->thread.addr2 = far_epc; + child->thread.insn2 = far_epc_insn; + } + } +} + +void clear_single_step(struct task_struct *child) +{ + if (child->thread.insn1_type == 0) + write_tsk_short(child, child->thread.addr1, + child->thread.insn1); + + if (child->thread.insn1_type == 1) + write_tsk_long(child, child->thread.addr1, + child->thread.insn1); + + if (child->thread.ss_nextcnt == 2) { /* branch */ + if (child->thread.insn1_type == 0) + write_tsk_short(child, child->thread.addr1, + child->thread.insn1); + if (child->thread.insn1_type == 1) + write_tsk_long(child, child->thread.addr1, + child->thread.insn1); + if (child->thread.insn2_type == 0) + write_tsk_short(child, child->thread.addr2, + child->thread.insn2); + if (child->thread.insn2_type == 1) + write_tsk_long(child, child->thread.addr2, + child->thread.insn2); + } + + child->thread.single_step = 0; + child->thread.ss_nextcnt = 0; +} + + +void ptrace_disable(struct task_struct *child) {} + +long +arch_ptrace(struct task_struct *child, long request, long addr, long data) +{ + int ret; + + if (request == PTRACE_TRACEME) { + /* are we already being traced? */ + if (current->ptrace & PT_PTRACED) + return -EPERM; + + /* set the ptrace bit in the process flags. */ + current->ptrace |= PT_PTRACED; + return 0; + } + + ret = -ESRCH; + if (!child) + return ret; + + ret = -EPERM; + + if (request == PTRACE_ATTACH) { + ret = ptrace_attach(child); + return ret; + } + + ret = ptrace_check_attach(child, request == PTRACE_KILL); + if (ret < 0) + return ret; + + switch (request) { + case PTRACE_PEEKTEXT: /* read word at location addr. */ + case PTRACE_PEEKDATA: { + unsigned long tmp; + int copied; + + copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); + ret = -EIO; + if (copied != sizeof(tmp)) + break; + + ret = put_user(tmp, (unsigned long *) data); + return ret; + } + + /* Read the word at location addr in the USER area. */ + case PTRACE_PEEKUSR: { + struct pt_regs *regs; + unsigned long tmp; + + regs = task_pt_regs(child); + + tmp = 0; /* Default return value. */ + switch (addr) { + case 0 ... 
31: + tmp = regs->regs[addr]; + break; + case PC: + tmp = regs->cp0_epc; + break; + case ECR: + tmp = regs->cp0_ecr; + break; + case EMA: + tmp = regs->cp0_ema; + break; + case CEH: + tmp = regs->ceh; + break; + case CEL: + tmp = regs->cel; + break; + case CONDITION: + tmp = regs->cp0_condition; + break; + case PSR: + tmp = regs->cp0_psr; + break; + case COUNTER: + tmp = regs->sr0; + break; + case LDCR: + tmp = regs->sr1; + break; + case STCR: + tmp = regs->sr2; + break; + default: + tmp = 0; + return -EIO; + } + + ret = put_user(tmp, (unsigned long *) data); + return ret; + } + + case PTRACE_POKETEXT: /* write the word at location addr. */ + case PTRACE_POKEDATA: + ret = 0; + if (access_process_vm(child, addr, &data, sizeof(data), 1) + == sizeof(data)) + break; + ret = -EIO; + return ret; + + case PTRACE_POKEUSR: { + struct pt_regs *regs; + ret = 0; + regs = task_pt_regs(child); + + switch (addr) { + case 0 ... 31: + regs->regs[addr] = data; + break; + case PC: + regs->cp0_epc = data; + break; + case CEH: + regs->ceh = data; + break; + case CEL: + regs->cel = data; + break; + case CONDITION: + regs->cp0_condition = data; + break; + case PSR: + case COUNTER: + case STCR: + case LDCR: + break; /* user can't write the reg */ + default: + /* The rest are not allowed. */ + ret = -EIO; + break; + } + break; + } + + case PTRACE_SYSCALL: /* continue and stop at next + (return from) syscall. */ + case PTRACE_CONT: { /* restart after signal. */ + ret = -EIO; + if (!valid_signal(data)) + break; + if (request == PTRACE_SYSCALL) + set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + else + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + + child->exit_code = data; + wake_up_process(child); + ret = 0; + break; + } + + /* + * make the child exit. Best I can do is send it a sigkill. + * perhaps it should be put in the status that it wants to + * exit. + */ + case PTRACE_KILL: + ret = 0; + if (child->state == EXIT_ZOMBIE) /* already dead. */ + break; + child->exit_code = SIGKILL; + clear_single_step(child); + wake_up_process(child); + break; + + case PTRACE_SINGLESTEP: { /* set the trap flag. */ + ret = -EIO; + if ((unsigned long) data > _NSIG) + break; + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + set_single_step(child); + child->exit_code = data; + /* give it a chance to run. */ + wake_up_process(child); + ret = 0; + break; + } + + case PTRACE_DETACH: /* detach a process that was attached. */ + ret = ptrace_detach(child, data); + break; + + case PTRACE_SETOPTIONS: + if (data & PTRACE_O_TRACESYSGOOD) + child->ptrace |= PT_TRACESYSGOOD; + else + child->ptrace &= ~PT_TRACESYSGOOD; + ret = 0; + break; + + default: + ret = -EIO; + break; + } + + return ret; +} + +/* + * Notification of system call entry/exit + * - triggered by current->work.syscall_trace + */ +asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) +{ + if (!(current->ptrace & PT_PTRACED)) + return; + + if (!test_thread_flag(TIF_SYSCALL_TRACE)) + return; + + /* The 0x80 provides a way for the tracing parent to distinguish + between a syscall stop and SIGTRAP delivery. */ + ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? + 0x80 : 0)); + + /* + * this isn't the same as continuing with a signal, but it will do + * for normal use. strace only continues with a signal if the + * stopping signal is not SIGTRAP. 
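+	 * On the tracer's side, a stop raised with the 0x80 bit set is
+	 * told apart from a plain SIGTRAP roughly like this (userspace
+	 * sketch, assuming PTRACE_O_TRACESYSGOOD was requested via
+	 * PTRACE_SETOPTIONS):
+	 *
+	 *	waitpid(child, &status, 0);
+	 *	if (WIFSTOPPED(status) &&
+	 *	    WSTOPSIG(status) == (SIGTRAP | 0x80))
+	 *		... syscall entry/exit stop ...
+	 *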
-brl + */ + if (current->exit_code) { + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } +} diff --git a/arch/score/kernel/setup.c b/arch/score/kernel/setup.c new file mode 100644 index 00000000000..a172ce170f6 --- /dev/null +++ b/arch/score/kernel/setup.c @@ -0,0 +1,157 @@ +/* + * arch/score/kernel/setup.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include + +#include + +struct screen_info screen_info; +unsigned long kernelsp; + +static char command_line[COMMAND_LINE_SIZE]; +static struct resource code_resource = { .name = "Kernel code",}; +static struct resource data_resource = { .name = "Kernel data",}; + +static void __init bootmem_init(void) +{ + unsigned long reserved_end, bootmap_size; + unsigned long size = initrd_end - initrd_start; + + reserved_end = (unsigned long)_end; + + min_low_pfn = 0; + max_low_pfn = MEM_SIZE / PAGE_SIZE; + + /* Initialize the boot-time allocator with low memory only. */ + bootmap_size = init_bootmem_node(NODE_DATA(0), reserved_end, + min_low_pfn, max_low_pfn); + add_active_range(0, min_low_pfn, max_low_pfn); + + free_bootmem(PFN_PHYS(reserved_end), + (max_low_pfn - reserved_end) << PAGE_SHIFT); + memory_present(0, reserved_end, max_low_pfn); + + /* Reserve space for the bootmem bitmap. */ + reserve_bootmem(PFN_PHYS(reserved_end), bootmap_size, BOOTMEM_DEFAULT); + + if (size == 0) { + printk(KERN_INFO "Initrd not found or empty"); + goto disable; + } + + if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { + printk(KERN_ERR "Initrd extends beyond end of memory"); + goto disable; + } + + /* Reserve space for the initrd bitmap. 
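+	 * (more precisely, the initrd image itself, so the boot-time
+	 * allocator will not hand those pages out again; the
+	 * initrd_below_start_ok flag set below tells the generic initrd
+	 * code to accept an image placed under the usual lower bound
+	 * instead of disabling it)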
*/ + reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT); + initrd_below_start_ok = 1; + + pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n", + initrd_start, size); + return; +disable: + printk(KERN_CONT " - disabling initrd\n"); + initrd_start = 0; + initrd_end = 0; +} + +static void __init resource_init(void) +{ + struct resource *res; + + code_resource.start = (unsigned long)_text; + code_resource.end = (unsigned long)_etext - 1; + data_resource.start = (unsigned long)_etext; + data_resource.end = (unsigned long)_edata - 1; + + res = alloc_bootmem(sizeof(struct resource)); + res->name = "System RAM"; + res->start = 0; + res->end = MEM_SIZE - 1; + res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; + request_resource(&iomem_resource, res); + + request_resource(res, &code_resource); + request_resource(res, &data_resource); +} + +void __init setup_arch(char **cmdline_p) +{ + randomize_va_space = 0; + *cmdline_p = command_line; + + cpu_cache_init(); + tlb_init(); + bootmem_init(); + paging_init(); + resource_init(); +} + +static int show_cpuinfo(struct seq_file *m, void *v) +{ + unsigned long n = (unsigned long) v - 1; + + seq_printf(m, "processor\t\t: %ld\n", n); + seq_printf(m, "\n"); + + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + unsigned long i = *pos; + + return i < 1 ? (void *) (i + 1) : NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return c_start(m, pos); +} + +static void c_stop(struct seq_file *m, void *v) +{ +} + +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; + +static int __init topology_init(void) +{ + return 0; +} + +subsys_initcall(topology_init); diff --git a/arch/score/kernel/signal.c b/arch/score/kernel/signal.c new file mode 100644 index 00000000000..b4ed1b3f807 --- /dev/null +++ b/arch/score/kernel/signal.c @@ -0,0 +1,355 @@ +/* + * arch/score/kernel/signal.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include + +#include + +#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) + +struct rt_sigframe { + u32 rs_ass[4]; /* argument save space */ + u32 rs_code[2]; /* signal trampoline */ + struct siginfo rs_info; + struct ucontext rs_uc; +}; + +int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) +{ + int err = 0; + unsigned long reg; + + reg = regs->cp0_epc; err |= __put_user(reg, &sc->sc_pc); + err |= __put_user(regs->cp0_psr, &sc->sc_psr); + err |= __put_user(regs->cp0_condition, &sc->sc_condition); + + +#define save_gp_reg(i) { \ + reg = regs->regs[i]; \ + err |= __put_user(reg, &sc->sc_regs[i]); \ +} while (0) + save_gp_reg(0); save_gp_reg(1); save_gp_reg(2); + save_gp_reg(3); save_gp_reg(4); save_gp_reg(5); + save_gp_reg(6); save_gp_reg(7); save_gp_reg(8); + save_gp_reg(9); save_gp_reg(10); save_gp_reg(11); + save_gp_reg(12); save_gp_reg(13); save_gp_reg(14); + save_gp_reg(15); save_gp_reg(16); save_gp_reg(17); + save_gp_reg(18); save_gp_reg(19); save_gp_reg(20); + save_gp_reg(21); save_gp_reg(22); save_gp_reg(23); + save_gp_reg(24); save_gp_reg(25); save_gp_reg(26); + save_gp_reg(27); save_gp_reg(28); save_gp_reg(29); +#undef save_gp_reg + + reg = regs->ceh; err |= __put_user(reg, &sc->sc_mdceh); + reg = regs->cel; err |= __put_user(reg, &sc->sc_mdcel); + err |= __put_user(regs->cp0_ecr, &sc->sc_ecr); + err |= __put_user(regs->cp0_ema, &sc->sc_ema); + + return err; +} + +int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) +{ + int err = 0; + u32 reg; + + err |= __get_user(regs->cp0_epc, &sc->sc_pc); + err |= __get_user(regs->cp0_condition, &sc->sc_condition); + + err |= __get_user(reg, &sc->sc_mdceh); + regs->ceh = (int) reg; + err |= __get_user(reg, &sc->sc_mdcel); + regs->cel = (int) reg; + + err |= __get_user(reg, &sc->sc_psr); + regs->cp0_psr = (int) reg; + err |= __get_user(reg, &sc->sc_ecr); + regs->cp0_ecr = (int) reg; + err |= __get_user(reg, &sc->sc_ema); + regs->cp0_ema = (int) reg; + +#define restore_gp_reg(i) do { \ + err |= __get_user(reg, &sc->sc_regs[i]); \ + regs->regs[i] = reg; \ +} while (0) + restore_gp_reg(0); restore_gp_reg(1); restore_gp_reg(2); + restore_gp_reg(3); restore_gp_reg(4); restore_gp_reg(5); + restore_gp_reg(6); restore_gp_reg(7); restore_gp_reg(8); + restore_gp_reg(9); restore_gp_reg(10); restore_gp_reg(11); + restore_gp_reg(12); restore_gp_reg(13); restore_gp_reg(14); + restore_gp_reg(15); restore_gp_reg(16); restore_gp_reg(17); + restore_gp_reg(18); restore_gp_reg(19); restore_gp_reg(20); + restore_gp_reg(21); restore_gp_reg(22); restore_gp_reg(23); + restore_gp_reg(24); restore_gp_reg(25); restore_gp_reg(26); + restore_gp_reg(27); restore_gp_reg(28); restore_gp_reg(29); +#undef restore_gp_reg + + return err; +} + +/* + * Determine which stack to use.. + */ +void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, + size_t frame_size) +{ + unsigned long sp; + + /* Default to using normal stack */ + sp = regs->regs[0]; + sp -= 32; + + /* This is the X/Open sanctioned signal stack switching. 
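+	 * Userspace opts in per-handler; a minimal sketch of that
+	 * opt-in sequence (illustrative userspace code, not part of
+	 * this patch):
+	 *
+	 *	stack_t ss = { .ss_sp = buf, .ss_size = SIGSTKSZ };
+	 *	sigaltstack(&ss, NULL);
+	 *	sa.sa_flags |= SA_ONSTACK;
+	 *	sigaction(SIGSEGV, &sa, NULL);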
*/ + if ((ka->sa.sa_flags & SA_ONSTACK) && (!on_sig_stack(sp))) + sp = current->sas_ss_sp + current->sas_ss_size; + + return (void *)((sp - frame_size) & ~7); +} + +asmlinkage int score_sigaltstack(struct pt_regs *regs) +{ + const stack_t *uss = (const stack_t *) regs->regs[4]; + stack_t *uoss = (stack_t *) regs->regs[5]; + unsigned long usp = regs->regs[0]; + + return do_sigaltstack(uss, uoss, usp); +} + +asmlinkage void score_rt_sigreturn(struct pt_regs *regs) +{ + struct rt_sigframe __user *frame; + sigset_t set; + stack_t st; + int sig; + + frame = (struct rt_sigframe __user *) regs->regs[0]; + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) + goto badframe; + if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set))) + goto badframe; + + sigdelsetmask(&set, ~_BLOCKABLE); + spin_lock_irq(¤t->sighand->siglock); + current->blocked = set; + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + + sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext); + if (sig < 0) + goto badframe; + else if (sig) + force_sig(sig, current); + + if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st))) + goto badframe; + + /* It is more difficult to avoid calling this function than to + call it and ignore errors. */ + do_sigaltstack((stack_t __user *)&st, NULL, regs->regs[0]); + + __asm__ __volatile__( + "mv\tr0, %0\n\t" + "la\tr8, syscall_exit\n\t" + "br\tr8\n\t" + : : "r" (regs) : "r8"); + +badframe: + force_sig(SIGSEGV, current); +} + +int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, + int signr, sigset_t *set, siginfo_t *info) +{ + struct rt_sigframe *frame; + int err = 0; + + frame = get_sigframe(ka, regs, sizeof(*frame)); + if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + goto give_sigsegv; + + /* + * Set up the return code ... 
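+	 * (the two words put_user()'d below are taken to encode the
+	 * instruction pair shown next; on score the syscall number
+	 * travels in r27 - compare kernel_execve() and the restart
+	 * path in do_signal() - so "li v0" reads like the MIPS
+	 * spelling of "ldi r27")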
+ * + * li v0, __NR_rt_sigreturn + * syscall + */ + err |= __put_user(0x87788000 + __NR_rt_sigreturn*2, + frame->rs_code + 0); + err |= __put_user(0x80008002, frame->rs_code + 1); + flush_cache_sigtramp((unsigned long) frame->rs_code); + + err |= copy_siginfo_to_user(&frame->rs_info, info); + err |= __put_user(0, &frame->rs_uc.uc_flags); + err |= __put_user(0, &frame->rs_uc.uc_link); + err |= __put_user((void *)current->sas_ss_sp, + &frame->rs_uc.uc_stack.ss_sp); + err |= __put_user(sas_ss_flags(regs->regs[0]), + &frame->rs_uc.uc_stack.ss_flags); + err |= __put_user(current->sas_ss_size, + &frame->rs_uc.uc_stack.ss_size); + err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext); + err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set)); + + if (err) + goto give_sigsegv; + + regs->regs[0] = (unsigned long) frame; + regs->regs[3] = (unsigned long) frame->rs_code; + regs->regs[4] = signr; + regs->regs[5] = (unsigned long) &frame->rs_info; + regs->regs[6] = (unsigned long) &frame->rs_uc; + regs->regs[29] = (unsigned long) ka->sa.sa_handler; + regs->cp0_epc = (unsigned long) ka->sa.sa_handler; + + return 0; + +give_sigsegv: + if (signr == SIGSEGV) + ka->sa.sa_handler = SIG_DFL; + force_sig(SIGSEGV, current); + return -EFAULT; +} + +int handle_signal(unsigned long sig, siginfo_t *info, + struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) +{ + int ret; + + if (regs->is_syscall) { + switch (regs->regs[4]) { + case ERESTART_RESTARTBLOCK: + case ERESTARTNOHAND: + regs->regs[4] = EINTR; + break; + case ERESTARTSYS: + if (!(ka->sa.sa_flags & SA_RESTART)) { + regs->regs[4] = EINTR; + break; + } + case ERESTARTNOINTR: + regs->regs[4] = regs->orig_r4; + regs->regs[7] = regs->orig_r7; + regs->cp0_epc -= 8; + } + + regs->is_syscall = 0; + } + + /* + * Set up the stack frame + */ + ret = setup_rt_frame(ka, regs, sig, oldset, info); + + spin_lock_irq(¤t->sighand->siglock); + sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); + if (!(ka->sa.sa_flags & SA_NODEFER)) + sigaddset(¤t->blocked, sig); + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + + return ret; +} + +asmlinkage void do_signal(struct pt_regs *regs) +{ + struct k_sigaction ka; + sigset_t *oldset; + siginfo_t info; + int signr; + + /* + * We want the common case to go fast, which is why we may in certain + * cases get here from kernel mode. Just return without doing anything + * if so. + */ + if (!user_mode(regs)) + return; + + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + oldset = ¤t->saved_sigmask; + else + oldset = ¤t->blocked; + + signr = get_signal_to_deliver(&info, &ka, regs, NULL); + if (signr > 0) { + /* Actually deliver the signal. */ + if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { + /* + * A signal was successfully delivered; the saved + * sigmask will have been stored in the signal frame, + * and will be restored by sigreturn, so we can simply + * clear the TIF_RESTORE_SIGMASK flag. + */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + clear_thread_flag(TIF_RESTORE_SIGMASK); + } + + return; + } + + if (regs->is_syscall) { + if (regs->regs[4] == ERESTARTNOHAND || + regs->regs[4] == ERESTARTSYS || + regs->regs[4] == ERESTARTNOINTR) { + regs->regs[4] = regs->orig_r4; + regs->regs[7] = regs->orig_r7; + regs->cp0_epc -= 8; + } + + if (regs->regs[4] == ERESTART_RESTARTBLOCK) { + regs->regs[27] = __NR_restart_syscall; + regs->regs[4] = regs->orig_r4; + regs->regs[7] = regs->orig_r7; + regs->cp0_epc -= 8; + } + + regs->is_syscall = 0; /* Don't deal with this again. 
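+		 * Backing cp0_epc up by 8 re-runs the two-word
+		 * "ldi r27, <nr>; syscall" sequence (cf. the sigreturn
+		 * trampoline above); in the ERESTART_RESTARTBLOCK case
+		 * r27 was first redirected to __NR_restart_syscall.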
*/ + } + + /* + * If there's no signal to deliver, we just put the saved sigmask + * back + */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) { + clear_thread_flag(TIF_RESTORE_SIGMASK); + sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); + } +} + +/* + * notification of userspace execution resumption + * - triggered by the TIF_WORK_MASK flags + */ +asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, + __u32 thread_info_flags) +{ + /* deal with pending signal delivery */ + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) + do_signal(regs); +} diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c new file mode 100644 index 00000000000..6a60d1ee533 --- /dev/null +++ b/arch/score/kernel/sys_score.c @@ -0,0 +1,147 @@ +/* + * arch/score/kernel/syscall.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include + +unsigned long shm_align_mask = PAGE_SIZE - 1; +EXPORT_SYMBOL(shm_align_mask); + +asmlinkage unsigned long +sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, + unsigned long flags, unsigned long fd, unsigned long pgoff) +{ + int error = -EBADF; + struct file *file = NULL; + + if (pgoff & (~PAGE_MASK >> 12)) + return -EINVAL; + + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); + if (!(flags & MAP_ANONYMOUS)) { + file = fget(fd); + if (!file) + return error; + } + + down_write(¤t->mm->mmap_sem); + error = do_mmap_pgoff(file, addr, len, prot, flags, + pgoff >> (PAGE_SHIFT - 12)); + up_write(¤t->mm->mmap_sem); + + if (file) + fput(file); + + return error; +} + +/* + * Clone a task - this clones the calling program thread. + * This is called indirectly via a small wrapper + */ +asmlinkage int +score_clone(struct pt_regs *regs) +{ + unsigned long clone_flags; + unsigned long newsp; + int __user *parent_tidptr, *child_tidptr; + + clone_flags = regs->regs[4]; + newsp = regs->regs[5]; + if (!newsp) + newsp = regs->regs[0]; + parent_tidptr = (int __user *)regs->regs[6]; + + child_tidptr = NULL; + if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) { + int __user *__user *usp = (int __user *__user *)regs->regs[0]; + + if (get_user(child_tidptr, &usp[4])) + return -EFAULT; + } + + return do_fork(clone_flags, newsp, regs, 0, + parent_tidptr, child_tidptr); +} + +/* + * sys_execve() executes a new program. 
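+ * Arguments arrive in the score syscall argument registers - r4
+ * holds the filename pointer, r5 argv and r6 envp - which is why
+ * the body unpacks regs->regs[4..6] instead of taking C
+ * parameters.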
+ * This is called indirectly via a small wrapper + */ +asmlinkage int score_execve(struct pt_regs *regs) +{ + int error; + char *filename; + + filename = getname((char *) (long) regs->regs[4]); + error = PTR_ERR(filename); + if (IS_ERR(filename)) + return error; + + error = do_execve(filename, (char **) (long) regs->regs[5], + (char **) (long) regs->regs[6], regs); + + putname(filename); + return error; +} + +/* + * If we ever come here the user sp is bad. Zap the process right away. + * Due to the bad stack signaling wouldn't work. + */ +asmlinkage void bad_stack(void) +{ + do_exit(SIGSEGV); +} + +/* + * Do a system call from kernel instead of calling sys_execve so we + * end up with proper pt_regs. + */ +int kernel_execve(const char *filename, char *const argv[], char *const envp[]) +{ + register unsigned long __r4 asm("r4") = (unsigned long) filename; + register unsigned long __r5 asm("r5") = (unsigned long) argv; + register unsigned long __r6 asm("r6") = (unsigned long) envp; + register unsigned long __r7 asm("r7"); + + __asm__ __volatile__ (" \n" + "ldi r27, %5 \n" + "syscall \n" + "mv %0, r4 \n" + "mv %1, r7 \n" + : "=&r" (__r4), "=r" (__r7) + : "r" (__r4), "r" (__r5), "r" (__r6), "i" (__NR_execve) + : "r8", "r9", "r10", "r11", "r22", "r23", "r24", "r25", + "r26", "r27", "memory"); + + if (__r7 == 0) + return __r4; + + return -__r4; +} diff --git a/arch/score/kernel/time.c b/arch/score/kernel/time.c new file mode 100644 index 00000000000..cd66ba327ca --- /dev/null +++ b/arch/score/kernel/time.c @@ -0,0 +1,99 @@ +/* + * arch/score/kernel/time.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + +#include + +irqreturn_t timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evdev = dev_id; + + /* clear timer interrupt flag */ + outl(1, P_TIMER0_CPP_REG); + evdev->event_handler(evdev); + + return IRQ_HANDLED; +} + +static struct irqaction timer_irq = { + .handler = timer_interrupt, + .flags = IRQF_DISABLED | IRQF_TIMER, + .name = "timer", +}; + +static int score_timer_set_next_event(unsigned long delta, + struct clock_event_device *evdev) +{ + outl((TMR_M_PERIODIC | TMR_IE_ENABLE), P_TIMER0_CTRL); + outl(delta, P_TIMER0_PRELOAD); + outl(inl(P_TIMER0_CTRL) | TMR_ENABLE, P_TIMER0_CTRL); + + return 0; +} + +static void score_timer_set_mode(enum clock_event_mode mode, + struct clock_event_device *evdev) +{ + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + outl((TMR_M_PERIODIC | TMR_IE_ENABLE), P_TIMER0_CTRL); + outl(SYSTEM_CLOCK/HZ, P_TIMER0_PRELOAD); + outl(inl(P_TIMER0_CTRL) | TMR_ENABLE, P_TIMER0_CTRL); + break; + case CLOCK_EVT_MODE_ONESHOT: + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_RESUME: + case CLOCK_EVT_MODE_UNUSED: + break; + default: + BUG(); + } +} + +static struct clock_event_device score_clockevent = { + .name = "score_clockevent", + .features = CLOCK_EVT_FEAT_PERIODIC, + .shift = 16, + .set_next_event = score_timer_set_next_event, + .set_mode = score_timer_set_mode, +}; + +void __init time_init(void) +{ + timer_irq.dev_id = &score_clockevent; + setup_irq(IRQ_TIMER , &timer_irq); + + /* setup COMPARE clockevent */ + score_clockevent.mult = div_sc(SYSTEM_CLOCK, NSEC_PER_SEC, + score_clockevent.shift); + score_clockevent.max_delta_ns = clockevent_delta2ns((u32)~0, + &score_clockevent); + score_clockevent.min_delta_ns = clockevent_delta2ns(50, + &score_clockevent) + 1; + score_clockevent.cpumask = cpumask_of(0); + clockevents_register_device(&score_clockevent); +} diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c new file mode 100644 index 00000000000..957ae9eb356 --- /dev/null +++ b/arch/score/kernel/traps.c @@ -0,0 +1,349 @@ +/* + * arch/score/kernel/traps.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + +#include +#include +#include + +unsigned long exception_handlers[32]; + +/* + * The architecture-independent show_stack generator + */ +void show_stack(struct task_struct *task, unsigned long *sp) +{ + int i; + long stackdata; + + sp = sp ? 
sp : (unsigned long *)&sp; + + printk(KERN_NOTICE "Stack: "); + i = 1; + while ((long) sp & (PAGE_SIZE - 1)) { + if (i && ((i % 8) == 0)) + printk(KERN_NOTICE "\n"); + if (i > 40) { + printk(KERN_NOTICE " ..."); + break; + } + + if (__get_user(stackdata, sp++)) { + printk(KERN_NOTICE " (Bad stack address)"); + break; + } + + printk(KERN_NOTICE " %08lx", stackdata); + i++; + } + printk(KERN_NOTICE "\n"); +} + +static void show_trace(long *sp) +{ + int i; + long addr; + + sp = sp ? sp : (long *) &sp; + + printk(KERN_NOTICE "Call Trace: "); + i = 1; + while ((long) sp & (PAGE_SIZE - 1)) { + if (__get_user(addr, sp++)) { + if (i && ((i % 6) == 0)) + printk(KERN_NOTICE "\n"); + printk(KERN_NOTICE " (Bad stack address)\n"); + break; + } + + if (kernel_text_address(addr)) { + if (i && ((i % 6) == 0)) + printk(KERN_NOTICE "\n"); + if (i > 40) { + printk(KERN_NOTICE " ..."); + break; + } + + printk(KERN_NOTICE " [<%08lx>]", addr); + i++; + } + } + printk(KERN_NOTICE "\n"); +} + +static void show_code(unsigned int *pc) +{ + long i; + + printk(KERN_NOTICE "\nCode:"); + + for (i = -3; i < 6; i++) { + unsigned long insn; + if (__get_user(insn, pc + i)) { + printk(KERN_NOTICE " (Bad address in epc)\n"); + break; + } + printk(KERN_NOTICE "%c%08lx%c", (i ? ' ' : '<'), + insn, (i ? ' ' : '>')); + } +} + +/* + * FIXME: really the generic show_regs should take a const pointer argument. + */ +void show_regs(struct pt_regs *regs) +{ + printk("r0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + regs->regs[0], regs->regs[1], regs->regs[2], regs->regs[3], + regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); + printk("r8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11], + regs->regs[12], regs->regs[13], regs->regs[14], regs->regs[15]); + printk("r16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19], + regs->regs[20], regs->regs[21], regs->regs[22], regs->regs[23]); + printk("r24: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + regs->regs[24], regs->regs[25], regs->regs[26], regs->regs[27], + regs->regs[28], regs->regs[29], regs->regs[30], regs->regs[31]); + + printk("CEH : %08lx\n", regs->ceh); + printk("CEL : %08lx\n", regs->cel); + + printk("EMA:%08lx, epc:%08lx %s\nPSR: %08lx\nECR:%08lx\nCondition : %08lx\n", + regs->cp0_ema, regs->cp0_epc, print_tainted(), regs->cp0_psr, + regs->cp0_ecr, regs->cp0_condition); +} + +static void show_registers(struct pt_regs *regs) +{ + show_regs(regs); + printk(KERN_NOTICE "Process %s (pid: %d, stackpage=%08lx)\n", + current->comm, current->pid, (unsigned long) current); + show_stack(current_thread_info()->task, (long *) regs->regs[0]); + show_trace((long *) regs->regs[0]); + show_code((unsigned int *) regs->cp0_epc); + printk(KERN_NOTICE "\n"); +} + +/* + * The architecture-independent dump_stack generator + */ +void dump_stack(void) +{ + show_stack(current_thread_info()->task, + (long *) get_irq_regs()->regs[0]); +} +EXPORT_SYMBOL(dump_stack); + +void __die(const char *str, struct pt_regs *regs, const char *file, + const char *func, unsigned long line) +{ + console_verbose(); + printk("%s", str); + if (file && func) + printk(" in %s:%s, line %ld", file, func, line); + printk(":\n"); + show_registers(regs); + do_exit(SIGSEGV); +} + +void __die_if_kernel(const char *str, struct pt_regs *regs, + const char *file, const char *func, unsigned long line) +{ + if (!user_mode(regs)) + __die(str, regs, file, func, line); +} + 
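+/*
+ * The handlers below use die_if_kernel(); we take that to be a
+ * convenience macro, defined in a header outside this patch, that
+ * feeds the call site into __die_if_kernel(), along the lines of:
+ *
+ *	#define die_if_kernel(str, regs) \
+ *		__die_if_kernel(str, regs, __FILE__, __func__, __LINE__)
+ */
+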
+asmlinkage void do_adelinsn(struct pt_regs *regs) +{ + printk("do_ADE-linsn:ema:0x%08lx:epc:0x%08lx\n", + regs->cp0_ema, regs->cp0_epc); + die_if_kernel("do_ade execution Exception\n", regs); + force_sig(SIGBUS, current); +} + +asmlinkage void do_adedata(struct pt_regs *regs) +{ + const struct exception_table_entry *fixup; + fixup = search_exception_tables(regs->cp0_epc); + if (fixup) { + regs->cp0_epc = fixup->fixup; + return; + } + printk("do_ADE-data:ema:0x%08lx:epc:0x%08lx\n", + regs->cp0_ema, regs->cp0_epc); + die_if_kernel("do_ade execution Exception\n", regs); + force_sig(SIGBUS, current); +} + +asmlinkage void do_pel(struct pt_regs *regs) +{ + die_if_kernel("do_pel execution Exception", regs); + force_sig(SIGFPE, current); +} + +asmlinkage void do_cee(struct pt_regs *regs) +{ + die_if_kernel("do_cee execution Exception", regs); + force_sig(SIGFPE, current); +} + +asmlinkage void do_cpe(struct pt_regs *regs) +{ + die_if_kernel("do_cpe execution Exception", regs); + force_sig(SIGFPE, current); +} + +asmlinkage void do_be(struct pt_regs *regs) +{ + die_if_kernel("do_be execution Exception", regs); + force_sig(SIGBUS, current); +} + +asmlinkage void do_ov(struct pt_regs *regs) +{ + siginfo_t info; + + die_if_kernel("do_ov execution Exception", regs); + + info.si_code = FPE_INTOVF; + info.si_signo = SIGFPE; + info.si_errno = 0; + info.si_addr = (void *)regs->cp0_epc; + force_sig_info(SIGFPE, &info, current); +} + +asmlinkage void do_tr(struct pt_regs *regs) +{ + die_if_kernel("do_tr execution Exception", regs); + force_sig(SIGTRAP, current); +} + +asmlinkage void do_ri(struct pt_regs *regs) +{ + unsigned long epc_insn; + unsigned long epc = regs->cp0_epc; + + read_tsk_long(current, epc, &epc_insn); + if (current->thread.single_step == 1) { + if ((epc == current->thread.addr1) || + (epc == current->thread.addr2)) { + clear_single_step(current); + force_sig(SIGTRAP, current); + return; + } else + BUG(); + } else if ((epc_insn == BREAKPOINT32_INSN) || + ((epc_insn & 0x0000FFFF) == 0x7002) || + ((epc_insn & 0xFFFF0000) == 0x70020000)) { + force_sig(SIGTRAP, current); + return; + } else { + die_if_kernel("do_ri execution Exception", regs); + force_sig(SIGILL, current); + } +} + +asmlinkage void do_ccu(struct pt_regs *regs) +{ + die_if_kernel("do_ccu execution Exception", regs); + force_sig(SIGILL, current); +} + +asmlinkage void do_reserved(struct pt_regs *regs) +{ + /* + * Game over - no way to handle this if it ever occurs. Most probably + * caused by a new unknown cpu type or after another deadly + * hard/software error. + */ + die_if_kernel("do_reserved execution Exception", regs); + show_regs(regs); + panic("Caught reserved exception - should not happen."); +} + +/* + * NMI exception handler. 
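+ * NMIs cannot be masked, so there is nothing to resume safely:
+ * report a kernel-mode hit via die_if_kernel(), then die()
+ * unconditionally (die() is presumed to be the unconditional
+ * counterpart of the die_if_kernel() macro above).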
+ */ +void nmi_exception_handler(struct pt_regs *regs) +{ + die_if_kernel("nmi_exception_handler execution Exception", regs); + die("NMI", regs); +} + +/* Install CPU exception handler */ +void *set_except_vector(int n, void *addr) +{ + unsigned long handler = (unsigned long) addr; + unsigned long old_handler = exception_handlers[n]; + + exception_handlers[n] = handler; + return (void *)old_handler; +} + +void __init trap_init(void) +{ + int i; + + pgd_current = (unsigned long)init_mm.pgd; + /* DEBUG EXCEPTION */ + memcpy((void *)DEBUG_VECTOR_BASE_ADDR, + &debug_exception_vector, DEBUG_VECTOR_SIZE); + /* NMI EXCEPTION */ + memcpy((void *)GENERAL_VECTOR_BASE_ADDR, + &general_exception_vector, GENERAL_VECTOR_SIZE); + + /* + * Initialise exception handlers + */ + for (i = 0; i <= 31; i++) + set_except_vector(i, handle_reserved); + + set_except_vector(1, handle_nmi); + set_except_vector(2, handle_adelinsn); + set_except_vector(3, handle_tlb_refill); + set_except_vector(4, handle_tlb_invaild); + set_except_vector(5, handle_ibe); + set_except_vector(6, handle_pel); + set_except_vector(7, handle_sys); + set_except_vector(8, handle_ccu); + set_except_vector(9, handle_ri); + set_except_vector(10, handle_tr); + set_except_vector(11, handle_adedata); + set_except_vector(12, handle_adedata); + set_except_vector(13, handle_tlb_refill); + set_except_vector(14, handle_tlb_invaild); + set_except_vector(15, handle_mod); + set_except_vector(16, handle_cee); + set_except_vector(17, handle_cpe); + set_except_vector(18, handle_dbe); + flush_icache_range(DEBUG_VECTOR_BASE_ADDR, IRQ_VECTOR_BASE_ADDR); + + atomic_inc(&init_mm.mm_count); + current->active_mm = &init_mm; + cpu_cache_init(); +} diff --git a/arch/score/kernel/vmlinux.lds.S b/arch/score/kernel/vmlinux.lds.S new file mode 100644 index 00000000000..f85569831d5 --- /dev/null +++ b/arch/score/kernel/vmlinux.lds.S @@ -0,0 +1,148 @@ +/* + * arch/score/kernel/vmlinux.lds.S + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +OUTPUT_ARCH(score) +ENTRY(_stext) + +jiffies = jiffies_64; + +SECTIONS +{ + . = CONFIG_MEMORY_START + 0x2000; + /* read-only */ + .text : { + _text = .; /* Text and read-only data */ + TEXT_TEXT + SCHED_TEXT + LOCK_TEXT + KPROBES_TEXT + *(.text.*) + *(.fixup) + . = ALIGN (4) ; + _etext = .; /* End of text section */ + } + + . = ALIGN(16); + RODATA + + /* Exception table */ + . 
= ALIGN(16); + __ex_table : { + __start___ex_table = .; + *(__ex_table) + __stop___ex_table = .; + } + + /* writeable */ + .data ALIGN (4096): { + *(.data.init_task) + + DATA_DATA + CONSTRUCTORS + } + + /* We want the small data sections together, so single-instruction offsets + can access them all, and initialized data all before uninitialized, so + we can shorten the on-disk segment size. */ + . = ALIGN(8); + .sdata : { + *(.sdata) + } + + . = ALIGN(32); + .data.cacheline_aligned : { + *(.data.cacheline_aligned) + } + _edata = .; /* End of data section */ + + /* will be freed after init */ + . = ALIGN(4096); /* Init code and data */ + __init_begin = .; + + . = ALIGN(4096); + .init.text : { + _sinittext = .; + INIT_TEXT + _einittext = .; + } + .init.data : { + INIT_DATA + } + . = ALIGN(16); + .init.setup : { + __setup_start = .; + *(.init.setup) + __setup_end = .; + } + + .initcall.init : { + __initcall_start = .; + INITCALLS + __initcall_end = .; + } + + .con_initcall.init : { + __con_initcall_start = .; + *(.con_initcall.init) + __con_initcall_end = .; + } + SECURITY_INIT + + /* .exit.text is discarded at runtime, not link time, to deal with + * references from .rodata + */ + .exit.text : { + EXIT_TEXT + } + .exit.data : { + EXIT_DATA + } +#if defined(CONFIG_BLK_DEV_INITRD) + .init.ramfs ALIGN(4096): { + __initramfs_start = .; + *(.init.ramfs) + __initramfs_end = .; + . = ALIGN(4); + LONG(0); + } +#endif + . = ALIGN(4096); + __init_end = .; + /* freed after init ends here */ + + __bss_start = .; /* BSS */ + .sbss : { + *(.sbss) + *(.scommon) + } + .bss : { + *(.bss) + *(COMMON) + } + __bss_stop = .; + _end = .; +} diff --git a/arch/score/lib/Makefile b/arch/score/lib/Makefile new file mode 100644 index 00000000000..553e30e81fa --- /dev/null +++ b/arch/score/lib/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for SCORE-specific library files.. +# + +lib-y += string.o checksum.o checksum_copy.o + +# libgcc-style stuff needed in the kernel +obj-y += ashldi3.o ashrdi3.o cmpdi2.o lshrdi3.o ucmpdi2.o diff --git a/arch/score/lib/ashldi3.c b/arch/score/lib/ashldi3.c new file mode 100644 index 00000000000..15691a91043 --- /dev/null +++ b/arch/score/lib/ashldi3.c @@ -0,0 +1,46 @@ +/* + * arch/score/lib/ashldi3.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include "libgcc.h" + +long long __ashldi3(long long u, word_type b) +{ + DWunion uu, w; + word_type bm; + + if (b == 0) + return u; + + uu.ll = u; + bm = 32 - b; + + if (bm <= 0) { + w.s.low = 0; + w.s.high = (unsigned int) uu.s.low << -bm; + } else { + const unsigned int carries = (unsigned int) uu.s.low >> bm; + + w.s.low = (unsigned int) uu.s.low << b; + w.s.high = ((unsigned int) uu.s.high << b) | carries; + } + + return w.ll; +} +EXPORT_SYMBOL(__ashldi3); diff --git a/arch/score/lib/ashrdi3.c b/arch/score/lib/ashrdi3.c new file mode 100644 index 00000000000..d9814a5d8d3 --- /dev/null +++ b/arch/score/lib/ashrdi3.c @@ -0,0 +1,48 @@ +/* + * arch/score/lib/ashrdi3.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include "libgcc.h" + +long long __ashrdi3(long long u, word_type b) +{ + DWunion uu, w; + word_type bm; + + if (b == 0) + return u; + + uu.ll = u; + bm = 32 - b; + + if (bm <= 0) { + /* w.s.high = 1..1 or 0..0 */ + w.s.high = + uu.s.high >> 31; + w.s.low = uu.s.high >> -bm; + } else { + const unsigned int carries = (unsigned int) uu.s.high << bm; + + w.s.high = uu.s.high >> b; + w.s.low = ((unsigned int) uu.s.low >> b) | carries; + } + + return w.ll; +} +EXPORT_SYMBOL(__ashrdi3); diff --git a/arch/score/lib/checksum.S b/arch/score/lib/checksum.S new file mode 100644 index 00000000000..706157edc7d --- /dev/null +++ b/arch/score/lib/checksum.S @@ -0,0 +1,255 @@ +/* + * arch/score/lib/csum_partial.S + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Lennox Wu + * Chen Liqin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include + +#define ADDC(sum,reg) \ + add sum, sum, reg; \ + cmp.c reg, sum; \ + bleu 9f; \ + addi sum, 0x1; \ +9: + +#define CSUM_BIGCHUNK(src, offset, sum) \ + lw r8, [src, offset + 0x00]; \ + lw r9, [src, offset + 0x04]; \ + lw r10, [src, offset + 0x08]; \ + lw r11, [src, offset + 0x0c]; \ + ADDC(sum, r8); \ + ADDC(sum, r9); \ + ADDC(sum, r10); \ + ADDC(sum, r11); \ + lw r8, [src, offset + 0x10]; \ + lw r9, [src, offset + 0x14]; \ + lw r10, [src, offset + 0x18]; \ + lw r11, [src, offset + 0x1c]; \ + ADDC(sum, r8); \ + ADDC(sum, r9); \ + ADDC(sum, r10); \ + ADDC(sum, r11); \ + +#define src r4 +#define dest r5 +#define sum r27 + + .text +/* unknown src alignment and < 8 bytes to go */ +small_csumcpy: + mv r5, r10 + ldi r9, 0x0 + cmpi.c r25, 0x1 + beq pass_small_set_t7 /*already set, jump to pass_small_set_t7*/ + andri.c r25,r4 , 0x1 /*Is src 2 bytes aligned?*/ + +pass_small_set_t7: + beq aligned + cmpi.c r5, 0x0 + beq fold + lbu r9, [src] + slli r9,r9, 0x8 /*Little endian*/ + ADDC(sum, r9) + addi src, 0x1 + subi.c r5, 0x1 + + /*len still a full word */ +aligned: + andri.c r8, r5, 0x4 /*Len >= 4?*/ + beq len_less_4bytes + + /* Still a full word (4byte) to go,and the src is word aligned.*/ + andri.c r8, src, 0x3 /*src is 4bytes aligned, so use LW!!*/ + beq four_byte_aligned + lhu r9, [src] + addi src, 2 + ADDC(sum, r9) + lhu r9, [src] + addi src, 2 + ADDC(sum, r9) + b len_less_4bytes + +four_byte_aligned: /* Len >=4 and four byte aligned */ + lw r9, [src] + addi src, 4 + ADDC(sum, r9) + +len_less_4bytes: /* 2 byte aligned aligned and length<4B */ + andri.c r8, r5, 0x2 + beq len_less_2bytes + lhu r9, [src] + addi src, 0x2 /* src+=2 */ + ADDC(sum, r9) + +len_less_2bytes: /* len = 1 */ + andri.c r8, r5, 0x1 + beq fold /* less than 2 and not equal 1--> len=0 -> fold */ + lbu r9, [src] + +fold_ADDC: + ADDC(sum, r9) +fold: + /* fold checksum */ + slli r26, sum, 16 + add sum, sum, r26 + cmp.c r26, sum + srli sum, sum, 16 + bleu 1f /* if r26<=sum */ + addi sum, 0x1 /* r26>sum */ +1: + /* odd buffer alignment? r25 was set in csum_partial */ + cmpi.c r25, 0x0 + beq 1f + slli r26, sum, 8 + srli sum, sum, 8 + or sum, sum, r26 + andi sum, 0xffff +1: + .set optimize + /* Add the passed partial csum. */ + ADDC(sum, r6) + mv r4, sum + br r3 + .set volatile + + .align 5 +ENTRY(csum_partial) + ldi sum, 0 + ldi r25, 0 + mv r10, r5 + cmpi.c r5, 0x8 + blt small_csumcpy /* < 8(singed) bytes to copy */ + cmpi.c r5, 0x0 + beq out + andri.c r25, src, 0x1 /* odd buffer? */ + + beq word_align +hword_align: /* 1 byte */ + lbu r8, [src] + subi r5, 0x1 + slli r8, r8, 8 + ADDC(sum, r8) + addi src, 0x1 + +word_align: /* 2 bytes */ + andri.c r8, src, 0x2 /* 4bytes(dword)_aligned? */ + beq dword_align /* not, maybe dword_align */ + lhu r8, [src] + subi r5, 0x2 + ADDC(sum, r8) + addi src, 0x2 + +dword_align: /* 4bytes */ + mv r26, r5 /* maybe useless when len >=56 */ + ldi r8, 56 + cmp.c r8, r5 + bgtu do_end_words /* if a1(len)=128? 
*/ + beq 1f /* len<128 */ + +/* r26 is the result that computed in oword_align */ +move_128bytes: + CSUM_BIGCHUNK(src, 0x00, sum) + CSUM_BIGCHUNK(src, 0x20, sum) + CSUM_BIGCHUNK(src, 0x40, sum) + CSUM_BIGCHUNK(src, 0x60, sum) + subi.c r26, 0x01 /* r26 equals len/128 */ + addi src, 0x80 + bne move_128bytes + +1: /* len<128,we process 64byte here */ + andri.c r10, r5, 0x40 + beq 1f + +move_64bytes: + CSUM_BIGCHUNK(src, 0x00, sum) + CSUM_BIGCHUNK(src, 0x20, sum) + addi src, 0x40 + +1: /* len<64 */ + andri r26, r5, 0x1c /* 0x1c=28 */ + andri.c r10, r5, 0x20 + beq do_end_words /* decided by andri */ + +move_32bytes: + CSUM_BIGCHUNK(src, 0x00, sum) + andri r26, r5, 0x1c + addri src, src, 0x20 + +do_end_words: /* len<32 */ + /* r26 was set already in dword_align */ + cmpi.c r26, 0x0 + beq maybe_end_cruft /* len<28 or len<56 */ + srli r26, r26, 0x2 + +end_words: + lw r8, [src] + subi.c r26, 0x1 /* unit is 4 byte */ + ADDC(sum, r8) + addi src, 0x4 + cmpi.c r26, 0x0 + bne end_words /* r26!=0 */ + +maybe_end_cruft: /* len<4 */ + andri r10, r5, 0x3 + +small_memcpy: + mv r5, r10 + j small_csumcpy + +out: + mv r4, sum + br r3 + +END(csum_partial) diff --git a/arch/score/lib/checksum_copy.c b/arch/score/lib/checksum_copy.c new file mode 100644 index 00000000000..04565dd3ded --- /dev/null +++ b/arch/score/lib/checksum_copy.c @@ -0,0 +1,52 @@ +/* + * arch/score/lib/csum_partial_copy.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Lennox Wu + * Chen Liqin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +#include + +unsigned int csum_partial_copy(const char *src, char *dst, + int len, unsigned int sum) +{ + sum = csum_partial(src, len, sum); + memcpy(dst, src, len); + + return sum; +} + +unsigned int csum_partial_copy_from_user(const char *src, char *dst, + int len, unsigned int sum, + int *err_ptr) +{ + int missing; + + missing = copy_from_user(dst, src, len); + if (missing) { + memset(dst + len - missing, 0, missing); + *err_ptr = -EFAULT; + } + + return csum_partial(dst, len, sum); +} diff --git a/arch/score/lib/cmpdi2.c b/arch/score/lib/cmpdi2.c new file mode 100644 index 00000000000..1ed5290c66e --- /dev/null +++ b/arch/score/lib/cmpdi2.c @@ -0,0 +1,44 @@ +/* + * arch/score/lib/cmpdi2.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include "libgcc.h" + +word_type __cmpdi2(long long a, long long b) +{ + const DWunion au = { + .ll = a + }; + const DWunion bu = { + .ll = b + }; + + if (au.s.high < bu.s.high) + return 0; + else if (au.s.high > bu.s.high) + return 2; + + if ((unsigned int) au.s.low < (unsigned int) bu.s.low) + return 0; + else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) + return 2; + + return 1; +} +EXPORT_SYMBOL(__cmpdi2); diff --git a/arch/score/lib/libgcc.h b/arch/score/lib/libgcc.h new file mode 100644 index 00000000000..0f12543d9f3 --- /dev/null +++ b/arch/score/lib/libgcc.h @@ -0,0 +1,37 @@ +/* + * arch/score/lib/libgcc.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + + +#ifndef __ASM_LIBGCC_H +#define __ASM_LIBGCC_H + +#include + +typedef int word_type __attribute__((mode(__word__))); + +struct DWstruct { + int low, high; +}; + +typedef union { + struct DWstruct s; + long long ll; +} DWunion; + +#endif /* __ASM_LIBGCC_H */ diff --git a/arch/score/lib/lshrdi3.c b/arch/score/lib/lshrdi3.c new file mode 100644 index 00000000000..ce21175fd79 --- /dev/null +++ b/arch/score/lib/lshrdi3.c @@ -0,0 +1,47 @@ +/* + * arch/score/lib/lshrdi3.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + + +#include +#include "libgcc.h" + +long long __lshrdi3(long long u, word_type b) +{ + DWunion uu, w; + word_type bm; + + if (b == 0) + return u; + + uu.ll = u; + bm = 32 - b; + + if (bm <= 0) { + w.s.high = 0; + w.s.low = (unsigned int) uu.s.high >> -bm; + } else { + const unsigned int carries = (unsigned int) uu.s.high << bm; + + w.s.high = (unsigned int) uu.s.high >> b; + w.s.low = ((unsigned int) uu.s.low >> b) | carries; + } + + return w.ll; +} +EXPORT_SYMBOL(__lshrdi3); diff --git a/arch/score/lib/string.S b/arch/score/lib/string.S new file mode 100644 index 00000000000..943d0911ea8 --- /dev/null +++ b/arch/score/lib/string.S @@ -0,0 +1,196 @@ +/* + * arch/score/lib/string.S + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Chen Liqin + * Lennox Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + + .text + .align 2 +ENTRY(__strncpy_from_user) + cmpi.c r6, 0 + mv r9, r6 + ble .L2 +0: lbu r7, [r5] + ldi r8, 0 +1: sb r7, [r4] +2: lb r6, [r5] + cmp.c r6, r8 + beq .L2 + +.L5: + addi r8, 1 + cmp.c r8, r9 + beq .L7 +3: lbu r6, [r5, 1]+ +4: sb r6, [r4, 1]+ +5: lb r7, [r5] + cmpi.c r7, 0 + bne .L5 +.L7: + mv r4, r8 + br r3 +.L2: + ldi r8, 0 + mv r4, r8 + br r3 + .section .fixup, "ax" +99: + ldi r4, -EFAULT + br r3 + .previous + .section __ex_table, "a" + .align 2 + .word 0b ,99b + .word 1b ,99b + .word 2b ,99b + .word 3b ,99b + .word 4b ,99b + .word 5b ,99b + .previous + + .align 2 +ENTRY(__strnlen_user) + cmpi.c r5, 0 + ble .L11 +0: lb r6, [r4] + ldi r7, 0 + cmp.c r6, r7 + beq .L11 +.L15: + addi r7, 1 + cmp.c r7, r5 + beq .L23 +1: lb r6, [r4,1]+ + cmpi.c r6, 0 + bne .L15 +.L23: + addri r4, r7, 1 + br r3 + +.L11: + ldi r4, 1 + br r3 + .section .fixup, "ax" +99: + ldi r4, 0 + br r3 + + .section __ex_table,"a" + .align 2 + .word 0b, 99b + .word 1b, 99b + .previous + + .align 2 +ENTRY(__strlen_user) +0: lb r6, [r4] + mv r7, r4 + extsb r6, r6 + cmpi.c r6, 0 + mv r4, r6 + beq .L27 +.L28: +1: lb r6, [r7, 1]+ + addi r6, 1 + cmpi.c r6, 0 + bne .L28 +.L27: + br r3 + .section .fixup, "ax" + ldi r4, 0x0 + br r3 +99: + ldi r4, 0 + br r3 + .previous + .section __ex_table, "a" + .align 2 + .word 0b ,99b + .word 1b ,99b + .previous + + .align 2 +ENTRY(__copy_tofrom_user) + cmpi.c r6, 0 + mv r10,r6 + beq .L32 + ldi r9, 0 +.L34: + add r6, r5, r9 +0: lbu r8, [r6] + add r7, r4, r9 +1: sb r8, [r7] + addi r9, 1 + cmp.c r9, r10 + bne .L34 +.L32: + ldi r4, 0 + br r3 + .section .fixup, "ax" +99: + sub r4, r10, r9 + br r3 + .previous + .section __ex_table, "a" + .align 2 + .word 0b, 99b + .word 1b, 
99b + .previous + + .align 2 +ENTRY(__clear_user) + cmpi.c r5, 0 + beq .L38 + ldi r6, 0 + mv r7, r6 +.L40: + addi r6, 1 +0: sb r7, [r4]+, 1 + cmp.c r6, r5 + bne .L40 +.L38: + ldi r4, 0 + br r3 + + .section .fixup, "ax" + br r3 + .previous + .section __ex_table, "a" + .align 2 +99: + .word 0b, 99b + .previous + + .align 2 +ENTRY(__put_user_unknown) + .set volatile + ldi r4, -EFAULT + br r3 + + .align 2 +ENTRY(__get_user_unknown) + ldi r5, 0 + ldi r4, -EFAULT + br r3 diff --git a/arch/score/lib/ucmpdi2.c b/arch/score/lib/ucmpdi2.c new file mode 100644 index 00000000000..b15241e0b07 --- /dev/null +++ b/arch/score/lib/ucmpdi2.c @@ -0,0 +1,38 @@ +/* + * arch/score/lib/ucmpdi2.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include "libgcc.h" + +word_type __ucmpdi2(unsigned long long a, unsigned long long b) +{ + const DWunion au = {.ll = a}; + const DWunion bu = {.ll = b}; + + if ((unsigned int) au.s.high < (unsigned int) bu.s.high) + return 0; + else if ((unsigned int) au.s.high > (unsigned int) bu.s.high) + return 2; + if ((unsigned int) au.s.low < (unsigned int) bu.s.low) + return 0; + else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) + return 2; + return 1; +} +EXPORT_SYMBOL(__ucmpdi2); diff --git a/arch/score/mm/Makefile b/arch/score/mm/Makefile new file mode 100644 index 00000000000..7b1e29b1f8c --- /dev/null +++ b/arch/score/mm/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the Linux/SCORE-specific parts of the memory manager. +# + +obj-y += cache.o extable.o fault.o init.o \ + tlb-miss.o tlb-score.o pgtable.o diff --git a/arch/score/mm/cache.c b/arch/score/mm/cache.c new file mode 100644 index 00000000000..1ebc67f18c6 --- /dev/null +++ b/arch/score/mm/cache.c @@ -0,0 +1,308 @@ +/* + * arch/score/mm/cache.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Lennox Wu + * Chen Liqin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include + +#include + +/* Cache operations. 
*/ +void (*flush_cache_all)(void); +void (*__flush_cache_all)(void); +void (*flush_cache_mm)(struct mm_struct *mm); +void (*flush_cache_range)(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +void (*flush_cache_page)(struct vm_area_struct *vma, + unsigned long page, unsigned long pfn); +void (*flush_icache_range)(unsigned long start, unsigned long end); +void (*__flush_cache_vmap)(void); +void (*__flush_cache_vunmap)(void); +void (*flush_cache_sigtramp)(unsigned long addr); +void (*flush_data_cache_page)(unsigned long addr); +EXPORT_SYMBOL(flush_data_cache_page); +void (*flush_icache_all)(void); + +/*Score 7 cache operations*/ +static inline void s7___flush_cache_all(void); +static void s7_flush_cache_mm(struct mm_struct *mm); +static void s7_flush_cache_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +static void s7_flush_cache_page(struct vm_area_struct *vma, + unsigned long page, unsigned long pfn); +static void s7_flush_icache_range(unsigned long start, unsigned long end); +static void s7_flush_cache_sigtramp(unsigned long addr); +static void s7_flush_data_cache_page(unsigned long addr); +static void s7_flush_dcache_range(unsigned long start, unsigned long end); + +void __update_cache(struct vm_area_struct *vma, unsigned long address, + pte_t pte) +{ + struct page *page; + unsigned long pfn, addr; + int exec = (vma->vm_flags & VM_EXEC); + + pfn = pte_pfn(pte); + if (unlikely(!pfn_valid(pfn))) + return; + page = pfn_to_page(pfn); + if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) { + addr = (unsigned long) page_address(page); + if (exec) + s7_flush_data_cache_page(addr); + clear_bit(PG_arch_1, &page->flags); + } +} + +static inline void setup_protection_map(void) +{ + protection_map[0] = PAGE_NONE; + protection_map[1] = PAGE_READONLY; + protection_map[2] = PAGE_COPY; + protection_map[3] = PAGE_COPY; + protection_map[4] = PAGE_READONLY; + protection_map[5] = PAGE_READONLY; + protection_map[6] = PAGE_COPY; + protection_map[7] = PAGE_COPY; + protection_map[8] = PAGE_NONE; + protection_map[9] = PAGE_READONLY; + protection_map[10] = PAGE_SHARED; + protection_map[11] = PAGE_SHARED; + protection_map[12] = PAGE_READONLY; + protection_map[13] = PAGE_READONLY; + protection_map[14] = PAGE_SHARED; + protection_map[15] = PAGE_SHARED; +} + +void __devinit cpu_cache_init(void) +{ + flush_cache_all = s7_flush_cache_all; + __flush_cache_all = s7___flush_cache_all; + flush_cache_mm = s7_flush_cache_mm; + flush_cache_range = s7_flush_cache_range; + flush_cache_page = s7_flush_cache_page; + flush_icache_range = s7_flush_icache_range; + flush_cache_sigtramp = s7_flush_cache_sigtramp; + flush_data_cache_page = s7_flush_data_cache_page; + + setup_protection_map(); +} + +void s7_flush_icache_all(void) +{ + __asm__ __volatile__( + "la r8, s7_flush_icache_all\n" + "cache 0x10, [r8, 0]\n" + "nop\nnop\nnop\nnop\nnop\nnop\n" + : : : "r8"); +} + +void s7_flush_dcache_all(void) +{ + __asm__ __volatile__( + "la r8, s7_flush_dcache_all\n" + "cache 0x1f, [r8, 0]\n" + "nop\nnop\nnop\nnop\nnop\nnop\n" + "cache 0x1a, [r8, 0]\n" + "nop\nnop\nnop\nnop\nnop\nnop\n" + : : : "r8"); +} + +void s7_flush_cache_all(void) +{ + __asm__ __volatile__( + "la r8, s7_flush_cache_all\n" + "cache 0x10, [r8, 0]\n" + "nop\nnop\nnop\nnop\nnop\nnop\n" + "cache 0x1f, [r8, 0]\n" + "nop\nnop\nnop\nnop\nnop\nnop\n" + "cache 0x1a, [r8, 0]\n" + "nop\nnop\nnop\nnop\nnop\nnop\n" + : : : "r8"); +} + +void s7___flush_cache_all(void) +{ + __asm__ __volatile__( + "la r8, s7_flush_cache_all\n" 
+ "cache 0x10, [r8, 0]\n" + "nop\nnop\nnop\nnop\nnop\nnop\n" + "cache 0x1f, [r8, 0]\n" + "nop\nnop\nnop\nnop\nnop\nnop\n" + "cache 0x1a, [r8, 0]\n" + "nop\nnop\nnop\nnop\nnop\nnop\n" + : : : "r8"); +} + +static void s7_flush_cache_mm(struct mm_struct *mm) +{ + if (!(mm->context)) + return; + s7_flush_cache_all(); +} + +/*if we flush a range precisely , the processing may be very long. +We must check each page in the range whether present. If the page is present, +we can flush the range in the page. Be careful, the range may be cross two +page, a page is present and another is not present. +*/ +/* +The interface is provided in hopes that the port can find +a suitably efficient method for removing multiple page +sized regions from the cache. +*/ +static void +s7_flush_cache_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + struct mm_struct *mm = vma->vm_mm; + int exec = vma->vm_flags & VM_EXEC; + pgd_t *pgdp; + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + + if (!(mm->context)) + return; + + pgdp = pgd_offset(mm, start); + pudp = pud_offset(pgdp, start); + pmdp = pmd_offset(pudp, start); + ptep = pte_offset(pmdp, start); + + while (start <= end) { + unsigned long tmpend; + pgdp = pgd_offset(mm, start); + pudp = pud_offset(pgdp, start); + pmdp = pmd_offset(pudp, start); + ptep = pte_offset(pmdp, start); + + if (!(pte_val(*ptep) & _PAGE_PRESENT)) { + start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1); + continue; + } + tmpend = (start | (PAGE_SIZE-1)) > end ? + end : (start | (PAGE_SIZE-1)); + + s7_flush_dcache_range(start, tmpend); + if (exec) + s7_flush_icache_range(start, tmpend); + start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1); + } +} + +static void +s7_flush_cache_page(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn) +{ + int exec = vma->vm_flags & VM_EXEC; + unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT); + + s7_flush_dcache_range(kaddr, kaddr + PAGE_SIZE); + + if (exec) + s7_flush_icache_range(kaddr, kaddr + PAGE_SIZE); +} + +static void s7_flush_cache_sigtramp(unsigned long addr) +{ + __asm__ __volatile__( + "cache 0x02, [%0, 0]\n" + "nop\nnop\nnop\nnop\nnop\n" + "cache 0x02, [%0, 0x4]\n" + "nop\nnop\nnop\nnop\nnop\n" + + "cache 0x0d, [%0, 0]\n" + "nop\nnop\nnop\nnop\nnop\n" + "cache 0x0d, [%0, 0x4]\n" + "nop\nnop\nnop\nnop\nnop\n" + + "cache 0x1a, [%0, 0]\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (addr)); +} + +/* +Just flush entire Dcache!! +You must ensure the page doesn't include instructions, because +the function will not flush the Icache. +The addr must be cache aligned. +*/ +static void s7_flush_data_cache_page(unsigned long addr) +{ + unsigned int i; + for (i = 0; i < (PAGE_SIZE / L1_CACHE_BYTES); i += L1_CACHE_BYTES) { + __asm__ __volatile__( + "cache 0x0e, [%0, 0]\n" + "cache 0x1a, [%0, 0]\n" + "nop\n" + : : "r" (addr)); + addr += L1_CACHE_BYTES; + } +} + +/* +1. WB and invalid a cache line of Dcache +2. Drain Write Buffer +the range must be smaller than PAGE_SIZE +*/ +static void s7_flush_dcache_range(unsigned long start, unsigned long end) +{ + int size, i; + + start = start & ~(L1_CACHE_BYTES - 1); + end = end & ~(L1_CACHE_BYTES - 1); + size = end - start; + /* flush dcache to ram, and invalidate dcache lines. 
*/ + for (i = 0; i < size; i += L1_CACHE_BYTES) { + __asm__ __volatile__( + "cache 0x0e, [%0, 0]\n" + "nop\nnop\nnop\nnop\nnop\n" + "cache 0x1a, [%0, 0]\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (start)); + start += L1_CACHE_BYTES; + } +} + +static void s7_flush_icache_range(unsigned long start, unsigned long end) +{ + int size, i; + start = start & ~(L1_CACHE_BYTES - 1); + end = end & ~(L1_CACHE_BYTES - 1); + + size = end - start; + /* invalidate icache lines. */ + for (i = 0; i < size; i += L1_CACHE_BYTES) { + __asm__ __volatile__( + "cache 0x02, [%0, 0]\n" + "nop\nnop\nnop\nnop\nnop\n" + : : "r" (start)); + start += L1_CACHE_BYTES; + } +} diff --git a/arch/score/mm/extable.c b/arch/score/mm/extable.c new file mode 100644 index 00000000000..01ff6445171 --- /dev/null +++ b/arch/score/mm/extable.c @@ -0,0 +1,38 @@ +/* + * arch/score/mm/extable.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Lennox Wu + * Chen Liqin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +int fixup_exception(struct pt_regs *regs) +{ + const struct exception_table_entry *fixup; + + fixup = search_exception_tables(regs->cp0_epc); + if (fixup) { + regs->cp0_epc = fixup->fixup; + return 1; + } + return 0; +} diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c new file mode 100644 index 00000000000..47b600e4b2c --- /dev/null +++ b/arch/score/mm/fault.c @@ -0,0 +1,235 @@ +/* + * arch/score/mm/fault.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Lennox Wu + * Chen Liqin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * This routine handles page faults. It determines the address, + * and the problem, and then passes it off to one of the appropriate + * routines. 
+ */ +asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, + unsigned long address) +{ + struct vm_area_struct *vma = NULL; + struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + const int field = sizeof(unsigned long) * 2; + siginfo_t info; + int fault; + + info.si_code = SEGV_MAPERR; + + /* + * We fault-in kernel-space virtual memory on-demand. The + * 'reference' page table is init_mm.pgd. + * + * NOTE! We MUST NOT take any locks for this case. We may + * be in an interrupt or a critical region, and should + * only copy the information from the master page table, + * nothing more. + */ + if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END)) + goto vmalloc_fault; +#ifdef MODULE_START + if (unlikely(address >= MODULE_START && address < MODULE_END)) + goto vmalloc_fault; +#endif + + /* + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ + if (in_atomic() || !mm) + goto bad_area_nosemaphore; + + down_read(&mm->mmap_sem); + vma = find_vma(mm, address); + if (!vma) + goto bad_area; + if (vma->vm_start <= address) + goto good_area; + if (!(vma->vm_flags & VM_GROWSDOWN)) + goto bad_area; + if (expand_stack(vma, address)) + goto bad_area; + /* + * Ok, we have a good vm_area for this memory access, so + * we can handle it.. + */ +good_area: + info.si_code = SEGV_ACCERR; + + if (write) { + if (!(vma->vm_flags & VM_WRITE)) + goto bad_area; + } else { + if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) + goto bad_area; + } + +survive: + /* + * If for any reason at all we couldn't handle the fault, + * make sure we exit gracefully rather than endlessly redo + * the fault. + */ + fault = handle_mm_fault(mm, vma, address, write); + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); + } + if (fault & VM_FAULT_MAJOR) + tsk->maj_flt++; + else + tsk->min_flt++; + + up_read(&mm->mmap_sem); + return; + + /* + * Something tried to access memory that isn't in our memory map.. + * Fix it, but check if it's kernel or user first.. + */ +bad_area: + up_read(&mm->mmap_sem); + +bad_area_nosemaphore: + /* User mode accesses just cause a SIGSEGV */ + if (user_mode(regs)) { + tsk->thread.cp0_badvaddr = address; + tsk->thread.error_code = write; + info.si_signo = SIGSEGV; + info.si_errno = 0; + /* info.si_code has been set above */ + info.si_addr = (void __user *) address; + force_sig_info(SIGSEGV, &info, tsk); + return; + } + +no_context: + /* Are we prepared to handle this kernel fault? */ + if (fixup_exception(regs)) { + current->thread.cp0_baduaddr = address; + return; + } + + /* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. + */ + bust_spinlocks(1); + + printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at " + "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n", + 0, field, address, field, regs->cp0_epc, + field, regs->regs[3]); + die("Oops", regs); + + /* + * We ran out of memory, or some other thing happened to us that made + * us unable to handle the page fault gracefully. + */ +out_of_memory: + up_read(&mm->mmap_sem); + if (is_global_init(tsk)) { + yield(); + down_read(&mm->mmap_sem); + goto survive; + } + printk("VM: killing process %s\n", tsk->comm); + if (user_mode(regs)) + do_group_exit(SIGKILL); + goto no_context; + +do_sigbus: + up_read(&mm->mmap_sem); + /* Kernel mode? 
Handle exceptions or die */ + if (!user_mode(regs)) + goto no_context; + else + /* + * Send a sigbus, regardless of whether we were in kernel + * or user mode. + */ + tsk->thread.cp0_badvaddr = address; + info.si_signo = SIGBUS; + info.si_errno = 0; + info.si_code = BUS_ADRERR; + info.si_addr = (void __user *) address; + force_sig_info(SIGBUS, &info, tsk); + return; +vmalloc_fault: + { + /* + * Synchronize this task's top level page-table + * with the 'reference' page table. + * + * Do _not_ use "tsk" here. We might be inside + * an interrupt in the middle of a task switch.. + */ + int offset = __pgd_offset(address); + pgd_t *pgd, *pgd_k; + pud_t *pud, *pud_k; + pmd_t *pmd, *pmd_k; + pte_t *pte_k; + + pgd = (pgd_t *) pgd_current + offset; + pgd_k = init_mm.pgd + offset; + + if (!pgd_present(*pgd_k)) + goto no_context; + set_pgd(pgd, *pgd_k); + + pud = pud_offset(pgd, address); + pud_k = pud_offset(pgd_k, address); + if (!pud_present(*pud_k)) + goto no_context; + + pmd = pmd_offset(pud, address); + pmd_k = pmd_offset(pud_k, address); + if (!pmd_present(*pmd_k)) + goto no_context; + set_pmd(pmd, *pmd_k); + + pte_k = pte_offset_kernel(pmd_k, address); + if (!pte_present(*pte_k)) + goto no_context; + return; + } +} diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c new file mode 100644 index 00000000000..7780eecc5a4 --- /dev/null +++ b/arch/score/mm/init.c @@ -0,0 +1,173 @@ +/* + * arch/score/mm/init.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Lennox Wu + * Chen Liqin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); + +/* + * We have up to 8 empty zeroed pages so we can map one of the right colour + * when needed. 
+ */ +unsigned long zero_page_mask; +unsigned long empty_zero_page; +EXPORT_SYMBOL_GPL(empty_zero_page); + +static struct kcore_list kcore_mem, kcore_vmalloc; + +unsigned long setup_zero_pages(void) +{ + unsigned int order = 0; + unsigned long size; + struct page *page; + + empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!empty_zero_page) + panic("Oh boy, that early out of memory?"); + + page = virt_to_page((void *) empty_zero_page); + split_page(page, order); + while (page < virt_to_page((void *) (empty_zero_page + + (PAGE_SIZE << order)))) { + SetPageReserved(page); + page++; + } + + size = PAGE_SIZE << order; + zero_page_mask = (size - 1) & PAGE_MASK; + + return 1UL << order; +} + +#ifndef CONFIG_NEED_MULTIPLE_NODES +static int __init page_is_ram(unsigned long pagenr) +{ + if (pagenr >= min_low_pfn && pagenr < max_low_pfn) + return 1; + else + return 0; +} + +void __init paging_init(void) +{ + unsigned long max_zone_pfns[MAX_NR_ZONES]; + unsigned long lastpfn; + + pagetable_init(); + max_zone_pfns[ZONE_NORMAL] = max_low_pfn; + lastpfn = max_low_pfn; + free_area_init_nodes(max_zone_pfns); +} + +void __init mem_init(void) +{ + unsigned long codesize, reservedpages, datasize, initsize; + unsigned long tmp, ram = 0; + + max_mapnr = max_low_pfn; + high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); + totalram_pages += free_all_bootmem(); + totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */ + reservedpages = 0; + + for (tmp = 0; tmp < max_low_pfn; tmp++) + if (page_is_ram(tmp)) { + ram++; + if (PageReserved(pfn_to_page(tmp))) + reservedpages++; + } + + num_physpages = ram; + codesize = (unsigned long) &_etext - (unsigned long) &_text; + datasize = (unsigned long) &_edata - (unsigned long) &_etext; + initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; + + kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); + kclist_add(&kcore_vmalloc, (void *) VMALLOC_START, + VMALLOC_END - VMALLOC_START); + + printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " + "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n", + (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), + ram << (PAGE_SHIFT-10), codesize >> 10, + reservedpages << (PAGE_SHIFT-10), datasize >> 10, + initsize >> 10, + (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))); +} +#endif /* !CONFIG_NEED_MULTIPLE_NODES */ + +void free_init_pages(const char *what, unsigned long begin, unsigned long end) +{ + unsigned long pfn; + + for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) { + struct page *page = pfn_to_page(pfn); + void *addr = phys_to_virt(PFN_PHYS(pfn)); + + ClearPageReserved(page); + init_page_count(page); + memset(addr, POISON_FREE_INITMEM, PAGE_SIZE); + __free_page(page); + totalram_pages++; + } + printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); +} + +#ifdef CONFIG_BLK_DEV_INITRD +void free_initrd_mem(unsigned long start, unsigned long end) +{ + free_init_pages("initrd memory", + virt_to_phys((void *) start), + virt_to_phys((void *) end)); +} +#endif + +void __init_refok free_initmem(void) +{ + free_init_pages("unused kernel memory", + (unsigned long)__init_begin, (unsigned long)__init_end); +} + +unsigned long pgd_current; + +#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE< + * Chen Liqin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * 
(at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include + +void pgd_init(unsigned long page) +{ + unsigned long *p = (unsigned long *) page; + int i; + + for (i = 0; i < USER_PTRS_PER_PGD; i += 8) { + p[i + 0] = (unsigned long) invalid_pte_table; + p[i + 1] = (unsigned long) invalid_pte_table; + p[i + 2] = (unsigned long) invalid_pte_table; + p[i + 3] = (unsigned long) invalid_pte_table; + p[i + 4] = (unsigned long) invalid_pte_table; + p[i + 5] = (unsigned long) invalid_pte_table; + p[i + 6] = (unsigned long) invalid_pte_table; + p[i + 7] = (unsigned long) invalid_pte_table; + } +} + +void __init pagetable_init(void) +{ + unsigned long vaddr; + pgd_t *pgd_base; + + /* Initialize the entire pgd. */ + pgd_init((unsigned long) swapper_pg_dir); + pgd_init((unsigned long) swapper_pg_dir + + sizeof(pgd_t) * USER_PTRS_PER_PGD); + + pgd_base = swapper_pg_dir; + vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; +} diff --git a/arch/score/mm/tlb-miss.S b/arch/score/mm/tlb-miss.S new file mode 100644 index 00000000000..f27651914e8 --- /dev/null +++ b/arch/score/mm/tlb-miss.S @@ -0,0 +1,199 @@ +/* + * arch/score/mm/tlbex.S + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Lennox Wu + * Chen Liqin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include + +/* +* After this macro runs, the pte faulted on is +* in register PTE, a ptr into the table in which +* the pte belongs is in PTR. +*/ + .macro load_pte, pte, ptr + la \ptr, pgd_current + lw \ptr, [\ptr, 0] + mfcr \pte, cr6 + srli \pte, \pte, 22 + slli \pte, \pte, 2 + add \ptr, \ptr, \pte + lw \ptr, [\ptr, 0] + mfcr \pte, cr6 + srli \pte, \pte, 10 + andi \pte, 0xffc + add \ptr, \ptr, \pte + lw \pte, [\ptr, 0] + .endm + + .macro pte_reload, ptr + lw \ptr, [\ptr, 0] + mtcr \ptr, cr12 + nop + nop + nop + nop + nop + .endm + + .macro do_fault, write + SAVE_ALL + mfcr r6, cr6 + mv r4, r0 + ldi r5, \write + la r8, do_page_fault + brl r8 + j ret_from_exception + .endm + + .macro pte_writable, pte, ptr, label + andi \pte, 0x280 + cmpi.c \pte, 0x280 + bne \label + lw \pte, [\ptr, 0] /*reload PTE*/ + .endm + +/* + * Make PTE writable, update software status bits as well, + * then store at PTR. 
+ */ + .macro pte_makewrite, pte, ptr + ori \pte, 0x426 + sw \pte, [\ptr, 0] + .endm + + .text +ENTRY(score7_FTLB_refill_Handler) + la r31, pgd_current /* get pgd pointer */ + lw r31, [r31, 0] /* get the address of PGD */ + mfcr r30, cr6 + srli r30, r30, 22 /* PGDIR_SHIFT = 22*/ + slli r30, r30, 2 + add r31, r31, r30 + lw r31, [r31, 0] /* get the address of the start address of PTE table */ + + mfcr r30, cr9 + andi r30, 0xfff /* equivalent to get PET index and right shift 2 bits */ + add r31, r31, r30 + lw r30, [r31, 0] /* load pte entry */ + mtcr r30, cr12 + nop + nop + nop + nop + nop + mtrtlb + nop + nop + nop + nop + nop + rte /* 6 cycles to make sure tlb entry works */ + +ENTRY(score7_KSEG_refill_Handler) + la r31, pgd_current /* get pgd pointer */ + lw r31, [r31, 0] /* get the address of PGD */ + mfcr r30, cr6 + srli r30, r30, 22 /* PGDIR_SHIFT = 22 */ + slli r30, r30, 2 + add r31, r31, r30 + lw r31, [r31, 0] /* get the address of the start address of PTE table */ + + mfcr r30, cr6 /* get Bad VPN */ + srli r30, r30, 10 + andi r30, 0xffc /* PTE VPN mask (bit 11~2) */ + + add r31, r31, r30 + lw r30, [r31, 0] /* load pte entry */ + mtcr r30, cr12 + nop + nop + nop + nop + nop + mtrtlb + nop + nop + nop + nop + nop + rte /* 6 cycles to make sure tlb entry works */ + +nopage_tlbl: + do_fault 0 /* Read */ + +ENTRY(handle_tlb_refill) + load_pte r30, r31 + pte_writable r30, r31, handle_tlb_refill_nopage + pte_makewrite r30, r31 /* Access|Modify|Dirty|Valid */ + pte_reload r31 + mtrtlb + nop + nop + nop + nop + nop + rte +handle_tlb_refill_nopage: + do_fault 0 /* Read */ + +ENTRY(handle_tlb_invaild) + load_pte r30, r31 + stlb /* find faulting entry */ + pte_writable r30, r31, handle_tlb_invaild_nopage + pte_makewrite r30, r31 /* Access|Modify|Dirty|Valid */ + pte_reload r31 + mtptlb + nop + nop + nop + nop + nop + rte +handle_tlb_invaild_nopage: + do_fault 0 /* Read */ + +ENTRY(handle_mod) + load_pte r30, r31 + stlb /* find faulting entry */ + andi r30, _PAGE_WRITE /* Writable? */ + cmpz.c r30 + beq nowrite_mod + lw r30, [r31, 0] /* reload into r30 */ + + /* Present and writable bits set, set accessed and dirty bits. */ + pte_makewrite r30, r31 + + /* Now reload the entry into the tlb. */ + pte_reload r31 + mtptlb + nop + nop + nop + nop + nop + rte + +nowrite_mod: + do_fault 1 /* Write */ diff --git a/arch/score/mm/tlb-score.c b/arch/score/mm/tlb-score.c new file mode 100644 index 00000000000..4fa5aa5afec --- /dev/null +++ b/arch/score/mm/tlb-score.c @@ -0,0 +1,251 @@ +/* + * arch/score/mm/tlb-score.c + * + * Score Processor version. + * + * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. + * Lennox Wu + * Chen Liqin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + +#include +#include +#include + +#define TLBSIZE 32 + +unsigned long asid_cache = ASID_FIRST_VERSION; +EXPORT_SYMBOL(asid_cache); + +void local_flush_tlb_all(void) +{ + unsigned long flags; + unsigned long old_ASID; + int entry; + + local_irq_save(flags); + old_ASID = pevn_get() & ASID_MASK; + pectx_set(0); /* invalid */ + entry = tlblock_get(); /* skip locked entries*/ + + for (; entry < TLBSIZE; entry++) { + tlbpt_set(entry); + pevn_set(KSEG1); + barrier(); + tlb_write_indexed(); + } + pevn_set(old_ASID); + local_irq_restore(flags); +} + +/* + * If mm is currently active_mm, we can't really drop it. Instead, + * we will get a new one for it. + */ +static inline void +drop_mmu_context(struct mm_struct *mm) +{ + unsigned long flags; + + local_irq_save(flags); + get_new_mmu_context(mm); + pevn_set(mm->context & ASID_MASK); + local_irq_restore(flags); +} + +void local_flush_tlb_mm(struct mm_struct *mm) +{ + if (mm->context != 0) + drop_mmu_context(mm); +} + +void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long vma_mm_context = mm->context; + if (mm->context != 0) { + unsigned long flags; + int size; + + local_irq_save(flags); + size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + if (size <= TLBSIZE) { + int oldpid = pevn_get() & ASID_MASK; + int newpid = vma_mm_context & ASID_MASK; + + start &= PAGE_MASK; + end += (PAGE_SIZE - 1); + end &= PAGE_MASK; + while (start < end) { + int idx; + + pevn_set(start | newpid); + start += PAGE_SIZE; + barrier(); + tlb_probe(); + idx = tlbpt_get(); + pectx_set(0); + pevn_set(KSEG1); + if (idx < 0) + continue; + tlb_write_indexed(); + } + pevn_set(oldpid); + } else { + /* Bigger than TLBSIZE, get new ASID directly */ + get_new_mmu_context(mm); + if (mm == current->active_mm) + pevn_set(vma_mm_context & ASID_MASK); + } + local_irq_restore(flags); + } +} + +void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + unsigned long flags; + int size; + + local_irq_save(flags); + size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + if (size <= TLBSIZE) { + int pid = pevn_get(); + + start &= PAGE_MASK; + end += PAGE_SIZE - 1; + end &= PAGE_MASK; + + while (start < end) { + long idx; + + pevn_set(start); + start += PAGE_SIZE; + tlb_probe(); + idx = tlbpt_get(); + if (idx < 0) + continue; + pectx_set(0); + pevn_set(KSEG1); + barrier(); + tlb_write_indexed(); + } + pevn_set(pid); + } else { + local_flush_tlb_all(); + } + + local_irq_restore(flags); +} + +void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + if (!vma || vma->vm_mm->context != 0) { + unsigned long flags; + int oldpid, newpid, idx; + unsigned long vma_ASID = vma->vm_mm->context; + + newpid = vma_ASID & ASID_MASK; + page &= PAGE_MASK; + local_irq_save(flags); + oldpid = pevn_get() & ASID_MASK; + pevn_set(page | newpid); + barrier(); + tlb_probe(); + idx = tlbpt_get(); + pectx_set(0); + pevn_set(KSEG1); + if (idx < 0) /* p_bit(31) - 1: miss, 0: hit*/ + goto finish; + barrier(); + tlb_write_indexed(); +finish: + pevn_set(oldpid); + local_irq_restore(flags); + } +} + +/* + * This one is only used for pages with the global bit set so we don't care + * much about the ASID. 
+ */ +void local_flush_tlb_one(unsigned long page) +{ + unsigned long flags; + int oldpid, idx; + + local_irq_save(flags); + oldpid = pevn_get(); + page &= (PAGE_MASK << 1); + pevn_set(page); + barrier(); + tlb_probe(); + idx = tlbpt_get(); + pectx_set(0); + if (idx >= 0) { + /* Make sure all entries differ. */ + pevn_set(KSEG1); + barrier(); + tlb_write_indexed(); + } + pevn_set(oldpid); + local_irq_restore(flags); +} + +void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) +{ + unsigned long flags; + int idx, pid; + + /* + * Handle debugger faulting in for debugee. + */ + if (current->active_mm != vma->vm_mm) + return; + + pid = pevn_get() & ASID_MASK; + + local_irq_save(flags); + address &= PAGE_MASK; + pevn_set(address | pid); + barrier(); + tlb_probe(); + idx = tlbpt_get(); + pectx_set(pte_val(pte)); + pevn_set(address | pid); + if (idx < 0) + tlb_write_random(); + else + tlb_write_indexed(); + + pevn_set(pid); + local_irq_restore(flags); +} + +void __cpuinit tlb_init(void) +{ + tlblock_set(0); + local_flush_tlb_all(); + memcpy((void *)(EXCEPTION_VECTOR_BASE_ADDR + 0x100), + &score7_FTLB_refill_Handler, 0xFC); + flush_icache_range(EXCEPTION_VECTOR_BASE_ADDR + 0x100, + EXCEPTION_VECTOR_BASE_ADDR + 0x1FC); +} -- cgit v1.2.3-70-g09d2 From 0402c91af944c61bf788370f03326959a35cb8be Mon Sep 17 00:00:00 2001 From: Chen Liqin Date: Fri, 19 Jun 2009 13:53:49 +0800 Subject: score: update files according to review comments modified: arch/score/include/asm/cacheflush.h modified: arch/score/include/asm/delay.h modified: arch/score/include/asm/errno.h modified: arch/score/include/asm/pgtable-bits.h modified: arch/score/include/asm/pgtable.h modified: arch/score/include/asm/ptrace.h modified: arch/score/include/asm/unistd.h modified: arch/score/kernel/entry.S modified: arch/score/kernel/process.c modified: arch/score/kernel/ptrace.c modified: arch/score/kernel/signal.c modified: arch/score/kernel/sys_score.c modified: arch/score/kernel/traps.c modified: arch/score/mm/cache.c Signed-off-by: Chen Liqin Signed-off-by: Arnd Bergmann --- arch/score/include/asm/cacheflush.h | 18 ++--- arch/score/include/asm/delay.h | 11 ++- arch/score/include/asm/errno.h | 1 - arch/score/include/asm/pgtable-bits.h | 2 + arch/score/include/asm/pgtable.h | 59 ++++++++------ arch/score/include/asm/ptrace.h | 18 +++-- arch/score/include/asm/unistd.h | 3 +- arch/score/kernel/entry.S | 6 +- arch/score/kernel/process.c | 2 +- arch/score/kernel/ptrace.c | 144 +++++++++------------------------- arch/score/kernel/signal.c | 6 +- arch/score/kernel/sys_score.c | 7 +- arch/score/kernel/traps.c | 2 +- arch/score/mm/cache.c | 125 +++++++++-------------------- 14 files changed, 154 insertions(+), 250 deletions(-) (limited to 'arch') diff --git a/arch/score/include/asm/cacheflush.h b/arch/score/include/asm/cacheflush.h index 1c74628caf7..07cc8fc457c 100644 --- a/arch/score/include/asm/cacheflush.h +++ b/arch/score/include/asm/cacheflush.h @@ -4,18 +4,16 @@ /* Keep includes the same across arches. 
*/ #include -extern void (*flush_cache_all)(void); -extern void (*flush_cache_mm)(struct mm_struct *mm); -extern void (*flush_cache_range)(struct vm_area_struct *vma, +extern void flush_cache_all(void); +extern void flush_cache_mm(struct mm_struct *mm); +extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); -extern void (*flush_cache_page)(struct vm_area_struct *vma, +extern void flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn); -extern void (*flush_cache_sigtramp)(unsigned long addr); -extern void (*flush_icache_all)(void); -extern void (*flush_icache_range)(unsigned long start, unsigned long end); -extern void (*flush_data_cache_page)(unsigned long addr); - -extern void s7_flush_cache_all(void); +extern void flush_cache_sigtramp(unsigned long addr); +extern void flush_icache_all(void); +extern void flush_icache_range(unsigned long start, unsigned long end); +extern void flush_dcache_range(unsigned long start, unsigned long end); #define flush_cache_dup_mm(mm) do {} while (0) #define flush_dcache_page(page) do {} while (0) diff --git a/arch/score/include/asm/delay.h b/arch/score/include/asm/delay.h index ad716f6d922..6726ec199dc 100644 --- a/arch/score/include/asm/delay.h +++ b/arch/score/include/asm/delay.h @@ -3,17 +3,22 @@ static inline void __delay(unsigned long loops) { + /* 3 cycles per loop. */ __asm__ __volatile__ ( - "1:\tsubi\t%0,1\n\t" + "1:\tsubi\t%0, 3\n\t" "cmpz.c\t%0\n\t" - "bne\t1b\n\t" + "ble\t1b\n\t" : "=r" (loops) : "0" (loops)); } static inline void __udelay(unsigned long usecs) { - __delay(usecs); + unsigned long loops_per_usec; + + loops_per_usec = (loops_per_jiffy * HZ) / 1000000; + + __delay(usecs * loops_per_usec); } #define udelay(usecs) __udelay(usecs) diff --git a/arch/score/include/asm/errno.h b/arch/score/include/asm/errno.h index 7cd3e1f07c0..29ff39d5ab4 100644 --- a/arch/score/include/asm/errno.h +++ b/arch/score/include/asm/errno.h @@ -2,6 +2,5 @@ #define _ASM_SCORE_ERRNO_H #include -#define EMAXERRNO 1024 #endif /* _ASM_SCORE_ERRNO_H */ diff --git a/arch/score/include/asm/pgtable-bits.h b/arch/score/include/asm/pgtable-bits.h index ca16d357a64..7d65a96a82e 100644 --- a/arch/score/include/asm/pgtable-bits.h +++ b/arch/score/include/asm/pgtable-bits.h @@ -17,6 +17,8 @@ #define _CACHE_MASK (1<<3) #define _PAGE_BUFFERABLE (1<<4) /*Fallow Spec. 
*/ +#define __READABLE (_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED) +#define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED) #define _PAGE_CHG_MASK \ (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_CACHE) diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h index 0f7177a4220..5e913e57c67 100644 --- a/arch/score/include/asm/pgtable.h +++ b/arch/score/include/asm/pgtable.h @@ -106,24 +106,6 @@ static inline void pmd_clear(pmd_t *pmdp) ((swp_entry_t) { pte_val(pte)}) #define __swp_entry_to_pte(x) ((pte_t) {(x).val}) -#define __P000 __pgprot(0) -#define __P001 __pgprot(0) -#define __P010 __pgprot(0) -#define __P011 __pgprot(0) -#define __P100 __pgprot(0) -#define __P101 __pgprot(0) -#define __P110 __pgprot(0) -#define __P111 __pgprot(0) - -#define __S000 __pgprot(0) -#define __S001 __pgprot(0) -#define __S010 __pgprot(0) -#define __S011 __pgprot(0) -#define __S100 __pgprot(0) -#define __S101 __pgprot(0) -#define __S110 __pgprot(0) -#define __S111 __pgprot(0) - #define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd))) #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) static inline pte_t pte_mkspecial(pte_t pte) { return pte; } @@ -136,10 +118,15 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ remap_pfn_range(vma, vaddr, pfn, size, prot) -#define pgd_present(pgd) (1) /* pages are always present on non MMU */ +/* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pgd is never bad, and a pmd always exists (as it's folded + * into the pgd entry) + */ +#define pgd_present(pgd) (1) #define pgd_none(pgd) (0) #define pgd_bad(pgd) (0) -#define pgd_clear(pgdp) +#define pgd_clear(pgdp) do { } while (0) #define kern_addr_valid(addr) (1) #define pmd_offset(a, b) ((void *) 0) @@ -150,11 +137,33 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } #define pud_offset(pgd, address) ((pud_t *) pgd) -#define PAGE_NONE __pgprot(0) /* these mean nothing to non MMU */ -#define PAGE_SHARED __pgprot(0) /* these mean nothing to non MMU */ -#define PAGE_COPY __pgprot(0) /* these mean nothing to non MMU */ -#define PAGE_READONLY __pgprot(0) /* these mean nothing to non MMU */ -#define PAGE_KERNEL __pgprot(0) /* these mean nothing to non MMU */ +#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_CACHE) +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ + _PAGE_CACHE) +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHE) +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHE) +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ + _PAGE_GLOBAL | _PAGE_CACHE) +#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \ + __WRITEABLE | _PAGE_GLOBAL & ~_PAGE_CACHE) + +#define __P000 PAGE_NONE +#define __P001 PAGE_READONLY +#define __P010 PAGE_COPY +#define __P011 PAGE_COPY +#define __P100 PAGE_READONLY +#define __P101 PAGE_READONLY +#define __P110 PAGE_COPY +#define __P111 PAGE_COPY + +#define __S000 PAGE_NONE +#define __S001 PAGE_READONLY +#define __S010 PAGE_SHARED +#define __S011 PAGE_SHARED +#define __S100 PAGE_READONLY +#define __S101 PAGE_READONLY +#define __S110 PAGE_SHARED +#define __S111 PAGE_SHARED #define pgprot_noncached(x) (x) diff --git a/arch/score/include/asm/ptrace.h b/arch/score/include/asm/ptrace.h index 1a4900ac49f..66b14c8891c 100644 --- a/arch/score/include/asm/ptrace.h +++ b/arch/score/include/asm/ptrace.h @@ -1,6 +1,9 @@ #ifndef 
_ASM_SCORE_PTRACE_H #define _ASM_SCORE_PTRACE_H +#define PTRACE_GETREGS 12 +#define PTRACE_SETREGS 13 + #define PC 32 #define CONDITION 33 #define ECR 34 @@ -76,12 +79,17 @@ struct pt_regs { */ #define user_mode(regs) ((regs->cp0_psr & 8) == 8) -#define instruction_pointer(regs) (0) -#define profile_pc(regs) instruction_pointer(regs) +#define instruction_pointer(regs) ((unsigned long)(regs)->cp0_epc) +#define profile_pc(regs) instruction_pointer(regs) -extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit); +extern void do_syscall_trace(struct pt_regs *regs, int entryexit); extern int read_tsk_long(struct task_struct *, unsigned long, unsigned long *); -extern void clear_single_step(struct task_struct *); -#endif +extern int read_tsk_short(struct task_struct *, unsigned long, + unsigned short *); + +#define arch_has_single_step() (1) +extern void user_enable_single_step(struct task_struct *); +extern void user_disable_single_step(struct task_struct *); +#endif /* __KERNEL__ */ #endif /* _ASM_SCORE_PTRACE_H */ diff --git a/arch/score/include/asm/unistd.h b/arch/score/include/asm/unistd.h index 9aa3a159bbf..f0f84deeb56 100644 --- a/arch/score/include/asm/unistd.h +++ b/arch/score/include/asm/unistd.h @@ -1,7 +1,8 @@ -#ifndef _ASM_SCORE_UNISTD_H +#if !defined(_ASM_SCORE_UNISTD_H) || defined(__SYSCALL) #define _ASM_SCORE_UNISTD_H #define __ARCH_HAVE_MMU +#define __ARCH_WANT_IPC_PARSE_VERSION #include diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S index 6c6b7ea58af..0af89b2f16b 100644 --- a/arch/score/kernel/entry.S +++ b/arch/score/kernel/entry.S @@ -23,7 +23,7 @@ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ -#include +#include #include #include @@ -434,7 +434,7 @@ stack_done: sw r8, [r0, PT_R7] b 2f 1: - cmpi.c r4, -EMAXERRNO-1 # -EMAXERRNO - 1=-1134 + cmpi.c r4, -MAX_ERRNO - 1 ble 2f ldi r8, 0x1; sw r8, [r0, PT_R7] @@ -466,7 +466,7 @@ syscall_trace_entry: lw r7, [r0, PT_R7] brl r8 - li r8, -EMAXERRNO - 1 # error? + li r8, -MAX_ERRNO - 1 sw r8, [r0, PT_R7] # set error flag neg r4, r4 # error diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c index aaa3085251f..d93966f7ac8 100644 --- a/arch/score/kernel/process.c +++ b/arch/score/kernel/process.c @@ -56,7 +56,7 @@ void __noreturn cpu_idle(void) } } -asmlinkage void ret_from_fork(void); +void ret_from_fork(void); void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) { diff --git a/arch/score/kernel/ptrace.c b/arch/score/kernel/ptrace.c index 8fe7209355a..19911e3187b 100644 --- a/arch/score/kernel/ptrace.c +++ b/arch/score/kernel/ptrace.c @@ -80,7 +80,35 @@ write_tsk_long(struct task_struct *child, return copied != sizeof(val) ? -EIO : 0; } -void set_single_step(struct task_struct *child) +/* + * Get all user integer registers. + */ +static int ptrace_getregs(struct task_struct *tsk, void __user *uregs) +{ + struct pt_regs *regs = task_pt_regs(tsk); + + return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0; +} + +/* + * Set all user integer registers. 
+ */ +static int ptrace_setregs(struct task_struct *tsk, void __user *uregs) +{ + struct pt_regs newregs; + int ret; + + ret = -EFAULT; + if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) { + struct pt_regs *regs = task_pt_regs(tsk); + *regs = newregs; + ret = 0; + } + + return ret; +} + +void user_enable_single_step(struct task_struct *child) { /* far_epc is the target of branch */ unsigned int epc, far_epc = 0; @@ -201,7 +229,7 @@ void set_single_step(struct task_struct *child) } } -void clear_single_step(struct task_struct *child) +void user_disable_single_step(struct task_struct *child) { if (child->thread.insn1_type == 0) write_tsk_short(child, child->thread.addr1, @@ -230,54 +258,17 @@ void clear_single_step(struct task_struct *child) child->thread.ss_nextcnt = 0; } - -void ptrace_disable(struct task_struct *child) {} +void ptrace_disable(struct task_struct *child) +{ + user_disable_single_step(child); +} long arch_ptrace(struct task_struct *child, long request, long addr, long data) { int ret; - if (request == PTRACE_TRACEME) { - /* are we already being traced? */ - if (current->ptrace & PT_PTRACED) - return -EPERM; - - /* set the ptrace bit in the process flags. */ - current->ptrace |= PT_PTRACED; - return 0; - } - - ret = -ESRCH; - if (!child) - return ret; - - ret = -EPERM; - - if (request == PTRACE_ATTACH) { - ret = ptrace_attach(child); - return ret; - } - - ret = ptrace_check_attach(child, request == PTRACE_KILL); - if (ret < 0) - return ret; - switch (request) { - case PTRACE_PEEKTEXT: /* read word at location addr. */ - case PTRACE_PEEKDATA: { - unsigned long tmp; - int copied; - - copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); - ret = -EIO; - if (copied != sizeof(tmp)) - break; - - ret = put_user(tmp, (unsigned long *) data); - return ret; - } - /* Read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: { struct pt_regs *regs; @@ -329,15 +320,6 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data) return ret; } - case PTRACE_POKETEXT: /* write the word at location addr. */ - case PTRACE_POKEDATA: - ret = 0; - if (access_process_vm(child, addr, &data, sizeof(data), 1) - == sizeof(data)) - break; - ret = -EIO; - return ret; - case PTRACE_POKEUSR: { struct pt_regs *regs; ret = 0; @@ -372,64 +354,16 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data) break; } - case PTRACE_SYSCALL: /* continue and stop at next - (return from) syscall. */ - case PTRACE_CONT: { /* restart after signal. */ - ret = -EIO; - if (!valid_signal(data)) - break; - if (request == PTRACE_SYSCALL) - set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - else - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - - child->exit_code = data; - wake_up_process(child); - ret = 0; - break; - } - - /* - * make the child exit. Best I can do is send it a sigkill. - * perhaps it should be put in the status that it wants to - * exit. - */ - case PTRACE_KILL: - ret = 0; - if (child->state == EXIT_ZOMBIE) /* already dead. */ - break; - child->exit_code = SIGKILL; - clear_single_step(child); - wake_up_process(child); + case PTRACE_GETREGS: + ret = ptrace_getregs(child, (void __user *)data); break; - case PTRACE_SINGLESTEP: { /* set the trap flag. */ - ret = -EIO; - if ((unsigned long) data > _NSIG) - break; - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - set_single_step(child); - child->exit_code = data; - /* give it a chance to run. 
*/ - wake_up_process(child); - ret = 0; - break; - } - - case PTRACE_DETACH: /* detach a process that was attached. */ - ret = ptrace_detach(child, data); - break; - - case PTRACE_SETOPTIONS: - if (data & PTRACE_O_TRACESYSGOOD) - child->ptrace |= PT_TRACESYSGOOD; - else - child->ptrace &= ~PT_TRACESYSGOOD; - ret = 0; + case PTRACE_SETREGS: + ret = ptrace_setregs(child, (void __user *)data); break; default: - ret = -EIO; + ret = ptrace_request(child, request, addr, data); break; } diff --git a/arch/score/kernel/signal.c b/arch/score/kernel/signal.c index b4ed1b3f807..5c004084d17 100644 --- a/arch/score/kernel/signal.c +++ b/arch/score/kernel/signal.c @@ -132,7 +132,7 @@ void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, return (void *)((sp - frame_size) & ~7); } -asmlinkage int score_sigaltstack(struct pt_regs *regs) +int score_sigaltstack(struct pt_regs *regs) { const stack_t *uss = (const stack_t *) regs->regs[4]; stack_t *uoss = (stack_t *) regs->regs[5]; @@ -141,7 +141,7 @@ asmlinkage int score_sigaltstack(struct pt_regs *regs) return do_sigaltstack(uss, uoss, usp); } -asmlinkage void score_rt_sigreturn(struct pt_regs *regs) +void score_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe __user *frame; sigset_t set; @@ -276,7 +276,7 @@ int handle_signal(unsigned long sig, siginfo_t *info, return ret; } -asmlinkage void do_signal(struct pt_regs *regs) +void do_signal(struct pt_regs *regs) { struct k_sigaction ka; sigset_t *oldset; diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c index 6a60d1ee533..68655f4cbce 100644 --- a/arch/score/kernel/sys_score.c +++ b/arch/score/kernel/sys_score.c @@ -64,8 +64,7 @@ sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, * Clone a task - this clones the calling program thread. * This is called indirectly via a small wrapper */ -asmlinkage int -score_clone(struct pt_regs *regs) +int score_clone(struct pt_regs *regs) { unsigned long clone_flags; unsigned long newsp; @@ -93,7 +92,7 @@ score_clone(struct pt_regs *regs) * sys_execve() executes a new program. * This is called indirectly via a small wrapper */ -asmlinkage int score_execve(struct pt_regs *regs) +int score_execve(struct pt_regs *regs) { int error; char *filename; @@ -114,7 +113,7 @@ asmlinkage int score_execve(struct pt_regs *regs) * If we ever come here the user sp is bad. Zap the process right away. * Due to the bad stack signaling wouldn't work. */ -asmlinkage void bad_stack(void) +void bad_stack(void) { do_exit(SIGSEGV); } diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c index 957ae9eb356..0e46fb19a84 100644 --- a/arch/score/kernel/traps.c +++ b/arch/score/kernel/traps.c @@ -252,7 +252,7 @@ asmlinkage void do_ri(struct pt_regs *regs) if (current->thread.single_step == 1) { if ((epc == current->thread.addr1) || (epc == current->thread.addr2)) { - clear_single_step(current); + user_disable_single_step(current); force_sig(SIGTRAP, current); return; } else diff --git a/arch/score/mm/cache.c b/arch/score/mm/cache.c index 1ebc67f18c6..dbac9d9dfdd 100644 --- a/arch/score/mm/cache.c +++ b/arch/score/mm/cache.c @@ -32,34 +32,26 @@ #include -/* Cache operations. 
*/ -void (*flush_cache_all)(void); -void (*__flush_cache_all)(void); -void (*flush_cache_mm)(struct mm_struct *mm); -void (*flush_cache_range)(struct vm_area_struct *vma, - unsigned long start, unsigned long end); -void (*flush_cache_page)(struct vm_area_struct *vma, - unsigned long page, unsigned long pfn); -void (*flush_icache_range)(unsigned long start, unsigned long end); -void (*__flush_cache_vmap)(void); -void (*__flush_cache_vunmap)(void); -void (*flush_cache_sigtramp)(unsigned long addr); -void (*flush_data_cache_page)(unsigned long addr); -EXPORT_SYMBOL(flush_data_cache_page); -void (*flush_icache_all)(void); - -/*Score 7 cache operations*/ -static inline void s7___flush_cache_all(void); -static void s7_flush_cache_mm(struct mm_struct *mm); -static void s7_flush_cache_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end); -static void s7_flush_cache_page(struct vm_area_struct *vma, - unsigned long page, unsigned long pfn); -static void s7_flush_icache_range(unsigned long start, unsigned long end); -static void s7_flush_cache_sigtramp(unsigned long addr); -static void s7_flush_data_cache_page(unsigned long addr); -static void s7_flush_dcache_range(unsigned long start, unsigned long end); +/* +Just flush entire Dcache!! +You must ensure the page doesn't include instructions, because +the function will not flush the Icache. +The addr must be cache aligned. +*/ +static void flush_data_cache_page(unsigned long addr) +{ + unsigned int i; + for (i = 0; i < (PAGE_SIZE / L1_CACHE_BYTES); i += L1_CACHE_BYTES) { + __asm__ __volatile__( + "cache 0x0e, [%0, 0]\n" + "cache 0x1a, [%0, 0]\n" + "nop\n" + : : "r" (addr)); + addr += L1_CACHE_BYTES; + } +} +/* called by update_mmu_cache. */ void __update_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { @@ -74,7 +66,7 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address, if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) { addr = (unsigned long) page_address(page); if (exec) - s7_flush_data_cache_page(addr); + flush_data_cache_page(addr); clear_bit(PG_arch_1, &page->flags); } } @@ -101,44 +93,22 @@ static inline void setup_protection_map(void) void __devinit cpu_cache_init(void) { - flush_cache_all = s7_flush_cache_all; - __flush_cache_all = s7___flush_cache_all; - flush_cache_mm = s7_flush_cache_mm; - flush_cache_range = s7_flush_cache_range; - flush_cache_page = s7_flush_cache_page; - flush_icache_range = s7_flush_icache_range; - flush_cache_sigtramp = s7_flush_cache_sigtramp; - flush_data_cache_page = s7_flush_data_cache_page; - setup_protection_map(); } -void s7_flush_icache_all(void) +void flush_icache_all(void) { __asm__ __volatile__( - "la r8, s7_flush_icache_all\n" + "la r8, flush_icache_all\n" "cache 0x10, [r8, 0]\n" "nop\nnop\nnop\nnop\nnop\nnop\n" : : : "r8"); } -void s7_flush_dcache_all(void) -{ - __asm__ __volatile__( - "la r8, s7_flush_dcache_all\n" - "cache 0x1f, [r8, 0]\n" - "nop\nnop\nnop\nnop\nnop\nnop\n" - "cache 0x1a, [r8, 0]\n" - "nop\nnop\nnop\nnop\nnop\nnop\n" - : : : "r8"); -} - -void s7_flush_cache_all(void) +void flush_dcache_all(void) { __asm__ __volatile__( - "la r8, s7_flush_cache_all\n" - "cache 0x10, [r8, 0]\n" - "nop\nnop\nnop\nnop\nnop\nnop\n" + "la r8, flush_dcache_all\n" "cache 0x1f, [r8, 0]\n" "nop\nnop\nnop\nnop\nnop\nnop\n" "cache 0x1a, [r8, 0]\n" @@ -146,10 +116,10 @@ void s7_flush_cache_all(void) : : : "r8"); } -void s7___flush_cache_all(void) +void flush_cache_all(void) { __asm__ __volatile__( - "la r8, s7_flush_cache_all\n" + "la r8, 
flush_cache_all\n" "cache 0x10, [r8, 0]\n" "nop\nnop\nnop\nnop\nnop\nnop\n" "cache 0x1f, [r8, 0]\n" @@ -159,11 +129,11 @@ void s7___flush_cache_all(void) : : : "r8"); } -static void s7_flush_cache_mm(struct mm_struct *mm) +void flush_cache_mm(struct mm_struct *mm) { if (!(mm->context)) return; - s7_flush_cache_all(); + flush_cache_all(); } /*if we flush a range precisely , the processing may be very long. @@ -176,8 +146,7 @@ The interface is provided in hopes that the port can find a suitably efficient method for removing multiple page sized regions from the cache. */ -static void -s7_flush_cache_range(struct vm_area_struct *vma, +void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; @@ -209,27 +178,26 @@ s7_flush_cache_range(struct vm_area_struct *vma, tmpend = (start | (PAGE_SIZE-1)) > end ? end : (start | (PAGE_SIZE-1)); - s7_flush_dcache_range(start, tmpend); + flush_dcache_range(start, tmpend); if (exec) - s7_flush_icache_range(start, tmpend); + flush_icache_range(start, tmpend); start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1); } } -static void -s7_flush_cache_page(struct vm_area_struct *vma, +void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) { int exec = vma->vm_flags & VM_EXEC; unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT); - s7_flush_dcache_range(kaddr, kaddr + PAGE_SIZE); + flush_dcache_range(kaddr, kaddr + PAGE_SIZE); if (exec) - s7_flush_icache_range(kaddr, kaddr + PAGE_SIZE); + flush_icache_range(kaddr, kaddr + PAGE_SIZE); } -static void s7_flush_cache_sigtramp(unsigned long addr) +void flush_cache_sigtramp(unsigned long addr) { __asm__ __volatile__( "cache 0x02, [%0, 0]\n" @@ -247,31 +215,12 @@ static void s7_flush_cache_sigtramp(unsigned long addr) : : "r" (addr)); } -/* -Just flush entire Dcache!! -You must ensure the page doesn't include instructions, because -the function will not flush the Icache. -The addr must be cache aligned. -*/ -static void s7_flush_data_cache_page(unsigned long addr) -{ - unsigned int i; - for (i = 0; i < (PAGE_SIZE / L1_CACHE_BYTES); i += L1_CACHE_BYTES) { - __asm__ __volatile__( - "cache 0x0e, [%0, 0]\n" - "cache 0x1a, [%0, 0]\n" - "nop\n" - : : "r" (addr)); - addr += L1_CACHE_BYTES; - } -} - /* 1. WB and invalid a cache line of Dcache 2. Drain Write Buffer the range must be smaller than PAGE_SIZE */ -static void s7_flush_dcache_range(unsigned long start, unsigned long end) +void flush_dcache_range(unsigned long start, unsigned long end) { int size, i; @@ -290,7 +239,7 @@ static void s7_flush_dcache_range(unsigned long start, unsigned long end) } } -static void s7_flush_icache_range(unsigned long start, unsigned long end) +void flush_icache_range(unsigned long start, unsigned long end) { int size, i; start = start & ~(L1_CACHE_BYTES - 1); -- cgit v1.2.3-70-g09d2 From 72ea3723411c18cace4c8c9e0ccf4116d5e6eaaa Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 19 Jun 2009 11:11:55 +0200 Subject: score: unset __ARCH_WANT_IPC_PARSE_VERSION This really should not be needed. The change for not changing the IPC code for every new architecture just went into 2.6.31, so we can skip it now. 
Signed-off-by: Arnd Bergmann --- arch/score/include/asm/unistd.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/score/include/asm/unistd.h b/arch/score/include/asm/unistd.h index f0f84deeb56..0d98b72f462 100644 --- a/arch/score/include/asm/unistd.h +++ b/arch/score/include/asm/unistd.h @@ -2,7 +2,6 @@ #define _ASM_SCORE_UNISTD_H #define __ARCH_HAVE_MMU -#define __ARCH_WANT_IPC_PARSE_VERSION #include <asm-generic/unistd.h> -- cgit v1.2.3-70-g09d2 From 78229db4c4f78f27ecf772fe7489a70530ba9862 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 19 Jun 2009 11:22:30 +0200 Subject: score: remove __{put,get}_user_unknown The point of these extern declarations is to provoke a link error, so an architecture must not provide a symbol for them. Signed-off-by: Arnd Bergmann --- arch/score/lib/string.S | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'arch') diff --git a/arch/score/lib/string.S b/arch/score/lib/string.S index 943d0911ea8..00b7d3a2fc6 100644 --- a/arch/score/lib/string.S +++ b/arch/score/lib/string.S @@ -182,15 +182,3 @@ ENTRY(__clear_user) 99: .word 0b, 99b .previous - - .align 2 -ENTRY(__put_user_unknown) - .set volatile - ldi r4, -EFAULT - br r3 - - .align 2 -ENTRY(__get_user_unknown) - ldi r5, 0 - ldi r4, -EFAULT - br r3 -- cgit v1.2.3-70-g09d2 From f673c032ed13ed8f3fda5922c2190da2892398bc Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 19 Jun 2009 11:31:54 +0200 Subject: score: add generic sys_call_table This adds back a sys_call_table to the score architecture, which got lost in the conversion to the generic unistd.h file. It's rather worrying that the code got submitted without a system call table, which evidently means that it got zero testing. Since the system call table has a different layout from the old one (which was modeled after the mips-o32 one), I also try to fix the entry.S path to use it. In the modified calling conventions, all system call arguments are passed in registers r4 through r9, instead of r4 through r7 plus stack for the fifth and sixth argument. This matches what other architectures do when they would normally pass arguments on the stack.
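
The table is generated rather than written out by hand: the generic unistd.h expands to one __SYSCALL(nr, call) line per system call, so redefining __SYSCALL as a designated initializer turns the very same header into an array, as the new sys_call_table.c below does. A self-contained user-space sketch of the trick (the stub functions and table size here are hypothetical stand-ins, not the kernel's):

#include <stdio.h>

/* stand-ins for kernel syscall entry points */
static long sys_exit_stub(void) { return 0; }
static long sys_fork_stub(void) { return 1; }

#define __NR_syscalls 2

/* each __SYSCALL(nr, call) line becomes one slot of the table */
#define __SYSCALL(nr, call) [nr] = (call),

static long (*sys_call_table[__NR_syscalls])(void) = {
	__SYSCALL(0, sys_exit_stub)
	__SYSCALL(1, sys_fork_stub)
};

int main(void)
{
	/* entry.S does the same indexing with the syscall number in r27 */
	printf("%ld\n", sys_call_table[1]());
	return 0;
}

In the kernel the initializer lines come from including the unistd header a second time, so the table can never fall out of sync with the syscall numbers.
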
Signed-off-by: Arnd Bergmann --- arch/score/kernel/Makefile | 3 ++- arch/score/kernel/entry.S | 47 +++----------------------------------- arch/score/kernel/sys_call_table.c | 12 ++++++++++ arch/score/kernel/sys_score.c | 9 +------- 4 files changed, 18 insertions(+), 53 deletions(-) create mode 100644 arch/score/kernel/sys_call_table.c (limited to 'arch') diff --git a/arch/score/kernel/Makefile b/arch/score/kernel/Makefile index 1e5de89d5c7..f218673b5d3 100644 --- a/arch/score/kernel/Makefile +++ b/arch/score/kernel/Makefile @@ -5,6 +5,7 @@ extra-y := head.o vmlinux.lds obj-y += entry.o init_task.o irq.o process.o ptrace.o \ - setup.o signal.o sys_score.o time.o traps.o + setup.o signal.o sys_score.o time.o traps.o \ + sys_call_table.o obj-$(CONFIG_MODULES) += module.o diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S index 0af89b2f16b..2f169175bbd 100644 --- a/arch/score/kernel/entry.S +++ b/arch/score/kernel/entry.S @@ -400,6 +400,8 @@ ENTRY(handle_sys) sw r4, [r0, PT_ORIG_R4] #for restart syscall sw r7, [r0, PT_ORIG_R7] #for restart syscall sw r27, [r0, PT_IS_SYSCALL] # it from syscall + sw r8, [r0, 16] # argument 5 from user r8 + sw r9, [r0, 20] # argument 6 from user r9 lw r9, [r0, PT_EPC] # skip syscall on return addi r9, 4 @@ -408,19 +410,14 @@ ENTRY(handle_sys) cmpi.c r27, __NR_syscalls # check syscall number bgtu illegal_syscall - slli r8, r27, 3 # get syscall routine + slli r8, r27, 2 # get syscall routine la r11, sys_call_table add r11, r11, r8 lw r10, [r11] # get syscall entry - lw r11, [r11, 4] # get number of args cmpz.c r10 beq illegal_syscall - cmpi.c r11, 4 # more than 4 arguments? - bgtu stackargs - -stack_done: lw r8, [r28, TI_FLAGS] li r9, _TIF_SYSCALL_TRACE and.c r8, r8, r9 @@ -475,44 +472,6 @@ syscall_trace_entry: 1: sw r4, [r0, PT_R2] # result j syscall_exit -stackargs: - lw r8, [r0, PT_R0] - andri.c r9, r8, 3 # test whether user sp is align a word - bne bad_stack - subi r11, 5 - slli r9, r11, 2 - add.c r9, r9, r8 - - bmi bad_stack - la r9, 3f # calculate branch address - slli r11, r11, 3 - sub r9, r9, r11 - br r9 - -2: lw r9, [r8, 20] # argument 6 from usp - sw r9, [r0, 20] - -3: lw r9, [r8, 16] # argument 5 from usp - sw r9, [r0, 16] - j stack_done - - .section __ex_table,"a" - .word 2b, bad_stack - .word 3b, bad_stack - .previous - - /* - * The stackpointer for a call with more than 4 arguments is bad. - * We probably should handle this case a bit more drastic. 
- */ -bad_stack: - neg r27, r27 # error - sw r27, [r0, PT_ORIG_R4] - sw r27, [r0, PT_R4] - ldi r8, 1 # set error flag - sw r8, [r0, PT_R7] - j syscall_return - illegal_syscall: ldi r4, -ENOSYS # error sw r4, [r0, PT_ORIG_R4] diff --git a/arch/score/kernel/sys_call_table.c b/arch/score/kernel/sys_call_table.c new file mode 100644 index 00000000000..287369b88c4 --- /dev/null +++ b/arch/score/kernel/sys_call_table.c @@ -0,0 +1,12 @@ +#include +#include +#include + +#include + +#undef __SYSCALL +#define __SYSCALL(nr, call) [nr] = (call), + +void *sys_call_table[__NR_syscalls] = { +#include +}; diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c index 68655f4cbce..33188610432 100644 --- a/arch/score/kernel/sys_score.c +++ b/arch/score/kernel/sys_score.c @@ -75,14 +75,7 @@ int score_clone(struct pt_regs *regs) if (!newsp) newsp = regs->regs[0]; parent_tidptr = (int __user *)regs->regs[6]; - - child_tidptr = NULL; - if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) { - int __user *__user *usp = (int __user *__user *)regs->regs[0]; - - if (get_user(child_tidptr, &usp[4])) - return -EFAULT; - } + child_tidptr = (int __user *)regs->regs[8]; return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr); -- cgit v1.2.3-70-g09d2 From b5022df4c275607f0824526eceb3c217e85279f3 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 19 Jun 2009 11:45:18 +0200 Subject: score: remove init_mm init_mm is now part of the common code and not provided by the architecture any more. Signed-off-by: Arnd Bergmann --- arch/score/kernel/init_task.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/score/kernel/init_task.c b/arch/score/kernel/init_task.c index 9eecde00932..54d9552afd6 100644 --- a/arch/score/kernel/init_task.c +++ b/arch/score/kernel/init_task.c @@ -26,8 +26,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); -EXPORT_SYMBOL(init_mm); /* * Initial thread structure. -- cgit v1.2.3-70-g09d2 From 2f476ef61f0e00fe3fcb96693b6a624a6c52fad9 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 19 Jun 2009 13:40:41 +0200 Subject: score: add prototypes for wrapped syscalls Every system call should be declared, so this adds missing declarations for the ones we were missing so far. 
Signed-off-by: Arnd Bergmann --- arch/score/include/asm/syscalls.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/score/include/asm/syscalls.h b/arch/score/include/asm/syscalls.h index 00c28e04fdf..1dd5e0d6b0c 100644 --- a/arch/score/include/asm/syscalls.h +++ b/arch/score/include/asm/syscalls.h @@ -1,8 +1,10 @@ #ifndef _ASM_SCORE_SYSCALLS_H #define _ASM_SCORE_SYSCALLS_H -asmlinkage long sys_clone(int flags, unsigned long stack, struct pt_regs *regs); -#define sys_clone sys_clone +asmlinkage long score_clone(struct pt_regs *regs); +asmlinkage long score_execve(struct pt_regs *regs); +asmlinkage long score_sigaltstack(struct pt_regs *regs); +asmlinkage long score_rt_sigreturn(struct pt_regs *regs); #include -- cgit v1.2.3-70-g09d2 From feaa0457ec8351cae855edc9a3052ac49322538e Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 20 Jun 2009 16:15:40 +0530 Subject: x86: ds.c fix invalid assignment Fixes the type mixups that cause the following sparse warnings: CHECK arch/x86/kernel/ds.c arch/x86/kernel/ds.c:549:19: warning: incorrect type in argument 2 (invalid types) arch/x86/kernel/ds.c:549:19: expected bad type enum bts_field field arch/x86/kernel/ds.c:549:19: got int arch/x86/kernel/ds.c:514:35: error: incompatible types for operation (*) arch/x86/kernel/ds.c:514:35: left side has type unsigned char static [unsigned] [toplevel] sizeof_ptr_field arch/x86/kernel/ds.c:514:35: right side has type bad type enum bts_field field arch/x86/kernel/ds.c:514:7: error: invalid assignment arch/x86/kernel/ds.c:514:35: error: incompatible types for operation (*) arch/x86/kernel/ds.c:514:35: left side has type unsigned char static [unsigned] [toplevel] sizeof_ptr_field arch/x86/kernel/ds.c:514:35: right side has type bad type enum bts_field field arch/x86/kernel/ds.c:514:7: error: invalid assignment arch/x86/kernel/ds.c:514:35: error: incompatible types for operation (*) arch/x86/kernel/ds.c:514:35: left side has type unsigned char static [unsigned] [toplevel] sizeof_ptr_field arch/x86/kernel/ds.c:514:35: right side has type bad type enum bts_field field arch/x86/kernel/ds.c:514:7: error: invalid assignment arch/x86/kernel/ds.c:514:35: error: incompatible types for operation (*) arch/x86/kernel/ds.c:514:35: left side has type unsigned char static [unsigned] [toplevel] sizeof_ptr_field arch/x86/kernel/ds.c:514:35: right side has type bad type enum bts_field field arch/x86/kernel/ds.c:514:7: error: invalid assignment arch/x86/kernel/ds.c:514:35: error: incompatible types for operation (*) arch/x86/kernel/ds.c:514:35: left side has type unsigned char static [unsigned] [toplevel] sizeof_ptr_field arch/x86/kernel/ds.c:514:35: right side has type bad type enum bts_field field arch/x86/kernel/ds.c:514:7: error: invalid assignment arch/x86/kernel/ds.c:514:35: error: incompatible types for operation (*) arch/x86/kernel/ds.c:514:35: left side has type unsigned char static [unsigned] [toplevel] sizeof_ptr_field arch/x86/kernel/ds.c:514:35: right side has type bad type enum bts_field field arch/x86/kernel/ds.c:514:7: error: invalid assignment arch/x86/kernel/ds.c:520:35: error: incompatible types for operation (*) arch/x86/kernel/ds.c:520:35: left side has type unsigned char static [unsigned] [toplevel] sizeof_ptr_field arch/x86/kernel/ds.c:520:35: right side has type bad type enum bts_field field arch/x86/kernel/ds.c:520:7: error: invalid assignment arch/x86/kernel/ds.c:520:35: error: incompatible types for operation (*) Signed-off-by: Jaswinder Singh 
Rajput Cc: Markus Metzger LKML-Reference: <1245494740.8613.12.camel@localhost.localdomain> Signed-off-by: Ingo Molnar --- arch/x86/kernel/ds.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c index 48bfe138603..ef42a038f1a 100644 --- a/arch/x86/kernel/ds.c +++ b/arch/x86/kernel/ds.c @@ -509,15 +509,15 @@ enum bts_field { bts_escape = ((unsigned long)-1 & ~bts_qual_mask) }; -static inline unsigned long bts_get(const char *base, enum bts_field field) +static inline unsigned long bts_get(const char *base, unsigned long field) { base += (ds_cfg.sizeof_ptr_field * field); return *(unsigned long *)base; } -static inline void bts_set(char *base, enum bts_field field, unsigned long val) +static inline void bts_set(char *base, unsigned long field, unsigned long val) { - base += (ds_cfg.sizeof_ptr_field * field);; + base += (ds_cfg.sizeof_ptr_field * field); (*(unsigned long *)base) = val; } -- cgit v1.2.3-70-g09d2 From e487683990972bf9aa4e688434c46ead76748bca Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sat, 20 Jun 2009 23:27:16 -0700 Subject: x86, mce: fix typo in comment in asm/mce.h Fix comment to match the actual declaration. Signed-off-by: Borislav Petkov Cc: Andi Kleen Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/mce.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 5cdd8d100ec..b50b9e9042c 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -9,7 +9,7 @@ */ #define MCG_BANKCNT_MASK 0xff /* Number of Banks */ -#define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */ +#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */ #define MCG_EXT_P (1ULL<<9) /* Extended registers available */ #define MCG_CMCI_P (1ULL<<10) /* CMCI supported */ #define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */ -- cgit v1.2.3-70-g09d2 From a95436e44a76a32dcbe7c8df59701ddde53017c1 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sat, 20 Jun 2009 23:28:22 -0700 Subject: x86, mce: use atomic_inc_return() instead of add by 1 Use atomic_inc_return() instead of atomic_add_return() by 1. Signed-off-by: Borislav Petkov Cc: Andi Kleen Signed-off-by: H. Peter Anvin --- arch/x86/kernel/cpu/mcheck/mce.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 284d1de968b..7da8fec9ca8 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -242,7 +242,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp) /* * Make sure only one CPU runs in machine check panic */ - if (atomic_add_return(1, &mce_paniced) > 1) + if (atomic_inc_return(&mce_paniced) > 1) wait_for_panic(); barrier(); @@ -705,7 +705,7 @@ static int mce_start(int *no_way_out) * global_nwo should be updated before mce_callin */ smp_wmb(); - order = atomic_add_return(1, &mce_callin); + order = atomic_inc_return(&mce_callin); /* * Wait for everyone. -- cgit v1.2.3-70-g09d2 From e831a9c6186ca1f63fdf2f41628193dd690ab440 Mon Sep 17 00:00:00 2001 From: Chen Liqin Date: Mon, 22 Jun 2009 17:10:57 +0800 Subject: score: move save arg5 and arg6 instruction in front of enable_irq Because enable_irq clobbers r8 before arg5 is saved.
modified: arch/score/kernel/entry.S Signed-off-by: Chen Liqin Signed-off-by: Arnd Bergmann --- arch/score/kernel/entry.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S index 2f169175bbd..10e859ddefd 100644 --- a/arch/score/kernel/entry.S +++ b/arch/score/kernel/entry.S @@ -395,13 +395,13 @@ ENTRY(resume) ENTRY(handle_sys) SAVE_ALL + sw r8, [r0, 16] # argument 5 from user r8 + sw r9, [r0, 20] # argument 6 from user r9 enable_irq sw r4, [r0, PT_ORIG_R4] #for restart syscall sw r7, [r0, PT_ORIG_R7] #for restart syscall sw r27, [r0, PT_IS_SYSCALL] # it from syscall - sw r8, [r0, 16] # argument 5 from user r8 - sw r9, [r0, 20] # argument 6 from user r9 lw r9, [r0, PT_EPC] # skip syscall on return addi r9, 4 -- cgit v1.2.3-70-g09d2 From c9944881acf02b6f25fa62a0441a98b7dc0d7ae6 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Wed, 24 Jun 2009 13:42:40 +0800 Subject: crypto: aes-ni - Don't print message with KERN_ERR on old system When the aes-intel module is loaded on a system that does not have the AES instructions, it prints Intel AES-NI instructions are not detected. at level KERN_ERR. Since aes-intel is aliased to "aes" it will be tried whenever anything uses AES and spam the console. This doesn't match existing practice for how to handle "no hardware" when initializing a module, so downgrade the message to KERN_INFO. Signed-off-by: Roland Dreier Signed-off-by: Herbert Xu --- arch/x86/crypto/aesni-intel_glue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index c580c5ec1ca..d3ec8d588d4 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -636,7 +636,7 @@ static int __init aesni_init(void) int err; if (!cpu_has_aes) { - printk(KERN_ERR "Intel AES-NI instructions are not detected.\n"); + printk(KERN_INFO "Intel AES-NI instructions are not detected.\n"); return -ENODEV; } if ((err = crypto_register_alg(&aesni_alg))) -- cgit v1.2.3-70-g09d2 From e74e396204bfcb67570ba4517b08f5918e69afea Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 30 Mar 2009 19:07:44 +0900 Subject: percpu: use dynamic percpu allocator as the default percpu allocator This patch makes most !CONFIG_HAVE_SETUP_PER_CPU_AREA archs use dynamic percpu allocator. The first chunk is allocated using embedding helper and 8k is reserved for modules. This ensures that the new allocator behaves almost identically to the original allocator as long as static percpu variables are concerned, so it shouldn't introduce much breakage. s390 and alpha use custom SHIFT_PERCPU_PTR() to work around addressing range limit the addressing model imposes. Unfortunately, this breaks if the address is specified using a variable, so for now, the two archs aren't converted. The following architectures are affected by this change. * sh * arm * cris * mips * sparc(32) * blackfin * avr32 * parisc (broken, under investigation) * m32r * powerpc(32) As this change makes the dynamic allocator the default one, CONFIG_HAVE_DYNAMIC_PER_CPU_AREA is replaced with its invert - CONFIG_HAVE_LEGACY_PER_CPU_AREA, which is added to yet-to-be converted archs. These archs implement their own setup_per_cpu_areas() and the conversion is not trivial. * powerpc(64) * sparc(64) * ia64 * alpha * s390 Boot and batch alloc/free tests on x86_32 with debug code (x86_32 doesn't use default first chunk initialization). 
Compile tested on sparc(32), powerpc(32), arm and alpha. Kyle McMartin reported that this change breaks parisc. The problem is still under investigation and he is okay with pushing this patch forward and fixing parisc later. [ Impact: use dynamic allocator for most archs w/o custom percpu setup ] Signed-off-by: Tejun Heo Acked-by: Rusty Russell Acked-by: David S. Miller Acked-by: Benjamin Herrenschmidt Acked-by: Martin Schwidefsky Reviewed-by: Christoph Lameter Cc: Paul Mundt Cc: Russell King Cc: Mikael Starvik Cc: Ralf Baechle Cc: Bryan Wu Cc: Kyle McMartin Cc: Matthew Wilcox Cc: Grant Grundler Cc: Hirokazu Takata Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Heiko Carstens Cc: Ingo Molnar --- arch/alpha/Kconfig | 3 +++ arch/ia64/Kconfig | 3 +++ arch/powerpc/Kconfig | 3 +++ arch/s390/Kconfig | 3 +++ arch/sparc/Kconfig | 3 +++ arch/x86/Kconfig | 3 --- include/linux/percpu.h | 12 +++++++++--- init/main.c | 24 ------------------------ kernel/module.c | 6 +++--- mm/Makefile | 2 +- mm/allocpercpu.c | 28 ++++++++++++++++++++++++++++ mm/percpu.c | 40 +++++++++++++++++++++++++++++++++++++++- 12 files changed, 95 insertions(+), 35 deletions(-) (limited to 'arch') diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 9fb8aae5c39..05d86407188 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -70,6 +70,9 @@ config AUTO_IRQ_AFFINITY depends on SMP default y +config HAVE_LEGACY_PER_CPU_AREA + def_bool y + source "init/Kconfig" source "kernel/Kconfig.freezer" diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 170042b420d..328d2f8b8c3 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -89,6 +89,9 @@ config GENERIC_TIME_VSYSCALL bool default y +config HAVE_LEGACY_PER_CPU_AREA + def_bool y + config HAVE_SETUP_PER_CPU_AREA def_bool y diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index bf6cedfa05d..a774c2acbe6 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -46,6 +46,9 @@ config GENERIC_HARDIRQS_NO__DO_IRQ bool default y +config HAVE_LEGACY_PER_CPU_AREA + def_bool PPC64 + config HAVE_SETUP_PER_CPU_AREA def_bool PPC64 diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index a14dba0e4d6..f4a3cc62d28 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -75,6 +75,9 @@ config VIRT_CPU_ACCOUNTING config ARCH_SUPPORTS_DEBUG_PAGEALLOC def_bool y +config HAVE_LEGACY_PER_CPU_AREA + def_bool y + mainmenu "Linux Kernel Configuration" config S390 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 3f8b6a92eab..7a8698b913f 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -92,6 +92,9 @@ config AUDIT_ARCH bool default y +config HAVE_LEGACY_PER_CPU_AREA + def_bool y if SPARC64 + config HAVE_SETUP_PER_CPU_AREA def_bool y if SPARC64 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d1430ef6b4f..a48a90076d8 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -149,9 +149,6 @@ config ARCH_HAS_CACHE_LINE_SIZE config HAVE_SETUP_PER_CPU_AREA def_bool y -config HAVE_DYNAMIC_PER_CPU_AREA - def_bool y - config HAVE_CPUMASK_OF_CPU_MAP def_bool X86_64_SMP diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 26fd9d12f05..e5000343dd6 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -34,7 +34,7 @@ #ifdef CONFIG_SMP -#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA +#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA /* minimum unit size, also is the maximum supported allocation size */ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10) @@ -80,7 +80,7 @@ extern ssize_t __init pcpu_embed_first_chunk( extern void *__alloc_reserved_percpu(size_t size, 
size_t align); -#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */ +#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */ struct percpu_data { void *ptrs[1]; @@ -99,11 +99,15 @@ struct percpu_data { (__typeof__(ptr))__p->ptrs[(cpu)]; \ }) -#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */ +#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */ extern void *__alloc_percpu(size_t size, size_t align); extern void free_percpu(void *__pdata); +#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA +extern void __init setup_per_cpu_areas(void); +#endif + #else /* CONFIG_SMP */ #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) @@ -124,6 +128,8 @@ static inline void free_percpu(void *p) kfree(p); } +static inline void __init setup_per_cpu_areas(void) { } + #endif /* CONFIG_SMP */ #define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \ diff --git a/init/main.c b/init/main.c index 09131ec090c..602d724afa5 100644 --- a/init/main.c +++ b/init/main.c @@ -357,7 +357,6 @@ static void __init smp_init(void) #define smp_init() do { } while (0) #endif -static inline void setup_per_cpu_areas(void) { } static inline void setup_nr_cpu_ids(void) { } static inline void smp_prepare_cpus(unsigned int maxcpus) { } @@ -378,29 +377,6 @@ static void __init setup_nr_cpu_ids(void) nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1; } -#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; - -EXPORT_SYMBOL(__per_cpu_offset); - -static void __init setup_per_cpu_areas(void) -{ - unsigned long size, i; - char *ptr; - unsigned long nr_possible_cpus = num_possible_cpus(); - - /* Copy section for each CPU (we discard the original) */ - size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE); - ptr = alloc_bootmem_pages(size * nr_possible_cpus); - - for_each_possible_cpu(i) { - __per_cpu_offset[i] = ptr - __per_cpu_start; - memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); - ptr += size; - } -} -#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ - /* Called by boot processor to activate the rest. */ static void __init smp_init(void) { diff --git a/kernel/module.c b/kernel/module.c index 38928fcaff2..f5934954fa9 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -364,7 +364,7 @@ EXPORT_SYMBOL_GPL(find_module); #ifdef CONFIG_SMP -#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA +#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA static void *percpu_modalloc(unsigned long size, unsigned long align, const char *name) @@ -389,7 +389,7 @@ static void percpu_modfree(void *freeme) free_percpu(freeme); } -#else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */ +#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */ /* Number of blocks used and allocated. 
*/ static unsigned int pcpu_num_used, pcpu_num_allocated; @@ -535,7 +535,7 @@ static int percpu_modinit(void) } __initcall(percpu_modinit); -#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */ +#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */ static unsigned int find_pcpusec(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, diff --git a/mm/Makefile b/mm/Makefile index 5e0bd642669..c77c6487552 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -33,7 +33,7 @@ obj-$(CONFIG_FAILSLAB) += failslab.o obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o obj-$(CONFIG_FS_XIP) += filemap_xip.o obj-$(CONFIG_MIGRATION) += migrate.o -ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA +ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA obj-$(CONFIG_SMP) += percpu.o else obj-$(CONFIG_SMP) += allocpercpu.o diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c index dfdee6a4735..df34ceae0c6 100644 --- a/mm/allocpercpu.c +++ b/mm/allocpercpu.c @@ -5,6 +5,8 @@ */ #include #include +#include +#include #ifndef cache_line_size #define cache_line_size() L1_CACHE_BYTES @@ -147,3 +149,29 @@ void free_percpu(void *__pdata) kfree(__percpu_disguise(__pdata)); } EXPORT_SYMBOL_GPL(free_percpu); + +/* + * Generic percpu area setup. + */ +#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA +unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; + +EXPORT_SYMBOL(__per_cpu_offset); + +void __init setup_per_cpu_areas(void) +{ + unsigned long size, i; + char *ptr; + unsigned long nr_possible_cpus = num_possible_cpus(); + + /* Copy section for each CPU (we discard the original) */ + size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE); + ptr = alloc_bootmem_pages(size * nr_possible_cpus); + + for_each_possible_cpu(i) { + __per_cpu_offset[i] = ptr - __per_cpu_start; + memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); + ptr += size; + } +} +#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ diff --git a/mm/percpu.c b/mm/percpu.c index b70f2acd885..b14984566f5 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -43,7 +43,7 @@ * * To use this allocator, arch code should do the followings. * - * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA + * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA * * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate * regular address to percpu pointer and back if they need to be @@ -1275,3 +1275,41 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size, reserved_size, dyn_size, pcpue_unit_size, pcpue_ptr, NULL); } + +/* + * Generic percpu area setup. + * + * The embedding helper is used because its behavior closely resembles + * the original non-dynamic generic percpu area setup. This is + * important because many archs have addressing restrictions and might + * fail if the percpu area is located far away from the previous + * location. As an added bonus, in non-NUMA cases, embedding is + * generally a good idea TLB-wise because percpu area can piggy back + * on the physical linear memory mapping which uses large page + * mappings on applicable archs. + */ +#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA +unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; +EXPORT_SYMBOL(__per_cpu_offset); + +void __init setup_per_cpu_areas(void) +{ + size_t static_size = __per_cpu_end - __per_cpu_start; + ssize_t unit_size; + unsigned long delta; + unsigned int cpu; + + /* + * Always reserve area for module percpu variables. That's + * what the legacy allocator did. 
+ */ + unit_size = pcpu_embed_first_chunk(static_size, PERCPU_MODULE_RESERVE, + PERCPU_DYNAMIC_RESERVE, -1); + if (unit_size < 0) + panic("Failed to initialized percpu areas."); + + delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; + for_each_possible_cpu(cpu) + __per_cpu_offset[cpu] = delta + cpu * unit_size; +} +#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ -- cgit v1.2.3-70-g09d2 From 405d967dc70002991f8fc35c20e0d3cbc7614f63 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Jun 2009 15:13:38 +0900 Subject: linker script: throw away .discard section x86 throws away .discard section but no other archs do. Also, .discard is not thrown away while linking modules. Make every arch and module linking throw it away. This will be used to define dummy variables for percpu declarations and definitions. This patch is based on Ivan Kokshaysky's alpha percpu patch. [ Impact: always throw away everything in .discard ] Signed-off-by: Tejun Heo Cc: Ivan Kokshaysky Cc: Richard Henderson Cc: Russell King Cc: Haavard Skinnemoen Cc: Bryan Wu Cc: Mikael Starvik Cc: Jesper Nilsson Cc: David Howells Cc: Yoshinori Sato Cc: Tony Luck Cc: Hirokazu Takata Cc: Geert Uytterhoeven Cc: Michal Simek Cc: Ralf Baechle Cc: Kyle McMartin Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: Paul Mundt Cc: David S. Miller Cc: Jeff Dike Cc: Chris Zankel Cc: Rusty Russell Cc: Ingo Molnar --- Makefile | 2 +- arch/alpha/kernel/vmlinux.lds.S | 1 + arch/arm/kernel/vmlinux.lds.S | 1 + arch/avr32/kernel/vmlinux.lds.S | 1 + arch/blackfin/kernel/vmlinux.lds.S | 1 + arch/cris/kernel/vmlinux.lds.S | 1 + arch/frv/kernel/vmlinux.lds.S | 2 ++ arch/h8300/kernel/vmlinux.lds.S | 1 + arch/ia64/kernel/vmlinux.lds.S | 1 + arch/m32r/kernel/vmlinux.lds.S | 1 + arch/m68k/kernel/vmlinux-std.lds | 1 + arch/m68k/kernel/vmlinux-sun3.lds | 1 + arch/m68knommu/kernel/vmlinux.lds.S | 1 + arch/microblaze/kernel/vmlinux.lds.S | 2 ++ arch/mips/kernel/vmlinux.lds.S | 1 + arch/mn10300/kernel/vmlinux.lds.S | 1 + arch/parisc/kernel/vmlinux.lds.S | 1 + arch/powerpc/kernel/vmlinux.lds.S | 1 + arch/s390/kernel/vmlinux.lds.S | 1 + arch/sh/kernel/vmlinux.lds.S | 1 + arch/sparc/kernel/vmlinux.lds.S | 1 + arch/um/kernel/dyn.lds.S | 2 ++ arch/um/kernel/uml.lds.S | 2 ++ arch/xtensa/kernel/vmlinux.lds.S | 1 + include/asm-generic/vmlinux.lds.h | 8 ++++++++ scripts/module-common.lds | 8 ++++++++ 26 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 scripts/module-common.lds (limited to 'arch') diff --git a/Makefile b/Makefile index 46e1c9d03d5..12245be0512 100644 --- a/Makefile +++ b/Makefile @@ -327,7 +327,7 @@ CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ MODFLAGS = -DMODULE CFLAGS_MODULE = $(MODFLAGS) AFLAGS_MODULE = $(MODFLAGS) -LDFLAGS_MODULE = +LDFLAGS_MODULE = -T $(srctree)/scripts/module-common.lds CFLAGS_KERNEL = AFLAGS_KERNEL = CFLAGS_GCOV = -fprofile-arcs -ftest-coverage diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S index b9d6568e5f7..75fe1d6877e 100644 --- a/arch/alpha/kernel/vmlinux.lds.S +++ b/arch/alpha/kernel/vmlinux.lds.S @@ -139,6 +139,7 @@ SECTIONS EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) } .mdebug 0 : { diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 6c077979254..e256c57b898 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -82,6 +82,7 @@ SECTIONS EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) *(.ARM.exidx.exit.text) *(.ARM.extab.exit.text) 
#ifndef CONFIG_MMU diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S index 7910d41eb88..b8324608ec0 100644 --- a/arch/avr32/kernel/vmlinux.lds.S +++ b/arch/avr32/kernel/vmlinux.lds.S @@ -131,6 +131,7 @@ SECTIONS /DISCARD/ : { EXIT_DATA *(.exitcall.exit) + *(.discard) } DWARF_DEBUG diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index 6ac307ca0d8..6e8eabd8f0a 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S @@ -280,5 +280,6 @@ SECTIONS /DISCARD/ : { *(.exitcall.exit) + *(.discard) } } diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S index 0d2adfc794d..a3175ebb38c 100644 --- a/arch/cris/kernel/vmlinux.lds.S +++ b/arch/cris/kernel/vmlinux.lds.S @@ -145,6 +145,7 @@ SECTIONS EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) } dram_end = dram_start + (CONFIG_ETRAX_DRAM_SIZE - __CONFIG_ETRAX_VMEM_SIZE)*1024*1024; diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S index 22d9787406e..64b5a5e4d35 100644 --- a/arch/frv/kernel/vmlinux.lds.S +++ b/arch/frv/kernel/vmlinux.lds.S @@ -177,6 +177,8 @@ SECTIONS .debug_ranges 0 : { *(.debug_ranges) } .comment 0 : { *(.comment) } + + /DISCARD/ : { *(.discard) } } __kernel_image_size_no_bss = __bss_start - __kernel_image_start; diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S index 43a87b9085b..03d6c0df33d 100644 --- a/arch/h8300/kernel/vmlinux.lds.S +++ b/arch/h8300/kernel/vmlinux.lds.S @@ -154,6 +154,7 @@ SECTIONS } /DISCARD/ : { *(.exitcall.exit) + *(.discard) } .romfs : { diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 4a95e86b9ac..13d95897587 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -29,6 +29,7 @@ SECTIONS EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) *(.IA_64.unwind.exit.text) *(.IA_64.unwind_info.exit.text) } diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S index 4179adf6c62..480a49944cf 100644 --- a/arch/m32r/kernel/vmlinux.lds.S +++ b/arch/m32r/kernel/vmlinux.lds.S @@ -125,6 +125,7 @@ SECTIONS EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) } /* Stabs debugging sections. */ diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds index 01d212bb05a..905a797ada9 100644 --- a/arch/m68k/kernel/vmlinux-std.lds +++ b/arch/m68k/kernel/vmlinux-std.lds @@ -87,6 +87,7 @@ SECTIONS EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) } /* Stabs debugging sections. */ diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds index c192f773db9..47d04be322a 100644 --- a/arch/m68k/kernel/vmlinux-sun3.lds +++ b/arch/m68k/kernel/vmlinux-sun3.lds @@ -82,6 +82,7 @@ __init_begin = .; EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) } .crap : { diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S index b7fe505e358..68111a61a77 100644 --- a/arch/m68knommu/kernel/vmlinux.lds.S +++ b/arch/m68knommu/kernel/vmlinux.lds.S @@ -188,6 +188,7 @@ SECTIONS { EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) } .bss : { diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index d34d38dcd12..a207543c592 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S @@ -162,4 +162,6 @@ SECTIONS { } . 
= ALIGN(4096); _end = .; + + /DISCARD/ : { *(.discard) } } diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 58738c8d754..45901609b74 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -179,6 +179,7 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { *(.exitcall.exit) + *(.discard) /* ABI crap starts here */ *(.MIPS.options) diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S index 24de6b90f40..5d9f2f96ad9 100644 --- a/arch/mn10300/kernel/vmlinux.lds.S +++ b/arch/mn10300/kernel/vmlinux.lds.S @@ -146,6 +146,7 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { *(.exitcall.exit) + *(.discard) } STABS_DEBUG diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index fd2cc4fd2b6..ccf58341845 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -240,6 +240,7 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { *(.exitcall.exit) + *(.discard) #ifdef CONFIG_64BIT /* temporary hack until binutils is fixed to not emit these * for static binaries diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 8ef8a14abc9..7fca9355fd3 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -40,6 +40,7 @@ SECTIONS /* Sections to be discarded. */ /DISCARD/ : { *(.exitcall.exit) + *(.discard) EXIT_DATA } diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index a53db23ee09..98867dfea46 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -161,6 +161,7 @@ SECTIONS /DISCARD/ : { EXIT_DATA *(.exitcall.exit) + *(.discard) } /* Debugging sections. */ diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index f53c76acaed..766976d27b2 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -171,6 +171,7 @@ SECTIONS */ /DISCARD/ : { *(.exitcall.exit) + *(.discard) } STABS_DEBUG diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index fcbbd000ec0..d63cf914667 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -175,6 +175,7 @@ SECTIONS EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) } STABS_DEBUG diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S index 9975e1ab44f..2916d6eadff 100644 --- a/arch/um/kernel/dyn.lds.S +++ b/arch/um/kernel/dyn.lds.S @@ -156,4 +156,6 @@ SECTIONS STABS_DEBUG DWARF_DEBUG + + /DISCARD/ : { *(.discard) } } diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S index 11b835248b8..1f8a622cabe 100644 --- a/arch/um/kernel/uml.lds.S +++ b/arch/um/kernel/uml.lds.S @@ -100,4 +100,6 @@ SECTIONS STABS_DEBUG DWARF_DEBUG + + /DISCARD/ : { *(.discard) } } diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index 41c159cd872..b1e24638acd 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -287,6 +287,7 @@ SECTIONS EXIT_TEXT EXIT_DATA *(.exitcall.exit) + *(.discard) } .xt.lit : { *(.xt.lit) } diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 55413e568f0..a19120c4e10 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -628,6 +628,14 @@ #define INITRAMFS #endif +#define DISCARDS \ + /DISCARD/ : { \ + EXIT_TEXT \ + EXIT_DATA \ + *(.exitcall.exit) \ + *(.discard) \ + } + /** * PERCPU_VADDR - define output section for percpu area * @vaddr: explicit base 
address (optional) diff --git a/scripts/module-common.lds b/scripts/module-common.lds new file mode 100644 index 00000000000..47a1f9ae0ed --- /dev/null +++ b/scripts/module-common.lds @@ -0,0 +1,8 @@ +/* + * Common module linker script, always used when linking a module. + * Archs are free to supply their own linker scripts. ld will + * combine them automatically. + */ +SECTIONS { + /DISCARD/ : { *(.discard) } +} -- cgit v1.2.3-70-g09d2 From fe87f94f341a4b4097285b46f003059b26eb59bf Mon Sep 17 00:00:00 2001 From: Jesper Nilsson Date: Wed, 24 Jun 2009 15:13:41 +0900 Subject: CRIS: Change DEFINE_PER_CPU of current_pgd to be non volatile. The DEFINE_PER_CPU of current_pgd was on CRIS defined using volatile, which is not needed. Remove volatile. Tested on an ARTPEC-3 (CRISv32) board. tj: extern DEFINE_PER_CPU() replaced with DECLARE_PER_CPU() [ Impact: code cleanup ] Signed-off-by: Jesper Nilsson Signed-off-by: Tejun Heo --- arch/cris/include/asm/mmu_context.h | 3 ++- arch/cris/mm/fault.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/cris/include/asm/mmu_context.h b/arch/cris/include/asm/mmu_context.h index 72ba08dcfd1..1d45fd6365b 100644 --- a/arch/cris/include/asm/mmu_context.h +++ b/arch/cris/include/asm/mmu_context.h @@ -17,7 +17,8 @@ extern void switch_mm(struct mm_struct *prev, struct mm_struct *next, * registers like cr3 on the i386 */ -extern volatile DEFINE_PER_CPU(pgd_t *,current_pgd); /* defined in arch/cris/mm/fault.c */ +/* defined in arch/cris/mm/fault.c */ +DECLARE_PER_CPU(pgd_t *, current_pgd); static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c index f925115e325..4a7cdd9ea1e 100644 --- a/arch/cris/mm/fault.c +++ b/arch/cris/mm/fault.c @@ -29,7 +29,7 @@ extern void die_if_kernel(const char *, struct pt_regs *, long); /* current active page directory */ -volatile DEFINE_PER_CPU(pgd_t *,current_pgd); +DEFINE_PER_CPU(pgd_t *, current_pgd); unsigned long cris_signal_return_page; /* -- cgit v1.2.3-70-g09d2 From 204fba4aa303ea4a7bb726a539bf4a5b9e3203d0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Jun 2009 15:13:45 +0900 Subject: percpu: cleanup percpu array definitions Currently, the following three different ways to define percpu arrays are in use. 1. DEFINE_PER_CPU(elem_type[array_len], array_name); 2. DEFINE_PER_CPU(elem_type, array_name[array_len]); 3. DEFINE_PER_CPU(elem_type, array_name)[array_len]; Unify to #1 which correctly separates the roles of the two parameters and thus allows more flexibility in the way percpu variables are defined. [ Impact: cleanup ] Signed-off-by: Tejun Heo Reviewed-by: Christoph Lameter Cc: Ingo Molnar Cc: Tony Luck Cc: Benjamin Herrenschmidt Cc: Thomas Gleixner Cc: Jeremy Fitzhardinge Cc: linux-mm@kvack.org Cc: Christoph Lameter Cc: David S. 
Miller --- arch/ia64/kernel/smp.c | 2 +- arch/ia64/sn/kernel/setup.c | 2 +- arch/powerpc/mm/stab.c | 2 +- arch/powerpc/platforms/ps3/smp.c | 2 +- arch/x86/kernel/cpu/cpu_debug.c | 4 ++-- arch/x86/kernel/cpu/mcheck/mce_amd.c | 2 +- arch/x86/kernel/cpu/perf_counter.c | 2 +- drivers/xen/events.c | 4 ++-- mm/quicklist.c | 2 +- mm/slub.c | 4 ++-- net/ipv4/syncookies.c | 2 +- net/ipv6/syncookies.c | 2 +- 12 files changed, 15 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index f0c521b0ba4..94cf78ba28f 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -58,7 +58,7 @@ static struct local_tlb_flush_counts { unsigned int count; } __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS]; -static DEFINE_PER_CPU(unsigned short, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned; +static DEFINE_PER_CPU(unsigned short [NR_CPUS], shadow_flush_counts) ____cacheline_aligned; #define IPI_CALL_FUNC 0 #define IPI_CPU_STOP 1 diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index e456f062f24..ece1bf99449 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second); DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); EXPORT_PER_CPU_SYMBOL(__sn_hub_info); -DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); +DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid); EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda); diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c index 98cd1dc2ae7..6e9b69c9985 100644 --- a/arch/powerpc/mm/stab.c +++ b/arch/powerpc/mm/stab.c @@ -31,7 +31,7 @@ struct stab_entry { #define NR_STAB_CACHE_ENTRIES 8 static DEFINE_PER_CPU(long, stab_cache_ptr); -static DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]); +static DEFINE_PER_CPU(long [NR_STAB_CACHE_ENTRIES], stab_cache); /* * Create a segment table entry for the given esid/vsid pair. 
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c index f6e04bcc70e..51ffde40af2 100644 --- a/arch/powerpc/platforms/ps3/smp.c +++ b/arch/powerpc/platforms/ps3/smp.c @@ -37,7 +37,7 @@ */ #define MSG_COUNT 4 -static DEFINE_PER_CPU(unsigned int, ps3_ipi_virqs[MSG_COUNT]); +static DEFINE_PER_CPU(unsigned int [MSG_COUNT], ps3_ipi_virqs); static void do_message_pass(int target, int msg) { diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c index 6b2a52dd040..dca325c0399 100644 --- a/arch/x86/kernel/cpu/cpu_debug.c +++ b/arch/x86/kernel/cpu/cpu_debug.c @@ -30,8 +30,8 @@ #include #include -static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]); -static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]); +static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr); +static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr); static DEFINE_PER_CPU(int, cpu_priv_count); static DEFINE_MUTEX(cpu_debug_lock); diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index ddae21620bd..bd2a2fa8462 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -69,7 +69,7 @@ struct threshold_bank { struct threshold_block *blocks; cpumask_var_t cpus; }; -static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]); +static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks); #ifdef CONFIG_SMP static unsigned char shared_bank[NR_BANKS] = { diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 76dfef23f78..4946288d683 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -862,7 +862,7 @@ amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) x86_pmu_disable_counter(hwc, idx); } -static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]); +static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], prev_left); /* * Set the next IRQ period, based on the hwc->period_left value. diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 891d2e90753..ab581fa6268 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -47,10 +47,10 @@ static DEFINE_SPINLOCK(irq_mapping_update_lock); /* IRQ <-> VIRQ mapping. */ -static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1}; +static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1}; /* IRQ <-> IPI mapping */ -static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1}; +static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1}; /* Interrupt types. 
*/ enum xen_irq_type { diff --git a/mm/quicklist.c b/mm/quicklist.c index e66d07d1b4f..6eedf7e473d 100644 --- a/mm/quicklist.c +++ b/mm/quicklist.c @@ -19,7 +19,7 @@ #include #include -DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK]; +DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist); #define FRACTION_OF_NODE_MEM 16 diff --git a/mm/slub.c b/mm/slub.c index ce62b770e2f..23bb79acc4b 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2086,8 +2086,8 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s) */ #define NR_KMEM_CACHE_CPU 100 -static DEFINE_PER_CPU(struct kmem_cache_cpu, - kmem_cache_cpu)[NR_KMEM_CACHE_CPU]; +static DEFINE_PER_CPU(struct kmem_cache_cpu [NR_KMEM_CACHE_CPU], + kmem_cache_cpu); static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free); static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS); diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index cd2b97f1b6e..84d90f2799b 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -37,7 +37,7 @@ __initcall(init_syncookies); #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) -static DEFINE_PER_CPU(__u32, cookie_scratch)[16 + 5 + SHA_WORKSPACE_WORDS]; +static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], cookie_scratch); static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, u32 count, int c) diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 8c2513982b6..23d0d6db046 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -74,7 +74,7 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, return child; } -static DEFINE_PER_CPU(__u32, cookie_scratch)[16 + 5 + SHA_WORKSPACE_WORDS]; +static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], cookie_scratch); static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr, __be16 sport, __be16 dport, u32 count, int c) -- cgit v1.2.3-70-g09d2 From b9bf3121af348d9255f1c917830fe8c2df52efcb Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Jun 2009 15:13:47 +0900 Subject: percpu: use DEFINE_PER_CPU_SHARED_ALIGNED() There are a few places where ___cacheline_aligned* is used with DEFINE_PER_CPU(). Use DEFINE_PER_CPU_SHARED_ALIGNED() instead. DEFINE_PER_CPU_SHARED_ALIGNED() applies alignment only on SMPs. While all other converted places used _in_smp variant or only get compiled for SMP, net/rds used unconditional ____cacheline_aligned. I don't see any reason these data structures should be aligned on UP and thus converted together. 
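
The helper exists so that the cacheline padding costs nothing on UP: the attribute it applies compiles away when CONFIG_SMP is off. A self-contained user-space sketch of the mechanism follows; the cache line size is an assumption, and the stand-in macro is simplified (the real DEFINE_PER_CPU_SHARED_ALIGNED additionally places the variable in a dedicated ..shared_aligned percpu section):

#include <stdio.h>

#define L1_CACHE_BYTES 64		/* assumption: 64-byte cache lines */

#ifdef CONFIG_SMP			/* the kernel gets this from Kconfig */
#define ____cacheline_aligned_in_smp \
	__attribute__((__aligned__(L1_CACHE_BYTES)))
#else
#define ____cacheline_aligned_in_smp	/* UP build: no padding at all */
#endif

/* simplified stand-in for the kernel macro */
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
	type name ____cacheline_aligned_in_smp

struct stats { unsigned long hits, misses; };

DEFINE_PER_CPU_SHARED_ALIGNED(struct stats, demo_stats);

int main(void)
{
	/* prints 64 when compiled with -DCONFIG_SMP, 8 otherwise */
	printf("%lu\n", (unsigned long)__alignof__(demo_stats));
	return 0;
}

Building once with gcc -DCONFIG_SMP and once without shows the alignment appearing and disappearing, which is why converting the unconditional ____cacheline_aligned users along with the rest is safe.
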
Signed-off-by: Tejun Heo Cc: Mike Frysinger Cc: Tony Luck Cc: Andy Grover --- arch/blackfin/mm/sram-alloc.c | 6 +++--- arch/ia64/kernel/smp.c | 3 ++- kernel/sched.c | 4 ++-- net/rds/ib_stats.c | 2 +- net/rds/iw_stats.c | 2 +- net/rds/page.c | 2 +- 6 files changed, 10 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c index 0bc3c4ef0aa..99e4dbb1dfd 100644 --- a/arch/blackfin/mm/sram-alloc.c +++ b/arch/blackfin/mm/sram-alloc.c @@ -42,9 +42,9 @@ #include #include "blackfin_sram.h" -static DEFINE_PER_CPU(spinlock_t, l1sram_lock) ____cacheline_aligned_in_smp; -static DEFINE_PER_CPU(spinlock_t, l1_data_sram_lock) ____cacheline_aligned_in_smp; -static DEFINE_PER_CPU(spinlock_t, l1_inst_sram_lock) ____cacheline_aligned_in_smp; +static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock); +static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock); +static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock); static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp; /* the data structure for L1 scratchpad and DATA SRAM */ diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 94cf78ba28f..93ebfea43c6 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -58,7 +58,8 @@ static struct local_tlb_flush_counts { unsigned int count; } __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS]; -static DEFINE_PER_CPU(unsigned short [NR_CPUS], shadow_flush_counts) ____cacheline_aligned; +static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned short [NR_CPUS], + shadow_flush_counts); #define IPI_CALL_FUNC 0 #define IPI_CPU_STOP 1 diff --git a/kernel/sched.c b/kernel/sched.c index 7c9098d186e..34fd81d2178 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -318,12 +318,12 @@ struct task_group root_task_group; /* Default task group's sched entity on each cpu */ static DEFINE_PER_CPU(struct sched_entity, init_sched_entity); /* Default task group's cfs_rq on each cpu */ -static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp; +static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_cfs_rq); #endif /* CONFIG_FAIR_GROUP_SCHED */ #ifdef CONFIG_RT_GROUP_SCHED static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); -static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp; +static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq); #endif /* CONFIG_RT_GROUP_SCHED */ #else /* !CONFIG_USER_SCHED */ #define root_task_group init_task_group diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c index 02e3e3d50d4..301ae51ae40 100644 --- a/net/rds/ib_stats.c +++ b/net/rds/ib_stats.c @@ -37,7 +37,7 @@ #include "rds.h" #include "ib.h" -DEFINE_PER_CPU(struct rds_ib_statistics, rds_ib_stats) ____cacheline_aligned; +DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats); static char *rds_ib_stat_names[] = { "ib_connect_raced", diff --git a/net/rds/iw_stats.c b/net/rds/iw_stats.c index ccc7e8f0bf0..fafea3cc92d 100644 --- a/net/rds/iw_stats.c +++ b/net/rds/iw_stats.c @@ -37,7 +37,7 @@ #include "rds.h" #include "iw.h" -DEFINE_PER_CPU(struct rds_iw_statistics, rds_iw_stats) ____cacheline_aligned; +DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_iw_statistics, rds_iw_stats); static char *rds_iw_stat_names[] = { "iw_connect_raced", diff --git a/net/rds/page.c b/net/rds/page.c index c460743a89a..de7bb84bcd7 100644 --- a/net/rds/page.c +++ b/net/rds/page.c @@ -39,7 +39,7 @@ struct rds_page_remainder { unsigned long r_offset; }; -DEFINE_PER_CPU(struct 
rds_page_remainder, rds_page_remainders) ____cacheline_aligned; +DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder, rds_page_remainders); /* * returns 0 on success or -errno on failure. -- cgit v1.2.3-70-g09d2 From 245b2e70eabd797932adb263a65da0bab3711753 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Jun 2009 15:13:48 +0900 Subject: percpu: clean up percpu variable definitions Percpu variable definition is about to be updated such that all percpu symbols including the static ones must be unique. Update percpu variable definitions accordingly. * as,cfq: rename ioc_count uniquely * cpufreq: rename cpu_dbs_info uniquely * xen: move nesting_count out of xen_evtchn_do_upcall() and rename it * mm: move ratelimits out of balance_dirty_pages_ratelimited_nr() and rename it * ipv4,6: rename cookie_scratch uniquely * x86 perf_counter: rename prev_left to pmc_prev_left, irq_entry to pmc_irq_entry and nmi_entry to pmc_nmi_entry * perf_counter: rename disable_count to perf_disable_count * ftrace: rename test_event_disable to ftrace_test_event_disable * kmemleak: rename test_pointer to kmemleak_test_pointer * mce: rename next_interval to mce_next_interval [ Impact: percpu usage cleanups, no duplicate static percpu var names ] Signed-off-by: Tejun Heo Reviewed-by: Christoph Lameter Cc: Ivan Kokshaysky Cc: Jens Axboe Cc: Dave Jones Cc: Jeremy Fitzhardinge Cc: linux-mm Cc: David S. Miller Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Li Zefan Cc: Catalin Marinas Cc: Andi Kleen --- arch/x86/kernel/cpu/mcheck/mce.c | 8 ++++---- arch/x86/kernel/cpu/perf_counter.c | 14 +++++++------- block/as-iosched.c | 10 +++++----- block/cfq-iosched.c | 10 +++++----- drivers/cpufreq/cpufreq_conservative.c | 12 ++++++------ drivers/cpufreq/cpufreq_ondemand.c | 15 ++++++++------- drivers/xen/events.c | 9 +++++---- kernel/perf_counter.c | 6 +++--- kernel/trace/trace_events.c | 6 +++--- mm/kmemleak-test.c | 6 +++--- mm/page-writeback.c | 5 +++-- net/ipv4/syncookies.c | 5 +++-- net/ipv6/syncookies.c | 5 +++-- 13 files changed, 58 insertions(+), 53 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 284d1de968b..cba8cd3e957 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1091,7 +1091,7 @@ void mce_log_therm_throt_event(__u64 status) */ static int check_interval = 5 * 60; /* 5 minutes */ -static DEFINE_PER_CPU(int, next_interval); /* in jiffies */ +static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */ static DEFINE_PER_CPU(struct timer_list, mce_timer); static void mcheck_timer(unsigned long data) @@ -1110,7 +1110,7 @@ static void mcheck_timer(unsigned long data) * Alert userspace if needed. If we logged an MCE, reduce the * polling interval, otherwise increase the polling interval. 
*/ - n = &__get_cpu_var(next_interval); + n = &__get_cpu_var(mce_next_interval); if (mce_notify_irq()) *n = max(*n/2, HZ/100); else @@ -1311,7 +1311,7 @@ static void mce_cpu_features(struct cpuinfo_x86 *c) static void mce_init_timer(void) { struct timer_list *t = &__get_cpu_var(mce_timer); - int *n = &__get_cpu_var(next_interval); + int *n = &__get_cpu_var(mce_next_interval); if (mce_ignore_ce) return; @@ -1914,7 +1914,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_DOWN_FAILED: case CPU_DOWN_FAILED_FROZEN: t->expires = round_jiffies(jiffies + - __get_cpu_var(next_interval)); + __get_cpu_var(mce_next_interval)); add_timer_on(t, cpu); smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); break; diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 4946288d683..5fdf63aaaba 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -862,7 +862,7 @@ amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) x86_pmu_disable_counter(hwc, idx); } -static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], prev_left); +static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); /* * Set the next IRQ period, based on the hwc->period_left value. @@ -901,7 +901,7 @@ x86_perf_counter_set_period(struct perf_counter *counter, if (left > x86_pmu.max_period) left = x86_pmu.max_period; - per_cpu(prev_left[idx], smp_processor_id()) = left; + per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; /* * The hw counter starts counting from this counter offset, @@ -1089,7 +1089,7 @@ void perf_counter_print_debug(void) rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); rdmsrl(x86_pmu.perfctr + idx, pmc_count); - prev_left = per_cpu(prev_left[idx], cpu); + prev_left = per_cpu(pmc_prev_left[idx], cpu); pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n", cpu, idx, pmc_ctrl); @@ -1561,8 +1561,8 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip) entry->ip[entry->nr++] = ip; } -static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry); -static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry); +static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); +static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry); static void @@ -1709,9 +1709,9 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) struct perf_callchain_entry *entry; if (in_nmi()) - entry = &__get_cpu_var(nmi_entry); + entry = &__get_cpu_var(pmc_nmi_entry); else - entry = &__get_cpu_var(irq_entry); + entry = &__get_cpu_var(pmc_irq_entry); entry->nr = 0; diff --git a/block/as-iosched.c b/block/as-iosched.c index 7a12cf6ee1d..ce8ba57c655 100644 --- a/block/as-iosched.c +++ b/block/as-iosched.c @@ -146,7 +146,7 @@ enum arq_state { #define RQ_STATE(rq) ((enum arq_state)(rq)->elevator_private2) #define RQ_SET_STATE(rq, state) ((rq)->elevator_private2 = (void *) state) -static DEFINE_PER_CPU(unsigned long, ioc_count); +static DEFINE_PER_CPU(unsigned long, as_ioc_count); static struct completion *ioc_gone; static DEFINE_SPINLOCK(ioc_gone_lock); @@ -161,7 +161,7 @@ static void as_antic_stop(struct as_data *ad); static void free_as_io_context(struct as_io_context *aic) { kfree(aic); - elv_ioc_count_dec(ioc_count); + elv_ioc_count_dec(as_ioc_count); if (ioc_gone) { /* * AS scheduler is exiting, grab exit lock and check @@ -169,7 +169,7 @@ static void free_as_io_context(struct as_io_context *aic) * complete ioc_gone and set it back to NULL. 
*/ spin_lock(&ioc_gone_lock); - if (ioc_gone && !elv_ioc_count_read(ioc_count)) { + if (ioc_gone && !elv_ioc_count_read(as_ioc_count)) { complete(ioc_gone); ioc_gone = NULL; } @@ -211,7 +211,7 @@ static struct as_io_context *alloc_as_io_context(void) ret->seek_total = 0; ret->seek_samples = 0; ret->seek_mean = 0; - elv_ioc_count_inc(ioc_count); + elv_ioc_count_inc(as_ioc_count); } return ret; @@ -1507,7 +1507,7 @@ static void __exit as_exit(void) ioc_gone = &all_gone; /* ioc_gone's update must be visible before reading ioc_count */ smp_wmb(); - if (elv_ioc_count_read(ioc_count)) + if (elv_ioc_count_read(as_ioc_count)) wait_for_completion(&all_gone); synchronize_rcu(); } diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 833ec18eaa6..0f1cc7d3855 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -48,7 +48,7 @@ static int cfq_slice_idle = HZ / 125; static struct kmem_cache *cfq_pool; static struct kmem_cache *cfq_ioc_pool; -static DEFINE_PER_CPU(unsigned long, ioc_count); +static DEFINE_PER_CPU(unsigned long, cfq_ioc_count); static struct completion *ioc_gone; static DEFINE_SPINLOCK(ioc_gone_lock); @@ -1422,7 +1422,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head) cic = container_of(head, struct cfq_io_context, rcu_head); kmem_cache_free(cfq_ioc_pool, cic); - elv_ioc_count_dec(ioc_count); + elv_ioc_count_dec(cfq_ioc_count); if (ioc_gone) { /* @@ -1431,7 +1431,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head) * complete ioc_gone and set it back to NULL */ spin_lock(&ioc_gone_lock); - if (ioc_gone && !elv_ioc_count_read(ioc_count)) { + if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) { complete(ioc_gone); ioc_gone = NULL; } @@ -1557,7 +1557,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) INIT_HLIST_NODE(&cic->cic_list); cic->dtor = cfq_free_io_context; cic->exit = cfq_exit_io_context; - elv_ioc_count_inc(ioc_count); + elv_ioc_count_inc(cfq_ioc_count); } return cic; @@ -2658,7 +2658,7 @@ static void __exit cfq_exit(void) * this also protects us from entering cfq_slab_kill() with * pending RCU callbacks */ - if (elv_ioc_count_read(ioc_count)) + if (elv_ioc_count_read(cfq_ioc_count)) wait_for_completion(&all_gone); cfq_slab_kill(); } diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 7fc58af748b..a7ef465c83b 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -65,7 +65,7 @@ struct cpu_dbs_info_s { int cpu; unsigned int enable:1; }; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); +static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info); static unsigned int dbs_enable; /* number of CPUs using this policy */ @@ -138,7 +138,7 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; - struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, + struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info, freq->cpu); struct cpufreq_policy *policy; @@ -298,7 +298,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy, /* we need to re-evaluate prev_cpu_idle */ for_each_online_cpu(j) { struct cpu_dbs_info_s *dbs_info; - dbs_info = &per_cpu(cpu_dbs_info, j); + dbs_info = &per_cpu(cs_cpu_dbs_info, j); dbs_info->prev_cpu_idle = get_cpu_idle_time(j, &dbs_info->prev_cpu_wall); if (dbs_tuners_ins.ignore_nice) @@ -388,7 +388,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) cputime64_t cur_wall_time, cur_idle_time; unsigned int idle_time, 
wall_time; - j_dbs_info = &per_cpu(cpu_dbs_info, j); + j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); @@ -528,7 +528,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int j; int rc; - this_dbs_info = &per_cpu(cpu_dbs_info, cpu); + this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); switch (event) { case CPUFREQ_GOV_START: @@ -548,7 +548,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, for_each_cpu(j, policy->cpus) { struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cpu_dbs_info, j); + j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); j_dbs_info->cur_policy = policy; j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 1911d172935..36f292a7bd0 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -73,7 +73,7 @@ struct cpu_dbs_info_s { unsigned int enable:1, sample_type:1; }; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); +static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); static unsigned int dbs_enable; /* number of CPUs using this policy */ @@ -151,7 +151,8 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, unsigned int freq_hi, freq_lo; unsigned int index = 0; unsigned int jiffies_total, jiffies_hi, jiffies_lo; - struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu); + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, + policy->cpu); if (!dbs_info->freq_table) { dbs_info->freq_lo = 0; @@ -196,7 +197,7 @@ static void ondemand_powersave_bias_init(void) { int i; for_each_online_cpu(i) { - struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i); + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, i); dbs_info->freq_table = cpufreq_frequency_get_table(i); dbs_info->freq_lo = 0; } @@ -297,7 +298,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy, /* we need to re-evaluate prev_cpu_idle */ for_each_online_cpu(j) { struct cpu_dbs_info_s *dbs_info; - dbs_info = &per_cpu(cpu_dbs_info, j); + dbs_info = &per_cpu(od_cpu_dbs_info, j); dbs_info->prev_cpu_idle = get_cpu_idle_time(j, &dbs_info->prev_cpu_wall); if (dbs_tuners_ins.ignore_nice) @@ -391,7 +392,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) unsigned int load, load_freq; int freq_avg; - j_dbs_info = &per_cpu(cpu_dbs_info, j); + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); @@ -548,7 +549,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int j; int rc; - this_dbs_info = &per_cpu(cpu_dbs_info, cpu); + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); switch (event) { case CPUFREQ_GOV_START: @@ -570,7 +571,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, for_each_cpu(j, policy->cpus) { struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cpu_dbs_info, j); + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); j_dbs_info->cur_policy = policy; j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, diff --git a/drivers/xen/events.c b/drivers/xen/events.c index ab581fa6268..7d2987e9b1b 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -602,6 +602,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +static DEFINE_PER_CPU(unsigned, xed_nesting_count); + /* * Search the CPUs pending events bitmasks. 
For each one found, map * the event number to an irq, and feed it into do_IRQ() for @@ -617,7 +619,6 @@ void xen_evtchn_do_upcall(struct pt_regs *regs) struct pt_regs *old_regs = set_irq_regs(regs); struct shared_info *s = HYPERVISOR_shared_info; struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); - static DEFINE_PER_CPU(unsigned, nesting_count); unsigned count; exit_idle(); @@ -628,7 +629,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs) vcpu_info->evtchn_upcall_pending = 0; - if (__get_cpu_var(nesting_count)++) + if (__get_cpu_var(xed_nesting_count)++) goto out; #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */ @@ -653,8 +654,8 @@ void xen_evtchn_do_upcall(struct pt_regs *regs) BUG_ON(!irqs_disabled()); - count = __get_cpu_var(nesting_count); - __get_cpu_var(nesting_count) = 0; + count = __get_cpu_var(xed_nesting_count); + __get_cpu_var(xed_nesting_count) = 0; } while(count != 1); out: diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 1a933a221ea..1fd7a2e7575 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c @@ -98,16 +98,16 @@ hw_perf_group_sched_in(struct perf_counter *group_leader, void __weak perf_counter_print_debug(void) { } -static DEFINE_PER_CPU(int, disable_count); +static DEFINE_PER_CPU(int, perf_disable_count); void __perf_disable(void) { - __get_cpu_var(disable_count)++; + __get_cpu_var(perf_disable_count)++; } bool __perf_enable(void) { - return !--__get_cpu_var(disable_count); + return !--__get_cpu_var(perf_disable_count); } void perf_disable(void) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index aa08be69a1b..54b1de5074b 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -1318,7 +1318,7 @@ static __init void event_trace_self_tests(void) #ifdef CONFIG_FUNCTION_TRACER -static DEFINE_PER_CPU(atomic_t, test_event_disable); +static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); static void function_test_events_call(unsigned long ip, unsigned long parent_ip) @@ -1334,7 +1334,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip) pc = preempt_count(); resched = ftrace_preempt_disable(); cpu = raw_smp_processor_id(); - disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu)); + disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); if (disabled != 1) goto out; @@ -1352,7 +1352,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip) trace_nowake_buffer_unlock_commit(event, flags, pc); out: - atomic_dec(&per_cpu(test_event_disable, cpu)); + atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); ftrace_preempt_enable(resched); } diff --git a/mm/kmemleak-test.c b/mm/kmemleak-test.c index d5292fc6f52..177a5169bbd 100644 --- a/mm/kmemleak-test.c +++ b/mm/kmemleak-test.c @@ -36,7 +36,7 @@ struct test_node { }; static LIST_HEAD(test_list); -static DEFINE_PER_CPU(void *, test_pointer); +static DEFINE_PER_CPU(void *, kmemleak_test_pointer); /* * Some very simple testing. 
This function needs to be extended for @@ -86,9 +86,9 @@ static int __init kmemleak_test_init(void) } for_each_possible_cpu(i) { - per_cpu(test_pointer, i) = kmalloc(129, GFP_KERNEL); + per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL); pr_info("kmemleak: kmalloc(129) = %p\n", - per_cpu(test_pointer, i)); + per_cpu(kmemleak_test_pointer, i)); } return 0; diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 7b0dcea4935..2c075dcf03d 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -607,6 +607,8 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite) } } +static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0; + /** * balance_dirty_pages_ratelimited_nr - balance dirty memory state * @mapping: address_space which was dirtied @@ -624,7 +626,6 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite) void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, unsigned long nr_pages_dirtied) { - static DEFINE_PER_CPU(unsigned long, ratelimits) = 0; unsigned long ratelimit; unsigned long *p; @@ -637,7 +638,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, * tasks in balance_dirty_pages(). Period. */ preempt_disable(); - p = &__get_cpu_var(ratelimits); + p = &__get_cpu_var(bdp_ratelimits); *p += nr_pages_dirtied; if (unlikely(*p >= ratelimit)) { *p = 0; diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 84d90f2799b..a6e0e077ac3 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -37,12 +37,13 @@ __initcall(init_syncookies); #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) -static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], cookie_scratch); +static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], + ipv4_cookie_scratch); static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, u32 count, int c) { - __u32 *tmp = __get_cpu_var(cookie_scratch); + __u32 *tmp = __get_cpu_var(ipv4_cookie_scratch); memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c])); tmp[0] = (__force u32)saddr; diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 23d0d6db046..6b6ae913b5d 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -74,12 +74,13 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, return child; } -static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], cookie_scratch); +static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], + ipv6_cookie_scratch); static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr, __be16 sport, __be16 dport, u32 count, int c) { - __u32 *tmp = __get_cpu_var(cookie_scratch); + __u32 *tmp = __get_cpu_var(ipv6_cookie_scratch); /* * we have 320 bits of information to hash, copy in the remaining -- cgit v1.2.3-70-g09d2 From 6088464cf1ae9fb3d2ccc0ec5feb3f5b971098d8 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Jun 2009 15:13:52 +0900 Subject: alpha: kill unnecessary __used attribute in PER_CPU_ATTRIBUTES With the previous percpu variable definition change, all percpu variables are global and there's no need to specify __used, which only triggers on recent compilers anyway. Kill it. 
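Editor's sketch, not part of the original series: the renames above and the __used removal just described both follow from the same rule, that every percpu symbol, static or not, is about to share a single global namespace. A minimal illustration in C, using only names that appear in the patches themselves:

/* Before the series, two files could each keep a private counter
 * under the same percpu name (cfq-iosched.c had an identical copy): */
static DEFINE_PER_CPU(unsigned long, ioc_count);	/* block/as-iosched.c */

/* After the series, every name carries a subsystem prefix so the
 * generated symbol is unique kernel-wide: */
static DEFINE_PER_CPU(unsigned long, as_ioc_count);	/* block/as-iosched.c */
static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);	/* block/cfq-iosched.c */

/* Access sites change only in the name they pass, e.g.
 * elv_ioc_count_inc(as_ioc_count) or __get_cpu_var(mce_next_interval). */

No behaviour changes here; the prefix convention (as_, cfq_, mce_, pmc_, xed_) is the entire mechanism.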
[ Impact: remove unnecessary percpu attribute ] Signed-off-by: Tejun Heo Cc: Ivan Kokshaysky Cc: Richard Henderson --- arch/alpha/include/asm/percpu.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch') diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h index 06c5c7a4afd..7f0a9c4f2fd 100644 --- a/arch/alpha/include/asm/percpu.h +++ b/arch/alpha/include/asm/percpu.h @@ -30,7 +30,6 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; #ifndef MODULE #define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset)) -#define PER_CPU_ATTRIBUTES #else /* * To calculate addresses of locally defined variables, GCC uses 32-bit * displacement from the GP. Which doesn't work for per cpu variables in * modules, as an offset to the kernel per cpu area is way above 4G. @@ -49,8 +48,6 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; : "=&r"(__ptr), "=&r"(tmp_gp)); \ (typeof(&per_cpu_var(var)))(__ptr + (offset)); }) -#define PER_CPU_ATTRIBUTES __used - #endif /* MODULE */ /* @@ -71,8 +68,6 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; #define __get_cpu_var(var) per_cpu_var(var) #define __raw_get_cpu_var(var) per_cpu_var(var) -#define PER_CPU_ATTRIBUTES - #endif /* SMP */ #ifdef CONFIG_SMP -- cgit v1.2.3-70-g09d2 From 9b7dbc7dc0365a943af2d73b1376a6f0aac5dc0d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Jun 2009 15:13:52 +0900 Subject: alpha: switch to dynamic percpu allocator Alpha implements a custom SHIFT_PERCPU_PTR for modules because the percpu area can be located far away from the 4G area where the module text is located. The custom SHIFT_PERCPU_PTR forces GOT usage using the ldq instruction with a literal relocation; however, the relocation can't be used with dynamically allocated percpu variables. Fortunately, a similar result can be achieved by using weak percpu variable definitions. This patch makes alpha use weak definitions and switch to the dynamic percpu allocator. asm/tlbflush.h was getting linux/sched.h via asm/percpu.h, which no longer needs it. Include linux/sched.h directly in asm/tlbflush.h. Compile tested. Generation of literal relocation verified. This patch is based on Ivan Kokshaysky's alpha percpu patch. [ Impact: use dynamic percpu allocator ] Signed-off-by: Tejun Heo Acked-by: Ivan Kokshaysky Cc: Richard Henderson --- arch/alpha/Kconfig | 3 -- arch/alpha/include/asm/percpu.h | 95 ++++----------------------------- arch/alpha/include/asm/tlbflush.h | 1 + 3 files changed, 9 insertions(+), 90 deletions(-) (limited to 'arch') diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 05d86407188..9fb8aae5c39 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -70,9 +70,6 @@ config AUTO_IRQ_AFFINITY depends on SMP default y -config HAVE_LEGACY_PER_CPU_AREA - def_bool y - source "init/Kconfig" source "kernel/Kconfig.freezer" diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h index 7f0a9c4f2fd..2c12378e3aa 100644 --- a/arch/alpha/include/asm/percpu.h +++ b/arch/alpha/include/asm/percpu.h @@ -1,97 +1,18 @@ #ifndef __ALPHA_PERCPU_H #define __ALPHA_PERCPU_H -#include -#include -#include - /* - * Determine the real variable name from the name visible in the - * kernel sources. - */ -#define per_cpu_var(var) per_cpu__##var - -#ifdef CONFIG_SMP - -/* - * per_cpu_offset() is the offset that has to be added to a - * percpu variable to get to the instance for a certain processor.
- */ -extern unsigned long __per_cpu_offset[NR_CPUS]; - -#define per_cpu_offset(x) (__per_cpu_offset[x]) - -#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id()) -#ifdef CONFIG_DEBUG_PREEMPT -#define my_cpu_offset per_cpu_offset(smp_processor_id()) -#else -#define my_cpu_offset __my_cpu_offset -#endif - -#ifndef MODULE -#define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset)) -#else -/* - * To calculate addresses of locally defined variables, GCC uses 32-bit - * displacement from the GP. Which doesn't work for per cpu variables in - * modules, as an offset to the kernel per cpu area is way above 4G. + * To calculate addresses of locally defined variables, GCC uses + * 32-bit displacement from the GP. Which doesn't work for per cpu + * variables in modules, as an offset to the kernel per cpu area is + * way above 4G. * - * This forces allocation of a GOT entry for per cpu variable using - * ldq instruction with a 'literal' relocation. - */ -#define SHIFT_PERCPU_PTR(var, offset) ({ \ - extern int simple_identifier_##var(void); \ - unsigned long __ptr, tmp_gp; \ - asm ( "br %1, 1f \n\ - 1: ldgp %1, 0(%1) \n\ - ldq %0, per_cpu__" #var"(%1)\t!literal" \ - : "=&r"(__ptr), "=&r"(tmp_gp)); \ - (typeof(&per_cpu_var(var)))(__ptr + (offset)); }) - -#endif /* MODULE */ - -/* - * A percpu variable may point to a discarded regions. The following are - * established ways to produce a usable pointer from the percpu variable - * offset. + * Always use weak definitions for percpu variables in modules. */ -#define per_cpu(var, cpu) \ - (*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu))) -#define __get_cpu_var(var) \ - (*SHIFT_PERCPU_PTR(var, my_cpu_offset)) -#define __raw_get_cpu_var(var) \ - (*SHIFT_PERCPU_PTR(var, __my_cpu_offset)) - -#else /* ! SMP */ - -#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var))) -#define __get_cpu_var(var) per_cpu_var(var) -#define __raw_get_cpu_var(var) per_cpu_var(var) - -#endif /* SMP */ - -#ifdef CONFIG_SMP -#define PER_CPU_BASE_SECTION ".data.percpu" -#else -#define PER_CPU_BASE_SECTION ".data" -#endif - -#ifdef CONFIG_SMP - -#ifdef MODULE -#define PER_CPU_SHARED_ALIGNED_SECTION "" -#else -#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned" -#endif -#define PER_CPU_FIRST_SECTION ".first" - -#else - -#define PER_CPU_SHARED_ALIGNED_SECTION "" -#define PER_CPU_FIRST_SECTION "" - +#if defined(MODULE) && defined(CONFIG_SMP) +#define ARCH_NEEDS_WEAK_PER_CPU #endif -#define PER_CPU_ATTRIBUTES +#include #endif /* __ALPHA_PERCPU_H */ diff --git a/arch/alpha/include/asm/tlbflush.h b/arch/alpha/include/asm/tlbflush.h index 9d87aaa08c0..e89e0c2e15b 100644 --- a/arch/alpha/include/asm/tlbflush.h +++ b/arch/alpha/include/asm/tlbflush.h @@ -2,6 +2,7 @@ #define _ALPHA_TLBFLUSH_H #include +#include #include #include -- cgit v1.2.3-70-g09d2 From 9a0ef2923abd2cc2c6f78d3663ac7af34c0220e8 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Jun 2009 15:13:53 +0900 Subject: s390: switch to dynamic percpu allocator 64bit s390 shares the same problem with alpha regarding percpu symbol addressing from modules. It needs assembly magic to force a GOTENT reference when building modules, as the percpu address will be outside the usual 4G range from the module text. This can be solved by using weak percpu variable definitions. This patch makes s390 use weak definitions and switch to the dynamic percpu allocator. Please note that the weak attribute is not added if !SMP, as percpu variables behave exactly the same as normal variables on UP. Compile tested. Generation of GOTENT reference verified. This patch is based on Ivan Kokshaysky's alpha percpu patch. [ Impact: use dynamic percpu allocator ] Signed-off-by: Tejun Heo Cc: Martin Schwidefsky Cc: Heiko Carstens
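Editor's sketch, not part of the original series: the shape that ARCH_NEEDS_WEAK_PER_CPU has to give DEFINE_PER_CPU for this patch and the alpha one to work. The expansion below is simplified, section placement is omitted, and the real kernel macros also absorb a leading "static" through a dummy symbol; only the ARCH_NEEDS_WEAK_PER_CPU name and the per_cpu__ prefix are taken from the patches:

/*
 * A weak symbol may be overridden at link time, so the compiler cannot
 * assume it is local.  For module code this forces an external
 * reference (a GOT "literal" load on alpha, a GOTENT load on s390),
 * which reaches a percpu area far beyond the 4G window that direct
 * GP- or PC-relative addressing covers.
 */
#ifdef ARCH_NEEDS_WEAK_PER_CPU
#define DEFINE_PER_CPU(type, name) \
	__attribute__((weak)) __typeof__(type) per_cpu__##name
#else
#define DEFINE_PER_CPU(type, name) \
	__typeof__(type) per_cpu__##name
#endif

/* The same definition site then works both built-in and modular: */
DEFINE_PER_CPU(unsigned, xed_nesting_count);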
--- arch/s390/Kconfig | 3 --- arch/s390/include/asm/percpu.h | 32 ++++++++------------------------ 2 files changed, 8 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index f4a3cc62d28..a14dba0e4d6 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -75,9 +75,6 @@ config VIRT_CPU_ACCOUNTING config ARCH_SUPPORTS_DEBUG_PAGEALLOC def_bool y -config HAVE_LEGACY_PER_CPU_AREA - def_bool y - mainmenu "Linux Kernel Configuration" config S390 diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h index 408d60b4f75..f7ad8719d02 100644 --- a/arch/s390/include/asm/percpu.h +++ b/arch/s390/include/asm/percpu.h @@ -1,37 +1,21 @@ #ifndef __ARCH_S390_PERCPU__ #define __ARCH_S390_PERCPU__ -#include -#include - /* * s390 uses its own implementation for per cpu data, the offset of * the cpu local data area is cached in the cpu's lowcore memory. - * For 64 bit module code s390 forces the use of a GOT slot for the - * address of the per cpu variable. This is needed because the module - * may be more than 4G above the per cpu area. */ -#if defined(__s390x__) && defined(MODULE) - -#define SHIFT_PERCPU_PTR(ptr,offset) (({ \ - extern int simple_identifier_##var(void); \ - unsigned long *__ptr; \ - asm ( "larl %0, %1@GOTENT" \ - : "=a" (__ptr) : "X" (ptr) ); \ - (typeof(ptr))((*__ptr) + (offset)); })) - -#else - -#define SHIFT_PERCPU_PTR(ptr, offset) (({ \ - extern int simple_identifier_##var(void); \ - unsigned long __ptr; \ - asm ( "" : "=a" (__ptr) : "0" (ptr) ); \ - (typeof(ptr)) (__ptr + (offset)); })) +#define __my_cpu_offset S390_lowcore.percpu_offset +/* + * For 64 bit module code, the module may be more than 4G above the + * per cpu area, use weak definitions to force the compiler to + * generate external references. + */ +#if defined(CONFIG_SMP) && defined(__s390x__) && defined(MODULE) +#define ARCH_NEEDS_WEAK_PER_CPU #endif -#define __my_cpu_offset S390_lowcore.percpu_offset - #include #endif /* __ARCH_S390_PERCPU__ */ -- cgit v1.2.3-70-g09d2 From bf4bb2b1f285ec56e7f3cbf0190761b42131871c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Jun 2009 16:57:03 +0900 Subject: sparc64: fix build breakage introduced by percpu-convert-most patchset Commit e74e396204bfcb67570ba4517b08f5918e69afea incorrectly added HAVE_LEGACY_PER_CPU_AREA to sparc64 although it had already been converted to the dynamic percpu allocator. Drop both HAVE_{LEGACY|DYNAMIC}_PER_CPU_AREA. Signed-off-by: Tejun Heo Acked-by: David Miller --- arch/sparc/Kconfig | 6 ------ 1 file changed, 6 deletions(-) (limited to 'arch') diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 7a8698b913f..4f6ed0f113f 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -92,15 +92,9 @@ config AUDIT_ARCH bool default y -config HAVE_LEGACY_PER_CPU_AREA - def_bool y if SPARC64 - config HAVE_SETUP_PER_CPU_AREA def_bool y if SPARC64 -config HAVE_DYNAMIC_PER_CPU_AREA - def_bool y if SPARC64 - config GENERIC_HARDIRQS_NO__DO_IRQ bool def_bool y if SPARC64 -- cgit v1.2.3-70-g09d2 From 2495fbf7effa6868f5d74124ae9b22a57980755b Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Fri, 26 Jun 2009 10:53:57 -0700 Subject: x86, setup: remove obsolete pre-Kconfig CONFIG_VIDEO_ variables There was a set of pre-Kconfig configuration variables defined in the video code.
There is absolutely no evidence that they have been tweaked by anybody in modern history, so just get rid of them and hope nobody notices. If someone does complain, these should be made real Kconfig variables. Reported-by: Robert P. J. Day Signed-off-by: H. Peter Anvin --- arch/x86/boot/video-vesa.c | 7 +------ arch/x86/boot/video-vga.c | 10 ---------- arch/x86/boot/video.c | 5 ----- arch/x86/boot/video.h | 20 ++------------------ 4 files changed, 3 insertions(+), 39 deletions(-) (limited to 'arch') diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c index c700147d6ff..d7ef26ba454 100644 --- a/arch/x86/boot/video-vesa.c +++ b/arch/x86/boot/video-vesa.c @@ -31,7 +31,6 @@ static inline void vesa_store_mode_params_graphics(void) {} static int vesa_probe(void) { -#if defined(CONFIG_VIDEO_VESA) || defined(CONFIG_FIRMWARE_EDID) struct biosregs ireg, oreg; u16 mode; addr_t mode_ptr; @@ -49,8 +48,7 @@ static int vesa_probe(void) vginfo.signature != VESA_MAGIC || vginfo.version < 0x0102) return 0; /* Not present */ -#endif /* CONFIG_VIDEO_VESA || CONFIG_FIRMWARE_EDID */ -#ifdef CONFIG_VIDEO_VESA + set_fs(vginfo.video_mode_ptr.seg); mode_ptr = vginfo.video_mode_ptr.off; @@ -102,9 +100,6 @@ static int vesa_probe(void) } return nmodes; -#else - return 0; -#endif /* CONFIG_VIDEO_VESA */ } static int vesa_set_mode(struct mode_info *mode) diff --git a/arch/x86/boot/video-vga.c b/arch/x86/boot/video-vga.c index 8f8d827e254..819caa1f200 100644 --- a/arch/x86/boot/video-vga.c +++ b/arch/x86/boot/video-vga.c @@ -47,14 +47,6 @@ static u8 vga_set_basic_mode(void) initregs(&ireg); -#ifdef CONFIG_VIDEO_400_HACK - if (adapter >= ADAPTER_VGA) { - ireg.ax = 0x1202; - ireg.bx = 0x0030; - intcall(0x10, &ireg, NULL); - } -#endif - ax = 0x0f00; intcall(0x10, &ireg, &oreg); mode = oreg.al; @@ -62,11 +54,9 @@ static u8 vga_set_basic_mode(void) set_fs(0); rows = rdfs8(0x484); /* rows minus one */ -#ifndef CONFIG_VIDEO_400_HACK if ((oreg.ax == 0x5003 || oreg.ax == 0x5007) && (rows == 0 || rows == 24)) return mode; -#endif if (mode != 3 && mode != 7) mode = 3; diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c index bad728b76fc..d42da380249 100644 --- a/arch/x86/boot/video.c +++ b/arch/x86/boot/video.c @@ -221,7 +221,6 @@ static unsigned int mode_menu(void) } } -#ifdef CONFIG_VIDEO_RETAIN /* Save screen content to the heap */ static struct saved_screen { int x, y; @@ -299,10 +298,6 @@ static void restore_screen(void) ireg.dl = saved.curx; intcall(0x10, &ireg, NULL); } -#else -#define save_screen() ((void)0) -#define restore_screen() ((void)0) -#endif void set_video(void) { diff --git a/arch/x86/boot/video.h b/arch/x86/boot/video.h index 5bb174a997f..ff339c5db31 100644 --- a/arch/x86/boot/video.h +++ b/arch/x86/boot/video.h @@ -17,19 +17,8 @@ #include -/* Enable autodetection of SVGA adapters and modes. */ -#undef CONFIG_VIDEO_SVGA - -/* Enable autodetection of VESA modes */ -#define CONFIG_VIDEO_VESA - -/* Retain screen contents when switching modes */ -#define CONFIG_VIDEO_RETAIN - -/* Force 400 scan lines for standard modes (hack to fix bad BIOS behaviour */ -#undef CONFIG_VIDEO_400_HACK - -/* This code uses an extended set of video mode numbers. These include: +/* + * This code uses an extended set of video mode numbers. 
These include: * Aliases for standard modes * NORMAL_VGA (-1) * EXTENDED_VGA (-2) @@ -67,13 +56,8 @@ /* The "recalculate timings" flag */ #define VIDEO_RECALC 0x8000 -/* Define DO_STORE according to CONFIG_VIDEO_RETAIN */ -#ifdef CONFIG_VIDEO_RETAIN void store_screen(void); #define DO_STORE() store_screen() -#else -#define DO_STORE() ((void)0) -#endif /* CONFIG_VIDEO_RETAIN */ /* * Mode table structures -- cgit v1.2.3-70-g09d2 From 702b59e623c3ca80beaeab79ca5ede4ea23170e2 Mon Sep 17 00:00:00 2001 From: Hartley Sweeten Date: Fri, 26 Jun 2009 21:36:36 +0100 Subject: [ARM] 5573/1: ep93xx: ensure typesafe io ARM: ep93xx: ensure typesafe io For the ep93xx platform, all EP93XX_*_BASE defines are based on virtual addresses. Ensure that all these defines are properly typesafe for the __raw_{read/write}* macros. Signed-off-by: H Hartley Sweeten Acked-by: Ryan Mallon Signed-off-by: Russell King --- arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h | 61 +++++++++++++------------ arch/arm/mach-ep93xx/include/mach/io.h | 17 ++++++- 2 files changed, 47 insertions(+), 31 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h index 967c079180d..49b256b3ddf 100644 --- a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h +++ b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h @@ -52,40 +52,43 @@ #define EP93XX_AHB_VIRT_BASE 0xfef00000 #define EP93XX_AHB_SIZE 0x00100000 +#define EP93XX_AHB_IOMEM(x) IOMEM(EP93XX_AHB_VIRT_BASE + (x)) + #define EP93XX_APB_PHYS_BASE 0x80800000 #define EP93XX_APB_VIRT_BASE 0xfed00000 #define EP93XX_APB_SIZE 0x00200000 +#define EP93XX_APB_IOMEM(x) IOMEM(EP93XX_APB_VIRT_BASE + (x)) + /* AHB peripherals */ -#define EP93XX_DMA_BASE ((void __iomem *) \ - (EP93XX_AHB_VIRT_BASE + 0x00000000)) +#define EP93XX_DMA_BASE EP93XX_AHB_IOMEM(0x00000000) -#define EP93XX_ETHERNET_BASE (EP93XX_AHB_VIRT_BASE + 0x00010000) #define EP93XX_ETHERNET_PHYS_BASE (EP93XX_AHB_PHYS_BASE + 0x00010000) +#define EP93XX_ETHERNET_BASE EP93XX_AHB_IOMEM(0x00010000) -#define EP93XX_USB_BASE (EP93XX_AHB_VIRT_BASE + 0x00020000) #define EP93XX_USB_PHYS_BASE (EP93XX_AHB_PHYS_BASE + 0x00020000) +#define EP93XX_USB_BASE EP93XX_AHB_IOMEM(0x00020000) -#define EP93XX_RASTER_BASE (EP93XX_AHB_VIRT_BASE + 0x00030000) +#define EP93XX_RASTER_BASE EP93XX_AHB_IOMEM(0x00030000) -#define EP93XX_GRAPHICS_ACCEL_BASE (EP93XX_AHB_VIRT_BASE + 0x00040000) +#define EP93XX_GRAPHICS_ACCEL_BASE EP93XX_AHB_IOMEM(0x00040000) -#define EP93XX_SDRAM_CONTROLLER_BASE (EP93XX_AHB_VIRT_BASE + 0x00060000) +#define EP93XX_SDRAM_CONTROLLER_BASE EP93XX_AHB_IOMEM(0x00060000) -#define EP93XX_PCMCIA_CONTROLLER_BASE (EP93XX_AHB_VIRT_BASE + 0x00080000) +#define EP93XX_PCMCIA_CONTROLLER_BASE EP93XX_AHB_IOMEM(0x00080000) -#define EP93XX_BOOT_ROM_BASE (EP93XX_AHB_VIRT_BASE + 0x00090000) +#define EP93XX_BOOT_ROM_BASE EP93XX_AHB_IOMEM(0x00090000) -#define EP93XX_IDE_BASE (EP93XX_AHB_VIRT_BASE + 0x000a0000) +#define EP93XX_IDE_BASE EP93XX_AHB_IOMEM(0x000a0000) -#define EP93XX_VIC1_BASE (EP93XX_AHB_VIRT_BASE + 0x000b0000) +#define EP93XX_VIC1_BASE EP93XX_AHB_IOMEM(0x000b0000) -#define EP93XX_VIC2_BASE (EP93XX_AHB_VIRT_BASE + 0x000c0000) +#define EP93XX_VIC2_BASE EP93XX_AHB_IOMEM(0x000c0000) /* APB peripherals */ -#define EP93XX_TIMER_BASE (EP93XX_APB_VIRT_BASE + 0x00010000) +#define EP93XX_TIMER_BASE EP93XX_APB_IOMEM(0x00010000) #define EP93XX_TIMER_REG(x) (EP93XX_TIMER_BASE + (x)) #define EP93XX_TIMER1_LOAD EP93XX_TIMER_REG(0x00) #define EP93XX_TIMER1_VALUE EP93XX_TIMER_REG(0x04) @@ 
-102,11 +105,11 @@ #define EP93XX_TIMER3_CONTROL EP93XX_TIMER_REG(0x88) #define EP93XX_TIMER3_CLEAR EP93XX_TIMER_REG(0x8c) -#define EP93XX_I2S_BASE (EP93XX_APB_VIRT_BASE + 0x00020000) +#define EP93XX_I2S_BASE EP93XX_APB_IOMEM(0x00020000) -#define EP93XX_SECURITY_BASE (EP93XX_APB_VIRT_BASE + 0x00030000) +#define EP93XX_SECURITY_BASE EP93XX_APB_IOMEM(0x00030000) -#define EP93XX_GPIO_BASE (EP93XX_APB_VIRT_BASE + 0x00040000) +#define EP93XX_GPIO_BASE EP93XX_APB_IOMEM(0x00040000) #define EP93XX_GPIO_REG(x) (EP93XX_GPIO_BASE + (x)) #define EP93XX_GPIO_F_INT_TYPE1 EP93XX_GPIO_REG(0x4c) #define EP93XX_GPIO_F_INT_TYPE2 EP93XX_GPIO_REG(0x50) @@ -124,32 +127,32 @@ #define EP93XX_GPIO_B_INT_ENABLE EP93XX_GPIO_REG(0xb8) #define EP93XX_GPIO_B_INT_STATUS EP93XX_GPIO_REG(0xbc) -#define EP93XX_AAC_BASE (EP93XX_APB_VIRT_BASE + 0x00080000) +#define EP93XX_AAC_BASE EP93XX_APB_IOMEM(0x00080000) -#define EP93XX_SPI_BASE (EP93XX_APB_VIRT_BASE + 0x000a0000) +#define EP93XX_SPI_BASE EP93XX_APB_IOMEM(0x000a0000) -#define EP93XX_IRDA_BASE (EP93XX_APB_VIRT_BASE + 0x000b0000) +#define EP93XX_IRDA_BASE EP93XX_APB_IOMEM(0x000b0000) -#define EP93XX_UART1_BASE (EP93XX_APB_VIRT_BASE + 0x000c0000) #define EP93XX_UART1_PHYS_BASE (EP93XX_APB_PHYS_BASE + 0x000c0000) +#define EP93XX_UART1_BASE EP93XX_APB_IOMEM(0x000c0000) -#define EP93XX_UART2_BASE (EP93XX_APB_VIRT_BASE + 0x000d0000) #define EP93XX_UART2_PHYS_BASE (EP93XX_APB_PHYS_BASE + 0x000d0000) +#define EP93XX_UART2_BASE EP93XX_APB_IOMEM(0x000d0000) -#define EP93XX_UART3_BASE (EP93XX_APB_VIRT_BASE + 0x000e0000) #define EP93XX_UART3_PHYS_BASE (EP93XX_APB_PHYS_BASE + 0x000e0000) +#define EP93XX_UART3_BASE EP93XX_APB_IOMEM(0x000e0000) -#define EP93XX_KEY_MATRIX_BASE (EP93XX_APB_VIRT_BASE + 0x000f0000) +#define EP93XX_KEY_MATRIX_BASE EP93XX_APB_IOMEM(0x000f0000) -#define EP93XX_ADC_BASE (EP93XX_APB_VIRT_BASE + 0x00100000) -#define EP93XX_TOUCHSCREEN_BASE (EP93XX_APB_VIRT_BASE + 0x00100000) +#define EP93XX_ADC_BASE EP93XX_APB_IOMEM(0x00100000) +#define EP93XX_TOUCHSCREEN_BASE EP93XX_APB_IOMEM(0x00100000) -#define EP93XX_PWM_BASE (EP93XX_APB_VIRT_BASE + 0x00110000) +#define EP93XX_PWM_BASE EP93XX_APB_IOMEM(0x00110000) -#define EP93XX_RTC_BASE (EP93XX_APB_VIRT_BASE + 0x00120000) #define EP93XX_RTC_PHYS_BASE (EP93XX_APB_PHYS_BASE + 0x00120000) +#define EP93XX_RTC_BASE EP93XX_APB_IOMEM(0x00120000) -#define EP93XX_SYSCON_BASE (EP93XX_APB_VIRT_BASE + 0x00130000) +#define EP93XX_SYSCON_BASE EP93XX_APB_IOMEM(0x00130000) #define EP93XX_SYSCON_REG(x) (EP93XX_SYSCON_BASE + (x)) #define EP93XX_SYSCON_POWER_STATE EP93XX_SYSCON_REG(0x00) #define EP93XX_SYSCON_PWRCNT EP93XX_SYSCON_REG(0x04) @@ -179,7 +182,7 @@ #define EP93XX_SYSCON_DEVICE_CONFIG_U1EN (1<<18) #define EP93XX_SYSCON_SWLOCK EP93XX_SYSCON_REG(0xc0) -#define EP93XX_WATCHDOG_BASE (EP93XX_APB_VIRT_BASE + 0x00140000) +#define EP93XX_WATCHDOG_BASE EP93XX_APB_IOMEM(0x00140000) #endif diff --git a/arch/arm/mach-ep93xx/include/mach/io.h b/arch/arm/mach-ep93xx/include/mach/io.h index fd5f081cc8b..cebcc1c53d6 100644 --- a/arch/arm/mach-ep93xx/include/mach/io.h +++ b/arch/arm/mach-ep93xx/include/mach/io.h @@ -1,8 +1,21 @@ /* * arch/arm/mach-ep93xx/include/mach/io.h */ +#ifndef __ASM_MACH_IO_H +#define __ASM_MACH_IO_H #define IO_SPACE_LIMIT 0xffffffff -#define __io(p) __typesafe_io(p) -#define __mem_pci(p) (p) +#define __io(p) __typesafe_io(p) +#define __mem_pci(p) (p) + +/* + * A typesafe __io() variation for variable initialisers + */ +#ifdef __ASSEMBLER__ +#define IOMEM(p) p +#else +#define IOMEM(p) ((void __iomem __force *)(p)) +#endif + 
+#endif /* __ASM_MACH_IO_H */ -- cgit v1.2.3-70-g09d2 From ddf4f3d994651ee2924432a618d9caefed411dc1 Mon Sep 17 00:00:00 2001 From: Hartley Sweeten Date: Fri, 26 Jun 2009 21:39:27 +0100 Subject: [ARM] 5574/1: ep93xx: gpio.c: fix header includes and __iomem pointers Fix ep93xx gpio.c header includes and __iomem pointers. 1. <linux/io.h> should be included instead of <asm/io.h> 2. <mach/hardware.h> should be included instead of <mach/ep93xx-regs.h> 3. data_reg and data_dir_reg in struct ep93xx_gpio_chip should be void __iomem pointers not unsigned int Signed-off-by: H Hartley Sweeten Acked-by: Ryan Mallon Signed-off-by: Russell King --- arch/arm/mach-ep93xx/gpio.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-ep93xx/gpio.c b/arch/arm/mach-ep93xx/gpio.c index 482cf3d2fbc..7b7a564916f 100644 --- a/arch/arm/mach-ep93xx/gpio.c +++ b/arch/arm/mach-ep93xx/gpio.c @@ -17,15 +17,15 @@ #include #include #include +#include -#include -#include +#include struct ep93xx_gpio_chip { struct gpio_chip chip; - unsigned int data_reg; - unsigned int data_dir_reg; + void __iomem *data_reg; + void __iomem *data_dir_reg; }; #define to_ep93xx_gpio_chip(c) container_of(c, struct ep93xx_gpio_chip, chip) -- cgit v1.2.3-70-g09d2 From f04989bbf4a40077dc7ddcc3dccde11a5f3e91f2 Mon Sep 17 00:00:00 2001 From: Hartley Sweeten Date: Fri, 26 Jun 2009 21:40:34 +0100 Subject: [ARM] 5575/1: ep93xx: Show gpio interrupt type in debugfs output. ep93xx: Show gpio interrupt type in debugfs output. EP93xx uses a private implementation for the debugfs output. Modify this output so it includes the interrupt type when the gpio is configured as an interrupt. Signed-off-by: H Hartley Sweeten Acked-by: Ryan Mallon Signed-off-by: Russell King --- arch/arm/mach-ep93xx/gpio.c | 56 +++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 51 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-ep93xx/gpio.c b/arch/arm/mach-ep93xx/gpio.c index 7b7a564916f..2d83d69e2ee 100644 --- a/arch/arm/mach-ep93xx/gpio.c +++ b/arch/arm/mach-ep93xx/gpio.c @@ -111,15 +111,61 @@ static void ep93xx_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) { struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip); u8 data_reg, data_dir_reg; - int i; + int gpio, i; data_reg = __raw_readb(ep93xx_chip->data_reg); data_dir_reg = __raw_readb(ep93xx_chip->data_dir_reg); - for (i = 0; i < chip->ngpio; i++) - seq_printf(s, "GPIO %s%d: %s %s\n", chip->label, i, - (data_reg & (1 << i)) ? "set" : "clear", - (data_dir_reg & (1 << i)) ? "out" : "in"); + gpio = ep93xx_chip->chip.base; + for (i = 0; i < chip->ngpio; i++, gpio++) { + int is_out = data_dir_reg & (1 << i); + + seq_printf(s, " %s%d gpio-%-3d (%-12s) %s %s", + chip->label, i, gpio, + gpiochip_is_requested(chip, i) ? : "", + is_out ? "out" : "in ", + (data_reg & (1 << i)) ? "hi" : "lo"); + + if (!is_out) { + int irq = gpio_to_irq(gpio); + struct irq_desc *desc = irq_desc + irq; + + if (irq >= 0 && desc->action) { + char *trigger; + + switch (desc->status & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_NONE: + trigger = "(default)"; + break; + case IRQ_TYPE_EDGE_FALLING: + trigger = "edge-falling"; + break; + case IRQ_TYPE_EDGE_RISING: + trigger = "edge-rising"; + break; + case IRQ_TYPE_EDGE_BOTH: + trigger = "edge-both"; + break; + case IRQ_TYPE_LEVEL_HIGH: + trigger = "level-high"; + break; + case IRQ_TYPE_LEVEL_LOW: + trigger = "level-low"; + break; + default: + trigger = "?trigger?"; + break; + } + + seq_printf(s, " irq-%d %s%s", + irq, trigger, + (desc->status & IRQ_WAKEUP) + ?
" wakeup" : ""); + } + } + + seq_printf(s, "\n"); + } } #define EP93XX_GPIO_BANK(name, dr, ddr, base_gpio) \ -- cgit v1.2.3-70-g09d2 From 226ddb9833a3c2f1087bfac70659d8e318d3c31f Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Wed, 24 Jun 2009 17:13:47 +0100 Subject: [ARM] 5564/1: at91: add gpio button and leds support for at91sam9rlek This adds input keyboard gpio support on the at91sam9rlek board. It adds buttons 1 and 2 (left and right click). It also adds gpio leds ds1, ds2 and ds3. Signed-off-by: Nicolas Ferre Acked-by: Andrew Victor Signed-off-by: Russell King --- arch/arm/mach-at91/board-sam9rlek.c | 79 +++++++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c index 35e12a49d1a..9120d5d3a6f 100644 --- a/arch/arm/mach-at91/board-sam9rlek.c +++ b/arch/arm/mach-at91/board-sam9rlek.c @@ -15,6 +15,8 @@ #include #include #include +#include +#include #include