Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                   |   8
-rw-r--r--  arch/s390/include/asm/compat.h      |   2
-rw-r--r--  arch/s390/include/asm/hardirq.h     |   4
-rw-r--r--  arch/s390/include/asm/irqflags.h    |  51
-rw-r--r--  arch/s390/include/asm/perf_event.h  |   3
-rw-r--r--  arch/s390/include/asm/system.h      |   3
-rw-r--r--  arch/s390/include/asm/topology.h    |  27
-rw-r--r--  arch/s390/kernel/mem_detect.c       |   4
-rw-r--r--  arch/s390/kernel/module.c           |   3
-rw-r--r--  arch/s390/kernel/topology.c         | 150
-rw-r--r--  arch/s390/mm/init.c                 |   3
-rw-r--r--  arch/s390/mm/maccess.c              |   4
12 files changed, 161 insertions, 101 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f0777a47e3a..75976a14194 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -95,6 +95,7 @@ config S390
select HAVE_KVM if 64BIT
select HAVE_ARCH_TRACEHOOK
select INIT_ALL_POSSIBLE
+ select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
@@ -198,6 +199,13 @@ config HOTPLUG_CPU
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
+config SCHED_BOOK
+ bool "Book scheduler support"
+ depends on SMP
+ help
+ Book scheduler support improves the CPU scheduler's decision making
+ when dealing with machines that have several books.
+
config MATHEMU
bool "IEEE FPU emulation"
depends on MARCH_G5
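
Note: a book on System z is a physical building block carrying several multi-core
chips, so SCHED_BOOK slots a fourth scheduling level in between the existing core
(MC) domain and the whole machine. A minimal sketch of the invariant the scheduler
relies on, using only the helpers this series introduces in <asm/topology.h> below
plus the stock cpumask_subset() primitive:

    #include <linux/cpumask.h>
    #include <asm/topology.h>

    /* Sketch, assuming CONFIG_SCHED_BOOK=y: each CPU's coregroup must lie
     * inside its book, giving a strict cpu < core < book < machine nesting
     * for the scheduler to balance across. */
    static bool topology_masks_nest(unsigned int cpu)
    {
            return cpumask_subset(cpu_coregroup_mask(cpu), cpu_book_mask(cpu));
    }
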
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 104f2007f09..a875c2f542e 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -181,7 +181,7 @@ static inline int is_compat_task(void)
#endif
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
{
unsigned long stack;
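
Note: the compat_alloc_user_space() -> arch_compat_alloc_user_space() rename is one
architecture's side of a tree-wide change: common code takes over the old name so it
can fold an access_ok() check into every allocation. A sketch of what the generic
wrapper looks like (an assumption here -- simplified from the companion kernel/compat.c
change, not anything in this diff):

    #include <linux/compat.h>
    #include <linux/uaccess.h>

    void __user *compat_alloc_user_space(unsigned long len)
    {
            void __user *ptr;

            /* Refuse absurd lengths before touching the stack pointer. */
            if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
                    return NULL;
            ptr = arch_compat_alloc_user_space(len);
            if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
                    return NULL;
            return ptr;
    }
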
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index 498bc389238..881d94590ae 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -12,10 +12,6 @@
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H
-#include <linux/threads.h>
-#include <linux/sched.h>
-#include <linux/cache.h>
-#include <linux/interrupt.h>
#include <asm/lowcore.h>
#define local_softirq_pending() (S390_lowcore.softirq_pending)
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 15b3ac25389..865d6d891ac 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -8,8 +8,8 @@
#include <linux/types.h>
-/* store then or system mask. */
-#define __raw_local_irq_stosm(__or) \
+/* store then OR system mask. */
+#define __arch_local_irq_stosm(__or) \
({ \
unsigned long __mask; \
asm volatile( \
@@ -18,8 +18,8 @@
__mask; \
})
-/* store then and system mask. */
-#define __raw_local_irq_stnsm(__and) \
+/* store then AND system mask. */
+#define __arch_local_irq_stnsm(__and) \
({ \
unsigned long __mask; \
asm volatile( \
@@ -29,39 +29,44 @@
})
/* set system mask. */
-#define __raw_local_irq_ssm(__mask) \
-({ \
- asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \
-})
+static inline void __arch_local_irq_ssm(unsigned long flags)
+{
+ asm volatile("ssm %0" : : "Q" (flags) : "memory");
+}
-/* interrupt control.. */
-static inline unsigned long raw_local_irq_enable(void)
+static inline unsigned long arch_local_save_flags(void)
{
- return __raw_local_irq_stosm(0x03);
+ return __arch_local_irq_stosm(0x00);
}
-static inline unsigned long raw_local_irq_disable(void)
+static inline unsigned long arch_local_irq_save(void)
{
- return __raw_local_irq_stnsm(0xfc);
+ return __arch_local_irq_stnsm(0xfc);
}
-#define raw_local_save_flags(x) \
-do { \
- typecheck(unsigned long, x); \
- (x) = __raw_local_irq_stosm(0x00); \
-} while (0)
+static inline void arch_local_irq_disable(void)
+{
+ arch_local_irq_save();
+}
-static inline void raw_local_irq_restore(unsigned long flags)
+static inline void arch_local_irq_enable(void)
{
- __raw_local_irq_ssm(flags);
+ __arch_local_irq_stosm(0x03);
}
-static inline int raw_irqs_disabled_flags(unsigned long flags)
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ __arch_local_irq_ssm(flags);
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
return !(flags & (3UL << (BITS_PER_LONG - 8)));
}
-/* For spinlocks etc */
-#define raw_local_irq_save(x) ((x) = raw_local_irq_disable())
+static inline bool arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
#endif /* __ASM_IRQFLAGS_H */
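
Note: this hunk is s390's half of the tree-wide irqflags consolidation. The
architecture now only supplies the arch_local_*() primitives (stosm 0x00 stores the
current system mask without changing it; stnsm 0xfc additionally clears the I/O and
external interrupt mask bits), and the generic <linux/irqflags.h> builds
local_irq_save()/local_irq_restore() and the lockdep tracing variants on top. Roughly
how the generic layer consumes these in the non-tracing case (a simplified sketch,
not the verbatim header):

    #include <linux/typecheck.h>

    /* Simplified sketch of include/linux/irqflags.h without
     * CONFIG_TRACE_IRQFLAGS. */
    #define raw_local_irq_save(flags)                       \
            do {                                            \
                    typecheck(unsigned long, flags);        \
                    flags = arch_local_irq_save();          \
            } while (0)

    #define raw_local_irq_restore(flags)                    \
            do {                                            \
                    typecheck(unsigned long, flags);        \
                    arch_local_irq_restore(flags);          \
            } while (0)

    #define local_irq_save(flags)    raw_local_irq_save(flags)
    #define local_irq_restore(flags) raw_local_irq_restore(flags)
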
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 3840cbe7763..a75f168d271 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -4,7 +4,6 @@
* Copyright 2009 Martin Schwidefsky, IBM Corporation.
*/
-static inline void set_perf_event_pending(void) {}
-static inline void clear_perf_event_pending(void) {}
+/* Empty, just to avoid compiling error */
#define PERF_EVENT_INDEX_OFFSET 0
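
Note: set_perf_event_pending()/clear_perf_event_pending() disappear because perf's
"pending work" mechanism was replaced by the generic irq_work framework -- that is
what the `select HAVE_IRQ_WORK` in the Kconfig hunk above enables. The replacement
pattern, sketched with the irq_work API as introduced by that series (names hedged
accordingly):

    #include <linux/init.h>
    #include <linux/irq_work.h>

    static struct irq_work deferred;

    static void deferred_fn(struct irq_work *work)
    {
            /* Runs soon after queueing, from a safe hard-irq context. */
    }

    static void raise_from_nmi(void)
    {
            /* Queueing is safe even from NMI-like contexts, which is
             * exactly what perf needs. */
            irq_work_queue(&deferred);
    }

    static int __init deferred_init(void)
    {
            init_irq_work(&deferred, deferred_fn);
            return 0;
    }
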
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index cef66210c84..1f2ebc4afd8 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -97,7 +97,6 @@ static inline void restore_access_regs(unsigned int *acrs)
extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
-extern void account_system_vtime(struct task_struct *);
#ifdef CONFIG_PFAULT
extern void pfault_irq_init(void);
@@ -399,7 +398,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
static inline void
__set_psw_mask(unsigned long mask)
{
- __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8)));
+ __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
}
#define local_mcck_enable() __set_psw_mask(psw_kernel_bits)
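
Note: in __set_psw_mask() only the callee is renamed, but the surrounding arithmetic
is worth a second look: -1UL >> 8 is all-ones shifted down a byte, so ~(-1UL >> 8)
isolates the top 8 bits of the flags word -- the PSW system-mask byte that
arch_local_save_flags() returned. A standalone check of that identity (plain C
sketch, nothing s390-specific):

    #include <stdint.h>

    /* ~(-1UL >> 8) keeps exactly the most significant byte, where the
     * s390 irqflags format stores the PSW system mask. */
    _Static_assert(~(UINT64_MAX >> 8) == UINT64_C(0xff00000000000000),
                   "top-byte mask");
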
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 831bd033ea7..051107a2c5e 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -3,15 +3,32 @@
#include <linux/cpumask.h>
-#define mc_capable() (1)
-
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
-
extern unsigned char cpu_core_id[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
+static inline const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+ return &cpu_core_map[cpu];
+}
+
#define topology_core_id(cpu) (cpu_core_id[cpu])
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
+#define mc_capable() (1)
+
+#ifdef CONFIG_SCHED_BOOK
+
+extern unsigned char cpu_book_id[NR_CPUS];
+extern cpumask_t cpu_book_map[NR_CPUS];
+
+static inline const struct cpumask *cpu_book_mask(unsigned int cpu)
+{
+ return &cpu_book_map[cpu];
+}
+
+#define topology_book_id(cpu) (cpu_book_id[cpu])
+#define topology_book_cpumask(cpu) (&cpu_book_map[cpu])
+
+#endif /* CONFIG_SCHED_BOOK */
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
@@ -30,6 +47,8 @@ static inline void s390_init_cpu_topology(void)
};
#endif
+#define SD_BOOK_INIT SD_CPU_INIT
+
#include <asm-generic/topology.h>
#endif /* _ASM_S390_TOPOLOGY_H */
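
Note: with the mask arrays and inline helpers in place (and SD_BOOK_INIT simply
reusing the CPU-level domain tuning for the new level), any kernel code can query
book siblings directly. A minimal usage sketch built only from identifiers this hunk
defines:

    #include <linux/cpumask.h>
    #include <linux/kernel.h>
    #include <asm/topology.h>

    /* Sketch, assuming CONFIG_SCHED_BOOK=y. */
    static void print_book_siblings(unsigned int cpu)
    {
            unsigned int sib;

            for_each_cpu(sib, cpu_book_mask(cpu))
                    pr_info("cpu %u is in book %d with cpu %u\n",
                            sib, topology_book_id(cpu), cpu);
    }
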
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c
index 559af0d0787..0fbe4e32f7b 100644
--- a/arch/s390/kernel/mem_detect.c
+++ b/arch/s390/kernel/mem_detect.c
@@ -54,11 +54,11 @@ void detect_memory_layout(struct mem_chunk chunk[])
* right thing and we don't get scheduled away with low address
* protection disabled.
*/
- flags = __raw_local_irq_stnsm(0xf8);
+ flags = __arch_local_irq_stnsm(0xf8);
__ctl_store(cr0, 0, 0);
__ctl_clear_bit(0, 28);
find_memory_chunks(chunk);
__ctl_load(cr0, 0, 0);
- __raw_local_irq_ssm(flags);
+ arch_local_irq_restore(flags);
}
EXPORT_SYMBOL(detect_memory_layout);
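
Note the asymmetry this hunk preserves: the save side keeps the raw
__arch_local_irq_stnsm(0xf8) because 0xf8 clears one more bit than a plain
arch_local_irq_save() (0xfc) would -- the DAT bit -- so the probe runs on real
addresses with I/O and external interrupts off, while the restore side can use the
ordinary arch_local_irq_restore(). The system-mask bits spelled out, with
hypothetical macro names (only the magic numbers appear in the actual code;
z/Architecture bit layout assumed):

    #define PSW_SM_DAT  0x04   /* bit 5: dynamic address translation */
    #define PSW_SM_IO   0x02   /* bit 6: I/O interrupt mask */
    #define PSW_SM_EXT  0x01   /* bit 7: external interrupt mask */

    /* 0xfc == (u8)~(PSW_SM_IO | PSW_SM_EXT): interrupts off, DAT kept --
     *         this is arch_local_irq_save().
     * 0xf8 == (u8)~(PSW_SM_DAT | PSW_SM_IO | PSW_SM_EXT): interrupts off
     *         and DAT off, i.e. the CPU addresses real storage. */
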
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 22cfd634c35..f7167ee4604 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -407,10 +407,9 @@ int module_finalize(const Elf_Ehdr *hdr,
{
vfree(me->arch.syminfo);
me->arch.syminfo = NULL;
- return module_bug_finalize(hdr, sechdrs, me);
+ return 0;
}
void module_arch_cleanup(struct module *mod)
{
- module_bug_cleanup(mod);
}
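
Note: the dropped calls are not lost -- a companion change moves BUG-table
registration into the generic module loader, which fixes a list-corruption race by
doing the registration exactly once under the module mutex. Roughly the generic side
(a sketch; finalize_module() is a hypothetical wrapper name, the real code lives
inline in kernel/module.c's load path):

    #include <linux/bug.h>
    #include <linux/module.h>

    static int finalize_module(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
                               struct module *mod)
    {
            int err = module_finalize(hdr, sechdrs, mod);   /* arch hook */

            if (err)
                    return err;
            module_bug_finalize(hdr, sechdrs, mod);         /* generic, once */
            return 0;
    }
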
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index bcef00766a6..13559c99384 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -57,8 +57,8 @@ struct tl_info {
union tl_entry tle[0];
};
-struct core_info {
- struct core_info *next;
+struct mask_info {
+ struct mask_info *next;
unsigned char id;
cpumask_t mask;
};
@@ -66,7 +66,6 @@ struct core_info {
static int topology_enabled;
static void topology_work_fn(struct work_struct *work);
static struct tl_info *tl_info;
-static struct core_info core_info;
static int machine_has_topology;
static struct timer_list topology_timer;
static void set_topology_timer(void);
@@ -74,38 +73,37 @@ static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);
+static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];
-static cpumask_t cpu_coregroup_map(unsigned int cpu)
+#ifdef CONFIG_SCHED_BOOK
+static struct mask_info book_info;
+cpumask_t cpu_book_map[NR_CPUS];
+unsigned char cpu_book_id[NR_CPUS];
+#endif
+
+static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
- struct core_info *core = &core_info;
- unsigned long flags;
cpumask_t mask;
cpus_clear(mask);
if (!topology_enabled || !machine_has_topology)
return cpu_possible_map;
- spin_lock_irqsave(&topology_lock, flags);
- while (core) {
- if (cpu_isset(cpu, core->mask)) {
- mask = core->mask;
+ while (info) {
+ if (cpu_isset(cpu, info->mask)) {
+ mask = info->mask;
break;
}
- core = core->next;
+ info = info->next;
}
- spin_unlock_irqrestore(&topology_lock, flags);
if (cpus_empty(mask))
mask = cpumask_of_cpu(cpu);
return mask;
}
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
-{
- return &cpu_core_map[cpu];
-}
-
-static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
+static void add_cpus_to_mask(struct tl_cpu *tl_cpu, struct mask_info *book,
+ struct mask_info *core)
{
unsigned int cpu;
@@ -117,23 +115,35 @@ static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
for_each_present_cpu(lcpu) {
- if (cpu_logical_map(lcpu) == rcpu) {
- cpu_set(lcpu, core->mask);
- cpu_core_id[lcpu] = core->id;
- smp_cpu_polarization[lcpu] = tl_cpu->pp;
- }
+ if (cpu_logical_map(lcpu) != rcpu)
+ continue;
+#ifdef CONFIG_SCHED_BOOK
+ cpu_set(lcpu, book->mask);
+ cpu_book_id[lcpu] = book->id;
+#endif
+ cpu_set(lcpu, core->mask);
+ cpu_core_id[lcpu] = core->id;
+ smp_cpu_polarization[lcpu] = tl_cpu->pp;
}
}
}
-static void clear_cores(void)
+static void clear_masks(void)
{
- struct core_info *core = &core_info;
+ struct mask_info *info;
- while (core) {
- cpus_clear(core->mask);
- core = core->next;
+ info = &core_info;
+ while (info) {
+ cpus_clear(info->mask);
+ info = info->next;
+ }
+#ifdef CONFIG_SCHED_BOOK
+ info = &book_info;
+ while (info) {
+ cpus_clear(info->mask);
+ info = info->next;
}
+#endif
}
static union tl_entry *next_tle(union tl_entry *tle)
@@ -146,29 +156,36 @@ static union tl_entry *next_tle(union tl_entry *tle)
static void tl_to_cores(struct tl_info *info)
{
+#ifdef CONFIG_SCHED_BOOK
+ struct mask_info *book = &book_info;
+#else
+ struct mask_info *book = NULL;
+#endif
+ struct mask_info *core = &core_info;
union tl_entry *tle, *end;
- struct core_info *core = &core_info;
+
spin_lock_irq(&topology_lock);
- clear_cores();
+ clear_masks();
tle = info->tle;
end = (union tl_entry *)((unsigned long)info + info->length);
while (tle < end) {
switch (tle->nl) {
- case 5:
- case 4:
- case 3:
+#ifdef CONFIG_SCHED_BOOK
case 2:
+ book = book->next;
+ book->id = tle->container.id;
break;
+#endif
case 1:
core = core->next;
core->id = tle->container.id;
break;
case 0:
- add_cpus_to_core(&tle->cpu, core);
+ add_cpus_to_mask(&tle->cpu, book, core);
break;
default:
- clear_cores();
+ clear_masks();
machine_has_topology = 0;
goto out;
}
@@ -221,10 +238,29 @@ int topology_set_cpu_management(int fc)
static void update_cpu_core_map(void)
{
+ unsigned long flags;
int cpu;
- for_each_possible_cpu(cpu)
- cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+ spin_lock_irqsave(&topology_lock, flags);
+ for_each_possible_cpu(cpu) {
+ cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
+#ifdef CONFIG_SCHED_BOOK
+ cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
+#endif
+ }
+ spin_unlock_irqrestore(&topology_lock, flags);
+}
+
+static void store_topology(struct tl_info *info)
+{
+#ifdef CONFIG_SCHED_BOOK
+ int rc;
+
+ rc = stsi(info, 15, 1, 3);
+ if (rc != -ENOSYS)
+ return;
+#endif
+ stsi(info, 15, 1, 2);
}
int arch_update_cpu_topology(void)
@@ -238,7 +274,7 @@ int arch_update_cpu_topology(void)
topology_update_polarization_simple();
return 0;
}
- stsi(info, 15, 1, 2);
+ store_topology(info);
tl_to_cores(info);
update_cpu_core_map();
for_each_online_cpu(cpu) {
@@ -299,12 +335,24 @@ out:
}
__initcall(init_topology_update);
+static void alloc_masks(struct tl_info *info, struct mask_info *mask, int offset)
+{
+ int i, nr_masks;
+
+ nr_masks = info->mag[NR_MAG - offset];
+ for (i = 0; i < info->mnest - offset; i++)
+ nr_masks *= info->mag[NR_MAG - offset - 1 - i];
+ nr_masks = max(nr_masks, 1);
+ for (i = 0; i < nr_masks; i++) {
+ mask->next = alloc_bootmem(sizeof(struct mask_info));
+ mask = mask->next;
+ }
+}
+
void __init s390_init_cpu_topology(void)
{
unsigned long long facility_bits;
struct tl_info *info;
- struct core_info *core;
- int nr_cores;
int i;
if (stfle(&facility_bits, 1) <= 0)
@@ -315,25 +363,13 @@ void __init s390_init_cpu_topology(void)
tl_info = alloc_bootmem_pages(PAGE_SIZE);
info = tl_info;
- stsi(info, 15, 1, 2);
-
- nr_cores = info->mag[NR_MAG - 2];
- for (i = 0; i < info->mnest - 2; i++)
- nr_cores *= info->mag[NR_MAG - 3 - i];
-
+ store_topology(info);
pr_info("The CPU configuration topology of the machine is:");
for (i = 0; i < NR_MAG; i++)
printk(" %d", info->mag[i]);
printk(" / %d\n", info->mnest);
-
- core = &core_info;
- for (i = 0; i < nr_cores; i++) {
- core->next = alloc_bootmem(sizeof(struct core_info));
- core = core->next;
- if (!core)
- goto error;
- }
- return;
-error:
- machine_has_topology = 0;
+ alloc_masks(info, &core_info, 2);
+#ifdef CONFIG_SCHED_BOOK
+ alloc_masks(info, &book_info, 3);
+#endif
}
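
Note: the interesting arithmetic above is in alloc_masks(). info->mag[] holds the
container magnitudes reported by stsi, with mag[NR_MAG - 1] the innermost level, so
the number of masks to preallocate at a level is that level's magnitude multiplied by
every magnitude above it, up to the reported nesting depth mnest. A standalone
re-implementation to check the numbers on a hypothetical machine (userspace C, values
invented for illustration):

    #include <stdio.h>

    #define NR_MAG 6

    /* Same computation as alloc_masks(); offset 2 counts cores,
     * offset 3 counts books. */
    static int nr_masks(const int mag[NR_MAG], int mnest, int offset)
    {
            int i, n = mag[NR_MAG - offset];

            for (i = 0; i < mnest - offset; i++)
                    n *= mag[NR_MAG - offset - 1 - i];
            return n > 1 ? n : 1;
    }

    int main(void)
    {
            /* Hypothetical box: mnest = 3, 4 books, 6 cores per book,
             * 8 CPUs per core. */
            int mag[NR_MAG] = { 0, 0, 0, 4, 6, 8 };

            printf("core masks: %d\n", nr_masks(mag, 3, 2)); /* 6 * 4 = 24 */
            printf("book masks: %d\n", nr_masks(mag, 3, 3)); /* 4 */
            return 0;
    }
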
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 30eb6d02ddb..94b8ba2ec85 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -50,7 +50,6 @@ EXPORT_SYMBOL(empty_zero_page);
*/
void __init paging_init(void)
{
- static const int ssm_mask = 0x04000000L;
unsigned long max_zone_pfns[MAX_NR_ZONES];
unsigned long pgd_type;
@@ -72,7 +71,7 @@ void __init paging_init(void)
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
__ctl_load(S390_lowcore.kernel_asce, 7, 7);
__ctl_load(S390_lowcore.kernel_asce, 13, 13);
- __raw_local_irq_ssm(ssm_mask);
+ arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
atomic_set(&init_mm.context.attach_count, 1);
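
Note: the replaced constant is the same DAT-enable mask, just expressed in the
irqflags format the rest of this series standardizes on: the PSW system-mask byte
lives in the top 8 bits of an unsigned long (compare arch_irqs_disabled_flags()
testing 3UL << (BITS_PER_LONG - 8)). The old 0x04000000L literal relied on the
operand being a 32-bit int so that its first big-endian byte was 0x04; the shift form
places that byte at the top of an unsigned long, matching what
arch_local_irq_restore() and the ssm instruction expect. A quick standalone check
(plain C sketch):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t dat31 = UINT32_C(4) << (32 - 8);  /* 0x04000000: old literal */
            uint64_t dat64 = UINT64_C(4) << (64 - 8);  /* DAT bit, 64-bit flags */

            printf("%#" PRIx32 " %#" PRIx64 "\n", dat31, dat64);
            return 0;
    }
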
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index a8c2af8c650..71a4b0d34be 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -71,7 +71,7 @@ int memcpy_real(void *dest, void *src, size_t count)
if (!count)
return 0;
- flags = __raw_local_irq_stnsm(0xf8UL);
+ flags = __arch_local_irq_stnsm(0xf8UL);
asm volatile (
"0: mvcle %1,%2,0x0\n"
"1: jo 0b\n"
@@ -82,6 +82,6 @@ int memcpy_real(void *dest, void *src, size_t count)
"+d" (_len2), "=m" (*((long *) dest))
: "m" (*((long *) src))
: "cc", "memory");
- __raw_local_irq_ssm(flags);
+ arch_local_irq_restore(flags);
return rc;
}