path: root/arch/s390/kernel/topology.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-09 08:11:13 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-09 08:11:13 -0800
commit	72f318897e50c29b91efd1ed24515a93c138a2ba (patch)
tree	7e7ef8138d5afacd1be4655e4458dc4cee432d1e /arch/s390/kernel/topology.c
parent	a0e86bd4252519321b0d102dc4ed90557aa7bee9 (diff)
parent	2fa1d4fce599809e6bd7d95756709a5faef30710 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (31 commits)
  [S390] disassembler: mark exception causing instructions
  [S390] Enable exception traces by default
  [S390] return address of compat signals
  [S390] sysctl: get rid of dead declaration
  [S390] dasd: fix fixpoint divide exception in define_extent
  [S390] dasd: add sanity check to detect path connection error
  [S390] qdio: fix kernel panic for zfcp 31-bit
  [S390] Add s390x description to Documentation/kdump/kdump.txt
  [S390] Add VMCOREINFO_SYMBOL(high_memory) to vmcoreinfo
  [S390] dasd: fix expiration handling for recovery requests
  [S390] outstanding interrupts vs. smp_send_stop
  [S390] ipc: call generic sys_ipc demultiplexer
  [S390] zcrypt: Fix error return codes.
  [S390] zcrypt: Rework length parameter checking.
  [S390] cleanup trap handling
  [S390] Remove Kerntypes leftovers
  [S390] topology: increase poll frequency if change is anticipated
  [S390] entry[64].S improvements
  [S390] make arch/s390 subdirectories depend on config option
  [S390] kvm: move cmf host id constant out of lowcore
  ...

Fix up conflicts in arch/s390/kernel/{smp.c,topology.c} due to the sysdev
removal clashing with "topology: get rid of ifdefs" which moved some of
that code around.
Diffstat (limited to 'arch/s390/kernel/topology.c')
-rw-r--r--	arch/s390/kernel/topology.c	275
1 file changed, 187 insertions(+), 88 deletions(-)
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 6e0e29b29a7..7370a41948c 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -1,22 +1,22 @@
/*
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007,2011
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/device.h>
+#include <linux/workqueue.h>
#include <linux/bootmem.h>
+#include <linux/cpuset.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/workqueue.h>
+#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
-#include <linux/cpuset.h>
-#include <asm/delay.h>
+#include <linux/mm.h>
#define PTF_HORIZONTAL (0UL)
#define PTF_VERTICAL (1UL)
@@ -31,7 +31,6 @@ struct mask_info {
static int topology_enabled = 1;
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
-static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
@@ -41,11 +40,12 @@ static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];
-#ifdef CONFIG_SCHED_BOOK
static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS];
-#endif
+
+/* smp_cpu_state_mutex must be held when accessing this array */
+int cpu_polarization[NR_CPUS];
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
@@ -71,7 +71,7 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
struct mask_info *book,
struct mask_info *core,
- int z10)
+ int one_core_per_cpu)
{
unsigned int cpu;
@@ -85,18 +85,16 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
for_each_present_cpu(lcpu) {
if (cpu_logical_map(lcpu) != rcpu)
continue;
-#ifdef CONFIG_SCHED_BOOK
cpumask_set_cpu(lcpu, &book->mask);
cpu_book_id[lcpu] = book->id;
-#endif
cpumask_set_cpu(lcpu, &core->mask);
- if (z10) {
+ if (one_core_per_cpu) {
cpu_core_id[lcpu] = rcpu;
core = core->next;
} else {
cpu_core_id[lcpu] = core->id;
}
- smp_cpu_polarization[lcpu] = tl_cpu->pp;
+ cpu_set_polarization(lcpu, tl_cpu->pp);
}
}
return core;
@@ -111,13 +109,11 @@ static void clear_masks(void)
cpumask_clear(&info->mask);
info = info->next;
}
-#ifdef CONFIG_SCHED_BOOK
info = &book_info;
while (info) {
cpumask_clear(&info->mask);
info = info->next;
}
-#endif
}
static union topology_entry *next_tle(union topology_entry *tle)
@@ -127,66 +123,75 @@ static union topology_entry *next_tle(union topology_entry *tle)
return (union topology_entry *)((struct topology_container *)tle + 1);
}
-static void tl_to_cores(struct sysinfo_15_1_x *info)
+static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
{
-#ifdef CONFIG_SCHED_BOOK
- struct mask_info *book = &book_info;
- struct cpuid cpu_id;
-#else
- struct mask_info *book = NULL;
-#endif
struct mask_info *core = &core_info;
+ struct mask_info *book = &book_info;
union topology_entry *tle, *end;
- int z10 = 0;
-#ifdef CONFIG_SCHED_BOOK
- get_cpu_id(&cpu_id);
- z10 = cpu_id.machine == 0x2097 || cpu_id.machine == 0x2098;
-#endif
- spin_lock_irq(&topology_lock);
- clear_masks();
tle = info->tle;
end = (union topology_entry *)((unsigned long)info + info->length);
while (tle < end) {
-#ifdef CONFIG_SCHED_BOOK
- if (z10) {
- switch (tle->nl) {
- case 1:
- book = book->next;
- book->id = tle->container.id;
- break;
- case 0:
- core = add_cpus_to_mask(&tle->cpu, book, core, z10);
- break;
- default:
- clear_masks();
- goto out;
- }
- tle = next_tle(tle);
- continue;
- }
-#endif
switch (tle->nl) {
-#ifdef CONFIG_SCHED_BOOK
case 2:
book = book->next;
book->id = tle->container.id;
break;
-#endif
case 1:
core = core->next;
core->id = tle->container.id;
break;
case 0:
- add_cpus_to_mask(&tle->cpu, book, core, z10);
+ add_cpus_to_mask(&tle->cpu, book, core, 0);
break;
default:
clear_masks();
- goto out;
+ return;
}
tle = next_tle(tle);
}
-out:
+}
+
+static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
+{
+ struct mask_info *core = &core_info;
+ struct mask_info *book = &book_info;
+ union topology_entry *tle, *end;
+
+ tle = info->tle;
+ end = (union topology_entry *)((unsigned long)info + info->length);
+ while (tle < end) {
+ switch (tle->nl) {
+ case 1:
+ book = book->next;
+ book->id = tle->container.id;
+ break;
+ case 0:
+ core = add_cpus_to_mask(&tle->cpu, book, core, 1);
+ break;
+ default:
+ clear_masks();
+ return;
+ }
+ tle = next_tle(tle);
+ }
+}
+
+static void tl_to_cores(struct sysinfo_15_1_x *info)
+{
+ struct cpuid cpu_id;
+
+ get_cpu_id(&cpu_id);
+ spin_lock_irq(&topology_lock);
+ clear_masks();
+ switch (cpu_id.machine) {
+ case 0x2097:
+ case 0x2098:
+ __tl_to_cores_z10(info);
+ break;
+ default:
+ __tl_to_cores_generic(info);
+ }
spin_unlock_irq(&topology_lock);
}
@@ -196,7 +201,7 @@ static void topology_update_polarization_simple(void)
mutex_lock(&smp_cpu_state_mutex);
for_each_possible_cpu(cpu)
- smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
+ cpu_set_polarization(cpu, POLARIZATION_HRZ);
mutex_unlock(&smp_cpu_state_mutex);
}
@@ -215,8 +220,7 @@ static int ptf(unsigned long fc)
int topology_set_cpu_management(int fc)
{
- int cpu;
- int rc;
+ int cpu, rc;
if (!MACHINE_HAS_TOPOLOGY)
return -EOPNOTSUPP;
@@ -227,7 +231,7 @@ int topology_set_cpu_management(int fc)
if (rc)
return -EBUSY;
for_each_possible_cpu(cpu)
- smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+ cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
return rc;
}
@@ -239,22 +243,18 @@ static void update_cpu_core_map(void)
spin_lock_irqsave(&topology_lock, flags);
for_each_possible_cpu(cpu) {
cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
-#ifdef CONFIG_SCHED_BOOK
cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
-#endif
}
spin_unlock_irqrestore(&topology_lock, flags);
}
void store_topology(struct sysinfo_15_1_x *info)
{
-#ifdef CONFIG_SCHED_BOOK
int rc;
rc = stsi(info, 15, 1, 3);
if (rc != -ENOSYS)
return;
-#endif
stsi(info, 15, 1, 2);
}
@@ -296,12 +296,30 @@ static void topology_timer_fn(unsigned long ignored)
set_topology_timer();
}
+static struct timer_list topology_timer =
+ TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);
+
+static atomic_t topology_poll = ATOMIC_INIT(0);
+
static void set_topology_timer(void)
{
- topology_timer.function = topology_timer_fn;
- topology_timer.data = 0;
- topology_timer.expires = jiffies + 60 * HZ;
- add_timer(&topology_timer);
+ if (atomic_add_unless(&topology_poll, -1, 0))
+ mod_timer(&topology_timer, jiffies + HZ / 10);
+ else
+ mod_timer(&topology_timer, jiffies + HZ * 60);
+}
+
+void topology_expect_change(void)
+{
+ if (!MACHINE_HAS_TOPOLOGY)
+ return;
+ /* This is racy, but it doesn't matter since it is just a heuristic.
+ * Worst case is that we poll in a higher frequency for a bit longer.
+ */
+ if (atomic_read(&topology_poll) > 60)
+ return;
+ atomic_add(60, &topology_poll);
+ set_topology_timer();
}
static int __init early_parse_topology(char *p)
@@ -313,23 +331,6 @@ static int __init early_parse_topology(char *p)
}
early_param("topology", early_parse_topology);
-static int __init init_topology_update(void)
-{
- int rc;
-
- rc = 0;
- if (!MACHINE_HAS_TOPOLOGY) {
- topology_update_polarization_simple();
- goto out;
- }
- init_timer_deferrable(&topology_timer);
- set_topology_timer();
-out:
- update_cpu_core_map();
- return rc;
-}
-__initcall(init_topology_update);
-
static void __init alloc_masks(struct sysinfo_15_1_x *info,
struct mask_info *mask, int offset)
{
@@ -357,10 +358,108 @@ void __init s390_init_cpu_topology(void)
store_topology(info);
pr_info("The CPU configuration topology of the machine is:");
for (i = 0; i < TOPOLOGY_NR_MAG; i++)
- printk(" %d", info->mag[i]);
- printk(" / %d\n", info->mnest);
+ printk(KERN_CONT " %d", info->mag[i]);
+ printk(KERN_CONT " / %d\n", info->mnest);
alloc_masks(info, &core_info, 1);
-#ifdef CONFIG_SCHED_BOOK
alloc_masks(info, &book_info, 2);
-#endif
}
+
+static int cpu_management;
+
+static ssize_t dispatching_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t count;
+
+ mutex_lock(&smp_cpu_state_mutex);
+ count = sprintf(buf, "%d\n", cpu_management);
+ mutex_unlock(&smp_cpu_state_mutex);
+ return count;
+}
+
+static ssize_t dispatching_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int val, rc;
+ char delim;
+
+ if (sscanf(buf, "%d %c", &val, &delim) != 1)
+ return -EINVAL;
+ if (val != 0 && val != 1)
+ return -EINVAL;
+ rc = 0;
+ get_online_cpus();
+ mutex_lock(&smp_cpu_state_mutex);
+ if (cpu_management == val)
+ goto out;
+ rc = topology_set_cpu_management(val);
+ if (rc)
+ goto out;
+ cpu_management = val;
+ topology_expect_change();
+out:
+ mutex_unlock(&smp_cpu_state_mutex);
+ put_online_cpus();
+ return rc ? rc : count;
+}
+static DEVICE_ATTR(dispatching, 0644, dispatching_show,
+ dispatching_store);
+
+static ssize_t cpu_polarization_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int cpu = dev->id;
+ ssize_t count;
+
+ mutex_lock(&smp_cpu_state_mutex);
+ switch (cpu_read_polarization(cpu)) {
+ case POLARIZATION_HRZ:
+ count = sprintf(buf, "horizontal\n");
+ break;
+ case POLARIZATION_VL:
+ count = sprintf(buf, "vertical:low\n");
+ break;
+ case POLARIZATION_VM:
+ count = sprintf(buf, "vertical:medium\n");
+ break;
+ case POLARIZATION_VH:
+ count = sprintf(buf, "vertical:high\n");
+ break;
+ default:
+ count = sprintf(buf, "unknown\n");
+ break;
+ }
+ mutex_unlock(&smp_cpu_state_mutex);
+ return count;
+}
+static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);
+
+static struct attribute *topology_cpu_attrs[] = {
+ &dev_attr_polarization.attr,
+ NULL,
+};
+
+static struct attribute_group topology_cpu_attr_group = {
+ .attrs = topology_cpu_attrs,
+};
+
+int topology_cpu_init(struct cpu *cpu)
+{
+ return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
+}
+
+static int __init topology_init(void)
+{
+ if (!MACHINE_HAS_TOPOLOGY) {
+ topology_update_polarization_simple();
+ goto out;
+ }
+ set_topology_timer();
+out:
+ update_cpu_core_map();
+ return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
+}
+device_initcall(topology_init);
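
Note on the adaptive polling added by "[S390] topology: increase poll frequency
if change is anticipated" (see the set_topology_timer()/topology_expect_change()
hunk above): the deferrable timer is re-armed at HZ/10 as long as the atomic
budget topology_poll is non-zero (decremented via atomic_add_unless), and falls
back to a 60 second interval otherwise; topology_expect_change() tops the budget
up by 60, capped so the fast-poll burst stays bounded. The following is a minimal,
hypothetical user-space sketch of that decrement-or-slow-down pattern, not part
of the patch; the names (poll_budget, next_poll_delay, expect_change, FAST_MS,
SLOW_MS) are invented for illustration.

/* Illustrative sketch only; mirrors the heuristic, not the kernel API. */
#include <stdatomic.h>
#include <stdio.h>

#define FAST_MS		100	/* corresponds to HZ / 10 in the patch */
#define SLOW_MS		60000	/* corresponds to HZ * 60 */

static atomic_int poll_budget = 0;	/* plays the role of topology_poll */

/* Poll fast while the budget lasts, otherwise fall back to the slow interval. */
static int next_poll_delay(void)
{
	int old = atomic_load(&poll_budget);

	/* Decrement unless already zero (like atomic_add_unless(.., -1, 0)). */
	while (old > 0 &&
	       !atomic_compare_exchange_weak(&poll_budget, &old, old - 1))
		;	/* CAS failure reloads 'old'; retry */
	return old > 0 ? FAST_MS : SLOW_MS;
}

/* Called when a topology change is anticipated; the cap bounds the burst. */
static void expect_change(void)
{
	if (atomic_load(&poll_budget) > 60)
		return;
	atomic_fetch_add(&poll_budget, 60);
}

int main(void)
{
	printf("delay before anticipated change: %d ms\n", next_poll_delay());
	expect_change();
	printf("delay after anticipated change:  %d ms\n", next_poll_delay());
	return 0;
}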