path: root/arch/s390/kernel
author    Paul Mackerras <paulus@samba.org>  2008-01-31 11:25:51 +1100
committer Paul Mackerras <paulus@samba.org>  2008-01-31 11:25:51 +1100
commit    bd45ac0c5daae35e7c71138172e63df5cf644cf6 (patch)
tree      5eb5a599bf6a9d7a8a34e802db932aa9e9555de4 /arch/s390/kernel
parent    4eece4ccf997c0e6d8fdad3d842e37b16b8d705f (diff)
parent    5bdeae46be6dfe9efa44a548bd622af325f4bdb4 (diff)
Merge branch 'linux-2.6'
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/Makefile           4
-rw-r--r--  arch/s390/kernel/early.c            2
-rw-r--r--  arch/s390/kernel/head64.S           2
-rw-r--r--  arch/s390/kernel/ipl.c           1035
-rw-r--r--  arch/s390/kernel/process.c         18
-rw-r--r--  arch/s390/kernel/ptrace.c          15
-rw-r--r--  arch/s390/kernel/setup.c          139
-rw-r--r--  arch/s390/kernel/signal.c          20
-rw-r--r--  arch/s390/kernel/smp.c            575
-rw-r--r--  arch/s390/kernel/time.c             2
-rw-r--r--  arch/s390/kernel/traps.c           20
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S     20
12 files changed, 1177 insertions, 675 deletions
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 56cb71007cd..b3b650a93c7 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -31,7 +31,3 @@ S390_KEXEC_OBJS := machine_kexec.o crash.o
S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS)
-#
-# This is just to get the dependencies...
-#
-binfmt_elf32.o: $(TOPDIR)/fs/binfmt_elf.c
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 1b3af7dab81..9f7b73b180f 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -276,7 +276,7 @@ void __init startup_init(void)
create_kernel_nss();
sort_main_extable();
setup_lowcore_early();
- sclp_readinfo_early();
+ sclp_read_info_early();
sclp_facilities_detect();
memsize = sclp_memory_detect();
#ifndef CONFIG_64BIT
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index a87b1976d40..79dccd206a6 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -157,7 +157,7 @@ startup_continue:
.long 0xb2b10000 # store facility list
tm 0xc8,0x08 # check bit for clearing-by-ASCE
bno 0f-.LPG1(%r13)
- lhi %r1,2094
+ lhi %r1,2048
lhi %r2,0
.long 0xb98e2001
oi 7(%r12),0x80 # set IDTE flag
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index ce0856d3250..db28cca81fe 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2,7 +2,7 @@
* arch/s390/kernel/ipl.c
* ipl/reipl/dump support for Linux on s390.
*
- * Copyright (C) IBM Corp. 2005,2006
+ * Copyright IBM Corp. 2005,2007
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
* Heiko Carstens <heiko.carstens@de.ibm.com>
* Volker Sameske <sameske@de.ibm.com>
@@ -31,6 +31,43 @@
#define IPL_FCP_DUMP_STR "fcp_dump"
#define IPL_NSS_STR "nss"
+#define DUMP_CCW_STR "ccw"
+#define DUMP_FCP_STR "fcp"
+#define DUMP_NONE_STR "none"
+
+/*
+ * Four shutdown trigger types are supported:
+ * - panic
+ * - halt
+ * - power off
+ * - reipl
+ */
+#define ON_PANIC_STR "on_panic"
+#define ON_HALT_STR "on_halt"
+#define ON_POFF_STR "on_poff"
+#define ON_REIPL_STR "on_reboot"
+
+struct shutdown_action;
+struct shutdown_trigger {
+ char *name;
+ struct shutdown_action *action;
+};
+
+/*
+ * Five shutdown action types are supported:
+ */
+#define SHUTDOWN_ACTION_IPL_STR "ipl"
+#define SHUTDOWN_ACTION_REIPL_STR "reipl"
+#define SHUTDOWN_ACTION_DUMP_STR "dump"
+#define SHUTDOWN_ACTION_VMCMD_STR "vmcmd"
+#define SHUTDOWN_ACTION_STOP_STR "stop"
+
+struct shutdown_action {
+ char *name;
+ void (*fn) (struct shutdown_trigger *trigger);
+ int (*init) (void);
+};
+
static char *ipl_type_str(enum ipl_type type)
{
switch (type) {
@@ -54,10 +91,6 @@ enum dump_type {
DUMP_TYPE_FCP = 4,
};
-#define DUMP_NONE_STR "none"
-#define DUMP_CCW_STR "ccw"
-#define DUMP_FCP_STR "fcp"
-
static char *dump_type_str(enum dump_type type)
{
switch (type) {
@@ -99,30 +132,6 @@ enum dump_method {
DUMP_METHOD_FCP_DIAG,
};
-enum shutdown_action {
- SHUTDOWN_REIPL,
- SHUTDOWN_DUMP,
- SHUTDOWN_STOP,
-};
-
-#define SHUTDOWN_REIPL_STR "reipl"
-#define SHUTDOWN_DUMP_STR "dump"
-#define SHUTDOWN_STOP_STR "stop"
-
-static char *shutdown_action_str(enum shutdown_action action)
-{
- switch (action) {
- case SHUTDOWN_REIPL:
- return SHUTDOWN_REIPL_STR;
- case SHUTDOWN_DUMP:
- return SHUTDOWN_DUMP_STR;
- case SHUTDOWN_STOP:
- return SHUTDOWN_STOP_STR;
- default:
- return NULL;
- }
-}
-
static int diag308_set_works = 0;
static int reipl_capabilities = IPL_TYPE_UNKNOWN;
@@ -140,8 +149,6 @@ static enum dump_method dump_method = DUMP_METHOD_NONE;
static struct ipl_parameter_block *dump_block_fcp;
static struct ipl_parameter_block *dump_block_ccw;
-static enum shutdown_action on_panic_action = SHUTDOWN_STOP;
-
static struct sclp_ipl_info sclp_ipl_info;
int diag308(unsigned long subcode, void *addr)
@@ -162,22 +169,25 @@ EXPORT_SYMBOL_GPL(diag308);
/* SYSFS */
#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \
-static ssize_t sys_##_prefix##_##_name##_show(struct kset *kset, \
+static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
char *page) \
{ \
return sprintf(page, _format, _value); \
} \
-static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
+static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
__ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL);
#define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value) \
-static ssize_t sys_##_prefix##_##_name##_show(struct kset *kset, \
+static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
char *page) \
{ \
return sprintf(page, _fmt_out, \
(unsigned long long) _value); \
} \
-static ssize_t sys_##_prefix##_##_name##_store(struct kset *kset, \
+static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
const char *buf, size_t len) \
{ \
unsigned long long value; \
@@ -186,25 +196,27 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kset *kset, \
_value = value; \
return len; \
} \
-static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
+static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
__ATTR(_name,(S_IRUGO | S_IWUSR), \
sys_##_prefix##_##_name##_show, \
sys_##_prefix##_##_name##_store);
#define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\
-static ssize_t sys_##_prefix##_##_name##_show(struct kset *kset, \
+static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
char *page) \
{ \
return sprintf(page, _fmt_out, _value); \
} \
-static ssize_t sys_##_prefix##_##_name##_store(struct kset *kset, \
+static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
const char *buf, size_t len) \
{ \
- if (sscanf(buf, _fmt_in, _value) != 1) \
- return -EINVAL; \
+ strncpy(_value, buf, sizeof(_value) - 1); \
+ strstrip(_value); \
return len; \
} \
-static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
+static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
__ATTR(_name,(S_IRUGO | S_IWUSR), \
sys_##_prefix##_##_name##_show, \
sys_##_prefix##_##_name##_store);
@@ -240,44 +252,19 @@ static __init enum ipl_type get_ipl_type(void)
return IPL_TYPE_FCP;
}
-void __init setup_ipl_info(void)
-{
- ipl_info.type = get_ipl_type();
- switch (ipl_info.type) {
- case IPL_TYPE_CCW:
- ipl_info.data.ccw.dev_id.devno = ipl_devno;
- ipl_info.data.ccw.dev_id.ssid = 0;
- break;
- case IPL_TYPE_FCP:
- case IPL_TYPE_FCP_DUMP:
- ipl_info.data.fcp.dev_id.devno =
- IPL_PARMBLOCK_START->ipl_info.fcp.devno;
- ipl_info.data.fcp.dev_id.ssid = 0;
- ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
- ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
- break;
- case IPL_TYPE_NSS:
- strncpy(ipl_info.data.nss.name, kernel_nss_name,
- sizeof(ipl_info.data.nss.name));
- break;
- case IPL_TYPE_UNKNOWN:
- default:
- /* We have no info to copy */
- break;
- }
-}
-
struct ipl_info ipl_info;
EXPORT_SYMBOL_GPL(ipl_info);
-static ssize_t ipl_type_show(struct kset *kset, char *page)
+static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *page)
{
return sprintf(page, "%s\n", ipl_type_str(ipl_info.type));
}
-static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
+static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
-static ssize_t sys_ipl_device_show(struct kset *kset, char *page)
+static ssize_t sys_ipl_device_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
{
struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
@@ -292,7 +279,7 @@ static ssize_t sys_ipl_device_show(struct kset *kset, char *page)
}
}
-static struct subsys_attribute sys_ipl_device_attr =
+static struct kobj_attribute sys_ipl_device_attr =
__ATTR(device, S_IRUGO, sys_ipl_device_show, NULL);
static ssize_t ipl_parameter_read(struct kobject *kobj, struct bin_attribute *attr,
@@ -367,7 +354,8 @@ static struct attribute_group ipl_fcp_attr_group = {
/* CCW ipl device attributes */
-static ssize_t ipl_ccw_loadparm_show(struct kset *kset, char *page)
+static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
{
char loadparm[LOADPARM_LEN + 1] = {};
@@ -379,7 +367,7 @@ static ssize_t ipl_ccw_loadparm_show(struct kset *kset, char *page)
return sprintf(page, "%s\n", loadparm);
}
-static struct subsys_attribute sys_ipl_ccw_loadparm_attr =
+static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
__ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
static struct attribute *ipl_ccw_attrs[] = {
@@ -418,10 +406,76 @@ static struct attribute_group ipl_unknown_attr_group = {
.attrs = ipl_unknown_attrs,
};
-static decl_subsys(ipl, NULL, NULL);
+static struct kset *ipl_kset;
+
+static int __init ipl_register_fcp_files(void)
+{
+ int rc;
+
+ rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
+ if (rc)
+ goto out;
+ rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
+ if (rc)
+ goto out_ipl_parm;
+ rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_scp_data_attr);
+ if (!rc)
+ goto out;
+
+ sysfs_remove_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
+
+out_ipl_parm:
+ sysfs_remove_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
+out:
+ return rc;
+}
+
+static void ipl_run(struct shutdown_trigger *trigger)
+{
+ diag308(DIAG308_IPL, NULL);
+ if (MACHINE_IS_VM)
+ __cpcmd("IPL", NULL, 0, NULL);
+ else if (ipl_info.type == IPL_TYPE_CCW)
+ reipl_ccw_dev(&ipl_info.data.ccw.dev_id);
+}
+
+static int ipl_init(void)
+{
+ int rc;
+
+ ipl_kset = kset_create_and_add("ipl", NULL, firmware_kobj);
+ if (!ipl_kset) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ switch (ipl_info.type) {
+ case IPL_TYPE_CCW:
+ rc = sysfs_create_group(&ipl_kset->kobj, &ipl_ccw_attr_group);
+ break;
+ case IPL_TYPE_FCP:
+ case IPL_TYPE_FCP_DUMP:
+ rc = ipl_register_fcp_files();
+ break;
+ case IPL_TYPE_NSS:
+ rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group);
+ break;
+ default:
+ rc = sysfs_create_group(&ipl_kset->kobj,
+ &ipl_unknown_attr_group);
+ break;
+ }
+out:
+ if (rc)
+ panic("ipl_init failed: rc = %i\n", rc);
+
+ return 0;
+}
+
+static struct shutdown_action ipl_action = {SHUTDOWN_ACTION_IPL_STR, ipl_run,
+ ipl_init};
/*
- * reipl section
+ * reipl shutdown action: Reboot Linux on shutdown.
*/
/* FCP reipl device attributes */
@@ -465,7 +519,8 @@ static void reipl_get_ascii_loadparm(char *loadparm)
strstrip(loadparm);
}
-static ssize_t reipl_ccw_loadparm_show(struct kset *kset, char *page)
+static ssize_t reipl_ccw_loadparm_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
{
char buf[LOADPARM_LEN + 1];
@@ -473,7 +528,8 @@ static ssize_t reipl_ccw_loadparm_show(struct kset *kset, char *page)
return sprintf(page, "%s\n", buf);
}
-static ssize_t reipl_ccw_loadparm_store(struct kset *kset,
+static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
const char *buf, size_t len)
{
int i, lp_len;
@@ -500,7 +556,7 @@ static ssize_t reipl_ccw_loadparm_store(struct kset *kset,
return len;
}
-static struct subsys_attribute sys_reipl_ccw_loadparm_attr =
+static struct kobj_attribute sys_reipl_ccw_loadparm_attr =
__ATTR(loadparm, 0644, reipl_ccw_loadparm_show,
reipl_ccw_loadparm_store);
@@ -539,7 +595,9 @@ static int reipl_set_type(enum ipl_type type)
switch(type) {
case IPL_TYPE_CCW:
- if (MACHINE_IS_VM)
+ if (diag308_set_works)
+ reipl_method = REIPL_METHOD_CCW_DIAG;
+ else if (MACHINE_IS_VM)
reipl_method = REIPL_METHOD_CCW_VM;
else
reipl_method = REIPL_METHOD_CCW_CIO;
@@ -568,13 +626,15 @@ static int reipl_set_type(enum ipl_type type)
return 0;
}
-static ssize_t reipl_type_show(struct kset *kset, char *page)
+static ssize_t reipl_type_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
{
return sprintf(page, "%s\n", ipl_type_str(reipl_type));
}
-static ssize_t reipl_type_store(struct kset *kset, const char *buf,
- size_t len)
+static ssize_t reipl_type_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t len)
{
int rc = -EINVAL;
@@ -587,140 +647,12 @@ static ssize_t reipl_type_store(struct kset *kset, const char *buf,
return (rc != 0) ? rc : len;
}
-static struct subsys_attribute reipl_type_attr =
- __ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
-
-static decl_subsys(reipl, NULL, NULL);
-
-/*
- * dump section
- */
-
-/* FCP dump device attributes */
-
-DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
- dump_block_fcp->ipl_info.fcp.wwpn);
-DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
- dump_block_fcp->ipl_info.fcp.lun);
-DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
- dump_block_fcp->ipl_info.fcp.bootprog);
-DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
- dump_block_fcp->ipl_info.fcp.br_lba);
-DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
- dump_block_fcp->ipl_info.fcp.devno);
-
-static struct attribute *dump_fcp_attrs[] = {
- &sys_dump_fcp_device_attr.attr,
- &sys_dump_fcp_wwpn_attr.attr,
- &sys_dump_fcp_lun_attr.attr,
- &sys_dump_fcp_bootprog_attr.attr,
- &sys_dump_fcp_br_lba_attr.attr,
- NULL,
-};
-
-static struct attribute_group dump_fcp_attr_group = {
- .name = IPL_FCP_STR,
- .attrs = dump_fcp_attrs,
-};
-
-/* CCW dump device attributes */
-
-DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
- dump_block_ccw->ipl_info.ccw.devno);
-
-static struct attribute *dump_ccw_attrs[] = {
- &sys_dump_ccw_device_attr.attr,
- NULL,
-};
-
-static struct attribute_group dump_ccw_attr_group = {
- .name = IPL_CCW_STR,
- .attrs = dump_ccw_attrs,
-};
-
-/* dump type */
-
-static int dump_set_type(enum dump_type type)
-{
- if (!(dump_capabilities & type))
- return -EINVAL;
- switch(type) {
- case DUMP_TYPE_CCW:
- if (MACHINE_IS_VM)
- dump_method = DUMP_METHOD_CCW_VM;
- else if (diag308_set_works)
- dump_method = DUMP_METHOD_CCW_DIAG;
- else
- dump_method = DUMP_METHOD_CCW_CIO;
- break;
- case DUMP_TYPE_FCP:
- dump_method = DUMP_METHOD_FCP_DIAG;
- break;
- default:
- dump_method = DUMP_METHOD_NONE;
- }
- dump_type = type;
- return 0;
-}
-
-static ssize_t dump_type_show(struct kset *kset, char *page)
-{
- return sprintf(page, "%s\n", dump_type_str(dump_type));
-}
-
-static ssize_t dump_type_store(struct kset *kset, const char *buf,
- size_t len)
-{
- int rc = -EINVAL;
-
- if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0)
- rc = dump_set_type(DUMP_TYPE_NONE);
- else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0)
- rc = dump_set_type(DUMP_TYPE_CCW);
- else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
- rc = dump_set_type(DUMP_TYPE_FCP);
- return (rc != 0) ? rc : len;
-}
-
-static struct subsys_attribute dump_type_attr =
- __ATTR(dump_type, 0644, dump_type_show, dump_type_store);
-
-static decl_subsys(dump, NULL, NULL);
-
-/*
- * Shutdown actions section
- */
-
-static decl_subsys(shutdown_actions, NULL, NULL);
-
-/* on panic */
-
-static ssize_t on_panic_show(struct kset *kset, char *page)
-{
- return sprintf(page, "%s\n", shutdown_action_str(on_panic_action));
-}
-
-static ssize_t on_panic_store(struct kset *kset, const char *buf,
- size_t len)
-{
- if (strncmp(buf, SHUTDOWN_REIPL_STR, strlen(SHUTDOWN_REIPL_STR)) == 0)
- on_panic_action = SHUTDOWN_REIPL;
- else if (strncmp(buf, SHUTDOWN_DUMP_STR,
- strlen(SHUTDOWN_DUMP_STR)) == 0)
- on_panic_action = SHUTDOWN_DUMP;
- else if (strncmp(buf, SHUTDOWN_STOP_STR,
- strlen(SHUTDOWN_STOP_STR)) == 0)
- on_panic_action = SHUTDOWN_STOP;
- else
- return -EINVAL;
-
- return len;
-}
+static struct kobj_attribute reipl_type_attr =
+ __ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
-static struct subsys_attribute on_panic_attr =
- __ATTR(on_panic, 0644, on_panic_show, on_panic_store);
+static struct kset *reipl_kset;
-void do_reipl(void)
+void reipl_run(struct shutdown_trigger *trigger)
{
struct ccw_dev_id devid;
static char buf[100];
@@ -729,8 +661,6 @@ void do_reipl(void)
switch (reipl_method) {
case REIPL_METHOD_CCW_CIO:
devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
- if (ipl_info.type == IPL_TYPE_CCW && devid.devno == ipl_devno)
- diag308(DIAG308_IPL, NULL);
devid.ssid = 0;
reipl_ccw_dev(&devid);
break;
@@ -771,98 +701,6 @@ void do_reipl(void)
default:
break;
}
- signal_processor(smp_processor_id(), sigp_stop_and_store_status);
-}
-
-static void do_dump(void)
-{
- struct ccw_dev_id devid;
- static char buf[100];
-
- switch (dump_method) {
- case DUMP_METHOD_CCW_CIO:
- smp_send_stop();
- devid.devno = dump_block_ccw->ipl_info.ccw.devno;
- devid.ssid = 0;
- reipl_ccw_dev(&devid);
- break;
- case DUMP_METHOD_CCW_VM:
- smp_send_stop();
- sprintf(buf, "STORE STATUS");
- __cpcmd(buf, NULL, 0, NULL);
- sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
- __cpcmd(buf, NULL, 0, NULL);
- break;
- case DUMP_METHOD_CCW_DIAG:
- diag308(DIAG308_SET, dump_block_ccw);
- diag308(DIAG308_DUMP, NULL);
- break;
- case DUMP_METHOD_FCP_DIAG:
- diag308(DIAG308_SET, dump_block_fcp);
- diag308(DIAG308_DUMP, NULL);
- break;
- case DUMP_METHOD_NONE:
- default:
- return;
- }
- printk(KERN_EMERG "Dump failed!\n");
-}
-
-/* init functions */
-
-static int __init ipl_register_fcp_files(void)
-{
- int rc;
-
- rc = sysfs_create_group(&ipl_subsys.kobj,
- &ipl_fcp_attr_group);
- if (rc)
- goto out;
- rc = sysfs_create_bin_file(&ipl_subsys.kobj,
- &ipl_parameter_attr);
- if (rc)
- goto out_ipl_parm;
- rc = sysfs_create_bin_file(&ipl_subsys.kobj,
- &ipl_scp_data_attr);
- if (!rc)
- goto out;
-
- sysfs_remove_bin_file(&ipl_subsys.kobj, &ipl_parameter_attr);
-
-out_ipl_parm:
- sysfs_remove_group(&ipl_subsys.kobj, &ipl_fcp_attr_group);
-out:
- return rc;
-}
-
-static int __init ipl_init(void)
-{
- int rc;
-
- rc = firmware_register(&ipl_subsys);
- if (rc)
- return rc;
- switch (ipl_info.type) {
- case IPL_TYPE_CCW:
- rc = sysfs_create_group(&ipl_subsys.kobj,
- &ipl_ccw_attr_group);
- break;
- case IPL_TYPE_FCP:
- case IPL_TYPE_FCP_DUMP:
- rc = ipl_register_fcp_files();
- break;
- case IPL_TYPE_NSS:
- rc = sysfs_create_group(&ipl_subsys.kobj,
- &ipl_nss_attr_group);
- break;
- default:
- rc = sysfs_create_group(&ipl_subsys.kobj,
- &ipl_unknown_attr_group);
- break;
- }
- if (rc)
- firmware_unregister(&ipl_subsys);
- return rc;
}
static void __init reipl_probe(void)
@@ -883,7 +721,7 @@ static int __init reipl_nss_init(void)
if (!MACHINE_IS_VM)
return 0;
- rc = sysfs_create_group(&reipl_subsys.kobj, &reipl_nss_attr_group);
+ rc = sysfs_create_group(&reipl_kset->kobj, &reipl_nss_attr_group);
if (rc)
return rc;
strncpy(reipl_nss_name, kernel_nss_name, NSS_NAME_SIZE + 1);
@@ -898,7 +736,7 @@ static int __init reipl_ccw_init(void)
reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
if (!reipl_block_ccw)
return -ENOMEM;
- rc = sysfs_create_group(&reipl_subsys.kobj, &reipl_ccw_attr_group);
+ rc = sysfs_create_group(&reipl_kset->kobj, &reipl_ccw_attr_group);
if (rc) {
free_page((unsigned long)reipl_block_ccw);
return rc;
@@ -907,6 +745,7 @@ static int __init reipl_ccw_init(void)
reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
reipl_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
+ reipl_block_ccw->hdr.flags = DIAG308_FLAGS_LP_VALID;
/* check if read scp info worked and set loadparm */
if (sclp_ipl_info.is_valid)
memcpy(reipl_block_ccw->ipl_info.ccw.load_param,
@@ -915,8 +754,7 @@ static int __init reipl_ccw_init(void)
/* read scp info failed: set empty loadparm (EBCDIC blanks) */
memset(reipl_block_ccw->ipl_info.ccw.load_param, 0x40,
LOADPARM_LEN);
- /* FIXME: check for diag308_set_works when enabling diag ccw reipl */
- if (!MACHINE_IS_VM)
+ if (!MACHINE_IS_VM && !diag308_set_works)
sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
if (ipl_info.type == IPL_TYPE_CCW)
reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
@@ -936,7 +774,7 @@ static int __init reipl_fcp_init(void)
reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
if (!reipl_block_fcp)
return -ENOMEM;
- rc = sysfs_create_group(&reipl_subsys.kobj, &reipl_fcp_attr_group);
+ rc = sysfs_create_group(&reipl_kset->kobj, &reipl_fcp_attr_group);
if (rc) {
free_page((unsigned long)reipl_block_fcp);
return rc;
@@ -954,16 +792,16 @@ static int __init reipl_fcp_init(void)
return 0;
}
-static int __init reipl_init(void)
+static int reipl_init(void)
{
int rc;
- rc = firmware_register(&reipl_subsys);
- if (rc)
- return rc;
- rc = subsys_create_file(&reipl_subsys, &reipl_type_attr);
+ reipl_kset = kset_create_and_add("reipl", NULL, firmware_kobj);
+ if (!reipl_kset)
+ return -ENOMEM;
+ rc = sysfs_create_file(&reipl_kset->kobj, &reipl_type_attr.attr);
if (rc) {
- firmware_unregister(&reipl_subsys);
+ kset_unregister(reipl_kset);
return rc;
}
rc = reipl_ccw_init();
@@ -981,6 +819,140 @@ static int __init reipl_init(void)
return 0;
}
+static struct shutdown_action reipl_action = {SHUTDOWN_ACTION_REIPL_STR,
+ reipl_run, reipl_init};
+
+/*
+ * dump shutdown action: Dump Linux on shutdown.
+ */
+
+/* FCP dump device attributes */
+
+DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+ dump_block_fcp->ipl_info.fcp.wwpn);
+DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
+ dump_block_fcp->ipl_info.fcp.lun);
+DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
+ dump_block_fcp->ipl_info.fcp.bootprog);
+DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
+ dump_block_fcp->ipl_info.fcp.br_lba);
+DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
+ dump_block_fcp->ipl_info.fcp.devno);
+
+static struct attribute *dump_fcp_attrs[] = {
+ &sys_dump_fcp_device_attr.attr,
+ &sys_dump_fcp_wwpn_attr.attr,
+ &sys_dump_fcp_lun_attr.attr,
+ &sys_dump_fcp_bootprog_attr.attr,
+ &sys_dump_fcp_br_lba_attr.attr,
+ NULL,
+};
+
+static struct attribute_group dump_fcp_attr_group = {
+ .name = IPL_FCP_STR,
+ .attrs = dump_fcp_attrs,
+};
+
+/* CCW dump device attributes */
+
+DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
+ dump_block_ccw->ipl_info.ccw.devno);
+
+static struct attribute *dump_ccw_attrs[] = {
+ &sys_dump_ccw_device_attr.attr,
+ NULL,
+};
+
+static struct attribute_group dump_ccw_attr_group = {
+ .name = IPL_CCW_STR,
+ .attrs = dump_ccw_attrs,
+};
+
+/* dump type */
+
+static int dump_set_type(enum dump_type type)
+{
+ if (!(dump_capabilities & type))
+ return -EINVAL;
+ switch (type) {
+ case DUMP_TYPE_CCW:
+ if (diag308_set_works)
+ dump_method = DUMP_METHOD_CCW_DIAG;
+ else if (MACHINE_IS_VM)
+ dump_method = DUMP_METHOD_CCW_VM;
+ else
+ dump_method = DUMP_METHOD_CCW_CIO;
+ break;
+ case DUMP_TYPE_FCP:
+ dump_method = DUMP_METHOD_FCP_DIAG;
+ break;
+ default:
+ dump_method = DUMP_METHOD_NONE;
+ }
+ dump_type = type;
+ return 0;
+}
+
+static ssize_t dump_type_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return sprintf(page, "%s\n", dump_type_str(dump_type));
+}
+
+static ssize_t dump_type_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ int rc = -EINVAL;
+
+ if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0)
+ rc = dump_set_type(DUMP_TYPE_NONE);
+ else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0)
+ rc = dump_set_type(DUMP_TYPE_CCW);
+ else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
+ rc = dump_set_type(DUMP_TYPE_FCP);
+ return (rc != 0) ? rc : len;
+}
+
+static struct kobj_attribute dump_type_attr =
+ __ATTR(dump_type, 0644, dump_type_show, dump_type_store);
+
+static struct kset *dump_kset;
+
+static void dump_run(struct shutdown_trigger *trigger)
+{
+ struct ccw_dev_id devid;
+ static char buf[100];
+
+ switch (dump_method) {
+ case DUMP_METHOD_CCW_CIO:
+ smp_send_stop();
+ devid.devno = dump_block_ccw->ipl_info.ccw.devno;
+ devid.ssid = 0;
+ reipl_ccw_dev(&devid);
+ break;
+ case DUMP_METHOD_CCW_VM:
+ smp_send_stop();
+ sprintf(buf, "STORE STATUS");
+ __cpcmd(buf, NULL, 0, NULL);
+ sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
+ __cpcmd(buf, NULL, 0, NULL);
+ break;
+ case DUMP_METHOD_CCW_DIAG:
+ diag308(DIAG308_SET, dump_block_ccw);
+ diag308(DIAG308_DUMP, NULL);
+ break;
+ case DUMP_METHOD_FCP_DIAG:
+ diag308(DIAG308_SET, dump_block_fcp);
+ diag308(DIAG308_DUMP, NULL);
+ break;
+ case DUMP_METHOD_NONE:
+ default:
+ return;
+ }
+ printk(KERN_EMERG "Dump failed!\n");
+}
+
static int __init dump_ccw_init(void)
{
int rc;
@@ -988,7 +960,7 @@ static int __init dump_ccw_init(void)
dump_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
if (!dump_block_ccw)
return -ENOMEM;
- rc = sysfs_create_group(&dump_subsys.kobj, &dump_ccw_attr_group);
+ rc = sysfs_create_group(&dump_kset->kobj, &dump_ccw_attr_group);
if (rc) {
free_page((unsigned long)dump_block_ccw);
return rc;
@@ -1012,7 +984,7 @@ static int __init dump_fcp_init(void)
dump_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
if (!dump_block_fcp)
return -ENOMEM;
- rc = sysfs_create_group(&dump_subsys.kobj, &dump_fcp_attr_group);
+ rc = sysfs_create_group(&dump_kset->kobj, &dump_fcp_attr_group);
if (rc) {
free_page((unsigned long)dump_block_fcp);
return rc;
@@ -1026,33 +998,16 @@ static int __init dump_fcp_init(void)
return 0;
}
-#define SHUTDOWN_ON_PANIC_PRIO 0
-
-static int shutdown_on_panic_notify(struct notifier_block *self,
- unsigned long event, void *data)
-{
- if (on_panic_action == SHUTDOWN_DUMP)
- do_dump();
- else if (on_panic_action == SHUTDOWN_REIPL)
- do_reipl();
- return NOTIFY_OK;
-}
-
-static struct notifier_block shutdown_on_panic_nb = {
- .notifier_call = shutdown_on_panic_notify,
- .priority = SHUTDOWN_ON_PANIC_PRIO
-};
-
-static int __init dump_init(void)
+static int dump_init(void)
{
int rc;
- rc = firmware_register(&dump_subsys);
- if (rc)
- return rc;
- rc = subsys_create_file(&dump_subsys, &dump_type_attr);
+ dump_kset = kset_create_and_add("dump", NULL, firmware_kobj);
+ if (!dump_kset)
+ return -ENOMEM;
+ rc = sysfs_create_file(&dump_kset->kobj, &dump_type_attr.attr);
if (rc) {
- firmware_unregister(&dump_subsys);
+ kset_unregister(dump_kset);
return rc;
}
rc = dump_ccw_init();
@@ -1065,46 +1020,381 @@ static int __init dump_init(void)
return 0;
}
-static int __init shutdown_actions_init(void)
+static struct shutdown_action dump_action = {SHUTDOWN_ACTION_DUMP_STR,
+ dump_run, dump_init};
+
+/*
+ * vmcmd shutdown action: Trigger vm command on shutdown.
+ */
+
+static char vmcmd_on_reboot[128];
+static char vmcmd_on_panic[128];
+static char vmcmd_on_halt[128];
+static char vmcmd_on_poff[128];
+
+DEFINE_IPL_ATTR_STR_RW(vmcmd, on_reboot, "%s\n", "%s\n", vmcmd_on_reboot);
+DEFINE_IPL_ATTR_STR_RW(vmcmd, on_panic, "%s\n", "%s\n", vmcmd_on_panic);
+DEFINE_IPL_ATTR_STR_RW(vmcmd, on_halt, "%s\n", "%s\n", vmcmd_on_halt);
+DEFINE_IPL_ATTR_STR_RW(vmcmd, on_poff, "%s\n", "%s\n", vmcmd_on_poff);
+
+static struct attribute *vmcmd_attrs[] = {
+ &sys_vmcmd_on_reboot_attr.attr,
+ &sys_vmcmd_on_panic_attr.attr,
+ &sys_vmcmd_on_halt_attr.attr,
+ &sys_vmcmd_on_poff_attr.attr,
+ NULL,
+};
+
+static struct attribute_group vmcmd_attr_group = {
+ .attrs = vmcmd_attrs,
+};
+
+static struct kset *vmcmd_kset;
+
+static void vmcmd_run(struct shutdown_trigger *trigger)
+{
+ char *cmd, *next_cmd;
+
+ if (strcmp(trigger->name, ON_REIPL_STR) == 0)
+ cmd = vmcmd_on_reboot;
+ else if (strcmp(trigger->name, ON_PANIC_STR) == 0)
+ cmd = vmcmd_on_panic;
+ else if (strcmp(trigger->name, ON_HALT_STR) == 0)
+ cmd = vmcmd_on_halt;
+ else if (strcmp(trigger->name, ON_POFF_STR) == 0)
+ cmd = vmcmd_on_poff;
+ else
+ return;
+
+ if (strlen(cmd) == 0)
+ return;
+ do {
+ next_cmd = strchr(cmd, '\n');
+ if (next_cmd) {
+ next_cmd[0] = 0;
+ next_cmd += 1;
+ }
+ __cpcmd(cmd, NULL, 0, NULL);
+ cmd = next_cmd;
+ } while (cmd != NULL);
+}
+
+static int vmcmd_init(void)
{
- int rc;
+ if (!MACHINE_IS_VM)
+ return -ENOTSUPP;
+ vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj);
+ if (!vmcmd_kset)
+ return -ENOMEM;
+ return sysfs_create_group(&vmcmd_kset->kobj, &vmcmd_attr_group);
+}
- rc = firmware_register(&shutdown_actions_subsys);
- if (rc)
- return rc;
- rc = subsys_create_file(&shutdown_actions_subsys, &on_panic_attr);
- if (rc) {
- firmware_unregister(&shutdown_actions_subsys);
- return rc;
+static struct shutdown_action vmcmd_action = {SHUTDOWN_ACTION_VMCMD_STR,
+ vmcmd_run, vmcmd_init};
+
+/*
+ * stop shutdown action: Stop Linux on shutdown.
+ */
+
+static void stop_run(struct shutdown_trigger *trigger)
+{
+ if (strcmp(trigger->name, ON_PANIC_STR) == 0)
+ disabled_wait((unsigned long) __builtin_return_address(0));
+ else {
+ signal_processor(smp_processor_id(), sigp_stop);
+ for (;;);
}
- atomic_notifier_chain_register(&panic_notifier_list,
- &shutdown_on_panic_nb);
- return 0;
}
-static int __init s390_ipl_init(void)
+static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
+ stop_run, NULL};
+
+/* action list */
+
+static struct shutdown_action *shutdown_actions_list[] = {
+ &ipl_action, &reipl_action, &dump_action, &vmcmd_action, &stop_action};
+#define SHUTDOWN_ACTIONS_COUNT (sizeof(shutdown_actions_list) / sizeof(void *))
+
+/*
+ * Trigger section
+ */
+
+static struct kset *shutdown_actions_kset;
+
+static int set_trigger(const char *buf, struct shutdown_trigger *trigger,
+ size_t len)
{
- int rc;
+ int i;
+ for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
+ if (!shutdown_actions_list[i])
+ continue;
+ if (strncmp(buf, shutdown_actions_list[i]->name,
+ strlen(shutdown_actions_list[i]->name)) == 0) {
+ trigger->action = shutdown_actions_list[i];
+ return len;
+ }
+ }
+ return -EINVAL;
+}
- sclp_get_ipl_info(&sclp_ipl_info);
+/* on reipl */
+
+static struct shutdown_trigger on_reboot_trigger = {ON_REIPL_STR,
+ &reipl_action};
+
+static ssize_t on_reboot_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return sprintf(page, "%s\n", on_reboot_trigger.action->name);
+}
+
+static ssize_t on_reboot_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ return set_trigger(buf, &on_reboot_trigger, len);
+}
+
+static struct kobj_attribute on_reboot_attr =
+ __ATTR(on_reboot, 0644, on_reboot_show, on_reboot_store);
+
+static void do_machine_restart(char *__unused)
+{
+ smp_send_stop();
+ on_reboot_trigger.action->fn(&on_reboot_trigger);
+ reipl_run(NULL);
+}
+void (*_machine_restart)(char *command) = do_machine_restart;
+
+/* on panic */
+
+static struct shutdown_trigger on_panic_trigger = {ON_PANIC_STR, &stop_action};
+
+static ssize_t on_panic_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return sprintf(page, "%s\n", on_panic_trigger.action->name);
+}
+
+static ssize_t on_panic_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ return set_trigger(buf, &on_panic_trigger, len);
+}
+
+static struct kobj_attribute on_panic_attr =
+ __ATTR(on_panic, 0644, on_panic_show, on_panic_store);
+
+static void do_panic(void)
+{
+ on_panic_trigger.action->fn(&on_panic_trigger);
+ stop_run(&on_panic_trigger);
+}
+
+/* on halt */
+
+static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};
+
+static ssize_t on_halt_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return sprintf(page, "%s\n", on_halt_trigger.action->name);
+}
+
+static ssize_t on_halt_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ return set_trigger(buf, &on_halt_trigger, len);
+}
+
+static struct kobj_attribute on_halt_attr =
+ __ATTR(on_halt, 0644, on_halt_show, on_halt_store);
+
+
+static void do_machine_halt(void)
+{
+ smp_send_stop();
+ on_halt_trigger.action->fn(&on_halt_trigger);
+ stop_run(&on_halt_trigger);
+}
+void (*_machine_halt)(void) = do_machine_halt;
+
+/* on power off */
+
+static struct shutdown_trigger on_poff_trigger = {ON_POFF_STR, &stop_action};
+
+static ssize_t on_poff_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return sprintf(page, "%s\n", on_poff_trigger.action->name);
+}
+
+static ssize_t on_poff_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ return set_trigger(buf, &on_poff_trigger, len);
+}
+
+static struct kobj_attribute on_poff_attr =
+ __ATTR(on_poff, 0644, on_poff_show, on_poff_store);
+
+
+static void do_machine_power_off(void)
+{
+ smp_send_stop();
+ on_poff_trigger.action->fn(&on_poff_trigger);
+ stop_run(&on_poff_trigger);
+}
+void (*_machine_power_off)(void) = do_machine_power_off;
+
+static void __init shutdown_triggers_init(void)
+{
+ shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL,
+ firmware_kobj);
+ if (!shutdown_actions_kset)
+ goto fail;
+ if (sysfs_create_file(&shutdown_actions_kset->kobj,
+ &on_reboot_attr.attr))
+ goto fail;
+ if (sysfs_create_file(&shutdown_actions_kset->kobj,
+ &on_panic_attr.attr))
+ goto fail;
+ if (sysfs_create_file(&shutdown_actions_kset->kobj,
+ &on_halt_attr.attr))
+ goto fail;
+ if (sysfs_create_file(&shutdown_actions_kset->kobj,
+ &on_poff_attr.attr))
+ goto fail;
+
+ return;
+fail:
+ panic("shutdown_triggers_init failed\n");
+}
+
+static void __init shutdown_actions_init(void)
+{
+ int i;
+
+ for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
+ if (!shutdown_actions_list[i]->init)
+ continue;
+ if (shutdown_actions_list[i]->init())
+ shutdown_actions_list[i] = NULL;
+ }
+}
+
+static int __init s390_ipl_init(void)
+{
reipl_probe();
- rc = ipl_init();
- if (rc)
- return rc;
- rc = reipl_init();
- if (rc)
- return rc;
- rc = dump_init();
- if (rc)
- return rc;
- rc = shutdown_actions_init();
- if (rc)
- return rc;
+ sclp_get_ipl_info(&sclp_ipl_info);
+ shutdown_actions_init();
+ shutdown_triggers_init();
return 0;
}
__initcall(s390_ipl_init);
+static void __init strncpy_skip_quote(char *dst, char *src, int n)
+{
+ int sx, dx;
+
+ dx = 0;
+ for (sx = 0; src[sx] != 0; sx++) {
+ if (src[sx] == '"')
+ continue;
+ dst[dx++] = src[sx];
+ if (dx >= n)
+ break;
+ }
+}
+
+static int __init vmcmd_on_reboot_setup(char *str)
+{
+ if (!MACHINE_IS_VM)
+ return 1;
+ strncpy_skip_quote(vmcmd_on_reboot, str, 127);
+ vmcmd_on_reboot[127] = 0;
+ on_reboot_trigger.action = &vmcmd_action;
+ return 1;
+}
+__setup("vmreboot=", vmcmd_on_reboot_setup);
+
+static int __init vmcmd_on_panic_setup(char *str)
+{
+ if (!MACHINE_IS_VM)
+ return 1;
+ strncpy_skip_quote(vmcmd_on_panic, str, 127);
+ vmcmd_on_panic[127] = 0;
+ on_panic_trigger.action = &vmcmd_action;
+ return 1;
+}
+__setup("vmpanic=", vmcmd_on_panic_setup);
+
+static int __init vmcmd_on_halt_setup(char *str)
+{
+ if (!MACHINE_IS_VM)
+ return 1;
+ strncpy_skip_quote(vmcmd_on_halt, str, 127);
+ vmcmd_on_halt[127] = 0;
+ on_halt_trigger.action = &vmcmd_action;
+ return 1;
+}
+__setup("vmhalt=", vmcmd_on_halt_setup);
+
+static int __init vmcmd_on_poff_setup(char *str)
+{
+ if (!MACHINE_IS_VM)
+ return 1;
+ strncpy_skip_quote(vmcmd_on_poff, str, 127);
+ vmcmd_on_poff[127] = 0;
+ on_poff_trigger.action = &vmcmd_action;
+ return 1;
+}
+__setup("vmpoff=", vmcmd_on_poff_setup);
+
+static int on_panic_notify(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ do_panic();
+ return NOTIFY_OK;
+}
+
+static struct notifier_block on_panic_nb = {
+ .notifier_call = on_panic_notify,
+ .priority = 0,
+};
+
+void __init setup_ipl(void)
+{
+ ipl_info.type = get_ipl_type();
+ switch (ipl_info.type) {
+ case IPL_TYPE_CCW:
+ ipl_info.data.ccw.dev_id.devno = ipl_devno;
+ ipl_info.data.ccw.dev_id.ssid = 0;
+ break;
+ case IPL_TYPE_FCP:
+ case IPL_TYPE_FCP_DUMP:
+ ipl_info.data.fcp.dev_id.devno =
+ IPL_PARMBLOCK_START->ipl_info.fcp.devno;
+ ipl_info.data.fcp.dev_id.ssid = 0;
+ ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
+ ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
+ break;
+ case IPL_TYPE_NSS:
+ strncpy(ipl_info.data.nss.name, kernel_nss_name,
+ sizeof(ipl_info.data.nss.name));
+ break;
+ case IPL_TYPE_UNKNOWN:
+ default:
+ /* We have no info to copy */
+ break;
+ }
+ atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+}
+
void __init ipl_save_parameters(void)
{
struct cio_iplinfo iplinfo;
@@ -1185,3 +1475,4 @@ void s390_reset_system(void)
do_reset_calls();
}
+
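Aside: the bulk of the ipl.c rework above replaces the single on_panic enum with a table of shutdown actions (ipl, reipl, dump, vmcmd, stop) that each shutdown trigger (on_panic, on_halt, on_poff, on_reboot) dispatches through, with set_trigger() matching the string written to sysfs against the action table. Below is a minimal, self-contained C sketch of that lookup-and-dispatch pattern only; it is illustrative userspace code with printf placeholders, not the kernel implementation, and the names in it are stand-ins.

    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-ins for the kernel's shutdown_action/shutdown_trigger pair. */
    struct shutdown_trigger;

    struct shutdown_action {
            const char *name;
            void (*fn)(struct shutdown_trigger *trigger);
    };

    struct shutdown_trigger {
            const char *name;               /* "on_panic", "on_halt", ... */
            struct shutdown_action *action;
    };

    static void stop_run(struct shutdown_trigger *t)  { printf("%s -> stop\n", t->name); }
    static void reipl_run(struct shutdown_trigger *t) { printf("%s -> reipl\n", t->name); }

    static struct shutdown_action stop_action  = { "stop",  stop_run };
    static struct shutdown_action reipl_action = { "reipl", reipl_run };

    static struct shutdown_action *actions[] = { &stop_action, &reipl_action };

    /* Mirrors set_trigger(): match the written string against the action table. */
    static int set_trigger(const char *buf, struct shutdown_trigger *trigger)
    {
            size_t i;

            for (i = 0; i < sizeof(actions) / sizeof(actions[0]); i++)
                    if (strncmp(buf, actions[i]->name, strlen(actions[i]->name)) == 0) {
                            trigger->action = actions[i];
                            return 0;
                    }
            return -1;
    }

    int main(void)
    {
            struct shutdown_trigger on_panic = { "on_panic", &stop_action };

            set_trigger("reipl", &on_panic);  /* like: echo reipl > .../shutdown_actions/on_panic */
            on_panic.action->fn(&on_panic);   /* trigger fires whatever action is installed */
            return 0;
    }

The point of the indirection is that do_machine_restart(), do_panic() and friends no longer hard-code behaviour: they only call trigger->action->fn(), so any registered action can be wired to any trigger from sysfs or the vmreboot=/vmpanic=/vmhalt=/vmpoff= parameters.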
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 29f7884b4ff..0e7aca03930 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -36,7 +36,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
-
+#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -182,13 +182,15 @@ void cpu_idle(void)
void show_regs(struct pt_regs *regs)
{
- struct task_struct *tsk = current;
-
- printk("CPU: %d %s\n", task_thread_info(tsk)->cpu, print_tainted());
- printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
- current->comm, task_pid_nr(current), (void *) tsk,
- (void *) tsk->thread.ksp);
-
+ print_modules();
+ printk("CPU: %d %s %s %.*s\n",
+ task_thread_info(current)->cpu, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+ printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
+ current->comm, current->pid, current,
+ (void *) current->thread.ksp);
show_registers(regs);
/* Show stack backtrace if pt_regs is from kernel mode */
if (!(regs->psw.mask & PSW_MASK_PSTATE))
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 1d81bf9488a..6e036bae987 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -86,13 +86,13 @@ FixPerRegisters(struct task_struct *task)
per_info->control_regs.bits.storage_alt_space_ctl = 0;
}
-static void set_single_step(struct task_struct *task)
+void user_enable_single_step(struct task_struct *task)
{
task->thread.per_info.single_step = 1;
FixPerRegisters(task);
}
-static void clear_single_step(struct task_struct *task)
+void user_disable_single_step(struct task_struct *task)
{
task->thread.per_info.single_step = 0;
FixPerRegisters(task);
@@ -107,7 +107,7 @@ void
ptrace_disable(struct task_struct *child)
{
/* make sure the single step bit is not set. */
- clear_single_step(child);
+ user_disable_single_step(child);
}
#ifndef CONFIG_64BIT
@@ -651,7 +651,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->exit_code = data;
/* make sure the single step bit is not set. */
- clear_single_step(child);
+ user_disable_single_step(child);
wake_up_process(child);
return 0;
@@ -665,7 +665,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
return 0;
child->exit_code = SIGKILL;
/* make sure the single step bit is not set. */
- clear_single_step(child);
+ user_disable_single_step(child);
wake_up_process(child);
return 0;
@@ -675,10 +675,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
return -EIO;
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->exit_code = data;
- if (data)
- set_tsk_thread_flag(child, TIF_SINGLE_STEP);
- else
- set_single_step(child);
+ user_enable_single_step(child);
/* give it a chance to run. */
wake_up_process(child);
return 0;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 577aa7dd660..766c783bd7a 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -126,75 +126,6 @@ void __cpuinit cpu_init(void)
}
/*
- * VM halt and poweroff setup routines
- */
-char vmhalt_cmd[128] = "";
-char vmpoff_cmd[128] = "";
-static char vmpanic_cmd[128] = "";
-
-static void strncpy_skip_quote(char *dst, char *src, int n)
-{
- int sx, dx;
-
- dx = 0;
- for (sx = 0; src[sx] != 0; sx++) {
- if (src[sx] == '"') continue;
- dst[dx++] = src[sx];
- if (dx >= n) break;
- }
-}
-
-static int __init vmhalt_setup(char *str)
-{
- strncpy_skip_quote(vmhalt_cmd, str, 127);
- vmhalt_cmd[127] = 0;
- return 1;
-}
-
-__setup("vmhalt=", vmhalt_setup);
-
-static int __init vmpoff_setup(char *str)
-{
- strncpy_skip_quote(vmpoff_cmd, str, 127);
- vmpoff_cmd[127] = 0;
- return 1;
-}
-
-__setup("vmpoff=", vmpoff_setup);
-
-static int vmpanic_notify(struct notifier_block *self, unsigned long event,
- void *data)
-{
- if (MACHINE_IS_VM && strlen(vmpanic_cmd) > 0)
- cpcmd(vmpanic_cmd, NULL, 0, NULL);
-
- return NOTIFY_OK;
-}
-
-#define PANIC_PRI_VMPANIC 0
-
-static struct notifier_block vmpanic_nb = {
- .notifier_call = vmpanic_notify,
- .priority = PANIC_PRI_VMPANIC
-};
-
-static int __init vmpanic_setup(char *str)
-{
- static int register_done __initdata = 0;
-
- strncpy_skip_quote(vmpanic_cmd, str, 127);
- vmpanic_cmd[127] = 0;
- if (!register_done) {
- register_done = 1;
- atomic_notifier_chain_register(&panic_notifier_list,
- &vmpanic_nb);
- }
- return 1;
-}
-
-__setup("vmpanic=", vmpanic_setup);
-
-/*
* condev= and conmode= setup parameter.
*/
@@ -308,38 +239,6 @@ static void __init setup_zfcpdump(unsigned int console_devno)
static inline void setup_zfcpdump(unsigned int console_devno) {}
#endif /* CONFIG_ZFCPDUMP */
-#ifdef CONFIG_SMP
-void (*_machine_restart)(char *command) = machine_restart_smp;
-void (*_machine_halt)(void) = machine_halt_smp;
-void (*_machine_power_off)(void) = machine_power_off_smp;
-#else
-/*
- * Reboot, halt and power_off routines for non SMP.
- */
-static void do_machine_restart_nonsmp(char * __unused)
-{
- do_reipl();
-}
-
-static void do_machine_halt_nonsmp(void)
-{
- if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
- __cpcmd(vmhalt_cmd, NULL, 0, NULL);
- signal_processor(smp_processor_id(), sigp_stop_and_store_status);
-}
-
-static void do_machine_power_off_nonsmp(void)
-{
- if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
- __cpcmd(vmpoff_cmd, NULL, 0, NULL);
- signal_processor(smp_processor_id(), sigp_stop_and_store_status);
-}
-
-void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
-void (*_machine_halt)(void) = do_machine_halt_nonsmp;
-void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
-#endif
-
/*
* Reboot, halt and power_off stubs. They just call _machine_restart,
* _machine_halt or _machine_power_off.
@@ -559,7 +458,9 @@ setup_resources(void)
data_resource.start = (unsigned long) &_etext;
data_resource.end = (unsigned long) &_edata - 1;
- for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+ for (i = 0; i < MEMORY_CHUNKS; i++) {
+ if (!memory_chunk[i].size)
+ continue;
res = alloc_bootmem_low(sizeof(struct resource));
res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
switch (memory_chunk[i].type) {
@@ -617,7 +518,7 @@ EXPORT_SYMBOL_GPL(real_memory_size);
static void __init setup_memory_end(void)
{
unsigned long memory_size;
- unsigned long max_mem, max_phys;
+ unsigned long max_mem;
int i;
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
@@ -625,10 +526,31 @@ static void __init setup_memory_end(void)
memory_end = ZFCPDUMP_HSA_SIZE;
#endif
memory_size = 0;
- max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE;
memory_end &= PAGE_MASK;
- max_mem = memory_end ? min(max_phys, memory_end) : max_phys;
+ max_mem = memory_end ? min(VMALLOC_START, memory_end) : VMALLOC_START;
+ memory_end = min(max_mem, memory_end);
+
+ /*
+ * Make sure all chunks are MAX_ORDER aligned so we don't need the
+ * extra checks that HOLES_IN_ZONE would require.
+ */
+ for (i = 0; i < MEMORY_CHUNKS; i++) {
+ unsigned long start, end;
+ struct mem_chunk *chunk;
+ unsigned long align;
+
+ chunk = &memory_chunk[i];
+ align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
+ start = (chunk->addr + align - 1) & ~(align - 1);
+ end = (chunk->addr + chunk->size) & ~(align - 1);
+ if (start >= end)
+ memset(chunk, 0, sizeof(*chunk));
+ else {
+ chunk->addr = start;
+ chunk->size = end - start;
+ }
+ }
for (i = 0; i < MEMORY_CHUNKS; i++) {
struct mem_chunk *chunk = &memory_chunk[i];
@@ -890,7 +812,7 @@ setup_arch(char **cmdline_p)
parse_early_param();
- setup_ipl_info();
+ setup_ipl();
setup_memory_end();
setup_addressing_mode();
setup_memory();
@@ -899,7 +821,6 @@ setup_arch(char **cmdline_p)
cpu_init();
__cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
- smp_setup_cpu_possible_map();
/*
* Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
@@ -920,7 +841,7 @@ setup_arch(char **cmdline_p)
void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo)
{
- printk("cpu %d "
+ printk(KERN_INFO "cpu %d "
#ifdef CONFIG_SMP
"phys_idx=%d "
#endif
@@ -996,7 +917,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
static void c_stop(struct seq_file *m, void *v)
{
}
-struct seq_operations cpuinfo_op = {
+const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
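Aside: the new loop in setup_memory_end() trims every memory chunk inward to a MAX_ORDER boundary so the extra HOLES_IN_ZONE checks are not needed. The sketch below isolates just that rounding arithmetic; the MAX_ORDER, PAGE_SHIFT, and chunk values are example assumptions, not taken from the patch.

    #include <stdio.h>

    /* Example values only; the real MAX_ORDER/PAGE_SHIFT come from the kernel config. */
    #define MAX_ORDER  11
    #define PAGE_SHIFT 12

    int main(void)
    {
            unsigned long align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1); /* 4 MiB here */
            unsigned long addr  = 0x00123456UL;   /* hypothetical chunk start */
            unsigned long size  = 0x01000000UL;   /* hypothetical chunk size  */

            /* Same rounding as setup_memory_end(): start rounded up, end rounded down. */
            unsigned long start = (addr + align - 1) & ~(align - 1);
            unsigned long end   = (addr + size) & ~(align - 1);

            if (start >= end)
                    printf("chunk dropped (smaller than one aligned block)\n");
            else
                    printf("chunk 0x%lx-0x%lx (%lu bytes)\n", start, end, end - start);
            return 0;
    }

Chunks that shrink to nothing after rounding are memset to zero in the patch, which is why the resource-setup loop above it now skips zero-sized chunks instead of stopping at the first one.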
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index d264671c1b7..4449bf32cbf 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -471,6 +471,7 @@ void do_signal(struct pt_regs *regs)
if (signr > 0) {
/* Whee! Actually deliver the signal. */
+ int ret;
#ifdef CONFIG_COMPAT
if (test_thread_flag(TIF_31BIT)) {
extern int handle_signal32(unsigned long sig,
@@ -478,15 +479,12 @@ void do_signal(struct pt_regs *regs)
siginfo_t *info,
sigset_t *oldset,
struct pt_regs *regs);
- if (handle_signal32(
- signr, &ka, &info, oldset, regs) == 0) {
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- }
- return;
+ ret = handle_signal32(signr, &ka, &info, oldset, regs);
}
+ else
#endif
- if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
+ ret = handle_signal(signr, &ka, &info, oldset, regs);
+ if (!ret) {
/*
* A signal was successfully delivered; the saved
* sigmask will have been stored in the signal frame,
@@ -495,6 +493,14 @@ void do_signal(struct pt_regs *regs)
*/
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+ /*
+ * If we would have taken a single-step trap
+ * for a normal instruction, act like we took
+ * one for the handler setup.
+ */
+ if (current->thread.per_info.single_step)
+ set_thread_flag(TIF_SINGLE_STEP);
}
return;
}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 264ea906db4..aa37fa15451 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -42,6 +42,7 @@
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
+#include <asm/sclp.h>
#include <asm/cpu.h>
/*
@@ -53,11 +54,27 @@ EXPORT_SYMBOL(lowcore_ptr);
cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
+cpumask_t cpu_possible_map = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_possible_map);
static struct task_struct *current_set[NR_CPUS];
+static u8 smp_cpu_type;
+static int smp_use_sigp_detection;
+
+enum s390_cpu_state {
+ CPU_STATE_STANDBY,
+ CPU_STATE_CONFIGURED,
+};
+
+#ifdef CONFIG_HOTPLUG_CPU
+static DEFINE_MUTEX(smp_cpu_state_mutex);
+#endif
+static int smp_cpu_state[NR_CPUS];
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
+
static void smp_ext_bitcall(int, ec_bit_sig);
/*
@@ -193,6 +210,33 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
}
EXPORT_SYMBOL(smp_call_function_single);
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on. Must not include the current cpu.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int
+smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
+{
+ preempt_disable();
+ __smp_call_function_map(func, info, 0, wait, mask);
+ preempt_enable();
+ return 0;
+}
+EXPORT_SYMBOL(smp_call_function_mask);
+
void smp_send_stop(void)
{
int cpu, rc;
@@ -217,33 +261,6 @@ void smp_send_stop(void)
}
/*
- * Reboot, halt and power_off routines for SMP.
- */
-void machine_restart_smp(char *__unused)
-{
- smp_send_stop();
- do_reipl();
-}
-
-void machine_halt_smp(void)
-{
- smp_send_stop();
- if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
- __cpcmd(vmhalt_cmd, NULL, 0, NULL);
- signal_processor(smp_processor_id(), sigp_stop_and_store_status);
- for (;;);
-}
-
-void machine_power_off_smp(void)
-{
- smp_send_stop();
- if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
- __cpcmd(vmpoff_cmd, NULL, 0, NULL);
- signal_processor(smp_processor_id(), sigp_stop_and_store_status);
- for (;;);
-}
-
-/*
* This is the main routine where commands issued by other
* cpus are handled.
*/
@@ -355,6 +372,13 @@ void smp_ctl_clear_bit(int cr, int bit)
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
+/*
+ * In early ipl state a temp. logically cpu number is needed, so the sigp
+ * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
+ * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
+ */
+#define CPU_INIT_NO 1
+
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
/*
@@ -375,9 +399,10 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
"kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
return;
}
- zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area));
- __cpu_logical_map[1] = (__u16) phy_cpu;
- while (signal_processor(1, sigp_stop_and_store_status) == sigp_busy)
+ zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
+ __cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
+ while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
+ sigp_busy)
cpu_relax();
memcpy(zfcpdump_save_areas[cpu],
(void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
@@ -397,32 +422,155 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */
-/*
- * Lets check how many CPUs we have.
- */
-static unsigned int __init smp_count_cpus(void)
+static int cpu_stopped(int cpu)
{
- unsigned int cpu, num_cpus;
- __u16 boot_cpu_addr;
+ __u32 status;
- /*
- * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
- */
+ /* Check for stopped state */
+ if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
+ sigp_status_stored) {
+ if (status & 0x40)
+ return 1;
+ }
+ return 0;
+}
+
+static int cpu_known(int cpu_id)
+{
+ int cpu;
+
+ for_each_present_cpu(cpu) {
+ if (__cpu_logical_map[cpu] == cpu_id)
+ return 1;
+ }
+ return 0;
+}
+
+static int smp_rescan_cpus_sigp(cpumask_t avail)
+{
+ int cpu_id, logical_cpu;
+
+ logical_cpu = first_cpu(avail);
+ if (logical_cpu == NR_CPUS)
+ return 0;
+ for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
+ if (cpu_known(cpu_id))
+ continue;
+ __cpu_logical_map[logical_cpu] = cpu_id;
+ if (!cpu_stopped(logical_cpu))
+ continue;
+ cpu_set(logical_cpu, cpu_present_map);
+ smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
+ logical_cpu = next_cpu(logical_cpu, avail);
+ if (logical_cpu == NR_CPUS)
+ break;
+ }
+ return 0;
+}
+
+static int smp_rescan_cpus_sclp(cpumask_t avail)
+{
+ struct sclp_cpu_info *info;
+ int cpu_id, logical_cpu, cpu;
+ int rc;
+
+ logical_cpu = first_cpu(avail);
+ if (logical_cpu == NR_CPUS)
+ return 0;
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ rc = sclp_get_cpu_info(info);
+ if (rc)
+ goto out;
+ for (cpu = 0; cpu < info->combined; cpu++) {
+ if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
+ continue;
+ cpu_id = info->cpu[cpu].address;
+ if (cpu_known(cpu_id))
+ continue;
+ __cpu_logical_map[logical_cpu] = cpu_id;
+ cpu_set(logical_cpu, cpu_present_map);
+ if (cpu >= info->configured)
+ smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
+ else
+ smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
+ logical_cpu = next_cpu(logical_cpu, avail);
+ if (logical_cpu == NR_CPUS)
+ break;
+ }
+out:
+ kfree(info);
+ return rc;
+}
+
+static int smp_rescan_cpus(void)
+{
+ cpumask_t avail;
+
+ cpus_xor(avail, cpu_possible_map, cpu_present_map);
+ if (smp_use_sigp_detection)
+ return smp_rescan_cpus_sigp(avail);
+ else
+ return smp_rescan_cpus_sclp(avail);
+}
+
+static void __init smp_detect_cpus(void)
+{
+ unsigned int cpu, c_cpus, s_cpus;
+ struct sclp_cpu_info *info;
+ u16 boot_cpu_addr, cpu_addr;
+
+ c_cpus = 1;
+ s_cpus = 0;
boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
- current_thread_info()->cpu = 0;
- num_cpus = 1;
- for (cpu = 0; cpu <= 65535; cpu++) {
- if ((__u16) cpu == boot_cpu_addr)
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ panic("smp_detect_cpus failed to allocate memory\n");
+ /* Use sigp detection algorithm if sclp doesn't work. */
+ if (sclp_get_cpu_info(info)) {
+ smp_use_sigp_detection = 1;
+ for (cpu = 0; cpu <= 65535; cpu++) {
+ if (cpu == boot_cpu_addr)
+ continue;
+ __cpu_logical_map[CPU_INIT_NO] = cpu;
+ if (!cpu_stopped(CPU_INIT_NO))
+ continue;
+ smp_get_save_area(c_cpus, cpu);
+ c_cpus++;
+ }
+ goto out;
+ }
+
+ if (info->has_cpu_type) {
+ for (cpu = 0; cpu < info->combined; cpu++) {
+ if (info->cpu[cpu].address == boot_cpu_addr) {
+ smp_cpu_type = info->cpu[cpu].type;
+ break;
+ }
+ }
+ }
+
+ for (cpu = 0; cpu < info->combined; cpu++) {
+ if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
+ continue;
+ cpu_addr = info->cpu[cpu].address;
+ if (cpu_addr == boot_cpu_addr)
continue;
- __cpu_logical_map[1] = (__u16) cpu;
- if (signal_processor(1, sigp_sense) == sigp_not_operational)
+ __cpu_logical_map[CPU_INIT_NO] = cpu_addr;
+ if (!cpu_stopped(CPU_INIT_NO)) {
+ s_cpus++;
continue;
- smp_get_save_area(num_cpus, cpu);
- num_cpus++;
+ }
+ smp_get_save_area(c_cpus, cpu_addr);
+ c_cpus++;
}
- printk("Detected %d CPU's\n", (int) num_cpus);
- printk("Boot cpu address %2X\n", boot_cpu_addr);
- return num_cpus;
+out:
+ kfree(info);
+ printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
+ get_online_cpus();
+ smp_rescan_cpus();
+ put_online_cpus();
}
/*
@@ -453,8 +601,6 @@ int __cpuinit start_secondary(void *cpuvoid)
return 0;
}
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
-
static void __init smp_create_idle(unsigned int cpu)
{
struct task_struct *p;
@@ -470,37 +616,82 @@ static void __init smp_create_idle(unsigned int cpu)
spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock);
}
-static int cpu_stopped(int cpu)
+static int __cpuinit smp_alloc_lowcore(int cpu)
{
- __u32 status;
+ unsigned long async_stack, panic_stack;
+ struct _lowcore *lowcore;
+ int lc_order;
+
+ lc_order = sizeof(long) == 8 ? 1 : 0;
+ lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
+ if (!lowcore)
+ return -ENOMEM;
+ async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+ if (!async_stack)
+ goto out_async_stack;
+ panic_stack = __get_free_page(GFP_KERNEL);
+ if (!panic_stack)
+ goto out_panic_stack;
+
+ *lowcore = S390_lowcore;
+ lowcore->async_stack = async_stack + ASYNC_SIZE;
+ lowcore->panic_stack = panic_stack + PAGE_SIZE;
- /* Check for stopped state */
- if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
- sigp_status_stored) {
- if (status & 0x40)
- return 1;
+#ifndef CONFIG_64BIT
+ if (MACHINE_HAS_IEEE) {
+ unsigned long save_area;
+
+ save_area = get_zeroed_page(GFP_KERNEL);
+ if (!save_area)
+ goto out_save_area;
+ lowcore->extended_save_area_addr = (u32) save_area;
}
+#endif
+ lowcore_ptr[cpu] = lowcore;
return 0;
+
+#ifndef CONFIG_64BIT
+out_save_area:
+ free_page(panic_stack);
+#endif
+out_panic_stack:
+ free_pages(async_stack, ASYNC_ORDER);
+out_async_stack:
+ free_pages((unsigned long) lowcore, lc_order);
+ return -ENOMEM;
}
-/* Upping and downing of CPUs */
+#ifdef CONFIG_HOTPLUG_CPU
+static void smp_free_lowcore(int cpu)
+{
+ struct _lowcore *lowcore;
+ int lc_order;
+
+ lc_order = sizeof(long) == 8 ? 1 : 0;
+ lowcore = lowcore_ptr[cpu];
+#ifndef CONFIG_64BIT
+ if (MACHINE_HAS_IEEE)
+ free_page((unsigned long) lowcore->extended_save_area_addr);
+#endif
+ free_page(lowcore->panic_stack - PAGE_SIZE);
+ free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
+ free_pages((unsigned long) lowcore, lc_order);
+ lowcore_ptr[cpu] = NULL;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
-int __cpu_up(unsigned int cpu)
+/* Upping and downing of CPUs */
+int __cpuinit __cpu_up(unsigned int cpu)
{
struct task_struct *idle;
struct _lowcore *cpu_lowcore;
struct stack_frame *sf;
sigp_ccode ccode;
- int curr_cpu;
- for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
- __cpu_logical_map[cpu] = (__u16) curr_cpu;
- if (cpu_stopped(cpu))
- break;
- }
-
- if (!cpu_stopped(cpu))
- return -ENODEV;
+ if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
+ return -EIO;
+ if (smp_alloc_lowcore(cpu))
+ return -ENOMEM;
ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
cpu, sigp_set_prefix);
@@ -515,6 +706,7 @@ int __cpu_up(unsigned int cpu)
cpu_lowcore = lowcore_ptr[cpu];
cpu_lowcore->kernel_stack = (unsigned long)
task_stack_page(idle) + THREAD_SIZE;
+ cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
- sizeof(struct pt_regs)
- sizeof(struct stack_frame));
@@ -528,6 +720,8 @@ int __cpu_up(unsigned int cpu)
cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
cpu_lowcore->current_task = (unsigned long) idle;
cpu_lowcore->cpu_data.cpu_nr = cpu;
+ cpu_lowcore->softirq_pending = 0;
+ cpu_lowcore->ext_call_fast = 0;
eieio();
while (signal_processor(cpu, sigp_restart) == sigp_busy)
@@ -538,44 +732,20 @@ int __cpu_up(unsigned int cpu)
return 0;
}
-static unsigned int __initdata additional_cpus;
-static unsigned int __initdata possible_cpus;
-
-void __init smp_setup_cpu_possible_map(void)
+static int __init setup_possible_cpus(char *s)
{
- unsigned int phy_cpus, pos_cpus, cpu;
-
- phy_cpus = smp_count_cpus();
- pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
-
- if (possible_cpus)
- pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);
+ int pcpus, cpu;
- for (cpu = 0; cpu < pos_cpus; cpu++)
+ pcpus = simple_strtoul(s, NULL, 0);
+ cpu_possible_map = cpumask_of_cpu(0);
+ for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++)
cpu_set(cpu, cpu_possible_map);
-
- phy_cpus = min(phy_cpus, pos_cpus);
-
- for (cpu = 0; cpu < phy_cpus; cpu++)
- cpu_set(cpu, cpu_present_map);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-static int __init setup_additional_cpus(char *s)
-{
- additional_cpus = simple_strtoul(s, NULL, 0);
- return 0;
-}
-early_param("additional_cpus", setup_additional_cpus);
-
-static int __init setup_possible_cpus(char *s)
-{
- possible_cpus = simple_strtoul(s, NULL, 0);
return 0;
}
early_param("possible_cpus", setup_possible_cpus);
+#ifdef CONFIG_HOTPLUG_CPU
+
int __cpu_disable(void)
{
struct ec_creg_mask_parms cr_parms;
@@ -612,7 +782,8 @@ void __cpu_die(unsigned int cpu)
/* Wait until target cpu is down */
while (!smp_cpu_not_running(cpu))
cpu_relax();
- printk("Processor %d spun down\n", cpu);
+ smp_free_lowcore(cpu);
+ printk(KERN_INFO "Processor %d spun down\n", cpu);
}
void cpu_die(void)
@@ -625,49 +796,19 @@ void cpu_die(void)
#endif /* CONFIG_HOTPLUG_CPU */
-/*
- * Cycle through the processors and setup structures.
- */
-
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- unsigned long stack;
unsigned int cpu;
- int i;
+
+ smp_detect_cpus();
/* request the 0x1201 emergency signal external interrupt */
if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
panic("Couldn't request external interrupt 0x1201");
memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
- /*
- * Initialize prefix pages and stacks for all possible cpus
- */
print_cpu_info(&S390_lowcore.cpu_data);
+ smp_alloc_lowcore(smp_processor_id());
- for_each_possible_cpu(i) {
- lowcore_ptr[i] = (struct _lowcore *)
- __get_free_pages(GFP_KERNEL | GFP_DMA,
- sizeof(void*) == 8 ? 1 : 0);
- stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
- if (!lowcore_ptr[i] || !stack)
- panic("smp_boot_cpus failed to allocate memory\n");
-
- *(lowcore_ptr[i]) = S390_lowcore;
- lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
- stack = __get_free_pages(GFP_KERNEL, 0);
- if (!stack)
- panic("smp_boot_cpus failed to allocate memory\n");
- lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
-#ifndef CONFIG_64BIT
- if (MACHINE_HAS_IEEE) {
- lowcore_ptr[i]->extended_save_area_addr =
- (__u32) __get_free_pages(GFP_KERNEL, 0);
- if (!lowcore_ptr[i]->extended_save_area_addr)
- panic("smp_boot_cpus failed to "
- "allocate memory\n");
- }
-#endif
- }
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE)
ctl_set_bit(14, 29); /* enable extended save area */
@@ -683,15 +824,17 @@ void __init smp_prepare_boot_cpu(void)
{
BUG_ON(smp_processor_id() != 0);
+ current_thread_info()->cpu = 0;
+ cpu_set(0, cpu_present_map);
cpu_set(0, cpu_online_map);
S390_lowcore.percpu_offset = __per_cpu_offset[0];
current_set[0] = current;
+ smp_cpu_state[0] = CPU_STATE_CONFIGURED;
spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
- cpu_present_map = cpu_possible_map;
}
/*
@@ -705,7 +848,79 @@ int setup_profiling_timer(unsigned int multiplier)
return 0;
}
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
+#ifdef CONFIG_HOTPLUG_CPU
+static ssize_t cpu_configure_show(struct sys_device *dev, char *buf)
+{
+ ssize_t count;
+
+ mutex_lock(&smp_cpu_state_mutex);
+ count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
+ mutex_unlock(&smp_cpu_state_mutex);
+ return count;
+}
+
+static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
+ size_t count)
+{
+ int cpu = dev->id;
+ int val, rc;
+ char delim;
+
+ if (sscanf(buf, "%d %c", &val, &delim) != 1)
+ return -EINVAL;
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ mutex_lock(&smp_cpu_state_mutex);
+ get_online_cpus();
+ rc = -EBUSY;
+ if (cpu_online(cpu))
+ goto out;
+ rc = 0;
+ switch (val) {
+ case 0:
+ if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
+ rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
+ if (!rc)
+ smp_cpu_state[cpu] = CPU_STATE_STANDBY;
+ }
+ break;
+ case 1:
+ if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
+ rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
+ if (!rc)
+ smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
+ }
+ break;
+ default:
+ break;
+ }
+out:
+ put_online_cpus();
+ mutex_unlock(&smp_cpu_state_mutex);
+ return rc ? rc : count;
+}
+static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static ssize_t show_cpu_address(struct sys_device *dev, char *buf)
+{
+ return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
+}
+static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
+
+
+static struct attribute *cpu_common_attrs[] = {
+#ifdef CONFIG_HOTPLUG_CPU
+ &attr_configure.attr,
+#endif
+ &attr_address.attr,
+ NULL,
+};
+
+static struct attribute_group cpu_common_attr_group = {
+ .attrs = cpu_common_attrs,
+};
static ssize_t show_capability(struct sys_device *dev, char *buf)
{
@@ -750,15 +965,15 @@ static ssize_t show_idle_time(struct sys_device *dev, char *buf)
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
-static struct attribute *cpu_attrs[] = {
+static struct attribute *cpu_online_attrs[] = {
&attr_capability.attr,
&attr_idle_count.attr,
&attr_idle_time_us.attr,
NULL,
};
-static struct attribute_group cpu_attr_group = {
- .attrs = cpu_attrs,
+static struct attribute_group cpu_online_attr_group = {
+ .attrs = cpu_online_attrs,
};
static int __cpuinit smp_cpu_notify(struct notifier_block *self,
@@ -778,12 +993,12 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
idle->idle_time = 0;
idle->idle_count = 0;
spin_unlock_irq(&idle->lock);
- if (sysfs_create_group(&s->kobj, &cpu_attr_group))
+ if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
return NOTIFY_BAD;
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- sysfs_remove_group(&s->kobj, &cpu_attr_group);
+ sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
break;
}
return NOTIFY_OK;
@@ -793,6 +1008,62 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = {
.notifier_call = smp_cpu_notify,
};
+static int smp_add_present_cpu(int cpu)
+{
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
+ struct sys_device *s = &c->sysdev;
+ int rc;
+
+ c->hotpluggable = 1;
+ rc = register_cpu(c, cpu);
+ if (rc)
+ goto out;
+ rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
+ if (rc)
+ goto out_cpu;
+ if (!cpu_online(cpu))
+ goto out;
+ rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
+ if (!rc)
+ return 0;
+ sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
+out_cpu:
+#ifdef CONFIG_HOTPLUG_CPU
+ unregister_cpu(c);
+#endif
+out:
+ return rc;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static ssize_t rescan_store(struct sys_device *dev, const char *buf,
+ size_t count)
+{
+ cpumask_t newcpus;
+ int cpu;
+ int rc;
+
+ mutex_lock(&smp_cpu_state_mutex);
+ get_online_cpus();
+ newcpus = cpu_present_map;
+ rc = smp_rescan_cpus();
+ if (rc)
+ goto out;
+ cpus_andnot(newcpus, cpu_present_map, newcpus);
+ for_each_cpu_mask(cpu, newcpus) {
+ rc = smp_add_present_cpu(cpu);
+ if (rc)
+ cpu_clear(cpu, cpu_present_map);
+ }
+ rc = 0;
+out:
+ put_online_cpus();
+ mutex_unlock(&smp_cpu_state_mutex);
+ return rc ? rc : count;
+}
+static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
+#endif /* CONFIG_HOTPLUG_CPU */
+
static int __init topology_init(void)
{
int cpu;
@@ -800,16 +1071,14 @@ static int __init topology_init(void)
register_cpu_notifier(&smp_cpu_nb);
- for_each_possible_cpu(cpu) {
- struct cpu *c = &per_cpu(cpu_devices, cpu);
- struct sys_device *s = &c->sysdev;
-
- c->hotpluggable = 1;
- register_cpu(c, cpu);
- if (!cpu_online(cpu))
- continue;
- s = &c->sysdev;
- rc = sysfs_create_group(&s->kobj, &cpu_attr_group);
+#ifdef CONFIG_HOTPLUG_CPU
+ rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+ &attr_rescan.attr);
+ if (rc)
+ return rc;
+#endif
+ for_each_present_cpu(cpu) {
+ rc = smp_add_present_cpu(cpu);
if (rc)
return rc;
}
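
Aside: the smp.c changes above expose new sysfs attributes for each CPU sysdev: a read-only "address" file, a "configure" file that accepts 0 or 1 (rejected with -EBUSY while the CPU is online, otherwise routed through sclp_cpu_deconfigure()/sclp_cpu_configure()), and a write-only class-level "rescan" trigger that picks up newly present CPUs. The sketch below drives them from userspace; the /sys paths are assumptions based on the usual cpu sysdev layout and are not spelled out in the patch.

/* Userspace sketch only; the sysfs paths below are assumed, not quoted
 * from the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_attr(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);

        if (fd >= 0) {
                if (write(fd, val, strlen(val)) < 0)
                        perror(path);
                close(fd);
        }
}

int main(void)
{
        char buf[32];
        int fd, n;

        /* Read the physical CPU address behind logical CPU 1. */
        fd = open("/sys/devices/system/cpu/cpu1/address", O_RDONLY);
        if (fd >= 0) {
                if ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
                        buf[n] = '\0';
                        printf("cpu1 address: %s", buf);
                }
                close(fd);
        }

        /* Deconfigure CPU 1 (only succeeds while it is offline) ... */
        write_attr("/sys/devices/system/cpu/cpu1/configure", "0");
        /* ... then ask the class-level attribute to rescan for new CPUs. */
        write_attr("/sys/devices/system/cpu/rescan", "1");
        return 0;
}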
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 22b800ce212..3bbac1293be 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -1145,7 +1145,7 @@ static void etr_work_fn(struct work_struct *work)
* Sysfs interface functions
*/
static struct sysdev_class etr_sysclass = {
- set_kset_name("etr")
+ .name = "etr",
};
static struct sys_device etr_port0_dev = {
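
Aside: the time.c hunk tracks the sysdev_class API change in which the class name became a plain .name field instead of an embedded kset name. A minimal sketch of declaring and registering a class in the new style, assuming the usual sysdev_class_register() entry point; the "example" class is hypothetical and not part of the patch.

#include <linux/init.h>
#include <linux/sysdev.h>

static struct sysdev_class example_sysclass = {
        .name = "example",              /* previously: set_kset_name("example") */
};

static int __init example_sysclass_init(void)
{
        return sysdev_class_register(&example_sysclass);
}
device_initcall(example_sysclass_init);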
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 8ed16a83fba..52b8342c6bf 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -31,6 +31,7 @@
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
+#include <linux/utsname.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -168,9 +169,16 @@ void show_stack(struct task_struct *task, unsigned long *sp)
*/
void dump_stack(void)
{
+ printk("CPU: %d %s %s %.*s\n",
+ task_thread_info(current)->cpu, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+ printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
+ current->comm, current->pid, current,
+ (void *) current->thread.ksp);
show_stack(NULL, NULL);
}
-
EXPORT_SYMBOL(dump_stack);
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
@@ -258,8 +266,14 @@ void die(const char * str, struct pt_regs * regs, long err)
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
- printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
- print_modules();
+ printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
+#ifdef CONFIG_PREEMPT
+ printk("PREEMPT ");
+#endif
+#ifdef CONFIG_SMP
+ printk("SMP");
+#endif
+ printk("\n");
notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
show_regs(regs);
bust_spinlocks(0);
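
Aside: the new dump_stack() header prints only the first word of init_utsname()->version by pairing the "%.*s" format with strcspn(). A standalone illustration of that idiom; the version string below is made up.

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *version = "#1 SMP Thu Jan 31 11:25:51 CET 2008";

        /* strcspn() gives the length of the prefix before the first space;
         * "%.*s" caps the printed length at that value, so only "#1" is shown. */
        printf("%.*s\n", (int)strcspn(version, " "), version);
        return 0;
}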
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 849120e3e28..7d43c3cd3ef 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -17,6 +17,12 @@ ENTRY(_start)
jiffies = jiffies_64;
#endif
+PHDRS {
+ text PT_LOAD FLAGS(5); /* R_E */
+ data PT_LOAD FLAGS(7); /* RWE */
+ note PT_NOTE FLAGS(0); /* ___ */
+}
+
SECTIONS
{
. = 0x00000000;
@@ -33,6 +39,9 @@ SECTIONS
_etext = .; /* End of text section */
+ NOTES :text :note
+ BUG_TABLE :text
+
RODATA
#ifdef CONFIG_SHARED_KERNEL
@@ -49,9 +58,6 @@ SECTIONS
__stop___ex_table = .;
}
- NOTES
- BUG_TABLE
-
.data : { /* Data */
DATA_DATA
CONSTRUCTORS
@@ -91,7 +97,7 @@ SECTIONS
__init_begin = .;
.init.text : {
_sinittext = .;
- *(.init.text)
+ INIT_TEXT
_einittext = .;
}
/*
@@ -99,11 +105,11 @@ SECTIONS
* to deal with references from __bug_table
*/
.exit.text : {
- *(.exit.text)
+ EXIT_TEXT
}
.init.data : {
- *(.init.data)
+ INIT_DATA
}
. = ALIGN(0x100);
.init.setup : {
@@ -150,7 +156,7 @@ SECTIONS
/* Sections to be discarded */
/DISCARD/ : {
- *(.exit.data)
+ EXIT_DATA
*(.exitcall.exit)
}