From 083e14c09b7ae0247b9944a386fdc32cd0719da1 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 11 Jan 2013 13:15:35 +0100 Subject: s390/modules: add relocation overflow checking Given enough debug options some modules can grow large enough that the GOT table gets bigger than 4K. On s390 the modules are compiled with -fpic which limits the GOT to 4K. The end result is a module that is loaded but won't work. Add a sanity check to apply_rela and return with an error if a relocation error is detected for a module. Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/module.c | 140 +++++++++++++++++++++++++++++----------------- 1 file changed, 89 insertions(+), 51 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 4610deafd95..06f17311628 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -65,8 +65,7 @@ void module_free(struct module *mod, void *module_region) vfree(module_region); } -static void -check_rela(Elf_Rela *rela, struct module *me) +static void check_rela(Elf_Rela *rela, struct module *me) { struct mod_arch_syminfo *info; @@ -115,9 +114,8 @@ check_rela(Elf_Rela *rela, struct module *me) * Account for GOT and PLT relocations. We can't add sections for * got and plt but we can increase the core module size. */ -int -module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, - char *secstrings, struct module *me) +int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + char *secstrings, struct module *me) { Elf_Shdr *symtab; Elf_Sym *symbols; @@ -179,13 +177,52 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, return 0; } -static int -apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, - struct module *me) +static int apply_rela_bits(Elf_Addr loc, Elf_Addr val, + int sign, int bits, int shift) +{ + unsigned long umax; + long min, max; + + if (val & ((1UL << shift) - 1)) + return -ENOEXEC; + if (sign) { + val = (Elf_Addr)(((long) val) >> shift); + min = -(1L << (bits - 1)); + max = (1L << (bits - 1)) - 1; + if ((long) val < min || (long) val > max) + return -ENOEXEC; + } else { + val >>= shift; + umax = ((1UL << (bits - 1)) << 1) - 1; + if ((unsigned long) val > umax) + return -ENOEXEC; + } + + if (bits == 8) + *(unsigned char *) loc = val; + else if (bits == 12) + *(unsigned short *) loc = (val & 0xfff) | + (*(unsigned short *) loc & 0xf000); + else if (bits == 16) + *(unsigned short *) loc = val; + else if (bits == 20) + *(unsigned int *) loc = (val & 0xfff) << 16 | + (val & 0xff000) >> 4 | + (*(unsigned int *) loc & 0xf00000ff); + else if (bits == 32) + *(unsigned int *) loc = val; + else if (bits == 64) + *(unsigned long *) loc = val; + return 0; +} + +static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, + const char *strtab, struct module *me) { struct mod_arch_syminfo *info; Elf_Addr loc, val; int r_type, r_sym; + int rc; /* This is where to make the change */ loc = base + rela->r_offset; @@ -205,20 +242,17 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_64: /* Direct 64 bit. 
*/ val += rela->r_addend; if (r_type == R_390_8) - *(unsigned char *) loc = val; + rc = apply_rela_bits(loc, val, 0, 8, 0); else if (r_type == R_390_12) - *(unsigned short *) loc = (val & 0xfff) | - (*(unsigned short *) loc & 0xf000); + rc = apply_rela_bits(loc, val, 0, 12, 0); else if (r_type == R_390_16) - *(unsigned short *) loc = val; + rc = apply_rela_bits(loc, val, 0, 16, 0); else if (r_type == R_390_20) - *(unsigned int *) loc = - (*(unsigned int *) loc & 0xf00000ff) | - (val & 0xfff) << 16 | (val & 0xff000) >> 4; + rc = apply_rela_bits(loc, val, 1, 20, 0); else if (r_type == R_390_32) - *(unsigned int *) loc = val; + rc = apply_rela_bits(loc, val, 0, 32, 0); else if (r_type == R_390_64) - *(unsigned long *) loc = val; + rc = apply_rela_bits(loc, val, 0, 64, 0); break; case R_390_PC16: /* PC relative 16 bit. */ case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */ @@ -227,15 +261,15 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_PC64: /* PC relative 64 bit. */ val += rela->r_addend - loc; if (r_type == R_390_PC16) - *(unsigned short *) loc = val; + rc = apply_rela_bits(loc, val, 1, 16, 0); else if (r_type == R_390_PC16DBL) - *(unsigned short *) loc = val >> 1; + rc = apply_rela_bits(loc, val, 1, 16, 1); else if (r_type == R_390_PC32DBL) - *(unsigned int *) loc = val >> 1; + rc = apply_rela_bits(loc, val, 1, 32, 1); else if (r_type == R_390_PC32) - *(unsigned int *) loc = val; + rc = apply_rela_bits(loc, val, 1, 32, 0); else if (r_type == R_390_PC64) - *(unsigned long *) loc = val; + rc = apply_rela_bits(loc, val, 1, 64, 0); break; case R_390_GOT12: /* 12 bit GOT offset. */ case R_390_GOT16: /* 16 bit GOT offset. */ @@ -260,26 +294,24 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, val = info->got_offset + rela->r_addend; if (r_type == R_390_GOT12 || r_type == R_390_GOTPLT12) - *(unsigned short *) loc = (val & 0xfff) | - (*(unsigned short *) loc & 0xf000); + rc = apply_rela_bits(loc, val, 0, 12, 0); else if (r_type == R_390_GOT16 || r_type == R_390_GOTPLT16) - *(unsigned short *) loc = val; + rc = apply_rela_bits(loc, val, 0, 16, 0); else if (r_type == R_390_GOT20 || r_type == R_390_GOTPLT20) - *(unsigned int *) loc = - (*(unsigned int *) loc & 0xf00000ff) | - (val & 0xfff) << 16 | (val & 0xff000) >> 4; + rc = apply_rela_bits(loc, val, 1, 20, 0); else if (r_type == R_390_GOT32 || r_type == R_390_GOTPLT32) - *(unsigned int *) loc = val; - else if (r_type == R_390_GOTENT || - r_type == R_390_GOTPLTENT) - *(unsigned int *) loc = - (val + (Elf_Addr) me->module_core - loc) >> 1; + rc = apply_rela_bits(loc, val, 0, 32, 0); else if (r_type == R_390_GOT64 || r_type == R_390_GOTPLT64) - *(unsigned long *) loc = val; + rc = apply_rela_bits(loc, val, 0, 64, 0); + else if (r_type == R_390_GOTENT || + r_type == R_390_GOTPLTENT) { + val += (Elf_Addr) me->module_core - loc; + rc = apply_rela_bits(loc, val, 1, 32, 1); + } break; case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */ case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. 
*/ @@ -321,17 +353,17 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, val += rela->r_addend - loc; } if (r_type == R_390_PLT16DBL) - *(unsigned short *) loc = val >> 1; + rc = apply_rela_bits(loc, val, 1, 16, 1); else if (r_type == R_390_PLTOFF16) - *(unsigned short *) loc = val; + rc = apply_rela_bits(loc, val, 0, 16, 0); else if (r_type == R_390_PLT32DBL) - *(unsigned int *) loc = val >> 1; + rc = apply_rela_bits(loc, val, 1, 32, 1); else if (r_type == R_390_PLT32 || r_type == R_390_PLTOFF32) - *(unsigned int *) loc = val; + rc = apply_rela_bits(loc, val, 0, 32, 0); else if (r_type == R_390_PLT64 || r_type == R_390_PLTOFF64) - *(unsigned long *) loc = val; + rc = apply_rela_bits(loc, val, 0, 64, 0); break; case R_390_GOTOFF16: /* 16 bit offset to GOT. */ case R_390_GOTOFF32: /* 32 bit offset to GOT. */ @@ -339,20 +371,20 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, val = val + rela->r_addend - ((Elf_Addr) me->module_core + me->arch.got_offset); if (r_type == R_390_GOTOFF16) - *(unsigned short *) loc = val; + rc = apply_rela_bits(loc, val, 0, 16, 0); else if (r_type == R_390_GOTOFF32) - *(unsigned int *) loc = val; + rc = apply_rela_bits(loc, val, 0, 32, 0); else if (r_type == R_390_GOTOFF64) - *(unsigned long *) loc = val; + rc = apply_rela_bits(loc, val, 0, 64, 0); break; case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */ val = (Elf_Addr) me->module_core + me->arch.got_offset + rela->r_addend - loc; if (r_type == R_390_GOTPC) - *(unsigned int *) loc = val; + rc = apply_rela_bits(loc, val, 1, 32, 0); else if (r_type == R_390_GOTPCDBL) - *(unsigned int *) loc = val >> 1; + rc = apply_rela_bits(loc, val, 1, 32, 1); break; case R_390_COPY: case R_390_GLOB_DAT: /* Create GOT entry. */ @@ -360,19 +392,25 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_RELATIVE: /* Adjust by program base. */ /* Only needed if we want to support loading of modules linked with -shared. */ - break; + return -ENOEXEC; default: - printk(KERN_ERR "module %s: Unknown relocation: %u\n", + printk(KERN_ERR "module %s: unknown relocation: %u\n", me->name, r_type); return -ENOEXEC; } + if (rc) { + printk(KERN_ERR "module %s: relocation error for symbol %s " + "(r_type %i, value 0x%lx)\n", + me->name, strtab + symtab[r_sym].st_name, + r_type, (unsigned long) val); + return rc; + } return 0; } -int -apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, - unsigned int symindex, unsigned int relsec, - struct module *me) +int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *me) { Elf_Addr base; Elf_Sym *symtab; @@ -388,7 +426,7 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, n = sechdrs[relsec].sh_size / sizeof(Elf_Rela); for (i = 0; i < n; i++, rela++) { - rc = apply_rela(rela, base, symtab, me); + rc = apply_rela(rela, base, symtab, strtab, me); if (rc) return rc; } -- cgit v1.2.3-70-g09d2 From 9a17e972529e07d6e2531e6b6712bf29687df8a6 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Tue, 15 Jan 2013 19:04:39 +0100 Subject: s390/chsc: cleanup SEI helper functions Cleanup the functions used to call SEI. Also provide !CONFIG_PCI dummys for pci error handling. 
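The !CONFIG_PCI case is covered by empty static inline stubs rather than by an #ifdef at the call site, so chsc_process_sei_nt2() can invoke the zpci event handlers unconditionally and the calls simply compile away on non-PCI builds. The pattern, for illustration only (the actual hunks follow below):

	#ifdef CONFIG_PCI
	void zpci_event_error(void *);
	void zpci_event_availability(void *);
	#else /* CONFIG_PCI */
	static inline void zpci_event_error(void *e) {}
	static inline void zpci_event_availability(void *e) {}
	#endif /* CONFIG_PCI */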
Reviewed-by: Peter Oberparleiter Signed-off-by: Sebastian Ott Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/pci.h | 5 ++++ drivers/s390/cio/chsc.c | 68 +++++++++++++++++++++------------------------ 2 files changed, 37 insertions(+), 36 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index b1fa93c606a..23d6a245e8a 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -160,9 +160,14 @@ void zpci_teardown_msi_irq(struct zpci_dev *, struct msi_desc *); int zpci_msihash_init(void); void zpci_msihash_exit(void); +#ifdef CONFIG_PCI /* Error handling and recovery */ void zpci_event_error(void *); void zpci_event_availability(void *); +#else /* CONFIG_PCI */ +static inline void zpci_event_error(void *e) {} +static inline void zpci_event_availability(void *e) {} +#endif /* CONFIG_PCI */ /* Helpers */ struct zpci_dev *get_zdev(struct pci_dev *); diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 10729bbcece..31ceef1beb8 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -435,7 +435,6 @@ static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area) static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) { -#ifdef CONFIG_PCI switch (sei_area->cc) { case 1: zpci_event_error(sei_area->ccdf); @@ -444,11 +443,10 @@ static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) zpci_event_availability(sei_area->ccdf); break; default: - CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n", + CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n", sei_area->cc); break; } -#endif } static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) @@ -471,13 +469,19 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) chsc_process_sei_scm_change(sei_area); break; default: /* other stuff */ - CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", + CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n", sei_area->cc); break; } + + /* Check if we might have lost some information. */ + if (sei_area->flags & 0x40) { + CIO_CRW_EVENT(2, "chsc: event overflow\n"); + css_schedule_eval_all(); + } } -static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm) +static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) { do { memset(sei, 0, sizeof(*sei)); @@ -488,40 +492,37 @@ static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm) if (chsc(sei)) break; - if (sei->response.code == 0x0001) { - CIO_CRW_EVENT(2, "chsc: sei successful\n"); - - /* Check if we might have lost some information. */ - if (sei->u.nt0_area.flags & 0x40) { - CIO_CRW_EVENT(2, "chsc: event overflow\n"); - css_schedule_eval_all(); - } - - switch (sei->nt) { - case 0: - chsc_process_sei_nt0(&sei->u.nt0_area); - break; - case 2: - chsc_process_sei_nt2(&sei->u.nt2_area); - break; - default: - CIO_CRW_EVENT(2, "chsc: unhandled nt=%d\n", - sei->nt); - break; - } - } else { + if (sei->response.code != 0x0001) { CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", sei->response.code); break; } - } while (sei->u.nt0_area.flags & 0x80); - return 0; + CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt); + switch (sei->nt) { + case 0: + chsc_process_sei_nt0(&sei->u.nt0_area); + break; + case 2: + chsc_process_sei_nt2(&sei->u.nt2_area); + break; + default: + CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt); + break; + } + } while (sei->u.nt0_area.flags & 0x80); } +/* + * Handle channel subsystem related CRWs. 
+ * Use store event information to find out what's going on. + * + * Note: Access to sei_page is serialized through machine check handler + * thread, so no need for locking. + */ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) { - struct chsc_sei *sei; + struct chsc_sei *sei = sei_page; if (overflow) { css_schedule_eval_all(); @@ -531,14 +532,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, crw0->erc, crw0->rsid); - if (!sei_page) - return; - /* Access to sei_page is serialized through machine check handler * thread, so no need for locking. */ - sei = sei_page; CIO_TRACE_EVENT(2, "prcss"); - __chsc_process_crw(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2); + chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2); } void chsc_chp_online(struct chp_id chpid) -- cgit v1.2.3-70-g09d2 From 0894b3ae776a60c6bad994e1d8f809ceb59904da Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Mon, 21 Jan 2013 18:35:15 +0100 Subject: s390/ipl: Implement diag308 loop for zfcpdump When a zfcpdump is triggered and a second dump on the same CEC is already in progress for another LPAR, diagnose 308 returns with an error code until the first dump is finished. Currently the second Linux stops with a disabled wait PSW in that case. This is improved now by triggering diag 308 in a loop until it works. Signed-off-by: Michael Holzheu Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/ipl.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 6ffcd320321..d8a6a385d04 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -1414,6 +1414,16 @@ static struct kobj_attribute dump_type_attr = static struct kset *dump_kset; +static void diag308_dump(void *dump_block) +{ + diag308(DIAG308_SET, dump_block); + while (1) { + if (diag308(DIAG308_DUMP, NULL) != 0x302) + break; + udelay_simple(USEC_PER_SEC); + } +} + static void __dump_run(void *unused) { struct ccw_dev_id devid; @@ -1432,12 +1442,10 @@ static void __dump_run(void *unused) __cpcmd(buf, NULL, 0, NULL); break; case DUMP_METHOD_CCW_DIAG: - diag308(DIAG308_SET, dump_block_ccw); - diag308(DIAG308_DUMP, NULL); + diag308_dump(dump_block_ccw); break; case DUMP_METHOD_FCP_DIAG: - diag308(DIAG308_SET, dump_block_fcp); - diag308(DIAG308_DUMP, NULL); + diag308_dump(dump_block_fcp); break; default: break; -- cgit v1.2.3-70-g09d2 From 1aae0560d160ee6ebef927a35e4f405306a079df Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 30 Jan 2013 09:49:40 +0100 Subject: s390/time: rename tod clock access functions Fix name clash with some common code device drivers and add "tod" to all tod clock access function names.
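For reference, the renames in asm/timex.h (taken from the hunks below; every caller in arch/s390 and drivers/s390 is converted to match):

	set_clock()           -> set_tod_clock()
	store_clock()         -> store_tod_clock()
	get_clock()           -> get_tod_clock()
	get_clock_ext()       -> get_tod_clock_ext()
	get_clock_xt()        -> get_tod_clock_xt()
	get_clock_monotonic() -> get_tod_clock_monotonic()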
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/appldata/appldata_mem.c | 2 +- arch/s390/appldata/appldata_net_sum.c | 2 +- arch/s390/appldata/appldata_os.c | 2 +- arch/s390/hypfs/hypfs_vm.c | 2 +- arch/s390/include/asm/timex.h | 18 +++++++++--------- arch/s390/kernel/debug.c | 2 +- arch/s390/kernel/early.c | 6 +++--- arch/s390/kernel/nmi.c | 2 +- arch/s390/kernel/smp.c | 10 +++++----- arch/s390/kernel/time.c | 26 +++++++++++++------------- arch/s390/kernel/vtime.c | 2 +- arch/s390/kvm/interrupt.c | 6 +++--- arch/s390/lib/delay.c | 16 ++++++++-------- drivers/s390/block/dasd.c | 18 +++++++++--------- drivers/s390/block/dasd_3990_erp.c | 8 ++++---- drivers/s390/block/dasd_alias.c | 4 ++-- drivers/s390/block/dasd_diag.c | 10 +++++----- drivers/s390/block/dasd_eckd.c | 30 +++++++++++++++--------------- drivers/s390/block/dasd_eer.c | 2 +- drivers/s390/block/dasd_erp.c | 4 ++-- drivers/s390/block/dasd_fba.c | 2 +- drivers/s390/char/sclp.c | 4 ++-- drivers/s390/char/zcore.c | 2 +- drivers/s390/cio/cio.c | 4 ++-- drivers/s390/cio/cmf.c | 6 +++--- drivers/s390/cio/css.c | 2 +- drivers/s390/cio/device_fsm.c | 2 +- drivers/s390/cio/qdio_main.c | 12 ++++++------ drivers/s390/net/qeth_core.h | 2 +- drivers/s390/scsi/zfcp_fsf.c | 2 +- drivers/s390/scsi/zfcp_qdio.c | 2 +- 31 files changed, 106 insertions(+), 106 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c index 02d9a1cf505..7ef60b52d6e 100644 --- a/arch/s390/appldata/appldata_mem.c +++ b/arch/s390/appldata/appldata_mem.c @@ -108,7 +108,7 @@ static void appldata_get_mem_data(void *data) mem_data->totalswap = P2K(val.totalswap); mem_data->freeswap = P2K(val.freeswap); - mem_data->timestamp = get_clock(); + mem_data->timestamp = get_tod_clock(); mem_data->sync_count_2++; } diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c index 1370e358d49..2d224b94535 100644 --- a/arch/s390/appldata/appldata_net_sum.c +++ b/arch/s390/appldata/appldata_net_sum.c @@ -111,7 +111,7 @@ static void appldata_get_net_sum_data(void *data) net_data->tx_dropped = tx_dropped; net_data->collisions = collisions; - net_data->timestamp = get_clock(); + net_data->timestamp = get_tod_clock(); net_data->sync_count_2++; } diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c index 87521ba682e..de8e2b3b018 100644 --- a/arch/s390/appldata/appldata_os.c +++ b/arch/s390/appldata/appldata_os.c @@ -156,7 +156,7 @@ static void appldata_get_os_data(void *data) } ops.size = new_size; } - os_data->timestamp = get_clock(); + os_data->timestamp = get_tod_clock(); os_data->sync_count_2++; } diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c index 4f6afaa8bd8..f364dcf77e8 100644 --- a/arch/s390/hypfs/hypfs_vm.c +++ b/arch/s390/hypfs/hypfs_vm.c @@ -245,7 +245,7 @@ static int dbfs_diag2fc_create(void **data, void **data_free_ptr, size_t *size) d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr)); if (IS_ERR(d2fc)) return PTR_ERR(d2fc); - get_clock_ext(d2fc->hdr.tod_ext); + get_tod_clock_ext(d2fc->hdr.tod_ext); d2fc->hdr.len = count * sizeof(struct diag2fc_data); d2fc->hdr.version = DBFS_D2FC_HDR_VERSION; d2fc->hdr.count = count; diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 4c060bb5b8e..8ad8af91503 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -15,7 +15,7 @@ #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL /* Inline functions for clock 
register access. */ -static inline int set_clock(__u64 time) +static inline int set_tod_clock(__u64 time) { int cc; @@ -27,7 +27,7 @@ static inline int set_clock(__u64 time) return cc; } -static inline int store_clock(__u64 *time) +static inline int store_tod_clock(__u64 *time) { int cc; @@ -71,7 +71,7 @@ static inline void local_tick_enable(unsigned long long comp) typedef unsigned long long cycles_t; -static inline unsigned long long get_clock(void) +static inline unsigned long long get_tod_clock(void) { unsigned long long clk; @@ -83,21 +83,21 @@ static inline unsigned long long get_clock(void) return clk; } -static inline void get_clock_ext(char *clk) +static inline void get_tod_clock_ext(char *clk) { asm volatile("stcke %0" : "=Q" (*clk) : : "cc"); } -static inline unsigned long long get_clock_xt(void) +static inline unsigned long long get_tod_clock_xt(void) { unsigned char clk[16]; - get_clock_ext(clk); + get_tod_clock_ext(clk); return *((unsigned long long *)&clk[1]); } static inline cycles_t get_cycles(void) { - return (cycles_t) get_clock() >> 2; + return (cycles_t) get_tod_clock() >> 2; } int get_sync_clock(unsigned long long *clock); @@ -123,9 +123,9 @@ extern u64 sched_clock_base_cc; * function, otherwise the returned value is not guaranteed to * be monotonic. */ -static inline unsigned long long get_clock_monotonic(void) +static inline unsigned long long get_tod_clock_monotonic(void) { - return get_clock_xt() - sched_clock_base_cc; + return get_tod_clock_xt() - sched_clock_base_cc; } /** diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 4e8215e0d4b..09a94cd9deb 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -867,7 +867,7 @@ static inline void debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level, int exception) { - active->id.stck = get_clock(); + active->id.stck = get_tod_clock(); active->id.fields.cpuid = smp_processor_id(); active->caller = __builtin_return_address(0); active->id.fields.exception = exception; diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 1f0eee9e7da..1ee98e56fc6 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -47,10 +47,10 @@ static void __init reset_tod_clock(void) { u64 time; - if (store_clock(&time) == 0) + if (store_tod_clock(&time) == 0) return; /* TOD clock not running. Set the clock to Unix Epoch. */ - if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0) + if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0) disabled_wait(0); sched_clock_base_cc = TOD_UNIX_EPOCH; @@ -173,7 +173,7 @@ static noinline __init void create_kernel_nss(void) } /* re-initialize cputime accounting. */ - sched_clock_base_cc = get_clock(); + sched_clock_base_cc = get_tod_clock(); S390_lowcore.last_update_clock = sched_clock_base_cc; S390_lowcore.last_update_timer = 0x7fffffffffffffffULL; S390_lowcore.user_timer = 0; diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 7918fbea36b..504175ebf8b 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -293,7 +293,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs) * retry this instruction. 
*/ spin_lock(&ipd_lock); - tmp = get_clock(); + tmp = get_tod_clock(); if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME) ipd_count++; else diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 7433a2f9e5c..549c9d173c0 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -365,16 +365,16 @@ void smp_emergency_stop(cpumask_t *cpumask) u64 end; int cpu; - end = get_clock() + (1000000UL << 12); + end = get_tod_clock() + (1000000UL << 12); for_each_cpu(cpu, cpumask) { struct pcpu *pcpu = pcpu_devices + cpu; set_bit(ec_stop_cpu, &pcpu->ec_mask); while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL, 0, NULL) == SIGP_CC_BUSY && - get_clock() < end) + get_tod_clock() < end) cpu_relax(); } - while (get_clock() < end) { + while (get_tod_clock() < end) { for_each_cpu(cpu, cpumask) if (pcpu_stopped(pcpu_devices + cpu)) cpumask_clear_cpu(cpu, cpumask); @@ -694,7 +694,7 @@ static void __init smp_detect_cpus(void) */ static void __cpuinit smp_start_secondary(void *cpuvoid) { - S390_lowcore.last_update_clock = get_clock(); + S390_lowcore.last_update_clock = get_tod_clock(); S390_lowcore.restart_stack = (unsigned long) restart_stack; S390_lowcore.restart_fn = (unsigned long) do_restart; S390_lowcore.restart_data = 0; @@ -947,7 +947,7 @@ static ssize_t show_idle_time(struct device *dev, unsigned int sequence; do { - now = get_clock(); + now = get_tod_clock(); sequence = ACCESS_ONCE(idle->sequence); idle_time = ACCESS_ONCE(idle->idle_time); idle_enter = ACCESS_ONCE(idle->clock_idle_enter); diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 0aa98db8a80..876546b9cfa 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators); */ unsigned long long notrace __kprobes sched_clock(void) { - return tod_to_ns(get_clock_monotonic()); + return tod_to_ns(get_tod_clock_monotonic()); } /* @@ -194,7 +194,7 @@ static void stp_reset(void); void read_persistent_clock(struct timespec *ts) { - tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts); + tod_to_timeval(get_tod_clock() - TOD_UNIX_EPOCH, ts); } void read_boot_clock(struct timespec *ts) @@ -204,7 +204,7 @@ void read_boot_clock(struct timespec *ts) static cycle_t read_tod_clock(struct clocksource *cs) { - return get_clock(); + return get_tod_clock(); } static struct clocksource clocksource_tod = { @@ -342,7 +342,7 @@ int get_sync_clock(unsigned long long *clock) sw_ptr = &get_cpu_var(clock_sync_word); sw0 = atomic_read(sw_ptr); - *clock = get_clock(); + *clock = get_tod_clock(); sw1 = atomic_read(sw_ptr); put_cpu_var(clock_sync_word); if (sw0 == sw1 && (sw0 & 0x80000000U)) @@ -486,7 +486,7 @@ static void etr_reset(void) .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0, .es = 0, .sl = 0 }; if (etr_setr(&etr_eacr) == 0) { - etr_tolec = get_clock(); + etr_tolec = get_tod_clock(); set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags); if (etr_port0_online && etr_port1_online) set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); @@ -768,8 +768,8 @@ static int etr_sync_clock(void *data) __ctl_set_bit(14, 21); __ctl_set_bit(0, 29); clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32; - old_clock = get_clock(); - if (set_clock(clock) == 0) { + old_clock = get_tod_clock(); + if (set_tod_clock(clock) == 0) { __udelay(1); /* Wait for the clock to start. */ __ctl_clear_bit(0, 29); __ctl_clear_bit(14, 21); @@ -845,7 +845,7 @@ static struct etr_eacr etr_handle_events(struct etr_eacr eacr) * assume that this can have caused an stepping * port switch. 
*/ - etr_tolec = get_clock(); + etr_tolec = get_tod_clock(); eacr.p0 = etr_port0_online; if (!eacr.p0) eacr.e0 = 0; @@ -858,7 +858,7 @@ static struct etr_eacr etr_handle_events(struct etr_eacr eacr) * assume that this can have caused an stepping * port switch. */ - etr_tolec = get_clock(); + etr_tolec = get_tod_clock(); eacr.p1 = etr_port1_online; if (!eacr.p1) eacr.e1 = 0; @@ -974,7 +974,7 @@ static void etr_update_eacr(struct etr_eacr eacr) etr_eacr = eacr; etr_setr(&etr_eacr); if (dp_changed) - etr_tolec = get_clock(); + etr_tolec = get_tod_clock(); } /* @@ -1012,7 +1012,7 @@ static void etr_work_fn(struct work_struct *work) /* Store aib to get the current ETR status word. */ BUG_ON(etr_stetr(&aib) != 0); etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */ - now = get_clock(); + now = get_tod_clock(); /* * Update the port information if the last stepping port change @@ -1537,10 +1537,10 @@ static int stp_sync_clock(void *data) if (stp_info.todoff[0] || stp_info.todoff[1] || stp_info.todoff[2] || stp_info.todoff[3] || stp_info.tmd != 2) { - old_clock = get_clock(); + old_clock = get_tod_clock(); rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0); if (rc == 0) { - delta = adjust_time(old_clock, get_clock(), 0); + delta = adjust_time(old_clock, get_tod_clock(), 0); fixup_clock_comparator(delta); rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index e84b8b68444..8911a169af4 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -191,7 +191,7 @@ cputime64_t s390_get_idle_time(int cpu) unsigned int sequence; do { - now = get_clock(); + now = get_tod_clock(); sequence = ACCESS_ONCE(idle->sequence); idle_enter = ACCESS_ONCE(idle->clock_idle_enter); idle_exit = ACCESS_ONCE(idle->clock_idle_exit); diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 82c481ddef7..87418b50f21 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -362,7 +362,7 @@ static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) } if ((!rc) && (vcpu->arch.sie_block->ckc < - get_clock() + vcpu->arch.sie_block->epoch)) { + get_tod_clock() + vcpu->arch.sie_block->epoch)) { if ((!psw_extint_disabled(vcpu)) && (vcpu->arch.sie_block->gcr[0] & 0x800ul)) rc = 1; @@ -402,7 +402,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) goto no_timer; } - now = get_clock() + vcpu->arch.sie_block->epoch; + now = get_tod_clock() + vcpu->arch.sie_block->epoch; if (vcpu->arch.sie_block->ckc < now) { __unset_cpu_idle(vcpu); return 0; @@ -492,7 +492,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) } if ((vcpu->arch.sie_block->ckc < - get_clock() + vcpu->arch.sie_block->epoch)) + get_tod_clock() + vcpu->arch.sie_block->epoch)) __try_deliver_ckc_interrupt(vcpu); if (atomic_read(&fi->active)) { diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index 42d0cf89121..c61b9fad43c 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c @@ -32,7 +32,7 @@ static void __udelay_disabled(unsigned long long usecs) unsigned long cr0, cr6, new; u64 clock_saved, end; - end = get_clock() + (usecs << 12); + end = get_tod_clock() + (usecs << 12); clock_saved = local_tick_disable(); __ctl_store(cr0, 0, 0); __ctl_store(cr6, 6, 6); @@ -45,7 +45,7 @@ static void __udelay_disabled(unsigned long long usecs) set_clock_comparator(end); vtime_stop_cpu(); local_irq_disable(); - } while (get_clock() < end); + } while (get_tod_clock() < end); lockdep_on(); __ctl_load(cr0, 0, 0); __ctl_load(cr6, 6, 6); @@ 
-56,7 +56,7 @@ static void __udelay_enabled(unsigned long long usecs) { u64 clock_saved, end; - end = get_clock() + (usecs << 12); + end = get_tod_clock() + (usecs << 12); do { clock_saved = 0; if (end < S390_lowcore.clock_comparator) { @@ -67,7 +67,7 @@ static void __udelay_enabled(unsigned long long usecs) local_irq_disable(); if (clock_saved) local_tick_enable(clock_saved); - } while (get_clock() < end); + } while (get_tod_clock() < end); } /* @@ -111,8 +111,8 @@ void udelay_simple(unsigned long long usecs) { u64 end; - end = get_clock() + (usecs << 12); - while (get_clock() < end) + end = get_tod_clock() + (usecs << 12); + while (get_tod_clock() < end) cpu_relax(); } @@ -122,10 +122,10 @@ void __ndelay(unsigned long long nsecs) nsecs <<= 9; do_div(nsecs, 125); - end = get_clock() + nsecs; + end = get_tod_clock() + nsecs; if (nsecs & ~0xfffUL) __udelay(nsecs >> 12); - while (get_clock() < end) + while (get_tod_clock() < end) barrier(); } EXPORT_SYMBOL(__ndelay); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 29225e1c159..e1d96344d73 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1352,7 +1352,7 @@ int dasd_term_IO(struct dasd_ccw_req *cqr) switch (rc) { case 0: /* termination successful */ cqr->status = DASD_CQR_CLEAR_PENDING; - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); cqr->starttime = 0; DBF_DEV_EVENT(DBF_DEBUG, device, "terminate cqr %p successful", @@ -1420,7 +1420,7 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) cqr->status = DASD_CQR_ERROR; return -EIO; } - cqr->startclk = get_clock(); + cqr->startclk = get_tod_clock(); cqr->starttime = jiffies; cqr->retries--; if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { @@ -1623,7 +1623,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, return; } - now = get_clock(); + now = get_tod_clock(); cqr = (struct dasd_ccw_req *) intparm; /* check for conditions that should be handled immediately */ if (!cqr || @@ -1963,7 +1963,7 @@ int dasd_flush_device_queue(struct dasd_device *device) } break; case DASD_CQR_QUEUED: - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); cqr->status = DASD_CQR_CLEARED; break; default: /* no need to modify the others */ @@ -2210,7 +2210,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) wait_event(generic_waitq, _wait_for_wakeup(cqr)); } - maincqr->endclk = get_clock(); + maincqr->endclk = get_tod_clock(); if ((maincqr->status != DASD_CQR_DONE) && (maincqr->intrc != -ERESTARTSYS)) dasd_log_sense(maincqr, &maincqr->irb); @@ -2340,7 +2340,7 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr) "Cancelling request %p failed with rc=%d\n", cqr, rc); } else { - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); } break; default: /* already finished or clear pending - do nothing */ @@ -2568,7 +2568,7 @@ restart: } /* Rechain finished requests to final queue */ - cqr->endclk = get_clock(); + cqr->endclk = get_tod_clock(); list_move_tail(&cqr->blocklist, final_queue); } } @@ -2711,7 +2711,7 @@ restart_cb: } /* call the callback function */ spin_lock_irq(&block->request_queue_lock); - cqr->endclk = get_clock(); + cqr->endclk = get_tod_clock(); list_del_init(&cqr->blocklist); __dasd_cleanup_cqr(cqr); spin_unlock_irq(&block->request_queue_lock); @@ -3504,7 +3504,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, cqr->memdev = device; cqr->expires = 10*HZ; cqr->retries = 256; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = 
DASD_CQR_FILLED; return cqr; } diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index f8212d54013..d2613471368 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -229,7 +229,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier) dctl_cqr->expires = 5 * 60 * HZ; dctl_cqr->retries = 2; - dctl_cqr->buildclk = get_clock(); + dctl_cqr->buildclk = get_tod_clock(); dctl_cqr->status = DASD_CQR_FILLED; @@ -1719,7 +1719,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) erp->magic = default_erp->magic; erp->expires = default_erp->expires; erp->retries = 256; - erp->buildclk = get_clock(); + erp->buildclk = get_tod_clock(); erp->status = DASD_CQR_FILLED; /* remove the default erp */ @@ -2322,7 +2322,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr) DBF_DEV_EVENT(DBF_ERR, device, "%s", "Unable to allocate ERP request"); cqr->status = DASD_CQR_FAILED; - cqr->stopclk = get_clock (); + cqr->stopclk = get_tod_clock(); } else { DBF_DEV_EVENT(DBF_ERR, device, "Unable to allocate ERP request " @@ -2364,7 +2364,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr) erp->magic = cqr->magic; erp->expires = cqr->expires; erp->retries = 256; - erp->buildclk = get_clock(); + erp->buildclk = get_tod_clock(); erp->status = DASD_CQR_FILLED; return erp; diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index 6b556995bb3..a2597e683e7 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -448,7 +448,7 @@ static int read_unit_address_configuration(struct dasd_device *device, ccw->count = sizeof(*(lcu->uac)); ccw->cda = (__u32)(addr_t) lcu->uac; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; /* need to unset flag here to detect race with summary unit check */ @@ -733,7 +733,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu, cqr->memdev = device; cqr->block = NULL; cqr->expires = 5 * HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; rc = dasd_sleep_on_immediatly(cqr); diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 704488d0f81..cc060335852 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -184,14 +184,14 @@ dasd_start_diag(struct dasd_ccw_req * cqr) private->iob.bio_list = dreq->bio; private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; - cqr->startclk = get_clock(); + cqr->startclk = get_tod_clock(); cqr->starttime = jiffies; cqr->retries--; rc = dia250(&private->iob, RW_BIO); switch (rc) { case 0: /* Synchronous I/O finished successfully */ - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); cqr->status = DASD_CQR_SUCCESS; /* Indicate to calling function that only a dasd_schedule_bh() and no timer is needed */ @@ -222,7 +222,7 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr) mdsk_term_io(device); mdsk_init_io(device, device->block->bp_block, 0, NULL); cqr->status = DASD_CQR_CLEAR_PENDING; - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); dasd_schedule_device_bh(device); return 0; } @@ -276,7 +276,7 @@ static void dasd_ext_handler(struct ext_code ext_code, return; } - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); expires = 0; if ((ext_code.subcode & 0xff) == 0) { @@ -556,7 +556,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, } } cqr->retries = 
DIAG_MAX_RETRIES; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); if (blk_noretry_request(req) || block->base->features & DASD_FEATURE_FAILFAST) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index e37bc1620d1..33f26bfa62f 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -862,7 +862,7 @@ static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device, cqr->expires = 10*HZ; cqr->lpm = lpm; cqr->retries = 256; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags); } @@ -1449,7 +1449,7 @@ static int dasd_eckd_read_features(struct dasd_device *device) ccw->count = sizeof(struct dasd_rssd_features); ccw->cda = (__u32)(addr_t) features; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; rc = dasd_sleep_on(cqr); if (rc == 0) { @@ -1501,7 +1501,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device, cqr->block = NULL; cqr->retries = 256; cqr->expires = 10*HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } @@ -1841,7 +1841,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device) cqr->startdev = device; cqr->memdev = device; cqr->retries = 255; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } @@ -2241,7 +2241,7 @@ dasd_eckd_format_device(struct dasd_device * device, fcp->startdev = device; fcp->memdev = device; fcp->retries = 256; - fcp->buildclk = get_clock(); + fcp->buildclk = get_tod_clock(); fcp->status = DASD_CQR_FILLED; return fcp; } @@ -2530,7 +2530,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ cqr->lpm = startdev->path_data.ppm; cqr->retries = 256; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } @@ -2705,7 +2705,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ cqr->lpm = startdev->path_data.ppm; cqr->retries = 256; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } @@ -2998,7 +2998,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ cqr->lpm = startdev->path_data.ppm; cqr->retries = 256; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; out_error: @@ -3201,7 +3201,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev, cqr->expires = startdev->default_expires * HZ; cqr->lpm = startdev->path_data.ppm; cqr->retries = 256; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) @@ -3402,7 +3402,7 @@ dasd_eckd_release(struct dasd_device *device) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->retries = 2; /* set retry counter to enable basic ERP */ cqr->expires = 2 * HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; rc = dasd_sleep_on_immediatly(cqr); @@ -3457,7 +3457,7 @@ dasd_eckd_reserve(struct dasd_device *device) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->retries = 2; /* set retry 
counter to enable basic ERP */ cqr->expires = 2 * HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; rc = dasd_sleep_on_immediatly(cqr); @@ -3511,7 +3511,7 @@ dasd_eckd_steal_lock(struct dasd_device *device) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); cqr->retries = 2; /* set retry counter to enable basic ERP */ cqr->expires = 2 * HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; rc = dasd_sleep_on_immediatly(cqr); @@ -3572,7 +3572,7 @@ static int dasd_eckd_snid(struct dasd_device *device, set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags); cqr->retries = 5; cqr->expires = 10 * HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; cqr->lpm = usrparm.path_mask; @@ -3642,7 +3642,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp) ccw->count = sizeof(struct dasd_rssd_perf_stats_t); ccw->cda = (__u32)(addr_t) stats; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; rc = dasd_sleep_on(cqr); if (rc == 0) { @@ -3768,7 +3768,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp) cqr->memdev = device; cqr->retries = 3; cqr->expires = 10 * HZ; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; /* Build the ccws */ diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index ff901b5509c..21ef63cf096 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -481,7 +481,7 @@ int dasd_eer_enable(struct dasd_device *device) ccw->flags = 0; ccw->cda = (__u32)(addr_t) cqr->data; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; cqr->callback = dasd_eer_snss_cb; diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c index d01ef82f875..3250cb471f7 100644 --- a/drivers/s390/block/dasd_erp.c +++ b/drivers/s390/block/dasd_erp.c @@ -102,7 +102,7 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr) pr_err("%s: default ERP has run out of retries and failed\n", dev_name(&device->cdev->dev)); cqr->status = DASD_CQR_FAILED; - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); } return cqr; } /* end dasd_default_erp_action */ @@ -146,7 +146,7 @@ struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr) cqr->status = DASD_CQR_DONE; else { cqr->status = DASD_CQR_FAILED; - cqr->stopclk = get_clock(); + cqr->stopclk = get_tod_clock(); } return cqr; diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 41469858434..4dd0e2f6047 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -370,7 +370,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, cqr->block = block; cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */ cqr->retries = 32; - cqr->buildclk = get_clock(); + cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; return cqr; } diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index 12c16a65dd2..bd6871bf545 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -450,7 +450,7 @@ sclp_sync_wait(void) timeout = 0; if (timer_pending(&sclp_request_timer)) { /* Get timeout TOD value */ - timeout = get_clock() + + timeout = get_tod_clock() + sclp_tod_from_jiffies(sclp_request_timer.expires - jiffies); } @@ -472,7 +472,7 @@ sclp_sync_wait(void) while (sclp_running_state != 
sclp_running_state_idle) { /* Check for expired request timer */ if (timer_pending(&sclp_request_timer) && - get_clock() > timeout && + get_tod_clock() > timeout && del_timer(&sclp_request_timer)) sclp_request_timer.function(sclp_request_timer.data); cpu_relax(); diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 681749e7f6d..1d61a01576d 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -637,7 +637,7 @@ static int __init zcore_header_init(int arch, struct zcore_header *hdr) hdr->rmem_size = memory; hdr->mem_end = sys_info.mem_size; hdr->num_pages = memory / PAGE_SIZE; - hdr->tod = get_clock(); + hdr->tod = get_tod_clock(); get_cpu_id(&hdr->cpu_id); for (i = 0; zfcpdump_save_areas[i]; i++) { prefix = zfcpdump_save_areas[i]->pref_reg; diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index c8faf6230b0..986ef6a92a4 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -962,9 +962,9 @@ static void css_reset(void) atomic_inc(&chpid_reset_count); } /* Wait for machine check for all channel paths. */ - timeout = get_clock() + (RCHP_TIMEOUT << 12); + timeout = get_tod_clock() + (RCHP_TIMEOUT << 12); while (atomic_read(&chpid_reset_count) != 0) { - if (get_clock() > timeout) + if (get_tod_clock() > timeout) break; cpu_relax(); } diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index c9fc61c0a86..4495e0627a4 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -33,7 +33,7 @@ #include #include #include -#include /* get_clock() */ +#include /* get_tod_clock() */ #include #include @@ -326,7 +326,7 @@ static int cmf_copy_block(struct ccw_device *cdev) memcpy(cmb_data->last_block, hw_block, cmb_data->size); memcpy(reference_buf, hw_block, cmb_data->size); } while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size)); - cmb_data->last_update = get_clock(); + cmb_data->last_update = get_tod_clock(); kfree(reference_buf); return 0; } @@ -428,7 +428,7 @@ static void cmf_generic_reset(struct ccw_device *cdev) memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size); cmb_data->last_update = 0; } - cdev->private->cmb_start_time = get_clock(); + cdev->private->cmb_start_time = get_tod_clock(); spin_unlock_irq(cdev->ccwlock); } diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index fd00afd8b85..a239237d43f 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -780,7 +780,7 @@ static int __init setup_css(int nr) css->cssid = nr; dev_set_name(&css->device, "css%x", nr); css->device.release = channel_subsystem_release; - tod_high = (u32) (get_clock() >> 32); + tod_high = (u32) (get_tod_clock() >> 32); css_generate_pgid(css, tod_high); return 0; } diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 1bb1d00095a..c7638c54325 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -47,7 +47,7 @@ static void ccw_timeout_log(struct ccw_device *cdev) cc = stsch_err(sch->schid, &schib); printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " - "device information:\n", get_clock()); + "device information:\n", get_tod_clock()); printk(KERN_WARNING "cio: orb:\n"); print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, orb, sizeof(*orb), 0); diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 1671d3461f2..abc550e5dd3 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -338,10 +338,10 @@ again: retries++; if (!start_time) { - start_time = get_clock(); + start_time = 
get_tod_clock(); goto again; } - if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) + if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) goto again; } if (retries) { @@ -504,7 +504,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) int count, stop; unsigned char state = 0; - q->timestamp = get_clock(); + q->timestamp = get_tod_clock(); /* * Don't check 128 buffers, as otherwise qdio_inbound_q_moved @@ -563,7 +563,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q) if (bufnr != q->last_move) { q->last_move = bufnr; if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) - q->u.in.timestamp = get_clock(); + q->u.in.timestamp = get_tod_clock(); return 1; } else return 0; @@ -595,7 +595,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q) * At this point we know, that inbound first_to_check * has (probably) not moved (see qdio_inbound_processing). */ - if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { + if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", q->first_to_check); return 1; @@ -772,7 +772,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q) int count, stop; unsigned char state = 0; - q->timestamp = get_clock(); + q->timestamp = get_tod_clock(); if (need_siga_sync(q)) if (((queue_type(q) != QDIO_IQDIO_QFMT) && diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 480fbeab025..b4796a40b00 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -816,7 +816,7 @@ static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev) static inline int qeth_get_micros(void) { - return (int) (get_clock() >> 12); + return (int) (get_tod_clock() >> 12); } static inline int qeth_get_ip_version(struct sk_buff *skb) diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index c96320d79fb..c7e148f33b2 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -727,7 +727,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) zfcp_reqlist_add(adapter->req_list, req); req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); - req->issued = get_clock(); + req->issued = get_tod_clock(); if (zfcp_qdio_send(qdio, &req->qdio_req)) { del_timer(&req->timer); /* lookup request again, list might have changed */ diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 50b5615848f..665e3cfaaf8 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -68,7 +68,7 @@ static inline void zfcp_qdio_account(struct zfcp_qdio *qdio) unsigned long long now, span; int used; - now = get_clock_monotonic(); + now = get_tod_clock_monotonic(); span = (now - qdio->req_q_time) >> 12; used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free); qdio->req_q_util += used * span; -- cgit v1.2.3-70-g09d2 From e06ef372839c0c33f5f91f892ae632cef38cd259 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 30 Jan 2013 13:56:14 +0100 Subject: s390/barrier: convert mb() to define again Some of the now available common code drivers only compile if mb() is a define. 
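One plausible example of the breakage (hypothetical driver code, not taken from this patch): generic code sometimes probes for the barrier with the preprocessor, e.g.

	#ifndef mb
	#define mb()	barrier()	/* weak generic fallback */
	#endif

which only detects mb() when it is a macro. With the previous static inline function such code installs its own conflicting fallback; keeping mb() as a define (the fast "bcr 14,0" on z196 and newer, "bcr 15,0" otherwise) makes it visible to the preprocessor again.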
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/barrier.h | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index 10a50880294..16760eeb79b 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -13,15 +13,12 @@ * to devices. */ -static inline void mb(void) -{ #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES - /* Fast-BCR without checkpoint synchronization */ - asm volatile("bcr 14,0" : : : "memory"); +/* Fast-BCR without checkpoint synchronization */ +#define mb() do { asm volatile("bcr 14,0" : : : "memory"); } while (0) #else - asm volatile("bcr 15,0" : : : "memory"); +#define mb() do { asm volatile("bcr 15,0" : : : "memory"); } while (0) #endif -} #define rmb() mb() #define wmb() mb() -- cgit v1.2.3-70-g09d2 From e978948db125cc3b90cc324c68e7787f0ac7be4a Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 30 Jan 2013 14:16:02 +0100 Subject: s390/dma: provide dma_cache_sync() function Provide empty dma_cache_sync() function. Acked-by: Sebastian Ott Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/dma-mapping.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index 8a32f7dfd3a..e74bc7ac72e 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -20,8 +20,11 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) extern int dma_set_mask(struct device *dev, u64 mask); extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle); -extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction); + +static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction) +{ +} #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -- cgit v1.2.3-70-g09d2 From a50b2eae8b9aed3840d7e045c9417ce5e6c5ce91 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 30 Jan 2013 14:17:42 +0100 Subject: s390/dma: remove dma_is_consistent() declaration There is no such function nor any caller in the whole kernel. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/dma-mapping.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index e74bc7ac72e..9411db653ba 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -19,7 +19,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) } extern int dma_set_mask(struct device *dev, u64 mask); -extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle); static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) -- cgit v1.2.3-70-g09d2 From 1e5635d10d8112e61776b9513491329f7b0859ce Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 30 Jan 2013 15:52:16 +0100 Subject: s390/pci: rename pci_probe to s390_pci_probe pci_probe is too generic and has a name clash with other common code parts. 
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/pci.h | 2 +- arch/s390/pci/pci.c | 8 ++++---- drivers/pci/hotplug/s390_pci_hpc.c | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 23d6a245e8a..6383c44c662 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -186,7 +186,7 @@ void zpci_dma_exit(void); extern struct mutex zpci_list_lock; extern struct list_head zpci_list; extern struct pci_hp_callback_ops hotplug_ops; -extern unsigned int pci_probe; +extern unsigned int s390_pci_probe; /* FMB */ int zpci_fmb_enable_device(struct zpci_dev *); diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 60e0372545d..aa74409db65 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -1072,13 +1072,13 @@ static void zpci_mem_exit(void) kmem_cache_destroy(zdev_fmb_cache); } -unsigned int pci_probe = 1; -EXPORT_SYMBOL_GPL(pci_probe); +unsigned int s390_pci_probe = 1; +EXPORT_SYMBOL_GPL(s390_pci_probe); char * __init pcibios_setup(char *str) { if (!strcmp(str, "off")) { - pci_probe = 0; + s390_pci_probe = 0; return NULL; } return str; @@ -1088,7 +1088,7 @@ static int __init pci_base_init(void) { int rc; - if (!pci_probe) + if (!s390_pci_probe) return 0; if (!test_facility(2) || !test_facility(69) diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index dee68e0698e..8fe2a8aa89a 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -231,7 +231,7 @@ static int __init pci_hotplug_s390_init(void) * right now. */ - if (!pci_probe) + if (!s390_pci_probe) return -EOPNOTSUPP; /* register callbacks for slot handling from arch code */ -- cgit v1.2.3-70-g09d2 From bddb7ae217cea0ef722468fce823f9e19a69c561 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 30 Jan 2013 16:38:55 +0100 Subject: s390/mm: provide PAGE_SHARED define Only needed to make some drivers compile... Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/pgtable.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 098adbb6266..a009d4dd70c 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -385,6 +385,7 @@ extern unsigned long MODULES_END; #define PAGE_RW __pgprot(_PAGE_TYPE_RW) #define PAGE_KERNEL PAGE_RW +#define PAGE_SHARED PAGE_KERNEL #define PAGE_COPY PAGE_RO /* -- cgit v1.2.3-70-g09d2 From 151a0eb6c8e4398f76453c791d8fd8f8167a7517 Mon Sep 17 00:00:00 2001 From: Hendrik Brueckner Date: Wed, 30 Jan 2013 17:51:56 +0100 Subject: s390/perf: cpum_cf: fallback to software sampling events The CPU-measurement counter facility does not support sampling events and returns -EINVAL in that case. This return code lets the perf tool fail. To fall back to software sampling events, return -ENOENT instead. 
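The choice of error code matters because perf_init_event() in the perf core walks the list of registered PMUs and treats -ENOENT from pmu->event_init() as "not my event, try the next PMU", while any other error aborts event creation. Simplified sketch of that core loop (for illustration only, not part of this patch):

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		event->pmu = pmu;
		ret = pmu->event_init(event);
		if (!ret)
			goto unlock;		/* this PMU accepts the event */
		if (ret != -ENOENT) {
			pmu = ERR_PTR(ret);	/* hard error, fail event creation */
			goto unlock;
		}
		/* -ENOENT: keep searching, e.g. fall back to a software PMU */
	}

Returning -ENOENT for sampling events therefore lets perf set them up on a software PMU instead of failing outright.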
Signed-off-by: Hendrik Brueckner Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/perf_cpum_cf.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 86ec7447e1f..390d9ae57bb 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -367,13 +367,6 @@ static int __hw_perf_event_init(struct perf_event *event) if (ev >= PERF_CPUM_CF_MAX_CTR) return -EINVAL; - /* The CPU measurement counter facility does not have any interrupts - * to do sampling. Sampling must be provided by external means, - * for example, by timers. - */ - if (hwc->sample_period) - return -EINVAL; - /* Use the hardware perf event structure to store the counter number * in 'config' member and the counter set to which the counter belongs * in the 'config_base'. The counter set (config_base) is then used @@ -418,6 +411,12 @@ static int cpumf_pmu_event_init(struct perf_event *event) case PERF_TYPE_HARDWARE: case PERF_TYPE_HW_CACHE: case PERF_TYPE_RAW: + /* The CPU measurement counter facility does not have overflow + * interrupts to do sampling. Sampling must be provided by + * external means, for example, by timers. + */ + if (is_sampling_event(event)) + return -ENOENT; err = __hw_perf_event_init(event); break; default: -- cgit v1.2.3-70-g09d2 From bf4ec24ff8ab54d56c835eb61212a1e87270d7c8 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Thu, 31 Jan 2013 19:53:12 +0100 Subject: s390/pci: cleanup clp inline assembly Tell gcc that the memory region pointed to by req will be used (and changed). Also remove the (now) superfluous memory constraint. Acked-by: Gerald Schaefer Signed-off-by: Sebastian Ott Signed-off-by: Martin Schwidefsky --- arch/s390/pci/pci_clp.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index 2c847143cbd..702bd269368 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c @@ -19,18 +19,19 @@ * Call Logical Processor * Retry logic is handled by the caller. */ -static inline u8 clp_instr(void *req) +static inline u8 clp_instr(void *data) { - u64 ilpm; + struct { u8 _[CLP_BLK_SIZE]; } *req = data; + u64 ignored; u8 cc; asm volatile ( - " .insn rrf,0xb9a00000,%[ilpm],%[req],0x0,0x2\n" + " .insn rrf,0xb9a00000,%[ign],%[req],0x0,0x2\n" " ipm %[cc]\n" " srl %[cc],28\n" - : [cc] "=d" (cc), [ilpm] "=d" (ilpm) + : [cc] "=d" (cc), [ign] "=d" (ignored), "+m" (*req) : [req] "a" (req) - : "cc", "memory"); + : "cc"); return cc; } -- cgit v1.2.3-70-g09d2 From add09d61fee72d7a346051332b6d99f18989504c Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Thu, 31 Jan 2013 19:54:03 +0100 Subject: s390/pci: cleanup clp page allocation Use the __get_free_pages wrapper in clp_alloc_block. Also change the allocation to use one page only. This page is used as CLP response block e.g. to list available pci functions. Using one page we can list > 250 pci functions at once and we have code to loop around this CLP command (if not all functions fit into to the CLP block) already in place. 
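As a side note, the two allocation styles are interchangeable for this purpose; the sketch below (not part of the patch, the helper names are invented) shows the two-step pattern being replaced and its one-call equivalent using the __get_free_pages() wrapper.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Old pattern: allocate pages, then convert to a kernel virtual address. */
static void *alloc_block_two_step(size_t size)
{
	struct page *page = alloc_pages(GFP_KERNEL, get_order(size));

	return page ? page_address(page) : NULL;
}

/* New pattern: __get_free_pages() folds both steps into one call. */
static void *alloc_block_one_step(size_t size)
{
	return (void *) __get_free_pages(GFP_KERNEL, get_order(size));
}

/* Either way, the block is released with free_pages(). */
static void free_block(void *ptr, size_t size)
{
	free_pages((unsigned long) ptr, get_order(size));
}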
Acked-by: Gerald Schaefer Signed-off-by: Sebastian Ott Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/clp.h | 2 +- arch/s390/pci/pci_clp.c | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/clp.h b/arch/s390/include/asm/clp.h index 6c3aecc245f..a0e71a501f7 100644 --- a/arch/s390/include/asm/clp.h +++ b/arch/s390/include/asm/clp.h @@ -2,7 +2,7 @@ #define _ASM_S390_CLP_H /* CLP common request & response block size */ -#define CLP_BLK_SIZE (PAGE_SIZE * 2) +#define CLP_BLK_SIZE PAGE_SIZE struct clp_req_hdr { u16 len; diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index 702bd269368..f339fe2feb1 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c @@ -37,8 +37,7 @@ static inline u8 clp_instr(void *data) static void *clp_alloc_block(void) { - struct page *page = alloc_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE)); - return (page) ? page_address(page) : NULL; + return (void *) __get_free_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE)); } static void clp_free_block(void *ptr) -- cgit v1.2.3-70-g09d2 From 53923354d69e4748506bfee932b7c6b309a15c21 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Thu, 31 Jan 2013 19:55:17 +0100 Subject: s390/pci: fix hotplug module init Loading the pci hotplug module when no devices are present will fail but unfortunately some hotplug callbacks stay registered to the pci bus level. Fix this by not letting module loading fail when no pci devices are present and provide proper {de}registration functions for these callbacks. Reviewed-by: Gerald Schaefer Signed-off-by: Sebastian Ott Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/pci.h | 4 ++- arch/s390/pci/pci.c | 27 ++++++++++++++---- drivers/pci/hotplug/s390_pci_hpc.c | 58 ++++++++++++++++---------------------- 3 files changed, 49 insertions(+), 40 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 6383c44c662..05333b7f046 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -185,9 +185,11 @@ void zpci_dma_exit(void); /* Hotplug */ extern struct mutex zpci_list_lock; extern struct list_head zpci_list; -extern struct pci_hp_callback_ops hotplug_ops; extern unsigned int s390_pci_probe; +void zpci_register_hp_ops(struct pci_hp_callback_ops *); +void zpci_deregister_hp_ops(void); + /* FMB */ int zpci_fmb_enable_device(struct zpci_dev *); int zpci_fmb_disable_device(struct zpci_dev *); diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index aa74409db65..27b4c17855b 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -51,8 +51,7 @@ EXPORT_SYMBOL_GPL(zpci_list); DEFINE_MUTEX(zpci_list_lock); EXPORT_SYMBOL_GPL(zpci_list_lock); -struct pci_hp_callback_ops hotplug_ops; -EXPORT_SYMBOL_GPL(hotplug_ops); +static struct pci_hp_callback_ops *hotplug_ops; static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); static DEFINE_SPINLOCK(zpci_domain_lock); @@ -974,8 +973,8 @@ int zpci_create_device(struct zpci_dev *zdev) mutex_lock(&zpci_list_lock); list_add_tail(&zdev->entry, &zpci_list); - if (hotplug_ops.create_slot) - hotplug_ops.create_slot(zdev); + if (hotplug_ops) + hotplug_ops->create_slot(zdev); mutex_unlock(&zpci_list_lock); if (zdev->state == ZPCI_FN_STATE_STANDBY) @@ -989,8 +988,8 @@ int zpci_create_device(struct zpci_dev *zdev) out_start: mutex_lock(&zpci_list_lock); list_del(&zdev->entry); - if (hotplug_ops.remove_slot) - hotplug_ops.remove_slot(zdev); + if (hotplug_ops) + hotplug_ops->remove_slot(zdev); 
mutex_unlock(&zpci_list_lock); out_bus: zpci_free_domain(zdev); @@ -1072,6 +1071,22 @@ static void zpci_mem_exit(void) kmem_cache_destroy(zdev_fmb_cache); } +void zpci_register_hp_ops(struct pci_hp_callback_ops *ops) +{ + mutex_lock(&zpci_list_lock); + hotplug_ops = ops; + mutex_unlock(&zpci_list_lock); +} +EXPORT_SYMBOL_GPL(zpci_register_hp_ops); + +void zpci_deregister_hp_ops(void) +{ + mutex_lock(&zpci_list_lock); + hotplug_ops = NULL; + mutex_unlock(&zpci_list_lock); +} +EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops); + unsigned int s390_pci_probe = 1; EXPORT_SYMBOL_GPL(s390_pci_probe); diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index 8fe2a8aa89a..7db249a2501 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -172,25 +172,6 @@ error: return -ENOMEM; } -static int __init init_pci_slots(void) -{ - struct zpci_dev *zdev; - int device = 0; - - /* - * Create a structure for each slot, and register that slot - * with the pci_hotplug subsystem. - */ - mutex_lock(&zpci_list_lock); - list_for_each_entry(zdev, &zpci_list, entry) { - init_pci_slot(zdev); - device++; - } - - mutex_unlock(&zpci_list_lock); - return (device) ? 0 : -ENODEV; -} - static void exit_pci_slot(struct zpci_dev *zdev) { struct list_head *tmp, *n; @@ -205,6 +186,26 @@ static void exit_pci_slot(struct zpci_dev *zdev) } } +static struct pci_hp_callback_ops hp_ops = { + .create_slot = init_pci_slot, + .remove_slot = exit_pci_slot, +}; + +static void __init init_pci_slots(void) +{ + struct zpci_dev *zdev; + + /* + * Create a structure for each slot, and register that slot + * with the pci_hotplug subsystem. + */ + mutex_lock(&zpci_list_lock); + list_for_each_entry(zdev, &zpci_list, entry) { + init_pci_slot(zdev); + } + mutex_unlock(&zpci_list_lock); +} + static void __exit exit_pci_slots(void) { struct list_head *tmp, *n; @@ -224,28 +225,19 @@ static void __exit exit_pci_slots(void) static int __init pci_hotplug_s390_init(void) { - /* - * Do specific initialization stuff for your driver here - * like initializing your controller hardware (if any) and - * determining the number of slots you have in the system - * right now. - */ - if (!s390_pci_probe) return -EOPNOTSUPP; - /* register callbacks for slot handling from arch code */ - mutex_lock(&zpci_list_lock); - hotplug_ops.create_slot = init_pci_slot; - hotplug_ops.remove_slot = exit_pci_slot; - mutex_unlock(&zpci_list_lock); - pr_info("registered hotplug slot callbacks\n"); - return init_pci_slots(); + zpci_register_hp_ops(&hp_ops); + init_pci_slots(); + + return 0; } static void __exit pci_hotplug_s390_exit(void) { exit_pci_slots(); + zpci_deregister_hp_ops(); } module_init(pci_hotplug_s390_init); -- cgit v1.2.3-70-g09d2 From 0e0d04a8677f33360cfbb5f8c7aa4ee8cbf5a287 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 6 Feb 2013 10:15:55 +0100 Subject: s390/Kconfig: sort list of arch selected config options Just like on other architectures. The intention is that this will reduce merge conflicts if new config options get added in sorted order as well. 
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/Kconfig | 115 +++++++++++++++++++++++++++--------------------------- 1 file changed, 58 insertions(+), 57 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index b5ea38c2564..17775cf1534 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -60,86 +60,87 @@ config PCI_QUIRKS config S390 def_bool y - select USE_GENERIC_SMP_HELPERS if SMP - select GENERIC_CPU_DEVICES if !SMP - select HAVE_SYSCALL_WRAPPERS - select HAVE_FUNCTION_TRACER - select HAVE_FUNCTION_TRACE_MCOUNT_TEST - select HAVE_FTRACE_MCOUNT_RECORD - select HAVE_C_RECORDMCOUNT - select HAVE_SYSCALL_TRACEPOINTS - select SYSCTL_EXCEPTION_TRACE - select HAVE_DYNAMIC_FTRACE - select HAVE_FUNCTION_GRAPH_TRACER - select HAVE_REGS_AND_STACK_ACCESS_API - select HAVE_OPROFILE - select HAVE_KPROBES - select HAVE_KRETPROBES - select HAVE_KVM if 64BIT - select HAVE_ARCH_TRACEHOOK - select INIT_ALL_POSSIBLE - select HAVE_IRQ_WORK - select HAVE_PERF_EVENTS - select ARCH_HAVE_NMI_SAFE_CMPXCHG - select HAVE_DEBUG_KMEMLEAK - select HAVE_KERNEL_GZIP - select HAVE_KERNEL_BZIP2 - select HAVE_KERNEL_LZMA - select HAVE_KERNEL_LZO - select HAVE_KERNEL_XZ - select HAVE_ARCH_MUTEX_CPU_RELAX - select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 - select HAVE_BPF_JIT if 64BIT && PACK_STACK - select ARCH_SAVE_PAGE_KEYS if HIBERNATION - select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE - select HAVE_MEMBLOCK - select HAVE_MEMBLOCK_NODE_MAP - select HAVE_CMPXCHG_LOCAL - select HAVE_CMPXCHG_DOUBLE - select HAVE_ALIGNED_STRUCT_PAGE if SLUB - select HAVE_VIRT_CPU_ACCOUNTING - select VIRT_CPU_ACCOUNTING select ARCH_DISCARD_MEMBLOCK - select BUILDTIME_EXTABLE_SORT - select ARCH_INLINE_SPIN_TRYLOCK - select ARCH_INLINE_SPIN_TRYLOCK_BH - select ARCH_INLINE_SPIN_LOCK - select ARCH_INLINE_SPIN_LOCK_BH - select ARCH_INLINE_SPIN_LOCK_IRQ - select ARCH_INLINE_SPIN_LOCK_IRQSAVE - select ARCH_INLINE_SPIN_UNLOCK - select ARCH_INLINE_SPIN_UNLOCK_BH - select ARCH_INLINE_SPIN_UNLOCK_IRQ - select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE - select ARCH_INLINE_READ_TRYLOCK + select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE + select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_INLINE_READ_LOCK select ARCH_INLINE_READ_LOCK_BH select ARCH_INLINE_READ_LOCK_IRQ select ARCH_INLINE_READ_LOCK_IRQSAVE + select ARCH_INLINE_READ_TRYLOCK select ARCH_INLINE_READ_UNLOCK select ARCH_INLINE_READ_UNLOCK_BH select ARCH_INLINE_READ_UNLOCK_IRQ select ARCH_INLINE_READ_UNLOCK_IRQRESTORE - select ARCH_INLINE_WRITE_TRYLOCK + select ARCH_INLINE_SPIN_LOCK + select ARCH_INLINE_SPIN_LOCK_BH + select ARCH_INLINE_SPIN_LOCK_IRQ + select ARCH_INLINE_SPIN_LOCK_IRQSAVE + select ARCH_INLINE_SPIN_TRYLOCK + select ARCH_INLINE_SPIN_TRYLOCK_BH + select ARCH_INLINE_SPIN_UNLOCK + select ARCH_INLINE_SPIN_UNLOCK_BH + select ARCH_INLINE_SPIN_UNLOCK_IRQ + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE select ARCH_INLINE_WRITE_LOCK select ARCH_INLINE_WRITE_LOCK_BH select ARCH_INLINE_WRITE_LOCK_IRQ select ARCH_INLINE_WRITE_LOCK_IRQSAVE + select ARCH_INLINE_WRITE_TRYLOCK select ARCH_INLINE_WRITE_UNLOCK select ARCH_INLINE_WRITE_UNLOCK_BH select ARCH_INLINE_WRITE_UNLOCK_IRQ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE - select HAVE_UID16 if 32BIT + select ARCH_SAVE_PAGE_KEYS if HIBERNATION select ARCH_WANT_IPC_PARSE_VERSION - select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT + select BUILDTIME_EXTABLE_SORT + select CLONE_BACKWARDS2 + select GENERIC_CLOCKEVENTS + select GENERIC_CPU_DEVICES if !SMP + select GENERIC_KERNEL_THREAD select GENERIC_SMP_IDLE_THREAD 
select GENERIC_TIME_VSYSCALL_OLD - select GENERIC_CLOCKEVENTS - select KTIME_SCALAR if 32BIT + select HAVE_ALIGNED_STRUCT_PAGE if SLUB + select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 + select HAVE_ARCH_MUTEX_CPU_RELAX select HAVE_ARCH_SECCOMP_FILTER + select HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT + select HAVE_BPF_JIT if 64BIT && PACK_STACK + select HAVE_CMPXCHG_DOUBLE + select HAVE_CMPXCHG_LOCAL + select HAVE_C_RECORDMCOUNT + select HAVE_DEBUG_KMEMLEAK + select HAVE_DYNAMIC_FTRACE + select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_TRACER + select HAVE_FUNCTION_TRACE_MCOUNT_TEST + select HAVE_IRQ_WORK + select HAVE_KERNEL_BZIP2 + select HAVE_KERNEL_GZIP + select HAVE_KERNEL_LZMA + select HAVE_KERNEL_LZO + select HAVE_KERNEL_XZ + select HAVE_KPROBES + select HAVE_KRETPROBES + select HAVE_KVM if 64BIT + select HAVE_MEMBLOCK + select HAVE_MEMBLOCK_NODE_MAP select HAVE_MOD_ARCH_SPECIFIC + select HAVE_OPROFILE + select HAVE_PERF_EVENTS + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_SYSCALL_TRACEPOINTS + select HAVE_SYSCALL_WRAPPERS + select HAVE_UID16 if 32BIT + select HAVE_VIRT_CPU_ACCOUNTING + select INIT_ALL_POSSIBLE + select KTIME_SCALAR if 32BIT select MODULES_USE_ELF_RELA - select CLONE_BACKWARDS2 + select SYSCTL_EXCEPTION_TRACE + select USE_GENERIC_SMP_HELPERS if SMP + select VIRT_CPU_ACCOUNTING config SCHED_OMIT_FRAME_POINTER def_bool y -- cgit v1.2.3-70-g09d2 From 5303a0fe8ce8c7493025a3b60a403439edb4159a Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Sat, 9 Feb 2013 14:07:50 +0100 Subject: s390/bpf,jit: add vlan tag support s390 version of 855ddb56 "x86: bpf_jit_comp: add vlan tag support". Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/net/bpf_jit_comp.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index bb284419b0f..0972e91cced 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -7,6 +7,7 @@ */ #include #include +#include #include #include #include @@ -254,6 +255,8 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter) case BPF_S_ANC_HATYPE: case BPF_S_ANC_RXHASH: case BPF_S_ANC_CPU: + case BPF_S_ANC_VLAN_TAG: + case BPF_S_ANC_VLAN_TAG_PRESENT: case BPF_S_RET_K: /* first instruction sets A register */ break; @@ -699,6 +702,24 @@ call_fn: /* lg %r1,(%r13) */ /* l %r5,(%r2) */ EMIT4_DISP(0x58502000, offsetof(struct sk_buff, rxhash)); break; + case BPF_S_ANC_VLAN_TAG: + case BPF_S_ANC_VLAN_TAG_PRESENT: + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); + BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); + /* lhi %r5,0 */ + EMIT4(0xa7580000); + /* icm %r5,3,(%r2) */ + EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci)); + if (filter->code == BPF_S_ANC_VLAN_TAG) { + /* nill %r5,0xefff */ + EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT); + } else { + /* nill %r5,0x1000 */ + EMIT4_IMM(0xa5570000, VLAN_TAG_PRESENT); + /* srl %r5,12 */ + EMIT4_DISP(0x88500000, 12); + } + break; case BPF_S_ANC_CPU: /* A = smp_processor_id() */ #ifdef CONFIG_SMP /* l %r5, */ -- cgit v1.2.3-70-g09d2 From a4e69245bd7793a620ed67442c00fa1f2dd56891 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 11 Feb 2013 14:26:24 +0100 Subject: s390/linker skript: discard exit.data at runtime Discard exit.data section at run time, not link time, since exit.text references exit.data and causes this build error: `.exit.data' referenced in section 
`.exit.text' of drivers/built-in.o: defined in discarded section `.exit.data' of drivers/built-in.o Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/vmlinux.lds.S | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 79cb51adc74..35b13ed0af5 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -75,6 +75,10 @@ SECTIONS EXIT_TEXT } + .exit.data : { + EXIT_DATA + } + /* early.c uses stsi, which requires page aligned data. */ . = ALIGN(PAGE_SIZE); INIT_DATA_SECTION(0x100) -- cgit v1.2.3-70-g09d2 From 486c0a0bc80d370471b21662bf03f04fbb37cdc6 Mon Sep 17 00:00:00 2001 From: Hendrik Brueckner Date: Mon, 11 Feb 2013 14:29:49 +0100 Subject: s390/mm: Fix crst upgrade of mmap with MAP_FIXED Right now the page table upgrade does not happen if the end address of a fixed mapping is greater than TASK_SIZE. Enhance s390_mmap_check() to handle MAP_FIXED mappings correctly. Signed-off-by: Hendrik Brueckner Reviewed-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/mman.h | 4 ++-- arch/s390/mm/mmap.c | 9 ++++++--- 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h index 0e47a576d66..9977e08df5b 100644 --- a/arch/s390/include/asm/mman.h +++ b/arch/s390/include/asm/mman.h @@ -9,7 +9,7 @@ #include #if !defined(__ASSEMBLY__) && defined(CONFIG_64BIT) -int s390_mmap_check(unsigned long addr, unsigned long len); -#define arch_mmap_check(addr,len,flags) s390_mmap_check(addr,len) +int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags); +#define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags) #endif #endif /* __S390_MMAN_H__ */ diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index c59a5efa58b..06bafec0027 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -101,12 +101,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm) #else -int s390_mmap_check(unsigned long addr, unsigned long len) +int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) { int rc; - if (!is_compat_task() && - len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) { + if (is_compat_task() || (TASK_SIZE >= (1UL << 53))) + return 0; + if (!(flags & MAP_FIXED)) + addr = 0; + if ((addr + len) >= TASK_SIZE) { rc = crst_table_upgrade(current->mm, 1UL << 53); if (rc) return rc; -- cgit v1.2.3-70-g09d2 From abf09bed3cceadd809f0356065c2ada6cee90d4a Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 7 Nov 2012 13:17:37 +0100 Subject: s390/mm: implement software dirty bits The s390 architecture is unique in respect to dirty page detection, it uses the change bit in the per-page storage key to track page modifications. All other architectures track dirty bits by means of page table entries. This property of s390 has caused numerous problems in the past, e.g. see git commit ef5d437f71afdf4a "mm: fix XFS oops due to dirty pages without buffers on s390". To avoid future issues in regard to per-page dirty bits convert s390 to a fault based software dirty bit detection mechanism. All user page table entries which are marked as clean will be hardware read-only, even if the pte is supposed to be writable. A write by the user process will trigger a protection fault which will cause the user pte to be marked as dirty and the hardware read-only bit is removed. 
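The scheme can be illustrated with a small stand-alone model (this is deliberately not the kernel implementation; the structure and helper names below are invented for illustration only): a logically writable but clean PTE stays hardware read-only, and the first write fault is what sets the software dirty bit and drops the protection.

#include <stdbool.h>
#include <stdio.h>

/* Toy PTE model: roughly, writable ~ _PAGE_SWW, dirty ~ _PAGE_SWC,
 * hw_read_only ~ _PAGE_RO in the patch below. */
struct toy_pte {
	bool writable;
	bool dirty;
	bool hw_read_only;
};

/* Making a clean PTE writable keeps the hardware protection in place;
 * an already dirty PTE becomes hardware writable immediately. */
static void toy_pte_mkwrite(struct toy_pte *pte)
{
	pte->writable = true;
	pte->hw_read_only = !pte->dirty;
}

/* The protection fault handler marks the PTE dirty and allows writes. */
static void toy_protection_fault(struct toy_pte *pte)
{
	if (pte->writable) {
		pte->dirty = true;
		pte->hw_read_only = false;
	}
}

int main(void)
{
	struct toy_pte pte = { .writable = false, .dirty = false, .hw_read_only = true };

	toy_pte_mkwrite(&pte);
	printf("after mkwrite:     dirty=%d hw_read_only=%d\n", pte.dirty, pte.hw_read_only);
	toy_protection_fault(&pte);
	printf("after write fault: dirty=%d hw_read_only=%d\n", pte.dirty, pte.hw_read_only);
	return 0;
}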
With this change the dirty bit in the storage key is irrelevant for Linux as a host, but the storage key is still required for KVM guests. The effect is that page_test_and_clear_dirty and the related code can be removed. The referenced bit in the storage key is still used by the page_test_and_clear_young primitive to provide page age information. For page cache pages of mappings with mapping_cap_account_dirty there will not be any change in behavior as the dirty bit tracking already uses read-only ptes to control the amount of dirty pages. Only for swap cache pages and pages of mappings without mapping_cap_account_dirty there can be additional protection faults. To avoid an excessive number of additional faults the mk_pte primitive checks for PageDirty if the pgprot value allows for writes and pre-dirties the pte. That avoids all additional faults for tmpfs and shmem pages until these pages are added to the swap cache. Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/page.h | 22 ------- arch/s390/include/asm/pgtable.h | 131 +++++++++++++++++++++++++++------------- arch/s390/include/asm/sclp.h | 1 - arch/s390/include/asm/setup.h | 16 ++--- arch/s390/kvm/kvm-s390.c | 2 +- arch/s390/lib/uaccess_pt.c | 2 +- arch/s390/mm/pageattr.c | 2 +- arch/s390/mm/vmem.c | 24 +++----- drivers/s390/char/sclp_cmd.c | 10 +-- include/asm-generic/pgtable.h | 10 --- include/linux/page-flags.h | 8 --- mm/rmap.c | 24 -------- 12 files changed, 112 insertions(+), 140 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index a86ad408407..75ce9b065f9 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -154,28 +154,6 @@ static inline int page_reset_referenced(unsigned long addr) #define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */ #define _PAGE_ACC_BITS 0xf0 /* HW access control bits */ -/* - * Test and clear dirty bit in storage key. - * We can't clear the changed bit atomically. This is a potential - * race against modification of the referenced bit. This function - * should therefore only be called if it is not mapped in any - * address space. - * - * Note that the bit gets set whenever page content is changed. That means - * also when the page is modified by DMA or from inside the kernel. - */ -#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY -static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped) -{ - unsigned char skey; - - skey = page_get_storage_key(pfn << PAGE_SHIFT); - if (!(skey & _PAGE_CHANGED)) - return 0; - page_set_storage_key(pfn << PAGE_SHIFT, skey & ~_PAGE_CHANGED, mapped); - return 1; -} - /* * Test and clear referenced bit in storage key. 
*/ diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index a009d4dd70c..97de1200c84 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -29,6 +29,7 @@ #ifndef __ASSEMBLY__ #include #include +#include #include #include @@ -221,13 +222,15 @@ extern unsigned long MODULES_END; /* Software bits in the page table entry */ #define _PAGE_SWT 0x001 /* SW pte type bit t */ #define _PAGE_SWX 0x002 /* SW pte type bit x */ -#define _PAGE_SWC 0x004 /* SW pte changed bit (for KVM) */ -#define _PAGE_SWR 0x008 /* SW pte referenced bit (for KVM) */ -#define _PAGE_SPECIAL 0x010 /* SW associated with special page */ +#define _PAGE_SWC 0x004 /* SW pte changed bit */ +#define _PAGE_SWR 0x008 /* SW pte referenced bit */ +#define _PAGE_SWW 0x010 /* SW pte write bit */ +#define _PAGE_SPECIAL 0x020 /* SW associated with special page */ #define __HAVE_ARCH_PTE_SPECIAL /* Set of bits not changed in pte_modify */ -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR) +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \ + _PAGE_SWC | _PAGE_SWR) /* Six different types of pages. */ #define _PAGE_TYPE_EMPTY 0x400 @@ -321,6 +324,7 @@ extern unsigned long MODULES_END; /* Bits in the region table entry */ #define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */ +#define _REGION_ENTRY_RO 0x200 /* region protection bit */ #define _REGION_ENTRY_INV 0x20 /* invalid region table entry */ #define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */ #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */ @@ -382,9 +386,10 @@ extern unsigned long MODULES_END; */ #define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) #define PAGE_RO __pgprot(_PAGE_TYPE_RO) -#define PAGE_RW __pgprot(_PAGE_TYPE_RW) +#define PAGE_RW __pgprot(_PAGE_TYPE_RO | _PAGE_SWW) +#define PAGE_RWC __pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC) -#define PAGE_KERNEL PAGE_RW +#define PAGE_KERNEL PAGE_RWC #define PAGE_SHARED PAGE_KERNEL #define PAGE_COPY PAGE_RO @@ -632,23 +637,23 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); /* Clear page changed & referenced bit in the storage key */ if (bits & _PAGE_CHANGED) - page_set_storage_key(address, skey ^ bits, 1); + page_set_storage_key(address, skey ^ bits, 0); else if (bits) page_reset_referenced(address); /* Transfer page changed & referenced bit to guest bits in pgste */ pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */ /* Get host changed & referenced bits from pgste */ bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52; - /* Clear host bits in pgste. */ + /* Transfer page changed & referenced bit to kvm user bits */ + pgste_val(pgste) |= bits << 45; /* KVM_UR_BIT & KVM_UC_BIT */ + /* Clear relevant host bits in pgste. 
*/ pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT); pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT); /* Copy page access key and fetch protection bit to pgste */ pgste_val(pgste) |= (unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56; - /* Transfer changed and referenced to kvm user bits */ - pgste_val(pgste) |= bits << 45; /* KVM_UR_BIT & KVM_UC_BIT */ - /* Transfer changed & referenced to pte sofware bits */ - pte_val(*ptep) |= bits << 1; /* _PAGE_SWR & _PAGE_SWC */ + /* Transfer referenced bit to pte */ + pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1; #endif return pgste; @@ -661,20 +666,25 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste) if (!pte_present(*ptep)) return pgste; + /* Get referenced bit from storage key */ young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK); - /* Transfer page referenced bit to pte software bit (host view) */ - if (young || (pgste_val(pgste) & RCP_HR_BIT)) + if (young) + pgste_val(pgste) |= RCP_GR_BIT; + /* Get host referenced bit from pgste */ + if (pgste_val(pgste) & RCP_HR_BIT) { + pgste_val(pgste) &= ~RCP_HR_BIT; + young = 1; + } + /* Transfer referenced bit to kvm user bits and pte */ + if (young) { + pgste_val(pgste) |= KVM_UR_BIT; pte_val(*ptep) |= _PAGE_SWR; - /* Clear host referenced bit in pgste. */ - pgste_val(pgste) &= ~RCP_HR_BIT; - /* Transfer page referenced bit to guest bit in pgste */ - pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */ + } #endif return pgste; - } -static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry) +static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry) { #ifdef CONFIG_PGSTE unsigned long address; @@ -688,10 +698,23 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry) /* Set page access key and fetch protection bit from pgste */ nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56; if (okey != nkey) - page_set_storage_key(address, nkey, 1); + page_set_storage_key(address, nkey, 0); #endif } +static inline void pgste_set_pte(pte_t *ptep, pte_t entry) +{ + if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) { + /* + * Without enhanced suppression-on-protection force + * the dirty bit on for all writable ptes. 
+ */ + pte_val(entry) |= _PAGE_SWC; + pte_val(entry) &= ~_PAGE_RO; + } + *ptep = entry; +} + /** * struct gmap_struct - guest address space * @mm: pointer to the parent mm_struct @@ -750,11 +773,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, if (mm_has_pgste(mm)) { pgste = pgste_get_lock(ptep); - pgste_set_pte(ptep, pgste, entry); - *ptep = entry; + pgste_set_key(ptep, pgste, entry); + pgste_set_pte(ptep, entry); pgste_set_unlock(ptep, pgste); - } else + } else { + if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1) + pte_val(entry) |= _PAGE_CO; *ptep = entry; + } } /* @@ -763,16 +789,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, */ static inline int pte_write(pte_t pte) { - return (pte_val(pte) & _PAGE_RO) == 0; + return (pte_val(pte) & _PAGE_SWW) != 0; } static inline int pte_dirty(pte_t pte) { -#ifdef CONFIG_PGSTE - if (pte_val(pte) & _PAGE_SWC) - return 1; -#endif - return 0; + return (pte_val(pte) & _PAGE_SWC) != 0; } static inline int pte_young(pte_t pte) @@ -822,11 +844,14 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { pte_val(pte) &= _PAGE_CHG_MASK; pte_val(pte) |= pgprot_val(newprot); + if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW)) + pte_val(pte) &= ~_PAGE_RO; return pte; } static inline pte_t pte_wrprotect(pte_t pte) { + pte_val(pte) &= ~_PAGE_SWW; /* Do not clobber _PAGE_TYPE_NONE pages! */ if (!(pte_val(pte) & _PAGE_INVALID)) pte_val(pte) |= _PAGE_RO; @@ -835,20 +860,26 @@ static inline pte_t pte_wrprotect(pte_t pte) static inline pte_t pte_mkwrite(pte_t pte) { - pte_val(pte) &= ~_PAGE_RO; + pte_val(pte) |= _PAGE_SWW; + if (pte_val(pte) & _PAGE_SWC) + pte_val(pte) &= ~_PAGE_RO; return pte; } static inline pte_t pte_mkclean(pte_t pte) { -#ifdef CONFIG_PGSTE pte_val(pte) &= ~_PAGE_SWC; -#endif + /* Do not clobber _PAGE_TYPE_NONE pages! */ + if (!(pte_val(pte) & _PAGE_INVALID)) + pte_val(pte) |= _PAGE_RO; return pte; } static inline pte_t pte_mkdirty(pte_t pte) { + pte_val(pte) |= _PAGE_SWC; + if (pte_val(pte) & _PAGE_SWW) + pte_val(pte) &= ~_PAGE_RO; return pte; } @@ -886,10 +917,10 @@ static inline pte_t pte_mkhuge(pte_t pte) pte_val(pte) |= _SEGMENT_ENTRY_INV; } /* - * Clear SW pte bits SWT and SWX, there are no SW bits in a segment - * table entry. + * Clear SW pte bits, there are no SW bits in a segment table entry. */ - pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX); + pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX | _PAGE_SWC | + _PAGE_SWR | _PAGE_SWW); /* * Also set the change-override bit because we don't need dirty bit * tracking for hugetlbfs pages. 
@@ -1041,9 +1072,11 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long address, pte_t *ptep, pte_t pte) { - *ptep = pte; - if (mm_has_pgste(mm)) + if (mm_has_pgste(mm)) { + pgste_set_pte(ptep, pte); pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE)); + } else + *ptep = pte; } #define __HAVE_ARCH_PTEP_CLEAR_FLUSH @@ -1111,10 +1144,13 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm, if (!mm_exclusive(mm)) __ptep_ipte(address, ptep); - *ptep = pte_wrprotect(pte); + pte = pte_wrprotect(pte); - if (mm_has_pgste(mm)) + if (mm_has_pgste(mm)) { + pgste_set_pte(ptep, pte); pgste_set_unlock(ptep, pgste); + } else + *ptep = pte; } return pte; } @@ -1132,10 +1168,12 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma, pgste = pgste_get_lock(ptep); __ptep_ipte(address, ptep); - *ptep = entry; - if (mm_has_pgste(vma->vm_mm)) + if (mm_has_pgste(vma->vm_mm)) { + pgste_set_pte(ptep, entry); pgste_set_unlock(ptep, pgste); + } else + *ptep = entry; return 1; } @@ -1153,8 +1191,13 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) { unsigned long physpage = page_to_phys(page); + pte_t __pte = mk_pte_phys(physpage, pgprot); - return mk_pte_phys(physpage, pgprot); + if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) { + pte_val(__pte) |= _PAGE_SWC; + pte_val(__pte) &= ~_PAGE_RO; + } + return __pte; } #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) @@ -1246,6 +1289,8 @@ static inline int pmd_trans_splitting(pmd_t pmd) static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t entry) { + if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1) + pmd_val(entry) |= _SEGMENT_ENTRY_CO; *pmdp = entry; } diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 833788693f0..06a13613604 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -46,7 +46,6 @@ int sclp_cpu_deconfigure(u8 cpu); void sclp_facilities_detect(void); unsigned long long sclp_get_rnmax(void); unsigned long long sclp_get_rzm(void); -u8 sclp_get_fac85(void); int sclp_sdias_blk_count(void); int sclp_sdias_copy(void *dest, int blk_num, int nr_blks); int sclp_chp_configure(struct chp_id chpid); diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index f69f76b3447..f6857516e52 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -64,13 +64,14 @@ extern unsigned int s390_user_mode; #define MACHINE_FLAG_VM (1UL << 0) #define MACHINE_FLAG_IEEE (1UL << 1) -#define MACHINE_FLAG_CSP (1UL << 3) -#define MACHINE_FLAG_MVPG (1UL << 4) -#define MACHINE_FLAG_DIAG44 (1UL << 5) -#define MACHINE_FLAG_IDTE (1UL << 6) -#define MACHINE_FLAG_DIAG9C (1UL << 7) -#define MACHINE_FLAG_MVCOS (1UL << 8) -#define MACHINE_FLAG_KVM (1UL << 9) +#define MACHINE_FLAG_CSP (1UL << 2) +#define MACHINE_FLAG_MVPG (1UL << 3) +#define MACHINE_FLAG_DIAG44 (1UL << 4) +#define MACHINE_FLAG_IDTE (1UL << 5) +#define MACHINE_FLAG_DIAG9C (1UL << 6) +#define MACHINE_FLAG_MVCOS (1UL << 7) +#define MACHINE_FLAG_KVM (1UL << 8) +#define MACHINE_FLAG_ESOP (1UL << 9) #define MACHINE_FLAG_EDAT1 (1UL << 10) #define MACHINE_FLAG_EDAT2 (1UL << 11) #define MACHINE_FLAG_LPAR (1UL << 12) @@ -84,6 +85,7 @@ extern unsigned int s390_user_mode; #define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR) #define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C) 
+#define MACHINE_HAS_ESOP (S390_lowcore.machine_flags & MACHINE_FLAG_ESOP) #define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1 #define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f090e819bf7..2923781590a 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -147,7 +147,7 @@ int kvm_dev_ioctl_check_extension(long ext) r = KVM_MAX_VCPUS; break; case KVM_CAP_S390_COW: - r = sclp_get_fac85() & 0x2; + r = MACHINE_HAS_ESOP; break; default: r = 0; diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 9017a63dda3..a70ee84c024 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -50,7 +50,7 @@ static __always_inline unsigned long follow_table(struct mm_struct *mm, ptep = pte_offset_map(pmd, addr); if (!pte_present(*ptep)) return -0x11UL; - if (write && !pte_write(*ptep)) + if (write && (!pte_write(*ptep) || !pte_dirty(*ptep))) return -0x04UL; return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK); diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index 29ccee3651f..d21040ed5e5 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c @@ -127,7 +127,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable) pte_val(*pte) = _PAGE_TYPE_EMPTY; continue; } - *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW)); + pte_val(*pte) = __pa(address); } } diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index 6ed1426d27c..79699f46a44 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -85,11 +85,9 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) pud_t *pu_dir; pmd_t *pm_dir; pte_t *pt_dir; - pte_t pte; int ret = -ENOMEM; while (address < end) { - pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0)); pg_dir = pgd_offset_k(address); if (pgd_none(*pg_dir)) { pu_dir = vmem_pud_alloc(); @@ -101,9 +99,9 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) #if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address && !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) { - pte_val(pte) |= _REGION3_ENTRY_LARGE; - pte_val(pte) |= _REGION_ENTRY_TYPE_R3; - pud_val(*pu_dir) = pte_val(pte); + pud_val(*pu_dir) = __pa(address) | + _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE | + (ro ? _REGION_ENTRY_RO : 0); address += PUD_SIZE; continue; } @@ -118,8 +116,9 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) #if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address && !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) { - pte_val(pte) |= _SEGMENT_ENTRY_LARGE; - pmd_val(*pm_dir) = pte_val(pte); + pmd_val(*pm_dir) = __pa(address) | + _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE | + (ro ? _SEGMENT_ENTRY_RO : 0); address += PMD_SIZE; continue; } @@ -132,7 +131,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) } pt_dir = pte_offset_kernel(pm_dir, address); - *pt_dir = pte; + pte_val(*pt_dir) = __pa(address) | (ro ? 
_PAGE_RO : 0); address += PAGE_SIZE; } ret = 0; @@ -199,7 +198,6 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) pud_t *pu_dir; pmd_t *pm_dir; pte_t *pt_dir; - pte_t pte; int ret = -ENOMEM; start_addr = (unsigned long) start; @@ -237,9 +235,8 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) new_page = vmemmap_alloc_block(PMD_SIZE, node); if (!new_page) goto out; - pte = mk_pte_phys(__pa(new_page), PAGE_RW); - pte_val(pte) |= _SEGMENT_ENTRY_LARGE; - pmd_val(*pm_dir) = pte_val(pte); + pmd_val(*pm_dir) = __pa(new_page) | + _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE; address = (address + PMD_SIZE) & PMD_MASK; continue; } @@ -260,8 +257,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) new_page =__pa(vmem_alloc_pages(0)); if (!new_page) goto out; - pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL); - *pt_dir = pte; + pte_val(*pt_dir) = __pa(new_page); } address += PAGE_SIZE; } diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index c44d13f607b..30a2255389e 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -56,7 +56,6 @@ static int __initdata early_read_info_sccb_valid; u64 sclp_facilities; static u8 sclp_fac84; -static u8 sclp_fac85; static unsigned long long rzm; static unsigned long long rnmax; @@ -131,7 +130,8 @@ void __init sclp_facilities_detect(void) sccb = &early_read_info_sccb; sclp_facilities = sccb->facilities; sclp_fac84 = sccb->fac84; - sclp_fac85 = sccb->fac85; + if (sccb->fac85 & 0x02) + S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; rzm <<= 20; @@ -171,12 +171,6 @@ unsigned long long sclp_get_rzm(void) return rzm; } -u8 sclp_get_fac85(void) -{ - return sclp_fac85; -} -EXPORT_SYMBOL_GPL(sclp_get_fac85); - /* * This function will be called after sclp_facilities_detect(), which gets * called from early.c code. Therefore the sccb should have valid contents. diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 5cf680a98f9..bfd87685fc1 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -197,16 +197,6 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif -#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY -#define page_test_and_clear_dirty(pfn, mapped) (0) -#endif - -#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY -#define pte_maybe_dirty(pte) pte_dirty(pte) -#else -#define pte_maybe_dirty(pte) (1) -#endif - #ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG #define page_test_and_clear_young(pfn) (0) #endif diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 70473da47b3..6d53675c2b5 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -303,21 +303,13 @@ static inline void __SetPageUptodate(struct page *page) static inline void SetPageUptodate(struct page *page) { -#ifdef CONFIG_S390 - if (!test_and_set_bit(PG_uptodate, &page->flags)) - page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, 0); -#else /* * Memory barrier must be issued before setting the PG_uptodate bit, * so that all previous stores issued in order to bring the page * uptodate are actually visible before PageUptodate becomes true. - * - * s390 doesn't need an explicit smp_wmb here because the test and - * set bit already provides full barriers. 
*/ smp_wmb(); set_bit(PG_uptodate, &(page)->flags); -#endif } CLEARPAGEFLAG(Uptodate, uptodate) diff --git a/mm/rmap.c b/mm/rmap.c index 2c78f8cadc9..3d38edffda4 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1126,7 +1126,6 @@ void page_add_file_rmap(struct page *page) */ void page_remove_rmap(struct page *page) { - struct address_space *mapping = page_mapping(page); bool anon = PageAnon(page); bool locked; unsigned long flags; @@ -1143,29 +1142,6 @@ void page_remove_rmap(struct page *page) if (!atomic_add_negative(-1, &page->_mapcount)) goto out; - /* - * Now that the last pte has gone, s390 must transfer dirty - * flag from storage key to struct page. We can usually skip - * this if the page is anon, so about to be freed; but perhaps - * not if it's in swapcache - there might be another pte slot - * containing the swap entry, but page not yet written to swap. - * - * And we can skip it on file pages, so long as the filesystem - * participates in dirty tracking (note that this is not only an - * optimization but also solves problems caused by dirty flag in - * storage key getting set by a write from inside kernel); but need to - * catch shm and tmpfs and ramfs pages which have been modified since - * creation by read fault. - * - * Note that mapping must be decided above, before decrementing - * mapcount (which luckily provides a barrier): once page is unmapped, - * it could be truncated and page->mapping reset to NULL at any moment. - * Note also that we are relying on page_mapping(page) to set mapping - * to &swapper_space when PageSwapCache(page). - */ - if (mapping && !mapping_cap_account_dirty(mapping) && - page_test_and_clear_dirty(page_to_pfn(page), 1)) - set_page_dirty(page); /* * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED * and not charged by memcg for now. -- cgit v1.2.3-70-g09d2 From 23d18e8d9311db748b5b496bc4ba38500e3d408b Mon Sep 17 00:00:00 2001 From: Hendrik Brueckner Date: Mon, 11 Feb 2013 18:11:09 +0100 Subject: s390/cleanup: rename SPP to LPP The set-program-parameter (SPP) instruction has been renamed to load-program-parameter (LPP) (see SA23-2260). Reflect this change and rename all macro/instruction references. Also remove the duplicate SPP/LPP entry in the kernel disassembler instruction list. 
Signed-off-by: Hendrik Brueckner Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/cpu_mf.h | 4 ++-- arch/s390/include/asm/setup.h | 6 +++--- arch/s390/kernel/dis.c | 1 - arch/s390/kernel/early.c | 2 +- arch/s390/kernel/entry64.S | 10 +++++----- 5 files changed, 11 insertions(+), 12 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index 35f0020b7ba..f1eddd150dd 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -34,12 +34,12 @@ /* CPU measurement facility support */ static inline int cpum_cf_avail(void) { - return MACHINE_HAS_SPP && test_facility(67); + return MACHINE_HAS_LPP && test_facility(67); } static inline int cpum_sf_avail(void) { - return MACHINE_HAS_SPP && test_facility(68); + return MACHINE_HAS_LPP && test_facility(68); } diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index f6857516e52..ff67d730c00 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -75,7 +75,7 @@ extern unsigned int s390_user_mode; #define MACHINE_FLAG_EDAT1 (1UL << 10) #define MACHINE_FLAG_EDAT2 (1UL << 11) #define MACHINE_FLAG_LPAR (1UL << 12) -#define MACHINE_FLAG_SPP (1UL << 13) +#define MACHINE_FLAG_LPP (1UL << 13) #define MACHINE_FLAG_TOPOLOGY (1UL << 14) #define MACHINE_FLAG_TE (1UL << 15) #define MACHINE_FLAG_RRBM (1UL << 16) @@ -98,7 +98,7 @@ extern unsigned int s390_user_mode; #define MACHINE_HAS_MVCOS (0) #define MACHINE_HAS_EDAT1 (0) #define MACHINE_HAS_EDAT2 (0) -#define MACHINE_HAS_SPP (0) +#define MACHINE_HAS_LPP (0) #define MACHINE_HAS_TOPOLOGY (0) #define MACHINE_HAS_TE (0) #define MACHINE_HAS_RRBM (0) @@ -111,7 +111,7 @@ extern unsigned int s390_user_mode; #define MACHINE_HAS_MVCOS (S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS) #define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1) #define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2) -#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP) +#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP) #define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) #define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE) #define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM) diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index a7f9abd98cf..c50665fe943 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -840,7 +840,6 @@ static struct insn opcode_b2[] = { { "stcke", 0x78, INSTR_S_RD }, { "sacf", 0x79, INSTR_S_RD }, { "stsi", 0x7d, INSTR_S_RD }, - { "spp", 0x80, INSTR_S_RD }, { "srnm", 0x99, INSTR_S_RD }, { "stfpc", 0x9c, INSTR_S_RD }, { "lfpc", 0x9d, INSTR_S_RD }, diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 1ee98e56fc6..bda011e2f8a 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -381,7 +381,7 @@ static __init void detect_machine_facilities(void) if (test_facility(27)) S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; if (test_facility(40)) - S390_lowcore.machine_flags |= MACHINE_FLAG_SPP; + S390_lowcore.machine_flags |= MACHINE_FLAG_LPP; if (test_facility(50) && test_facility(73)) S390_lowcore.machine_flags |= MACHINE_FLAG_TE; if (test_facility(66)) diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 6d34e0c97a3..9c837c10129 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -72,9 +72,9 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | 
_TIF_NEED_RESCHED | _TIF_MCCK_PENDING) #endif .endm - .macro SPP newpp + .macro LPP newpp #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) - tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP + tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP jz .+8 .insn s,0xb2800000,\newpp #endif @@ -96,7 +96,7 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) jhe .+22 .endif lg %r9,BASED(.Lsie_loop) - SPP BASED(.Lhost_id) # set host id + LPP BASED(.Lhost_id) # set host id #endif .endm @@ -967,10 +967,10 @@ sie_loop: lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce sie_gmap: lg %r14,__SF_EMPTY(%r15) # get control block pointer - SPP __SF_EMPTY(%r15) # set guest id + LPP __SF_EMPTY(%r15) # set guest id sie 0(%r14) sie_done: - SPP __SF_EMPTY+16(%r15) # set host id + LPP __SF_EMPTY+16(%r15) # set host id lg %r14,__LC_THREAD_INFO # pointer thread_info struct sie_exit: lctlg %c1,%c1,__LC_USER_ASCE # load primary asce -- cgit v1.2.3-70-g09d2 From e80cfc31d872b6b85b8966bce6ba80bee401a7dd Mon Sep 17 00:00:00 2001 From: Hendrik Brueckner Date: Wed, 31 Oct 2012 17:26:44 +0100 Subject: s390/module: Add missing R_390_NONE relocation type Allow loading of kernel modules that have relocations of type R_390_NONE. Signed-off-by: Hendrik Brueckner Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/module.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 06f17311628..f750bd7bd2c 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -234,6 +234,9 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, val = symtab[r_sym].st_value; switch (r_type) { + case R_390_NONE: /* No relocation. */ + rc = 0; + break; case R_390_8: /* Direct 8 bit. */ case R_390_12: /* Direct 12 bit. */ case R_390_16: /* Direct 16 bit. */ -- cgit v1.2.3-70-g09d2