Diffstat (limited to 'arch/powerpc/kernel')
26 files changed, 233 insertions, 218 deletions
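A large share of these hunks (dma-iommu.c, dma.c, ibmebus.c, iommu.c, vio.c) convert the powerpc dma_mapping_ops from map_single/unmap_single to map_page/unmap_page, so callers hand over a struct page plus an offset instead of a kernel virtual address. Below is a minimal userspace sketch of the (page, offset) decomposition those hooks rely on; PG_SIZE, bus_offset and map_page_stub are illustrative stand-ins rather than code from the patch. The point it demonstrates is the property restated in the updated iommu_map_page comments: the returned bus address keeps the caller's byte offset within the page.

    /*
     * Userspace sketch only (not kernel code): decompose an arbitrary
     * virtual address into a page-aligned base plus an in-page offset,
     * "map" the page, and check that the bus address preserves the offset.
     * PG_SIZE, bus_offset and map_page_stub are assumptions for illustration.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PG_SIZE 4096UL
    #define PG_MASK (~(PG_SIZE - 1))

    /* Stand-in for the kernel's dma_addr_t. */
    typedef uint64_t bus_addr_t;

    /* Pretend mapping: identity plus a fixed, page-aligned bus offset. */
    static bus_addr_t map_page_stub(uintptr_t page_base, unsigned long offset)
    {
        const bus_addr_t bus_offset = 0x80000000UL;  /* assumed value */
        return (bus_addr_t)page_base + bus_offset + offset;
    }

    int main(void)
    {
        char *buf = malloc(3 * PG_SIZE);
        char *vaddr = buf + 5000;                    /* not page aligned */

        uintptr_t page_base = (uintptr_t)vaddr & PG_MASK;
        unsigned long offset = (uintptr_t)vaddr & ~PG_MASK;

        bus_addr_t dma = map_page_stub(page_base, offset);

        /* Both offsets come out equal: the in-page position is preserved. */
        printf("vaddr offset in page: %lu\n", offset);
        printf("bus   offset in page: %lu\n", (unsigned long)(dma & ~PG_MASK));

        free(buf);
        return 0;
    }

Compiled with any C compiler, the two printed offsets match, which is all the map_single callers ever depended on and why the conversion can be done per-bus without touching drivers.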
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index fdb58253fa5..92673b43858 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -12,7 +12,7 @@ CFLAGS_prom_init.o += -fPIC CFLAGS_btext.o += -fPIC endif -ifdef CONFIG_FTRACE +ifdef CONFIG_FUNCTION_TRACER # Do not trace early boot code CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog diff --git a/arch/powerpc/kernel/cpu_setup_44x.S b/arch/powerpc/kernel/cpu_setup_44x.S index 80cac984d85..10b4ab1008a 100644 --- a/arch/powerpc/kernel/cpu_setup_44x.S +++ b/arch/powerpc/kernel/cpu_setup_44x.S @@ -34,7 +34,13 @@ _GLOBAL(__setup_cpu_440grx) blr _GLOBAL(__setup_cpu_460ex) _GLOBAL(__setup_cpu_460gt) - b __init_fpu_44x + mflr r4 + bl __init_fpu_44x + bl __fixup_440A_mcheck + mtlr r4 + blr + +_GLOBAL(__setup_cpu_440x5) _GLOBAL(__setup_cpu_440gx) _GLOBAL(__setup_cpu_440spe) b __fixup_440A_mcheck diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index b1eb834bc0f..7e8719504f3 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -39,6 +39,7 @@ extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_440gx(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_440spe(unsigned long offset, struct cpu_spec* spec); +extern void __setup_cpu_440x5(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec); @@ -1500,6 +1501,8 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_user_features = COMMON_USER_BOOKE, .icache_bsize = 32, .dcache_bsize = 32, + .cpu_setup = __setup_cpu_440x5, + .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 460EX */ diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 49248f89ce2..14183af1b3f 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -30,28 +30,26 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size, } /* Creates TCEs for a user provided buffer. The user buffer must be - * contiguous real kernel storage (not vmalloc). The address of the buffer - * passed here is the kernel (virtual) address of the buffer. The buffer - * need not be page aligned, the dma_addr_t returned will point to the same - * byte within the page as vaddr. + * contiguous real kernel storage (not vmalloc). The address passed here + * comprises a page address and offset into that page. The dma_addr_t + * returned will point to the same byte within the page as was passed in. 
*/ -static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr, - size_t size, - enum dma_data_direction direction, - struct dma_attrs *attrs) +static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction, + struct dma_attrs *attrs) { - return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size, - device_to_mask(dev), direction, attrs); + return iommu_map_page(dev, dev->archdata.dma_data, page, offset, size, + device_to_mask(dev), direction, attrs); } -static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle, - size_t size, - enum dma_data_direction direction, - struct dma_attrs *attrs) +static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction direction, + struct dma_attrs *attrs) { - iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction, - attrs); + iommu_unmap_page(dev->archdata.dma_data, dma_handle, size, direction, + attrs); } @@ -94,10 +92,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask) struct dma_mapping_ops dma_iommu_ops = { .alloc_coherent = dma_iommu_alloc_coherent, .free_coherent = dma_iommu_free_coherent, - .map_single = dma_iommu_map_single, - .unmap_single = dma_iommu_unmap_single, .map_sg = dma_iommu_map_sg, .unmap_sg = dma_iommu_unmap_sg, .dma_supported = dma_iommu_dma_supported, + .map_page = dma_iommu_map_page, + .unmap_page = dma_iommu_unmap_page, }; EXPORT_SYMBOL(dma_iommu_ops); diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 1562daf8839..3a6eaa876ee 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -75,6 +75,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, for_each_sg(sgl, sg, nents, i) { sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev); sg->dma_length = sg->length; + __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); } return nents; diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 1cbbf703364..7ecc0d1855c 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -1158,7 +1158,7 @@ machine_check_in_rtas: #endif /* CONFIG_PPC_RTAS */ -#ifdef CONFIG_FTRACE +#ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_DYNAMIC_FTRACE _GLOBAL(mcount) _GLOBAL(_mcount) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index fd8b4bae9b0..e0bcf935428 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -57,12 +57,18 @@ system_call_common: beq- 1f ld r1,PACAKSAVE(r13) 1: std r10,0(r1) - crclr so std r11,_NIP(r1) std r12,_MSR(r1) std r0,GPR0(r1) std r10,GPR1(r1) ACCOUNT_CPU_USER_ENTRY(r10, r11) + /* + * This "crclr so" clears CR0.SO, which is the error indication on + * return from this system call. There must be no cmp instruction + * between it and the "mfcr r9" below, otherwise if XER.SO is set, + * CR0.SO will get set, causing all system calls to appear to fail. 
+ */ + crclr so std r2,GPR2(r1) std r3,GPR3(r1) std r4,GPR4(r1) @@ -884,7 +890,7 @@ _GLOBAL(enter_prom) mtlr r0 blr -#ifdef CONFIG_FTRACE +#ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_DYNAMIC_FTRACE _GLOBAL(mcount) _GLOBAL(_mcount) diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index 3855ceb937b..f4b006ed0ab 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c @@ -28,17 +28,17 @@ static unsigned int ftrace_nop = 0x60000000; #endif -static unsigned int notrace ftrace_calc_offset(long ip, long addr) +static unsigned int ftrace_calc_offset(long ip, long addr) { return (int)(addr - ip); } -notrace unsigned char *ftrace_nop_replace(void) +unsigned char *ftrace_nop_replace(void) { return (char *)&ftrace_nop; } -notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) +unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) { static unsigned int op; @@ -68,7 +68,7 @@ notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) # define _ASM_PTR " .long " #endif -notrace int +int ftrace_modify_code(unsigned long ip, unsigned char *old_code, unsigned char *new_code) { @@ -113,7 +113,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code, return faulted; } -notrace int ftrace_update_ftrace_func(ftrace_func_t func) +int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned long ip = (unsigned long)(&ftrace_call); unsigned char old[MCOUNT_INSN_SIZE], *new; @@ -126,23 +126,6 @@ notrace int ftrace_update_ftrace_func(ftrace_func_t func) return ret; } -notrace int ftrace_mcount_set(unsigned long *data) -{ - unsigned long ip = (long)(&mcount_call); - unsigned long *addr = data; - unsigned char old[MCOUNT_INSN_SIZE], *new; - - /* - * Replace the mcount stub with a pointer to the - * ip recorder function. - */ - memcpy(old, &mcount_call, MCOUNT_INSN_SIZE); - new = ftrace_call_replace(ip, *addr); - *addr = ftrace_modify_code(ip, old, new); - - return 0; -} - int __init ftrace_dyn_arch_init(void *data) { /* This is running in kstop_machine */ diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 69489bd3210..b4bcf5a930f 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -97,12 +97,6 @@ __secondary_hold_spinloop: __secondary_hold_acknowledge: .llong 0x0 - /* This flag is set by purgatory if we should be a kdump kernel. */ - /* Do not move this variable as purgatory knows about it. */ - .globl __kdump_flag -__kdump_flag: - .llong 0x0 - #ifdef CONFIG_PPC_ISERIES /* * At offset 0x20, there is a pointer to iSeries LPAR data. @@ -112,6 +106,20 @@ __kdump_flag: .llong hvReleaseData-KERNELBASE #endif /* CONFIG_PPC_ISERIES */ +#ifdef CONFIG_CRASH_DUMP + /* This flag is set to 1 by a loader if the kernel should run + * at the loaded address instead of the linked address. This + * is used by kexec-tools to keep the the kdump kernel in the + * crash_kernel region. The loader is responsible for + * observing the alignment requirement. + */ + /* Do not move this variable as kexec-tools knows about it. */ + . = 0x5c + .globl __run_at_load +__run_at_load: + .long 0x72756e30 /* "run0" -- relocate to 0 by default */ +#endif + . = 0x60 /* * The following code is used to hold secondary processors @@ -1391,8 +1399,8 @@ _STATIC(__after_prom_start) lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ sldi r25,r25,32 #ifdef CONFIG_CRASH_DUMP - ld r7,__kdump_flag-_stext(r26) - cmpldi cr0,r7,1 /* kdump kernel ? 
- stay where we are */ + lwz r7,__run_at_load-_stext(r26) + cmplwi cr0,r7,1 /* kdump kernel ? - stay where we are */ bne 1f add r25,r25,r26 #endif @@ -1416,11 +1424,11 @@ _STATIC(__after_prom_start) #ifdef CONFIG_CRASH_DUMP /* * Check if the kernel has to be running as relocatable kernel based on the - * variable __kdump_flag, if it is set the kernel is treated as relocatable + * variable __run_at_load, if it is set the kernel is treated as relocatable * kernel, otherwise it will be moved to PHYSICAL_START */ - ld r7,__kdump_flag-_stext(r26) - cmpldi cr0,r7,1 + lwz r7,__run_at_load-_stext(r26) + cmplwi cr0,r7,1 bne 3f li r5,__end_interrupts - _stext /* just copy interrupts */ diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index a06362223f8..64299d28f36 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -79,20 +79,21 @@ static void ibmebus_free_coherent(struct device *dev, kfree(vaddr); } -static dma_addr_t ibmebus_map_single(struct device *dev, - void *ptr, - size_t size, - enum dma_data_direction direction, - struct dma_attrs *attrs) +static dma_addr_t ibmebus_map_page(struct device *dev, + struct page *page, + unsigned long offset, + size_t size, + enum dma_data_direction direction, + struct dma_attrs *attrs) { - return (dma_addr_t)(ptr); + return (dma_addr_t)(page_address(page) + offset); } -static void ibmebus_unmap_single(struct device *dev, - dma_addr_t dma_addr, - size_t size, - enum dma_data_direction direction, - struct dma_attrs *attrs) +static void ibmebus_unmap_page(struct device *dev, + dma_addr_t dma_addr, + size_t size, + enum dma_data_direction direction, + struct dma_attrs *attrs) { return; } @@ -129,11 +130,11 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask) static struct dma_mapping_ops ibmebus_dma_ops = { .alloc_coherent = ibmebus_alloc_coherent, .free_coherent = ibmebus_free_coherent, - .map_single = ibmebus_map_single, - .unmap_single = ibmebus_unmap_single, .map_sg = ibmebus_map_sg, .unmap_sg = ibmebus_unmap_sg, .dma_supported = ibmebus_dma_supported, + .map_page = ibmebus_map_page, + .unmap_page = ibmebus_unmap_page, }; static int ibmebus_match_path(struct device *dev, void *data) diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 3857d7e2af0..1bfa706b96e 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -32,6 +32,7 @@ #include <linux/dma-mapping.h> #include <linux/bitops.h> #include <linux/iommu-helper.h> +#include <linux/crash_dump.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/iommu.h> @@ -460,7 +461,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, static void iommu_table_clear(struct iommu_table *tbl) { - if (!__kdump_flag) { + if (!is_kdump_kernel()) { /* Clear the table in case firmware left allocations in it */ ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size); return; @@ -564,21 +565,23 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name) } /* Creates TCEs for a user provided buffer. The user buffer must be - * contiguous real kernel storage (not vmalloc). The address of the buffer - * passed here is the kernel (virtual) address of the buffer. The buffer - * need not be page aligned, the dma_addr_t returned will point to the same - * byte within the page as vaddr. + * contiguous real kernel storage (not vmalloc). The address passed here + * comprises a page address and offset into that page. 
The dma_addr_t + * returned will point to the same byte within the page as was passed in. */ -dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl, - void *vaddr, size_t size, unsigned long mask, - enum dma_data_direction direction, struct dma_attrs *attrs) +dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, + struct page *page, unsigned long offset, size_t size, + unsigned long mask, enum dma_data_direction direction, + struct dma_attrs *attrs) { dma_addr_t dma_handle = DMA_ERROR_CODE; + void *vaddr; unsigned long uaddr; unsigned int npages, align; BUG_ON(direction == DMA_NONE); + vaddr = page_address(page) + offset; uaddr = (unsigned long)vaddr; npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE); @@ -604,9 +607,9 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl, return dma_handle; } -void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle, - size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) +void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, + size_t size, enum dma_data_direction direction, + struct dma_attrs *attrs) { unsigned int npages; diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index e6efec788c4..3c4ca046e85 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -255,14 +255,11 @@ static union thread_union kexec_stack /* Our assembly helper, in kexec_stub.S */ extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start, void *image, void *control, - void (*clear_all)(void), - unsigned long kdump_flag) ATTRIB_NORET; + void (*clear_all)(void)) ATTRIB_NORET; /* too late to fail here */ void default_machine_kexec(struct kimage *image) { - unsigned long kdump_flag = 0; - /* prepare control code if any */ /* @@ -275,8 +272,6 @@ void default_machine_kexec(struct kimage *image) if (crashing_cpu == -1) kexec_prepare_cpus(); - else - kdump_flag = KDUMP_SIGNATURE; /* switch to a staticly allocated stack. Based on irq stack code. * XXX: the task struct will likely be invalid once we do the copy! @@ -289,7 +284,7 @@ void default_machine_kexec(struct kimage *image) */ kexec_sequence(&kexec_stack, image->start, image, page_address(image->control_code_page), - ppc_md.hpte_clear_all, kdump_flag); + ppc_md.hpte_clear_all); /* NOTREACHED */ } diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 6a9b4bf0d17..5c33bc14bd9 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -470,6 +470,8 @@ _GLOBAL(_tlbil_pid) mfspr r3,SPRN_MMUCSR0 andi. r3,r3,MMUCSR0_TLBFI@l bne 1b + msync + isync blr /* @@ -477,15 +479,20 @@ _GLOBAL(_tlbil_pid) * (no broadcast) */ _GLOBAL(_tlbil_va) + mfmsr r10 + wrteei 0 slwi r4,r4,16 mtspr SPRN_MAS6,r4 /* assume AS=0 for now */ tlbsx 0,r3 mfspr r4,SPRN_MAS1 /* check valid */ andis. 
r3,r4,MAS1_VALID@h - beqlr + beq 1f rlwinm r4,r4,0,1,31 mtspr SPRN_MAS1,r4 tlbwe + msync + isync +1: wrtee r10 blr #endif /* CONFIG_FSL_BOOKE */ diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index a243fd072a7..3053fe5c62f 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -611,12 +611,10 @@ real_mode: /* assume normal blr return */ /* - * kexec_sequence(newstack, start, image, control, clear_all(), kdump_flag) + * kexec_sequence(newstack, start, image, control, clear_all()) * * does the grungy work with stack switching and real mode switches * also does simple calls to other code - * - * kdump_flag says whether the next kernel should be a kdump kernel. */ _GLOBAL(kexec_sequence) @@ -649,7 +647,7 @@ _GLOBAL(kexec_sequence) mr r29,r5 /* image (virt) */ mr r28,r6 /* control, unused */ mr r27,r7 /* clear_all() fn desc */ - mr r26,r8 /* kdump flag */ + mr r26,r8 /* spare */ lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */ /* disable interrupts, we are overwriting kernel data next */ @@ -711,6 +709,5 @@ _GLOBAL(kexec_sequence) mr r4,r30 # start, aka phys mem offset mtlr 4 li r5,0 - mr r6,r26 /* kdump_flag */ - blr /* image->start(physid, image->start, 0, kdump_flag); */ + blr /* image->start(physid, image->start, 0); */ #endif /* CONFIG_KEXEC */ diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c index 93ae5b169f4..f3c9cae01dd 100644 --- a/arch/powerpc/kernel/of_device.c +++ b/arch/powerpc/kernel/of_device.c @@ -78,7 +78,6 @@ struct of_device *of_device_alloc(struct device_node *np, dev->dev.parent = parent; dev->dev.release = of_release_dev; dev->dev.archdata.of_node = np; - set_dev_node(&dev->dev, of_node_to_nid(np)); if (bus_id) strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE); diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 1ec73938a00..f36936d9fda 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -1239,69 +1239,66 @@ static int __init reparent_resources(struct resource *parent, * as well. */ -static void __init pcibios_allocate_bus_resources(struct list_head *bus_list) +void pcibios_allocate_bus_resources(struct pci_bus *bus) { - struct pci_bus *bus; + struct pci_bus *b; int i; struct resource *res, *pr; - /* Depth-First Search on bus tree */ - list_for_each_entry(bus, bus_list, node) { - for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) { - if ((res = bus->resource[i]) == NULL || !res->flags - || res->start > res->end) - continue; - if (bus->parent == NULL) - pr = (res->flags & IORESOURCE_IO) ? - &ioport_resource : &iomem_resource; - else { - /* Don't bother with non-root busses when - * re-assigning all resources. We clear the - * resource flags as if they were colliding - * and as such ensure proper re-allocation - * later. + for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) { + if ((res = bus->resource[i]) == NULL || !res->flags + || res->start > res->end) + continue; + if (bus->parent == NULL) + pr = (res->flags & IORESOURCE_IO) ? + &ioport_resource : &iomem_resource; + else { + /* Don't bother with non-root busses when + * re-assigning all resources. We clear the + * resource flags as if they were colliding + * and as such ensure proper re-allocation + * later. 
+ */ + if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_RSRC) + goto clear_resource; + pr = pci_find_parent_resource(bus->self, res); + if (pr == res) { + /* this happens when the generic PCI + * code (wrongly) decides that this + * bridge is transparent -- paulus */ - if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_RSRC) - goto clear_resource; - pr = pci_find_parent_resource(bus->self, res); - if (pr == res) { - /* this happens when the generic PCI - * code (wrongly) decides that this - * bridge is transparent -- paulus - */ - continue; - } + continue; } + } - DBG("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx " - "[0x%x], parent %p (%s)\n", - bus->self ? pci_name(bus->self) : "PHB", - bus->number, i, - (unsigned long long)res->start, - (unsigned long long)res->end, - (unsigned int)res->flags, - pr, (pr && pr->name) ? pr->name : "nil"); - - if (pr && !(pr->flags & IORESOURCE_UNSET)) { - if (request_resource(pr, res) == 0) - continue; - /* - * Must be a conflict with an existing entry. - * Move that entry (or entries) under the - * bridge resource and try again. - */ - if (reparent_resources(pr, res) == 0) - continue; - } - printk(KERN_WARNING - "PCI: Cannot allocate resource region " - "%d of PCI bridge %d, will remap\n", - i, bus->number); -clear_resource: - res->flags = 0; + DBG("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx " + "[0x%x], parent %p (%s)\n", + bus->self ? pci_name(bus->self) : "PHB", + bus->number, i, + (unsigned long long)res->start, + (unsigned long long)res->end, + (unsigned int)res->flags, + pr, (pr && pr->name) ? pr->name : "nil"); + + if (pr && !(pr->flags & IORESOURCE_UNSET)) { + if (request_resource(pr, res) == 0) + continue; + /* + * Must be a conflict with an existing entry. + * Move that entry (or entries) under the + * bridge resource and try again. + */ + if (reparent_resources(pr, res) == 0) + continue; } - pcibios_allocate_bus_resources(&bus->children); + printk(KERN_WARNING "PCI: Cannot allocate resource region " + "%d of PCI bridge %d, will remap\n", i, bus->number); +clear_resource: + res->flags = 0; } + + list_for_each_entry(b, &bus->children, node) + pcibios_allocate_bus_resources(b); } static inline void __devinit alloc_resource(struct pci_dev *dev, int idx) @@ -1372,10 +1369,13 @@ static void __init pcibios_allocate_resources(int pass) void __init pcibios_resource_survey(void) { + struct pci_bus *b; + /* Allocate and assign resources. 
If we re-assign everything, then * we skip the allocate phase */ - pcibios_allocate_bus_resources(&pci_root_buses); + list_for_each_entry(b, &pci_root_buses, node) + pcibios_allocate_bus_resources(b); if (!(ppc_pci_flags & PPC_PCI_REASSIGN_ALL_RSRC)) { pcibios_allocate_resources(0); diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 8247cff1cb3..3502b9101e6 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -426,7 +426,7 @@ int pcibios_unmap_io_space(struct pci_bus *bus) pci_name(bus->self)); __flush_hash_table_range(&init_mm, res->start + _IO_BASE, - res->end - res->start + 1); + res->end + _IO_BASE + 1); return 0; } diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 8edc2359c41..260089dccfb 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -68,7 +68,7 @@ EXPORT_SYMBOL(single_step_exception); EXPORT_SYMBOL(sys_sigreturn); #endif -#ifdef CONFIG_FTRACE +#ifdef CONFIG_FUNCTION_TRACER EXPORT_SYMBOL(_mcount); #endif diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 23e0db20332..2445945d376 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -671,7 +671,7 @@ static struct fake_elf { u32 ignore_me; } rpadesc; } rpanote; -} fake_elf __section(.fakeelf) = { +} fake_elf = { .elfhdr = { .e_ident = { 0x7f, 'E', 'L', 'F', ELFCLASS32, ELFDATA2MSB, EV_CURRENT }, @@ -713,13 +713,13 @@ static struct fake_elf { .type = 0x12759999, .name = "IBM,RPA-Client-Config", .rpadesc = { - .lpar_affinity = 1, - .min_rmo_size = 128, /* in megabytes */ + .lpar_affinity = 0, + .min_rmo_size = 64, /* in megabytes */ .min_rmo_percent = 0, - .max_pft_size = 46, /* 2^46 bytes max PFT size */ + .max_pft_size = 48, /* 2^48 bytes max PFT size */ .splpar = 1, .min_load = ~0U, - .new_mem_def = 1 + .new_mem_def = 0 } } }; diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c index bc1fb27368a..a11d68976dc 100644 --- a/arch/powerpc/kernel/prom_parse.c +++ b/arch/powerpc/kernel/prom_parse.c @@ -250,8 +250,11 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) * parsing */ dn = pci_device_to_OF_node(pdev); - if (dn) - return of_irq_map_one(dn, 0, out_irq); + if (dn) { + rc = of_irq_map_one(dn, 0, out_irq); + if (!rc) + return rc; + } /* Ok, we don't, time to have fun. Let's start by building up an * interrupt spec. we assume #interrupt-cells is 1, which is standard diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 843c0af210d..169d74cef15 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -444,9 +444,9 @@ void __init setup_system(void) if (htab_address) printk("htab_address = 0x%p\n", htab_address); printk("htab_hash_mask = 0x%lx\n", htab_hash_mask); -#if PHYSICAL_START > 0 - printk("physical_start = 0x%lx\n", PHYSICAL_START); -#endif + if (PHYSICAL_START > 0) + printk("physical_start = 0x%lx\n", + PHYSICAL_START); printk("-----------------------------------------------------\n"); DBG(" <- setup_system()\n"); diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 3e80aa32b8b..b13abf30599 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -410,7 +410,7 @@ inline unsigned long copy_fpr_from_user(struct task_struct *task, * altivec/spe instructions at some point. 
*/ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, - int sigret) + int sigret, int ctx_has_vsx_region) { unsigned long msr = regs->msr; @@ -451,7 +451,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, * the saved MSR value to indicate that frame->mc_vregs * contains valid data */ - if (current->thread.used_vsr) { + if (current->thread.used_vsr && ctx_has_vsx_region) { __giveup_vsx(current); if (copy_vsx_to_user(&frame->mc_vsregs, current)) return 1; @@ -858,11 +858,11 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, frame = &rt_sf->uc.uc_mcontext; addr = frame; if (vdso32_rt_sigtramp && current->mm->context.vdso_base) { - if (save_user_regs(regs, frame, 0)) + if (save_user_regs(regs, frame, 0, 1)) goto badframe; regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp; } else { - if (save_user_regs(regs, frame, __NR_rt_sigreturn)) + if (save_user_regs(regs, frame, __NR_rt_sigreturn, 1)) goto badframe; regs->link = (unsigned long) frame->tramp; } @@ -936,13 +936,26 @@ long sys_swapcontext(struct ucontext __user *old_ctx, int ctx_size, int r6, int r7, int r8, struct pt_regs *regs) { unsigned char tmp; + int ctx_has_vsx_region = 0; #ifdef CONFIG_PPC64 unsigned long new_msr = 0; - if (new_ctx && - __get_user(new_msr, &new_ctx->uc_mcontext.mc_gregs[PT_MSR])) - return -EFAULT; + if (new_ctx) { + struct mcontext __user *mcp; + u32 cmcp; + + /* + * Get pointer to the real mcontext. No need for + * access_ok since we are dealing with compat + * pointers. + */ + if (__get_user(cmcp, &new_ctx->uc_regs)) + return -EFAULT; + mcp = (struct mcontext __user *)(u64)cmcp; + if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR])) + return -EFAULT; + } /* * Check that the context is not smaller than the original * size (with VMX but without VSX) @@ -956,16 +969,9 @@ long sys_swapcontext(struct ucontext __user *old_ctx, if ((ctx_size < sizeof(struct ucontext)) && (new_msr & MSR_VSX)) return -EINVAL; -#ifdef CONFIG_VSX - /* - * If userspace doesn't provide enough room for VSX data, - * but current thread has used VSX, we don't have anywhere - * to store the full context back into. - */ - if ((ctx_size < sizeof(struct ucontext)) && - (current->thread.used_vsr && old_ctx)) - return -EINVAL; -#endif + /* Does the context have enough room to store VSX data? */ + if (ctx_size >= sizeof(struct ucontext)) + ctx_has_vsx_region = 1; #else /* Context size is for future use. 
Right now, we only make sure * we are passed something we understand @@ -985,17 +991,17 @@ long sys_swapcontext(struct ucontext __user *old_ctx, */ mctx = (struct mcontext __user *) ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL); - if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx)) - || save_user_regs(regs, mctx, 0) + if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size) + || save_user_regs(regs, mctx, 0, ctx_has_vsx_region) || put_sigset_t(&old_ctx->uc_sigmask, ¤t->blocked) || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs)) return -EFAULT; } if (new_ctx == NULL) return 0; - if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx)) + if (!access_ok(VERIFY_READ, new_ctx, ctx_size) || __get_user(tmp, (u8 __user *) new_ctx) - || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1)) + || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1)) return -EFAULT; /* @@ -1196,11 +1202,11 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, goto badframe; if (vdso32_sigtramp && current->mm->context.vdso_base) { - if (save_user_regs(regs, &frame->mctx, 0)) + if (save_user_regs(regs, &frame->mctx, 0, 1)) goto badframe; regs->link = current->mm->context.vdso_base + vdso32_sigtramp; } else { - if (save_user_regs(regs, &frame->mctx, __NR_sigreturn)) + if (save_user_regs(regs, &frame->mctx, __NR_sigreturn, 1)) goto badframe; regs->link = (unsigned long) frame->mctx.tramp; } diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index c6a8f2326b6..e132891d3ce 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -74,7 +74,8 @@ static const char fmt64[] = KERN_INFO \ */ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, - int signr, sigset_t *set, unsigned long handler) + int signr, sigset_t *set, unsigned long handler, + int ctx_has_vsx_region) { /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the * process never used altivec yet (MSR_VEC is zero in pt_regs of @@ -121,7 +122,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, * then out to userspace. Update v_regs to point after the * VMX data. */ - if (current->thread.used_vsr) { + if (current->thread.used_vsr && ctx_has_vsx_region) { __giveup_vsx(current); v_regs += ELF_NVRREG; err |= copy_vsx_to_user(v_regs, current); @@ -282,9 +283,10 @@ int sys_swapcontext(struct ucontext __user *old_ctx, unsigned char tmp; sigset_t set; unsigned long new_msr = 0; + int ctx_has_vsx_region = 0; if (new_ctx && - __get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR])) + get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR])) return -EFAULT; /* * Check that the context is not smaller than the original @@ -299,28 +301,23 @@ int sys_swapcontext(struct ucontext __user *old_ctx, if ((ctx_size < sizeof(struct ucontext)) && (new_msr & MSR_VSX)) return -EINVAL; -#ifdef CONFIG_VSX - /* - * If userspace doesn't provide enough room for VSX data, - * but current thread has used VSX, we don't have anywhere - * to store the full context back into. - */ - if ((ctx_size < sizeof(struct ucontext)) && - (current->thread.used_vsr && old_ctx)) - return -EINVAL; -#endif + /* Does the context have enough room to store VSX data? 
*/ + if (ctx_size >= sizeof(struct ucontext)) + ctx_has_vsx_region = 1; + if (old_ctx != NULL) { - if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx)) - || setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0) + if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size) + || setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0, + ctx_has_vsx_region) || __copy_to_user(&old_ctx->uc_sigmask, ¤t->blocked, sizeof(sigset_t))) return -EFAULT; } if (new_ctx == NULL) return 0; - if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx)) + if (!access_ok(VERIFY_READ, new_ctx, ctx_size) || __get_user(tmp, (u8 __user *) new_ctx) - || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1)) + || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1)) return -EFAULT; /* @@ -423,7 +420,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, signr, NULL, - (unsigned long)ka->sa.sa_handler); + (unsigned long)ka->sa.sa_handler, 1); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto badframe; diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 86a2ffccef2..20885a38237 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -717,9 +717,11 @@ static void unregister_cpu_online(unsigned int cpu) BUG_ON(!c->hotpluggable); +#ifdef CONFIG_PPC64 if (!firmware_has_feature(FW_FEATURE_ISERIES) && cpu_has_feature(CPU_FTR_SMT)) sysdev_remove_file(s, &attr_smt_snooze_delay); +#endif /* PMC stuff */ switch (cur_cpu_spec->pmc_type) { diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 434c92a85c0..a11e6bc59b3 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -516,10 +516,10 @@ static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); } -static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr, - size_t size, - enum dma_data_direction direction, - struct dma_attrs *attrs) +static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction, + struct dma_attrs *attrs) { struct vio_dev *viodev = to_vio_dev(dev); dma_addr_t ret = DMA_ERROR_CODE; @@ -529,7 +529,7 @@ static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr, return ret; } - ret = dma_iommu_ops.map_single(dev, vaddr, size, direction, attrs); + ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs); if (unlikely(dma_mapping_error(dev, ret))) { vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); atomic_inc(&viodev->cmo.allocs_failed); @@ -538,14 +538,14 @@ static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr, return ret; } -static void vio_dma_iommu_unmap_single(struct device *dev, - dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction, - struct dma_attrs *attrs) +static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, + size_t size, + enum dma_data_direction direction, + struct dma_attrs *attrs) { struct vio_dev *viodev = to_vio_dev(dev); - dma_iommu_ops.unmap_single(dev, dma_handle, size, direction, attrs); + dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs); vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); } @@ -603,10 +603,11 @@ static void vio_dma_iommu_unmap_sg(struct device *dev, 
struct dma_mapping_ops vio_dma_mapping_ops = { .alloc_coherent = vio_dma_iommu_alloc_coherent, .free_coherent = vio_dma_iommu_free_coherent, - .map_single = vio_dma_iommu_map_single, - .unmap_single = vio_dma_iommu_unmap_single, .map_sg = vio_dma_iommu_map_sg, .unmap_sg = vio_dma_iommu_unmap_sg, + .map_page = vio_dma_iommu_map_page, + .unmap_page = vio_dma_iommu_unmap_page, + }; /** diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index b39c27ed791..2412c056baa 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -187,6 +187,7 @@ SECTIONS *(.machine.desc) __machine_desc_end = . ; } +#ifdef CONFIG_RELOCATABLE . = ALIGN(8); .dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET) { *(.dynsym) } .dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) } @@ -202,9 +203,7 @@ SECTIONS __rela_dyn_start = .; *(.rela*) } - - /* Fake ELF header containing RPA note; for addnote */ - .fakeelf : AT(ADDR(.fakeelf) - LOAD_OFFSET) { *(.fakeelf) } +#endif /* freed after init ends here */ . = ALIGN(PAGE_SIZE); |
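The signal_32.c and signal_64.c hunks above replace the CONFIG_VSX-only -EINVAL path in swapcontext with a ctx_has_vsx_region flag and size the access_ok()/get_user() checks by ctx_size rather than sizeof(*new_ctx). The following standalone sketch shows that sizing rule; the MSR_VSX bit position and UCONTEXT_FULL_SIZE are stand-in constants for illustration, not values taken from the kernel headers.

    /*
     * Sketch of the swapcontext sizing rule introduced above: an old-style
     * context without room for VSX state is still accepted (VSX registers
     * are then simply not saved); it is only an error if the new context
     * claims MSR_VSX while being too small to hold the VSX region.
     * MSR_VSX and UCONTEXT_FULL_SIZE below are illustrative stand-ins.
     */
    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    #define MSR_VSX            (1UL << 23)   /* assumed bit, for illustration */
    #define UCONTEXT_FULL_SIZE 1440          /* stand-in for sizeof(struct ucontext) */

    static int check_ctx(size_t ctx_size, unsigned long new_msr,
                         int *ctx_has_vsx_region)
    {
        /* A context that advertises VSX must be big enough to carry it. */
        if (ctx_size < UCONTEXT_FULL_SIZE && (new_msr & MSR_VSX))
            return -EINVAL;

        /* Otherwise just record whether VSX state can be stored at all. */
        *ctx_has_vsx_region = (ctx_size >= UCONTEXT_FULL_SIZE);
        return 0;
    }

    int main(void)
    {
        int has_vsx;

        /* Old, smaller context, no VSX in the MSR: accepted, VSX skipped. */
        printf("old ctx, no VSX : %d (has_vsx=%d)\n",
               check_ctx(1280, 0, &has_vsx), has_vsx);

        /* Old, smaller context but MSR_VSX set: rejected. */
        printf("old ctx, VSX set: %d\n", check_ctx(1280, MSR_VSX, &has_vsx));

        /* Full-size context: accepted, VSX region available. */
        printf("full ctx        : %d (has_vsx=%d)\n",
               check_ctx(UCONTEXT_FULL_SIZE, MSR_VSX, &has_vsx), has_vsx);

        return 0;
    }

The effect is that binaries built against the pre-VSX ucontext layout keep working: their smaller contexts are accepted and simply carry no VSX region, while a context that both claims MSR_VSX and is too small to hold it is the only case rejected.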