Diffstat (limited to 'arch/powerpc/kernel')
66 files changed, 1032 insertions, 235 deletions
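The core addition in this batch is a get_required_mask hook on struct dma_map_ops, wired up below for the direct, iommu, swiotlb and ibmebus DMA paths, together with a dma_get_required_mask() dispatcher that prefers a platform override (ppc_md.dma_get_required_mask), then the per-bus hook, and finally falls back to DMA_BIT_MASK(8 * sizeof(dma_addr_t)). Each hook follows the same pattern: take the highest address the device may have to reach and widen it to an all-ones mask. A minimal standalone sketch of that computation follows (plain userspace C, not code from the patch; fls64() here is a local stand-in for the kernel helper, and the 4 GiB figure is only an example):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's fls64(): position of the highest set bit, 1-based. */
static int fls64(uint64_t x)
{
	int bit = 0;

	while (x) {
		bit++;
		x >>= 1;
	}
	return bit;
}

/*
 * Smallest all-ones DMA mask covering 'highest_addr', the last byte the
 * device may need to address (end of DRAM plus any DMA offset in the
 * direct case).
 */
static uint64_t required_mask(uint64_t highest_addr)
{
	uint64_t mask;

	if (!highest_addr)
		return 0;

	/* Isolate the highest set bit of the address... */
	mask = 1ULL << (fls64(highest_addr) - 1);
	/* ...and turn it into an all-ones mask covering that bit and below. */
	mask += mask - 1;

	return mask;
}

int main(void)
{
	/* 4 GiB of RAM: highest address 0xffffffff -> 32-bit mask. */
	printf("0x%llx\n", (unsigned long long)required_mask(0xffffffffULL));
	return 0;
}

The iommu variant in dma-iommu.c applies the same rounding to the device's iommu window (tbl->it_offset + tbl->it_size) instead of the end of DRAM, and the swiotlb variant caps the end address at max_direct_dma_addr when one is set, as the hunks below show.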
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 69f7ffe7f67..7c5324f1ec9 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -49,6 +49,9 @@ #ifdef CONFIG_PPC_ISERIES #include <asm/iseries/alpaca.h> #endif +#ifdef CONFIG_PPC_POWERNV +#include <asm/opal.h> +#endif #if defined(CONFIG_KVM) || defined(CONFIG_KVM_GUEST) #include <linux/kvm_host.h> #endif @@ -610,5 +613,12 @@ int main(void) arch.timing_last_enter.tv32.tbl)); #endif +#ifdef CONFIG_PPC_POWERNV + DEFINE(OPAL_MC_GPR3, offsetof(struct opal_machine_check_event, gpr3)); + DEFINE(OPAL_MC_SRR0, offsetof(struct opal_machine_check_event, srr0)); + DEFINE(OPAL_MC_SRR1, offsetof(struct opal_machine_check_event, srr1)); + DEFINE(PACA_OPAL_MC_EVT, offsetof(struct paca_struct, opal_mc_evt)); +#endif + return 0; } diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 60b3e377b1e..ac8f52732fd 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -6,7 +6,7 @@ #include <linux/kernel.h> #include <linux/string.h> #include <linux/init.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/memblock.h> #include <asm/sections.h> diff --git a/arch/powerpc/kernel/clock.c b/arch/powerpc/kernel/clock.c index ce668f54575..a764b47791e 100644 --- a/arch/powerpc/kernel/clock.c +++ b/arch/powerpc/kernel/clock.c @@ -6,7 +6,7 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/errno.h> -#include <linux/module.h> +#include <linux/export.h> #include <asm/clk_interface.h> struct clk_interface clk_functions; diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index fa44ff53886..edae5bb06f1 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -14,7 +14,7 @@ #include <linux/sched.h> #include <linux/threads.h> #include <linux/init.h> -#include <linux/module.h> +#include <linux/export.h> #include <asm/oprofile_impl.h> #include <asm/cputable.h> diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index cc6a9d5d69a..d879809d5c4 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -17,6 +17,7 @@ #include <linux/reboot.h> #include <linux/kexec.h> #include <linux/bootmem.h> +#include <linux/export.h> #include <linux/crash_dump.h> #include <linux/delay.h> #include <linux/elf.h> diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index e7554154a6d..3f6464b4d97 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -5,6 +5,7 @@ * busses using the iommu infrastructure */ +#include <linux/export.h> #include <asm/iommu.h> /* @@ -90,13 +91,27 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask) return 1; } +static u64 dma_iommu_get_required_mask(struct device *dev) +{ + struct iommu_table *tbl = get_iommu_table_base(dev); + u64 mask; + if (!tbl) + return 0; + + mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1); + mask += mask - 1; + + return mask; +} + struct dma_map_ops dma_iommu_ops = { - .alloc_coherent = dma_iommu_alloc_coherent, - .free_coherent = dma_iommu_free_coherent, - .map_sg = dma_iommu_map_sg, - .unmap_sg = dma_iommu_unmap_sg, - .dma_supported = dma_iommu_dma_supported, - .map_page = dma_iommu_map_page, - .unmap_page = dma_iommu_unmap_page, + .alloc_coherent = dma_iommu_alloc_coherent, + .free_coherent = dma_iommu_free_coherent, + .map_sg = dma_iommu_map_sg, + .unmap_sg = dma_iommu_unmap_sg, + .dma_supported = dma_iommu_dma_supported, + 
.map_page = dma_iommu_map_page, + .unmap_page = dma_iommu_unmap_page, + .get_required_mask = dma_iommu_get_required_mask, }; EXPORT_SYMBOL(dma_iommu_ops); diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index 4295e0b94b2..1ebc9189aad 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c @@ -24,6 +24,21 @@ unsigned int ppc_swiotlb_enable; +static u64 swiotlb_powerpc_get_required(struct device *dev) +{ + u64 end, mask, max_direct_dma_addr = dev->archdata.max_direct_dma_addr; + + end = memblock_end_of_DRAM(); + if (max_direct_dma_addr && end > max_direct_dma_addr) + end = max_direct_dma_addr; + end += get_dma_offset(dev); + + mask = 1ULL << (fls64(end) - 1); + mask += mask - 1; + + return mask; +} + /* * At the moment, all platforms that use this code only require * swiotlb to be used if we're operating on HIGHMEM. Since @@ -44,6 +59,7 @@ struct dma_map_ops swiotlb_dma_ops = { .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, .sync_sg_for_device = swiotlb_sync_sg_for_device, .mapping_error = swiotlb_dma_mapping_error, + .get_required_mask = swiotlb_powerpc_get_required, }; void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev) diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 4f0959fbfbe..7d0233c12ee 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -10,6 +10,7 @@ #include <linux/dma-debug.h> #include <linux/gfp.h> #include <linux/memblock.h> +#include <linux/export.h> #include <asm/bug.h> #include <asm/abs_addr.h> #include <asm/machdep.h> @@ -96,6 +97,18 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask) #endif } +static u64 dma_direct_get_required_mask(struct device *dev) +{ + u64 end, mask; + + end = memblock_end_of_DRAM() + get_dma_offset(dev); + + mask = 1ULL << (fls64(end) - 1); + mask += mask - 1; + + return mask; +} + static inline dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, unsigned long offset, @@ -137,13 +150,14 @@ static inline void dma_direct_sync_single(struct device *dev, #endif struct dma_map_ops dma_direct_ops = { - .alloc_coherent = dma_direct_alloc_coherent, - .free_coherent = dma_direct_free_coherent, - .map_sg = dma_direct_map_sg, - .unmap_sg = dma_direct_unmap_sg, - .dma_supported = dma_direct_dma_supported, - .map_page = dma_direct_map_page, - .unmap_page = dma_direct_unmap_page, + .alloc_coherent = dma_direct_alloc_coherent, + .free_coherent = dma_direct_free_coherent, + .map_sg = dma_direct_map_sg, + .unmap_sg = dma_direct_unmap_sg, + .dma_supported = dma_direct_dma_supported, + .map_page = dma_direct_map_page, + .unmap_page = dma_direct_unmap_page, + .get_required_mask = dma_direct_get_required_mask, #ifdef CONFIG_NOT_COHERENT_CACHE .sync_single_for_cpu = dma_direct_sync_single, .sync_single_for_device = dma_direct_sync_single, @@ -170,6 +184,23 @@ int dma_set_mask(struct device *dev, u64 dma_mask) } EXPORT_SYMBOL(dma_set_mask); +u64 dma_get_required_mask(struct device *dev) +{ + struct dma_map_ops *dma_ops = get_dma_ops(dev); + + if (ppc_md.dma_get_required_mask) + return ppc_md.dma_get_required_mask(dev); + + if (unlikely(dma_ops == NULL)) + return 0; + + if (dma_ops->get_required_mask) + return dma_ops->get_required_mask(dev); + + return DMA_BIT_MASK(8 * sizeof(dma_addr_t)); +} +EXPORT_SYMBOL_GPL(dma_get_required_mask); + static int __init dma_init(void) { dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 56212bc0ab0..4f80cf1ce77 100644 --- 
a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -215,7 +215,22 @@ reenable_mmu: /* re-enable mmu so we can */ stw r9,8(r1) stw r11,12(r1) stw r3,ORIG_GPR3(r1) + /* + * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1. + * If from user mode there is only one stack frame on the stack, and + * accessing CALLER_ADDR1 will cause oops. So we need create a dummy + * stack frame to make trace_hardirqs_off happy. + */ + andi. r12,r12,MSR_PR + beq 11f + stwu r1,-16(r1) + bl trace_hardirqs_off + addi r1,r1,16 + b 12f + +11: bl trace_hardirqs_off +12: lwz r0,GPR0(r1) lwz r3,ORIG_GPR3(r1) lwz r4,GPR4(r1) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 29ddd8b1c27..cf9c69b9189 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -267,7 +267,7 @@ vsx_unavailable_pSeries_1: #ifdef CONFIG_CBE_RAS STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error) - KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1202) + KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202) #endif /* CONFIG_CBE_RAS */ STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint) @@ -275,7 +275,7 @@ vsx_unavailable_pSeries_1: #ifdef CONFIG_CBE_RAS STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance) - KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1602) + KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602) #endif /* CONFIG_CBE_RAS */ STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist) @@ -283,7 +283,7 @@ vsx_unavailable_pSeries_1: #ifdef CONFIG_CBE_RAS STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal) - KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1802) + KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802) #endif /* CONFIG_CBE_RAS */ . = 0x3000 @@ -1133,7 +1133,7 @@ _GLOBAL(do_stab_bolted) rfid b . /* prevent speculative execution */ -#ifdef CONFIG_PPC_PSERIES +#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) /* * Data area reserved for FWNMI option. * This address (0x7000) is fixed by the RPA. @@ -1141,7 +1141,7 @@ _GLOBAL(do_stab_bolted) .= 0x7000 .globl fwnmi_data_area fwnmi_data_area: -#endif /* CONFIG_PPC_PSERIES */ +#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */ /* iSeries does not use the FWNMI stuff, so it is safe to put * this here, even if we later allow kernels that will boot on @@ -1166,9 +1166,12 @@ xLparMap: #endif /* CONFIG_PPC_ISERIES */ -#ifdef CONFIG_PPC_PSERIES +#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) + /* pseries and powernv need to keep the whole page from + * 0x7000 to 0x8000 free for use by the firmware + */ . = 0x8000 -#endif /* CONFIG_PPC_PSERIES */ +#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */ /* * Space for CPU0's segment table. @@ -1183,3 +1186,19 @@ xLparMap: .globl initial_stab initial_stab: .space 4096 +#ifdef CONFIG_PPC_POWERNV +_GLOBAL(opal_mc_secondary_handler) + HMT_MEDIUM + SET_SCRATCH0(r13) + GET_PACA(r13) + clrldi r3,r3,2 + tovirt(r3,r3) + std r3,PACA_OPAL_MC_EVT(r13) + ld r13,OPAL_MC_SRR0(r3) + mtspr SPRN_SRR0,r13 + ld r13,OPAL_MC_SRR1(r3) + mtspr SPRN_SRR1,r13 + ld r3,OPAL_MC_GPR3(r3) + GET_SCRATCH0(r13) + b machine_check_pSeries +#endif /* CONFIG_PPC_POWERNV */ diff --git a/arch/powerpc/kernel/firmware.c b/arch/powerpc/kernel/firmware.c index 6b1f4271eb5..2eae4478f7a 100644 --- a/arch/powerpc/kernel/firmware.c +++ b/arch/powerpc/kernel/firmware.c @@ -13,7 +13,8 @@ * 2 of the License, or (at your option) any later version. 
*/ -#include <linux/module.h> +#include <linux/export.h> +#include <linux/cache.h> #include <asm/firmware.h> diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index ba250d505e0..0654dba2c1f 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -139,8 +139,7 @@ __start: trap #endif /* CONFIG_PPC_PMAC */ -1: mr r31,r3 /* save parameters */ - mr r30,r4 +1: mr r31,r3 /* save device tree ptr */ li r24,0 /* cpu # */ /* @@ -964,8 +963,8 @@ start_here: * Do early platform-specific initialization, * and set up the MMU. */ - mr r3,r31 - mr r4,r30 + li r3,0 + mr r4,r31 bl machine_init bl __save_cpu_setup bl MMU_init diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index a91626d87fc..872a6af83ba 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -58,13 +58,7 @@ _ENTRY(_stext); _ENTRY(_start); - /* Save parameters we are passed. - */ - mr r31,r3 - mr r30,r4 - mr r29,r5 - mr r28,r6 - mr r27,r7 + mr r31,r3 /* save device tree ptr */ /* We have to turn on the MMU right away so we get cache modes * set correctly. @@ -849,11 +843,8 @@ start_here: /* * Decide what sort of machine this is and initialize the MMU. */ - mr r3,r31 - mr r4,r30 - mr r5,r29 - mr r6,r28 - mr r7,r27 + li r3,0 + mr r4,r31 bl machine_init bl MMU_init diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index f8e971ba94f..b725dab0f88 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -61,14 +61,7 @@ _ENTRY(_start); * of abatron_pteptrs */ nop -/* - * Save parameters we are passed - */ - mr r31,r3 - mr r30,r4 - mr r29,r5 - mr r28,r6 - mr r27,r7 + mr r31,r3 /* save device tree ptr */ li r24,0 /* CPU number */ bl init_cpu_state @@ -120,11 +113,8 @@ _ENTRY(_start); /* * Decide what sort of machine this is and initialize the MMU. */ - mr r3,r31 - mr r4,r30 - mr r5,r29 - mr r6,r28 - mr r7,r27 + li r3,0 + mr r4,r31 bl machine_init bl MMU_init diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 3564c49c683..06c7251c1bf 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -51,6 +51,11 @@ * For pSeries or server processors: * 1. The MMU is off & open firmware is running in real mode. * 2. The kernel is entered at __start + * -or- For OPAL entry: + * 1. The MMU is off, processor in HV mode, primary CPU enters at 0 + * with device-tree in gpr3. We also get OPAL base in r8 and + * entry in r9 for debugging purposes + * 2. Secondary processors enter at 0x60 with PIR in gpr3 * * For iSeries: * 1. 
The MMU is on (as it always is for iSeries) @@ -331,6 +336,11 @@ _GLOBAL(__start_initialization_multiplatform) /* Save parameters */ mr r31,r3 mr r30,r4 +#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL + /* Save OPAL entry */ + mr r28,r8 + mr r29,r9 +#endif #ifdef CONFIG_PPC_BOOK3E bl .start_initialization_book3e @@ -674,9 +684,9 @@ _GLOBAL(enable_64b_mode) _GLOBAL(relative_toc) mflr r0 bcl 20,31,$+4 -0: mflr r9 - ld r2,(p_toc - 0b)(r9) - add r2,r2,r9 +0: mflr r11 + ld r2,(p_toc - 0b)(r11) + add r2,r2,r11 mtlr r0 blr @@ -707,6 +717,12 @@ _INIT_STATIC(start_here_multiplatform) bdnz 3b 4: +#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL + /* Setup OPAL entry */ + std r28,0(r11); + std r29,8(r11); +#endif + #ifndef CONFIG_PPC_BOOK3E mfmsr r6 ori r6,r6,MSR_RI diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 1cbf64e6b41..b68cb173ba2 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -76,11 +76,7 @@ _ENTRY(_start); */ .globl __start __start: - mr r31,r3 /* save parameters */ - mr r30,r4 - mr r29,r5 - mr r28,r6 - mr r27,r7 + mr r31,r3 /* save device tree ptr */ /* We have to turn on the MMU right away so we get cache modes * set correctly. @@ -723,11 +719,8 @@ start_here: /* * Decide what sort of machine this is and initialize the MMU. */ - mr r3,r31 - mr r4,r30 - mr r5,r29 - mr r6,r28 - mr r7,r27 + li r3,0 + mr r4,r31 bl machine_init bl MMU_init diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 50845924b7d..9f5d210ddf3 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -63,17 +63,30 @@ _ENTRY(_start); * of abatron_pteptrs */ nop -/* - * Save parameters we are passed - */ - mr r31,r3 - mr r30,r4 - mr r29,r5 - mr r28,r6 - mr r27,r7 - li r25,0 /* phys kernel start (low) */ - li r24,0 /* CPU number */ - li r23,0 /* phys kernel start (high) */ + + /* Translate device tree address to physical, save in r30/r31 */ + mfmsr r16 + mfspr r17,SPRN_PID + rlwinm r17,r17,16,0x3fff0000 /* turn PID into MAS6[SPID] */ + rlwimi r17,r16,28,0x00000001 /* turn MSR[DS] into MAS6[SAS] */ + mtspr SPRN_MAS6,r17 + + tlbsx 0,r3 /* must succeed */ + + mfspr r16,SPRN_MAS1 + mfspr r20,SPRN_MAS3 + rlwinm r17,r16,25,0x1f /* r17 = log2(page size) */ + li r18,1024 + slw r18,r18,r17 /* r18 = page size */ + addi r18,r18,-1 + and r19,r3,r18 /* r19 = page offset */ + andc r31,r20,r18 /* r31 = page base */ + or r31,r31,r19 /* r31 = devtree phys addr */ + mfspr r30,SPRN_MAS7 + + li r25,0 /* phys kernel start (low) */ + li r24,0 /* CPU number */ + li r23,0 /* phys kernel start (high) */ /* We try to not make any assumptions about how the boot loader * setup or used the TLBs. We invalidate all mappings from the @@ -198,11 +211,8 @@ _ENTRY(__early_start) /* * Decide what sort of machine this is and initialize the MMU. */ - mr r3,r31 - mr r4,r30 - mr r5,r29 - mr r6,r28 - mr r7,r27 + mr r3,r30 + mr r4,r31 bl machine_init bl MMU_init @@ -236,8 +246,24 @@ _ENTRY(__early_start) * if we find the pte (fall through): * r11 is low pte word * r12 is pointer to the pte + * r10 is the pshift from the PGD, if we're a hugepage */ #ifdef CONFIG_PTE_64BIT +#ifdef CONFIG_HUGETLB_PAGE +#define FIND_PTE \ + rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \ + lwzx r11, r12, r11; /* Get pgd/pmd entry */ \ + rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */ \ + blt 1000f; /* Normal non-huge page */ \ + beq 2f; /* Bail if no table */ \ + oris r11, r11, PD_HUGE@h; /* Put back address bit */ \ + andi. 
r10, r11, HUGEPD_SHIFT_MASK@l; /* extract size field */ \ + xor r12, r10, r11; /* drop size bits from pointer */ \ + b 1001f; \ +1000: rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \ + li r10, 0; /* clear r10 */ \ +1001: lwz r11, 4(r12); /* Get pte entry */ +#else #define FIND_PTE \ rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \ lwzx r11, r12, r11; /* Get pgd/pmd entry */ \ @@ -245,7 +271,8 @@ _ENTRY(__early_start) beq 2f; /* Bail if no table */ \ rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \ lwz r11, 4(r12); /* Get pte entry */ -#else +#endif /* HUGEPAGE */ +#else /* !PTE_64BIT */ #define FIND_PTE \ rlwimi r11, r10, 12, 20, 29; /* Create L1 (pgdir/pmd) address */ \ lwz r11, 0(r11); /* Get L1 entry */ \ @@ -402,8 +429,8 @@ interrupt_base: #ifdef CONFIG_PTE_64BIT #ifdef CONFIG_SMP - subf r10,r11,r12 /* create false data dep */ - lwzx r13,r11,r10 /* Get upper pte bits */ + subf r13,r11,r12 /* create false data dep */ + lwzx r13,r11,r13 /* Get upper pte bits */ #else lwz r13,0(r12) /* Get upper pte bits */ #endif @@ -483,8 +510,8 @@ interrupt_base: #ifdef CONFIG_PTE_64BIT #ifdef CONFIG_SMP - subf r10,r11,r12 /* create false data dep */ - lwzx r13,r11,r10 /* Get upper pte bits */ + subf r13,r11,r12 /* create false data dep */ + lwzx r13,r11,r13 /* Get upper pte bits */ #else lwz r13,0(r12) /* Get upper pte bits */ #endif @@ -548,7 +575,7 @@ interrupt_base: /* * Both the instruction and data TLB miss get to this * point to load the TLB. - * r10 - available to use + * r10 - tsize encoding (if HUGETLB_PAGE) or available to use * r11 - TLB (info from Linux PTE) * r12 - available to use * r13 - upper bits of PTE (if PTE_64BIT) or available to use @@ -558,21 +585,73 @@ interrupt_base: * Upon exit, we reload everything and RFI. */ finish_tlb_load: +#ifdef CONFIG_HUGETLB_PAGE + cmpwi 6, r10, 0 /* check for huge page */ + beq 6, finish_tlb_load_cont /* !huge */ + + /* Alas, we need more scratch registers for hugepages */ + mfspr r12, SPRN_SPRG_THREAD + stw r14, THREAD_NORMSAVE(4)(r12) + stw r15, THREAD_NORMSAVE(5)(r12) + stw r16, THREAD_NORMSAVE(6)(r12) + stw r17, THREAD_NORMSAVE(7)(r12) + + /* Get the next_tlbcam_idx percpu var */ +#ifdef CONFIG_SMP + lwz r12, THREAD_INFO-THREAD(r12) + lwz r15, TI_CPU(r12) + lis r14, __per_cpu_offset@h + ori r14, r14, __per_cpu_offset@l + rlwinm r15, r15, 2, 0, 29 + lwzx r16, r14, r15 +#else + li r16, 0 +#endif + lis r17, next_tlbcam_idx@h + ori r17, r17, next_tlbcam_idx@l + add r17, r17, r16 /* r17 = *next_tlbcam_idx */ + lwz r15, 0(r17) /* r15 = next_tlbcam_idx */ + + lis r14, MAS0_TLBSEL(1)@h /* select TLB1 (TLBCAM) */ + rlwimi r14, r15, 16, 4, 15 /* next_tlbcam_idx entry */ + mtspr SPRN_MAS0, r14 + + /* Extract TLB1CFG(NENTRY) */ + mfspr r16, SPRN_TLB1CFG + andi. r16, r16, 0xfff + + /* Update next_tlbcam_idx, wrapping when necessary */ + addi r15, r15, 1 + cmpw r15, r16 + blt 100f + lis r14, tlbcam_index@h + ori r14, r14, tlbcam_index@l + lwz r15, 0(r14) +100: stw r15, 0(r17) + + /* + * Calc MAS1_TSIZE from r10 (which has pshift encoded) + * tlb_enc = (pshift - 10). + */ + subi r15, r10, 10 + mfspr r16, SPRN_MAS1 + rlwimi r16, r15, 7, 20, 24 + mtspr SPRN_MAS1, r16 + + /* copy the pshift for use later */ + mr r14, r10 + + /* fall through */ + +#endif /* CONFIG_HUGETLB_PAGE */ + /* * We set execute, because we don't have the granularity to * properly set this at the page level (Linux problem). * Many of these bits are software only. Bits we don't set * here we (properly should) assume have the appropriate value. 
*/ - - mfspr r12, SPRN_MAS2 -#ifdef CONFIG_PTE_64BIT - rlwimi r12, r11, 32-19, 27, 31 /* extract WIMGE from pte */ -#else - rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ -#endif - mtspr SPRN_MAS2, r12 - +finish_tlb_load_cont: #ifdef CONFIG_PTE_64BIT rlwinm r12, r11, 32-2, 26, 31 /* Move in perm bits */ andi. r10, r11, _PAGE_DIRTY @@ -581,22 +660,40 @@ finish_tlb_load: andc r12, r12, r10 1: rlwimi r12, r13, 20, 0, 11 /* grab RPN[32:43] */ rlwimi r12, r11, 20, 12, 19 /* grab RPN[44:51] */ - mtspr SPRN_MAS3, r12 +2: mtspr SPRN_MAS3, r12 BEGIN_MMU_FTR_SECTION srwi r10, r13, 12 /* grab RPN[12:31] */ mtspr SPRN_MAS7, r10 END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS) #else li r10, (_PAGE_EXEC | _PAGE_PRESENT) + mr r13, r11 rlwimi r10, r11, 31, 29, 29 /* extract _PAGE_DIRTY into SW */ and r12, r11, r10 andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */ slwi r10, r12, 1 or r10, r10, r12 iseleq r12, r12, r10 - rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */ - mtspr SPRN_MAS3, r11 + rlwimi r13, r12, 0, 20, 31 /* Get RPN from PTE, merge w/ perms */ + mtspr SPRN_MAS3, r13 #endif + + mfspr r12, SPRN_MAS2 +#ifdef CONFIG_PTE_64BIT + rlwimi r12, r11, 32-19, 27, 31 /* extract WIMGE from pte */ +#else + rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ +#endif +#ifdef CONFIG_HUGETLB_PAGE + beq 6, 3f /* don't mask if page isn't huge */ + li r13, 1 + slw r13, r13, r14 + subi r13, r13, 1 + rlwinm r13, r13, 0, 0, 19 /* bottom bits used for WIMGE/etc */ + andc r12, r12, r13 /* mask off ea bits within the page */ +#endif +3: mtspr SPRN_MAS2, r12 + #ifdef CONFIG_E200 /* Round robin TLB1 entries assignment */ mfspr r12, SPRN_MAS0 @@ -622,11 +719,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS) mtspr SPRN_MAS0,r12 #endif /* CONFIG_E200 */ +tlb_write_entry: tlbwe /* Done...restore registers and get out of here. 
*/ mfspr r10, SPRN_SPRG_THREAD - lwz r11, THREAD_NORMSAVE(3)(r10) +#ifdef CONFIG_HUGETLB_PAGE + beq 6, 8f /* skip restore for 4k page faults */ + lwz r14, THREAD_NORMSAVE(4)(r10) + lwz r15, THREAD_NORMSAVE(5)(r10) + lwz r16, THREAD_NORMSAVE(6)(r10) + lwz r17, THREAD_NORMSAVE(7)(r10) +#endif +8: lwz r11, THREAD_NORMSAVE(3)(r10) mtcr r11 lwz r13, THREAD_NORMSAVE(2)(r10) lwz r12, THREAD_NORMSAVE(1)(r10) diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 5ecd0401cdb..2bc0584be81 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -27,7 +27,6 @@ #include <linux/kprobes.h> #include <linux/percpu.h> #include <linux/kernel.h> -#include <linux/module.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/smp.h> diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 28581f1ad2c..d39ae606ff8 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -37,12 +37,14 @@ */ #include <linux/init.h> +#include <linux/export.h> #include <linux/console.h> #include <linux/kobject.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/slab.h> +#include <linux/stat.h> #include <linux/of_platform.h> #include <asm/ibmebus.h> #include <asm/abs_addr.h> @@ -125,17 +127,23 @@ static void ibmebus_unmap_sg(struct device *dev, static int ibmebus_dma_supported(struct device *dev, u64 mask) { - return 1; + return mask == DMA_BIT_MASK(64); +} + +static u64 ibmebus_dma_get_required_mask(struct device *dev) +{ + return DMA_BIT_MASK(64); } static struct dma_map_ops ibmebus_dma_ops = { - .alloc_coherent = ibmebus_alloc_coherent, - .free_coherent = ibmebus_free_coherent, - .map_sg = ibmebus_map_sg, - .unmap_sg = ibmebus_unmap_sg, - .dma_supported = ibmebus_dma_supported, - .map_page = ibmebus_map_page, - .unmap_page = ibmebus_unmap_page, + .alloc_coherent = ibmebus_alloc_coherent, + .free_coherent = ibmebus_free_coherent, + .map_sg = ibmebus_map_sg, + .unmap_sg = ibmebus_unmap_sg, + .dma_supported = ibmebus_dma_supported, + .get_required_mask = ibmebus_dma_get_required_mask, + .map_page = ibmebus_map_page, + .unmap_page = ibmebus_unmap_page, }; static int ibmebus_match_path(struct device *dev, void *data) diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_e500.S index 3e2b95c6ae6..4f0ab85f378 100644 --- a/arch/powerpc/kernel/idle_e500.S +++ b/arch/powerpc/kernel/idle_e500.S @@ -26,7 +26,7 @@ _GLOBAL(e500_idle) ori r4,r4,_TLF_NAPPING /* so when we take an exception */ stw r4,TI_LOCAL_FLAGS(r3) /* it will return to our caller */ -#ifdef CONFIG_E500MC +#ifdef CONFIG_PPC_E500MC wrteei 1 1: wait diff --git a/arch/powerpc/kernel/init_task.c b/arch/powerpc/kernel/init_task.c index 2375b7eb1c7..d076d465dbd 100644 --- a/arch/powerpc/kernel/init_task.c +++ b/arch/powerpc/kernel/init_task.c @@ -1,5 +1,5 @@ #include <linux/mm.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/init_task.h> diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c index ffafaea3d26..12d329bcbb9 100644 --- a/arch/powerpc/kernel/io-workarounds.c +++ b/arch/powerpc/kernel/io-workarounds.c @@ -12,6 +12,7 @@ #undef DEBUG #include <linux/kernel.h> +#include <linux/sched.h> /* for init_mm */ #include <asm/io.h> #include <asm/machdep.h> diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c index 8dc7547c237..886381f32c3 100644 --- 
a/arch/powerpc/kernel/io.c +++ b/arch/powerpc/kernel/io.c @@ -19,7 +19,7 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/compiler.h> -#include <linux/module.h> +#include <linux/export.h> #include <asm/io.h> #include <asm/firmware.h> diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c index b25f6325fc7..26279180739 100644 --- a/arch/powerpc/kernel/iomap.c +++ b/arch/powerpc/kernel/iomap.c @@ -6,6 +6,7 @@ #include <linux/init.h> #include <linux/pci.h> #include <linux/mm.h> +#include <linux/export.h> #include <asm/io.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 961bb03413f..0cfcf98aafc 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -501,6 +501,14 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid) tbl->it_map = page_address(page); memset(tbl->it_map, 0, sz); + /* + * Reserve page 0 so it will not be used for any mappings. + * This avoids buggy drivers that consider page 0 to be invalid + * to crash the machine or even lose data. + */ + if (tbl->it_offset == 0) + set_bit(0, tbl->it_map); + tbl->it_hint = 0; tbl->it_largehint = tbl->it_halfpoint; spin_lock_init(&tbl->it_lock); diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index d281fb6f12f..5c3c46948d9 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -30,7 +30,7 @@ #undef DEBUG -#include <linux/module.h> +#include <linux/export.h> #include <linux/threads.h> #include <linux/kernel_stat.h> #include <linux/signal.h> diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c index 4d5731b2429..479752901ec 100644 --- a/arch/powerpc/kernel/isa-bridge.c +++ b/arch/powerpc/kernel/isa-bridge.c @@ -18,6 +18,7 @@ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/string.h> +#include <linux/export.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/notifier.h> diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c index 368d158d665..a1ed8a8c7cb 100644 --- a/arch/powerpc/kernel/jump_label.c +++ b/arch/powerpc/kernel/jump_label.c @@ -11,6 +11,7 @@ #include <linux/jump_label.h> #include <asm/code-patching.h> +#ifdef HAVE_JUMP_LABEL void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { @@ -21,3 +22,4 @@ void arch_jump_label_transform(struct jump_entry *entry, else patch_instruction(addr, PPC_INST_NOP); } +#endif diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index b06bdae0406..2985338d0e1 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -20,6 +20,7 @@ #include <linux/kvm_host.h> #include <linux/init.h> +#include <linux/export.h> #include <linux/kvm_para.h> #include <linux/slab.h> #include <linux/of.h> @@ -131,7 +132,6 @@ static void kvm_patch_ins_b(u32 *inst, int addr) /* On relocatable kernels interrupts handlers and our code can be in different regions, so we don't patch them */ - extern u32 __end_interrupts; if ((ulong)inst < (ulong)&__end_interrupts) return; #endif diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index 2b97b80d6d7..c7b5afeecaf 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -6,6 +6,7 @@ #include <linux/pci.h> #include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/serial_reg.h> #include <asm/io.h> #include <asm/mmu.h> #include <asm/prom.h> @@ -47,6 +48,24 @@ static struct __initdata 
of_device_id legacy_serial_parents[] = { static unsigned int legacy_serial_count; static int legacy_serial_console = -1; +static unsigned int tsi_serial_in(struct uart_port *p, int offset) +{ + unsigned int tmp; + offset = offset << p->regshift; + if (offset == UART_IIR) { + tmp = readl(p->membase + (UART_IIR & ~3)); + return (tmp >> 16) & 0xff; /* UART_IIR % 4 == 2 */ + } else + return readb(p->membase + offset); +} + +static void tsi_serial_out(struct uart_port *p, int offset, int value) +{ + offset = offset << p->regshift; + if (!((offset == UART_IER) && (value & UART_IER_UUE))) + writeb(value, p->membase + offset); +} + static int __init add_legacy_port(struct device_node *np, int want_index, int iotype, phys_addr_t base, phys_addr_t taddr, unsigned long irq, @@ -102,6 +121,7 @@ static int __init add_legacy_port(struct device_node *np, int want_index, legacy_serial_ports[index].iobase = base; else legacy_serial_ports[index].mapbase = base; + legacy_serial_ports[index].iotype = iotype; legacy_serial_ports[index].uartclk = clock; legacy_serial_ports[index].irq = irq; @@ -112,6 +132,11 @@ static int __init add_legacy_port(struct device_node *np, int want_index, legacy_serial_infos[index].speed = spd ? be32_to_cpup(spd) : 0; legacy_serial_infos[index].irq_check_parent = irq_check_parent; + if (iotype == UPIO_TSI) { + legacy_serial_ports[index].serial_in = tsi_serial_in; + legacy_serial_ports[index].serial_out = tsi_serial_out; + } + printk(KERN_DEBUG "Found legacy serial port %d for %s\n", index, np->full_name); printk(KERN_DEBUG " %s=%llx, taddr=%llx, irq=%lx, clk=%d, speed=%d\n", diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 583af70c4b1..26ccbf77dd4 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -74,8 +74,7 @@ int default_machine_kexec_prepare(struct kimage *image) } /* We also should not overwrite the tce tables */ - for (node = of_find_node_by_type(NULL, "pci"); node != NULL; - node = of_find_node_by_type(node, "pci")) { + for_each_node_by_type(node, "pci") { basep = of_get_property(node, "linux,tce-base", NULL); sizep = of_get_property(node, "linux,tce-size", NULL); if (basep == NULL || sizep == NULL) diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 998a1002860..7cd07b42ca1 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -8,6 +8,8 @@ * kexec bits: * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com> * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz + * PPC44x port. Copyright (C) 2011, IBM Corporation + * Author: Suzuki Poulose <suzuki@in.ibm.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -736,6 +738,175 @@ relocate_new_kernel: mr r5, r31 li r0, 0 +#elif defined(CONFIG_44x) && !defined(CONFIG_PPC_47x) + +/* + * Code for setting up 1:1 mapping for PPC440x for KEXEC + * + * We cannot switch off the MMU on PPC44x. + * So we: + * 1) Invalidate all the mappings except the one we are running from. + * 2) Create a tmp mapping for our code in the other address space(TS) and + * jump to it. Invalidate the entry we started in. + * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in original TS. + * 4) Jump to the 1:1 mapping in original TS. + * 5) Invalidate the tmp mapping. + * + * - Based on the kexec support code for FSL BookE + * - Doesn't support 47x yet. 
+ * + */ + /* Save our parameters */ + mr r29, r3 + mr r30, r4 + mr r31, r5 + + /* Load our MSR_IS and TID to MMUCR for TLB search */ + mfspr r3,SPRN_PID + mfmsr r4 + andi. r4,r4,MSR_IS@l + beq wmmucr + oris r3,r3,PPC44x_MMUCR_STS@h +wmmucr: + mtspr SPRN_MMUCR,r3 + sync + + /* + * Invalidate all the TLB entries except the current entry + * where we are running from + */ + bl 0f /* Find our address */ +0: mflr r5 /* Make it accessible */ + tlbsx r23,0,r5 /* Find entry we are in */ + li r4,0 /* Start at TLB entry 0 */ + li r3,0 /* Set PAGEID inval value */ +1: cmpw r23,r4 /* Is this our entry? */ + beq skip /* If so, skip the inval */ + tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */ +skip: + addi r4,r4,1 /* Increment */ + cmpwi r4,64 /* Are we done? */ + bne 1b /* If not, repeat */ + isync + + /* Create a temp mapping and jump to it */ + andi. r6, r23, 1 /* Find the index to use */ + addi r24, r6, 1 /* r24 will contain 1 or 2 */ + + mfmsr r9 /* get the MSR */ + rlwinm r5, r9, 27, 31, 31 /* Extract the MSR[IS] */ + xori r7, r5, 1 /* Use the other address space */ + + /* Read the current mapping entries */ + tlbre r3, r23, PPC44x_TLB_PAGEID + tlbre r4, r23, PPC44x_TLB_XLAT + tlbre r5, r23, PPC44x_TLB_ATTRIB + + /* Save our current XLAT entry */ + mr r25, r4 + + /* Extract the TLB PageSize */ + li r10, 1 /* r10 will hold PageSize */ + rlwinm r11, r3, 0, 24, 27 /* bits 24-27 */ + + /* XXX: As of now we use 256M, 4K pages */ + cmpwi r11, PPC44x_TLB_256M + bne tlb_4k + rotlwi r10, r10, 28 /* r10 = 256M */ + b write_out +tlb_4k: + cmpwi r11, PPC44x_TLB_4K + bne default + rotlwi r10, r10, 12 /* r10 = 4K */ + b write_out +default: + rotlwi r10, r10, 10 /* r10 = 1K */ + +write_out: + /* + * Write out the tmp 1:1 mapping for this code in other address space + * Fixup EPN = RPN , TS=other address space + */ + insrwi r3, r7, 1, 23 /* Bit 23 is TS for PAGEID field */ + + /* Write out the tmp mapping entries */ + tlbwe r3, r24, PPC44x_TLB_PAGEID + tlbwe r4, r24, PPC44x_TLB_XLAT + tlbwe r5, r24, PPC44x_TLB_ATTRIB + + subi r11, r10, 1 /* PageOffset Mask = PageSize - 1 */ + not r10, r11 /* Mask for PageNum */ + + /* Switch to other address space in MSR */ + insrwi r9, r7, 1, 26 /* Set MSR[IS] = r7 */ + + bl 1f +1: mflr r8 + addi r8, r8, (2f-1b) /* Find the target offset */ + + /* Jump to the tmp mapping */ + mtspr SPRN_SRR0, r8 + mtspr SPRN_SRR1, r9 + rfi + +2: + /* Invalidate the entry we were executing from */ + li r3, 0 + tlbwe r3, r23, PPC44x_TLB_PAGEID + + /* attribute fields. rwx for SUPERVISOR mode */ + li r5, 0 + ori r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G) + + /* Create 1:1 mapping in 256M pages */ + xori r7, r7, 1 /* Revert back to Original TS */ + + li r8, 0 /* PageNumber */ + li r6, 3 /* TLB Index, start at 3 */ + +next_tlb: + rotlwi r3, r8, 28 /* Create EPN (bits 0-3) */ + mr r4, r3 /* RPN = EPN */ + ori r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M) /* SIZE = 256M, Valid */ + insrwi r3, r7, 1, 23 /* Set TS from r7 */ + + tlbwe r3, r6, PPC44x_TLB_PAGEID /* PageID field : EPN, V, SIZE */ + tlbwe r4, r6, PPC44x_TLB_XLAT /* Address translation : RPN */ + tlbwe r5, r6, PPC44x_TLB_ATTRIB /* Attributes */ + + addi r8, r8, 1 /* Increment PN */ + addi r6, r6, 1 /* Increment TLB Index */ + cmpwi r8, 8 /* Are we done ? 
*/ + bne next_tlb + isync + + /* Jump to the new mapping 1:1 */ + li r9,0 + insrwi r9, r7, 1, 26 /* Set MSR[IS] = r7 */ + + bl 1f +1: mflr r8 + and r8, r8, r11 /* Get our offset within page */ + addi r8, r8, (2f-1b) + + and r5, r25, r10 /* Get our target PageNum */ + or r8, r8, r5 /* Target jump address */ + + mtspr SPRN_SRR0, r8 + mtspr SPRN_SRR1, r9 + rfi +2: + /* Invalidate the tmp entry we used */ + li r3, 0 + tlbwe r3, r24, PPC44x_TLB_PAGEID + sync + + /* Restore the parameters */ + mr r3, r29 + mr r4, r30 + mr r5, r31 + + li r0, 0 #else li r0, 0 diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index a1cd701b575..2d275707f41 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -16,7 +16,6 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include <linux/module.h> #include <linux/elf.h> #include <linux/moduleloader.h> #include <linux/err.h> diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c index 59dbf6abaaf..e1612dfb4a9 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c @@ -15,7 +15,7 @@ #include <linux/string.h> #include <linux/kernel.h> #include <linux/init.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/mod_devicetable.h> #include <linux/pci.h> #include <linux/of.h> diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 0a5a899846b..41456ff55e1 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -8,7 +8,7 @@ */ #include <linux/smp.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/memblock.h> #include <asm/firmware.h> diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 32656f10525..458ed3bee66 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -21,6 +21,7 @@ #include <linux/string.h> #include <linux/init.h> #include <linux/bootmem.h> +#include <linux/export.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/mm.h> @@ -1730,6 +1731,17 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose) if (mode == PCI_PROBE_NORMAL) hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); + + /* Configure PCI Express settings */ + if (bus && !pci_has_flag(PCI_PROBE_ONLY)) { + struct pci_bus *child; + list_for_each_entry(child, &bus->children, node) { + struct pci_dev *self = child->self; + if (!self) + continue; + pcie_bus_configure_settings(child, self->pcie_mpss); + } + } } static void fixup_hide_host_resource_fsl(struct pci_dev *dev) diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index bb154511db5..fdd1a3d951d 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -15,6 +15,7 @@ #include <linux/list.h> #include <linux/of.h> #include <linux/slab.h> +#include <linux/export.h> #include <asm/processor.h> #include <asm/io.h> diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index ab34046752b..bcf4bf9e72d 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -18,6 +18,7 @@ #include <linux/string.h> #include <linux/init.h> #include <linux/bootmem.h> +#include <linux/export.h> #include <linux/mm.h> #include <linux/list.h> #include <linux/syscalls.h> diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index 478f8d78716..4e69deb89b3 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ 
b/arch/powerpc/kernel/pci_dn.c @@ -22,6 +22,7 @@ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/string.h> +#include <linux/export.h> #include <linux/init.h> #include <linux/gfp.h> diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index fe0a5ad6f73..b37d0b5a796 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -15,6 +15,7 @@ */ #include <linux/pci.h> +#include <linux/export.h> #include <asm/pci-bridge.h> #include <asm/prom.h> diff --git a/arch/powerpc/kernel/pmc.c b/arch/powerpc/kernel/pmc.c index 461499b43cf..a841a9d136a 100644 --- a/arch/powerpc/kernel/pmc.c +++ b/arch/powerpc/kernel/pmc.c @@ -14,7 +14,7 @@ #include <linux/errno.h> #include <linux/spinlock.h> -#include <linux/module.h> +#include <linux/export.h> #include <asm/processor.h> #include <asm/cputable.h> diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index 03b95e2c6d6..0bbc901e7ef 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c @@ -487,8 +487,8 @@ static int power6_generic_events[] = { */ static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ - [C(OP_READ)] = { 0x80082, 0x80080 }, - [C(OP_WRITE)] = { 0x80086, 0x80088 }, + [C(OP_READ)] = { 0x280030, 0x80080 }, + [C(OP_WRITE)] = { 0x180032, 0x80088 }, [C(OP_PREFETCH)] = { 0x810a4, 0 }, }, [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c index de83d6060dd..1251e4d7e26 100644 --- a/arch/powerpc/kernel/power7-pmu.c +++ b/arch/powerpc/kernel/power7-pmu.c @@ -297,6 +297,8 @@ static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[]) static int power7_generic_events[] = { [PERF_COUNT_HW_CPU_CYCLES] = 0x1e, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x100f8, /* GCT_NOSLOT_CYC */ + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x4000a, /* CMPLU_STALL */ [PERF_COUNT_HW_INSTRUCTIONS] = 2, [PERF_COUNT_HW_CACHE_REFERENCES] = 0xc880, /* LD_REF_L1_LSU*/ [PERF_COUNT_HW_CACHE_MISSES] = 0x400f0, /* LD_MISS_L1 */ diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index f5ae872a2ef..d3114a71dd3 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -1,4 +1,4 @@ -#include <linux/module.h> +#include <linux/export.h> #include <linux/threads.h> #include <linux/smp.h> #include <linux/sched.h> diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 8f53954e75a..6457574c0b2 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -28,7 +28,7 @@ #include <linux/init.h> #include <linux/prctl.h> #include <linux/init_task.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/kallsyms.h> #include <linux/mqueue.h> #include <linux/hardirq.h> @@ -486,28 +486,6 @@ struct task_struct *__switch_to(struct task_struct *prev, new_thread = &new->thread; old_thread = ¤t->thread; -#if defined(CONFIG_PPC_BOOK3E_64) - /* XXX Current Book3E code doesn't deal with kernel side DBCR0, - * we always hold the user values, so we set it now. - * - * However, we ensure the kernel MSR:DE is appropriately cleared too - * to avoid spurrious single step exceptions in the kernel. 
- * - * This will have to change to merge with the ppc32 code at some point, - * but I don't like much what ppc32 is doing today so there's some - * thinking needed there - */ - if ((new_thread->dbcr0 | old_thread->dbcr0) & DBCR0_IDM) { - u32 dbcr0; - - mtmsr(mfmsr() & ~MSR_DE); - isync(); - dbcr0 = mfspr(SPRN_DBCR0); - dbcr0 = (dbcr0 & DBCR0_EDM) | new_thread->dbcr0; - mtspr(SPRN_DBCR0, dbcr0); - } -#endif /* CONFIG_PPC64_BOOK3E */ - #ifdef CONFIG_PPC64 /* * Collect processor utilization data per process @@ -657,7 +635,7 @@ void show_regs(struct pt_regs * regs) if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) printk("CFAR: "REG"\n", regs->orig_gpr3); if (trap == 0x300 || trap == 0x600) -#ifdef CONFIG_PPC_ADV_DEBUG_REGS +#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); #else printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr); diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 174e1e96175..fa1235b0503 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -27,7 +27,7 @@ #include <linux/delay.h> #include <linux/initrd.h> #include <linux/bitops.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/kexec.h> #include <linux/debugfs.h> #include <linux/irq.h> @@ -54,6 +54,8 @@ #include <asm/pci-bridge.h> #include <asm/phyp_dump.h> #include <asm/kexec.h> +#include <asm/opal.h> + #include <mm/mmu_decl.h> #ifdef DEBUG @@ -707,11 +709,23 @@ void __init early_init_devtree(void *params) of_scan_flat_dt(early_init_dt_scan_rtas, NULL); #endif +#ifdef CONFIG_PPC_POWERNV + /* Some machines might need OPAL info for debugging, grab it now. */ + of_scan_flat_dt(early_init_dt_scan_opal, NULL); +#endif + #ifdef CONFIG_PHYP_DUMP /* scan tree to see if dump occurred during last boot */ of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL); #endif + /* Pre-initialize the cmd_line with the content of boot_commmand_line, + * which will be empty except when the content of the variable has + * been overriden by a bootloading mechanism. This happens typically + * with HAL takeover + */ + strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE); + /* Retrieve various informations from the /chosen node of the * device-tree, including the platform type, initrd location and * size, TCE reserve, and more ... @@ -723,12 +737,15 @@ void __init early_init_devtree(void *params) of_scan_flat_dt(early_init_dt_scan_root, NULL); of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); - setup_initial_memory_limit(memstart_addr, first_memblock_size); /* Save command line for /proc/cmdline and then parse parameters */ strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); parse_early_param(); + /* make sure we've parsed cmdline for mem= before this */ + if (memory_limit) + first_memblock_size = min(first_memblock_size, memory_limit); + setup_initial_memory_limit(memstart_addr, first_memblock_size); /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */ memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START); /* If relocatable, reserve first 32k for interrupt vectors etc. 
*/ diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index a909f4e9343..cc584865b3d 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -43,6 +43,7 @@ #include <asm/btext.h> #include <asm/sections.h> #include <asm/machdep.h> +#include <asm/opal.h> #include <linux/linux_logo.h> @@ -139,7 +140,9 @@ struct mem_map_entry { typedef u32 cell_t; -extern void __start(unsigned long r3, unsigned long r4, unsigned long r5); +extern void __start(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, unsigned long r8, + unsigned long r9); #ifdef CONFIG_PPC64 extern int enter_prom(struct prom_args *args, unsigned long entry); @@ -185,6 +188,7 @@ static unsigned long __initdata prom_tce_alloc_end; #define PLATFORM_LPAR 0x0001 #define PLATFORM_POWERMAC 0x0400 #define PLATFORM_GENERIC 0x0500 +#define PLATFORM_OPAL 0x0600 static int __initdata of_platform; @@ -644,7 +648,7 @@ static void __init early_cmdline_parse(void) } } -#ifdef CONFIG_PPC_PSERIES +#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) /* * There are two methods for telling firmware what our capabilities are. * Newer machines have an "ibm,client-architecture-support" method on the @@ -1274,6 +1278,284 @@ static void __init prom_init_mem(void) prom_printf(" ram_top : %x\n", RELOC(ram_top)); } +static void __init prom_close_stdin(void) +{ + struct prom_t *_prom = &RELOC(prom); + ihandle val; + + if (prom_getprop(_prom->chosen, "stdin", &val, sizeof(val)) > 0) + call_prom("close", 1, 0, val); +} + +#ifdef CONFIG_PPC_POWERNV + +static u64 __initdata prom_opal_size; +static u64 __initdata prom_opal_align; +static int __initdata prom_rtas_start_cpu; +static u64 __initdata prom_rtas_data; +static u64 __initdata prom_rtas_entry; + +#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL +static u64 __initdata prom_opal_base; +static u64 __initdata prom_opal_entry; +#endif + +/* XXX Don't change this structure without updating opal-takeover.S */ +static struct opal_secondary_data { + s64 ack; /* 0 */ + u64 go; /* 8 */ + struct opal_takeover_args args; /* 16 */ +} opal_secondary_data; + +extern char opal_secondary_entry; + +static void prom_query_opal(void) +{ + long rc; + + /* We must not query for OPAL presence on a machine that + * supports TNK takeover (970 blades), as this uses the same + * h-call with different arguments and will crash + */ + if (PHANDLE_VALID(call_prom("finddevice", 1, 1, + ADDR("/tnk-memory-map")))) { + prom_printf("TNK takeover detected, skipping OPAL check\n"); + return; + } + + prom_printf("Querying for OPAL presence... "); + rc = opal_query_takeover(&RELOC(prom_opal_size), + &RELOC(prom_opal_align)); + prom_debug("(rc = %ld) ", rc); + if (rc != 0) { + prom_printf("not there.\n"); + return; + } + RELOC(of_platform) = PLATFORM_OPAL; + prom_printf(" there !\n"); + prom_debug(" opal_size = 0x%lx\n", RELOC(prom_opal_size)); + prom_debug(" opal_align = 0x%lx\n", RELOC(prom_opal_align)); + if (RELOC(prom_opal_align) < 0x10000) + RELOC(prom_opal_align) = 0x10000; +} + +static int prom_rtas_call(int token, int nargs, int nret, int *outputs, ...) 
+{ + struct rtas_args rtas_args; + va_list list; + int i; + + rtas_args.token = token; + rtas_args.nargs = nargs; + rtas_args.nret = nret; + rtas_args.rets = (rtas_arg_t *)&(rtas_args.args[nargs]); + va_start(list, outputs); + for (i = 0; i < nargs; ++i) + rtas_args.args[i] = va_arg(list, rtas_arg_t); + va_end(list); + + for (i = 0; i < nret; ++i) + rtas_args.rets[i] = 0; + + opal_enter_rtas(&rtas_args, RELOC(prom_rtas_data), + RELOC(prom_rtas_entry)); + + if (nret > 1 && outputs != NULL) + for (i = 0; i < nret-1; ++i) + outputs[i] = rtas_args.rets[i+1]; + return (nret > 0)? rtas_args.rets[0]: 0; +} + +static void __init prom_opal_hold_cpus(void) +{ + int i, cnt, cpu, rc; + long j; + phandle node; + char type[64]; + u32 servers[8]; + struct prom_t *_prom = &RELOC(prom); + void *entry = (unsigned long *)&RELOC(opal_secondary_entry); + struct opal_secondary_data *data = &RELOC(opal_secondary_data); + + prom_debug("prom_opal_hold_cpus: start...\n"); + prom_debug(" - entry = 0x%x\n", entry); + prom_debug(" - data = 0x%x\n", data); + + data->ack = -1; + data->go = 0; + + /* look for cpus */ + for (node = 0; prom_next_node(&node); ) { + type[0] = 0; + prom_getprop(node, "device_type", type, sizeof(type)); + if (strcmp(type, RELOC("cpu")) != 0) + continue; + + /* Skip non-configured cpus. */ + if (prom_getprop(node, "status", type, sizeof(type)) > 0) + if (strcmp(type, RELOC("okay")) != 0) + continue; + + cnt = prom_getprop(node, "ibm,ppc-interrupt-server#s", servers, + sizeof(servers)); + if (cnt == PROM_ERROR) + break; + cnt >>= 2; + for (i = 0; i < cnt; i++) { + cpu = servers[i]; + prom_debug("CPU %d ... ", cpu); + if (cpu == _prom->cpu) { + prom_debug("booted !\n"); + continue; + } + prom_debug("starting ... "); + + /* Init the acknowledge var which will be reset by + * the secondary cpu when it awakens from its OF + * spinloop. + */ + data->ack = -1; + rc = prom_rtas_call(RELOC(prom_rtas_start_cpu), 3, 1, + NULL, cpu, entry, data); + prom_debug("rtas rc=%d ...", rc); + + for (j = 0; j < 100000000 && data->ack == -1; j++) { + HMT_low(); + mb(); + } + HMT_medium(); + if (data->ack != -1) + prom_debug("done, PIR=0x%x\n", data->ack); + else + prom_debug("timeout !\n"); + } + } + prom_debug("prom_opal_hold_cpus: end...\n"); +} + +static void prom_opal_takeover(void) +{ + struct opal_secondary_data *data = &RELOC(opal_secondary_data); + struct opal_takeover_args *args = &data->args; + u64 align = RELOC(prom_opal_align); + u64 top_addr, opal_addr; + + args->k_image = (u64)RELOC(_stext); + args->k_size = _end - _stext; + args->k_entry = 0; + args->k_entry2 = 0x60; + + top_addr = _ALIGN_UP(args->k_size, align); + + if (RELOC(prom_initrd_start) != 0) { + args->rd_image = RELOC(prom_initrd_start); + args->rd_size = RELOC(prom_initrd_end) - args->rd_image; + args->rd_loc = top_addr; + top_addr = _ALIGN_UP(args->rd_loc + args->rd_size, align); + } + + /* Pickup an address for the HAL. We want to go really high + * up to avoid problem with future kexecs. On the other hand + * we don't want to be all over the TCEs on P5IOC2 machines + * which are going to be up there too. 
We assume the machine + * has plenty of memory, and we ask for the HAL for now to + * be just below the 1G point, or above the initrd + */ + opal_addr = _ALIGN_DOWN(0x40000000 - RELOC(prom_opal_size), align); + if (opal_addr < top_addr) + opal_addr = top_addr; + args->hal_addr = opal_addr; + + /* Copy the command line to the kernel image */ + strlcpy(RELOC(boot_command_line), RELOC(prom_cmd_line), + COMMAND_LINE_SIZE); + + prom_debug(" k_image = 0x%lx\n", args->k_image); + prom_debug(" k_size = 0x%lx\n", args->k_size); + prom_debug(" k_entry = 0x%lx\n", args->k_entry); + prom_debug(" k_entry2 = 0x%lx\n", args->k_entry2); + prom_debug(" hal_addr = 0x%lx\n", args->hal_addr); + prom_debug(" rd_image = 0x%lx\n", args->rd_image); + prom_debug(" rd_size = 0x%lx\n", args->rd_size); + prom_debug(" rd_loc = 0x%lx\n", args->rd_loc); + prom_printf("Performing OPAL takeover,this can take a few minutes..\n"); + prom_close_stdin(); + mb(); + data->go = 1; + for (;;) + opal_do_takeover(args); +} + +/* + * Allocate room for and instantiate OPAL + */ +static void __init prom_instantiate_opal(void) +{ + phandle opal_node; + ihandle opal_inst; + u64 base, entry; + u64 size = 0, align = 0x10000; + u32 rets[2]; + + prom_debug("prom_instantiate_opal: start...\n"); + + opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal")); + prom_debug("opal_node: %x\n", opal_node); + if (!PHANDLE_VALID(opal_node)) + return; + + prom_getprop(opal_node, "opal-runtime-size", &size, sizeof(size)); + if (size == 0) + return; + prom_getprop(opal_node, "opal-runtime-alignment", &align, + sizeof(align)); + + base = alloc_down(size, align, 0); + if (base == 0) { + prom_printf("OPAL allocation failed !\n"); + return; + } + + opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal")); + if (!IHANDLE_VALID(opal_inst)) { + prom_printf("opening opal package failed (%x)\n", opal_inst); + return; + } + + prom_printf("instantiating opal at 0x%x...", base); + + if (call_prom_ret("call-method", 4, 3, rets, + ADDR("load-opal-runtime"), + opal_inst, + base >> 32, base & 0xffffffff) != 0 + || (rets[0] == 0 && rets[1] == 0)) { + prom_printf(" failed\n"); + return; + } + entry = (((u64)rets[0]) << 32) | rets[1]; + + prom_printf(" done\n"); + + reserve_mem(base, size); + + prom_debug("opal base = 0x%x\n", base); + prom_debug("opal align = 0x%x\n", align); + prom_debug("opal entry = 0x%x\n", entry); + prom_debug("opal size = 0x%x\n", (long)size); + + prom_setprop(opal_node, "/ibm,opal", "opal-base-address", + &base, sizeof(base)); + prom_setprop(opal_node, "/ibm,opal", "opal-entry-address", + &entry, sizeof(entry)); + +#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL + RELOC(prom_opal_base) = base; + RELOC(prom_opal_entry) = entry; +#endif + prom_debug("prom_instantiate_opal: end...\n"); +} + +#endif /* CONFIG_PPC_POWERNV */ /* * Allocate room for and instantiate RTAS @@ -1297,10 +1579,8 @@ static void __init prom_instantiate_rtas(void) return; base = alloc_down(size, PAGE_SIZE, 0); - if (base == 0) { - prom_printf("RTAS allocation failed !\n"); - return; - } + if (base == 0) + prom_panic("Could not allocate memory for RTAS\n"); rtas_inst = call_prom("open", 1, 1, ADDR("/rtas")); if (!IHANDLE_VALID(rtas_inst)) { @@ -1326,6 +1606,12 @@ static void __init prom_instantiate_rtas(void) prom_setprop(rtas_node, "/rtas", "linux,rtas-entry", &entry, sizeof(entry)); +#ifdef CONFIG_PPC_POWERNV + /* PowerVN takeover hack */ + RELOC(prom_rtas_data) = base; + RELOC(prom_rtas_entry) = entry; + prom_getprop(rtas_node, "start-cpu", &RELOC(prom_rtas_start_cpu), 4); +#endif 
prom_debug("rtas base = 0x%x\n", base); prom_debug("rtas entry = 0x%x\n", entry); prom_debug("rtas size = 0x%x\n", (long)size); @@ -1543,7 +1829,7 @@ static void __init prom_hold_cpus(void) *acknowledge = (unsigned long)-1; if (reg != _prom->cpu) { - /* Primary Thread of non-boot cpu */ + /* Primary Thread of non-boot cpu or any thread */ prom_printf("starting cpu hw idx %lu... ", reg); call_prom("start-cpu", 3, 0, node, secondary_hold, reg); @@ -1652,15 +1938,6 @@ static void __init prom_init_stdout(void) prom_setprop(val, path, "linux,boot-display", NULL, 0); } -static void __init prom_close_stdin(void) -{ - struct prom_t *_prom = &RELOC(prom); - ihandle val; - - if (prom_getprop(_prom->chosen, "stdin", &val, sizeof(val)) > 0) - call_prom("close", 1, 0, val); -} - static int __init prom_find_machine_type(void) { struct prom_t *_prom = &RELOC(prom); @@ -1671,7 +1948,7 @@ static int __init prom_find_machine_type(void) int x; #endif - /* Look for a PowerMac */ + /* Look for a PowerMac or a Cell */ len = prom_getprop(_prom->root, "compatible", compat, sizeof(compat)-1); if (len > 0) { @@ -1697,7 +1974,11 @@ static int __init prom_find_machine_type(void) } } #ifdef CONFIG_PPC64 - /* If not a mac, try to figure out if it's an IBM pSeries or any other + /* Try to detect OPAL */ + if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal")))) + return PLATFORM_OPAL; + + /* Try to figure out if it's an IBM pSeries or any other * PAPR compliant platform. We assume it is if : * - /device_type is "chrp" (please, do NOT use that for future * non-IBM designs ! @@ -1924,7 +2205,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, unsigned long soff; unsigned char *valp; static char pname[MAX_PROPERTY_NAME]; - int l, room; + int l, room, has_phandle = 0; dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end); @@ -2008,19 +2289,26 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, valp = make_room(mem_start, mem_end, l, 4); call_prom("getprop", 4, 1, node, RELOC(pname), valp, l); *mem_start = _ALIGN(*mem_start, 4); + + if (!strcmp(RELOC(pname), RELOC("phandle"))) + has_phandle = 1; } - /* Add a "linux,phandle" property. */ - soff = dt_find_string(RELOC("linux,phandle")); - if (soff == 0) - prom_printf("WARNING: Can't find string index for" - " <linux-phandle> node %s\n", path); - else { - dt_push_token(OF_DT_PROP, mem_start, mem_end); - dt_push_token(4, mem_start, mem_end); - dt_push_token(soff, mem_start, mem_end); - valp = make_room(mem_start, mem_end, 4, 4); - *(u32 *)valp = node; + /* Add a "linux,phandle" property if no "phandle" property already + * existed (can happen with OPAL) + */ + if (!has_phandle) { + soff = dt_find_string(RELOC("linux,phandle")); + if (soff == 0) + prom_printf("WARNING: Can't find string index for" + " <linux-phandle> node %s\n", path); + else { + dt_push_token(OF_DT_PROP, mem_start, mem_end); + dt_push_token(4, mem_start, mem_end); + dt_push_token(soff, mem_start, mem_end); + valp = make_room(mem_start, mem_end, 4, 4); + *(u32 *)valp = node; + } } /* do all our children */ @@ -2504,6 +2792,7 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4) #endif /* CONFIG_BLK_DEV_INITRD */ } + /* * We enter here early on, when the Open Firmware prom is still * handling exceptions and the MMU hash table for us. 
@@ -2553,6 +2842,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, * between pSeries SMP and pSeries LPAR */ RELOC(of_platform) = prom_find_machine_type(); + prom_printf("Detected machine type: %x\n", RELOC(of_platform)); #ifndef CONFIG_RELOCATABLE /* Bail if this is a kdump kernel. */
@@ -2565,7 +2855,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, */ prom_check_initrd(r3, r4); -#ifdef CONFIG_PPC_PSERIES +#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) /* * On pSeries, inform the firmware about our capabilities */
@@ -2611,14 +2901,33 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, #endif /* - * On non-powermacs, try to instantiate RTAS and puts all CPUs - * in spin-loops. PowerMacs don't have a working RTAS and use - * a different way to spin CPUs + * On non-powermacs, try to instantiate RTAS. PowerMacs don't + * have a usable RTAS implementation. */ - if (RELOC(of_platform) != PLATFORM_POWERMAC) { + if (RELOC(of_platform) != PLATFORM_POWERMAC && + RELOC(of_platform) != PLATFORM_OPAL) prom_instantiate_rtas(); +
+#ifdef CONFIG_PPC_POWERNV + /* Detect HAL and try instantiating it & doing takeover */ + if (RELOC(of_platform) == PLATFORM_PSERIES_LPAR) { + prom_query_opal(); + if (RELOC(of_platform) == PLATFORM_OPAL) { + prom_opal_hold_cpus(); + prom_opal_takeover(); + } + } else if (RELOC(of_platform) == PLATFORM_OPAL) + prom_instantiate_opal(); +#endif +
+ /* + * On non-powermacs, put all CPUs in spin-loops. + * + * PowerMacs use a different mechanism to spin CPUs + */ + if (RELOC(of_platform) != PLATFORM_POWERMAC && + RELOC(of_platform) != PLATFORM_OPAL) prom_hold_cpus(); - } /* * Fill in some infos for use by the kernel later on
@@ -2685,7 +2994,13 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, reloc_got2(-offset); #endif - __start(hdr, kbase, 0); +#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL + /* OPAL early debug gets the OPAL base & entry in r8 and r9 */ + __start(hdr, kbase, 0, 0, 0, + RELOC(prom_opal_base), RELOC(prom_opal_entry)); +#else + __start(hdr, kbase, 0, 0, 0, 0, 0); +#endif return 0; }
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh index 9f82f493789..70f4286eaa7 100644 --- a/arch/powerpc/kernel/prom_init_check.sh +++ b/arch/powerpc/kernel/prom_init_check.sh @@ -20,7 +20,9 @@ WHITELIST="add_reloc_offset __bss_start __bss_stop copy_and_flush _end enter_prom memcpy memset reloc_offset __secondary_hold __secondary_hold_acknowledge __secondary_hold_spinloop __start strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224 -reloc_got2 kernstart_addr memstart_addr linux_banner" +reloc_got2 kernstart_addr memstart_addr linux_banner _stext +opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry +boot_command_line" NM="$1" OBJ="$2"
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c index 47187cc2cf0..4e1331b8eb3 100644 --- a/arch/powerpc/kernel/prom_parse.c +++ b/arch/powerpc/kernel/prom_parse.c @@ -2,7 +2,6 @@ #include <linux/kernel.h> #include <linux/string.h> -#include <linux/module.h> #include <linux/ioport.h> #include <linux/etherdevice.h> #include <linux/of_address.h>
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 05b7dd217f6..5de73dbd15c 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -30,9 +30,6 @@ #include <linux/seccomp.h> #include <linux/audit.h> #include <trace/syscall.h> -#ifdef CONFIG_PPC32 -#include
<linux/module.h> -#endif #include <linux/hw_breakpoint.h> #include <linux/perf_event.h> @@ -1497,9 +1494,14 @@ long arch_ptrace(struct task_struct *child, long request, if (index < PT_FPR0) { tmp = ptrace_get_reg(child, (int) index); } else { + unsigned int fpidx = index - PT_FPR0; + flush_fp_to_thread(child); - tmp = ((unsigned long *)child->thread.fpr) - [TS_FPRWIDTH * (index - PT_FPR0)]; + if (fpidx < (PT_FPSCR - PT_FPR0)) + tmp = ((unsigned long *)child->thread.fpr) + [fpidx * TS_FPRWIDTH]; + else + tmp = child->thread.fpscr.val; } ret = put_user(tmp, datalp); break; @@ -1525,9 +1527,14 @@ long arch_ptrace(struct task_struct *child, long request, if (index < PT_FPR0) { ret = ptrace_put_reg(child, index, data); } else { + unsigned int fpidx = index - PT_FPR0; + flush_fp_to_thread(child); - ((unsigned long *)child->thread.fpr) - [TS_FPRWIDTH * (index - PT_FPR0)] = data; + if (fpidx < (PT_FPSCR - PT_FPR0)) + ((unsigned long *)child->thread.fpr) + [fpidx * TS_FPRWIDTH] = data; + else + child->thread.fpscr.val = data; ret = 0; } break; diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index d5ca8236315..517b1d8f455 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -15,7 +15,7 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/spinlock.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/init.h> #include <linux/capability.h> #include <linux/delay.h> diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index b1d738d1289..77bb77da05c 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -12,7 +12,7 @@ #undef DEBUG -#include <linux/module.h> +#include <linux/export.h> #include <linux/string.h> #include <linux/sched.h> #include <linux/init.h> diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 209135af0a4..ac761081511 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -107,6 +107,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr) PTRRELOC(&__start___lwsync_fixup), PTRRELOC(&__stop___lwsync_fixup)); + do_final_fixups(); + return KERNELBASE + offset; } @@ -117,7 +119,7 @@ notrace unsigned long __init early_init(unsigned long dt_ptr) * This is called very early on the boot process, after a minimal * MMU environment has been set up but before MMU_init is called. */ -notrace void __init machine_init(unsigned long dt_ptr) +notrace void __init machine_init(u64 dt_ptr) { lockdep_init(); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index aebef1320ed..fb9bb46e7e8 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -12,7 +12,7 @@ #undef DEBUG -#include <linux/module.h> +#include <linux/export.h> #include <linux/string.h> #include <linux/sched.h> #include <linux/init.h> @@ -278,14 +278,14 @@ static void __init initialize_cache_info(void) DBG(" -> initialize_cache_info()\n"); - for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) { + for_each_node_by_type(np, "cpu") { num_cpus += 1; - /* We're assuming *all* of the CPUs have the same + /* + * We're assuming *all* of the CPUs have the same * d-cache and i-cache sizes... 
-Peter */ - - if ( num_cpus == 1 ) { + if (num_cpus == 1) { const u32 *sizep, *lsizep; u32 size, lsize; @@ -294,10 +294,13 @@ static void __init initialize_cache_info(void) sizep = of_get_property(np, "d-cache-size", NULL); if (sizep != NULL) size = *sizep; - lsizep = of_get_property(np, "d-cache-block-size", NULL); + lsizep = of_get_property(np, "d-cache-block-size", + NULL); /* fallback if block size missing */ if (lsizep == NULL) - lsizep = of_get_property(np, "d-cache-line-size", NULL); + lsizep = of_get_property(np, + "d-cache-line-size", + NULL); if (lsizep != NULL) lsize = *lsizep; if (sizep == 0 || lsizep == 0) @@ -314,9 +317,12 @@ static void __init initialize_cache_info(void) sizep = of_get_property(np, "i-cache-size", NULL); if (sizep != NULL) size = *sizep; - lsizep = of_get_property(np, "i-cache-block-size", NULL); + lsizep = of_get_property(np, "i-cache-block-size", + NULL); if (lsizep == NULL) - lsizep = of_get_property(np, "i-cache-line-size", NULL); + lsizep = of_get_property(np, + "i-cache-line-size", + NULL); if (lsizep != NULL) lsize = *lsizep; if (sizep == 0 || lsizep == 0) @@ -353,6 +359,7 @@ void __init setup_system(void) &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); do_lwsync_fixups(cur_cpu_spec->cpu_features, &__start___lwsync_fixup, &__stop___lwsync_fixup); + do_final_fixups(); /* * Unflatten the device-tree passed by prom_init or kexec diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 78b76dc54df..836a5a19eb2 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -97,7 +97,7 @@ static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set) compat_sigset_t cset; switch (_NSIG_WORDS) { - case 4: cset.sig[5] = set->sig[3] & 0xffffffffull; + case 4: cset.sig[6] = set->sig[3] & 0xffffffffull; cset.sig[7] = set->sig[3] >> 32; case 3: cset.sig[4] = set->sig[2] & 0xffffffffull; cset.sig[5] = set->sig[2] >> 32; diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index e91c736cc84..a50b5ec281d 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -23,7 +23,6 @@ #include <linux/stddef.h> #include <linux/elf.h> #include <linux/ptrace.h> -#include <linux/module.h> #include <linux/ratelimit.h> #include <asm/sigcontext.h> diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 7bf2187dfd9..6df70907d60 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -18,7 +18,7 @@ #undef DEBUG #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/interrupt.h> @@ -70,6 +70,10 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) + +/* State of each CPU during hotplug phases */ +static DEFINE_PER_CPU(int, cpu_state) = { 0 }; + #else static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; #define get_idle_for_cpu(x) (idle_thread_array[(x)]) @@ -104,12 +108,25 @@ int __devinit smp_generic_kick_cpu(int nr) * cpu_start field to become non-zero After we set cpu_start, * the processor will continue on to secondary_start */ - paca[nr].cpu_start = 1; - smp_mb(); + if (!paca[nr].cpu_start) { + paca[nr].cpu_start = 1; + smp_mb(); + return 0; + } + +#ifdef CONFIG_HOTPLUG_CPU + /* + * Ok it's not there, so it might be soft-unplugged, let's + * try to bring it back + */ + 
per_cpu(cpu_state, nr) = CPU_UP_PREPARE; + smp_wmb(); + smp_send_reschedule(nr); +#endif /* CONFIG_HOTPLUG_CPU */ return 0; } -#endif +#endif /* CONFIG_PPC64 */ static irqreturn_t call_function_action(int irq, void *data) { @@ -170,7 +187,7 @@ int smp_request_message_ipi(int virq, int msg) return 1; } #endif - err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU, + err = request_irq(virq, smp_ipi_action[msg], IRQF_PERCPU, smp_ipi_name[msg], 0); WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n", virq, smp_ipi_name[msg], err); @@ -357,8 +374,6 @@ void __devinit smp_prepare_boot_cpu(void) } #ifdef CONFIG_HOTPLUG_CPU -/* State of each CPU during hotplug phases */ -static DEFINE_PER_CPU(int, cpu_state) = { 0 }; int generic_cpu_disable(void) { @@ -406,6 +421,11 @@ void generic_set_cpu_dead(unsigned int cpu) { per_cpu(cpu_state, cpu) = CPU_DEAD; } + +int generic_check_cpu_restart(unsigned int cpu) +{ + return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE; +} #endif struct create_idle { diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c index b0dbb1daa4d..3d30ef1038e 100644 --- a/arch/powerpc/kernel/stacktrace.c +++ b/arch/powerpc/kernel/stacktrace.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. */ -#include <linux/module.h> +#include <linux/export.h> #include <linux/sched.h> #include <linux/stacktrace.h> #include <asm/ptrace.h> diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c index aa17b76dd42..641f9adc620 100644 --- a/arch/powerpc/kernel/swsusp.c +++ b/arch/powerpc/kernel/swsusp.c @@ -33,6 +33,6 @@ void save_processor_state(void) void restore_processor_state(void) { #ifdef CONFIG_PPC32 - switch_mmu_context(NULL, current->active_mm); + switch_mmu_context(current->active_mm, current->active_mm); #endif } diff --git a/arch/powerpc/kernel/swsusp_64.c b/arch/powerpc/kernel/swsusp_64.c index 6f3f0697274..168e8848022 100644 --- a/arch/powerpc/kernel/swsusp_64.c +++ b/arch/powerpc/kernel/swsusp_64.c @@ -9,6 +9,7 @@ #include <asm/system.h> #include <asm/iommu.h> #include <linux/irq.h> +#include <linux/sched.h> #include <linux/interrupt.h> void do_after_copyback(void) diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index f0f2199e64e..ce035c1905f 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -4,7 +4,7 @@ #include <linux/percpu.h> #include <linux/init.h> #include <linux/sched.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/nodemask.h> #include <linux/cpumask.h> #include <linux/notifier.h> diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 03b29a6759a..522bb1dfc35 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -33,7 +33,7 @@ */ #include <linux/errno.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/param.h> diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index f19d9777d3c..5459d148a0f 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -457,7 +457,14 @@ int machine_check_e500mc(struct pt_regs *regs) if (reason & MCSR_DCPERR_MC) { printk("Data Cache Parity Error\n"); - recoverable = 0; + + /* + * In write shadow mode we auto-recover from the error, but it + * may still get logged and cause a machine check. We should + * only treat the non-write shadow case as non-recoverable. 
+ */ + if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS)) + recoverable = 0; } if (reason & MCSR_L2MMU_MHIT) { @@ -1291,14 +1298,12 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) if (user_mode(regs)) { current->thread.dbcr0 &= ~DBCR0_IC; -#ifdef CONFIG_PPC_ADV_DEBUG_REGS if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1)) regs->msr |= MSR_DE; else /* Make sure the IDM bit is off */ current->thread.dbcr0 &= ~DBCR0_IDM; -#endif } _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index b4607a91d1f..57fa2c0a531 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c @@ -69,6 +69,12 @@ void __init udbg_early_init(void) udbg_init_wsp(); #elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC) udbg_init_ehv_bc(); +#elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC) + udbg_init_ps3gelic(); +#elif defined(CONFIG_PPC_EARLY_DEBUG_OPAL_RAW) + udbg_init_debug_opal_raw(); +#elif defined(CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI) + udbg_init_debug_opal_hvsi(); #endif #ifdef CONFIG_PPC_EARLY_DEBUG diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 142ab1008c3..7d14bb697d4 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -9,7 +9,6 @@ * 2 of the License, or (at your option) any later version. */ -#include <linux/module.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 1b695fdc362..f65af61996b 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -15,11 +15,12 @@ */ #include <linux/types.h> +#include <linux/stat.h> #include <linux/device.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/console.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/kobject.h> @@ -605,15 +606,20 @@ static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask) return dma_iommu_ops.dma_supported(dev, mask); } -struct dma_map_ops vio_dma_mapping_ops = { - .alloc_coherent = vio_dma_iommu_alloc_coherent, - .free_coherent = vio_dma_iommu_free_coherent, - .map_sg = vio_dma_iommu_map_sg, - .unmap_sg = vio_dma_iommu_unmap_sg, - .map_page = vio_dma_iommu_map_page, - .unmap_page = vio_dma_iommu_unmap_page, - .dma_supported = vio_dma_iommu_dma_supported, +static u64 vio_dma_get_required_mask(struct device *dev) +{ + return dma_iommu_ops.get_required_mask(dev); +} +struct dma_map_ops vio_dma_mapping_ops = { + .alloc_coherent = vio_dma_iommu_alloc_coherent, + .free_coherent = vio_dma_iommu_free_coherent, + .map_sg = vio_dma_iommu_map_sg, + .unmap_sg = vio_dma_iommu_unmap_sg, + .map_page = vio_dma_iommu_map_page, + .unmap_page = vio_dma_iommu_unmap_page, + .dma_supported = vio_dma_iommu_dma_supported, + .get_required_mask = vio_dma_get_required_mask, }; /** |
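For context on the .get_required_mask hook wired into vio_dma_mapping_ops above: a driver typically compares the bus-required mask against 32 bits before opting into 64-bit DMA addressing. A minimal usage sketch (the function name is illustrative, not part of the patch):

#include <linux/dma-mapping.h>

/* Illustrative only: pick 64-bit DMA when the platform reports it actually
 * needs more than 32 bits of address space and the device mask is accepted,
 * otherwise fall back to a 32-bit mask. */
static int example_setup_dma_mask(struct device *dev)
{
	if (dma_get_required_mask(dev) > DMA_BIT_MASK(32) &&
	    dma_set_mask(dev, DMA_BIT_MASK(64)) == 0)
		return 0;

	return dma_set_mask(dev, DMA_BIT_MASK(32));
}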