author	Daniel Vetter <daniel.vetter@ffwll.ch>	2014-03-18 09:43:56 +0100
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2014-03-18 09:43:56 +0100
commit	e19b9137142988bec5a76c5f8bdf12a77ea802b0 (patch)
tree	b36c83fa93da7f18c1331252fb82a87431697443 /arch/powerpc
parent	2fae6a860ca9adb0c881f6dcd633df775c2520e9 (diff)
parent	e40d641099213145a034981e646dc2180a488152 (diff)
Merge remote-tracking branch 'airlied/drm-next' into drm-intel-next
Conflicts:
	drivers/gpu/drm/i915/Makefile

Makefile cleanup in drm-intel-next conflicts with a build-fix to move
intel_opregion under CONFIG_ACPI.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
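The conflict is the usual kbuild pattern of guarding an object file behind a Kconfig symbol. A minimal sketch of what such a guard looks like in a kbuild Makefile — the object and symbol names below are illustrative, not copied from the conflicting i915 Makefile:

	# Illustrative kbuild fragment for a DRM driver Makefile.
	# Objects listed under <module>-y are always linked into the module.
	i915-y := i915_drv.o

	# Guarding an object behind CONFIG_ACPI: it is compiled in only when
	# the kernel is configured with ACPI support, and dropped otherwise.
	i915-$(CONFIG_ACPI) += intel_opregion.o

	# The module itself is built only when its own Kconfig symbol is set.
	obj-$(CONFIG_DRM_I915) += i915.o

A Makefile reorganisation on one branch and a line moving an object under such a $(CONFIG_...) guard on the other touch the same list, which is why the merge needs a manual resolution here.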
Diffstat (limited to 'arch/powerpc')
-rw-r--r--arch/powerpc/Kconfig32
-rw-r--r--arch/powerpc/boot/.gitignore1
-rw-r--r--arch/powerpc/boot/Makefile7
-rw-r--r--arch/powerpc/boot/dts/ac14xx.dts7
-rw-r--r--arch/powerpc/boot/dts/adder875-redboot.dts2
-rw-r--r--arch/powerpc/boot/dts/adder875-uboot.dts2
-rw-r--r--arch/powerpc/boot/dts/asp834x-redboot.dts2
-rw-r--r--arch/powerpc/boot/dts/c2k.dts5
-rw-r--r--arch/powerpc/boot/dts/ep8248e.dts3
-rw-r--r--arch/powerpc/boot/dts/ep88xc.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/elo3-dma-2.dtsi82
-rw-r--r--arch/powerpc/boot/dts/fsl/p1020si-post.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/p1021si-post.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/p1022si-post.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/p1023si-post.dtsi3
-rw-r--r--arch/powerpc/boot/dts/gef_ppc9a.dts2
-rw-r--r--arch/powerpc/boot/dts/gef_sbc310.dts2
-rw-r--r--arch/powerpc/boot/dts/gef_sbc610.dts2
-rw-r--r--arch/powerpc/boot/dts/holly.dts1
-rw-r--r--arch/powerpc/boot/dts/kilauea.dts2
-rw-r--r--arch/powerpc/boot/dts/ksi8560.dts3
-rw-r--r--arch/powerpc/boot/dts/mpc5121.dtsi113
-rw-r--r--arch/powerpc/boot/dts/mpc5125twr.dts53
-rw-r--r--arch/powerpc/boot/dts/mpc7448hpc2.dts1
-rw-r--r--arch/powerpc/boot/dts/mpc8272ads.dts3
-rw-r--r--arch/powerpc/boot/dts/mpc8308_p1m.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8308rdb.dts1
-rw-r--r--arch/powerpc/boot/dts/mpc8313erdb.dts1
-rw-r--r--arch/powerpc/boot/dts/mpc8315erdb.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc832x_mds.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc832x_rdb.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8349emitx.dts1
-rw-r--r--arch/powerpc/boot/dts/mpc8349emitxgp.dts1
-rw-r--r--arch/powerpc/boot/dts/mpc834x_mds.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc836x_mds.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc836x_rdk.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8377_mds.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8377_rdb.dts1
-rw-r--r--arch/powerpc/boot/dts/mpc8377_wlan.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8378_mds.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8378_rdb.dts1
-rw-r--r--arch/powerpc/boot/dts/mpc8379_mds.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8379_rdb.dts1
-rw-r--r--arch/powerpc/boot/dts/mpc8536ds.dtsi2
-rw-r--r--arch/powerpc/boot/dts/mpc8540ads.dts3
-rw-r--r--arch/powerpc/boot/dts/mpc8541cds.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8544ds.dtsi2
-rw-r--r--arch/powerpc/boot/dts/mpc8548cds.dtsi4
-rw-r--r--arch/powerpc/boot/dts/mpc8555cds.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8560ads.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8568mds.dts8
-rw-r--r--arch/powerpc/boot/dts/mpc8569mds.dts6
-rw-r--r--arch/powerpc/boot/dts/mpc8641_hpcn.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8641_hpcn_36b.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc866ads.dts1
-rw-r--r--arch/powerpc/boot/dts/mpc885ads.dts3
-rw-r--r--arch/powerpc/boot/dts/mvme5100.dts185
-rw-r--r--arch/powerpc/boot/dts/p1010rdb-pa.dts23
-rw-r--r--arch/powerpc/boot/dts/p1010rdb-pa.dtsi85
-rw-r--r--arch/powerpc/boot/dts/p1010rdb-pa_36b.dts (renamed from arch/powerpc/boot/dts/p1010rdb_36b.dts)47
-rw-r--r--arch/powerpc/boot/dts/p1010rdb-pb.dts35
-rw-r--r--arch/powerpc/boot/dts/p1010rdb-pb_36b.dts58
-rw-r--r--arch/powerpc/boot/dts/p1010rdb.dts66
-rw-r--r--arch/powerpc/boot/dts/p1010rdb.dtsi43
-rw-r--r--arch/powerpc/boot/dts/p1010rdb_32b.dtsi79
-rw-r--r--arch/powerpc/boot/dts/p1010rdb_36b.dtsi79
-rw-r--r--arch/powerpc/boot/dts/p1021mds.dts2
-rw-r--r--arch/powerpc/boot/dts/p1022ds.dtsi3
-rw-r--r--arch/powerpc/boot/dts/p1025rdb_32b.dts2
-rw-r--r--arch/powerpc/boot/dts/p1025twr.dts95
-rw-r--r--arch/powerpc/boot/dts/p1025twr.dtsi280
-rw-r--r--arch/powerpc/boot/dts/ppa8548.dts2
-rw-r--r--arch/powerpc/boot/dts/pq2fads.dts3
-rw-r--r--arch/powerpc/boot/dts/prpmc2800.dts5
-rw-r--r--arch/powerpc/boot/dts/sbc8349.dts2
-rw-r--r--arch/powerpc/boot/dts/sbc8548-post.dtsi2
-rw-r--r--arch/powerpc/boot/dts/sbc8641d.dts4
-rw-r--r--arch/powerpc/boot/dts/stx_gp3_8560.dts2
-rw-r--r--arch/powerpc/boot/dts/stxssa8555.dts2
-rw-r--r--arch/powerpc/boot/dts/tqm8540.dts3
-rw-r--r--arch/powerpc/boot/dts/tqm8541.dts3
-rw-r--r--arch/powerpc/boot/dts/tqm8548-bigflash.dts5
-rw-r--r--arch/powerpc/boot/dts/tqm8548.dts5
-rw-r--r--arch/powerpc/boot/dts/tqm8555.dts3
-rw-r--r--arch/powerpc/boot/dts/tqm8560.dts3
-rw-r--r--arch/powerpc/boot/dts/tqm8xx.dts1
-rw-r--r--arch/powerpc/boot/dts/virtex440-ml507.dts2
-rw-r--r--arch/powerpc/boot/mvme5100.c27
-rwxr-xr-xarch/powerpc/boot/wrapper4
-rw-r--r--arch/powerpc/configs/85xx/p1023_defconfig188
-rw-r--r--arch/powerpc/configs/adder875_defconfig1
-rw-r--r--arch/powerpc/configs/ep88xc_defconfig1
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig3
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig3
-rw-r--r--arch/powerpc/configs/mpc866_ads_defconfig1
-rw-r--r--arch/powerpc/configs/mpc885_ads_defconfig1
-rw-r--r--arch/powerpc/configs/mvme5100_defconfig144
-rw-r--r--arch/powerpc/configs/ppc64_defconfig1
-rw-r--r--arch/powerpc/configs/tqm8xx_defconfig1
-rw-r--r--arch/powerpc/include/asm/Kbuild3
-rw-r--r--arch/powerpc/include/asm/barrier.h21
-rw-r--r--arch/powerpc/include/asm/bitops.h5
-rw-r--r--arch/powerpc/include/asm/cache.h14
-rw-r--r--arch/powerpc/include/asm/clk_interface.h20
-rw-r--r--arch/powerpc/include/asm/cmpxchg.h1
-rw-r--r--arch/powerpc/include/asm/code-patching.h7
-rw-r--r--arch/powerpc/include/asm/cputable.h12
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h1
-rw-r--r--arch/powerpc/include/asm/eeh.h35
-rw-r--r--arch/powerpc/include/asm/epapr_hcalls.h111
-rw-r--r--arch/powerpc/include/asm/exception-64s.h21
-rw-r--r--arch/powerpc/include/asm/fixmap.h44
-rw-r--r--arch/powerpc/include/asm/fsl_lbc.h2
-rw-r--r--arch/powerpc/include/asm/hardirq.h3
-rw-r--r--arch/powerpc/include/asm/hugetlb.h2
-rw-r--r--arch/powerpc/include/asm/io.h16
-rw-r--r--arch/powerpc/include/asm/iommu.h55
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h4
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h27
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h1
-rw-r--r--arch/powerpc/include/asm/kvm_booke.h6
-rw-r--r--arch/powerpc/include/asm/kvm_host.h61
-rw-r--r--arch/powerpc/include/asm/kvm_para.h80
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h13
-rw-r--r--arch/powerpc/include/asm/lppaca.h2
-rw-r--r--arch/powerpc/include/asm/mce.h197
-rw-r--r--arch/powerpc/include/asm/mmu-book3e.h13
-rw-r--r--arch/powerpc/include/asm/mmu.h21
-rw-r--r--arch/powerpc/include/asm/mpc5121.h7
-rw-r--r--arch/powerpc/include/asm/opal.h108
-rw-r--r--arch/powerpc/include/asm/paca.h16
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64.h40
-rw-r--r--arch/powerpc/include/asm/pgtable.h109
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h14
-rw-r--r--arch/powerpc/include/asm/processor.h15
-rw-r--r--arch/powerpc/include/asm/ps3.h1
-rw-r--r--arch/powerpc/include/asm/pte-hash64.h8
-rw-r--r--arch/powerpc/include/asm/reg.h45
-rw-r--r--arch/powerpc/include/asm/reg_booke.h10
-rw-r--r--arch/powerpc/include/asm/sections.h12
-rw-r--r--arch/powerpc/include/asm/setup.h1
-rw-r--r--arch/powerpc/include/asm/spinlock.h12
-rw-r--r--arch/powerpc/include/asm/switch_to.h2
-rw-r--r--arch/powerpc/include/asm/systbl.h2
-rw-r--r--arch/powerpc/include/asm/thread_info.h9
-rw-r--r--arch/powerpc/include/asm/tm.h1
-rw-r--r--arch/powerpc/include/asm/topology.h10
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/asm/uprobes.h5
-rw-r--r--arch/powerpc/include/asm/vdso.h6
-rw-r--r--arch/powerpc/include/asm/vio.h1
-rw-r--r--arch/powerpc/include/uapi/asm/kvm.h3
-rw-r--r--arch/powerpc/include/uapi/asm/socket.h2
-rw-r--r--arch/powerpc/include/uapi/asm/tm.h2
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h3
-rw-r--r--arch/powerpc/kernel/Makefile2
-rw-r--r--arch/powerpc/kernel/asm-offsets.c63
-rw-r--r--arch/powerpc/kernel/cacheinfo.c4
-rw-r--r--arch/powerpc/kernel/clock.c82
-rw-r--r--arch/powerpc/kernel/cpu_setup_fsl_booke.S54
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S38
-rw-r--r--arch/powerpc/kernel/cputable.c16
-rw-r--r--arch/powerpc/kernel/crash.c1
-rw-r--r--arch/powerpc/kernel/dma-iommu.c4
-rw-r--r--arch/powerpc/kernel/dma.c10
-rw-r--r--arch/powerpc/kernel/eeh.c49
-rw-r--r--arch/powerpc/kernel/eeh_driver.c189
-rw-r--r--arch/powerpc/kernel/eeh_pe.c4
-rw-r--r--arch/powerpc/kernel/entry_64.S12
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S27
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S242
-rw-r--r--arch/powerpc/kernel/fpu.S16
-rw-r--r--arch/powerpc/kernel/fsl_booke_entry_mapping.S2
-rw-r--r--arch/powerpc/kernel/head_64.S1
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S266
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c1
-rw-r--r--arch/powerpc/kernel/idle_power7.S1
-rw-r--r--arch/powerpc/kernel/iomap.c1
-rw-r--r--arch/powerpc/kernel/iommu.c157
-rw-r--r--arch/powerpc/kernel/irq.c17
-rw-r--r--arch/powerpc/kernel/kgdb.c1
-rw-r--r--arch/powerpc/kernel/kvm.c41
-rw-r--r--arch/powerpc/kernel/machine_kexec.c14
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c6
-rw-r--r--arch/powerpc/kernel/mce.c352
-rw-r--r--arch/powerpc/kernel/mce_power.c284
-rw-r--r--arch/powerpc/kernel/misc_32.S9
-rw-r--r--arch/powerpc/kernel/misc_64.S6
-rw-r--r--arch/powerpc/kernel/paca.c37
-rw-r--r--arch/powerpc/kernel/pci-common.c4
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c4
-rw-r--r--arch/powerpc/kernel/process.c179
-rw-r--r--arch/powerpc/kernel/prom.c41
-rw-r--r--arch/powerpc/kernel/reloc_64.S4
-rw-r--r--arch/powerpc/kernel/setup_32.c8
-rw-r--r--arch/powerpc/kernel/setup_64.c50
-rw-r--r--arch/powerpc/kernel/signal.c3
-rw-r--r--arch/powerpc/kernel/signal_32.c40
-rw-r--r--arch/powerpc/kernel/signal_64.c14
-rw-r--r--arch/powerpc/kernel/smp-tbsync.c1
-rw-r--r--arch/powerpc/kernel/smp.c9
-rw-r--r--arch/powerpc/kernel/swsusp_booke.S32
-rw-r--r--arch/powerpc/kernel/syscalls.c1
-rw-r--r--arch/powerpc/kernel/sysfs.c390
-rw-r--r--arch/powerpc/kernel/time.c14
-rw-r--r--arch/powerpc/kernel/traps.c72
-rw-r--r--arch/powerpc/kernel/uprobes.c2
-rw-r--r--arch/powerpc/kernel/vdso32/vdso32_wrapper.S3
-rw-r--r--arch/powerpc/kernel/vdso64/vdso64_wrapper.S3
-rw-r--r--arch/powerpc/kernel/vector.S10
-rw-r--r--arch/powerpc/kernel/vio.c31
-rw-r--r--arch/powerpc/kvm/44x.c4
-rw-r--r--arch/powerpc/kvm/book3s.c46
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c5
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c4
-rw-r--r--arch/powerpc/kvm/book3s_exports.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv.c321
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S8
-rw-r--r--arch/powerpc/kvm/book3s_hv_ras.c50
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c8
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S1191
-rw-r--r--arch/powerpc/kvm/book3s_paired_singles.c169
-rw-r--r--arch/powerpc/kvm/book3s_pr.c155
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S47
-rw-r--r--arch/powerpc/kvm/book3s_segment.S2
-rw-r--r--arch/powerpc/kvm/book3s_xics.c4
-rw-r--r--arch/powerpc/kvm/booke.c44
-rw-r--r--arch/powerpc/kvm/booke.h5
-rw-r--r--arch/powerpc/kvm/bookehv_interrupts.S13
-rw-r--r--arch/powerpc/kvm/e500.c4
-rw-r--r--arch/powerpc/kvm/e500.h8
-rw-r--r--arch/powerpc/kvm/e500_mmu.c2
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c59
-rw-r--r--arch/powerpc/kvm/e500mc.c4
-rw-r--r--arch/powerpc/kvm/emulate.c1
-rw-r--r--arch/powerpc/kvm/mpic.c1
-rw-r--r--arch/powerpc/kvm/powerpc.c58
-rw-r--r--arch/powerpc/lib/code-patching.c15
-rw-r--r--arch/powerpc/lib/crtsavres.S186
-rw-r--r--arch/powerpc/math-emu/math_efp.c316
-rw-r--r--arch/powerpc/mm/fsl_booke_mmu.c80
-rw-r--r--arch/powerpc/mm/hash_low_64.S15
-rw-r--r--arch/powerpc/mm/hash_utils_64.c21
-rw-r--r--arch/powerpc/mm/hugepage-hash64.c6
-rw-r--r--arch/powerpc/mm/hugetlbpage-book3e.c54
-rw-r--r--arch/powerpc/mm/hugetlbpage-hash64.c4
-rw-r--r--arch/powerpc/mm/hugetlbpage.c4
-rw-r--r--arch/powerpc/mm/mem.c10
-rw-r--r--arch/powerpc/mm/mmu_decl.h2
-rw-r--r--arch/powerpc/mm/numa.c106
-rw-r--r--arch/powerpc/mm/pgtable.c3
-rw-r--r--arch/powerpc/mm/pgtable_32.c1
-rw-r--r--arch/powerpc/mm/pgtable_64.c27
-rw-r--r--arch/powerpc/mm/slice.c2
-rw-r--r--arch/powerpc/mm/subpage-prot.c2
-rw-r--r--arch/powerpc/mm/tlb_hash64.c1
-rw-r--r--arch/powerpc/mm/tlb_low_64e.S175
-rw-r--r--arch/powerpc/mm/tlb_nohash.c114
-rw-r--r--arch/powerpc/mm/tlb_nohash_low.S4
-rw-r--r--arch/powerpc/oprofile/op_model_7450.c1
-rw-r--r--arch/powerpc/oprofile/op_model_cell.c1
-rw-r--r--arch/powerpc/oprofile/op_model_fsl_emb.c1
-rw-r--r--arch/powerpc/oprofile/op_model_pa6t.c1
-rw-r--r--arch/powerpc/oprofile/op_model_power4.c1
-rw-r--r--arch/powerpc/oprofile/op_model_rs64.c1
-rw-r--r--arch/powerpc/perf/core-book3s.c5
-rw-r--r--arch/powerpc/perf/power8-pmu.c144
-rw-r--r--arch/powerpc/platforms/512x/Kconfig2
-rw-r--r--arch/powerpc/platforms/512x/Makefile3
-rw-r--r--arch/powerpc/platforms/512x/clock-commonclk.c1221
-rw-r--r--arch/powerpc/platforms/512x/clock.c754
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_shared.c169
-rw-r--r--arch/powerpc/platforms/52xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c2
-rw-r--r--arch/powerpc/platforms/83xx/Kconfig1
-rw-r--r--arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c1
-rw-r--r--arch/powerpc/platforms/83xx/suspend.c1
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig6
-rw-r--r--arch/powerpc/platforms/85xx/Makefile1
-rw-r--r--arch/powerpc/platforms/85xx/common.c38
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx.h6
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c29
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_rdb.c25
-rw-r--r--arch/powerpc/platforms/85xx/sgy_cts1000.c1
-rw-r--r--arch/powerpc/platforms/85xx/smp.c17
-rw-r--r--arch/powerpc/platforms/85xx/twr_p102x.c147
-rw-r--r--arch/powerpc/platforms/8xx/Kconfig1
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype1
-rw-r--r--arch/powerpc/platforms/cell/beat_htab.c4
-rw-r--r--arch/powerpc/platforms/cell/iommu.c14
-rw-r--r--arch/powerpc/platforms/chrp/smp.c1
-rw-r--r--arch/powerpc/platforms/embedded6xx/Kconfig13
-rw-r--r--arch/powerpc/platforms/embedded6xx/Makefile1
-rw-r--r--arch/powerpc/platforms/embedded6xx/hlwd-pic.c1
-rw-r--r--arch/powerpc/platforms/embedded6xx/mvme5100.c221
-rw-r--r--arch/powerpc/platforms/pasemi/dma_lib.c1
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c5
-rw-r--r--arch/powerpc/platforms/powermac/pfunc_core.c1
-rw-r--r--arch/powerpc/platforms/powernv/Kconfig5
-rw-r--r--arch/powerpc/platforms/powernv/Makefile1
-rw-r--r--arch/powerpc/platforms/powernv/eeh-ioda.c255
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c26
-rw-r--r--arch/powerpc/platforms/powernv/opal-flash.c39
-rw-r--r--arch/powerpc/platforms/powernv/opal-memory-errors.c146
-rw-r--r--arch/powerpc/platforms/powernv/opal-rtc.c6
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S3
-rw-r--r--arch/powerpc/platforms/powernv/opal.c267
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c95
-rw-r--r--arch/powerpc/platforms/powernv/pci-p5ioc2.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci.c240
-rw-r--r--arch/powerpc/platforms/powernv/pci.h9
-rw-r--r--arch/powerpc/platforms/powernv/powernv.h8
-rw-r--r--arch/powerpc/platforms/powernv/setup.c28
-rw-r--r--arch/powerpc/platforms/ps3/spu.c2
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig12
-rw-r--r--arch/powerpc/platforms/pseries/Makefile1
-rw-r--r--arch/powerpc/platforms/pseries/cmm.c1
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c1
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c6
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c167
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c5
-rw-r--r--arch/powerpc/platforms/pseries/pci.c22
-rw-r--r--arch/powerpc/platforms/pseries/processor_idle.c364
-rw-r--r--arch/powerpc/platforms/pseries/setup.c9
-rw-r--r--arch/powerpc/platforms/wsp/wsp_pci.c11
-rw-r--r--arch/powerpc/sysdev/Kconfig2
-rw-r--r--arch/powerpc/sysdev/axonram.c21
-rw-r--r--arch/powerpc/sysdev/cpm2_pic.c1
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c1
-rw-r--r--arch/powerpc/sysdev/fsl_ifc.c1
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c31
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c5
-rw-r--r--arch/powerpc/sysdev/ge/ge_pic.h1
-rw-r--r--arch/powerpc/sysdev/i8259.c1
-rw-r--r--arch/powerpc/sysdev/indirect_pci.c6
-rw-r--r--arch/powerpc/sysdev/mpc8xx_pic.c1
-rw-r--r--arch/powerpc/sysdev/mpic.c38
-rw-r--r--arch/powerpc/sysdev/mpic_timer.c10
-rw-r--r--arch/powerpc/sysdev/mv64x60_dev.c2
-rw-r--r--arch/powerpc/sysdev/mv64x60_udbg.c2
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_io.c1
-rw-r--r--arch/powerpc/sysdev/qe_lib/ucc.c1
-rw-r--r--arch/powerpc/sysdev/qe_lib/ucc_fast.c1
-rw-r--r--arch/powerpc/sysdev/qe_lib/ucc_slow.c1
-rw-r--r--arch/powerpc/sysdev/udbg_memcons.c1
-rw-r--r--arch/powerpc/sysdev/xics/icp-hv.c1
-rw-r--r--arch/powerpc/xmon/xmon.c28
347 files changed, 9807 insertions, 4378 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b44b52c0a8f..957bf344c0f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -86,6 +86,7 @@ config PPC
bool
default y
select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_MIGHT_HAVE_PC_SERIO
select BINFMT_ELF
select OF
select OF_EARLY_FLATTREE
@@ -139,6 +140,7 @@ config PPC
select OLD_SIGACTION if PPC32
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_IRQ_EXIT_ON_IRQ_STACK
+ select ARCH_USE_CMPXCHG_LOCKREF if PPC64
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
@@ -147,6 +149,10 @@ config EARLY_PRINTK
bool
default y
+config PANIC_TIMEOUT
+ int
+ default 180
+
config COMPAT
bool
default y if PPC64
@@ -209,9 +215,6 @@ config DEFAULT_UIMAGE
Used to allow a board to specify it wants a uImage built by default
default n
-config REDBOOT
- bool
-
config ARCH_HIBERNATION_POSSIBLE
bool
default y
@@ -339,6 +342,8 @@ config PPC_TRANSACTIONAL_MEM
bool "Transactional Memory support for POWERPC"
depends on PPC_BOOK3S_64
depends on SMP
+ select ALTIVEC
+ select VSX
default n
---help---
Support user-mode Transactional Memory on POWERPC.
@@ -379,6 +384,12 @@ config ARCH_HAS_WALK_MEMORY
config ARCH_ENABLE_MEMORY_HOTREMOVE
def_bool y
+config PPC64_SUPPORTS_MEMORY_FAILURE
+ bool "Add support for memory hwpoison"
+ depends on PPC_BOOK3S_64
+ default "y" if PPC_POWERNV
+ select ARCH_SUPPORTS_MEMORY_FAILURE
+
config KEXEC
bool "kexec system call"
depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
@@ -399,8 +410,7 @@ config KEXEC
config CRASH_DUMP
bool "Build a kdump crash kernel"
depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
- select RELOCATABLE if PPC64 || 44x
- select DYNAMIC_MEMSTART if FSL_BOOKE
+ select RELOCATABLE if PPC64 || 44x || FSL_BOOKE
help
Build a kernel suitable for use as a kdump capture kernel.
The same kernel binary can be used as production kernel and dump
@@ -524,6 +534,7 @@ config PPC_16K_PAGES
config PPC_64K_PAGES
bool "64k page size" if 44x || PPC_STD_MMU_64 || PPC_BOOK3E_64
+ depends on !PPC_FSL_BOOK3E
select PPC_HAS_HASH_64K if PPC_STD_MMU_64
config PPC_256K_PAGES
@@ -790,7 +801,7 @@ config HAS_RAPIDIO
default n
config RAPIDIO
- bool "RapidIO support"
+ tristate "RapidIO support"
depends on HAS_RAPIDIO || PCI
help
If you say Y here, the kernel will include drivers and
@@ -798,7 +809,7 @@ config RAPIDIO
config FSL_RIO
bool "Freescale Embedded SRIO Controller support"
- depends on RAPIDIO && HAS_RAPIDIO
+ depends on RAPIDIO = y && HAS_RAPIDIO
default "n"
---help---
Include support for RapidIO controller on Freescale embedded
@@ -881,7 +892,7 @@ config DYNAMIC_MEMSTART
config RELOCATABLE
bool "Build a relocatable kernel"
- depends on ADVANCED_OPTIONS && FLATMEM && 44x
+ depends on ADVANCED_OPTIONS && FLATMEM && (44x || FSL_BOOKE)
select NONSTATIC_KERNEL
help
This builds a kernel image that is capable of running at the
@@ -1037,11 +1048,6 @@ config KEYS_COMPAT
source "crypto/Kconfig"
-config PPC_CLOCK
- bool
- default n
- select HAVE_CLK
-
config PPC_LIB_RHEAP
bool
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 554734ff302..d61c0352577 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -16,6 +16,7 @@ mktree
uImage
cuImage.*
dtbImage.*
+*.dtb
treeImage.*
zImage
zImage.initrd
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index ca7f08cc4af..90e9d954866 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -71,9 +71,9 @@ src-wlib-y := string.S crt0.S crtsavres.S stdio.c main.c \
uartlite.c mpc52xx-psc.c
src-wlib-$(CONFIG_40x) += 4xx.c planetcore.c
src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c
-src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c
+src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c fsl-soc.c
src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
-src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c
+src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c fsl-soc.c
src-plat-y := of.c epapr.c
src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
@@ -95,7 +95,7 @@ src-plat-$(CONFIG_FSL_SOC_BOOKE) += cuboot-85xx.c cuboot-85xx-cpm2.c
src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \
cuboot-c2k.c gamecube-head.S \
gamecube.c wii-head.S wii.c holly.c \
- prpmc2800.c
+ prpmc2800.c fixed-head.S mvme5100.c
src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c
src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
@@ -286,6 +286,7 @@ image-$(CONFIG_MPC7448HPC2) += cuImage.mpc7448hpc2
image-$(CONFIG_PPC_C2K) += cuImage.c2k
image-$(CONFIG_GAMECUBE) += dtbImage.gamecube
image-$(CONFIG_WII) += dtbImage.wii
+image-$(CONFIG_MVME5100) += dtbImage.mvme5100
# Board port in arch/powerpc/platform/amigaone/Kconfig
image-$(CONFIG_AMIGAONE) += cuImage.amigaone
diff --git a/arch/powerpc/boot/dts/ac14xx.dts b/arch/powerpc/boot/dts/ac14xx.dts
index a543c4088cb..a1b883730b3 100644
--- a/arch/powerpc/boot/dts/ac14xx.dts
+++ b/arch/powerpc/boot/dts/ac14xx.dts
@@ -139,7 +139,14 @@
};
};
+ clocks {
+ osc {
+ clock-frequency = <25000000>;
+ };
+ };
+
soc@80000000 {
+ bus-frequency = <80000000>; /* 80 MHz ips bus */
clock@f00 {
compatible = "fsl,mpc5121rev2-clock", "fsl,mpc5121-clock";
diff --git a/arch/powerpc/boot/dts/adder875-redboot.dts b/arch/powerpc/boot/dts/adder875-redboot.dts
index 28e9cd3d7a2..083984720b2 100644
--- a/arch/powerpc/boot/dts/adder875-redboot.dts
+++ b/arch/powerpc/boot/dts/adder875-redboot.dts
@@ -87,12 +87,10 @@
PHY0: ethernet-phy@0 {
reg = <0>;
- device_type = "ethernet-phy";
};
PHY1: ethernet-phy@1 {
reg = <1>;
- device_type = "ethernet-phy";
};
};
diff --git a/arch/powerpc/boot/dts/adder875-uboot.dts b/arch/powerpc/boot/dts/adder875-uboot.dts
index 54fb60ec03e..e4554caf8f8 100644
--- a/arch/powerpc/boot/dts/adder875-uboot.dts
+++ b/arch/powerpc/boot/dts/adder875-uboot.dts
@@ -86,12 +86,10 @@
PHY0: ethernet-phy@0 {
reg = <0>;
- device_type = "ethernet-phy";
};
PHY1: ethernet-phy@1 {
reg = <1>;
- device_type = "ethernet-phy";
};
};
diff --git a/arch/powerpc/boot/dts/asp834x-redboot.dts b/arch/powerpc/boot/dts/asp834x-redboot.dts
index 227290db866..9198745f45f 100644
--- a/arch/powerpc/boot/dts/asp834x-redboot.dts
+++ b/arch/powerpc/boot/dts/asp834x-redboot.dts
@@ -207,14 +207,12 @@
interrupt-parent = <&ipic>;
interrupts = <17 0x8>;
reg = <0x1>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@1 {
interrupt-parent = <&ipic>;
interrupts = <18 0x8>;
reg = <0x2>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
diff --git a/arch/powerpc/boot/dts/c2k.dts b/arch/powerpc/boot/dts/c2k.dts
index f5d625fa3e5..1e32903cb0a 100644
--- a/arch/powerpc/boot/dts/c2k.dts
+++ b/arch/powerpc/boot/dts/c2k.dts
@@ -73,19 +73,16 @@
compatible = "marvell,mv64360-mdio";
reg = <0x2000 4>;
PHY0: ethernet-phy@0 {
- device_type = "ethernet-phy";
interrupts = <76>; /* GPP 12 */
interrupt-parent = <&PIC>;
reg = <0>;
};
PHY1: ethernet-phy@1 {
- device_type = "ethernet-phy";
interrupts = <76>; /* GPP 12 */
interrupt-parent = <&PIC>;
reg = <1>;
};
PHY2: ethernet-phy@2 {
- device_type = "ethernet-phy";
interrupts = <76>; /* GPP 12 */
interrupt-parent = <&PIC>;
reg = <2>;
@@ -174,7 +171,6 @@
};
MPSC0: mpsc@8000 {
- device_type = "serial";
compatible = "marvell,mv64360-mpsc";
reg = <0x8000 0x38>;
virtual-reg = <0xd8008000>;
@@ -189,7 +185,6 @@
};
MPSC1: mpsc@9000 {
- device_type = "serial";
compatible = "marvell,mv64360-mpsc";
reg = <0x9000 0x38>;
virtual-reg = <0xd8009000>;
diff --git a/arch/powerpc/boot/dts/ep8248e.dts b/arch/powerpc/boot/dts/ep8248e.dts
index 756758fb5b7..8b3a49f34f5 100644
--- a/arch/powerpc/boot/dts/ep8248e.dts
+++ b/arch/powerpc/boot/dts/ep8248e.dts
@@ -67,7 +67,6 @@
ranges;
mdio {
- device_type = "mdio";
compatible = "fsl,ep8248e-mdio-bitbang";
#address-cells = <1>;
#size-cells = <0>;
@@ -76,13 +75,11 @@
PHY0: ethernet-phy@0 {
interrupt-parent = <&PIC>;
reg = <0>;
- device_type = "ethernet-phy";
};
PHY1: ethernet-phy@1 {
interrupt-parent = <&PIC>;
reg = <1>;
- device_type = "ethernet-phy";
};
};
};
diff --git a/arch/powerpc/boot/dts/ep88xc.dts b/arch/powerpc/boot/dts/ep88xc.dts
index ae57d624012..2aa5bf55964 100644
--- a/arch/powerpc/boot/dts/ep88xc.dts
+++ b/arch/powerpc/boot/dts/ep88xc.dts
@@ -85,12 +85,10 @@
PHY0: ethernet-phy@0 {
reg = <0x0>;
- device_type = "ethernet-phy";
};
PHY1: ethernet-phy@1 {
reg = <0x1>;
- device_type = "ethernet-phy";
};
};
diff --git a/arch/powerpc/boot/dts/fsl/elo3-dma-2.dtsi b/arch/powerpc/boot/dts/fsl/elo3-dma-2.dtsi
new file mode 100644
index 00000000000..d3cc8d0f7c2
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/elo3-dma-2.dtsi
@@ -0,0 +1,82 @@
+/*
+ * QorIQ Elo3 DMA device tree stub [ controller @ offset 0x102300 ]
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+dma2: dma@102300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,elo3-dma";
+ reg = <0x102300 0x4>,
+ <0x102600 0x4>;
+ ranges = <0x0 0x102100 0x500>;
+ dma-channel@0 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ interrupts = <464 2 0 0>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ interrupts = <465 2 0 0>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ interrupts = <466 2 0 0>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ interrupts = <467 2 0 0>;
+ };
+ dma-channel@300 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x300 0x80>;
+ interrupts = <468 2 0 0>;
+ };
+ dma-channel@380 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x380 0x80>;
+ interrupts = <469 2 0 0>;
+ };
+ dma-channel@400 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x400 0x80>;
+ interrupts = <470 2 0 0>;
+ };
+ dma-channel@480 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x480 0x80>;
+ interrupts = <471 2 0 0>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
index 68cc5e7f647..642dc3a83d0 100644
--- a/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
@@ -36,7 +36,8 @@
#address-cells = <2>;
#size-cells = <1>;
compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus";
- interrupts = <19 2 0 0>;
+ interrupts = <19 2 0 0>,
+ <16 2 0 0>;
};
/* controller at 0x9000 */
diff --git a/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
index adb82fd9057..407cb5fd0f5 100644
--- a/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
@@ -36,7 +36,8 @@
#address-cells = <2>;
#size-cells = <1>;
compatible = "fsl,p1021-elbc", "fsl,elbc", "simple-bus";
- interrupts = <19 2 0 0>;
+ interrupts = <19 2 0 0>,
+ <16 2 0 0>;
};
/* controller at 0x9000 */
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
index e179803a81e..ebf20223454 100644
--- a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
@@ -40,7 +40,8 @@
* pin muxing when the DIU is enabled.
*/
compatible = "fsl,p1022-elbc", "fsl,elbc";
- interrupts = <19 2 0 0>;
+ interrupts = <19 2 0 0>,
+ <16 2 0 0>;
};
/* controller at 0x9000 */
diff --git a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
index f1105bffa91..81437fdf1db 100644
--- a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
@@ -36,7 +36,8 @@
#address-cells = <2>;
#size-cells = <1>;
compatible = "fsl,p1023-elbc", "fsl,elbc", "simple-bus";
- interrupts = <19 2 0 0>;
+ interrupts = <19 2 0 0>,
+ <16 2 0 0>;
};
/* controller at 0xa000 */
diff --git a/arch/powerpc/boot/dts/gef_ppc9a.dts b/arch/powerpc/boot/dts/gef_ppc9a.dts
index 38dcb96c8e2..83eb0fda266 100644
--- a/arch/powerpc/boot/dts/gef_ppc9a.dts
+++ b/arch/powerpc/boot/dts/gef_ppc9a.dts
@@ -292,13 +292,11 @@
interrupt-parent = <&gef_pic>;
interrupts = <0x9 0x4>;
reg = <1>;
- device_type = "ethernet-phy";
};
phy2: ethernet-phy@2 {
interrupt-parent = <&gef_pic>;
interrupts = <0x8 0x4>;
reg = <3>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/gef_sbc310.dts b/arch/powerpc/boot/dts/gef_sbc310.dts
index 5ab8932d09b..d426dd3de9e 100644
--- a/arch/powerpc/boot/dts/gef_sbc310.dts
+++ b/arch/powerpc/boot/dts/gef_sbc310.dts
@@ -290,13 +290,11 @@
interrupt-parent = <&gef_pic>;
interrupts = <0x9 0x4>;
reg = <1>;
- device_type = "ethernet-phy";
};
phy2: ethernet-phy@2 {
interrupt-parent = <&gef_pic>;
interrupts = <0x8 0x4>;
reg = <3>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/gef_sbc610.dts b/arch/powerpc/boot/dts/gef_sbc610.dts
index d5341f5741a..5db3399b76b 100644
--- a/arch/powerpc/boot/dts/gef_sbc610.dts
+++ b/arch/powerpc/boot/dts/gef_sbc610.dts
@@ -290,13 +290,11 @@
interrupt-parent = <&gef_pic>;
interrupts = <0x9 0x4>;
reg = <1>;
- device_type = "ethernet-phy";
};
phy2: ethernet-phy@2 {
interrupt-parent = <&gef_pic>;
interrupts = <0x8 0x4>;
reg = <3>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/holly.dts b/arch/powerpc/boot/dts/holly.dts
index c6e11ebeceb..43e6f0c8e44 100644
--- a/arch/powerpc/boot/dts/holly.dts
+++ b/arch/powerpc/boot/dts/holly.dts
@@ -58,7 +58,6 @@
};
MDIO: mdio@6000 {
- device_type = "mdio";
compatible = "tsi109-mdio", "tsi108-mdio";
reg = <0x00006000 0x00000050>;
#address-cells = <1>;
diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts
index 1613d6e4049..5ba7f01e2a2 100644
--- a/arch/powerpc/boot/dts/kilauea.dts
+++ b/arch/powerpc/boot/dts/kilauea.dts
@@ -406,7 +406,7 @@
MSI: ppc4xx-msi@C10000000 {
compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
- reg = < 0x0 0xEF620000 0x100>;
+ reg = <0xEF620000 0x100>;
sdr-base = <0x4B0>;
msi-data = <0x00000000>;
msi-mask = <0x44440000>;
diff --git a/arch/powerpc/boot/dts/ksi8560.dts b/arch/powerpc/boot/dts/ksi8560.dts
index 296c572ea60..5d68236e7c3 100644
--- a/arch/powerpc/boot/dts/ksi8560.dts
+++ b/arch/powerpc/boot/dts/ksi8560.dts
@@ -161,13 +161,11 @@
PHY1: ethernet-phy@1 {
interrupt-parent = <&mpic>;
reg = <0x1>;
- device_type = "ethernet-phy";
};
PHY2: ethernet-phy@2 {
interrupt-parent = <&mpic>;
reg = <0x2>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
@@ -284,7 +282,6 @@
PHY0: ethernet-phy@0 {
interrupt-parent = <&mpic>;
reg = <0x0>;
- device_type = "ethernet-phy";
};
};
diff --git a/arch/powerpc/boot/dts/mpc5121.dtsi b/arch/powerpc/boot/dts/mpc5121.dtsi
index 2d7cb04ac96..2c0e1552d20 100644
--- a/arch/powerpc/boot/dts/mpc5121.dtsi
+++ b/arch/powerpc/boot/dts/mpc5121.dtsi
@@ -9,6 +9,8 @@
* option) any later version.
*/
+#include <dt-bindings/clock/mpc512x-clock.h>
+
/dts-v1/;
/ {
@@ -49,6 +51,10 @@
compatible = "fsl,mpc5121-mbx";
reg = <0x20000000 0x4000>;
interrupts = <66 0x8>;
+ clocks = <&clks MPC512x_CLK_MBX_BUS>,
+ <&clks MPC512x_CLK_MBX_3D>,
+ <&clks MPC512x_CLK_MBX>;
+ clock-names = "mbx-bus", "mbx-3d", "mbx";
};
sram@30000000 {
@@ -62,6 +68,8 @@
interrupts = <6 8>;
#address-cells = <1>;
#size-cells = <1>;
+ clocks = <&clks MPC512x_CLK_NFC>;
+ clock-names = "ipg";
};
localbus@80000020 {
@@ -73,6 +81,17 @@
ranges = <0x0 0x0 0xfc000000 0x04000000>;
};
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ osc: osc {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <33000000>;
+ };
+ };
+
soc@80000000 {
compatible = "fsl,mpc5121-immr";
#address-cells = <1>;
@@ -117,9 +136,12 @@
};
/* Clock control */
- clock@f00 {
+ clks: clock@f00 {
compatible = "fsl,mpc5121-clock";
reg = <0xf00 0x100>;
+ #clock-cells = <1>;
+ clocks = <&osc>;
+ clock-names = "osc";
};
/* Power Management Controller */
@@ -139,12 +161,24 @@
compatible = "fsl,mpc5121-mscan";
reg = <0x1300 0x80>;
interrupts = <12 0x8>;
+ clocks = <&clks MPC512x_CLK_BDLC>,
+ <&clks MPC512x_CLK_IPS>,
+ <&clks MPC512x_CLK_SYS>,
+ <&clks MPC512x_CLK_REF>,
+ <&clks MPC512x_CLK_MSCAN0_MCLK>;
+ clock-names = "ipg", "ips", "sys", "ref", "mclk";
};
can@1380 {
compatible = "fsl,mpc5121-mscan";
reg = <0x1380 0x80>;
interrupts = <13 0x8>;
+ clocks = <&clks MPC512x_CLK_BDLC>,
+ <&clks MPC512x_CLK_IPS>,
+ <&clks MPC512x_CLK_SYS>,
+ <&clks MPC512x_CLK_REF>,
+ <&clks MPC512x_CLK_MSCAN1_MCLK>;
+ clock-names = "ipg", "ips", "sys", "ref", "mclk";
};
sdhc@1500 {
@@ -153,6 +187,9 @@
interrupts = <8 0x8>;
dmas = <&dma0 30>;
dma-names = "rx-tx";
+ clocks = <&clks MPC512x_CLK_IPS>,
+ <&clks MPC512x_CLK_SDHC>;
+ clock-names = "ipg", "per";
};
i2c@1700 {
@@ -161,6 +198,8 @@
compatible = "fsl,mpc5121-i2c", "fsl-i2c";
reg = <0x1700 0x20>;
interrupts = <9 0x8>;
+ clocks = <&clks MPC512x_CLK_I2C>;
+ clock-names = "ipg";
};
i2c@1720 {
@@ -169,6 +208,8 @@
compatible = "fsl,mpc5121-i2c", "fsl-i2c";
reg = <0x1720 0x20>;
interrupts = <10 0x8>;
+ clocks = <&clks MPC512x_CLK_I2C>;
+ clock-names = "ipg";
};
i2c@1740 {
@@ -177,6 +218,8 @@
compatible = "fsl,mpc5121-i2c", "fsl-i2c";
reg = <0x1740 0x20>;
interrupts = <11 0x8>;
+ clocks = <&clks MPC512x_CLK_I2C>;
+ clock-names = "ipg";
};
i2ccontrol@1760 {
@@ -188,30 +231,48 @@
compatible = "fsl,mpc5121-axe";
reg = <0x2000 0x100>;
interrupts = <42 0x8>;
+ clocks = <&clks MPC512x_CLK_AXE>;
+ clock-names = "ipg";
};
display@2100 {
compatible = "fsl,mpc5121-diu";
reg = <0x2100 0x100>;
interrupts = <64 0x8>;
+ clocks = <&clks MPC512x_CLK_DIU>;
+ clock-names = "ipg";
};
can@2300 {
compatible = "fsl,mpc5121-mscan";
reg = <0x2300 0x80>;
interrupts = <90 0x8>;
+ clocks = <&clks MPC512x_CLK_BDLC>,
+ <&clks MPC512x_CLK_IPS>,
+ <&clks MPC512x_CLK_SYS>,
+ <&clks MPC512x_CLK_REF>,
+ <&clks MPC512x_CLK_MSCAN2_MCLK>;
+ clock-names = "ipg", "ips", "sys", "ref", "mclk";
};
can@2380 {
compatible = "fsl,mpc5121-mscan";
reg = <0x2380 0x80>;
interrupts = <91 0x8>;
+ clocks = <&clks MPC512x_CLK_BDLC>,
+ <&clks MPC512x_CLK_IPS>,
+ <&clks MPC512x_CLK_SYS>,
+ <&clks MPC512x_CLK_REF>,
+ <&clks MPC512x_CLK_MSCAN3_MCLK>;
+ clock-names = "ipg", "ips", "sys", "ref", "mclk";
};
viu@2400 {
compatible = "fsl,mpc5121-viu";
reg = <0x2400 0x400>;
interrupts = <67 0x8>;
+ clocks = <&clks MPC512x_CLK_VIU>;
+ clock-names = "ipg";
};
mdio@2800 {
@@ -219,6 +280,8 @@
reg = <0x2800 0x800>;
#address-cells = <1>;
#size-cells = <0>;
+ clocks = <&clks MPC512x_CLK_FEC>;
+ clock-names = "per";
};
eth0: ethernet@2800 {
@@ -227,6 +290,8 @@
reg = <0x2800 0x800>;
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <4 0x8>;
+ clocks = <&clks MPC512x_CLK_FEC>;
+ clock-names = "per";
};
/* USB1 using external ULPI PHY */
@@ -238,6 +303,8 @@
interrupts = <43 0x8>;
dr_mode = "otg";
phy_type = "ulpi";
+ clocks = <&clks MPC512x_CLK_USB1>;
+ clock-names = "ipg";
};
/* USB0 using internal UTMI PHY */
@@ -249,6 +316,8 @@
interrupts = <44 0x8>;
dr_mode = "otg";
phy_type = "utmi_wide";
+ clocks = <&clks MPC512x_CLK_USB2>;
+ clock-names = "ipg";
};
/* IO control */
@@ -267,6 +336,8 @@
compatible = "fsl,mpc5121-pata";
reg = <0x10200 0x100>;
interrupts = <5 0x8>;
+ clocks = <&clks MPC512x_CLK_PATA>;
+ clock-names = "ipg";
};
/* 512x PSCs are not 52xx PSC compatible */
@@ -278,6 +349,9 @@
interrupts = <40 0x8>;
fsl,rx-fifo-size = <16>;
fsl,tx-fifo-size = <16>;
+ clocks = <&clks MPC512x_CLK_PSC0>,
+ <&clks MPC512x_CLK_PSC0_MCLK>;
+ clock-names = "ipg", "mclk";
};
/* PSC1 */
@@ -287,6 +361,9 @@
interrupts = <40 0x8>;
fsl,rx-fifo-size = <16>;
fsl,tx-fifo-size = <16>;
+ clocks = <&clks MPC512x_CLK_PSC1>,
+ <&clks MPC512x_CLK_PSC1_MCLK>;
+ clock-names = "ipg", "mclk";
};
/* PSC2 */
@@ -296,6 +373,9 @@
interrupts = <40 0x8>;
fsl,rx-fifo-size = <16>;
fsl,tx-fifo-size = <16>;
+ clocks = <&clks MPC512x_CLK_PSC2>,
+ <&clks MPC512x_CLK_PSC2_MCLK>;
+ clock-names = "ipg", "mclk";
};
/* PSC3 */
@@ -305,6 +385,9 @@
interrupts = <40 0x8>;
fsl,rx-fifo-size = <16>;
fsl,tx-fifo-size = <16>;
+ clocks = <&clks MPC512x_CLK_PSC3>,
+ <&clks MPC512x_CLK_PSC3_MCLK>;
+ clock-names = "ipg", "mclk";
};
/* PSC4 */
@@ -314,6 +397,9 @@
interrupts = <40 0x8>;
fsl,rx-fifo-size = <16>;
fsl,tx-fifo-size = <16>;
+ clocks = <&clks MPC512x_CLK_PSC4>,
+ <&clks MPC512x_CLK_PSC4_MCLK>;
+ clock-names = "ipg", "mclk";
};
/* PSC5 */
@@ -323,6 +409,9 @@
interrupts = <40 0x8>;
fsl,rx-fifo-size = <16>;
fsl,tx-fifo-size = <16>;
+ clocks = <&clks MPC512x_CLK_PSC5>,
+ <&clks MPC512x_CLK_PSC5_MCLK>;
+ clock-names = "ipg", "mclk";
};
/* PSC6 */
@@ -332,6 +421,9 @@
interrupts = <40 0x8>;
fsl,rx-fifo-size = <16>;
fsl,tx-fifo-size = <16>;
+ clocks = <&clks MPC512x_CLK_PSC6>,
+ <&clks MPC512x_CLK_PSC6_MCLK>;
+ clock-names = "ipg", "mclk";
};
/* PSC7 */
@@ -341,6 +433,9 @@
interrupts = <40 0x8>;
fsl,rx-fifo-size = <16>;
fsl,tx-fifo-size = <16>;
+ clocks = <&clks MPC512x_CLK_PSC7>,
+ <&clks MPC512x_CLK_PSC7_MCLK>;
+ clock-names = "ipg", "mclk";
};
/* PSC8 */
@@ -350,6 +445,9 @@
interrupts = <40 0x8>;
fsl,rx-fifo-size = <16>;
fsl,tx-fifo-size = <16>;
+ clocks = <&clks MPC512x_CLK_PSC8>,
+ <&clks MPC512x_CLK_PSC8_MCLK>;
+ clock-names = "ipg", "mclk";
};
/* PSC9 */
@@ -359,6 +457,9 @@
interrupts = <40 0x8>;
fsl,rx-fifo-size = <16>;
fsl,tx-fifo-size = <16>;
+ clocks = <&clks MPC512x_CLK_PSC9>,
+ <&clks MPC512x_CLK_PSC9_MCLK>;
+ clock-names = "ipg", "mclk";
};
/* PSC10 */
@@ -368,6 +469,9 @@
interrupts = <40 0x8>;
fsl,rx-fifo-size = <16>;
fsl,tx-fifo-size = <16>;
+ clocks = <&clks MPC512x_CLK_PSC10>,
+ <&clks MPC512x_CLK_PSC10_MCLK>;
+ clock-names = "ipg", "mclk";
};
/* PSC11 */
@@ -377,12 +481,17 @@
interrupts = <40 0x8>;
fsl,rx-fifo-size = <16>;
fsl,tx-fifo-size = <16>;
+ clocks = <&clks MPC512x_CLK_PSC11>,
+ <&clks MPC512x_CLK_PSC11_MCLK>;
+ clock-names = "ipg", "mclk";
};
pscfifo@11f00 {
compatible = "fsl,mpc5121-psc-fifo";
reg = <0x11f00 0x100>;
interrupts = <40 0x8>;
+ clocks = <&clks MPC512x_CLK_PSC_FIFO>;
+ clock-names = "ipg";
};
dma0: dma@14000 {
@@ -400,6 +509,8 @@
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
+ clocks = <&clks MPC512x_CLK_PCI>;
+ clock-names = "ipg";
reg = <0x80008500 0x100 /* internal registers */
0x80008300 0x8>; /* config space access registers */
diff --git a/arch/powerpc/boot/dts/mpc5125twr.dts b/arch/powerpc/boot/dts/mpc5125twr.dts
index a618dfc13e4..e4f29747174 100644
--- a/arch/powerpc/boot/dts/mpc5125twr.dts
+++ b/arch/powerpc/boot/dts/mpc5125twr.dts
@@ -12,6 +12,8 @@
* option) any later version.
*/
+#include <dt-bindings/clock/mpc512x-clock.h>
+
/dts-v1/;
/ {
@@ -54,6 +56,17 @@
reg = <0x30000000 0x08000>; // 32K at 0x30000000
};
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ osc: osc {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <33000000>;
+ };
+ };
+
soc@80000000 {
compatible = "fsl,mpc5121-immr";
#address-cells = <1>;
@@ -87,9 +100,12 @@
reg = <0xe00 0x100>;
};
- clock@f00 { // Clock control
+ clks: clock@f00 { // Clock control
compatible = "fsl,mpc5121-clock";
reg = <0xf00 0x100>;
+ #clock-cells = <1>;
+ clocks = <&osc>;
+ clock-names = "osc";
};
pmc@1000{ // Power Management Controller
@@ -114,18 +130,33 @@
compatible = "fsl,mpc5121-mscan";
interrupts = <12 0x8>;
reg = <0x1300 0x80>;
+ clocks = <&clks MPC512x_CLK_BDLC>,
+ <&clks MPC512x_CLK_IPS>,
+ <&clks MPC512x_CLK_SYS>,
+ <&clks MPC512x_CLK_REF>,
+ <&clks MPC512x_CLK_MSCAN0_MCLK>;
+ clock-names = "ipg", "ips", "sys", "ref", "mclk";
};
can@1380 {
compatible = "fsl,mpc5121-mscan";
interrupts = <13 0x8>;
reg = <0x1380 0x80>;
+ clocks = <&clks MPC512x_CLK_BDLC>,
+ <&clks MPC512x_CLK_IPS>,
+ <&clks MPC512x_CLK_SYS>,
+ <&clks MPC512x_CLK_REF>,
+ <&clks MPC512x_CLK_MSCAN1_MCLK>;
+ clock-names = "ipg", "ips", "sys", "ref", "mclk";
};
sdhc@1500 {
compatible = "fsl,mpc5121-sdhc";
interrupts = <8 0x8>;
reg = <0x1500 0x100>;
+ clocks = <&clks MPC512x_CLK_IPS>,
+ <&clks MPC512x_CLK_SDHC>;
+ clock-names = "ipg", "per";
};
i2c@1700 {
@@ -134,6 +165,8 @@
compatible = "fsl,mpc5121-i2c", "fsl-i2c";
reg = <0x1700 0x20>;
interrupts = <0x9 0x8>;
+ clocks = <&clks MPC512x_CLK_I2C>;
+ clock-names = "ipg";
};
i2c@1720 {
@@ -142,6 +175,8 @@
compatible = "fsl,mpc5121-i2c", "fsl-i2c";
reg = <0x1720 0x20>;
interrupts = <0xa 0x8>;
+ clocks = <&clks MPC512x_CLK_I2C>;
+ clock-names = "ipg";
};
i2c@1740 {
@@ -150,6 +185,8 @@
compatible = "fsl,mpc5121-i2c", "fsl-i2c";
reg = <0x1740 0x20>;
interrupts = <0xb 0x8>;
+ clocks = <&clks MPC512x_CLK_I2C>;
+ clock-names = "ipg";
};
i2ccontrol@1760 {
@@ -161,6 +198,8 @@
compatible = "fsl,mpc5121-diu";
reg = <0x2100 0x100>;
interrupts = <64 0x8>;
+ clocks = <&clks MPC512x_CLK_DIU>;
+ clock-names = "ipg";
};
mdio@2800 {
@@ -180,6 +219,8 @@
interrupts = <4 0x8>;
phy-handle = < &phy0 >;
phy-connection-type = "rmii";
+ clocks = <&clks MPC512x_CLK_FEC>;
+ clock-names = "per";
};
// IO control
@@ -200,6 +241,8 @@
interrupts = <43 0x8>;
dr_mode = "host";
phy_type = "ulpi";
+ clocks = <&clks MPC512x_CLK_USB1>;
+ clock-names = "ipg";
status = "disabled";
};
@@ -211,6 +254,9 @@
interrupts = <40 0x8>;
fsl,rx-fifo-size = <16>;
fsl,tx-fifo-size = <16>;
+ clocks = <&clks MPC512x_CLK_PSC1>,
+ <&clks MPC512x_CLK_PSC1_MCLK>;
+ clock-names = "ipg", "mclk";
};
// PSC9 uart1 aka ttyPSC1
@@ -220,12 +266,17 @@
interrupts = <40 0x8>;
fsl,rx-fifo-size = <16>;
fsl,tx-fifo-size = <16>;
+ clocks = <&clks MPC512x_CLK_PSC9>,
+ <&clks MPC512x_CLK_PSC9_MCLK>;
+ clock-names = "ipg", "mclk";
};
pscfifo@11f00 {
compatible = "fsl,mpc5121-psc-fifo";
reg = <0x11f00 0x100>;
interrupts = <40 0x8>;
+ clocks = <&clks MPC512x_CLK_PSC_FIFO>;
+ clock-names = "ipg";
};
dma@14000 {
diff --git a/arch/powerpc/boot/dts/mpc7448hpc2.dts b/arch/powerpc/boot/dts/mpc7448hpc2.dts
index 2544f3ecd6e..20a0d22df47 100644
--- a/arch/powerpc/boot/dts/mpc7448hpc2.dts
+++ b/arch/powerpc/boot/dts/mpc7448hpc2.dts
@@ -68,7 +68,6 @@
};
MDIO: mdio@6000 {
- device_type = "mdio";
compatible = "tsi108-mdio";
reg = <0x6000 0x50>;
#address-cells = <1>;
diff --git a/arch/powerpc/boot/dts/mpc8272ads.dts b/arch/powerpc/boot/dts/mpc8272ads.dts
index e802ebd88cb..6d2cddf64cf 100644
--- a/arch/powerpc/boot/dts/mpc8272ads.dts
+++ b/arch/powerpc/boot/dts/mpc8272ads.dts
@@ -182,7 +182,6 @@
};
mdio@10d40 {
- device_type = "mdio";
compatible = "fsl,mpc8272ads-mdio-bitbang",
"fsl,mpc8272-mdio-bitbang",
"fsl,cpm2-mdio-bitbang";
@@ -196,14 +195,12 @@
interrupt-parent = <&PIC>;
interrupts = <23 8>;
reg = <0x0>;
- device_type = "ethernet-phy";
};
PHY1: ethernet-phy@1 {
interrupt-parent = <&PIC>;
interrupts = <23 8>;
reg = <0x3>;
- device_type = "ethernet-phy";
};
};
diff --git a/arch/powerpc/boot/dts/mpc8308_p1m.dts b/arch/powerpc/boot/dts/mpc8308_p1m.dts
index 22b0832b6c3..651e4f55acd 100644
--- a/arch/powerpc/boot/dts/mpc8308_p1m.dts
+++ b/arch/powerpc/boot/dts/mpc8308_p1m.dts
@@ -189,13 +189,11 @@
interrupt-parent = <&ipic>;
interrupts = <17 0x8>;
reg = <0x1>;
- device_type = "ethernet-phy";
};
phy2: ethernet-phy@2 {
interrupt-parent = <&ipic>;
interrupts = <19 0x8>;
reg = <0x2>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/mpc8308rdb.dts b/arch/powerpc/boot/dts/mpc8308rdb.dts
index f66d10d95a8..9ce45f2efd3 100644
--- a/arch/powerpc/boot/dts/mpc8308rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8308rdb.dts
@@ -166,7 +166,6 @@
interrupt-parent = <&ipic>;
interrupts = <17 0x8>;
reg = <0x2>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/mpc8313erdb.dts b/arch/powerpc/boot/dts/mpc8313erdb.dts
index 1c836c6c5be..4b635dc4ecd 100644
--- a/arch/powerpc/boot/dts/mpc8313erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8313erdb.dts
@@ -217,7 +217,6 @@
interrupt-parent = <&ipic>;
interrupts = <20 0x8>;
reg = <0x4>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts
index 811848e93ae..43546844ea5 100644
--- a/arch/powerpc/boot/dts/mpc8315erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8315erdb.dts
@@ -216,14 +216,12 @@
interrupt-parent = <&ipic>;
interrupts = <20 0x8>;
reg = <0x0>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@1 {
interrupt-parent = <&ipic>;
interrupts = <19 0x8>;
reg = <0x1>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
diff --git a/arch/powerpc/boot/dts/mpc832x_mds.dts b/arch/powerpc/boot/dts/mpc832x_mds.dts
index da9c72ddc34..0793cdf0d46 100644
--- a/arch/powerpc/boot/dts/mpc832x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc832x_mds.dts
@@ -356,13 +356,11 @@
interrupt-parent = <&ipic>;
interrupts = <17 0x8>;
reg = <0x3>;
- device_type = "ethernet-phy";
};
phy4: ethernet-phy@04 {
interrupt-parent = <&ipic>;
interrupts = <18 0x8>;
reg = <0x4>;
- device_type = "ethernet-phy";
};
};
diff --git a/arch/powerpc/boot/dts/mpc832x_rdb.dts b/arch/powerpc/boot/dts/mpc832x_rdb.dts
index ff7b15b340a..91df1eb1666 100644
--- a/arch/powerpc/boot/dts/mpc832x_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc832x_rdb.dts
@@ -314,13 +314,11 @@
interrupt-parent = <&ipic>;
interrupts = <0>;
reg = <0x0>;
- device_type = "ethernet-phy";
};
phy04:ethernet-phy@04 {
interrupt-parent = <&ipic>;
interrupts = <0>;
reg = <0x4>;
- device_type = "ethernet-phy";
};
};
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
index 2608679d0d4..cf8542401a3 100644
--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
@@ -240,7 +240,6 @@
interrupt-parent = <&ipic>;
interrupts = <18 0x8>;
reg = <0x1c>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
diff --git a/arch/powerpc/boot/dts/mpc8349emitxgp.dts b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
index 6cd044d8fb8..f00066dcc8d 100644
--- a/arch/powerpc/boot/dts/mpc8349emitxgp.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
@@ -176,7 +176,6 @@
interrupt-parent = <&ipic>;
interrupts = <18 0x8>;
reg = <0x1c>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
diff --git a/arch/powerpc/boot/dts/mpc834x_mds.dts b/arch/powerpc/boot/dts/mpc834x_mds.dts
index 4552864082c..4843c3ff716 100644
--- a/arch/powerpc/boot/dts/mpc834x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc834x_mds.dts
@@ -193,14 +193,12 @@
interrupt-parent = <&ipic>;
interrupts = <17 0x8>;
reg = <0x0>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@1 {
interrupt-parent = <&ipic>;
interrupts = <18 0x8>;
reg = <0x1>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
diff --git a/arch/powerpc/boot/dts/mpc836x_mds.dts b/arch/powerpc/boot/dts/mpc836x_mds.dts
index 81dd513d630..ecb6ccd3a6a 100644
--- a/arch/powerpc/boot/dts/mpc836x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc836x_mds.dts
@@ -397,13 +397,11 @@
interrupt-parent = <&ipic>;
interrupts = <17 0x8>;
reg = <0x0>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@01 {
interrupt-parent = <&ipic>;
interrupts = <18 0x8>;
reg = <0x1>;
- device_type = "ethernet-phy";
};
tbi-phy@2 {
device_type = "tbi-phy";
diff --git a/arch/powerpc/boot/dts/mpc836x_rdk.dts b/arch/powerpc/boot/dts/mpc836x_rdk.dts
index b6e9aec1d86..daeacbdcf8b 100644
--- a/arch/powerpc/boot/dts/mpc836x_rdk.dts
+++ b/arch/powerpc/boot/dts/mpc836x_rdk.dts
@@ -332,25 +332,21 @@
reg = <0x2120 0x18>;
phy1: ethernet-phy@1 {
- device_type = "ethernet-phy";
compatible = "national,DP83848VV";
reg = <1>;
};
phy2: ethernet-phy@2 {
- device_type = "ethernet-phy";
compatible = "broadcom,BCM5481UA2KMLG";
reg = <2>;
};
phy3: ethernet-phy@3 {
- device_type = "ethernet-phy";
compatible = "national,DP83848VV";
reg = <3>;
};
phy4: ethernet-phy@4 {
- device_type = "ethernet-phy";
compatible = "broadcom,BCM5481UA2KMLG";
reg = <4>;
};
diff --git a/arch/powerpc/boot/dts/mpc8377_mds.dts b/arch/powerpc/boot/dts/mpc8377_mds.dts
index cfccef57cd1..c2c062e8175 100644
--- a/arch/powerpc/boot/dts/mpc8377_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8377_mds.dts
@@ -225,14 +225,12 @@
interrupt-parent = <&ipic>;
interrupts = <17 0x8>;
reg = <0x2>;
- device_type = "ethernet-phy";
};
phy3: ethernet-phy@3 {
interrupt-parent = <&ipic>;
interrupts = <18 0x8>;
reg = <0x3>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
diff --git a/arch/powerpc/boot/dts/mpc8377_rdb.dts b/arch/powerpc/boot/dts/mpc8377_rdb.dts
index 353deff1b7f..2b4b6532d69 100644
--- a/arch/powerpc/boot/dts/mpc8377_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8377_rdb.dts
@@ -277,7 +277,6 @@
interrupt-parent = <&ipic>;
interrupts = <17 0x8>;
reg = <0x2>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
diff --git a/arch/powerpc/boot/dts/mpc8377_wlan.dts b/arch/powerpc/boot/dts/mpc8377_wlan.dts
index ef4a305a0d0..c0c790168b9 100644
--- a/arch/powerpc/boot/dts/mpc8377_wlan.dts
+++ b/arch/powerpc/boot/dts/mpc8377_wlan.dts
@@ -253,14 +253,12 @@
interrupt-parent = <&ipic>;
interrupts = <17 0x8>;
reg = <0x2>;
- device_type = "ethernet-phy";
};
phy3: ethernet-phy@3 {
interrupt-parent = <&ipic>;
interrupts = <18 0x8>;
reg = <0x3>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
diff --git a/arch/powerpc/boot/dts/mpc8378_mds.dts b/arch/powerpc/boot/dts/mpc8378_mds.dts
index 538fcb92733..1b82b77f941 100644
--- a/arch/powerpc/boot/dts/mpc8378_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8378_mds.dts
@@ -264,14 +264,12 @@
interrupt-parent = <&ipic>;
interrupts = <17 0x8>;
reg = <0x2>;
- device_type = "ethernet-phy";
};
phy3: ethernet-phy@3 {
interrupt-parent = <&ipic>;
interrupts = <18 0x8>;
reg = <0x3>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
diff --git a/arch/powerpc/boot/dts/mpc8378_rdb.dts b/arch/powerpc/boot/dts/mpc8378_rdb.dts
index 32333a908f3..74b6a535a41 100644
--- a/arch/powerpc/boot/dts/mpc8378_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8378_rdb.dts
@@ -277,7 +277,6 @@
interrupt-parent = <&ipic>;
interrupts = <17 0x8>;
reg = <0x2>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
diff --git a/arch/powerpc/boot/dts/mpc8379_mds.dts b/arch/powerpc/boot/dts/mpc8379_mds.dts
index 5387092fdfb..38e5048d65d 100644
--- a/arch/powerpc/boot/dts/mpc8379_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8379_mds.dts
@@ -262,14 +262,12 @@
interrupt-parent = <&ipic>;
interrupts = <17 0x8>;
reg = <0x2>;
- device_type = "ethernet-phy";
};
phy3: ethernet-phy@3 {
interrupt-parent = <&ipic>;
interrupts = <18 0x8>;
reg = <0x3>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
diff --git a/arch/powerpc/boot/dts/mpc8379_rdb.dts b/arch/powerpc/boot/dts/mpc8379_rdb.dts
index 46224c2430f..3b5cbac8536 100644
--- a/arch/powerpc/boot/dts/mpc8379_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8379_rdb.dts
@@ -275,7 +275,6 @@
interrupt-parent = <&ipic>;
interrupts = <17 0x8>;
reg = <0x2>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
diff --git a/arch/powerpc/boot/dts/mpc8536ds.dtsi b/arch/powerpc/boot/dts/mpc8536ds.dtsi
index 7c3dde84d19..937ad7e4611 100644
--- a/arch/powerpc/boot/dts/mpc8536ds.dtsi
+++ b/arch/powerpc/boot/dts/mpc8536ds.dtsi
@@ -200,12 +200,10 @@
phy0: ethernet-phy@0 {
interrupts = <10 0x1 0 0>;
reg = <0>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@1 {
interrupts = <10 0x1 0 0>;
reg = <1>;
- device_type = "ethernet-phy";
};
sgmii_phy0: sgmii-phy@0 {
interrupts = <6 1 0 0>;
diff --git a/arch/powerpc/boot/dts/mpc8540ads.dts b/arch/powerpc/boot/dts/mpc8540ads.dts
index 2d31863accf..7ce274c9a2d 100644
--- a/arch/powerpc/boot/dts/mpc8540ads.dts
+++ b/arch/powerpc/boot/dts/mpc8540ads.dts
@@ -165,19 +165,16 @@
interrupt-parent = <&mpic>;
interrupts = <5 1>;
reg = <0x0>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@1 {
interrupt-parent = <&mpic>;
interrupts = <5 1>;
reg = <0x1>;
- device_type = "ethernet-phy";
};
phy3: ethernet-phy@3 {
interrupt-parent = <&mpic>;
interrupts = <7 1>;
reg = <0x3>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/mpc8541cds.dts b/arch/powerpc/boot/dts/mpc8541cds.dts
index 1c03c266737..4d35a3e0fb0 100644
--- a/arch/powerpc/boot/dts/mpc8541cds.dts
+++ b/arch/powerpc/boot/dts/mpc8541cds.dts
@@ -165,13 +165,11 @@
interrupt-parent = <&mpic>;
interrupts = <5 1>;
reg = <0x0>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@1 {
interrupt-parent = <&mpic>;
interrupts = <5 1>;
reg = <0x1>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/mpc8544ds.dtsi b/arch/powerpc/boot/dts/mpc8544ds.dtsi
index b219d035d79..47d986b041f 100644
--- a/arch/powerpc/boot/dts/mpc8544ds.dtsi
+++ b/arch/powerpc/boot/dts/mpc8544ds.dtsi
@@ -82,12 +82,10 @@
phy0: ethernet-phy@0 {
interrupts = <10 1 0 0>;
reg = <0x0>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@1 {
interrupts = <10 1 0 0>;
reg = <0x1>;
- device_type = "ethernet-phy";
};
sgmii_phy0: sgmii-phy@0 {
diff --git a/arch/powerpc/boot/dts/mpc8548cds.dtsi b/arch/powerpc/boot/dts/mpc8548cds.dtsi
index c61f525e474..3bc7d471122 100644
--- a/arch/powerpc/boot/dts/mpc8548cds.dtsi
+++ b/arch/powerpc/boot/dts/mpc8548cds.dtsi
@@ -109,22 +109,18 @@
phy0: ethernet-phy@0 {
interrupts = <5 1 0 0>;
reg = <0x0>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@1 {
interrupts = <5 1 0 0>;
reg = <0x1>;
- device_type = "ethernet-phy";
};
phy2: ethernet-phy@2 {
interrupts = <5 1 0 0>;
reg = <0x2>;
- device_type = "ethernet-phy";
};
phy3: ethernet-phy@3 {
interrupts = <5 1 0 0>;
reg = <0x3>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/mpc8555cds.dts b/arch/powerpc/boot/dts/mpc8555cds.dts
index 36a7ea138c2..f115f21cb0a 100644
--- a/arch/powerpc/boot/dts/mpc8555cds.dts
+++ b/arch/powerpc/boot/dts/mpc8555cds.dts
@@ -165,13 +165,11 @@
interrupt-parent = <&mpic>;
interrupts = <5 1>;
reg = <0x0>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@1 {
interrupt-parent = <&mpic>;
interrupts = <5 1>;
reg = <0x1>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/mpc8560ads.dts b/arch/powerpc/boot/dts/mpc8560ads.dts
index 1a43f5a968f..0d70921d612 100644
--- a/arch/powerpc/boot/dts/mpc8560ads.dts
+++ b/arch/powerpc/boot/dts/mpc8560ads.dts
@@ -154,25 +154,21 @@
interrupt-parent = <&mpic>;
interrupts = <5 1>;
reg = <0x0>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@1 {
interrupt-parent = <&mpic>;
interrupts = <5 1>;
reg = <0x1>;
- device_type = "ethernet-phy";
};
phy2: ethernet-phy@2 {
interrupt-parent = <&mpic>;
interrupts = <7 1>;
reg = <0x2>;
- device_type = "ethernet-phy";
};
phy3: ethernet-phy@3 {
interrupt-parent = <&mpic>;
interrupts = <7 1>;
reg = <0x3>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/mpc8568mds.dts b/arch/powerpc/boot/dts/mpc8568mds.dts
index 09598bb5d44..bead2b655b9 100644
--- a/arch/powerpc/boot/dts/mpc8568mds.dts
+++ b/arch/powerpc/boot/dts/mpc8568mds.dts
@@ -91,22 +91,18 @@
phy0: ethernet-phy@7 {
interrupts = <1 1 0 0>;
reg = <0x7>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@1 {
interrupts = <2 1 0 0>;
reg = <0x1>;
- device_type = "ethernet-phy";
};
phy2: ethernet-phy@2 {
interrupts = <1 1 0 0>;
reg = <0x2>;
- device_type = "ethernet-phy";
};
phy3: ethernet-phy@3 {
interrupts = <2 1 0 0>;
reg = <0x3>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
@@ -236,25 +232,21 @@
interrupt-parent = <&mpic>;
interrupts = <1 1 0 0>;
reg = <0x7>;
- device_type = "ethernet-phy";
};
qe_phy1: ethernet-phy@01 {
interrupt-parent = <&mpic>;
interrupts = <2 1 0 0>;
reg = <0x1>;
- device_type = "ethernet-phy";
};
qe_phy2: ethernet-phy@02 {
interrupt-parent = <&mpic>;
interrupts = <1 1 0 0>;
reg = <0x2>;
- device_type = "ethernet-phy";
};
qe_phy3: ethernet-phy@03 {
interrupt-parent = <&mpic>;
interrupts = <2 1 0 0>;
reg = <0x3>;
- device_type = "ethernet-phy";
};
};
};
diff --git a/arch/powerpc/boot/dts/mpc8569mds.dts b/arch/powerpc/boot/dts/mpc8569mds.dts
index fe0d60935e9..d0dcdafa5eb 100644
--- a/arch/powerpc/boot/dts/mpc8569mds.dts
+++ b/arch/powerpc/boot/dts/mpc8569mds.dts
@@ -276,33 +276,27 @@
interrupt-parent = <&mpic>;
interrupts = <1 1 0 0>;
reg = <0x7>;
- device_type = "ethernet-phy";
};
qe_phy1: ethernet-phy@01 {
interrupt-parent = <&mpic>;
interrupts = <2 1 0 0>;
reg = <0x1>;
- device_type = "ethernet-phy";
};
qe_phy2: ethernet-phy@02 {
interrupt-parent = <&mpic>;
interrupts = <3 1 0 0>;
reg = <0x2>;
- device_type = "ethernet-phy";
};
qe_phy3: ethernet-phy@03 {
interrupt-parent = <&mpic>;
interrupts = <4 1 0 0>;
reg = <0x3>;
- device_type = "ethernet-phy";
};
qe_phy5: ethernet-phy@04 {
reg = <0x04>;
- device_type = "ethernet-phy";
};
qe_phy7: ethernet-phy@06 {
reg = <0x6>;
- device_type = "ethernet-phy";
};
tbi1: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/mpc8641_hpcn.dts b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
index 1e8666ccbed..1c03060dd0b 100644
--- a/arch/powerpc/boot/dts/mpc8641_hpcn.dts
+++ b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
@@ -211,25 +211,21 @@
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <0>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@1 {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <1>;
- device_type = "ethernet-phy";
};
phy2: ethernet-phy@2 {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <2>;
- device_type = "ethernet-phy";
};
phy3: ethernet-phy@3 {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <3>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/mpc8641_hpcn_36b.dts b/arch/powerpc/boot/dts/mpc8641_hpcn_36b.dts
index fd4cd4da60b..bb575e28042 100644
--- a/arch/powerpc/boot/dts/mpc8641_hpcn_36b.dts
+++ b/arch/powerpc/boot/dts/mpc8641_hpcn_36b.dts
@@ -211,25 +211,21 @@
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <0>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@1 {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <1>;
- device_type = "ethernet-phy";
};
phy2: ethernet-phy@2 {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <2>;
- device_type = "ethernet-phy";
};
phy3: ethernet-phy@3 {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <3>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/mpc866ads.dts b/arch/powerpc/boot/dts/mpc866ads.dts
index bd700651f36..34c1f48b1a0 100644
--- a/arch/powerpc/boot/dts/mpc866ads.dts
+++ b/arch/powerpc/boot/dts/mpc866ads.dts
@@ -74,7 +74,6 @@
#size-cells = <0>;
PHY: ethernet-phy@f {
reg = <0xf>;
- device_type = "ethernet-phy";
};
};
diff --git a/arch/powerpc/boot/dts/mpc885ads.dts b/arch/powerpc/boot/dts/mpc885ads.dts
index b123e9f7a5a..4e93bd961e0 100644
--- a/arch/powerpc/boot/dts/mpc885ads.dts
+++ b/arch/powerpc/boot/dts/mpc885ads.dts
@@ -86,17 +86,14 @@
PHY0: ethernet-phy@0 {
reg = <0x0>;
- device_type = "ethernet-phy";
};
PHY1: ethernet-phy@1 {
reg = <0x1>;
- device_type = "ethernet-phy";
};
PHY2: ethernet-phy@2 {
reg = <0x2>;
- device_type = "ethernet-phy";
};
};
diff --git a/arch/powerpc/boot/dts/mvme5100.dts b/arch/powerpc/boot/dts/mvme5100.dts
new file mode 100644
index 00000000000..1ecb341a232
--- /dev/null
+++ b/arch/powerpc/boot/dts/mvme5100.dts
@@ -0,0 +1,185 @@
+/*
+ * Device Tree Source for Motorola/Emerson MVME5100.
+ *
+ * Copyright 2013 CSC Australia Pty. Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+/ {
+ model = "MVME5100";
+ compatible = "MVME5100";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ aliases {
+ serial0 = &serial0;
+ pci0 = &pci0;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,7410 {
+ device_type = "cpu";
+ reg = <0x0>;
+ /* Following required by dtc but not used */
+ d-cache-line-size = <32>;
+ i-cache-line-size = <32>;
+ i-cache-size = <32768>;
+ d-cache-size = <32768>;
+ timebase-frequency = <25000000>;
+ clock-frequency = <500000000>;
+ bus-frequency = <100000000>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x20000000>;
+ };
+
+ hawk@fef80000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "hawk-bridge", "simple-bus";
+ ranges = <0x0 0xfef80000 0x10000>;
+ reg = <0xfef80000 0x10000>;
+
+ serial0: serial@8000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x8000 0x80>;
+ reg-shift = <4>;
+ clock-frequency = <1843200>;
+ current-speed = <9600>;
+ interrupts = <1 1>; // IRQ1 Level Active Low.
+ interrupt-parent = <&mpic>;
+ };
+
+ serial1: serial@8200 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x8200 0x80>;
+ reg-shift = <4>;
+ clock-frequency = <1843200>;
+ current-speed = <9600>;
+ interrupts = <1 1>; // IRQ1 Level Active Low.
+ interrupt-parent = <&mpic>;
+ };
+
+ mpic: interrupt-controller@f3f80000 {
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ device_type = "open-pic";
+ compatible = "chrp,open-pic";
+ interrupt-controller;
+ reg = <0xf3f80000 0x40000>;
+ };
+ };
+
+ pci0: pci@feff0000 {
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ device_type = "pci";
+ compatible = "hawk-pci";
+ reg = <0xfec00000 0x400000>;
+ 8259-interrupt-acknowledge = <0xfeff0030>;
+ ranges = <0x1000000 0x0 0x0 0xfe000000 0x0 0x800000
+ 0x2000000 0x0 0x80000000 0x80000000 0x0 0x74000000>;
+ bus-range = <0 255>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+
+ /*
+ * This definition (IDSEL 11) duplicates the
+ * interrupts definition in the i8259
+ * interrupt controller below.
+ *
+ * Do not change the interrupt sense/polarity from
+ * 0x2 to anything else, doing so will cause endless
+ * "spurious" i8259 interrupts to be fielded.
+ */
+ // IDSEL 11 - iPMC712 PCI/ISA Bridge
+ 0x5800 0x0 0x0 0x1 &mpic 0x0 0x2
+ 0x5800 0x0 0x0 0x2 &mpic 0x0 0x2
+ 0x5800 0x0 0x0 0x3 &mpic 0x0 0x2
+ 0x5800 0x0 0x0 0x4 &mpic 0x0 0x2
+
+ /* IDSEL 12 - Not Used */
+
+ /* IDSEL 13 - Universe VME Bridge */
+ 0x6800 0x0 0x0 0x1 &mpic 0x5 0x1
+ 0x6800 0x0 0x0 0x2 &mpic 0x6 0x1
+ 0x6800 0x0 0x0 0x3 &mpic 0x7 0x1
+ 0x6800 0x0 0x0 0x4 &mpic 0x8 0x1
+
+ /* IDSEL 14 - ENET 1 */
+ 0x7000 0x0 0x0 0x1 &mpic 0x2 0x1
+
+ /* IDSEL 15 - Not Used */
+
+ /* IDSEL 16 - PMC Slot 1 */
+ 0x8000 0x0 0x0 0x1 &mpic 0x9 0x1
+ 0x8000 0x0 0x0 0x2 &mpic 0xa 0x1
+ 0x8000 0x0 0x0 0x3 &mpic 0xb 0x1
+ 0x8000 0x0 0x0 0x4 &mpic 0xc 0x1
+
+ /* IDSEL 17 - PMC Slot 2 */
+ 0x8800 0x0 0x0 0x1 &mpic 0xc 0x1
+ 0x8800 0x0 0x0 0x2 &mpic 0x9 0x1
+ 0x8800 0x0 0x0 0x3 &mpic 0xa 0x1
+ 0x8800 0x0 0x0 0x4 &mpic 0xb 0x1
+
+ /* IDSEL 18 - Not Used */
+
+ /* IDSEL 19 - ENET 2 */
+ 0x9800 0x0 0x0 0x1 &mpic 0xd 0x1
+
+ /* IDSEL 20 - PMCSPAN (PCI-X) */
+ 0xa000 0x0 0x0 0x1 &mpic 0x9 0x1
+ 0xa000 0x0 0x0 0x2 &mpic 0xa 0x1
+ 0xa000 0x0 0x0 0x3 &mpic 0xb 0x1
+ 0xa000 0x0 0x0 0x4 &mpic 0xc 0x1
+
+ >;
+
+ isa {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ #interrupt-cells = <2>;
+ device_type = "isa";
+ compatible = "isa";
+ ranges = <0x00000001 0 0x01000000 0 0x00000000 0x00001000>;
+ interrupt-parent = <&i8259>;
+
+ i8259: interrupt-controller@20 {
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ interrupts = <0 2>;
+ device_type = "interrupt-controller";
+ compatible = "chrp,iic";
+ interrupt-controller;
+ reg = <1 0x00000020 0x00000002
+ 1 0x000000a0 0x00000002
+ 1 0x000004d0 0x00000002>;
+ interrupt-parent = <&mpic>;
+ };
+
+ };
+
+ };
+
+ chosen {
+ linux,stdout-path = &serial0;
+ };
+
+};
diff --git a/arch/powerpc/boot/dts/p1010rdb-pa.dts b/arch/powerpc/boot/dts/p1010rdb-pa.dts
new file mode 100644
index 00000000000..767d4c03285
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb-pa.dts
@@ -0,0 +1,23 @@
+/*
+ * P1010 RDB Device Tree Source
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/include/ "fsl/p1010si-pre.dtsi"
+
+/ {
+ model = "fsl,P1010RDB";
+ compatible = "fsl,P1010RDB";
+
+ /include/ "p1010rdb_32b.dtsi"
+};
+
+/include/ "p1010rdb.dtsi"
+/include/ "p1010rdb-pa.dtsi"
+/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010rdb-pa.dtsi b/arch/powerpc/boot/dts/p1010rdb-pa.dtsi
new file mode 100644
index 00000000000..434fb2d5857
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb-pa.dtsi
@@ -0,0 +1,85 @@
+/*
+ * P1010 RDB Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&ifc_nand {
+ partition@0 {
+ /* This location must not be altered */
+ /* 1MB for u-boot Bootloader Image */
+ reg = <0x0 0x00100000>;
+ label = "NAND U-Boot Image";
+ read-only;
+ };
+
+ partition@100000 {
+ /* 1MB for DTB Image */
+ reg = <0x00100000 0x00100000>;
+ label = "NAND DTB Image";
+ };
+
+ partition@200000 {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00200000 0x00400000>;
+ label = "NAND Linux Kernel Image";
+ };
+
+ partition@600000 {
+ /* 4MB for Compressed Root file System Image */
+ reg = <0x00600000 0x00400000>;
+ label = "NAND Compressed RFS Image";
+ };
+
+ partition@a00000 {
+ /* 15MB for JFFS2 based Root file System */
+ reg = <0x00a00000 0x00f00000>;
+ label = "NAND JFFS2 Root File System";
+ };
+
+ partition@1900000 {
+ /* 7MB for User Area */
+ reg = <0x01900000 0x00700000>;
+ label = "NAND User area";
+ };
+};
+
+&phy0 {
+ interrupts = <1 1 0 0>;
+};
+
+&phy1 {
+ interrupts = <2 1 0 0>;
+};
+
+&phy2 {
+ interrupts = <4 1 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/p1010rdb_36b.dts b/arch/powerpc/boot/dts/p1010rdb-pa_36b.dts
index 64776f4a465..3033371bc00 100644
--- a/arch/powerpc/boot/dts/p1010rdb_36b.dts
+++ b/arch/powerpc/boot/dts/p1010rdb-pa_36b.dts
@@ -38,52 +38,9 @@
model = "fsl,P1010RDB";
compatible = "fsl,P1010RDB";
- memory {
- device_type = "memory";
- };
-
- board_ifc: ifc: ifc@fffe1e000 {
- /* NOR, NAND Flashes and CPLD on board */
- ranges = <0x0 0x0 0xf 0xee000000 0x02000000
- 0x1 0x0 0xf 0xff800000 0x00010000
- 0x3 0x0 0xf 0xffb00000 0x00000020>;
- reg = <0xf 0xffe1e000 0 0x2000>;
- };
-
- board_soc: soc: soc@fffe00000 {
- ranges = <0x0 0xf 0xffe00000 0x100000>;
- };
-
- pci0: pcie@fffe09000 {
- reg = <0xf 0xffe09000 0 0x1000>;
- ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
- pcie@0 {
- ranges = <0x2000000 0x0 0xc0000000
- 0x2000000 0x0 0xc0000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x100000>;
- };
- };
-
- pci1: pcie@fffe0a000 {
- reg = <0xf 0xffe0a000 0 0x1000>;
- ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
- pcie@0 {
- ranges = <0x2000000 0x0 0xc0000000
- 0x2000000 0x0 0xc0000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x100000>;
- };
- };
+ /include/ "p1010rdb_36b.dtsi"
};
/include/ "p1010rdb.dtsi"
+/include/ "p1010rdb-pa.dtsi"
/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010rdb-pb.dts b/arch/powerpc/boot/dts/p1010rdb-pb.dts
new file mode 100644
index 00000000000..6eeb7d3185b
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb-pb.dts
@@ -0,0 +1,35 @@
+/*
+ * P1010 RDB Device Tree Source
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/include/ "fsl/p1010si-pre.dtsi"
+
+/ {
+ model = "fsl,P1010RDB-PB";
+ compatible = "fsl,P1010RDB-PB";
+
+ /include/ "p1010rdb_32b.dtsi"
+};
+
+/include/ "p1010rdb.dtsi"
+
+&phy0 {
+ interrupts = <0 1 0 0>;
+};
+
+&phy1 {
+ interrupts = <2 1 0 0>;
+};
+
+&phy2 {
+ interrupts = <1 1 0 0>;
+};
+
+/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010rdb-pb_36b.dts b/arch/powerpc/boot/dts/p1010rdb-pb_36b.dts
new file mode 100644
index 00000000000..7ab3c907b32
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb-pb_36b.dts
@@ -0,0 +1,58 @@
+/*
+ * P1010 RDB Device Tree Source (36-bit address map)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1010si-pre.dtsi"
+
+/ {
+ model = "fsl,P1010RDB-PB";
+ compatible = "fsl,P1010RDB-PB";
+
+ /include/ "p1010rdb_36b.dtsi"
+};
+
+/include/ "p1010rdb.dtsi"
+
+&phy0 {
+ interrupts = <0 1 0 0>;
+};
+
+&phy1 {
+ interrupts = <2 1 0 0>;
+};
+
+&phy2 {
+ interrupts = <1 1 0 0>;
+};
+
+/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010rdb.dts b/arch/powerpc/boot/dts/p1010rdb.dts
deleted file mode 100644
index b868d22984e..00000000000
--- a/arch/powerpc/boot/dts/p1010rdb.dts
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * P1010 RDB Device Tree Source
- *
- * Copyright 2011 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-/include/ "fsl/p1010si-pre.dtsi"
-
-/ {
- model = "fsl,P1010RDB";
- compatible = "fsl,P1010RDB";
-
- memory {
- device_type = "memory";
- };
-
- board_ifc: ifc: ifc@ffe1e000 {
- /* NOR, NAND Flashes and CPLD on board */
- ranges = <0x0 0x0 0x0 0xee000000 0x02000000
- 0x1 0x0 0x0 0xff800000 0x00010000
- 0x3 0x0 0x0 0xffb00000 0x00000020>;
- reg = <0x0 0xffe1e000 0 0x2000>;
- };
-
- board_soc: soc: soc@ffe00000 {
- ranges = <0x0 0x0 0xffe00000 0x100000>;
- };
-
- pci0: pcie@ffe09000 {
- reg = <0 0xffe09000 0 0x1000>;
- ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
- pcie@0 {
- ranges = <0x2000000 0x0 0xa0000000
- 0x2000000 0x0 0xa0000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x100000>;
- };
- };
-
- pci1: pcie@ffe0a000 {
- reg = <0 0xffe0a000 0 0x1000>;
- ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
- pcie@0 {
- ranges = <0x2000000 0x0 0x80000000
- 0x2000000 0x0 0x80000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x100000>;
- };
- };
-};
-
-/include/ "p1010rdb.dtsi"
-/include/ "fsl/p1010si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1010rdb.dtsi b/arch/powerpc/boot/dts/p1010rdb.dtsi
index ec7c27a6467..ea534efa790 100644
--- a/arch/powerpc/boot/dts/p1010rdb.dtsi
+++ b/arch/powerpc/boot/dts/p1010rdb.dtsi
@@ -69,49 +69,11 @@
};
};
- nand@1,0 {
+ ifc_nand: nand@1,0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "fsl,ifc-nand";
reg = <0x1 0x0 0x10000>;
-
- partition@0 {
- /* This location must not be altered */
- /* 1MB for u-boot Bootloader Image */
- reg = <0x0 0x00100000>;
- label = "NAND U-Boot Image";
- read-only;
- };
-
- partition@100000 {
- /* 1MB for DTB Image */
- reg = <0x00100000 0x00100000>;
- label = "NAND DTB Image";
- };
-
- partition@200000 {
- /* 4MB for Linux Kernel Image */
- reg = <0x00200000 0x00400000>;
- label = "NAND Linux Kernel Image";
- };
-
- partition@600000 {
- /* 4MB for Compressed Root file System Image */
- reg = <0x00600000 0x00400000>;
- label = "NAND Compressed RFS Image";
- };
-
- partition@a00000 {
- /* 15MB for JFFS2 based Root file System */
- reg = <0x00a00000 0x00f00000>;
- label = "NAND JFFS2 Root File System";
- };
-
- partition@1900000 {
- /* 7MB for User Area */
- reg = <0x01900000 0x00700000>;
- label = "NAND User area";
- };
};
cpld@3,0 {
@@ -193,17 +155,14 @@
mdio@24000 {
phy0: ethernet-phy@0 {
- interrupts = <3 1 0 0>;
reg = <0x1>;
};
phy1: ethernet-phy@1 {
- interrupts = <2 1 0 0>;
reg = <0x0>;
};
phy2: ethernet-phy@2 {
- interrupts = <2 1 0 0>;
reg = <0x2>;
};
diff --git a/arch/powerpc/boot/dts/p1010rdb_32b.dtsi b/arch/powerpc/boot/dts/p1010rdb_32b.dtsi
new file mode 100644
index 00000000000..fdc19aab2f7
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb_32b.dtsi
@@ -0,0 +1,79 @@
+/*
+ * P1010 RDB Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+memory {
+ device_type = "memory";
+};
+
+board_ifc: ifc: ifc@ffe1e000 {
+ /* NOR, NAND Flashes and CPLD on board */
+ ranges = <0x0 0x0 0x0 0xee000000 0x02000000
+ 0x1 0x0 0x0 0xff800000 0x00010000
+ 0x3 0x0 0x0 0xffb00000 0x00000020>;
+ reg = <0x0 0xffe1e000 0 0x2000>;
+};
+
+board_soc: soc: soc@ffe00000 {
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+};
+
+pci0: pcie@ffe09000 {
+ reg = <0 0xffe09000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+};
+
+pci1: pcie@ffe0a000 {
+ reg = <0 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/p1010rdb_36b.dtsi b/arch/powerpc/boot/dts/p1010rdb_36b.dtsi
new file mode 100644
index 00000000000..de2fceed4f7
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1010rdb_36b.dtsi
@@ -0,0 +1,79 @@
+/*
+ * P1010 RDB Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+memory {
+ device_type = "memory";
+};
+
+board_ifc: ifc: ifc@fffe1e000 {
+ /* NOR, NAND Flashes and CPLD on board */
+ ranges = <0x0 0x0 0xf 0xee000000 0x02000000
+ 0x1 0x0 0xf 0xff800000 0x00010000
+ 0x3 0x0 0xf 0xffb00000 0x00000020>;
+ reg = <0xf 0xffe1e000 0 0x2000>;
+};
+
+board_soc: soc: soc@fffe00000 {
+ ranges = <0x0 0xf 0xffe00000 0x100000>;
+};
+
+pci0: pcie@fffe09000 {
+ reg = <0xf 0xffe09000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xc0000000
+ 0x2000000 0x0 0xc0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+};
+
+pci1: pcie@fffe0a000 {
+ reg = <0xf 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xc0000000
+ 0x2000000 0x0 0xc0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/p1021mds.dts b/arch/powerpc/boot/dts/p1021mds.dts
index 97116f198a3..76559044df4 100644
--- a/arch/powerpc/boot/dts/p1021mds.dts
+++ b/arch/powerpc/boot/dts/p1021mds.dts
@@ -295,13 +295,11 @@
interrupt-parent = <&mpic>;
interrupts = <4 1 0 0>;
reg = <0x0>;
- device_type = "ethernet-phy";
};
qe_phy1: ethernet-phy@03 {
interrupt-parent = <&mpic>;
interrupts = <5 1 0 0>;
reg = <0x3>;
- device_type = "ethernet-phy";
};
tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/p1022ds.dtsi b/arch/powerpc/boot/dts/p1022ds.dtsi
index 873da350d01..957e0dc1dc0 100644
--- a/arch/powerpc/boot/dts/p1022ds.dtsi
+++ b/arch/powerpc/boot/dts/p1022ds.dtsi
@@ -146,8 +146,9 @@
*/
};
rtc@68 {
- compatible = "dallas,ds1339";
+ compatible = "dallas,ds3232";
reg = <0x68>;
+ interrupts = <0x1 0x1 0 0>;
};
adt7461@4c {
compatible = "adi,adt7461";
diff --git a/arch/powerpc/boot/dts/p1025rdb_32b.dts b/arch/powerpc/boot/dts/p1025rdb_32b.dts
index ac5729c14ed..a2ed6280ba7 100644
--- a/arch/powerpc/boot/dts/p1025rdb_32b.dts
+++ b/arch/powerpc/boot/dts/p1025rdb_32b.dts
@@ -105,13 +105,11 @@
interrupt-parent = <&mpic>;
interrupts = <4 1 0 0>;
reg = <0x6>;
- device_type = "ethernet-phy";
};
qe_phy1: ethernet-phy@03 {
interrupt-parent = <&mpic>;
interrupts = <5 1 0 0>;
reg = <0x3>;
- device_type = "ethernet-phy";
};
tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/p1025twr.dts b/arch/powerpc/boot/dts/p1025twr.dts
new file mode 100644
index 00000000000..9036a498790
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1025twr.dts
@@ -0,0 +1,95 @@
+/*
+ * P1025 TWR Device Tree Source (32-bit address map)
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1021si-pre.dtsi"
+/ {
+ model = "fsl,P1025";
+ compatible = "fsl,TWR-P1025";
+
+ memory {
+ device_type = "memory";
+ };
+
+ lbc: localbus@ffe05000 {
+ reg = <0 0xffe05000 0 0x1000>;
+
+ /* NOR Flash and SSD1289 */
+ ranges = <0x0 0x0 0x0 0xec000000 0x04000000
+ 0x2 0x0 0x0 0xe0000000 0x00020000>;
+ };
+
+ soc: soc@ffe00000 {
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@ffe09000 {
+ ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
+ reg = <0 0xffe09000 0 0x1000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@ffe0a000 {
+ reg = <0 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ qe: qe@ffe80000 {
+ ranges = <0x0 0x0 0xffe80000 0x40000>;
+ reg = <0 0xffe80000 0 0x480>;
+ brg-frequency = <0>;
+ bus-frequency = <0>;
+ };
+};
+
+/include/ "p1025twr.dtsi"
+/include/ "fsl/p1021si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1025twr.dtsi b/arch/powerpc/boot/dts/p1025twr.dtsi
new file mode 100644
index 00000000000..8453501c256
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1025twr.dtsi
@@ -0,0 +1,280 @@
+/*
+ * P1025 TWR Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/{
+ aliases {
+ ethernet3 = &enet3;
+ ethernet4 = &enet4;
+ };
+};
+
+&lbc {
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x4000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 256KB for Vitesse 7385 Switch firmware */
+ reg = <0x0 0x00040000>;
+ label = "NOR Vitesse-7385 Firmware";
+ read-only;
+ };
+
+ partition@40000 {
+ /* 256KB for DTB Image */
+ reg = <0x00040000 0x00040000>;
+ label = "NOR DTB Image";
+ };
+
+ partition@80000 {
+ /* 5.5 MB for Linux Kernel Image */
+ reg = <0x00080000 0x00580000>;
+ label = "NOR Linux Kernel Image";
+ };
+
+ partition@400000 {
+ /* 56.75MB for Root file System */
+ reg = <0x00600000 0x038c0000>;
+ label = "NOR Root File System";
+ };
+
+ partition@ec0000 {
+ /* This location must not be altered */
+ /* 256KB for QE ucode firmware*/
+ reg = <0x03ec0000 0x00040000>;
+ label = "NOR QE microcode firmware";
+ read-only;
+ };
+
+ partition@f00000 {
+ /* This location must not be altered */
+ /* 512KB for u-boot Bootloader Image */
+ /* 512KB for u-boot Environment Variables */
+ reg = <0x03f00000 0x00100000>;
+ label = "NOR U-Boot Image";
+ read-only;
+ };
+ };
+
+ /* CS2 for Display */
+ display@2,0 {
+ compatible = "solomon,ssd1289fb";
+ reg = <0x2 0x0000 0x0004>;
+ };
+
+};
+
+&soc {
+ usb@22000 {
+ phy_type = "ulpi";
+ };
+
+ mdio@24000 {
+ phy0: ethernet-phy@2 {
+ interrupt-parent = <&mpic>;
+ interrupts = <1 1 0 0>;
+ reg = <0x2>;
+ };
+
+ phy1: ethernet-phy@1 {
+ interrupt-parent = <&mpic>;
+ interrupts = <2 1 0 0>;
+ reg = <0x1>;
+ };
+
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25000 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26000 {
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet0: ethernet@b0000 {
+ phy-handle = <&phy0>;
+ phy-connection-type = "rgmii-id";
+
+ };
+
+ enet1: ethernet@b1000 {
+ status = "disabled";
+ };
+
+ enet2: ethernet@b2000 {
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ par_io@e0100 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xe0100 0x60>;
+ ranges = <0x0 0xe0100 0x60>;
+ device_type = "par_io";
+ num-ports = <3>;
+ pio1: ucc_pin@01 {
+ pio-map = <
+ /* port pin dir open_drain assignment has_irq */
+ 0x1 0x13 0x1 0x0 0x1 0x0 /* QE_MUX_MDC */
+ 0x1 0x14 0x3 0x0 0x1 0x0 /* QE_MUX_MDIO */
+ 0x0 0x17 0x2 0x0 0x2 0x0 /* CLK12 */
+ 0x0 0x18 0x2 0x0 0x1 0x0 /* CLK9 */
+ 0x0 0x7 0x1 0x0 0x2 0x0 /* ENET1_TXD0_SER1_TXD0 */
+ 0x0 0x9 0x1 0x0 0x2 0x0 /* ENET1_TXD1_SER1_TXD1 */
+ 0x0 0xb 0x1 0x0 0x2 0x0 /* ENET1_TXD2_SER1_TXD2 */
+ 0x0 0xc 0x1 0x0 0x2 0x0 /* ENET1_TXD3_SER1_TXD3 */
+ 0x0 0x6 0x2 0x0 0x2 0x0 /* ENET1_RXD0_SER1_RXD0 */
+ 0x0 0xa 0x2 0x0 0x2 0x0 /* ENET1_RXD1_SER1_RXD1 */
+ 0x0 0xe 0x2 0x0 0x2 0x0 /* ENET1_RXD2_SER1_RXD2 */
+ 0x0 0xf 0x2 0x0 0x2 0x0 /* ENET1_RXD3_SER1_RXD3 */
+ 0x0 0x5 0x1 0x0 0x2 0x0 /* ENET1_TX_EN_SER1_RTS_B */
+ 0x0 0xd 0x1 0x0 0x2 0x0 /* ENET1_TX_ER */
+ 0x0 0x4 0x2 0x0 0x2 0x0 /* ENET1_RX_DV_SER1_CTS_B */
+ 0x0 0x8 0x2 0x0 0x2 0x0 /* ENET1_RX_ER_SER1_CD_B */
+ 0x0 0x11 0x2 0x0 0x2 0x0 /* ENET1_CRS */
+ 0x0 0x10 0x2 0x0 0x2 0x0>; /* ENET1_COL */
+ };
+
+ pio2: ucc_pin@02 {
+ pio-map = <
+ /* port pin dir open_drain assignment has_irq */
+ 0x1 0x13 0x1 0x0 0x1 0x0 /* QE_MUX_MDC */
+ 0x1 0x14 0x3 0x0 0x1 0x0 /* QE_MUX_MDIO */
+ 0x1 0xb 0x2 0x0 0x1 0x0 /* CLK13 */
+ 0x1 0x7 0x1 0x0 0x2 0x0 /* ENET5_TXD0_SER5_TXD0 */
+ 0x1 0xa 0x1 0x0 0x2 0x0 /* ENET5_TXD1_SER5_TXD1 */
+ 0x1 0x6 0x2 0x0 0x2 0x0 /* ENET5_RXD0_SER5_RXD0 */
+ 0x1 0x9 0x2 0x0 0x2 0x0 /* ENET5_RXD1_SER5_RXD1 */
+ 0x1 0x5 0x1 0x0 0x2 0x0 /* ENET5_TX_EN_SER5_RTS_B */
+ 0x1 0x4 0x2 0x0 0x2 0x0 /* ENET5_RX_DV_SER5_CTS_B */
+ 0x1 0x8 0x2 0x0 0x2 0x0>; /* ENET5_RX_ER_SER5_CD_B */
+ };
+
+ pio3: ucc_pin@03 {
+ pio-map = <
+ /* port pin dir open_drain assignment has_irq */
+ 0x0 0x16 0x2 0x0 0x2 0x0 /* SER7_CD_B*/
+ 0x0 0x12 0x2 0x0 0x2 0x0 /* SER7_CTS_B*/
+ 0x0 0x13 0x1 0x0 0x2 0x0 /* SER7_RTS_B*/
+ 0x0 0x14 0x2 0x0 0x2 0x0 /* SER7_RXD0*/
+ 0x0 0x15 0x1 0x0 0x2 0x0>; /* SER7_TXD0*/
+ };
+
+ pio4: ucc_pin@04 {
+ pio-map = <
+ /* port pin dir open_drain assignment has_irq */
+ 0x1 0x0 0x2 0x0 0x2 0x0 /* SER3_CD_B*/
+ 0x0 0x1c 0x2 0x0 0x2 0x0 /* SER3_CTS_B*/
+ 0x0 0x1d 0x1 0x0 0x2 0x0 /* SER3_RTS_B*/
+ 0x0 0x1e 0x2 0x0 0x2 0x0 /* SER3_RXD0*/
+ 0x0 0x1f 0x1 0x0 0x2 0x0>; /* SER3_TXD0*/
+ };
+ };
+};
+
+&qe {
+ enet3: ucc@2000 {
+ device_type = "network";
+ compatible = "ucc_geth";
+ rx-clock-name = "clk12";
+ tx-clock-name = "clk9";
+ pio-handle = <&pio1>;
+ phy-handle = <&qe_phy0>;
+ phy-connection-type = "mii";
+ };
+
+ mdio@2120 {
+ qe_phy0: ethernet-phy@18 {
+ interrupt-parent = <&mpic>;
+ interrupts = <4 1 0 0>;
+ reg = <0x18>;
+ device_type = "ethernet-phy";
+ };
+ qe_phy1: ethernet-phy@19 {
+ interrupt-parent = <&mpic>;
+ interrupts = <5 1 0 0>;
+ reg = <0x19>;
+ device_type = "ethernet-phy";
+ };
+ tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet4: ucc@2400 {
+ device_type = "network";
+ compatible = "ucc_geth";
+ rx-clock-name = "none";
+ tx-clock-name = "clk13";
+ pio-handle = <&pio2>;
+ phy-handle = <&qe_phy1>;
+ phy-connection-type = "rmii";
+ };
+
+ serial2: ucc@2600 {
+ device_type = "serial";
+ compatible = "ucc_uart";
+ port-number = <0>;
+ rx-clock-name = "brg6";
+ tx-clock-name = "brg6";
+ pio-handle = <&pio3>;
+ };
+
+ serial3: ucc@2200 {
+ device_type = "serial";
+ compatible = "ucc_uart";
+ port-number = <1>;
+ rx-clock-name = "brg2";
+ tx-clock-name = "brg2";
+ pio-handle = <&pio4>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/ppa8548.dts b/arch/powerpc/boot/dts/ppa8548.dts
index f97eceed610..27b0699ee92 100644
--- a/arch/powerpc/boot/dts/ppa8548.dts
+++ b/arch/powerpc/boot/dts/ppa8548.dts
@@ -110,12 +110,10 @@
phy0: ethernet-phy@0 {
interrupts = <7 1 0 0>;
reg = <0x0>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@1 {
interrupts = <8 1 0 0>;
reg = <0x1>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/pq2fads.dts b/arch/powerpc/boot/dts/pq2fads.dts
index 0bb66937674..0c525ff0c25 100644
--- a/arch/powerpc/boot/dts/pq2fads.dts
+++ b/arch/powerpc/boot/dts/pq2fads.dts
@@ -198,7 +198,6 @@
};
mdio@10d40 {
- device_type = "mdio";
compatible = "fsl,pq2fads-mdio-bitbang",
"fsl,mpc8280-mdio-bitbang",
"fsl,cpm2-mdio-bitbang";
@@ -212,14 +211,12 @@
interrupt-parent = <&PIC>;
interrupts = <25 2>;
reg = <0x0>;
- device_type = "ethernet-phy";
};
PHY1: ethernet-phy@1 {
interrupt-parent = <&PIC>;
interrupts = <25 2>;
reg = <0x3>;
- device_type = "ethernet-phy";
};
};
diff --git a/arch/powerpc/boot/dts/prpmc2800.dts b/arch/powerpc/boot/dts/prpmc2800.dts
index 1ee6ff43dd5..00afaacf8c8 100644
--- a/arch/powerpc/boot/dts/prpmc2800.dts
+++ b/arch/powerpc/boot/dts/prpmc2800.dts
@@ -73,17 +73,14 @@
mdio {
#address-cells = <1>;
#size-cells = <0>;
- device_type = "mdio";
compatible = "marvell,mv64360-mdio";
PHY0: ethernet-phy@1 {
- device_type = "ethernet-phy";
compatible = "broadcom,bcm5421";
interrupts = <76>; /* GPP 12 */
interrupt-parent = <&PIC>;
reg = <1>;
};
PHY1: ethernet-phy@3 {
- device_type = "ethernet-phy";
compatible = "broadcom,bcm5421";
interrupts = <76>; /* GPP 12 */
interrupt-parent = <&PIC>;
@@ -162,7 +159,6 @@
};
MPSC0: mpsc@8000 {
- device_type = "serial";
compatible = "marvell,mv64360-mpsc";
reg = <0x8000 0x38>;
virtual-reg = <0xf1008000>;
@@ -177,7 +173,6 @@
};
MPSC1: mpsc@9000 {
- device_type = "serial";
compatible = "marvell,mv64360-mpsc";
reg = <0x9000 0x38>;
virtual-reg = <0xf1009000>;
diff --git a/arch/powerpc/boot/dts/sbc8349.dts b/arch/powerpc/boot/dts/sbc8349.dts
index b1e45a8537a..fc89e00b765 100644
--- a/arch/powerpc/boot/dts/sbc8349.dts
+++ b/arch/powerpc/boot/dts/sbc8349.dts
@@ -173,14 +173,12 @@
interrupt-parent = <&ipic>;
interrupts = <20 0x8>;
reg = <0x19>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@1a {
interrupt-parent = <&ipic>;
interrupts = <21 0x8>;
reg = <0x1a>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
diff --git a/arch/powerpc/boot/dts/sbc8548-post.dtsi b/arch/powerpc/boot/dts/sbc8548-post.dtsi
index 33a47e27a11..9b505c8e535 100644
--- a/arch/powerpc/boot/dts/sbc8548-post.dtsi
+++ b/arch/powerpc/boot/dts/sbc8548-post.dtsi
@@ -137,13 +137,11 @@
interrupt-parent = <&mpic>;
interrupts = <0x6 0x1>;
reg = <0x19>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@1a {
interrupt-parent = <&mpic>;
interrupts = <0x7 0x1>;
reg = <0x1a>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/sbc8641d.dts b/arch/powerpc/boot/dts/sbc8641d.dts
index 56bebce8784..631ede72e22 100644
--- a/arch/powerpc/boot/dts/sbc8641d.dts
+++ b/arch/powerpc/boot/dts/sbc8641d.dts
@@ -230,25 +230,21 @@
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <0x1f>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@0 {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <0>;
- device_type = "ethernet-phy";
};
phy2: ethernet-phy@1 {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <1>;
- device_type = "ethernet-phy";
};
phy3: ethernet-phy@2 {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <2>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/stx_gp3_8560.dts b/arch/powerpc/boot/dts/stx_gp3_8560.dts
index b670d03fbcd..78a72ee4820 100644
--- a/arch/powerpc/boot/dts/stx_gp3_8560.dts
+++ b/arch/powerpc/boot/dts/stx_gp3_8560.dts
@@ -161,13 +161,11 @@
interrupt-parent = <&mpic>;
interrupts = <5 4>;
reg = <2>;
- device_type = "ethernet-phy";
};
phy4: ethernet-phy@4 {
interrupt-parent = <&mpic>;
interrupts = <5 4>;
reg = <4>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/stxssa8555.dts b/arch/powerpc/boot/dts/stxssa8555.dts
index 4f166b01c1b..859f854ba53 100644
--- a/arch/powerpc/boot/dts/stxssa8555.dts
+++ b/arch/powerpc/boot/dts/stxssa8555.dts
@@ -164,13 +164,11 @@
interrupt-parent = <&mpic>;
interrupts = <5 1>;
reg = <0x2>;
- device_type = "ethernet-phy";
};
phy1: ethernet-phy@4 {
interrupt-parent = <&mpic>;
interrupts = <5 1>;
reg = <0x4>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/tqm8540.dts b/arch/powerpc/boot/dts/tqm8540.dts
index ed264d9ae35..91cbd7acd27 100644
--- a/arch/powerpc/boot/dts/tqm8540.dts
+++ b/arch/powerpc/boot/dts/tqm8540.dts
@@ -172,19 +172,16 @@
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <1>;
- device_type = "ethernet-phy";
};
phy2: ethernet-phy@2 {
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <2>;
- device_type = "ethernet-phy";
};
phy3: ethernet-phy@3 {
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <3>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/tqm8541.dts b/arch/powerpc/boot/dts/tqm8541.dts
index 92524211581..84dce2d5fc4 100644
--- a/arch/powerpc/boot/dts/tqm8541.dts
+++ b/arch/powerpc/boot/dts/tqm8541.dts
@@ -172,19 +172,16 @@
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <1>;
- device_type = "ethernet-phy";
};
phy2: ethernet-phy@2 {
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <2>;
- device_type = "ethernet-phy";
};
phy3: ethernet-phy@3 {
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <3>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/tqm8548-bigflash.dts b/arch/powerpc/boot/dts/tqm8548-bigflash.dts
index 6e1ac50852a..7a333dd02d9 100644
--- a/arch/powerpc/boot/dts/tqm8548-bigflash.dts
+++ b/arch/powerpc/boot/dts/tqm8548-bigflash.dts
@@ -185,31 +185,26 @@
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <1>;
- device_type = "ethernet-phy";
};
phy2: ethernet-phy@1 {
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <2>;
- device_type = "ethernet-phy";
};
phy3: ethernet-phy@3 {
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <3>;
- device_type = "ethernet-phy";
};
phy4: ethernet-phy@4 {
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <4>;
- device_type = "ethernet-phy";
};
phy5: ethernet-phy@5 {
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <5>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/tqm8548.dts b/arch/powerpc/boot/dts/tqm8548.dts
index 161e75eac7f..c737caff10c 100644
--- a/arch/powerpc/boot/dts/tqm8548.dts
+++ b/arch/powerpc/boot/dts/tqm8548.dts
@@ -185,31 +185,26 @@
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <1>;
- device_type = "ethernet-phy";
};
phy2: ethernet-phy@1 {
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <2>;
- device_type = "ethernet-phy";
};
phy3: ethernet-phy@3 {
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <3>;
- device_type = "ethernet-phy";
};
phy4: ethernet-phy@4 {
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <4>;
- device_type = "ethernet-phy";
};
phy5: ethernet-phy@5 {
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <5>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/tqm8555.dts b/arch/powerpc/boot/dts/tqm8555.dts
index aa6ff0d3dd9..d0416a5cddd 100644
--- a/arch/powerpc/boot/dts/tqm8555.dts
+++ b/arch/powerpc/boot/dts/tqm8555.dts
@@ -172,19 +172,16 @@
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <1>;
- device_type = "ethernet-phy";
};
phy2: ethernet-phy@2 {
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <2>;
- device_type = "ethernet-phy";
};
phy3: ethernet-phy@3 {
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <3>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/tqm8560.dts b/arch/powerpc/boot/dts/tqm8560.dts
index 7665a16a8b9..f9a11ebf736 100644
--- a/arch/powerpc/boot/dts/tqm8560.dts
+++ b/arch/powerpc/boot/dts/tqm8560.dts
@@ -174,19 +174,16 @@
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <1>;
- device_type = "ethernet-phy";
};
phy2: ethernet-phy@2 {
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <2>;
- device_type = "ethernet-phy";
};
phy3: ethernet-phy@3 {
interrupt-parent = <&mpic>;
interrupts = <8 1>;
reg = <3>;
- device_type = "ethernet-phy";
};
tbi0: tbi-phy@11 {
reg = <0x11>;
diff --git a/arch/powerpc/boot/dts/tqm8xx.dts b/arch/powerpc/boot/dts/tqm8xx.dts
index c3dba2518d8..3d1446b99c7 100644
--- a/arch/powerpc/boot/dts/tqm8xx.dts
+++ b/arch/powerpc/boot/dts/tqm8xx.dts
@@ -107,7 +107,6 @@
#size-cells = <0>;
PHY: ethernet-phy@f {
reg = <0xf>;
- device_type = "ethernet-phy";
};
};
diff --git a/arch/powerpc/boot/dts/virtex440-ml507.dts b/arch/powerpc/boot/dts/virtex440-ml507.dts
index fc7073bc547..391a4e29978 100644
--- a/arch/powerpc/boot/dts/virtex440-ml507.dts
+++ b/arch/powerpc/boot/dts/virtex440-ml507.dts
@@ -257,6 +257,8 @@
#size-cells = <1>;
compatible = "xlnx,compound";
ethernet@81c00000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
compatible = "xlnx,xps-ll-temac-1.01.b";
device_type = "network";
interrupt-parent = <&xps_intc_0>;
diff --git a/arch/powerpc/boot/mvme5100.c b/arch/powerpc/boot/mvme5100.c
new file mode 100644
index 00000000000..cb865f83c60
--- /dev/null
+++ b/arch/powerpc/boot/mvme5100.c
@@ -0,0 +1,27 @@
+/*
+ * Motorola/Emerson MVME5100 with PPCBug firmware.
+ *
+ * Author: Stephen Chivers <schivers@csc.com>
+ *
+ * Copyright 2013 CSC Australia Pty. Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ */
+#include "types.h"
+#include "ops.h"
+#include "io.h"
+
+BSS_STACK(4096);
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5)
+{
+ u32 heapsize;
+
+ heapsize = 0x8000000 - (u32)_end; /* 128M */
+ simple_alloc_init(_end, heapsize, 32, 64);
+ fdt_init(_dtb_start);
+ serial_console_init();
+}
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 2e1af74a64b..d27a25518b0 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -265,6 +265,10 @@ epapr)
link_address='0x20000000'
pie=-pie
;;
+mvme5100)
+ platformo="$object/fixed-head.o $object/mvme5100.o"
+ binary=y
+ ;;
esac
vmz="$tmpdir/`basename \"$kernel\"`.$ext"
diff --git a/arch/powerpc/configs/85xx/p1023_defconfig b/arch/powerpc/configs/85xx/p1023_defconfig
deleted file mode 100644
index b06d37da44f..00000000000
--- a/arch/powerpc/configs/85xx/p1023_defconfig
+++ /dev/null
@@ -1,188 +0,0 @@
-CONFIG_PPC_85xx=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=2
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_RCU_FANOUT=32
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_EMBEDDED=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_PHYSICAL_START=0x00000000
-CONFIG_P1023_RDB=y
-CONFIG_P1023_RDS=y
-CONFIG_QUICC_ENGINE=y
-CONFIG_QE_GPIO=y
-CONFIG_CPM2=y
-CONFIG_HIGHMEM=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_BINFMT_MISC=m
-CONFIG_MATH_EMULATION=y
-CONFIG_SWIOTLB=y
-CONFIG_PCI=y
-CONFIG_PCIEPORTBUS=y
-# CONFIG_PCIEAER is not set
-# CONFIG_PCIEASPM is not set
-CONFIG_PCI_MSI=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_ARPD=y
-CONFIG_INET_ESP=y
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
-CONFIG_IP_SCTP=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_PROC_DEVICETREE=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_EEPROM_AT24=y
-CONFIG_EEPROM_LEGACY=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_ATA=y
-CONFIG_SATA_FSL=y
-CONFIG_SATA_SIL24=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=y
-CONFIG_FS_ENET=y
-CONFIG_FSL_PQ_MDIO=y
-CONFIG_E1000E=y
-CONFIG_PHYLIB=y
-CONFIG_AT803X_PHY=y
-CONFIG_MARVELL_PHY=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_CICADA_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_FIXED_PHY=y
-CONFIG_INPUT_FF_MEMLESS=m
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_RSA=y
-CONFIG_HW_RANDOM=y
-CONFIG_NVRAM=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_CPM=m
-CONFIG_I2C_MPC=y
-CONFIG_GPIO_MPC8XXX=y
-# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_FSL=y
-CONFIG_USB_STORAGE=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_CMOS=y
-CONFIG_DMADEVICES=y
-CONFIG_FSL_DMA=y
-# CONFIG_NET_DMA is not set
-CONFIG_STAGING=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_ADFS_FS=m
-CONFIG_AFFS_FS=m
-CONFIG_HFS_FS=m
-CONFIG_HFSPLUS_FS=m
-CONFIG_BEFS_FS=m
-CONFIG_BFS_FS=m
-CONFIG_EFS_FS=m
-CONFIG_CRAMFS=y
-CONFIG_VXFS_FS=m
-CONFIG_HPFS_FS=m
-CONFIG_QNX4FS_FS=m
-CONFIG_SYSV_FS=m
-CONFIG_UFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_CRC_T10DIF=y
-CONFIG_FRAME_WARN=8092
-CONFIG_DEBUG_FS=y
-CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_DEBUG_INFO=y
-CONFIG_STRICT_DEVMEM=y
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_AES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_FSL_CAAM=y
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig
index 69128740c14..15b1ff5d96e 100644
--- a/arch/powerpc/configs/adder875_defconfig
+++ b/arch/powerpc/configs/adder875_defconfig
@@ -70,3 +70,4 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig
index 219fd470ed2..b8a79d7ee89 100644
--- a/arch/powerpc/configs/ep88xc_defconfig
+++ b/arch/powerpc/configs/ep88xc_defconfig
@@ -72,3 +72,4 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index d2e0fab5ee5..83d3550fdb5 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -31,6 +31,7 @@ CONFIG_C293_PCIE=y
CONFIG_P1010_RDB=y
CONFIG_P1022_DS=y
CONFIG_P1022_RDK=y
+CONFIG_P1023_RDB=y
CONFIG_P1023_RDS=y
CONFIG_SOCRATES=y
CONFIG_KSI8560=y
@@ -113,6 +114,7 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_EEPROM_AT24=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -211,6 +213,7 @@ CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_CMOS=y
+CONFIG_RTC_DRV_DS1307=y
CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
# CONFIG_NET_DMA is not set
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index 4cb7b59e98b..4b686294feb 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -34,6 +34,7 @@ CONFIG_C293_PCIE=y
CONFIG_P1010_RDB=y
CONFIG_P1022_DS=y
CONFIG_P1022_RDK=y
+CONFIG_P1023_RDB=y
CONFIG_P1023_RDS=y
CONFIG_SOCRATES=y
CONFIG_KSI8560=y
@@ -116,6 +117,7 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_EEPROM_AT24=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -212,6 +214,7 @@ CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_CMOS=y
+CONFIG_RTC_DRV_DS1307=y
CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
# CONFIG_NET_DMA is not set
diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig
index 5c258823e69..d954e80c286 100644
--- a/arch/powerpc/configs/mpc866_ads_defconfig
+++ b/arch/powerpc/configs/mpc866_ads_defconfig
@@ -55,3 +55,4 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_CRC_CCITT=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index 9e146cdf63d..3f47d00a10c 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -78,3 +78,4 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/configs/mvme5100_defconfig b/arch/powerpc/configs/mvme5100_defconfig
new file mode 100644
index 00000000000..93c7752e2db
--- /dev/null
+++ b/arch/powerpc/configs/mvme5100_defconfig
@@ -0,0 +1,144 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_PPC_CHRP is not set
+# CONFIG_PPC_PMAC is not set
+CONFIG_EMBEDDED6xx=y
+CONFIG_MVME5100=y
+CONFIG_KVM_GUEST=y
+CONFIG_HZ_100=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,9600 ip=dhcp root=/dev/nfs"
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CT_PROTO_SCTP=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_LAPB=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_EEPROM_LEGACY=m
+CONFIG_NETDEVICES=y
+CONFIG_TUN=m
+# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_E100=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=10
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MPC=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_VME_BUS=m
+CONFIG_VME_CA91CX42=m
+CONFIG_EXT2_FS=m
+CONFIG_EXT3_FS=m
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_CIFS=m
+CONFIG_NLS=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_UTF8=m
+CONFIG_CRC_CCITT=m
+CONFIG_CRC_T10DIF=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=20
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 581a3bcae72..e015896b7e5 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -186,6 +186,7 @@ CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_ATA=y
CONFIG_SATA_SIL24=y
+CONFIG_SATA_MV=y
CONFIG_SATA_SVW=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig
index 8616fde0896..4b6f8bf104e 100644
--- a/arch/powerpc/configs/tqm8xx_defconfig
+++ b/arch/powerpc/configs/tqm8xx_defconfig
@@ -84,3 +84,4 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index d8f9d2f18a2..6c0a955a1b0 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -3,4 +3,5 @@ generic-y += clkdev.h
generic-y += rwsem.h
generic-y += trace_clock.h
generic-y += preempt.h
-generic-y += vtime.h
\ No newline at end of file
+generic-y += vtime.h
+generic-y += hash.h
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index ae782254e73..f89da808ce3 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -45,11 +45,15 @@
# define SMPWMB eieio
#endif
+#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+
#define smp_mb() mb()
-#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+#define smp_rmb() __lwsync()
#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
#define smp_read_barrier_depends() read_barrier_depends()
#else
+#define __lwsync() barrier()
+
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
@@ -65,4 +69,19 @@
#define data_barrier(x) \
asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ __lwsync(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ __lwsync(); \
+ ___p1; \
+})
+
#endif /* _ASM_POWERPC_BARRIER_H */
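The barrier.h hunk factors the lwsync instruction out into __lwsync() and uses it to build the new smp_store_release()/smp_load_acquire() pair. A minimal sketch of how the two are meant to pair up; publish(), consume(), data and ready are illustrative names, not part of the patch:

#include <asm/barrier.h>

static int data;
static int ready;

static void publish(int v)
{
	data = v;
	/* lwsync orders the store to data before the store to ready */
	smp_store_release(&ready, 1);
}

static int consume(void)
{
	/* lwsync orders the load of ready before the later load of data */
	if (smp_load_acquire(&ready))
		return data;
	return -1;		/* not published yet */
}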
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 910194e9a1e..a5e9a7d494d 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -46,6 +46,11 @@
#include <asm/asm-compat.h>
#include <asm/synch.h>
+/* PPC bit number conversion */
+#define PPC_BITLSHIFT(be) (BITS_PER_LONG - 1 - (be))
+#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
+#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
+
/*
* clear_bit doesn't imply a memory barrier
*/
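The new PPC_BITLSHIFT()/PPC_BIT()/PPC_BITMASK() macros translate IBM (MSB-0) bit numbering, used throughout the Power ISA documentation and by the mce.h definitions added later in this patch, into ordinary masks. A standalone sketch assuming a 64-bit target (BITS_PER_LONG == 64); the commented values are what the macros expand to:

#include <stdio.h>

#define BITS_PER_LONG		64
#define PPC_BITLSHIFT(be)	(BITS_PER_LONG - 1 - (be))
#define PPC_BIT(bit)		(1UL << PPC_BITLSHIFT(bit))
#define PPC_BITMASK(bs, be)	((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

int main(void)
{
	/* IBM bit 0 is the most significant bit of the 64-bit word */
	printf("PPC_BIT(0)          = 0x%016lx\n", PPC_BIT(0));          /* 0x8000000000000000 */
	printf("PPC_BIT(63)         = 0x%016lx\n", PPC_BIT(63));         /* 0x0000000000000001 */
	/* contiguous IBM bits 43..45, e.g. the SRR1 ifetch field in mce.h */
	printf("PPC_BITMASK(43, 45) = 0x%016lx\n", PPC_BITMASK(43, 45)); /* 0x00000000001c0000 */
	return 0;
}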
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 9e495c9a6a8..ed0afc1e44a 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -41,8 +41,20 @@ struct ppc64_caches {
extern struct ppc64_caches ppc64_caches;
#endif /* __powerpc64__ && ! __ASSEMBLY__ */
-#if !defined(__ASSEMBLY__)
+#if defined(__ASSEMBLY__)
+/*
+ * For a snooping icache, we still need a dummy icbi to purge all the
+ * prefetched instructions from the ifetch buffers. We also need a sync
+ * before the icbi to order the actual stores to memory that might
+ * have modified instructions with the icbi.
+ */
+#define PURGE_PREFETCHED_INS \
+ sync; \
+ icbi 0,r3; \
+ sync; \
+ isync
+#else
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#ifdef CONFIG_6xx
diff --git a/arch/powerpc/include/asm/clk_interface.h b/arch/powerpc/include/asm/clk_interface.h
deleted file mode 100644
index ab1882c1e17..00000000000
--- a/arch/powerpc/include/asm/clk_interface.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __ASM_POWERPC_CLK_INTERFACE_H
-#define __ASM_POWERPC_CLK_INTERFACE_H
-
-#include <linux/clk.h>
-
-struct clk_interface {
- struct clk* (*clk_get) (struct device *dev, const char *id);
- int (*clk_enable) (struct clk *clk);
- void (*clk_disable) (struct clk *clk);
- unsigned long (*clk_get_rate) (struct clk *clk);
- void (*clk_put) (struct clk *clk);
- long (*clk_round_rate) (struct clk *clk, unsigned long rate);
- int (*clk_set_rate) (struct clk *clk, unsigned long rate);
- int (*clk_set_parent) (struct clk *clk, struct clk *parent);
- struct clk* (*clk_get_parent) (struct clk *clk);
-};
-
-extern struct clk_interface clk_functions;
-
-#endif /* __ASM_POWERPC_CLK_INTERFACE_H */
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index e245aab7f19..d463c68fe7f 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -300,6 +300,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
+#define cmpxchg64_relaxed cmpxchg64_local
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index a6f8c7a5cbb..97e02f985df 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -34,6 +34,13 @@ int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
unsigned long branch_target(const unsigned int *instr);
unsigned int translate_branch(const unsigned int *dest,
const unsigned int *src);
+#ifdef CONFIG_PPC_BOOK3E_64
+void __patch_exception(int exc, unsigned long addr);
+#define patch_exception(exc, name) do { \
+ extern unsigned int name; \
+ __patch_exception((exc), (unsigned long)&name); \
+} while (0)
+#endif
static inline unsigned long ppc_function_entry(void *func)
{
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 0d4939ba48e..617cc767c07 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -90,6 +90,18 @@ struct cpu_spec {
* if the error is fatal, 1 if it was fully recovered and 0 to
* pass up (not CPU originated) */
int (*machine_check)(struct pt_regs *regs);
+
+ /*
+ * Processor specific early machine check handler which is
+ * called in real mode to handle SLB and TLB errors.
+ */
+ long (*machine_check_early)(struct pt_regs *regs);
+
+ /*
+ * Processor specific routine to flush tlbs.
+ */
+ void (*flush_tlb)(unsigned long inval_selector);
+
};
extern struct cpu_spec *cur_cpu_spec;
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index e27e9ad6818..150866b2a3f 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -134,6 +134,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
}
extern int dma_set_mask(struct device *dev, u64 dma_mask);
+extern int __dma_set_mask(struct device *dev, u64 dma_mask);
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index d3e5e9bc8f9..d4dd41fb951 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -90,7 +90,8 @@ struct eeh_pe {
#define EEH_DEV_IRQ_DISABLED (1 << 3) /* Interrupt disabled */
#define EEH_DEV_DISCONNECTED (1 << 4) /* Removing from PE */
-#define EEH_DEV_SYSFS (1 << 8) /* Sysfs created */
+#define EEH_DEV_NO_HANDLER (1 << 8) /* No error handler */
+#define EEH_DEV_SYSFS (1 << 9) /* Sysfs created */
struct eeh_dev {
int mode; /* EEH mode */
@@ -117,6 +118,16 @@ static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
return edev ? edev->pdev : NULL;
}
+/* Return values from eeh_ops::next_error */
+enum {
+ EEH_NEXT_ERR_NONE = 0,
+ EEH_NEXT_ERR_INF,
+ EEH_NEXT_ERR_FROZEN_PE,
+ EEH_NEXT_ERR_FENCED_PHB,
+ EEH_NEXT_ERR_DEAD_PHB,
+ EEH_NEXT_ERR_DEAD_IOC
+};
+
/*
* The struct is used to trace the registered EEH operation
* callback functions. Actually, those operation callback
@@ -157,13 +168,24 @@ struct eeh_ops {
int (*read_config)(struct device_node *dn, int where, int size, u32 *val);
int (*write_config)(struct device_node *dn, int where, int size, u32 val);
int (*next_error)(struct eeh_pe **pe);
+ int (*restore_config)(struct device_node *dn);
};
extern struct eeh_ops *eeh_ops;
-extern int eeh_subsystem_enabled;
+extern bool eeh_subsystem_enabled;
extern raw_spinlock_t confirm_error_lock;
extern int eeh_probe_mode;
+static inline bool eeh_enabled(void)
+{
+ return eeh_subsystem_enabled;
+}
+
+static inline void eeh_set_enable(bool mode)
+{
+ eeh_subsystem_enabled = mode;
+}
+
#define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */
#define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */
@@ -234,7 +256,7 @@ void eeh_remove_device(struct pci_dev *);
* If this macro yields TRUE, the caller relays to eeh_check_failure()
* which does further tests out of line.
*/
-#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_subsystem_enabled)
+#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_enabled())
/*
* Reads from a device which has been isolated by EEH will return
@@ -245,6 +267,13 @@ void eeh_remove_device(struct pci_dev *);
#else /* !CONFIG_EEH */
+static inline bool eeh_enabled(void)
+{
+ return false;
+}
+
+static inline void eeh_set_enable(bool mode) { }
+
static inline int eeh_init(void)
{
return 0;
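The eeh.h change turns eeh_subsystem_enabled into a bool and routes callers through eeh_enabled()/eeh_set_enable(), with stubs when CONFIG_EEH is off. A small sketch of the intended call pattern; the probe function is hypothetical:

#include <linux/pci.h>
#include <asm/eeh.h>

static void my_probe_path(struct pci_dev *pdev)
{
	if (!eeh_enabled())
		return;		/* EEH not active: skip recovery setup */

	/* ... EEH-aware setup for pdev ... */
}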
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
index 86b0ac79990..334459ad145 100644
--- a/arch/powerpc/include/asm/epapr_hcalls.h
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -460,5 +460,116 @@ static inline unsigned int ev_idle(void)
return r3;
}
+
+#ifdef CONFIG_EPAPR_PARAVIRT
+static inline unsigned long epapr_hypercall(unsigned long *in,
+ unsigned long *out,
+ unsigned long nr)
+{
+ unsigned long register r0 asm("r0");
+ unsigned long register r3 asm("r3") = in[0];
+ unsigned long register r4 asm("r4") = in[1];
+ unsigned long register r5 asm("r5") = in[2];
+ unsigned long register r6 asm("r6") = in[3];
+ unsigned long register r7 asm("r7") = in[4];
+ unsigned long register r8 asm("r8") = in[5];
+ unsigned long register r9 asm("r9") = in[6];
+ unsigned long register r10 asm("r10") = in[7];
+ unsigned long register r11 asm("r11") = nr;
+ unsigned long register r12 asm("r12");
+
+ asm volatile("bl epapr_hypercall_start"
+ : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
+ "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
+ "=r"(r12)
+ : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
+ "r"(r9), "r"(r10), "r"(r11)
+ : "memory", "cc", "xer", "ctr", "lr");
+
+ out[0] = r4;
+ out[1] = r5;
+ out[2] = r6;
+ out[3] = r7;
+ out[4] = r8;
+ out[5] = r9;
+ out[6] = r10;
+ out[7] = r11;
+
+ return r3;
+}
+#else
+static unsigned long epapr_hypercall(unsigned long *in,
+ unsigned long *out,
+ unsigned long nr)
+{
+ return EV_UNIMPLEMENTED;
+}
+#endif
+
+static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2)
+{
+ unsigned long in[8];
+ unsigned long out[8];
+ unsigned long r;
+
+ r = epapr_hypercall(in, out, nr);
+ *r2 = out[0];
+
+ return r;
+}
+
+static inline long epapr_hypercall0(unsigned int nr)
+{
+ unsigned long in[8];
+ unsigned long out[8];
+
+ return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall1(unsigned int nr, unsigned long p1)
+{
+ unsigned long in[8];
+ unsigned long out[8];
+
+ in[0] = p1;
+ return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall2(unsigned int nr, unsigned long p1,
+ unsigned long p2)
+{
+ unsigned long in[8];
+ unsigned long out[8];
+
+ in[0] = p1;
+ in[1] = p2;
+ return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall3(unsigned int nr, unsigned long p1,
+ unsigned long p2, unsigned long p3)
+{
+ unsigned long in[8];
+ unsigned long out[8];
+
+ in[0] = p1;
+ in[1] = p2;
+ in[2] = p3;
+ return epapr_hypercall(in, out, nr);
+}
+
+static inline long epapr_hypercall4(unsigned int nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4)
+{
+ unsigned long in[8];
+ unsigned long out[8];
+
+ in[0] = p1;
+ in[1] = p2;
+ in[2] = p3;
+ in[3] = p4;
+ return epapr_hypercall(in, out, nr);
+}
#endif /* !__ASSEMBLY__ */
#endif /* _EPAPR_HCALLS_H */
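The hypercall wrappers (epapr_hypercall0() through epapr_hypercall4()) move here from kvm_para.h, with a stub returning EV_UNIMPLEMENTED when CONFIG_EPAPR_PARAVIRT is off. A sketch mirroring the kvm_para.h hunk later in this diff; the wrapper function name is illustrative:

#include <asm/epapr_hcalls.h>
#include <asm/kvm_para.h>

static unsigned int query_kvm_features(void)
{
	unsigned long r;

	/* KVM_HCALL_TOKEN() folds the vendor token into the hcall number */
	if (epapr_hypercall0_1(KVM_HCALL_TOKEN(KVM_HC_FEATURES), &r))
		return 0;	/* hypercall failed or unimplemented */
	return r;
}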
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 243ce69ad68..66830618cc1 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -301,9 +301,12 @@ do_kvm_##n: \
beq 4f; /* if from kernel mode */ \
ACCOUNT_CPU_USER_ENTRY(r9, r10); \
SAVE_PPR(area, r9, r10); \
-4: std r2,GPR2(r1); /* save r2 in stackframe */ \
- SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
- SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
+4: EXCEPTION_PROLOG_COMMON_2(area) \
+ EXCEPTION_PROLOG_COMMON_3(n) \
+ ACCOUNT_STOLEN_TIME
+
+/* Save original regs values from save area to stack frame. */
+#define EXCEPTION_PROLOG_COMMON_2(area) \
ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
ld r10,area+EX_R10(r13); \
std r9,GPR9(r1); \
@@ -318,11 +321,16 @@ do_kvm_##n: \
ld r10,area+EX_CFAR(r13); \
std r10,ORIG_GPR3(r1); \
END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
+ GET_CTR(r10, area); \
+ std r10,_CTR(r1);
+
+#define EXCEPTION_PROLOG_COMMON_3(n) \
+ std r2,GPR2(r1); /* save r2 in stackframe */ \
+ SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
+ SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
mflr r9; /* Get LR, later save to stack */ \
ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
std r9,_LINK(r1); \
- GET_CTR(r10, area); \
- std r10,_CTR(r1); \
lbz r10,PACASOFTIRQEN(r13); \
mfspr r11,SPRN_XER; /* save XER in stackframe */ \
std r10,SOFTE(r1); \
@@ -332,8 +340,7 @@ do_kvm_##n: \
li r10,0; \
ld r11,exception_marker@toc(r2); \
std r10,RESULT(r1); /* clear regs->result */ \
- std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \
- ACCOUNT_STOLEN_TIME
+ std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
/*
* Exception vectors.
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 5c2c0233175..90f604bbcd1 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -58,52 +58,12 @@ enum fixed_addresses {
extern void __set_fixmap (enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags);
-#define set_fixmap(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL)
-/*
- * Some hardware wants to get fixmapped without caching.
- */
-#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NCG)
-
-#define clear_fixmap(idx) \
- __set_fixmap(idx, 0, __pgprot(0))
-
#define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
-#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-
-extern void __this_fixmap_does_not_exist(void);
-
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without tranlation, we catch the bug with a NULL-deference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static __always_inline unsigned long fix_to_virt(const unsigned int idx)
-{
- /*
- * this branch gets completely eliminated after inlining,
- * except when someone tries to use fixaddr indices in an
- * illegal way. (such as mixing up address types or using
- * out-of-range indices).
- *
- * If it doesn't get removed, the linker will complain
- * loudly with a reasonably clear error message..
- */
- if (idx >= __end_of_fixed_addresses)
- __this_fixmap_does_not_exist();
-
- return __fix_to_virt(idx);
-}
+#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NCG
-static inline unsigned long virt_to_fix(const unsigned long vaddr)
-{
- BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
- return __virt_to_fix(vaddr);
-}
+#include <asm-generic/fixmap.h>
#endif /* !__ASSEMBLY__ */
#endif
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h
index 420b45368fc..067fb0dca54 100644
--- a/arch/powerpc/include/asm/fsl_lbc.h
+++ b/arch/powerpc/include/asm/fsl_lbc.h
@@ -285,7 +285,7 @@ struct fsl_lbc_ctrl {
/* device info */
struct device *dev;
struct fsl_lbc_regs __iomem *regs;
- int irq;
+ int irq[2];
wait_queue_head_t irq_wait;
spinlock_t lock;
void *nand;
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 3bdcfce2c42..418fb654370 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -6,7 +6,8 @@
typedef struct {
unsigned int __softirq_pending;
- unsigned int timer_irqs;
+ unsigned int timer_irqs_event;
+ unsigned int timer_irqs_others;
unsigned int pmu_irqs;
unsigned int mce_exceptions;
unsigned int spurious_irqs;
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index d750336b171..623f2971ce0 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -127,7 +127,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
- return __pte(pte_update(mm, addr, ptep, ~0UL, 1));
+ return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
return __pte(pte_update(ptep, ~0UL, 0));
#endif
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 575fbf81fad..97d3869991c 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -191,8 +191,24 @@ DEF_MMIO_OUT_D(out_le32, 32, stw);
#endif /* __BIG_ENDIAN */
+/*
+ * Cache inhibited accessors for use in real mode; you don't want to use these
+ * unless you know what you're doing.
+ *
+ * NB. These use the cpu byte ordering.
+ */
+DEF_MMIO_OUT_X(out_rm8, 8, stbcix);
+DEF_MMIO_OUT_X(out_rm16, 16, sthcix);
+DEF_MMIO_OUT_X(out_rm32, 32, stwcix);
+DEF_MMIO_IN_X(in_rm8, 8, lbzcix);
+DEF_MMIO_IN_X(in_rm16, 16, lhzcix);
+DEF_MMIO_IN_X(in_rm32, 32, lwzcix);
+
#ifdef __powerpc64__
+DEF_MMIO_OUT_X(out_rm64, 64, stdcix);
+DEF_MMIO_IN_X(in_rm64, 64, ldcix);
+
#ifdef __BIG_ENDIAN__
DEF_MMIO_OUT_D(out_be64, 64, std);
DEF_MMIO_IN_D(in_be64, 64, ld);
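The new in_rm*/out_rm* accessors emit cache-inhibited loads and stores (lbzcix/stbcix and friends) that are safe to use in real mode; unlike the in_le/in_be families they use CPU byte order. A hedged sketch, with a hypothetical register block and layout:

#include <asm/io.h>

static void rm_toggle_bit0(u64 __iomem *reg)
{
	u64 v = in_rm64(reg);		/* ldcix: real-mode, cache-inhibited load */

	out_rm64(reg, v ^ 1);		/* stdcix: real-mode, cache-inhibited store */
}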
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index c34656a8925..42632c7a2a4 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -30,22 +30,19 @@
#include <asm/machdep.h>
#include <asm/types.h>
-#define IOMMU_PAGE_SHIFT 12
-#define IOMMU_PAGE_SIZE (ASM_CONST(1) << IOMMU_PAGE_SHIFT)
-#define IOMMU_PAGE_MASK (~((1 << IOMMU_PAGE_SHIFT) - 1))
-#define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE)
+#define IOMMU_PAGE_SHIFT_4K 12
+#define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
+#define IOMMU_PAGE_MASK_4K (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
+#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)
+
+#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
+#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
+#define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr))
/* Boot time flags */
extern int iommu_is_off;
extern int iommu_force_on;
-/* Pure 2^n version of get_order */
-static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
-{
- return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1;
-}
-
-
/*
* IOMAP_MAX_ORDER defines the largest contiguous block
* of dma space we can get. IOMAP_MAX_ORDER = 13
@@ -76,11 +73,21 @@ struct iommu_table {
struct iommu_pool large_pool;
struct iommu_pool pools[IOMMU_NR_POOLS];
unsigned long *it_map; /* A simple allocation bitmap for now */
+ unsigned long it_page_shift;/* table iommu page size */
#ifdef CONFIG_IOMMU_API
struct iommu_group *it_group;
#endif
+ void (*set_bypass)(struct iommu_table *tbl, bool enable);
};
+/* Pure 2^n version of get_order */
+static inline __attribute_const__
+int get_iommu_order(unsigned long size, struct iommu_table *tbl)
+{
+ return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
+}
+
+
struct scatterlist;
static inline void set_iommu_table_base(struct device *dev, void *base)
@@ -101,8 +108,34 @@ extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
*/
extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
int nid);
+#ifdef CONFIG_IOMMU_API
extern void iommu_register_group(struct iommu_table *tbl,
int pci_domain_number, unsigned long pe_num);
+extern int iommu_add_device(struct device *dev);
+extern void iommu_del_device(struct device *dev);
+#else
+static inline void iommu_register_group(struct iommu_table *tbl,
+ int pci_domain_number,
+ unsigned long pe_num)
+{
+}
+
+static inline int iommu_add_device(struct device *dev)
+{
+ return 0;
+}
+
+static inline void iommu_del_device(struct device *dev)
+{
+}
+#endif /* !CONFIG_IOMMU_API */
+
+static inline void set_iommu_table_base_and_group(struct device *dev,
+ void *base)
+{
+ set_iommu_table_base(dev, base);
+ iommu_add_device(dev);
+}
extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
struct scatterlist *sglist, int nelems,
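With it_page_shift stored per table, the IOMMU page geometry helpers now take the table pointer instead of assuming 4K, and get_iommu_order() follows suit. A small sketch of a caller; the function name is made up:

#include <asm/iommu.h>

static int table_pages_order(struct iommu_table *tbl, unsigned long bytes)
{
	/* number of IOMMU pages needed for 'bytes', as a power-of-two order */
	return get_iommu_order(bytes, tbl);
}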
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 1bd92fd43cf..19eb74a95b5 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -74,6 +74,7 @@
#define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39
#define BOOKE_INTERRUPT_HV_SYSCALL 40
#define BOOKE_INTERRUPT_HV_PRIV 41
+#define BOOKE_INTERRUPT_LRAT_ERROR 42
/* book3s */
@@ -91,14 +92,17 @@
#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800
#define BOOK3S_INTERRUPT_DECREMENTER 0x900
#define BOOK3S_INTERRUPT_HV_DECREMENTER 0x980
+#define BOOK3S_INTERRUPT_DOORBELL 0xa00
#define BOOK3S_INTERRUPT_SYSCALL 0xc00
#define BOOK3S_INTERRUPT_TRACE 0xd00
#define BOOK3S_INTERRUPT_H_DATA_STORAGE 0xe00
#define BOOK3S_INTERRUPT_H_INST_STORAGE 0xe20
#define BOOK3S_INTERRUPT_H_EMUL_ASSIST 0xe40
+#define BOOK3S_INTERRUPT_H_DOORBELL 0xe80
#define BOOK3S_INTERRUPT_PERFMON 0xf00
#define BOOK3S_INTERRUPT_ALTIVEC 0xf20
#define BOOK3S_INTERRUPT_VSX 0xf40
+#define BOOK3S_INTERRUPT_H_FAC_UNAVAIL 0xf80
#define BOOK3S_IRQPRIO_SYSTEM_RESET 0
#define BOOK3S_IRQPRIO_DATA_SEGMENT 1
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index bc23b1ba798..83851aabfdc 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -186,9 +186,6 @@ extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
-extern void kvmppc_load_up_fpu(void);
-extern void kvmppc_load_up_altivec(void);
-extern void kvmppc_load_up_vsx(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
@@ -271,16 +268,25 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
return vcpu->arch.pc;
}
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
- ulong pc = kvmppc_get_pc(vcpu);
+ return (vcpu->arch.shared->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
+}
+static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
+{
/* Load the instruction manually if it failed to do so in the
* exit path */
if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
- return vcpu->arch.last_inst;
+ return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
+ vcpu->arch.last_inst;
+}
+
+static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+{
+ return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
}
/*
@@ -290,14 +296,7 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
*/
static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
{
- ulong pc = kvmppc_get_pc(vcpu) - 4;
-
- /* Load the instruction manually if it failed to do so in the
- * exit path */
- if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
- kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
-
- return vcpu->arch.last_inst;
+ return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
}
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 192917d2239..f3a91dc02c9 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -88,6 +88,7 @@ struct kvmppc_host_state {
u8 hwthread_req;
u8 hwthread_state;
u8 host_ipi;
+ u8 ptid;
struct kvm_vcpu *kvm_vcpu;
struct kvmppc_vcore *kvm_vcore;
unsigned long xics_phys;
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index dd8f61510df..80d46b5a7ef 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -63,6 +63,12 @@ static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
return vcpu->arch.xer;
}
+static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
+{
+ /* XXX Would need to check TLB entry */
+ return false;
+}
+
static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
return vcpu->arch.last_inst;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 237d1d25b44..1eaea2dea17 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -288,6 +288,7 @@ struct kvmppc_vcore {
int n_woken;
int nap_count;
int napping_threads;
+ int first_vcpuid;
u16 pcpu;
u16 last_cpu;
u8 vcore_state;
@@ -298,10 +299,12 @@ struct kvmppc_vcore {
u64 stolen_tb;
u64 preempt_tb;
struct kvm_vcpu *runner;
+ struct kvm *kvm;
u64 tb_offset; /* guest timebase - host timebase */
ulong lpcr;
u32 arch_compat;
ulong pcr;
+ ulong dpdes; /* doorbell state (POWER8) */
};
#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
@@ -410,8 +413,7 @@ struct kvm_vcpu_arch {
ulong gpr[32];
- u64 fpr[32];
- u64 fpscr;
+ struct thread_fp_state fp;
#ifdef CONFIG_SPE
ulong evr[32];
@@ -420,12 +422,7 @@ struct kvm_vcpu_arch {
u64 acc;
#endif
#ifdef CONFIG_ALTIVEC
- vector128 vr[32];
- vector128 vscr;
-#endif
-
-#ifdef CONFIG_VSX
- u64 vsr[64];
+ struct thread_vr_state vr;
#endif
#ifdef CONFIG_KVM_BOOKE_HV
@@ -452,6 +449,7 @@ struct kvm_vcpu_arch {
ulong pc;
ulong ctr;
ulong lr;
+ ulong tar;
ulong xer;
u32 cr;
@@ -461,13 +459,30 @@ struct kvm_vcpu_arch {
ulong guest_owned_ext;
ulong purr;
ulong spurr;
+ ulong ic;
+ ulong vtb;
ulong dscr;
ulong amr;
ulong uamor;
+ ulong iamr;
u32 ctrl;
+ u32 dabrx;
ulong dabr;
+ ulong dawr;
+ ulong dawrx;
+ ulong ciabr;
ulong cfar;
ulong ppr;
+ ulong pspb;
+ ulong fscr;
+ ulong ebbhr;
+ ulong ebbrr;
+ ulong bescr;
+ ulong csigr;
+ ulong tacr;
+ ulong tcscr;
+ ulong acop;
+ ulong wort;
ulong shadow_srr1;
#endif
u32 vrsave; /* also USPRG0 */
@@ -502,10 +517,33 @@ struct kvm_vcpu_arch {
u32 ccr1;
u32 dbsr;
- u64 mmcr[3];
+ u64 mmcr[5];
u32 pmc[8];
+ u32 spmc[2];
u64 siar;
u64 sdar;
+ u64 sier;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ u64 tfhar;
+ u64 texasr;
+ u64 tfiar;
+
+ u32 cr_tm;
+ u64 lr_tm;
+ u64 ctr_tm;
+ u64 amr_tm;
+ u64 ppr_tm;
+ u64 dscr_tm;
+ u64 tar_tm;
+
+ ulong gpr_tm[32];
+
+ struct thread_fp_state fp_tm;
+
+ struct thread_vr_state vr_tm;
+ u32 vrsave_tm; /* also USPRG0 */
+
+#endif
#ifdef CONFIG_KVM_EXIT_TIMING
struct mutex exit_timing_lock;
@@ -546,6 +584,7 @@ struct kvm_vcpu_arch {
#endif
gpa_t paddr_accessed;
gva_t vaddr_accessed;
+ pgd_t *pgdir;
u8 io_gpr; /* GPR used as IO source/target */
u8 mmio_is_bigendian;
@@ -603,7 +642,6 @@ struct kvm_vcpu_arch {
struct list_head run_list;
struct task_struct *run_task;
struct kvm_run *kvm_run;
- pgd_t *pgdir;
spinlock_t vpa_update_lock;
struct kvmppc_vpa vpa;
@@ -616,9 +654,12 @@ struct kvm_vcpu_arch {
spinlock_t tbacct_lock;
u64 busy_stolen;
u64 busy_preempt;
+ unsigned long intr_msr;
#endif
};
+#define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
+
/* Values for vcpu->arch.state */
#define KVMPPC_VCPU_NOTREADY 0
#define KVMPPC_VCPU_RUNNABLE 1
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 2b119654b4c..336a91acb8b 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -39,10 +39,6 @@ static inline int kvm_para_available(void)
return 1;
}
-extern unsigned long kvm_hypercall(unsigned long *in,
- unsigned long *out,
- unsigned long nr);
-
#else
static inline int kvm_para_available(void)
@@ -50,82 +46,8 @@ static inline int kvm_para_available(void)
return 0;
}
-static unsigned long kvm_hypercall(unsigned long *in,
- unsigned long *out,
- unsigned long nr)
-{
- return EV_UNIMPLEMENTED;
-}
-
#endif
-static inline long kvm_hypercall0_1(unsigned int nr, unsigned long *r2)
-{
- unsigned long in[8];
- unsigned long out[8];
- unsigned long r;
-
- r = kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
- *r2 = out[0];
-
- return r;
-}
-
-static inline long kvm_hypercall0(unsigned int nr)
-{
- unsigned long in[8];
- unsigned long out[8];
-
- return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
-}
-
-static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
-{
- unsigned long in[8];
- unsigned long out[8];
-
- in[0] = p1;
- return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
-}
-
-static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
- unsigned long p2)
-{
- unsigned long in[8];
- unsigned long out[8];
-
- in[0] = p1;
- in[1] = p2;
- return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
-}
-
-static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
- unsigned long p2, unsigned long p3)
-{
- unsigned long in[8];
- unsigned long out[8];
-
- in[0] = p1;
- in[1] = p2;
- in[2] = p3;
- return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
-}
-
-static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
- unsigned long p2, unsigned long p3,
- unsigned long p4)
-{
- unsigned long in[8];
- unsigned long out[8];
-
- in[0] = p1;
- in[1] = p2;
- in[2] = p3;
- in[3] = p4;
- return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
-}
-
-
static inline unsigned int kvm_arch_para_features(void)
{
unsigned long r;
@@ -133,7 +55,7 @@ static inline unsigned int kvm_arch_para_features(void)
if (!kvm_para_available())
return 0;
- if(kvm_hypercall0_1(KVM_HC_FEATURES, &r))
+ if(epapr_hypercall0_1(KVM_HCALL_TOKEN(KVM_HC_FEATURES), &r))
return 0;
return r;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index c8317fbf92c..fcd53f0d34b 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -54,12 +54,13 @@ extern void kvmppc_handler_highmem(void);
extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
- int is_bigendian);
+ int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
- int is_bigendian);
+ int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
- u64 val, unsigned int bytes, int is_bigendian);
+ u64 val, unsigned int bytes,
+ int is_default_endian);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
struct kvm_vcpu *vcpu);
@@ -455,6 +456,12 @@ static inline void kvmppc_fix_ee_before_entry(void)
trace_hardirqs_on();
#ifdef CONFIG_PPC64
+ /*
+ * To avoid races, the caller must have gone directly from having
+ * interrupts fully-enabled to hard-disabled.
+ */
+ WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
+
/* Only need to enable IRQs by hard enabling them after this */
local_paca->irq_happened = 0;
local_paca->soft_enabled = 1;
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 844c28de7ec..d0a2a2f9956 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -132,8 +132,6 @@ struct slb_shadow {
} save_area[SLB_NUM_BOLTED];
} ____cacheline_aligned;
-extern struct slb_shadow slb_shadow[];
-
/*
* Layout of entries in the hypervisor's dispatch trace log buffer.
*/
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
new file mode 100644
index 00000000000..8e99edf6d96
--- /dev/null
+++ b/arch/powerpc/include/asm/mce.h
@@ -0,0 +1,197 @@
+/*
+ * Machine check exception header file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2013 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#ifndef __ASM_PPC64_MCE_H__
+#define __ASM_PPC64_MCE_H__
+
+#include <linux/bitops.h>
+
+/*
+ * Machine Check bits on power7 and power8
+ */
+#define P7_SRR1_MC_LOADSTORE(srr1) ((srr1) & PPC_BIT(42)) /* P8 too */
+
+/* SRR1 bits for machine check (On Power7 and Power8) */
+#define P7_SRR1_MC_IFETCH(srr1) ((srr1) & PPC_BITMASK(43, 45)) /* P8 too */
+
+#define P7_SRR1_MC_IFETCH_UE (0x1 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_SLB_PARITY (0x2 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_SLB_MULTIHIT (0x3 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_SLB_BOTH (0x4 << PPC_BITLSHIFT(45))
+#define P7_SRR1_MC_IFETCH_TLB_MULTIHIT (0x5 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_UE_TLB_RELOAD (0x6 << PPC_BITLSHIFT(45)) /* P8 too */
+#define P7_SRR1_MC_IFETCH_UE_IFU_INTERNAL (0x7 << PPC_BITLSHIFT(45))
+
+/* SRR1 bits for machine check (On Power8) */
+#define P8_SRR1_MC_IFETCH_ERAT_MULTIHIT (0x4 << PPC_BITLSHIFT(45))
+
+/* DSISR bits for machine check (On Power7 and Power8) */
+#define P7_DSISR_MC_UE (PPC_BIT(48)) /* P8 too */
+#define P7_DSISR_MC_UE_TABLEWALK (PPC_BIT(49)) /* P8 too */
+#define P7_DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52)) /* P8 too */
+#define P7_DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53)) /* P8 too */
+#define P7_DSISR_MC_SLB_PARITY_MFSLB (PPC_BIT(55)) /* P8 too */
+#define P7_DSISR_MC_SLB_MULTIHIT (PPC_BIT(56)) /* P8 too */
+#define P7_DSISR_MC_SLB_MULTIHIT_PARITY (PPC_BIT(57)) /* P8 too */
+
+/*
+ * DSISR bits for machine check (Power8) in addition to above.
+ * Secondary DERAT Multihit
+ */
+#define P8_DSISR_MC_ERAT_MULTIHIT_SEC (PPC_BIT(54))
+
+/* SLB error bits */
+#define P7_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_ERAT_MULTIHIT | \
+ P7_DSISR_MC_SLB_PARITY_MFSLB | \
+ P7_DSISR_MC_SLB_MULTIHIT | \
+ P7_DSISR_MC_SLB_MULTIHIT_PARITY)
+
+#define P8_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_SLB_ERRORS | \
+ P8_DSISR_MC_ERAT_MULTIHIT_SEC)
+enum MCE_Version {
+ MCE_V1 = 1,
+};
+
+enum MCE_Severity {
+ MCE_SEV_NO_ERROR = 0,
+ MCE_SEV_WARNING = 1,
+ MCE_SEV_ERROR_SYNC = 2,
+ MCE_SEV_FATAL = 3,
+};
+
+enum MCE_Disposition {
+ MCE_DISPOSITION_RECOVERED = 0,
+ MCE_DISPOSITION_NOT_RECOVERED = 1,
+};
+
+enum MCE_Initiator {
+ MCE_INITIATOR_UNKNOWN = 0,
+ MCE_INITIATOR_CPU = 1,
+};
+
+enum MCE_ErrorType {
+ MCE_ERROR_TYPE_UNKNOWN = 0,
+ MCE_ERROR_TYPE_UE = 1,
+ MCE_ERROR_TYPE_SLB = 2,
+ MCE_ERROR_TYPE_ERAT = 3,
+ MCE_ERROR_TYPE_TLB = 4,
+};
+
+enum MCE_UeErrorType {
+ MCE_UE_ERROR_INDETERMINATE = 0,
+ MCE_UE_ERROR_IFETCH = 1,
+ MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
+ MCE_UE_ERROR_LOAD_STORE = 3,
+ MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4,
+};
+
+enum MCE_SlbErrorType {
+ MCE_SLB_ERROR_INDETERMINATE = 0,
+ MCE_SLB_ERROR_PARITY = 1,
+ MCE_SLB_ERROR_MULTIHIT = 2,
+};
+
+enum MCE_EratErrorType {
+ MCE_ERAT_ERROR_INDETERMINATE = 0,
+ MCE_ERAT_ERROR_PARITY = 1,
+ MCE_ERAT_ERROR_MULTIHIT = 2,
+};
+
+enum MCE_TlbErrorType {
+ MCE_TLB_ERROR_INDETERMINATE = 0,
+ MCE_TLB_ERROR_PARITY = 1,
+ MCE_TLB_ERROR_MULTIHIT = 2,
+};
+
+struct machine_check_event {
+ enum MCE_Version version:8; /* 0x00 */
+ uint8_t in_use; /* 0x01 */
+ enum MCE_Severity severity:8; /* 0x02 */
+ enum MCE_Initiator initiator:8; /* 0x03 */
+ enum MCE_ErrorType error_type:8; /* 0x04 */
+ enum MCE_Disposition disposition:8; /* 0x05 */
+ uint8_t reserved_1[2]; /* 0x06 */
+ uint64_t gpr3; /* 0x08 */
+ uint64_t srr0; /* 0x10 */
+ uint64_t srr1; /* 0x18 */
+ union { /* 0x20 */
+ struct {
+ enum MCE_UeErrorType ue_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t physical_address_provided;
+ uint8_t reserved_1[5];
+ uint64_t effective_address;
+ uint64_t physical_address;
+ uint8_t reserved_2[8];
+ } ue_error;
+
+ struct {
+ enum MCE_SlbErrorType slb_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } slb_error;
+
+ struct {
+ enum MCE_EratErrorType erat_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } erat_error;
+
+ struct {
+ enum MCE_TlbErrorType tlb_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } tlb_error;
+ } u;
+};
+
+struct mce_error_info {
+ enum MCE_ErrorType error_type:8;
+ union {
+ enum MCE_UeErrorType ue_error_type:8;
+ enum MCE_SlbErrorType slb_error_type:8;
+ enum MCE_EratErrorType erat_error_type:8;
+ enum MCE_TlbErrorType tlb_error_type:8;
+ } u;
+ uint8_t reserved[2];
+};
+
+#define MAX_MC_EVT 100
+
+/* Release flags for get_mce_event() */
+#define MCE_EVENT_RELEASE true
+#define MCE_EVENT_DONTRELEASE false
+
+extern void save_mce_event(struct pt_regs *regs, long handled,
+ struct mce_error_info *mce_err, uint64_t addr);
+extern int get_mce_event(struct machine_check_event *mce, bool release);
+extern void release_mce_event(void);
+extern void machine_check_queue_event(void);
+extern void machine_check_print_event_info(struct machine_check_event *evt);
+extern uint64_t get_mce_fault_addr(struct machine_check_event *evt);
+
+#endif /* __ASM_PPC64_MCE_H__ */
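The new mce.h introduces a small event queue for machine checks taken in real mode. A hedged sketch of a consumer, assuming get_mce_event() returns nonzero when an event was copied out; the drain function is illustrative:

#include <asm/mce.h>

static void drain_mce_queue(void)
{
	struct machine_check_event evt;

	while (get_mce_event(&evt, MCE_EVENT_RELEASE)) {
		machine_check_print_event_info(&evt);
		if (evt.disposition == MCE_DISPOSITION_NOT_RECOVERED)
			break;	/* leave unrecovered errors to the caller */
	}
}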
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 936db360790..89b785d1684 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -286,8 +286,21 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
extern int mmu_linear_psize;
extern int mmu_vmemmap_psize;
+struct tlb_core_data {
+ /* For software way selection, as on Freescale TLB1 */
+ u8 esel_next, esel_max, esel_first;
+
+ /* Per-core spinlock for e6500 TLB handlers (no tlbsrx.) */
+ u8 lock;
+};
+
#ifdef CONFIG_PPC64
extern unsigned long linear_map_top;
+extern int book3e_htw_mode;
+
+#define PPC_HTW_NONE 0
+#define PPC_HTW_IBM 1
+#define PPC_HTW_E6500 2
/*
* 64-bit booke platforms don't load the tlb in the tlb miss handler code.
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 691fd8aca93..f8d1d6dcf7d 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -180,16 +180,17 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
#define MMU_PAGE_64K_AP 3 /* "Admixed pages" (hash64 only) */
#define MMU_PAGE_256K 4
#define MMU_PAGE_1M 5
-#define MMU_PAGE_4M 6
-#define MMU_PAGE_8M 7
-#define MMU_PAGE_16M 8
-#define MMU_PAGE_64M 9
-#define MMU_PAGE_256M 10
-#define MMU_PAGE_1G 11
-#define MMU_PAGE_16G 12
-#define MMU_PAGE_64G 13
-
-#define MMU_PAGE_COUNT 14
+#define MMU_PAGE_2M 6
+#define MMU_PAGE_4M 7
+#define MMU_PAGE_8M 8
+#define MMU_PAGE_16M 9
+#define MMU_PAGE_64M 10
+#define MMU_PAGE_256M 11
+#define MMU_PAGE_1G 12
+#define MMU_PAGE_16G 13
+#define MMU_PAGE_64G 14
+
+#define MMU_PAGE_COUNT 15
#if defined(CONFIG_PPC_STD_MMU_64)
/* 64-bit classic hash table MMU */
diff --git a/arch/powerpc/include/asm/mpc5121.h b/arch/powerpc/include/asm/mpc5121.h
index 887d3d6133e..4a69cd1d504 100644
--- a/arch/powerpc/include/asm/mpc5121.h
+++ b/arch/powerpc/include/asm/mpc5121.h
@@ -37,7 +37,12 @@ struct mpc512x_ccm {
u32 cccr; /* CFM Clock Control Register */
u32 dccr; /* DIU Clock Control Register */
u32 mscan_ccr[4]; /* MSCAN Clock Control Registers */
- u8 res[0x98]; /* Reserved */
+ u32 out_ccr[4]; /* OUT CLK Configure Registers */
+ u32 rsv0[2]; /* Reserved */
+ u32 scfr3; /* System Clock Frequency Register 3 */
+ u32 rsv1[3]; /* Reserved */
+ u32 spll_lock_cnt; /* System PLL Lock Counter */
+ u8 res[0x6c]; /* Reserved */
};
/*
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 7bdcf340016..40157e2ca69 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -33,6 +33,28 @@ struct opal_takeover_args {
u64 rd_loc; /* r11 */
};
+/*
+ * SG entry
+ *
+ * WARNING: The current implementation requires each entry
+ * to represent a block that is 4k aligned *and* each block
+ * size except the last one in the list to be as well.
+ */
+struct opal_sg_entry {
+ void *data;
+ long length;
+};
+
+/* sg list */
+struct opal_sg_list {
+ unsigned long num_entries;
+ struct opal_sg_list *next;
+ struct opal_sg_entry entry[];
+};
+
+/* We calculate number of sg entries based on PAGE_SIZE */
+#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
+
extern long opal_query_takeover(u64 *hal_size, u64 *hal_align);
extern long opal_do_takeover(struct opal_takeover_args *args);
@@ -132,6 +154,9 @@ extern int opal_enter_rtas(struct rtas_args *args,
#define OPAL_FLASH_VALIDATE 76
#define OPAL_FLASH_MANAGE 77
#define OPAL_FLASH_UPDATE 78
+#define OPAL_GET_MSG 85
+#define OPAL_CHECK_ASYNC_COMPLETION 86
+#define OPAL_SYNC_HOST_REBOOT 87
#ifndef __ASSEMBLY__
@@ -211,7 +236,16 @@ enum OpalPendingState {
OPAL_EVENT_ERROR_LOG = 0x40,
OPAL_EVENT_EPOW = 0x80,
OPAL_EVENT_LED_STATUS = 0x100,
- OPAL_EVENT_PCI_ERROR = 0x200
+ OPAL_EVENT_PCI_ERROR = 0x200,
+ OPAL_EVENT_MSG_PENDING = 0x800,
+};
+
+enum OpalMessageType {
+ OPAL_MSG_ASYNC_COMP = 0,
+ OPAL_MSG_MEM_ERR,
+ OPAL_MSG_EPOW,
+ OPAL_MSG_SHUTDOWN,
+ OPAL_MSG_TYPE_MAX,
};
/* Machine check related definitions */
@@ -311,12 +345,16 @@ enum OpalMveEnableAction {
OPAL_ENABLE_MVE = 1
};
-enum OpalPciResetAndReinitScope {
+enum OpalPciResetScope {
OPAL_PHB_COMPLETE = 1, OPAL_PCI_LINK = 2, OPAL_PHB_ERROR = 3,
OPAL_PCI_HOT_RESET = 4, OPAL_PCI_FUNDAMENTAL_RESET = 5,
OPAL_PCI_IODA_TABLE_RESET = 6,
};
+enum OpalPciReinitScope {
+ OPAL_REINIT_PCI_DEV = 1000
+};
+
enum OpalPciResetState {
OPAL_DEASSERT_RESET = 0,
OPAL_ASSERT_RESET = 1
@@ -356,6 +394,12 @@ enum OpalLPCAddressType {
OPAL_LPC_FW = 2,
};
+struct opal_msg {
+ uint32_t msg_type;
+ uint32_t reserved;
+ uint64_t params[8];
+};
+
struct opal_machine_check_event {
enum OpalMCE_Version version:8; /* 0x00 */
uint8_t in_use; /* 0x01 */
@@ -404,6 +448,58 @@ struct opal_machine_check_event {
} u;
};
+/* FSP memory errors handling */
+enum OpalMemErr_Version {
+ OpalMemErr_V1 = 1,
+};
+
+enum OpalMemErrType {
+ OPAL_MEM_ERR_TYPE_RESILIENCE = 0,
+ OPAL_MEM_ERR_TYPE_DYN_DALLOC,
+ OPAL_MEM_ERR_TYPE_SCRUB,
+};
+
+/* Memory Resilience error type */
+enum OpalMemErr_ResilErrType {
+ OPAL_MEM_RESILIENCE_CE = 0,
+ OPAL_MEM_RESILIENCE_UE,
+ OPAL_MEM_RESILIENCE_UE_SCRUB,
+};
+
+/* Dynamic Memory Deallocation type */
+enum OpalMemErr_DynErrType {
+ OPAL_MEM_DYNAMIC_DEALLOC = 0,
+};
+
+/* OpalMemoryErrorData->flags */
+#define OPAL_MEM_CORRECTED_ERROR 0x0001
+#define OPAL_MEM_THRESHOLD_EXCEEDED 0x0002
+#define OPAL_MEM_ACK_REQUIRED 0x8000
+
+struct OpalMemoryErrorData {
+ enum OpalMemErr_Version version:8; /* 0x00 */
+ enum OpalMemErrType type:8; /* 0x01 */
+ uint16_t flags; /* 0x02 */
+ uint8_t reserved_1[4]; /* 0x04 */
+
+ union {
+ /* Memory Resilience corrected/uncorrected error info */
+ struct {
+ enum OpalMemErr_ResilErrType resil_err_type:8;
+ uint8_t reserved_1[7];
+ uint64_t physical_address_start;
+ uint64_t physical_address_end;
+ } resilience;
+ /* Dynamic memory deallocation error info */
+ struct {
+ enum OpalMemErr_DynErrType dyn_err_type:8;
+ uint8_t reserved_1[7];
+ uint64_t physical_address_start;
+ uint64_t physical_address_end;
+ } dyn_dealloc;
+ } u;
+};
+
enum {
OPAL_P7IOC_DIAG_TYPE_NONE = 0,
OPAL_P7IOC_DIAG_TYPE_RGC = 1,
@@ -710,7 +806,7 @@ int64_t opal_pci_get_phb_diag_data(uint64_t phb_id, void *diag_buffer,
int64_t opal_pci_get_phb_diag_data2(uint64_t phb_id, void *diag_buffer,
uint64_t diag_buffer_len);
int64_t opal_pci_fence_phb(uint64_t phb_id);
-int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope);
+int64_t opal_pci_reinit(uint64_t phb_id, uint64_t reinit_scope, uint64_t data);
int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
int64_t opal_get_epow_status(__be64 *status);
@@ -731,6 +827,10 @@ int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
int64_t opal_manage_flash(uint8_t op);
int64_t opal_update_flash(uint64_t blk_list);
+int64_t opal_get_msg(uint64_t buffer, size_t size);
+int64_t opal_check_completion(uint64_t buffer, size_t size, uint64_t token);
+int64_t opal_sync_host_reboot(void);
+
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
@@ -744,6 +844,8 @@ extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
int depth, void *data);
extern int opal_notifier_register(struct notifier_block *nb);
+extern int opal_message_notifier_register(enum OpalMessageType msg_type,
+ struct notifier_block *nb);
extern void opal_notifier_enable(void);
extern void opal_notifier_disable(void);
extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
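Alongside the new OPAL message plumbing (OPAL_EVENT_MSG_PENDING, struct opal_msg), opal_message_notifier_register() lets a driver subscribe per message type. A sketch, assuming the notifier data pointer is the struct opal_msg; names prefixed my_ are hypothetical:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/opal.h>

static int my_epow_cb(struct notifier_block *nb, unsigned long type, void *data)
{
	struct opal_msg *msg = data;

	pr_info("OPAL EPOW message, param[0]=0x%llx\n",
		(unsigned long long)msg->params[0]);
	return NOTIFY_OK;
}

static struct notifier_block my_epow_nb = {
	.notifier_call	= my_epow_cb,
};

static int __init my_epow_init(void)
{
	return opal_message_notifier_register(OPAL_MSG_EPOW, &my_epow_nb);
}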
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index b6ea9e068c1..9c5dbc3833f 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -16,7 +16,6 @@
#ifdef CONFIG_PPC64
-#include <linux/init.h>
#include <asm/types.h>
#include <asm/lppaca.h>
#include <asm/mmu.h>
@@ -113,6 +112,10 @@ struct paca_struct {
/* Keep pgd in the same cacheline as the start of extlb */
pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */
pgd_t *kernel_pgd; /* Kernel PGD */
+
+ /* Shared by all threads of a core -- points to tcd of first thread */
+ struct tlb_core_data *tcd_ptr;
+
/* We can have up to 3 levels of reentrancy in the TLB miss handler */
u64 extlb[3][EX_TLB_SIZE / sizeof(u64)];
u64 exmc[8]; /* used for machine checks */
@@ -123,6 +126,8 @@ struct paca_struct {
void *mc_kstack;
void *crit_kstack;
void *dbg_kstack;
+
+ struct tlb_core_data tcd;
#endif /* CONFIG_PPC_BOOK3E */
mm_context_t context;
@@ -152,6 +157,15 @@ struct paca_struct {
*/
struct opal_machine_check_event *opal_mc_evt;
#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* Exclusive emergency stack pointer for machine check exception. */
+ void *mc_emergency_sp;
+ /*
+ * Flag to check whether we are in machine check early handler
+ * and already using emergency stack.
+ */
+ u16 in_mce;
+#endif
/* Stuff for accurate time accounting */
u64 user_time; /* accumulated usermode TB ticks */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 4a191c47286..eb9261024f5 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -195,6 +195,7 @@ extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
static inline unsigned long pte_update(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, unsigned long clr,
+ unsigned long set,
int huge)
{
#ifdef PTE_ATOMIC_UPDATES
@@ -205,14 +206,15 @@ static inline unsigned long pte_update(struct mm_struct *mm,
andi. %1,%0,%6\n\
bne- 1b \n\
andc %1,%0,%4 \n\
+ or %1,%1,%7\n\
stdcx. %1,0,%3 \n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
- : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
+ : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
: "cc" );
#else
unsigned long old = pte_val(*ptep);
- *ptep = __pte(old & ~clr);
+ *ptep = __pte((old & ~clr) | set);
#endif
/* huge pages use the old page table lock */
if (!huge)
@@ -231,9 +233,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
{
unsigned long old;
- if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
+ if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
return 0;
- old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
+ old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -252,7 +254,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
if ((pte_val(*ptep) & _PAGE_RW) == 0)
return;
- pte_update(mm, addr, ptep, _PAGE_RW, 0);
+ pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
@@ -261,7 +263,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
if ((pte_val(*ptep) & _PAGE_RW) == 0)
return;
- pte_update(mm, addr, ptep, _PAGE_RW, 1);
+ pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
}
/*
@@ -284,14 +286,14 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
+ unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
return __pte(old);
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t * ptep)
{
- pte_update(mm, addr, ptep, ~0UL, 0);
+ pte_update(mm, addr, ptep, ~0UL, 0, 0);
}
@@ -506,7 +508,9 @@ extern int pmdp_set_access_flags(struct vm_area_struct *vma,
extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
unsigned long addr,
- pmd_t *pmdp, unsigned long clr);
+ pmd_t *pmdp,
+ unsigned long clr,
+ unsigned long set);
static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
@@ -515,7 +519,7 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
return 0;
- old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED);
+ old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
return ((old & _PAGE_ACCESSED) != 0);
}
@@ -542,7 +546,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
return;
- pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW);
+ pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
}
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
@@ -558,5 +562,19 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp);
+
+#define pmd_move_must_withdraw pmd_move_must_withdraw
+struct spinlock;
+static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+ struct spinlock *old_pmd_ptl)
+{
+ /*
+ * Archs like ppc64 use pgtable to store per pmd
+ * specific information. So when we switch the pmd,
+ * we should also withdraw and deposit the pgtable
+ */
+ return true;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */
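pte_update() and pmd_hugepage_update() gain a separate 'set' mask so a caller can clear and set bits within the same atomic ldarx/stdcx. sequence; existing callers simply pass 0. The NUMA-balancing helpers added to pgtable.h in this same patch are the first users; a minimal sketch of that pattern (the wrapper name is hypothetical):

#include <asm/pgtable.h>

/* turn a present pte into a NUMA-hinting-fault pte in one atomic update */
static void make_numa_pte(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* clear _PAGE_PRESENT and set _PAGE_NUMA together */
	pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);
}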
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 7d6eacf249c..3ebb188c3ff 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -3,6 +3,7 @@
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
+#include <linux/mmdebug.h>
#include <asm/processor.h> /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
@@ -33,10 +34,95 @@ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
-static inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; }
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+#ifdef CONFIG_NUMA_BALANCING
+
+static inline int pte_present(pte_t pte)
+{
+ return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA);
+}
+
+#define pte_numa pte_numa
+static inline int pte_numa(pte_t pte)
+{
+ return (pte_val(pte) &
+ (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
+}
+
+#define pte_mknonnuma pte_mknonnuma
+static inline pte_t pte_mknonnuma(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_NUMA;
+ pte_val(pte) |= _PAGE_PRESENT | _PAGE_ACCESSED;
+ return pte;
+}
+
+#define pte_mknuma pte_mknuma
+static inline pte_t pte_mknuma(pte_t pte)
+{
+ /*
+ * We should not set _PAGE_NUMA on non-present ptes. Also clear the
+ * present bit so that hash_page will return 1 and we collect this
+ * as a numa fault.
+ */
+ if (pte_present(pte)) {
+ pte_val(pte) |= _PAGE_NUMA;
+ pte_val(pte) &= ~_PAGE_PRESENT;
+ } else
+ VM_BUG_ON(1);
+ return pte;
+}
+
+#define ptep_set_numa ptep_set_numa
+static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ if ((pte_val(*ptep) & _PAGE_PRESENT) == 0)
+ VM_BUG_ON(1);
+
+ pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);
+ return;
+}
+
+#define pmd_numa pmd_numa
+static inline int pmd_numa(pmd_t pmd)
+{
+ return pte_numa(pmd_pte(pmd));
+}
+
+#define pmdp_set_numa pmdp_set_numa
+static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp)
+{
+ if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0)
+ VM_BUG_ON(1);
+
+ pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA);
+ return;
+}
+
+#define pmd_mknonnuma pmd_mknonnuma
+static inline pmd_t pmd_mknonnuma(pmd_t pmd)
+{
+ return pte_pmd(pte_mknonnuma(pmd_pte(pmd)));
+}
+
+#define pmd_mknuma pmd_mknuma
+static inline pmd_t pmd_mknuma(pmd_t pmd)
+{
+ return pte_pmd(pte_mknuma(pmd_pte(pmd)));
+}
+
+# else
+
+static inline int pte_present(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_PRESENT;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
/* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*
@@ -223,6 +309,27 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
#endif
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
unsigned *shift);
+
+static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva,
+ unsigned long *pte_sizep)
+{
+ pte_t *ptep;
+ unsigned long ps = *pte_sizep;
+ unsigned int shift;
+
+ ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
+ if (!ptep)
+ return NULL;
+ if (shift)
+ *pte_sizep = 1ul << shift;
+ else
+ *pte_sizep = PAGE_SIZE;
+
+ if (ps > *pte_sizep)
+ return NULL;
+
+ return ptep;
+}
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index f595b98079e..6586a40a46c 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -4,7 +4,6 @@
#ifndef _ASM_POWERPC_PPC_ASM_H
#define _ASM_POWERPC_PPC_ASM_H
-#include <linux/init.h>
#include <linux/stringify.h>
#include <asm/asm-compat.h>
#include <asm/processor.h>
@@ -295,6 +294,11 @@ n:
* you want to access various offsets within it). On ppc32 this is
* identical to LOAD_REG_IMMEDIATE.
*
+ * LOAD_REG_ADDR_PIC(rn, name)
+ * Loads the address of label 'name' into register 'rn'. Use this when
+ * the kernel doesn't run at the linked or relocated address. Please
+ * note that this macro will clobber the lr register.
+ *
* LOAD_REG_ADDRBASE(rn, name)
* ADDROFF(name)
* LOAD_REG_ADDRBASE loads part of the address of label 'name' into
@@ -305,6 +309,14 @@ n:
* LOAD_REG_ADDRBASE(rX, name)
* ld rY,ADDROFF(name)(rX)
*/
+
+/* Be careful, this will clobber the lr register. */
+#define LOAD_REG_ADDR_PIC(reg, name) \
+ bl 0f; \
+0: mflr reg; \
+ addis reg,reg,(name - 0b)@ha; \
+ addi reg,reg,(name - 0b)@l;
+
#ifdef __powerpc64__
#define LOAD_REG_IMMEDIATE(reg,expr) \
lis reg,(expr)@highest; \
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index fc14a38c7cc..b62de43ae5f 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -256,6 +256,8 @@ struct thread_struct {
unsigned long evr[32]; /* upper 32-bits of SPE regs */
u64 acc; /* Accumulator */
unsigned long spefscr; /* SPE & eFP status */
+ unsigned long spefscr_last; /* SPEFSCR value on last prctl
+ call or trap return */
int used_spe; /* set if process has used spe */
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -317,7 +319,9 @@ struct thread_struct {
(_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)
#ifdef CONFIG_SPE
-#define SPEFSCR_INIT .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
+#define SPEFSCR_INIT \
+ .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, \
+ .spefscr_last = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
#else
#define SPEFSCR_INIT
#endif
@@ -373,6 +377,8 @@ extern int set_endian(struct task_struct *tsk, unsigned int val);
extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
+extern void fp_enable(void);
+extern void vec_enable(void);
extern void load_fp_state(struct thread_fp_state *fp);
extern void store_fp_state(struct thread_fp_state *fp);
extern void load_vr_state(struct thread_vr_state *vr);
@@ -444,13 +450,6 @@ enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
extern int powersave_nap; /* set if nap mode can be used in idle loop */
extern void power7_nap(void);
-
-#ifdef CONFIG_PSERIES_IDLE
-extern void update_smt_snooze_delay(int cpu, int residency);
-#else
-static inline void update_smt_snooze_delay(int cpu, int residency) {}
-#endif
-
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index 678a7c1d9cb..a1bc7e75842 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -21,7 +21,6 @@
#if !defined(_ASM_POWERPC_PS3_H)
#define _ASM_POWERPC_PS3_H
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <asm/cell-pmu.h>
diff --git a/arch/powerpc/include/asm/pte-hash64.h b/arch/powerpc/include/asm/pte-hash64.h
index 0419eeb5327..2505d8eab15 100644
--- a/arch/powerpc/include/asm/pte-hash64.h
+++ b/arch/powerpc/include/asm/pte-hash64.h
@@ -19,7 +19,7 @@
#define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */
#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */
#define _PAGE_GUARDED 0x0008
-#define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */
+/* We can derive Memory coherence from _PAGE_NO_CACHE */
#define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */
#define _PAGE_DIRTY 0x0080 /* C: page changed */
@@ -27,6 +27,12 @@
#define _PAGE_RW 0x0200 /* software: user write access allowed */
#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */
+/*
+ * Used for tracking numa faults
+ */
+#define _PAGE_NUMA 0x00000010 /* Gather numa placement stats */
+
+
/* No separate kernel read-only */
#define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */
#define _PAGE_KERNEL_RO _PAGE_KERNEL_RW
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index fa8388ed94c..90c06ec6eff 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -223,17 +223,26 @@
#define CTRL_TE 0x00c00000 /* thread enable */
#define CTRL_RUNLATCH 0x1
#define SPRN_DAWR 0xB4
+#define SPRN_CIABR 0xBB
+#define CIABR_PRIV 0x3
+#define CIABR_PRIV_USER 1
+#define CIABR_PRIV_SUPER 2
+#define CIABR_PRIV_HYPER 3
#define SPRN_DAWRX 0xBC
-#define DAWRX_USER (1UL << 0)
-#define DAWRX_KERNEL (1UL << 1)
-#define DAWRX_HYP (1UL << 2)
+#define DAWRX_USER __MASK(0)
+#define DAWRX_KERNEL __MASK(1)
+#define DAWRX_HYP __MASK(2)
+#define DAWRX_WTI __MASK(3)
+#define DAWRX_WT __MASK(4)
+#define DAWRX_DR __MASK(5)
+#define DAWRX_DW __MASK(6)
#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
#define SPRN_DABR2 0x13D /* e300 */
#define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */
-#define DABRX_USER (1UL << 0)
-#define DABRX_KERNEL (1UL << 1)
-#define DABRX_HYP (1UL << 2)
-#define DABRX_BTI (1UL << 3)
+#define DABRX_USER __MASK(0)
+#define DABRX_KERNEL __MASK(1)
+#define DABRX_HYP __MASK(2)
+#define DABRX_BTI __MASK(3)
#define DABRX_ALL (DABRX_BTI | DABRX_HYP | DABRX_KERNEL | DABRX_USER)
#define SPRN_DAR 0x013 /* Data Address Register */
#define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
@@ -260,6 +269,8 @@
#define SPRN_HRMOR 0x139 /* Real mode offset register */
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
+#define SPRN_IC 0x350 /* Virtual Instruction Count */
+#define SPRN_VTB 0x351 /* Virtual Time Base */
/* HFSCR and FSCR bit numbers are the same */
#define FSCR_TAR_LG 8 /* Enable Target Address Register */
#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
@@ -298,9 +309,13 @@
#define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */
#define LPCR_RMLS_SH (63-37)
#define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */
+#define LPCR_AIL 0x01800000 /* Alternate interrupt location */
#define LPCR_AIL_0 0x00000000 /* MMU off exception offset 0x0 */
#define LPCR_AIL_3 0x01800000 /* MMU on exception offset 0xc00...4xxx */
-#define LPCR_PECE 0x00007000 /* powersave exit cause enable */
+#define LPCR_ONL 0x00040000 /* online - PURR/SPURR count */
+#define LPCR_PECE 0x0001f000 /* powersave exit cause enable */
+#define LPCR_PECEDP 0x00010000 /* directed priv dbells cause exit */
+#define LPCR_PECEDH 0x00008000 /* directed hyp dbells cause exit */
#define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */
#define LPCR_PECE1 0x00002000 /* decrementer can cause exit */
#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
@@ -322,6 +337,8 @@
#define SPRN_PCR 0x152 /* Processor compatibility register */
#define PCR_VEC_DIS (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */
#define PCR_VSX_DIS (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */
+#define PCR_TM_DIS (1ul << (63-2)) /* Trans. memory disable (POWER8) */
+#define PCR_ARCH_206 0x4 /* Architecture 2.06 */
#define PCR_ARCH_205 0x2 /* Architecture 2.05 */
#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */
@@ -368,6 +385,8 @@
#define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */
#define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */
#define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */
+#define SPRN_DHDES 0x0B1 /* Directed Hyp. Doorbell Exc. State */
+#define SPRN_DPDES 0x0B0 /* Directed Priv. Doorbell Exc. State */
#define SPRN_EAR 0x11A /* External Address Register */
#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */
#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Register */
@@ -427,6 +446,7 @@
#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
#define SPRN_IABR2 0x3FA /* 83xx */
#define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */
+#define SPRN_IAMR 0x03D /* Instr. Authority Mask Reg */
#define SPRN_HID4 0x3F4 /* 970 HID4 */
#define HID4_LPES0 (1ul << (63-0)) /* LPAR env. sel. bit 0 */
#define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */
@@ -541,6 +561,7 @@
#define SPRN_PIR 0x3FF /* Processor Identification Register */
#endif
#define SPRN_TIR 0x1BE /* Thread Identification Register */
+#define SPRN_PSPB 0x09F /* Problem State Priority Boost reg */
#define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */
#define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */
#define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */
@@ -682,6 +703,7 @@
#define SPRN_EBBHR 804 /* Event based branch handler register */
#define SPRN_EBBRR 805 /* Event based branch return register */
#define SPRN_BESCR 806 /* Branch event status and control register */
+#define SPRN_WORT 895 /* Workload optimization register - thread */
#define SPRN_PMC1 787
#define SPRN_PMC2 788
@@ -698,6 +720,11 @@
#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
+#define SPRN_TACR 888
+#define SPRN_TCSCR 889
+#define SPRN_CSIGR 890
+#define SPRN_SPMC1 892
+#define SPRN_SPMC2 893
/* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */
#define MMCR0_USER_MASK (MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO)
@@ -1075,6 +1102,8 @@
#define PVR_8560 0x80200000
#define PVR_VER_E500V1 0x8020
#define PVR_VER_E500V2 0x8021
+#define PVR_VER_E6500 0x8040
+
/*
* For the 8xx processors, all of them report the same PVR family for
* the PowerPC core. The various versions of these processors must be
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 2e31aacd8ac..163c3b05a76 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -101,6 +101,7 @@
#define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */
#define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */
#define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */
+#define SPRN_IVOR42 0x1B4 /* Interrupt Vector Offset Register 42 */
#define SPRN_GIVOR2 0x1B8 /* Guest IVOR2 */
#define SPRN_GIVOR3 0x1B9 /* Guest IVOR3 */
#define SPRN_GIVOR4 0x1BA /* Guest IVOR4 */
@@ -170,6 +171,7 @@
#define SPRN_L2CSR1 0x3FA /* L2 Data Cache Control and Status Register 1 */
#define SPRN_DCCR 0x3FA /* Data Cache Cacheability Register */
#define SPRN_ICCR 0x3FB /* Instruction Cache Cacheability Register */
+#define SPRN_PWRMGTCR0 0x3FB /* Power management control register 0 */
#define SPRN_SVR 0x3FF /* System Version Register */
/*
@@ -216,6 +218,14 @@
#define CCR1_DPC 0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */
#define CCR1_TCS 0x00000080 /* Timer Clock Select */
+/* Bit definitions for PWRMGTCR0. */
+#define PWRMGTCR0_PW20_WAIT (1 << 14) /* PW20 state enable bit */
+#define PWRMGTCR0_PW20_ENT_SHIFT 8
+#define PWRMGTCR0_PW20_ENT 0x3F00
+#define PWRMGTCR0_AV_IDLE_PD_EN (1 << 22) /* Altivec idle enable */
+#define PWRMGTCR0_AV_IDLE_CNT_SHIFT 16
+#define PWRMGTCR0_AV_IDLE_CNT 0x3F0000
+
/* Bit definitions for the MCSR. */
#define MCSR_MCS 0x80000000 /* Machine Check Summary */
#define MCSR_IB 0x40000000 /* Instruction PLB Error */
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 4ee06fe15de..d0e784e0ff4 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -8,6 +8,7 @@
#ifdef __powerpc64__
+extern char __start_interrupts[];
extern char __end_interrupts[];
extern char __prom_init_toc_start[];
@@ -21,6 +22,17 @@ static inline int in_kernel_text(unsigned long addr)
return 0;
}
+static inline int overlaps_interrupt_vector_text(unsigned long start,
+ unsigned long end)
+{
+ unsigned long real_start, real_end;
+ real_start = __start_interrupts - _stext;
+ real_end = __end_interrupts - _stext;
+
+ return start < (unsigned long)__va(real_end) &&
+ (unsigned long)__va(real_start) < end;
+}
+
static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
{
return start < (unsigned long)__init_end &&
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 703a8412dac..11ba86e1763 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -26,6 +26,7 @@ extern void reloc_got2(unsigned long);
void check_for_initrd(void);
void do_init_bootmem(void);
void setup_panic(void);
+#define ARCH_PANIC_TIMEOUT 180
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 5f54a744dcc..35aa339410b 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -28,7 +28,7 @@
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
-#define arch_spin_is_locked(x) ((x)->slock != 0)
+#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */
#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
@@ -54,6 +54,16 @@
#define SYNC_IO
#endif
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.slock == 0;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ return !arch_spin_value_unlocked(*lock);
+}
+
/*
* This returns the old value in the lock, so we succeeded
* in getting the lock if the return value is 0.
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index aace9054761..0e83e7d8c73 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -25,10 +25,8 @@ static inline void save_tar(struct thread_struct *prev)
static inline void save_tar(struct thread_struct *prev) {}
#endif
-extern void load_up_fpu(void);
extern void enable_kernel_fp(void);
extern void enable_kernel_altivec(void);
-extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void __giveup_vsx(struct task_struct *);
extern void giveup_vsx(struct task_struct *);
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 43523fe0d8b..3ddf7027670 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -359,3 +359,5 @@ COMPAT_SYS(process_vm_readv)
COMPAT_SYS(process_vm_writev)
SYSCALL(finit_module)
SYSCALL(ni_syscall) /* sys_kcmp */
+SYSCALL_SPU(sched_setattr)
+SYSCALL_SPU(sched_getattr)
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 9854c564ac5..b034ecdb7c7 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -91,8 +91,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
#define TIF_32BIT 4 /* 32 bit binary */
-#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */
-#define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
+#define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SINGLESTEP 8 /* singlestepping active */
#define TIF_NOHZ 9 /* in adaptive nohz mode */
@@ -115,8 +114,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_32BIT (1<<TIF_32BIT)
-#define _TIF_PERFMON_WORK (1<<TIF_PERFMON_WORK)
-#define _TIF_PERFMON_CTXSW (1<<TIF_PERFMON_CTXSW)
+#define _TIF_RESTORE_TM (1<<TIF_RESTORE_TM)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
@@ -132,7 +130,8 @@ static inline struct thread_info *current_thread_info(void)
_TIF_NOHZ)
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_NOTIFY_RESUME | _TIF_UPROBE)
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_RESTORE_TM)
#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
/* Bits in local_flags */
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
index 9dfbc34bdbf..0c9f8b74dd9 100644
--- a/arch/powerpc/include/asm/tm.h
+++ b/arch/powerpc/include/asm/tm.h
@@ -15,6 +15,7 @@ extern void do_load_up_transact_altivec(struct thread_struct *thread);
extern void tm_enable(void);
extern void tm_reclaim(struct thread_struct *thread,
unsigned long orig_msr, uint8_t cause);
+extern void tm_reclaim_current(uint8_t cause);
extern void tm_recheckpoint(struct thread_struct *thread,
unsigned long orig_msr);
extern void tm_abort(uint8_t cause);
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 89e3ef2496a..d0b5fca6b07 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -22,7 +22,15 @@ struct device_node;
static inline int cpu_to_node(int cpu)
{
- return numa_cpu_lookup_table[cpu];
+ int nid;
+
+ nid = numa_cpu_lookup_table[cpu];
+
+ /*
+ * During early boot, the numa-cpu lookup table might not have been
+ * set up for all CPUs yet. In such cases, default to node 0.
+ */
+ return (nid < 0) ? 0 : nid;
}
#define parent_node(node) (node)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 3ca819f541b..4494f029b63 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define __NR_syscalls 355
+#define __NR_syscalls 357
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index 75c6ecdb8f3..7422a999a39 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -36,9 +36,8 @@ typedef ppc_opcode_t uprobe_opcode_t;
struct arch_uprobe {
union {
- u8 insn[MAX_UINSN_BYTES];
- u8 ixol[MAX_UINSN_BYTES];
- u32 ainsn;
+ u32 insn;
+ u32 ixol;
};
};
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
index 0d9cecddf8a..c53f5f6d176 100644
--- a/arch/powerpc/include/asm/vdso.h
+++ b/arch/powerpc/include/asm/vdso.h
@@ -4,11 +4,11 @@
#ifdef __KERNEL__
/* Default link addresses for the vDSOs */
-#define VDSO32_LBASE 0x100000
-#define VDSO64_LBASE 0x100000
+#define VDSO32_LBASE 0x0
+#define VDSO64_LBASE 0x0
/* Default map addresses for 32bit vDSO */
-#define VDSO32_MBASE VDSO32_LBASE
+#define VDSO32_MBASE 0x100000
#define VDSO_VERSION_STRING LINUX_2.6.15
diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h
index 68d0cc998b1..4f9b7ca0710 100644
--- a/arch/powerpc/include/asm/vio.h
+++ b/arch/powerpc/include/asm/vio.h
@@ -15,7 +15,6 @@
#define _ASM_POWERPC_VIO_H
#ifdef __KERNEL__
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 6836ec79a83..a6665be4f3a 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -545,6 +545,7 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_TCSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
#define KVM_REG_PPC_PID (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
#define KVM_REG_PPC_ACOP (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
+#define KVM_REG_PPC_WORT (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb4)
#define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
#define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
@@ -553,6 +554,8 @@ struct kvm_get_htab_header {
/* Architecture compatibility level */
#define KVM_REG_PPC_ARCH_COMPAT (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
+#define KVM_REG_PPC_DABRX (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8)
+
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs
*/
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index fa698324a1f..a9c3e2e18c0 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -85,4 +85,6 @@
#define SO_MAX_PACING_RATE 47
+#define SO_BPF_EXTENSIONS 48
+
#endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h
index 85059a00f56..5d836b7c117 100644
--- a/arch/powerpc/include/uapi/asm/tm.h
+++ b/arch/powerpc/include/uapi/asm/tm.h
@@ -6,6 +6,8 @@
* the failure is persistent. PAPR saves 0xff-0xe0 for the hypervisor.
*/
#define TM_CAUSE_PERSISTENT 0x01
+#define TM_CAUSE_KVM_RESCHED 0xe0 /* From PAPR */
+#define TM_CAUSE_KVM_FAC_UNAV 0xe2 /* From PAPR */
#define TM_CAUSE_RESCHED 0xde
#define TM_CAUSE_TLBI 0xdc
#define TM_CAUSE_FAC_UNAV 0xda
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 74cb4d72d67..881bf2e2560 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -377,6 +377,7 @@
#define __NR_process_vm_writev 352
#define __NR_finit_module 353
#define __NR_kcmp 354
-
+#define __NR_sched_setattr 355
+#define __NR_sched_getattr 356
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 445cb6e39d5..fcc9a89a469 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
+obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
obj64-$(CONFIG_RELOCATABLE) += reloc_64.o
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
obj-$(CONFIG_PPC_A2) += cpu_setup_a2.o
@@ -47,7 +48,6 @@ obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
obj-$(CONFIG_PPC_P7_NAP) += idle_power7.o
obj-$(CONFIG_PPC_OF) += of_platform.o prom_parse.o
-obj-$(CONFIG_PPC_CLOCK) += clock.o
procfs-y := proc_powerpc.o
obj-$(CONFIG_PROC_FS) += $(procfs-y)
rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index d3de01066f7..b5aacf72ae6 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -203,6 +203,15 @@ int main(void)
DEFINE(PACA_MC_STACK, offsetof(struct paca_struct, mc_kstack));
DEFINE(PACA_CRIT_STACK, offsetof(struct paca_struct, crit_kstack));
DEFINE(PACA_DBG_STACK, offsetof(struct paca_struct, dbg_kstack));
+ DEFINE(PACA_TCD_PTR, offsetof(struct paca_struct, tcd_ptr));
+
+ DEFINE(TCD_ESEL_NEXT,
+ offsetof(struct tlb_core_data, esel_next));
+ DEFINE(TCD_ESEL_MAX,
+ offsetof(struct tlb_core_data, esel_max));
+ DEFINE(TCD_ESEL_FIRST,
+ offsetof(struct tlb_core_data, esel_first));
+ DEFINE(TCD_LOCK, offsetof(struct tlb_core_data, lock));
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PPC_STD_MMU_64
@@ -232,6 +241,10 @@ int main(void)
DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
#endif /* CONFIG_PPC_STD_MMU_64 */
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
+#ifdef CONFIG_PPC_BOOK3S_64
+ DEFINE(PACAMCEMERGSP, offsetof(struct paca_struct, mc_emergency_sp));
+ DEFINE(PACA_IN_MCE, offsetof(struct paca_struct, in_mce));
+#endif
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime));
@@ -425,18 +438,14 @@ int main(void)
DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
- DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
- DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
+ DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr));
#ifdef CONFIG_ALTIVEC
- DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
- DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
-#endif
-#ifdef CONFIG_VSX
- DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
+ DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr));
#endif
DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+ DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar));
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -484,16 +493,24 @@ int main(void)
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
+ DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
#endif
#ifdef CONFIG_PPC_BOOK3S
DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
+ DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic));
+ DEFINE(VCPU_VTB, offsetof(struct kvm_vcpu, arch.vtb));
DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
+ DEFINE(VCPU_IAMR, offsetof(struct kvm_vcpu, arch.iamr));
DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
+ DEFINE(VCPU_DABRX, offsetof(struct kvm_vcpu, arch.dabrx));
+ DEFINE(VCPU_DAWR, offsetof(struct kvm_vcpu, arch.dawr));
+ DEFINE(VCPU_DAWRX, offsetof(struct kvm_vcpu, arch.dawrx));
+ DEFINE(VCPU_CIABR, offsetof(struct kvm_vcpu, arch.ciabr));
DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
@@ -502,8 +519,10 @@ int main(void)
DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+ DEFINE(VCPU_SPMC, offsetof(struct kvm_vcpu, arch.spmc));
DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
+ DEFINE(VCPU_SIER, offsetof(struct kvm_vcpu, arch.sier));
DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
@@ -511,20 +530,47 @@ int main(void)
DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
- DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
+ DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr));
+ DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb));
+ DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr));
+ DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr));
+ DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr));
+ DEFINE(VCPU_CSIGR, offsetof(struct kvm_vcpu, arch.csigr));
+ DEFINE(VCPU_TACR, offsetof(struct kvm_vcpu, arch.tacr));
+ DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr));
+ DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop));
+ DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
+ DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm));
DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
+ DEFINE(VCORE_DPDES, offsetof(struct kvmppc_vcore, dpdes));
DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar));
+ DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar));
+ DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr));
+ DEFINE(VCPU_GPR_TM, offsetof(struct kvm_vcpu, arch.gpr_tm));
+ DEFINE(VCPU_FPRS_TM, offsetof(struct kvm_vcpu, arch.fp_tm.fpr));
+ DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
+ DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
+ DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
+ DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
+ DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
+ DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
+ DEFINE(VCPU_PPR_TM, offsetof(struct kvm_vcpu, arch.ppr_tm));
+ DEFINE(VCPU_DSCR_TM, offsetof(struct kvm_vcpu, arch.dscr_tm));
+ DEFINE(VCPU_TAR_TM, offsetof(struct kvm_vcpu, arch.tar_tm));
+#endif
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -589,6 +635,7 @@ int main(void)
HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
+ HSTATE_FIELD(HSTATE_PTID, ptid);
HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
HSTATE_FIELD(HSTATE_PMC, host_pmc);
HSTATE_FIELD(HSTATE_PURR, host_purr);
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index 65493272787..2912b8787aa 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -12,7 +12,6 @@
#include <linux/cpu.h>
#include <linux/cpumask.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
@@ -794,6 +793,9 @@ static void remove_cache_dir(struct cache_dir *cache_dir)
{
remove_index_dirs(cache_dir);
+ /* Remove cache dir from sysfs */
+ kobject_del(cache_dir->kobj);
+
kobject_put(cache_dir->kobj);
kfree(cache_dir);
diff --git a/arch/powerpc/kernel/clock.c b/arch/powerpc/kernel/clock.c
deleted file mode 100644
index a764b47791e..00000000000
--- a/arch/powerpc/kernel/clock.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Dummy clk implementations for powerpc.
- * These need to be overridden in platform code.
- */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/export.h>
-#include <asm/clk_interface.h>
-
-struct clk_interface clk_functions;
-
-struct clk *clk_get(struct device *dev, const char *id)
-{
- if (clk_functions.clk_get)
- return clk_functions.clk_get(dev, id);
- return ERR_PTR(-ENOSYS);
-}
-EXPORT_SYMBOL(clk_get);
-
-void clk_put(struct clk *clk)
-{
- if (clk_functions.clk_put)
- clk_functions.clk_put(clk);
-}
-EXPORT_SYMBOL(clk_put);
-
-int clk_enable(struct clk *clk)
-{
- if (clk_functions.clk_enable)
- return clk_functions.clk_enable(clk);
- return -ENOSYS;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
- if (clk_functions.clk_disable)
- clk_functions.clk_disable(clk);
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- if (clk_functions.clk_get_rate)
- return clk_functions.clk_get_rate(clk);
- return 0;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- if (clk_functions.clk_round_rate)
- return clk_functions.clk_round_rate(clk, rate);
- return -ENOSYS;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- if (clk_functions.clk_set_rate)
- return clk_functions.clk_set_rate(clk, rate);
- return -ENOSYS;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-struct clk *clk_get_parent(struct clk *clk)
-{
- if (clk_functions.clk_get_parent)
- return clk_functions.clk_get_parent(clk);
- return ERR_PTR(-ENOSYS);
-}
-EXPORT_SYMBOL(clk_get_parent);
-
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
- if (clk_functions.clk_set_parent)
- return clk_functions.clk_set_parent(clk, parent);
- return -ENOSYS;
-}
-EXPORT_SYMBOL(clk_set_parent);
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index bfb18c7290b..cc2d8962e09 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -53,11 +53,57 @@ _GLOBAL(__e500_dcache_setup)
isync
blr
+/*
+ * FIXME - we haven't yet done testing to determine a reasonable default
+ * value for PW20_WAIT_IDLE_BIT.
+ */
+#define PW20_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHZ */
+_GLOBAL(setup_pw20_idle)
+ mfspr r3, SPRN_PWRMGTCR0
+
+ /* Set PW20_WAIT bit, enable pw20 state*/
+ ori r3, r3, PWRMGTCR0_PW20_WAIT
+ li r11, PW20_WAIT_IDLE_BIT
+
+ /* Set Automatic PW20 Core Idle Count */
+ rlwimi r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT
+
+ mtspr SPRN_PWRMGTCR0, r3
+
+ blr
+
+/*
+ * FIXME - we haven't yet done testing to determine a reasonable default
+ * value for AV_WAIT_IDLE_BIT.
+ */
+#define AV_WAIT_IDLE_BIT 50 /* 1ms, TB frequency is 41.66MHZ */
+_GLOBAL(setup_altivec_idle)
+ mfspr r3, SPRN_PWRMGTCR0
+
+ /* Enable Altivec Idle */
+ oris r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h
+ li r11, AV_WAIT_IDLE_BIT
+
+ /* Set Automatic AltiVec Idle Count */
+ rlwimi r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT
+
+ mtspr SPRN_PWRMGTCR0, r3
+
+ blr
+
_GLOBAL(__setup_cpu_e6500)
mflr r6
#ifdef CONFIG_PPC64
bl .setup_altivec_ivors
+ /* Touch IVOR42 only if the CPU supports E.HV category */
+ mfspr r10,SPRN_MMUCFG
+ rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
+ beq 1f
+ bl .setup_lrat_ivor
+1:
#endif
+ bl setup_pw20_idle
+ bl setup_altivec_idle
bl __setup_cpu_e5500
mtlr r6
blr
@@ -119,6 +165,14 @@ _GLOBAL(__setup_cpu_e5500)
_GLOBAL(__restore_cpu_e6500)
mflr r5
bl .setup_altivec_ivors
+ /* Touch IVOR42 only if the CPU supports E.HV category */
+ mfspr r10,SPRN_MMUCFG
+ rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
+ beq 1f
+ bl .setup_lrat_ivor
+1:
+ bl .setup_pw20_idle
+ bl .setup_altivec_idle
bl __restore_cpu_e5500
mtlr r5
blr
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 18b5b9cf8e3..37d1bb002aa 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -29,7 +29,7 @@ _GLOBAL(__setup_cpu_power7)
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
bl __init_LPCR
- bl __init_TLB
+ bl __init_tlb_power7
mtlr r11
blr
@@ -42,7 +42,7 @@ _GLOBAL(__restore_cpu_power7)
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
bl __init_LPCR
- bl __init_TLB
+ bl __init_tlb_power7
mtlr r11
blr
@@ -59,7 +59,7 @@ _GLOBAL(__setup_cpu_power8)
oris r3, r3, LPCR_AIL_3@h
bl __init_LPCR
bl __init_HFSCR
- bl __init_TLB
+ bl __init_tlb_power8
bl __init_PMU_HV
mtlr r11
blr
@@ -78,7 +78,7 @@ _GLOBAL(__restore_cpu_power8)
oris r3, r3, LPCR_AIL_3@h
bl __init_LPCR
bl __init_HFSCR
- bl __init_TLB
+ bl __init_tlb_power8
bl __init_PMU_HV
mtlr r11
blr
@@ -134,15 +134,31 @@ __init_HFSCR:
mtspr SPRN_HFSCR,r3
blr
-__init_TLB:
- /*
- * Clear the TLB using the "IS 3" form of tlbiel instruction
- * (invalidate by congruence class). P7 has 128 CCs, P8 has 512
- * so we just always do 512
- */
+/*
+ * Clear the TLB using the specified IS form of tlbiel instruction
+ * (invalidate by congruence class). P7 has 128 CCs, P8 has 512.
+ *
+ * r3 = IS field
+ */
+__init_tlb_power7:
+ li r3,0xc00 /* IS field = 0b11 */
+_GLOBAL(__flush_tlb_power7)
+ li r6,128
+ mtctr r6
+ mr r7,r3 /* IS field */
+ ptesync
+2: tlbiel r7
+ addi r7,r7,0x1000
+ bdnz 2b
+ ptesync
+1: blr
+
+__init_tlb_power8:
+ li r3,0xc00 /* IS field = 0b11 */
+_GLOBAL(__flush_tlb_power8)
li r6,512
mtctr r6
- li r7,0xc00 /* IS field = 0b11 */
+ mr r7,r3 /* IS field */
ptesync
2: tlbiel r7
addi r7,r7,0x1000
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 597d954e586..6c8dd5da4de 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -71,6 +71,10 @@ extern void __restore_cpu_power7(void);
extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power8(void);
extern void __restore_cpu_a2(void);
+extern void __flush_tlb_power7(unsigned long inval_selector);
+extern void __flush_tlb_power8(unsigned long inval_selector);
+extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
+extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_E500)
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@@ -440,6 +444,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power7,
.cpu_restore = __restore_cpu_power7,
+ .flush_tlb = __flush_tlb_power7,
+ .machine_check_early = __machine_check_early_realmode_p7,
.platform = "power7",
},
{ /* 2.07-compliant processor, i.e. Power8 "architected" mode */
@@ -456,6 +462,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
+ .flush_tlb = __flush_tlb_power8,
+ .machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
{ /* Power7 */
@@ -474,6 +482,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_POWER4,
.cpu_setup = __setup_cpu_power7,
.cpu_restore = __restore_cpu_power7,
+ .flush_tlb = __flush_tlb_power7,
+ .machine_check_early = __machine_check_early_realmode_p7,
.platform = "power7",
},
{ /* Power7+ */
@@ -492,6 +502,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_POWER4,
.cpu_setup = __setup_cpu_power7,
.cpu_restore = __restore_cpu_power7,
+ .flush_tlb = __flush_tlb_power7,
+ .machine_check_early = __machine_check_early_realmode_p7,
.platform = "power7+",
},
{ /* Power8E */
@@ -510,6 +522,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
+ .flush_tlb = __flush_tlb_power8,
+ .machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
{ /* Power8 */
@@ -528,6 +542,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
+ .flush_tlb = __flush_tlb_power8,
+ .machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
{ /* Cell Broadband Engine */
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index fdcd8f551af..18d7c80ddeb 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -17,7 +17,6 @@
#include <linux/export.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/irq.h>
#include <linux/types.h>
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index e4897523de4..54d0116256f 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -83,10 +83,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
return 0;
}
- if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) {
+ if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
- mask, tbl->it_offset << IOMMU_PAGE_SHIFT);
+ mask, tbl->it_offset << tbl->it_page_shift);
return 0;
} else
return 1;
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 8032b97ccdc..ee78f6e49d6 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -191,12 +191,10 @@ EXPORT_SYMBOL(dma_direct_ops);
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
-int dma_set_mask(struct device *dev, u64 dma_mask)
+int __dma_set_mask(struct device *dev, u64 dma_mask)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
- if (ppc_md.dma_set_mask)
- return ppc_md.dma_set_mask(dev, dma_mask);
if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
return dma_ops->set_dma_mask(dev, dma_mask);
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
@@ -204,6 +202,12 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
*dev->dma_mask = dma_mask;
return 0;
}
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (ppc_md.dma_set_mask)
+ return ppc_md.dma_set_mask(dev, dma_mask);
+ return __dma_set_mask(dev, dma_mask);
+}
EXPORT_SYMBOL(dma_set_mask);
u64 dma_get_required_mask(struct device *dev)
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 4bd687d5e7a..e7b76a6bf15 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -28,6 +28,7 @@
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
+#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/export.h>
@@ -84,12 +85,12 @@
#define EEH_MAX_FAILS 2100000
/* Time to wait for a PCI slot to report status, in milliseconds */
-#define PCI_BUS_RESET_WAIT_MSEC (60*1000)
+#define PCI_BUS_RESET_WAIT_MSEC (5*60*1000)
/* Platform dependent EEH operations */
struct eeh_ops *eeh_ops = NULL;
-int eeh_subsystem_enabled;
+bool eeh_subsystem_enabled = false;
EXPORT_SYMBOL(eeh_subsystem_enabled);
/*
@@ -364,7 +365,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
eeh_stats.total_mmio_ffs++;
- if (!eeh_subsystem_enabled)
+ if (!eeh_enabled())
return 0;
if (!edev) {
@@ -747,6 +748,17 @@ int __exit eeh_ops_unregister(const char *name)
return -EEXIST;
}
+static int eeh_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *unused)
+{
+ eeh_set_enable(false);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block eeh_reboot_nb = {
+ .notifier_call = eeh_reboot_notifier,
+};
+
/**
* eeh_init - EEH initialization
*
@@ -778,6 +790,14 @@ int eeh_init(void)
if (machine_is(powernv) && cnt++ <= 0)
return ret;
+ /* Register reboot notifier */
+ ret = register_reboot_notifier(&eeh_reboot_nb);
+ if (ret) {
+ pr_warn("%s: Failed to register notifier (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
/* call platform initialization function */
if (!eeh_ops) {
pr_warning("%s: Platform EEH operation not found\n",
@@ -822,7 +842,7 @@ int eeh_init(void)
return ret;
}
- if (eeh_subsystem_enabled)
+ if (eeh_enabled())
pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
else
pr_warning("EEH: No capable adapters found\n");
@@ -897,7 +917,7 @@ void eeh_add_device_late(struct pci_dev *dev)
struct device_node *dn;
struct eeh_dev *edev;
- if (!dev || !eeh_subsystem_enabled)
+ if (!dev || !eeh_enabled())
return;
pr_debug("EEH: Adding device %s\n", pci_name(dev));
@@ -921,6 +941,13 @@ void eeh_add_device_late(struct pci_dev *dev)
eeh_sysfs_remove_device(edev->pdev);
edev->mode &= ~EEH_DEV_SYSFS;
+ /*
+ * The PCI device should already have been removed, though that
+ * removal didn't complete correctly. So we needn't call
+ * into the error handler afterwards.
+ */
+ edev->mode |= EEH_DEV_NO_HANDLER;
+
edev->pdev = NULL;
dev->dev.archdata.edev = NULL;
}
@@ -998,7 +1025,7 @@ void eeh_remove_device(struct pci_dev *dev)
{
struct eeh_dev *edev;
- if (!dev || !eeh_subsystem_enabled)
+ if (!dev || !eeh_enabled())
return;
edev = pci_dev_to_eeh_dev(dev);
@@ -1023,6 +1050,14 @@ void eeh_remove_device(struct pci_dev *dev)
else
edev->mode |= EEH_DEV_DISCONNECTED;
+ /*
+ * We're removing the device from the PCI subsystem, which means
+ * the PCI device driver either can't support EEH or can't
+ * support it well. So we rely completely on hotplug to recover
+ * the specific PCI device.
+ */
+ edev->mode |= EEH_DEV_NO_HANDLER;
+
eeh_addr_cache_rmv_dev(dev);
eeh_sysfs_remove_device(dev);
edev->mode &= ~EEH_DEV_SYSFS;
@@ -1030,7 +1065,7 @@ void eeh_remove_device(struct pci_dev *dev)
static int proc_eeh_show(struct seq_file *m, void *v)
{
- if (0 == eeh_subsystem_enabled) {
+ if (!eeh_enabled()) {
seq_printf(m, "EEH Subsystem is globally disabled\n");
seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
} else {
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 36bed5a1275..fdc679d309e 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -217,7 +217,8 @@ static void *eeh_report_mmio_enabled(void *data, void *userdata)
if (!driver) return NULL;
if (!driver->err_handler ||
- !driver->err_handler->mmio_enabled) {
+ !driver->err_handler->mmio_enabled ||
+ (edev->mode & EEH_DEV_NO_HANDLER)) {
eeh_pcid_put(dev);
return NULL;
}
@@ -258,7 +259,8 @@ static void *eeh_report_reset(void *data, void *userdata)
eeh_enable_irq(dev);
if (!driver->err_handler ||
- !driver->err_handler->slot_reset) {
+ !driver->err_handler->slot_reset ||
+ (edev->mode & EEH_DEV_NO_HANDLER)) {
eeh_pcid_put(dev);
return NULL;
}
@@ -297,7 +299,9 @@ static void *eeh_report_resume(void *data, void *userdata)
eeh_enable_irq(dev);
if (!driver->err_handler ||
- !driver->err_handler->resume) {
+ !driver->err_handler->resume ||
+ (edev->mode & EEH_DEV_NO_HANDLER)) {
+ edev->mode &= ~EEH_DEV_NO_HANDLER;
eeh_pcid_put(dev);
return NULL;
}
@@ -358,9 +362,13 @@ static void *eeh_rmv_device(void *data, void *userdata)
*/
if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
return NULL;
+
driver = eeh_pcid_get(dev);
- if (driver && driver->err_handler)
- return NULL;
+ if (driver) {
+ eeh_pcid_put(dev);
+ if (driver->err_handler)
+ return NULL;
+ }
/* Remove it from PCI subsystem */
pr_debug("EEH: Removing %s without EEH sensitive driver\n",
@@ -369,7 +377,9 @@ static void *eeh_rmv_device(void *data, void *userdata)
edev->mode |= EEH_DEV_DISCONNECTED;
(*removed)++;
+ pci_lock_rescan_remove();
pci_stop_and_remove_bus_device(dev);
+ pci_unlock_rescan_remove();
return NULL;
}
@@ -416,10 +426,13 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
* into pcibios_add_pci_devices().
*/
eeh_pe_state_mark(pe, EEH_PE_KEEP);
- if (bus)
+ if (bus) {
+ pci_lock_rescan_remove();
pcibios_remove_pci_devices(bus);
- else if (frozen_bus)
+ pci_unlock_rescan_remove();
+ } else if (frozen_bus) {
eeh_pe_dev_traverse(pe, eeh_rmv_device, &removed);
+ }
/* Reset the pci controller. (Asserts RST#; resets config space).
* Reconfigure bridges and devices. Don't try to bring the system
@@ -429,6 +442,8 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
if (rc)
return rc;
+ pci_lock_rescan_remove();
+
/* Restore PE */
eeh_ops->configure_bridge(pe);
eeh_pe_restore_bars(pe);
@@ -462,13 +477,14 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
pe->tstamp = tstamp;
pe->freeze_count = cnt;
+ pci_unlock_rescan_remove();
return 0;
}
/* The longest amount of time to wait for a pci device
* to come back on line, in seconds.
*/
-#define MAX_WAIT_FOR_RECOVERY 150
+#define MAX_WAIT_FOR_RECOVERY 300
static void eeh_handle_normal_event(struct eeh_pe *pe)
{
@@ -618,92 +634,103 @@ perm_error:
eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);
/* Shut down the device drivers for good. */
- if (frozen_bus)
+ if (frozen_bus) {
+ pci_lock_rescan_remove();
pcibios_remove_pci_devices(frozen_bus);
+ pci_unlock_rescan_remove();
+ }
}
static void eeh_handle_special_event(void)
{
struct eeh_pe *pe, *phb_pe;
struct pci_bus *bus;
- struct pci_controller *hose, *tmp;
+ struct pci_controller *hose;
unsigned long flags;
- int rc = 0;
+ int rc;
- /*
- * The return value from next_error() has been classified as follows.
- * It might be good to enumerate them. However, next_error() is only
- * supported by PowerNV platform for now. So it would be fine to use
- * integer directly:
- *
- * 4 - Dead IOC 3 - Dead PHB
- * 2 - Fenced PHB 1 - Frozen PE
- * 0 - No error found
- *
- */
- rc = eeh_ops->next_error(&pe);
- if (rc <= 0)
- return;
- switch (rc) {
- case 4:
- /* Mark all PHBs in dead state */
- eeh_serialize_lock(&flags);
- list_for_each_entry_safe(hose, tmp,
- &hose_list, list_node) {
- phb_pe = eeh_phb_pe_get(hose);
- if (!phb_pe) continue;
-
- eeh_pe_state_mark(phb_pe,
- EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
+ do {
+ rc = eeh_ops->next_error(&pe);
+
+ switch (rc) {
+ case EEH_NEXT_ERR_DEAD_IOC:
+ /* Mark all PHBs in dead state */
+ eeh_serialize_lock(&flags);
+
+ /* Purge all events */
+ eeh_remove_event(NULL);
+
+ list_for_each_entry(hose, &hose_list, list_node) {
+ phb_pe = eeh_phb_pe_get(hose);
+ if (!phb_pe) continue;
+
+ eeh_pe_state_mark(phb_pe,
+ EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
+ }
+
+ eeh_serialize_unlock(flags);
+
+ break;
+ case EEH_NEXT_ERR_FROZEN_PE:
+ case EEH_NEXT_ERR_FENCED_PHB:
+ case EEH_NEXT_ERR_DEAD_PHB:
+ /* Mark the PE in fenced state */
+ eeh_serialize_lock(&flags);
+
+ /* Purge all events of the PHB */
+ eeh_remove_event(pe);
+
+ if (rc == EEH_NEXT_ERR_DEAD_PHB)
+ eeh_pe_state_mark(pe,
+ EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
+ else
+ eeh_pe_state_mark(pe,
+ EEH_PE_ISOLATED | EEH_PE_RECOVERING);
+
+ eeh_serialize_unlock(flags);
+
+ break;
+ case EEH_NEXT_ERR_NONE:
+ return;
+ default:
+ pr_warn("%s: Invalid value %d from next_error()\n",
+ __func__, rc);
+ return;
}
- eeh_serialize_unlock(flags);
-
- /* Purge all events */
- eeh_remove_event(NULL);
- break;
- case 3:
- case 2:
- case 1:
- /* Mark the PE in fenced state */
- eeh_serialize_lock(&flags);
- if (rc == 3)
- eeh_pe_state_mark(pe,
- EEH_PE_ISOLATED | EEH_PE_PHB_DEAD);
- else
- eeh_pe_state_mark(pe,
- EEH_PE_ISOLATED | EEH_PE_RECOVERING);
- eeh_serialize_unlock(flags);
-
- /* Purge all events of the PHB */
- eeh_remove_event(pe);
- break;
- default:
- pr_err("%s: Invalid value %d from next_error()\n",
- __func__, rc);
- return;
- }
- /*
- * For fenced PHB and frozen PE, it's handled as normal
- * event. We have to remove the affected PHBs for dead
- * PHB and IOC
- */
- if (rc == 2 || rc == 1)
- eeh_handle_normal_event(pe);
- else {
- list_for_each_entry_safe(hose, tmp,
- &hose_list, list_node) {
- phb_pe = eeh_phb_pe_get(hose);
- if (!phb_pe || !(phb_pe->state & EEH_PE_PHB_DEAD))
- continue;
-
- bus = eeh_pe_bus_get(phb_pe);
- /* Notify all devices that they're about to go down. */
- eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);
- pcibios_remove_pci_devices(bus);
+ /*
+ * For a fenced PHB or frozen PE, it's handled as a normal
+ * event. We have to remove the affected PHBs for a dead
+ * PHB or IOC.
+ */
+ if (rc == EEH_NEXT_ERR_FROZEN_PE ||
+ rc == EEH_NEXT_ERR_FENCED_PHB) {
+ eeh_handle_normal_event(pe);
+ } else {
+ pci_lock_rescan_remove();
+ list_for_each_entry(hose, &hose_list, list_node) {
+ phb_pe = eeh_phb_pe_get(hose);
+ if (!phb_pe ||
+ !(phb_pe->state & EEH_PE_PHB_DEAD))
+ continue;
+
+ /* Notify all devices that they're about to go down */
+ bus = eeh_pe_bus_get(phb_pe);
+ eeh_pe_dev_traverse(pe,
+ eeh_report_failure, NULL);
+ pcibios_remove_pci_devices(bus);
+ }
+ pci_unlock_rescan_remove();
}
- }
+
+ /*
+ * If we have detected a dead IOC, we needn't proceed
+ * any further since all PHBs have already been removed.
+ */
+ if (rc == EEH_NEXT_ERR_DEAD_IOC)
+ break;
+ } while (rc != EEH_NEXT_ERR_NONE);
}
/**
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index f9450537e33..f0c353fa655 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -25,7 +25,6 @@
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gfp.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
@@ -737,6 +736,9 @@ static void *eeh_restore_one_device_bars(void *data, void *flag)
else
eeh_restore_device_bars(edev, dn);
+ if (eeh_ops->restore_config)
+ eeh_ops->restore_config(dn);
+
return NULL;
}
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index bbfb0294b35..662c6dd9807 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -664,8 +664,16 @@ _GLOBAL(ret_from_except_lite)
bl .restore_interrupts
SCHEDULE_USER
b .ret_from_except_lite
-
-2: bl .save_nvgprs
+2:
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
+ bne 3f /* only restore TM if nothing else to do */
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .restore_tm_state
+ b restore
+3:
+#endif
+ bl .save_nvgprs
bl .restore_interrupts
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_notify_resume
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index e7751561fd1..063b65dd4f2 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -308,6 +308,7 @@ interrupt_base_book3e: /* fake trap */
EXCEPTION_STUB(0x2e0, guest_doorbell_crit)
EXCEPTION_STUB(0x300, hypercall)
EXCEPTION_STUB(0x320, ehpriv)
+ EXCEPTION_STUB(0x340, lrat_error)
.globl interrupt_end_book3e
interrupt_end_book3e:
@@ -677,6 +678,17 @@ kernel_dbg_exc:
bl .unknown_exception
b .ret_from_except
+/* LRAT Error interrupt */
+ START_EXCEPTION(lrat_error);
+ NORMAL_EXCEPTION_PROLOG(0x340, BOOKE_INTERRUPT_LRAT_ERROR,
+ PROLOG_ADDITION_NONE)
+ EXCEPTION_COMMON(0x340, PACA_EXGEN, INTS_KEEP)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .save_nvgprs
+ INTS_RESTORE_HARD
+ bl .unknown_exception
+ b .ret_from_except
+
/*
* An interrupt came in while soft-disabled; We mark paca->irq_happened
* accordingly and if the interrupt is level sensitive, we hard disable
@@ -859,6 +871,7 @@ BAD_STACK_TRAMPOLINE(0x2e0)
BAD_STACK_TRAMPOLINE(0x300)
BAD_STACK_TRAMPOLINE(0x310)
BAD_STACK_TRAMPOLINE(0x320)
+BAD_STACK_TRAMPOLINE(0x340)
BAD_STACK_TRAMPOLINE(0x400)
BAD_STACK_TRAMPOLINE(0x500)
BAD_STACK_TRAMPOLINE(0x600)
@@ -1055,12 +1068,9 @@ skpinv: addi r6,r6,1 /* Increment */
mtspr SPRN_MAS0,r3
tlbre
mfspr r6,SPRN_MAS1
- rlwinm r6,r6,0,2,0 /* clear IPROT */
+ rlwinm r6,r6,0,2,31 /* clear IPROT and VALID */
mtspr SPRN_MAS1,r6
tlbwe
-
- /* Invalidate TLB1 */
- PPC_TLBILX_ALL(0,R0)
sync
isync
@@ -1114,12 +1124,9 @@ skpinv: addi r6,r6,1 /* Increment */
mtspr SPRN_MAS0,r4
tlbre
mfspr r5,SPRN_MAS1
- rlwinm r5,r5,0,2,0 /* clear IPROT */
+ rlwinm r5,r5,0,2,31 /* clear IPROT and VALID */
mtspr SPRN_MAS1,r5
tlbwe
-
- /* Invalidate TLB1 */
- PPC_TLBILX_ALL(0,R0)
sync
isync
@@ -1414,3 +1421,7 @@ _GLOBAL(setup_ehv_ivors)
SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
blr
+
+_GLOBAL(setup_lrat_ivor)
+ SET_IVOR(42, 0x340) /* LRAT Error */
+ blr
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 9f905e40922..38d507306a1 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -155,8 +155,30 @@ machine_check_pSeries_1:
*/
HMT_MEDIUM_PPR_DISCARD
SET_SCRATCH0(r13) /* save r13 */
+#ifdef CONFIG_PPC_P7_NAP
+BEGIN_FTR_SECTION
+ /* Running native on arch 2.06 or later, check if we are
+ * waking up from nap. We only handle no state loss and
+ * supervisor state loss. We do -not- handle hypervisor
+ * state loss at this time.
+ */
+ mfspr r13,SPRN_SRR1
+ rlwinm. r13,r13,47-31,30,31
+ beq 9f
+
+ /* waking up from powersave (nap) state */
+ cmpwi cr1,r13,2
+ /* Total loss of HV state is fatal. Let's just stay stuck here */
+ bgt cr1,.
+9:
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+#endif /* CONFIG_PPC_P7_NAP */
EXCEPTION_PROLOG_0(PACA_EXMC)
+BEGIN_FTR_SECTION
+ b machine_check_pSeries_early
+FTR_SECTION_ELSE
b machine_check_pSeries_0
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
. = 0x300
.globl data_access_pSeries
@@ -405,6 +427,64 @@ denorm_exception_hv:
.align 7
/* moved from 0x200 */
+machine_check_pSeries_early:
+BEGIN_FTR_SECTION
+ EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
+ /*
+ * Register contents:
+ * R13 = PACA
+ * R9 = CR
+ * Original R9 to R13 is saved on PACA_EXMC
+ *
+ * Switch to mc_emergency stack and handle re-entrancy (though we
+ * currently don't test for overflow). Save MCE registers srr1,
+ * srr0, dar and dsisr and then set ME=1
+ *
+ * We use paca->in_mce to check whether this is the first entry or
+ * nested machine check. We increment paca->in_mce to track nested
+ * machine checks.
+ *
+ * If this is the first entry then set stack pointer to
+ * paca->mc_emergency_sp, otherwise r1 is already pointing to
+ * stack frame on mc_emergency stack.
+ *
+ * NOTE: We are here with MSR_ME=0 (off), which means we risk a
+ * checkstop if we get another machine check exception before we do
+ * rfid with MSR_ME=1.
+ */
+ mr r11,r1 /* Save r1 */
+ lhz r10,PACA_IN_MCE(r13)
+ cmpwi r10,0 /* Are we in nested machine check */
+ bne 0f /* Yes, we are. */
+ /* First machine check entry */
+ ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */
+0: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
+ addi r10,r10,1 /* increment paca->in_mce */
+ sth r10,PACA_IN_MCE(r13)
+ std r11,GPR1(r1) /* Save r1 on the stack. */
+ std r11,0(r1) /* make stack chain pointer */
+ mfspr r11,SPRN_SRR0 /* Save SRR0 */
+ std r11,_NIP(r1)
+ mfspr r11,SPRN_SRR1 /* Save SRR1 */
+ std r11,_MSR(r1)
+ mfspr r11,SPRN_DAR /* Save DAR */
+ std r11,_DAR(r1)
+ mfspr r11,SPRN_DSISR /* Save DSISR */
+ std r11,_DSISR(r1)
+ std r9,_CCR(r1) /* Save CR in stackframe */
+ /* Save r9 through r13 from EXMC save area to stack frame. */
+ EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
+ mfmsr r11 /* get MSR value */
+ ori r11,r11,MSR_ME /* turn on ME bit */
+ ori r11,r11,MSR_RI /* turn on RI bit */
+ ld r12,PACAKBASE(r13) /* get high part of &label */
+ LOAD_HANDLER(r12, machine_check_handle_early)
+ mtspr SPRN_SRR0,r12
+ mtspr SPRN_SRR1,r11
+ rfid
+ b . /* prevent speculative execution */
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+
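
The block comment in this hunk describes the re-entrancy scheme: paca->in_mce counts nesting, and the mc_emergency stack is only installed on the first entry. Below is a minimal C model of that stack selection; the field names and frame size are made up, not the real paca layout.

/* Minimal model of the "first entry vs nested machine check" stack
 * selection described in the comment above; field names and the frame
 * size are illustrative, not the real paca layout. */
#include <stdint.h>
#include <stdio.h>

#define FRAME_SIZE 112			/* stands in for INT_FRAME_SIZE */

struct fake_paca { uint16_t in_mce; uintptr_t mc_emergency_sp; };

static uintptr_t mce_pick_stack(struct fake_paca *paca, uintptr_t r1)
{
	if (paca->in_mce == 0)		/* first entry: switch to the MC stack */
		r1 = paca->mc_emergency_sp;
	/* nested entry: keep the frame already on the emergency stack */
	paca->in_mce++;
	return r1 - FRAME_SIZE;		/* allocate a new stack frame */
}

int main(void)
{
	struct fake_paca paca = { 0, 0x10000 };

	printf("first:  %#lx\n", (unsigned long)mce_pick_stack(&paca, 0x20000));
	printf("nested: %#lx\n", (unsigned long)mce_pick_stack(&paca, 0xff90));
	return 0;
}
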
machine_check_pSeries:
.globl machine_check_fwnmi
machine_check_fwnmi:
@@ -688,30 +768,6 @@ kvmppc_skip_Hinterrupt:
STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
- /*
- * Machine check is different because we use a different
- * save area: PACA_EXMC instead of PACA_EXGEN.
- */
- .align 7
- .globl machine_check_common
-machine_check_common:
-
- mfspr r10,SPRN_DAR
- std r10,PACA_EXGEN+EX_DAR(r13)
- mfspr r10,SPRN_DSISR
- stw r10,PACA_EXGEN+EX_DSISR(r13)
- EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
- FINISH_NAP
- DISABLE_INTS
- ld r3,PACA_EXGEN+EX_DAR(r13)
- lwz r4,PACA_EXGEN+EX_DSISR(r13)
- std r3,_DAR(r1)
- std r4,_DSISR(r1)
- bl .save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .machine_check_exception
- b .ret_from_except
-
STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
@@ -1080,6 +1136,30 @@ unrecov_user_slb:
#endif /* __DISABLED__ */
+ /*
+ * Machine check is different because we use a different
+ * save area: PACA_EXMC instead of PACA_EXGEN.
+ */
+ .align 7
+ .globl machine_check_common
+machine_check_common:
+
+ mfspr r10,SPRN_DAR
+ std r10,PACA_EXGEN+EX_DAR(r13)
+ mfspr r10,SPRN_DSISR
+ stw r10,PACA_EXGEN+EX_DSISR(r13)
+ EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
+ FINISH_NAP
+ DISABLE_INTS
+ ld r3,PACA_EXGEN+EX_DAR(r13)
+ lwz r4,PACA_EXGEN+EX_DSISR(r13)
+ std r3,_DAR(r1)
+ std r4,_DSISR(r1)
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .machine_check_exception
+ b .ret_from_except
+
.align 7
.globl alignment_common
alignment_common:
@@ -1263,6 +1343,120 @@ _GLOBAL(opal_mc_secondary_handler)
#endif /* CONFIG_PPC_POWERNV */
+#define MACHINE_CHECK_HANDLER_WINDUP \
+ /* Clear MSR_RI before setting SRR0 and SRR1. */\
+ li r0,MSR_RI; \
+ mfmsr r9; /* get MSR value */ \
+ andc r9,r9,r0; \
+ mtmsrd r9,1; /* Clear MSR_RI */ \
+ /* Move original SRR0 and SRR1 into the respective regs */ \
+ ld r9,_MSR(r1); \
+ mtspr SPRN_SRR1,r9; \
+ ld r3,_NIP(r1); \
+ mtspr SPRN_SRR0,r3; \
+ ld r9,_CTR(r1); \
+ mtctr r9; \
+ ld r9,_XER(r1); \
+ mtxer r9; \
+ ld r9,_LINK(r1); \
+ mtlr r9; \
+ REST_GPR(0, r1); \
+ REST_8GPRS(2, r1); \
+ REST_GPR(10, r1); \
+ ld r11,_CCR(r1); \
+ mtcr r11; \
+ /* Decrement paca->in_mce. */ \
+ lhz r12,PACA_IN_MCE(r13); \
+ subi r12,r12,1; \
+ sth r12,PACA_IN_MCE(r13); \
+ REST_GPR(11, r1); \
+ REST_2GPRS(12, r1); \
+ /* restore original r1. */ \
+ ld r1,GPR1(r1)
+
+ /*
+ * Handle machine check early in real mode. We come here with
+ * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
+ */
+ .align 7
+ .globl machine_check_handle_early
+machine_check_handle_early:
+ std r0,GPR0(r1) /* Save r0 */
+ EXCEPTION_PROLOG_COMMON_3(0x200)
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .machine_check_early
+ ld r12,_MSR(r1)
+#ifdef CONFIG_PPC_P7_NAP
+ /*
+ * Check if thread was in power saving mode. We come here when any
+ * of the following is true:
+ * a. thread wasn't in power saving mode
+ * b. thread was in power saving mode with no state loss or
+ * supervisor state loss
+ *
+ * Go back to nap again if (b) is true.
+ */
+ rlwinm. r11,r12,47-31,30,31 /* Was it in power saving mode? */
+ beq 4f /* No, it wasn't */
+ /* Thread was in power saving mode. Go back to nap again. */
+ cmpwi r11,2
+ bne 3f
+ /* Supervisor state loss */
+ li r0,1
+ stb r0,PACA_NAPSTATELOST(r13)
+3: bl .machine_check_queue_event
+ MACHINE_CHECK_HANDLER_WINDUP
+ GET_PACA(r13)
+ ld r1,PACAR1(r13)
+ b .power7_enter_nap_mode
+4:
+#endif
+ /*
+ * Check if we are coming from hypervisor userspace. If yes then we
+ * continue in host kernel in V mode to deliver the MC event.
+ */
+ rldicl. r11,r12,4,63 /* See if MC hit while in HV mode. */
+ beq 5f
+ andi. r11,r12,MSR_PR /* See if coming from user. */
+ bne 9f /* continue in V mode if we are. */
+
+5:
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ /*
+ * We are coming from kernel context. Check if we are coming from
+ * guest. If yes, then we can continue. We will fall through
+ * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
+ */
+ lbz r11,HSTATE_IN_GUEST(r13)
+ cmpwi r11,0 /* Check if coming from guest */
+ bne 9f /* continue if we are. */
+#endif
+ /*
+ * At this point we are not sure about what context we come from.
+ * Queue up the MCE event and return from the interrupt.
+ * But before that, check if this is an un-recoverable exception.
+ * If yes, then stay on emergency stack and panic.
+ */
+ andi. r11,r12,MSR_RI
+ bne 2f
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unrecoverable_exception
+ b 1b
+2:
+ /*
+ * Return from MC interrupt.
+ * Queue up the MCE event so that we can log it later, while
+ * returning from kernel or opal call.
+ */
+ bl .machine_check_queue_event
+ MACHINE_CHECK_HANDLER_WINDUP
+ rfid
+9:
+ /* Deliver the machine check to host kernel in V mode. */
+ MACHINE_CHECK_HANDLER_WINDUP
+ b machine_check_pSeries
+
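
To summarise the branches in machine_check_handle_early above: deliver in virtual mode if the machine check hit hypervisor userspace, fall through to the KVM path if it hit a guest, panic if MSR_RI was clear, otherwise queue the event and return. A boolean sketch of that dispatch; none of these names are kernel symbols.

/* Boolean sketch of the dispatch in machine_check_handle_early above;
 * none of these names are kernel symbols. */
#include <stdbool.h>
#include <stdio.h>

enum action {
	DELIVER_IN_V_MODE,	/* MC hit hypervisor userspace */
	FALL_THROUGH_TO_GUEST,	/* let the KVM path deliver it */
	PANIC_UNRECOVERABLE,	/* MSR_RI was clear */
	QUEUE_AND_RETURN	/* log later, on kernel/opal exit */
};

static enum action early_mce_dispatch(bool hv, bool pr, bool in_guest,
				      bool recoverable)
{
	if (hv && pr)
		return DELIVER_IN_V_MODE;
	if (in_guest)
		return FALL_THROUGH_TO_GUEST;
	if (!recoverable)
		return PANIC_UNRECOVERABLE;
	return QUEUE_AND_RETURN;
}

int main(void)
{
	printf("%d\n", early_mce_dispatch(true, true, false, true));
	printf("%d\n", early_mce_dispatch(false, false, true, true));
	return 0;
}
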
/*
* r13 points to the PACA, r9 contains the saved CR,
* r12 contain the saved SRR1, SRR0 is still ready for return
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index f7f5b8bed68..9ad236e5d2c 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -81,6 +81,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
/*
+ * Enable use of the FPU, and VSX if possible, for the caller.
+ */
+_GLOBAL(fp_enable)
+ mfmsr r3
+ ori r3,r3,MSR_FP
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ oris r3,r3,MSR_VSX@h
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+ SYNC
+ MTMSRD(r3)
+ isync /* (not necessary for arch 2.02 and later) */
+ blr
+
+/*
* Load state from memory into FP registers including FPSCR.
* Assumes the caller has enabled FP in the MSR.
*/
diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
index a92c79be272..f22e7e44fbf 100644
--- a/arch/powerpc/kernel/fsl_booke_entry_mapping.S
+++ b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
@@ -176,6 +176,8 @@ skpinv: addi r6,r6,1 /* Increment */
/* 7. Jump to KERNELBASE mapping */
lis r6,(KERNELBASE & ~0xfff)@h
ori r6,r6,(KERNELBASE & ~0xfff)@l
+ rlwinm r7,r25,0,0x03ffffff
+ add r6,r7,r6
#elif defined(ENTRY_MAPPING_KEXEC_SETUP)
/*
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 4f0946de2d5..b7363bd4245 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -23,6 +23,7 @@
*/
#include <linux/threads.h>
+#include <linux/init.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index f45726a1d96..b497188a94a 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -65,29 +65,78 @@ _ENTRY(_start);
nop
/* Translate device tree address to physical, save in r30/r31 */
- mfmsr r16
- mfspr r17,SPRN_PID
- rlwinm r17,r17,16,0x3fff0000 /* turn PID into MAS6[SPID] */
- rlwimi r17,r16,28,0x00000001 /* turn MSR[DS] into MAS6[SAS] */
- mtspr SPRN_MAS6,r17
-
- tlbsx 0,r3 /* must succeed */
-
- mfspr r16,SPRN_MAS1
- mfspr r20,SPRN_MAS3
- rlwinm r17,r16,25,0x1f /* r17 = log2(page size) */
- li r18,1024
- slw r18,r18,r17 /* r18 = page size */
- addi r18,r18,-1
- and r19,r3,r18 /* r19 = page offset */
- andc r31,r20,r18 /* r31 = page base */
- or r31,r31,r19 /* r31 = devtree phys addr */
- mfspr r30,SPRN_MAS7
+ bl get_phys_addr
+ mr r30,r3
+ mr r31,r4
li r25,0 /* phys kernel start (low) */
li r24,0 /* CPU number */
li r23,0 /* phys kernel start (high) */
+#ifdef CONFIG_RELOCATABLE
+ LOAD_REG_ADDR_PIC(r3, _stext) /* Get our current runtime base */
+
+ /* Translate _stext address to physical, save in r23/r25 */
+ bl get_phys_addr
+ mr r23,r3
+ mr r25,r4
+
+ bl 0f
+0: mflr r8
+ addis r3,r8,(is_second_reloc - 0b)@ha
+ lwz r19,(is_second_reloc - 0b)@l(r3)
+
+ /* Check if this is the second relocation. */
+ cmpwi r19,1
+ bne 1f
+
+ /*
+ * For the second relocation, we already have the real memstart_addr
+ * from the device tree, so we map PAGE_OFFSET to memstart_addr and
+ * the virtual address of the kernel start becomes:
+ * PAGE_OFFSET + (kernstart_addr - memstart_addr)
+ * Since the offset between kernstart_addr and memstart_addr is never
+ * beyond 1G, we can just use their lower 32 bits for the calculation.
+ */
+ lis r3,PAGE_OFFSET@h
+
+ addis r4,r8,(kernstart_addr - 0b)@ha
+ addi r4,r4,(kernstart_addr - 0b)@l
+ lwz r5,4(r4)
+
+ addis r6,r8,(memstart_addr - 0b)@ha
+ addi r6,r6,(memstart_addr - 0b)@l
+ lwz r7,4(r6)
+
+ subf r5,r7,r5
+ add r3,r3,r5
+ b 2f
+
+1:
+ /*
+ * We have the runtime (virtual) address of our base.
+ * We calculate our offset from the start of the 64M page we are in.
+ * We could map the 64M page we belong to at PAGE_OFFSET and
+ * get going from there.
+ */
+ lis r4,KERNELBASE@h
+ ori r4,r4,KERNELBASE@l
+ rlwinm r6,r25,0,0x3ffffff /* r6 = PHYS_START % 64M */
+ rlwinm r5,r4,0,0x3ffffff /* r5 = KERNELBASE % 64M */
+ subf r3,r5,r6 /* r3 = r6 - r5 */
+ add r3,r4,r3 /* Required Virtual Address */
+
+2: bl relocate
+
+ /*
+ * For the second relocation, we already set the right tlb entries
+ * for the kernel space, so skip the code in fsl_booke_entry_mapping.S
+ */
+ cmpwi r19,1
+ beq set_ivor
+#endif
+
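
The two comments in this hunk give the address arithmetic for the relocatable case. Below is a standalone sketch of both computations; the PAGE_OFFSET/KERNELBASE values are placeholders, not the kernel's configured ones.

/* Standalone sketch of the two relocation target calculations described
 * in the comments above; PAGE_OFFSET/KERNELBASE here are placeholders,
 * not the kernel's configured values. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET 0xc0000000u
#define KERNELBASE  0xc0000000u
#define SZ_64M      0x04000000u

/* Second relocation: memstart_addr is already known from the device tree. */
static uint32_t reloc_second(uint32_t kernstart_addr, uint32_t memstart_addr)
{
	return PAGE_OFFSET + (kernstart_addr - memstart_addr);
}

/* First relocation: keep our offset within the 64M page we are running in. */
static uint32_t reloc_first(uint32_t phys_start)
{
	return KERNELBASE + (phys_start % SZ_64M) - (KERNELBASE % SZ_64M);
}

int main(void)
{
	printf("second: %#x\n", reloc_second(0x11000000u, 0x10000000u));
	printf("first:  %#x\n", reloc_first(0x05000000u));
	return 0;
}
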
/* We try to not make any assumptions about how the boot loader
* setup or used the TLBs. We invalidate all mappings from the
* boot loader and load a single entry in TLB1[0] to map the
@@ -113,6 +162,7 @@ _ENTRY(__early_start)
#include "fsl_booke_entry_mapping.S"
#undef ENTRY_MAPPING_BOOT_SETUP
+set_ivor:
/* Establish the interrupt vector offsets */
SET_IVOR(0, CriticalInput);
SET_IVOR(1, MachineCheck);
@@ -166,8 +216,7 @@ _ENTRY(__early_start)
/* Check to see if we're the second processor, and jump
* to the secondary_start code if so
*/
- lis r24, boot_cpuid@h
- ori r24, r24, boot_cpuid@l
+ LOAD_REG_ADDR_PIC(r24, boot_cpuid)
lwz r24, 0(r24)
cmpwi r24, -1
mfspr r24,SPRN_PIR
@@ -197,6 +246,18 @@ _ENTRY(__early_start)
bl early_init
+#ifdef CONFIG_RELOCATABLE
+ mr r3,r30
+ mr r4,r31
+#ifdef CONFIG_PHYS_64BIT
+ mr r5,r23
+ mr r6,r25
+#else
+ mr r5,r25
+#endif
+ bl relocate_init
+#endif
+
#ifdef CONFIG_DYNAMIC_MEMSTART
lis r3,kernstart_addr@ha
la r3,kernstart_addr@l(r3)
@@ -856,6 +917,33 @@ KernelSPE:
#endif /* CONFIG_SPE */
/*
+ * Translate the effective address in r3 to a physical address. The
+ * physical address is returned in r3 (upper 32 bits) and r4 (lower 32 bits).
+ */
+get_phys_addr:
+ mfmsr r8
+ mfspr r9,SPRN_PID
+ rlwinm r9,r9,16,0x3fff0000 /* turn PID into MAS6[SPID] */
+ rlwimi r9,r8,28,0x00000001 /* turn MSR[DS] into MAS6[SAS] */
+ mtspr SPRN_MAS6,r9
+
+ tlbsx 0,r3 /* must succeed */
+
+ mfspr r8,SPRN_MAS1
+ mfspr r12,SPRN_MAS3
+ rlwinm r9,r8,25,0x1f /* r9 = log2(page size) */
+ li r10,1024
+ slw r10,r10,r9 /* r10 = page size */
+ addi r10,r10,-1
+ and r11,r3,r10 /* r11 = page offset */
+ andc r4,r12,r10 /* r4 = page base */
+ or r4,r4,r11 /* r4 = devtree phys addr */
+#ifdef CONFIG_PHYS_64BIT
+ mfspr r3,SPRN_MAS7
+#endif
+ blr
+
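
get_phys_addr above combines the page base taken from MAS3/MAS7 with the in-page offset of the effective address. The same math in plain C, using made-up register values rather than SPR accessors; the page size mirrors the 1024-shifted-by-size-field step in the asm.

/* Sketch of the page-base/offset math get_phys_addr performs on the MAS
 * registers; the inputs are made-up values, not SPR accessors, and the
 * page size mirrors the "1024 shifted by the size field" step above. */
#include <stdint.h>
#include <stdio.h>

static uint64_t phys_from_tlb(uint32_t ea, uint32_t mas3_base,
			      uint32_t mas7_hi, unsigned tsize)
{
	uint32_t pgsz = 1024u << tsize;		 /* page size, as in the asm */
	uint32_t off  = ea & (pgsz - 1);	 /* offset within the page */
	uint32_t base = mas3_base & ~(pgsz - 1); /* page-aligned phys base */

	return ((uint64_t)mas7_hi << 32) | (base | off);
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)
	       phys_from_tlb(0x00fff123, 0x20000000, 0x1, 4));
	return 0;
}
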
+/*
* Global functions
*/
@@ -1057,24 +1145,36 @@ _GLOBAL(__flush_disable_L1)
/* When we get here, r24 needs to hold the CPU # */
.globl __secondary_start
__secondary_start:
- lis r3,__secondary_hold_acknowledge@h
- ori r3,r3,__secondary_hold_acknowledge@l
- stw r24,0(r3)
-
- li r3,0
- mr r4,r24 /* Why? */
- bl call_setup_cpu
-
- lis r3,tlbcam_index@ha
- lwz r3,tlbcam_index@l(r3)
+ LOAD_REG_ADDR_PIC(r3, tlbcam_index)
+ lwz r3,0(r3)
mtctr r3
li r26,0 /* r26 safe? */
+ bl switch_to_as1
+ mr r27,r3 /* tlb entry */
/* Load each CAM entry */
1: mr r3,r26
bl loadcam_entry
addi r26,r26,1
bdnz 1b
+ mr r3,r27 /* tlb entry */
+ LOAD_REG_ADDR_PIC(r4, memstart_addr)
+ lwz r4,0(r4)
+ mr r5,r25 /* phys kernel start */
+ rlwinm r5,r5,0,~0x3ffffff /* aligned 64M */
+ subf r4,r5,r4 /* memstart_addr - phys kernel start */
+ li r5,0 /* no device tree */
+ li r6,0 /* not boot cpu */
+ bl restore_to_as0
+
+
+ lis r3,__secondary_hold_acknowledge@h
+ ori r3,r3,__secondary_hold_acknowledge@l
+ stw r24,0(r3)
+
+ li r3,0
+ mr r4,r24 /* Why? */
+ bl call_setup_cpu
/* get current_thread_info and current */
lis r1,secondary_ti@ha
@@ -1111,6 +1211,112 @@ __secondary_hold_acknowledge:
#endif
/*
+ * Create a tlb entry with the same effective and physical address as
+ * the tlb entry used by the currently running code, but set TS to 1.
+ * Then switch to address space 1. It returns with r3 set to the ESEL
+ * of the newly created tlb entry.
+ */
+_GLOBAL(switch_to_as1)
+ mflr r5
+
+ /* Find a entry not used */
+ mfspr r3,SPRN_TLB1CFG
+ andi. r3,r3,0xfff
+ mfspr r4,SPRN_PID
+ rlwinm r4,r4,16,0x3fff0000 /* turn PID into MAS6[SPID] */
+ mtspr SPRN_MAS6,r4
+1: lis r4,0x1000 /* Set MAS0(TLBSEL) = 1 */
+ addi r3,r3,-1
+ rlwimi r4,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
+ mtspr SPRN_MAS0,r4
+ tlbre
+ mfspr r4,SPRN_MAS1
+ andis. r4,r4,MAS1_VALID@h
+ bne 1b
+
+ /* Get the tlb entry used by the current running code */
+ bl 0f
+0: mflr r4
+ tlbsx 0,r4
+
+ mfspr r4,SPRN_MAS1
+ ori r4,r4,MAS1_TS /* Set the TS = 1 */
+ mtspr SPRN_MAS1,r4
+
+ mfspr r4,SPRN_MAS0
+ rlwinm r4,r4,0,~MAS0_ESEL_MASK
+ rlwimi r4,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
+ mtspr SPRN_MAS0,r4
+ tlbwe
+ isync
+ sync
+
+ mfmsr r4
+ ori r4,r4,MSR_IS | MSR_DS
+ mtspr SPRN_SRR0,r5
+ mtspr SPRN_SRR1,r4
+ sync
+ rfi
+
+/*
+ * Restore to the address space 0 and also invalidate the tlb entry created
+ * by switch_to_as1.
+ * r3 - the tlb entry which should be invalidated
+ * r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0)
+ * r5 - device tree virtual address. If r4 is 0, r5 is ignored.
+ * r6 - boot cpu
+ */
+_GLOBAL(restore_to_as0)
+ mflr r0
+
+ bl 0f
+0: mflr r9
+ addi r9,r9,1f - 0b
+
+ /*
+ * We may map the PAGE_OFFSET in AS0 to a different physical address,
+ * so we need to calculate the right jump and device tree address based
+ * on the offset passed by r4.
+ */
+ add r9,r9,r4
+ add r5,r5,r4
+ add r0,r0,r4
+
+2: mfmsr r7
+ li r8,(MSR_IS | MSR_DS)
+ andc r7,r7,r8
+
+ mtspr SPRN_SRR0,r9
+ mtspr SPRN_SRR1,r7
+ sync
+ rfi
+
+ /* Invalidate the temporary tlb entry for AS1 */
+1: lis r9,0x1000 /* Set MAS0(TLBSEL) = 1 */
+ rlwimi r9,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
+ mtspr SPRN_MAS0,r9
+ tlbre
+ mfspr r9,SPRN_MAS1
+ rlwinm r9,r9,0,2,31 /* Clear MAS1 Valid and IPROT */
+ mtspr SPRN_MAS1,r9
+ tlbwe
+ isync
+
+ cmpwi r4,0
+ cmpwi cr1,r6,0
+ cror eq,4*cr1+eq,eq
+ bne 3f /* offset != 0 && is_boot_cpu */
+ mtlr r0
+ blr
+
+ /*
+ * The PAGE_OFFSET will map to a different physical address,
+ * jump to _start to do another relocation again.
+ */
+3: mr r3,r5
+ bl _start
+
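
restore_to_as0 adjusts the jump target, the device tree pointer and the saved LR by the difference between the AS1 and AS0 physical mappings of PAGE_OFFSET. A tiny sketch of that fix-up, with illustrative values only.

/* Tiny sketch of the fix-up restore_to_as0 applies before returning to
 * AS0: shift every AS1 address by the difference between the two
 * physical mappings of PAGE_OFFSET (all values illustrative). */
#include <stdint.h>
#include <stdio.h>

struct as0_addrs { uintptr_t jump, dtb, lr; };

static struct as0_addrs fixup_for_as0(uintptr_t jump, uintptr_t dtb,
				      uintptr_t lr, intptr_t offset)
{
	struct as0_addrs a = { jump + offset, dtb + offset, lr + offset };
	return a;
}

int main(void)
{
	struct as0_addrs a = fixup_for_as0(0xc0001000, 0xc7f00000,
					   0xc0000800, 0x01000000);

	printf("jump=%#lx dtb=%#lx lr=%#lx\n", (unsigned long)a.jump,
	       (unsigned long)a.dtb, (unsigned long)a.lr);
	return 0;
}
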
+/*
* We put a few things here that have to be page-aligned. This stuff
* goes at the beginning of the data segment, which is page-aligned.
*/
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index f0b47d1a6b0..b0a1792279b 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -28,7 +28,6 @@
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <asm/hw_breakpoint.h>
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 847e40e62fc..3fdef0f0c67 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -84,6 +84,7 @@ _GLOBAL(power7_nap)
std r9,_MSR(r1)
std r1,PACAR1(r13)
+_GLOBAL(power7_enter_nap_mode)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/* Tell KVM we're napping */
li r4,KVM_HWTHREAD_IN_NAP
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index 97a3715ac8b..b82227e7e21 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -3,7 +3,6 @@
*
* (C) Copyright 2004 Linus Torvalds
*/
-#include <linux/init.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/export.h>
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 572bb5b95f3..88e3ec6e1d9 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -251,14 +251,13 @@ again:
if (dev)
boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
- 1 << IOMMU_PAGE_SHIFT);
+ 1 << tbl->it_page_shift);
else
- boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
+ boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
- n = iommu_area_alloc(tbl->it_map, limit, start, npages,
- tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
- align_mask);
+ n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
+ boundary_size >> tbl->it_page_shift, align_mask);
if (n == -1) {
if (likely(pass == 0)) {
/* First try the pool from the start */
@@ -320,12 +319,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
return DMA_ERROR_CODE;
entry += tbl->it_offset; /* Offset into real TCE table */
- ret = entry << IOMMU_PAGE_SHIFT; /* Set the return dma address */
+ ret = entry << tbl->it_page_shift; /* Set the return dma address */
/* Put the TCEs in the HW table */
build_fail = ppc_md.tce_build(tbl, entry, npages,
- (unsigned long)page & IOMMU_PAGE_MASK,
- direction, attrs);
+ (unsigned long)page &
+ IOMMU_PAGE_MASK(tbl), direction, attrs);
/* ppc_md.tce_build() only returns non-zero for transient errors.
* Clean up the table bitmap in this case and return
@@ -352,7 +351,7 @@ static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
{
unsigned long entry, free_entry;
- entry = dma_addr >> IOMMU_PAGE_SHIFT;
+ entry = dma_addr >> tbl->it_page_shift;
free_entry = entry - tbl->it_offset;
if (((free_entry + npages) > tbl->it_size) ||
@@ -401,7 +400,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned long flags;
struct iommu_pool *pool;
- entry = dma_addr >> IOMMU_PAGE_SHIFT;
+ entry = dma_addr >> tbl->it_page_shift;
free_entry = entry - tbl->it_offset;
pool = get_pool(tbl, free_entry);
@@ -468,13 +467,13 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
}
/* Allocate iommu entries for that segment */
vaddr = (unsigned long) sg_virt(s);
- npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
+ npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
align = 0;
- if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
+ if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
(vaddr & ~PAGE_MASK) == 0)
- align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+ align = PAGE_SHIFT - tbl->it_page_shift;
entry = iommu_range_alloc(dev, tbl, npages, &handle,
- mask >> IOMMU_PAGE_SHIFT, align);
+ mask >> tbl->it_page_shift, align);
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
@@ -489,16 +488,16 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
/* Convert entry to a dma_addr_t */
entry += tbl->it_offset;
- dma_addr = entry << IOMMU_PAGE_SHIFT;
- dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);
+ dma_addr = entry << tbl->it_page_shift;
+ dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
npages, entry, dma_addr);
/* Insert into HW table */
build_fail = ppc_md.tce_build(tbl, entry, npages,
- vaddr & IOMMU_PAGE_MASK,
- direction, attrs);
+ vaddr & IOMMU_PAGE_MASK(tbl),
+ direction, attrs);
if(unlikely(build_fail))
goto failure;
@@ -559,9 +558,9 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
if (s->dma_length != 0) {
unsigned long vaddr, npages;
- vaddr = s->dma_address & IOMMU_PAGE_MASK;
+ vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
npages = iommu_num_pages(s->dma_address, s->dma_length,
- IOMMU_PAGE_SIZE);
+ IOMMU_PAGE_SIZE(tbl));
__iommu_free(tbl, vaddr, npages);
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
@@ -592,7 +591,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
if (sg->dma_length == 0)
break;
npages = iommu_num_pages(dma_handle, sg->dma_length,
- IOMMU_PAGE_SIZE);
+ IOMMU_PAGE_SIZE(tbl));
__iommu_free(tbl, dma_handle, npages);
sg = sg_next(sg);
}
@@ -676,7 +675,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
set_bit(0, tbl->it_map);
/* We only split the IOMMU table if we have 1GB or more of space */
- if ((tbl->it_size << IOMMU_PAGE_SHIFT) >= (1UL * 1024 * 1024 * 1024))
+ if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
tbl->nr_pools = IOMMU_NR_POOLS;
else
tbl->nr_pools = 1;
@@ -768,16 +767,16 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
vaddr = page_address(page) + offset;
uaddr = (unsigned long)vaddr;
- npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);
+ npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
if (tbl) {
align = 0;
- if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
+ if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
((unsigned long)vaddr & ~PAGE_MASK) == 0)
- align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+ align = PAGE_SHIFT - tbl->it_page_shift;
dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
- mask >> IOMMU_PAGE_SHIFT, align,
+ mask >> tbl->it_page_shift, align,
attrs);
if (dma_handle == DMA_ERROR_CODE) {
if (printk_ratelimit()) {
@@ -786,7 +785,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
npages);
}
} else
- dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
+ dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
}
return dma_handle;
@@ -801,7 +800,8 @@ void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
BUG_ON(direction == DMA_NONE);
if (tbl) {
- npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
+ npages = iommu_num_pages(dma_handle, size,
+ IOMMU_PAGE_SIZE(tbl));
iommu_free(tbl, dma_handle, npages);
}
}
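
The iommu.c hunks above consistently replace the fixed IOMMU_PAGE_SHIFT/SIZE/MASK with values derived from the table's it_page_shift. A standalone sketch of how size and mask follow from the shift; the struct and helper names are made up, not the kernel macros.

/* Sketch of deriving a per-table IOMMU page size and mask from
 * it_page_shift, as the hunks above start doing; the struct and
 * helper names are illustrative, not the kernel definitions. */
#include <stdint.h>
#include <stdio.h>

struct fake_iommu_table { unsigned it_page_shift; };

static uint64_t iommu_page_size(const struct fake_iommu_table *tbl)
{
	return 1ull << tbl->it_page_shift;
}

static uint64_t iommu_page_mask(const struct fake_iommu_table *tbl)
{
	return ~(iommu_page_size(tbl) - 1);	/* e.g. ~0xFFF for 4K pages */
}

int main(void)
{
	struct fake_iommu_table t4k = { 12 }, t64k = { 16 };

	printf("4K:  size=%#llx mask=%#llx\n",
	       (unsigned long long)iommu_page_size(&t4k),
	       (unsigned long long)iommu_page_mask(&t4k));
	printf("64K: size=%#llx mask=%#llx\n",
	       (unsigned long long)iommu_page_size(&t64k),
	       (unsigned long long)iommu_page_mask(&t64k));
	return 0;
}
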
@@ -845,10 +845,10 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
memset(ret, 0, size);
/* Set up tces to cover the allocated range */
- nio_pages = size >> IOMMU_PAGE_SHIFT;
- io_order = get_iommu_order(size);
+ nio_pages = size >> tbl->it_page_shift;
+ io_order = get_iommu_order(size, tbl);
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
- mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
+ mask >> tbl->it_page_shift, io_order, NULL);
if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
return NULL;
@@ -864,7 +864,7 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
unsigned int nio_pages;
size = PAGE_ALIGN(size);
- nio_pages = size >> IOMMU_PAGE_SHIFT;
+ nio_pages = size >> tbl->it_page_shift;
iommu_free(tbl, dma_handle, nio_pages);
size = PAGE_ALIGN(size);
free_pages((unsigned long)vaddr, get_order(size));
@@ -935,10 +935,10 @@ int iommu_tce_clear_param_check(struct iommu_table *tbl,
if (tce_value)
return -EINVAL;
- if (ioba & ~IOMMU_PAGE_MASK)
+ if (ioba & ~IOMMU_PAGE_MASK(tbl))
return -EINVAL;
- ioba >>= IOMMU_PAGE_SHIFT;
+ ioba >>= tbl->it_page_shift;
if (ioba < tbl->it_offset)
return -EINVAL;
@@ -955,13 +955,13 @@ int iommu_tce_put_param_check(struct iommu_table *tbl,
if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ)))
return -EINVAL;
- if (tce & ~(IOMMU_PAGE_MASK | TCE_PCI_WRITE | TCE_PCI_READ))
+ if (tce & ~(IOMMU_PAGE_MASK(tbl) | TCE_PCI_WRITE | TCE_PCI_READ))
return -EINVAL;
- if (ioba & ~IOMMU_PAGE_MASK)
+ if (ioba & ~IOMMU_PAGE_MASK(tbl))
return -EINVAL;
- ioba >>= IOMMU_PAGE_SHIFT;
+ ioba >>= tbl->it_page_shift;
if (ioba < tbl->it_offset)
return -EINVAL;
@@ -1037,7 +1037,7 @@ int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
/* if (unlikely(ret))
pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
- __func__, hwaddr, entry << IOMMU_PAGE_SHIFT,
+ __func__, hwaddr, entry << IOMMU_PAGE_SHIFT(tbl),
hwaddr, ret); */
return ret;
@@ -1049,14 +1049,14 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
{
int ret;
struct page *page = NULL;
- unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK & ~PAGE_MASK;
+ unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
enum dma_data_direction direction = iommu_tce_direction(tce);
ret = get_user_pages_fast(tce & PAGE_MASK, 1,
direction != DMA_TO_DEVICE, &page);
if (unlikely(ret != 1)) {
/* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n",
- tce, entry << IOMMU_PAGE_SHIFT, ret); */
+ tce, entry << IOMMU_PAGE_SHIFT(tbl), ret); */
return -EFAULT;
}
hwaddr = (unsigned long) page_address(page) + offset;
@@ -1067,7 +1067,7 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
if (ret < 0)
pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n",
- __func__, entry << IOMMU_PAGE_SHIFT, tce, ret);
+ __func__, entry << tbl->it_page_shift, tce, ret);
return ret;
}
@@ -1088,6 +1088,14 @@ int iommu_take_ownership(struct iommu_table *tbl)
memset(tbl->it_map, 0xff, sz);
iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
+ /*
+ * Disable iommu bypass, otherwise the user can DMA to all of
+ * our physical memory via the bypass window instead of just
+ * the pages that have been explicitly mapped into the iommu
+ */
+ if (tbl->set_bypass)
+ tbl->set_bypass(tbl, false);
+
return 0;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);
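
The comment above pairs DMA bypass with ownership: bypass goes off when userspace takes the table and comes back when the kernel reclaims it. A minimal model of that pairing; all names below are illustrative.

/* Minimal model of the ownership/bypass pairing described above; the
 * callback type and struct are illustrative, not kernel definitions. */
#include <stdbool.h>
#include <stdio.h>

struct fake_tbl {
	void (*set_bypass)(struct fake_tbl *tbl, bool enable);
};

static void report_bypass(struct fake_tbl *tbl, bool enable)
{
	(void)tbl;
	printf("bypass %s\n", enable ? "enabled" : "disabled");
}

static void take_ownership(struct fake_tbl *tbl)
{
	if (tbl->set_bypass)
		tbl->set_bypass(tbl, false);	/* user must map explicitly */
}

static void release_ownership(struct fake_tbl *tbl)
{
	if (tbl->set_bypass)
		tbl->set_bypass(tbl, true);	/* kernel owns the device again */
}

int main(void)
{
	struct fake_tbl tbl = { report_bypass };

	take_ownership(&tbl);
	release_ownership(&tbl);
	return 0;
}
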
@@ -1102,10 +1110,14 @@ void iommu_release_ownership(struct iommu_table *tbl)
/* Restore bit#0 set by iommu_init_table() */
if (tbl->it_offset == 0)
set_bit(0, tbl->it_map);
+
+ /* The kernel owns the device now, we can restore the iommu bypass */
+ if (tbl->set_bypass)
+ tbl->set_bypass(tbl, true);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);
-static int iommu_add_device(struct device *dev)
+int iommu_add_device(struct device *dev)
{
struct iommu_table *tbl;
int ret = 0;
@@ -1127,6 +1139,12 @@ static int iommu_add_device(struct device *dev)
pr_debug("iommu_tce: adding %s to iommu group %d\n",
dev_name(dev), iommu_group_id(tbl->it_group));
+ if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
+ pr_err("iommu_tce: unsupported iommu page size.");
+ pr_err("%s has not been added\n", dev_name(dev));
+ return -EINVAL;
+ }
+
ret = iommu_group_add_device(tbl->it_group, dev);
if (ret < 0)
pr_err("iommu_tce: %s has not been added, ret=%d\n",
@@ -1134,52 +1152,23 @@ static int iommu_add_device(struct device *dev)
return ret;
}
+EXPORT_SYMBOL_GPL(iommu_add_device);
-static void iommu_del_device(struct device *dev)
-{
- iommu_group_remove_device(dev);
-}
-
-static int iommu_bus_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
+void iommu_del_device(struct device *dev)
{
- struct device *dev = data;
-
- switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
- return iommu_add_device(dev);
- case BUS_NOTIFY_DEL_DEVICE:
- iommu_del_device(dev);
- return 0;
- default:
- return 0;
+ /*
+ * Some devices might not have an IOMMU table and group,
+ * so we needn't detach them from their associated
+ * IOMMU groups.
+ */
+ if (!dev->iommu_group) {
+ pr_debug("iommu_tce: skipping device %s with no tbl\n",
+ dev_name(dev));
+ return;
}
-}
-
-static struct notifier_block tce_iommu_bus_nb = {
- .notifier_call = iommu_bus_notifier,
-};
-
-static int __init tce_iommu_init(void)
-{
- struct pci_dev *pdev = NULL;
-
- BUILD_BUG_ON(PAGE_SIZE < IOMMU_PAGE_SIZE);
-
- for_each_pci_dev(pdev)
- iommu_add_device(&pdev->dev);
-
- bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
- return 0;
-}
-
-subsys_initcall_sync(tce_iommu_init);
-
-#else
-void iommu_register_group(struct iommu_table *tbl,
- int pci_domain_number, unsigned long pe_num)
-{
+ iommu_group_remove_device(dev);
}
+EXPORT_SYMBOL_GPL(iommu_del_device);
#endif /* CONFIG_IOMMU_API */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ba016561521..1d0848bba04 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -354,8 +354,13 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_printf(p, "%*s: ", prec, "LOC");
for_each_online_cpu(j)
- seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
- seq_printf(p, " Local timer interrupts\n");
+ seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
+ seq_printf(p, " Local timer interrupts for timer event device\n");
+
+ seq_printf(p, "%*s: ", prec, "LOC");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
+ seq_printf(p, " Local timer interrupts for others\n");
seq_printf(p, "%*s: ", prec, "SPU");
for_each_online_cpu(j)
@@ -389,11 +394,12 @@ int arch_show_interrupts(struct seq_file *p, int prec)
*/
u64 arch_irq_stat_cpu(unsigned int cpu)
{
- u64 sum = per_cpu(irq_stat, cpu).timer_irqs;
+ u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;
sum += per_cpu(irq_stat, cpu).pmu_irqs;
sum += per_cpu(irq_stat, cpu).mce_exceptions;
sum += per_cpu(irq_stat, cpu).spurious_irqs;
+ sum += per_cpu(irq_stat, cpu).timer_irqs_others;
#ifdef CONFIG_PPC_DOORBELL
sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif
@@ -553,8 +559,13 @@ void exc_lvl_ctx_init(void)
#ifdef CONFIG_PPC64
cpu_nr = i;
#else
+#ifdef CONFIG_SMP
cpu_nr = get_hard_smp_processor_id(i);
+#else
+ cpu_nr = 0;
+#endif
#endif
+
memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
tp = critirq_ctx[cpu_nr];
tp->cpu = cpu_nr;
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 83e89d31073..8504657379f 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -15,7 +15,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/signal.h>
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index db28032e320..6a0175297b0 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -413,13 +413,13 @@ static void kvm_map_magic_page(void *data)
{
u32 *features = data;
- ulong in[8];
+ ulong in[8] = {0};
ulong out[8];
in[0] = KVM_MAGIC_PAGE;
in[1] = KVM_MAGIC_PAGE;
- kvm_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
+ epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
*features = out[0];
}
@@ -711,43 +711,6 @@ static void kvm_use_magic_page(void)
kvm_patching_worked ? "worked" : "failed");
}
-unsigned long kvm_hypercall(unsigned long *in,
- unsigned long *out,
- unsigned long nr)
-{
- unsigned long register r0 asm("r0");
- unsigned long register r3 asm("r3") = in[0];
- unsigned long register r4 asm("r4") = in[1];
- unsigned long register r5 asm("r5") = in[2];
- unsigned long register r6 asm("r6") = in[3];
- unsigned long register r7 asm("r7") = in[4];
- unsigned long register r8 asm("r8") = in[5];
- unsigned long register r9 asm("r9") = in[6];
- unsigned long register r10 asm("r10") = in[7];
- unsigned long register r11 asm("r11") = nr;
- unsigned long register r12 asm("r12");
-
- asm volatile("bl epapr_hypercall_start"
- : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
- "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
- "=r"(r12)
- : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
- "r"(r9), "r"(r10), "r"(r11)
- : "memory", "cc", "xer", "ctr", "lr");
-
- out[0] = r4;
- out[1] = r5;
- out[2] = r6;
- out[3] = r7;
- out[4] = r8;
- out[5] = r9;
- out[6] = r10;
- out[7] = r11;
-
- return r3;
-}
-EXPORT_SYMBOL_GPL(kvm_hypercall);
-
static __init void kvm_free_tmp(void)
{
free_reserved_area(&kvm_tmp[kvm_tmp_index],
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 75d4f7340da..015ae55c186 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -196,7 +196,9 @@ int overlaps_crashkernel(unsigned long start, unsigned long size)
/* Values we need to export to the second kernel via the device tree. */
static phys_addr_t kernel_end;
+static phys_addr_t crashk_base;
static phys_addr_t crashk_size;
+static unsigned long long mem_limit;
static struct property kernel_end_prop = {
.name = "linux,kernel-end",
@@ -207,7 +209,7 @@ static struct property kernel_end_prop = {
static struct property crashk_base_prop = {
.name = "linux,crashkernel-base",
.length = sizeof(phys_addr_t),
- .value = &crashk_res.start,
+ .value = &crashk_base
};
static struct property crashk_size_prop = {
@@ -219,9 +221,11 @@ static struct property crashk_size_prop = {
static struct property memory_limit_prop = {
.name = "linux,memory-limit",
.length = sizeof(unsigned long long),
- .value = &memory_limit,
+ .value = &mem_limit,
};
+#define cpu_to_be_ulong __PASTE(cpu_to_be, BITS_PER_LONG)
+
static void __init export_crashk_values(struct device_node *node)
{
struct property *prop;
@@ -237,8 +241,9 @@ static void __init export_crashk_values(struct device_node *node)
of_remove_property(node, prop);
if (crashk_res.start != 0) {
+ crashk_base = cpu_to_be_ulong(crashk_res.start);
of_add_property(node, &crashk_base_prop);
- crashk_size = resource_size(&crashk_res);
+ crashk_size = cpu_to_be_ulong(resource_size(&crashk_res));
of_add_property(node, &crashk_size_prop);
}
@@ -246,6 +251,7 @@ static void __init export_crashk_values(struct device_node *node)
* memory_limit is required by the kexec-tools to limit the
* crash regions to the actual memory used.
*/
+ mem_limit = cpu_to_be_ulong(memory_limit);
of_update_property(node, &memory_limit_prop);
}
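
cpu_to_be_ulong above token-pastes BITS_PER_LONG onto cpu_to_be so the exported device-tree values are byte-swapped at the native word size. A standalone sketch of the same paste trick with simplified swap helpers; the real kernel conversions are no-ops on big-endian hosts.

/* Standalone sketch of the token-paste trick behind cpu_to_be_ulong
 * above, with simplified byte-swap helpers (gcc/clang builtins); the
 * real kernel macros are no-ops on big-endian hosts. */
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG 64		/* placeholder for the build config */
#define MY_PASTE_(a, b) a##b
#define MY_PASTE(a, b)  MY_PASTE_(a, b)
#define my_to_be_ulong  MY_PASTE(my_to_be, BITS_PER_LONG)

static uint32_t my_to_be32(uint32_t v) { return __builtin_bswap32(v); }
static uint64_t my_to_be64(uint64_t v) { return __builtin_bswap64(v); }

int main(void)
{
	/* device-tree properties are big-endian regardless of host order */
	printf("%#x\n", my_to_be32(0x1234u));
	printf("%#llx\n", (unsigned long long)my_to_be_ulong(0x1234ull));
	return 0;
}
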
@@ -264,7 +270,7 @@ static int __init kexec_setup(void)
of_remove_property(node, prop);
/* information needed by userspace when using default_machine_kexec */
- kernel_end = __pa(_end);
+ kernel_end = cpu_to_be_ulong(__pa(_end));
of_add_property(node, &kernel_end_prop);
export_crashk_values(node);
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index be4e6d648f6..59d229a2a3e 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -369,6 +369,7 @@ void default_machine_kexec(struct kimage *image)
/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base;
+static unsigned long htab_size;
static struct property htab_base_prop = {
.name = "linux,htab-base",
@@ -379,7 +380,7 @@ static struct property htab_base_prop = {
static struct property htab_size_prop = {
.name = "linux,htab-size",
.length = sizeof(unsigned long),
- .value = &htab_size_bytes,
+ .value = &htab_size,
};
static int __init export_htab_values(void)
@@ -403,8 +404,9 @@ static int __init export_htab_values(void)
if (prop)
of_remove_property(node, prop);
- htab_base = __pa(htab_address);
+ htab_base = cpu_to_be64(__pa(htab_address));
of_add_property(node, &htab_base_prop);
+ htab_size = cpu_to_be64(htab_size_bytes);
of_add_property(node, &htab_size_prop);
of_node_put(node);
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
new file mode 100644
index 00000000000..cadef7e64e4
--- /dev/null
+++ b/arch/powerpc/kernel/mce.c
@@ -0,0 +1,352 @@
+/*
+ * Machine check exception handling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2013 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#undef DEBUG
+#define pr_fmt(fmt) "mce: " fmt
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+#include <linux/export.h>
+#include <linux/irq_work.h>
+#include <asm/mce.h>
+
+static DEFINE_PER_CPU(int, mce_nest_count);
+static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);
+
+/* Queue for delayed MCE events. */
+static DEFINE_PER_CPU(int, mce_queue_count);
+static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);
+
+static void machine_check_process_queued_event(struct irq_work *work);
+struct irq_work mce_event_process_work = {
+ .func = machine_check_process_queued_event,
+};
+
+static void mce_set_error_info(struct machine_check_event *mce,
+ struct mce_error_info *mce_err)
+{
+ mce->error_type = mce_err->error_type;
+ switch (mce_err->error_type) {
+ case MCE_ERROR_TYPE_UE:
+ mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
+ break;
+ case MCE_ERROR_TYPE_SLB:
+ mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
+ break;
+ case MCE_ERROR_TYPE_ERAT:
+ mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
+ break;
+ case MCE_ERROR_TYPE_TLB:
+ mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
+ break;
+ case MCE_ERROR_TYPE_UNKNOWN:
+ default:
+ break;
+ }
+}
+
+/*
+ * Decode and save high level MCE information into the per-cpu buffer,
+ * which is an array of machine_check_event structures.
+ */
+void save_mce_event(struct pt_regs *regs, long handled,
+ struct mce_error_info *mce_err,
+ uint64_t addr)
+{
+ uint64_t srr1;
+ int index = __get_cpu_var(mce_nest_count)++;
+ struct machine_check_event *mce = &__get_cpu_var(mce_event[index]);
+
+ /*
+ * Return if we don't have enough space to log the mce event.
+ * mce_nest_count may go beyond MAX_MC_EVT but that's ok, since
+ * the check below will stop any buffer overrun.
+ */
+ if (index >= MAX_MC_EVT)
+ return;
+
+ /* Populate generic machine check info */
+ mce->version = MCE_V1;
+ mce->srr0 = regs->nip;
+ mce->srr1 = regs->msr;
+ mce->gpr3 = regs->gpr[3];
+ mce->in_use = 1;
+
+ mce->initiator = MCE_INITIATOR_CPU;
+ if (handled)
+ mce->disposition = MCE_DISPOSITION_RECOVERED;
+ else
+ mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
+ mce->severity = MCE_SEV_ERROR_SYNC;
+
+ srr1 = regs->msr;
+
+ /*
+ * Populate the mce error_type and type-specific error_type.
+ */
+ mce_set_error_info(mce, mce_err);
+
+ if (!addr)
+ return;
+
+ if (mce->error_type == MCE_ERROR_TYPE_TLB) {
+ mce->u.tlb_error.effective_address_provided = true;
+ mce->u.tlb_error.effective_address = addr;
+ } else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
+ mce->u.slb_error.effective_address_provided = true;
+ mce->u.slb_error.effective_address = addr;
+ } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
+ mce->u.erat_error.effective_address_provided = true;
+ mce->u.erat_error.effective_address = addr;
+ } else if (mce->error_type == MCE_ERROR_TYPE_UE) {
+ mce->u.ue_error.effective_address_provided = true;
+ mce->u.ue_error.effective_address = addr;
+ }
+ return;
+}
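
save_mce_event pushes into a small fixed-size per-cpu array and lets the nest counter overshoot while the bounds check protects the slots. Below is a single-threaded model of that bounded push/pop, with made-up sizes and fields; the pop mirrors get_mce_event() with release set.

/* Standalone model of the bounded event stack used by save_mce_event()
 * and get_mce_event() above; MAX_EVT and the struct are illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_EVT 4

struct evt { unsigned long srr0; bool in_use; };

static struct evt stack[MAX_EVT];
static int nest_count;

static void push_event(unsigned long srr0)
{
	int index = nest_count++;

	if (index >= MAX_EVT)	/* counter may overshoot; slot writes must not */
		return;
	stack[index].srr0 = srr0;
	stack[index].in_use = true;
}

static bool pop_event(struct evt *out)
{
	int index = nest_count - 1;

	if (index < 0)
		return false;
	nest_count--;
	if (index >= MAX_EVT)	/* counter had overshot; nothing was stored */
		return false;
	if (out)
		*out = stack[index];
	stack[index].in_use = false;
	return true;
}

int main(void)
{
	struct evt e;

	push_event(0x700);
	if (pop_event(&e))
		printf("srr0=%#lx\n", e.srr0);
	return 0;
}
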
+
+/*
+ * get_mce_event:
+ * mce Pointer to machine_check_event structure to be filled.
+ * release Flag to indicate whether to free the event slot or not.
+ * 0 <= do not release the mce event. Caller will invoke
+ * release_mce_event() once event has been consumed.
+ * 1 <= release the slot.
+ *
+ * return 1 = success
+ * 0 = failure
+ *
+ * get_mce_event() will be called by the platform specific machine check
+ * handler routine and by KVM.
+ * When we call get_mce_event(), we are still in interrupt context and
+ * preemption will not be scheduled until the ret_from_except() routine
+ * is called.
+ */
+int get_mce_event(struct machine_check_event *mce, bool release)
+{
+ int index = __get_cpu_var(mce_nest_count) - 1;
+ struct machine_check_event *mc_evt;
+ int ret = 0;
+
+ /* Sanity check */
+ if (index < 0)
+ return ret;
+
+ /* Check if we have MCE info to process. */
+ if (index < MAX_MC_EVT) {
+ mc_evt = &__get_cpu_var(mce_event[index]);
+ /* Copy the event structure and release the original */
+ if (mce)
+ *mce = *mc_evt;
+ if (release)
+ mc_evt->in_use = 0;
+ ret = 1;
+ }
+ /* Decrement the count to free the slot. */
+ if (release)
+ __get_cpu_var(mce_nest_count)--;
+
+ return ret;
+}
+
+void release_mce_event(void)
+{
+ get_mce_event(NULL, true);
+}
+
+/*
+ * Queue up the MCE event which then can be handled later.
+ */
+void machine_check_queue_event(void)
+{
+ int index;
+ struct machine_check_event evt;
+
+ if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
+ return;
+
+ index = __get_cpu_var(mce_queue_count)++;
+ /* If queue is full, just return for now. */
+ if (index >= MAX_MC_EVT) {
+ __get_cpu_var(mce_queue_count)--;
+ return;
+ }
+ __get_cpu_var(mce_event_queue[index]) = evt;
+
+ /* Queue irq work to process this event later. */
+ irq_work_queue(&mce_event_process_work);
+}
+
+/*
+ * Process pending MCE events from the mce event queue. This function will
+ * be called during syscall exit.
+ */
+static void machine_check_process_queued_event(struct irq_work *work)
+{
+ int index;
+
+ /*
+ * For now just print it to console.
+ * TODO: log this error event to FSP or nvram.
+ */
+ while (__get_cpu_var(mce_queue_count) > 0) {
+ index = __get_cpu_var(mce_queue_count) - 1;
+ machine_check_print_event_info(
+ &__get_cpu_var(mce_event_queue[index]));
+ __get_cpu_var(mce_queue_count)--;
+ }
+}
+
+void machine_check_print_event_info(struct machine_check_event *evt)
+{
+ const char *level, *sevstr, *subtype;
+ static const char *mc_ue_types[] = {
+ "Indeterminate",
+ "Instruction fetch",
+ "Page table walk ifetch",
+ "Load/Store",
+ "Page table walk Load/Store",
+ };
+ static const char *mc_slb_types[] = {
+ "Indeterminate",
+ "Parity",
+ "Multihit",
+ };
+ static const char *mc_erat_types[] = {
+ "Indeterminate",
+ "Parity",
+ "Multihit",
+ };
+ static const char *mc_tlb_types[] = {
+ "Indeterminate",
+ "Parity",
+ "Multihit",
+ };
+
+ /* Print things out */
+ if (evt->version != MCE_V1) {
+ pr_err("Machine Check Exception, Unknown event version %d !\n",
+ evt->version);
+ return;
+ }
+ switch (evt->severity) {
+ case MCE_SEV_NO_ERROR:
+ level = KERN_INFO;
+ sevstr = "Harmless";
+ break;
+ case MCE_SEV_WARNING:
+ level = KERN_WARNING;
+ sevstr = "";
+ break;
+ case MCE_SEV_ERROR_SYNC:
+ level = KERN_ERR;
+ sevstr = "Severe";
+ break;
+ case MCE_SEV_FATAL:
+ default:
+ level = KERN_ERR;
+ sevstr = "Fatal";
+ break;
+ }
+
+ printk("%s%s Machine check interrupt [%s]\n", level, sevstr,
+ evt->disposition == MCE_DISPOSITION_RECOVERED ?
+ "Recovered" : "Not recovered");
+ printk("%s Initiator: %s\n", level,
+ evt->initiator == MCE_INITIATOR_CPU ? "CPU" : "Unknown");
+ switch (evt->error_type) {
+ case MCE_ERROR_TYPE_UE:
+ subtype = evt->u.ue_error.ue_error_type <
+ ARRAY_SIZE(mc_ue_types) ?
+ mc_ue_types[evt->u.ue_error.ue_error_type]
+ : "Unknown";
+ printk("%s Error type: UE [%s]\n", level, subtype);
+ if (evt->u.ue_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt->u.ue_error.effective_address);
+ if (evt->u.ue_error.physical_address_provided)
+ printk("%s Physical address: %016llx\n",
+ level, evt->u.ue_error.physical_address);
+ break;
+ case MCE_ERROR_TYPE_SLB:
+ subtype = evt->u.slb_error.slb_error_type <
+ ARRAY_SIZE(mc_slb_types) ?
+ mc_slb_types[evt->u.slb_error.slb_error_type]
+ : "Unknown";
+ printk("%s Error type: SLB [%s]\n", level, subtype);
+ if (evt->u.slb_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt->u.slb_error.effective_address);
+ break;
+ case MCE_ERROR_TYPE_ERAT:
+ subtype = evt->u.erat_error.erat_error_type <
+ ARRAY_SIZE(mc_erat_types) ?
+ mc_erat_types[evt->u.erat_error.erat_error_type]
+ : "Unknown";
+ printk("%s Error type: ERAT [%s]\n", level, subtype);
+ if (evt->u.erat_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt->u.erat_error.effective_address);
+ break;
+ case MCE_ERROR_TYPE_TLB:
+ subtype = evt->u.tlb_error.tlb_error_type <
+ ARRAY_SIZE(mc_tlb_types) ?
+ mc_tlb_types[evt->u.tlb_error.tlb_error_type]
+ : "Unknown";
+ printk("%s Error type: TLB [%s]\n", level, subtype);
+ if (evt->u.tlb_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt->u.tlb_error.effective_address);
+ break;
+ default:
+ case MCE_ERROR_TYPE_UNKNOWN:
+ printk("%s Error type: Unknown\n", level);
+ break;
+ }
+}
+
+uint64_t get_mce_fault_addr(struct machine_check_event *evt)
+{
+ switch (evt->error_type) {
+ case MCE_ERROR_TYPE_UE:
+ if (evt->u.ue_error.effective_address_provided)
+ return evt->u.ue_error.effective_address;
+ break;
+ case MCE_ERROR_TYPE_SLB:
+ if (evt->u.slb_error.effective_address_provided)
+ return evt->u.slb_error.effective_address;
+ break;
+ case MCE_ERROR_TYPE_ERAT:
+ if (evt->u.erat_error.effective_address_provided)
+ return evt->u.erat_error.effective_address;
+ break;
+ case MCE_ERROR_TYPE_TLB:
+ if (evt->u.tlb_error.effective_address_provided)
+ return evt->u.tlb_error.effective_address;
+ break;
+ default:
+ case MCE_ERROR_TYPE_UNKNOWN:
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(get_mce_fault_addr);
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
new file mode 100644
index 00000000000..27c93f41166
--- /dev/null
+++ b/arch/powerpc/kernel/mce_power.c
@@ -0,0 +1,284 @@
+/*
+ * Machine check exception handling CPU-side for power7 and power8
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2013 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#undef DEBUG
+#define pr_fmt(fmt) "mce_power: " fmt
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <asm/mmu.h>
+#include <asm/mce.h>
+
+/* flush SLBs and reload */
+static void flush_and_reload_slb(void)
+{
+ struct slb_shadow *slb;
+ unsigned long i, n;
+
+ /* Invalidate all SLBs */
+ asm volatile("slbmte %0,%0; slbia" : : "r" (0));
+
+#ifdef CONFIG_KVM_BOOK3S_HANDLER
+ /*
+ * If machine check is hit when in guest or in transition, we will
+ * only flush the SLBs and continue.
+ */
+ if (get_paca()->kvm_hstate.in_guest)
+ return;
+#endif
+
+ /* For host kernel, reload the SLBs from shadow SLB buffer. */
+ slb = get_slb_shadow();
+ if (!slb)
+ return;
+
+ n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);
+
+ /* Load up the SLB entries from shadow SLB */
+ for (i = 0; i < n; i++) {
+ unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
+ unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);
+
+ rb = (rb & ~0xFFFul) | i;
+ asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
+ }
+}
+
+static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
+{
+ long handled = 1;
+
+ /*
+ * Flush and reload SLBs for SLB errors and flush TLBs for TLB errors.
+ * Reset the error bits whenever we handle them so that at the end
+ * we can check whether we handled all of them or not.
+ */
+ if (dsisr & slb_error_bits) {
+ flush_and_reload_slb();
+ /* reset error bits */
+ dsisr &= ~(slb_error_bits);
+ }
+ if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
+ if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
+ cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE);
+ /* reset error bits */
+ dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB;
+ }
+ /* Any other errors we don't understand? */
+ if (dsisr & 0xffffffffUL)
+ handled = 0;
+
+ return handled;
+}
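
mce_handle_derror clears each error bit as it handles it and treats any bit still set at the end as unhandled. The same pattern in a standalone sketch; the bit values below are made up for the example.

/* Standalone model of the "clear the bits we handled, then see what is
 * left" pattern used by mce_handle_derror() above; the bit values are
 * made up for the example. */
#include <stdint.h>
#include <stdio.h>

#define ERR_SLB 0x3ull	/* stands in for the SLB error bits */
#define ERR_TLB 0x4ull	/* stands in for the TLB multihit bit */

static int handle_derror_bits(uint64_t dsisr)
{
	if (dsisr & ERR_SLB) {
		/* flush and reload SLBs, then mark these bits handled */
		dsisr &= ~ERR_SLB;
	}
	if (dsisr & ERR_TLB) {
		/* flush TLBs, then mark this bit handled */
		dsisr &= ~ERR_TLB;
	}
	/* anything still set is an error we did not understand */
	return (dsisr & 0xffffffffull) == 0;
}

int main(void)
{
	printf("handled=%d\n", handle_derror_bits(ERR_SLB | ERR_TLB));
	printf("handled=%d\n", handle_derror_bits(ERR_SLB | 0x100));
	return 0;
}
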
+
+static long mce_handle_derror_p7(uint64_t dsisr)
+{
+ return mce_handle_derror(dsisr, P7_DSISR_MC_SLB_ERRORS);
+}
+
+static long mce_handle_common_ierror(uint64_t srr1)
+{
+ long handled = 0;
+
+ switch (P7_SRR1_MC_IFETCH(srr1)) {
+ case 0:
+ break;
+ case P7_SRR1_MC_IFETCH_SLB_PARITY:
+ case P7_SRR1_MC_IFETCH_SLB_MULTIHIT:
+ /* flush and reload SLBs for SLB errors. */
+ flush_and_reload_slb();
+ handled = 1;
+ break;
+ case P7_SRR1_MC_IFETCH_TLB_MULTIHIT:
+ if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
+ cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE);
+ handled = 1;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return handled;
+}
+
+static long mce_handle_ierror_p7(uint64_t srr1)
+{
+ long handled = 0;
+
+ handled = mce_handle_common_ierror(srr1);
+
+ if (P7_SRR1_MC_IFETCH(srr1) == P7_SRR1_MC_IFETCH_SLB_BOTH) {
+ flush_and_reload_slb();
+ handled = 1;
+ }
+ return handled;
+}
+
+static void mce_get_common_ierror(struct mce_error_info *mce_err, uint64_t srr1)
+{
+ switch (P7_SRR1_MC_IFETCH(srr1)) {
+ case P7_SRR1_MC_IFETCH_SLB_PARITY:
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
+ break;
+ case P7_SRR1_MC_IFETCH_SLB_MULTIHIT:
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
+ break;
+ case P7_SRR1_MC_IFETCH_TLB_MULTIHIT:
+ mce_err->error_type = MCE_ERROR_TYPE_TLB;
+ mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
+ break;
+ case P7_SRR1_MC_IFETCH_UE:
+ case P7_SRR1_MC_IFETCH_UE_IFU_INTERNAL:
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH;
+ break;
+ case P7_SRR1_MC_IFETCH_UE_TLB_RELOAD:
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type =
+ MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
+ break;
+ }
+}
+
+static void mce_get_ierror_p7(struct mce_error_info *mce_err, uint64_t srr1)
+{
+ mce_get_common_ierror(mce_err, srr1);
+ if (P7_SRR1_MC_IFETCH(srr1) == P7_SRR1_MC_IFETCH_SLB_BOTH) {
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_INDETERMINATE;
+ }
+}
+
+static void mce_get_derror_p7(struct mce_error_info *mce_err, uint64_t dsisr)
+{
+ if (dsisr & P7_DSISR_MC_UE) {
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
+ } else if (dsisr & P7_DSISR_MC_UE_TABLEWALK) {
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type =
+ MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
+ } else if (dsisr & P7_DSISR_MC_ERAT_MULTIHIT) {
+ mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+ mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+ } else if (dsisr & P7_DSISR_MC_SLB_MULTIHIT) {
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
+ } else if (dsisr & P7_DSISR_MC_SLB_PARITY_MFSLB) {
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
+ } else if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
+ mce_err->error_type = MCE_ERROR_TYPE_TLB;
+ mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
+ } else if (dsisr & P7_DSISR_MC_SLB_MULTIHIT_PARITY) {
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_INDETERMINATE;
+ }
+}
+
+long __machine_check_early_realmode_p7(struct pt_regs *regs)
+{
+ uint64_t srr1, addr;
+ long handled = 1;
+ struct mce_error_info mce_error_info = { 0 };
+
+ srr1 = regs->msr;
+
+ /*
+ * Handle memory errors depending on whether this was a load/store or
+ * ifetch exception. Also, populate the mce error_type and
+ * type-specific error_type from either SRR1 or DSISR, depending on
+ * whether this was a load/store or ifetch exception.
+ */
+ if (P7_SRR1_MC_LOADSTORE(srr1)) {
+ handled = mce_handle_derror_p7(regs->dsisr);
+ mce_get_derror_p7(&mce_error_info, regs->dsisr);
+ addr = regs->dar;
+ } else {
+ handled = mce_handle_ierror_p7(srr1);
+ mce_get_ierror_p7(&mce_error_info, srr1);
+ addr = regs->nip;
+ }
+
+ save_mce_event(regs, handled, &mce_error_info, addr);
+ return handled;
+}
+
+static void mce_get_ierror_p8(struct mce_error_info *mce_err, uint64_t srr1)
+{
+ mce_get_common_ierror(mce_err, srr1);
+ if (P7_SRR1_MC_IFETCH(srr1) == P8_SRR1_MC_IFETCH_ERAT_MULTIHIT) {
+ mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+ mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+ }
+}
+
+static void mce_get_derror_p8(struct mce_error_info *mce_err, uint64_t dsisr)
+{
+ mce_get_derror_p7(mce_err, dsisr);
+ if (dsisr & P8_DSISR_MC_ERAT_MULTIHIT_SEC) {
+ mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+ mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+ }
+}
+
+static long mce_handle_ierror_p8(uint64_t srr1)
+{
+ long handled = 0;
+
+ handled = mce_handle_common_ierror(srr1);
+
+ if (P7_SRR1_MC_IFETCH(srr1) == P8_SRR1_MC_IFETCH_ERAT_MULTIHIT) {
+ flush_and_reload_slb();
+ handled = 1;
+ }
+ return handled;
+}
+
+static long mce_handle_derror_p8(uint64_t dsisr)
+{
+ return mce_handle_derror(dsisr, P8_DSISR_MC_SLB_ERRORS);
+}
+
+long __machine_check_early_realmode_p8(struct pt_regs *regs)
+{
+ uint64_t srr1, addr;
+ long handled = 1;
+ struct mce_error_info mce_error_info = { 0 };
+
+ srr1 = regs->msr;
+
+ if (P7_SRR1_MC_LOADSTORE(srr1)) {
+ handled = mce_handle_derror_p8(regs->dsisr);
+ mce_get_derror_p8(&mce_error_info, regs->dsisr);
+ addr = regs->dar;
+ } else {
+ handled = mce_handle_ierror_p8(srr1);
+ mce_get_ierror_p8(&mce_error_info, srr1);
+ addr = regs->nip;
+ }
+
+ save_mce_event(regs, handled, &mce_error_info, addr);
+ return handled;
+}
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index e47d268727a..7c6bb4b17b4 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -57,11 +57,14 @@ _GLOBAL(call_do_softirq)
mtlr r0
blr
+/*
+ * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
+ */
_GLOBAL(call_do_irq)
mflr r0
stw r0,4(r1)
lwz r10,THREAD+KSP_LIMIT(r2)
- addi r11,r3,THREAD_INFO_GAP
+ addi r11,r4,THREAD_INFO_GAP
stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
mr r1,r4
stw r10,8(r1)
@@ -344,7 +347,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
*/
_KPROBE(flush_icache_range)
BEGIN_FTR_SECTION
- isync
+ PURGE_PREFETCHED_INS
blr /* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
li r5,L1_CACHE_BYTES-1
@@ -448,6 +451,7 @@ _GLOBAL(invalidate_dcache_range)
*/
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
+ PURGE_PREFETCHED_INS
blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */
@@ -489,6 +493,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
*/
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
+ PURGE_PREFETCHED_INS
blr /* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
mfmsr r10
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 64bf8db12b1..3d0249599d5 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -67,6 +67,7 @@ PPC64_CACHES:
_KPROBE(flush_icache_range)
BEGIN_FTR_SECTION
+ PURGE_PREFETCHED_INS
blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
/*
@@ -211,6 +212,11 @@ _GLOBAL(__flush_dcache_icache)
* Different systems have different cache line sizes
*/
+BEGIN_FTR_SECTION
+ PURGE_PREFETCHED_INS
+ blr
+END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
+
/* Flush the dcache */
ld r7,PPC64_CACHES@toc(r2)
clrrdi r3,r3,PAGE_SHIFT /* Page align */
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 0620eaaaad4..bf0aada02fe 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -99,12 +99,28 @@ static inline void free_lppacas(void) { }
* 3 persistent SLBs are registered here. The buffer will be zero
* initially, hence will all be invaild until we actually write them.
*/
-struct slb_shadow slb_shadow[] __cacheline_aligned = {
- [0 ... (NR_CPUS-1)] = {
- .persistent = cpu_to_be32(SLB_NUM_BOLTED),
- .buffer_length = cpu_to_be32(sizeof(struct slb_shadow)),
- },
-};
+static struct slb_shadow *slb_shadow;
+
+static void __init allocate_slb_shadows(int nr_cpus, int limit)
+{
+ int size = PAGE_ALIGN(sizeof(struct slb_shadow) * nr_cpus);
+ slb_shadow = __va(memblock_alloc_base(size, PAGE_SIZE, limit));
+ memset(slb_shadow, 0, size);
+}
+
+static struct slb_shadow * __init init_slb_shadow(int cpu)
+{
+ struct slb_shadow *s = &slb_shadow[cpu];
+
+ s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
+ s->buffer_length = cpu_to_be32(sizeof(*s));
+
+ return s;
+}
+
+#else /* CONFIG_PPC_STD_MMU_64 */
+
+static void __init allocate_slb_shadows(int nr_cpus, int limit) { }
#endif /* CONFIG_PPC_STD_MMU_64 */
@@ -142,8 +158,13 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
new_paca->__current = &init_task;
new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
#ifdef CONFIG_PPC_STD_MMU_64
- new_paca->slb_shadow_ptr = &slb_shadow[cpu];
+ new_paca->slb_shadow_ptr = init_slb_shadow(cpu);
#endif /* CONFIG_PPC_STD_MMU_64 */
+
+#ifdef CONFIG_PPC_BOOK3E
+ /* For now -- if we have threads this will be adjusted later */
+ new_paca->tcd_ptr = &new_paca->tcd;
+#endif
}
/* Put the paca pointer into r13 and SPRG_PACA */
@@ -190,6 +211,8 @@ void __init allocate_pacas(void)
allocate_lppacas(nr_cpu_ids, limit);
+ allocate_slb_shadows(nr_cpu_ids, limit);
+
/* Can't use for_each_*_cpu, as they aren't functional yet */
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
initialise_paca(&paca[cpu], cpu);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index a1e3e40ca3f..d9476c1fc95 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -835,7 +835,7 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
* at 0 as unset as well, except if PCI_PROBE_ONLY is also set
* since in that case, we don't want to re-assign anything
*/
- pcibios_resource_to_bus(dev, &reg, res);
+ pcibios_resource_to_bus(dev->bus, &reg, res);
if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
(reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
/* Only print message if not re-assigning */
@@ -886,7 +886,7 @@ static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
/* Job is a bit different between memory and IO */
if (res->flags & IORESOURCE_MEM) {
- pcibios_resource_to_bus(dev, &region, res);
+ pcibios_resource_to_bus(dev->bus, &region, res);
/* If the BAR is non-0 then it's probably been initialized */
if (region.start != 0)
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index ac0b034f9ae..83c26d82999 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -111,7 +111,7 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
res->name = pci_name(dev);
region.start = base;
region.end = base + size - 1;
- pcibios_bus_to_resource(dev, res, &region);
+ pcibios_bus_to_resource(dev->bus, res, &region);
}
}
@@ -280,7 +280,7 @@ void of_scan_pci_bridge(struct pci_dev *dev)
res->flags = flags;
region.start = of_read_number(&ranges[1], 2);
region.end = region.start + size - 1;
- pcibios_bus_to_resource(dev, res, &region);
+ pcibios_bus_to_resource(dev->bus, res, &region);
}
sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
bus->number);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 4a96556fd2d..8d4c247f173 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -25,7 +25,6 @@
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
-#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
@@ -74,6 +73,48 @@ struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void giveup_fpu_maybe_transactional(struct task_struct *tsk)
+{
+ /*
+ * If we are saving the current thread's registers, and the
+ * thread is in a transactional state, set the TIF_RESTORE_TM
+ * bit so that we know to restore the registers before
+ * returning to userspace.
+ */
+ if (tsk == current && tsk->thread.regs &&
+ MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
+ !test_thread_flag(TIF_RESTORE_TM)) {
+ tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
+ set_thread_flag(TIF_RESTORE_TM);
+ }
+
+ giveup_fpu(tsk);
+}
+
+void giveup_altivec_maybe_transactional(struct task_struct *tsk)
+{
+ /*
+ * If we are saving the current thread's registers, and the
+ * thread is in a transactional state, set the TIF_RESTORE_TM
+ * bit so that we know to restore the registers before
+ * returning to userspace.
+ */
+ if (tsk == current && tsk->thread.regs &&
+ MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
+ !test_thread_flag(TIF_RESTORE_TM)) {
+ tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
+ set_thread_flag(TIF_RESTORE_TM);
+ }
+
+ giveup_altivec(tsk);
+}
+
+#else
+#define giveup_fpu_maybe_transactional(tsk) giveup_fpu(tsk)
+#define giveup_altivec_maybe_transactional(tsk) giveup_altivec(tsk)
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
#ifdef CONFIG_PPC_FPU
/*
* Make sure the floating-point register state in the
@@ -102,13 +143,13 @@ void flush_fp_to_thread(struct task_struct *tsk)
*/
BUG_ON(tsk != current);
#endif
- giveup_fpu(tsk);
+ giveup_fpu_maybe_transactional(tsk);
}
preempt_enable();
}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
-#endif
+#endif /* CONFIG_PPC_FPU */
void enable_kernel_fp(void)
{
@@ -116,11 +157,11 @@ void enable_kernel_fp(void)
#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
- giveup_fpu(current);
+ giveup_fpu_maybe_transactional(current);
else
giveup_fpu(NULL); /* just enables FP for kernel */
#else
- giveup_fpu(last_task_used_math);
+ giveup_fpu_maybe_transactional(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
@@ -132,11 +173,11 @@ void enable_kernel_altivec(void)
#ifdef CONFIG_SMP
if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
- giveup_altivec(current);
+ giveup_altivec_maybe_transactional(current);
else
giveup_altivec_notask();
#else
- giveup_altivec(last_task_used_altivec);
+ giveup_altivec_maybe_transactional(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);
@@ -153,7 +194,7 @@ void flush_altivec_to_thread(struct task_struct *tsk)
#ifdef CONFIG_SMP
BUG_ON(tsk != current);
#endif
- giveup_altivec(tsk);
+ giveup_altivec_maybe_transactional(tsk);
}
preempt_enable();
}
@@ -182,8 +223,8 @@ EXPORT_SYMBOL(enable_kernel_vsx);
void giveup_vsx(struct task_struct *tsk)
{
- giveup_fpu(tsk);
- giveup_altivec(tsk);
+ giveup_fpu_maybe_transactional(tsk);
+ giveup_altivec_maybe_transactional(tsk);
__giveup_vsx(tsk);
}
@@ -479,7 +520,48 @@ static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
return false;
return true;
}
+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static void tm_reclaim_thread(struct thread_struct *thr,
+ struct thread_info *ti, uint8_t cause)
+{
+ unsigned long msr_diff = 0;
+
+ /*
+ * If FP/VSX registers have been already saved to the
+ * thread_struct, move them to the transact_fp array.
+ * We clear the TIF_RESTORE_TM bit since after the reclaim
+ * the thread will no longer be transactional.
+ */
+ if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
+ msr_diff = thr->tm_orig_msr & ~thr->regs->msr;
+ if (msr_diff & MSR_FP)
+ memcpy(&thr->transact_fp, &thr->fp_state,
+ sizeof(struct thread_fp_state));
+ if (msr_diff & MSR_VEC)
+ memcpy(&thr->transact_vr, &thr->vr_state,
+ sizeof(struct thread_vr_state));
+ clear_ti_thread_flag(ti, TIF_RESTORE_TM);
+ msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
+ }
+
+ tm_reclaim(thr, thr->regs->msr, cause);
+
+ /* Having done the reclaim, we now have the checkpointed
+ * FP/VSX values in the registers. These might be valid
+ * even if we have previously called enable_kernel_fp() or
+ * flush_fp_to_thread(), so update thr->regs->msr to
+ * indicate their current validity.
+ */
+ thr->regs->msr |= msr_diff;
+}
+
+void tm_reclaim_current(uint8_t cause)
+{
+ tm_enable();
+ tm_reclaim_thread(&current->thread, current_thread_info(), cause);
+}
+
static inline void tm_reclaim_task(struct task_struct *tsk)
{
/* We have to work out if we're switching from/to a task that's in the
@@ -502,9 +584,11 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
/* Stash the original thread MSR, as giveup_fpu et al will
* modify it. We hold onto it to see whether the task used
- * FP & vector regs.
+ * FP & vector regs. If the TIF_RESTORE_TM flag is set,
+ * tm_orig_msr is already set.
*/
- thr->tm_orig_msr = thr->regs->msr;
+ if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
+ thr->tm_orig_msr = thr->regs->msr;
TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
"ccr=%lx, msr=%lx, trap=%lx)\n",
@@ -512,7 +596,7 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
thr->regs->ccr, thr->regs->msr,
thr->regs->trap);
- tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED);
+ tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);
TM_DEBUG("--- tm_reclaim on pid %d complete\n",
tsk->pid);
@@ -588,6 +672,43 @@ static inline void __switch_to_tm(struct task_struct *prev)
tm_reclaim_task(prev);
}
}
+
+/*
+ * This is called if we are on the way out to userspace and the
+ * TIF_RESTORE_TM flag is set. It checks if we need to reload
+ * FP and/or vector state and does so if necessary.
+ * If userspace is inside a transaction (whether active or
+ * suspended) and FP/VMX/VSX instructions have ever been enabled
+ * inside that transaction, then we have to keep them enabled
+ * and keep the FP/VMX/VSX state loaded for as long as the transaction
+ * continues. The reason is that if we didn't, and subsequently
+ * got a FP/VMX/VSX unavailable interrupt inside a transaction,
+ * we don't know whether it's the same transaction, and thus we
+ * don't know which of the checkpointed state and the transactional
+ * state to use.
+ */
+void restore_tm_state(struct pt_regs *regs)
+{
+ unsigned long msr_diff;
+
+ clear_thread_flag(TIF_RESTORE_TM);
+ if (!MSR_TM_ACTIVE(regs->msr))
+ return;
+
+ msr_diff = current->thread.tm_orig_msr & ~regs->msr;
+ msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
+ if (msr_diff & MSR_FP) {
+ fp_enable();
+ load_fp_state(&current->thread.fp_state);
+ regs->msr |= current->thread.fpexc_mode;
+ }
+ if (msr_diff & MSR_VEC) {
+ vec_enable();
+ load_vr_state(&current->thread.vr_state);
+ }
+ regs->msr |= msr_diff;
+}
+
#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
@@ -690,7 +811,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
* schedule DABR
*/
#ifndef CONFIG_HAVE_HW_BREAKPOINT
- if (unlikely(hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
+ if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
@@ -1175,6 +1296,19 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
if (cpu_has_feature(CPU_FTR_SPE)) {
+ /*
+ * When the sticky exception bits are set
+ * directly by userspace, it must call prctl
+ * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
+ * in the existing prctl settings) or
+ * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
+ * the bits being set). <fenv.h> functions
+ * saving and restoring the whole
+ * floating-point environment need to do so
+ * anyway to restore the prctl settings from
+ * the saved environment.
+ */
+ tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
tsk->thread.fpexc_mode = val &
(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
return 0;
@@ -1206,9 +1340,22 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
- if (cpu_has_feature(CPU_FTR_SPE))
+ if (cpu_has_feature(CPU_FTR_SPE)) {
+ /*
+ * When the sticky exception bits are set
+ * directly by userspace, it must call prctl
+ * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
+ * in the existing prctl settings) or
+ * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
+ * the bits being set). <fenv.h> functions
+ * saving and restoring the whole
+ * floating-point environment need to do so
+ * anyway to restore the prctl settings from
+ * the saved environment.
+ */
+ tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
val = tsk->thread.fpexc_mode;
- else
+ } else
return -EINVAL;
#else
return -EINVAL;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index fa0ad8aafbc..f58c0d3aaeb 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -523,6 +523,20 @@ static int __init early_init_dt_scan_memory_ppc(unsigned long node,
return early_init_dt_scan_memory(node, uname, depth, data);
}
+/*
+ * For a relocatable kernel, we need to get the memstart_addr first,
+ * then use it to calculate the virtual kernel start address. This has
+ * to happen at a very early stage (before machine_init). In this case,
+ * we just want to get memstart_addr and do not want to disturb the
+ * memblock at this stage, so introduce a variable that lets us skip
+ * memblock_add() in that case.
+ */
+#ifdef CONFIG_RELOCATABLE
+static int add_mem_to_memblock = 1;
+#else
+#define add_mem_to_memblock 1
+#endif
+
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
#ifdef CONFIG_PPC64
@@ -543,7 +557,8 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
}
/* Add the chunk to the MEMBLOCK list */
- memblock_add(base, size);
+ if (add_mem_to_memblock)
+ memblock_add(base, size);
}
static void __init early_reserve_mem_dt(void)
@@ -740,6 +755,30 @@ void __init early_init_devtree(void *params)
DBG(" <- early_init_devtree()\n");
}
+#ifdef CONFIG_RELOCATABLE
+/*
+ * This function runs before early_init_devtree, so we have to initialize
+ * initial_boot_params here.
+ */
+void __init early_get_first_memblock_info(void *params, phys_addr_t *size)
+{
+ /* Setup flat device-tree pointer */
+ initial_boot_params = params;
+
+ /*
+ * Scan the memory nodes and set add_mem_to_memblock to 0 so that the
+ * memblock is not modified during the scan.
+ */
+ add_mem_to_memblock = 0;
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+ of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
+ add_mem_to_memblock = 1;
+
+ if (size)
+ *size = first_memblock_size;
+}
+#endif
+
/*******
*
* New implementation of the OF "find" APIs, return a refcounted
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
index b47a0e1ab00..1482327cfeb 100644
--- a/arch/powerpc/kernel/reloc_64.S
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -69,8 +69,8 @@ _GLOBAL(relocate)
* R_PPC64_RELATIVE ones.
*/
mtctr r8
-5: lwz r0,12(9) /* ELF64_R_TYPE(reloc->r_info) */
- cmpwi r0,R_PPC64_RELATIVE
+5: ld r0,8(9) /* ELF64_R_TYPE(reloc->r_info) */
+ cmpdi r0,R_PPC64_RELATIVE
bne 6f
ld r6,0(r9) /* reloc->r_offset */
ld r0,16(r9) /* reloc->r_addend */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index b903dc5cf94..04cc4fcca78 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -247,7 +247,12 @@ static void __init exc_lvl_early_init(void)
/* interrupt stacks must be in lowmem, we get that for free on ppc32
* as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
for_each_possible_cpu(i) {
+#ifdef CONFIG_SMP
hw_cpu = get_hard_smp_processor_id(i);
+#else
+ hw_cpu = 0;
+#endif
+
critirq_ctx[hw_cpu] = (struct thread_info *)
__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
#ifdef CONFIG_BOOKE
@@ -296,9 +301,6 @@ void __init setup_arch(char **cmdline_p)
if (cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE))
ucache_bsize = icache_bsize = dcache_bsize;
- /* reboot on panic */
- panic_timeout = 180;
-
if (ppc_md.panic)
setup_panic();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 4085aaa9478..f5f11a7d30e 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -97,6 +97,36 @@ int dcache_bsize;
int icache_bsize;
int ucache_bsize;
+#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
+static void setup_tlb_core_data(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ int first = cpu_first_thread_sibling(cpu);
+
+ paca[cpu].tcd_ptr = &paca[first].tcd;
+
+ /*
+ * If we have threads, we need either tlbsrx.
+ * or e6500 tablewalk mode, or else TLB handlers
+ * will be racy and could produce duplicate entries.
+ */
+ if (smt_enabled_at_boot >= 2 &&
+ !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
+ book3e_htw_mode != PPC_HTW_E6500) {
+ /* Should we panic instead? */
+ WARN_ONCE("%s: unsupported MMU configuration -- expect problems\n",
+ __func__);
+ }
+ }
+}
+#else
+static void setup_tlb_core_data(void)
+{
+}
+#endif
+
#ifdef CONFIG_SMP
static char *smt_enabled_cmdline;
@@ -445,6 +475,7 @@ void __init setup_system(void)
smp_setup_cpu_maps();
check_smt_enabled();
+ setup_tlb_core_data();
#ifdef CONFIG_SMP
/* Release secondary cpus out of their spinloops at 0x60 now that
@@ -520,9 +551,6 @@ static void __init irqstack_early_init(void)
#ifdef CONFIG_PPC_BOOK3E
static void __init exc_lvl_early_init(void)
{
- extern unsigned int interrupt_base_book3e;
- extern unsigned int exc_debug_debug_book3e;
-
unsigned int i;
for_each_possible_cpu(i) {
@@ -535,8 +563,7 @@ static void __init exc_lvl_early_init(void)
}
if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
- patch_branch(&interrupt_base_book3e + (0x040 / 4) + 1,
- (unsigned long)&exc_debug_debug_book3e, 0);
+ patch_exception(0x040, exc_debug_debug_book3e);
}
#else
#define exc_lvl_early_init()
@@ -544,7 +571,8 @@ static void __init exc_lvl_early_init(void)
/*
* Stack space used when we detect a bad kernel stack pointer, and
- * early in SMP boots before relocation is enabled.
+ * early in SMP boots before relocation is enabled. Exclusive emergency
+ * stack for machine checks.
*/
static void __init emergency_stack_init(void)
{
@@ -567,6 +595,13 @@ static void __init emergency_stack_init(void)
sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
sp += THREAD_SIZE;
paca[i].emergency_sp = __va(sp);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* emergency stack for machine check exception handling. */
+ sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
+ sp += THREAD_SIZE;
+ paca[i].mc_emergency_sp = __va(sp);
+#endif
}
}
@@ -588,9 +623,6 @@ void __init setup_arch(char **cmdline_p)
dcache_bsize = ppc64_caches.dline_size;
icache_bsize = ppc64_caches.iline_size;
- /* reboot on panic */
- panic_timeout = 180;
-
if (ppc_md.panic)
setup_panic();
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 457e97aa294..8fc4177ed65 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -203,8 +203,7 @@ unsigned long get_tm_stackpointer(struct pt_regs *regs)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (MSR_TM_ACTIVE(regs->msr)) {
- tm_enable();
- tm_reclaim(&current->thread, regs->msr, TM_CAUSE_SIGNAL);
+ tm_reclaim_current(TM_CAUSE_SIGNAL);
if (MSR_TM_TRANSACTIONAL(regs->msr))
return current->thread.ckpt_regs.gpr[1];
}
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 68027bfa5f8..a67e00aa3ca 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -519,6 +519,13 @@ static int save_tm_user_regs(struct pt_regs *regs,
{
unsigned long msr = regs->msr;
+ /* Remove TM bits from thread's MSR. The MSR in the sigcontext
+ * just indicates to userland that we were doing a transaction, but we
+ * don't want to return in transactional state. This also ensures
+ * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
+ */
+ regs->msr &= ~MSR_TS_MASK;
+
/* Make sure floating point registers are stored in regs */
flush_fp_to_thread(current);
@@ -1015,29 +1022,24 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tm_frame = &rt_sf->uc_transact.uc_mcontext;
if (MSR_TM_ACTIVE(regs->msr)) {
+ if (__put_user((unsigned long)&rt_sf->uc_transact,
+ &rt_sf->uc.uc_link) ||
+ __put_user((unsigned long)tm_frame,
+ &rt_sf->uc_transact.uc_regs))
+ goto badframe;
if (save_tm_user_regs(regs, frame, tm_frame, sigret))
goto badframe;
}
else
#endif
{
+ if (__put_user(0, &rt_sf->uc.uc_link))
+ goto badframe;
if (save_user_regs(regs, frame, tm_frame, sigret, 1))
goto badframe;
}
regs->link = tramp;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- if (MSR_TM_ACTIVE(regs->msr)) {
- if (__put_user((unsigned long)&rt_sf->uc_transact,
- &rt_sf->uc.uc_link)
- || __put_user((unsigned long)tm_frame, &rt_sf->uc_transact.uc_regs))
- goto badframe;
- }
- else
-#endif
- if (__put_user(0, &rt_sf->uc.uc_link))
- goto badframe;
-
current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
/* create a stack frame for the caller of the handler */
@@ -1056,13 +1058,6 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
/* enter the signal handler in native-endian mode */
regs->msr &= ~MSR_LE;
regs->msr |= (MSR_KERNEL & MSR_LE);
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- /* Remove TM bits from thread's MSR. The MSR in the sigcontext
- * just indicates to userland that we were doing a transaction, but we
- * don't want to return in transactional state:
- */
- regs->msr &= ~MSR_TS_MASK;
-#endif
return 1;
badframe:
@@ -1484,13 +1479,6 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
regs->nip = (unsigned long) ka->sa.sa_handler;
/* enter the signal handler in big-endian mode */
regs->msr &= ~MSR_LE;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- /* Remove TM bits from thread's MSR. The MSR in the sigcontext
- * just indicates to userland that we were doing a transaction, but we
- * don't want to return in transactional state:
- */
- regs->msr &= ~MSR_TS_MASK;
-#endif
return 1;
badframe:
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 42991045349..e35bf773df7 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -192,6 +192,13 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
BUG_ON(!MSR_TM_ACTIVE(regs->msr));
+ /* Remove TM bits from thread's MSR. The MSR in the sigcontext
+ * just indicates to userland that we were doing a transaction, but we
+ * don't want to return in transactional state. This also ensures
+ * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
+ */
+ regs->msr &= ~MSR_TS_MASK;
+
flush_fp_to_thread(current);
#ifdef CONFIG_ALTIVEC
@@ -749,13 +756,6 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
/* Make sure signal handler doesn't get spurious FP exceptions */
current->thread.fp_state.fpscr = 0;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- /* Remove TM bits from thread's MSR. The MSR in the sigcontext
- * just indicates to userland that we were doing a transaction, but we
- * don't want to return in transactional state:
- */
- regs->msr &= ~MSR_TS_MASK;
-#endif
/* Set up to return from userspace. */
if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
index e68fd1ae727..7a37ecd3afa 100644
--- a/arch/powerpc/kernel/smp-tbsync.c
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -9,7 +9,6 @@
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/unistd.h>
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/smp.h>
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index c1cf4a1522d..ac2621af315 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -369,13 +369,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
- if (smp_ops)
- if (smp_ops->probe)
- max_cpus = smp_ops->probe();
- else
- max_cpus = NR_CPUS;
- else
- max_cpus = 1;
+ if (smp_ops && smp_ops->probe)
+ smp_ops->probe();
}
void smp_prepare_boot_cpu(void)
diff --git a/arch/powerpc/kernel/swsusp_booke.S b/arch/powerpc/kernel/swsusp_booke.S
index 0f204053e5b..553c1405ee0 100644
--- a/arch/powerpc/kernel/swsusp_booke.S
+++ b/arch/powerpc/kernel/swsusp_booke.S
@@ -74,21 +74,21 @@ _GLOBAL(swsusp_arch_suspend)
bne 1b
/* Save SPRGs */
- mfsprg r4,0
+ mfspr r4,SPRN_SPRG0
stw r4,SL_SPRG0(r11)
- mfsprg r4,1
+ mfspr r4,SPRN_SPRG1
stw r4,SL_SPRG1(r11)
- mfsprg r4,2
+ mfspr r4,SPRN_SPRG2
stw r4,SL_SPRG2(r11)
- mfsprg r4,3
+ mfspr r4,SPRN_SPRG3
stw r4,SL_SPRG3(r11)
- mfsprg r4,4
+ mfspr r4,SPRN_SPRG4
stw r4,SL_SPRG4(r11)
- mfsprg r4,5
+ mfspr r4,SPRN_SPRG5
stw r4,SL_SPRG5(r11)
- mfsprg r4,6
+ mfspr r4,SPRN_SPRG6
stw r4,SL_SPRG6(r11)
- mfsprg r4,7
+ mfspr r4,SPRN_SPRG7
stw r4,SL_SPRG7(r11)
/* Call the low level suspend stuff (we should probably have made
@@ -150,21 +150,21 @@ _GLOBAL(swsusp_arch_resume)
bl _tlbil_all
lwz r4,SL_SPRG0(r11)
- mtsprg 0,r4
+ mtspr SPRN_SPRG0,r4
lwz r4,SL_SPRG1(r11)
- mtsprg 1,r4
+ mtspr SPRN_SPRG1,r4
lwz r4,SL_SPRG2(r11)
- mtsprg 2,r4
+ mtspr SPRN_SPRG2,r4
lwz r4,SL_SPRG3(r11)
- mtsprg 3,r4
+ mtspr SPRN_SPRG3,r4
lwz r4,SL_SPRG4(r11)
- mtsprg 4,r4
+ mtspr SPRN_SPRG4,r4
lwz r4,SL_SPRG5(r11)
- mtsprg 5,r4
+ mtspr SPRN_SPRG5,r4
lwz r4,SL_SPRG6(r11)
- mtsprg 6,r4
+ mtspr SPRN_SPRG6,r4
lwz r4,SL_SPRG7(r11)
- mtsprg 7,r4
+ mtspr SPRN_SPRG7,r4
/* restore the MSR */
lwz r3,SL_MSR(r11)
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index 4e3cc47f26b..cd9be9aa016 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -34,7 +34,6 @@
#include <linux/ipc.h>
#include <linux/utsname.h>
#include <linux/file.h>
-#include <linux/init.h>
#include <linux/personality.h>
#include <asm/uaccess.h>
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index b4e667663d9..97e1dc91768 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -51,8 +51,6 @@ static ssize_t store_smt_snooze_delay(struct device *dev,
return -EINVAL;
per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
- update_smt_snooze_delay(cpu->dev.id, snooze);
-
return count;
}
@@ -86,6 +84,304 @@ __setup("smt-snooze-delay=", setup_smt_snooze_delay);
#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define MAX_BIT 63
+
+static u64 pw20_wt;
+static u64 altivec_idle_wt;
+
+static unsigned int get_idle_ticks_bit(u64 ns)
+{
+ u64 cycle;
+
+ if (ns >= 10000)
+ cycle = div_u64(ns + 500, 1000) * tb_ticks_per_usec;
+ else
+ cycle = div_u64(ns * tb_ticks_per_usec, 1000);
+
+ if (!cycle)
+ return 0;
+
+ return ilog2(cycle);
+}
+
+static void do_show_pwrmgtcr0(void *val)
+{
+ u32 *value = val;
+
+ *value = mfspr(SPRN_PWRMGTCR0);
+}
+
+static ssize_t show_pw20_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 value;
+ unsigned int cpu = dev->id;
+
+ smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
+
+ value &= PWRMGTCR0_PW20_WAIT;
+
+ return sprintf(buf, "%u\n", value ? 1 : 0);
+}
+
+static void do_store_pw20_state(void *val)
+{
+ u32 *value = val;
+ u32 pw20_state;
+
+ pw20_state = mfspr(SPRN_PWRMGTCR0);
+
+ if (*value)
+ pw20_state |= PWRMGTCR0_PW20_WAIT;
+ else
+ pw20_state &= ~PWRMGTCR0_PW20_WAIT;
+
+ mtspr(SPRN_PWRMGTCR0, pw20_state);
+}
+
+static ssize_t store_pw20_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 value;
+ unsigned int cpu = dev->id;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ if (value > 1)
+ return -EINVAL;
+
+ smp_call_function_single(cpu, do_store_pw20_state, &value, 1);
+
+ return count;
+}
+
+static ssize_t show_pw20_wait_time(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 value;
+ u64 tb_cycle = 1;
+ u64 time;
+
+ unsigned int cpu = dev->id;
+
+ if (!pw20_wt) {
+ smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
+ value = (value & PWRMGTCR0_PW20_ENT) >>
+ PWRMGTCR0_PW20_ENT_SHIFT;
+
+ tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
+ /* convert time-base cycles to ns */
+ if (tb_ticks_per_usec > 1000) {
+ time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
+ } else {
+ u32 rem_us;
+
+ time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
+ &rem_us);
+ time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
+ }
+ } else {
+ time = pw20_wt;
+ }
+
+ return sprintf(buf, "%llu\n", time > 0 ? time : 0);
+}
+
+static void set_pw20_wait_entry_bit(void *val)
+{
+ u32 *value = val;
+ u32 pw20_idle;
+
+ pw20_idle = mfspr(SPRN_PWRMGTCR0);
+
+ /* Set Automatic PW20 Core Idle Count */
+ /* clear count */
+ pw20_idle &= ~PWRMGTCR0_PW20_ENT;
+
+ /* set count */
+ pw20_idle |= ((MAX_BIT - *value) << PWRMGTCR0_PW20_ENT_SHIFT);
+
+ mtspr(SPRN_PWRMGTCR0, pw20_idle);
+}
+
+static ssize_t store_pw20_wait_time(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 entry_bit;
+ u64 value;
+
+ unsigned int cpu = dev->id;
+
+ if (kstrtou64(buf, 0, &value))
+ return -EINVAL;
+
+ if (!value)
+ return -EINVAL;
+
+ entry_bit = get_idle_ticks_bit(value);
+ if (entry_bit > MAX_BIT)
+ return -EINVAL;
+
+ pw20_wt = value;
+
+ smp_call_function_single(cpu, set_pw20_wait_entry_bit,
+ &entry_bit, 1);
+
+ return count;
+}
+
+static ssize_t show_altivec_idle(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 value;
+ unsigned int cpu = dev->id;
+
+ smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
+
+ value &= PWRMGTCR0_AV_IDLE_PD_EN;
+
+ return sprintf(buf, "%u\n", value ? 1 : 0);
+}
+
+static void do_store_altivec_idle(void *val)
+{
+ u32 *value = val;
+ u32 altivec_idle;
+
+ altivec_idle = mfspr(SPRN_PWRMGTCR0);
+
+ if (*value)
+ altivec_idle |= PWRMGTCR0_AV_IDLE_PD_EN;
+ else
+ altivec_idle &= ~PWRMGTCR0_AV_IDLE_PD_EN;
+
+ mtspr(SPRN_PWRMGTCR0, altivec_idle);
+}
+
+static ssize_t store_altivec_idle(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 value;
+ unsigned int cpu = dev->id;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ if (value > 1)
+ return -EINVAL;
+
+ smp_call_function_single(cpu, do_store_altivec_idle, &value, 1);
+
+ return count;
+}
+
+static ssize_t show_altivec_idle_wait_time(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 value;
+ u64 tb_cycle = 1;
+ u64 time;
+
+ unsigned int cpu = dev->id;
+
+ if (!altivec_idle_wt) {
+ smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
+ value = (value & PWRMGTCR0_AV_IDLE_CNT) >>
+ PWRMGTCR0_AV_IDLE_CNT_SHIFT;
+
+ tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
+ /* convert time-base cycles to ns */
+ if (tb_ticks_per_usec > 1000) {
+ time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
+ } else {
+ u32 rem_us;
+
+ time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
+ &rem_us);
+ time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
+ }
+ } else {
+ time = altivec_idle_wt;
+ }
+
+ return sprintf(buf, "%llu\n", time > 0 ? time : 0);
+}
+
+static void set_altivec_idle_wait_entry_bit(void *val)
+{
+ u32 *value = val;
+ u32 altivec_idle;
+
+ altivec_idle = mfspr(SPRN_PWRMGTCR0);
+
+ /* Set Automatic AltiVec Idle Count */
+ /* clear count */
+ altivec_idle &= ~PWRMGTCR0_AV_IDLE_CNT;
+
+ /* set count */
+ altivec_idle |= ((MAX_BIT - *value) << PWRMGTCR0_AV_IDLE_CNT_SHIFT);
+
+ mtspr(SPRN_PWRMGTCR0, altivec_idle);
+}
+
+static ssize_t store_altivec_idle_wait_time(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 entry_bit;
+ u64 value;
+
+ unsigned int cpu = dev->id;
+
+ if (kstrtou64(buf, 0, &value))
+ return -EINVAL;
+
+ if (!value)
+ return -EINVAL;
+
+ entry_bit = get_idle_ticks_bit(value);
+ if (entry_bit > MAX_BIT)
+ return -EINVAL;
+
+ altivec_idle_wt = value;
+
+ smp_call_function_single(cpu, set_altivec_idle_wait_entry_bit,
+ &entry_bit, 1);
+
+ return count;
+}
+
+/*
+ * Enable/disable interface:
+ * 0 = disable, 1 = enable.
+ */
+static DEVICE_ATTR(pw20_state, 0600, show_pw20_state, store_pw20_state);
+static DEVICE_ATTR(altivec_idle, 0600, show_altivec_idle, store_altivec_idle);
+
+/*
+ * Set wait time interface (in nanoseconds).
+ * Example (assuming a TB frequency of 41MHz):
+ * 1~48(ns): TB[63]
+ * 49~97(ns): TB[62]
+ * 98~195(ns): TB[61]
+ * 196~390(ns): TB[60]
+ * 391~780(ns): TB[59]
+ * 781~1560(ns): TB[58]
+ * ...
+ */
+static DEVICE_ATTR(pw20_wait_time, 0600,
+ show_pw20_wait_time,
+ store_pw20_wait_time);
+static DEVICE_ATTR(altivec_idle_wait_time, 0600,
+ show_altivec_idle_wait_time,
+ store_altivec_idle_wait_time);
+#endif
+
/*
* Enabling PMCs will slow partition context switch times so we only do
* it the first time we write to the PMCs.
@@ -108,14 +404,14 @@ void ppc_enable_pmcs(void)
}
EXPORT_SYMBOL(ppc_enable_pmcs);
-#define SYSFS_PMCSETUP(NAME, ADDRESS) \
+#define __SYSFS_SPRSETUP(NAME, ADDRESS, EXTRA) \
static void read_##NAME(void *val) \
{ \
*(unsigned long *)val = mfspr(ADDRESS); \
} \
static void write_##NAME(void *val) \
{ \
- ppc_enable_pmcs(); \
+ EXTRA; \
mtspr(ADDRESS, *(unsigned long *)val); \
} \
static ssize_t show_##NAME(struct device *dev, \
@@ -140,6 +436,10 @@ static ssize_t __used \
return count; \
}
+#define SYSFS_PMCSETUP(NAME, ADDRESS) \
+ __SYSFS_SPRSETUP(NAME, ADDRESS, ppc_enable_pmcs())
+#define SYSFS_SPRSETUP(NAME, ADDRESS) \
+ __SYSFS_SPRSETUP(NAME, ADDRESS, )
/* Let's define all possible registers, we'll only hook up the ones
* that are implemented on the current processor
@@ -175,10 +475,10 @@ SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
-SYSFS_PMCSETUP(purr, SPRN_PURR);
-SYSFS_PMCSETUP(spurr, SPRN_SPURR);
-SYSFS_PMCSETUP(dscr, SPRN_DSCR);
-SYSFS_PMCSETUP(pir, SPRN_PIR);
+SYSFS_SPRSETUP(purr, SPRN_PURR);
+SYSFS_SPRSETUP(spurr, SPRN_SPURR);
+SYSFS_SPRSETUP(dscr, SPRN_DSCR);
+SYSFS_SPRSETUP(pir, SPRN_PIR);
/*
Lets only enable read for phyp resources and
@@ -249,34 +549,34 @@ SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
#ifdef CONFIG_DEBUG_KERNEL
-SYSFS_PMCSETUP(hid0, SPRN_HID0);
-SYSFS_PMCSETUP(hid1, SPRN_HID1);
-SYSFS_PMCSETUP(hid4, SPRN_HID4);
-SYSFS_PMCSETUP(hid5, SPRN_HID5);
-SYSFS_PMCSETUP(ima0, SPRN_PA6T_IMA0);
-SYSFS_PMCSETUP(ima1, SPRN_PA6T_IMA1);
-SYSFS_PMCSETUP(ima2, SPRN_PA6T_IMA2);
-SYSFS_PMCSETUP(ima3, SPRN_PA6T_IMA3);
-SYSFS_PMCSETUP(ima4, SPRN_PA6T_IMA4);
-SYSFS_PMCSETUP(ima5, SPRN_PA6T_IMA5);
-SYSFS_PMCSETUP(ima6, SPRN_PA6T_IMA6);
-SYSFS_PMCSETUP(ima7, SPRN_PA6T_IMA7);
-SYSFS_PMCSETUP(ima8, SPRN_PA6T_IMA8);
-SYSFS_PMCSETUP(ima9, SPRN_PA6T_IMA9);
-SYSFS_PMCSETUP(imaat, SPRN_PA6T_IMAAT);
-SYSFS_PMCSETUP(btcr, SPRN_PA6T_BTCR);
-SYSFS_PMCSETUP(pccr, SPRN_PA6T_PCCR);
-SYSFS_PMCSETUP(rpccr, SPRN_PA6T_RPCCR);
-SYSFS_PMCSETUP(der, SPRN_PA6T_DER);
-SYSFS_PMCSETUP(mer, SPRN_PA6T_MER);
-SYSFS_PMCSETUP(ber, SPRN_PA6T_BER);
-SYSFS_PMCSETUP(ier, SPRN_PA6T_IER);
-SYSFS_PMCSETUP(sier, SPRN_PA6T_SIER);
-SYSFS_PMCSETUP(siar, SPRN_PA6T_SIAR);
-SYSFS_PMCSETUP(tsr0, SPRN_PA6T_TSR0);
-SYSFS_PMCSETUP(tsr1, SPRN_PA6T_TSR1);
-SYSFS_PMCSETUP(tsr2, SPRN_PA6T_TSR2);
-SYSFS_PMCSETUP(tsr3, SPRN_PA6T_TSR3);
+SYSFS_SPRSETUP(hid0, SPRN_HID0);
+SYSFS_SPRSETUP(hid1, SPRN_HID1);
+SYSFS_SPRSETUP(hid4, SPRN_HID4);
+SYSFS_SPRSETUP(hid5, SPRN_HID5);
+SYSFS_SPRSETUP(ima0, SPRN_PA6T_IMA0);
+SYSFS_SPRSETUP(ima1, SPRN_PA6T_IMA1);
+SYSFS_SPRSETUP(ima2, SPRN_PA6T_IMA2);
+SYSFS_SPRSETUP(ima3, SPRN_PA6T_IMA3);
+SYSFS_SPRSETUP(ima4, SPRN_PA6T_IMA4);
+SYSFS_SPRSETUP(ima5, SPRN_PA6T_IMA5);
+SYSFS_SPRSETUP(ima6, SPRN_PA6T_IMA6);
+SYSFS_SPRSETUP(ima7, SPRN_PA6T_IMA7);
+SYSFS_SPRSETUP(ima8, SPRN_PA6T_IMA8);
+SYSFS_SPRSETUP(ima9, SPRN_PA6T_IMA9);
+SYSFS_SPRSETUP(imaat, SPRN_PA6T_IMAAT);
+SYSFS_SPRSETUP(btcr, SPRN_PA6T_BTCR);
+SYSFS_SPRSETUP(pccr, SPRN_PA6T_PCCR);
+SYSFS_SPRSETUP(rpccr, SPRN_PA6T_RPCCR);
+SYSFS_SPRSETUP(der, SPRN_PA6T_DER);
+SYSFS_SPRSETUP(mer, SPRN_PA6T_MER);
+SYSFS_SPRSETUP(ber, SPRN_PA6T_BER);
+SYSFS_SPRSETUP(ier, SPRN_PA6T_IER);
+SYSFS_SPRSETUP(sier, SPRN_PA6T_SIER);
+SYSFS_SPRSETUP(siar, SPRN_PA6T_SIAR);
+SYSFS_SPRSETUP(tsr0, SPRN_PA6T_TSR0);
+SYSFS_SPRSETUP(tsr1, SPRN_PA6T_TSR1);
+SYSFS_SPRSETUP(tsr2, SPRN_PA6T_TSR2);
+SYSFS_SPRSETUP(tsr3, SPRN_PA6T_TSR3);
#endif /* CONFIG_DEBUG_KERNEL */
#endif /* HAS_PPC_PMC_PA6T */
@@ -421,6 +721,15 @@ static void register_cpu_online(unsigned int cpu)
device_create_file(s, &dev_attr_pir);
#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+ if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
+ device_create_file(s, &dev_attr_pw20_state);
+ device_create_file(s, &dev_attr_pw20_wait_time);
+
+ device_create_file(s, &dev_attr_altivec_idle);
+ device_create_file(s, &dev_attr_altivec_idle_wait_time);
+ }
+#endif
cacheinfo_cpu_online(cpu);
}
@@ -493,6 +802,15 @@ static void unregister_cpu_online(unsigned int cpu)
device_remove_file(s, &dev_attr_pir);
#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+ if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
+ device_remove_file(s, &dev_attr_pw20_state);
+ device_remove_file(s, &dev_attr_pw20_wait_time);
+
+ device_remove_file(s, &dev_attr_altivec_idle);
+ device_remove_file(s, &dev_attr_altivec_idle_wait_time);
+ }
+#endif
cacheinfo_cpu_offline(cpu);
}
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index b3b144121cc..b3dab20acf3 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -510,7 +510,6 @@ void timer_interrupt(struct pt_regs * regs)
*/
may_hard_irq_enable();
- __get_cpu_var(irq_stat).timer_irqs++;
#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0)
@@ -532,10 +531,15 @@ void timer_interrupt(struct pt_regs * regs)
*next_tb = ~(u64)0;
if (evt->event_handler)
evt->event_handler(evt);
+ __get_cpu_var(irq_stat).timer_irqs_event++;
} else {
now = *next_tb - now;
if (now <= DECREMENTER_MAX)
set_dec((int)now);
+ /* We may have raced with new irq work */
+ if (test_irq_work_pending())
+ set_dec(1);
+ __get_cpu_var(irq_stat).timer_irqs_others++;
}
#ifdef CONFIG_PPC64
@@ -801,8 +805,16 @@ static void __init clocksource_init(void)
static int decrementer_set_next_event(unsigned long evt,
struct clock_event_device *dev)
{
+ /* Don't adjust the decrementer if some irq work is pending */
+ if (test_irq_work_pending())
+ return 0;
__get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
set_dec(evt);
+
+ /* We may have raced with new irq work */
+ if (test_irq_work_pending())
+ set_dec(1);
+
return 0;
}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 907a472f9a9..33cd7a0b8e7 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -285,6 +285,21 @@ void system_reset_exception(struct pt_regs *regs)
/* What should we do here? We could issue a shutdown or hard reset. */
}
+
+/*
+ * This function is called in real mode. Strictly no printk's please.
+ *
+ * regs->nip and regs->msr contain SRR0 and SRR1.
+ */
+long machine_check_early(struct pt_regs *regs)
+{
+ long handled = 0;
+
+ if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
+ handled = cur_cpu_spec->machine_check_early(regs);
+ return handled;
+}
+
#endif
/*
@@ -1384,7 +1399,6 @@ void fp_unavailable_tm(struct pt_regs *regs)
TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
regs->nip, regs->msr);
- tm_enable();
/* We can only have got here if the task started using FP after
* beginning the transaction. So, the transactional regs are just a
@@ -1393,8 +1407,7 @@ void fp_unavailable_tm(struct pt_regs *regs)
* transaction, and probably retry but now with FP enabled. So the
* checkpointed FP registers need to be loaded.
*/
- tm_reclaim(&current->thread, current->thread.regs->msr,
- TM_CAUSE_FAC_UNAV);
+ tm_reclaim_current(TM_CAUSE_FAC_UNAV);
/* Reclaim didn't save out any FPRs to transact_fprs. */
/* Enable FP for the task: */
@@ -1403,11 +1416,19 @@ void fp_unavailable_tm(struct pt_regs *regs)
/* This loads and recheckpoints the FP registers from
* thread.fpr[]. They will remain in registers after the
* checkpoint so we don't need to reload them after.
+ * If VMX is in use, the VRs now hold checkpointed values,
+ * so we don't want to load the VRs from the thread_struct.
*/
- tm_recheckpoint(&current->thread, regs->msr);
+ tm_recheckpoint(&current->thread, MSR_FP);
+
+ /* If VMX is in use, get the transactional values back */
+ if (regs->msr & MSR_VEC) {
+ do_load_up_transact_altivec(&current->thread);
+ /* At this point all the VSX state is loaded, so enable it */
+ regs->msr |= MSR_VSX;
+ }
}
-#ifdef CONFIG_ALTIVEC
void altivec_unavailable_tm(struct pt_regs *regs)
{
/* See the comments in fp_unavailable_tm(). This function operates
@@ -1417,18 +1438,21 @@ void altivec_unavailable_tm(struct pt_regs *regs)
TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
"MSR=%lx\n",
regs->nip, regs->msr);
- tm_enable();
- tm_reclaim(&current->thread, current->thread.regs->msr,
- TM_CAUSE_FAC_UNAV);
+ tm_reclaim_current(TM_CAUSE_FAC_UNAV);
regs->msr |= MSR_VEC;
- tm_recheckpoint(&current->thread, regs->msr);
+ tm_recheckpoint(&current->thread, MSR_VEC);
current->thread.used_vr = 1;
+
+ if (regs->msr & MSR_FP) {
+ do_load_up_transact_fpu(&current->thread);
+ regs->msr |= MSR_VSX;
+ }
}
-#endif
-#ifdef CONFIG_VSX
void vsx_unavailable_tm(struct pt_regs *regs)
{
+ unsigned long orig_msr = regs->msr;
+
/* See the comments in fp_unavailable_tm(). This works similarly,
* though we're loading both FP and VEC registers in here.
*
@@ -1440,18 +1464,30 @@ void vsx_unavailable_tm(struct pt_regs *regs)
"MSR=%lx\n",
regs->nip, regs->msr);
- tm_enable();
+ current->thread.used_vsr = 1;
+
+ /* If FP and VMX are already loaded, we have all the state we need */
+ if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
+ regs->msr |= MSR_VSX;
+ return;
+ }
+
/* This reclaims FP and/or VR regs if they're already enabled */
- tm_reclaim(&current->thread, current->thread.regs->msr,
- TM_CAUSE_FAC_UNAV);
+ tm_reclaim_current(TM_CAUSE_FAC_UNAV);
regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
MSR_VSX;
- /* This loads & recheckpoints FP and VRs. */
- tm_recheckpoint(&current->thread, regs->msr);
- current->thread.used_vsr = 1;
+
+ /* This loads & recheckpoints FP and VRs; but we have
+ * to be sure not to overwrite previously-valid state.
+ */
+ tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);
+
+ if (orig_msr & MSR_FP)
+ do_load_up_transact_fpu(&current->thread);
+ if (orig_msr & MSR_VEC)
+ do_load_up_transact_altivec(&current->thread);
}
-#endif
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
void performance_monitor_exception(struct pt_regs *regs)
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index 59f419b935f..003b20964ea 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -186,7 +186,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
* emulate_step() returns 1 if the insn was successfully emulated.
* For all other cases, we need to single-step in hardware.
*/
- ret = emulate_step(regs, auprobe->ainsn);
+ ret = emulate_step(regs, auprobe->insn);
if (ret > 0)
return true;
diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
index 6e8f507ed32..6ac107ac402 100644
--- a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
+++ b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
@@ -1,4 +1,3 @@
-#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/page.h>
@@ -7,7 +6,7 @@
.globl vdso32_start, vdso32_end
.balign PAGE_SIZE
vdso32_start:
- .incbin "arch/powerpc/kernel/vdso32/vdso32.so"
+ .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg"
.balign PAGE_SIZE
vdso32_end:
diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
index b8553d62b79..df60fca6a13 100644
--- a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
+++ b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
@@ -1,4 +1,3 @@
-#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/page.h>
@@ -7,7 +6,7 @@
.globl vdso64_start, vdso64_end
.balign PAGE_SIZE
vdso64_start:
- .incbin "arch/powerpc/kernel/vdso64/vdso64.so"
+ .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg"
.balign PAGE_SIZE
vdso64_end:
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 0458a9aaba9..74f8050518d 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -37,6 +37,16 @@ _GLOBAL(do_load_up_transact_altivec)
#endif
/*
+ * Enable use of VMX/Altivec for the caller.
+ */
+_GLOBAL(vec_enable)
+ mfmsr r3
+ oris r3,r3,MSR_VEC@h
+ MTMSRD(r3)
+ isync
+ blr
+
+/*
* Load state from memory into VMX registers including VSCR.
* Assumes the caller has enabled VMX in the MSR.
*/
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 76a64821f4a..826d8bd9e52 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -518,16 +518,18 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
+ struct iommu_table *tbl;
dma_addr_t ret = DMA_ERROR_CODE;
- if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
+ tbl = get_iommu_table_base(dev);
+ if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
atomic_inc(&viodev->cmo.allocs_failed);
return ret;
}
ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
if (unlikely(dma_mapping_error(dev, ret))) {
- vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+ vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
atomic_inc(&viodev->cmo.allocs_failed);
}
@@ -540,10 +542,12 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
+ struct iommu_table *tbl;
+ tbl = get_iommu_table_base(dev);
dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
- vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+ vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
}
static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -551,12 +555,14 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
+ struct iommu_table *tbl;
struct scatterlist *sgl;
int ret, count = 0;
size_t alloc_size = 0;
+ tbl = get_iommu_table_base(dev);
for (sgl = sglist; count < nelems; count++, sgl++)
- alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);
+ alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
if (vio_cmo_alloc(viodev, alloc_size)) {
atomic_inc(&viodev->cmo.allocs_failed);
@@ -572,7 +578,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
}
for (sgl = sglist, count = 0; count < ret; count++, sgl++)
- alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
+ alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
if (alloc_size)
vio_cmo_dealloc(viodev, alloc_size);
@@ -585,12 +591,14 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
+ struct iommu_table *tbl;
struct scatterlist *sgl;
size_t alloc_size = 0;
int count = 0;
+ tbl = get_iommu_table_base(dev);
for (sgl = sglist; count < nelems; count++, sgl++)
- alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
+ alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
@@ -706,11 +714,14 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
struct vio_cmo_dev_entry *dev_ent;
struct device *dev = &viodev->dev;
+ struct iommu_table *tbl;
struct vio_driver *viodrv = to_vio_driver(dev->driver);
unsigned long flags;
size_t size;
bool dma_capable = false;
+ tbl = get_iommu_table_base(dev);
+
/* A device requires entitlement if it has a DMA window property */
switch (viodev->family) {
case VDEVICE:
@@ -736,7 +747,8 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
return -EINVAL;
}
- viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
+ viodev->cmo.desired =
+ IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
viodev->cmo.desired = VIO_CMO_MIN_ENT;
size = VIO_CMO_MIN_ENT;
@@ -1176,9 +1188,10 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
&tbl->it_index, &offset, &size);
/* TCE table size - measured in tce entries */
- tbl->it_size = size >> IOMMU_PAGE_SHIFT;
+ tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
+ tbl->it_size = size >> tbl->it_page_shift;
/* offset for VIO should always be 0 */
- tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
+ tbl->it_offset = offset >> tbl->it_page_shift;
tbl->it_busno = 0;
tbl->it_type = TCE_VB;
tbl->it_blocksize = 16;
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 93221e87b91..9cb4b0a3603 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -21,6 +21,8 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
#include <asm/reg.h>
#include <asm/cputable.h>
@@ -231,3 +233,5 @@ static void __exit kvmppc_44x_exit(void)
module_init(kvmppc_44x_init);
module_exit(kvmppc_44x_exit);
+MODULE_ALIAS_MISCDEV(KVM_MINOR);
+MODULE_ALIAS("devname:kvm");
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 8912608b7e1..94e597e6f15 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -18,6 +18,8 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
#include <asm/reg.h>
#include <asm/cputable.h>
@@ -575,10 +577,10 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
i = reg->id - KVM_REG_PPC_FPR0;
- val = get_reg_val(reg->id, vcpu->arch.fpr[i]);
+ val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));
break;
case KVM_REG_PPC_FPSCR:
- val = get_reg_val(reg->id, vcpu->arch.fpscr);
+ val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);
break;
#ifdef CONFIG_ALTIVEC
case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
@@ -586,19 +588,30 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
r = -ENXIO;
break;
}
- val.vval = vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0];
+ val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
- val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
+ val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
break;
case KVM_REG_PPC_VRSAVE:
val = get_reg_val(reg->id, vcpu->arch.vrsave);
break;
#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+ case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
+ if (cpu_has_feature(CPU_FTR_VSX)) {
+ long int i = reg->id - KVM_REG_PPC_VSR0;
+ val.vsxval[0] = vcpu->arch.fp.fpr[i][0];
+ val.vsxval[1] = vcpu->arch.fp.fpr[i][1];
+ } else {
+ r = -ENXIO;
+ }
+ break;
+#endif /* CONFIG_VSX */
case KVM_REG_PPC_DEBUG_INST: {
u32 opcode = INS_TW;
r = copy_to_user((u32 __user *)(long)reg->addr,
@@ -654,10 +667,10 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
i = reg->id - KVM_REG_PPC_FPR0;
- vcpu->arch.fpr[i] = set_reg_val(reg->id, val);
+ VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);
break;
case KVM_REG_PPC_FPSCR:
- vcpu->arch.fpscr = set_reg_val(reg->id, val);
+ vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);
break;
#ifdef CONFIG_ALTIVEC
case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
@@ -665,14 +678,14 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
r = -ENXIO;
break;
}
- vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+ vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
- vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
+ vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
break;
case KVM_REG_PPC_VRSAVE:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
@@ -682,6 +695,17 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
vcpu->arch.vrsave = set_reg_val(reg->id, val);
break;
#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+ case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
+ if (cpu_has_feature(CPU_FTR_VSX)) {
+ long int i = reg->id - KVM_REG_PPC_VSR0;
+ vcpu->arch.fp.fpr[i][0] = val.vsxval[0];
+ vcpu->arch.fp.fpr[i][1] = val.vsxval[1];
+ } else {
+ r = -ENXIO;
+ }
+ break;
+#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
case KVM_REG_PPC_ICP_STATE:
if (!vcpu->arch.icp) {
@@ -879,3 +903,9 @@ static void kvmppc_book3s_exit(void)
module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);
+
+/* On 32bit this is our one and only kernel module */
+#ifdef CONFIG_KVM_BOOK3S_32
+MODULE_ALIAS_MISCDEV(KVM_MINOR);
+MODULE_ALIAS("devname:kvm");
+#endif
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 3a0abd2e5a1..5fac89dfe4c 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -243,6 +243,11 @@ next_pteg:
/* Now tell our Shadow PTE code about the new page */
pte = kvmppc_mmu_hpte_cache_next(vcpu);
+ if (!pte) {
+ kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
+ r = -EAGAIN;
+ goto out;
+ }
dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
orig_pte->may_write ? 'w' : '-',
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index c5d148434c0..303ece75b8e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -262,7 +262,7 @@ int kvmppc_mmu_hv_init(void)
static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
- kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
+ kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
}
/*
@@ -562,7 +562,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
* we just return and retry the instruction.
*/
- if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
+ if (instruction_is_store(kvmppc_get_last_inst(vcpu)) != !!is_store)
return RESUME_GUEST;
/*
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 852989a9bad..20d4ea8e656 100644
--- a/arch/powerpc/kvm/book3s_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
@@ -25,9 +25,5 @@ EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
#endif
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
-EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
-#ifdef CONFIG_ALTIVEC
-EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
-#endif
#endif
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index b51d5db7806..17fc9496b6a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -31,6 +31,7 @@
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>
+#include <linux/miscdevice.h>
#include <asm/reg.h>
#include <asm/cputable.h>
@@ -85,10 +86,13 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
/* CPU points to the first thread of the core */
if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
+#ifdef CONFIG_KVM_XICS
int real_cpu = cpu + vcpu->arch.ptid;
if (paca[real_cpu].kvm_hstate.xics_phys)
xics_wake_cpu(real_cpu);
- else if (cpu_online(cpu))
+ else
+#endif
+ if (cpu_online(cpu))
smp_send_reschedule(cpu);
}
put_cpu();
@@ -182,14 +186,28 @@ int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
switch (arch_compat) {
case PVR_ARCH_205:
- pcr = PCR_ARCH_205;
+ /*
+ * If an arch bit is set in PCR, all the defined
+ * higher-order arch bits also have to be set.
+ */
+ pcr = PCR_ARCH_206 | PCR_ARCH_205;
break;
case PVR_ARCH_206:
case PVR_ARCH_206p:
+ pcr = PCR_ARCH_206;
+ break;
+ case PVR_ARCH_207:
break;
default:
return -EINVAL;
}
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ /* POWER7 can't emulate POWER8 */
+ if (!(pcr & PCR_ARCH_206))
+ return -EINVAL;
+ pcr &= ~PCR_ARCH_206;
+ }
}
spin_lock(&vc->lock);
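
The arch_compat rework above encodes two rules: a PCR architecture bit implies all defined higher-order architecture bits (so requesting 2.05 compatibility on a POWER8 host sets both PCR_ARCH_206 and PCR_ARCH_205), and a POWER7 host, which lacks the ARCH_206 bit in its PCR, cannot emulate a POWER8 (2.07) guest at all. A small hedged sketch of that decision; the SK_ constants are made-up values standing in for the real PCR_ARCH_* definitions in asm/reg.h:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SK_PCR_ARCH_205  0x2ull      /* illustrative only */
    #define SK_PCR_ARCH_206  0x4ull      /* illustrative only */

    /* Returns the PCR value to use, or -1 for an invalid request. */
    static int64_t compat_to_pcr(unsigned int arch_compat_ver, bool host_is_power8)
    {
            uint64_t pcr;

            switch (arch_compat_ver) {
            case 205:
                    /* an arch bit implies all defined higher-order arch bits */
                    pcr = SK_PCR_ARCH_206 | SK_PCR_ARCH_205;
                    break;
            case 206:
                    pcr = SK_PCR_ARCH_206;
                    break;
            case 207:
                    pcr = 0;        /* native mode on a POWER8 host */
                    break;
            default:
                    return -1;
            }

            if (!host_is_power8) {
                    /* POWER7 can't emulate POWER8 */
                    if (!(pcr & SK_PCR_ARCH_206))
                            return -1;
                    /* POWER7's PCR has no ARCH_206 bit */
                    pcr &= ~SK_PCR_ARCH_206;
            }
            return (int64_t)pcr;
    }

    int main(void)
    {
            printf("2.05 compat on POWER8: pcr=%lld\n", (long long)compat_to_pcr(205, true));
            printf("2.07 compat on POWER7: pcr=%lld\n", (long long)compat_to_pcr(207, false));
            return 0;
    }
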
@@ -637,6 +655,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_EXTERNAL:
+ case BOOK3S_INTERRUPT_H_DOORBELL:
vcpu->stat.ext_intr_exits++;
r = RESUME_GUEST;
break;
@@ -673,12 +692,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* hcall - punt to userspace */
int i;
- if (vcpu->arch.shregs.msr & MSR_PR) {
- /* sc 1 from userspace - reflect to guest syscall */
- kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
- r = RESUME_GUEST;
- break;
- }
+ /* A hypercall with MSR_PR set has already been handled in real mode
+ * and never reaches here.
+ */
+
run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
for (i = 0; i < 9; ++i)
run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
@@ -708,7 +725,16 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
* we don't emulate any guest instructions at this stage.
*/
case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
- kvmppc_core_queue_program(vcpu, 0x80000);
+ kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+ r = RESUME_GUEST;
+ break;
+ /*
+ * This occurs if the guest (kernel or userspace) does something that
+ * is prohibited by HFSCR. We just generate a program interrupt to
+ * the guest.
+ */
+ case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
+ kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
break;
default:
@@ -766,10 +792,34 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
spin_lock(&vc->lock);
/*
+ * If ILE (interrupt little-endian) has changed, update the
+ * MSR_LE bit in the intr_msr for each vcpu in this vcore.
+ */
+ if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ mutex_lock(&kvm->lock);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (vcpu->arch.vcore != vc)
+ continue;
+ if (new_lpcr & LPCR_ILE)
+ vcpu->arch.intr_msr |= MSR_LE;
+ else
+ vcpu->arch.intr_msr &= ~MSR_LE;
+ }
+ mutex_unlock(&kvm->lock);
+ }
+
+ /*
* Userspace can only modify DPFD (default prefetch depth),
* ILE (interrupt little-endian) and TC (translation control).
+ * On POWER8 userspace can also modify AIL (alt. interrupt loc.)
*/
mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ mask |= LPCR_AIL;
vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
spin_unlock(&vc->lock);
}
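
Two things happen in the kvmppc_set_lpcr() change above: a flip of LPCR_ILE is mirrored into the MSR_LE bit of intr_msr for every vcpu in the vcore, so interrupts synthesized later get the guest's interrupt endianness right, and only a whitelisted set of LPCR bits (DPFD, ILE, TC, plus AIL on POWER8) is actually taken from userspace. A rough standalone sketch of the bit handling, with placeholder constants standing in for the real LPCR_*/MSR_* values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder bit values for illustration; the real ones are in asm/reg.h. */
    #define SK_LPCR_ILE   0x02000000ull
    #define SK_LPCR_DPFD  0x1c000000ull
    #define SK_LPCR_TC    0x00000200ull
    #define SK_LPCR_AIL   0x01800000ull
    #define SK_MSR_LE     0x1ull

    struct vcpu_sketch { uint64_t intr_msr; };

    static uint64_t apply_user_lpcr(uint64_t cur_lpcr, uint64_t new_lpcr,
                                    bool host_is_power8,
                                    struct vcpu_sketch *vcpus, int nvcpus)
    {
            uint64_t mask = SK_LPCR_DPFD | SK_LPCR_ILE | SK_LPCR_TC;
            int i;

            /* If ILE changed, update MSR_LE in each vcpu's interrupt MSR. */
            if ((new_lpcr ^ cur_lpcr) & SK_LPCR_ILE) {
                    for (i = 0; i < nvcpus; i++) {
                            if (new_lpcr & SK_LPCR_ILE)
                                    vcpus[i].intr_msr |= SK_MSR_LE;
                            else
                                    vcpus[i].intr_msr &= ~SK_MSR_LE;
                    }
            }

            /* POWER8 additionally lets userspace choose the AIL mode. */
            if (host_is_power8)
                    mask |= SK_LPCR_AIL;

            /* Keep everything outside the mask, take the rest from userspace. */
            return (cur_lpcr & ~mask) | (new_lpcr & mask);
    }

    int main(void)
    {
            struct vcpu_sketch v[2] = { { 0 }, { 0 } };
            uint64_t lpcr = apply_user_lpcr(0, SK_LPCR_ILE | SK_LPCR_AIL, true, v, 2);

            printf("lpcr=%#llx intr_msr[0]=%#llx\n",
                   (unsigned long long)lpcr, (unsigned long long)v[0].intr_msr);
            return 0;
    }
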
@@ -787,6 +837,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_DABR:
*val = get_reg_val(id, vcpu->arch.dabr);
break;
+ case KVM_REG_PPC_DABRX:
+ *val = get_reg_val(id, vcpu->arch.dabrx);
+ break;
case KVM_REG_PPC_DSCR:
*val = get_reg_val(id, vcpu->arch.dscr);
break;
@@ -802,7 +855,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_UAMOR:
*val = get_reg_val(id, vcpu->arch.uamor);
break;
- case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
+ case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
i = id - KVM_REG_PPC_MMCR0;
*val = get_reg_val(id, vcpu->arch.mmcr[i]);
break;
@@ -810,33 +863,87 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
i = id - KVM_REG_PPC_PMC1;
*val = get_reg_val(id, vcpu->arch.pmc[i]);
break;
+ case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
+ i = id - KVM_REG_PPC_SPMC1;
+ *val = get_reg_val(id, vcpu->arch.spmc[i]);
+ break;
case KVM_REG_PPC_SIAR:
*val = get_reg_val(id, vcpu->arch.siar);
break;
case KVM_REG_PPC_SDAR:
*val = get_reg_val(id, vcpu->arch.sdar);
break;
-#ifdef CONFIG_VSX
- case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
- if (cpu_has_feature(CPU_FTR_VSX)) {
- /* VSX => FP reg i is stored in arch.vsr[2*i] */
- long int i = id - KVM_REG_PPC_FPR0;
- *val = get_reg_val(id, vcpu->arch.vsr[2 * i]);
- } else {
- /* let generic code handle it */
- r = -EINVAL;
- }
+ case KVM_REG_PPC_SIER:
+ *val = get_reg_val(id, vcpu->arch.sier);
break;
- case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
- if (cpu_has_feature(CPU_FTR_VSX)) {
- long int i = id - KVM_REG_PPC_VSR0;
- val->vsxval[0] = vcpu->arch.vsr[2 * i];
- val->vsxval[1] = vcpu->arch.vsr[2 * i + 1];
- } else {
- r = -ENXIO;
- }
+ case KVM_REG_PPC_IAMR:
+ *val = get_reg_val(id, vcpu->arch.iamr);
+ break;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ case KVM_REG_PPC_TFHAR:
+ *val = get_reg_val(id, vcpu->arch.tfhar);
+ break;
+ case KVM_REG_PPC_TFIAR:
+ *val = get_reg_val(id, vcpu->arch.tfiar);
+ break;
+ case KVM_REG_PPC_TEXASR:
+ *val = get_reg_val(id, vcpu->arch.texasr);
+ break;
+#endif
+ case KVM_REG_PPC_FSCR:
+ *val = get_reg_val(id, vcpu->arch.fscr);
+ break;
+ case KVM_REG_PPC_PSPB:
+ *val = get_reg_val(id, vcpu->arch.pspb);
+ break;
+ case KVM_REG_PPC_EBBHR:
+ *val = get_reg_val(id, vcpu->arch.ebbhr);
+ break;
+ case KVM_REG_PPC_EBBRR:
+ *val = get_reg_val(id, vcpu->arch.ebbrr);
+ break;
+ case KVM_REG_PPC_BESCR:
+ *val = get_reg_val(id, vcpu->arch.bescr);
+ break;
+ case KVM_REG_PPC_TAR:
+ *val = get_reg_val(id, vcpu->arch.tar);
+ break;
+ case KVM_REG_PPC_DPDES:
+ *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
+ break;
+ case KVM_REG_PPC_DAWR:
+ *val = get_reg_val(id, vcpu->arch.dawr);
+ break;
+ case KVM_REG_PPC_DAWRX:
+ *val = get_reg_val(id, vcpu->arch.dawrx);
+ break;
+ case KVM_REG_PPC_CIABR:
+ *val = get_reg_val(id, vcpu->arch.ciabr);
+ break;
+ case KVM_REG_PPC_IC:
+ *val = get_reg_val(id, vcpu->arch.ic);
+ break;
+ case KVM_REG_PPC_VTB:
+ *val = get_reg_val(id, vcpu->arch.vtb);
+ break;
+ case KVM_REG_PPC_CSIGR:
+ *val = get_reg_val(id, vcpu->arch.csigr);
+ break;
+ case KVM_REG_PPC_TACR:
+ *val = get_reg_val(id, vcpu->arch.tacr);
+ break;
+ case KVM_REG_PPC_TCSCR:
+ *val = get_reg_val(id, vcpu->arch.tcscr);
+ break;
+ case KVM_REG_PPC_PID:
+ *val = get_reg_val(id, vcpu->arch.pid);
+ break;
+ case KVM_REG_PPC_ACOP:
+ *val = get_reg_val(id, vcpu->arch.acop);
+ break;
+ case KVM_REG_PPC_WORT:
+ *val = get_reg_val(id, vcpu->arch.wort);
break;
-#endif /* CONFIG_VSX */
case KVM_REG_PPC_VPA_ADDR:
spin_lock(&vcpu->arch.vpa_update_lock);
*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
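
All of the new SPRs exported in the switch above (IAMR, FSCR, the EBB registers, DAWR/DAWRX, CIABR, DPDES and so on) are reached from userspace through the existing KVM_GET_ONE_REG / KVM_SET_ONE_REG ioctls with a 64-bit register ID. A hedged userspace sketch of reading one of them; KVM_REG_PPC_TAR is assumed to be exposed by <linux/kvm.h> on a kernel carrying this patch, and vcpu_fd is assumed to come from KVM_CREATE_VCPU elsewhere:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Read a 64-bit one_reg value from a vcpu fd; returns 0 on success. */
    static int get_one_reg_u64(int vcpu_fd, uint64_t id, uint64_t *out)
    {
            struct kvm_one_reg reg;

            memset(&reg, 0, sizeof(reg));
            reg.id = id;
            reg.addr = (uintptr_t)out;      /* the kernel copies the value here */
            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }

    /* Example use:
     *
     *      uint64_t tar;
     *      if (get_one_reg_u64(vcpu_fd, KVM_REG_PPC_TAR, &tar) == 0)
     *              ... tar now holds the guest's Target Address Register ...
     */
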
@@ -890,6 +997,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_DABR:
vcpu->arch.dabr = set_reg_val(id, *val);
break;
+ case KVM_REG_PPC_DABRX:
+ vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
+ break;
case KVM_REG_PPC_DSCR:
vcpu->arch.dscr = set_reg_val(id, *val);
break;
@@ -905,7 +1015,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_UAMOR:
vcpu->arch.uamor = set_reg_val(id, *val);
break;
- case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
+ case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
i = id - KVM_REG_PPC_MMCR0;
vcpu->arch.mmcr[i] = set_reg_val(id, *val);
break;
@@ -913,33 +1023,90 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
i = id - KVM_REG_PPC_PMC1;
vcpu->arch.pmc[i] = set_reg_val(id, *val);
break;
+ case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
+ i = id - KVM_REG_PPC_SPMC1;
+ vcpu->arch.spmc[i] = set_reg_val(id, *val);
+ break;
case KVM_REG_PPC_SIAR:
vcpu->arch.siar = set_reg_val(id, *val);
break;
case KVM_REG_PPC_SDAR:
vcpu->arch.sdar = set_reg_val(id, *val);
break;
-#ifdef CONFIG_VSX
- case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
- if (cpu_has_feature(CPU_FTR_VSX)) {
- /* VSX => FP reg i is stored in arch.vsr[2*i] */
- long int i = id - KVM_REG_PPC_FPR0;
- vcpu->arch.vsr[2 * i] = set_reg_val(id, *val);
- } else {
- /* let generic code handle it */
- r = -EINVAL;
- }
+ case KVM_REG_PPC_SIER:
+ vcpu->arch.sier = set_reg_val(id, *val);
break;
- case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
- if (cpu_has_feature(CPU_FTR_VSX)) {
- long int i = id - KVM_REG_PPC_VSR0;
- vcpu->arch.vsr[2 * i] = val->vsxval[0];
- vcpu->arch.vsr[2 * i + 1] = val->vsxval[1];
- } else {
- r = -ENXIO;
- }
+ case KVM_REG_PPC_IAMR:
+ vcpu->arch.iamr = set_reg_val(id, *val);
+ break;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ case KVM_REG_PPC_TFHAR:
+ vcpu->arch.tfhar = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_TFIAR:
+ vcpu->arch.tfiar = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_TEXASR:
+ vcpu->arch.texasr = set_reg_val(id, *val);
+ break;
+#endif
+ case KVM_REG_PPC_FSCR:
+ vcpu->arch.fscr = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_PSPB:
+ vcpu->arch.pspb = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_EBBHR:
+ vcpu->arch.ebbhr = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_EBBRR:
+ vcpu->arch.ebbrr = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_BESCR:
+ vcpu->arch.bescr = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_TAR:
+ vcpu->arch.tar = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_DPDES:
+ vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_DAWR:
+ vcpu->arch.dawr = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_DAWRX:
+ vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
+ break;
+ case KVM_REG_PPC_CIABR:
+ vcpu->arch.ciabr = set_reg_val(id, *val);
+ /* Don't allow setting breakpoints in hypervisor code */
+ if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
+ vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */
+ break;
+ case KVM_REG_PPC_IC:
+ vcpu->arch.ic = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_VTB:
+ vcpu->arch.vtb = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_CSIGR:
+ vcpu->arch.csigr = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_TACR:
+ vcpu->arch.tacr = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_TCSCR:
+ vcpu->arch.tcscr = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_PID:
+ vcpu->arch.pid = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_ACOP:
+ vcpu->arch.acop = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_WORT:
+ vcpu->arch.wort = set_reg_val(id, *val);
break;
-#endif /* CONFIG_VSX */
case KVM_REG_PPC_VPA_ADDR:
addr = set_reg_val(id, *val);
r = -EINVAL;
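
On the set side above, values that could let a guest plant breakpoints in hypervisor state are sanitized before being accepted: DABRX and DAWRX have their HYP bits masked off, and a CIABR whose privilege field selects hypervisor mode has that field cleared, which disables the breakpoint. A small sketch of the pattern, with stand-in masks since the real DABRX_HYP/DAWRX_HYP/CIABR_PRIV values come from asm/reg.h:

    #include <stdint.h>

    /* Stand-in bit definitions for illustration only. */
    #define SK_DAWRX_HYP        0x4ull
    #define SK_CIABR_PRIV       0x3ull      /* low bits: privilege field */
    #define SK_CIABR_PRIV_HYPER 0x3ull

    struct dbg_regs_sketch { uint64_t dawrx, ciabr; };

    void set_guest_debug_regs(struct dbg_regs_sketch *d,
                              uint64_t dawrx, uint64_t ciabr)
    {
            /* never let the guest match in hypervisor state */
            d->dawrx = dawrx & ~SK_DAWRX_HYP;

            d->ciabr = ciabr;
            if ((d->ciabr & SK_CIABR_PRIV) == SK_CIABR_PRIV_HYPER)
                    d->ciabr &= ~SK_CIABR_PRIV;     /* disable the breakpoint */
    }
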
@@ -1017,6 +1184,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
spin_lock_init(&vcpu->arch.vpa_update_lock);
spin_lock_init(&vcpu->arch.tbacct_lock);
vcpu->arch.busy_preempt = TB_NIL;
+ vcpu->arch.intr_msr = MSR_SF | MSR_ME;
kvmppc_mmu_book3s_hv_init(vcpu);
@@ -1034,6 +1202,8 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
init_waitqueue_head(&vcore->wq);
vcore->preempt_tb = TB_NIL;
vcore->lpcr = kvm->arch.lpcr;
+ vcore->first_vcpuid = core * threads_per_core;
+ vcore->kvm = kvm;
}
kvm->arch.vcores[core] = vcore;
kvm->arch.online_vcores++;
@@ -1047,6 +1217,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
++vcore->num_threads;
spin_unlock(&vcore->lock);
vcpu->arch.vcore = vcore;
+ vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
vcpu->arch.cpu_type = KVM_CPU_3S_64;
kvmppc_sanity_check(vcpu);
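
With the change above, a vcpu's physical thread ID is no longer assigned dynamically each time the vcore runs but derived once at creation: vcore->first_vcpuid = core * threads_per_core, and ptid = vcpu_id - first_vcpuid. For example, with threads_per_core = 8, vcpu 11 belongs to core 1 and gets ptid 3. The same arithmetic as a trivial sketch:

    #include <stdio.h>

    /* ptid = position of the vcpu within its virtual core */
    static int vcpu_ptid(int vcpu_id, int threads_per_core)
    {
            int core = vcpu_id / threads_per_core;
            int first_vcpuid = core * threads_per_core;

            return vcpu_id - first_vcpuid;
    }

    int main(void)
    {
            printf("vcpu 11, 8 threads/core -> ptid %d\n", vcpu_ptid(11, 8));
            return 0;
    }
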
@@ -1110,7 +1281,7 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
}
}
-extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern void __kvmppc_vcore_entry(void);
static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
struct kvm_vcpu *vcpu)
@@ -1184,13 +1355,16 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
tpaca = &paca[cpu];
tpaca->kvm_hstate.kvm_vcpu = vcpu;
tpaca->kvm_hstate.kvm_vcore = vc;
- tpaca->kvm_hstate.napping = 0;
+ tpaca->kvm_hstate.ptid = vcpu->arch.ptid;
vcpu->cpu = vc->pcpu;
smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
- if (vcpu->arch.ptid) {
+ if (cpu != smp_processor_id()) {
+#ifdef CONFIG_KVM_XICS
xics_wake_cpu(cpu);
- ++vc->n_woken;
+#endif
+ if (vcpu->arch.ptid)
+ ++vc->n_woken;
}
#endif
}
@@ -1247,10 +1421,10 @@ static int on_primary_thread(void)
*/
static void kvmppc_run_core(struct kvmppc_vcore *vc)
{
- struct kvm_vcpu *vcpu, *vcpu0, *vnext;
+ struct kvm_vcpu *vcpu, *vnext;
long ret;
u64 now;
- int ptid, i, need_vpa_update;
+ int i, need_vpa_update;
int srcu_idx;
struct kvm_vcpu *vcpus_to_update[threads_per_core];
@@ -1288,25 +1462,6 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
}
/*
- * Assign physical thread IDs, first to non-ceded vcpus
- * and then to ceded ones.
- */
- ptid = 0;
- vcpu0 = NULL;
- list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
- if (!vcpu->arch.ceded) {
- if (!ptid)
- vcpu0 = vcpu;
- vcpu->arch.ptid = ptid++;
- }
- }
- if (!vcpu0)
- goto out; /* nothing to run; should never happen */
- list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
- if (vcpu->arch.ceded)
- vcpu->arch.ptid = ptid++;
-
- /*
* Make sure we are running on thread 0, and that
* secondary threads are offline.
*/
@@ -1322,15 +1477,19 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
kvmppc_create_dtl_entry(vcpu, vc);
}
+ /* Set this explicitly in case thread 0 doesn't have a vcpu */
+ get_paca()->kvm_hstate.kvm_vcore = vc;
+ get_paca()->kvm_hstate.ptid = 0;
+
vc->vcore_state = VCORE_RUNNING;
preempt_disable();
spin_unlock(&vc->lock);
kvm_guest_enter();
- srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu);
+ srcu_idx = srcu_read_lock(&vc->kvm->srcu);
- __kvmppc_vcore_entry(NULL, vcpu0);
+ __kvmppc_vcore_entry();
spin_lock(&vc->lock);
/* disable sending of IPIs on virtual external irqs */
@@ -1345,14 +1504,14 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
vc->vcore_state = VCORE_EXITING;
spin_unlock(&vc->lock);
- srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx);
+ srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
/* make sure updates to secondary vcpu structs are visible now */
smp_mb();
kvm_guest_exit();
preempt_enable();
- kvm_resched(vcpu);
+ cond_resched();
spin_lock(&vc->lock);
now = get_tb();
@@ -1453,7 +1612,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
if (!signal_pending(current)) {
if (vc->vcore_state == VCORE_RUNNING &&
VCORE_EXIT_COUNT(vc) == 0) {
- vcpu->arch.ptid = vc->n_runnable - 1;
kvmppc_create_dtl_entry(vcpu, vc);
kvmppc_start_thread(vcpu);
} else if (vc->vcore_state == VCORE_SLEEPING) {
@@ -2048,6 +2206,9 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
LPCR_VPM0 | LPCR_VPM1;
kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
(VRMA_VSID << SLB_VSID_SHIFT_1T);
+ /* On POWER8 turn on online bit to enable PURR/SPURR */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ lpcr |= LPCR_ONL;
}
kvm->arch.lpcr = lpcr;
@@ -2222,3 +2383,5 @@ static void kvmppc_book3s_exit_hv(void)
module_init(kvmppc_book3s_init_hv);
module_exit(kvmppc_book3s_exit_hv);
MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(KVM_MINOR);
+MODULE_ALIAS("devname:kvm");
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 928142c64cb..e873796b1a2 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -35,7 +35,7 @@
****************************************************************************/
/* Registers:
- * r4: vcpu pointer
+ * none
*/
_GLOBAL(__kvmppc_vcore_entry)
@@ -57,9 +57,11 @@ BEGIN_FTR_SECTION
std r3, HSTATE_DSCR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+BEGIN_FTR_SECTION
/* Save host DABR */
mfspr r3, SPRN_DABR
std r3, HSTATE_DABR(r13)
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/* Hard-disable interrupts */
mfmsr r10
@@ -69,7 +71,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
mtmsrd r10,1
/* Save host PMU registers */
- /* R4 is live here (vcpu pointer) but not r3 or r5 */
li r3, 1
sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
mfspr r7, SPRN_MMCR0 /* save MMCR0 */
@@ -134,16 +135,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
* enters the guest with interrupts enabled.
*/
BEGIN_FTR_SECTION
+ ld r4, HSTATE_KVM_VCPU(r13)
ld r0, VCPU_PENDING_EXC(r4)
li r7, (1 << BOOK3S_IRQPRIO_EXTERNAL)
oris r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
and. r0, r0, r7
beq 32f
- mr r31, r4
lhz r3, PACAPACAINDEX(r13)
bl smp_send_reschedule
nop
- mr r4, r31
32:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index a353c485808..768a9f977c0 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -12,6 +12,7 @@
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <asm/opal.h>
+#include <asm/mce.h>
/* SRR1 bits for machine check on POWER7 */
#define SRR1_MC_LDSTERR (1ul << (63-42))
@@ -58,18 +59,6 @@ static void reload_slb(struct kvm_vcpu *vcpu)
}
}
-/* POWER7 TLB flush */
-static void flush_tlb_power7(struct kvm_vcpu *vcpu)
-{
- unsigned long i, rb;
-
- rb = TLBIEL_INVAL_SET_LPID;
- for (i = 0; i < POWER7_TLB_SETS; ++i) {
- asm volatile("tlbiel %0" : : "r" (rb));
- rb += 1 << TLBIEL_INVAL_SET_SHIFT;
- }
-}
-
/*
* On POWER7, see if we can handle a machine check that occurred inside
* the guest in real mode, without switching to the host partition.
@@ -79,9 +68,7 @@ static void flush_tlb_power7(struct kvm_vcpu *vcpu)
static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
{
unsigned long srr1 = vcpu->arch.shregs.msr;
-#ifdef CONFIG_PPC_POWERNV
- struct opal_machine_check_event *opal_evt;
-#endif
+ struct machine_check_event mce_evt;
long handled = 1;
if (srr1 & SRR1_MC_LDSTERR) {
@@ -96,7 +83,8 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI);
}
if (dsisr & DSISR_MC_TLB_MULTI) {
- flush_tlb_power7(vcpu);
+ if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
+ cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET_LPID);
dsisr &= ~DSISR_MC_TLB_MULTI;
}
/* Any other errors we don't understand? */
@@ -113,28 +101,38 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
reload_slb(vcpu);
break;
case SRR1_MC_IFETCH_TLBMULTI:
- flush_tlb_power7(vcpu);
+ if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
+ cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET_LPID);
break;
default:
handled = 0;
}
-#ifdef CONFIG_PPC_POWERNV
/*
- * See if OPAL has already handled the condition.
- * We assume that if the condition is recovered then OPAL
+ * See if we have already handled the condition in the linux host.
+ * We assume that if the condition is recovered then linux host
* will have generated an error log event that we will pick
* up and log later.
+ * Don't release the mce event now. If the condition is not
+ * recovered we do a guest exit and go back to the Linux host machine
+ * check handler. Hence we need to make sure that the current mce event
+ * is available for the Linux host to consume.
*/
- opal_evt = local_paca->opal_mc_evt;
- if (opal_evt->version == OpalMCE_V1 &&
- (opal_evt->severity == OpalMCE_SEV_NO_ERROR ||
- opal_evt->disposition == OpalMCE_DISPOSITION_RECOVERED))
+ if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE))
+ goto out;
+
+ if (mce_evt.version == MCE_V1 &&
+ (mce_evt.severity == MCE_SEV_NO_ERROR ||
+ mce_evt.disposition == MCE_DISPOSITION_RECOVERED))
handled = 1;
+out:
+ /*
+ * If we have handled the error, then release the mce event because
+ * we will be delivering machine check to guest.
+ */
if (handled)
- opal_evt->in_use = 0;
-#endif
+ release_mce_event();
return handled;
}
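
The book3s_hv_ras.c rework above replaces the OPAL-specific check with the generic machine check event queue: the event is peeked with MCE_EVENT_DONTRELEASE so that, if the error is not recovered and we exit to the host, the host machine check handler can still consume the same event; only when the error counts as handled, and will therefore be reflected to the guest, is the event released. A rough sketch of that ownership pattern, with stub functions standing in for get_mce_event()/release_mce_event():

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's MCE event queue API. */
    struct mce_evt_sketch { int severity; int disposition; };
    enum { SK_SEV_NO_ERROR = 0, SK_DISP_RECOVERED = 1 };

    /* Stub: "peek" the queued event without releasing it
     * (like get_mce_event(..., MCE_EVENT_DONTRELEASE)). */
    static bool peek_mce_event(struct mce_evt_sketch *evt)
    {
            evt->severity = SK_SEV_NO_ERROR;
            evt->disposition = SK_DISP_RECOVERED;
            return true;
    }

    /* Stub: drop the event once the guest is going to get the machine check. */
    static void release_mce_event_sketch(void)
    {
            puts("mce event released");
    }

    static int realmode_mc_sketch(bool fixed_up_in_realmode)
    {
            struct mce_evt_sketch evt;
            int handled = fixed_up_in_realmode;

            /* Peek only: if unrecovered we exit to the host, which must
             * still be able to consume this same event. */
            if (!peek_mce_event(&evt))
                    goto out;

            if (evt.severity == SK_SEV_NO_ERROR ||
                evt.disposition == SK_DISP_RECOVERED)
                    handled = 1;
    out:
            /* Release only when delivering the machine check to the guest. */
            if (handled)
                    release_mce_event_sketch();
            return handled;
    }

    int main(void)
    {
            printf("handled = %d\n", realmode_mc_sketch(false));
            return 0;
    }
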
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 8689e2e3085..37fb3caa4c8 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -134,7 +134,7 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
unlock_rmap(rmap);
}
-static pte_t lookup_linux_pte(pgd_t *pgdir, unsigned long hva,
+static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
int writing, unsigned long *pte_sizep)
{
pte_t *ptep;
@@ -232,7 +232,8 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
/* Look up the Linux PTE for the backing page */
pte_size = psize;
- pte = lookup_linux_pte(pgdir, hva, writing, &pte_size);
+ pte = lookup_linux_pte_and_update(pgdir, hva, writing,
+ &pte_size);
if (pte_present(pte)) {
if (writing && !pte_write(pte))
/* make the actual HPTE be read-only */
@@ -672,7 +673,8 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
if (memslot) {
hva = __gfn_to_hva_memslot(memslot, gfn);
- pte = lookup_linux_pte(pgdir, hva, 1, &psize);
+ pte = lookup_linux_pte_and_update(pgdir, hva,
+ 1, &psize);
if (pte_present(pte) && !pte_write(pte))
r = hpte_make_readonly(r);
}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index be4fa04a37c..e66d4ec04d9 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -33,6 +33,10 @@
#error Need to fix lppaca and SLB shadow accesses in little endian mode
#endif
+/* Values in HSTATE_NAPPING(r13) */
+#define NAPPING_CEDE 1
+#define NAPPING_NOVCPU 2
+
/*
* Call kvmppc_hv_entry in real mode.
* Must be called with interrupts hard-disabled.
@@ -57,29 +61,23 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
RFI
kvmppc_call_hv_entry:
+ ld r4, HSTATE_KVM_VCPU(r13)
bl kvmppc_hv_entry
/* Back from guest - restore host state and return to caller */
+BEGIN_FTR_SECTION
/* Restore host DABR and DABRX */
ld r5,HSTATE_DABR(r13)
li r6,7
mtspr SPRN_DABR,r5
mtspr SPRN_DABRX,r6
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/* Restore SPRG3 */
ld r3,PACA_SPRG3(r13)
mtspr SPRN_SPRG3,r3
- /*
- * Reload DEC. HDEC interrupts were disabled when
- * we reloaded the host's LPCR value.
- */
- ld r3, HSTATE_DECEXP(r13)
- mftb r4
- subf r4, r4, r3
- mtspr SPRN_DEC, r4
-
/* Reload the host's PMU registers */
ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
lbz r4, LPPACA_PMCINUSE(r3)
@@ -115,6 +113,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
23:
/*
+ * Reload DEC. HDEC interrupts were disabled when
+ * we reloaded the host's LPCR value.
+ */
+ ld r3, HSTATE_DECEXP(r13)
+ mftb r4
+ subf r4, r4, r3
+ mtspr SPRN_DEC, r4
+
+ /*
* For external and machine check interrupts, we need
* to call the Linux handler to process the interrupt.
* We do that by jumping to absolute address 0x500 for
@@ -153,15 +160,75 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
13: b machine_check_fwnmi
+kvmppc_primary_no_guest:
+ /* We handle this much like a ceded vcpu */
+ /* set our bit in napping_threads */
+ ld r5, HSTATE_KVM_VCORE(r13)
+ lbz r7, HSTATE_PTID(r13)
+ li r0, 1
+ sld r0, r0, r7
+ addi r6, r5, VCORE_NAPPING_THREADS
+1: lwarx r3, 0, r6
+ or r3, r3, r0
+ stwcx. r3, 0, r6
+ bne 1b
+ /* order napping_threads update vs testing entry_exit_count */
+ isync
+ li r12, 0
+ lwz r7, VCORE_ENTRY_EXIT(r5)
+ cmpwi r7, 0x100
+ bge kvm_novcpu_exit /* another thread already exiting */
+ li r3, NAPPING_NOVCPU
+ stb r3, HSTATE_NAPPING(r13)
+ li r3, 1
+ stb r3, HSTATE_HWTHREAD_REQ(r13)
+
+ b kvm_do_nap
+
+kvm_novcpu_wakeup:
+ ld r1, HSTATE_HOST_R1(r13)
+ ld r5, HSTATE_KVM_VCORE(r13)
+ li r0, 0
+ stb r0, HSTATE_NAPPING(r13)
+ stb r0, HSTATE_HWTHREAD_REQ(r13)
+
+ /* check the wake reason */
+ bl kvmppc_check_wake_reason
+
+ /* see if any other thread is already exiting */
+ lwz r0, VCORE_ENTRY_EXIT(r5)
+ cmpwi r0, 0x100
+ bge kvm_novcpu_exit
+
+ /* clear our bit in napping_threads */
+ lbz r7, HSTATE_PTID(r13)
+ li r0, 1
+ sld r0, r0, r7
+ addi r6, r5, VCORE_NAPPING_THREADS
+4: lwarx r7, 0, r6
+ andc r7, r7, r0
+ stwcx. r7, 0, r6
+ bne 4b
+
+ /* See if the wake reason means we need to exit */
+ cmpdi r3, 0
+ bge kvm_novcpu_exit
+
+ /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
+ ld r4, HSTATE_KVM_VCPU(r13)
+ cmpdi r4, 0
+ bne kvmppc_got_guest
+
+kvm_novcpu_exit:
+ b hdec_soon
+
/*
- * We come in here when wakened from nap mode on a secondary hw thread.
+ * We come in here when wakened from nap mode.
* Relocation is off and most register values are lost.
* r13 points to the PACA.
*/
.globl kvm_start_guest
kvm_start_guest:
- ld r1,PACAEMERGSP(r13)
- subi r1,r1,STACK_FRAME_OVERHEAD
ld r2,PACATOC(r13)
li r0,KVM_HWTHREAD_IN_KVM
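
kvmppc_primary_no_guest and kvm_novcpu_wakeup above maintain a per-vcore bitmap of napping threads with lwarx/stwcx. loops: a thread atomically sets its bit (indexed by its ptid) before napping and clears it on wakeup, checking entry_exit_count around both so it does not nap while another thread is already exiting. A C-level approximation of the same idea using GCC/Clang atomic builtins (a sketch of the larx/stcx. sequences, not the kernel code):

    #include <stdint.h>
    #include <stdio.h>

    struct vcore_sketch {
            uint32_t napping_threads;       /* one bit per hardware thread */
            uint32_t entry_exit_count;      /* >= 0x100 means someone is exiting */
    };

    /* Returns 0 if it is safe to nap, nonzero if an exit is already underway. */
    static int mark_napping(struct vcore_sketch *vc, int ptid)
    {
            __atomic_fetch_or(&vc->napping_threads, 1u << ptid, __ATOMIC_SEQ_CST);

            /* order the bitmap update vs. the entry_exit_count test (the isync) */
            if (__atomic_load_n(&vc->entry_exit_count, __ATOMIC_SEQ_CST) >= 0x100)
                    return -1;      /* another thread is already exiting */
            return 0;
    }

    static void clear_napping(struct vcore_sketch *vc, int ptid)
    {
            __atomic_fetch_and(&vc->napping_threads, ~(1u << ptid), __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
            struct vcore_sketch vc = { 0, 0 };

            if (mark_napping(&vc, 3) == 0)
                    printf("thread 3 napping, bitmap=%#x\n", vc.napping_threads);
            clear_napping(&vc, 3);
            printf("after wakeup, bitmap=%#x\n", vc.napping_threads);
            return 0;
    }
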
@@ -173,8 +240,13 @@ kvm_start_guest:
/* were we napping due to cede? */
lbz r0,HSTATE_NAPPING(r13)
- cmpwi r0,0
- bne kvm_end_cede
+ cmpwi r0,NAPPING_CEDE
+ beq kvm_end_cede
+ cmpwi r0,NAPPING_NOVCPU
+ beq kvm_novcpu_wakeup
+
+ ld r1,PACAEMERGSP(r13)
+ subi r1,r1,STACK_FRAME_OVERHEAD
/*
* We weren't napping due to cede, so this must be a secondary
@@ -184,40 +256,22 @@ kvm_start_guest:
*/
/* Check the wake reason in SRR1 to see why we got here */
- mfspr r3,SPRN_SRR1
- rlwinm r3,r3,44-31,0x7 /* extract wake reason field */
- cmpwi r3,4 /* was it an external interrupt? */
- bne 27f /* if not */
- ld r5,HSTATE_XICS_PHYS(r13)
- li r7,XICS_XIRR /* if it was an external interrupt, */
- lwzcix r8,r5,r7 /* get and ack the interrupt */
- sync
- clrldi. r9,r8,40 /* get interrupt source ID. */
- beq 28f /* none there? */
- cmpwi r9,XICS_IPI /* was it an IPI? */
- bne 29f
- li r0,0xff
- li r6,XICS_MFRR
- stbcix r0,r5,r6 /* clear IPI */
- stwcix r8,r5,r7 /* EOI the interrupt */
- sync /* order loading of vcpu after that */
+ bl kvmppc_check_wake_reason
+ cmpdi r3, 0
+ bge kvm_no_guest
/* get vcpu pointer, NULL if we have no vcpu to run */
ld r4,HSTATE_KVM_VCPU(r13)
cmpdi r4,0
/* if we have no vcpu to run, go back to sleep */
beq kvm_no_guest
- b 30f
-27: /* XXX should handle hypervisor maintenance interrupts etc. here */
- b kvm_no_guest
-28: /* SRR1 said external but ICP said nope?? */
- b kvm_no_guest
-29: /* External non-IPI interrupt to offline secondary thread? help?? */
- stw r8,HSTATE_SAVED_XIRR(r13)
- b kvm_no_guest
+ /* Set HSTATE_DSCR(r13) to something sensible */
+ LOAD_REG_ADDR(r6, dscr_default)
+ ld r6, 0(r6)
+ std r6, HSTATE_DSCR(r13)
-30: bl kvmppc_hv_entry
+ bl kvmppc_hv_entry
/* Back from the guest, go back to nap */
/* Clear our vcpu pointer so we don't come back in early */
@@ -229,18 +283,6 @@ kvm_start_guest:
* visible we could be given another vcpu.
*/
lwsync
- /* Clear any pending IPI - we're an offline thread */
- ld r5, HSTATE_XICS_PHYS(r13)
- li r7, XICS_XIRR
- lwzcix r3, r5, r7 /* ack any pending interrupt */
- rlwinm. r0, r3, 0, 0xffffff /* any pending? */
- beq 37f
- sync
- li r0, 0xff
- li r6, XICS_MFRR
- stbcix r0, r5, r6 /* clear the IPI */
- stwcix r3, r5, r7 /* EOI it */
-37: sync
/* increment the nap count and then go to nap mode */
ld r4, HSTATE_KVM_VCORE(r13)
@@ -253,6 +295,7 @@ kvm_start_guest:
kvm_no_guest:
li r0, KVM_HWTHREAD_IN_NAP
stb r0, HSTATE_HWTHREAD_STATE(r13)
+kvm_do_nap:
li r3, LPCR_PECE0
mfspr r4, SPRN_LPCR
rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
@@ -277,7 +320,7 @@ kvmppc_hv_entry:
/* Required state:
*
- * R4 = vcpu pointer
+ * R4 = vcpu pointer (or NULL)
* MSR = ~IR|DR
* R13 = PACA
* R1 = host R1
@@ -287,122 +330,12 @@ kvmppc_hv_entry:
std r0, PPC_LR_STKOFF(r1)
stdu r1, -112(r1)
- /* Set partition DABR */
- /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
- li r5,3
- ld r6,VCPU_DABR(r4)
- mtspr SPRN_DABRX,r5
- mtspr SPRN_DABR,r6
-BEGIN_FTR_SECTION
- isync
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
- /* Load guest PMU registers */
- /* R4 is live here (vcpu pointer) */
- li r3, 1
- sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
- mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
- isync
- lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
- lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
- lwz r6, VCPU_PMC + 8(r4)
- lwz r7, VCPU_PMC + 12(r4)
- lwz r8, VCPU_PMC + 16(r4)
- lwz r9, VCPU_PMC + 20(r4)
-BEGIN_FTR_SECTION
- lwz r10, VCPU_PMC + 24(r4)
- lwz r11, VCPU_PMC + 28(r4)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
- mtspr SPRN_PMC1, r3
- mtspr SPRN_PMC2, r5
- mtspr SPRN_PMC3, r6
- mtspr SPRN_PMC4, r7
- mtspr SPRN_PMC5, r8
- mtspr SPRN_PMC6, r9
-BEGIN_FTR_SECTION
- mtspr SPRN_PMC7, r10
- mtspr SPRN_PMC8, r11
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
- ld r3, VCPU_MMCR(r4)
- ld r5, VCPU_MMCR + 8(r4)
- ld r6, VCPU_MMCR + 16(r4)
- ld r7, VCPU_SIAR(r4)
- ld r8, VCPU_SDAR(r4)
- mtspr SPRN_MMCR1, r5
- mtspr SPRN_MMCRA, r6
- mtspr SPRN_SIAR, r7
- mtspr SPRN_SDAR, r8
- mtspr SPRN_MMCR0, r3
- isync
-
- /* Load up FP, VMX and VSX registers */
- bl kvmppc_load_fp
-
- ld r14, VCPU_GPR(R14)(r4)
- ld r15, VCPU_GPR(R15)(r4)
- ld r16, VCPU_GPR(R16)(r4)
- ld r17, VCPU_GPR(R17)(r4)
- ld r18, VCPU_GPR(R18)(r4)
- ld r19, VCPU_GPR(R19)(r4)
- ld r20, VCPU_GPR(R20)(r4)
- ld r21, VCPU_GPR(R21)(r4)
- ld r22, VCPU_GPR(R22)(r4)
- ld r23, VCPU_GPR(R23)(r4)
- ld r24, VCPU_GPR(R24)(r4)
- ld r25, VCPU_GPR(R25)(r4)
- ld r26, VCPU_GPR(R26)(r4)
- ld r27, VCPU_GPR(R27)(r4)
- ld r28, VCPU_GPR(R28)(r4)
- ld r29, VCPU_GPR(R29)(r4)
- ld r30, VCPU_GPR(R30)(r4)
- ld r31, VCPU_GPR(R31)(r4)
-
-BEGIN_FTR_SECTION
- /* Switch DSCR to guest value */
- ld r5, VCPU_DSCR(r4)
- mtspr SPRN_DSCR, r5
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
- /*
- * Set the decrementer to the guest decrementer.
- */
- ld r8,VCPU_DEC_EXPIRES(r4)
- mftb r7
- subf r3,r7,r8
- mtspr SPRN_DEC,r3
- stw r3,VCPU_DEC(r4)
-
- ld r5, VCPU_SPRG0(r4)
- ld r6, VCPU_SPRG1(r4)
- ld r7, VCPU_SPRG2(r4)
- ld r8, VCPU_SPRG3(r4)
- mtspr SPRN_SPRG0, r5
- mtspr SPRN_SPRG1, r6
- mtspr SPRN_SPRG2, r7
- mtspr SPRN_SPRG3, r8
-
/* Save R1 in the PACA */
std r1, HSTATE_HOST_R1(r13)
- /* Load up DAR and DSISR */
- ld r5, VCPU_DAR(r4)
- lwz r6, VCPU_DSISR(r4)
- mtspr SPRN_DAR, r5
- mtspr SPRN_DSISR, r6
-
li r6, KVM_GUEST_MODE_HOST_HV
stb r6, HSTATE_IN_GUEST(r13)
-BEGIN_FTR_SECTION
- /* Restore AMR and UAMOR, set AMOR to all 1s */
- ld r5,VCPU_AMR(r4)
- ld r6,VCPU_UAMOR(r4)
- li r7,-1
- mtspr SPRN_AMR,r5
- mtspr SPRN_UAMOR,r6
- mtspr SPRN_AMOR,r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
/* Clear out SLB */
li r6,0
slbmte r6,r6
@@ -428,8 +361,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
bne 21b
/* Primary thread switches to guest partition. */
- ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
- lwz r6,VCPU_PTID(r4)
+ ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
+ lbz r6,HSTATE_PTID(r13)
cmpwi r6,0
bne 20f
ld r6,KVM_SDR1(r9)
@@ -457,7 +390,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
andc r7,r7,r0
stdcx. r7,0,r6
bne 23b
- li r6,128 /* and flush the TLB */
+ /* Flush the TLB of any entries for this LPID */
+ /* use arch 2.07S as a proxy for POWER8 */
+BEGIN_FTR_SECTION
+ li r6,512 /* POWER8 has 512 sets */
+FTR_SECTION_ELSE
+ li r6,128 /* POWER7 has 128 sets */
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
mtctr r6
li r7,0x800 /* IS field = 0b10 */
ptesync
@@ -487,6 +426,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
beq 38f
mtspr SPRN_PCR, r7
38:
+
+BEGIN_FTR_SECTION
+ /* DPDES is shared between threads */
+ ld r8, VCORE_DPDES(r5)
+ mtspr SPRN_DPDES, r8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
li r0,1
stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
b 10f
@@ -503,32 +449,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
mtspr SPRN_RMOR,r8
isync
- /* Increment yield count if they have a VPA */
- ld r3, VCPU_VPA(r4)
- cmpdi r3, 0
- beq 25f
- lwz r5, LPPACA_YIELDCOUNT(r3)
- addi r5, r5, 1
- stw r5, LPPACA_YIELDCOUNT(r3)
- li r6, 1
- stb r6, VCPU_VPA_DIRTY(r4)
-25:
/* Check if HDEC expires soon */
mfspr r3,SPRN_HDEC
- cmpwi r3,10
+ cmpwi r3,512 /* 1 microsecond */
li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
- mr r9,r4
blt hdec_soon
-
- /* Save purr/spurr */
- mfspr r5,SPRN_PURR
- mfspr r6,SPRN_SPURR
- std r5,HSTATE_PURR(r13)
- std r6,HSTATE_SPURR(r13)
- ld r7,VCPU_PURR(r4)
- ld r8,VCPU_SPURR(r4)
- mtspr SPRN_PURR,r7
- mtspr SPRN_SPURR,r8
b 31f
/*
@@ -539,7 +464,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
* We also have to invalidate the TLB since its
* entries aren't tagged with the LPID.
*/
-30: ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
+30: ld r5,HSTATE_KVM_VCORE(r13)
+ ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
/* first take native_tlbie_lock */
.section ".toc","aw"
@@ -604,7 +530,6 @@ toc_tlbie_lock:
mfspr r3,SPRN_HDEC
cmpwi r3,10
li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
- mr r9,r4
blt hdec_soon
/* Enable HDEC interrupts */
@@ -619,9 +544,14 @@ toc_tlbie_lock:
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
+31:
+ /* Do we have a guest vcpu to run? */
+ cmpdi r4, 0
+ beq kvmppc_primary_no_guest
+kvmppc_got_guest:
/* Load up guest SLB entries */
-31: lwz r5,VCPU_SLB_MAX(r4)
+ lwz r5,VCPU_SLB_MAX(r4)
cmpwi r5,0
beq 9f
mtctr r5
@@ -632,6 +562,209 @@ toc_tlbie_lock:
addi r6,r6,VCPU_SLB_SIZE
bdnz 1b
9:
+ /* Increment yield count if they have a VPA */
+ ld r3, VCPU_VPA(r4)
+ cmpdi r3, 0
+ beq 25f
+ lwz r5, LPPACA_YIELDCOUNT(r3)
+ addi r5, r5, 1
+ stw r5, LPPACA_YIELDCOUNT(r3)
+ li r6, 1
+ stb r6, VCPU_VPA_DIRTY(r4)
+25:
+
+BEGIN_FTR_SECTION
+ /* Save purr/spurr */
+ mfspr r5,SPRN_PURR
+ mfspr r6,SPRN_SPURR
+ std r5,HSTATE_PURR(r13)
+ std r6,HSTATE_SPURR(r13)
+ ld r7,VCPU_PURR(r4)
+ ld r8,VCPU_SPURR(r4)
+ mtspr SPRN_PURR,r7
+ mtspr SPRN_SPURR,r8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+BEGIN_FTR_SECTION
+ /* Set partition DABR */
+ /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
+ lwz r5,VCPU_DABRX(r4)
+ ld r6,VCPU_DABR(r4)
+ mtspr SPRN_DABRX,r5
+ mtspr SPRN_DABR,r6
+ BEGIN_FTR_SECTION_NESTED(89)
+ isync
+ END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+
+ /* Load guest PMU registers */
+ /* R4 is live here (vcpu pointer) */
+ li r3, 1
+ sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
+ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
+ isync
+ lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
+ lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
+ lwz r6, VCPU_PMC + 8(r4)
+ lwz r7, VCPU_PMC + 12(r4)
+ lwz r8, VCPU_PMC + 16(r4)
+ lwz r9, VCPU_PMC + 20(r4)
+BEGIN_FTR_SECTION
+ lwz r10, VCPU_PMC + 24(r4)
+ lwz r11, VCPU_PMC + 28(r4)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ mtspr SPRN_PMC1, r3
+ mtspr SPRN_PMC2, r5
+ mtspr SPRN_PMC3, r6
+ mtspr SPRN_PMC4, r7
+ mtspr SPRN_PMC5, r8
+ mtspr SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+ mtspr SPRN_PMC7, r10
+ mtspr SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ ld r3, VCPU_MMCR(r4)
+ ld r5, VCPU_MMCR + 8(r4)
+ ld r6, VCPU_MMCR + 16(r4)
+ ld r7, VCPU_SIAR(r4)
+ ld r8, VCPU_SDAR(r4)
+ mtspr SPRN_MMCR1, r5
+ mtspr SPRN_MMCRA, r6
+ mtspr SPRN_SIAR, r7
+ mtspr SPRN_SDAR, r8
+BEGIN_FTR_SECTION
+ ld r5, VCPU_MMCR + 24(r4)
+ ld r6, VCPU_SIER(r4)
+ lwz r7, VCPU_PMC + 24(r4)
+ lwz r8, VCPU_PMC + 28(r4)
+ ld r9, VCPU_MMCR + 32(r4)
+ mtspr SPRN_MMCR2, r5
+ mtspr SPRN_SIER, r6
+ mtspr SPRN_SPMC1, r7
+ mtspr SPRN_SPMC2, r8
+ mtspr SPRN_MMCRS, r9
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ mtspr SPRN_MMCR0, r3
+ isync
+
+ /* Load up FP, VMX and VSX registers */
+ bl kvmppc_load_fp
+
+ ld r14, VCPU_GPR(R14)(r4)
+ ld r15, VCPU_GPR(R15)(r4)
+ ld r16, VCPU_GPR(R16)(r4)
+ ld r17, VCPU_GPR(R17)(r4)
+ ld r18, VCPU_GPR(R18)(r4)
+ ld r19, VCPU_GPR(R19)(r4)
+ ld r20, VCPU_GPR(R20)(r4)
+ ld r21, VCPU_GPR(R21)(r4)
+ ld r22, VCPU_GPR(R22)(r4)
+ ld r23, VCPU_GPR(R23)(r4)
+ ld r24, VCPU_GPR(R24)(r4)
+ ld r25, VCPU_GPR(R25)(r4)
+ ld r26, VCPU_GPR(R26)(r4)
+ ld r27, VCPU_GPR(R27)(r4)
+ ld r28, VCPU_GPR(R28)(r4)
+ ld r29, VCPU_GPR(R29)(r4)
+ ld r30, VCPU_GPR(R30)(r4)
+ ld r31, VCPU_GPR(R31)(r4)
+
+BEGIN_FTR_SECTION
+ /* Switch DSCR to guest value */
+ ld r5, VCPU_DSCR(r4)
+ mtspr SPRN_DSCR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+BEGIN_FTR_SECTION
+ /* Skip next section on POWER7 or PPC970 */
+ b 8f
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+ /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
+ mfmsr r8
+ li r0, 1
+ rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+ mtmsrd r8
+
+ /* Load up POWER8-specific registers */
+ ld r5, VCPU_IAMR(r4)
+ lwz r6, VCPU_PSPB(r4)
+ ld r7, VCPU_FSCR(r4)
+ mtspr SPRN_IAMR, r5
+ mtspr SPRN_PSPB, r6
+ mtspr SPRN_FSCR, r7
+ ld r5, VCPU_DAWR(r4)
+ ld r6, VCPU_DAWRX(r4)
+ ld r7, VCPU_CIABR(r4)
+ ld r8, VCPU_TAR(r4)
+ mtspr SPRN_DAWR, r5
+ mtspr SPRN_DAWRX, r6
+ mtspr SPRN_CIABR, r7
+ mtspr SPRN_TAR, r8
+ ld r5, VCPU_IC(r4)
+ ld r6, VCPU_VTB(r4)
+ mtspr SPRN_IC, r5
+ mtspr SPRN_VTB, r6
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ ld r5, VCPU_TFHAR(r4)
+ ld r6, VCPU_TFIAR(r4)
+ ld r7, VCPU_TEXASR(r4)
+ mtspr SPRN_TFHAR, r5
+ mtspr SPRN_TFIAR, r6
+ mtspr SPRN_TEXASR, r7
+#endif
+ ld r8, VCPU_EBBHR(r4)
+ mtspr SPRN_EBBHR, r8
+ ld r5, VCPU_EBBRR(r4)
+ ld r6, VCPU_BESCR(r4)
+ ld r7, VCPU_CSIGR(r4)
+ ld r8, VCPU_TACR(r4)
+ mtspr SPRN_EBBRR, r5
+ mtspr SPRN_BESCR, r6
+ mtspr SPRN_CSIGR, r7
+ mtspr SPRN_TACR, r8
+ ld r5, VCPU_TCSCR(r4)
+ ld r6, VCPU_ACOP(r4)
+ lwz r7, VCPU_GUEST_PID(r4)
+ ld r8, VCPU_WORT(r4)
+ mtspr SPRN_TCSCR, r5
+ mtspr SPRN_ACOP, r6
+ mtspr SPRN_PID, r7
+ mtspr SPRN_WORT, r8
+8:
+
+ /*
+ * Set the decrementer to the guest decrementer.
+ */
+ ld r8,VCPU_DEC_EXPIRES(r4)
+ mftb r7
+ subf r3,r7,r8
+ mtspr SPRN_DEC,r3
+ stw r3,VCPU_DEC(r4)
+
+ ld r5, VCPU_SPRG0(r4)
+ ld r6, VCPU_SPRG1(r4)
+ ld r7, VCPU_SPRG2(r4)
+ ld r8, VCPU_SPRG3(r4)
+ mtspr SPRN_SPRG0, r5
+ mtspr SPRN_SPRG1, r6
+ mtspr SPRN_SPRG2, r7
+ mtspr SPRN_SPRG3, r8
+
+ /* Load up DAR and DSISR */
+ ld r5, VCPU_DAR(r4)
+ lwz r6, VCPU_DSISR(r4)
+ mtspr SPRN_DAR, r5
+ mtspr SPRN_DSISR, r6
+
+BEGIN_FTR_SECTION
+ /* Restore AMR and UAMOR, set AMOR to all 1s */
+ ld r5,VCPU_AMR(r4)
+ ld r6,VCPU_UAMOR(r4)
+ li r7,-1
+ mtspr SPRN_AMR,r5
+ mtspr SPRN_UAMOR,r6
+ mtspr SPRN_AMOR,r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Restore state of CTRL run bit; assume 1 on entry */
lwz r5,VCPU_CTRL(r4)
@@ -647,48 +780,53 @@ toc_tlbie_lock:
mtctr r6
mtxer r7
+kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
ld r10, VCPU_PC(r4)
ld r11, VCPU_MSR(r4)
-kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
ld r6, VCPU_SRR0(r4)
ld r7, VCPU_SRR1(r4)
+ mtspr SPRN_SRR0, r6
+ mtspr SPRN_SRR1, r7
+deliver_guest_interrupt:
/* r11 = vcpu->arch.msr & ~MSR_HV */
rldicl r11, r11, 63 - MSR_HV_LG, 1
rotldi r11, r11, 1 + MSR_HV_LG
ori r11, r11, MSR_ME
/* Check if we can deliver an external or decrementer interrupt now */
- ld r0,VCPU_PENDING_EXC(r4)
- lis r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
- and r0,r0,r8
- cmpdi cr1,r0,0
- andi. r0,r11,MSR_EE
- beq cr1,11f
+ ld r0, VCPU_PENDING_EXC(r4)
+ rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
+ cmpdi cr1, r0, 0
+ andi. r8, r11, MSR_EE
BEGIN_FTR_SECTION
- mfspr r8,SPRN_LPCR
- ori r8,r8,LPCR_MER
- mtspr SPRN_LPCR,r8
+ mfspr r8, SPRN_LPCR
+ /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
+ rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
+ mtspr SPRN_LPCR, r8
isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
beq 5f
- li r0,BOOK3S_INTERRUPT_EXTERNAL
-12: mr r6,r10
- mr r10,r0
- mr r7,r11
- li r11,(MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
- rotldi r11,r11,63
- b 5f
-11: beq 5f
- mfspr r0,SPRN_DEC
- cmpwi r0,0
- li r0,BOOK3S_INTERRUPT_DECREMENTER
- blt 12b
+ li r0, BOOK3S_INTERRUPT_EXTERNAL
+ bne cr1, 12f
+ mfspr r0, SPRN_DEC
+ cmpwi r0, 0
+ li r0, BOOK3S_INTERRUPT_DECREMENTER
+ bge 5f
- /* Move SRR0 and SRR1 into the respective regs */
-5: mtspr SPRN_SRR0, r6
- mtspr SPRN_SRR1, r7
+12: mtspr SPRN_SRR0, r10
+ mr r10,r0
+ mtspr SPRN_SRR1, r11
+ ld r11, VCPU_INTR_MSR(r4)
+5:
+/*
+ * Required state:
+ * R4 = vcpu
+ * R10: value for HSRR0
+ * R11: value for HSRR1
+ * R13 = PACA
+ */
fast_guest_return:
li r0,0
stb r0,VCPU_CEDED(r4) /* cancel cede */
@@ -868,39 +1006,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
/* External interrupt, first check for host_ipi. If this is
* set, we know the host wants us out so let's do it now
*/
-do_ext_interrupt:
bl kvmppc_read_intr
cmpdi r3, 0
bgt ext_interrupt_to_host
- /* Allright, looks like an IPI for the guest, we need to set MER */
/* Check if any CPU is heading out to the host, if so head out too */
ld r5, HSTATE_KVM_VCORE(r13)
lwz r0, VCORE_ENTRY_EXIT(r5)
cmpwi r0, 0x100
bge ext_interrupt_to_host
- /* See if there is a pending interrupt for the guest */
- mfspr r8, SPRN_LPCR
- ld r0, VCPU_PENDING_EXC(r9)
- /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
- rldicl. r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
- rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
- beq 2f
-
- /* And if the guest EE is set, we can deliver immediately, else
- * we return to the guest with MER set
- */
- andi. r0, r11, MSR_EE
- beq 2f
- mtspr SPRN_SRR0, r10
- mtspr SPRN_SRR1, r11
- li r10, BOOK3S_INTERRUPT_EXTERNAL
- li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
- rotldi r11, r11, 63
-2: mr r4, r9
- mtspr SPRN_LPCR, r8
- b fast_guest_return
+ /* Return to guest after delivering any pending interrupt */
+ mr r4, r9
+ b deliver_guest_interrupt
ext_interrupt_to_host:
@@ -975,13 +1093,194 @@ BEGIN_FTR_SECTION
mtspr SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
+ /* Save DEC */
+ mfspr r5,SPRN_DEC
+ mftb r6
+ extsw r5,r5
+ add r5,r5,r6
+ std r5,VCPU_DEC_EXPIRES(r9)
+
+BEGIN_FTR_SECTION
+ b 8f
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+ /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
+ mfmsr r8
+ li r0, 1
+ rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+ mtmsrd r8
+
+ /* Save POWER8-specific registers */
+ mfspr r5, SPRN_IAMR
+ mfspr r6, SPRN_PSPB
+ mfspr r7, SPRN_FSCR
+ std r5, VCPU_IAMR(r9)
+ stw r6, VCPU_PSPB(r9)
+ std r7, VCPU_FSCR(r9)
+ mfspr r5, SPRN_IC
+ mfspr r6, SPRN_VTB
+ mfspr r7, SPRN_TAR
+ std r5, VCPU_IC(r9)
+ std r6, VCPU_VTB(r9)
+ std r7, VCPU_TAR(r9)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ mfspr r5, SPRN_TFHAR
+ mfspr r6, SPRN_TFIAR
+ mfspr r7, SPRN_TEXASR
+ std r5, VCPU_TFHAR(r9)
+ std r6, VCPU_TFIAR(r9)
+ std r7, VCPU_TEXASR(r9)
+#endif
+ mfspr r8, SPRN_EBBHR
+ std r8, VCPU_EBBHR(r9)
+ mfspr r5, SPRN_EBBRR
+ mfspr r6, SPRN_BESCR
+ mfspr r7, SPRN_CSIGR
+ mfspr r8, SPRN_TACR
+ std r5, VCPU_EBBRR(r9)
+ std r6, VCPU_BESCR(r9)
+ std r7, VCPU_CSIGR(r9)
+ std r8, VCPU_TACR(r9)
+ mfspr r5, SPRN_TCSCR
+ mfspr r6, SPRN_ACOP
+ mfspr r7, SPRN_PID
+ mfspr r8, SPRN_WORT
+ std r5, VCPU_TCSCR(r9)
+ std r6, VCPU_ACOP(r9)
+ stw r7, VCPU_GUEST_PID(r9)
+ std r8, VCPU_WORT(r9)
+8:
+
+ /* Save and reset AMR and UAMOR before turning on the MMU */
+BEGIN_FTR_SECTION
+ mfspr r5,SPRN_AMR
+ mfspr r6,SPRN_UAMOR
+ std r5,VCPU_AMR(r9)
+ std r6,VCPU_UAMOR(r9)
+ li r6,0
+ mtspr SPRN_AMR,r6
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* Switch DSCR back to host value */
+BEGIN_FTR_SECTION
+ mfspr r8, SPRN_DSCR
+ ld r7, HSTATE_DSCR(r13)
+ std r8, VCPU_DSCR(r9)
+ mtspr SPRN_DSCR, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* Save non-volatile GPRs */
+ std r14, VCPU_GPR(R14)(r9)
+ std r15, VCPU_GPR(R15)(r9)
+ std r16, VCPU_GPR(R16)(r9)
+ std r17, VCPU_GPR(R17)(r9)
+ std r18, VCPU_GPR(R18)(r9)
+ std r19, VCPU_GPR(R19)(r9)
+ std r20, VCPU_GPR(R20)(r9)
+ std r21, VCPU_GPR(R21)(r9)
+ std r22, VCPU_GPR(R22)(r9)
+ std r23, VCPU_GPR(R23)(r9)
+ std r24, VCPU_GPR(R24)(r9)
+ std r25, VCPU_GPR(R25)(r9)
+ std r26, VCPU_GPR(R26)(r9)
+ std r27, VCPU_GPR(R27)(r9)
+ std r28, VCPU_GPR(R28)(r9)
+ std r29, VCPU_GPR(R29)(r9)
+ std r30, VCPU_GPR(R30)(r9)
+ std r31, VCPU_GPR(R31)(r9)
+
+ /* Save SPRGs */
+ mfspr r3, SPRN_SPRG0
+ mfspr r4, SPRN_SPRG1
+ mfspr r5, SPRN_SPRG2
+ mfspr r6, SPRN_SPRG3
+ std r3, VCPU_SPRG0(r9)
+ std r4, VCPU_SPRG1(r9)
+ std r5, VCPU_SPRG2(r9)
+ std r6, VCPU_SPRG3(r9)
+
+ /* save FP state */
+ mr r3, r9
+ bl kvmppc_save_fp
+
+ /* Increment yield count if they have a VPA */
+ ld r8, VCPU_VPA(r9) /* do they have a VPA? */
+ cmpdi r8, 0
+ beq 25f
+ lwz r3, LPPACA_YIELDCOUNT(r8)
+ addi r3, r3, 1
+ stw r3, LPPACA_YIELDCOUNT(r8)
+ li r3, 1
+ stb r3, VCPU_VPA_DIRTY(r9)
+25:
+ /* Save PMU registers if requested */
+ /* r8 and cr0.eq are live here */
+ li r3, 1
+ sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
+ mfspr r4, SPRN_MMCR0 /* save MMCR0 */
+ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
+ mfspr r6, SPRN_MMCRA
+BEGIN_FTR_SECTION
+ /* On P7, clear MMCRA in order to disable SDAR updates */
+ li r7, 0
+ mtspr SPRN_MMCRA, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+ isync
+ beq 21f /* if no VPA, save PMU stuff anyway */
+ lbz r7, LPPACA_PMCINUSE(r8)
+ cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
+ bne 21f
+ std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
+ b 22f
+21: mfspr r5, SPRN_MMCR1
+ mfspr r7, SPRN_SIAR
+ mfspr r8, SPRN_SDAR
+ std r4, VCPU_MMCR(r9)
+ std r5, VCPU_MMCR + 8(r9)
+ std r6, VCPU_MMCR + 16(r9)
+ std r7, VCPU_SIAR(r9)
+ std r8, VCPU_SDAR(r9)
+ mfspr r3, SPRN_PMC1
+ mfspr r4, SPRN_PMC2
+ mfspr r5, SPRN_PMC3
+ mfspr r6, SPRN_PMC4
+ mfspr r7, SPRN_PMC5
+ mfspr r8, SPRN_PMC6
+BEGIN_FTR_SECTION
+ mfspr r10, SPRN_PMC7
+ mfspr r11, SPRN_PMC8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ stw r3, VCPU_PMC(r9)
+ stw r4, VCPU_PMC + 4(r9)
+ stw r5, VCPU_PMC + 8(r9)
+ stw r6, VCPU_PMC + 12(r9)
+ stw r7, VCPU_PMC + 16(r9)
+ stw r8, VCPU_PMC + 20(r9)
+BEGIN_FTR_SECTION
+ stw r10, VCPU_PMC + 24(r9)
+ stw r11, VCPU_PMC + 28(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+BEGIN_FTR_SECTION
+ mfspr r4, SPRN_MMCR2
+ mfspr r5, SPRN_SIER
+ mfspr r6, SPRN_SPMC1
+ mfspr r7, SPRN_SPMC2
+ mfspr r8, SPRN_MMCRS
+ std r4, VCPU_MMCR + 24(r9)
+ std r5, VCPU_SIER(r9)
+ stw r6, VCPU_PMC + 24(r9)
+ stw r7, VCPU_PMC + 28(r9)
+ std r8, VCPU_MMCR + 32(r9)
+ lis r4, 0x8000
+ mtspr SPRN_MMCRS, r4
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+22:
/* Clear out SLB */
li r5,0
slbmte r5,r5
slbia
ptesync
-hdec_soon: /* r9 = vcpu, r12 = trap, r13 = paca */
+hdec_soon: /* r12 = trap, r13 = paca */
BEGIN_FTR_SECTION
b 32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
@@ -1014,8 +1313,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
*/
cmpwi r3,0x100 /* Are we the first here? */
bge 43f
- cmpwi r3,1 /* Are any other threads in the guest? */
- ble 43f
cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
beq 40f
li r0,0
@@ -1026,7 +1323,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
* doesn't wake CPUs up from nap.
*/
lwz r3,VCORE_NAPPING_THREADS(r5)
- lwz r4,VCPU_PTID(r9)
+ lbz r4,HSTATE_PTID(r13)
li r0,1
sld r0,r0,r4
andc. r3,r3,r0 /* no sense IPI'ing ourselves */
@@ -1045,10 +1342,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
addi r6,r6,PACA_SIZE
bne 42b
+secondary_too_late:
/* Secondary threads wait for primary to do partition switch */
-43: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */
- ld r5,HSTATE_KVM_VCORE(r13)
- lwz r3,VCPU_PTID(r9)
+43: ld r5,HSTATE_KVM_VCORE(r13)
+ ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
+ lbz r3,HSTATE_PTID(r13)
cmpwi r3,0
beq 15f
HMT_LOW
@@ -1076,6 +1374,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
mtspr SPRN_LPID,r7
isync
+BEGIN_FTR_SECTION
+ /* DPDES is shared between threads */
+ mfspr r7, SPRN_DPDES
+ std r7, VCORE_DPDES(r5)
+ /* clear DPDES so we don't get guest doorbells in the host */
+ li r8, 0
+ mtspr SPRN_DPDES, r8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
/* Subtract timebase offset from timebase */
ld r8,VCORE_TB_OFFSET(r5)
cmpdi r8,0
@@ -1113,7 +1420,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
* We have to lock against concurrent tlbies, and
* we have to flush the whole TLB.
*/
-32: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */
+32: ld r5,HSTATE_KVM_VCORE(r13)
+ ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
/* Take the guest's tlbie_lock */
#ifdef __BIG_ENDIAN__
@@ -1203,6 +1511,56 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
add r5,r5,r6
std r5,VCPU_DEC_EXPIRES(r9)
+BEGIN_FTR_SECTION
+ b 8f
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+ /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
+ mfmsr r8
+ li r0, 1
+ rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+ mtmsrd r8
+
+ /* Save POWER8-specific registers */
+ mfspr r5, SPRN_IAMR
+ mfspr r6, SPRN_PSPB
+ mfspr r7, SPRN_FSCR
+ std r5, VCPU_IAMR(r9)
+ stw r6, VCPU_PSPB(r9)
+ std r7, VCPU_FSCR(r9)
+ mfspr r5, SPRN_IC
+ mfspr r6, SPRN_VTB
+ mfspr r7, SPRN_TAR
+ std r5, VCPU_IC(r9)
+ std r6, VCPU_VTB(r9)
+ std r7, VCPU_TAR(r9)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ mfspr r5, SPRN_TFHAR
+ mfspr r6, SPRN_TFIAR
+ mfspr r7, SPRN_TEXASR
+ std r5, VCPU_TFHAR(r9)
+ std r6, VCPU_TFIAR(r9)
+ std r7, VCPU_TEXASR(r9)
+#endif
+ mfspr r8, SPRN_EBBHR
+ std r8, VCPU_EBBHR(r9)
+ mfspr r5, SPRN_EBBRR
+ mfspr r6, SPRN_BESCR
+ mfspr r7, SPRN_CSIGR
+ mfspr r8, SPRN_TACR
+ std r5, VCPU_EBBRR(r9)
+ std r6, VCPU_BESCR(r9)
+ std r7, VCPU_CSIGR(r9)
+ std r8, VCPU_TACR(r9)
+ mfspr r5, SPRN_TCSCR
+ mfspr r6, SPRN_ACOP
+ mfspr r7, SPRN_PID
+ mfspr r8, SPRN_WORT
+ std r5, VCPU_TCSCR(r9)
+ std r6, VCPU_ACOP(r9)
+ stw r7, VCPU_GUEST_PID(r9)
+ std r8, VCPU_WORT(r9)
+8:
+
/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
mfspr r5,SPRN_AMR
@@ -1217,130 +1575,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
- /* Switch DSCR back to host value */
-BEGIN_FTR_SECTION
- mfspr r8, SPRN_DSCR
- ld r7, HSTATE_DSCR(r13)
- std r8, VCPU_DSCR(r9)
- mtspr SPRN_DSCR, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
- /* Save non-volatile GPRs */
- std r14, VCPU_GPR(R14)(r9)
- std r15, VCPU_GPR(R15)(r9)
- std r16, VCPU_GPR(R16)(r9)
- std r17, VCPU_GPR(R17)(r9)
- std r18, VCPU_GPR(R18)(r9)
- std r19, VCPU_GPR(R19)(r9)
- std r20, VCPU_GPR(R20)(r9)
- std r21, VCPU_GPR(R21)(r9)
- std r22, VCPU_GPR(R22)(r9)
- std r23, VCPU_GPR(R23)(r9)
- std r24, VCPU_GPR(R24)(r9)
- std r25, VCPU_GPR(R25)(r9)
- std r26, VCPU_GPR(R26)(r9)
- std r27, VCPU_GPR(R27)(r9)
- std r28, VCPU_GPR(R28)(r9)
- std r29, VCPU_GPR(R29)(r9)
- std r30, VCPU_GPR(R30)(r9)
- std r31, VCPU_GPR(R31)(r9)
-
- /* Save SPRGs */
- mfspr r3, SPRN_SPRG0
- mfspr r4, SPRN_SPRG1
- mfspr r5, SPRN_SPRG2
- mfspr r6, SPRN_SPRG3
- std r3, VCPU_SPRG0(r9)
- std r4, VCPU_SPRG1(r9)
- std r5, VCPU_SPRG2(r9)
- std r6, VCPU_SPRG3(r9)
-
- /* save FP state */
- mr r3, r9
- bl .kvmppc_save_fp
-
- /* Increment yield count if they have a VPA */
- ld r8, VCPU_VPA(r9) /* do they have a VPA? */
- cmpdi r8, 0
- beq 25f
- lwz r3, LPPACA_YIELDCOUNT(r8)
- addi r3, r3, 1
- stw r3, LPPACA_YIELDCOUNT(r8)
- li r3, 1
- stb r3, VCPU_VPA_DIRTY(r9)
-25:
- /* Save PMU registers if requested */
- /* r8 and cr0.eq are live here */
- li r3, 1
- sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
- mfspr r4, SPRN_MMCR0 /* save MMCR0 */
- mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
- mfspr r6, SPRN_MMCRA
-BEGIN_FTR_SECTION
- /* On P7, clear MMCRA in order to disable SDAR updates */
- li r7, 0
- mtspr SPRN_MMCRA, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
- isync
- beq 21f /* if no VPA, save PMU stuff anyway */
- lbz r7, LPPACA_PMCINUSE(r8)
- cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
- bne 21f
- std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
- b 22f
-21: mfspr r5, SPRN_MMCR1
- mfspr r7, SPRN_SIAR
- mfspr r8, SPRN_SDAR
- std r4, VCPU_MMCR(r9)
- std r5, VCPU_MMCR + 8(r9)
- std r6, VCPU_MMCR + 16(r9)
- std r7, VCPU_SIAR(r9)
- std r8, VCPU_SDAR(r9)
- mfspr r3, SPRN_PMC1
- mfspr r4, SPRN_PMC2
- mfspr r5, SPRN_PMC3
- mfspr r6, SPRN_PMC4
- mfspr r7, SPRN_PMC5
- mfspr r8, SPRN_PMC6
-BEGIN_FTR_SECTION
- mfspr r10, SPRN_PMC7
- mfspr r11, SPRN_PMC8
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
- stw r3, VCPU_PMC(r9)
- stw r4, VCPU_PMC + 4(r9)
- stw r5, VCPU_PMC + 8(r9)
- stw r6, VCPU_PMC + 12(r9)
- stw r7, VCPU_PMC + 16(r9)
- stw r8, VCPU_PMC + 20(r9)
-BEGIN_FTR_SECTION
- stw r10, VCPU_PMC + 24(r9)
- stw r11, VCPU_PMC + 28(r9)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-22:
ld r0, 112+PPC_LR_STKOFF(r1)
addi r1, r1, 112
mtlr r0
blr
-secondary_too_late:
- ld r5,HSTATE_KVM_VCORE(r13)
- HMT_LOW
-13: lbz r3,VCORE_IN_GUEST(r5)
- cmpwi r3,0
- bne 13b
- HMT_MEDIUM
- li r0, KVM_GUEST_MODE_NONE
- stb r0, HSTATE_IN_GUEST(r13)
- ld r11,PACA_SLBSHADOWPTR(r13)
-
- .rept SLB_NUM_BOLTED
- ld r5,SLBSHADOW_SAVEAREA(r11)
- ld r6,SLBSHADOW_SAVEAREA+8(r11)
- andis. r7,r5,SLB_ESID_V@h
- beq 1f
- slbmte r6,r5
-1: addi r11,r11,16
- .endr
- b 22b
/*
* Check whether an HDSI is an HPTE not found fault or something else.
@@ -1386,8 +1624,7 @@ kvmppc_hdsi:
mtspr SPRN_SRR0, r10
mtspr SPRN_SRR1, r11
li r10, BOOK3S_INTERRUPT_DATA_STORAGE
- li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
- rotldi r11, r11, 63
+ ld r11, VCPU_INTR_MSR(r9)
fast_interrupt_c_return:
6: ld r7, VCPU_CTR(r9)
lwz r8, VCPU_XER(r9)
@@ -1456,8 +1693,7 @@ kvmppc_hisi:
1: mtspr SPRN_SRR0, r10
mtspr SPRN_SRR1, r11
li r10, BOOK3S_INTERRUPT_INST_STORAGE
- li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
- rotldi r11, r11, 63
+ ld r11, VCPU_INTR_MSR(r9)
b fast_interrupt_c_return
3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
@@ -1474,7 +1710,8 @@ kvmppc_hisi:
hcall_try_real_mode:
ld r3,VCPU_GPR(R3)(r9)
andi. r0,r11,MSR_PR
- bne guest_exit_cont
+ /* sc 1 from userspace - reflect to guest syscall */
+ bne sc_1_fast_return
clrrdi r3,r3,2
cmpldi r3,hcall_real_table_end - hcall_real_table
bge guest_exit_cont
@@ -1495,6 +1732,14 @@ hcall_try_real_mode:
ld r11,VCPU_MSR(r4)
b fast_guest_return
+sc_1_fast_return:
+ mtspr SPRN_SRR0,r10
+ mtspr SPRN_SRR1,r11
+ li r10, BOOK3S_INTERRUPT_SYSCALL
+ ld r11, VCPU_INTR_MSR(r9)
+ mr r4,r9
+ b fast_guest_return
+
/* We've attempted a real mode hcall, but it's punted it back
* to userspace. We need to restore some clobbered volatiles
* before resuming the pass-it-to-qemu path */
@@ -1588,14 +1833,34 @@ hcall_real_table:
.long 0 /* 0x11c */
.long 0 /* 0x120 */
.long .kvmppc_h_bulk_remove - hcall_real_table
+ .long 0 /* 0x128 */
+ .long 0 /* 0x12c */
+ .long 0 /* 0x130 */
+ .long .kvmppc_h_set_xdabr - hcall_real_table
hcall_real_table_end:
ignore_hdec:
mr r4,r9
b fast_guest_return
+_GLOBAL(kvmppc_h_set_xdabr)
+ andi. r0, r5, DABRX_USER | DABRX_KERNEL
+ beq 6f
+ li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
+ andc. r0, r5, r0
+ beq 3f
+6: li r3, H_PARAMETER
+ blr
+
_GLOBAL(kvmppc_h_set_dabr)
+ li r5, DABRX_USER | DABRX_KERNEL
+3:
+BEGIN_FTR_SECTION
+ b 2f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
std r4,VCPU_DABR(r3)
+ stw r5, VCPU_DABRX(r3)
+ mtspr SPRN_DABRX, r5
/* Work around P7 bug where DABR can get corrupted on mtspr */
1: mtspr SPRN_DABR,r4
mfspr r5, SPRN_DABR
@@ -1605,6 +1870,17 @@ _GLOBAL(kvmppc_h_set_dabr)
li r3,0
blr
+ /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
+2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
+ rlwimi r5, r4, 1, DAWRX_WT
+ clrrdi r4, r4, 3
+ std r4, VCPU_DAWR(r3)
+ std r5, VCPU_DAWRX(r3)
+ mtspr SPRN_DAWR, r4
+ mtspr SPRN_DAWRX, r5
+ li r3, 0
+ blr
+
_GLOBAL(kvmppc_h_cede)
ori r11,r11,MSR_EE
std r11,VCPU_MSR(r3)
@@ -1628,7 +1904,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
* up to the host.
*/
ld r5,HSTATE_KVM_VCORE(r13)
- lwz r6,VCPU_PTID(r3)
+ lbz r6,HSTATE_PTID(r13)
lwz r8,VCORE_ENTRY_EXIT(r5)
clrldi r8,r8,56
li r0,1
@@ -1643,9 +1919,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
bne 31b
/* order napping_threads update vs testing entry_exit_count */
isync
- li r0,1
+ li r0,NAPPING_CEDE
stb r0,HSTATE_NAPPING(r13)
- mr r4,r3
lwz r7,VCORE_ENTRY_EXIT(r5)
cmpwi r7,0x100
bge 33f /* another thread already exiting */
@@ -1677,16 +1952,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
std r31, VCPU_GPR(R31)(r3)
/* save FP state */
- bl .kvmppc_save_fp
+ bl kvmppc_save_fp
/*
- * Take a nap until a decrementer or external interrupt occurs,
- * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
+ * Take a nap until a decrementer or external or doorbell interrupt
+ * occurs, with PECE1, PECE0 and PECEDP set in LPCR
*/
li r0,1
stb r0,HSTATE_HWTHREAD_REQ(r13)
mfspr r5,SPRN_LPCR
ori r5,r5,LPCR_PECE0 | LPCR_PECE1
+BEGIN_FTR_SECTION
+ oris r5,r5,LPCR_PECEDP@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mtspr SPRN_LPCR,r5
isync
li r0, 0
@@ -1698,6 +1976,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
nap
b .
+33: mr r4, r3
+ li r3, 0
+ li r12, 0
+ b 34f
+
kvm_end_cede:
/* get vcpu pointer */
ld r4, HSTATE_KVM_VCPU(r13)
@@ -1727,12 +2010,15 @@ kvm_end_cede:
ld r29, VCPU_GPR(R29)(r4)
ld r30, VCPU_GPR(R30)(r4)
ld r31, VCPU_GPR(R31)(r4)
+
+ /* Check the wake reason in SRR1 to see why we got here */
+ bl kvmppc_check_wake_reason
/* clear our bit in vcore->napping_threads */
-33: ld r5,HSTATE_KVM_VCORE(r13)
- lwz r3,VCPU_PTID(r4)
+34: ld r5,HSTATE_KVM_VCORE(r13)
+ lbz r7,HSTATE_PTID(r13)
li r0,1
- sld r0,r0,r3
+ sld r0,r0,r7
addi r6,r5,VCORE_NAPPING_THREADS
32: lwarx r7,0,r6
andc r7,r7,r0
@@ -1741,23 +2027,18 @@ kvm_end_cede:
li r0,0
stb r0,HSTATE_NAPPING(r13)
- /* Check the wake reason in SRR1 to see why we got here */
- mfspr r3, SPRN_SRR1
- rlwinm r3, r3, 44-31, 0x7 /* extract wake reason field */
- cmpwi r3, 4 /* was it an external interrupt? */
- li r12, BOOK3S_INTERRUPT_EXTERNAL
+ /* See if the wake reason means we need to exit */
+ stw r12, VCPU_TRAP(r4)
mr r9, r4
- ld r10, VCPU_PC(r9)
- ld r11, VCPU_MSR(r9)
- beq do_ext_interrupt /* if so */
+ cmpdi r3, 0
+ bgt guest_exit_cont
/* see if any other thread is already exiting */
lwz r0,VCORE_ENTRY_EXIT(r5)
cmpwi r0,0x100
- blt kvmppc_cede_reentry /* if not go back to guest */
+ bge guest_exit_cont
- /* some threads are exiting, so go to the guest exit path */
- b hcall_real_fallback
+ b kvmppc_cede_reentry /* if not go back to guest */
/* cede when already previously prodded case */
kvm_cede_prodded:
@@ -1783,11 +2064,48 @@ machine_check_realmode:
beq mc_cont
/* If not, deliver a machine check. SRR0/1 are already set */
li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
- li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
- rotldi r11, r11, 63
+ ld r11, VCPU_INTR_MSR(r9)
b fast_interrupt_c_return
/*
+ * Check the reason we woke from nap, and take appropriate action.
+ * Returns:
+ * 0 if nothing needs to be done
+ * 1 if something happened that needs to be handled by the host
+ * -1 if there was a guest wakeup (IPI)
+ *
+ * Also sets r12 to the interrupt vector for any interrupt that needs
+ * to be handled now by the host (0x500 for external interrupt), or zero.
+ */
+kvmppc_check_wake_reason:
+ mfspr r6, SPRN_SRR1
+BEGIN_FTR_SECTION
+ rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
+FTR_SECTION_ELSE
+ rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
+ cmpwi r6, 8 /* was it an external interrupt? */
+ li r12, BOOK3S_INTERRUPT_EXTERNAL
+ beq kvmppc_read_intr /* if so, see what it was */
+ li r3, 0
+ li r12, 0
+ cmpwi r6, 6 /* was it the decrementer? */
+ beq 0f
+BEGIN_FTR_SECTION
+ cmpwi r6, 5 /* privileged doorbell? */
+ beq 0f
+ cmpwi r6, 3 /* hypervisor doorbell? */
+ beq 3f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ li r3, 1 /* anything else, return 1 */
+0: blr
+
+ /* hypervisor doorbell */
+3: li r12, BOOK3S_INTERRUPT_H_DOORBELL
+ li r3, 1
+ blr
+
+/*
* Determine what sort of external interrupt is pending (if any).
* Returns:
* 0 if no interrupt is pending
@@ -1818,7 +2136,6 @@ kvmppc_read_intr:
* interrupts directly to the guest
*/
cmpwi r3, XICS_IPI /* if there is, is it an IPI? */
- li r3, 1
bne 42f
/* It's an IPI, clear the MFRR and EOI it */
@@ -1844,19 +2161,25 @@ kvmppc_read_intr:
* before exit, it will be picked up by the host ICP driver
*/
stw r0, HSTATE_SAVED_XIRR(r13)
+ li r3, 1
b 1b
43: /* We raced with the host, we need to resend that IPI, bummer */
li r0, IPI_PRIORITY
stbcix r0, r6, r8 /* set the IPI */
sync
+ li r3, 1
b 1b
/*
* Save away FP, VMX and VSX registers.
* r3 = vcpu pointer
+ * N.B. r30 and r31 are volatile across this function,
+ * thus it is not callable from C.
*/
-_GLOBAL(kvmppc_save_fp)
+kvmppc_save_fp:
+ mflr r30
+ mr r31,r3
mfmsr r5
ori r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
@@ -1871,42 +2194,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
mtmsrd r8
isync
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
- reg = 0
- .rept 32
- li r6,reg*16+VCPU_VSRS
- STXVD2X(reg,R6,R3)
- reg = reg + 1
- .endr
-FTR_SECTION_ELSE
-#endif
- reg = 0
- .rept 32
- stfd reg,reg*8+VCPU_FPRS(r3)
- reg = reg + 1
- .endr
-#ifdef CONFIG_VSX
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
-#endif
- mffs fr0
- stfd fr0,VCPU_FPSCR(r3)
-
+ addi r3,r3,VCPU_FPRS
+ bl .store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
- reg = 0
- .rept 32
- li r6,reg*16+VCPU_VRS
- stvx reg,r6,r3
- reg = reg + 1
- .endr
- mfvscr vr0
- li r6,VCPU_VSCR
- stvx vr0,r6,r3
+ addi r3,r31,VCPU_VRS
+ bl .store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
mfspr r6,SPRN_VRSAVE
stw r6,VCPU_VRSAVE(r3)
+ mtlr r30
mtmsrd r5
isync
blr
@@ -1914,9 +2212,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/*
* Load up FP, VMX and VSX registers
* r4 = vcpu pointer
+ * N.B. r30 and r31 are volatile across this function,
+ * thus it is not callable from C.
*/
- .globl kvmppc_load_fp
kvmppc_load_fp:
+ mflr r30
+ mr r31,r4
mfmsr r9
ori r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
@@ -1931,42 +2232,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
mtmsrd r8
isync
- lfd fr0,VCPU_FPSCR(r4)
- MTFSF_L(fr0)
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
- reg = 0
- .rept 32
- li r7,reg*16+VCPU_VSRS
- LXVD2X(reg,R7,R4)
- reg = reg + 1
- .endr
-FTR_SECTION_ELSE
-#endif
- reg = 0
- .rept 32
- lfd reg,reg*8+VCPU_FPRS(r4)
- reg = reg + 1
- .endr
-#ifdef CONFIG_VSX
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
-#endif
-
+ addi r3,r4,VCPU_FPRS
+ bl .load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
- li r7,VCPU_VSCR
- lvx vr0,r7,r4
- mtvscr vr0
- reg = 0
- .rept 32
- li r7,reg*16+VCPU_VRS
- lvx reg,r7,r4
- reg = reg + 1
- .endr
+ addi r3,r31,VCPU_VRS
+ bl .load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
lwz r7,VCPU_VRSAVE(r4)
mtspr SPRN_VRSAVE,r7
+ mtlr r30
+ mr r4,r31
blr
/*
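The new kvmppc_check_wake_reason helper above condenses the nap wake-up handling into a single decision point. The following C fragment is only an illustrative rendering of that decision tree; the numeric reason codes are taken from the comparisons in the assembly, while the structure, names and the hypervisor doorbell vector value are assumptions rather than kernel definitions.

/* Illustrative sketch of the wake-reason decision made in assembly above.
 * 'reason' is assumed to be the already-extracted SRR1 wake-reason field. */
struct wake_action {
	int host_must_handle;	/* 0: resume the guest, 1: exit to the host */
	int trap;		/* interrupt vector to record, or 0 */
};

static struct wake_action check_wake_reason(unsigned int reason)
{
	struct wake_action a = { 0, 0 };

	switch (reason) {
	case 8:				/* external interrupt */
		a.host_must_handle = 1;
		a.trap = 0x500;		/* then go poll the XICS */
		break;
	case 6:				/* decrementer */
	case 5:				/* privileged doorbell (POWER8 only) */
		break;			/* nothing further to do */
	case 3:				/* hypervisor doorbell (POWER8 only) */
		a.host_must_handle = 1;
		a.trap = 0xe80;		/* assumed doorbell vector number */
		break;
	default:
		a.host_must_handle = 1;	/* anything else goes to the host */
	}
	return a;
}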
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index a59a25a1321..c1abd95063f 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -160,7 +160,7 @@
static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
{
- kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt]);
+ kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]);
}
static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
@@ -207,11 +207,11 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* put in registers */
switch (ls_type) {
case FPU_LS_SINGLE:
- kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs]);
+ kvm_cvt_fd((u32*)tmp, &VCPU_FPR(vcpu, rs));
vcpu->arch.qpr[rs] = *((u32*)tmp);
break;
case FPU_LS_DOUBLE:
- vcpu->arch.fpr[rs] = *((u64*)tmp);
+ VCPU_FPR(vcpu, rs) = *((u64*)tmp);
break;
}
@@ -233,18 +233,18 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
switch (ls_type) {
case FPU_LS_SINGLE:
- kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp);
+ kvm_cvt_df(&VCPU_FPR(vcpu, rs), (u32*)tmp);
val = *((u32*)tmp);
len = sizeof(u32);
break;
case FPU_LS_SINGLE_LOW:
- *((u32*)tmp) = vcpu->arch.fpr[rs];
- val = vcpu->arch.fpr[rs] & 0xffffffff;
+ *((u32*)tmp) = VCPU_FPR(vcpu, rs);
+ val = VCPU_FPR(vcpu, rs) & 0xffffffff;
len = sizeof(u32);
break;
case FPU_LS_DOUBLE:
- *((u64*)tmp) = vcpu->arch.fpr[rs];
- val = vcpu->arch.fpr[rs];
+ *((u64*)tmp) = VCPU_FPR(vcpu, rs);
+ val = VCPU_FPR(vcpu, rs);
len = sizeof(u64);
break;
default:
@@ -301,7 +301,7 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
emulated = EMULATE_DONE;
/* put in registers */
- kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs]);
+ kvm_cvt_fd(&tmp[0], &VCPU_FPR(vcpu, rs));
vcpu->arch.qpr[rs] = tmp[1];
dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
@@ -319,7 +319,7 @@ static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
u32 tmp[2];
int len = w ? sizeof(u32) : sizeof(u64);
- kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0]);
+ kvm_cvt_df(&VCPU_FPR(vcpu, rs), &tmp[0]);
tmp[1] = vcpu->arch.qpr[rs];
r = kvmppc_st(vcpu, &addr, len, tmp, true);
@@ -512,7 +512,6 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
u32 *src2, u32 *src3))
{
u32 *qpr = vcpu->arch.qpr;
- u64 *fpr = vcpu->arch.fpr;
u32 ps0_out;
u32 ps0_in1, ps0_in2, ps0_in3;
u32 ps1_in1, ps1_in2, ps1_in3;
@@ -521,20 +520,20 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
WARN_ON(rc);
/* PS0 */
- kvm_cvt_df(&fpr[reg_in1], &ps0_in1);
- kvm_cvt_df(&fpr[reg_in2], &ps0_in2);
- kvm_cvt_df(&fpr[reg_in3], &ps0_in3);
+ kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);
+ kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);
+ kvm_cvt_df(&VCPU_FPR(vcpu, reg_in3), &ps0_in3);
if (scalar & SCALAR_LOW)
ps0_in2 = qpr[reg_in2];
- func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
+ func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
ps0_in1, ps0_in2, ps0_in3, ps0_out);
if (!(scalar & SCALAR_NO_PS0))
- kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
+ kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
/* PS1 */
ps1_in1 = qpr[reg_in1];
@@ -545,7 +544,7 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
ps1_in2 = ps0_in2;
if (!(scalar & SCALAR_NO_PS1))
- func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
+ func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);
@@ -561,7 +560,6 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
u32 *src2))
{
u32 *qpr = vcpu->arch.qpr;
- u64 *fpr = vcpu->arch.fpr;
u32 ps0_out;
u32 ps0_in1, ps0_in2;
u32 ps1_out;
@@ -571,20 +569,20 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
WARN_ON(rc);
/* PS0 */
- kvm_cvt_df(&fpr[reg_in1], &ps0_in1);
+ kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);
if (scalar & SCALAR_LOW)
ps0_in2 = qpr[reg_in2];
else
- kvm_cvt_df(&fpr[reg_in2], &ps0_in2);
+ kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);
- func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2);
+ func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2);
if (!(scalar & SCALAR_NO_PS0)) {
dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
ps0_in1, ps0_in2, ps0_out);
- kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
+ kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
}
/* PS1 */
@@ -594,7 +592,7 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
if (scalar & SCALAR_HIGH)
ps1_in2 = ps0_in2;
- func(&vcpu->arch.fpscr, &ps1_out, &ps1_in1, &ps1_in2);
+ func(&vcpu->arch.fp.fpscr, &ps1_out, &ps1_in1, &ps1_in2);
if (!(scalar & SCALAR_NO_PS1)) {
qpr[reg_out] = ps1_out;
@@ -612,7 +610,6 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
u32 *dst, u32 *src1))
{
u32 *qpr = vcpu->arch.qpr;
- u64 *fpr = vcpu->arch.fpr;
u32 ps0_out, ps0_in;
u32 ps1_in;
@@ -620,17 +617,17 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
WARN_ON(rc);
/* PS0 */
- kvm_cvt_df(&fpr[reg_in], &ps0_in);
- func(&vcpu->arch.fpscr, &ps0_out, &ps0_in);
+ kvm_cvt_df(&VCPU_FPR(vcpu, reg_in), &ps0_in);
+ func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in);
dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
ps0_in, ps0_out);
- kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
+ kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
/* PS1 */
ps1_in = qpr[reg_in];
- func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in);
+ func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in);
dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n",
ps1_in, qpr[reg_out]);
@@ -649,10 +646,10 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int ax_rc = inst_get_field(inst, 21, 25);
short full_d = inst_get_field(inst, 16, 31);
- u64 *fpr_d = &vcpu->arch.fpr[ax_rd];
- u64 *fpr_a = &vcpu->arch.fpr[ax_ra];
- u64 *fpr_b = &vcpu->arch.fpr[ax_rb];
- u64 *fpr_c = &vcpu->arch.fpr[ax_rc];
+ u64 *fpr_d = &VCPU_FPR(vcpu, ax_rd);
+ u64 *fpr_a = &VCPU_FPR(vcpu, ax_ra);
+ u64 *fpr_b = &VCPU_FPR(vcpu, ax_rb);
+ u64 *fpr_c = &VCPU_FPR(vcpu, ax_rc);
bool rcomp = (inst & 1) ? true : false;
u32 cr = kvmppc_get_cr(vcpu);
@@ -674,11 +671,11 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* Do we need to clear FE0 / FE1 here? Don't think so. */
#ifdef DEBUG
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
u32 f;
- kvm_cvt_df(&vcpu->arch.fpr[i], &f);
+ kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
- i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]);
+ i, f, VCPU_FPR(vcpu, i), i, vcpu->arch.qpr[i]);
}
#endif
@@ -764,8 +761,8 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
}
case OP_4X_PS_NEG:
- vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
- vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL;
+ VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
+ VCPU_FPR(vcpu, ax_rd) ^= 0x8000000000000000ULL;
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
vcpu->arch.qpr[ax_rd] ^= 0x80000000;
break;
@@ -775,7 +772,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_4X_PS_MR:
WARN_ON(rcomp);
- vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
+ VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
break;
case OP_4X_PS_CMPO1:
@@ -784,44 +781,44 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_4X_PS_NABS:
WARN_ON(rcomp);
- vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
- vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL;
+ VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
+ VCPU_FPR(vcpu, ax_rd) |= 0x8000000000000000ULL;
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
vcpu->arch.qpr[ax_rd] |= 0x80000000;
break;
case OP_4X_PS_ABS:
WARN_ON(rcomp);
- vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
- vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL;
+ VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
+ VCPU_FPR(vcpu, ax_rd) &= ~0x8000000000000000ULL;
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
vcpu->arch.qpr[ax_rd] &= ~0x80000000;
break;
case OP_4X_PS_MERGE00:
WARN_ON(rcomp);
- vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
- /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
- kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
+ VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
+ /* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
+ kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
&vcpu->arch.qpr[ax_rd]);
break;
case OP_4X_PS_MERGE01:
WARN_ON(rcomp);
- vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
+ VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
break;
case OP_4X_PS_MERGE10:
WARN_ON(rcomp);
- /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
+ /* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
- &vcpu->arch.fpr[ax_rd]);
- /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
- kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
+ &VCPU_FPR(vcpu, ax_rd));
+ /* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
+ kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
&vcpu->arch.qpr[ax_rd]);
break;
case OP_4X_PS_MERGE11:
WARN_ON(rcomp);
- /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
+ /* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
- &vcpu->arch.fpr[ax_rd]);
+ &VCPU_FPR(vcpu, ax_rd));
vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
break;
}
@@ -856,7 +853,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
case OP_4A_PS_SUM1:
emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds);
- vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc];
+ VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rc);
break;
case OP_4A_PS_SUM0:
emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
@@ -1106,45 +1103,45 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
case 59:
switch (inst_get_field(inst, 21, 30)) {
case OP_59_FADDS:
- fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ fpd_fadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
case OP_59_FSUBS:
- fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ fpd_fsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
case OP_59_FDIVS:
- fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ fpd_fdivs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
case OP_59_FRES:
- fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ fpd_fres(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
case OP_59_FRSQRTES:
- fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ fpd_frsqrtes(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
}
switch (inst_get_field(inst, 26, 30)) {
case OP_59_FMULS:
- fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
+ fpd_fmuls(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
case OP_59_FMSUBS:
- fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ fpd_fmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
case OP_59_FMADDS:
- fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ fpd_fmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
case OP_59_FNMSUBS:
- fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ fpd_fnmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
case OP_59_FNMADDS:
- fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ fpd_fnmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
}
@@ -1159,12 +1156,12 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_63_MFFS:
/* XXX missing CR */
- *fpr_d = vcpu->arch.fpscr;
+ *fpr_d = vcpu->arch.fp.fpscr;
break;
case OP_63_MTFSF:
/* XXX missing fm bits */
/* XXX missing CR */
- vcpu->arch.fpscr = *fpr_b;
+ vcpu->arch.fp.fpscr = *fpr_b;
break;
case OP_63_FCMPU:
{
@@ -1172,7 +1169,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
u32 cr0_mask = 0xf0000000;
u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
- fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
+ fpd_fcmpu(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
cr &= ~(cr0_mask >> cr_shift);
cr |= (cr & cr0_mask) >> cr_shift;
break;
@@ -1183,40 +1180,40 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
u32 cr0_mask = 0xf0000000;
u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
- fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
+ fpd_fcmpo(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
cr &= ~(cr0_mask >> cr_shift);
cr |= (cr & cr0_mask) >> cr_shift;
break;
}
case OP_63_FNEG:
- fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ fpd_fneg(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
break;
case OP_63_FMR:
*fpr_d = *fpr_b;
break;
case OP_63_FABS:
- fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ fpd_fabs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
break;
case OP_63_FCPSGN:
- fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ fpd_fcpsgn(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
break;
case OP_63_FDIV:
- fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
break;
case OP_63_FADD:
- fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ fpd_fadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
break;
case OP_63_FSUB:
- fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ fpd_fsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
break;
case OP_63_FCTIW:
- fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ fpd_fctiw(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
break;
case OP_63_FCTIWZ:
- fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ fpd_fctiwz(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
break;
case OP_63_FRSP:
- fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ fpd_frsp(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
kvmppc_sync_qpr(vcpu, ax_rd);
break;
case OP_63_FRSQRTE:
@@ -1224,39 +1221,39 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
double one = 1.0f;
/* fD = sqrt(fB) */
- fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ fpd_fsqrt(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
/* fD = 1.0f / fD */
- fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
+ fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
break;
}
}
switch (inst_get_field(inst, 26, 30)) {
case OP_63_FMUL:
- fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
+ fpd_fmul(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
break;
case OP_63_FSEL:
- fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ fpd_fsel(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
break;
case OP_63_FMSUB:
- fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ fpd_fmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
break;
case OP_63_FMADD:
- fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ fpd_fmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
break;
case OP_63_FNMSUB:
- fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ fpd_fnmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
break;
case OP_63_FNMADD:
- fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ fpd_fnmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
break;
}
break;
}
#ifdef DEBUG
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
u32 f;
- kvm_cvt_df(&vcpu->arch.fpr[i], &f);
+ kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
}
#endif
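The mechanical fpr[...] to VCPU_FPR(...) conversions in this file track the vcpu floating-point state moving into a shared FP/VSX container (vcpu->arch.fp). The snippet below is only a sketch of why an accessor is needed; the struct layout and the macro body are plausible guesses, not the actual header definitions.

/* Sketch: once FPRs share storage with VSX, each register occupies a
 * two-doubleword slot and plain array indexing no longer works, so call
 * sites go through an accessor macro (VCPU_FPR in the real code). */
struct sketch_fp_state {
	unsigned long long fpr[32][2];	/* [reg][0] = FP half, [reg][1] = VSX half */
	unsigned long long fpscr;
};

#define SKETCH_VCPU_FPR(fp, i)	((fp)->fpr[i][0])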
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 5b9e9063cfa..c5c052a9729 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -41,6 +41,7 @@
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
+#include <linux/miscdevice.h>
#include "book3s.h"
@@ -566,12 +567,6 @@ static inline int get_fpr_index(int i)
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
struct thread_struct *t = &current->thread;
- u64 *vcpu_fpr = vcpu->arch.fpr;
-#ifdef CONFIG_VSX
- u64 *vcpu_vsx = vcpu->arch.vsr;
-#endif
- u64 *thread_fpr = &t->fp_state.fpr[0][0];
- int i;
/*
* VSX instructions can access FP and vector registers, so if
@@ -594,26 +589,16 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
* both the traditional FP registers and the added VSX
* registers into thread.fp_state.fpr[].
*/
- if (current->thread.regs->msr & MSR_FP)
+ if (t->regs->msr & MSR_FP)
giveup_fpu(current);
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
- vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
-
- vcpu->arch.fpscr = t->fp_state.fpscr;
-
-#ifdef CONFIG_VSX
- if (cpu_has_feature(CPU_FTR_VSX))
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
- vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
-#endif
+ t->fp_save_area = NULL;
}
#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
if (current->thread.regs->msr & MSR_VEC)
giveup_altivec(current);
- memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr));
- vcpu->arch.vscr = t->vr_state.vscr;
+ t->vr_save_area = NULL;
}
#endif
@@ -661,12 +646,6 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
ulong msr)
{
struct thread_struct *t = &current->thread;
- u64 *vcpu_fpr = vcpu->arch.fpr;
-#ifdef CONFIG_VSX
- u64 *vcpu_vsx = vcpu->arch.vsr;
-#endif
- u64 *thread_fpr = &t->fp_state.fpr[0][0];
- int i;
/* When we have paired singles, we emulate in software */
if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
@@ -704,27 +683,20 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
#endif
if (msr & MSR_FP) {
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
- thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
-#ifdef CONFIG_VSX
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
- thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
-#endif
- t->fp_state.fpscr = vcpu->arch.fpscr;
- t->fpexc_mode = 0;
- kvmppc_load_up_fpu();
+ enable_kernel_fp();
+ load_fp_state(&vcpu->arch.fp);
+ t->fp_save_area = &vcpu->arch.fp;
}
if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
- memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
- t->vr_state.vscr = vcpu->arch.vscr;
- t->vrsave = -1;
- kvmppc_load_up_altivec();
+ enable_kernel_altivec();
+ load_vr_state(&vcpu->arch.vr);
+ t->vr_save_area = &vcpu->arch.vr;
#endif
}
- current->thread.regs->msr |= msr;
+ t->regs->msr |= msr;
vcpu->arch.guest_owned_ext |= msr;
kvmppc_recalc_shadow_msr(vcpu);
@@ -743,11 +715,15 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
if (!lost_ext)
return;
- if (lost_ext & MSR_FP)
- kvmppc_load_up_fpu();
+ if (lost_ext & MSR_FP) {
+ enable_kernel_fp();
+ load_fp_state(&vcpu->arch.fp);
+ }
#ifdef CONFIG_ALTIVEC
- if (lost_ext & MSR_VEC)
- kvmppc_load_up_altivec();
+ if (lost_ext & MSR_VEC) {
+ enable_kernel_altivec();
+ load_vr_state(&vcpu->arch.vr);
+ }
#endif
current->thread.regs->msr |= lost_ext;
}
@@ -873,6 +849,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* We're good on these - the host merely wanted to get our attention */
case BOOK3S_INTERRUPT_DECREMENTER:
case BOOK3S_INTERRUPT_HV_DECREMENTER:
+ case BOOK3S_INTERRUPT_DOORBELL:
vcpu->stat.dec_exits++;
r = RESUME_GUEST;
break;
@@ -1045,14 +1022,14 @@ program_interrupt:
* and if we really did time things so badly, then we just exit
* again due to a host external interrupt.
*/
- local_irq_disable();
s = kvmppc_prepare_to_enter(vcpu);
- if (s <= 0) {
- local_irq_enable();
+ if (s <= 0)
r = s;
- } else {
+ else {
+ /* interrupts now hard-disabled */
kvmppc_fix_ee_before_entry();
}
+
kvmppc_handle_lost_ext(vcpu);
}
@@ -1133,19 +1110,6 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_HIOR:
*val = get_reg_val(id, to_book3s(vcpu)->hior);
break;
-#ifdef CONFIG_VSX
- case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
- long int i = id - KVM_REG_PPC_VSR0;
-
- if (!cpu_has_feature(CPU_FTR_VSX)) {
- r = -ENXIO;
- break;
- }
- val->vsxval[0] = vcpu->arch.fpr[i];
- val->vsxval[1] = vcpu->arch.vsr[i];
- break;
- }
-#endif /* CONFIG_VSX */
default:
r = -EINVAL;
break;
@@ -1164,19 +1128,6 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
to_book3s(vcpu)->hior = set_reg_val(id, *val);
to_book3s(vcpu)->hior_explicit = true;
break;
-#ifdef CONFIG_VSX
- case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
- long int i = id - KVM_REG_PPC_VSR0;
-
- if (!cpu_has_feature(CPU_FTR_VSX)) {
- r = -ENXIO;
- break;
- }
- vcpu->arch.fpr[i] = val->vsxval[0];
- vcpu->arch.vsr[i] = val->vsxval[1];
- break;
- }
-#endif /* CONFIG_VSX */
default:
r = -EINVAL;
break;
@@ -1274,17 +1225,9 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
int ret;
- struct thread_fp_state fp;
- int fpexc_mode;
#ifdef CONFIG_ALTIVEC
- struct thread_vr_state vr;
unsigned long uninitialized_var(vrsave);
- int used_vr;
#endif
-#ifdef CONFIG_VSX
- int used_vsr;
-#endif
- ulong ext_msr;
/* Check if we can run the vcpu at all */
if (!vcpu->arch.sane) {
@@ -1299,40 +1242,27 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
* really did time things so badly, then we just exit again due to
* a host external interrupt.
*/
- local_irq_disable();
ret = kvmppc_prepare_to_enter(vcpu);
- if (ret <= 0) {
- local_irq_enable();
+ if (ret <= 0)
goto out;
- }
+ /* interrupts now hard-disabled */
- /* Save FPU state in stack */
+ /* Save FPU state in thread_struct */
if (current->thread.regs->msr & MSR_FP)
giveup_fpu(current);
- fp = current->thread.fp_state;
- fpexc_mode = current->thread.fpexc_mode;
#ifdef CONFIG_ALTIVEC
- /* Save Altivec state in stack */
- used_vr = current->thread.used_vr;
- if (used_vr) {
- if (current->thread.regs->msr & MSR_VEC)
- giveup_altivec(current);
- vr = current->thread.vr_state;
- vrsave = current->thread.vrsave;
- }
+ /* Save Altivec state in thread_struct */
+ if (current->thread.regs->msr & MSR_VEC)
+ giveup_altivec(current);
#endif
#ifdef CONFIG_VSX
- /* Save VSX state in stack */
- used_vsr = current->thread.used_vsr;
- if (used_vsr && (current->thread.regs->msr & MSR_VSX))
+ /* Save VSX state in thread_struct */
+ if (current->thread.regs->msr & MSR_VSX)
__giveup_vsx(current);
#endif
- /* Remember the MSR with disabled extensions */
- ext_msr = current->thread.regs->msr;
-
/* Preload FPU if it's enabled */
if (vcpu->arch.shared->msr & MSR_FP)
kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
@@ -1347,25 +1277,6 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
/* Make sure we save the guest FPU/Altivec/VSX state */
kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
- current->thread.regs->msr = ext_msr;
-
- /* Restore FPU/VSX state from stack */
- current->thread.fp_state = fp;
- current->thread.fpexc_mode = fpexc_mode;
-
-#ifdef CONFIG_ALTIVEC
- /* Restore Altivec state from stack */
- if (used_vr && current->thread.used_vr) {
- current->thread.vr_state = vr;
- current->thread.vrsave = vrsave;
- }
- current->thread.used_vr = used_vr;
-#endif
-
-#ifdef CONFIG_VSX
- current->thread.used_vsr = used_vsr;
-#endif
-
out:
vcpu->mode = OUTSIDE_GUEST_MODE;
return ret;
@@ -1606,4 +1517,6 @@ module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);
MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(KVM_MINOR);
+MODULE_ALIAS("devname:kvm");
#endif
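The book3s_pr.c changes above stop copying guest FPRs in and out of current->thread and instead point the thread's save area at the vcpu's own register image. A condensed sketch of the resulting pattern follows; the helpers named are the ones referenced in the diff, but their exact prototypes and the surrounding types are assumed here rather than quoted.

/* Sketch of the ownership handover pattern, not the kernel functions. */

/* Guest acquires FP: load its image and let later context switches save
 * the live registers straight back into it. */
static void guest_take_fp_sketch(struct kvm_vcpu *vcpu)
{
	enable_kernel_fp();				/* FP usable in kernel */
	load_fp_state(&vcpu->arch.fp);			/* load guest FPRs/FPSCR */
	current->thread.fp_save_area = &vcpu->arch.fp;	/* save target from now on */
}

/* Host reclaims FP: flush live registers into the vcpu image, detach it. */
static void guest_drop_fp_sketch(struct kvm_vcpu *vcpu)
{
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);			/* saves via fp_save_area */
	current->thread.fp_save_area = NULL;
}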
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index c3c5231adad..9eec675220e 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -162,51 +162,4 @@ _GLOBAL(kvmppc_entry_trampoline)
mtsrr1 r6
RFI
-#if defined(CONFIG_PPC_BOOK3S_32)
-#define STACK_LR INT_FRAME_SIZE+4
-
-/* load_up_xxx have to run with MSR_DR=0 on Book3S_32 */
-#define MSR_EXT_START \
- PPC_STL r20, _NIP(r1); \
- mfmsr r20; \
- LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \
- andc r3,r20,r3; /* Disable DR,EE */ \
- mtmsr r3; \
- sync
-
-#define MSR_EXT_END \
- mtmsr r20; /* Enable DR,EE */ \
- sync; \
- PPC_LL r20, _NIP(r1)
-
-#elif defined(CONFIG_PPC_BOOK3S_64)
-#define STACK_LR _LINK
-#define MSR_EXT_START
-#define MSR_EXT_END
-#endif
-
-/*
- * Activate current's external feature (FPU/Altivec/VSX)
- */
-#define define_load_up(what) \
- \
-_GLOBAL(kvmppc_load_up_ ## what); \
- PPC_STLU r1, -INT_FRAME_SIZE(r1); \
- mflr r3; \
- PPC_STL r3, STACK_LR(r1); \
- MSR_EXT_START; \
- \
- bl FUNC(load_up_ ## what); \
- \
- MSR_EXT_END; \
- PPC_LL r3, STACK_LR(r1); \
- mtlr r3; \
- addi r1, r1, INT_FRAME_SIZE; \
- blr
-
-define_load_up(fpu)
-#ifdef CONFIG_ALTIVEC
-define_load_up(altivec)
-#endif
-
#include "book3s_segment.S"
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index bc50c97751d..1e0cc2adfd4 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -361,6 +361,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
beqa BOOK3S_INTERRUPT_DECREMENTER
cmpwi r12, BOOK3S_INTERRUPT_PERFMON
beqa BOOK3S_INTERRUPT_PERFMON
+ cmpwi r12, BOOK3S_INTERRUPT_DOORBELL
+ beqa BOOK3S_INTERRUPT_DOORBELL
RFI
kvmppc_handler_trampoline_exit_end:
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 02a17dcf161..d1acd32a64c 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -1246,8 +1246,10 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
kvm->arch.xics = xics;
mutex_unlock(&kvm->lock);
- if (ret)
+ if (ret) {
+ kfree(xics);
return ret;
+ }
xics_debugfs_init(xics);
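The xics_create fix above plugs a leak on the error path: the device framework does not free the backing structure when creation fails, so the callee must. A generic sketch of that pattern, with placeholder names that are not the KVM device API:

/* Generic sketch: memory allocated here must be released here when
 * registration fails, because the caller never sees the pointer. */
struct sketch_dev_state { int dummy; };

static int sketch_register(struct sketch_dev_state *s) { return 0; }	/* placeholder */

static int sketch_create(struct sketch_dev_state **out)
{
	struct sketch_dev_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
	int ret;

	if (!s)
		return -ENOMEM;

	ret = sketch_register(s);
	if (ret) {
		kfree(s);		/* the fix: do not leak on failure */
		return ret;
	}

	*out = s;
	return 0;
}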
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 0591e05db74..ab62109fdfa 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -643,7 +643,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
local_irq_enable();
kvm_vcpu_block(vcpu);
clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
- local_irq_disable();
+ hard_irq_disable();
kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
r = 1;
@@ -682,34 +682,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
int ret, s;
struct debug_reg debug;
-#ifdef CONFIG_PPC_FPU
- struct thread_fp_state fp;
- int fpexc_mode;
-#endif
if (!vcpu->arch.sane) {
kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
}
- local_irq_disable();
s = kvmppc_prepare_to_enter(vcpu);
if (s <= 0) {
- local_irq_enable();
ret = s;
goto out;
}
+ /* interrupts now hard-disabled */
#ifdef CONFIG_PPC_FPU
/* Save userspace FPU state in stack */
enable_kernel_fp();
- fp = current->thread.fp_state;
- fpexc_mode = current->thread.fpexc_mode;
-
- /* Restore guest FPU state to thread */
- memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr,
- sizeof(vcpu->arch.fpr));
- current->thread.fp_state.fpscr = vcpu->arch.fpscr;
/*
* Since we can't trap on MSR_FP in GS-mode, we consider the guest
@@ -728,6 +716,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
debug = current->thread.debug;
current->thread.debug = vcpu->arch.shadow_dbg_reg;
+ vcpu->arch.pgdir = current->mm->pgd;
kvmppc_fix_ee_before_entry();
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
@@ -743,15 +732,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_save_guest_fp(vcpu);
vcpu->fpu_active = 0;
-
- /* Save guest FPU state from thread */
- memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr,
- sizeof(vcpu->arch.fpr));
- vcpu->arch.fpscr = current->thread.fp_state.fpscr;
-
- /* Restore userspace FPU state from stack */
- current->thread.fp_state = fp;
- current->thread.fpexc_mode = fpexc_mode;
#endif
out:
@@ -898,17 +878,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
int s;
int idx;
-#ifdef CONFIG_PPC64
- WARN_ON(local_paca->irq_happened != 0);
-#endif
-
- /*
- * We enter with interrupts disabled in hardware, but
- * we need to call hard_irq_disable anyway to ensure that
- * the software state is kept in sync.
- */
- hard_irq_disable();
-
/* update before a new last_exit_type is rewritten */
kvmppc_update_timing_stats(vcpu);
@@ -1217,12 +1186,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* aren't already exiting to userspace for some other reason.
*/
if (!(r & RESUME_HOST)) {
- local_irq_disable();
s = kvmppc_prepare_to_enter(vcpu);
- if (s <= 0) {
- local_irq_enable();
+ if (s <= 0)
r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
- } else {
+ else {
+ /* interrupts now hard-disabled */
kvmppc_fix_ee_before_entry();
}
}
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 09bfd9bc7cf..b632cd35919 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -136,7 +136,9 @@ static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
- load_up_fpu();
+ enable_kernel_fp();
+ load_fp_state(&vcpu->arch.fp);
+ current->thread.fp_save_area = &vcpu->arch.fp;
current->thread.regs->msr |= MSR_FP;
}
#endif
@@ -151,6 +153,7 @@ static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
#ifdef CONFIG_PPC_FPU
if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP))
giveup_fpu(current);
+ current->thread.fp_save_area = NULL;
#endif
}
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index e8ed7d659c5..e4185f6b330 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -33,6 +33,8 @@
#ifdef CONFIG_64BIT
#include <asm/exception-64e.h>
+#include <asm/hw_irq.h>
+#include <asm/irqflags.h>
#else
#include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */
#endif
@@ -319,6 +321,8 @@ kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(DBG), \
SPRN_DSRR0, SPRN_DSRR1, 0
kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(CRIT), \
SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_handler BOOKE_INTERRUPT_LRAT_ERROR, EX_PARAMS(GEN), \
+ SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
#else
/*
* For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
@@ -465,6 +469,15 @@ _GLOBAL(kvmppc_resume_host)
mtspr SPRN_EPCR, r3
isync
+#ifdef CONFIG_64BIT
+ /*
+ * We enter with interrupts disabled in hardware, but
+ * we need to call RECONCILE_IRQ_STATE to ensure
+ * that the software state is kept in sync.
+ */
+ RECONCILE_IRQ_STATE(r3,r5)
+#endif
+
/* Switch to kernel stack and jump to handler. */
PPC_LL r3, HOST_RUN(r1)
mr r5, r14 /* intno */
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 497b142f651..2e02ed849f3 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -16,6 +16,8 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
#include <asm/reg.h>
#include <asm/cputable.h>
@@ -573,3 +575,5 @@ static void __exit kvmppc_e500_exit(void)
module_init(kvmppc_e500_init);
module_exit(kvmppc_e500_exit);
+MODULE_ALIAS_MISCDEV(KVM_MINOR);
+MODULE_ALIAS("devname:kvm");
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index 4fd9650eb01..a326178bdea 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -31,11 +31,13 @@ enum vcpu_ftr {
#define E500_TLB_NUM 2
/* entry is mapped somewhere in host TLB */
-#define E500_TLB_VALID (1 << 0)
+#define E500_TLB_VALID (1 << 31)
/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
-#define E500_TLB_BITMAP (1 << 1)
+#define E500_TLB_BITMAP (1 << 30)
/* TLB1 entry is mapped by host TLB0 */
-#define E500_TLB_TLB0 (1 << 2)
+#define E500_TLB_TLB0 (1 << 29)
+/* bits [6-5] are MAS2_X1 and MAS2_X0, bits [4-0] are WIMGE */
+#define E500_TLB_MAS2_ATTR (0x7f)
struct tlbe_ref {
pfn_t pfn; /* valid only for TLB0, except briefly */
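The e500.h hunk above rearranges the per-entry software flags so that the low seven bits of ref->flags can carry the guest's MAS2 attribute bits. A small sketch of the intended layout, with the flag constants copied from the hunk; the helper itself is illustrative only.

#include <stdint.h>

#define E500_TLB_VALID		(1u << 31)	/* mapped somewhere in host TLB */
#define E500_TLB_BITMAP		(1u << 30)	/* TLB1 entry tracked by bitmaps */
#define E500_TLB_TLB0		(1u << 29)	/* TLB1 entry mapped by host TLB0 */
#define E500_TLB_MAS2_ATTR	0x7f		/* X1/X0 plus WIMGE attribute bits */

/* Sketch: build the flags word for a freshly mapped entry, keeping the
 * guest-controlled attribute bits and adding the host-derived WIMG bits. */
static inline uint32_t ref_flags_sketch(uint32_t guest_mas2_attr, unsigned int wimg)
{
	return E500_TLB_VALID | (guest_mas2_attr & E500_TLB_MAS2_ATTR) | wimg;
}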
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index ebca6b88ea5..50860e919cb 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -127,7 +127,7 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
}
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
- unsigned int eaddr, int as)
+ gva_t eaddr, int as)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
unsigned int victim, tsized;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index ecf2247b13b..dd2cc03f406 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -65,15 +65,6 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
return mas3;
}
-static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
-{
-#ifdef CONFIG_SMP
- return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
-#else
- return mas2 & MAS2_ATTRIB_MASK;
-#endif
-}
-
/*
* writing shadow tlb entry to host TLB
*/
@@ -231,15 +222,15 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
}
- /* Already invalidated in between */
- if (!(ref->flags & E500_TLB_VALID))
- return;
-
- /* Guest tlbe is backed by at most one host tlbe per shadow pid. */
- kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
+ /*
+	 * If the TLB entry is still valid then it is a TLB0 entry, and thus
+ * backed by at most one host tlbe per shadow pid
+ */
+ if (ref->flags & E500_TLB_VALID)
+ kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
/* Mark the TLB as not backed by the host anymore */
- ref->flags &= ~E500_TLB_VALID;
+ ref->flags = 0;
}
static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
@@ -249,10 +240,13 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
struct kvm_book3e_206_tlb_entry *gtlbe,
- pfn_t pfn)
+ pfn_t pfn, unsigned int wimg)
{
ref->pfn = pfn;
- ref->flags |= E500_TLB_VALID;
+ ref->flags = E500_TLB_VALID;
+
+ /* Use guest supplied MAS2_G and MAS2_E */
+ ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
/* Mark the page accessed */
kvm_set_pfn_accessed(pfn);
@@ -316,8 +310,7 @@ static void kvmppc_e500_setup_stlbe(
/* Force IPROT=0 for all guest mappings. */
stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
- stlbe->mas2 = (gvaddr & MAS2_EPN) |
- e500_shadow_mas2_attrib(gtlbe->mas2, pr);
+ stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
@@ -339,6 +332,10 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
int ret = 0;
unsigned long mmu_seq;
struct kvm *kvm = vcpu_e500->vcpu.kvm;
+ unsigned long tsize_pages = 0;
+ pte_t *ptep;
+ unsigned int wimg = 0;
+ pgd_t *pgdir;
/* used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
@@ -405,7 +402,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
*/
for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
- unsigned long gfn_start, gfn_end, tsize_pages;
+ unsigned long gfn_start, gfn_end;
tsize_pages = 1 << (tsize - 2);
gfn_start = gfn & ~(tsize_pages - 1);
@@ -447,11 +444,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
}
if (likely(!pfnmap)) {
- unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
+ tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
pfn = gfn_to_pfn_memslot(slot, gfn);
if (is_error_noslot_pfn(pfn)) {
- printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
- (long)gfn);
+ if (printk_ratelimit())
+ pr_err("%s: real page not found for gfn %lx\n",
+ __func__, (long)gfn);
return -EINVAL;
}
@@ -466,7 +464,18 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
goto out;
}
- kvmppc_e500_ref_setup(ref, gtlbe, pfn);
+
+ pgdir = vcpu_e500->vcpu.arch.pgdir;
+ ptep = lookup_linux_ptep(pgdir, hva, &tsize_pages);
+ if (pte_present(*ptep))
+ wimg = (*ptep >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
+ else {
+ if (printk_ratelimit())
+ pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
+ __func__, (long)gfn, pfn);
+ return -EINVAL;
+ }
+ kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
ref, gvaddr, stlbe);
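The shadow-map hunk above now derives the WIMG cacheability attributes for the host mapping from the Linux PTE instead of hardcoding them. The fragment below schematically mirrors that step; the shift and mask values are placeholders standing in for PTE_WIMGE_SHIFT and MAS2_WIMGE_MASK, whose real definitions live in the powerpc headers.

/* Placeholder values, for illustration only. */
#define SKETCH_PTE_WIMGE_SHIFT	6
#define SKETCH_MAS2_WIMGE_MASK	0x1f

/* Sketch: extract WIMGE from a present Linux PTE, or refuse the mapping
 * in the same way the diff does when the PTE is not present. */
static int pte_wimg_sketch(unsigned long pte_val, int present, unsigned int *wimg)
{
	if (!present)
		return -1;
	*wimg = (pte_val >> SKETCH_PTE_WIMGE_SHIFT) & SKETCH_MAS2_WIMGE_MASK;
	return 0;
}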
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 4132cd2fc17..17e45627922 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -16,6 +16,8 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
#include <asm/reg.h>
#include <asm/cputable.h>
@@ -391,3 +393,5 @@ static void __exit kvmppc_e500mc_exit(void)
module_init(kvmppc_e500mc_init);
module_exit(kvmppc_e500mc_exit);
+MODULE_ALIAS_MISCDEV(KVM_MINOR);
+MODULE_ALIAS("devname:kvm");
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 2f9a0873b44..c2b887be2c2 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -219,7 +219,6 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
* lmw
* stmw
*
- * XXX is_bigendian should depend on MMU mapping or MSR[LE]
*/
/* XXX Should probably auto-generate instruction decoding for a particular core
* from opcode tables in the future. */
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
index 2861ae9eaae..efbd9962a20 100644
--- a/arch/powerpc/kvm/mpic.c
+++ b/arch/powerpc/kvm/mpic.c
@@ -1635,6 +1635,7 @@ static void mpic_destroy(struct kvm_device *dev)
dev->kvm->arch.mpic = NULL;
kfree(opp);
+ kfree(dev);
}
static int mpic_set_default_irq_routing(struct openpic *opp)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 9ae97686e9f..3cf541a53e2 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -68,14 +68,16 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
*/
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
- int r = 1;
+ int r;
+
+ WARN_ON(irqs_disabled());
+ hard_irq_disable();
- WARN_ON_ONCE(!irqs_disabled());
while (true) {
if (need_resched()) {
local_irq_enable();
cond_resched();
- local_irq_disable();
+ hard_irq_disable();
continue;
}
@@ -101,7 +103,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
local_irq_enable();
trace_kvm_check_requests(vcpu);
r = kvmppc_core_check_requests(vcpu);
- local_irq_disable();
+ hard_irq_disable();
if (r > 0)
continue;
break;
@@ -113,22 +115,12 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
continue;
}
-#ifdef CONFIG_PPC64
- /* lazy EE magic */
- hard_irq_disable();
- if (lazy_irq_pending()) {
- /* Got an interrupt in between, try again */
- local_irq_enable();
- local_irq_disable();
- kvm_guest_exit();
- continue;
- }
-#endif
-
kvm_guest_enter();
- break;
+ return 1;
}
+ /* return to host */
+ local_irq_enable();
return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
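With the lazy-EE special case gone, kvmppc_prepare_to_enter now hard-disables interrupts itself and returns with them still disabled on the success path. The loop below is a stripped-down sketch of that control flow; the request-checking condition is simplified and the helper bodies are the kernel's, not reproduced here.

/* Sketch of the reworked entry loop: return 1 with interrupts hard-disabled,
 * or re-enable them and return <= 0 to bounce back out to the host. */
static int prepare_to_enter_sketch(struct kvm_vcpu *vcpu)
{
	int r;

	hard_irq_disable();
	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (vcpu->requests) {			/* simplified condition */
			local_irq_enable();
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;				/* request forces an exit */
		}

		kvm_guest_enter();
		return 1;				/* still hard-disabled */
	}

	local_irq_enable();				/* back to host context */
	return r;
}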
@@ -656,14 +648,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
break;
case KVM_MMIO_REG_FPR:
- vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
+ VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
break;
#ifdef CONFIG_PPC_BOOK3S
case KVM_MMIO_REG_QPR:
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break;
case KVM_MMIO_REG_FQPR:
- vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
+ VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break;
#endif
@@ -673,9 +665,19 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
}
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int rt, unsigned int bytes, int is_bigendian)
+ unsigned int rt, unsigned int bytes,
+ int is_default_endian)
{
int idx, ret;
+ int is_bigendian;
+
+ if (kvmppc_need_byteswap(vcpu)) {
+ /* Default endianness is "little endian". */
+ is_bigendian = !is_default_endian;
+ } else {
+ /* Default endianness is "big endian". */
+ is_bigendian = is_default_endian;
+ }
if (bytes > sizeof(run->mmio.data)) {
printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
@@ -711,21 +713,31 @@ EXPORT_SYMBOL_GPL(kvmppc_handle_load);
/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int rt, unsigned int bytes, int is_bigendian)
+ unsigned int rt, unsigned int bytes,
+ int is_default_endian)
{
int r;
vcpu->arch.mmio_sign_extend = 1;
- r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
+ r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);
return r;
}
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
- u64 val, unsigned int bytes, int is_bigendian)
+ u64 val, unsigned int bytes, int is_default_endian)
{
void *data = run->mmio.data;
int idx, ret;
+ int is_bigendian;
+
+ if (kvmppc_need_byteswap(vcpu)) {
+ /* Default endianness is "little endian". */
+ is_bigendian = !is_default_endian;
+ } else {
+ /* Default endianness is "big endian". */
+ is_bigendian = is_default_endian;
+ }
if (bytes > sizeof(run->mmio.data)) {
printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
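The MMIO helpers above now take is_default_endian and invert it when the guest is running in the opposite byte order. A minimal sketch of that selection, assuming kvmppc_need_byteswap() reports whether the guest's current mode is byte-reversed relative to its default:

/* Sketch: decide whether this MMIO access is big-endian on the wire. */
static int mmio_is_bigendian_sketch(struct kvm_vcpu *vcpu, int is_default_endian)
{
	if (kvmppc_need_byteswap(vcpu))
		return !is_default_endian;	/* guest default is little-endian */
	return is_default_endian;		/* guest default is big-endian */
}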
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 17e5b236431..d5edbeb8eb8 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -159,6 +159,21 @@ unsigned int translate_branch(const unsigned int *dest, const unsigned int *src)
return 0;
}
+#ifdef CONFIG_PPC_BOOK3E_64
+void __patch_exception(int exc, unsigned long addr)
+{
+ extern unsigned int interrupt_base_book3e;
+ unsigned int *ibase = &interrupt_base_book3e;
+
+	/* Our exception vectors start with a NOP and -then- a branch
+ * to deal with single stepping from userspace which stops on
+ * the second instruction. Thus we need to patch the second
+ * instruction of the exception, not the first one
+ */
+
+ patch_branch(ibase + (exc / 4) + 1, addr, 0);
+}
+#endif
#ifdef CONFIG_CODE_PATCHING_SELFTEST
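Because each Book3E exception vector begins with a NOP, __patch_exception above patches the second instruction of the vector. A hypothetical call site is sketched below; both the vector offset and the handler symbol are made-up examples rather than real kernel entry points.

/* Hypothetical usage sketch: redirect the exception at offset 0x300 to a
 * replacement handler.  Names here are illustrative placeholders. */
extern void example_replacement_handler(void);

static void install_example_patch(void)
{
	__patch_exception(0x300, (unsigned long)&example_replacement_handler);
}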
diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
index b2c68ce139a..a5b30c71a8d 100644
--- a/arch/powerpc/lib/crtsavres.S
+++ b/arch/powerpc/lib/crtsavres.S
@@ -231,6 +231,87 @@ _GLOBAL(_rest32gpr_31_x)
mr 1,11
blr
+#ifdef CONFIG_ALTIVEC
+/* Called with r0 pointing just beyond the end of the vector save area. */
+
+_GLOBAL(_savevr_20)
+ li r11,-192
+ stvx vr20,r11,r0
+_GLOBAL(_savevr_21)
+ li r11,-176
+ stvx vr21,r11,r0
+_GLOBAL(_savevr_22)
+ li r11,-160
+ stvx vr22,r11,r0
+_GLOBAL(_savevr_23)
+ li r11,-144
+ stvx vr23,r11,r0
+_GLOBAL(_savevr_24)
+ li r11,-128
+ stvx vr24,r11,r0
+_GLOBAL(_savevr_25)
+ li r11,-112
+ stvx vr25,r11,r0
+_GLOBAL(_savevr_26)
+ li r11,-96
+ stvx vr26,r11,r0
+_GLOBAL(_savevr_27)
+ li r11,-80
+ stvx vr27,r11,r0
+_GLOBAL(_savevr_28)
+ li r11,-64
+ stvx vr28,r11,r0
+_GLOBAL(_savevr_29)
+ li r11,-48
+ stvx vr29,r11,r0
+_GLOBAL(_savevr_30)
+ li r11,-32
+ stvx vr30,r11,r0
+_GLOBAL(_savevr_31)
+ li r11,-16
+ stvx vr31,r11,r0
+ blr
+
+_GLOBAL(_restvr_20)
+ li r11,-192
+ lvx vr20,r11,r0
+_GLOBAL(_restvr_21)
+ li r11,-176
+ lvx vr21,r11,r0
+_GLOBAL(_restvr_22)
+ li r11,-160
+ lvx vr22,r11,r0
+_GLOBAL(_restvr_23)
+ li r11,-144
+ lvx vr23,r11,r0
+_GLOBAL(_restvr_24)
+ li r11,-128
+ lvx vr24,r11,r0
+_GLOBAL(_restvr_25)
+ li r11,-112
+ lvx vr25,r11,r0
+_GLOBAL(_restvr_26)
+ li r11,-96
+ lvx vr26,r11,r0
+_GLOBAL(_restvr_27)
+ li r11,-80
+ lvx vr27,r11,r0
+_GLOBAL(_restvr_28)
+ li r11,-64
+ lvx vr28,r11,r0
+_GLOBAL(_restvr_29)
+ li r11,-48
+ lvx vr29,r11,r0
+_GLOBAL(_restvr_30)
+ li r11,-32
+ lvx vr30,r11,r0
+_GLOBAL(_restvr_31)
+ li r11,-16
+ lvx vr31,r11,r0
+ blr
+
+#endif /* CONFIG_ALTIVEC */
+
#else /* CONFIG_PPC64 */
.section ".text.save.restore","ax",@progbits
@@ -356,6 +437,111 @@ _restgpr0_31:
mtlr r0
blr
+#ifdef CONFIG_ALTIVEC
+/* Called with r0 pointing just beyond the end of the vector save area. */
+
+.globl _savevr_20
+_savevr_20:
+ li r12,-192
+ stvx vr20,r12,r0
+.globl _savevr_21
+_savevr_21:
+ li r12,-176
+ stvx vr21,r12,r0
+.globl _savevr_22
+_savevr_22:
+ li r12,-160
+ stvx vr22,r12,r0
+.globl _savevr_23
+_savevr_23:
+ li r12,-144
+ stvx vr23,r12,r0
+.globl _savevr_24
+_savevr_24:
+ li r12,-128
+ stvx vr24,r12,r0
+.globl _savevr_25
+_savevr_25:
+ li r12,-112
+ stvx vr25,r12,r0
+.globl _savevr_26
+_savevr_26:
+ li r12,-96
+ stvx vr26,r12,r0
+.globl _savevr_27
+_savevr_27:
+ li r12,-80
+ stvx vr27,r12,r0
+.globl _savevr_28
+_savevr_28:
+ li r12,-64
+ stvx vr28,r12,r0
+.globl _savevr_29
+_savevr_29:
+ li r12,-48
+ stvx vr29,r12,r0
+.globl _savevr_30
+_savevr_30:
+ li r12,-32
+ stvx vr30,r12,r0
+.globl _savevr_31
+_savevr_31:
+ li r12,-16
+ stvx vr31,r12,r0
+ blr
+
+.globl _restvr_20
+_restvr_20:
+ li r12,-192
+ lvx vr20,r12,r0
+.globl _restvr_21
+_restvr_21:
+ li r12,-176
+ lvx vr21,r12,r0
+.globl _restvr_22
+_restvr_22:
+ li r12,-160
+ lvx vr22,r12,r0
+.globl _restvr_23
+_restvr_23:
+ li r12,-144
+ lvx vr23,r12,r0
+.globl _restvr_24
+_restvr_24:
+ li r12,-128
+ lvx vr24,r12,r0
+.globl _restvr_25
+_restvr_25:
+ li r12,-112
+ lvx vr25,r12,r0
+.globl _restvr_26
+_restvr_26:
+ li r12,-96
+ lvx vr26,r12,r0
+.globl _restvr_27
+_restvr_27:
+ li r12,-80
+ lvx vr27,r12,r0
+.globl _restvr_28
+_restvr_28:
+ li r12,-64
+ lvx vr28,r12,r0
+.globl _restvr_29
+_restvr_29:
+ li r12,-48
+ lvx vr29,r12,r0
+.globl _restvr_30
+_restvr_30:
+ li r12,-32
+ lvx vr30,r12,r0
+.globl _restvr_31
+_restvr_31:
+ li r12,-16
+ lvx vr31,r12,r0
+ blr
+
+#endif /* CONFIG_ALTIVEC */
+
#endif /* CONFIG_PPC64 */
#endif
diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c
index a73f0884d35..28337c9709a 100644
--- a/arch/powerpc/math-emu/math_efp.c
+++ b/arch/powerpc/math-emu/math_efp.c
@@ -20,6 +20,7 @@
*/
#include <linux/types.h>
+#include <linux/prctl.h>
#include <asm/uaccess.h>
#include <asm/reg.h>
@@ -275,21 +276,13 @@ int do_spe_mathemu(struct pt_regs *regs)
case EFSCTSF:
case EFSCTUF:
- if (!((vb.wp[1] >> 23) == 0xff && ((vb.wp[1] & 0x7fffff) > 0))) {
- /* NaN */
- if (((vb.wp[1] >> 23) & 0xff) == 0) {
- /* denorm */
- vc.wp[1] = 0x0;
- } else if ((vb.wp[1] >> 31) == 0) {
- /* positive normal */
- vc.wp[1] = (func == EFSCTSF) ?
- 0x7fffffff : 0xffffffff;
- } else { /* negative normal */
- vc.wp[1] = (func == EFSCTSF) ?
- 0x80000000 : 0x0;
- }
- } else { /* rB is NaN */
- vc.wp[1] = 0x0;
+ if (SB_c == FP_CLS_NAN) {
+ vc.wp[1] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ SB_e += (func == EFSCTSF ? 31 : 32);
+ FP_TO_INT_ROUND_S(vc.wp[1], SB, 32,
+ (func == EFSCTSF));
}
goto update_regs;
@@ -306,16 +299,25 @@ int do_spe_mathemu(struct pt_regs *regs)
}
case EFSCTSI:
- case EFSCTSIZ:
case EFSCTUI:
+ if (SB_c == FP_CLS_NAN) {
+ vc.wp[1] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ FP_TO_INT_ROUND_S(vc.wp[1], SB, 32,
+ ((func & 0x3) != 0));
+ }
+ goto update_regs;
+
+ case EFSCTSIZ:
case EFSCTUIZ:
- if (func & 0x4) {
- _FP_ROUND(1, SB);
+ if (SB_c == FP_CLS_NAN) {
+ vc.wp[1] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
} else {
- _FP_ROUND_ZERO(1, SB);
+ FP_TO_INT_S(vc.wp[1], SB, 32,
+ ((func & 0x3) != 0));
}
- FP_TO_INT_S(vc.wp[1], SB, 32,
- (((func & 0x3) != 0) || SB_s));
goto update_regs;
default:
@@ -404,22 +406,13 @@ cmp_s:
case EFDCTSF:
case EFDCTUF:
- if (!((vb.wp[0] >> 20) == 0x7ff &&
- ((vb.wp[0] & 0xfffff) > 0 || (vb.wp[1] > 0)))) {
- /* not a NaN */
- if (((vb.wp[0] >> 20) & 0x7ff) == 0) {
- /* denorm */
- vc.wp[1] = 0x0;
- } else if ((vb.wp[0] >> 31) == 0) {
- /* positive normal */
- vc.wp[1] = (func == EFDCTSF) ?
- 0x7fffffff : 0xffffffff;
- } else { /* negative normal */
- vc.wp[1] = (func == EFDCTSF) ?
- 0x80000000 : 0x0;
- }
- } else { /* NaN */
- vc.wp[1] = 0x0;
+ if (DB_c == FP_CLS_NAN) {
+ vc.wp[1] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ DB_e += (func == EFDCTSF ? 31 : 32);
+ FP_TO_INT_ROUND_D(vc.wp[1], DB, 32,
+ (func == EFDCTSF));
}
goto update_regs;
@@ -437,21 +430,35 @@ cmp_s:
case EFDCTUIDZ:
case EFDCTSIDZ:
- _FP_ROUND_ZERO(2, DB);
- FP_TO_INT_D(vc.dp[0], DB, 64, ((func & 0x1) == 0));
+ if (DB_c == FP_CLS_NAN) {
+ vc.dp[0] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ FP_TO_INT_D(vc.dp[0], DB, 64,
+ ((func & 0x1) == 0));
+ }
goto update_regs;
case EFDCTUI:
case EFDCTSI:
+ if (DB_c == FP_CLS_NAN) {
+ vc.wp[1] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ FP_TO_INT_ROUND_D(vc.wp[1], DB, 32,
+ ((func & 0x3) != 0));
+ }
+ goto update_regs;
+
case EFDCTUIZ:
case EFDCTSIZ:
- if (func & 0x4) {
- _FP_ROUND(2, DB);
+ if (DB_c == FP_CLS_NAN) {
+ vc.wp[1] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
} else {
- _FP_ROUND_ZERO(2, DB);
+ FP_TO_INT_D(vc.wp[1], DB, 32,
+ ((func & 0x3) != 0));
}
- FP_TO_INT_D(vc.wp[1], DB, 32,
- (((func & 0x3) != 0) || DB_s));
goto update_regs;
default:
@@ -556,37 +563,60 @@ cmp_d:
cmp = -1;
goto cmp_vs;
- case EVFSCTSF:
- __asm__ __volatile__ ("mtspr 512, %4\n"
- "efsctsf %0, %2\n"
- "efsctsf %1, %3\n"
- : "=r" (vc.wp[0]), "=r" (vc.wp[1])
- : "r" (vb.wp[0]), "r" (vb.wp[1]), "r" (0));
- goto update_regs;
-
case EVFSCTUF:
- __asm__ __volatile__ ("mtspr 512, %4\n"
- "efsctuf %0, %2\n"
- "efsctuf %1, %3\n"
- : "=r" (vc.wp[0]), "=r" (vc.wp[1])
- : "r" (vb.wp[0]), "r" (vb.wp[1]), "r" (0));
+ case EVFSCTSF:
+ if (SB0_c == FP_CLS_NAN) {
+ vc.wp[0] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ SB0_e += (func == EVFSCTSF ? 31 : 32);
+ FP_TO_INT_ROUND_S(vc.wp[0], SB0, 32,
+ (func == EVFSCTSF));
+ }
+ if (SB1_c == FP_CLS_NAN) {
+ vc.wp[1] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ SB1_e += (func == EVFSCTSF ? 31 : 32);
+ FP_TO_INT_ROUND_S(vc.wp[1], SB1, 32,
+ (func == EVFSCTSF));
+ }
goto update_regs;
case EVFSCTUI:
case EVFSCTSI:
+ if (SB0_c == FP_CLS_NAN) {
+ vc.wp[0] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ FP_TO_INT_ROUND_S(vc.wp[0], SB0, 32,
+ ((func & 0x3) != 0));
+ }
+ if (SB1_c == FP_CLS_NAN) {
+ vc.wp[1] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ FP_TO_INT_ROUND_S(vc.wp[1], SB1, 32,
+ ((func & 0x3) != 0));
+ }
+ goto update_regs;
+
case EVFSCTUIZ:
case EVFSCTSIZ:
- if (func & 0x4) {
- _FP_ROUND(1, SB0);
- _FP_ROUND(1, SB1);
+ if (SB0_c == FP_CLS_NAN) {
+ vc.wp[0] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
} else {
- _FP_ROUND_ZERO(1, SB0);
- _FP_ROUND_ZERO(1, SB1);
+ FP_TO_INT_S(vc.wp[0], SB0, 32,
+ ((func & 0x3) != 0));
+ }
+ if (SB1_c == FP_CLS_NAN) {
+ vc.wp[1] = 0;
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ } else {
+ FP_TO_INT_S(vc.wp[1], SB1, 32,
+ ((func & 0x3) != 0));
}
- FP_TO_INT_S(vc.wp[0], SB0, 32,
- (((func & 0x3) != 0) || SB0_s));
- FP_TO_INT_S(vc.wp[1], SB1, 32,
- (((func & 0x3) != 0) || SB1_s));
goto update_regs;
default:
@@ -630,9 +660,27 @@ update_ccr:
regs->ccr |= (IR << ((7 - ((speinsn >> 23) & 0x7)) << 2));
update_regs:
- __FPU_FPSCR &= ~FP_EX_MASK;
+ /*
+ * If the "invalid" exception sticky bit was set by the
+ * processor for non-finite input, but was not set before the
+ * instruction being emulated, clear it. Likewise for the
+ * "underflow" bit, which may have been set by the processor
+ * for exact underflow, not just inexact underflow when the
+ * flag should be set for IEEE 754 semantics. Other sticky
+ * exceptions will only be set by the processor when they are
+ * correct according to IEEE 754 semantics, and we must not
+ * clear sticky bits that were already set before the emulated
+ * instruction as they represent the user-visible sticky
+ * exception status. "inexact" traps to kernel are not
+ * required for IEEE semantics and are not enabled by default,
+ * so the "inexact" sticky bit may have been set by a previous
+ * instruction without the kernel being aware of it.
+ */
+ __FPU_FPSCR
+ &= ~(FP_EX_INVALID | FP_EX_UNDERFLOW) | current->thread.spefscr_last;
__FPU_FPSCR |= (FP_CUR_EXCEPTIONS & FP_EX_MASK);
mtspr(SPRN_SPEFSCR, __FPU_FPSCR);
+ current->thread.spefscr_last = __FPU_FPSCR;
current->thread.evr[fc] = vc.wp[0];
regs->gpr[fc] = vc.wp[1];
@@ -644,6 +692,23 @@ update_regs:
pr_debug("va: %08x %08x\n", va.wp[0], va.wp[1]);
pr_debug("vb: %08x %08x\n", vb.wp[0], vb.wp[1]);
+ if (current->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
+ if ((FP_CUR_EXCEPTIONS & FP_EX_DIVZERO)
+ && (current->thread.fpexc_mode & PR_FP_EXC_DIV))
+ return 1;
+ if ((FP_CUR_EXCEPTIONS & FP_EX_OVERFLOW)
+ && (current->thread.fpexc_mode & PR_FP_EXC_OVF))
+ return 1;
+ if ((FP_CUR_EXCEPTIONS & FP_EX_UNDERFLOW)
+ && (current->thread.fpexc_mode & PR_FP_EXC_UND))
+ return 1;
+ if ((FP_CUR_EXCEPTIONS & FP_EX_INEXACT)
+ && (current->thread.fpexc_mode & PR_FP_EXC_RES))
+ return 1;
+ if ((FP_CUR_EXCEPTIONS & FP_EX_INVALID)
+ && (current->thread.fpexc_mode & PR_FP_EXC_INV))
+ return 1;
+ }
return 0;
illegal:
@@ -662,21 +727,28 @@ int speround_handler(struct pt_regs *regs)
{
union dw_union fgpr;
int s_lo, s_hi;
- unsigned long speinsn, type, fc;
+ int lo_inexact, hi_inexact;
+ int fp_result;
+ unsigned long speinsn, type, fb, fc, fptype, func;
if (get_user(speinsn, (unsigned int __user *) regs->nip))
return -EFAULT;
if ((speinsn >> 26) != 4)
return -EINVAL; /* not an spe instruction */
- type = insn_type(speinsn & 0x7ff);
+ func = speinsn & 0x7ff;
+ type = insn_type(func);
if (type == XCR) return -ENOSYS;
__FPU_FPSCR = mfspr(SPRN_SPEFSCR);
pr_debug("speinsn:%08lx spefscr:%08lx\n", speinsn, __FPU_FPSCR);
+ fptype = (speinsn >> 5) & 0x7;
+
/* No need to round if the result is exact */
- if (!(__FPU_FPSCR & FP_EX_INEXACT))
+ lo_inexact = __FPU_FPSCR & (SPEFSCR_FG | SPEFSCR_FX);
+ hi_inexact = __FPU_FPSCR & (SPEFSCR_FGH | SPEFSCR_FXH);
+ if (!(lo_inexact || (hi_inexact && fptype == VCT)))
return 0;
fc = (speinsn >> 21) & 0x1f;
@@ -685,9 +757,68 @@ int speround_handler(struct pt_regs *regs)
fgpr.wp[0] = current->thread.evr[fc];
fgpr.wp[1] = regs->gpr[fc];
+ fb = (speinsn >> 11) & 0x1f;
+ switch (func) {
+ case EFSCTUIZ:
+ case EFSCTSIZ:
+ case EVFSCTUIZ:
+ case EVFSCTSIZ:
+ case EFDCTUIDZ:
+ case EFDCTSIDZ:
+ case EFDCTUIZ:
+ case EFDCTSIZ:
+ /*
+ * These instructions always round to zero,
+ * independent of the rounding mode.
+ */
+ return 0;
+
+ case EFSCTUI:
+ case EFSCTUF:
+ case EVFSCTUI:
+ case EVFSCTUF:
+ case EFDCTUI:
+ case EFDCTUF:
+ fp_result = 0;
+ s_lo = 0;
+ s_hi = 0;
+ break;
+
+ case EFSCTSI:
+ case EFSCTSF:
+ fp_result = 0;
+ /* Recover the sign of a zero result if possible. */
+ if (fgpr.wp[1] == 0)
+ s_lo = regs->gpr[fb] & SIGN_BIT_S;
+ break;
+
+ case EVFSCTSI:
+ case EVFSCTSF:
+ fp_result = 0;
+ /* Recover the sign of a zero result if possible. */
+ if (fgpr.wp[1] == 0)
+ s_lo = regs->gpr[fb] & SIGN_BIT_S;
+ if (fgpr.wp[0] == 0)
+ s_hi = current->thread.evr[fb] & SIGN_BIT_S;
+ break;
+
+ case EFDCTSI:
+ case EFDCTSF:
+ fp_result = 0;
+ s_hi = s_lo;
+ /* Recover the sign of a zero result if possible. */
+ if (fgpr.wp[1] == 0)
+ s_hi = current->thread.evr[fb] & SIGN_BIT_S;
+ break;
+
+ default:
+ fp_result = 1;
+ break;
+ }
+
pr_debug("round fgpr: %08x %08x\n", fgpr.wp[0], fgpr.wp[1]);
- switch ((speinsn >> 5) & 0x7) {
+ switch (fptype) {
/* Since SPE instructions on E500 core can handle round to nearest
 * and round toward zero in an IEEE-754 compliant way, we just need
* to handle round toward +Inf and round toward -Inf by software.
@@ -696,25 +827,52 @@ int speround_handler(struct pt_regs *regs)
if ((FP_ROUNDMODE) == FP_RND_PINF) {
if (!s_lo) fgpr.wp[1]++; /* Z > 0, choose Z1 */
} else { /* round to -Inf */
- if (s_lo) fgpr.wp[1]++; /* Z < 0, choose Z2 */
+ if (s_lo) {
+ if (fp_result)
+ fgpr.wp[1]++; /* Z < 0, choose Z2 */
+ else
+ fgpr.wp[1]--; /* Z < 0, choose Z2 */
+ }
}
break;
case DPFP:
if (FP_ROUNDMODE == FP_RND_PINF) {
- if (!s_hi) fgpr.dp[0]++; /* Z > 0, choose Z1 */
+ if (!s_hi) {
+ if (fp_result)
+ fgpr.dp[0]++; /* Z > 0, choose Z1 */
+ else
+ fgpr.wp[1]++; /* Z > 0, choose Z1 */
+ }
} else { /* round to -Inf */
- if (s_hi) fgpr.dp[0]++; /* Z < 0, choose Z2 */
+ if (s_hi) {
+ if (fp_result)
+ fgpr.dp[0]++; /* Z < 0, choose Z2 */
+ else
+ fgpr.wp[1]--; /* Z < 0, choose Z2 */
+ }
}
break;
case VCT:
if (FP_ROUNDMODE == FP_RND_PINF) {
- if (!s_lo) fgpr.wp[1]++; /* Z_low > 0, choose Z1 */
- if (!s_hi) fgpr.wp[0]++; /* Z_high word > 0, choose Z1 */
+ if (lo_inexact && !s_lo)
+ fgpr.wp[1]++; /* Z_low > 0, choose Z1 */
+ if (hi_inexact && !s_hi)
+ fgpr.wp[0]++; /* Z_high word > 0, choose Z1 */
} else { /* round to -Inf */
- if (s_lo) fgpr.wp[1]++; /* Z_low < 0, choose Z2 */
- if (s_hi) fgpr.wp[0]++; /* Z_high < 0, choose Z2 */
+ if (lo_inexact && s_lo) {
+ if (fp_result)
+ fgpr.wp[1]++; /* Z_low < 0, choose Z2 */
+ else
+ fgpr.wp[1]--; /* Z_low < 0, choose Z2 */
+ }
+ if (hi_inexact && s_hi) {
+ if (fp_result)
+ fgpr.wp[0]++; /* Z_high < 0, choose Z2 */
+ else
+ fgpr.wp[0]--; /* Z_high < 0, choose Z2 */
+ }
}
break;
@@ -727,6 +885,8 @@ int speround_handler(struct pt_regs *regs)
pr_debug(" to fgpr: %08x %08x\n", fgpr.wp[0], fgpr.wp[1]);
+ if (current->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
+ return (current->thread.fpexc_mode & PR_FP_EXC_RES) ? 1 : 0;
return 0;
}
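
The SPEFSCR handling in update_regs above hinges on one masking expression: only the INVALID and UNDERFLOW sticky bits that were not already set before the emulated instruction are cleared, and the exceptions raised by the emulation are then ORed back in. A minimal stand-alone C sketch of that expression follows; the parameter names are hypothetical stand-ins for the kernel's __FPU_FPSCR, spefscr_last and FP_CUR_EXCEPTIONS, not kernel interfaces.

/*
 * Sketch only: mirrors the masking done in update_regs. Bits in ex_invalid
 * and ex_underflow are dropped unless they were already sticky in
 * spefscr_last; every other sticky bit is preserved; the exceptions raised
 * by the emulated operation are merged in afterwards.
 */
static unsigned long update_spefscr_sketch(unsigned long spefscr,
					   unsigned long spefscr_last,
					   unsigned long cur_exceptions,
					   unsigned long ex_invalid,
					   unsigned long ex_underflow,
					   unsigned long ex_mask)
{
	spefscr &= ~(ex_invalid | ex_underflow) | spefscr_last;
	spefscr |= cur_exceptions & ex_mask;
	return spefscr;
}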
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 07ba45b0f07..94cd728166d 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -52,6 +52,7 @@
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/setup.h>
+#include <asm/paca.h>
#include "mmu_decl.h"
@@ -171,11 +172,10 @@ unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
return 1UL << camsize;
}
-unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx)
+static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
+ unsigned long ram, int max_cam_idx)
{
int i;
- unsigned long virt = PAGE_OFFSET;
- phys_addr_t phys = memstart_addr;
unsigned long amount_mapped = 0;
/* Calculate CAM values */
@@ -192,9 +192,23 @@ unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx)
}
tlbcam_index = i;
+#ifdef CONFIG_PPC64
+ get_paca()->tcd.esel_next = i;
+ get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
+ get_paca()->tcd.esel_first = i;
+#endif
+
return amount_mapped;
}
+unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx)
+{
+ unsigned long virt = PAGE_OFFSET;
+ phys_addr_t phys = memstart_addr;
+
+ return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx);
+}
+
#ifdef CONFIG_PPC32
#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
@@ -222,7 +236,9 @@ void __init adjust_total_lowmem(void)
/* adjust lowmem size to __max_low_memory */
ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);
+ i = switch_to_as1();
__max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM);
+ restore_to_as0(i, 0, 0, 1);
pr_info("Memory CAM mapping: ");
for (i = 0; i < tlbcam_index - 1; i++)
@@ -241,4 +257,62 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
/* 64M mapped initially according to head_fsl_booke.S */
memblock_set_current_limit(min_t(u64, limit, 0x04000000));
}
+
+#ifdef CONFIG_RELOCATABLE
+int __initdata is_second_reloc;
+notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
+{
+ unsigned long base = KERNELBASE;
+
+ kernstart_addr = start;
+ if (is_second_reloc) {
+ virt_phys_offset = PAGE_OFFSET - memstart_addr;
+ return;
+ }
+
+ /*
+ * Relocatable kernel support based on processing of dynamic
+ * relocation entries. Before we get the real memstart_addr,
+ * We will compute the virt_phys_offset like this:
+ * virt_phys_offset = stext.run - kernstart_addr
+ *
+ * stext.run = (KERNELBASE & ~0x3ffffff) +
+ * (kernstart_addr & 0x3ffffff)
+ * When we relocate, we have :
+ *
+ * (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff)
+ *
+ * hence:
+ * virt_phys_offset = (KERNELBASE & ~0x3ffffff) -
+ * (kernstart_addr & ~0x3ffffff)
+ *
+ */
+ start &= ~0x3ffffff;
+ base &= ~0x3ffffff;
+ virt_phys_offset = base - start;
+ early_get_first_memblock_info(__va(dt_ptr), NULL);
+ /*
+ * We now get the memstart_addr, then we should check if this
+ * address is the same as what the PAGE_OFFSET map to now. If
+ * not we have to change the map of PAGE_OFFSET to memstart_addr
+ * and do a second relocation.
+ */
+ if (start != memstart_addr) {
+ int n;
+ long offset = start - memstart_addr;
+
+ is_second_reloc = 1;
+ n = switch_to_as1();
+ /* map a 64M area for the second relocation */
+ if (memstart_addr > start)
+ map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM);
+ else
+ map_mem_in_cams_addr(start, PAGE_OFFSET + offset,
+ 0x4000000, CONFIG_LOWMEM_CAM_NUM);
+ restore_to_as0(n, offset, __va(dt_ptr), 1);
+ /* We should never reach here */
+ panic("Relocation error");
+ }
+}
+#endif
#endif
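
The relocate_init() comment above reduces the offset computation to masking with the 64M CAM granule: only the bits above the low 26 can differ between the link-time KERNELBASE and the run-time start address. A short worked example of that arithmetic; the two addresses are made-up values chosen purely to illustrate the masking.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* assumed values for illustration only */
	uint64_t kernelbase = 0xc000000000000000ULL;	/* link-time base */
	uint64_t kernstart  = 0x0000000023400000ULL;	/* load address   */
	uint64_t granule    = ~0x3ffffffULL;		/* 64M alignment  */

	/* virt_phys_offset = (KERNELBASE & ~0x3ffffff) -
	 *                    (kernstart_addr & ~0x3ffffff)            */
	uint64_t virt_phys_offset = (kernelbase & granule) -
				    (kernstart & granule);

	printf("virt_phys_offset = 0x%016llx\n",
	       (unsigned long long)virt_phys_offset);
	return 0;
}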
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index d3cbda62857..1136d26a95a 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -148,7 +148,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
andc r0,r30,r0 /* r0 = pte & ~r0 */
rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
- ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */
+ /*
+ * Always add "C" bit for perf. Memory coherence is always enabled
+ */
+ ori r3,r3,HPTE_R_C | HPTE_R_M
/* We eventually do the icache sync here (maybe inline that
* code rather than call a C function...)
@@ -457,7 +460,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
andc r0,r3,r0 /* r0 = pte & ~r0 */
rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
- ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */
+ /*
+ * Always add "C" bit for perf. Memory coherence is always enabled
+ */
+ ori r3,r3,HPTE_R_C | HPTE_R_M
/* We eventually do the icache sync here (maybe inline that
* code rather than call a C function...)
@@ -795,7 +801,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
andc r0,r30,r0 /* r0 = pte & ~r0 */
rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
- ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */
+ /*
+ * Always add "C" bit for perf. Memory coherence is always enabled
+ */
+ ori r3,r3,HPTE_R_C | HPTE_R_M
/* We eventually do the icache sync here (maybe inline that
* code rather than call a C function...)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 6176b3cdf57..d766d6ee33f 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -169,9 +169,10 @@ static unsigned long htab_convert_pte_flags(unsigned long pteflags)
if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
(pteflags & _PAGE_DIRTY)))
rflags |= 1;
-
- /* Always add C */
- return rflags | HPTE_R_C;
+ /*
+ * Always add "C" bit for perf. Memory coherence is always enabled
+ */
+ return rflags | HPTE_R_C | HPTE_R_M;
}
int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
@@ -206,6 +207,20 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
if (overlaps_kernel_text(vaddr, vaddr + step))
tprot &= ~HPTE_R_N;
+ /*
+ * If relocatable, check if it overlaps interrupt vectors that
+ * are copied down to real 0. For relocatable kernel
+ * (e.g. kdump case) we copy interrupt vectors down to real
+ * address 0. Mark that region as executable. This is
+ * because on p8 system with relocation on exception feature
+ * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence
+ * in order to execute the interrupt handlers in virtual
+ * mode the vector region need to be marked as executable.
+ */
+ if ((PHYSICAL_START > MEMORY_START) &&
+ overlaps_interrupt_vector_text(vaddr, vaddr + step))
+ tprot &= ~HPTE_R_N;
+
hash = hpt_hash(vpn, shift, ssize);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 34de9e0cdc3..826893fcb3a 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -127,7 +127,11 @@ repeat:
/* Add in WIMG bits */
rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
- _PAGE_COHERENT | _PAGE_GUARDED));
+ _PAGE_GUARDED));
+ /*
+ * enable the memory coherence always
+ */
+ rflags |= HPTE_R_M;
/* Insert into the hash table, primary slot */
slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c
index 74551b5e41e..5e4ee257390 100644
--- a/arch/powerpc/mm/hugetlbpage-book3e.c
+++ b/arch/powerpc/mm/hugetlbpage-book3e.c
@@ -8,6 +8,44 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC64
+static inline int tlb1_next(void)
+{
+ struct paca_struct *paca = get_paca();
+ struct tlb_core_data *tcd;
+ int this, next;
+
+ tcd = paca->tcd_ptr;
+ this = tcd->esel_next;
+
+ next = this + 1;
+ if (next >= tcd->esel_max)
+ next = tcd->esel_first;
+
+ tcd->esel_next = next;
+ return this;
+}
+#else
+static inline int tlb1_next(void)
+{
+ int index, ncams;
+
+ ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
+
+ index = __get_cpu_var(next_tlbcam_idx);
+
+ /* Just round-robin the entries and wrap when we hit the end */
+ if (unlikely(index == ncams - 1))
+ __get_cpu_var(next_tlbcam_idx) = tlbcam_index;
+ else
+ __get_cpu_var(next_tlbcam_idx)++;
+
+ return index;
+}
+#endif /* !PPC64 */
+#endif /* FSL */
+
static inline int mmu_get_tsize(int psize)
{
return mmu_psize_defs[psize].enc;
@@ -47,7 +85,7 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
struct mm_struct *mm;
#ifdef CONFIG_PPC_FSL_BOOK3E
- int index, ncams;
+ int index;
#endif
if (unlikely(is_kernel_addr(ea)))
@@ -77,18 +115,11 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
}
#ifdef CONFIG_PPC_FSL_BOOK3E
- ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
-
/* We have to use the CAM(TLB1) on FSL parts for hugepages */
- index = __get_cpu_var(next_tlbcam_idx);
+ index = tlb1_next();
mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));
-
- /* Just round-robin the entries and wrap when we hit the end */
- if (unlikely(index == ncams - 1))
- __get_cpu_var(next_tlbcam_idx) = tlbcam_index;
- else
- __get_cpu_var(next_tlbcam_idx)++;
#endif
+
mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
mas2 = ea & ~((1UL << shift) - 1);
mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
@@ -103,7 +134,8 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
if (mmu_has_feature(MMU_FTR_USE_PAIRED_MAS)) {
mtspr(SPRN_MAS7_MAS3, mas7_3);
} else {
- mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
+ if (mmu_has_feature(MMU_FTR_BIG_PHYS))
+ mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
}
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index 0b7fb676101..a5bcf930119 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -99,6 +99,10 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
/* Add in WIMG bits */
rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
_PAGE_COHERENT | _PAGE_GUARDED));
+ /*
+ * enable the memory coherence always
+ */
+ rflags |= HPTE_R_M;
slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
mmu_psize, ssize);
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 90bb6d9409b..eb923654ba8 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -472,12 +472,13 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
struct hugepd_freelist **batchp;
- batchp = &__get_cpu_var(hugepd_freelist_cur);
+ batchp = &get_cpu_var(hugepd_freelist_cur);
if (atomic_read(&tlb->mm->mm_users) < 2 ||
cpumask_equal(mm_cpumask(tlb->mm),
cpumask_of(smp_processor_id()))) {
kmem_cache_free(hugepte_cache, hugepte);
+ put_cpu_var(hugepd_freelist_cur);
return;
}
@@ -491,6 +492,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
*batchp = NULL;
}
+ put_cpu_var(hugepd_freelist_cur);
}
#endif
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 3fa93dc7fe7..4b5cd5c2594 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -209,7 +209,7 @@ void __init do_init_bootmem(void)
/* Place all memblock_regions in the same node and merge contiguous
* memblock_regions
*/
- memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
+ memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
/* Add all physical memory to the bootmem map, mark each area
* present.
@@ -307,6 +307,12 @@ static void __init register_page_bootmem_info(void)
void __init mem_init(void)
{
+ /*
+ * book3s is limited to 16 page sizes due to encoding this in
+ * a 4-bit field for slices.
+ */
+ BUILD_BUG_ON(MMU_PAGE_COUNT > 16);
+
#ifdef CONFIG_SWIOTLB
swiotlb_init(0);
#endif
@@ -507,7 +513,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
* System memory should not be in /proc/iomem but various tools expect it
* (eg kdump).
*/
-static int add_system_ram_resources(void)
+static int __init add_system_ram_resources(void)
{
struct memblock_region *reg;
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 83eb5d5f53d..9615d82919b 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -148,6 +148,8 @@ extern unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(unsigned long top);
extern void adjust_total_lowmem(void);
+extern int switch_to_as1(void);
+extern void restore_to_as0(int esel, int offset, void *dt_ptr, int bootcpu);
#endif
extern void loadcam_entry(unsigned int index);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 078d3e00a61..30a42e24bf1 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -31,6 +31,8 @@
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
+#include <asm/cputhreads.h>
+#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
@@ -152,9 +154,22 @@ static void __init get_node_active_region(unsigned long pfn,
}
}
-static void map_cpu_to_node(int cpu, int node)
+static void reset_numa_cpu_lookup_table(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ numa_cpu_lookup_table[cpu] = -1;
+}
+
+static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
numa_cpu_lookup_table[cpu] = node;
+}
+
+static void map_cpu_to_node(int cpu, int node)
+{
+ update_numa_cpu_lookup_table(cpu, node);
dbg("adding cpu %d to node %d\n", cpu, node);
@@ -522,11 +537,24 @@ static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
*/
static int numa_setup_cpu(unsigned long lcpu)
{
- int nid = 0;
- struct device_node *cpu = of_get_cpu_node(lcpu, NULL);
+ int nid;
+ struct device_node *cpu;
+
+ /*
+ * If a valid cpu-to-node mapping is already available, use it
+ * directly instead of querying the firmware, since it represents
+ * the most recent mapping notified to us by the platform (eg: VPHN).
+ */
+ if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
+ map_cpu_to_node(lcpu, nid);
+ return nid;
+ }
+
+ cpu = of_get_cpu_node(lcpu, NULL);
if (!cpu) {
WARN_ON(1);
+ nid = 0;
goto out;
}
@@ -542,16 +570,38 @@ out:
return nid;
}
+static void verify_cpu_node_mapping(int cpu, int node)
+{
+ int base, sibling, i;
+
+ /* Verify that all the threads in the core belong to the same node */
+ base = cpu_first_thread_sibling(cpu);
+
+ for (i = 0; i < threads_per_core; i++) {
+ sibling = base + i;
+
+ if (sibling == cpu || cpu_is_offline(sibling))
+ continue;
+
+ if (cpu_to_node(sibling) != node) {
+ WARN(1, "CPU thread siblings %d and %d don't belong"
+ " to the same node!\n", cpu, sibling);
+ break;
+ }
+ }
+}
+
static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
unsigned long lcpu = (unsigned long)hcpu;
- int ret = NOTIFY_DONE;
+ int ret = NOTIFY_DONE, nid;
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- numa_setup_cpu(lcpu);
+ nid = numa_setup_cpu(lcpu);
+ verify_cpu_node_mapping((int)lcpu, nid);
ret = NOTIFY_OK;
break;
#ifdef CONFIG_HOTPLUG_CPU
@@ -670,7 +720,8 @@ static void __init parse_drconf_memory(struct device_node *memory)
node_set_online(nid);
sz = numa_enforce_memory_limit(base, size);
if (sz)
- memblock_set_node(base, sz, nid);
+ memblock_set_node(base, sz,
+ &memblock.memory, nid);
} while (--ranges);
}
}
@@ -760,7 +811,7 @@ new_range:
continue;
}
- memblock_set_node(start, size, nid);
+ memblock_set_node(start, size, &memblock.memory, nid);
if (--ranges)
goto new_range;
@@ -797,7 +848,8 @@ static void __init setup_nonnuma(void)
fake_numa_create_new_node(end_pfn, &nid);
memblock_set_node(PFN_PHYS(start_pfn),
- PFN_PHYS(end_pfn - start_pfn), nid);
+ PFN_PHYS(end_pfn - start_pfn),
+ &memblock.memory, nid);
node_set_online(nid);
}
}
@@ -1067,6 +1119,7 @@ void __init do_init_bootmem(void)
*/
setup_node_to_cpumask_map();
+ reset_numa_cpu_lookup_table();
register_cpu_notifier(&ppc64_numa_nb);
cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
(void *)(unsigned long)boot_cpuid);
@@ -1445,6 +1498,33 @@ static int update_cpu_topology(void *data)
return 0;
}
+static int update_lookup_table(void *data)
+{
+ struct topology_update_data *update;
+
+ if (!data)
+ return -EINVAL;
+
+ /*
+ * Upon topology update, the numa-cpu lookup table needs to be updated
+ * for all threads in the core, including offline CPUs, to ensure that
+ * future hotplug operations respect the cpu-to-node associativity
+ * properly.
+ */
+ for (update = data; update; update = update->next) {
+ int nid, base, j;
+
+ nid = update->new_nid;
+ base = cpu_first_thread_sibling(update->cpu);
+
+ for (j = 0; j < threads_per_core; j++) {
+ update_numa_cpu_lookup_table(base + j, nid);
+ }
+ }
+
+ return 0;
+}
+
/*
* Update the node maps and sysfs entries for each cpu whose home node
* has changed. Returns 1 when the topology has changed, and 0 otherwise.
@@ -1513,6 +1593,14 @@ int arch_update_cpu_topology(void)
stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
+ /*
+ * Update the numa-cpu lookup table with the new mappings, even for
+ * offline CPUs. It is best to perform this update from the stop-
+ * machine context.
+ */
+ stop_machine(update_lookup_table, &updates[0],
+ cpumask_of(raw_smp_processor_id()));
+
for (ud = &updates[0]; ud; ud = ud->next) {
unregister_cpu_under_node(ud->cpu, ud->old_nid);
register_cpu_under_node(ud->cpu, ud->new_nid);
@@ -1697,7 +1785,7 @@ static const struct file_operations topology_ops = {
static int topology_update_init(void)
{
start_topology_update();
- proc_create("powerpc/topology_updates", 644, NULL, &topology_ops);
+ proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops);
return 0;
}
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 841e0d00863..c695943a513 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -24,7 +24,6 @@
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
-#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
@@ -174,7 +173,7 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
pte_t pte)
{
#ifdef CONFIG_DEBUG_VM
- WARN_ON(pte_present(*ptep));
+ WARN_ON(pte_val(*ptep) & _PAGE_PRESENT);
#endif
/* Note: mm->context.id might not yet have been assigned as
* this context might not have been activated yet when this
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 5b960171528..343a87fa78b 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -299,6 +299,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
__pgprot(flags)));
}
+ smp_wmb();
return err;
}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 9d95786aa80..62bf5e8e78d 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -33,7 +33,6 @@
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
-#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
@@ -153,6 +152,18 @@ int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
}
#endif /* !CONFIG_PPC_MMU_NOHASH */
}
+
+#ifdef CONFIG_PPC_BOOK3E_64
+ /*
+ * With hardware tablewalk, a sync is needed to ensure that
+ * subsequent accesses see the PTE we just wrote. Unlike userspace
+ * mappings, we can't tolerate spurious faults, so make sure
+ * the new PTE will be seen the first time.
+ */
+ mb();
+#else
+ smp_wmb();
+#endif
return 0;
}
@@ -499,7 +510,8 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
}
unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmdp, unsigned long clr)
+ pmd_t *pmdp, unsigned long clr,
+ unsigned long set)
{
unsigned long old, tmp;
@@ -515,14 +527,15 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
andi. %1,%0,%6\n\
bne- 1b \n\
andc %1,%0,%4 \n\
+ or %1,%1,%7\n\
stdcx. %1,0,%3 \n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
- : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY)
+ : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set)
: "cc" );
#else
old = pmd_val(*pmdp);
- *pmdp = __pmd(old & ~clr);
+ *pmdp = __pmd((old & ~clr) | set);
#endif
if (old & _PAGE_HASHPTE)
hpte_do_hugepage_flush(mm, addr, pmdp);
@@ -687,7 +700,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
- WARN_ON(!pmd_none(*pmdp));
+ WARN_ON(pmd_val(*pmdp) & _PAGE_PRESENT);
assert_spin_locked(&mm->page_table_lock);
WARN_ON(!pmd_trans_huge(pmd));
#endif
@@ -697,7 +710,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
- pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT);
+ pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
}
/*
@@ -824,7 +837,7 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm,
unsigned long old;
pgtable_t *pgtable_slot;
- old = pmd_hugepage_update(mm, addr, pmdp, ~0UL);
+ old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
old_pmd = __pmd(old);
/*
* We have pmd == none and we are holding page_table_lock.
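
pmd_hugepage_update() above extends the reserve/conditional-store loop so a caller can clear and set bits in a single atomic pass. Outside of assembly the same pattern is usually written as a compare-and-swap retry loop; below is a hedged C11 sketch of that idea only, ignoring the _PAGE_BUSY wait and the hash flush that the kernel version also performs.

#include <stdatomic.h>

static unsigned long hugepage_update_sketch(_Atomic unsigned long *pmdp,
					    unsigned long clr,
					    unsigned long set)
{
	unsigned long old = atomic_load(pmdp);

	/*
	 * Retry until (old & ~clr) | set lands without interference;
	 * 'old' is reloaded automatically when the exchange fails.
	 */
	while (!atomic_compare_exchange_weak(pmdp, &old, (old & ~clr) | set))
		;

	return old;	/* previous value, as pmd_hugepage_update() returns */
}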
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 7ce9cf3b698..b0c75cc15ef 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -408,7 +408,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
if (fixed && (addr & ((1ul << pshift) - 1)))
return -EINVAL;
if (fixed && addr > (mm->task_size - len))
- return -EINVAL;
+ return -ENOMEM;
/* If hint, make sure it matches our alignment restrictions */
if (!fixed && addr) {
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index a770df2dae7..6c0b1f5f8d2 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -78,7 +78,7 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
arch_enter_lazy_mmu_mode();
for (; npages > 0; --npages) {
- pte_update(mm, addr, pte, 0, 0);
+ pte_update(mm, addr, pte, 0, 0, 0);
addr += PAGE_SIZE;
++pte;
}
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 36e44b4260e..c99f6510a0b 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -23,7 +23,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index b4113bf8635..c95eb323e9a 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -136,7 +136,7 @@ BEGIN_MMU_FTR_SECTION
*/
PPC_TLBSRX_DOT(0,R16)
ldx r14,r14,r15 /* grab pgd entry */
- beq normal_tlb_miss_done /* tlb exists already, bail */
+ beq tlb_miss_done_bolted /* tlb exists already, bail */
MMU_FTR_SECTION_ELSE
ldx r14,r14,r15 /* grab pgd entry */
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
@@ -192,6 +192,7 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
mtspr SPRN_MAS7_MAS3,r15
tlbwe
+tlb_miss_done_bolted:
TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
tlb_epilog_bolted
rfi
@@ -239,6 +240,178 @@ itlb_miss_fault_bolted:
beq tlb_miss_common_bolted
b itlb_miss_kernel_bolted
+#ifdef CONFIG_PPC_FSL_BOOK3E
+/*
+ * TLB miss handling for e6500 and derivatives, using hardware tablewalk.
+ *
+ * Linear mapping is bolted: no virtual page table or nested TLB misses
+ * Indirect entries in TLB1, hardware loads resulting direct entries
+ * into TLB0
+ * No HES or NV hint on TLB1, so we need to do software round-robin
+ * No tlbsrx. so we need a spinlock, and we have to deal
+ * with MAS-damage caused by tlbsx
+ * 4K pages only
+ */
+
+ START_EXCEPTION(instruction_tlb_miss_e6500)
+ tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0
+
+ ld r11,PACA_TCD_PTR(r13)
+ srdi. r15,r16,60 /* get region */
+ ori r16,r16,1
+
+ TLB_MISS_STATS_SAVE_INFO_BOLTED
+ bne tlb_miss_kernel_e6500 /* user/kernel test */
+
+ b tlb_miss_common_e6500
+
+ START_EXCEPTION(data_tlb_miss_e6500)
+ tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR
+
+ ld r11,PACA_TCD_PTR(r13)
+ srdi. r15,r16,60 /* get region */
+ rldicr r16,r16,0,62
+
+ TLB_MISS_STATS_SAVE_INFO_BOLTED
+ bne tlb_miss_kernel_e6500 /* user vs kernel check */
+
+/*
+ * This is the guts of the TLB miss handler for e6500 and derivatives.
+ * We are entered with:
+ *
+ * r16 = page of faulting address (low bit 0 if data, 1 if instruction)
+ * r15 = crap (free to use)
+ * r14 = page table base
+ * r13 = PACA
+ * r11 = tlb_per_core ptr
+ * r10 = crap (free to use)
+ */
+tlb_miss_common_e6500:
+ /*
+ * Search if we already have an indirect entry for that virtual
+ * address, and if we do, bail out.
+ *
+ * MAS6:IND should be already set based on MAS4
+ */
+ addi r10,r11,TCD_LOCK
+1: lbarx r15,0,r10
+ cmpdi r15,0
+ bne 2f
+ li r15,1
+ stbcx. r15,0,r10
+ bne 1b
+ .subsection 1
+2: lbz r15,0(r10)
+ cmpdi r15,0
+ bne 2b
+ b 1b
+ .previous
+
+ mfspr r15,SPRN_MAS2
+
+ tlbsx 0,r16
+ mfspr r10,SPRN_MAS1
+ andis. r10,r10,MAS1_VALID@h
+ bne tlb_miss_done_e6500
+
+ /* Undo MAS-damage from the tlbsx */
+ mfspr r10,SPRN_MAS1
+ oris r10,r10,MAS1_VALID@h
+ mtspr SPRN_MAS1,r10
+ mtspr SPRN_MAS2,r15
+
+ /* Now, we need to walk the page tables. First check if we are in
+ * range.
+ */
+ rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
+ bne- tlb_miss_fault_e6500
+
+ rldicl r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
+ cmpldi cr0,r14,0
+ clrrdi r15,r15,3
+ beq- tlb_miss_fault_e6500 /* No PGDIR, bail */
+ ldx r14,r14,r15 /* grab pgd entry */
+
+ rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
+ clrrdi r15,r15,3
+ cmpdi cr0,r14,0
+ bge tlb_miss_fault_e6500 /* Bad pgd entry or hugepage; bail */
+ ldx r14,r14,r15 /* grab pud entry */
+
+ rldicl r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
+ clrrdi r15,r15,3
+ cmpdi cr0,r14,0
+ bge tlb_miss_fault_e6500
+ ldx r14,r14,r15 /* Grab pmd entry */
+
+ mfspr r10,SPRN_MAS0
+ cmpdi cr0,r14,0
+ bge tlb_miss_fault_e6500
+
+ /* Now we build the MAS for a 2M indirect page:
+ *
+ * MAS 0 : ESEL needs to be filled by software round-robin
+ * MAS 1 : Fully set up
+ * - PID already updated by caller if necessary
+ * - TSIZE for now is base ind page size always
+ * - TID already cleared if necessary
+ * MAS 2 : Default not 2M-aligned, need to be redone
+ * MAS 3+7 : Needs to be done
+ */
+
+ ori r14,r14,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
+ mtspr SPRN_MAS7_MAS3,r14
+
+ clrrdi r15,r16,21 /* make EA 2M-aligned */
+ mtspr SPRN_MAS2,r15
+
+ lbz r15,TCD_ESEL_NEXT(r11)
+ lbz r16,TCD_ESEL_MAX(r11)
+ lbz r14,TCD_ESEL_FIRST(r11)
+ rlwimi r10,r15,16,0x00ff0000 /* insert esel_next into MAS0 */
+ addi r15,r15,1 /* increment esel_next */
+ mtspr SPRN_MAS0,r10
+ cmpw r15,r16
+ iseleq r15,r14,r15 /* if next == last use first */
+ stb r15,TCD_ESEL_NEXT(r11)
+
+ tlbwe
+
+tlb_miss_done_e6500:
+ .macro tlb_unlock_e6500
+ li r15,0
+ isync
+ stb r15,TCD_LOCK(r11)
+ .endm
+
+ tlb_unlock_e6500
+ TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
+ tlb_epilog_bolted
+ rfi
+
+tlb_miss_kernel_e6500:
+ mfspr r10,SPRN_MAS1
+ ld r14,PACA_KERNELPGD(r13)
+ cmpldi cr0,r15,8 /* Check for vmalloc region */
+ rlwinm r10,r10,0,16,1 /* Clear TID */
+ mtspr SPRN_MAS1,r10
+ beq+ tlb_miss_common_e6500
+
+tlb_miss_fault_e6500:
+ tlb_unlock_e6500
+ /* We need to check if it was an instruction miss */
+ andi. r16,r16,1
+ bne itlb_miss_fault_e6500
+dtlb_miss_fault_e6500:
+ TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
+ tlb_epilog_bolted
+ b exc_data_storage_book3e
+itlb_miss_fault_e6500:
+ TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
+ tlb_epilog_bolted
+ b exc_instruction_storage_book3e
+#endif /* CONFIG_PPC_FSL_BOOK3E */
+
/**********************************************************************
* *
* TLB miss handling for Book3E with TLB reservation and HES support *
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 358d7430313..b37a58e1c92 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -43,6 +43,7 @@
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include <asm/hugetlb.h>
+#include <asm/paca.h>
#include "mmu_decl.h"
@@ -58,6 +59,10 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
.shift = 12,
.enc = BOOK3E_PAGESZ_4K,
},
+ [MMU_PAGE_2M] = {
+ .shift = 21,
+ .enc = BOOK3E_PAGESZ_2M,
+ },
[MMU_PAGE_4M] = {
.shift = 22,
.enc = BOOK3E_PAGESZ_4M,
@@ -136,7 +141,7 @@ static inline int mmu_get_tsize(int psize)
int mmu_linear_psize; /* Page size used for the linear mapping */
int mmu_pte_psize; /* Page size used for PTE pages */
int mmu_vmemmap_psize; /* Page size used for the virtual mem map */
-int book3e_htw_enabled; /* Is HW tablewalk enabled ? */
+int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */
unsigned long linear_map_top; /* Top of linear mapping */
#endif /* CONFIG_PPC64 */
@@ -377,7 +382,7 @@ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
int tsize = mmu_psize_defs[mmu_pte_psize].enc;
- if (book3e_htw_enabled) {
+ if (book3e_htw_mode != PPC_HTW_NONE) {
unsigned long start = address & PMD_MASK;
unsigned long end = address + PMD_SIZE;
unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
@@ -430,7 +435,7 @@ static void setup_page_sizes(void)
def = &mmu_psize_defs[psize];
shift = def->shift;
- if (shift == 0)
+ if (shift == 0 || shift & 1)
continue;
/* adjust to be in terms of 4^shift Kb */
@@ -440,21 +445,40 @@ static void setup_page_sizes(void)
def->flags |= MMU_PAGE_SIZE_DIRECT;
}
- goto no_indirect;
+ goto out;
}
if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
- u32 tlb1ps = mfspr(SPRN_TLB1PS);
+ u32 tlb1cfg, tlb1ps;
+
+ tlb0cfg = mfspr(SPRN_TLB0CFG);
+ tlb1cfg = mfspr(SPRN_TLB1CFG);
+ tlb1ps = mfspr(SPRN_TLB1PS);
+ eptcfg = mfspr(SPRN_EPTCFG);
+
+ if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
+ book3e_htw_mode = PPC_HTW_E6500;
+
+ /*
+ * We expect 4K subpage size and unrestricted indirect size.
+ * The lack of a restriction on indirect size is a Freescale
+ * extension, indicated by PSn = 0 but SPSn != 0.
+ */
+ if (eptcfg != 2)
+ book3e_htw_mode = PPC_HTW_NONE;
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
struct mmu_psize_def *def = &mmu_psize_defs[psize];
if (tlb1ps & (1U << (def->shift - 10))) {
def->flags |= MMU_PAGE_SIZE_DIRECT;
+
+ if (book3e_htw_mode && psize == MMU_PAGE_2M)
+ def->flags |= MMU_PAGE_SIZE_INDIRECT;
}
}
- goto no_indirect;
+ goto out;
}
#endif
@@ -471,8 +495,11 @@ static void setup_page_sizes(void)
}
/* Indirect page sizes supported ? */
- if ((tlb0cfg & TLBnCFG_IND) == 0)
- goto no_indirect;
+ if ((tlb0cfg & TLBnCFG_IND) == 0 ||
+ (tlb0cfg & TLBnCFG_PT) == 0)
+ goto out;
+
+ book3e_htw_mode = PPC_HTW_IBM;
/* Now, we only deal with one IND page size for each
* direct size. Hopefully all implementations today are
@@ -497,8 +524,8 @@ static void setup_page_sizes(void)
def->ind = ps + 10;
}
}
- no_indirect:
+out:
/* Cleanup array and print summary */
pr_info("MMU: Supported page sizes\n");
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
@@ -518,44 +545,27 @@ static void setup_page_sizes(void)
}
}
-static void __patch_exception(int exc, unsigned long addr)
-{
- extern unsigned int interrupt_base_book3e;
- unsigned int *ibase = &interrupt_base_book3e;
-
- /* Our exceptions vectors start with a NOP and -then- a branch
- * to deal with single stepping from userspace which stops on
- * the second instruction. Thus we need to patch the second
- * instruction of the exception, not the first one
- */
-
- patch_branch(ibase + (exc / 4) + 1, addr, 0);
-}
-
-#define patch_exception(exc, name) do { \
- extern unsigned int name; \
- __patch_exception((exc), (unsigned long)&name); \
-} while (0)
-
static void setup_mmu_htw(void)
{
- /* Check if HW tablewalk is present, and if yes, enable it by:
- *
- * - patching the TLB miss handlers to branch to the
- * one dedicates to it
- *
- * - setting the global book3e_htw_enabled
- */
- unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
+ /*
+ * If we want to use HW tablewalk, enable it by patching the TLB miss
+ * handlers to branch to the one dedicated to it.
+ */
- if ((tlb0cfg & TLBnCFG_IND) &&
- (tlb0cfg & TLBnCFG_PT)) {
+ switch (book3e_htw_mode) {
+ case PPC_HTW_IBM:
patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
- book3e_htw_enabled = 1;
+ break;
+#ifdef CONFIG_PPC_FSL_BOOK3E
+ case PPC_HTW_E6500:
+ patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
+ patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
+ break;
+#endif
}
pr_info("MMU: Book3E HW tablewalk %s\n",
- book3e_htw_enabled ? "enabled" : "not supported");
+ book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}
/*
@@ -595,8 +605,16 @@ static void __early_init_mmu(int boot_cpu)
/* Set MAS4 based on page table setting */
mas4 = 0x4 << MAS4_WIMGED_SHIFT;
- if (book3e_htw_enabled) {
- mas4 |= mas4 | MAS4_INDD;
+ switch (book3e_htw_mode) {
+ case PPC_HTW_E6500:
+ mas4 |= MAS4_INDD;
+ mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
+ mas4 |= MAS4_TLBSELD(1);
+ mmu_pte_psize = MMU_PAGE_2M;
+ break;
+
+ case PPC_HTW_IBM:
+ mas4 |= MAS4_INDD;
#ifdef CONFIG_PPC_64K_PAGES
mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
mmu_pte_psize = MMU_PAGE_256M;
@@ -604,13 +622,16 @@ static void __early_init_mmu(int boot_cpu)
mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
mmu_pte_psize = MMU_PAGE_1M;
#endif
- } else {
+ break;
+
+ case PPC_HTW_NONE:
#ifdef CONFIG_PPC_64K_PAGES
mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
#else
mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
#endif
mmu_pte_psize = mmu_virtual_psize;
+ break;
}
mtspr(SPRN_MAS4, mas4);
@@ -630,8 +651,11 @@ static void __early_init_mmu(int boot_cpu)
/* limit memory so we dont have linear faults */
memblock_enforce_memory_limit(linear_map_top);
- patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
- patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e);
+ if (book3e_htw_mode == PPC_HTW_NONE) {
+ patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
+ patch_exception(0x1e0,
+ exc_instruction_tlb_miss_bolted_book3e);
+ }
}
#endif
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index 626ad081639..43ff3c797fb 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -402,7 +402,9 @@ _GLOBAL(set_context)
* Load TLBCAM[index] entry in to the L2 CAM MMU
*/
_GLOBAL(loadcam_entry)
- LOAD_REG_ADDR(r4, TLBCAM)
+ mflr r5
+ LOAD_REG_ADDR_PIC(r4, TLBCAM)
+ mtlr r5
mulli r5,r3,TLBCAM_SIZE
add r3,r5,r4
lwz r4,TLBCAM_MAS0(r3)
diff --git a/arch/powerpc/oprofile/op_model_7450.c b/arch/powerpc/oprofile/op_model_7450.c
index ff617246d12..d29b6e4e5e7 100644
--- a/arch/powerpc/oprofile/op_model_7450.c
+++ b/arch/powerpc/oprofile/op_model_7450.c
@@ -16,7 +16,6 @@
*/
#include <linux/oprofile.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index b9589c19ccd..1f0ebdeea5f 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -16,7 +16,6 @@
#include <linux/cpufreq.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/oprofile.h>
diff --git a/arch/powerpc/oprofile/op_model_fsl_emb.c b/arch/powerpc/oprofile/op_model_fsl_emb.c
index 2a82d3ed464..14cf86fddda 100644
--- a/arch/powerpc/oprofile/op_model_fsl_emb.c
+++ b/arch/powerpc/oprofile/op_model_fsl_emb.c
@@ -14,7 +14,6 @@
*/
#include <linux/oprofile.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
diff --git a/arch/powerpc/oprofile/op_model_pa6t.c b/arch/powerpc/oprofile/op_model_pa6t.c
index 42f778dff91..a114a7c22d4 100644
--- a/arch/powerpc/oprofile/op_model_pa6t.c
+++ b/arch/powerpc/oprofile/op_model_pa6t.c
@@ -22,7 +22,6 @@
*/
#include <linux/oprofile.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/processor.h>
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index f444b94935f..962fe7b3e3f 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -10,7 +10,6 @@
*/
#include <linux/oprofile.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
diff --git a/arch/powerpc/oprofile/op_model_rs64.c b/arch/powerpc/oprofile/op_model_rs64.c
index 9b801b8c8c5..7e5b8ed3a1b 100644
--- a/arch/powerpc/oprofile/op_model_rs64.c
+++ b/arch/powerpc/oprofile/op_model_rs64.c
@@ -8,7 +8,6 @@
*/
#include <linux/oprofile.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 29b89e863d7..67cf22083f4 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1147,6 +1147,9 @@ static void power_pmu_enable(struct pmu *pmu)
mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
mb();
+ if (cpuhw->bhrb_users)
+ ppmu->config_bhrb(cpuhw->bhrb_filter);
+
write_mmcr0(cpuhw, mmcr0);
/*
@@ -1158,8 +1161,6 @@ static void power_pmu_enable(struct pmu *pmu)
}
out:
- if (cpuhw->bhrb_users)
- ppmu->config_bhrb(cpuhw->bhrb_filter);
local_irq_restore(flags);
}
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index a3f7abd2f13..96cee20dcd3 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -25,6 +25,37 @@
#define PM_BRU_FIN 0x10068
#define PM_BR_MPRED_CMPL 0x400f6
+/* All L1 D cache load references counted at finish, gated by reject */
+#define PM_LD_REF_L1 0x100ee
+/* Load Missed L1 */
+#define PM_LD_MISS_L1 0x3e054
+/* Store Missed L1 */
+#define PM_ST_MISS_L1 0x300f0
+/* L1 cache data prefetches */
+#define PM_L1_PREF 0x0d8b8
+/* Instruction fetches from L1 */
+#define PM_INST_FROM_L1 0x04080
+/* Demand iCache Miss */
+#define PM_L1_ICACHE_MISS 0x200fd
+/* Instruction Demand sectors written into IL1 */
+#define PM_L1_DEMAND_WRITE 0x0408c
+/* Instruction prefetch written into IL1 */
+#define PM_IC_PREF_WRITE 0x0408e
+/* The data cache was reloaded from local core's L3 due to a demand load */
+#define PM_DATA_FROM_L3 0x4c042
+/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
+#define PM_DATA_FROM_L3MISS 0x300fe
+/* All successful D-side store dispatches for this thread */
+#define PM_L2_ST 0x17080
+/* All successful D-side store dispatches for this thread that were L2 Miss */
+#define PM_L2_ST_MISS 0x17082
+/* Total HW L3 prefetches(Load+store) */
+#define PM_L3_PREF_ALL 0x4e052
+/* Data PTEG reload */
+#define PM_DTLB_MISS 0x300fc
+/* ITLB Reloaded */
+#define PM_ITLB_MISS 0x400fc
+
/*
* Raw event encoding for POWER8:
@@ -557,6 +588,8 @@ static int power8_generic_events[] = {
[PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN,
[PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
+ [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
};
static u64 power8_bhrb_filter_map(u64 branch_sample_type)
@@ -596,6 +629,116 @@ static void power8_config_bhrb(u64 pmu_bhrb_filter)
mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
+ [ C(RESULT_MISS) ] = PM_LD_MISS_L1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L1_PREF,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(L1I) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
+ [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(LL) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
+ [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L2_ST,
+ [ C(RESULT_MISS) ] = PM_L2_ST_MISS,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_DTLB_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ITLB_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_BRU_FIN,
+ [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+#undef C
+
static struct power_pmu power8_pmu = {
.name = "POWER8",
.n_counter = 6,
@@ -611,6 +754,7 @@ static struct power_pmu power8_pmu = {
.flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
.n_generic = ARRAY_SIZE(power8_generic_events),
.generic_events = power8_generic_events,
+ .cache_events = &power8_cache_events,
.attr_groups = power8_pmu_attr_groups,
.bhrb_nr = 32,
};
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index fc9c1cbfcb1..5aa3f4b5332 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -1,9 +1,9 @@
config PPC_MPC512x
bool "512x-based boards"
depends on 6xx
+ select COMMON_CLK
select FSL_SOC
select IPIC
- select PPC_CLOCK
select PPC_PCI_CHOICE
select FSL_PCI if PCI
select ARCH_WANT_OPTIONAL_GPIOLIB
diff --git a/arch/powerpc/platforms/512x/Makefile b/arch/powerpc/platforms/512x/Makefile
index 72fb9340e09..01693121a2b 100644
--- a/arch/powerpc/platforms/512x/Makefile
+++ b/arch/powerpc/platforms/512x/Makefile
@@ -1,7 +1,8 @@
#
# Makefile for the Freescale PowerPC 512x linux kernel.
#
-obj-y += clock.o mpc512x_shared.o
+obj-$(CONFIG_COMMON_CLK) += clock-commonclk.o
+obj-y += mpc512x_shared.o
obj-$(CONFIG_MPC5121_ADS) += mpc5121_ads.o mpc5121_ads_cpld.o
obj-$(CONFIG_MPC512x_GENERIC) += mpc512x_generic.o
obj-$(CONFIG_PDM360NG) += pdm360ng.o
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
new file mode 100644
index 00000000000..6eb614a271f
--- /dev/null
+++ b/arch/powerpc/platforms/512x/clock-commonclk.c
@@ -0,0 +1,1221 @@
+/*
+ * Copyright (C) 2013 DENX Software Engineering
+ *
+ * Gerhard Sittig, <gsi@denx.de>
+ *
+ * common clock driver support for the MPC512x platform
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <asm/mpc5121.h>
+#include <dt-bindings/clock/mpc512x-clock.h>
+
+#include "mpc512x.h" /* our public mpc5121_clk_init() API */
+
+/* helpers to keep the MCLK intermediates "somewhere" in our table */
+enum {
+ MCLK_IDX_MUX0,
+ MCLK_IDX_EN0,
+ MCLK_IDX_DIV0,
+ MCLK_MAX_IDX,
+};
+
+#define NR_PSCS 12
+#define NR_MSCANS 4
+#define NR_SPDIFS 1
+#define NR_OUTCLK 4
+#define NR_MCLKS (NR_PSCS + NR_MSCANS + NR_SPDIFS + NR_OUTCLK)
+
+/* extend the public set of clocks by adding internal slots for management */
+enum {
+ /* arrange for adjacent numbers after the public set */
+ MPC512x_CLK_START_PRIVATE = MPC512x_CLK_LAST_PUBLIC,
+ /* clocks which aren't announced to the public */
+ MPC512x_CLK_DDR,
+ MPC512x_CLK_MEM,
+ MPC512x_CLK_IIM,
+ /* intermediates in div+gate combos or fractional dividers */
+ MPC512x_CLK_DDR_UG,
+ MPC512x_CLK_SDHC_x4,
+ MPC512x_CLK_SDHC_UG,
+ MPC512x_CLK_SDHC2_UG,
+ MPC512x_CLK_DIU_x4,
+ MPC512x_CLK_DIU_UG,
+ MPC512x_CLK_MBX_BUS_UG,
+ MPC512x_CLK_MBX_UG,
+ MPC512x_CLK_MBX_3D_UG,
+ MPC512x_CLK_PCI_UG,
+ MPC512x_CLK_NFC_UG,
+ MPC512x_CLK_LPC_UG,
+ MPC512x_CLK_SPDIF_TX_IN,
+ /* intermediates for the mux+gate+div+mux MCLK generation */
+ MPC512x_CLK_MCLKS_FIRST,
+ MPC512x_CLK_MCLKS_LAST = MPC512x_CLK_MCLKS_FIRST
+ + NR_MCLKS * MCLK_MAX_IDX,
+ /* internal, symbolic spec for the number of slots */
+ MPC512x_CLK_LAST_PRIVATE,
+};
+
+/* data required for the OF clock provider registration */
+static struct clk *clks[MPC512x_CLK_LAST_PRIVATE];
+static struct clk_onecell_data clk_data;
+
+/* CCM register access */
+static struct mpc512x_ccm __iomem *clkregs;
+static DEFINE_SPINLOCK(clklock);
+
+/* SoC variants {{{ */
+
+/*
+ * tell SoC variants apart as they are rather similar yet not identical,
+ * cache the result in an enum to not repeatedly run the expensive OF test
+ *
+ * MPC5123 is an MPC5121 without the MBX graphics accelerator
+ *
+ * MPC5125 has many more differences: no MBX, no AXE, no VIU, no SPDIF,
+ * no PATA, no SATA, no PCI, two FECs (of different compatibility name),
+ * only 10 PSCs (of different compatibility name), two SDHCs, different
+ * NFC IP block, output clocks, system PLL status query, different CPMF
+ * interpretation, no CFM, different fourth PSC/CAN mux0 input -- yet
+ * those differences can get folded into this clock provider support
+ * code and don't warrant a separate highly redundant implementation
+ */
+
+static enum soc_type {
+ MPC512x_SOC_MPC5121,
+ MPC512x_SOC_MPC5123,
+ MPC512x_SOC_MPC5125,
+} soc;
+
+static void mpc512x_clk_determine_soc(void)
+{
+ if (of_machine_is_compatible("fsl,mpc5121")) {
+ soc = MPC512x_SOC_MPC5121;
+ return;
+ }
+ if (of_machine_is_compatible("fsl,mpc5123")) {
+ soc = MPC512x_SOC_MPC5123;
+ return;
+ }
+ if (of_machine_is_compatible("fsl,mpc5125")) {
+ soc = MPC512x_SOC_MPC5125;
+ return;
+ }
+}
+
+static bool soc_has_mbx(void)
+{
+ if (soc == MPC512x_SOC_MPC5121)
+ return true;
+ return false;
+}
+
+static bool soc_has_axe(void)
+{
+ if (soc == MPC512x_SOC_MPC5125)
+ return false;
+ return true;
+}
+
+static bool soc_has_viu(void)
+{
+ if (soc == MPC512x_SOC_MPC5125)
+ return false;
+ return true;
+}
+
+static bool soc_has_spdif(void)
+{
+ if (soc == MPC512x_SOC_MPC5125)
+ return false;
+ return true;
+}
+
+static bool soc_has_pata(void)
+{
+ if (soc == MPC512x_SOC_MPC5125)
+ return false;
+ return true;
+}
+
+static bool soc_has_sata(void)
+{
+ if (soc == MPC512x_SOC_MPC5125)
+ return false;
+ return true;
+}
+
+static bool soc_has_pci(void)
+{
+ if (soc == MPC512x_SOC_MPC5125)
+ return false;
+ return true;
+}
+
+static bool soc_has_fec2(void)
+{
+ if (soc == MPC512x_SOC_MPC5125)
+ return true;
+ return false;
+}
+
+static int soc_max_pscnum(void)
+{
+ if (soc == MPC512x_SOC_MPC5125)
+ return 10;
+ return 12;
+}
+
+static bool soc_has_sdhc2(void)
+{
+ if (soc == MPC512x_SOC_MPC5125)
+ return true;
+ return false;
+}
+
+static bool soc_has_nfc_5125(void)
+{
+ if (soc == MPC512x_SOC_MPC5125)
+ return true;
+ return false;
+}
+
+static bool soc_has_outclk(void)
+{
+ if (soc == MPC512x_SOC_MPC5125)
+ return true;
+ return false;
+}
+
+static bool soc_has_cpmf_0_bypass(void)
+{
+ if (soc == MPC512x_SOC_MPC5125)
+ return true;
+ return false;
+}
+
+static bool soc_has_mclk_mux0_canin(void)
+{
+ if (soc == MPC512x_SOC_MPC5125)
+ return true;
+ return false;
+}
+
+/* }}} SoC variants */
+/* common clk API wrappers {{{ */
+
+/* convenience wrappers around the common clk API */
+static inline struct clk *mpc512x_clk_fixed(const char *name, int rate)
+{
+ return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+}
+
+static inline struct clk *mpc512x_clk_factor(
+ const char *name, const char *parent_name,
+ int mul, int div)
+{
+ int clkflags;
+
+ clkflags = CLK_SET_RATE_PARENT;
+ return clk_register_fixed_factor(NULL, name, parent_name, clkflags,
+ mul, div);
+}
+
+static inline struct clk *mpc512x_clk_divider(
+ const char *name, const char *parent_name, u8 clkflags,
+ u32 __iomem *reg, u8 pos, u8 len, int divflags)
+{
+ return clk_register_divider(NULL, name, parent_name, clkflags,
+ reg, pos, len, divflags, &clklock);
+}
+
+static inline struct clk *mpc512x_clk_divtable(
+ const char *name, const char *parent_name,
+ u32 __iomem *reg, u8 pos, u8 len,
+ const struct clk_div_table *divtab)
+{
+ u8 divflags;
+
+ divflags = 0;
+ return clk_register_divider_table(NULL, name, parent_name, 0,
+ reg, pos, len, divflags,
+ divtab, &clklock);
+}
+
+static inline struct clk *mpc512x_clk_gated(
+ const char *name, const char *parent_name,
+ u32 __iomem *reg, u8 pos)
+{
+ int clkflags;
+
+ clkflags = CLK_SET_RATE_PARENT;
+ return clk_register_gate(NULL, name, parent_name, clkflags,
+ reg, pos, 0, &clklock);
+}
+
+static inline struct clk *mpc512x_clk_muxed(const char *name,
+ const char **parent_names, int parent_count,
+ u32 __iomem *reg, u8 pos, u8 len)
+{
+ int clkflags;
+ u8 muxflags;
+
+ clkflags = CLK_SET_RATE_PARENT;
+ muxflags = 0;
+ return clk_register_mux(NULL, name,
+ parent_names, parent_count, clkflags,
+ reg, pos, len, muxflags, &clklock);
+}
+
+/* }}} common clk API wrappers */
+
+/* helper to isolate a bit field from a register */
+static inline int get_bit_field(uint32_t __iomem *reg, uint8_t pos, uint8_t len)
+{
+ uint32_t val;
+
+ val = in_be32(reg);
+ val >>= pos;
+ val &= (1 << len) - 1;
+ return val;
+}
+
+/* get the SPMF and translate it into the "sys pll" multiplier */
+static int get_spmf_mult(void)
+{
+ static int spmf_to_mult[] = {
+ 68, 1, 12, 16, 20, 24, 28, 32,
+ 36, 40, 44, 48, 52, 56, 60, 64,
+ };
+ int spmf;
+
+ spmf = get_bit_field(&clkregs->spmr, 24, 4);
+ return spmf_to_mult[spmf];
+}
+
+/*
+ * get the SYS_DIV value and translate it into a divide factor
+ *
+ * values returned from here are a multiple of the real factor since the
+ * divide ratio is fractional
+ */
+static int get_sys_div_x2(void)
+{
+ static int sysdiv_code_to_x2[] = {
+ 4, 5, 6, 7, 8, 9, 10, 14,
+ 12, 16, 18, 22, 20, 24, 26, 30,
+ 28, 32, 34, 38, 36, 40, 42, 46,
+ 44, 48, 50, 54, 52, 56, 58, 62,
+ 60, 64, 66,
+ };
+ int divcode;
+
+ divcode = get_bit_field(&clkregs->scfr2, 26, 6);
+ return sysdiv_code_to_x2[divcode];
+}
+
+/*
+ * get the CPMF value and translate it into a multiplier factor
+ *
+ * values returned from here are a multiple of the real factor since the
+ * multiplier ratio is fractional
+ */
+static int get_cpmf_mult_x2(void)
+{
+ static int cpmf_to_mult_x36[] = {
+ /* 0b000 is "times 36" */
+ 72, 2, 2, 3, 4, 5, 6, 7,
+ };
+ static int cpmf_to_mult_0by[] = {
+ /* 0b000 is "bypass" */
+ 2, 2, 2, 3, 4, 5, 6, 7,
+ };
+
+ int *cpmf_to_mult;
+ int cpmf;
+
+ cpmf = get_bit_field(&clkregs->spmr, 16, 4);
+ if (soc_has_cpmf_0_bypass())
+ cpmf_to_mult = cpmf_to_mult_0by;
+ else
+ cpmf_to_mult = cpmf_to_mult_x36;
+ return cpmf_to_mult[cpmf];
+}
+
+/*
+ * some of the clock dividers do scale in a linear way, yet not all of
+ * their bit combinations are legal; use a divider table to get a
+ * resulting set of applicable divider values
+ */
+
+/* applies to the IPS_DIV, and PCI_DIV values */
+static struct clk_div_table divtab_2346[] = {
+ { .val = 2, .div = 2, },
+ { .val = 3, .div = 3, },
+ { .val = 4, .div = 4, },
+ { .val = 6, .div = 6, },
+ { .div = 0, },
+};
+
+/* applies to the MBX_DIV, LPC_DIV, and NFC_DIV values */
+static struct clk_div_table divtab_1234[] = {
+ { .val = 1, .div = 1, },
+ { .val = 2, .div = 2, },
+ { .val = 3, .div = 3, },
+ { .val = 4, .div = 4, },
+ { .div = 0, },
+};
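+
+/*
+ * note on the table layout: .val is the raw register field value and
+ * .div the resulting divide factor; the terminating entry with
+ * .div = 0 marks the end of the table for the common clk divider code
+ */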
+
+static int get_freq_from_dt(char *propname)
+{
+ struct device_node *np;
+ const unsigned int *prop;
+ int val;
+
+ val = 0;
+ np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-immr");
+ if (np) {
+ prop = of_get_property(np, propname, NULL);
+ if (prop)
+ val = *prop;
+ of_node_put(np);
+ }
+ return val;
+}
+
+static void mpc512x_clk_preset_data(void)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ clks[i] = ERR_PTR(-ENODEV);
+}
+
+/*
+ * - receives the "bus frequency" from the caller (that's the IPS clock
+ * rate, the historical source of clock information)
+ * - fetches the system PLL multiplier and divider values as well as the
+ * IPS divider value from hardware
+ * - determines the REF clock rate either from the XTAL/OSC spec (if
+ * there is a device tree node describing the oscillator) or from the
+ * IPS bus clock (supported for backwards compatibility, such that
+ * setups without XTAL/OSC specs keep working)
+ * - creates the "ref" clock item in the clock tree, such that
+ * subsequent code can create the remainder of the hierarchy (REF ->
+ * SYS -> CSB -> IPS) from the REF clock rate and the returned mul/div
+ * values
+ */
+static void mpc512x_clk_setup_ref_clock(struct device_node *np, int bus_freq,
+ int *sys_mul, int *sys_div,
+ int *ips_div)
+{
+ struct clk *osc_clk;
+ int calc_freq;
+
+ /* fetch mul/div factors from the hardware */
+ *sys_mul = get_spmf_mult();
+ *sys_mul *= 2; /* compensate for the fractional divider */
+ *sys_div = get_sys_div_x2();
+ *ips_div = get_bit_field(&clkregs->scfr1, 23, 3);
+
+ /* lookup the oscillator clock for its rate */
+ osc_clk = of_clk_get_by_name(np, "osc");
+
+ /*
+ * either descend from OSC to REF (and in bypassing verify the
+ * IPS rate), or backtrack from IPS and multiplier values that
+ * were fetched from hardware to REF and thus to the OSC value
+ *
+ * in either case the REF clock gets created here and the
+ * remainder of the clock tree can get spanned from there
+ */
+ if (!IS_ERR(osc_clk)) {
+ clks[MPC512x_CLK_REF] = mpc512x_clk_factor("ref", "osc", 1, 1);
+ calc_freq = clk_get_rate(clks[MPC512x_CLK_REF]);
+ calc_freq *= *sys_mul;
+ calc_freq /= *sys_div;
+ calc_freq /= 2;
+ calc_freq /= *ips_div;
+ if (bus_freq && calc_freq != bus_freq)
+ pr_warn("calc rate %d != OF spec %d\n",
+ calc_freq, bus_freq);
+ } else {
+ calc_freq = bus_freq; /* start with IPS */
+ calc_freq *= *ips_div; /* IPS -> CSB */
+ calc_freq *= 2; /* CSB -> SYS */
+ calc_freq *= *sys_div; /* SYS -> PLL out */
+ calc_freq /= *sys_mul; /* PLL out -> REF == OSC */
+ clks[MPC512x_CLK_REF] = mpc512x_clk_fixed("ref", calc_freq);
+ }
+}
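+
+/*
+ * worked example (hypothetical numbers, for illustration only): with a
+ * 33.33 MHz oscillator, SPMF 24 (sys_mul 48 after doubling), a SYS_DIV
+ * code of "divide by 2" (sys_div 4 as an x2 value), and IPS_DIV 3, the
+ * forward path yields SYS = 33.33 MHz * 48 / 4 = 400 MHz, CSB = 200 MHz
+ * and IPS = 66.7 MHz; the backtrack path runs the same math in reverse
+ * and recreates the 33.33 MHz REF clock from the 66.7 MHz bus frequency
+ */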
+
+/* MCLK helpers {{{ */
+
+/*
+ * helper code for the MCLK subtree setup
+ *
+ * the overview in section 5.2.4 of the MPC5121e Reference Manual rev4
+ * suggests that all instances of the "PSC clock generation" are equal,
+ * and that one might re-use the PSC setup for MSCAN clock generation
+ * (section 5.2.5) as well, at least the logic if not the
+ * descriptive data
+ *
+ * the details (starting at page 5-20) show differences in the specific
+ * inputs of the first mux stage ("can clk in", "spdif tx"), and the
+ * factual non-availability of the second mux stage (it's present yet
+ * only one input is valid)
+ *
+ * the MSCAN clock related registers (starting at page 5-35) all
+ * reference "spdif clk" at the first mux stage and don't mention any
+ * "can clk" at all, which somehow is unexpected
+ *
+ * TODO re-check the document, and clarify whether the RM is correct in
+ * the overview or in the details, and whether the difference is a
+ * clipboard induced error or results from chip revisions
+ *
+ * it turns out that the RM rev4 as of 2012-06 talks about "can" for the
+ * PSCs while RM rev3 as of 2008-10 talks about "spdif", so I guess that
+ * a doc update which better reflects reality in the SoC is required
+ * first, and the implementation should only follow once no questions
+ * remain
+ */
+
+/*
+ * note that this declaration raises a checkpatch warning, but
+ * it's the very data type dictated by <linux/clk-provider.h>,
+ * "fixing" this warning will break compilation
+ */
+static const char *parent_names_mux0_spdif[] = {
+ "sys", "ref", "psc-mclk-in", "spdif-tx",
+};
+
+static const char *parent_names_mux0_canin[] = {
+ "sys", "ref", "psc-mclk-in", "can-clk-in",
+};
+
+enum mclk_type {
+ MCLK_TYPE_PSC,
+ MCLK_TYPE_MSCAN,
+ MCLK_TYPE_SPDIF,
+ MCLK_TYPE_OUTCLK,
+};
+
+struct mclk_setup_data {
+ enum mclk_type type;
+ bool has_mclk1;
+ const char *name_mux0;
+ const char *name_en0;
+ const char *name_div0;
+ const char *parent_names_mux1[2];
+ const char *name_mclk;
+};
+
+#define MCLK_SETUP_DATA_PSC(id) { \
+ MCLK_TYPE_PSC, 0, \
+ "psc" #id "-mux0", \
+ "psc" #id "-en0", \
+ "psc" #id "_mclk_div", \
+ { "psc" #id "_mclk_div", "dummy", }, \
+ "psc" #id "_mclk", \
+}
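+
+/*
+ * for illustration, MCLK_SETUP_DATA_PSC(0) expands to
+ * { MCLK_TYPE_PSC, 0, "psc0-mux0", "psc0-en0", "psc0_mclk_div",
+ * { "psc0_mclk_div", "dummy", }, "psc0_mclk", }
+ * i.e. a mux0 -> en0 -> div0 chain plus a public "psc0_mclk" item
+ * which merely passes the divider output through since has_mclk1
+ * is false
+ */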
+
+#define MCLK_SETUP_DATA_MSCAN(id) { \
+ MCLK_TYPE_MSCAN, 0, \
+ "mscan" #id "-mux0", \
+ "mscan" #id "-en0", \
+ "mscan" #id "_mclk_div", \
+ { "mscan" #id "_mclk_div", "dummy", }, \
+ "mscan" #id "_mclk", \
+}
+
+#define MCLK_SETUP_DATA_SPDIF { \
+ MCLK_TYPE_SPDIF, 1, \
+ "spdif-mux0", \
+ "spdif-en0", \
+ "spdif_mclk_div", \
+ { "spdif_mclk_div", "spdif-rx", }, \
+ "spdif_mclk", \
+}
+
+#define MCLK_SETUP_DATA_OUTCLK(id) { \
+ MCLK_TYPE_OUTCLK, 0, \
+ "out" #id "-mux0", \
+ "out" #id "-en0", \
+ "out" #id "_mclk_div", \
+ { "out" #id "_mclk_div", "dummy", }, \
+ "out" #id "_clk", \
+}
+
+static struct mclk_setup_data mclk_psc_data[] = {
+ MCLK_SETUP_DATA_PSC(0),
+ MCLK_SETUP_DATA_PSC(1),
+ MCLK_SETUP_DATA_PSC(2),
+ MCLK_SETUP_DATA_PSC(3),
+ MCLK_SETUP_DATA_PSC(4),
+ MCLK_SETUP_DATA_PSC(5),
+ MCLK_SETUP_DATA_PSC(6),
+ MCLK_SETUP_DATA_PSC(7),
+ MCLK_SETUP_DATA_PSC(8),
+ MCLK_SETUP_DATA_PSC(9),
+ MCLK_SETUP_DATA_PSC(10),
+ MCLK_SETUP_DATA_PSC(11),
+};
+
+static struct mclk_setup_data mclk_mscan_data[] = {
+ MCLK_SETUP_DATA_MSCAN(0),
+ MCLK_SETUP_DATA_MSCAN(1),
+ MCLK_SETUP_DATA_MSCAN(2),
+ MCLK_SETUP_DATA_MSCAN(3),
+};
+
+static struct mclk_setup_data mclk_spdif_data[] = {
+ MCLK_SETUP_DATA_SPDIF,
+};
+
+static struct mclk_setup_data mclk_outclk_data[] = {
+ MCLK_SETUP_DATA_OUTCLK(0),
+ MCLK_SETUP_DATA_OUTCLK(1),
+ MCLK_SETUP_DATA_OUTCLK(2),
+ MCLK_SETUP_DATA_OUTCLK(3),
+};
+
+/* setup the MCLK clock subtree of an individual PSC/MSCAN/SPDIF */
+static void mpc512x_clk_setup_mclk(struct mclk_setup_data *entry, size_t idx)
+{
+ size_t clks_idx_pub, clks_idx_int;
+ u32 __iomem *mccr_reg; /* MCLK control register (mux, en, div) */
+ int div;
+
+ /* derive a few parameters from the component type and index */
+ switch (entry->type) {
+ case MCLK_TYPE_PSC:
+ clks_idx_pub = MPC512x_CLK_PSC0_MCLK + idx;
+ clks_idx_int = MPC512x_CLK_MCLKS_FIRST
+ + (idx) * MCLK_MAX_IDX;
+ mccr_reg = &clkregs->psc_ccr[idx];
+ break;
+ case MCLK_TYPE_MSCAN:
+ clks_idx_pub = MPC512x_CLK_MSCAN0_MCLK + idx;
+ clks_idx_int = MPC512x_CLK_MCLKS_FIRST
+ + (NR_PSCS + idx) * MCLK_MAX_IDX;
+ mccr_reg = &clkregs->mscan_ccr[idx];
+ break;
+ case MCLK_TYPE_SPDIF:
+ clks_idx_pub = MPC512x_CLK_SPDIF_MCLK;
+ clks_idx_int = MPC512x_CLK_MCLKS_FIRST
+ + (NR_PSCS + NR_MSCANS) * MCLK_MAX_IDX;
+ mccr_reg = &clkregs->spccr;
+ break;
+ case MCLK_TYPE_OUTCLK:
+ clks_idx_pub = MPC512x_CLK_OUT0_CLK + idx;
+ clks_idx_int = MPC512x_CLK_MCLKS_FIRST
+ + (NR_PSCS + NR_MSCANS + NR_SPDIFS + idx)
+ * MCLK_MAX_IDX;
+ mccr_reg = &clkregs->out_ccr[idx];
+ break;
+ default:
+ return;
+ }
+
+ /*
+ * this was grabbed from the PPC_CLOCK implementation, which
+ * enforced a specific MCLK divider while the clock was gated
+ * during setup (that's a documented hardware requirement)
+ *
+ * the PPC_CLOCK implementation might even have violated the
+ * "MCLK <= IPS" constraint, the fixed divider value of 1
+ * results in a divider of 2 and thus MCLK = SYS/2 which equals
+ * CSB which is greater than IPS; the serial port setup may have
+ * adjusted the divider which the clock setup might have left in
+ * an undesirable state
+ *
+ * initial setup is:
+ * - MCLK 0 from SYS
+ * - MCLK DIV such to not exceed the IPS clock
+ * - MCLK 0 enabled
+ * - MCLK 1 from MCLK DIV
+ */
+ div = clk_get_rate(clks[MPC512x_CLK_SYS]);
+ div /= clk_get_rate(clks[MPC512x_CLK_IPS]);
+ out_be32(mccr_reg, (0 << 16));
+ out_be32(mccr_reg, (0 << 16) | ((div - 1) << 17));
+ out_be32(mccr_reg, (1 << 16) | ((div - 1) << 17));
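+ /*
+ * in register terms: bit 16 is the MCLK enable gate, bits 31:17
+ * hold the divider value, and the mux select in bits 15:14 stays
+ * at 0 which picks SYS -- the three writes above implement the
+ * documented disable, set divider, enable sequence
+ */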
+
+ /*
+ * create the 'struct clk' items of the MCLK's clock subtree
+ *
+ * note that by design we always create all nodes and won't take
+ * shortcuts here, because
+ * - the "internal" MCLK_DIV and MCLK_OUT signal in turn are
+ * selectable inputs to the CFM while those who "actually use"
+ * the PSC/MSCAN/SPDIF (serial drivers et al) need the MCLK
+ * for their bitrate
+ * - in the absence of "aliases" for clocks we need to create
+ * individual 'struct clk' items for whatever might get
+ * referenced or looked up, even if several of those items are
+ * identical from the logical POV (their rate value)
+ * - for easier future maintenance and for better reflection of
+ * the SoC's documentation, it appears appropriate to generate
+ * clock items even for those muxers which actually are NOPs
+ * (those with two inputs of which one is reserved)
+ */
+ clks[clks_idx_int + MCLK_IDX_MUX0] = mpc512x_clk_muxed(
+ entry->name_mux0,
+ soc_has_mclk_mux0_canin()
+ ? &parent_names_mux0_canin[0]
+ : &parent_names_mux0_spdif[0],
+ ARRAY_SIZE(parent_names_mux0_spdif),
+ mccr_reg, 14, 2);
+ clks[clks_idx_int + MCLK_IDX_EN0] = mpc512x_clk_gated(
+ entry->name_en0, entry->name_mux0,
+ mccr_reg, 16);
+ clks[clks_idx_int + MCLK_IDX_DIV0] = mpc512x_clk_divider(
+ entry->name_div0,
+ entry->name_en0, CLK_SET_RATE_GATE,
+ mccr_reg, 17, 15, 0);
+ if (entry->has_mclk1) {
+ clks[clks_idx_pub] = mpc512x_clk_muxed(
+ entry->name_mclk,
+ &entry->parent_names_mux1[0],
+ ARRAY_SIZE(entry->parent_names_mux1),
+ mccr_reg, 7, 1);
+ } else {
+ clks[clks_idx_pub] = mpc512x_clk_factor(
+ entry->name_mclk,
+ entry->parent_names_mux1[0],
+ 1, 1);
+ }
+}
+
+/* }}} MCLK helpers */
+
+static void mpc512x_clk_setup_clock_tree(struct device_node *np, int busfreq)
+{
+ int sys_mul, sys_div, ips_div;
+ int mul, div;
+ size_t mclk_idx;
+ int freq;
+
+ /*
+ * developer's notes:
+ * - consider whether to handle clocks which have both gates and
+ * dividers via intermediates or by means of composites
+ * - fractional dividers appear to not map well to composites
+ * since they can be seen as a fixed multiplier and an
+ * adjustable divider, while composites can only combine at
+ * most one of a mux, div, and gate each into one 'struct clk'
+ * item
+ * - PSC/MSCAN/SPDIF clock generation OTOH already is very
+ * specific and cannot get mapped to composites (at least not
+ * a single one, maybe two of them, but then some of these
+ * intermediate clock signals get referenced elsewhere (e.g.
+ * in the clock frequency measurement, CFM) and thus need
+ * publicly available names)
+ * - the current source layout appropriately reflects the
+ * hardware setup, and it works, so it's questionable whether
+ * further changes would result in a big enough benefit
+ */
+
+ /* regardless of whether XTAL/OSC exists, have REF created */
+ mpc512x_clk_setup_ref_clock(np, busfreq, &sys_mul, &sys_div, &ips_div);
+
+ /* now setup the REF -> SYS -> CSB -> IPS hierarchy */
+ clks[MPC512x_CLK_SYS] = mpc512x_clk_factor("sys", "ref",
+ sys_mul, sys_div);
+ clks[MPC512x_CLK_CSB] = mpc512x_clk_factor("csb", "sys", 1, 2);
+ clks[MPC512x_CLK_IPS] = mpc512x_clk_divtable("ips", "csb",
+ &clkregs->scfr1, 23, 3,
+ divtab_2346);
+ /* now setup anything below SYS and CSB and IPS */
+
+ clks[MPC512x_CLK_DDR_UG] = mpc512x_clk_factor("ddr-ug", "sys", 1, 2);
+
+ /*
+ * the Reference Manual discusses that for SDHC only even divide
+ * ratios are supported because clock domain synchronization
+ * between 'per' and 'ipg' is broken;
+ * keep the divider's bit 0 cleared (per reset value), and only
+ * allow to setup the divider's bits 7:1, which results in that
+ * only even divide ratios can get configured upon rate changes;
+ * keep the "x4" name because this bit shift hack is an internal
+ * implementation detail; the "fractional divider with quarters"
+ * semantics remain
+ */
+ clks[MPC512x_CLK_SDHC_x4] = mpc512x_clk_factor("sdhc-x4", "csb", 2, 1);
+ clks[MPC512x_CLK_SDHC_UG] = mpc512x_clk_divider("sdhc-ug", "sdhc-x4", 0,
+ &clkregs->scfr2, 1, 7,
+ CLK_DIVIDER_ONE_BASED);
+ if (soc_has_sdhc2()) {
+ clks[MPC512x_CLK_SDHC2_UG] = mpc512x_clk_divider(
+ "sdhc2-ug", "sdhc-x4", 0, &clkregs->scfr2,
+ 9, 7, CLK_DIVIDER_ONE_BASED);
+ }
+
+ clks[MPC512x_CLK_DIU_x4] = mpc512x_clk_factor("diu-x4", "csb", 4, 1);
+ clks[MPC512x_CLK_DIU_UG] = mpc512x_clk_divider("diu-ug", "diu-x4", 0,
+ &clkregs->scfr1, 0, 8,
+ CLK_DIVIDER_ONE_BASED);
+
+ /*
+ * the "power architecture PLL" was setup from data which was
+ * sampled from the reset config word; at this point in time the
+ * configuration can be considered fixed and read only (i.e. no
+ * longer adjustable, or no longer in need of adjustment), which
+ * is why we don't register a PLL here but assume fixed factors
+ */
+ mul = get_cpmf_mult_x2();
+ div = 2; /* compensate for the fractional factor */
+ clks[MPC512x_CLK_E300] = mpc512x_clk_factor("e300", "csb", mul, div);
+
+ if (soc_has_mbx()) {
+ clks[MPC512x_CLK_MBX_BUS_UG] = mpc512x_clk_factor(
+ "mbx-bus-ug", "csb", 1, 2);
+ clks[MPC512x_CLK_MBX_UG] = mpc512x_clk_divtable(
+ "mbx-ug", "mbx-bus-ug", &clkregs->scfr1,
+ 14, 3, divtab_1234);
+ clks[MPC512x_CLK_MBX_3D_UG] = mpc512x_clk_factor(
+ "mbx-3d-ug", "mbx-ug", 1, 1);
+ }
+ if (soc_has_pci()) {
+ clks[MPC512x_CLK_PCI_UG] = mpc512x_clk_divtable(
+ "pci-ug", "csb", &clkregs->scfr1,
+ 20, 3, divtab_2346);
+ }
+ if (soc_has_nfc_5125()) {
+ /*
+ * XXX TODO implement 5125 NFC clock setup logic,
+ * with high/low period counters in clkregs->scfr3,
+ * currently there are no users so it's ENOIMPL
+ */
+ clks[MPC512x_CLK_NFC_UG] = ERR_PTR(-ENOTSUPP);
+ } else {
+ clks[MPC512x_CLK_NFC_UG] = mpc512x_clk_divtable(
+ "nfc-ug", "ips", &clkregs->scfr1,
+ 8, 3, divtab_1234);
+ }
+ clks[MPC512x_CLK_LPC_UG] = mpc512x_clk_divtable("lpc-ug", "ips",
+ &clkregs->scfr1, 11, 3,
+ divtab_1234);
+
+ clks[MPC512x_CLK_LPC] = mpc512x_clk_gated("lpc", "lpc-ug",
+ &clkregs->sccr1, 30);
+ clks[MPC512x_CLK_NFC] = mpc512x_clk_gated("nfc", "nfc-ug",
+ &clkregs->sccr1, 29);
+ if (soc_has_pata()) {
+ clks[MPC512x_CLK_PATA] = mpc512x_clk_gated(
+ "pata", "ips", &clkregs->sccr1, 28);
+ }
+ /* for PSCs there is a "registers" gate and a bitrate MCLK subtree */
+ for (mclk_idx = 0; mclk_idx < soc_max_pscnum(); mclk_idx++) {
+ char name[12];
+ snprintf(name, sizeof(name), "psc%d", mclk_idx);
+ clks[MPC512x_CLK_PSC0 + mclk_idx] = mpc512x_clk_gated(
+ name, "ips", &clkregs->sccr1, 27 - mclk_idx);
+ mpc512x_clk_setup_mclk(&mclk_psc_data[mclk_idx], mclk_idx);
+ }
+ clks[MPC512x_CLK_PSC_FIFO] = mpc512x_clk_gated("psc-fifo", "ips",
+ &clkregs->sccr1, 15);
+ if (soc_has_sata()) {
+ clks[MPC512x_CLK_SATA] = mpc512x_clk_gated(
+ "sata", "ips", &clkregs->sccr1, 14);
+ }
+ clks[MPC512x_CLK_FEC] = mpc512x_clk_gated("fec", "ips",
+ &clkregs->sccr1, 13);
+ if (soc_has_pci()) {
+ clks[MPC512x_CLK_PCI] = mpc512x_clk_gated(
+ "pci", "pci-ug", &clkregs->sccr1, 11);
+ }
+ clks[MPC512x_CLK_DDR] = mpc512x_clk_gated("ddr", "ddr-ug",
+ &clkregs->sccr1, 10);
+ if (soc_has_fec2()) {
+ clks[MPC512x_CLK_FEC2] = mpc512x_clk_gated(
+ "fec2", "ips", &clkregs->sccr1, 9);
+ }
+
+ clks[MPC512x_CLK_DIU] = mpc512x_clk_gated("diu", "diu-ug",
+ &clkregs->sccr2, 31);
+ if (soc_has_axe()) {
+ clks[MPC512x_CLK_AXE] = mpc512x_clk_gated(
+ "axe", "csb", &clkregs->sccr2, 30);
+ }
+ clks[MPC512x_CLK_MEM] = mpc512x_clk_gated("mem", "ips",
+ &clkregs->sccr2, 29);
+ clks[MPC512x_CLK_USB1] = mpc512x_clk_gated("usb1", "csb",
+ &clkregs->sccr2, 28);
+ clks[MPC512x_CLK_USB2] = mpc512x_clk_gated("usb2", "csb",
+ &clkregs->sccr2, 27);
+ clks[MPC512x_CLK_I2C] = mpc512x_clk_gated("i2c", "ips",
+ &clkregs->sccr2, 26);
+ /* MSCAN differs from PSC with just one gate for multiple components */
+ clks[MPC512x_CLK_BDLC] = mpc512x_clk_gated("bdlc", "ips",
+ &clkregs->sccr2, 25);
+ for (mclk_idx = 0; mclk_idx < ARRAY_SIZE(mclk_mscan_data); mclk_idx++)
+ mpc512x_clk_setup_mclk(&mclk_mscan_data[mclk_idx], mclk_idx);
+ clks[MPC512x_CLK_SDHC] = mpc512x_clk_gated("sdhc", "sdhc-ug",
+ &clkregs->sccr2, 24);
+ /* there is only one SPDIF component, which shares MCLK support code */
+ if (soc_has_spdif()) {
+ clks[MPC512x_CLK_SPDIF] = mpc512x_clk_gated(
+ "spdif", "ips", &clkregs->sccr2, 23);
+ mpc512x_clk_setup_mclk(&mclk_spdif_data[0], 0);
+ }
+ if (soc_has_mbx()) {
+ clks[MPC512x_CLK_MBX_BUS] = mpc512x_clk_gated(
+ "mbx-bus", "mbx-bus-ug", &clkregs->sccr2, 22);
+ clks[MPC512x_CLK_MBX] = mpc512x_clk_gated(
+ "mbx", "mbx-ug", &clkregs->sccr2, 21);
+ clks[MPC512x_CLK_MBX_3D] = mpc512x_clk_gated(
+ "mbx-3d", "mbx-3d-ug", &clkregs->sccr2, 20);
+ }
+ clks[MPC512x_CLK_IIM] = mpc512x_clk_gated("iim", "csb",
+ &clkregs->sccr2, 19);
+ if (soc_has_viu()) {
+ clks[MPC512x_CLK_VIU] = mpc512x_clk_gated(
+ "viu", "csb", &clkregs->sccr2, 18);
+ }
+ if (soc_has_sdhc2()) {
+ clks[MPC512x_CLK_SDHC2] = mpc512x_clk_gated(
+ "sdhc-2", "sdhc2-ug", &clkregs->sccr2, 17);
+ }
+
+ if (soc_has_outclk()) {
+ size_t idx; /* used as mclk_idx, just to trim line length */
+ for (idx = 0; idx < ARRAY_SIZE(mclk_outclk_data); idx++)
+ mpc512x_clk_setup_mclk(&mclk_outclk_data[idx], idx);
+ }
+
+ /*
+ * externally provided clocks (when implemented in hardware,
+ * device tree may specify values which otherwise were unknown)
+ */
+ freq = get_freq_from_dt("psc_mclk_in");
+ if (!freq)
+ freq = 25000000;
+ clks[MPC512x_CLK_PSC_MCLK_IN] = mpc512x_clk_fixed("psc_mclk_in", freq);
+ if (soc_has_mclk_mux0_canin()) {
+ freq = get_freq_from_dt("can_clk_in");
+ clks[MPC512x_CLK_CAN_CLK_IN] = mpc512x_clk_fixed(
+ "can_clk_in", freq);
+ } else {
+ freq = get_freq_from_dt("spdif_tx_in");
+ clks[MPC512x_CLK_SPDIF_TX_IN] = mpc512x_clk_fixed(
+ "spdif_tx_in", freq);
+ freq = get_freq_from_dt("spdif_rx_in");
+ clks[MPC512x_CLK_SPDIF_RX_IN] = mpc512x_clk_fixed(
+ "spdif_rx_in", freq);
+ }
+
+ /* fixed frequency for AC97, always 24.567 MHz */
+ clks[MPC512x_CLK_AC97] = mpc512x_clk_fixed("ac97", 24567000);
+
+ /*
+ * pre-enable those "internal" clock items which never get
+ * claimed by any peripheral driver, to not have the clock
+ * subsystem disable them late at startup
+ */
+ clk_prepare_enable(clks[MPC512x_CLK_DUMMY]);
+ clk_prepare_enable(clks[MPC512x_CLK_E300]); /* PowerPC CPU */
+ clk_prepare_enable(clks[MPC512x_CLK_DDR]); /* DRAM */
+ clk_prepare_enable(clks[MPC512x_CLK_MEM]); /* SRAM */
+ clk_prepare_enable(clks[MPC512x_CLK_IPS]); /* SoC periph */
+ clk_prepare_enable(clks[MPC512x_CLK_LPC]); /* boot media */
+}
+
+/*
+ * registers the set of public clocks (those listed in the dt-bindings/
+ * header file) for OF lookups, keeps the intermediates private to us
+ */
+static void mpc5121_clk_register_of_provider(struct device_node *np)
+{
+ clk_data.clks = clks;
+ clk_data.clk_num = MPC512x_CLK_LAST_PUBLIC + 1; /* _not_ ARRAY_SIZE() */
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+}
+
+/*
+ * temporary support for the period of time between introduction of CCF
+ * support and the adjustment of peripheral drivers to OF based lookups
+ */
+static void mpc5121_clk_provide_migration_support(void)
+{
+
+ /*
+ * pre-enable those clock items which are not yet appropriately
+ * acquired by their peripheral driver
+ *
+ * the PCI clock cannot get acquired by its peripheral driver,
+ * because for this platform the driver won't probe(), instead
+ * initialization is done from within the .setup_arch() routine
+ * at a point in time where the clock provider has not been
+ * setup yet and thus isn't available yet
+ *
+ * so we "pre-enable" the clock here, to not have the clock
+ * subsystem automatically disable this item in a late init call
+ *
+ * this PCI clock pre-enable workaround only applies when there
+ * are device tree nodes for PCI and thus the peripheral driver
+ * has attached to bridges, otherwise the PCI clock remains
+ * unused and so it gets disabled
+ */
+ clk_prepare_enable(clks[MPC512x_CLK_PSC3_MCLK]);/* serial console */
+ if (of_find_compatible_node(NULL, "pci", "fsl,mpc5121-pci"))
+ clk_prepare_enable(clks[MPC512x_CLK_PCI]);
+}
+
+/*
+ * those macros are not exactly pretty, but they encapsulate a lot
+ * of copy'n'paste heavy code which is even more ugly, and reduce
+ * the potential for inconsistencies in those many code copies
+ */
+#define FOR_NODES(compatname) \
+ for_each_compatible_node(np, NULL, compatname)
+
+#define NODE_PREP do { \
+ of_address_to_resource(np, 0, &res); \
+ snprintf(devname, sizeof(devname), "%08x.%s", res.start, np->name); \
+} while (0)
+
+#define NODE_CHK(clkname, clkitem, regnode, regflag) do { \
+ struct clk *clk; \
+ clk = of_clk_get_by_name(np, clkname); \
+ if (IS_ERR(clk)) { \
+ clk = clkitem; \
+ clk_register_clkdev(clk, clkname, devname); \
+ if (regnode) \
+ clk_register_clkdev(clk, clkname, np->name); \
+ did_register |= DID_REG_ ## regflag; \
+ pr_debug("clock alias name '%s' for dev '%s' pointer %p\n", \
+ clkname, devname, clk); \
+ } else { \
+ clk_put(clk); \
+ } \
+} while (0)
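+
+/*
+ * usage note (hypothetical addresses, for illustration only): a PSC
+ * node named "serial" at 0x80011400 makes NODE_PREP produce the
+ * devname "80011400.serial", and NODE_CHK("mclk", ...) then registers
+ * an "mclk" clkdev alias bound to that device name whenever the OF
+ * based clock lookup fails
+ */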
+
+/*
+ * register source code provided fallback results for clock lookups,
+ * these get consulted when OF based clock lookup fails (that is in the
+ * case of not yet adjusted device tree data, where clock related specs
+ * are missing)
+ */
+static void mpc5121_clk_provide_backwards_compat(void)
+{
+ enum did_reg_flags {
+ DID_REG_PSC = BIT(0),
+ DID_REG_PSCFIFO = BIT(1),
+ DID_REG_NFC = BIT(2),
+ DID_REG_CAN = BIT(3),
+ DID_REG_I2C = BIT(4),
+ DID_REG_DIU = BIT(5),
+ DID_REG_VIU = BIT(6),
+ DID_REG_FEC = BIT(7),
+ DID_REG_USB = BIT(8),
+ DID_REG_PATA = BIT(9),
+ };
+
+ int did_register;
+ struct device_node *np;
+ struct resource res;
+ int idx;
+ char devname[32];
+
+ did_register = 0;
+
+ FOR_NODES(mpc512x_select_psc_compat()) {
+ NODE_PREP;
+ idx = (res.start >> 8) & 0xf;
+ NODE_CHK("ipg", clks[MPC512x_CLK_PSC0 + idx], 0, PSC);
+ NODE_CHK("mclk", clks[MPC512x_CLK_PSC0_MCLK + idx], 0, PSC);
+ }
+
+ FOR_NODES("fsl,mpc5121-psc-fifo") {
+ NODE_PREP;
+ NODE_CHK("ipg", clks[MPC512x_CLK_PSC_FIFO], 1, PSCFIFO);
+ }
+
+ FOR_NODES("fsl,mpc5121-nfc") {
+ NODE_PREP;
+ NODE_CHK("ipg", clks[MPC512x_CLK_NFC], 0, NFC);
+ }
+
+ FOR_NODES("fsl,mpc5121-mscan") {
+ NODE_PREP;
+ idx = 0;
+ idx += (res.start & 0x2000) ? 2 : 0;
+ idx += (res.start & 0x0080) ? 1 : 0;
+ NODE_CHK("ipg", clks[MPC512x_CLK_BDLC], 0, CAN);
+ NODE_CHK("mclk", clks[MPC512x_CLK_MSCAN0_MCLK + idx], 0, CAN);
+ }
+
+ /*
+ * do register the 'ips', 'sys', and 'ref' names globally
+ * instead of inside each individual CAN node, as there is no
+ * potential for a name conflict (in contrast to 'ipg' and 'mclk')
+ */
+ if (did_register & DID_REG_CAN) {
+ clk_register_clkdev(clks[MPC512x_CLK_IPS], "ips", NULL);
+ clk_register_clkdev(clks[MPC512x_CLK_SYS], "sys", NULL);
+ clk_register_clkdev(clks[MPC512x_CLK_REF], "ref", NULL);
+ }
+
+ FOR_NODES("fsl,mpc5121-i2c") {
+ NODE_PREP;
+ NODE_CHK("ipg", clks[MPC512x_CLK_I2C], 0, I2C);
+ }
+
+ /*
+ * workaround for the fact that the I2C driver does an "anonymous"
+ * lookup (NULL name spec, which yields the first clock spec) for
+ * which we cannot register an alias -- a _global_ 'ipg' alias that
+ * is not bound to any device name and returns the I2C clock item
+ * is not a good idea
+ *
+ * so we have the lookup in the peripheral driver fail, which is
+ * silent and non-fatal, and pre-enable the clock item here such
+ * that register access is possible
+ *
+ * see commit b3bfce2b "i2c: mpc: cleanup clock API use" for
+ * details, adjusting s/NULL/"ipg"/ in i2c-mpc.c would make this
+ * workaround obsolete
+ */
+ if (did_register & DID_REG_I2C)
+ clk_prepare_enable(clks[MPC512x_CLK_I2C]);
+
+ FOR_NODES("fsl,mpc5121-diu") {
+ NODE_PREP;
+ NODE_CHK("ipg", clks[MPC512x_CLK_DIU], 1, DIU);
+ }
+
+ FOR_NODES("fsl,mpc5121-viu") {
+ NODE_PREP;
+ NODE_CHK("ipg", clks[MPC512x_CLK_VIU], 0, VIU);
+ }
+
+ /*
+ * note that 2771399a "fs_enet: cleanup clock API use" did use the
+ * "per" string for the clock lookup in contrast to the "ipg" name
+ * which most other nodes are using -- this is not a fatal thing
+ * but just something to keep in mind when doing compatibility
+ * registration, it's a non-issue with up-to-date device tree data
+ */
+ FOR_NODES("fsl,mpc5121-fec") {
+ NODE_PREP;
+ NODE_CHK("per", clks[MPC512x_CLK_FEC], 0, FEC);
+ }
+ FOR_NODES("fsl,mpc5121-fec-mdio") {
+ NODE_PREP;
+ NODE_CHK("per", clks[MPC512x_CLK_FEC], 0, FEC);
+ }
+ /*
+ * MPC5125 has two FECs: FEC1 at 0x2800, FEC2 at 0x4800;
+ * the clock items don't "form an array" since FEC2 was
+ * added only later and was not allowed to shift all other
+ * clock item indices, so the numbers aren't adjacent
+ */
+ FOR_NODES("fsl,mpc5125-fec") {
+ NODE_PREP;
+ if (res.start & 0x4000)
+ idx = MPC512x_CLK_FEC2;
+ else
+ idx = MPC512x_CLK_FEC;
+ NODE_CHK("per", clks[idx], 0, FEC);
+ }
+
+ FOR_NODES("fsl,mpc5121-usb2-dr") {
+ NODE_PREP;
+ idx = (res.start & 0x4000) ? 1 : 0;
+ NODE_CHK("ipg", clks[MPC512x_CLK_USB1 + idx], 0, USB);
+ }
+
+ FOR_NODES("fsl,mpc5121-pata") {
+ NODE_PREP;
+ NODE_CHK("ipg", clks[MPC512x_CLK_PATA], 0, PATA);
+ }
+
+ /*
+ * try to collapse diagnostics into a single line of output yet
+ * provide a full list of what is missing, to avoid noise in the
+ * absence of up-to-date device tree data -- backwards
+ * compatibility to old DTBs is a requirement, updates may be
+ * desirable or preferable but are not at all mandatory
+ */
+ if (did_register) {
+ pr_notice("device tree lacks clock specs, adding fallbacks (0x%x,%s%s%s%s%s%s%s%s%s%s)\n",
+ did_register,
+ (did_register & DID_REG_PSC) ? " PSC" : "",
+ (did_register & DID_REG_PSCFIFO) ? " PSCFIFO" : "",
+ (did_register & DID_REG_NFC) ? " NFC" : "",
+ (did_register & DID_REG_CAN) ? " CAN" : "",
+ (did_register & DID_REG_I2C) ? " I2C" : "",
+ (did_register & DID_REG_DIU) ? " DIU" : "",
+ (did_register & DID_REG_VIU) ? " VIU" : "",
+ (did_register & DID_REG_FEC) ? " FEC" : "",
+ (did_register & DID_REG_USB) ? " USB" : "",
+ (did_register & DID_REG_PATA) ? " PATA" : "");
+ } else {
+ pr_debug("device tree has clock specs, no fallbacks added\n");
+ }
+}
+
+int __init mpc5121_clk_init(void)
+{
+ struct device_node *clk_np;
+ int busfreq;
+
+ /* map the clock control registers */
+ clk_np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-clock");
+ if (!clk_np)
+ return -ENODEV;
+ clkregs = of_iomap(clk_np, 0);
+ WARN_ON(!clkregs);
+
+ /* determine the SoC variant we run on */
+ mpc512x_clk_determine_soc();
+
+ /* invalidate all not yet registered clock slots */
+ mpc512x_clk_preset_data();
+
+ /*
+ * have the device tree scanned for "fixed-clock" nodes (which
+ * includes the oscillator node if the board's DT provides one)
+ */
+ of_clk_init(NULL);
+
+ /*
+ * add a dummy clock for those situations where a clock spec is
+ * required yet no real clock is involved
+ */
+ clks[MPC512x_CLK_DUMMY] = mpc512x_clk_fixed("dummy", 0);
+
+ /*
+ * have all the real nodes in the clock tree populated from REF
+ * down to all leaves, either starting from the OSC node or from
+ * a REF root that was created from the IPS bus clock input
+ */
+ busfreq = get_freq_from_dt("bus-frequency");
+ mpc512x_clk_setup_clock_tree(clk_np, busfreq);
+
+ /* register as an OF clock provider */
+ mpc5121_clk_register_of_provider(clk_np);
+
+ /*
+ * unbreak not yet adjusted peripheral drivers during migration
+ * towards fully operational common clock support, and allow
+ * operation in the absence of clock related device tree specs
+ */
+ mpc5121_clk_provide_migration_support();
+ mpc5121_clk_provide_backwards_compat();
+
+ return 0;
+}
diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c
deleted file mode 100644
index fd8a3765341..00000000000
--- a/arch/powerpc/platforms/512x/clock.c
+++ /dev/null
@@ -1,754 +0,0 @@
-/*
- * Copyright (C) 2007,2008 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Author: John Rigby <jrigby@freescale.com>
- *
- * Implements the clk api defined in include/linux/clk.h
- *
- * Original based on linux/arch/arm/mach-integrator/clock.c
- *
- * Copyright (C) 2004 ARM Limited.
- * Written by Deep Blue Solutions Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/clk.h>
-#include <linux/mutex.h>
-#include <linux/io.h>
-
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <asm/mpc5xxx.h>
-#include <asm/mpc5121.h>
-#include <asm/clk_interface.h>
-
-#include "mpc512x.h"
-
-#undef CLK_DEBUG
-
-static int clocks_initialized;
-
-#define CLK_HAS_RATE 0x1 /* has rate in MHz */
-#define CLK_HAS_CTRL 0x2 /* has control reg and bit */
-
-struct clk {
- struct list_head node;
- char name[32];
- int flags;
- struct device *dev;
- unsigned long rate;
- struct module *owner;
- void (*calc) (struct clk *);
- struct clk *parent;
- int reg, bit; /* CLK_HAS_CTRL */
- int div_shift; /* only used by generic_div_clk_calc */
-};
-
-static LIST_HEAD(clocks);
-static DEFINE_MUTEX(clocks_mutex);
-
-static struct clk *mpc5121_clk_get(struct device *dev, const char *id)
-{
- struct clk *p, *clk = ERR_PTR(-ENOENT);
- int dev_match;
- int id_match;
-
- if (dev == NULL || id == NULL)
- return clk;
-
- mutex_lock(&clocks_mutex);
- list_for_each_entry(p, &clocks, node) {
- dev_match = id_match = 0;
-
- if (dev == p->dev)
- dev_match++;
- if (strcmp(id, p->name) == 0)
- id_match++;
- if ((dev_match || id_match) && try_module_get(p->owner)) {
- clk = p;
- break;
- }
- }
- mutex_unlock(&clocks_mutex);
-
- return clk;
-}
-
-#ifdef CLK_DEBUG
-static void dump_clocks(void)
-{
- struct clk *p;
-
- mutex_lock(&clocks_mutex);
- printk(KERN_INFO "CLOCKS:\n");
- list_for_each_entry(p, &clocks, node) {
- pr_info(" %s=%ld", p->name, p->rate);
- if (p->parent)
- pr_cont(" %s=%ld", p->parent->name,
- p->parent->rate);
- if (p->flags & CLK_HAS_CTRL)
- pr_cont(" reg/bit=%d/%d", p->reg, p->bit);
- pr_cont("\n");
- }
- mutex_unlock(&clocks_mutex);
-}
-#define DEBUG_CLK_DUMP() dump_clocks()
-#else
-#define DEBUG_CLK_DUMP()
-#endif
-
-
-static void mpc5121_clk_put(struct clk *clk)
-{
- module_put(clk->owner);
-}
-
-#define NRPSC 12
-
-struct mpc512x_clockctl {
- u32 spmr; /* System PLL Mode Reg */
- u32 sccr[2]; /* System Clk Ctrl Reg 1 & 2 */
- u32 scfr1; /* System Clk Freq Reg 1 */
- u32 scfr2; /* System Clk Freq Reg 2 */
- u32 reserved;
- u32 bcr; /* Bread Crumb Reg */
- u32 pccr[NRPSC]; /* PSC Clk Ctrl Reg 0-11 */
- u32 spccr; /* SPDIF Clk Ctrl Reg */
- u32 cccr; /* CFM Clk Ctrl Reg */
- u32 dccr; /* DIU Clk Cnfg Reg */
-};
-
-static struct mpc512x_clockctl __iomem *clockctl;
-
-static int mpc5121_clk_enable(struct clk *clk)
-{
- unsigned int mask;
-
- if (clk->flags & CLK_HAS_CTRL) {
- mask = in_be32(&clockctl->sccr[clk->reg]);
- mask |= 1 << clk->bit;
- out_be32(&clockctl->sccr[clk->reg], mask);
- }
- return 0;
-}
-
-static void mpc5121_clk_disable(struct clk *clk)
-{
- unsigned int mask;
-
- if (clk->flags & CLK_HAS_CTRL) {
- mask = in_be32(&clockctl->sccr[clk->reg]);
- mask &= ~(1 << clk->bit);
- out_be32(&clockctl->sccr[clk->reg], mask);
- }
-}
-
-static unsigned long mpc5121_clk_get_rate(struct clk *clk)
-{
- if (clk->flags & CLK_HAS_RATE)
- return clk->rate;
- else
- return 0;
-}
-
-static long mpc5121_clk_round_rate(struct clk *clk, unsigned long rate)
-{
- return rate;
-}
-
-static int mpc5121_clk_set_rate(struct clk *clk, unsigned long rate)
-{
- return 0;
-}
-
-static int clk_register(struct clk *clk)
-{
- mutex_lock(&clocks_mutex);
- list_add(&clk->node, &clocks);
- mutex_unlock(&clocks_mutex);
- return 0;
-}
-
-static unsigned long spmf_mult(void)
-{
- /*
- * Convert spmf to multiplier
- */
- static int spmf_to_mult[] = {
- 68, 1, 12, 16,
- 20, 24, 28, 32,
- 36, 40, 44, 48,
- 52, 56, 60, 64
- };
- int spmf = (in_be32(&clockctl->spmr) >> 24) & 0xf;
- return spmf_to_mult[spmf];
-}
-
-static unsigned long sysdiv_div_x_2(void)
-{
- /*
- * Convert sysdiv to divisor x 2
- * Some divisors have fractional parts so
- * multiply by 2 then divide by this value
- */
- static int sysdiv_to_div_x_2[] = {
- 4, 5, 6, 7,
- 8, 9, 10, 14,
- 12, 16, 18, 22,
- 20, 24, 26, 30,
- 28, 32, 34, 38,
- 36, 40, 42, 46,
- 44, 48, 50, 54,
- 52, 56, 58, 62,
- 60, 64, 66,
- };
- int sysdiv = (in_be32(&clockctl->scfr2) >> 26) & 0x3f;
- return sysdiv_to_div_x_2[sysdiv];
-}
-
-static unsigned long ref_to_sys(unsigned long rate)
-{
- rate *= spmf_mult();
- rate *= 2;
- rate /= sysdiv_div_x_2();
-
- return rate;
-}
-
-static unsigned long sys_to_ref(unsigned long rate)
-{
- rate *= sysdiv_div_x_2();
- rate /= 2;
- rate /= spmf_mult();
-
- return rate;
-}
-
-static long ips_to_ref(unsigned long rate)
-{
- int ips_div = (in_be32(&clockctl->scfr1) >> 23) & 0x7;
-
- rate *= ips_div; /* csb_clk = ips_clk * ips_div */
- rate *= 2; /* sys_clk = csb_clk * 2 */
- return sys_to_ref(rate);
-}
-
-static unsigned long devtree_getfreq(char *clockname)
-{
- struct device_node *np;
- const unsigned int *prop;
- unsigned int val = 0;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-immr");
- if (np) {
- prop = of_get_property(np, clockname, NULL);
- if (prop)
- val = *prop;
- of_node_put(np);
- }
- return val;
-}
-
-static void ref_clk_calc(struct clk *clk)
-{
- unsigned long rate;
-
- rate = devtree_getfreq("bus-frequency");
- if (rate == 0) {
- printk(KERN_ERR "No bus-frequency in dev tree\n");
- clk->rate = 0;
- return;
- }
- clk->rate = ips_to_ref(rate);
-}
-
-static struct clk ref_clk = {
- .name = "ref_clk",
- .calc = ref_clk_calc,
-};
-
-
-static void sys_clk_calc(struct clk *clk)
-{
- clk->rate = ref_to_sys(ref_clk.rate);
-}
-
-static struct clk sys_clk = {
- .name = "sys_clk",
- .calc = sys_clk_calc,
-};
-
-static void diu_clk_calc(struct clk *clk)
-{
- int diudiv_x_2 = in_be32(&clockctl->scfr1) & 0xff;
- unsigned long rate;
-
- rate = sys_clk.rate;
-
- rate *= 2;
- rate /= diudiv_x_2;
-
- clk->rate = rate;
-}
-
-static void viu_clk_calc(struct clk *clk)
-{
- unsigned long rate;
-
- rate = sys_clk.rate;
- rate /= 2;
- clk->rate = rate;
-}
-
-static void half_clk_calc(struct clk *clk)
-{
- clk->rate = clk->parent->rate / 2;
-}
-
-static void generic_div_clk_calc(struct clk *clk)
-{
- int div = (in_be32(&clockctl->scfr1) >> clk->div_shift) & 0x7;
-
- clk->rate = clk->parent->rate / div;
-}
-
-static void unity_clk_calc(struct clk *clk)
-{
- clk->rate = clk->parent->rate;
-}
-
-static struct clk csb_clk = {
- .name = "csb_clk",
- .calc = half_clk_calc,
- .parent = &sys_clk,
-};
-
-static void e300_clk_calc(struct clk *clk)
-{
- int spmf = (in_be32(&clockctl->spmr) >> 16) & 0xf;
- int ratex2 = clk->parent->rate * spmf;
-
- clk->rate = ratex2 / 2;
-}
-
-static struct clk e300_clk = {
- .name = "e300_clk",
- .calc = e300_clk_calc,
- .parent = &csb_clk,
-};
-
-static struct clk ips_clk = {
- .name = "ips_clk",
- .calc = generic_div_clk_calc,
- .parent = &csb_clk,
- .div_shift = 23,
-};
-
-/*
- * Clocks controlled by SCCR1 (.reg = 0)
- */
-static struct clk lpc_clk = {
- .name = "lpc_clk",
- .flags = CLK_HAS_CTRL,
- .reg = 0,
- .bit = 30,
- .calc = generic_div_clk_calc,
- .parent = &ips_clk,
- .div_shift = 11,
-};
-
-static struct clk nfc_clk = {
- .name = "nfc_clk",
- .flags = CLK_HAS_CTRL,
- .reg = 0,
- .bit = 29,
- .calc = generic_div_clk_calc,
- .parent = &ips_clk,
- .div_shift = 8,
-};
-
-static struct clk pata_clk = {
- .name = "pata_clk",
- .flags = CLK_HAS_CTRL,
- .reg = 0,
- .bit = 28,
- .calc = unity_clk_calc,
- .parent = &ips_clk,
-};
-
-/*
- * PSC clocks (bits 27 - 16)
- * are setup elsewhere
- */
-
-static struct clk sata_clk = {
- .name = "sata_clk",
- .flags = CLK_HAS_CTRL,
- .reg = 0,
- .bit = 14,
- .calc = unity_clk_calc,
- .parent = &ips_clk,
-};
-
-static struct clk fec_clk = {
- .name = "fec_clk",
- .flags = CLK_HAS_CTRL,
- .reg = 0,
- .bit = 13,
- .calc = unity_clk_calc,
- .parent = &ips_clk,
-};
-
-static struct clk pci_clk = {
- .name = "pci_clk",
- .flags = CLK_HAS_CTRL,
- .reg = 0,
- .bit = 11,
- .calc = generic_div_clk_calc,
- .parent = &csb_clk,
- .div_shift = 20,
-};
-
-/*
- * Clocks controlled by SCCR2 (.reg = 1)
- */
-static struct clk diu_clk = {
- .name = "diu_clk",
- .flags = CLK_HAS_CTRL,
- .reg = 1,
- .bit = 31,
- .calc = diu_clk_calc,
-};
-
-static struct clk viu_clk = {
- .name = "viu_clk",
- .flags = CLK_HAS_CTRL,
- .reg = 1,
- .bit = 18,
- .calc = viu_clk_calc,
-};
-
-static struct clk axe_clk = {
- .name = "axe_clk",
- .flags = CLK_HAS_CTRL,
- .reg = 1,
- .bit = 30,
- .calc = unity_clk_calc,
- .parent = &csb_clk,
-};
-
-static struct clk usb1_clk = {
- .name = "usb1_clk",
- .flags = CLK_HAS_CTRL,
- .reg = 1,
- .bit = 28,
- .calc = unity_clk_calc,
- .parent = &csb_clk,
-};
-
-static struct clk usb2_clk = {
- .name = "usb2_clk",
- .flags = CLK_HAS_CTRL,
- .reg = 1,
- .bit = 27,
- .calc = unity_clk_calc,
- .parent = &csb_clk,
-};
-
-static struct clk i2c_clk = {
- .name = "i2c_clk",
- .flags = CLK_HAS_CTRL,
- .reg = 1,
- .bit = 26,
- .calc = unity_clk_calc,
- .parent = &ips_clk,
-};
-
-static struct clk mscan_clk = {
- .name = "mscan_clk",
- .flags = CLK_HAS_CTRL,
- .reg = 1,
- .bit = 25,
- .calc = unity_clk_calc,
- .parent = &ips_clk,
-};
-
-static struct clk sdhc_clk = {
- .name = "sdhc_clk",
- .flags = CLK_HAS_CTRL,
- .reg = 1,
- .bit = 24,
- .calc = unity_clk_calc,
- .parent = &ips_clk,
-};
-
-static struct clk mbx_bus_clk = {
- .name = "mbx_bus_clk",
- .flags = CLK_HAS_CTRL,
- .reg = 1,
- .bit = 22,
- .calc = half_clk_calc,
- .parent = &csb_clk,
-};
-
-static struct clk mbx_clk = {
- .name = "mbx_clk",
- .flags = CLK_HAS_CTRL,
- .reg = 1,
- .bit = 21,
- .calc = unity_clk_calc,
- .parent = &csb_clk,
-};
-
-static struct clk mbx_3d_clk = {
- .name = "mbx_3d_clk",
- .flags = CLK_HAS_CTRL,
- .reg = 1,
- .bit = 20,
- .calc = generic_div_clk_calc,
- .parent = &mbx_bus_clk,
- .div_shift = 14,
-};
-
-static void psc_mclk_in_calc(struct clk *clk)
-{
- clk->rate = devtree_getfreq("psc_mclk_in");
- if (!clk->rate)
- clk->rate = 25000000;
-}
-
-static struct clk psc_mclk_in = {
- .name = "psc_mclk_in",
- .calc = psc_mclk_in_calc,
-};
-
-static struct clk spdif_txclk = {
- .name = "spdif_txclk",
- .flags = CLK_HAS_CTRL,
- .reg = 1,
- .bit = 23,
-};
-
-static struct clk spdif_rxclk = {
- .name = "spdif_rxclk",
- .flags = CLK_HAS_CTRL,
- .reg = 1,
- .bit = 23,
-};
-
-static void ac97_clk_calc(struct clk *clk)
-{
- /* ac97 bit clock is always 24.567 MHz */
- clk->rate = 24567000;
-}
-
-static struct clk ac97_clk = {
- .name = "ac97_clk_in",
- .calc = ac97_clk_calc,
-};
-
-static struct clk *rate_clks[] = {
- &ref_clk,
- &sys_clk,
- &diu_clk,
- &viu_clk,
- &csb_clk,
- &e300_clk,
- &ips_clk,
- &fec_clk,
- &sata_clk,
- &pata_clk,
- &nfc_clk,
- &lpc_clk,
- &mbx_bus_clk,
- &mbx_clk,
- &mbx_3d_clk,
- &axe_clk,
- &usb1_clk,
- &usb2_clk,
- &i2c_clk,
- &mscan_clk,
- &sdhc_clk,
- &pci_clk,
- &psc_mclk_in,
- &spdif_txclk,
- &spdif_rxclk,
- &ac97_clk,
- NULL
-};
-
-static void rate_clk_init(struct clk *clk)
-{
- if (clk->calc) {
- clk->calc(clk);
- clk->flags |= CLK_HAS_RATE;
- clk_register(clk);
- } else {
- printk(KERN_WARNING
- "Could not initialize clk %s without a calc routine\n",
- clk->name);
- }
-}
-
-static void rate_clks_init(void)
-{
- struct clk **cpp, *clk;
-
- cpp = rate_clks;
- while ((clk = *cpp++))
- rate_clk_init(clk);
-}
-
-/*
- * There are two clk enable registers with 32 enable bits each
- * psc clocks and device clocks are all stored in dev_clks
- */
-static struct clk dev_clks[2][32];
-
-/*
- * Given a psc number return the dev_clk
- * associated with it
- */
-static struct clk *psc_dev_clk(int pscnum)
-{
- int reg, bit;
- struct clk *clk;
-
- reg = 0;
- bit = 27 - pscnum;
-
- clk = &dev_clks[reg][bit];
- clk->reg = 0;
- clk->bit = bit;
- return clk;
-}
-
-/*
- * PSC clock rate calculation
- */
-static void psc_calc_rate(struct clk *clk, int pscnum, struct device_node *np)
-{
- unsigned long mclk_src = sys_clk.rate;
- unsigned long mclk_div;
-
- /*
- * Can only change value of mclk divider
- * when the divider is disabled.
- *
- * Zero is not a valid divider so minimum
- * divider is 1
- *
- * disable/set divider/enable
- */
- out_be32(&clockctl->pccr[pscnum], 0);
- out_be32(&clockctl->pccr[pscnum], 0x00020000);
- out_be32(&clockctl->pccr[pscnum], 0x00030000);
-
- if (in_be32(&clockctl->pccr[pscnum]) & 0x80) {
- clk->rate = spdif_rxclk.rate;
- return;
- }
-
- switch ((in_be32(&clockctl->pccr[pscnum]) >> 14) & 0x3) {
- case 0:
- mclk_src = sys_clk.rate;
- break;
- case 1:
- mclk_src = ref_clk.rate;
- break;
- case 2:
- mclk_src = psc_mclk_in.rate;
- break;
- case 3:
- mclk_src = spdif_txclk.rate;
- break;
- }
-
- mclk_div = ((in_be32(&clockctl->pccr[pscnum]) >> 17) & 0x7fff) + 1;
- clk->rate = mclk_src / mclk_div;
-}
-
-/*
- * Find all psc nodes in device tree and assign a clock
- * with name "psc%d_mclk" and dev pointing at the device
- * returned from of_find_device_by_node
- */
-static void psc_clks_init(void)
-{
- struct device_node *np;
- struct platform_device *ofdev;
- u32 reg;
- const char *psc_compat;
-
- psc_compat = mpc512x_select_psc_compat();
- if (!psc_compat)
- return;
-
- for_each_compatible_node(np, NULL, psc_compat) {
- if (!of_property_read_u32(np, "reg", &reg)) {
- int pscnum = (reg & 0xf00) >> 8;
- struct clk *clk = psc_dev_clk(pscnum);
-
- clk->flags = CLK_HAS_RATE | CLK_HAS_CTRL;
- ofdev = of_find_device_by_node(np);
- clk->dev = &ofdev->dev;
- /*
- * AC97 is special rate clock does
- * not go through normal path
- */
- if (of_device_is_compatible(np, "fsl,mpc5121-psc-ac97"))
- clk->rate = ac97_clk.rate;
- else
- psc_calc_rate(clk, pscnum, np);
- sprintf(clk->name, "psc%d_mclk", pscnum);
- clk_register(clk);
- clk_enable(clk);
- }
- }
-}
-
-static struct clk_interface mpc5121_clk_functions = {
- .clk_get = mpc5121_clk_get,
- .clk_enable = mpc5121_clk_enable,
- .clk_disable = mpc5121_clk_disable,
- .clk_get_rate = mpc5121_clk_get_rate,
- .clk_put = mpc5121_clk_put,
- .clk_round_rate = mpc5121_clk_round_rate,
- .clk_set_rate = mpc5121_clk_set_rate,
- .clk_set_parent = NULL,
- .clk_get_parent = NULL,
-};
-
-int __init mpc5121_clk_init(void)
-{
- struct device_node *np;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-clock");
- if (np) {
- clockctl = of_iomap(np, 0);
- of_node_put(np);
- }
-
- if (!clockctl) {
- printk(KERN_ERR "Could not map clock control registers\n");
- return 0;
- }
-
- rate_clks_init();
- psc_clks_init();
-
- /* leave clockctl mapped forever */
- /*iounmap(clockctl); */
- DEBUG_CLK_DUMP();
- clocks_initialized++;
- clk_functions = mpc5121_clk_functions;
- return 0;
-}
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index 36b5652aada..adb95f03d4d 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -12,6 +12,7 @@
* (at your option) any later version.
*/
+#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -68,98 +69,112 @@ struct fsl_diu_shared_fb {
bool in_use;
};
-#define DIU_DIV_MASK 0x000000ff
+/* receives a pixel clock spec in pico seconds, adjusts the DIU clock rate */
static void mpc512x_set_pixel_clock(unsigned int pixclock)
{
- unsigned long bestval, bestfreq, speed, busfreq;
- unsigned long minpixclock, maxpixclock, pixval;
- struct mpc512x_ccm __iomem *ccm;
struct device_node *np;
- u32 temp;
- long err;
- int i;
+ struct clk *clk_diu;
+ unsigned long epsilon, minpixclock, maxpixclock;
+ unsigned long offset, want, got, delta;
- np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-clock");
+ /* lookup and enable the DIU clock */
+ np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-diu");
if (!np) {
- pr_err("Can't find clock control module.\n");
+ pr_err("Could not find DIU device tree node.\n");
return;
}
-
- ccm = of_iomap(np, 0);
+ clk_diu = of_clk_get(np, 0);
+ if (IS_ERR(clk_diu)) {
+ /* backwards compat with device trees that lack clock specs */
+ clk_diu = clk_get_sys(np->name, "ipg");
+ }
of_node_put(np);
- if (!ccm) {
- pr_err("Can't map clock control module reg.\n");
+ if (IS_ERR(clk_diu)) {
+ pr_err("Could not lookup DIU clock.\n");
return;
}
-
- np = of_find_node_by_type(NULL, "cpu");
- if (np) {
- const unsigned int *prop =
- of_get_property(np, "bus-frequency", NULL);
-
- of_node_put(np);
- if (prop) {
- busfreq = *prop;
- } else {
- pr_err("Can't get bus-frequency property\n");
- return;
- }
- } else {
- pr_err("Can't find 'cpu' node.\n");
+ if (clk_prepare_enable(clk_diu)) {
+ pr_err("Could not enable DIU clock.\n");
return;
}
- /* Pixel Clock configuration */
- pr_debug("DIU: Bus Frequency = %lu\n", busfreq);
- speed = busfreq * 4; /* DIU_DIV ratio is 4 * CSB_CLK / DIU_CLK */
-
- /* Calculate the pixel clock with the smallest error */
- /* calculate the following in steps to avoid overflow */
- pr_debug("DIU pixclock in ps - %d\n", pixclock);
- temp = (1000000000 / pixclock) * 1000;
- pixclock = temp;
- pr_debug("DIU pixclock freq - %u\n", pixclock);
-
- temp = temp / 20; /* pixclock * 0.05 */
- pr_debug("deviation = %d\n", temp);
- minpixclock = pixclock - temp;
- maxpixclock = pixclock + temp;
- pr_debug("DIU minpixclock - %lu\n", minpixclock);
- pr_debug("DIU maxpixclock - %lu\n", maxpixclock);
- pixval = speed/pixclock;
- pr_debug("DIU pixval = %lu\n", pixval);
-
- err = LONG_MAX;
- bestval = pixval;
- pr_debug("DIU bestval = %lu\n", bestval);
-
- bestfreq = 0;
- for (i = -1; i <= 1; i++) {
- temp = speed / (pixval+i);
- pr_debug("DIU test pixval i=%d, pixval=%lu, temp freq. = %u\n",
- i, pixval, temp);
- if ((temp < minpixclock) || (temp > maxpixclock))
- pr_debug("DIU exceeds monitor range (%lu to %lu)\n",
- minpixclock, maxpixclock);
- else if (abs(temp - pixclock) < err) {
- pr_debug("Entered the else if block %d\n", i);
- err = abs(temp - pixclock);
- bestval = pixval + i;
- bestfreq = temp;
- }
+ /*
+ * convert the picoseconds spec into the desired clock rate,
+ * determine the acceptable clock range for the monitor (+/- 5%),
+ * do the calculation in steps to avoid integer overflow
+ */
+ pr_debug("DIU pixclock in ps - %u\n", pixclock);
+ pixclock = (1000000000 / pixclock) * 1000;
+ pr_debug("DIU pixclock freq - %u\n", pixclock);
+ epsilon = pixclock / 20; /* pixclock * 0.05 */
+ pr_debug("DIU deviation - %lu\n", epsilon);
+ minpixclock = pixclock - epsilon;
+ maxpixclock = pixclock + epsilon;
+ pr_debug("DIU minpixclock - %lu\n", minpixclock);
+ pr_debug("DIU maxpixclock - %lu\n", maxpixclock);
+
+ /*
+ * check whether the DIU supports the desired pixel clock
+ *
+ * - simply request the desired clock and see what the
+ * platform's clock driver will make of it, assuming that it
+ * will setup the best approximation of the requested value
+ * - try other candidate frequencies in the order of decreasing
+ * preference (i.e. with increasing distance from the desired
+ * pixel clock, and checking the lower frequency before the
+ * higher frequency to not overload the hardware) until the
+ * first match is found -- any potential subsequent match
+ * would only be as good as the former match or typically
+ * would be less preferable
+ *
+ * the offset increment of pixclock divided by 64 is an
+ * arbitrary choice -- it's simple to calculate, in the typical
+ * case we expect the first check to succeed already, in the
+ * worst case seven frequencies get tested (the exact center and
+ * three more values each to the left and to the right) before
+ * the 5% tolerance window is exceeded, resulting in fast enough
+ * execution yet high enough probability of finding a suitable
+ * value, while the error will be on the order of a few
+ * percent
+ */
+ for (offset = 0; offset <= epsilon; offset += pixclock / 64) {
+ want = pixclock - offset;
+ pr_debug("DIU checking clock - %lu\n", want);
+ clk_set_rate(clk_diu, want);
+ got = clk_get_rate(clk_diu);
+ delta = abs(pixclock - got);
+ if (delta < epsilon)
+ break;
+ if (!offset)
+ continue;
+ want = pixclock + offset;
+ pr_debug("DIU checking clock - %lu\n", want);
+ clk_set_rate(clk_diu, want);
+ got = clk_get_rate(clk_diu);
+ delta = abs(pixclock - got);
+ if (delta < epsilon)
+ break;
}
+ if (offset <= epsilon) {
+ pr_debug("DIU clock accepted - %lu\n", want);
+ pr_debug("DIU pixclock want %u, got %lu, delta %lu, eps %lu\n",
+ pixclock, got, delta, epsilon);
+ return;
+ }
+ pr_warn("DIU pixclock auto search unsuccessful\n");
- pr_debug("DIU chose = %lx\n", bestval);
- pr_debug("DIU error = %ld\n NomPixClk ", err);
- pr_debug("DIU: Best Freq = %lx\n", bestfreq);
- /* Modify DIU_DIV in CCM SCFR1 */
- temp = in_be32(&ccm->scfr1);
- pr_debug("DIU: Current value of SCFR1: 0x%08x\n", temp);
- temp &= ~DIU_DIV_MASK;
- temp |= (bestval & DIU_DIV_MASK);
- out_be32(&ccm->scfr1, temp);
- pr_debug("DIU: Modified value of SCFR1: 0x%08x\n", temp);
- iounmap(ccm);
+ /*
+ * what is the most appropriate action to take when the search
+ * for an available pixel clock which is acceptable to the
+ * monitor has failed? disable the DIU (clock) or just provide
+ * a "best effort"? we go with the latter
+ */
+ pr_warn("DIU pixclock best effort fallback (backend's choice)\n");
+ clk_set_rate(clk_diu, pixclock);
+ got = clk_get_rate(clk_diu);
+ delta = abs(pixclock - got);
+ pr_debug("DIU pixclock want %u, got %lu, delta %lu, eps %lu\n",
+ pixclock, got, delta, epsilon);
}
static enum fsl_diu_monitor_port
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index af54174801f..b625a2c6f4f 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -1,7 +1,7 @@
config PPC_MPC52xx
bool "52xx-based boards"
depends on 6xx
- select PPC_CLOCK
+ select COMMON_CLK
select PPC_PCI_CHOICE
config PPC_MPC5200_SIMPLE
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index be7b1aa4d54..37f7a89c10f 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -245,7 +245,7 @@ static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id)
if (dma && !write) {
spin_unlock_irqrestore(&lpbfifo.lock, flags);
- pr_err("bogus LPBFIFO IRQ (dma and not writting)\n");
+ pr_err("bogus LPBFIFO IRQ (dma and not writing)\n");
return IRQ_HANDLED;
}
diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig
index 670a033264c..2bdc8c862c4 100644
--- a/arch/powerpc/platforms/83xx/Kconfig
+++ b/arch/powerpc/platforms/83xx/Kconfig
@@ -99,7 +99,6 @@ config SBC834x
config ASP834x
bool "Analogue & Micro ASP 834x"
select PPC_MPC834x
- select REDBOOT
help
This enables support for the Analogue & Micro ASP 83xx
board.
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index fd71cfdf238..e238b6a55b1 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -11,7 +11,6 @@
* (at your option) any later version.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 3d9716ccd32..4b4c081df94 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -10,7 +10,6 @@
* by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/pm.h>
#include <linux/types.h>
#include <linux/ioport.h>
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 4d4634958cf..c17aae80e7f 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -123,6 +123,12 @@ config P1023_RDS
help
This option enables support for the P1023 RDS and RDB boards
+config TWR_P102x
+ bool "Freescale TWR-P102x"
+ select DEFAULT_UIMAGE
+ help
+ This option enables support for the TWR-P1025 board.
+
config SOCRATES
bool "Socrates"
select DEFAULT_UIMAGE
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index dd4c0b59577..25cebe74ac4 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_P1010_RDB) += p1010rdb.o
obj-$(CONFIG_P1022_DS) += p1022_ds.o
obj-$(CONFIG_P1022_RDK) += p1022_rdk.o
obj-$(CONFIG_P1023_RDS) += p1023_rds.o
+obj-$(CONFIG_TWR_P102x) += twr_p102x.o
obj-$(CONFIG_CORENET_GENERIC) += corenet_generic.o
obj-$(CONFIG_STX_GP3) += stx_gp3.o
obj-$(CONFIG_TQM85xx) += tqm85xx.o
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index eba78c85303..3b085c7ee53 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -9,6 +9,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <asm/qe.h>
#include <sysdev/cpm2_pic.h>
#include "mpc85xx.h"
@@ -82,3 +83,40 @@ void __init mpc85xx_cpm2_pic_init(void)
irq_set_chained_handler(irq, cpm2_cascade);
}
#endif
+
+#ifdef CONFIG_QUICC_ENGINE
+void __init mpc85xx_qe_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!np) {
+ np = of_find_node_by_name(NULL, "qe");
+ if (!np) {
+ pr_err("%s: Could not find Quicc Engine node\n",
+ __func__);
+ return;
+ }
+ }
+
+ if (!of_device_is_available(np)) {
+ of_node_put(np);
+ return;
+ }
+
+ qe_reset();
+ of_node_put(np);
+
+ np = of_find_node_by_name(NULL, "par_io");
+ if (np) {
+ struct device_node *ucc;
+
+ par_io_init(np);
+ of_node_put(np);
+
+ for_each_node_by_name(ucc, "ucc")
+ par_io_of_config(ucc);
+
+ }
+}
+#endif
diff --git a/arch/powerpc/platforms/85xx/mpc85xx.h b/arch/powerpc/platforms/85xx/mpc85xx.h
index 2aa7c5dc2c7..fc51dd4092e 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx.h
+++ b/arch/powerpc/platforms/85xx/mpc85xx.h
@@ -8,4 +8,10 @@ extern void mpc85xx_cpm2_pic_init(void);
static inline void __init mpc85xx_cpm2_pic_init(void) {}
#endif /* CONFIG_CPM2 */
+#ifdef CONFIG_QUICC_ENGINE
+extern void mpc85xx_qe_init(void);
+#else
+static inline void __init mpc85xx_qe_init(void) {}
+#endif
+
#endif
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index a7b3621a8df..34f3c5eb3be 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2010, 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2006-2010, 2012-2013 Freescale Semiconductor, Inc.
* All rights reserved.
*
* Author: Andy Fleming <afleming@freescale.com>
@@ -238,32 +238,7 @@ static void __init mpc85xx_mds_qe_init(void)
{
struct device_node *np;
- np = of_find_compatible_node(NULL, NULL, "fsl,qe");
- if (!np) {
- np = of_find_node_by_name(NULL, "qe");
- if (!np)
- return;
- }
-
- if (!of_device_is_available(np)) {
- of_node_put(np);
- return;
- }
-
- qe_reset();
- of_node_put(np);
-
- np = of_find_node_by_name(NULL, "par_io");
- if (np) {
- struct device_node *ucc;
-
- par_io_init(np);
- of_node_put(np);
-
- for_each_node_by_name(ucc, "ucc")
- par_io_of_config(ucc);
- }
-
+ mpc85xx_qe_init();
mpc85xx_mds_reset_ucc_phys();
if (machine_is(p1021_mds)) {
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index 53b6fb0a3d5..e15bdd18fdb 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -1,7 +1,7 @@
/*
* MPC85xx RDB Board Setup
*
- * Copyright 2009,2012 Freescale Semiconductor Inc.
+ * Copyright 2009,2012-2013 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -98,26 +98,7 @@ static void __init mpc85xx_rdb_setup_arch(void)
fsl_pci_assign_primary();
#ifdef CONFIG_QUICC_ENGINE
- np = of_find_compatible_node(NULL, NULL, "fsl,qe");
- if (!np) {
- pr_err("%s: Could not find Quicc Engine node\n", __func__);
- goto qe_fail;
- }
-
- qe_reset();
- of_node_put(np);
-
- np = of_find_node_by_name(NULL, "par_io");
- if (np) {
- struct device_node *ucc;
-
- par_io_init(np);
- of_node_put(np);
-
- for_each_node_by_name(ucc, "ucc")
- par_io_of_config(ucc);
-
- }
+ mpc85xx_qe_init();
#if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE)
if (machine_is(p1025_rdb)) {
@@ -148,8 +129,6 @@ static void __init mpc85xx_rdb_setup_arch(void)
}
#endif
-
-qe_fail:
#endif /* CONFIG_QUICC_ENGINE */
printk(KERN_INFO "MPC85xx RDB board from Freescale Semiconductor\n");
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index b9197cea185..bb75add6708 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -14,7 +14,6 @@
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 393f975ab39..6382098d6f8 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -389,15 +389,18 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
}
#endif /* CONFIG_KEXEC */
-static void smp_85xx_setup_cpu(int cpu_nr)
+static void smp_85xx_basic_setup(int cpu_nr)
{
- if (smp_85xx_ops.probe == smp_mpic_probe)
- mpic_setup_this_cpu();
-
if (cpu_has_feature(CPU_FTR_DBELL))
doorbell_setup_this_cpu();
}
+static void smp_85xx_setup_cpu(int cpu_nr)
+{
+ mpic_setup_this_cpu();
+ smp_85xx_basic_setup(cpu_nr);
+}
+
static const struct of_device_id mpc85xx_smp_guts_ids[] = {
{ .compatible = "fsl,mpc8572-guts", },
{ .compatible = "fsl,p1020-guts", },
@@ -412,13 +415,14 @@ void __init mpc85xx_smp_init(void)
{
struct device_node *np;
- smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
np = of_find_node_by_type(NULL, "open-pic");
if (np) {
smp_85xx_ops.probe = smp_mpic_probe;
+ smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
smp_85xx_ops.message_pass = smp_mpic_message_pass;
- }
+ } else
+ smp_85xx_ops.setup_cpu = smp_85xx_basic_setup;
if (cpu_has_feature(CPU_FTR_DBELL)) {
/*
@@ -427,6 +431,7 @@ void __init mpc85xx_smp_init(void)
*/
smp_85xx_ops.message_pass = NULL;
smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
+ smp_85xx_ops.probe = NULL;
}
np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c
new file mode 100644
index 00000000000..c25ff10f05e
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/twr_p102x.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2010-2011, 2013 Freescale Semiconductor, Inc.
+ *
+ * Author: Michael Johnston <michael.johnston@freescale.com>
+ *
+ * Description:
+ * TWR-P102x Board Setup
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/of_platform.h>
+
+#include <asm/pci-bridge.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+#include <asm/qe.h>
+#include <asm/qe_ic.h>
+#include <asm/fsl_guts.h>
+
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+#include "smp.h"
+
+#include "mpc85xx.h"
+
+static void __init twr_p1025_pic_init(void)
+{
+ struct mpic *mpic;
+
+#ifdef CONFIG_QUICC_ENGINE
+ struct device_node *np;
+#endif
+
+ mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
+ MPIC_SINGLE_DEST_CPU,
+ 0, 256, " OpenPIC ");
+
+ BUG_ON(mpic == NULL);
+ mpic_init(mpic);
+
+#ifdef CONFIG_QUICC_ENGINE
+ np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
+ if (np) {
+ qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
+ qe_ic_cascade_high_mpic);
+ of_node_put(np);
+ } else
+ pr_err("Could not find qe-ic node\n");
+#endif
+}
+
+/* ************************************************************************
+ *
+ * Setup the architecture
+ *
+ */
+static void __init twr_p1025_setup_arch(void)
+{
+#ifdef CONFIG_QUICC_ENGINE
+ struct device_node *np;
+#endif
+
+ if (ppc_md.progress)
+ ppc_md.progress("twr_p1025_setup_arch()", 0);
+
+ mpc85xx_smp_init();
+
+ fsl_pci_assign_primary();
+
+#ifdef CONFIG_QUICC_ENGINE
+ mpc85xx_qe_init();
+
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE)
+ if (machine_is(twr_p1025)) {
+ struct ccsr_guts __iomem *guts;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,p1021-guts");
+ if (np) {
+ guts = of_iomap(np, 0);
+ if (!guts)
+ pr_err("twr_p1025: could not map global utilities register\n");
+ else {
+ /* P1025 has pins muxed for QE and other functions. To
+ * enable QE UEC mode, we need to set bit QE0 for UCC1
+ * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9
+ * and QE12 for QE MII management signals in PMUXCR
+ * register.
+ * Set QE mux bits in PMUXCR */
+ setbits32(&guts->pmuxcr, MPC85xx_PMUXCR_QE(0) |
+ MPC85xx_PMUXCR_QE(3) |
+ MPC85xx_PMUXCR_QE(9) |
+ MPC85xx_PMUXCR_QE(12));
+ iounmap(guts);
+
+#if defined(CONFIG_SERIAL_QE)
+ /* On the P1025TWR board, UCC7 acts as a UART port.
+ * However, UCC7's CTS pin is low by default, which
+ * impacts transmission in full-duplex communication.
+ * So disable the flow control pin PA18; the UCC7
+ * UART then uses only the RXD and TXD pins.
+ */
+ par_io_config_pin(0, 18, 0, 0, 0, 0);
+#endif
+ /* Drive PB29 to CPLD low - CPLD will then change
+ * muxing from LBC to QE */
+ par_io_config_pin(1, 29, 1, 0, 0, 0);
+ par_io_data_set(1, 29, 0);
+ }
+ of_node_put(np);
+ }
+ }
+#endif
+#endif /* CONFIG_QUICC_ENGINE */
+
+ pr_info("TWR-P1025 board from Freescale Semiconductor\n");
+}
+
+machine_arch_initcall(twr_p1025, mpc85xx_common_publish_devices);
+
+static int __init twr_p1025_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ return of_flat_dt_is_compatible(root, "fsl,TWR-P1025");
+}
+
+define_machine(twr_p1025) {
+ .name = "TWR-P1025",
+ .probe = twr_p1025_probe,
+ .setup_arch = twr_p1025_setup_arch,
+ .init_IRQ = twr_p1025_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index 8dec3c0911a..bd6f1a1cf92 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -45,7 +45,6 @@ config PPC_EP88XC
config PPC_ADDER875
bool "Analogue & Micro Adder 875"
select CPM1
- select REDBOOT
help
This enables support for the Analogue & Micro Adder 875
board.
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index bca2465a9c3..434fda39bf8 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -72,6 +72,7 @@ config PPC_BOOK3S_64
select PPC_HAVE_PMU_SUPPORT
select SYS_SUPPORTS_HUGETLBFS
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if PPC_64K_PAGES
+ select ARCH_SUPPORTS_NUMA_BALANCING
config PPC_BOOK3E_64
bool "Embedded processors"
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index c34ee4e6087..d4d245c0d78 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -111,7 +111,7 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
if (rflags & _PAGE_NO_CACHE)
- hpte_r &= ~_PAGE_COHERENT;
+ hpte_r &= ~HPTE_R_M;
raw_spin_lock(&beat_htab_lock);
lpar_rc = beat_read_mask(hpte_group);
@@ -337,7 +337,7 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
if (rflags & _PAGE_NO_CACHE)
- hpte_r &= ~_PAGE_COHERENT;
+ hpte_r &= ~HPTE_R_M;
/* insert into not-volted entry */
lpar_rc = beat_insert_htab_entry3(0, hpte_group, hpte_v, hpte_r,
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index b53560660b7..2b90ff8a93b 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
- for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
+ for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift)
io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
mb();
@@ -430,7 +430,7 @@ static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
{
cell_iommu_setup_stab(iommu, base, size, 0, 0);
iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
- IOMMU_PAGE_SHIFT);
+ IOMMU_PAGE_SHIFT_4K);
cell_iommu_enable_hardware(iommu);
}
@@ -487,8 +487,10 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
window->table.it_blocksize = 16;
window->table.it_base = (unsigned long)iommu->ptab;
window->table.it_index = iommu->nid;
- window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;
- window->table.it_size = size >> IOMMU_PAGE_SHIFT;
+ window->table.it_page_shift = IOMMU_PAGE_SHIFT_4K;
+ window->table.it_offset =
+ (offset >> window->table.it_page_shift) + pte_offset;
+ window->table.it_size = size >> window->table.it_page_shift;
iommu_init_table(&window->table, iommu->nid);
@@ -773,7 +775,7 @@ static void __init cell_iommu_init_one(struct device_node *np,
/* Setup the iommu_table */
cell_iommu_setup_window(iommu, np, base, size,
- offset >> IOMMU_PAGE_SHIFT);
+ offset >> IOMMU_PAGE_SHIFT_4K);
}
static void __init cell_disable_iommus(void)
@@ -1122,7 +1124,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
- IOMMU_PAGE_SHIFT);
+ IOMMU_PAGE_SHIFT_4K);
cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
fbase, fsize);
cell_iommu_enable_hardware(iommu);
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index dead91b177b..b6c9a0dcc92 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <asm/ptrace.h>
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index 302ba43d73a..6d3c7a9fd04 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -67,6 +67,18 @@ config PPC_C2K
This option enables support for the GE Fanuc C2K board (formerly
an SBS board).
+config MVME5100
+ bool "Motorola/Emerson MVME5100"
+ depends on EMBEDDED6xx
+ select MPIC
+ select PCI
+ select PPC_INDIRECT_PCI
+ select PPC_I8259
+ select PPC_NATIVE
+ help
+ This option enables support for the Motorola (now Emerson) MVME5100
+ board.
+
config TSI108_BRIDGE
bool
select PCI
@@ -113,4 +125,3 @@ config WII
help
Select WII if configuring for the Nintendo Wii.
More information at: <http://gc-linux.sourceforge.net/>
-
diff --git a/arch/powerpc/platforms/embedded6xx/Makefile b/arch/powerpc/platforms/embedded6xx/Makefile
index 66c23e423f4..cdd48d402b9 100644
--- a/arch/powerpc/platforms/embedded6xx/Makefile
+++ b/arch/powerpc/platforms/embedded6xx/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_USBGECKO_UDBG) += usbgecko_udbg.o
obj-$(CONFIG_GAMECUBE_COMMON) += flipper-pic.o
obj-$(CONFIG_GAMECUBE) += gamecube.o
obj-$(CONFIG_WII) += wii.o hlwd-pic.o
+obj-$(CONFIG_MVME5100) += mvme5100.o
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 6c03034dbbd..c269caee58f 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -15,7 +15,6 @@
#define pr_fmt(fmt) DRV_MODULE_NAME ": " fmt
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c
new file mode 100644
index 00000000000..25e3bfb64ef
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c
@@ -0,0 +1,221 @@
+/*
+ * Board setup routines for the Motorola/Emerson MVME5100.
+ *
+ * Copyright 2013 CSC Australia Pty. Ltd.
+ *
+ * Based on earlier code by:
+ *
+ * Matt Porter, MontaVista Software Inc.
+ * Copyright 2001 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Author: Stephen Chivers <schivers@csc.com>
+ *
+ */
+
+#include <linux/of_platform.h>
+
+#include <asm/i8259.h>
+#include <asm/pci-bridge.h>
+#include <asm/mpic.h>
+#include <asm/prom.h>
+#include <mm/mmu_decl.h>
+#include <asm/udbg.h>
+
+#define HAWK_MPIC_SIZE 0x00040000U
+#define MVME5100_PCI_MEM_OFFSET 0x00000000
+
+/* Board register addresses. */
+#define BOARD_STATUS_REG 0xfef88080
+#define BOARD_MODFAIL_REG 0xfef88090
+#define BOARD_MODRST_REG 0xfef880a0
+#define BOARD_TBEN_REG 0xfef880c0
+#define BOARD_SW_READ_REG 0xfef880e0
+#define BOARD_GEO_ADDR_REG 0xfef880e8
+#define BOARD_EXT_FEATURE1_REG 0xfef880f0
+#define BOARD_EXT_FEATURE2_REG 0xfef88100
+
+static phys_addr_t pci_membase;
+static u_char *restart;
+
+static void mvme5100_8259_cascade(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int cascade_irq = i8259_irq();
+
+ if (cascade_irq != NO_IRQ)
+ generic_handle_irq(cascade_irq);
+
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static void __init mvme5100_pic_init(void)
+{
+ struct mpic *mpic;
+ struct device_node *np;
+ struct device_node *cp = NULL;
+ unsigned int cirq;
+ unsigned long intack = 0;
+ const u32 *prop = NULL;
+
+ np = of_find_node_by_type(NULL, "open-pic");
+ if (!np) {
+ pr_err("Could not find open-pic node\n");
+ return;
+ }
+
+ mpic = mpic_alloc(np, pci_membase, 0, 16, 256, " OpenPIC ");
+
+ BUG_ON(mpic == NULL);
+ of_node_put(np);
+
+ mpic_assign_isu(mpic, 0, pci_membase + 0x10000);
+
+ mpic_init(mpic);
+
+ cp = of_find_compatible_node(NULL, NULL, "chrp,iic");
+ if (cp == NULL) {
+ pr_warn("mvme5100_pic_init: couldn't find i8259\n");
+ return;
+ }
+
+ cirq = irq_of_parse_and_map(cp, 0);
+ if (cirq == NO_IRQ) {
+ pr_warn("mvme5100_pic_init: no cascade interrupt?\n");
+ return;
+ }
+
+ np = of_find_compatible_node(NULL, "pci", "mpc10x-pci");
+ if (np) {
+ prop = of_get_property(np, "8259-interrupt-acknowledge", NULL);
+
+ if (prop)
+ intack = prop[0];
+
+ of_node_put(np);
+ }
+
+ if (intack)
+ pr_debug("mvme5100_pic_init: PCI 8259 intack at 0x%016lx\n",
+ intack);
+
+ i8259_init(cp, intack);
+ of_node_put(cp);
+ irq_set_chained_handler(cirq, mvme5100_8259_cascade);
+}
+
+static int __init mvme5100_add_bridge(struct device_node *dev)
+{
+ const int *bus_range;
+ int len;
+ struct pci_controller *hose;
+ unsigned short devid;
+
+ pr_info("Adding PCI host bridge %s\n", dev->full_name);
+
+ bus_range = of_get_property(dev, "bus-range", &len);
+
+ hose = pcibios_alloc_controller(dev);
+ if (hose == NULL)
+ return -ENOMEM;
+
+ hose->first_busno = bus_range ? bus_range[0] : 0;
+ hose->last_busno = bus_range ? bus_range[1] : 0xff;
+
+ setup_indirect_pci(hose, 0xfe000cf8, 0xfe000cfc, 0);
+
+ pci_process_bridge_OF_ranges(hose, dev, 1);
+
+ early_read_config_word(hose, 0, 0, PCI_DEVICE_ID, &devid);
+
+ if (devid != PCI_DEVICE_ID_MOTOROLA_HAWK) {
+ pr_err("HAWK PHB not present?\n");
+ return 0;
+ }
+
+ early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_1, &pci_membase);
+
+ if (pci_membase == 0) {
+ pr_err("HAWK PHB mibar not correctly set?\n");
+ return 0;
+ }
+
+ pr_info("mvme5100_pic_init: pci_membase: %x\n", pci_membase);
+
+ return 0;
+}
+
+static struct of_device_id mvme5100_of_bus_ids[] __initdata = {
+ { .compatible = "hawk-bridge", },
+ {},
+};
+
+/*
+ * Setup the architecture
+ */
+static void __init mvme5100_setup_arch(void)
+{
+ struct device_node *np;
+
+ if (ppc_md.progress)
+ ppc_md.progress("mvme5100_setup_arch()", 0);
+
+ for_each_compatible_node(np, "pci", "hawk-pci")
+ mvme5100_add_bridge(np);
+
+ restart = ioremap(BOARD_MODRST_REG, 4);
+}
+
+
+static void mvme5100_show_cpuinfo(struct seq_file *m)
+{
+ seq_puts(m, "Vendor\t\t: Motorola/Emerson\n");
+ seq_puts(m, "Machine\t\t: MVME5100\n");
+}
+
+static void mvme5100_restart(char *cmd)
+{
+
+ local_irq_disable();
+ mtmsr(mfmsr() | MSR_IP);
+
+ out_8((u_char *) restart, 0x01);
+
+ while (1)
+ ;
+}
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init mvme5100_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ return of_flat_dt_is_compatible(root, "MVME5100");
+}
+
+static int __init probe_of_platform_devices(void)
+{
+
+ of_platform_bus_probe(NULL, mvme5100_of_bus_ids, NULL);
+ return 0;
+}
+
+machine_device_initcall(mvme5100, probe_of_platform_devices);
+
+define_machine(mvme5100) {
+ .name = "MVME5100",
+ .probe = mvme5100_probe,
+ .setup_arch = mvme5100_setup_arch,
+ .init_IRQ = mvme5100_pic_init,
+ .show_cpuinfo = mvme5100_show_cpuinfo,
+ .get_irq = mpic_get_irq,
+ .restart = mvme5100_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index f3defd8a280..aafa01ba062 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -18,7 +18,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/slab.h>
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 7d2d036754b..2e576f2ae44 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -138,8 +138,11 @@ static void iommu_table_iobmap_setup(void)
pr_debug(" -> %s\n", __func__);
iommu_table_iobmap.it_busno = 0;
iommu_table_iobmap.it_offset = 0;
+ iommu_table_iobmap.it_page_shift = IOBMAP_PAGE_SHIFT;
+
/* it_size is in number of entries */
- iommu_table_iobmap.it_size = 0x80000000 >> IOBMAP_PAGE_SHIFT;
+ iommu_table_iobmap.it_size =
+ 0x80000000 >> iommu_table_iobmap.it_page_shift;
/* Initialize the common IOMMU code */
iommu_table_iobmap.it_base = (unsigned long)iob_l2_base;
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
index d588e48dff7..43075081721 100644
--- a/arch/powerpc/platforms/powermac/pfunc_core.c
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -5,7 +5,6 @@
* FIXME: LOCKING !!!
*/
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 9fced3f6d2d..895e8a20a3f 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -13,11 +13,6 @@ config PPC_POWERNV
select ARCH_RANDOM
default y
-config POWERNV_MSI
- bool "Support PCI MSI on PowerNV platform"
- depends on PCI_MSI
- default y
-
config PPC_POWERNV_RTAS
depends on PPC_POWERNV
bool "Support for RTAS based PowerNV platforms such as BML"
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 873fa1370dc..8d767fde5a6 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o
obj-$(CONFIG_EEH) += eeh-ioda.o eeh-powernv.o
obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
+obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index d7ddcee7feb..f5147433646 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -14,7 +14,6 @@
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
@@ -45,7 +44,8 @@ static int ioda_eeh_event(struct notifier_block *nb,
/* We simply send special EEH event */
if ((changed_evts & OPAL_EVENT_PCI_ERROR) &&
- (events & OPAL_EVENT_PCI_ERROR))
+ (events & OPAL_EVENT_PCI_ERROR) &&
+ eeh_enabled())
eeh_send_failure_event(NULL);
return 0;
@@ -490,8 +490,7 @@ static int ioda_eeh_bridge_reset(struct pci_controller *hose,
static int ioda_eeh_reset(struct eeh_pe *pe, int option)
{
struct pci_controller *hose = pe->phb;
- struct eeh_dev *edev;
- struct pci_dev *dev;
+ struct pci_bus *bus;
int ret;
/*
@@ -520,31 +519,11 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
if (pe->type & EEH_PE_PHB) {
ret = ioda_eeh_phb_reset(hose, option);
} else {
- if (pe->type & EEH_PE_DEVICE) {
- /*
- * If it's device PE, we didn't refer to the parent
- * PCI bus yet. So we have to figure it out indirectly.
- */
- edev = list_first_entry(&pe->edevs,
- struct eeh_dev, list);
- dev = eeh_dev_to_pci_dev(edev);
- dev = dev->bus->self;
- } else {
- /*
- * If it's bus PE, the parent PCI bus is already there
- * and just pick it up.
- */
- dev = pe->bus->self;
- }
-
- /*
- * Do reset based on the fact that the direct upstream bridge
- * is root bridge (port) or not.
- */
- if (dev->bus->number == 0)
+ bus = eeh_pe_bus_get(pe);
+ if (pci_is_root_bus(bus))
ret = ioda_eeh_root_reset(hose, option);
else
- ret = ioda_eeh_bridge_reset(hose, dev, option);
+ ret = ioda_eeh_bridge_reset(hose, bus->self, option);
}
return ret;
@@ -578,11 +557,8 @@ static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
return -EIO;
}
- /*
- * FIXME: We probably need log the error in somewhere.
- * Lets make it up in future.
- */
- /* pr_info("%s", phb->diag.blob); */
+ /* The PHB diag-data is always indicative */
+ pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
spin_unlock_irqrestore(&phb->lock, flags);
@@ -670,143 +646,9 @@ static void ioda_eeh_hub_diag(struct pci_controller *hose)
}
}
-static void ioda_eeh_p7ioc_phb_diag(struct pci_controller *hose,
- struct OpalIoPhbErrorCommon *common)
-{
- struct OpalIoP7IOCPhbErrorData *data;
- int i;
-
- data = (struct OpalIoP7IOCPhbErrorData *)common;
-
- pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n\n",
- hose->global_number, common->version);
-
- pr_info(" brdgCtl: %08x\n", data->brdgCtl);
-
- pr_info(" portStatusReg: %08x\n", data->portStatusReg);
- pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
- pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
-
- pr_info(" deviceStatus: %08x\n", data->deviceStatus);
- pr_info(" slotStatus: %08x\n", data->slotStatus);
- pr_info(" linkStatus: %08x\n", data->linkStatus);
- pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
- pr_info(" devSecStatus: %08x\n", data->devSecStatus);
-
- pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
- pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
- pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
- pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
- pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
- pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
- pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
- pr_info(" sourceId: %08x\n", data->sourceId);
-
- pr_info(" errorClass: %016llx\n", data->errorClass);
- pr_info(" correlator: %016llx\n", data->correlator);
- pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr);
- pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr);
- pr_info(" lemFir: %016llx\n", data->lemFir);
- pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
- pr_info(" lemWOF: %016llx\n", data->lemWOF);
- pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
- pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
- pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
- pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
- pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
- pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
- pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
- pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
- pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
- pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
- pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
- pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
- pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
- pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
- pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
- pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
-
- for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
- if ((data->pestA[i] >> 63) == 0 &&
- (data->pestB[i] >> 63) == 0)
- continue;
-
- pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
- pr_info(" PESTB: %016llx\n", data->pestB[i]);
- }
-}
-
-static void ioda_eeh_phb3_phb_diag(struct pci_controller *hose,
- struct OpalIoPhbErrorCommon *common)
-{
- struct OpalIoPhb3ErrorData *data;
- int i;
-
- data = (struct OpalIoPhb3ErrorData*)common;
- pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n\n",
- hose->global_number, common->version);
-
- pr_info(" brdgCtl: %08x\n", data->brdgCtl);
-
- pr_info(" portStatusReg: %08x\n", data->portStatusReg);
- pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
- pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
-
- pr_info(" deviceStatus: %08x\n", data->deviceStatus);
- pr_info(" slotStatus: %08x\n", data->slotStatus);
- pr_info(" linkStatus: %08x\n", data->linkStatus);
- pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
- pr_info(" devSecStatus: %08x\n", data->devSecStatus);
-
- pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
- pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
- pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
- pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
- pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
- pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
- pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
- pr_info(" sourceId: %08x\n", data->sourceId);
- pr_info(" errorClass: %016llx\n", data->errorClass);
- pr_info(" correlator: %016llx\n", data->correlator);
- pr_info(" nFir: %016llx\n", data->nFir);
- pr_info(" nFirMask: %016llx\n", data->nFirMask);
- pr_info(" nFirWOF: %016llx\n", data->nFirWOF);
- pr_info(" PhbPlssr: %016llx\n", data->phbPlssr);
- pr_info(" PhbCsr: %016llx\n", data->phbCsr);
- pr_info(" lemFir: %016llx\n", data->lemFir);
- pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
- pr_info(" lemWOF: %016llx\n", data->lemWOF);
- pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
- pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
- pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
- pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
- pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
- pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
- pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
- pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
- pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
- pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
- pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
- pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
- pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
- pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
- pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
- pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
-
- for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
- if ((data->pestA[i] >> 63) == 0 &&
- (data->pestB[i] >> 63) == 0)
- continue;
-
- pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
- pr_info(" PESTB: %016llx\n", data->pestB[i]);
- }
-}
-
static void ioda_eeh_phb_diag(struct pci_controller *hose)
{
struct pnv_phb *phb = hose->private_data;
- struct OpalIoPhbErrorCommon *common;
long rc;
rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
@@ -817,18 +659,7 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose)
return;
}
- common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
- switch (common->ioType) {
- case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
- ioda_eeh_p7ioc_phb_diag(hose, common);
- break;
- case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
- ioda_eeh_phb3_phb_diag(hose, common);
- break;
- default:
- pr_warning("%s: Unrecognized I/O chip %d\n",
- __func__, common->ioType);
- }
+ pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
}
static int ioda_eeh_get_phb_pe(struct pci_controller *hose,
@@ -862,11 +693,7 @@ static int ioda_eeh_get_pe(struct pci_controller *hose,
dev.phb = hose;
dev.pe_config_addr = pe_no;
dev_pe = eeh_pe_get(&dev);
- if (!dev_pe) {
- pr_warning("%s: Can't find PE for PHB#%x - PE#%x\n",
- __func__, hose->global_number, pe_no);
- return -EEXIST;
- }
+ if (!dev_pe) return -EEXIST;
*pe = dev_pe;
return 0;
@@ -884,12 +711,12 @@ static int ioda_eeh_get_pe(struct pci_controller *hose,
*/
static int ioda_eeh_next_error(struct eeh_pe **pe)
{
- struct pci_controller *hose, *tmp;
+ struct pci_controller *hose;
struct pnv_phb *phb;
u64 frozen_pe_no;
u16 err_type, severity;
long rc;
- int ret = 1;
+ int ret = EEH_NEXT_ERR_NONE;
/*
* While running here, it's safe to purge the event queue.
@@ -899,7 +726,7 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
eeh_remove_event(NULL);
opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+ list_for_each_entry(hose, &hose_list, list_node) {
/*
* If the subordinate PCI buses of the PHB has been
* removed, we needn't take care of it any more.
@@ -938,19 +765,19 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
switch (err_type) {
case OPAL_EEH_IOC_ERROR:
if (severity == OPAL_EEH_SEV_IOC_DEAD) {
- list_for_each_entry_safe(hose, tmp,
- &hose_list, list_node) {
+ list_for_each_entry(hose, &hose_list,
+ list_node) {
phb = hose->private_data;
phb->eeh_state |= PNV_EEH_STATE_REMOVED;
}
pr_err("EEH: dead IOC detected\n");
- ret = 4;
- goto out;
+ ret = EEH_NEXT_ERR_DEAD_IOC;
} else if (severity == OPAL_EEH_SEV_INF) {
pr_info("EEH: IOC informative error "
"detected\n");
ioda_eeh_hub_diag(hose);
+ ret = EEH_NEXT_ERR_NONE;
}
break;
@@ -962,37 +789,61 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
pr_err("EEH: dead PHB#%x detected\n",
hose->global_number);
phb->eeh_state |= PNV_EEH_STATE_REMOVED;
- ret = 3;
- goto out;
+ ret = EEH_NEXT_ERR_DEAD_PHB;
} else if (severity == OPAL_EEH_SEV_PHB_FENCED) {
if (ioda_eeh_get_phb_pe(hose, pe))
break;
pr_err("EEH: fenced PHB#%x detected\n",
hose->global_number);
- ret = 2;
- goto out;
+ ret = EEH_NEXT_ERR_FENCED_PHB;
} else if (severity == OPAL_EEH_SEV_INF) {
pr_info("EEH: PHB#%x informative error "
"detected\n",
hose->global_number);
ioda_eeh_phb_diag(hose);
+ ret = EEH_NEXT_ERR_NONE;
}
break;
case OPAL_EEH_PE_ERROR:
- if (ioda_eeh_get_pe(hose, frozen_pe_no, pe))
- break;
+ /*
+ * If we can't find the corresponding PE, the
+ * PEEV / PEST would be messy. So we force a
+ * fenced PHB so that it can be recovered.
+ */
+ if (ioda_eeh_get_pe(hose, frozen_pe_no, pe)) {
+ if (!ioda_eeh_get_phb_pe(hose, pe)) {
+ pr_err("EEH: Escalated fenced PHB#%x "
+ "detected for PE#%llx\n",
+ hose->global_number,
+ frozen_pe_no);
+ ret = EEH_NEXT_ERR_FENCED_PHB;
+ } else {
+ ret = EEH_NEXT_ERR_NONE;
+ }
+ } else {
+ pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
+ (*pe)->addr, (*pe)->phb->global_number);
+ ret = EEH_NEXT_ERR_FROZEN_PE;
+ }
- pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
- (*pe)->addr, (*pe)->phb->global_number);
- ret = 1;
- goto out;
+ break;
+ default:
+ pr_warn("%s: Unexpected error type %d\n",
+ __func__, err_type);
}
+
+ /*
+ * If we have no errors on the specific PHB, or only an
+ * informative error there, we continue poking it.
+ * Otherwise, actions need to be taken by the upper
+ * layer.
+ */
+ if (ret > EEH_NEXT_ERR_INF)
+ break;
}
- ret = 0;
-out:
return ret;
}
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 73b981438cc..a59788e83b8 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -145,7 +145,7 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
* Enable EEH explicitly so that we will do EEH check
* while accessing I/O stuff
*/
- eeh_subsystem_enabled = 1;
+ eeh_set_enable(true);
/* Save memory bars */
eeh_save_bars(edev);
@@ -344,6 +344,27 @@ static int powernv_eeh_next_error(struct eeh_pe **pe)
return -EEXIST;
}
+static int powernv_eeh_restore_config(struct device_node *dn)
+{
+ struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ struct pnv_phb *phb;
+ s64 ret;
+
+ if (!edev)
+ return -EEXIST;
+
+ phb = edev->phb->private_data;
+ ret = opal_pci_reinit(phb->opal_id,
+ OPAL_REINIT_PCI_DEV, edev->config_addr);
+ if (ret) {
+ pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
+ __func__, edev->config_addr, ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
static struct eeh_ops powernv_eeh_ops = {
.name = "powernv",
.init = powernv_eeh_init,
@@ -359,7 +380,8 @@ static struct eeh_ops powernv_eeh_ops = {
.configure_bridge = powernv_eeh_configure_bridge,
.read_config = pnv_pci_cfg_read,
.write_config = pnv_pci_cfg_write,
- .next_error = powernv_eeh_next_error
+ .next_error = powernv_eeh_next_error,
+ .restore_config = powernv_eeh_restore_config
};
/**
diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c
index 6ffa6b1ec5b..714ef972406 100644
--- a/arch/powerpc/platforms/powernv/opal-flash.c
+++ b/arch/powerpc/platforms/powernv/opal-flash.c
@@ -76,8 +76,8 @@
/* Validate buffer size */
#define VALIDATE_BUF_SIZE 4096
-/* XXX: Assume candidate image size is <= 256MB */
-#define MAX_IMAGE_SIZE 0x10000000
+/* XXX: Assume candidate image size is <= 1GB */
+#define MAX_IMAGE_SIZE 0x40000000
/* Flash sg list version */
#define SG_LIST_VERSION (1UL)
@@ -103,30 +103,9 @@ struct image_header_t {
uint32_t size;
};
-/* Scatter/gather entry */
-struct opal_sg_entry {
- void *data;
- long length;
-};
-
-/* We calculate number of entries based on PAGE_SIZE */
-#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
-
-/*
- * This struct is very similar but not identical to that
- * needed by the opal flash update. All we need to do for
- * opal is rewrite num_entries into a version/length and
- * translate the pointers to absolute.
- */
-struct opal_sg_list {
- unsigned long num_entries;
- struct opal_sg_list *next;
- struct opal_sg_entry entry[SG_ENTRIES_PER_NODE];
-};
-
struct validate_flash_t {
int status; /* Return status */
- void *buf; /* Candiate image buffer */
+ void *buf; /* Candidate image buffer */
uint32_t buf_size; /* Image size */
uint32_t result; /* Update results token */
};
@@ -333,7 +312,7 @@ static struct opal_sg_list *image_data_to_sglist(void)
addr = image_data.data;
size = image_data.size;
- sg1 = kzalloc((sizeof(struct opal_sg_list)), GFP_KERNEL);
+ sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!sg1)
return NULL;
@@ -351,8 +330,7 @@ static struct opal_sg_list *image_data_to_sglist(void)
sg1->num_entries++;
if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
- sg1->next = kzalloc((sizeof(struct opal_sg_list)),
- GFP_KERNEL);
+ sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!sg1->next) {
pr_err("%s : Failed to allocate memory\n",
__func__);
@@ -402,7 +380,10 @@ static int opal_flash_update(int op)
else
sg->next = NULL;
- /* Make num_entries into the version/length field */
+ /*
+ * Convert num_entries to version/length format
+ * to satisfy OPAL.
+ */
sg->num_entries = (SG_LIST_VERSION << 56) |
(sg->num_entries * sizeof(struct opal_sg_entry) + 16);
}
@@ -500,7 +481,7 @@ static int alloc_image_buf(char *buffer, size_t count)
memcpy(&image_header, (void *)buffer, sizeof(struct image_header_t));
image_data.size = be32_to_cpu(image_header.size);
- pr_debug("FLASH: Candiate image size = %u\n", image_data.size);
+ pr_debug("FLASH: Candidate image size = %u\n", image_data.size);
if (image_data.size > MAX_IMAGE_SIZE) {
pr_warn("FLASH: Too large image\n");
diff --git a/arch/powerpc/platforms/powernv/opal-memory-errors.c b/arch/powerpc/platforms/powernv/opal-memory-errors.c
new file mode 100644
index 00000000000..ec4132239cd
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-memory-errors.c
@@ -0,0 +1,146 @@
+/*
+ * OPAL asynchronous memory error handling support in PowerNV.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2013 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm/opal.h>
+#include <asm/cputable.h>
+
+static int opal_mem_err_nb_init;
+static LIST_HEAD(opal_memory_err_list);
+static DEFINE_SPINLOCK(opal_mem_err_lock);
+
+struct OpalMsgNode {
+ struct list_head list;
+ struct opal_msg msg;
+};
+
+static void handle_memory_error_event(struct OpalMemoryErrorData *merr_evt)
+{
+ uint64_t paddr_start, paddr_end;
+
+ pr_debug("%s: Retrived memory error event, type: 0x%x\n",
+ __func__, merr_evt->type);
+ switch (merr_evt->type) {
+ case OPAL_MEM_ERR_TYPE_RESILIENCE:
+ paddr_start = merr_evt->u.resilience.physical_address_start;
+ paddr_end = merr_evt->u.resilience.physical_address_end;
+ break;
+ case OPAL_MEM_ERR_TYPE_DYN_DALLOC:
+ paddr_start = merr_evt->u.dyn_dealloc.physical_address_start;
+ paddr_end = merr_evt->u.dyn_dealloc.physical_address_end;
+ break;
+ default:
+ return;
+ }
+
+ for (; paddr_start < paddr_end; paddr_start += PAGE_SIZE) {
+ memory_failure(paddr_start >> PAGE_SHIFT, 0, 0);
+ }
+}
+
+static void handle_memory_error(void)
+{
+ unsigned long flags;
+ struct OpalMemoryErrorData *merr_evt;
+ struct OpalMsgNode *msg_node;
+
+ spin_lock_irqsave(&opal_mem_err_lock, flags);
+ while (!list_empty(&opal_memory_err_list)) {
+ msg_node = list_entry(opal_memory_err_list.next,
+ struct OpalMsgNode, list);
+ list_del(&msg_node->list);
+ spin_unlock_irqrestore(&opal_mem_err_lock, flags);
+
+ merr_evt = (struct OpalMemoryErrorData *)
+ &msg_node->msg.params[0];
+ handle_memory_error_event(merr_evt);
+ kfree(msg_node);
+ spin_lock_irqsave(&opal_mem_err_lock, flags);
+ }
+ spin_unlock_irqrestore(&opal_mem_err_lock, flags);
+}
+
+static void mem_error_handler(struct work_struct *work)
+{
+ handle_memory_error();
+}
+
+static DECLARE_WORK(mem_error_work, mem_error_handler);
+
+/*
+ * opal_memory_err_event - notifier handler that queues up the opal message
+ * to be processed later.
+ */
+static int opal_memory_err_event(struct notifier_block *nb,
+ unsigned long msg_type, void *msg)
+{
+ unsigned long flags;
+ struct OpalMsgNode *msg_node;
+
+ if (msg_type != OPAL_MSG_MEM_ERR)
+ return 0;
+
+ msg_node = kzalloc(sizeof(*msg_node), GFP_ATOMIC);
+ if (!msg_node) {
+ pr_err("MEMORY_ERROR: out of memory, Opal message event not"
+ "handled\n");
+ return -ENOMEM;
+ }
+ memcpy(&msg_node->msg, msg, sizeof(struct opal_msg));
+
+ spin_lock_irqsave(&opal_mem_err_lock, flags);
+ list_add(&msg_node->list, &opal_memory_err_list);
+ spin_unlock_irqrestore(&opal_mem_err_lock, flags);
+
+ schedule_work(&mem_error_work);
+ return 0;
+}
+
+static struct notifier_block opal_mem_err_nb = {
+ .notifier_call = opal_memory_err_event,
+ .next = NULL,
+ .priority = 0,
+};
+
+static int __init opal_mem_err_init(void)
+{
+ int ret;
+
+ if (!opal_mem_err_nb_init) {
+ ret = opal_message_notifier_register(
+ OPAL_MSG_MEM_ERR, &opal_mem_err_nb);
+ if (ret) {
+ pr_err("%s: Can't register OPAL event notifier (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+ opal_mem_err_nb_init = 1;
+ }
+ return 0;
+}
+subsys_initcall(opal_mem_err_init);
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
index 7d07c7e80ec..b1885db8fdf 100644
--- a/arch/powerpc/platforms/powernv/opal-rtc.c
+++ b/arch/powerpc/platforms/powernv/opal-rtc.c
@@ -18,6 +18,7 @@
#include <asm/opal.h>
#include <asm/firmware.h>
+#include <asm/machdep.h>
static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
{
@@ -48,8 +49,11 @@ unsigned long __init opal_get_boot_time(void)
else
mdelay(10);
}
- if (rc != OPAL_SUCCESS)
+ if (rc != OPAL_SUCCESS) {
+ ppc_md.get_rtc_time = NULL;
+ ppc_md.set_rtc_time = NULL;
return 0;
+ }
y_m_d = be32_to_cpu(__y_m_d);
h_m_s_ms = be64_to_cpu(__h_m_s_ms);
opal_to_tm(y_m_d, h_m_s_ms, &tm);
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index e7806504e97..3e8829c40fb 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -126,3 +126,6 @@ OPAL_CALL(opal_return_cpu, OPAL_RETURN_CPU);
OPAL_CALL(opal_validate_flash, OPAL_FLASH_VALIDATE);
OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE);
OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE);
+OPAL_CALL(opal_get_msg, OPAL_GET_MSG);
+OPAL_CALL(opal_check_completion, OPAL_CHECK_ASYNC_COMPLETION);
+OPAL_CALL(opal_sync_host_reboot, OPAL_SYNC_HOST_REBOOT);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 1c798cd5537..65499adaecf 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -18,9 +18,12 @@
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/slab.h>
+#include <linux/sched.h>
#include <linux/kobject.h>
+#include <linux/delay.h>
#include <asm/opal.h>
#include <asm/firmware.h>
+#include <asm/mce.h>
#include "powernv.h"
@@ -38,6 +41,7 @@ extern u64 opal_mc_secondary_handler[];
static unsigned int *opal_irqs;
static unsigned int opal_irq_count;
static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);
+static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
static DEFINE_SPINLOCK(opal_notifier_lock);
static uint64_t last_notified_mask = 0x0ul;
static atomic_t opal_notifier_hold = ATOMIC_INIT(0);
@@ -88,14 +92,10 @@ static int __init opal_register_exception_handlers(void)
if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
return -ENODEV;
- /* Hookup some exception handlers. We use the fwnmi area at 0x7000
- * to provide the glue space to OPAL
+ /* Hookup some exception handlers except machine check. We use the
+ * fwnmi area at 0x7000 to provide the glue space to OPAL
*/
glue = 0x7000;
- opal_register_exception_handler(OPAL_MACHINE_CHECK_HANDLER,
- __pa(opal_mc_secondary_handler[0]),
- glue);
- glue += 128;
opal_register_exception_handler(OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
0, glue);
glue += 128;
@@ -169,6 +169,95 @@ void opal_notifier_disable(void)
atomic_set(&opal_notifier_hold, 1);
}
+/*
+ * Opal message notifier based on message type. Allow subscribers to get
+ * notified for a specific message type.
+ */
+int opal_message_notifier_register(enum OpalMessageType msg_type,
+ struct notifier_block *nb)
+{
+ if (!nb) {
+ pr_warning("%s: Invalid argument (%p)\n",
+ __func__, nb);
+ return -EINVAL;
+ }
+ if (msg_type > OPAL_MSG_TYPE_MAX) {
+ pr_warning("%s: Invalid message type argument (%d)\n",
+ __func__, msg_type);
+ return -EINVAL;
+ }
+ return atomic_notifier_chain_register(
+ &opal_msg_notifier_head[msg_type], nb);
+}
+
+static void opal_message_do_notify(uint32_t msg_type, void *msg)
+{
+ /* notify subscribers */
+ atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
+ msg_type, msg);
+}
+
+static void opal_handle_message(void)
+{
+ s64 ret;
+ /*
+ * TODO: pre-allocate a message buffer depending on opal-msg-size
+ * value in /proc/device-tree.
+ */
+ static struct opal_msg msg;
+
+ ret = opal_get_msg(__pa(&msg), sizeof(msg));
+ /* No opal message pending. */
+ if (ret == OPAL_RESOURCE)
+ return;
+
+ /* check for errors. */
+ if (ret) {
+ pr_warning("%s: Failed to retrive opal message, err=%lld\n",
+ __func__, ret);
+ return;
+ }
+
+ /* Sanity check */
+ if (msg.msg_type > OPAL_MSG_TYPE_MAX) {
+ pr_warning("%s: Unknown message type: %u\n",
+ __func__, msg.msg_type);
+ return;
+ }
+ opal_message_do_notify(msg.msg_type, (void *)&msg);
+}
+
+static int opal_message_notify(struct notifier_block *nb,
+ unsigned long events, void *change)
+{
+ if (events & OPAL_EVENT_MSG_PENDING)
+ opal_handle_message();
+ return 0;
+}
+
+static struct notifier_block opal_message_nb = {
+ .notifier_call = opal_message_notify,
+ .next = NULL,
+ .priority = 0,
+};
+
+static int __init opal_message_init(void)
+{
+ int ret, i;
+
+ for (i = 0; i < OPAL_MSG_TYPE_MAX; i++)
+ ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]);
+
+ ret = opal_notifier_register(&opal_message_nb);
+ if (ret) {
+ pr_err("%s: Can't register OPAL event notifier (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+ return 0;
+}
+early_initcall(opal_message_init);
+
int opal_get_chars(uint32_t vtermno, char *buf, int count)
{
s64 rc;
@@ -254,119 +343,62 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
return written;
}
+static int opal_recover_mce(struct pt_regs *regs,
+ struct machine_check_event *evt)
+{
+ int recovered = 0;
+ uint64_t ea = get_mce_fault_addr(evt);
+
+ if (!(regs->msr & MSR_RI)) {
+ /* If MSR_RI isn't set, we cannot recover */
+ recovered = 0;
+ } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
+ /* Platform corrected itself */
+ recovered = 1;
+ } else if (ea && !is_kernel_addr(ea)) {
+ /*
+ * Faulting address is not in kernel text. We should be fine.
+ * We need to find which process uses this address.
+ * For now, kill the task if we received the exception
+ * while in userspace.
+ *
+ * TODO: Queue up this address for hwpoisoning later.
+ */
+ if (user_mode(regs) && !is_global_init(current)) {
+ _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
+ recovered = 1;
+ } else
+ recovered = 0;
+ } else if (user_mode(regs) && !is_global_init(current) &&
+ evt->severity == MCE_SEV_ERROR_SYNC) {
+ /*
+ * If we have received a synchronous error when in userspace,
+ * kill the task.
+ */
+ _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
+ recovered = 1;
+ }
+ return recovered;
+}
+
int opal_machine_check(struct pt_regs *regs)
{
- struct opal_machine_check_event *opal_evt = get_paca()->opal_mc_evt;
- struct opal_machine_check_event evt;
- const char *level, *sevstr, *subtype;
- static const char *opal_mc_ue_types[] = {
- "Indeterminate",
- "Instruction fetch",
- "Page table walk ifetch",
- "Load/Store",
- "Page table walk Load/Store",
- };
- static const char *opal_mc_slb_types[] = {
- "Indeterminate",
- "Parity",
- "Multihit",
- };
- static const char *opal_mc_erat_types[] = {
- "Indeterminate",
- "Parity",
- "Multihit",
- };
- static const char *opal_mc_tlb_types[] = {
- "Indeterminate",
- "Parity",
- "Multihit",
- };
-
- /* Copy the event structure and release the original */
- evt = *opal_evt;
- opal_evt->in_use = 0;
+ struct machine_check_event evt;
+
+ if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
+ return 0;
/* Print things out */
- if (evt.version != OpalMCE_V1) {
+ if (evt.version != MCE_V1) {
pr_err("Machine Check Exception, Unknown event version %d !\n",
evt.version);
return 0;
}
- switch(evt.severity) {
- case OpalMCE_SEV_NO_ERROR:
- level = KERN_INFO;
- sevstr = "Harmless";
- break;
- case OpalMCE_SEV_WARNING:
- level = KERN_WARNING;
- sevstr = "";
- break;
- case OpalMCE_SEV_ERROR_SYNC:
- level = KERN_ERR;
- sevstr = "Severe";
- break;
- case OpalMCE_SEV_FATAL:
- default:
- level = KERN_ERR;
- sevstr = "Fatal";
- break;
- }
+ machine_check_print_event_info(&evt);
- printk("%s%s Machine check interrupt [%s]\n", level, sevstr,
- evt.disposition == OpalMCE_DISPOSITION_RECOVERED ?
- "Recovered" : "[Not recovered");
- printk("%s Initiator: %s\n", level,
- evt.initiator == OpalMCE_INITIATOR_CPU ? "CPU" : "Unknown");
- switch(evt.error_type) {
- case OpalMCE_ERROR_TYPE_UE:
- subtype = evt.u.ue_error.ue_error_type <
- ARRAY_SIZE(opal_mc_ue_types) ?
- opal_mc_ue_types[evt.u.ue_error.ue_error_type]
- : "Unknown";
- printk("%s Error type: UE [%s]\n", level, subtype);
- if (evt.u.ue_error.effective_address_provided)
- printk("%s Effective address: %016llx\n",
- level, evt.u.ue_error.effective_address);
- if (evt.u.ue_error.physical_address_provided)
- printk("%s Physial address: %016llx\n",
- level, evt.u.ue_error.physical_address);
- break;
- case OpalMCE_ERROR_TYPE_SLB:
- subtype = evt.u.slb_error.slb_error_type <
- ARRAY_SIZE(opal_mc_slb_types) ?
- opal_mc_slb_types[evt.u.slb_error.slb_error_type]
- : "Unknown";
- printk("%s Error type: SLB [%s]\n", level, subtype);
- if (evt.u.slb_error.effective_address_provided)
- printk("%s Effective address: %016llx\n",
- level, evt.u.slb_error.effective_address);
- break;
- case OpalMCE_ERROR_TYPE_ERAT:
- subtype = evt.u.erat_error.erat_error_type <
- ARRAY_SIZE(opal_mc_erat_types) ?
- opal_mc_erat_types[evt.u.erat_error.erat_error_type]
- : "Unknown";
- printk("%s Error type: ERAT [%s]\n", level, subtype);
- if (evt.u.erat_error.effective_address_provided)
- printk("%s Effective address: %016llx\n",
- level, evt.u.erat_error.effective_address);
- break;
- case OpalMCE_ERROR_TYPE_TLB:
- subtype = evt.u.tlb_error.tlb_error_type <
- ARRAY_SIZE(opal_mc_tlb_types) ?
- opal_mc_tlb_types[evt.u.tlb_error.tlb_error_type]
- : "Unknown";
- printk("%s Error type: TLB [%s]\n", level, subtype);
- if (evt.u.tlb_error.effective_address_provided)
- printk("%s Effective address: %016llx\n",
- level, evt.u.tlb_error.effective_address);
- break;
- default:
- case OpalMCE_ERROR_TYPE_UNKNOWN:
- printk("%s Error type: Unknown\n", level);
- break;
- }
- return evt.severity == OpalMCE_SEV_FATAL ? 0 : 1;
+ if (opal_recover_mce(regs, &evt))
+ return 1;
+ return 0;
}
static irqreturn_t opal_interrupt(int irq, void *data)
@@ -451,10 +483,25 @@ subsys_initcall(opal_init);
void opal_shutdown(void)
{
unsigned int i;
+ long rc = OPAL_BUSY;
+ /* First free interrupts, which will also mask them */
for (i = 0; i < opal_irq_count; i++) {
if (opal_irqs[i])
free_irq(opal_irqs[i], NULL);
opal_irqs[i] = 0;
}
+
+ /*
+ * Then sync with OPAL, which ensures anything that can
+ * potentially write to our memory has completed, such
+ * as an ongoing dump retrieval.
+ */
+ while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ rc = opal_sync_host_reboot();
+ if (rc == OPAL_BUSY_EVENT)
+ opal_poll_events(NULL);
+ else
+ mdelay(10);
+ }
}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 2c6d173842b..3b2b4fb3585 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -21,6 +21,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
+#include <linux/memblock.h>
#include <asm/sections.h>
#include <asm/io.h>
@@ -460,7 +461,37 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
return;
pe = &phb->ioda.pe_array[pdn->pe_number];
- set_iommu_table_base(&pdev->dev, &pe->tce32_table);
+ WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
+ set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
+}
+
+static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
+ struct pci_dev *pdev, u64 dma_mask)
+{
+ struct pci_dn *pdn = pci_get_pdn(pdev);
+ struct pnv_ioda_pe *pe;
+ uint64_t top;
+ bool bypass = false;
+
+ if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
+ return -ENODEV;
+
+ pe = &phb->ioda.pe_array[pdn->pe_number];
+ if (pe->tce_bypass_enabled) {
+ top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
+ bypass = (dma_mask >= top);
+ }
+
+ if (bypass) {
+ dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
+ set_dma_ops(&pdev->dev, &dma_direct_ops);
+ set_dma_offset(&pdev->dev, pe->tce_bypass_base);
+ } else {
+ dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
+ set_dma_ops(&pdev->dev, &dma_iommu_ops);
+ set_iommu_table_base(&pdev->dev, &pe->tce32_table);
+ }
+ return 0;
}
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
@@ -468,7 +499,7 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
- set_iommu_table_base(&dev->dev, &pe->tce32_table);
+ set_iommu_table_base_and_group(&dev->dev, &pe->tce32_table);
if (dev->subordinate)
pnv_ioda_setup_bus_dma(pe, dev->subordinate);
}
@@ -644,7 +675,7 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number);
if (pe->pdev)
- set_iommu_table_base(&pe->pdev->dev, tbl);
+ set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
else
pnv_ioda_setup_bus_dma(pe, pe->pbus);
@@ -657,6 +688,56 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}
+static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
+{
+ struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
+ tce32_table);
+ uint16_t window_id = (pe->pe_number << 1) + 1;
+ int64_t rc;
+
+ pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
+ if (enable) {
+ phys_addr_t top = memblock_end_of_DRAM();
+
+ top = roundup_pow_of_two(top);
+ rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+ pe->pe_number,
+ window_id,
+ pe->tce_bypass_base,
+ top);
+ } else {
+ rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+ pe->pe_number,
+ window_id,
+ pe->tce_bypass_base,
+ 0);
+
+ /*
+ * We might want to reset the DMA ops of all devices on
+ * this PE. However in theory, that shouldn't be necessary
+ * as this is used for VFIO/KVM pass-through and the device
+ * hasn't yet been returned to its kernel driver
+ */
+ }
+ if (rc)
+ pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
+ else
+ pe->tce_bypass_enabled = enable;
+}
+
+static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
+ struct pnv_ioda_pe *pe)
+{
+ /* TVE #1 is selected by PCI address bit 59 */
+ pe->tce_bypass_base = 1ull << 59;
+
+ /* Install set_bypass callback for VFIO */
+ pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass;
+
+ /* Enable bypass by default */
+ pnv_pci_ioda2_set_bypass(&pe->tce32_table, true);
+}
+
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
struct pnv_ioda_pe *pe)
{
@@ -723,10 +804,12 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number);
if (pe->pdev)
- set_iommu_table_base(&pe->pdev->dev, tbl);
+ set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
else
pnv_ioda_setup_bus_dma(pe, pe->pbus);
+ /* Also create a bypass window */
+ pnv_pci_ioda2_setup_bypass_pe(phb, pe);
return;
fail:
if (pe->tce32_seg >= 0)
@@ -1144,7 +1227,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
{
struct pci_controller *hose;
struct pnv_phb *phb;
- unsigned long size, m32map_off, iomap_off, pemap_off;
+ unsigned long size, m32map_off, pemap_off, iomap_off = 0;
const __be64 *prop64;
const __be32 *prop32;
int len;
@@ -1231,7 +1314,6 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
m32map_off = size;
size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
- iomap_off = size;
if (phb->type == PNV_PHB_IODA1) {
iomap_off = size;
size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
@@ -1287,6 +1369,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
/* Setup TCEs */
phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
+ phb->dma_set_mask = pnv_pci_ioda_dma_set_mask;
/* Setup shutdown function for kexec */
phb->shutdown = pnv_pci_ioda_shutdown;
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index f8b4bd8afb2..e3807d69393 100644
--- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -92,7 +92,7 @@ static void pnv_pci_p5ioc2_dma_dev_setup(struct pnv_phb *phb,
pci_domain_nr(phb->hose->bus), phb->opal_id);
}
- set_iommu_table_base(&pdev->dev, &phb->p5ioc2.iommu_table);
+ set_iommu_table_base_and_group(&pdev->dev, &phb->p5ioc2.iommu_table);
}
static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 4eb33a9ed53..95633d79ef5 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -124,77 +124,157 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
}
#endif /* CONFIG_PCI_MSI */
-static void pnv_pci_dump_p7ioc_diag_data(struct pnv_phb *phb)
+static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
+ struct OpalIoPhbErrorCommon *common)
{
- struct OpalIoP7IOCPhbErrorData *data = &phb->diag.p7ioc;
+ struct OpalIoP7IOCPhbErrorData *data;
int i;
- pr_info("PHB %d diagnostic data:\n", phb->hose->global_number);
-
- pr_info(" brdgCtl = 0x%08x\n", data->brdgCtl);
-
- pr_info(" portStatusReg = 0x%08x\n", data->portStatusReg);
- pr_info(" rootCmplxStatus = 0x%08x\n", data->rootCmplxStatus);
- pr_info(" busAgentStatus = 0x%08x\n", data->busAgentStatus);
-
- pr_info(" deviceStatus = 0x%08x\n", data->deviceStatus);
- pr_info(" slotStatus = 0x%08x\n", data->slotStatus);
- pr_info(" linkStatus = 0x%08x\n", data->linkStatus);
- pr_info(" devCmdStatus = 0x%08x\n", data->devCmdStatus);
- pr_info(" devSecStatus = 0x%08x\n", data->devSecStatus);
-
- pr_info(" rootErrorStatus = 0x%08x\n", data->rootErrorStatus);
- pr_info(" uncorrErrorStatus = 0x%08x\n", data->uncorrErrorStatus);
- pr_info(" corrErrorStatus = 0x%08x\n", data->corrErrorStatus);
- pr_info(" tlpHdr1 = 0x%08x\n", data->tlpHdr1);
- pr_info(" tlpHdr2 = 0x%08x\n", data->tlpHdr2);
- pr_info(" tlpHdr3 = 0x%08x\n", data->tlpHdr3);
- pr_info(" tlpHdr4 = 0x%08x\n", data->tlpHdr4);
- pr_info(" sourceId = 0x%08x\n", data->sourceId);
-
- pr_info(" errorClass = 0x%016llx\n", data->errorClass);
- pr_info(" correlator = 0x%016llx\n", data->correlator);
-
- pr_info(" p7iocPlssr = 0x%016llx\n", data->p7iocPlssr);
- pr_info(" p7iocCsr = 0x%016llx\n", data->p7iocCsr);
- pr_info(" lemFir = 0x%016llx\n", data->lemFir);
- pr_info(" lemErrorMask = 0x%016llx\n", data->lemErrorMask);
- pr_info(" lemWOF = 0x%016llx\n", data->lemWOF);
- pr_info(" phbErrorStatus = 0x%016llx\n", data->phbErrorStatus);
- pr_info(" phbFirstErrorStatus = 0x%016llx\n", data->phbFirstErrorStatus);
- pr_info(" phbErrorLog0 = 0x%016llx\n", data->phbErrorLog0);
- pr_info(" phbErrorLog1 = 0x%016llx\n", data->phbErrorLog1);
- pr_info(" mmioErrorStatus = 0x%016llx\n", data->mmioErrorStatus);
- pr_info(" mmioFirstErrorStatus = 0x%016llx\n", data->mmioFirstErrorStatus);
- pr_info(" mmioErrorLog0 = 0x%016llx\n", data->mmioErrorLog0);
- pr_info(" mmioErrorLog1 = 0x%016llx\n", data->mmioErrorLog1);
- pr_info(" dma0ErrorStatus = 0x%016llx\n", data->dma0ErrorStatus);
- pr_info(" dma0FirstErrorStatus = 0x%016llx\n", data->dma0FirstErrorStatus);
- pr_info(" dma0ErrorLog0 = 0x%016llx\n", data->dma0ErrorLog0);
- pr_info(" dma0ErrorLog1 = 0x%016llx\n", data->dma0ErrorLog1);
- pr_info(" dma1ErrorStatus = 0x%016llx\n", data->dma1ErrorStatus);
- pr_info(" dma1FirstErrorStatus = 0x%016llx\n", data->dma1FirstErrorStatus);
- pr_info(" dma1ErrorLog0 = 0x%016llx\n", data->dma1ErrorLog0);
- pr_info(" dma1ErrorLog1 = 0x%016llx\n", data->dma1ErrorLog1);
+ data = (struct OpalIoP7IOCPhbErrorData *)common;
+ pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n\n",
+ hose->global_number, common->version);
+
+ pr_info(" brdgCtl: %08x\n", data->brdgCtl);
+
+ pr_info(" portStatusReg: %08x\n", data->portStatusReg);
+ pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
+ pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
+
+ pr_info(" deviceStatus: %08x\n", data->deviceStatus);
+ pr_info(" slotStatus: %08x\n", data->slotStatus);
+ pr_info(" linkStatus: %08x\n", data->linkStatus);
+ pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
+ pr_info(" devSecStatus: %08x\n", data->devSecStatus);
+
+ pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
+ pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
+ pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
+ pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
+ pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
+ pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
+ pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
+ pr_info(" sourceId: %08x\n", data->sourceId);
+ pr_info(" errorClass: %016llx\n", data->errorClass);
+ pr_info(" correlator: %016llx\n", data->correlator);
+ pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr);
+ pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr);
+ pr_info(" lemFir: %016llx\n", data->lemFir);
+ pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
+ pr_info(" lemWOF: %016llx\n", data->lemWOF);
+ pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
+ pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
+ pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
+ pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
+ pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
+ pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
+ pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
+ pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
+ pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
+ pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
+ pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
+ pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
+ pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
+ pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
+ pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
+ pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
if ((data->pestA[i] >> 63) == 0 &&
(data->pestB[i] >> 63) == 0)
continue;
- pr_info(" PE[%3d] PESTA = 0x%016llx\n", i, data->pestA[i]);
- pr_info(" PESTB = 0x%016llx\n", data->pestB[i]);
+
+ pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
+ pr_info(" PESTB: %016llx\n", data->pestB[i]);
}
}
-static void pnv_pci_dump_phb_diag_data(struct pnv_phb *phb)
+static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
+ struct OpalIoPhbErrorCommon *common)
{
- switch(phb->model) {
- case PNV_PHB_MODEL_P7IOC:
- pnv_pci_dump_p7ioc_diag_data(phb);
+ struct OpalIoPhb3ErrorData *data;
+ int i;
+
+ data = (struct OpalIoPhb3ErrorData *)common;
+ pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n\n",
+ hose->global_number, common->version);
+
+ pr_info(" brdgCtl: %08x\n", data->brdgCtl);
+
+ pr_info(" portStatusReg: %08x\n", data->portStatusReg);
+ pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
+ pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
+
+ pr_info(" deviceStatus: %08x\n", data->deviceStatus);
+ pr_info(" slotStatus: %08x\n", data->slotStatus);
+ pr_info(" linkStatus: %08x\n", data->linkStatus);
+ pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
+ pr_info(" devSecStatus: %08x\n", data->devSecStatus);
+
+ pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
+ pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
+ pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
+ pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
+ pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
+ pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
+ pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
+ pr_info(" sourceId: %08x\n", data->sourceId);
+ pr_info(" errorClass: %016llx\n", data->errorClass);
+ pr_info(" correlator: %016llx\n", data->correlator);
+
+ pr_info(" nFir: %016llx\n", data->nFir);
+ pr_info(" nFirMask: %016llx\n", data->nFirMask);
+ pr_info(" nFirWOF: %016llx\n", data->nFirWOF);
+ pr_info(" PhbPlssr: %016llx\n", data->phbPlssr);
+ pr_info(" PhbCsr: %016llx\n", data->phbCsr);
+ pr_info(" lemFir: %016llx\n", data->lemFir);
+ pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
+ pr_info(" lemWOF: %016llx\n", data->lemWOF);
+ pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
+ pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
+ pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
+ pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
+ pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
+ pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
+ pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
+ pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
+ pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
+ pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
+ pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
+ pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
+ pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
+ pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
+ pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
+ pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
+
+ for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
+ if ((data->pestA[i] >> 63) == 0 &&
+ (data->pestB[i] >> 63) == 0)
+ continue;
+
+ pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
+ pr_info(" PESTB: %016llx\n", data->pestB[i]);
+ }
+}
+
+void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
+ unsigned char *log_buff)
+{
+ struct OpalIoPhbErrorCommon *common;
+
+ if (!hose || !log_buff)
+ return;
+
+ common = (struct OpalIoPhbErrorCommon *)log_buff;
+ switch (common->ioType) {
+ case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
+ pnv_pci_dump_p7ioc_diag_data(hose, common);
+ break;
+ case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
+ pnv_pci_dump_phb3_diag_data(hose, common);
break;
default:
- pr_warning("PCI %d: Can't decode this PHB diag data\n",
- phb->hose->global_number);
+ pr_warn("%s: Unrecognized ioType %d\n",
+ __func__, common->ioType);
}
}
@@ -222,7 +302,7 @@ static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
* with the normal errors generated when probing empty slots
*/
if (has_diag)
- pnv_pci_dump_phb_diag_data(phb);
+ pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);
else
pr_warning("PCI %d: No diag data available\n",
phb->hose->global_number);
@@ -484,7 +564,8 @@ void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
{
tbl->it_blocksize = 16;
tbl->it_base = (unsigned long)tce_mem;
- tbl->it_offset = dma_offset >> IOMMU_PAGE_SHIFT;
+ tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
+ tbl->it_offset = dma_offset >> tbl->it_page_shift;
tbl->it_index = 0;
tbl->it_size = tce_size >> 3;
tbl->it_busno = 0;
@@ -536,7 +617,7 @@ static void pnv_pci_dma_fallback_setup(struct pci_controller *hose,
pdn->iommu_table = pnv_pci_setup_bml_iommu(hose);
if (!pdn->iommu_table)
return;
- set_iommu_table_base(&pdev->dev, pdn->iommu_table);
+ set_iommu_table_base_and_group(&pdev->dev, pdn->iommu_table);
}
static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
@@ -553,6 +634,16 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
pnv_pci_dma_fallback_setup(hose, pdev);
}
+int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
+
+ if (phb && phb->dma_set_mask)
+ return phb->dma_set_mask(phb, pdev, dma_mask);
+ return __dma_set_mask(&pdev->dev, dma_mask);
+}
+
void pnv_pci_shutdown(void)
{
struct pci_controller *hose;
@@ -657,3 +748,32 @@ void __init pnv_pci_init(void)
ppc_md.teardown_msi_irqs = pnv_teardown_msi_irqs;
#endif
}
+
+static int tce_iommu_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ return iommu_add_device(dev);
+ case BUS_NOTIFY_DEL_DEVICE:
+ if (dev->iommu_group)
+ iommu_del_device(dev);
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static struct notifier_block tce_iommu_bus_nb = {
+ .notifier_call = tce_iommu_bus_notifier,
+};
+
+static int __init tce_iommu_bus_notifier_init(void)
+{
+ bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
+ return 0;
+}
+
+subsys_initcall_sync(tce_iommu_bus_notifier_init);
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 1ed8d5f40f5..cde16944277 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -54,7 +54,9 @@ struct pnv_ioda_pe {
struct iommu_table tce32_table;
phys_addr_t tce_inval_reg_phys;
- /* XXX TODO: Add support for additional 64-bit iommus */
+ /* 64-bit TCE bypass region */
+ bool tce_bypass_enabled;
+ uint64_t tce_bypass_base;
/* MSIs. MVE index is identical for 32 and 64 bit MSI
* and -1 if not supported. (It's actually identical to the
@@ -113,6 +115,8 @@ struct pnv_phb {
unsigned int hwirq, unsigned int virq,
unsigned int is_64, struct msi_msg *msg);
void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
+ int (*dma_set_mask)(struct pnv_phb *phb, struct pci_dev *pdev,
+ u64 dma_mask);
void (*fixup_phb)(struct pci_controller *hose);
u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
void (*shutdown)(struct pnv_phb *phb);
@@ -176,6 +180,7 @@ struct pnv_phb {
union {
unsigned char blob[PNV_PCI_DIAG_BUF_SIZE];
struct OpalIoP7IOCPhbErrorData p7ioc;
+ struct OpalIoPhb3ErrorData phb3;
struct OpalIoP7IOCErrorData hub_diag;
} diag;
@@ -186,6 +191,8 @@ extern struct pci_ops pnv_pci_ops;
extern struct pnv_eeh_ops ioda_eeh_ops;
#endif
+void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
+ unsigned char *log_buff);
int pnv_pci_cfg_read(struct device_node *dn,
int where, int size, u32 *val);
int pnv_pci_cfg_write(struct device_node *dn,
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index de6819be1f9..0051e108ef0 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -7,12 +7,20 @@ extern void pnv_smp_init(void);
static inline void pnv_smp_init(void) { }
#endif
+struct pci_dev;
+
#ifdef CONFIG_PCI
extern void pnv_pci_init(void);
extern void pnv_pci_shutdown(void);
+extern int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask);
#else
static inline void pnv_pci_init(void) { }
static inline void pnv_pci_shutdown(void) { }
+
+static inline int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
+{
+ return -ENODEV;
+}
#endif
extern void pnv_lpc_init(void);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 19884b2a51b..110f4fbd319 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -26,6 +26,8 @@
#include <linux/of_fdt.h>
#include <linux/interrupt.h>
#include <linux/bug.h>
+#include <linux/cpuidle.h>
+#include <linux/pci.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
@@ -140,13 +142,22 @@ static void pnv_progress(char *s, unsigned short hex)
{
}
+static int pnv_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (dev_is_pci(dev))
+ return pnv_pci_dma_set_mask(to_pci_dev(dev), dma_mask);
+ return __dma_set_mask(dev, dma_mask);
+}
+
static void pnv_shutdown(void)
{
/* Let the PCI code clear up IODA tables */
pnv_pci_shutdown();
- /* And unregister all OPAL interrupts so they don't fire
- * up while we kexec
+ /*
+ * Stop OPAL activity: Unregister all OPAL interrupts so they
+ * don't fire up while we kexec and make sure all potentially
+ * DMA'ing ops are complete (such as dump retrieval).
*/
opal_shutdown();
}
@@ -214,6 +225,16 @@ static int __init pnv_probe(void)
return 1;
}
+void powernv_idle(void)
+{
+ /* Hook into the cpuidle framework if available, else
+ * fall back to the default platform idle code
+ */
+ if (cpuidle_idle_call()) {
+ power7_idle();
+ }
+}
+
define_machine(powernv) {
.name = "PowerNV",
.probe = pnv_probe,
@@ -223,8 +244,9 @@ define_machine(powernv) {
.show_cpuinfo = pnv_show_cpuinfo,
.progress = pnv_progress,
.machine_shutdown = pnv_shutdown,
- .power_save = power7_idle,
+ .power_save = powernv_idle,
.calibrate_decr = generic_calibrate_decr,
+ .dma_set_mask = pnv_dma_set_mask,
#ifdef CONFIG_KEXEC
.kexec_cpu_down = pnv_kexec_cpu_down,
#endif
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index e17fa1432d8..a0bca05e26b 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -143,7 +143,7 @@ static void _dump_areas(unsigned int spe_id, unsigned long priv2,
pr_debug("%s:%d: shadow: %lxh\n", func, line, shadow);
}
-inline u64 ps3_get_spe_id(void *arg)
+u64 ps3_get_spe_id(void *arg)
{
return spu_pdata(arg)->spe_id;
}
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 62b4f8025de..80b1d57c306 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -20,6 +20,7 @@ config PPC_PSERIES
select PPC_DOORBELL
select HAVE_CONTEXT_TRACKING
select HOTPLUG_CPU if SMP
+ select ARCH_RANDOM
default y
config PPC_SPLPAR
@@ -34,7 +35,7 @@ config PPC_SPLPAR
config PSERIES_MSI
bool
- depends on PCI_MSI && EEH
+ depends on PCI_MSI && PPC_PSERIES && EEH
default y
config PSERIES_ENERGY
@@ -119,12 +120,3 @@ config DTL
which are accessible through a debugfs file.
Say N if you are unsure.
-
-config PSERIES_IDLE
- bool "Cpuidle driver for pSeries platforms"
- depends on CPU_IDLE
- depends on PPC_PSERIES
- default y
- help
- Select this option to enable processor idle state management
- through cpuidle subsystem.
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index fbccac9cd2d..03480796af9 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -21,7 +21,6 @@ obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_DTL) += dtl.o
obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o
-obj-$(CONFIG_PSERIES_IDLE) += processor_idle.o
obj-$(CONFIG_LPARCFG) += lparcfg.o
ifeq ($(CONFIG_PPC_PSERIES),y)
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 1e561bef459..2d8bf15879f 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -25,7 +25,6 @@
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/gfp.h>
-#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/oom.h>
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 5db66f1fbc2..7d61498e45c 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -20,7 +20,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/spinlock.h>
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index ccb633e077b..8a8f0472d98 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -265,7 +265,7 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
enable = 1;
if (enable) {
- eeh_subsystem_enabled = 1;
+ eeh_set_enable(true);
eeh_add_to_parent_pe(edev);
pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",
@@ -689,7 +689,9 @@ static struct eeh_ops pseries_eeh_ops = {
.get_log = pseries_eeh_get_log,
.configure_bridge = pseries_eeh_configure_bridge,
.read_config = pseries_eeh_read_config,
- .write_config = pseries_eeh_write_config
+ .write_config = pseries_eeh_write_config,
+ .next_error = NULL,
+ .restore_config = NULL
};
/**
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index f253361552a..33b552ffbe5 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -486,9 +486,10 @@ static void iommu_table_setparms(struct pci_controller *phb,
memset((void *)tbl->it_base, 0, *sizep);
tbl->it_busno = phb->bus->number;
+ tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
/* Units of tce entries */
- tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT;
+ tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift;
/* Test if we are going over 2GB of DMA space */
if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
@@ -499,7 +500,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
phb->dma_window_base_cur += phb->dma_window_size;
/* Set the tce table size - measured in entries */
- tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT;
+ tbl->it_size = phb->dma_window_size >> tbl->it_page_shift;
tbl->it_index = 0;
tbl->it_blocksize = 16;
@@ -537,11 +538,12 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);
tbl->it_busno = phb->bus->number;
+ tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
tbl->it_base = 0;
tbl->it_blocksize = 16;
tbl->it_type = TCE_PCI;
- tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
- tbl->it_size = size >> IOMMU_PAGE_SHIFT;
+ tbl->it_offset = offset >> tbl->it_page_shift;
+ tbl->it_size = size >> tbl->it_page_shift;
}
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
@@ -687,7 +689,8 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
iommu_table_setparms(phb, dn, tbl);
PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
iommu_register_group(tbl, pci_domain_nr(phb->bus), 0);
- set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
+ set_iommu_table_base_and_group(&dev->dev,
+ PCI_DN(dn)->iommu_table);
return;
}
@@ -699,7 +702,8 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
dn = dn->parent;
if (dn && PCI_DN(dn))
- set_iommu_table_base(&dev->dev, PCI_DN(dn)->iommu_table);
+ set_iommu_table_base_and_group(&dev->dev,
+ PCI_DN(dn)->iommu_table);
else
printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
pci_name(dev));
@@ -717,21 +721,6 @@ static int __init disable_ddw_setup(char *str)
early_param("disable_ddw", disable_ddw_setup);
-static inline void __remove_ddw(struct device_node *np, const u32 *ddw_avail, u64 liobn)
-{
- int ret;
-
- ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
- if (ret)
- pr_warning("%s: failed to remove DMA window: rtas returned "
- "%d to ibm,remove-pe-dma-window(%x) %llx\n",
- np->full_name, ret, ddw_avail[2], liobn);
- else
- pr_debug("%s: successfully removed DMA window: rtas returned "
- "%d to ibm,remove-pe-dma-window(%x) %llx\n",
- np->full_name, ret, ddw_avail[2], liobn);
-}
-
static void remove_ddw(struct device_node *np)
{
struct dynamic_dma_window_prop *dwp;
@@ -761,7 +750,15 @@ static void remove_ddw(struct device_node *np)
pr_debug("%s successfully cleared tces in window.\n",
np->full_name);
- __remove_ddw(np, ddw_avail, liobn);
+ ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
+ if (ret)
+ pr_warning("%s: failed to remove direct window: rtas returned "
+ "%d to ibm,remove-pe-dma-window(%x) %llx\n",
+ np->full_name, ret, ddw_avail[2], liobn);
+ else
+ pr_debug("%s: successfully removed direct window: rtas returned "
+ "%d to ibm,remove-pe-dma-window(%x) %llx\n",
+ np->full_name, ret, ddw_avail[2], liobn);
delprop:
ret = of_remove_property(np, win64);
@@ -790,68 +787,33 @@ static u64 find_existing_ddw(struct device_node *pdn)
return dma_addr;
}
-static void __restore_default_window(struct eeh_dev *edev,
- u32 ddw_restore_token)
-{
- u32 cfg_addr;
- u64 buid;
- int ret;
-
- /*
- * Get the config address and phb buid of the PE window.
- * Rely on eeh to retrieve this for us.
- * Retrieve them from the pci device, not the node with the
- * dma-window property
- */
- cfg_addr = edev->config_addr;
- if (edev->pe_config_addr)
- cfg_addr = edev->pe_config_addr;
- buid = edev->phb->buid;
-
- do {
- ret = rtas_call(ddw_restore_token, 3, 1, NULL, cfg_addr,
- BUID_HI(buid), BUID_LO(buid));
- } while (rtas_busy_delay(ret));
- pr_info("ibm,reset-pe-dma-windows(%x) %x %x %x returned %d\n",
- ddw_restore_token, cfg_addr, BUID_HI(buid), BUID_LO(buid), ret);
-}
-
static int find_existing_ddw_windows(void)
{
+ int len;
struct device_node *pdn;
+ struct direct_window *window;
const struct dynamic_dma_window_prop *direct64;
- const u32 *ddw_extensions;
if (!firmware_has_feature(FW_FEATURE_LPAR))
return 0;
for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
- direct64 = of_get_property(pdn, DIRECT64_PROPNAME, NULL);
+ direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
if (!direct64)
continue;
- /*
- * We need to ensure the IOMMU table is active when we
- * return from the IOMMU setup so that the common code
- * can clear the table or find the holes. To that end,
- * first, remove any existing DDW configuration.
- */
- remove_ddw(pdn);
+ window = kzalloc(sizeof(*window), GFP_KERNEL);
+ if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
+ kfree(window);
+ remove_ddw(pdn);
+ continue;
+ }
- /*
- * Second, if we are running on a new enough level of
- * firmware where the restore API is present, use it to
- * restore the 32-bit window, which was removed in
- * create_ddw.
- * If the API is not present, then create_ddw couldn't
- * have removed the 32-bit window in the first place, so
- * removing the DDW configuration should be sufficient.
- */
- ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions",
- NULL);
- if (ddw_extensions && ddw_extensions[0] > 0)
- __restore_default_window(of_node_to_eeh_dev(pdn),
- ddw_extensions[1]);
+ window->device = pdn;
+ window->prop = direct64;
+ spin_lock(&direct_window_list_lock);
+ list_add(&window->list, &direct_window_list);
+ spin_unlock(&direct_window_list_lock);
}
return 0;
@@ -921,12 +883,6 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
return ret;
}
-static void restore_default_window(struct pci_dev *dev,
- u32 ddw_restore_token)
-{
- __restore_default_window(pci_dev_to_eeh_dev(dev), ddw_restore_token);
-}
-
struct failed_ddw_pdn {
struct device_node *pdn;
struct list_head list;
@@ -954,13 +910,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
u64 dma_addr, max_addr;
struct device_node *dn;
const u32 *uninitialized_var(ddw_avail);
- const u32 *uninitialized_var(ddw_extensions);
- u32 ddw_restore_token = 0;
struct direct_window *window;
struct property *win64;
struct dynamic_dma_window_prop *ddwprop;
- const void *dma_window = NULL;
- unsigned long liobn, offset, size;
struct failed_ddw_pdn *fpdn;
mutex_lock(&direct_window_init_mutex);
@@ -991,42 +943,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
*/
ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
if (!ddw_avail || len < 3 * sizeof(u32))
- goto out_unlock;
-
- /*
- * the extensions property is only required to exist in certain
- * levels of firmware and later
- * the ibm,ddw-extensions property is a list with the first
- * element containing the number of extensions and each
- * subsequent entry is a value corresponding to that extension
- */
- ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions", &len);
- if (ddw_extensions) {
- /*
- * each new defined extension length should be added to
- * the top of the switch so the "earlier" entries also
- * get picked up
- */
- switch (ddw_extensions[0]) {
- /* ibm,reset-pe-dma-windows */
- case 1:
- ddw_restore_token = ddw_extensions[1];
- break;
- }
- }
-
- /*
- * Only remove the existing DMA window if we can restore back to
- * the default state. Removing the existing window maximizes the
- * resources available to firmware for dynamic window creation.
- */
- if (ddw_restore_token) {
- dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
- of_parse_dma_window(pdn, dma_window, &liobn, &offset, &size);
- __remove_ddw(pdn, ddw_avail, liobn);
- }
+ goto out_failed;
- /*
+ /*
* Query if there is a second window of size to map the
* whole partition. Query returns number of windows, largest
* block assigned to PE (partition endpoint), and two bitmasks
@@ -1035,7 +954,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
dn = pci_device_to_OF_node(dev);
ret = query_ddw(dev, ddw_avail, &query);
if (ret != 0)
- goto out_restore_window;
+ goto out_failed;
if (query.windows_available == 0) {
/*
@@ -1044,7 +963,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
* trading in for a larger page size.
*/
dev_dbg(&dev->dev, "no free dynamic windows");
- goto out_restore_window;
+ goto out_failed;
}
if (be32_to_cpu(query.page_size) & 4) {
page_shift = 24; /* 16MB */
@@ -1055,7 +974,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
} else {
dev_dbg(&dev->dev, "no supported direct page size in mask %x",
query.page_size);
- goto out_restore_window;
+ goto out_failed;
}
/* verify the window * number of ptes will map the partition */
/* check largest block * page size > max memory hotplug addr */
@@ -1064,14 +983,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u "
"%llu-sized pages\n", max_addr, query.largest_available_block,
1ULL << page_shift);
- goto out_restore_window;
+ goto out_failed;
}
len = order_base_2(max_addr);
win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
if (!win64) {
dev_info(&dev->dev,
"couldn't allocate property for 64bit dma window\n");
- goto out_restore_window;
+ goto out_failed;
}
win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
@@ -1133,9 +1052,7 @@ out_free_prop:
kfree(win64->value);
kfree(win64);
-out_restore_window:
- if (ddw_restore_token)
- restore_default_window(dev, ddw_restore_token);
+out_failed:
fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
if (!fpdn)
@@ -1193,7 +1110,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
pr_debug(" found DMA window, table: %p\n", pci->iommu_table);
}
- set_iommu_table_base(&dev->dev, pci->iommu_table);
+ set_iommu_table_base_and_group(&dev->dev, pci->iommu_table);
}
static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
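The hunks above make the per-table page shift explicit (it_page_shift) and derive it_offset and it_size from it rather than from the global IOMMU_PAGE_SHIFT. A small worked example of that arithmetic, assuming the 4K shift (IOMMU_PAGE_SHIFT_4K == 12) used at these call sites and purely hypothetical window numbers; this is a standalone sketch, not code from the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned int page_shift = 12;       /* IOMMU_PAGE_SHIFT_4K */
	uint64_t window_base = 0x80000000ull;     /* hypothetical 2 GiB base */
	uint64_t window_size = 0x40000000ull;     /* hypothetical 1 GiB window */

	/* Same units as it_offset / it_size above: TCE entries. */
	printf("it_offset = %llu entries\n",
	       (unsigned long long)(window_base >> page_shift)); /* 524288 */
	printf("it_size   = %llu entries\n",
	       (unsigned long long)(window_size >> page_shift)); /* 262144 */
	return 0;
}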
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 4fca3def9db..b02af9ef3ff 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -92,7 +92,7 @@ void vpa_init(int cpu)
* PAPR says this feature is SLB-Buffer but firmware never
* reports that. All SPLPAR support SLB shadow buffer.
*/
- addr = __pa(&slb_shadow[cpu]);
+ addr = __pa(paca[cpu].slb_shadow_ptr);
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
ret = register_slb_shadow(hwcpu, addr);
if (ret)
@@ -153,7 +153,8 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
/* Make pHyp happy */
if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU))
- hpte_r &= ~_PAGE_COHERENT;
+ hpte_r &= ~HPTE_R_M;
+
if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
flags |= H_COALESCE_CAND;
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 70670a2d9cf..c413ec158ff 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -113,7 +113,8 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
{
struct device_node *dn, *pdn;
struct pci_bus *bus;
- const __be32 *pcie_link_speed_stats;
+ u32 pcie_link_speed_stats[2];
+ int rc;
bus = bridge->bus;
@@ -122,38 +123,45 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
return 0;
for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
- pcie_link_speed_stats = of_get_property(pdn,
- "ibm,pcie-link-speed-stats", NULL);
- if (pcie_link_speed_stats)
+ rc = of_property_read_u32_array(pdn,
+ "ibm,pcie-link-speed-stats",
+ &pcie_link_speed_stats[0], 2);
+ if (!rc)
break;
}
of_node_put(pdn);
- if (!pcie_link_speed_stats) {
+ if (rc) {
pr_err("no ibm,pcie-link-speed-stats property\n");
return 0;
}
- switch (be32_to_cpup(pcie_link_speed_stats)) {
+ switch (pcie_link_speed_stats[0]) {
case 0x01:
bus->max_bus_speed = PCIE_SPEED_2_5GT;
break;
case 0x02:
bus->max_bus_speed = PCIE_SPEED_5_0GT;
break;
+ case 0x04:
+ bus->max_bus_speed = PCIE_SPEED_8_0GT;
+ break;
default:
bus->max_bus_speed = PCI_SPEED_UNKNOWN;
break;
}
- switch (be32_to_cpup(pcie_link_speed_stats)) {
+ switch (pcie_link_speed_stats[1]) {
case 0x01:
bus->cur_bus_speed = PCIE_SPEED_2_5GT;
break;
case 0x02:
bus->cur_bus_speed = PCIE_SPEED_5_0GT;
break;
+ case 0x04:
+ bus->cur_bus_speed = PCIE_SPEED_8_0GT;
+ break;
default:
bus->cur_bus_speed = PCI_SPEED_UNKNOWN;
break;
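The pseries_root_bridge_prepare() change above swaps an open-coded of_get_property()/be32_to_cpup() pair for of_property_read_u32_array(), which also verifies that the property carries at least two cells. A minimal sketch of that helper in isolation, with a hypothetical wrapper name and illustrative error handling:

#include <linux/of.h>

static int example_read_link_stats(struct device_node *pdn, u32 stats[2])
{
	int rc;

	rc = of_property_read_u32_array(pdn, "ibm,pcie-link-speed-stats",
					stats, 2);
	if (rc)
		return rc;	/* property missing or shorter than 2 cells */

	/* stats[0]: max link speed code, stats[1]: current link speed code */
	return 0;
}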
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
deleted file mode 100644
index a166e38bd68..00000000000
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * processor_idle - idle state cpuidle driver.
- * Adapted from drivers/idle/intel_idle.c and
- * drivers/acpi/processor_idle.c
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/moduleparam.h>
-#include <linux/cpuidle.h>
-#include <linux/cpu.h>
-#include <linux/notifier.h>
-
-#include <asm/paca.h>
-#include <asm/reg.h>
-#include <asm/machdep.h>
-#include <asm/firmware.h>
-#include <asm/runlatch.h>
-#include <asm/plpar_wrappers.h>
-
-struct cpuidle_driver pseries_idle_driver = {
- .name = "pseries_idle",
- .owner = THIS_MODULE,
-};
-
-#define MAX_IDLE_STATE_COUNT 2
-
-static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
-static struct cpuidle_device __percpu *pseries_cpuidle_devices;
-static struct cpuidle_state *cpuidle_state_table;
-
-static inline void idle_loop_prolog(unsigned long *in_purr)
-{
- *in_purr = mfspr(SPRN_PURR);
- /*
- * Indicate to the HV that we are idle. Now would be
- * a good time to find other work to dispatch.
- */
- get_lppaca()->idle = 1;
-}
-
-static inline void idle_loop_epilog(unsigned long in_purr)
-{
- u64 wait_cycles;
-
- wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
- wait_cycles += mfspr(SPRN_PURR) - in_purr;
- get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
- get_lppaca()->idle = 0;
-}
-
-static int snooze_loop(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- unsigned long in_purr;
- int cpu = dev->cpu;
-
- idle_loop_prolog(&in_purr);
- local_irq_enable();
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- while ((!need_resched()) && cpu_online(cpu)) {
- ppc64_runlatch_off();
- HMT_low();
- HMT_very_low();
- }
-
- HMT_medium();
- clear_thread_flag(TIF_POLLING_NRFLAG);
- smp_mb();
-
- idle_loop_epilog(in_purr);
-
- return index;
-}
-
-static void check_and_cede_processor(void)
-{
- /*
- * Ensure our interrupt state is properly tracked,
- * also checks if no interrupt has occurred while we
- * were soft-disabled
- */
- if (prep_irq_for_idle()) {
- cede_processor();
-#ifdef CONFIG_TRACE_IRQFLAGS
- /* Ensure that H_CEDE returns with IRQs on */
- if (WARN_ON(!(mfmsr() & MSR_EE)))
- __hard_irq_enable();
-#endif
- }
-}
-
-static int dedicated_cede_loop(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- unsigned long in_purr;
-
- idle_loop_prolog(&in_purr);
- get_lppaca()->donate_dedicated_cpu = 1;
-
- ppc64_runlatch_off();
- HMT_medium();
- check_and_cede_processor();
-
- get_lppaca()->donate_dedicated_cpu = 0;
-
- idle_loop_epilog(in_purr);
-
- return index;
-}
-
-static int shared_cede_loop(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- unsigned long in_purr;
-
- idle_loop_prolog(&in_purr);
-
- /*
- * Yield the processor to the hypervisor. We return if
- * an external interrupt occurs (which are driven prior
- * to returning here) or if a prod occurs from another
- * processor. When returning here, external interrupts
- * are enabled.
- */
- check_and_cede_processor();
-
- idle_loop_epilog(in_purr);
-
- return index;
-}
-
-/*
- * States for dedicated partition case.
- */
-static struct cpuidle_state dedicated_states[MAX_IDLE_STATE_COUNT] = {
- { /* Snooze */
- .name = "snooze",
- .desc = "snooze",
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .exit_latency = 0,
- .target_residency = 0,
- .enter = &snooze_loop },
- { /* CEDE */
- .name = "CEDE",
- .desc = "CEDE",
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .exit_latency = 10,
- .target_residency = 100,
- .enter = &dedicated_cede_loop },
-};
-
-/*
- * States for shared partition case.
- */
-static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = {
- { /* Shared Cede */
- .name = "Shared Cede",
- .desc = "Shared Cede",
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .exit_latency = 0,
- .target_residency = 0,
- .enter = &shared_cede_loop },
-};
-
-void update_smt_snooze_delay(int cpu, int residency)
-{
- struct cpuidle_driver *drv = cpuidle_get_driver();
- struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
-
- if (cpuidle_state_table != dedicated_states)
- return;
-
- if (residency < 0) {
- /* Disable the Nap state on that cpu */
- if (dev)
- dev->states_usage[1].disable = 1;
- } else
- if (drv)
- drv->states[1].target_residency = residency;
-}
-
-static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n,
- unsigned long action, void *hcpu)
-{
- int hotcpu = (unsigned long)hcpu;
- struct cpuidle_device *dev =
- per_cpu_ptr(pseries_cpuidle_devices, hotcpu);
-
- if (dev && cpuidle_get_driver()) {
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- cpuidle_pause_and_lock();
- cpuidle_enable_device(dev);
- cpuidle_resume_and_unlock();
- break;
-
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- cpuidle_pause_and_lock();
- cpuidle_disable_device(dev);
- cpuidle_resume_and_unlock();
- break;
-
- default:
- return NOTIFY_DONE;
- }
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block setup_hotplug_notifier = {
- .notifier_call = pseries_cpuidle_add_cpu_notifier,
-};
-
-/*
- * pseries_cpuidle_driver_init()
- */
-static int pseries_cpuidle_driver_init(void)
-{
- int idle_state;
- struct cpuidle_driver *drv = &pseries_idle_driver;
-
- drv->state_count = 0;
-
- for (idle_state = 0; idle_state < MAX_IDLE_STATE_COUNT; ++idle_state) {
-
- if (idle_state > max_idle_state)
- break;
-
- /* is the state not enabled? */
- if (cpuidle_state_table[idle_state].enter == NULL)
- continue;
-
- drv->states[drv->state_count] = /* structure copy */
- cpuidle_state_table[idle_state];
-
- drv->state_count += 1;
- }
-
- return 0;
-}
-
-/* pseries_idle_devices_uninit(void)
- * unregister cpuidle devices and de-allocate memory
- */
-static void pseries_idle_devices_uninit(void)
-{
- int i;
- struct cpuidle_device *dev;
-
- for_each_possible_cpu(i) {
- dev = per_cpu_ptr(pseries_cpuidle_devices, i);
- cpuidle_unregister_device(dev);
- }
-
- free_percpu(pseries_cpuidle_devices);
- return;
-}
-
-/* pseries_idle_devices_init()
- * allocate, initialize and register cpuidle device
- */
-static int pseries_idle_devices_init(void)
-{
- int i;
- struct cpuidle_driver *drv = &pseries_idle_driver;
- struct cpuidle_device *dev;
-
- pseries_cpuidle_devices = alloc_percpu(struct cpuidle_device);
- if (pseries_cpuidle_devices == NULL)
- return -ENOMEM;
-
- for_each_possible_cpu(i) {
- dev = per_cpu_ptr(pseries_cpuidle_devices, i);
- dev->state_count = drv->state_count;
- dev->cpu = i;
- if (cpuidle_register_device(dev)) {
- printk(KERN_DEBUG \
- "cpuidle_register_device %d failed!\n", i);
- return -EIO;
- }
- }
-
- return 0;
-}
-
-/*
- * pseries_idle_probe()
- * Choose state table for shared versus dedicated partition
- */
-static int pseries_idle_probe(void)
-{
-
- if (!firmware_has_feature(FW_FEATURE_SPLPAR))
- return -ENODEV;
-
- if (cpuidle_disable != IDLE_NO_OVERRIDE)
- return -ENODEV;
-
- if (max_idle_state == 0) {
- printk(KERN_DEBUG "pseries processor idle disabled.\n");
- return -EPERM;
- }
-
- if (lppaca_shared_proc(get_lppaca()))
- cpuidle_state_table = shared_states;
- else
- cpuidle_state_table = dedicated_states;
-
- return 0;
-}
-
-static int __init pseries_processor_idle_init(void)
-{
- int retval;
-
- retval = pseries_idle_probe();
- if (retval)
- return retval;
-
- pseries_cpuidle_driver_init();
- retval = cpuidle_register_driver(&pseries_idle_driver);
- if (retval) {
- printk(KERN_DEBUG "Registration of pseries driver failed.\n");
- return retval;
- }
-
- retval = pseries_idle_devices_init();
- if (retval) {
- pseries_idle_devices_uninit();
- cpuidle_unregister_driver(&pseries_idle_driver);
- return retval;
- }
-
- register_cpu_notifier(&setup_hotplug_notifier);
- printk(KERN_DEBUG "pseries_idle_driver registered\n");
-
- return 0;
-}
-
-static void __exit pseries_processor_idle_exit(void)
-{
-
- unregister_cpu_notifier(&setup_hotplug_notifier);
- pseries_idle_devices_uninit();
- cpuidle_unregister_driver(&pseries_idle_driver);
-
- return;
-}
-
-module_init(pseries_processor_idle_init);
-module_exit(pseries_processor_idle_exit);
-
-MODULE_AUTHOR("Deepthi Dharwar <deepthi@linux.vnet.ibm.com>");
-MODULE_DESCRIPTION("Cpuidle driver for POWER");
-MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index c1f19085870..972df0ffd4d 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -72,7 +72,7 @@
int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
-unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT);
+unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
EXPORT_SYMBOL(CMO_PageSize);
int fwnmi_active; /* TRUE if an FWNMI handler is present */
@@ -430,8 +430,7 @@ static void pSeries_machine_kexec(struct kimage *image)
{
long rc;
- if (firmware_has_feature(FW_FEATURE_SET_MODE) &&
- (image->type != KEXEC_TYPE_CRASH)) {
+ if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
rc = pSeries_disable_reloc_on_exc();
if (rc != H_SUCCESS)
pr_warning("Warning: Failed to disable relocation on "
@@ -470,7 +469,7 @@ static long pseries_little_endian_exceptions(void)
static void __init pSeries_setup_arch(void)
{
- panic_timeout = 10;
+ set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
/* Discover PIC type and setup ppc_md accordingly */
pseries_discover_pic();
@@ -569,7 +568,7 @@ void pSeries_cmo_feature_init(void)
{
char *ptr, *key, *value, *end;
int call_status;
- int page_order = IOMMU_PAGE_SHIFT;
+ int page_order = IOMMU_PAGE_SHIFT_4K;
pr_debug(" -> fw_cmo_feature_init()\n");
spin_lock(&rtas_data_buf_lock);
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c
index 62cb527493e..9a15e5b39bb 100644
--- a/arch/powerpc/platforms/wsp/wsp_pci.c
+++ b/arch/powerpc/platforms/wsp/wsp_pci.c
@@ -260,7 +260,7 @@ static int tce_build_wsp(struct iommu_table *tbl, long index, long npages,
*tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
dma_debug("[DMA] TCE %p set to 0x%016llx (dma addr: 0x%lx)\n",
- tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT);
+ tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT_4K);
uaddr += TCE_PAGE_SIZE;
index++;
@@ -381,8 +381,9 @@ static struct wsp_dma_table *wsp_pci_create_dma32_table(struct wsp_phb *phb,
/* Init bits and pieces */
tbl->table.it_blocksize = 16;
- tbl->table.it_offset = addr >> IOMMU_PAGE_SHIFT;
- tbl->table.it_size = size >> IOMMU_PAGE_SHIFT;
+ tbl->table.it_page_shift = IOMMU_PAGE_SHIFT_4K;
+ tbl->table.it_offset = addr >> tbl->table.it_page_shift;
+ tbl->table.it_size = size >> tbl->table.it_page_shift;
/*
* It's already blank but we clear it anyway.
@@ -449,8 +450,8 @@ static void wsp_pci_dma_dev_setup(struct pci_dev *pdev)
if (table) {
pr_info("%s: Setup iommu: 32-bit DMA region 0x%08lx..0x%08lx\n",
pci_name(pdev),
- table->table.it_offset << IOMMU_PAGE_SHIFT,
- (table->table.it_offset << IOMMU_PAGE_SHIFT)
+ table->table.it_offset << IOMMU_PAGE_SHIFT_4K,
+ (table->table.it_offset << IOMMU_PAGE_SHIFT_4K)
+ phb->dma32_region_size - 1);
archdata->dma_data.iommu_table_base = &table->table;
return;
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index 13ec968be4c..7baa70d6dc0 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -19,7 +19,7 @@ config PPC_MSI_BITMAP
default y if MPIC
default y if FSL_PCI
default y if PPC4xx_MSI
- default y if POWERNV_MSI
+ default y if PPC_POWERNV
source "arch/powerpc/sysdev/xics/Kconfig"
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 1c16141c031..47b6b9f81d4 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -109,27 +109,28 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
unsigned long phys_mem, phys_end;
void *user_mem;
- struct bio_vec *vec;
+ struct bio_vec vec;
unsigned int transfered;
- unsigned short idx;
+ struct bvec_iter iter;
- phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
+ phys_mem = bank->io_addr + (bio->bi_iter.bi_sector <<
+ AXON_RAM_SECTOR_SHIFT);
phys_end = bank->io_addr + bank->size;
transfered = 0;
- bio_for_each_segment(vec, bio, idx) {
- if (unlikely(phys_mem + vec->bv_len > phys_end)) {
+ bio_for_each_segment(vec, bio, iter) {
+ if (unlikely(phys_mem + vec.bv_len > phys_end)) {
bio_io_error(bio);
return;
}
- user_mem = page_address(vec->bv_page) + vec->bv_offset;
+ user_mem = page_address(vec.bv_page) + vec.bv_offset;
if (bio_data_dir(bio) == READ)
- memcpy(user_mem, (void *) phys_mem, vec->bv_len);
+ memcpy(user_mem, (void *) phys_mem, vec.bv_len);
else
- memcpy((void *) phys_mem, user_mem, vec->bv_len);
+ memcpy((void *) phys_mem, user_mem, vec.bv_len);
- phys_mem += vec->bv_len;
- transfered += vec->bv_len;
+ phys_mem += vec.bv_len;
+ transfered += vec.bv_len;
}
bio_endio(bio, 0);
}
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index 10386b676d8..a11bd1d433a 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -27,7 +27,6 @@
*/
#include <linux/stddef.h>
-#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/irq.h>
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index bd968a43a48..62c47bb7651 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -292,6 +292,7 @@ static void iommu_table_dart_setup(void)
iommu_table_dart.it_offset = 0;
/* it_size is in number of entries */
iommu_table_dart.it_size = dart_tablesize / sizeof(u32);
+ iommu_table_dart.it_page_shift = IOMMU_PAGE_SHIFT_4K;
/* Initialize the common IOMMU code */
iommu_table_dart.it_base = (unsigned long)dart_vbase;
diff --git a/arch/powerpc/sysdev/fsl_ifc.c b/arch/powerpc/sysdev/fsl_ifc.c
index d7fc7223914..fbc885b3194 100644
--- a/arch/powerpc/sysdev/fsl_ifc.c
+++ b/arch/powerpc/sysdev/fsl_ifc.c
@@ -19,7 +19,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index 6bc5a546d49..d631022ffb4 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -214,10 +214,14 @@ static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data)
struct fsl_lbc_ctrl *ctrl = data;
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
u32 status;
+ unsigned long flags;
+ spin_lock_irqsave(&fsl_lbc_lock, flags);
status = in_be32(&lbc->ltesr);
- if (!status)
+ if (!status) {
+ spin_unlock_irqrestore(&fsl_lbc_lock, flags);
return IRQ_NONE;
+ }
out_be32(&lbc->ltesr, LTESR_CLEAR);
out_be32(&lbc->lteatr, 0);
@@ -260,6 +264,7 @@ static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data)
if (status & ~LTESR_MASK)
dev_err(ctrl->dev, "Unknown error: "
"LTESR 0x%08X\n", status);
+ spin_unlock_irqrestore(&fsl_lbc_lock, flags);
return IRQ_HANDLED;
}
@@ -298,8 +303,8 @@ static int fsl_lbc_ctrl_probe(struct platform_device *dev)
goto err;
}
- fsl_lbc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
- if (fsl_lbc_ctrl_dev->irq == NO_IRQ) {
+ fsl_lbc_ctrl_dev->irq[0] = irq_of_parse_and_map(dev->dev.of_node, 0);
+ if (!fsl_lbc_ctrl_dev->irq[0]) {
dev_err(&dev->dev, "failed to get irq resource\n");
ret = -ENODEV;
goto err;
@@ -311,20 +316,34 @@ static int fsl_lbc_ctrl_probe(struct platform_device *dev)
if (ret < 0)
goto err;
- ret = request_irq(fsl_lbc_ctrl_dev->irq, fsl_lbc_ctrl_irq, 0,
+ ret = request_irq(fsl_lbc_ctrl_dev->irq[0], fsl_lbc_ctrl_irq, 0,
"fsl-lbc", fsl_lbc_ctrl_dev);
if (ret != 0) {
dev_err(&dev->dev, "failed to install irq (%d)\n",
- fsl_lbc_ctrl_dev->irq);
- ret = fsl_lbc_ctrl_dev->irq;
+ fsl_lbc_ctrl_dev->irq[0]);
+ ret = fsl_lbc_ctrl_dev->irq[0];
goto err;
}
+ fsl_lbc_ctrl_dev->irq[1] = irq_of_parse_and_map(dev->dev.of_node, 1);
+ if (fsl_lbc_ctrl_dev->irq[1]) {
+ ret = request_irq(fsl_lbc_ctrl_dev->irq[1], fsl_lbc_ctrl_irq,
+ IRQF_SHARED, "fsl-lbc-err", fsl_lbc_ctrl_dev);
+ if (ret) {
+ dev_err(&dev->dev, "failed to install irq (%d)\n",
+ fsl_lbc_ctrl_dev->irq[1]);
+ ret = fsl_lbc_ctrl_dev->irq[1];
+ goto err1;
+ }
+ }
+
/* Enable interrupts for any detected events */
out_be32(&fsl_lbc_ctrl_dev->regs->lteir, LTEIR_ENABLE);
return 0;
+err1:
+ free_irq(fsl_lbc_ctrl_dev->irq[0], fsl_lbc_ctrl_dev);
err:
iounmap(fsl_lbc_ctrl_dev->regs);
kfree(fsl_lbc_ctrl_dev);
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 4dfd61df8ab..a625dcf26b2 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -122,7 +122,7 @@ static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
* address width of the SoC such that we can address any internal
* SoC address from across PCI if needed
*/
- if ((dev->bus == &pci_bus_type) &&
+ if ((dev_is_pci(dev)) &&
dma_mask >= DMA_BIT_MASK(MAX_PHYS_ADDR_BITS)) {
set_dma_ops(dev, &dma_direct_ops);
set_dma_offset(dev, pci64_dma_offset);
@@ -454,7 +454,7 @@ void fsl_pcibios_fixup_bus(struct pci_bus *bus)
}
}
-int __init fsl_add_bridge(struct platform_device *pdev, int is_primary)
+int fsl_add_bridge(struct platform_device *pdev, int is_primary)
{
int len;
struct pci_controller *hose;
@@ -1035,6 +1035,7 @@ static const struct of_device_id pci_ids[] = {
{ .compatible = "fsl,mpc8548-pcie", },
{ .compatible = "fsl,mpc8610-pci", },
{ .compatible = "fsl,mpc8641-pcie", },
+ { .compatible = "fsl,qoriq-pcie", },
{ .compatible = "fsl,qoriq-pcie-v2.1", },
{ .compatible = "fsl,qoriq-pcie-v2.2", },
{ .compatible = "fsl,qoriq-pcie-v2.3", },
diff --git a/arch/powerpc/sysdev/ge/ge_pic.h b/arch/powerpc/sysdev/ge/ge_pic.h
index 6149916da3f..908dbd9826b 100644
--- a/arch/powerpc/sysdev/ge/ge_pic.h
+++ b/arch/powerpc/sysdev/ge/ge_pic.h
@@ -1,7 +1,6 @@
#ifndef __GEF_PIC_H__
#define __GEF_PIC_H__
-#include <linux/init.h>
void gef_pic_cascade(unsigned int, struct irq_desc *);
unsigned int gef_pic_get_irq(void);
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index 997df6a7ab5..45598da0b32 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -8,7 +8,6 @@
*/
#undef DEBUG
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c
index c6c8b526a4f..1f6c570d66d 100644
--- a/arch/powerpc/sysdev/indirect_pci.c
+++ b/arch/powerpc/sysdev/indirect_pci.c
@@ -152,10 +152,8 @@ static struct pci_ops indirect_pci_ops =
.write = indirect_write_config,
};
-void __init
-setup_indirect_pci(struct pci_controller* hose,
- resource_size_t cfg_addr,
- resource_size_t cfg_data, u32 flags)
+void setup_indirect_pci(struct pci_controller *hose, resource_size_t cfg_addr,
+ resource_size_t cfg_data, u32 flags)
{
resource_size_t base = cfg_addr & PAGE_MASK;
void __iomem *mbase;
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index b724622c3a0..c4828c0be5b 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -1,6 +1,5 @@
#include <linux/kernel.h>
#include <linux/stddef.h>
-#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/irq.h>
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 0e166ed4cd1..8209744b282 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -886,25 +886,25 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
/* Default: read HW settings */
if (flow_type == IRQ_TYPE_DEFAULT) {
- switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
- MPIC_INFO(VECPRI_SENSE_MASK))) {
- case MPIC_INFO(VECPRI_SENSE_EDGE) |
- MPIC_INFO(VECPRI_POLARITY_POSITIVE):
- flow_type = IRQ_TYPE_EDGE_RISING;
- break;
- case MPIC_INFO(VECPRI_SENSE_EDGE) |
- MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
- flow_type = IRQ_TYPE_EDGE_FALLING;
- break;
- case MPIC_INFO(VECPRI_SENSE_LEVEL) |
- MPIC_INFO(VECPRI_POLARITY_POSITIVE):
- flow_type = IRQ_TYPE_LEVEL_HIGH;
- break;
- case MPIC_INFO(VECPRI_SENSE_LEVEL) |
- MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
- flow_type = IRQ_TYPE_LEVEL_LOW;
- break;
- }
+ int vold_ps;
+
+ vold_ps = vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
+ MPIC_INFO(VECPRI_SENSE_MASK));
+
+ if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
+ MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
+ flow_type = IRQ_TYPE_EDGE_RISING;
+ else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
+ MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
+ flow_type = IRQ_TYPE_EDGE_FALLING;
+ else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
+ MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
+ flow_type = IRQ_TYPE_LEVEL_HIGH;
+ else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
+ MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
+ flow_type = IRQ_TYPE_LEVEL_LOW;
+ else
+ WARN_ONCE(1, "mpic: unknown IRQ type %d\n", vold);
}
/* Apply to irq desc */
diff --git a/arch/powerpc/sysdev/mpic_timer.c b/arch/powerpc/sysdev/mpic_timer.c
index 22d7d57eead..9d9b06217f8 100644
--- a/arch/powerpc/sysdev/mpic_timer.c
+++ b/arch/powerpc/sysdev/mpic_timer.c
@@ -41,6 +41,7 @@
#define MPIC_TIMER_TCR_ROVR_OFFSET 24
#define TIMER_STOP 0x80000000
+#define GTCCR_TOG 0x80000000
#define TIMERS_PER_GROUP 4
#define MAX_TICKS (~0U >> 1)
#define MAX_TICKS_CASCADE (~0U)
@@ -96,8 +97,11 @@ static void convert_ticks_to_time(struct timer_group_priv *priv,
time->tv_sec = (__kernel_time_t)div_u64(ticks, priv->timerfreq);
tmp_sec = (u64)time->tv_sec * (u64)priv->timerfreq;
- time->tv_usec = (__kernel_suseconds_t)
- div_u64((ticks - tmp_sec) * 1000000, priv->timerfreq);
+ time->tv_usec = 0;
+
+ if (tmp_sec <= ticks)
+ time->tv_usec = (__kernel_suseconds_t)
+ div_u64((ticks - tmp_sec) * 1000000, priv->timerfreq);
return;
}
@@ -327,11 +331,13 @@ void mpic_get_remain_time(struct mpic_timer *handle, struct timeval *time)
casc_priv = priv->timer[handle->num].cascade_handle;
if (casc_priv) {
tmp_ticks = in_be32(&priv->regs[handle->num].gtccr);
+ tmp_ticks &= ~GTCCR_TOG;
ticks = ((u64)tmp_ticks & UINT_MAX) * (u64)MAX_TICKS_CASCADE;
tmp_ticks = in_be32(&priv->regs[handle->num - 1].gtccr);
ticks += tmp_ticks;
} else {
ticks = in_be32(&priv->regs[handle->num].gtccr);
+ ticks &= ~GTCCR_TOG;
}
convert_ticks_to_time(priv, ticks, time);
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
index a3a8fad8537..c2dba7db71a 100644
--- a/arch/powerpc/sysdev/mv64x60_dev.c
+++ b/arch/powerpc/sysdev/mv64x60_dev.c
@@ -448,7 +448,7 @@ static int __init mv64x60_device_setup(void)
int err;
id = 0;
- for_each_compatible_node(np, "serial", "marvell,mv64360-mpsc") {
+ for_each_compatible_node(np, NULL, "marvell,mv64360-mpsc") {
err = mv64x60_mpsc_device_setup(np, id++);
if (err)
printk(KERN_ERR "Failed to initialize MV64x60 "
diff --git a/arch/powerpc/sysdev/mv64x60_udbg.c b/arch/powerpc/sysdev/mv64x60_udbg.c
index 50a81387e9b..3b8734b870e 100644
--- a/arch/powerpc/sysdev/mv64x60_udbg.c
+++ b/arch/powerpc/sysdev/mv64x60_udbg.c
@@ -85,7 +85,7 @@ static void mv64x60_udbg_init(void)
if (!stdout)
return;
- for_each_compatible_node(np, "serial", "marvell,mv64360-mpsc") {
+ for_each_compatible_node(np, NULL, "marvell,mv64360-mpsc") {
if (np == stdout)
break;
}
diff --git a/arch/powerpc/sysdev/qe_lib/qe_io.c b/arch/powerpc/sysdev/qe_lib/qe_io.c
index a88807b3dd5..d09994164da 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_io.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_io.c
@@ -16,7 +16,6 @@
#include <linux/stddef.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/ioport.h>
diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c
index 134b07d2943..621575b7e84 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc.c
@@ -14,7 +14,6 @@
* option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_fast.c b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
index cceb2e36673..65aaf15032a 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_fast.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
@@ -13,7 +13,6 @@
* option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
index 1c062f48f1a..befaf1123f7 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_slow.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
@@ -13,7 +13,6 @@
* option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
diff --git a/arch/powerpc/sysdev/udbg_memcons.c b/arch/powerpc/sysdev/udbg_memcons.c
index ce5a7b489e4..9998c0de12d 100644
--- a/arch/powerpc/sysdev/udbg_memcons.c
+++ b/arch/powerpc/sysdev/udbg_memcons.c
@@ -18,7 +18,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/barrier.h>
#include <asm/page.h>
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index df0fc582146..c1917cf67c3 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -12,7 +12,6 @@
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index af9d3469fb9..b07909850f7 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -309,16 +309,23 @@ static void get_output_lock(void)
if (xmon_speaker == me)
return;
+
for (;;) {
- if (xmon_speaker == 0) {
- last_speaker = cmpxchg(&xmon_speaker, 0, me);
- if (last_speaker == 0)
- return;
- }
- timeout = 10000000;
+ last_speaker = cmpxchg(&xmon_speaker, 0, me);
+ if (last_speaker == 0)
+ return;
+
+ /*
+ * Wait a full second for the lock, we might be on a slow
+ * console, but check every 100us.
+ */
+ timeout = 10000;
while (xmon_speaker == last_speaker) {
- if (--timeout > 0)
+ if (--timeout > 0) {
+ udelay(100);
continue;
+ }
+
/* hostile takeover */
prev = cmpxchg(&xmon_speaker, last_speaker, me);
if (prev == last_speaker)
@@ -397,7 +404,6 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
}
xmon_fault_jmp[cpu] = recurse_jmp;
- cpumask_set_cpu(cpu, &cpus_in_xmon);
bp = NULL;
if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT))
@@ -419,6 +425,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
release_output_lock();
}
+ cpumask_set_cpu(cpu, &cpus_in_xmon);
+
waiting:
secondary = 1;
while (secondary && !xmon_gate) {
@@ -2051,6 +2059,10 @@ static void dump_one_paca(int cpu)
DUMP(p, stab_addr, "lx");
#endif
DUMP(p, emergency_sp, "p");
+#ifdef CONFIG_PPC_BOOK3S_64
+ DUMP(p, mc_emergency_sp, "p");
+ DUMP(p, in_mce, "x");
+#endif
DUMP(p, data_offset, "lx");
DUMP(p, hw_cpu_id, "x");
DUMP(p, cpu_start, "x");