author     Frederic Weisbecker <fweisbec@gmail.com>   2013-05-02 17:37:49 +0200
committer  Frederic Weisbecker <fweisbec@gmail.com>   2013-05-02 17:54:19 +0200
commit     c032862fba51a3ca504752d3a25186b324c5ce83 (patch)
tree       955dc2ba4ab3df76ecc2bb780ee84aca04967e8d /arch
parent     fda76e074c7737fc57855dd17c762e50ed526052 (diff)
parent     8700c95adb033843fc163d112b9d21d4fda78018 (diff)

Merge commit '8700c95adb03' into timers/nohz
The full dynticks tree needs the latest RCU and sched upstream updates
in order to fix some dependencies. Merge a common upstream merge point
that has these updates.

Conflicts:
	include/linux/perf_event.h
	kernel/rcutree.h
	kernel/rcutree_plugin.h

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>

Diffstat (limited to 'arch')
-rw-r--r--arch/Kconfig7
-rw-r--r--arch/alpha/Kconfig2
-rw-r--r--arch/alpha/Makefile2
-rw-r--r--arch/alpha/boot/head.S1
-rw-r--r--arch/alpha/include/asm/floppy.h2
-rw-r--r--arch/alpha/include/asm/thread_info.h2
-rw-r--r--arch/alpha/kernel/irq.c7
-rw-r--r--arch/alpha/kernel/irq_alpha.c10
-rw-r--r--arch/alpha/kernel/process.c19
-rw-r--r--arch/alpha/kernel/smp.c3
-rw-r--r--arch/alpha/kernel/sys_nautilus.c10
-rw-r--r--arch/alpha/kernel/sys_titan.c14
-rw-r--r--arch/alpha/mm/init.c24
-rw-r--r--arch/alpha/mm/numa.c3
-rw-r--r--arch/arc/include/asm/dma-mapping.h2
-rw-r--r--arch/arc/include/asm/elf.h3
-rw-r--r--arch/arc/include/asm/entry.h2
-rw-r--r--arch/arc/include/asm/irqflags.h12
-rw-r--r--arch/arc/include/asm/kgdb.h6
-rw-r--r--arch/arc/include/asm/ptrace.h2
-rw-r--r--arch/arc/include/asm/syscalls.h2
-rw-r--r--arch/arc/include/uapi/asm/ptrace.h4
-rw-r--r--arch/arc/kernel/entry.S27
-rw-r--r--arch/arc/kernel/kgdb.c1
-rw-r--r--arch/arc/kernel/process.c27
-rw-r--r--arch/arc/kernel/setup.c4
-rw-r--r--arch/arc/kernel/smp.c2
-rw-r--r--arch/arc/kernel/sys.c2
-rw-r--r--arch/arc/mm/init.c23
-rw-r--r--arch/arm/Kconfig39
-rw-r--r--arch/arm/Kconfig.debug3
-rw-r--r--arch/arm/boot/Makefile2
-rw-r--r--arch/arm/boot/compressed/Makefile2
-rw-r--r--arch/arm/boot/dts/armada-370-mirabox.dts2
-rw-r--r--arch/arm/boot/dts/armada-370-rd.dts8
-rw-r--r--arch/arm/boot/dts/armada-370-xp.dtsi5
-rw-r--r--arch/arm/boot/dts/armada-370.dtsi6
-rw-r--r--arch/arm/boot/dts/armada-xp.dtsi4
-rw-r--r--arch/arm/boot/dts/at91sam9260.dtsi40
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi40
-rw-r--r--arch/arm/boot/dts/at91sam9263ek.dts10
-rw-r--r--arch/arm/boot/dts/at91sam9g20ek_common.dtsi10
-rw-r--r--arch/arm/boot/dts/at91sam9g45.dtsi40
-rw-r--r--arch/arm/boot/dts/at91sam9m10g45ek.dts10
-rw-r--r--arch/arm/boot/dts/at91sam9n12.dtsi40
-rw-r--r--arch/arm/boot/dts/at91sam9n12ek.dts10
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi68
-rw-r--r--arch/arm/boot/dts/at91sam9x5ek.dtsi10
-rw-r--r--arch/arm/boot/dts/bcm2835.dtsi2
-rw-r--r--arch/arm/boot/dts/dbx5x0.dtsi7
-rw-r--r--arch/arm/boot/dts/dove.dtsi5
-rw-r--r--arch/arm/boot/dts/exynos4.dtsi9
-rw-r--r--arch/arm/boot/dts/exynos5440.dtsi6
-rw-r--r--arch/arm/boot/dts/href.dtsi2
-rw-r--r--arch/arm/boot/dts/hrefv60plus.dts2
-rw-r--r--arch/arm/boot/dts/imx28-m28evk.dts1
-rw-r--r--arch/arm/boot/dts/imx28-sps1.dts1
-rw-r--r--arch/arm/boot/dts/imx53-mba53.dts3
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi1
-rw-r--r--arch/arm/boot/dts/kirkwood-dns320.dts2
-rw-r--r--arch/arm/boot/dts/kirkwood-dns325.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-dockstar.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-dreamplug.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-goflexnet.dts2
-rw-r--r--arch/arm/boot/dts/kirkwood-ib62x0.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-iconnect.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts15
-rw-r--r--arch/arm/boot/dts/kirkwood-km_kirkwood.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-lschlv2.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-lsxhl.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-mplcec4.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-ns2-common.dtsi1
-rw-r--r--arch/arm/boot/dts/kirkwood-nsa310.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood-openblocks_a6.dts2
-rw-r--r--arch/arm/boot/dts/kirkwood-topkick.dts1
-rw-r--r--arch/arm/boot/dts/kirkwood.dtsi5
-rw-r--r--arch/arm/boot/dts/msm8660-surf.dts6
-rw-r--r--arch/arm/boot/dts/msm8960-cdp.dts6
-rw-r--r--arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts2
-rw-r--r--arch/arm/boot/dts/orion5x.dtsi9
-rw-r--r--arch/arm/boot/dts/snowball.dts2
-rw-r--r--arch/arm/boot/dts/socfpga.dtsi3
-rw-r--r--arch/arm/boot/dts/spear1310.dtsi4
-rw-r--r--arch/arm/boot/dts/spear1340.dtsi4
-rw-r--r--arch/arm/boot/dts/spear310.dtsi4
-rw-r--r--arch/arm/boot/dts/spear320.dtsi4
-rw-r--r--arch/arm/boot/dts/tegra20.dtsi3
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi3
-rw-r--r--arch/arm/boot/dts/vt8500-bv07.dts34
-rw-r--r--arch/arm/boot/dts/vt8500.dtsi4
-rw-r--r--arch/arm/boot/dts/wm8505-ref.dts34
-rw-r--r--arch/arm/boot/dts/wm8505.dtsi4
-rw-r--r--arch/arm/boot/dts/wm8650-mid.dts36
-rw-r--r--arch/arm/boot/dts/wm8650.dtsi4
-rw-r--r--arch/arm/boot/dts/wm8850-w70v2.dts40
-rw-r--r--arch/arm/boot/dts/wm8850.dtsi4
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig1
-rw-r--r--arch/arm/configs/lpc32xx_defconfig1
-rw-r--r--arch/arm/configs/mxs_defconfig2
-rw-r--r--arch/arm/configs/omap1_defconfig1
-rw-r--r--arch/arm/configs/omap2plus_defconfig2
-rw-r--r--arch/arm/include/asm/delay.h2
-rw-r--r--arch/arm/include/asm/glue-cache.h8
-rw-r--r--arch/arm/include/asm/hardware/iop3xx.h2
-rw-r--r--arch/arm/include/asm/highmem.h7
-rw-r--r--arch/arm/include/asm/mmu.h8
-rw-r--r--arch/arm/include/asm/mmu_context.h4
-rw-r--r--arch/arm/include/asm/pgtable-3level.h2
-rw-r--r--arch/arm/include/asm/pgtable.h9
-rw-r--r--arch/arm/include/asm/system_misc.h3
-rw-r--r--arch/arm/include/asm/tlbflush.h60
-rw-r--r--arch/arm/include/asm/xen/events.h25
-rw-r--r--arch/arm/include/uapi/asm/unistd.h2
-rw-r--r--arch/arm/kernel/asm-offsets.c2
-rw-r--r--arch/arm/kernel/calls.S2
-rw-r--r--arch/arm/kernel/early_printk.c17
-rw-r--r--arch/arm/kernel/entry-common.S12
-rw-r--r--arch/arm/kernel/head.S26
-rw-r--r--arch/arm/kernel/hw_breakpoint.c10
-rw-r--r--arch/arm/kernel/perf_event.c9
-rw-r--r--arch/arm/kernel/perf_event_v7.c2
-rw-r--r--arch/arm/kernel/process.c100
-rw-r--r--arch/arm/kernel/sched_clock.c4
-rw-r--r--arch/arm/kernel/setup.c27
-rw-r--r--arch/arm/kernel/smp.c8
-rw-r--r--arch/arm/kernel/smp_tlb.c78
-rw-r--r--arch/arm/kernel/smp_twd.c4
-rw-r--r--arch/arm/kernel/suspend.c1
-rw-r--r--arch/arm/kernel/tcm.c1
-rw-r--r--arch/arm/kvm/arm.c1
-rw-r--r--arch/arm/kvm/coproc.c4
-rw-r--r--arch/arm/kvm/vgic.c35
-rw-r--r--arch/arm/lib/delay.c8
-rw-r--r--arch/arm/lib/memset.S100
-rw-r--r--arch/arm/mach-at91/at91sam9260.c2
-rw-r--r--arch/arm/mach-at91/at91sam9g45.c2
-rw-r--r--arch/arm/mach-at91/at91sam9n12.c2
-rw-r--r--arch/arm/mach-at91/at91sam9x5.c2
-rw-r--r--arch/arm/mach-at91/board-foxg20.c1
-rw-r--r--arch/arm/mach-at91/board-stamp9g20.c1
-rw-r--r--arch/arm/mach-at91/include/mach/gpio.h8
-rw-r--r--arch/arm/mach-at91/irq.c20
-rw-r--r--arch/arm/mach-at91/pm.c10
-rw-r--r--arch/arm/mach-cns3xxx/core.c16
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/cns3xxx.h16
-rw-r--r--arch/arm/mach-davinci/dma.c3
-rw-r--r--arch/arm/mach-ep93xx/include/mach/uncompress.h10
-rw-r--r--arch/arm/mach-exynos/mach-universal_c210.c27
-rw-r--r--arch/arm/mach-exynos/setup-usb-phy.c8
-rw-r--r--arch/arm/mach-footbridge/Kconfig1
-rw-r--r--arch/arm/mach-gemini/idle.c4
-rw-r--r--arch/arm/mach-gemini/irq.c4
-rw-r--r--arch/arm/mach-highbank/hotplug.c10
-rw-r--r--arch/arm/mach-imx/clk-busy.c2
-rw-r--r--arch/arm/mach-imx/clk-imx35.c3
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c5
-rw-r--r--arch/arm/mach-imx/common.h2
-rw-r--r--arch/arm/mach-imx/headsmp.S18
-rw-r--r--arch/arm/mach-imx/hotplug.c12
-rw-r--r--arch/arm/mach-imx/imx25-dt.c5
-rw-r--r--arch/arm/mach-imx/pm-imx6q.c15
-rw-r--r--arch/arm/mach-imx/src.c12
-rw-r--r--arch/arm/mach-ixp4xx/common.c3
-rw-r--r--arch/arm/mach-ixp4xx/vulcan-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/board-dt.c25
-rw-r--r--arch/arm/mach-kirkwood/board-iomega_ix2_200.c7
-rw-r--r--arch/arm/mach-kirkwood/guruplug-setup.c2
-rw-r--r--arch/arm/mach-kirkwood/openrd-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/rd88f6281-setup.c1
-rw-r--r--arch/arm/mach-mmp/aspenite.c6
-rw-r--r--arch/arm/mach-mmp/gplugd.c1
-rw-r--r--arch/arm/mach-mmp/ttc_dkb.c6
-rw-r--r--arch/arm/mach-msm/timer.c5
-rw-r--r--arch/arm/mach-mvebu/irq-armada-370-xp.c24
-rw-r--r--arch/arm/mach-mxs/icoll.c2
-rw-r--r--arch/arm/mach-mxs/mach-mxs.c34
-rw-r--r--arch/arm/mach-mxs/mm.c1
-rw-r--r--arch/arm/mach-mxs/ocotp.c1
-rw-r--r--arch/arm/mach-netx/generic.c2
-rw-r--r--arch/arm/mach-netx/include/mach/irqs.h64
-rw-r--r--arch/arm/mach-omap1/clock_data.c12
-rw-r--r--arch/arm/mach-omap1/common.h2
-rw-r--r--arch/arm/mach-omap1/pm.c6
-rw-r--r--arch/arm/mach-omap2/Kconfig6
-rw-r--r--arch/arm/mach-omap2/board-generic.c2
-rw-r--r--arch/arm/mach-omap2/board-rx51.c2
-rw-r--r--arch/arm/mach-omap2/cclock44xx_data.c20
-rw-r--r--arch/arm/mach-omap2/common.h4
-rw-r--r--arch/arm/mach-omap2/gpmc.c6
-rw-r--r--arch/arm/mach-omap2/io.c18
-rw-r--r--arch/arm/mach-omap2/mux.c9
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c14
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.h9
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c7
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c12
-rw-r--r--arch/arm/mach-omap2/pm.c5
-rw-r--r--arch/arm/mach-omap2/timer.c4
-rw-r--r--arch/arm/mach-orion5x/board-dt.c3
-rw-r--r--arch/arm/mach-orion5x/common.c2
-rw-r--r--arch/arm/mach-pxa/raumfeld.c1
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2440.c5
-rw-r--r--arch/arm/mach-s3c24xx/common.c5
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/irqs.h4
-rw-r--r--arch/arm/mach-s3c24xx/irq.c2
-rw-r--r--arch/arm/mach-s3c64xx/setup-usb-phy.c4
-rw-r--r--arch/arm/mach-s5pv210/clock.c36
-rw-r--r--arch/arm/mach-s5pv210/mach-goni.c2
-rw-r--r--arch/arm/mach-s5pv210/setup-usb-phy.c4
-rw-r--r--arch/arm/mach-shark/core.c3
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c8
-rw-r--r--arch/arm/mach-shmobile/board-kzm9g.c8
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c12
-rw-r--r--arch/arm/mach-shmobile/board-marzen.c1
-rw-r--r--arch/arm/mach-shmobile/suspend.c6
-rw-r--r--arch/arm/mach-spear3xx/spear3xx.c2
-rw-r--r--arch/arm/mach-tegra/Kconfig8
-rw-r--r--arch/arm/mach-ux500/board-mop500-regulators.c774
-rw-r--r--arch/arm/mach-ux500/board-mop500-regulators.h7
-rw-r--r--arch/arm/mach-ux500/board-mop500-sdi.c1
-rw-r--r--arch/arm/mach-ux500/board-mop500.c17
-rw-r--r--arch/arm/mach-ux500/board-mop500.h1
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c5
-rw-r--r--arch/arm/mach-vexpress/v2m.c8
-rw-r--r--arch/arm/mach-w90x900/dev.c3
-rw-r--r--arch/arm/mm/Kconfig5
-rw-r--r--arch/arm/mm/Makefile1
-rw-r--r--arch/arm/mm/cache-feroceon-l2.c1
-rw-r--r--arch/arm/mm/cache-l2x0.c11
-rw-r--r--arch/arm/mm/cache-v3.S137
-rw-r--r--arch/arm/mm/cache-v4.S2
-rw-r--r--arch/arm/mm/context.c32
-rw-r--r--arch/arm/mm/dma-mapping.c5
-rw-r--r--arch/arm/mm/idmap.c1
-rw-r--r--arch/arm/mm/init.c50
-rw-r--r--arch/arm/mm/mmu.c75
-rw-r--r--arch/arm/mm/proc-arm740.S30
-rw-r--r--arch/arm/mm/proc-arm920.S2
-rw-r--r--arch/arm/mm/proc-arm926.S2
-rw-r--r--arch/arm/mm/proc-mohawk.S2
-rw-r--r--arch/arm/mm/proc-sa1100.S2
-rw-r--r--arch/arm/mm/proc-syms.c2
-rw-r--r--arch/arm/mm/proc-v6.S2
-rw-r--r--arch/arm/mm/proc-v7-3level.S2
-rw-r--r--arch/arm/mm/proc-v7.S19
-rw-r--r--arch/arm/mm/proc-xsc3.S2
-rw-r--r--arch/arm/mm/proc-xscale.S2
-rw-r--r--arch/arm/mm/tcm.h (renamed from arch/arm/kernel/tcm.h)0
-rw-r--r--arch/arm/net/bpf_jit_32.c2
-rw-r--r--arch/arm/plat-orion/addr-map.c7
-rw-r--r--arch/arm/plat-samsung/devs.c10
-rw-r--r--arch/arm/plat-samsung/include/plat/fb.h50
-rw-r--r--arch/arm/plat-samsung/include/plat/regs-serial.h282
-rw-r--r--arch/arm/plat-samsung/include/plat/usb-phy.h5
-rw-r--r--arch/arm/plat-spear/Kconfig2
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/Kconfig.debug11
-rw-r--r--arch/arm64/configs/defconfig1
-rw-r--r--arch/arm64/include/asm/ucontext.h2
-rw-r--r--arch/arm64/kernel/arm64ksyms.c2
-rw-r--r--arch/arm64/kernel/process.c43
-rw-r--r--arch/arm64/kernel/signal32.c1
-rw-r--r--arch/arm64/kernel/smp.c2
-rw-r--r--arch/arm64/mm/init.c26
-rw-r--r--arch/arm64/mm/mmu.c15
-rw-r--r--arch/avr32/Kconfig2
-rw-r--r--arch/avr32/include/asm/io.h4
-rw-r--r--arch/avr32/kernel/process.c13
-rw-r--r--arch/avr32/kernel/time.c9
-rw-r--r--arch/avr32/mach-at32ap/include/mach/pm.h24
-rw-r--r--arch/avr32/mach-at32ap/pm-at32ap700x.S7
-rw-r--r--arch/avr32/mm/init.c24
-rw-r--r--arch/blackfin/Kconfig2
-rw-r--r--arch/blackfin/kernel/early_printk.c2
-rw-r--r--arch/blackfin/kernel/process.c32
-rw-r--r--arch/blackfin/mach-common/smp.c2
-rw-r--r--arch/blackfin/mm/init.c22
-rw-r--r--arch/c6x/include/asm/irqflags.h2
-rw-r--r--arch/c6x/kernel/process.c28
-rw-r--r--arch/c6x/mm/init.c30
-rw-r--r--arch/cris/Kconfig2
-rw-r--r--arch/cris/arch-v10/kernel/process.c3
-rw-r--r--arch/cris/arch-v32/kernel/process.c12
-rw-r--r--arch/cris/arch-v32/kernel/smp.c4
-rw-r--r--arch/cris/include/asm/processor.h7
-rw-r--r--arch/cris/kernel/process.c49
-rw-r--r--arch/cris/mm/init.c16
-rw-r--r--arch/frv/Kconfig2
-rw-r--r--arch/frv/kernel/process.c27
-rw-r--r--arch/frv/mm/init.c38
-rw-r--r--arch/h8300/Kconfig2
-rw-r--r--arch/h8300/kernel/process.c35
-rw-r--r--arch/h8300/mm/init.c30
-rw-r--r--arch/hexagon/kernel/process.c23
-rw-r--r--arch/hexagon/kernel/smp.c2
-rw-r--r--arch/ia64/Kconfig4
-rw-r--r--arch/ia64/hp/sim/simserial.c16
-rw-r--r--arch/ia64/include/asm/futex.h5
-rw-r--r--arch/ia64/include/asm/hugetlb.h1
-rw-r--r--arch/ia64/include/asm/irqflags.h1
-rw-r--r--arch/ia64/include/asm/mca.h1
-rw-r--r--arch/ia64/include/asm/numa.h5
-rw-r--r--arch/ia64/include/asm/thread_info.h2
-rw-r--r--arch/ia64/kernel/fsys.S49
-rw-r--r--arch/ia64/kernel/iosapic.c34
-rw-r--r--arch/ia64/kernel/irq.c8
-rw-r--r--arch/ia64/kernel/mca.c37
-rw-r--r--arch/ia64/kernel/mca_drv.c2
-rw-r--r--arch/ia64/kernel/palinfo.c77
-rw-r--r--arch/ia64/kernel/perfmon.c15
-rw-r--r--arch/ia64/kernel/process.c83
-rw-r--r--arch/ia64/kernel/smpboot.c2
-rw-r--r--arch/ia64/kvm/vtlb.c2
-rw-r--r--arch/ia64/mm/contig.c2
-rw-r--r--arch/ia64/mm/discontig.c9
-rw-r--r--arch/ia64/mm/init.c23
-rw-r--r--arch/ia64/mm/ioremap.c14
-rw-r--r--arch/ia64/mm/numa.c20
-rw-r--r--arch/ia64/pci/pci.c11
-rw-r--r--arch/ia64/sn/kernel/tiocx.c5
-rw-r--r--arch/m32r/Kconfig2
-rw-r--r--arch/m32r/include/uapi/asm/stat.h4
-rw-r--r--arch/m32r/kernel/process.c18
-rw-r--r--arch/m32r/kernel/smpboot.c2
-rw-r--r--arch/m32r/mm/init.c26
-rw-r--r--arch/m68k/Kconfig2
-rw-r--r--arch/m68k/Kconfig.machine1
-rw-r--r--arch/m68k/include/asm/MC68328.h10
-rw-r--r--arch/m68k/include/asm/gpio.h20
-rw-r--r--arch/m68k/kernel/process.c32
-rw-r--r--arch/m68k/kernel/setup_no.c3
-rw-r--r--arch/m68k/mm/init.c26
-rw-r--r--arch/m68k/platform/coldfire/m528x.c2
-rw-r--r--arch/metag/include/asm/elf.h3
-rw-r--r--arch/metag/include/asm/thread_info.h2
-rw-r--r--arch/metag/kernel/process.c35
-rw-r--r--arch/metag/kernel/smp.c2
-rw-r--r--arch/metag/mm/Kconfig1
-rw-r--r--arch/metag/mm/init.c31
-rw-r--r--arch/microblaze/Kconfig3
-rw-r--r--arch/microblaze/include/asm/processor.h5
-rw-r--r--arch/microblaze/include/asm/setup.h1
-rw-r--r--arch/microblaze/include/asm/thread_info.h1
-rw-r--r--arch/microblaze/kernel/early_printk.c26
-rw-r--r--arch/microblaze/kernel/process.c65
-rw-r--r--arch/microblaze/mm/init.c34
-rw-r--r--arch/mips/Kconfig12
-rw-r--r--arch/mips/bcm63xx/boards/board_bcm963xx.c5
-rw-r--r--arch/mips/bcm63xx/dev-spi.c11
-rw-r--r--arch/mips/bcm63xx/nvram.c7
-rw-r--r--arch/mips/bcm63xx/setup.c2
-rw-r--r--arch/mips/cavium-octeon/setup.c5
-rw-r--r--arch/mips/include/asm/hugetlb.h1
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h1
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h4
-rw-r--r--arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h4
-rw-r--r--arch/mips/include/asm/mipsregs.h209
-rw-r--r--arch/mips/include/asm/page.h2
-rw-r--r--arch/mips/include/asm/signal.h2
-rw-r--r--arch/mips/include/uapi/asm/signal.h8
-rw-r--r--arch/mips/kernel/Makefile25
-rw-r--r--arch/mips/kernel/cpu-probe.c13
-rw-r--r--arch/mips/kernel/early_printk.c12
-rw-r--r--arch/mips/kernel/linux32.c2
-rw-r--r--arch/mips/kernel/mcount.S11
-rw-r--r--arch/mips/kernel/proc.c2
-rw-r--r--arch/mips/kernel/process.c48
-rw-r--r--arch/mips/kernel/smp.c2
-rw-r--r--arch/mips/kernel/traps.c2
-rw-r--r--arch/mips/lib/bitops.c16
-rw-r--r--arch/mips/lib/csum_partial.S4
-rw-r--r--arch/mips/mm/c-r4k.c6
-rw-r--r--arch/mips/mm/init.c37
-rw-r--r--arch/mips/mm/sc-mips.c6
-rw-r--r--arch/mips/pci/pci-alchemy.c4
-rw-r--r--arch/mips/pci/pci.c8
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c4
-rw-r--r--arch/mn10300/Kconfig2
-rw-r--r--arch/mn10300/include/asm/thread_info.h2
-rw-r--r--arch/mn10300/kernel/process.c70
-rw-r--r--arch/mn10300/kernel/smp.c7
-rw-r--r--arch/mn10300/mm/init.c23
-rw-r--r--arch/openrisc/Kconfig3
-rw-r--r--arch/openrisc/include/asm/thread_info.h2
-rw-r--r--arch/openrisc/kernel/Makefile2
-rw-r--r--arch/openrisc/kernel/idle.c73
-rw-r--r--arch/openrisc/mm/init.c27
-rw-r--r--arch/parisc/Kconfig2
-rw-r--r--arch/parisc/Makefile6
-rw-r--r--arch/parisc/include/asm/cacheflush.h5
-rw-r--r--arch/parisc/include/asm/pgtable.h47
-rw-r--r--arch/parisc/include/asm/thread_info.h2
-rw-r--r--arch/parisc/include/asm/uaccess.h14
-rw-r--r--arch/parisc/kernel/cache.c5
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c2
-rw-r--r--arch/parisc/kernel/process.c22
-rw-r--r--arch/parisc/kernel/smp.c2
-rw-r--r--arch/parisc/lib/Makefile3
-rw-r--r--arch/parisc/lib/ucmpdi2.c25
-rw-r--r--arch/parisc/mm/init.c25
-rw-r--r--arch/powerpc/Kconfig3
-rw-r--r--arch/powerpc/crypto/sha1-powerpc-asm.S4
-rw-r--r--arch/powerpc/include/asm/bitops.h2
-rw-r--r--arch/powerpc/include/asm/hugetlb.h1
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h128
-rw-r--r--arch/powerpc/include/asm/reg.h3
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/thread_info.h2
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/asm/uprobes.h1
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S5
-rw-r--r--arch/powerpc/kernel/cputable.c2
-rw-r--r--arch/powerpc/kernel/crash_dump.c5
-rw-r--r--arch/powerpc/kernel/entry_64.S4
-rw-r--r--arch/powerpc/kernel/epapr_paravirt.c6
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S182
-rw-r--r--arch/powerpc/kernel/fadump.c5
-rw-r--r--arch/powerpc/kernel/idle.c89
-rw-r--r--arch/powerpc/kernel/kvm.c7
-rw-r--r--arch/powerpc/kernel/process.c2
-rw-r--r--arch/powerpc/kernel/prom_init.c14
-rw-r--r--arch/powerpc/kernel/ptrace.c1
-rw-r--r--arch/powerpc/kernel/signal_32.c2
-rw-r--r--arch/powerpc/kernel/signal_64.c2
-rw-r--r--arch/powerpc/kernel/smp.c2
-rw-r--r--arch/powerpc/kernel/tm.S2
-rw-r--r--arch/powerpc/kernel/udbg.c6
-rw-r--r--arch/powerpc/kernel/uprobes.c29
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c4
-rw-r--r--arch/powerpc/kvm/e500.h24
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c84
-rw-r--r--arch/powerpc/kvm/e500mc.c7
-rw-r--r--arch/powerpc/mm/hash_utils_64.c22
-rw-r--r--arch/powerpc/mm/init_64.c11
-rw-r--r--arch/powerpc/mm/mem.c35
-rw-r--r--arch/powerpc/mm/mmu_context_hash64.c11
-rw-r--r--arch/powerpc/mm/numa.c10
-rw-r--r--arch/powerpc/mm/pgtable_64.c2
-rw-r--r--arch/powerpc/mm/slb_low.S50
-rw-r--r--arch/powerpc/mm/tlb_hash64.c2
-rw-r--r--arch/powerpc/perf/power7-pmu.c13
-rw-r--r--arch/powerpc/platforms/44x/Kconfig2
-rw-r--r--arch/powerpc/platforms/512x/Kconfig2
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_shared.c5
-rw-r--r--arch/powerpc/platforms/85xx/sgy_cts1000.c6
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype6
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c1
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c12
-rw-r--r--arch/powerpc/platforms/pseries/hvcserver.c5
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c8
-rw-r--r--arch/s390/Kconfig15
-rw-r--r--arch/s390/Makefile10
-rw-r--r--arch/s390/hypfs/hypfs_dbfs.c4
-rw-r--r--arch/s390/hypfs/inode.c1
-rw-r--r--arch/s390/include/asm/bitops.h117
-rw-r--r--arch/s390/include/asm/ccwdev.h3
-rw-r--r--arch/s390/include/asm/cio.h2
-rw-r--r--arch/s390/include/asm/compat.h57
-rw-r--r--arch/s390/include/asm/cpu_mf.h1
-rw-r--r--arch/s390/include/asm/eadm.h6
-rw-r--r--arch/s390/include/asm/elf.h23
-rw-r--r--arch/s390/include/asm/hugetlb.h56
-rw-r--r--arch/s390/include/asm/io.h4
-rw-r--r--arch/s390/include/asm/pci.h1
-rw-r--r--arch/s390/include/asm/pci_debug.h9
-rw-r--r--arch/s390/include/asm/pci_insn.h203
-rw-r--r--arch/s390/include/asm/pci_io.h16
-rw-r--r--arch/s390/include/asm/pgtable.h105
-rw-r--r--arch/s390/include/asm/processor.h3
-rw-r--r--arch/s390/include/asm/ptrace.h6
-rw-r--r--arch/s390/include/asm/syscall.h1
-rw-r--r--arch/s390/include/asm/thread_info.h6
-rw-r--r--arch/s390/include/asm/tlbflush.h2
-rw-r--r--arch/s390/include/uapi/asm/ptrace.h20
-rw-r--r--arch/s390/include/uapi/asm/statfs.h63
-rw-r--r--arch/s390/kernel/Makefile17
-rw-r--r--arch/s390/kernel/asm-offsets.c1
-rw-r--r--arch/s390/kernel/compat_signal.c2
-rw-r--r--arch/s390/kernel/dis.c9
-rw-r--r--arch/s390/kernel/dumpstack.c236
-rw-r--r--arch/s390/kernel/entry.S42
-rw-r--r--arch/s390/kernel/entry.h1
-rw-r--r--arch/s390/kernel/entry64.S48
-rw-r--r--arch/s390/kernel/machine_kexec.c30
-rw-r--r--arch/s390/kernel/process.c32
-rw-r--r--arch/s390/kernel/setup.c11
-rw-r--r--arch/s390/kernel/smp.c18
-rw-r--r--arch/s390/kernel/suspend.c31
-rw-r--r--arch/s390/kernel/swsusp_asm64.S29
-rw-r--r--arch/s390/kernel/traps.c250
-rw-r--r--arch/s390/kernel/vtime.c5
-rw-r--r--arch/s390/kvm/trace.h2
-rw-r--r--arch/s390/lib/uaccess_pt.c83
-rw-r--r--arch/s390/mm/cmm.c8
-rw-r--r--arch/s390/mm/fault.c9
-rw-r--r--arch/s390/mm/hugetlbpage.c2
-rw-r--r--arch/s390/mm/init.c45
-rw-r--r--arch/s390/mm/pageattr.c24
-rw-r--r--arch/s390/mm/pgtable.c235
-rw-r--r--arch/s390/mm/vmem.c15
-rw-r--r--arch/s390/net/bpf_jit_comp.c3
-rw-r--r--arch/s390/oprofile/init.c1
-rw-r--r--arch/s390/pci/Makefile4
-rw-r--r--arch/s390/pci/pci.c153
-rw-r--r--arch/s390/pci/pci_clp.c13
-rw-r--r--arch/s390/pci/pci_debug.c7
-rw-r--r--arch/s390/pci/pci_dma.c9
-rw-r--r--arch/s390/pci/pci_insn.c202
-rw-r--r--arch/s390/pci/pci_msi.c10
-rw-r--r--arch/score/Kconfig2
-rw-r--r--arch/score/kernel/process.c18
-rw-r--r--arch/score/mm/init.c33
-rw-r--r--arch/sh/Kconfig4
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c4
-rw-r--r--arch/sh/drivers/pci/pcie-sh7786.c2
-rw-r--r--arch/sh/include/asm/hugetlb.h1
-rw-r--r--arch/sh/include/asm/thread_info.h2
-rw-r--r--arch/sh/kernel/idle.c101
-rw-r--r--arch/sh/kernel/sh_bios.c4
-rw-r--r--arch/sh/kernel/smp.c2
-rw-r--r--arch/sh/mm/init.c26
-rw-r--r--arch/sparc/Kconfig10
-rw-r--r--arch/sparc/include/asm/Kbuild5
-rw-r--r--arch/sparc/include/asm/cputime.h6
-rw-r--r--arch/sparc/include/asm/emergency-restart.h6
-rw-r--r--arch/sparc/include/asm/hugetlb.h1
-rw-r--r--arch/sparc/include/asm/mutex.h9
-rw-r--r--arch/sparc/include/asm/pgtable_64.h1
-rw-r--r--arch/sparc/include/asm/serial.h6
-rw-r--r--arch/sparc/include/asm/smp_32.h5
-rw-r--r--arch/sparc/include/asm/spitfire.h1
-rw-r--r--arch/sparc/include/asm/switch_to_64.h3
-rw-r--r--arch/sparc/include/asm/thread_info_32.h2
-rw-r--r--arch/sparc/include/asm/thread_info_64.h2
-rw-r--r--arch/sparc/include/asm/tlbflush_64.h37
-rw-r--r--arch/sparc/include/uapi/asm/Kbuild1
-rw-r--r--arch/sparc/include/uapi/asm/types.h17
-rw-r--r--arch/sparc/kernel/cpu.c6
-rw-r--r--arch/sparc/kernel/head_64.S25
-rw-r--r--arch/sparc/kernel/hvtramp.S3
-rw-r--r--arch/sparc/kernel/leon_pci_grpci2.c41
-rw-r--r--arch/sparc/kernel/process_32.c21
-rw-r--r--arch/sparc/kernel/process_64.c49
-rw-r--r--arch/sparc/kernel/smp_32.c2
-rw-r--r--arch/sparc/kernel/smp_64.c43
-rw-r--r--arch/sparc/kernel/trampoline_64.S3
-rw-r--r--arch/sparc/lib/bitext.c6
-rw-r--r--arch/sparc/mm/init_32.c12
-rw-r--r--arch/sparc/mm/init_64.c7
-rw-r--r--arch/sparc/mm/iommu.c2
-rw-r--r--arch/sparc/mm/srmmu.c4
-rw-r--r--arch/sparc/mm/tlb.c39
-rw-r--r--arch/sparc/mm/tsb.c57
-rw-r--r--arch/sparc/mm/ultra.S119
-rw-r--r--arch/tile/Kconfig2
-rw-r--r--arch/tile/configs/tilegx_defconfig1
-rw-r--r--arch/tile/configs/tilepro_defconfig1
-rw-r--r--arch/tile/include/asm/compat.h3
-rw-r--r--arch/tile/include/asm/hugetlb.h1
-rw-r--r--arch/tile/include/asm/irqflags.h10
-rw-r--r--arch/tile/include/asm/thread_info.h2
-rw-r--r--arch/tile/kernel/compat.c42
-rw-r--r--arch/tile/kernel/early_printk.c27
-rw-r--r--arch/tile/kernel/process.c65
-rw-r--r--arch/tile/kernel/setup.c25
-rw-r--r--arch/tile/kernel/smpboot.c4
-rw-r--r--arch/tile/mm/pgtable.c7
-rw-r--r--arch/um/drivers/chan.h2
-rw-r--r--arch/um/drivers/chan_kern.c10
-rw-r--r--arch/um/drivers/chan_user.c12
-rw-r--r--arch/um/drivers/chan_user.h6
-rw-r--r--arch/um/drivers/line.c50
-rw-r--r--arch/um/drivers/net_kern.c2
-rw-r--r--arch/um/drivers/ssl.c1
-rw-r--r--arch/um/drivers/stdio_console.c1
-rw-r--r--arch/um/kernel/early_printk.c8
-rw-r--r--arch/um/kernel/mem.c26
-rw-r--r--arch/um/kernel/process.c27
-rw-r--r--arch/um/os-Linux/signal.c2
-rw-r--r--arch/um/os-Linux/start_up.c2
-rw-r--r--arch/unicore32/Kconfig2
-rw-r--r--arch/unicore32/kernel/early_printk.c12
-rw-r--r--arch/unicore32/kernel/process.c21
-rw-r--r--arch/unicore32/mm/init.c31
-rw-r--r--arch/unicore32/mm/ioremap.c17
-rw-r--r--arch/x86/Kconfig6
-rw-r--r--arch/x86/boot/compressed/Makefile5
-rw-r--r--arch/x86/boot/compressed/eboot.c47
-rw-r--r--arch/x86/include/asm/bootparam_utils.h20
-rw-r--r--arch/x86/include/asm/cpufeature.h2
-rw-r--r--arch/x86/include/asm/efi.h7
-rw-r--r--arch/x86/include/asm/hugetlb.h1
-rw-r--r--arch/x86/include/asm/kprobes.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h4
-rw-r--r--arch/x86/include/asm/paravirt.h5
-rw-r--r--arch/x86/include/asm/paravirt_types.h2
-rw-r--r--arch/x86/include/asm/perf_event_p4.h62
-rw-r--r--arch/x86/include/asm/syscall.h4
-rw-r--r--arch/x86/include/asm/thread_info.h2
-rw-r--r--arch/x86/include/asm/tlb.h2
-rw-r--r--arch/x86/include/asm/uprobes.h1
-rw-r--r--arch/x86/include/asm/xen/hypercall.h4
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h1
-rw-r--r--arch/x86/include/uapi/asm/msr-index.h6
-rw-r--r--arch/x86/kernel/cpu/Makefile6
-rw-r--r--arch/x86/kernel/cpu/mkcapflags.pl48
-rw-r--r--arch/x86/kernel/cpu/mkcapflags.sh41
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c18
-rw-r--r--arch/x86/kernel/cpu/perf_event.c89
-rw-r--r--arch/x86/kernel/cpu/perf_event.h56
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c138
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_uncore.c547
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c62
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c195
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c876
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.h64
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c9
-rw-r--r--arch/x86/kernel/early_printk.c21
-rw-r--r--arch/x86/kernel/kprobes/core.c11
-rw-r--r--arch/x86/kernel/microcode_core_early.c38
-rw-r--r--arch/x86/kernel/microcode_intel_early.c30
-rw-r--r--arch/x86/kernel/paravirt.c25
-rw-r--r--arch/x86/kernel/process.c105
-rw-r--r--arch/x86/kernel/setup.c55
-rw-r--r--arch/x86/kernel/smpboot.c5
-rw-r--r--arch/x86/kernel/uprobes.c29
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/x86.c69
-rw-r--r--arch/x86/lguest/boot.c1
-rw-r--r--arch/x86/lib/usercopy_64.c4
-rw-r--r--arch/x86/mm/fault.c6
-rw-r--r--arch/x86/mm/highmem_32.c1
-rw-r--r--arch/x86/mm/init.c10
-rw-r--r--arch/x86/mm/init_32.c10
-rw-r--r--arch/x86/mm/init_64.c73
-rw-r--r--arch/x86/mm/ioremap.c7
-rw-r--r--arch/x86/mm/numa.c9
-rw-r--r--arch/x86/mm/pageattr-test.c7
-rw-r--r--arch/x86/mm/pageattr.c12
-rw-r--r--arch/x86/mm/pat.c7
-rw-r--r--arch/x86/mm/pgtable.c7
-rw-r--r--arch/x86/pci/common.c11
-rw-r--r--arch/x86/pci/xen.c6
-rw-r--r--arch/x86/platform/efi/efi.c168
-rw-r--r--arch/x86/power/cpu.c2
-rw-r--r--arch/x86/xen/enlighten.c57
-rw-r--r--arch/x86/xen/mmu.c16
-rw-r--r--arch/x86/xen/smp.c23
-rw-r--r--arch/x86/xen/spinlock.c25
-rw-r--r--arch/x86/xen/time.c13
-rw-r--r--arch/xtensa/Kconfig2
-rw-r--r--arch/xtensa/kernel/process.c14
-rw-r--r--arch/xtensa/mm/init.c21
653 files changed, 7553 insertions(+), 5871 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 5a1779c9394..1455579791e 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -319,13 +319,6 @@ config ARCH_WANT_OLD_COMPAT_IPC
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
bool
-config HAVE_VIRT_TO_BUS
- bool
- help
- An architecture should select this if it implements the
- deprecated interface virt_to_bus(). All new architectures
- should probably not select this.
-
config HAVE_ARCH_SECCOMP_FILTER
bool
help
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 5833aa44148..8a33ba01301 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -9,7 +9,7 @@ config ALPHA
select HAVE_PERF_EVENTS
select HAVE_DMA_ATTRS
select HAVE_GENERIC_HARDIRQS
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select AUTO_IRQ_AFFINITY if SMP
select GENERIC_IRQ_SHOW
diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile
index 4759fe751aa..2cc3cc519c5 100644
--- a/arch/alpha/Makefile
+++ b/arch/alpha/Makefile
@@ -12,7 +12,7 @@ NM := $(NM) -B
LDFLAGS_vmlinux := -static -N #-relax
CHECKFLAGS += -D__alpha__ -m64
-cflags-y := -pipe -mno-fp-regs -ffixed-8 -msmall-data
+cflags-y := -pipe -mno-fp-regs -ffixed-8
cflags-y += $(call cc-option, -fno-jump-tables)
cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4
diff --git a/arch/alpha/boot/head.S b/arch/alpha/boot/head.S
index b06812bcac8..8efb26686d4 100644
--- a/arch/alpha/boot/head.S
+++ b/arch/alpha/boot/head.S
@@ -4,6 +4,7 @@
* initial bootloader stuff..
*/
+#include <asm/pal.h>
.set noreorder
.globl __start
diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h
index 46cefbd50e7..bae97eb19d2 100644
--- a/arch/alpha/include/asm/floppy.h
+++ b/arch/alpha/include/asm/floppy.h
@@ -26,7 +26,7 @@
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
#define fd_cacheflush(addr,size) /* nothing */
#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\
- IRQF_DISABLED, "floppy", NULL)
+ 0, "floppy", NULL)
#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
#ifdef CONFIG_PCI
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 1f8c72959fb..52cd2a4a3ff 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -95,8 +95,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
#define TS_POLLING 0x0010 /* idle task polling need_resched,
skip sending interrupt */
-#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
-
#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK 1
static inline void set_restore_sigmask(void)
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 2872accd221..7b2be251c30 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -117,13 +117,6 @@ handle_irq(int irq)
return;
}
- /*
- * From here we must proceed with IPL_MAX. Note that we do not
- * explicitly enable interrupts afterwards - some MILO PALcode
- * (namely LX164 one) seems to have severe problems with RTI
- * at IPL 0.
- */
- local_irq_disable();
irq_enter();
generic_handle_irq_desc(irq, desc);
irq_exit();
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 772ddfdb71a..f433fc11877 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -45,6 +45,14 @@ do_entInt(unsigned long type, unsigned long vector,
unsigned long la_ptr, struct pt_regs *regs)
{
struct pt_regs *old_regs;
+
+ /*
+ * Disable interrupts during IRQ handling.
+ * Note that there is no matching local_irq_enable() due to
+ * severe problems with RTI at IPL0 and some MILO PALcode
+ * (namely LX164).
+ */
+ local_irq_disable();
switch (type) {
case 0:
#ifdef CONFIG_SMP
@@ -62,7 +70,6 @@ do_entInt(unsigned long type, unsigned long vector,
{
long cpu;
- local_irq_disable();
smp_percpu_timer_interrupt(regs);
cpu = smp_processor_id();
if (cpu != boot_cpuid) {
@@ -222,7 +229,6 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr,
struct irqaction timer_irqaction = {
.handler = timer_interrupt,
- .flags = IRQF_DISABLED,
.name = "timer",
};
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 63d27fb9b02..a3fd8a29cca 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -46,25 +46,6 @@
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);
-void
-cpu_idle(void)
-{
- current_thread_info()->status |= TS_POLLING;
-
- while (1) {
- /* FIXME -- EV6 and LCA45 know how to power down
- the CPU. */
-
- rcu_idle_enter();
- while (!need_resched())
- cpu_relax();
-
- rcu_idle_exit();
- schedule_preempt_disabled();
- }
-}
-
-
struct halt_info {
int mode;
char *restart_cmd;
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 9603bc234b4..7b60834fb4b 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -167,8 +167,7 @@ smp_callin(void)
cpuid, current, current->active_mm));
preempt_disable();
- /* Do nothing. */
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
}
/* Wait until hwrpb->txrdy is clear for cpu. Return -1 on timeout. */
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 4d4c046f708..1d4aabfcf9a 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -185,9 +185,12 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr)
mb();
}
-extern void free_reserved_mem(void *, void *);
extern void pcibios_claim_one_bus(struct pci_bus *);
+static struct resource irongate_io = {
+ .name = "Irongate PCI IO",
+ .flags = IORESOURCE_IO,
+};
static struct resource irongate_mem = {
.name = "Irongate PCI MEM",
.flags = IORESOURCE_MEM,
@@ -209,6 +212,7 @@ nautilus_init_pci(void)
irongate = pci_get_bus_and_slot(0, 0);
bus->self = irongate;
+ bus->resource[0] = &irongate_io;
bus->resource[1] = &irongate_mem;
pci_bus_size_bridges(bus);
@@ -234,8 +238,8 @@ nautilus_init_pci(void)
if (pci_mem < memtop)
memtop = pci_mem;
if (memtop > alpha_mv.min_mem_address) {
- free_reserved_mem(__va(alpha_mv.min_mem_address),
- __va(memtop));
+ free_reserved_area((unsigned long)__va(alpha_mv.min_mem_address),
+ (unsigned long)__va(memtop), 0, NULL);
printk("nautilus_init_pci: %ldk freed\n",
(memtop - alpha_mv.min_mem_address) >> 10);
}
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 5cf4a481b8c..a53cf03f49d 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -280,15 +280,15 @@ titan_late_init(void)
* all reported to the kernel as machine checks, so the handler
* is a nop so it can be called to count the individual events.
*/
- titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED,
+ titan_request_irq(63+16, titan_intr_nop, 0,
"CChip Error", NULL);
- titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED,
+ titan_request_irq(62+16, titan_intr_nop, 0,
"PChip 0 H_Error", NULL);
- titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED,
+ titan_request_irq(61+16, titan_intr_nop, 0,
"PChip 1 H_Error", NULL);
- titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED,
+ titan_request_irq(60+16, titan_intr_nop, 0,
"PChip 0 C_Error", NULL);
- titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED,
+ titan_request_irq(59+16, titan_intr_nop, 0,
"PChip 1 C_Error", NULL);
/*
@@ -348,9 +348,9 @@ privateer_init_pci(void)
* Hook a couple of extra err interrupts that the
* common titan code won't.
*/
- titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED,
+ titan_request_irq(53+16, titan_intr_nop, 0,
"NMI", NULL);
- titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED,
+ titan_request_irq(50+16, titan_intr_nop, 0,
"Temperature Warning", NULL);
/*
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 1ad6ca74bed..0ba85ee4a46 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -31,6 +31,7 @@
#include <asm/console.h>
#include <asm/tlb.h>
#include <asm/setup.h>
+#include <asm/sections.h>
extern void die_if_kernel(char *,struct pt_regs *,long);
@@ -281,8 +282,6 @@ printk_memory_info(void)
{
unsigned long codesize, reservedpages, datasize, initsize, tmp;
extern int page_is_ram(unsigned long) __init;
- extern char _text, _etext, _data, _edata;
- extern char __init_begin, __init_end;
/* printk all informations */
reservedpages = 0;
@@ -318,32 +317,15 @@ mem_init(void)
#endif /* CONFIG_DISCONTIGMEM */
void
-free_reserved_mem(void *start, void *end)
-{
- void *__start = start;
- for (; __start < end; __start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(__start));
- init_page_count(virt_to_page(__start));
- free_page((long)__start);
- totalram_pages++;
- }
-}
-
-void
free_initmem(void)
{
- extern char __init_begin, __init_end;
-
- free_reserved_mem(&__init_begin, &__init_end);
- printk ("Freeing unused kernel memory: %ldk freed\n",
- (&__init_end - &__init_begin) >> 10);
+ free_initmem_default(0);
}
#ifdef CONFIG_BLK_DEV_INITRD
void
free_initrd_mem(unsigned long start, unsigned long end)
{
- free_reserved_mem((void *)start, (void *)end);
- printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+ free_reserved_area(start, end, 0, "initrd");
}
#endif
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index 3973ae39577..33885048fa3 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -17,6 +17,7 @@
#include <asm/hwrpb.h>
#include <asm/pgalloc.h>
+#include <asm/sections.h>
pg_data_t node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);
@@ -325,8 +326,6 @@ void __init mem_init(void)
{
unsigned long codesize, reservedpages, datasize, initsize, pfn;
extern int page_is_ram(unsigned long) __init;
- extern char _text, _etext, _data, _edata;
- extern char __init_begin, __init_end;
unsigned long nid, i;
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
index 31f77aec082..45b8e0cea17 100644
--- a/arch/arc/include/asm/dma-mapping.h
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -126,7 +126,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg,
int i;
for_each_sg(sg, s, nents, i)
- sg->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+ s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
s->length, dir);
return nents;
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
index f4c8d36ebec..a2628285768 100644
--- a/arch/arc/include/asm/elf.h
+++ b/arch/arc/include/asm/elf.h
@@ -72,7 +72,4 @@ extern int elf_check_arch(const struct elf32_hdr *);
*/
#define ELF_PLATFORM (NULL)
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
#endif
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index 23daa326fc9..eb2ae53187d 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -415,7 +415,7 @@
*-------------------------------------------------------------*/
.macro SAVE_ALL_EXCEPTION marker
- st \marker, [sp, 8]
+ st \marker, [sp, 8] /* orig_r8 */
st r0, [sp, 4] /* orig_r0, needed only for sys calls */
/* Restore r9 used to code the early prologue */
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index ccd84806b62..eac07166820 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -39,7 +39,7 @@ static inline long arch_local_irq_save(void)
" flag.nz %0 \n"
: "=r"(temp), "=r"(flags)
: "n"((STATUS_E1_MASK | STATUS_E2_MASK))
- : "cc");
+ : "memory", "cc");
return flags;
}
@@ -53,7 +53,8 @@ static inline void arch_local_irq_restore(unsigned long flags)
__asm__ __volatile__(
" flag %0 \n"
:
- : "r"(flags));
+ : "r"(flags)
+ : "memory");
}
/*
@@ -73,7 +74,8 @@ static inline void arch_local_irq_disable(void)
" and %0, %0, %1 \n"
" flag %0 \n"
: "=&r"(temp)
- : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)));
+ : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
+ : "memory");
}
/*
@@ -85,7 +87,9 @@ static inline long arch_local_save_flags(void)
__asm__ __volatile__(
" lr %0, [status32] \n"
- : "=&r"(temp));
+ : "=&r"(temp)
+ :
+ : "memory");
return temp;
}
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
index f3c4934f0ca..4930957ca3d 100644
--- a/arch/arc/include/asm/kgdb.h
+++ b/arch/arc/include/asm/kgdb.h
@@ -13,7 +13,7 @@
#ifdef CONFIG_KGDB
-#include <asm/user.h>
+#include <asm/ptrace.h>
/* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
* register API yet */
@@ -53,9 +53,7 @@ enum arc700_linux_regnums {
};
#else
-static inline void kgdb_trap(struct pt_regs *regs, int param)
-{
-}
+#define kgdb_trap(regs, param)
#endif
#endif /* __ARC_KGDB_H__ */
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 8ae783d20a8..6179de7e07c 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -123,7 +123,7 @@ static inline long regs_return_value(struct pt_regs *regs)
#define orig_r8_IS_SCALL 0x0001
#define orig_r8_IS_SCALL_RESTARTED 0x0002
#define orig_r8_IS_BRKPT 0x0004
-#define orig_r8_IS_EXCPN 0x0004
+#define orig_r8_IS_EXCPN 0x0008
#define orig_r8_IS_IRQ1 0x0010
#define orig_r8_IS_IRQ2 0x0020
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
index e53a5340ba4..dd785befe7f 100644
--- a/arch/arc/include/asm/syscalls.h
+++ b/arch/arc/include/asm/syscalls.h
@@ -16,8 +16,6 @@
#include <linux/types.h>
int sys_clone_wrapper(int, int, int, int, int);
-int sys_fork_wrapper(void);
-int sys_vfork_wrapper(void);
int sys_cacheflush(uint32_t, uint32_t uint32_t);
int sys_arc_settls(void *);
int sys_arc_gettls(void);
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 6afa4f70207..30333cec0fe 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -28,14 +28,14 @@
*/
struct user_regs_struct {
- struct scratch {
+ struct {
long pad;
long bta, lp_start, lp_end, lp_count;
long status32, ret, blink, fp, gp;
long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
long sp;
} scratch;
- struct callee {
+ struct {
long pad;
long r25, r24, r23, r22, r21, r20;
long r19, r18, r17, r16, r15, r14, r13;
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index ef6800ba2f0..91eeab81f52 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -452,7 +452,7 @@ tracesys:
; using ERET won't work since next-PC has already committed
lr r12, [efa]
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
- st r12, [r11, THREAD_FAULT_ADDR]
+ st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address
; PRE Sys Call Ptrace hook
mov r0, sp ; pt_regs needed
@@ -792,31 +792,6 @@ ARC_EXIT ret_from_fork
;################### Special Sys Call Wrappers ##########################
-; TBD: call do_fork directly from here
-ARC_ENTRY sys_fork_wrapper
- SAVE_CALLEE_SAVED_USER
- bl @sys_fork
- DISCARD_CALLEE_SAVED_USER
-
- GET_CURR_THR_INFO_FLAGS r10
- btst r10, TIF_SYSCALL_TRACE
- bnz tracesys_exit
-
- b ret_from_system_call
-ARC_EXIT sys_fork_wrapper
-
-ARC_ENTRY sys_vfork_wrapper
- SAVE_CALLEE_SAVED_USER
- bl @sys_vfork
- DISCARD_CALLEE_SAVED_USER
-
- GET_CURR_THR_INFO_FLAGS r10
- btst r10, TIF_SYSCALL_TRACE
- bnz tracesys_exit
-
- b ret_from_system_call
-ARC_EXIT sys_vfork_wrapper
-
ARC_ENTRY sys_clone_wrapper
SAVE_CALLEE_SAVED_USER
bl @sys_clone
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
index 2888ba5be47..52bdc83c149 100644
--- a/arch/arc/kernel/kgdb.c
+++ b/arch/arc/kernel/kgdb.c
@@ -9,6 +9,7 @@
*/
#include <linux/kgdb.h>
+#include <linux/sched.h>
#include <asm/disasm.h>
#include <asm/cacheflush.h>
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 0a7531d9929..cad66851e0c 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -41,37 +41,12 @@ SYSCALL_DEFINE0(arc_gettls)
return task_thread_info(current)->thr_ptr;
}
-static inline void arch_idle(void)
+void arch_cpu_idle(void)
{
/* sleep, but enable all interrupts before committing */
__asm__("sleep 0x3");
}
-void cpu_idle(void)
-{
- /* Since we SLEEP in idle loop, TIF_POLLING_NRFLAG can't be set */
-
- /* endless idle loop with no priority at all */
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
-
-doze:
- local_irq_disable();
- if (!need_resched()) {
- arch_idle();
- goto doze;
- } else {
- local_irq_enable();
- }
-
- rcu_idle_exit();
- tick_nohz_idle_exit();
-
- schedule_preempt_disabled();
- }
-}
-
asmlinkage void ret_from_fork(void);
/* Layout of Child kernel mode stack as setup at the end of this function is
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index dc0f968dae0..2d95ac07df7 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -232,10 +232,8 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
n += scnprintf(buf + n, len - n, "\n");
-#ifdef _ASM_GENERIC_UNISTD_H
n += scnprintf(buf + n, len - n,
- "OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n");
-#endif
+ "OS ABI [v3]\t: no-legacy-syscalls\n");
return buf;
}
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 3af3e06dcf0..5c7fd603d21 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -141,7 +141,7 @@ void __cpuinit start_kernel_secondary(void)
local_irq_enable();
preempt_disable();
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
}
/*
diff --git a/arch/arc/kernel/sys.c b/arch/arc/kernel/sys.c
index f6bdd07583f..9d6c1ca26af 100644
--- a/arch/arc/kernel/sys.c
+++ b/arch/arc/kernel/sys.c
@@ -6,8 +6,6 @@
#include <asm/syscalls.h>
#define sys_clone sys_clone_wrapper
-#define sys_fork sys_fork_wrapper
-#define sys_vfork sys_vfork_wrapper
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index caf797de23f..727d4794ea0 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -144,37 +144,18 @@ void __init mem_init(void)
PAGES_TO_KB(reserved_pages));
}
-static void __init free_init_pages(const char *what, unsigned long begin,
- unsigned long end)
-{
- unsigned long addr;
-
- pr_info("Freeing %s: %ldk [%lx] to [%lx]\n",
- what, TO_KB(end - begin), begin, end);
-
- /* need to check that the page we free is not a partial page */
- for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
-}
-
/*
* free_initmem: Free all the __init memory.
*/
void __init_refok free_initmem(void)
{
- free_init_pages("unused kernel memory",
- (unsigned long)__init_begin,
- (unsigned long)__init_end);
+ free_initmem_default(0);
}
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
- free_init_pages("initrd memory", start, end);
+ free_reserved_area(start, end, 0, "initrd");
}
#endif
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5b714695b01..a39e3214ea3 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -15,6 +15,7 @@ config ARM
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_IDLE_POLL_SETUP
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select HARDIRQS_SW_RESEND
@@ -49,7 +50,6 @@ config ARM
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
- select HAVE_VIRT_TO_BUS
select KTIME_SCALAR
select PERF_USE_VMALLOC
select RTC_LIB
@@ -550,13 +550,14 @@ config ARCH_IXP4XX
select GENERIC_CLOCKEVENTS
select MIGHT_HAVE_PCI
select NEED_MACH_IO_H
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ select USB_EHCI_BIG_ENDIAN_DESC
help
Support for Intel's IXP4XX (XScale) family of processors.
config ARCH_DOVE
bool "Marvell Dove"
select ARCH_REQUIRE_GPIOLIB
- select COMMON_CLK_DOVE
select CPU_V7
select GENERIC_CLOCKEVENTS
select MIGHT_HAVE_PCI
@@ -744,6 +745,7 @@ config ARCH_RPC
select NEED_MACH_IO_H
select NEED_MACH_MEMORY_H
select NO_IOPORT
+ select VIRT_TO_BUS
help
On the Acorn Risc-PC, Linux can support the internal IDE disk and
CD-ROM interface, serial and parallel port, and the floppy drive.
@@ -879,6 +881,7 @@ config ARCH_SHARK
select ISA_DMA
select NEED_MACH_MEMORY_H
select PCI
+ select VIRT_TO_BUS
select ZONE_DMA
help
Support for the StrongARM based Digital DNARD machine, also known
@@ -1006,12 +1009,12 @@ config ARCH_MULTI_V4_V5
bool
config ARCH_MULTI_V6
- bool "ARMv6 based platforms (ARM11, Scorpion, ...)"
+ bool "ARMv6 based platforms (ARM11)"
select ARCH_MULTI_V6_V7
select CPU_V6
config ARCH_MULTI_V7
- bool "ARMv7 based platforms (Cortex-A, PJ4, Krait)"
+ bool "ARMv7 based platforms (Cortex-A, PJ4, Scorpion, Krait)"
default y
select ARCH_MULTI_V6_V7
select ARCH_VEXPRESS
@@ -1183,9 +1186,9 @@ config ARM_NR_BANKS
default 8
config IWMMXT
- bool "Enable iWMMXt support"
+ bool "Enable iWMMXt support" if !CPU_PJ4
depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4
- default y if PXA27x || PXA3xx || ARCH_MMP
+ default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4
help
Enable support for iWMMXt context switching at run time if
running on a CPU that supports it.
@@ -1439,6 +1442,16 @@ config ARM_ERRATA_775420
to deadlock. This workaround puts DSB before executing ISB if
an abort may occur on cache maintenance.
+config ARM_ERRATA_798181
+ bool "ARM errata: TLBI/DSB failure on Cortex-A15"
+ depends on CPU_V7 && SMP
+ help
+ On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not
+ adequately shooting down all use of the old entries. This
+ option enables the Linux kernel workaround for this erratum
+ which sends an IPI to the CPUs that are running the same ASID
+ as the one being invalidated.
+
endmenu
source "arch/arm/common/Kconfig"
@@ -1462,10 +1475,6 @@ config ISA_DMA
bool
select ISA_DMA_API
-config ARCH_NO_VIRT_TO_BUS
- def_bool y
- depends on !ARCH_RPC && !ARCH_NETWINDER && !ARCH_SHARK
-
# Select ISA DMA interface
config ISA_DMA_API
bool
@@ -1657,13 +1666,16 @@ config LOCAL_TIMERS
accounting to be spread across the timer interval, preventing a
"thundering herd" at every timer tick.
+# The GPIO number here must be sorted by descending number. In case of
+# a multiplatform kernel, we just want the highest value required by the
+# selected platforms.
config ARCH_NR_GPIO
int
default 1024 if ARCH_SHMOBILE || ARCH_TEGRA
- default 355 if ARCH_U8500
- default 264 if MACH_H4700
default 512 if SOC_OMAP5
+ default 355 if ARCH_U8500
default 288 if ARCH_VT8500 || ARCH_SUNXI
+ default 264 if MACH_H4700
default 0
help
Maximum number of GPIOs in the system.
@@ -1887,8 +1899,9 @@ config XEN_DOM0
config XEN
bool "Xen guest support on ARM (EXPERIMENTAL)"
- depends on ARM && OF
+ depends on ARM && AEABI && OF
depends on CPU_V7 && !CPU_V6
+ depends on !GENERIC_ATOMIC64
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index acddddac7ee..9b31f4311ea 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -492,9 +492,10 @@ config DEBUG_IMX_UART_PORT
DEBUG_IMX31_UART || \
DEBUG_IMX35_UART || \
DEBUG_IMX51_UART || \
- DEBUG_IMX50_IMX53_UART || \
+ DEBUG_IMX53_UART || \
DEBUG_IMX6Q_UART
default 1
+ depends on ARCH_MXC
help
Choose UART port on which kernel low-level debug messages
should be output.
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 71768b8a1ab..84aa2caf07e 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -115,4 +115,4 @@ i:
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
$(obj)/Image System.map "$(INSTALL_PATH)"
-subdir- := bootp compressed
+subdir- := bootp compressed dts
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 5cad8a6dadb..afed28e37ea 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -120,7 +120,7 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif
-ccflags-y := -fpic -fno-builtin -I$(obj)
+ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
asflags-y := -Wa,-march=all -DZIMAGE
# Supply kernel BSS size to the decompressor via a linker symbol.
diff --git a/arch/arm/boot/dts/armada-370-mirabox.dts b/arch/arm/boot/dts/armada-370-mirabox.dts
index dd0c57dd9f3..3234875824d 100644
--- a/arch/arm/boot/dts/armada-370-mirabox.dts
+++ b/arch/arm/boot/dts/armada-370-mirabox.dts
@@ -54,7 +54,7 @@
};
mvsdio@d00d4000 {
- pinctrl-0 = <&sdio_pins2>;
+ pinctrl-0 = <&sdio_pins3>;
pinctrl-names = "default";
status = "okay";
/*
diff --git a/arch/arm/boot/dts/armada-370-rd.dts b/arch/arm/boot/dts/armada-370-rd.dts
index f8e4855bc9a..070bba4f258 100644
--- a/arch/arm/boot/dts/armada-370-rd.dts
+++ b/arch/arm/boot/dts/armada-370-rd.dts
@@ -64,5 +64,13 @@
status = "okay";
/* No CD or WP GPIOs */
};
+
+ usb@d0050000 {
+ status = "okay";
+ };
+
+ usb@d0051000 {
+ status = "okay";
+ };
};
};
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index 6f1acc75e15..5b708208b60 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -31,7 +31,6 @@
mpic: interrupt-controller@d0020000 {
compatible = "marvell,mpic";
#interrupt-cells = <1>;
- #address-cells = <1>;
#size-cells = <1>;
interrupt-controller;
};
@@ -54,7 +53,7 @@
reg = <0xd0012000 0x100>;
reg-shift = <2>;
interrupts = <41>;
- reg-io-width = <4>;
+ reg-io-width = <1>;
status = "disabled";
};
serial@d0012100 {
@@ -62,7 +61,7 @@
reg = <0xd0012100 0x100>;
reg-shift = <2>;
interrupts = <42>;
- reg-io-width = <4>;
+ reg-io-width = <1>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
index 8188d138020..a195debb67d 100644
--- a/arch/arm/boot/dts/armada-370.dtsi
+++ b/arch/arm/boot/dts/armada-370.dtsi
@@ -59,6 +59,12 @@
"mpp50", "mpp51", "mpp52";
marvell,function = "sd0";
};
+
+ sdio_pins3: sdio-pins3 {
+ marvell,pins = "mpp48", "mpp49", "mpp50",
+ "mpp51", "mpp52", "mpp53";
+ marvell,function = "sd0";
+ };
};
gpio0: gpio@d0018100 {
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
index 1443949c165..ca00d8326c8 100644
--- a/arch/arm/boot/dts/armada-xp.dtsi
+++ b/arch/arm/boot/dts/armada-xp.dtsi
@@ -46,7 +46,7 @@
reg = <0xd0012200 0x100>;
reg-shift = <2>;
interrupts = <43>;
- reg-io-width = <4>;
+ reg-io-width = <1>;
status = "disabled";
};
serial@d0012300 {
@@ -54,7 +54,7 @@
reg = <0xd0012300 0x100>;
reg-shift = <2>;
interrupts = <44>;
- reg-io-width = <4>;
+ reg-io-width = <1>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index cb7bcc51608..39253b9aedd 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -322,6 +322,24 @@
};
};
+ spi0 {
+ pinctrl_spi0: spi0-0 {
+ atmel,pins =
+ <0 0 0x1 0x0 /* PA0 periph A SPI0_MISO pin */
+ 0 1 0x1 0x0 /* PA1 periph A SPI0_MOSI pin */
+ 0 2 0x1 0x0>; /* PA2 periph A SPI0_SPCK pin */
+ };
+ };
+
+ spi1 {
+ pinctrl_spi1: spi1-0 {
+ atmel,pins =
+ <1 0 0x1 0x0 /* PB0 periph A SPI1_MISO pin */
+ 1 1 0x1 0x0 /* PB1 periph A SPI1_MOSI pin */
+ 1 2 0x1 0x0>; /* PB2 periph A SPI1_SPCK pin */
+ };
+ };
+
pioA: gpio@fffff400 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x200>;
@@ -471,6 +489,28 @@
status = "disabled";
};
+ spi0: spi@fffc8000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xfffc8000 0x200>;
+ interrupts = <12 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi0>;
+ status = "disabled";
+ };
+
+ spi1: spi@fffcc000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xfffcc000 0x200>;
+ interrupts = <13 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1>;
+ status = "disabled";
+ };
+
adc0: adc@fffe0000 {
compatible = "atmel,at91sam9260-adc";
reg = <0xfffe0000 0x100>;
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 271d4de026e..94b58ab2cc0 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -303,6 +303,24 @@
};
};
+ spi0 {
+ pinctrl_spi0: spi0-0 {
+ atmel,pins =
+ <0 0 0x2 0x0 /* PA0 periph B SPI0_MISO pin */
+ 0 1 0x2 0x0 /* PA1 periph B SPI0_MOSI pin */
+ 0 2 0x2 0x0>; /* PA2 periph B SPI0_SPCK pin */
+ };
+ };
+
+ spi1 {
+ pinctrl_spi1: spi1-0 {
+ atmel,pins =
+ <1 12 0x1 0x0 /* PB12 periph A SPI1_MISO pin */
+ 1 13 0x1 0x0 /* PB13 periph A SPI1_MOSI pin */
+ 1 14 0x1 0x0>; /* PB14 periph A SPI1_SPCK pin */
+ };
+ };
+
pioA: gpio@fffff200 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff200 0x200>;
@@ -462,6 +480,28 @@
reg = <0xfffffd40 0x10>;
status = "disabled";
};
+
+ spi0: spi@fffa4000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xfffa4000 0x200>;
+ interrupts = <14 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi0>;
+ status = "disabled";
+ };
+
+ spi1: spi@fffa8000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xfffa8000 0x200>;
+ interrupts = <15 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1>;
+ status = "disabled";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9263ek.dts b/arch/arm/boot/dts/at91sam9263ek.dts
index 1eb08728f52..a14e424b2e8 100644
--- a/arch/arm/boot/dts/at91sam9263ek.dts
+++ b/arch/arm/boot/dts/at91sam9263ek.dts
@@ -79,6 +79,16 @@
};
};
};
+
+ spi0: spi@fffa4000 {
+ status = "okay";
+ cs-gpios = <&pioA 5 0>, <0>, <0>, <0>;
+ mtd_dataflash@0 {
+ compatible = "atmel,at45", "atmel,dataflash";
+ spi-max-frequency = <50000000>;
+ reg = <0>;
+ };
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index da15e83e7f1..23d1f468f27 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -96,6 +96,16 @@
status = "okay";
pinctrl-0 = <&pinctrl_ssc0_tx>;
};
+
+ spi0: spi@fffc8000 {
+ status = "okay";
+ cs-gpios = <0>, <&pioC 11 0>, <0>, <0>;
+ mtd_dataflash@0 {
+ compatible = "atmel,at45", "atmel,dataflash";
+ spi-max-frequency = <50000000>;
+ reg = <1>;
+ };
+ };
};
nand0: nand@40000000 {
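Note the chip-select wiring in this board hunk: in the generic SPI binding the child's reg selects the chip-select index and the matching cs-gpios entry names the GPIO that drives it, with <0> acting as a placeholder for chip selects that have no GPIO listed. So the dataflash above, despite the mtd_dataflash@0 node name, sits on chip select 1 behind PC11. A small sketch of that pairing (just the lookup logic, not the atmel_spi driver):

#include <stdio.h>

struct cs_gpio { const char *bank; int pin; int valid; };

int main(void)
{
        /* cs-gpios = <0>, <&pioC 11 0>, <0>, <0>; from the hunk above */
        struct cs_gpio cs[4] = {
                { 0, 0, 0 }, { "PIOC", 11, 1 }, { 0, 0, 0 }, { 0, 0, 0 },
        };
        int reg = 1;    /* the child's reg = <1>; */

        if (cs[reg].valid)
                printf("chip select %d is driven by %s pin %d\n",
                       reg, cs[reg].bank, cs[reg].pin);
        else
                printf("chip select %d has no GPIO listed\n", reg);
        return 0;
}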
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index 6b1d4cab24c..cfdf429578b 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -322,6 +322,24 @@
};
};
+ spi0 {
+ pinctrl_spi0: spi0-0 {
+ atmel,pins =
+ <1 0 0x1 0x0 /* PB0 periph A SPI0_MISO pin */
+ 1 1 0x1 0x0 /* PB1 periph A SPI0_MOSI pin */
+ 1 2 0x1 0x0>; /* PB2 periph A SPI0_SPCK pin */
+ };
+ };
+
+ spi1 {
+ pinctrl_spi1: spi1-0 {
+ atmel,pins =
+ <1 14 0x1 0x0 /* PB14 periph A SPI1_MISO pin */
+ 1 15 0x1 0x0 /* PB15 periph A SPI1_MOSI pin */
+ 1 16 0x1 0x0>; /* PB16 periph A SPI1_SPCK pin */
+ };
+ };
+
pioA: gpio@fffff200 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff200 0x200>;
@@ -531,6 +549,28 @@
reg = <0xfffffd40 0x10>;
status = "disabled";
};
+
+ spi0: spi@fffa4000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xfffa4000 0x200>;
+ interrupts = <14 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi0>;
+ status = "disabled";
+ };
+
+ spi1: spi@fffa8000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xfffa8000 0x200>;
+ interrupts = <15 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1>;
+ status = "disabled";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts
index 20c31913c27..92c52a7d70b 100644
--- a/arch/arm/boot/dts/at91sam9m10g45ek.dts
+++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts
@@ -102,6 +102,16 @@
};
};
};
+
+ spi0: spi@fffa4000{
+ status = "okay";
+ cs-gpios = <&pioB 3 0>, <0>, <0>, <0>;
+ mtd_dataflash@0 {
+ compatible = "atmel,at45", "atmel,dataflash";
+ spi-max-frequency = <13000000>;
+ reg = <0>;
+ };
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 7750f98dd76..b2961f1ea51 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -261,6 +261,24 @@
};
};
+ spi0 {
+ pinctrl_spi0: spi0-0 {
+ atmel,pins =
+ <0 11 0x1 0x0 /* PA11 periph A SPI0_MISO pin */
+ 0 12 0x1 0x0 /* PA12 periph A SPI0_MOSI pin */
+ 0 13 0x1 0x0>; /* PA13 periph A SPI0_SPCK pin */
+ };
+ };
+
+ spi1 {
+ pinctrl_spi1: spi1-0 {
+ atmel,pins =
+ <0 21 0x2 0x0 /* PA21 periph B SPI1_MISO pin */
+ 0 22 0x2 0x0 /* PA22 periph B SPI1_MOSI pin */
+ 0 23 0x2 0x0>; /* PA23 periph B SPI1_SPCK pin */
+ };
+ };
+
pioA: gpio@fffff400 {
compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x200>;
@@ -373,6 +391,28 @@
#size-cells = <0>;
status = "disabled";
};
+
+ spi0: spi@f0000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xf0000000 0x100>;
+ interrupts = <13 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi0>;
+ status = "disabled";
+ };
+
+ spi1: spi@f0004000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xf0004000 0x100>;
+ interrupts = <14 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1>;
+ status = "disabled";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index d400f8de438..34c842b1efb 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -67,6 +67,16 @@
};
};
};
+
+ spi0: spi@f0000000 {
+ status = "okay";
+ cs-gpios = <&pioA 14 0>, <0>, <0>, <0>;
+ m25p80@0 {
+ compatible = "atmel,at25df321a";
+ spi-max-frequency = <50000000>;
+ reg = <0>;
+ };
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index aa98e641931..347b438d47f 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -238,8 +238,32 @@
nand {
pinctrl_nand: nand-0 {
atmel,pins =
- <3 4 0x0 0x1 /* PD5 gpio RDY pin pull_up */
- 3 5 0x0 0x1>; /* PD4 gpio enable pin pull_up */
+ <3 0 0x1 0x0 /* PD0 periph A Read Enable */
+ 3 1 0x1 0x0 /* PD1 periph A Write Enable */
+ 3 2 0x1 0x0 /* PD2 periph A Address Latch Enable */
+ 3 3 0x1 0x0 /* PD3 periph A Command Latch Enable */
+ 3 4 0x0 0x1 /* PD4 gpio Chip Enable pin pull_up */
+ 3 5 0x0 0x1 /* PD5 gpio RDY/BUSY pin pull_up */
+ 3 6 0x1 0x0 /* PD6 periph A Data bit 0 */
+ 3 7 0x1 0x0 /* PD7 periph A Data bit 1 */
+ 3 8 0x1 0x0 /* PD8 periph A Data bit 2 */
+ 3 9 0x1 0x0 /* PD9 periph A Data bit 3 */
+ 3 10 0x1 0x0 /* PD10 periph A Data bit 4 */
+ 3 11 0x1 0x0 /* PD11 periph A Data bit 5 */
+ 3 12 0x1 0x0 /* PD12 periph A Data bit 6 */
+ 3 13 0x1 0x0>; /* PD13 periph A Data bit 7 */
+ };
+
+ pinctrl_nand_16bits: nand_16bits-0 {
+ atmel,pins =
+ <3 14 0x1 0x0 /* PD14 periph A Data bit 8 */
+ 3 15 0x1 0x0 /* PD15 periph A Data bit 9 */
+ 3 16 0x1 0x0 /* PD16 periph A Data bit 10 */
+ 3 17 0x1 0x0 /* PD17 periph A Data bit 11 */
+ 3 18 0x1 0x0 /* PD18 periph A Data bit 12 */
+ 3 19 0x1 0x0 /* PD19 periph A Data bit 13 */
+ 3 20 0x1 0x0 /* PD20 periph A Data bit 14 */
+ 3 21 0x1 0x0>; /* PD21 periph A Data bit 15 */
};
};
@@ -319,6 +343,24 @@
};
};
+ spi0 {
+ pinctrl_spi0: spi0-0 {
+ atmel,pins =
+ <0 11 0x1 0x0 /* PA11 periph A SPI0_MISO pin */
+ 0 12 0x1 0x0 /* PA12 periph A SPI0_MOSI pin */
+ 0 13 0x1 0x0>; /* PA13 periph A SPI0_SPCK pin */
+ };
+ };
+
+ spi1 {
+ pinctrl_spi1: spi1-0 {
+ atmel,pins =
+ <0 21 0x2 0x0 /* PA21 periph B SPI1_MISO pin */
+ 0 22 0x2 0x0 /* PA22 periph B SPI1_MOSI pin */
+ 0 23 0x2 0x0>; /* PA23 periph B SPI1_SPCK pin */
+ };
+ };
+
pioA: gpio@fffff400 {
compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x200>;
@@ -505,6 +547,28 @@
trigger-value = <0x6>;
};
};
+
+ spi0: spi@f0000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xf0000000 0x100>;
+ interrupts = <13 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi0>;
+ status = "disabled";
+ };
+
+ spi1: spi@f0004000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xf0004000 0x100>;
+ interrupts = <14 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1>;
+ status = "disabled";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi
index 8a7cf1d9cf5..09f5e667ca7 100644
--- a/arch/arm/boot/dts/at91sam9x5ek.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi
@@ -84,6 +84,16 @@
};
};
};
+
+ spi0: spi@f0000000 {
+ status = "okay";
+ cs-gpios = <&pioA 14 0>, <0>, <0>, <0>;
+ m25p80@0 {
+ compatible = "atmel,at25df321a";
+ spi-max-frequency = <50000000>;
+ reg = <0>;
+ };
+ };
};
usb0: ohci@00600000 {
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi
index 4bf2a8774aa..7e0481e2441 100644
--- a/arch/arm/boot/dts/bcm2835.dtsi
+++ b/arch/arm/boot/dts/bcm2835.dtsi
@@ -105,7 +105,7 @@
compatible = "fixed-clock";
reg = <1>;
#clock-cells = <0>;
- clock-frequency = <150000000>;
+ clock-frequency = <250000000>;
};
};
};
diff --git a/arch/arm/boot/dts/dbx5x0.dtsi b/arch/arm/boot/dts/dbx5x0.dtsi
index 69140ba99f4..aaa63d0a809 100644
--- a/arch/arm/boot/dts/dbx5x0.dtsi
+++ b/arch/arm/boot/dts/dbx5x0.dtsi
@@ -191,8 +191,8 @@
prcmu: prcmu@80157000 {
compatible = "stericsson,db8500-prcmu";
- reg = <0x80157000 0x1000>;
- reg-names = "prcmu";
+ reg = <0x80157000 0x1000>, <0x801b0000 0x8000>, <0x801b8000 0x1000>;
+ reg-names = "prcmu", "prcmu-tcpm", "prcmu-tcdm";
interrupts = <0 47 0x4>;
#address-cells = <1>;
#size-cells = <1>;
@@ -319,9 +319,8 @@
};
};
- ab8500@5 {
+ ab8500 {
compatible = "stericsson,ab8500";
- reg = <5>; /* mailbox 5 is i2c */
interrupt-parent = <&intc>;
interrupts = <0 40 0x4>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
index 67dbe20868a..f7509cafc37 100644
--- a/arch/arm/boot/dts/dove.dtsi
+++ b/arch/arm/boot/dts/dove.dtsi
@@ -197,6 +197,11 @@
status = "disabled";
};
+ rtc@d8500 {
+ compatible = "marvell,orion-rtc";
+ reg = <0xd8500 0x20>;
+ };
+
crypto: crypto@30000 {
compatible = "marvell,orion-crypto";
reg = <0x30000 0x10000>,
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index e1347fceb5b..1a62bcf18aa 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -275,18 +275,27 @@
compatible = "arm,pl330", "arm,primecell";
reg = <0x12680000 0x1000>;
interrupts = <0 35 0>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
};
pdma1: pdma@12690000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x12690000 0x1000>;
interrupts = <0 36 0>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
};
mdma1: mdma@12850000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x12850000 0x1000>;
interrupts = <0 34 0>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <1>;
};
};
};
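The #dma-cells, #dma-channels and #dma-requests properties added to the PL330 nodes enable the generic DMA binding: a client references a channel as dmas = <&controller request-line> (one extra cell because #dma-cells = <1>) and names each entry positionally through dma-names, while the channel/request counts describe this PL330 instance. A toy illustration of how such one-cell specifiers pair up with their names (the request numbers are invented for the example):

#include <stdio.h>

struct dma_spec { const char *controller; unsigned request; };

int main(void)
{
        const char *dma_names[2] = { "tx", "rx" };
        struct dma_spec dmas[2] = { { "pdma0", 10 }, { "pdma0", 9 } };

        for (unsigned i = 0; i < 2; i++)
                printf("\"%s\" -> %s, request line %u\n",
                       dma_names[i], dmas[i].controller, dmas[i].request);
        return 0;
}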
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index 5f3562ad674..9a99755920c 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -142,12 +142,18 @@
compatible = "arm,pl330", "arm,primecell";
reg = <0x120000 0x1000>;
interrupts = <0 34 0>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
};
pdma1: pdma@121B0000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x121000 0x1000>;
interrupts = <0 35 0>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
};
};
diff --git a/arch/arm/boot/dts/href.dtsi b/arch/arm/boot/dts/href.dtsi
index 592fb9dc35b..379128eb9d9 100644
--- a/arch/arm/boot/dts/href.dtsi
+++ b/arch/arm/boot/dts/href.dtsi
@@ -221,7 +221,7 @@
};
};
- ab8500@5 {
+ ab8500 {
ab8500-regulators {
ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
regulator-name = "V-DISPLAY";
diff --git a/arch/arm/boot/dts/hrefv60plus.dts b/arch/arm/boot/dts/hrefv60plus.dts
index 55f4191a626..2b587a74b81 100644
--- a/arch/arm/boot/dts/hrefv60plus.dts
+++ b/arch/arm/boot/dts/hrefv60plus.dts
@@ -158,7 +158,7 @@
};
};
- ab8500@5 {
+ ab8500 {
ab8500-regulators {
ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
regulator-name = "V-DISPLAY";
diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts
index 6ce3d17c3a2..fd36e1cca10 100644
--- a/arch/arm/boot/dts/imx28-m28evk.dts
+++ b/arch/arm/boot/dts/imx28-m28evk.dts
@@ -152,7 +152,6 @@
i2c0: i2c@80058000 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins_a>;
- clock-frequency = <400000>;
status = "okay";
sgtl5000: codec@0a {
diff --git a/arch/arm/boot/dts/imx28-sps1.dts b/arch/arm/boot/dts/imx28-sps1.dts
index e6cde8aa7ff..6c6a5442800 100644
--- a/arch/arm/boot/dts/imx28-sps1.dts
+++ b/arch/arm/boot/dts/imx28-sps1.dts
@@ -70,7 +70,6 @@
i2c0: i2c@80058000 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins_a>;
- clock-frequency = <400000>;
status = "okay";
rtc: rtc@51 {
diff --git a/arch/arm/boot/dts/imx53-mba53.dts b/arch/arm/boot/dts/imx53-mba53.dts
index e54fffd4836..468c0a1d48d 100644
--- a/arch/arm/boot/dts/imx53-mba53.dts
+++ b/arch/arm/boot/dts/imx53-mba53.dts
@@ -42,10 +42,9 @@
fsl,pins = <689 0x10000 /* DISP1_DRDY */
482 0x10000 /* DISP1_HSYNC */
489 0x10000 /* DISP1_VSYNC */
- 684 0x10000 /* DISP1_DAT_0 */
515 0x10000 /* DISP1_DAT_22 */
523 0x10000 /* DISP1_DAT_23 */
- 543 0x10000 /* DISP1_DAT_21 */
+ 545 0x10000 /* DISP1_DAT_21 */
553 0x10000 /* DISP1_DAT_20 */
558 0x10000 /* DISP1_DAT_19 */
564 0x10000 /* DISP1_DAT_18 */
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 06ec460b458..281a223591f 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -91,6 +91,7 @@
compatible = "arm,cortex-a9-twd-timer";
reg = <0x00a00600 0x20>;
interrupts = <1 13 0xf01>;
+ clocks = <&clks 15>;
};
L2: l2-cache@00a02000 {
diff --git a/arch/arm/boot/dts/kirkwood-dns320.dts b/arch/arm/boot/dts/kirkwood-dns320.dts
index 5bb0bf39d3b..c9c44b2f62d 100644
--- a/arch/arm/boot/dts/kirkwood-dns320.dts
+++ b/arch/arm/boot/dts/kirkwood-dns320.dts
@@ -42,12 +42,10 @@
ocp@f1000000 {
serial@12000 {
- clock-frequency = <166666667>;
status = "okay";
};
serial@12100 {
- clock-frequency = <166666667>;
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/kirkwood-dns325.dts b/arch/arm/boot/dts/kirkwood-dns325.dts
index d430713ea9b..e4e4930dc5c 100644
--- a/arch/arm/boot/dts/kirkwood-dns325.dts
+++ b/arch/arm/boot/dts/kirkwood-dns325.dts
@@ -50,7 +50,6 @@
};
};
serial@12000 {
- clock-frequency = <200000000>;
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/kirkwood-dockstar.dts b/arch/arm/boot/dts/kirkwood-dockstar.dts
index 2e3dd34e21a..0196cf6b0ef 100644
--- a/arch/arm/boot/dts/kirkwood-dockstar.dts
+++ b/arch/arm/boot/dts/kirkwood-dockstar.dts
@@ -37,7 +37,6 @@
};
};
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/kirkwood-dreamplug.dts b/arch/arm/boot/dts/kirkwood-dreamplug.dts
index ef2d8c70570..289e51d8637 100644
--- a/arch/arm/boot/dts/kirkwood-dreamplug.dts
+++ b/arch/arm/boot/dts/kirkwood-dreamplug.dts
@@ -38,7 +38,6 @@
};
};
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/kirkwood-goflexnet.dts b/arch/arm/boot/dts/kirkwood-goflexnet.dts
index 1b133e0c566..c3573be7b92 100644
--- a/arch/arm/boot/dts/kirkwood-goflexnet.dts
+++ b/arch/arm/boot/dts/kirkwood-goflexnet.dts
@@ -73,11 +73,11 @@
};
};
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
nand@3000000 {
+ chip-delay = <40>;
status = "okay";
partition@0 {
diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts
index 71902da33d6..5335b1aa860 100644
--- a/arch/arm/boot/dts/kirkwood-ib62x0.dts
+++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts
@@ -51,7 +51,6 @@
};
};
serial@12000 {
- clock-frequency = <200000000>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/kirkwood-iconnect.dts b/arch/arm/boot/dts/kirkwood-iconnect.dts
index 504f16be8b5..12ccf74ac3c 100644
--- a/arch/arm/boot/dts/kirkwood-iconnect.dts
+++ b/arch/arm/boot/dts/kirkwood-iconnect.dts
@@ -78,7 +78,6 @@
};
};
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
index 6cae4599c4b..3694e94f6e9 100644
--- a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
+++ b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
@@ -96,11 +96,11 @@
marvell,function = "gpio";
};
pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 {
- marvell,pins = "mpp44";
+ marvell,pins = "mpp46";
marvell,function = "gpio";
};
pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 {
- marvell,pins = "mpp45";
+ marvell,pins = "mpp47";
marvell,function = "gpio";
};
@@ -115,7 +115,6 @@
};
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
@@ -158,14 +157,14 @@
gpios = <&gpio0 16 0>;
linux,default-trigger = "default-on";
};
- health_led1 {
+ rebuild_led {
+ label = "status:white:rebuild_led";
+ gpios = <&gpio1 4 0>;
+ };
+ health_led {
label = "status:red:health_led";
gpios = <&gpio1 5 0>;
};
- health_led2 {
- label = "status:white:health_led";
- gpios = <&gpio1 4 0>;
- };
backup_led {
label = "status:blue:backup_led";
gpios = <&gpio0 15 0>;
diff --git a/arch/arm/boot/dts/kirkwood-km_kirkwood.dts b/arch/arm/boot/dts/kirkwood-km_kirkwood.dts
index 8db3123ac80..5bbd0542cdd 100644
--- a/arch/arm/boot/dts/kirkwood-km_kirkwood.dts
+++ b/arch/arm/boot/dts/kirkwood-km_kirkwood.dts
@@ -34,7 +34,6 @@
};
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/kirkwood-lschlv2.dts b/arch/arm/boot/dts/kirkwood-lschlv2.dts
index 9510c9ea666..9f55d95f35f 100644
--- a/arch/arm/boot/dts/kirkwood-lschlv2.dts
+++ b/arch/arm/boot/dts/kirkwood-lschlv2.dts
@@ -13,7 +13,6 @@
ocp@f1000000 {
serial@12000 {
- clock-frequency = <166666667>;
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/kirkwood-lsxhl.dts b/arch/arm/boot/dts/kirkwood-lsxhl.dts
index 739019c4cba..5c84c118ed8 100644
--- a/arch/arm/boot/dts/kirkwood-lsxhl.dts
+++ b/arch/arm/boot/dts/kirkwood-lsxhl.dts
@@ -13,7 +13,6 @@
ocp@f1000000 {
serial@12000 {
- clock-frequency = <200000000>;
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/kirkwood-mplcec4.dts b/arch/arm/boot/dts/kirkwood-mplcec4.dts
index 662dfd81b1c..758824118a9 100644
--- a/arch/arm/boot/dts/kirkwood-mplcec4.dts
+++ b/arch/arm/boot/dts/kirkwood-mplcec4.dts
@@ -90,7 +90,6 @@
};
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
index e8e7ecef165..6affd924fe1 100644
--- a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
+++ b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
@@ -23,7 +23,6 @@
};
serial@12000 {
- clock-frequency = <166666667>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/kirkwood-nsa310.dts b/arch/arm/boot/dts/kirkwood-nsa310.dts
index 3a178cf708d..a7412b937a8 100644
--- a/arch/arm/boot/dts/kirkwood-nsa310.dts
+++ b/arch/arm/boot/dts/kirkwood-nsa310.dts
@@ -117,7 +117,6 @@
};
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
index ede7fe0d7a8..d27f7245f8e 100644
--- a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
+++ b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
@@ -18,12 +18,10 @@
ocp@f1000000 {
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
serial@12100 {
- clock-frequency = <200000000>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/kirkwood-topkick.dts b/arch/arm/boot/dts/kirkwood-topkick.dts
index 842ff95d60d..66eb45b00b2 100644
--- a/arch/arm/boot/dts/kirkwood-topkick.dts
+++ b/arch/arm/boot/dts/kirkwood-topkick.dts
@@ -108,7 +108,6 @@
};
serial@12000 {
- clock-frequency = <200000000>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi
index 2c738d9dc82..fada7e6d24d 100644
--- a/arch/arm/boot/dts/kirkwood.dtsi
+++ b/arch/arm/boot/dts/kirkwood.dtsi
@@ -38,6 +38,7 @@
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <35>, <36>, <37>, <38>;
+ clocks = <&gate_clk 7>;
};
gpio1: gpio@10140 {
@@ -49,6 +50,7 @@
interrupt-controller;
#interrupt-cells = <2>;
interrupts = <39>, <40>, <41>;
+ clocks = <&gate_clk 7>;
};
serial@12000 {
@@ -57,7 +59,6 @@
reg-shift = <2>;
interrupts = <33>;
clocks = <&gate_clk 7>;
- /* set clock-frequency in board dts */
status = "disabled";
};
@@ -67,7 +68,6 @@
reg-shift = <2>;
interrupts = <34>;
clocks = <&gate_clk 7>;
- /* set clock-frequency in board dts */
status = "disabled";
};
@@ -75,6 +75,7 @@
compatible = "marvell,kirkwood-rtc", "marvell,orion-rtc";
reg = <0x10300 0x20>;
interrupts = <53>;
+ clocks = <&gate_clk 7>;
};
spi@10600 {
diff --git a/arch/arm/boot/dts/msm8660-surf.dts b/arch/arm/boot/dts/msm8660-surf.dts
index 31f2157cd7d..67f8670c4d6 100644
--- a/arch/arm/boot/dts/msm8660-surf.dts
+++ b/arch/arm/boot/dts/msm8660-surf.dts
@@ -38,4 +38,10 @@
<0x19c00000 0x1000>;
interrupts = <0 195 0x0>;
};
+
+ qcom,ssbi@500000 {
+ compatible = "qcom,ssbi";
+ reg = <0x500000 0x1000>;
+ qcom,controller-type = "pmic-arbiter";
+ };
};
diff --git a/arch/arm/boot/dts/msm8960-cdp.dts b/arch/arm/boot/dts/msm8960-cdp.dts
index 9e621b5ad3d..c9b09a813a4 100644
--- a/arch/arm/boot/dts/msm8960-cdp.dts
+++ b/arch/arm/boot/dts/msm8960-cdp.dts
@@ -38,4 +38,10 @@
<0x16400000 0x1000>;
interrupts = <0 154 0x0>;
};
+
+ qcom,ssbi@500000 {
+ compatible = "qcom,ssbi";
+ reg = <0x500000 0x1000>;
+ qcom,controller-type = "pmic-arbiter";
+ };
};
diff --git a/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts b/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts
index 5a3a58b7e18..0077fc8510b 100644
--- a/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts
+++ b/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts
@@ -11,7 +11,7 @@
/ {
model = "LaCie Ethernet Disk mini V2";
- compatible = "lacie,ethernet-disk-mini-v2", "marvell-orion5x-88f5182", "marvell,orion5x";
+ compatible = "lacie,ethernet-disk-mini-v2", "marvell,orion5x-88f5182", "marvell,orion5x";
memory {
reg = <0x00000000 0x4000000>; /* 64 MB */
diff --git a/arch/arm/boot/dts/orion5x.dtsi b/arch/arm/boot/dts/orion5x.dtsi
index 8aad00f81ed..f7bec3b1ba3 100644
--- a/arch/arm/boot/dts/orion5x.dtsi
+++ b/arch/arm/boot/dts/orion5x.dtsi
@@ -13,6 +13,9 @@
compatible = "marvell,orion5x";
interrupt-parent = <&intc>;
+ aliases {
+ gpio0 = &gpio0;
+ };
intc: interrupt-controller {
compatible = "marvell,orion-intc", "marvell,intc";
interrupt-controller;
@@ -32,7 +35,9 @@
#gpio-cells = <2>;
gpio-controller;
reg = <0x10100 0x40>;
- ngpio = <32>;
+ ngpios = <32>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
interrupts = <6>, <7>, <8>, <9>;
};
@@ -91,7 +96,7 @@
reg = <0x90000 0x10000>,
<0xf2200000 0x800>;
reg-names = "regs", "sram";
- interrupts = <22>;
+ interrupts = <28>;
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/snowball.dts b/arch/arm/boot/dts/snowball.dts
index 27f31a5fa49..d3ec32f6b79 100644
--- a/arch/arm/boot/dts/snowball.dts
+++ b/arch/arm/boot/dts/snowball.dts
@@ -298,7 +298,7 @@
};
};
- ab8500@5 {
+ ab8500 {
ab8500-regulators {
ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
regulator-name = "V-DISPLAY";
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 936d2306e7e..7e8769bd597 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -75,6 +75,9 @@
compatible = "arm,pl330", "arm,primecell";
reg = <0xffe01000 0x1000>;
interrupts = <0 180 4>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
};
};
diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi
index 1513c1927cc..122ae94076c 100644
--- a/arch/arm/boot/dts/spear1310.dtsi
+++ b/arch/arm/boot/dts/spear1310.dtsi
@@ -89,7 +89,7 @@
pinmux: pinmux@e0700000 {
compatible = "st,spear1310-pinmux";
reg = <0xe0700000 0x1000>;
- #gpio-range-cells = <2>;
+ #gpio-range-cells = <3>;
};
apb {
@@ -212,7 +212,7 @@
interrupt-controller;
gpio-controller;
#gpio-cells = <2>;
- gpio-ranges = <&pinmux 0 246>;
+ gpio-ranges = <&pinmux 0 0 246>;
status = "disabled";
st-plgpio,ngpio = <246>;
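The SPEAr updates above (and the matching ones in the other spear*.dtsi files below) move to the 3-cell gpio-ranges form, <&pinctrl gpio-offset pin-offset count>, so <&pinmux 0 0 246> maps GPIOs 0..245 straight onto pins 0..245 of the pinmux. A quick arithmetic sketch of that mapping, assuming that reading of the binding:

#include <stdio.h>

int main(void)
{
        unsigned gpio_offset = 0, pin_offset = 0, count = 246;
        unsigned gpio = 100;

        if (gpio >= gpio_offset && gpio < gpio_offset + count)
                printf("gpio %u -> pin %u\n",
                       gpio, pin_offset + (gpio - gpio_offset));
        else
                printf("gpio %u is outside the range\n", gpio);
        return 0;
}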
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
index 34da11aa679..c511c4772ef 100644
--- a/arch/arm/boot/dts/spear1340.dtsi
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -63,7 +63,7 @@
pinmux: pinmux@e0700000 {
compatible = "st,spear1340-pinmux";
reg = <0xe0700000 0x1000>;
- #gpio-range-cells = <2>;
+ #gpio-range-cells = <3>;
};
pwm: pwm@e0180000 {
@@ -127,7 +127,7 @@
interrupt-controller;
gpio-controller;
#gpio-cells = <2>;
- gpio-ranges = <&pinmux 0 252>;
+ gpio-ranges = <&pinmux 0 0 252>;
status = "disabled";
st-plgpio,ngpio = <250>;
diff --git a/arch/arm/boot/dts/spear310.dtsi b/arch/arm/boot/dts/spear310.dtsi
index ab45b8c8198..95372080eea 100644
--- a/arch/arm/boot/dts/spear310.dtsi
+++ b/arch/arm/boot/dts/spear310.dtsi
@@ -25,7 +25,7 @@
pinmux: pinmux@b4000000 {
compatible = "st,spear310-pinmux";
reg = <0xb4000000 0x1000>;
- #gpio-range-cells = <2>;
+ #gpio-range-cells = <3>;
};
fsmc: flash@44000000 {
@@ -102,7 +102,7 @@
interrupt-controller;
gpio-controller;
#gpio-cells = <2>;
- gpio-ranges = <&pinmux 0 102>;
+ gpio-ranges = <&pinmux 0 0 102>;
status = "disabled";
st-plgpio,ngpio = <102>;
diff --git a/arch/arm/boot/dts/spear320.dtsi b/arch/arm/boot/dts/spear320.dtsi
index caa5520b1fd..ffea342aeec 100644
--- a/arch/arm/boot/dts/spear320.dtsi
+++ b/arch/arm/boot/dts/spear320.dtsi
@@ -24,7 +24,7 @@
pinmux: pinmux@b3000000 {
compatible = "st,spear320-pinmux";
reg = <0xb3000000 0x1000>;
- #gpio-range-cells = <2>;
+ #gpio-range-cells = <3>;
};
clcd@90000000 {
@@ -130,7 +130,7 @@
interrupt-controller;
gpio-controller;
#gpio-cells = <2>;
- gpio-ranges = <&pinmux 0 102>;
+ gpio-ranges = <&pinmux 0 0 102>;
status = "disabled";
st-plgpio,ngpio = <102>;
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 9a428931d04..3d3f64d2111 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -118,6 +118,7 @@
compatible = "arm,cortex-a9-twd-timer";
reg = <0x50040600 0x20>;
interrupts = <1 13 0x304>;
+ clocks = <&tegra_car 132>;
};
intc: interrupt-controller {
@@ -384,7 +385,7 @@
spi@7000d800 {
compatible = "nvidia,tegra20-slink";
- reg = <0x7000d480 0x200>;
+ reg = <0x7000d800 0x200>;
interrupts = <0 83 0x04>;
nvidia,dma-request-selector = <&apbdma 17>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 767803e1fd5..dbf46c27256 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -119,6 +119,7 @@
compatible = "arm,cortex-a9-twd-timer";
reg = <0x50040600 0x20>;
interrupts = <1 13 0xf04>;
+ clocks = <&tegra_car 214>;
};
intc: interrupt-controller {
@@ -371,7 +372,7 @@
spi@7000d800 {
compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink";
- reg = <0x7000d480 0x200>;
+ reg = <0x7000d800 0x200>;
interrupts = <0 83 0x04>;
nvidia,dma-request-selector = <&apbdma 17>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/vt8500-bv07.dts b/arch/arm/boot/dts/vt8500-bv07.dts
index 567cf4e8ab8..877b33afa7e 100644
--- a/arch/arm/boot/dts/vt8500-bv07.dts
+++ b/arch/arm/boot/dts/vt8500-bv07.dts
@@ -11,26 +11,22 @@
/ {
model = "Benign BV07 Netbook";
+};
- /*
- * Display node is based on Sascha Hauer's patch on dri-devel.
- * Added a bpp property to calculate the size of the framebuffer
- * until the binding is formalized.
- */
- display: display@0 {
- modes {
- mode0: mode@0 {
- hactive = <800>;
- vactive = <480>;
- hback-porch = <88>;
- hfront-porch = <40>;
- hsync-len = <0>;
- vback-porch = <32>;
- vfront-porch = <11>;
- vsync-len = <1>;
- clock = <0>; /* unused but required */
- bpp = <16>; /* non-standard but required */
- };
+&fb {
+ bits-per-pixel = <16>;
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: 800x480 {
+ clock-frequency = <0>; /* unused but required */
+ hactive = <800>;
+ vactive = <480>;
+ hfront-porch = <40>;
+ hback-porch = <88>;
+ hsync-len = <0>;
+ vback-porch = <32>;
+ vfront-porch = <11>;
+ vsync-len = <1>;
};
};
};
diff --git a/arch/arm/boot/dts/vt8500.dtsi b/arch/arm/boot/dts/vt8500.dtsi
index cf31ced4660..68c8dc64438 100644
--- a/arch/arm/boot/dts/vt8500.dtsi
+++ b/arch/arm/boot/dts/vt8500.dtsi
@@ -98,12 +98,10 @@
interrupts = <43>;
};
- fb@d800e400 {
+ fb: fb@d8050800 {
compatible = "via,vt8500-fb";
reg = <0xd800e400 0x400>;
interrupts = <12>;
- display = <&display>;
- default-mode = <&mode0>;
};
ge_rops@d8050400 {
diff --git a/arch/arm/boot/dts/wm8505-ref.dts b/arch/arm/boot/dts/wm8505-ref.dts
index fd4e248074c..edd2cec3d37 100644
--- a/arch/arm/boot/dts/wm8505-ref.dts
+++ b/arch/arm/boot/dts/wm8505-ref.dts
@@ -11,26 +11,22 @@
/ {
model = "Wondermedia WM8505 Netbook";
+};
- /*
- * Display node is based on Sascha Hauer's patch on dri-devel.
- * Added a bpp property to calculate the size of the framebuffer
- * until the binding is formalized.
- */
- display: display@0 {
- modes {
- mode0: mode@0 {
- hactive = <800>;
- vactive = <480>;
- hback-porch = <88>;
- hfront-porch = <40>;
- hsync-len = <0>;
- vback-porch = <32>;
- vfront-porch = <11>;
- vsync-len = <1>;
- clock = <0>; /* unused but required */
- bpp = <32>; /* non-standard but required */
- };
+&fb {
+ bits-per-pixel = <32>;
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: 800x480 {
+ clock-frequency = <0>; /* unused but required */
+ hactive = <800>;
+ vactive = <480>;
+ hfront-porch = <40>;
+ hback-porch = <88>;
+ hsync-len = <0>;
+ vback-porch = <32>;
+ vfront-porch = <11>;
+ vsync-len = <1>;
};
};
};
diff --git a/arch/arm/boot/dts/wm8505.dtsi b/arch/arm/boot/dts/wm8505.dtsi
index e74a1c0fb9a..bcf668d31b2 100644
--- a/arch/arm/boot/dts/wm8505.dtsi
+++ b/arch/arm/boot/dts/wm8505.dtsi
@@ -128,11 +128,9 @@
interrupts = <0>;
};
- fb@d8050800 {
+ fb: fb@d8050800 {
compatible = "wm,wm8505-fb";
reg = <0xd8050800 0x200>;
- display = <&display>;
- default-mode = <&mode0>;
};
ge_rops@d8050400 {
diff --git a/arch/arm/boot/dts/wm8650-mid.dts b/arch/arm/boot/dts/wm8650-mid.dts
index cefd938f842..61671a0d9ed 100644
--- a/arch/arm/boot/dts/wm8650-mid.dts
+++ b/arch/arm/boot/dts/wm8650-mid.dts
@@ -11,26 +11,24 @@
/ {
model = "Wondermedia WM8650-MID Tablet";
+};
+
+&fb {
+ bits-per-pixel = <16>;
- /*
- * Display node is based on Sascha Hauer's patch on dri-devel.
- * Added a bpp property to calculate the size of the framebuffer
- * until the binding is formalized.
- */
- display: display@0 {
- modes {
- mode0: mode@0 {
- hactive = <800>;
- vactive = <480>;
- hback-porch = <88>;
- hfront-porch = <40>;
- hsync-len = <0>;
- vback-porch = <32>;
- vfront-porch = <11>;
- vsync-len = <1>;
- clock = <0>; /* unused but required */
- bpp = <16>; /* non-standard but required */
- };
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: 800x480 {
+ clock-frequency = <0>; /* unused but required */
+ hactive = <800>;
+ vactive = <480>;
+ hfront-porch = <40>;
+ hback-porch = <88>;
+ hsync-len = <0>;
+ vback-porch = <32>;
+ vfront-porch = <11>;
+ vsync-len = <1>;
};
};
};
+
diff --git a/arch/arm/boot/dts/wm8650.dtsi b/arch/arm/boot/dts/wm8650.dtsi
index db3c0a12e05..9313407bbc3 100644
--- a/arch/arm/boot/dts/wm8650.dtsi
+++ b/arch/arm/boot/dts/wm8650.dtsi
@@ -128,11 +128,9 @@
interrupts = <43>;
};
- fb@d8050800 {
+ fb: fb@d8050800 {
compatible = "wm,wm8505-fb";
reg = <0xd8050800 0x200>;
- display = <&display>;
- default-mode = <&mode0>;
};
ge_rops@d8050400 {
diff --git a/arch/arm/boot/dts/wm8850-w70v2.dts b/arch/arm/boot/dts/wm8850-w70v2.dts
index fcc660c8954..32d22532cd6 100644
--- a/arch/arm/boot/dts/wm8850-w70v2.dts
+++ b/arch/arm/boot/dts/wm8850-w70v2.dts
@@ -15,28 +15,6 @@
/ {
model = "Wondermedia WM8850-W70v2 Tablet";
- /*
- * Display node is based on Sascha Hauer's patch on dri-devel.
- * Added a bpp property to calculate the size of the framebuffer
- * until the binding is formalized.
- */
- display: display@0 {
- modes {
- mode0: mode@0 {
- hactive = <800>;
- vactive = <480>;
- hback-porch = <88>;
- hfront-porch = <40>;
- hsync-len = <0>;
- vback-porch = <32>;
- vfront-porch = <11>;
- vsync-len = <1>;
- clock = <0>; /* unused but required */
- bpp = <16>; /* non-standard but required */
- };
- };
- };
-
backlight {
compatible = "pwm-backlight";
pwms = <&pwm 0 50000 1>; /* duty inverted */
@@ -45,3 +23,21 @@
default-brightness-level = <5>;
};
};
+
+&fb {
+ bits-per-pixel = <16>;
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: 800x480 {
+ clock-frequency = <0>; /* unused but required */
+ hactive = <800>;
+ vactive = <480>;
+ hfront-porch = <40>;
+ hback-porch = <88>;
+ hsync-len = <0>;
+ vback-porch = <32>;
+ vfront-porch = <11>;
+ vsync-len = <1>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/wm8850.dtsi b/arch/arm/boot/dts/wm8850.dtsi
index e8cbfdc87bb..7149cd13e3b 100644
--- a/arch/arm/boot/dts/wm8850.dtsi
+++ b/arch/arm/boot/dts/wm8850.dtsi
@@ -135,11 +135,9 @@
};
};
- fb@d8051700 {
+ fb: fb@d8051700 {
compatible = "wm,wm8505-fb";
reg = <0xd8051700 0x200>;
- display = <&display>;
- default-mode = <&mode0>;
};
ge_rops@d8050400 {
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index e36b0102532..088d6c11a0f 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -188,6 +188,7 @@ CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_MXC=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_PHY=y
CONFIG_USB_MXS_PHY=y
CONFIG_USB_STORAGE=y
CONFIG_MMC=y
diff --git a/arch/arm/configs/lpc32xx_defconfig b/arch/arm/configs/lpc32xx_defconfig
index 92386b20bd0..afa7249fac6 100644
--- a/arch/arm/configs/lpc32xx_defconfig
+++ b/arch/arm/configs/lpc32xx_defconfig
@@ -134,6 +134,7 @@ CONFIG_SND_DEBUG_VERBOSE=y
# CONFIG_SND_SPI is not set
CONFIG_SND_SOC=y
CONFIG_USB=y
+CONFIG_USB_PHY=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_USB_GADGET=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index fbbc5bb022d..87924d67111 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -116,9 +116,11 @@ CONFIG_SND_SOC=y
CONFIG_SND_MXS_SOC=y
CONFIG_SND_SOC_MXS_SGTL5000=y
CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_HOST=y
CONFIG_USB_STORAGE=y
+CONFIG_USB_PHY=y
CONFIG_USB_MXS_PHY=y
CONFIG_MMC=y
CONFIG_MMC_MXS=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 42eab9a2a0f..7e0ebb64a7f 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -195,6 +195,7 @@ CONFIG_SND_SOC=y
CONFIG_SND_OMAP_SOC=y
# CONFIG_USB_HID is not set
CONFIG_USB=y
+CONFIG_USB_PHY=y
CONFIG_USB_DEBUG=y
CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index b16bae2c9a6..bd07864f14a 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -126,6 +126,8 @@ CONFIG_INPUT_MISC=y
CONFIG_INPUT_TWL4030_PWRBUTTON=y
CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=32
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index 720799fd3a8..dff714d886d 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -24,7 +24,7 @@ extern struct arm_delay_ops {
void (*delay)(unsigned long);
void (*const_udelay)(unsigned long);
void (*udelay)(unsigned long);
- bool const_clock;
+ unsigned long ticks_per_jiffy;
} arm_delay_ops;
#define __delay(n) arm_delay_ops.delay(n)
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index cca9f15704e..ea289e1435e 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -19,14 +19,6 @@
#undef _CACHE
#undef MULTI_CACHE
-#if defined(CONFIG_CPU_CACHE_V3)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE v3
-# endif
-#endif
-
#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
# define MULTI_CACHE 1
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
index 02fe2fbe247..ed94b1a366a 100644
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void);
* IOP3XX processor registers
*/
#define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000
-#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfeffe000
+#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfedfe000
#define IOP3XX_PERIPHERAL_SIZE 0x00002000
#define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\
IOP3XX_PERIPHERAL_SIZE - 1)
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 8c5e828f484..91b99abe7a9 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page);
#endif
#endif
+/*
+ * Needed to be able to broadcast the TLB invalidation for kmap.
+ */
+#ifdef CONFIG_ARM_ERRATA_798181
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#endif
+
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);
#else
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 9f77e7804f3..e3d55547e75 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -5,15 +5,15 @@
typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
- u64 id;
+ atomic64_t id;
#endif
- unsigned int vmalloc_seq;
+ unsigned int vmalloc_seq;
} mm_context_t;
#ifdef CONFIG_CPU_HAS_ASID
#define ASID_BITS 8
#define ASID_MASK ((~0ULL) << ASID_BITS)
-#define ASID(mm) ((mm)->context.id & ~ASID_MASK)
+#define ASID(mm) ((mm)->context.id.counter & ~ASID_MASK)
#else
#define ASID(mm) (0)
#endif
@@ -26,7 +26,7 @@ typedef struct {
* modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
*/
typedef struct {
- unsigned long end_brk;
+ unsigned long end_brk;
} mm_context_t;
#endif
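With the context id now an atomic64_t, the ASID() macro keeps only the low ASID_BITS of the counter; the bits above it serve as a generation number for the ASID allocator. A stand-alone check of that masking (the id value is arbitrary):

#include <stdio.h>
#include <stdint.h>

#define ASID_BITS 8
#define ASID_MASK ((~0ULL) << ASID_BITS)

int main(void)
{
        uint64_t id = 0x142;    /* low 8 bits = ASID, the rest = generation */

        printf("ASID = %#llx, generation bits = %#llx\n",
               (unsigned long long)(id & ~ASID_MASK),
               (unsigned long long)(id & ASID_MASK));
        return 0;
}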
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index e1f644bc7cc..a7b85e0d0cc 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -25,7 +25,9 @@ void __check_vmalloc_seq(struct mm_struct *mm);
#ifdef CONFIG_CPU_HAS_ASID
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
-#define init_new_context(tsk,mm) ({ mm->context.id = 0; })
+#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
+
+DECLARE_PER_CPU(atomic64_t, active_asids);
#else /* !CONFIG_CPU_HAS_ASID */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 6ef8afd1b64..86b8fe398b9 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -111,7 +111,7 @@
#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
-#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */
+#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
/*
* Hyp-mode PL2 PTE definitions for LPAE.
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 80d6fc4dbe4..9bcd262a900 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -61,6 +61,15 @@ extern void __pgd_error(const char *file, int line, pgd_t);
#define FIRST_USER_ADDRESS PAGE_SIZE
/*
+ * Use TASK_SIZE as the ceiling argument for free_pgtables() and
+ * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
+ * page shared between user and kernel).
+ */
+#ifdef CONFIG_ARM_LPAE
+#define USER_PGTABLES_CEILING TASK_SIZE
+#endif
+
+/*
* The pgprot_* and protection_map entries will be fixed up in runtime
* to include the cachable and bufferable bits based on memory policy,
* as well as any architecture dependent bits like global/ASID and SMP
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 5a85f148b60..21a23e378bb 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -21,9 +21,6 @@ extern void (*arm_pm_idle)(void);
extern unsigned int user_debug;
-extern void disable_hlt(void);
-extern void enable_hlt(void);
-
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_ARM_SYSTEM_MISC_H */
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 6e924d3a77e..ab865e65a84 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -14,7 +14,6 @@
#include <asm/glue.h>
-#define TLB_V3_PAGE (1 << 0)
#define TLB_V4_U_PAGE (1 << 1)
#define TLB_V4_D_PAGE (1 << 2)
#define TLB_V4_I_PAGE (1 << 3)
@@ -22,7 +21,6 @@
#define TLB_V6_D_PAGE (1 << 5)
#define TLB_V6_I_PAGE (1 << 6)
-#define TLB_V3_FULL (1 << 8)
#define TLB_V4_U_FULL (1 << 9)
#define TLB_V4_D_FULL (1 << 10)
#define TLB_V4_I_FULL (1 << 11)
@@ -34,10 +32,13 @@
#define TLB_V6_D_ASID (1 << 17)
#define TLB_V6_I_ASID (1 << 18)
+#define TLB_V6_BP (1 << 19)
+
/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
-#define TLB_V7_UIS_PAGE (1 << 19)
-#define TLB_V7_UIS_FULL (1 << 20)
-#define TLB_V7_UIS_ASID (1 << 21)
+#define TLB_V7_UIS_PAGE (1 << 20)
+#define TLB_V7_UIS_FULL (1 << 21)
+#define TLB_V7_UIS_ASID (1 << 22)
+#define TLB_V7_UIS_BP (1 << 23)
#define TLB_BARRIER (1 << 28)
#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */
@@ -49,7 +50,6 @@
* =============
*
* We have the following to choose from:
- * v3 - ARMv3
* v4 - ARMv4 without write buffer
* v4wb - ARMv4 with write buffer without I TLB flush entry instruction
* v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
@@ -150,7 +150,8 @@
#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
TLB_V6_I_FULL | TLB_V6_D_FULL | \
TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
- TLB_V6_I_ASID | TLB_V6_D_ASID)
+ TLB_V6_I_ASID | TLB_V6_D_ASID | \
+ TLB_V6_BP)
#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags v6wbi_tlb_flags
@@ -166,9 +167,11 @@
#endif
#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
- TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
+ TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
+ TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
- TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
+ TLB_V6_U_FULL | TLB_V6_U_PAGE | \
+ TLB_V6_U_ASID | TLB_V6_BP)
#ifdef CONFIG_CPU_TLB_V7
@@ -324,7 +327,6 @@ static inline void local_flush_tlb_all(void)
if (tlb_flag(TLB_WB))
dsb();
- tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
@@ -345,9 +347,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
if (tlb_flag(TLB_WB))
dsb();
- if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
+ if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
- tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
@@ -379,9 +380,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
if (tlb_flag(TLB_WB))
dsb();
- if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
+ if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
- tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);
tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
@@ -412,7 +412,6 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
if (tlb_flag(TLB_WB))
dsb();
- tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);
tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
@@ -430,6 +429,35 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
}
}
+static inline void local_flush_bp_all(void)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_V7_UIS_BP))
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
+ else if (tlb_flag(TLB_V6_BP))
+ asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+
+ if (tlb_flag(TLB_BARRIER))
+ isb();
+}
+
+#ifdef CONFIG_ARM_ERRATA_798181
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+ /*
+ * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
+ */
+ asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+ dsb();
+}
+#else
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+}
+#endif
+
/*
* flush_pmd_entry
*
@@ -480,6 +508,7 @@ static inline void clean_pmd_entry(void *pmd)
#define flush_tlb_kernel_page local_flush_tlb_kernel_page
#define flush_tlb_range local_flush_tlb_range
#define flush_tlb_kernel_range local_flush_tlb_kernel_range
+#define flush_bp_all local_flush_bp_all
#else
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
@@ -487,6 +516,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_bp_all(void);
#endif
/*
diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
index 5c27696de14..8b1f37bfeee 100644
--- a/arch/arm/include/asm/xen/events.h
+++ b/arch/arm/include/asm/xen/events.h
@@ -2,6 +2,7 @@
#define _ASM_ARM_XEN_EVENTS_H
#include <asm/ptrace.h>
+#include <asm/atomic.h>
enum ipi_vector {
XEN_PLACEHOLDER_VECTOR,
@@ -15,26 +16,8 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
return raw_irqs_disabled_flags(regs->ARM_cpsr);
}
-/*
- * We cannot use xchg because it does not support 8-byte
- * values. However it is safe to use {ldr,dtd}exd directly because all
- * platforms which Xen can run on support those instructions.
- */
-static inline xen_ulong_t xchg_xen_ulong(xen_ulong_t *ptr, xen_ulong_t val)
-{
- xen_ulong_t oldval;
- unsigned int tmp;
-
- wmb();
- asm volatile("@ xchg_xen_ulong\n"
- "1: ldrexd %0, %H0, [%3]\n"
- " strexd %1, %2, %H2, [%3]\n"
- " teq %1, #0\n"
- " bne 1b"
- : "=&r" (oldval), "=&r" (tmp)
- : "r" (val), "r" (ptr)
- : "memory", "cc");
- return oldval;
-}
+#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr), \
+ atomic64_t, \
+ counter), (val))
#endif /* _ASM_ARM_XEN_EVENTS_H */
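The new xchg_xen_ulong() leans on container_of() to turn a pointer to the raw 64-bit word back into a pointer to the atomic64_t wrapping it, which works because the counter is the wrapper's only member. A user-space illustration of that pointer arithmetic, using a simplified container_of rather than the kernel macro verbatim:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

typedef struct { long long counter; } my_atomic64_t;

int main(void)
{
        my_atomic64_t v = { .counter = 42 };
        long long *raw = &v.counter;
        my_atomic64_t *back = container_of(raw, my_atomic64_t, counter);

        printf("wrapper %p recovered from member %p, value %lld\n",
               (void *)back, (void *)raw, back->counter);
        return 0;
}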
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 4da7cde70b5..af33b44990e 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -404,7 +404,7 @@
#define __NR_setns (__NR_SYSCALL_BASE+375)
#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376)
#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
- /* 378 for kcmp */
+#define __NR_kcmp (__NR_SYSCALL_BASE+378)
#define __NR_finit_module (__NR_SYSCALL_BASE+379)
/*
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 5ce738b4350..923eec7105c 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -110,7 +110,7 @@ int main(void)
BLANK();
#endif
#ifdef CONFIG_CPU_HAS_ASID
- DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id));
+ DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
BLANK();
#endif
DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 0cc57611fc4..c6ca7e37677 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -387,7 +387,7 @@
/* 375 */ CALL(sys_setns)
CALL(sys_process_vm_readv)
CALL(sys_process_vm_writev)
- CALL(sys_ni_syscall) /* reserved for sys_kcmp */
+ CALL(sys_kcmp)
CALL(sys_finit_module)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
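With slot 378 now wired to sys_kcmp, user space can ask the kernel whether two tasks share a resource. A minimal sketch of calling it (it needs a kernel built from this tree with CONFIG_CHECKPOINT_RESTORE; the fallback syscall number is the ARM EABI value defined above, and the process trivially compares itself here):

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kcmp.h>

#ifndef __NR_kcmp
#define __NR_kcmp 378   /* __NR_SYSCALL_BASE + 378 with the EABI base of 0 */
#endif

int main(void)
{
        pid_t self = getpid();
        long ret = syscall(__NR_kcmp, self, self, KCMP_VM, 0, 0);

        /* 0 means both tasks use the same mm; negative means error */
        printf("kcmp(self, self, KCMP_VM) = %ld\n", ret);
        return 0;
}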
diff --git a/arch/arm/kernel/early_printk.c b/arch/arm/kernel/early_printk.c
index 85aa2b29269..43076536965 100644
--- a/arch/arm/kernel/early_printk.c
+++ b/arch/arm/kernel/early_printk.c
@@ -29,28 +29,17 @@ static void early_console_write(struct console *con, const char *s, unsigned n)
early_write(s, n);
}
-static struct console early_console = {
+static struct console early_console_dev = {
.name = "earlycon",
.write = early_console_write,
.flags = CON_PRINTBUFFER | CON_BOOT,
.index = -1,
};
-asmlinkage void early_printk(const char *fmt, ...)
-{
- char buf[512];
- int n;
- va_list ap;
-
- va_start(ap, fmt);
- n = vscnprintf(buf, sizeof(buf), fmt, ap);
- early_write(buf, n);
- va_end(ap);
-}
-
static int __init setup_early_printk(char *buf)
{
- register_console(&early_console);
+ early_console = &early_console_dev;
+ register_console(&early_console_dev);
return 0;
}
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 3248cde504e..fefd7f97143 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -276,7 +276,13 @@ ENDPROC(ftrace_graph_caller_old)
*/
.macro mcount_enter
+/*
+ * This pad compensates for the push {lr} at the call site. Note that we are
+ * unable to unwind through a function which does not otherwise save its lr.
+ */
+ UNWIND(.pad #4)
stmdb sp!, {r0-r3, lr}
+ UNWIND(.save {r0-r3, lr})
.endm
.macro mcount_get_lr reg
@@ -289,6 +295,7 @@ ENDPROC(ftrace_graph_caller_old)
.endm
ENTRY(__gnu_mcount_nc)
+UNWIND(.fnstart)
#ifdef CONFIG_DYNAMIC_FTRACE
mov ip, lr
ldmia sp!, {lr}
@@ -296,17 +303,22 @@ ENTRY(__gnu_mcount_nc)
#else
__mcount
#endif
+UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
+UNWIND(.fnstart)
__ftrace_caller
+UNWIND(.fnend)
ENDPROC(ftrace_caller)
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
+UNWIND(.fnstart)
__ftrace_graph_caller
+UNWIND(.fnend)
ENDPROC(ftrace_graph_caller)
#endif
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 486a15ae901..8bac553fe21 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -184,13 +184,22 @@ __create_page_tables:
orr r3, r3, #3 @ PGD block type
mov r6, #4 @ PTRS_PER_PGD
mov r7, #1 << (55 - 32) @ L_PGD_SWAPPER
-1: str r3, [r0], #4 @ set bottom PGD entry bits
+1:
+#ifdef CONFIG_CPU_ENDIAN_BE8
str r7, [r0], #4 @ set top PGD entry bits
+ str r3, [r0], #4 @ set bottom PGD entry bits
+#else
+ str r3, [r0], #4 @ set bottom PGD entry bits
+ str r7, [r0], #4 @ set top PGD entry bits
+#endif
add r3, r3, #0x1000 @ next PMD table
subs r6, r6, #1
bne 1b
add r4, r4, #0x1000 @ point to the PMD tables
+#ifdef CONFIG_CPU_ENDIAN_BE8
+ add r4, r4, #4 @ we only write the bottom word
+#endif
#endif
ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
@@ -258,6 +267,11 @@ __create_page_tables:
addne r6, r6, #1 << SECTION_SHIFT
strne r6, [r3]
+#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
+ sub r4, r4, #4 @ Fixup page table pointer
+ @ for 64-bit descriptors
+#endif
+
#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
/*
@@ -276,13 +290,17 @@ __create_page_tables:
orr r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
mov r7, #1 << (54 - 32) @ XN
+#ifdef CONFIG_CPU_ENDIAN_BE8
+ str r7, [r0], #4
+ str r3, [r0], #4
#else
- orr r3, r3, #PMD_SECT_XN
-#endif
str r3, [r0], #4
-#ifdef CONFIG_ARM_LPAE
str r7, [r0], #4
#endif
+#else
+ orr r3, r3, #PMD_SECT_XN
+ str r3, [r0], #4
+#endif
#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
/* we don't need any serial debugging mappings */
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 5eae53e7a2e..1fd749ee4a1 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused)
}
if (err) {
- pr_warning("CPU %d debug is powered down!\n", cpu);
+ pr_warn_once("CPU %d debug is powered down!\n", cpu);
cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
return;
}
@@ -987,7 +987,7 @@ clear_vcr:
isb();
if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
- pr_warning("CPU %d failed to disable vector catch\n", cpu);
+ pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
return;
}
@@ -1007,7 +1007,7 @@ clear_vcr:
}
if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
- pr_warning("CPU %d failed to clear debug register pairs\n", cpu);
+ pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
return;
}
@@ -1023,7 +1023,7 @@ out_mdbgen:
static int __cpuinit dbg_reset_notify(struct notifier_block *self,
unsigned long action, void *cpu)
{
- if (action == CPU_ONLINE)
+ if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
return NOTIFY_OK;
@@ -1043,7 +1043,7 @@ static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
+static struct notifier_block dbg_cpu_pm_nb = {
.notifier_call = dbg_cpu_pm_notify,
};
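The notifier tweak above strips the FROZEN flag from the hotplug action so that CPUs coming back during suspend/resume (CPU_ONLINE_FROZEN) get their debug registers reset just like a plain CPU_ONLINE. A small demonstration of that masking; the flag values below only illustrate the convention that FROZEN is a separate bit, which is all the test relies on:

#include <stdio.h>

#define CPU_ONLINE        0x0002
#define CPU_TASKS_FROZEN  0x0010
#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)

int main(void)
{
        unsigned long actions[2] = { CPU_ONLINE, CPU_ONLINE_FROZEN };

        for (int i = 0; i < 2; i++)
                printf("action %#lx matches CPU_ONLINE: %s\n", actions[i],
                       (actions[i] & ~CPU_TASKS_FROZEN) == CPU_ONLINE
                       ? "yes" : "no");
        return 0;
}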
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 31e0eb353cd..8c3094d0f7b 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -253,7 +253,10 @@ validate_event(struct pmu_hw_events *hw_events,
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu *leader_pmu = event->group_leader->pmu;
- if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+ if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+ return 1;
+
+ if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
return 1;
return armpmu->get_event_idx(hw_events, event) >= 0;
@@ -400,7 +403,7 @@ __hw_perf_event_init(struct perf_event *event)
}
if (event->group_leader != event) {
- if (validate_group(event) != 0);
+ if (validate_group(event) != 0)
return -EINVAL;
}
@@ -484,7 +487,7 @@ const struct dev_pm_ops armpmu_dev_pm_ops = {
SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
};
-static void __init armpmu_init(struct arm_pmu *armpmu)
+static void armpmu_init(struct arm_pmu *armpmu)
{
atomic_set(&armpmu->active_events, 0);
mutex_init(&armpmu->reserve_mutex);
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 8c79a9e70b8..039cffb053a 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -774,7 +774,7 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
/*
* PMXEVTYPER: Event selection reg
*/
-#define ARMV7_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
+#define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
#define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
/*
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 047d3e40e47..c9a5e2ce8aa 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -57,38 +57,6 @@ static const char *isa_modes[] = {
"ARM" , "Thumb" , "Jazelle", "ThumbEE"
};
-static volatile int hlt_counter;
-
-void disable_hlt(void)
-{
- hlt_counter++;
-}
-
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
- hlt_counter--;
- BUG_ON(hlt_counter < 0);
-}
-
-EXPORT_SYMBOL(enable_hlt);
-
-static int __init nohlt_setup(char *__unused)
-{
- hlt_counter = 1;
- return 1;
-}
-
-static int __init hlt_setup(char *__unused)
-{
- hlt_counter = 0;
- return 1;
-}
-
-__setup("nohlt", nohlt_setup);
-__setup("hlt", hlt_setup);
-
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
typedef void (*phys_reset_t)(unsigned long);
@@ -172,54 +140,38 @@ static void default_idle(void)
local_irq_enable();
}
-/*
- * The idle thread.
- * We always respect 'hlt_counter' to prevent low power idle.
- */
-void cpu_idle(void)
+void arch_cpu_idle_prepare(void)
{
local_fiq_enable();
+}
- /* endless idle loop with no priority at all */
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
- ledtrig_cpu(CPU_LED_IDLE_START);
- while (!need_resched()) {
-#ifdef CONFIG_HOTPLUG_CPU
- if (cpu_is_offline(smp_processor_id()))
- cpu_die();
+void arch_cpu_idle_enter(void)
+{
+ ledtrig_cpu(CPU_LED_IDLE_START);
+#ifdef CONFIG_PL310_ERRATA_769419
+ wmb();
#endif
+}
- /*
- * We need to disable interrupts here
- * to ensure we don't miss a wakeup call.
- */
- local_irq_disable();
-#ifdef CONFIG_PL310_ERRATA_769419
- wmb();
+void arch_cpu_idle_exit(void)
+{
+ ledtrig_cpu(CPU_LED_IDLE_END);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+ cpu_die();
+}
#endif
- if (hlt_counter) {
- local_irq_enable();
- cpu_relax();
- } else if (!need_resched()) {
- stop_critical_timings();
- if (cpuidle_idle_call())
- default_idle();
- start_critical_timings();
- /*
- * default_idle functions must always
- * return with IRQs enabled.
- */
- WARN_ON(irqs_disabled());
- } else
- local_irq_enable();
- }
- ledtrig_cpu(CPU_LED_IDLE_END);
- rcu_idle_exit();
- tick_nohz_idle_exit();
- schedule_preempt_disabled();
- }
+
+/*
+ * Called from the core idle loop.
+ */
+void arch_cpu_idle(void)
+{
+ if (cpuidle_idle_call())
+ default_idle();
}
static char reboot_mode = 'h';
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index bd6f56b9ec2..59d2adb764a 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void)
static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
-static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
return (cyc * mult) >> shift;
}
-static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
+static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
{
u64 epoch_ns;
u32 epoch_cyc;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 3f6cbb2e3ed..234e339196c 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -56,7 +56,6 @@
#include <asm/virt.h>
#include "atags.h"
-#include "tcm.h"
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
@@ -353,6 +352,23 @@ void __init early_print(const char *str, ...)
printk("%s", buf);
}
+static void __init cpuid_init_hwcaps(void)
+{
+ unsigned int divide_instrs;
+
+ if (cpu_architecture() < CPU_ARCH_ARMv7)
+ return;
+
+ divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;
+
+ switch (divide_instrs) {
+ case 2:
+ elf_hwcap |= HWCAP_IDIVA;
+ case 1:
+ elf_hwcap |= HWCAP_IDIVT;
+ }
+}
+
static void __init feat_v6_fixup(void)
{
int id = read_cpuid_id();
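
cpuid_init_hwcaps() above reads the divide-instructions field, bits [27:24] of ID_ISAR0, and relies on switch fall-through: a value of 2 means SDIV/UDIV exist in both ARM and Thumb state (IDIVA is set, then execution falls into the next case), while 1 means Thumb only. A standalone sketch of that decode, with locally defined flag values standing in for the real HWCAP bits, is:

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins for the hwcap flags; the real values live in the kernel. */
    #define FAKE_HWCAP_IDIVA (1u << 0)
    #define FAKE_HWCAP_IDIVT (1u << 1)

    static unsigned int decode_divide_hwcaps(uint32_t isar0)
    {
        unsigned int hwcap = 0;
        uint32_t divide_instrs = (isar0 & 0x0f000000) >> 24;

        switch (divide_instrs) {
        case 2:                         /* ARM and Thumb divide */
            hwcap |= FAKE_HWCAP_IDIVA;
            /* fall through */
        case 1:                         /* Thumb divide only */
            hwcap |= FAKE_HWCAP_IDIVT;
        }
        return hwcap;
    }

    int main(void)
    {
        /* Example ID_ISAR0 values with the divide field set to 0, 1 and 2. */
        uint32_t samples[] = { 0x00000000, 0x01000000, 0x02000000 };
        for (unsigned i = 0; i < 3; i++)
            printf("isar0=0x%08x -> hwcap=0x%x\n", samples[i],
                   decode_divide_hwcaps(samples[i]));
        return 0;
    }
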
@@ -483,8 +499,11 @@ static void __init setup_processor(void)
snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
list->elf_name, ENDIANNESS);
elf_hwcap = list->elf_hwcap;
+
+ cpuid_init_hwcaps();
+
#ifndef CONFIG_ARM_THUMB
- elf_hwcap &= ~HWCAP_THUMB;
+ elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
feat_v6_fixup();
@@ -524,7 +543,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
size -= start & ~PAGE_MASK;
bank->start = PAGE_ALIGN(start);
-#ifndef CONFIG_LPAE
+#ifndef CONFIG_ARM_LPAE
if (bank->start + size < bank->start) {
printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
"32-bit physical address space\n", (long long)start);
@@ -778,8 +797,6 @@ void __init setup_arch(char **cmdline_p)
reserve_crashkernel();
- tcm_init();
-
#ifdef CONFIG_MULTI_IRQ_HANDLER
handle_arch_irq = mdesc->handle_irq;
#endif
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 1bdfd87c8e4..4619177bcfe 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -285,6 +285,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
* switch away from it before attempting any exclusive accesses.
*/
cpu_switch_mm(mm->pgd, mm);
+ local_flush_bp_all();
enter_lazy_tlb(mm, current);
local_flush_tlb_all();
@@ -335,7 +336,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
/*
* OK, it's off to the idle thread for us
*/
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
}
void __init smp_cpus_done(unsigned int max_cpus)
@@ -479,7 +480,7 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
evt->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_DUMMY;
- evt->rating = 400;
+ evt->rating = 100;
evt->mult = 1;
evt->set_mode = broadcast_timer_set_mode;
@@ -672,9 +673,6 @@ static int cpufreq_callback(struct notifier_block *nb,
if (freq->flags & CPUFREQ_CONST_LOOPS)
return NOTIFY_OK;
- if (arm_delay_ops.const_clock)
- return NOTIFY_OK;
-
if (!per_cpu(l_p_j_ref, cpu)) {
per_cpu(l_p_j_ref, cpu) =
per_cpu(cpu_data, cpu).loops_per_jiffy;
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 02c5d2ce23b..e82e1d24877 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -12,6 +12,7 @@
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
/**********************************************************************/
@@ -64,12 +65,77 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}
+static inline void ipi_flush_bp_all(void *ignored)
+{
+ local_flush_bp_all();
+}
+
+#ifdef CONFIG_ARM_ERRATA_798181
+static int erratum_a15_798181(void)
+{
+ unsigned int midr = read_cpuid_id();
+
+ /* Cortex-A15 r0p0..r3p2 affected */
+ if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
+ return 0;
+ return 1;
+}
+#else
+static int erratum_a15_798181(void)
+{
+ return 0;
+}
+#endif
+
+static void ipi_flush_tlb_a15_erratum(void *arg)
+{
+ dmb();
+}
+
+static void broadcast_tlb_a15_erratum(void)
+{
+ if (!erratum_a15_798181())
+ return;
+
+ dummy_flush_tlb_a15_erratum();
+ smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
+ NULL, 1);
+}
+
+static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
+{
+ int cpu;
+ cpumask_t mask = { CPU_BITS_NONE };
+
+ if (!erratum_a15_798181())
+ return;
+
+ dummy_flush_tlb_a15_erratum();
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
+ continue;
+ /*
+ * We only need to send an IPI if the other CPUs are running
+ * the same ASID as the one being invalidated. There is no
+ * need for locking around the active_asids check since the
+ * switch_mm() function has at least one dmb() (as required by
+ * this workaround) in case a context switch happens on
+ * another CPU after the condition below.
+ */
+ if (atomic64_read(&mm->context.id) ==
+ atomic64_read(&per_cpu(active_asids, cpu)))
+ cpumask_set_cpu(cpu, &mask);
+ }
+ smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
+}
+
void flush_tlb_all(void)
{
if (tlb_ops_need_broadcast())
on_each_cpu(ipi_flush_tlb_all, NULL, 1);
else
local_flush_tlb_all();
+ broadcast_tlb_a15_erratum();
}
void flush_tlb_mm(struct mm_struct *mm)
@@ -78,6 +144,7 @@ void flush_tlb_mm(struct mm_struct *mm)
on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
else
local_flush_tlb_mm(mm);
+ broadcast_tlb_mm_a15_erratum(mm);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -90,6 +157,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
&ta, 1);
} else
local_flush_tlb_page(vma, uaddr);
+ broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}
void flush_tlb_kernel_page(unsigned long kaddr)
@@ -100,6 +168,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
} else
local_flush_tlb_kernel_page(kaddr);
+ broadcast_tlb_a15_erratum();
}
void flush_tlb_range(struct vm_area_struct *vma,
@@ -114,6 +183,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
&ta, 1);
} else
local_flush_tlb_range(vma, start, end);
+ broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -125,5 +195,13 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
} else
local_flush_tlb_kernel_range(start, end);
+ broadcast_tlb_a15_erratum();
}
+void flush_bp_all(void)
+{
+ if (tlb_ops_need_broadcast())
+ on_each_cpu(ipi_flush_bp_all, NULL, 1);
+ else
+ local_flush_bp_all();
+}
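
erratum_a15_798181() above matches Cortex-A15 r0p0..r3p2 by masking the MIDR: bits [31:24] hold the implementer, [23:20] the variant (major revision rN), [15:4] the part number and [3:0] the revision (pN). Masking with 0xff0ffff0 and comparing against 0x410fc0f0 selects "ARM, part 0xC0F" regardless of revision, and the upper bound 0x413fc0f2 caps the match at r3p2. A userspace restatement of that check, with a few sample MIDR encodings made up for the test table:

    #include <stdint.h>
    #include <stdio.h>

    /* Returns 1 when the MIDR identifies a Cortex-A15 between r0p0 and r3p2,
     * mirroring the erratum_a15_798181() test above. */
    static int is_a15_r0p0_to_r3p2(uint32_t midr)
    {
        if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
            return 0;
        return 1;
    }

    int main(void)
    {
        struct { uint32_t midr; const char *what; } t[] = {
            { 0x410fc0f0, "A15 r0p0" },
            { 0x412fc0f1, "A15 r2p1" },
            { 0x413fc0f2, "A15 r3p2" },
            { 0x413fc0f3, "A15 r3p3" },
            { 0x410fc070, "A7 r0p0"  },
        };
        for (unsigned i = 0; i < sizeof(t) / sizeof(t[0]); i++)
            printf("%-10s -> affected=%d\n", t[i].what,
                   is_a15_r0p0_to_r3p2(t[i].midr));
        return 0;
    }
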
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index c092115d903..3f256503748 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -22,6 +22,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <asm/smp_plat.h>
#include <asm/smp_twd.h>
#include <asm/localtimer.h>
@@ -373,6 +374,9 @@ void __init twd_local_timer_of_register(void)
struct device_node *np;
int err;
+ if (!is_smp() || !setup_max_cpus)
+ return;
+
np = of_find_matching_node(NULL, twd_of_match);
if (!np)
return;
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 358bca3a995..c59c97ea826 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -68,6 +68,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
ret = __cpu_suspend(arg, fn);
if (ret == 0) {
cpu_switch_mm(mm->pgd, mm);
+ local_flush_bp_all();
local_flush_tlb_all();
}
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index 30ae6bb4a31..f50f19e5c13 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -17,7 +17,6 @@
#include <asm/mach/map.h>
#include <asm/memory.h>
#include <asm/system_info.h>
-#include "tcm.h"
static struct gen_pool *tcm_pool;
static bool dtcm_present;
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 5a936988eb2..c1fe498983a 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -201,6 +201,7 @@ int kvm_dev_ioctl_check_extension(long ext)
break;
case KVM_CAP_ARM_SET_DEVICE_ADDR:
r = 1;
+ break;
case KVM_CAP_NR_VCPUS:
r = num_online_cpus();
break;
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 4ea9a982269..7bed7556077 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -79,11 +79,11 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
u32 val;
int cpu;
- cpu = get_cpu();
-
if (!p->is_write)
return read_from_write_only(vcpu, p);
+ cpu = get_cpu();
+
cpumask_setall(&vcpu->arch.require_dcache_flush);
cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
index c9a17316e9f..0e4cfe123b3 100644
--- a/arch/arm/kvm/vgic.c
+++ b/arch/arm/kvm/vgic.c
@@ -883,8 +883,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
lr, irq, vgic_cpu->vgic_lr[lr]);
BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
-
- goto out;
+ return true;
}
/* Try to use another LR for this interrupt */
@@ -898,7 +897,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
vgic_cpu->vgic_irq_lr_map[irq] = lr;
set_bit(lr, vgic_cpu->lr_used);
-out:
if (!vgic_irq_is_edge(vcpu, irq))
vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
@@ -1018,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
- /*
- * We do not need to take the distributor lock here, since the only
- * action we perform is clearing the irq_active_bit for an EOIed
- * level interrupt. There is a potential race with
- * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we
- * check if the interrupt is already active. Two possibilities:
- *
- * - The queuing is occurring on the same vcpu: cannot happen,
- * as we're already in the context of this vcpu, and
- * executing the handler
- * - The interrupt has been migrated to another vcpu, and we
- * ignore this interrupt for this run. Big deal. It is still
- * pending though, and will get considered when this vcpu
- * exits.
- */
if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
/*
* Some level interrupts have been EOIed. Clear their
@@ -1054,6 +1037,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
} else {
vgic_cpu_irq_clear(vcpu, irq);
}
+
+ /*
+ * Despite being EOIed, the LR may not have
+ * been marked as empty.
+ */
+ set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
+ vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
}
}
@@ -1064,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
}
/*
- * Sync back the VGIC state after a guest run. We do not really touch
- * the distributor here (the irq_pending_on_cpu bit is safe to set),
- * so there is no need for taking its lock.
+ * Sync back the VGIC state after a guest run. The distributor lock is
+ * needed so we don't get preempted in the middle of the state processing.
*/
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
@@ -1112,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
if (!irqchip_in_kernel(vcpu->kvm))
return;
+ spin_lock(&dist->lock);
__kvm_vgic_sync_hwstate(vcpu);
+ spin_unlock(&dist->lock);
}
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index 6b93f6a1a3c..64dbfa57204 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -58,7 +58,7 @@ static void __timer_delay(unsigned long cycles)
static void __timer_const_udelay(unsigned long xloops)
{
unsigned long long loops = xloops;
- loops *= loops_per_jiffy;
+ loops *= arm_delay_ops.ticks_per_jiffy;
__timer_delay(loops >> UDELAY_SHIFT);
}
@@ -73,11 +73,13 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
pr_info("Switching to timer-based delay loop\n");
delay_timer = timer;
lpj_fine = timer->freq / HZ;
- loops_per_jiffy = lpj_fine;
+
+ /* cpufreq may scale loops_per_jiffy, so keep a private copy */
+ arm_delay_ops.ticks_per_jiffy = lpj_fine;
arm_delay_ops.delay = __timer_delay;
arm_delay_ops.const_udelay = __timer_const_udelay;
arm_delay_ops.udelay = __timer_udelay;
- arm_delay_ops.const_clock = true;
+
delay_calibrated = true;
} else {
pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 650d5923ab8..94b0650ea98 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -14,27 +14,15 @@
.text
.align 5
- .word 0
-
-1: subs r2, r2, #4 @ 1 do we have enough
- blt 5f @ 1 bytes to align with?
- cmp r3, #2 @ 1
- strltb r1, [r0], #1 @ 1
- strleb r1, [r0], #1 @ 1
- strb r1, [r0], #1 @ 1
- add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
-/*
- * The pointer is now aligned and the length is adjusted. Try doing the
- * memset again.
- */
ENTRY(memset)
ands r3, r0, #3 @ 1 unaligned?
- bne 1b @ 1
+ mov ip, r0 @ preserve r0 as return value
+ bne 6f @ 1
/*
- * we know that the pointer in r0 is aligned to a word boundary.
+ * we know that the pointer in ip is aligned to a word boundary.
*/
- orr r1, r1, r1, lsl #8
+1: orr r1, r1, r1, lsl #8
orr r1, r1, r1, lsl #16
mov r3, r1
cmp r2, #16
@@ -43,29 +31,28 @@ ENTRY(memset)
#if ! CALGN(1)+0
/*
- * We need an extra register for this loop - save the return address and
- * use the LR
+ * We need 2 extra registers for this loop - use r8 and the LR
*/
- str lr, [sp, #-4]!
- mov ip, r1
+ stmfd sp!, {r8, lr}
+ mov r8, r1
mov lr, r1
2: subs r2, r2, #64
- stmgeia r0!, {r1, r3, ip, lr} @ 64 bytes at a time.
- stmgeia r0!, {r1, r3, ip, lr}
- stmgeia r0!, {r1, r3, ip, lr}
- stmgeia r0!, {r1, r3, ip, lr}
+ stmgeia ip!, {r1, r3, r8, lr} @ 64 bytes at a time.
+ stmgeia ip!, {r1, r3, r8, lr}
+ stmgeia ip!, {r1, r3, r8, lr}
+ stmgeia ip!, {r1, r3, r8, lr}
bgt 2b
- ldmeqfd sp!, {pc} @ Now <64 bytes to go.
+ ldmeqfd sp!, {r8, pc} @ Now <64 bytes to go.
/*
* No need to correct the count; we're only testing bits from now on
*/
tst r2, #32
- stmneia r0!, {r1, r3, ip, lr}
- stmneia r0!, {r1, r3, ip, lr}
+ stmneia ip!, {r1, r3, r8, lr}
+ stmneia ip!, {r1, r3, r8, lr}
tst r2, #16
- stmneia r0!, {r1, r3, ip, lr}
- ldr lr, [sp], #4
+ stmneia ip!, {r1, r3, r8, lr}
+ ldmfd sp!, {r8, lr}
#else
@@ -74,54 +61,63 @@ ENTRY(memset)
* whole cache lines at once.
*/
- stmfd sp!, {r4-r7, lr}
+ stmfd sp!, {r4-r8, lr}
mov r4, r1
mov r5, r1
mov r6, r1
mov r7, r1
- mov ip, r1
+ mov r8, r1
mov lr, r1
cmp r2, #96
- tstgt r0, #31
+ tstgt ip, #31
ble 3f
- and ip, r0, #31
- rsb ip, ip, #32
- sub r2, r2, ip
- movs ip, ip, lsl #(32 - 4)
- stmcsia r0!, {r4, r5, r6, r7}
- stmmiia r0!, {r4, r5}
- tst ip, #(1 << 30)
- mov ip, r1
- strne r1, [r0], #4
+ and r8, ip, #31
+ rsb r8, r8, #32
+ sub r2, r2, r8
+ movs r8, r8, lsl #(32 - 4)
+ stmcsia ip!, {r4, r5, r6, r7}
+ stmmiia ip!, {r4, r5}
+ tst r8, #(1 << 30)
+ mov r8, r1
+ strne r1, [ip], #4
3: subs r2, r2, #64
- stmgeia r0!, {r1, r3-r7, ip, lr}
- stmgeia r0!, {r1, r3-r7, ip, lr}
+ stmgeia ip!, {r1, r3-r8, lr}
+ stmgeia ip!, {r1, r3-r8, lr}
bgt 3b
- ldmeqfd sp!, {r4-r7, pc}
+ ldmeqfd sp!, {r4-r8, pc}
tst r2, #32
- stmneia r0!, {r1, r3-r7, ip, lr}
+ stmneia ip!, {r1, r3-r8, lr}
tst r2, #16
- stmneia r0!, {r4-r7}
- ldmfd sp!, {r4-r7, lr}
+ stmneia ip!, {r4-r7}
+ ldmfd sp!, {r4-r8, lr}
#endif
4: tst r2, #8
- stmneia r0!, {r1, r3}
+ stmneia ip!, {r1, r3}
tst r2, #4
- strne r1, [r0], #4
+ strne r1, [ip], #4
/*
* When we get here, we've got less than 4 bytes to zero. We
* may have an unaligned pointer as well.
*/
5: tst r2, #2
- strneb r1, [r0], #1
- strneb r1, [r0], #1
+ strneb r1, [ip], #1
+ strneb r1, [ip], #1
tst r2, #1
- strneb r1, [r0], #1
+ strneb r1, [ip], #1
mov pc, lr
+
+6: subs r2, r2, #4 @ 1 do we have enough
+ blt 5b @ 1 bytes to align with?
+ cmp r3, #2 @ 1
+ strltb r1, [ip], #1 @ 1
+ strleb r1, [ip], #1 @ 1
+ strb r1, [ip], #1 @ 1
+ add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
+ b 1b
ENDPROC(memset)
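
The rewrite above makes memset() hand back its first argument: the optimised fill now advances a scratch cursor (ip) while r0 stays intact as the return value. The same contract expressed in plain C, ignoring the alignment and block-store tricks the assembly performs, looks like the sketch below; it is purely illustrative, real code should simply call memset().

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Word-at-a-time fill that, like the fixed assembly, advances a scratch
     * cursor and never loses the pointer it has to return. */
    static void *toy_memset(void *dst, int c, size_t n)
    {
        unsigned char *p = dst;            /* scratch cursor; dst preserved */
        uint32_t pattern = (unsigned char)c;

        pattern |= pattern << 8;
        pattern |= pattern << 16;          /* replicate the byte into a word */

        /* Head: byte-fill until the cursor is word aligned. */
        while (n && ((uintptr_t)p & (sizeof(pattern) - 1))) {
            *p++ = (unsigned char)c;
            n--;
        }
        /* Body: word-sized stores (memcpy keeps this well-defined C). */
        for (; n >= sizeof(pattern); n -= sizeof(pattern)) {
            memcpy(p, &pattern, sizeof(pattern));
            p += sizeof(pattern);
        }
        /* Tail: leftover bytes. */
        while (n--)
            *p++ = (unsigned char)c;

        return dst;                        /* the value the old asm clobbered */
    }

    int main(void)
    {
        char buf[37];
        char *ret = toy_memset(buf, 0xAB, sizeof(buf));
        printf("returned original pointer: %d\n", ret == buf);
        printf("last byte set: %d\n", (unsigned char)buf[36] == 0xAB);
        return 0;
    }
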
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index b67cd537411..44199bc2c66 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -232,6 +232,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("t2_clk", "fffdc000.timer", &tc5_clk),
CLKDEV_CON_DEV_ID("hclk", "500000.ohci", &ohci_clk),
CLKDEV_CON_DEV_ID("mci_clk", "fffa8000.mmc", &mmc_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "fffc8000.spi", &spi0_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "fffcc000.spi", &spi1_clk),
/* fake hclk clock */
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk),
CLKDEV_CON_ID("pioA", &pioA_clk),
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index d3addee43d8..2ec5efea3f0 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -262,6 +262,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("mci_clk", "fffd0000.mmc", &mmc1_clk),
CLKDEV_CON_DEV_ID(NULL, "fff84000.i2c", &twi0_clk),
CLKDEV_CON_DEV_ID(NULL, "fff88000.i2c", &twi1_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "fffa4000.spi", &spi0_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "fffa8000.spi", &spi1_clk),
/* fake hclk clock */
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &uhphs_clk),
CLKDEV_CON_DEV_ID(NULL, "fffff200.gpio", &pioA_clk),
diff --git a/arch/arm/mach-at91/at91sam9n12.c b/arch/arm/mach-at91/at91sam9n12.c
index 5dfc8fd8710..ccd078355ee 100644
--- a/arch/arm/mach-at91/at91sam9n12.c
+++ b/arch/arm/mach-at91/at91sam9n12.c
@@ -172,6 +172,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("dma_clk", "ffffec00.dma-controller", &dma_clk),
CLKDEV_CON_DEV_ID(NULL, "f8010000.i2c", &twi0_clk),
CLKDEV_CON_DEV_ID(NULL, "f8014000.i2c", &twi1_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "f0000000.spi", &spi0_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "f0004000.spi", &spi1_clk),
CLKDEV_CON_DEV_ID(NULL, "fffff400.gpio", &pioAB_clk),
CLKDEV_CON_DEV_ID(NULL, "fffff600.gpio", &pioAB_clk),
CLKDEV_CON_DEV_ID(NULL, "fffff800.gpio", &pioCD_clk),
diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c
index 44a9a62dcc1..a200d8a1712 100644
--- a/arch/arm/mach-at91/at91sam9x5.c
+++ b/arch/arm/mach-at91/at91sam9x5.c
@@ -237,6 +237,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID(NULL, "f8010000.i2c", &twi0_clk),
CLKDEV_CON_DEV_ID(NULL, "f8014000.i2c", &twi1_clk),
CLKDEV_CON_DEV_ID(NULL, "f8018000.i2c", &twi2_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "f0000000.spi", &spi0_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "f0004000.spi", &spi1_clk),
CLKDEV_CON_DEV_ID(NULL, "fffff400.gpio", &pioAB_clk),
CLKDEV_CON_DEV_ID(NULL, "fffff600.gpio", &pioAB_clk),
CLKDEV_CON_DEV_ID(NULL, "fffff800.gpio", &pioCD_clk),
diff --git a/arch/arm/mach-at91/board-foxg20.c b/arch/arm/mach-at91/board-foxg20.c
index 2ea7059b840..c20a870ea9c 100644
--- a/arch/arm/mach-at91/board-foxg20.c
+++ b/arch/arm/mach-at91/board-foxg20.c
@@ -176,6 +176,7 @@ static struct w1_gpio_platform_data w1_gpio_pdata = {
/* If you choose to use a pin other than PB16 it needs to be 3.3V */
.pin = AT91_PIN_PB16,
.is_open_drain = 1,
+ .ext_pullup_enable_pin = -EINVAL,
};
static struct platform_device w1_device = {
diff --git a/arch/arm/mach-at91/board-stamp9g20.c b/arch/arm/mach-at91/board-stamp9g20.c
index a033b8df9fb..869cbecf00b 100644
--- a/arch/arm/mach-at91/board-stamp9g20.c
+++ b/arch/arm/mach-at91/board-stamp9g20.c
@@ -188,6 +188,7 @@ static struct spi_board_info portuxg20_spi_devices[] = {
static struct w1_gpio_platform_data w1_gpio_pdata = {
.pin = AT91_PIN_PA29,
.is_open_drain = 1,
+ .ext_pullup_enable_pin = -EINVAL,
};
static struct platform_device w1_device = {
diff --git a/arch/arm/mach-at91/include/mach/gpio.h b/arch/arm/mach-at91/include/mach/gpio.h
index eed465ab0dd..5fc23771c15 100644
--- a/arch/arm/mach-at91/include/mach/gpio.h
+++ b/arch/arm/mach-at91/include/mach/gpio.h
@@ -209,6 +209,14 @@ extern int at91_get_gpio_value(unsigned pin);
extern void at91_gpio_suspend(void);
extern void at91_gpio_resume(void);
+#ifdef CONFIG_PINCTRL_AT91
+extern void at91_pinctrl_gpio_suspend(void);
+extern void at91_pinctrl_gpio_resume(void);
+#else
+static inline void at91_pinctrl_gpio_suspend(void) {}
+static inline void at91_pinctrl_gpio_resume(void) {}
+#endif
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm/mach-at91/irq.c b/arch/arm/mach-at91/irq.c
index 8e210262aee..e0ca5917102 100644
--- a/arch/arm/mach-at91/irq.c
+++ b/arch/arm/mach-at91/irq.c
@@ -92,23 +92,21 @@ static int at91_aic_set_wake(struct irq_data *d, unsigned value)
void at91_irq_suspend(void)
{
- int i = 0, bit;
+ int bit = -1;
if (has_aic5()) {
/* disable enabled irqs */
- while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
+ while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
at91_aic_write(AT91_AIC5_SSR,
bit & AT91_AIC5_INTSEL_MSK);
at91_aic_write(AT91_AIC5_IDCR, 1);
- i = bit;
}
/* enable wakeup irqs */
- i = 0;
- while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
+ bit = -1;
+ while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
at91_aic_write(AT91_AIC5_SSR,
bit & AT91_AIC5_INTSEL_MSK);
at91_aic_write(AT91_AIC5_IECR, 1);
- i = bit;
}
} else {
at91_aic_write(AT91_AIC_IDCR, *backups);
@@ -118,23 +116,21 @@ void at91_irq_suspend(void)
void at91_irq_resume(void)
{
- int i = 0, bit;
+ int bit = -1;
if (has_aic5()) {
/* disable wakeup irqs */
- while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
+ while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
at91_aic_write(AT91_AIC5_SSR,
bit & AT91_AIC5_INTSEL_MSK);
at91_aic_write(AT91_AIC5_IDCR, 1);
- i = bit;
}
/* enable irqs disabled for suspend */
- i = 0;
- while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
+ bit = -1;
+ while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
at91_aic_write(AT91_AIC5_SSR,
bit & AT91_AIC5_INTSEL_MSK);
at91_aic_write(AT91_AIC5_IECR, 1);
- i = bit;
}
} else {
at91_aic_write(AT91_AIC_IDCR, *wakeups);
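
The loops above are rewritten so each search resumes at bit + 1; the old pattern restarted from i = bit, and because find_next_bit()'s start offset is inclusive it would keep finding the same set bit forever once one was found. A self-contained sketch of the corrected idiom over a plain bitmap, using a naive local helper rather than the kernel's find_next_bit(), is:

    #include <stdio.h>

    #define NBITS 16

    /* Naive helper with the same contract as find_next_bit(): return the
     * first set bit at or after 'start', or 'nbits' if there is none. */
    static unsigned find_next(unsigned long map, unsigned start, unsigned nbits)
    {
        for (unsigned i = start; i < nbits; i++)
            if (map & (1UL << i))
                return i;
        return nbits;
    }

    int main(void)
    {
        unsigned long wakeups = (1UL << 3) | (1UL << 7) | (1UL << 12);
        int bit = -1;

        /* Corrected idiom: resume the search one past the bit just handled. */
        while ((bit = (int)find_next(wakeups, (unsigned)(bit + 1), NBITS)) < NBITS)
            printf("handling wakeup irq %d\n", bit);

        return 0;
    }
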
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index adb6db888a1..73f1f250403 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -201,7 +201,10 @@ extern u32 at91_slow_clock_sz;
static int at91_pm_enter(suspend_state_t state)
{
- at91_gpio_suspend();
+ if (of_have_populated_dt())
+ at91_pinctrl_gpio_suspend();
+ else
+ at91_gpio_suspend();
at91_irq_suspend();
pr_debug("AT91: PM - wake mask %08x, pm state %d\n",
@@ -286,7 +289,10 @@ static int at91_pm_enter(suspend_state_t state)
error:
target_state = PM_SUSPEND_ON;
at91_irq_resume();
- at91_gpio_resume();
+ if (of_have_populated_dt())
+ at91_pinctrl_gpio_resume();
+ else
+ at91_gpio_resume();
return 0;
}
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
index e698f26cc0c..52e4bb5cf12 100644
--- a/arch/arm/mach-cns3xxx/core.c
+++ b/arch/arm/mach-cns3xxx/core.c
@@ -22,19 +22,9 @@
static struct map_desc cns3xxx_io_desc[] __initdata = {
{
- .virtual = CNS3XXX_TC11MP_TWD_BASE_VIRT,
- .pfn = __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT,
- .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT,
- .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE),
- .length = SZ_4K,
+ .virtual = CNS3XXX_TC11MP_SCU_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE),
+ .length = SZ_8K,
.type = MT_DEVICE,
}, {
.virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT,
diff --git a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
index 191c8e57f28..b1021aafa48 100644
--- a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
+++ b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
@@ -94,10 +94,10 @@
#define RTC_INTR_STS_OFFSET 0x34
#define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */
-#define CNS3XXX_MISC_BASE_VIRT 0xFFF07000 /* Misc Control */
+#define CNS3XXX_MISC_BASE_VIRT 0xFB000000 /* Misc Control */
#define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */
-#define CNS3XXX_PM_BASE_VIRT 0xFFF08000
+#define CNS3XXX_PM_BASE_VIRT 0xFB001000
#define PM_CLK_GATE_OFFSET 0x00
#define PM_SOFT_RST_OFFSET 0x04
@@ -109,7 +109,7 @@
#define PM_PLL_HM_PD_OFFSET 0x1C
#define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */
-#define CNS3XXX_UART0_BASE_VIRT 0xFFF09000
+#define CNS3XXX_UART0_BASE_VIRT 0xFB002000
#define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */
#define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000
@@ -130,7 +130,7 @@
#define CNS3XXX_I2S_BASE_VIRT 0xFFF10000
#define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */
-#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFFF10800
+#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFB003000
#define TIMER1_COUNTER_OFFSET 0x00
#define TIMER1_AUTO_RELOAD_OFFSET 0x04
@@ -227,16 +227,16 @@
* Testchip peripheral and fpga gic regions
*/
#define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */
-#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFF000000
+#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFB004000
#define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */
-#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT 0xFF000100
+#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x100)
#define CNS3XXX_TC11MP_TWD_BASE 0x90000600
-#define CNS3XXX_TC11MP_TWD_BASE_VIRT 0xFF000600
+#define CNS3XXX_TC11MP_TWD_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x600)
#define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */
-#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT 0xFF001000
+#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x1000)
#define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */
#define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
index a685e9706b7..45b7c71d9cc 100644
--- a/arch/arm/mach-davinci/dma.c
+++ b/arch/arm/mach-davinci/dma.c
@@ -743,6 +743,9 @@ EXPORT_SYMBOL(edma_free_channel);
*/
int edma_alloc_slot(unsigned ctlr, int slot)
{
+ if (!edma_cc[ctlr])
+ return -EINVAL;
+
if (slot >= 0)
slot = EDMA_CHAN_SLOT(slot);
diff --git a/arch/arm/mach-ep93xx/include/mach/uncompress.h b/arch/arm/mach-ep93xx/include/mach/uncompress.h
index d2afb4dd82a..b5cc77d2380 100644
--- a/arch/arm/mach-ep93xx/include/mach/uncompress.h
+++ b/arch/arm/mach-ep93xx/include/mach/uncompress.h
@@ -47,9 +47,13 @@ static void __raw_writel(unsigned int value, unsigned int ptr)
static inline void putc(int c)
{
- /* Transmit fifo not full? */
- while (__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF)
- ;
+ int i;
+
+ for (i = 0; i < 10000; i++) {
+ /* Transmit fifo not full? */
+ if (!(__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF))
+ break;
+ }
__raw_writeb(c, PHYS_UART_DATA);
}
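
putc() above trades an unbounded busy-wait on the TX-FIFO-full flag for a bounded poll, so a wedged UART can no longer hang the decompressor; after 10000 polls it writes the character anyway. The same bounded-poll shape against a stubbed status check (the stub below always reports "full" to model the wedged case):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for reading a UART flag register; reports the FIFO as full
     * forever, modelling the wedged-hardware case. */
    static bool tx_fifo_full(void)
    {
        return true;
    }

    static void putc_bounded(int c)
    {
        for (int i = 0; i < 10000; i++) {
            if (!tx_fifo_full())
                break;              /* room in the FIFO: stop polling */
        }
        /* Write regardless; losing a character beats hanging early boot. */
        printf("tx 0x%02x after bounded poll\n", c);
    }

    int main(void)
    {
        putc_bounded('E');
        return 0;
    }
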
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index 497fcb793dc..d28c7fbaba2 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -97,6 +97,19 @@ static struct s3c2410_uartcfg universal_uartcfgs[] __initdata = {
static struct regulator_consumer_supply max8952_consumer =
REGULATOR_SUPPLY("vdd_arm", NULL);
+static struct regulator_init_data universal_max8952_reg_data = {
+ .constraints = {
+ .name = "VARM_1.2V",
+ .min_uV = 770000,
+ .max_uV = 1400000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ .always_on = 1,
+ .boot_on = 1,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &max8952_consumer,
+};
+
static struct max8952_platform_data universal_max8952_pdata __initdata = {
.gpio_vid0 = EXYNOS4_GPX0(3),
.gpio_vid1 = EXYNOS4_GPX0(4),
@@ -105,19 +118,7 @@ static struct max8952_platform_data universal_max8952_pdata __initdata = {
.dvs_mode = { 48, 32, 28, 18 }, /* 1.25, 1.20, 1.05, 0.95V */
.sync_freq = 0, /* default: fastest */
.ramp_speed = 0, /* default: fastest */
-
- .reg_data = {
- .constraints = {
- .name = "VARM_1.2V",
- .min_uV = 770000,
- .max_uV = 1400000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
- .always_on = 1,
- .boot_on = 1,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = &max8952_consumer,
- },
+ .reg_data = &universal_max8952_reg_data,
};
static struct regulator_consumer_supply lp3974_buck1_consumer =
diff --git a/arch/arm/mach-exynos/setup-usb-phy.c b/arch/arm/mach-exynos/setup-usb-phy.c
index b81cc569a8d..6af40662a44 100644
--- a/arch/arm/mach-exynos/setup-usb-phy.c
+++ b/arch/arm/mach-exynos/setup-usb-phy.c
@@ -204,9 +204,9 @@ static int exynos4210_usb_phy1_exit(struct platform_device *pdev)
int s5p_usb_phy_init(struct platform_device *pdev, int type)
{
- if (type == S5P_USB_PHY_DEVICE)
+ if (type == USB_PHY_TYPE_DEVICE)
return exynos4210_usb_phy0_init(pdev);
- else if (type == S5P_USB_PHY_HOST)
+ else if (type == USB_PHY_TYPE_HOST)
return exynos4210_usb_phy1_init(pdev);
return -EINVAL;
@@ -214,9 +214,9 @@ int s5p_usb_phy_init(struct platform_device *pdev, int type)
int s5p_usb_phy_exit(struct platform_device *pdev, int type)
{
- if (type == S5P_USB_PHY_DEVICE)
+ if (type == USB_PHY_TYPE_DEVICE)
return exynos4210_usb_phy0_exit(pdev);
- else if (type == S5P_USB_PHY_HOST)
+ else if (type == USB_PHY_TYPE_HOST)
return exynos4210_usb_phy1_exit(pdev);
return -EINVAL;
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig
index abda5a18a66..0f2111a1131 100644
--- a/arch/arm/mach-footbridge/Kconfig
+++ b/arch/arm/mach-footbridge/Kconfig
@@ -67,6 +67,7 @@ config ARCH_NETWINDER
select ISA
select ISA_DMA
select PCI
+ select VIRT_TO_BUS
help
Say Y here if you intend to run this kernel on the Rebel.COM
NetWinder. Information about this machine can be found at:
diff --git a/arch/arm/mach-gemini/idle.c b/arch/arm/mach-gemini/idle.c
index 92bbd6bb600..87dff4f5059 100644
--- a/arch/arm/mach-gemini/idle.c
+++ b/arch/arm/mach-gemini/idle.c
@@ -13,9 +13,11 @@ static void gemini_idle(void)
* will never wakeup... Acctualy it is not very good to enable
* interrupts first since scheduler can miss a tick, but there is
* no other way around this. Platforms that needs it for power saving
- * should call enable_hlt() in init code, since by default it is
+ * should enable it in init code, since by default it is
* disabled.
*/
+
+ /* FIXME: Enabling interrupts here is racy! */
local_irq_enable();
cpu_do_idle();
}
diff --git a/arch/arm/mach-gemini/irq.c b/arch/arm/mach-gemini/irq.c
index 020852d3bdd..6d8f6d1669f 100644
--- a/arch/arm/mach-gemini/irq.c
+++ b/arch/arm/mach-gemini/irq.c
@@ -15,6 +15,8 @@
#include <linux/stddef.h>
#include <linux/list.h>
#include <linux/sched.h>
+#include <linux/cpu.h>
+
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/system_misc.h>
@@ -77,7 +79,7 @@ void __init gemini_init_irq(void)
* Disable the idle handler by default since it is buggy
* For more info see arch/arm/mach-gemini/idle.c
*/
- disable_hlt();
+ cpu_idle_poll_ctrl(true);
request_resource(&iomem_resource, &irq_resource);
diff --git a/arch/arm/mach-highbank/hotplug.c b/arch/arm/mach-highbank/hotplug.c
index f30c5284339..890cae23c12 100644
--- a/arch/arm/mach-highbank/hotplug.c
+++ b/arch/arm/mach-highbank/hotplug.c
@@ -28,13 +28,11 @@ extern void secondary_startup(void);
*/
void __ref highbank_cpu_die(unsigned int cpu)
{
- flush_cache_all();
-
highbank_set_cpu_jump(cpu, phys_to_virt(0));
- highbank_set_core_pwr();
- cpu_do_idle();
+ flush_cache_louis();
+ highbank_set_core_pwr();
- /* We should never return from idle */
- panic("highbank: cpu %d unexpectedly exit from shutdown\n", cpu);
+ while (1)
+ cpu_do_idle();
}
diff --git a/arch/arm/mach-imx/clk-busy.c b/arch/arm/mach-imx/clk-busy.c
index 1ab91b5209e..85b728cc27a 100644
--- a/arch/arm/mach-imx/clk-busy.c
+++ b/arch/arm/mach-imx/clk-busy.c
@@ -169,7 +169,7 @@ struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
busy->mux.reg = reg;
busy->mux.shift = shift;
- busy->mux.width = width;
+ busy->mux.mask = BIT(width) - 1;
busy->mux.lock = &imx_ccm_lock;
busy->mux_ops = &clk_mux_ops;
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index 74e3a34d78b..2193c834f55 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -257,6 +257,7 @@ int __init mx35_clocks_init(void)
clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0");
clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
+ clk_register_clkdev(clk[admux_gate], "audmux", NULL);
clk_prepare_enable(clk[spba_gate]);
clk_prepare_enable(clk[gpio1_gate]);
@@ -264,6 +265,8 @@ int __init mx35_clocks_init(void)
clk_prepare_enable(clk[gpio3_gate]);
clk_prepare_enable(clk[iim_gate]);
clk_prepare_enable(clk[emi_gate]);
+ clk_prepare_enable(clk[max_gate]);
+ clk_prepare_enable(clk[iomuxc_gate]);
/*
* SCC is needed to boot via mmc after a watchdog reset. The clock code
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 7b025ee528a..d38e54f5b6d 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -115,7 +115,7 @@ static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m"
static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
-static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", };
+static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
@@ -172,7 +172,7 @@ static struct clk *clk[clk_max];
static struct clk_onecell_data clk_data;
static enum mx6q_clks const clks_init_on[] __initconst = {
- mmdc_ch0_axi, rom,
+ mmdc_ch0_axi, rom, pll1_sys,
};
static struct clk_div_table clk_enet_ref_table[] = {
@@ -443,7 +443,6 @@ int __init mx6q_clocks_init(void)
clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
- clk_register_clkdev(clk[twd], NULL, "smp_twd");
clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
clk_register_clkdev(clk[ahb], "ahb", NULL);
clk_register_clkdev(clk[cko1], "cko1", NULL);
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 5a800bfcec5..5bf4a97ab24 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -110,6 +110,8 @@ void tzic_handle_irq(struct pt_regs *);
extern void imx_enable_cpu(int cpu, bool enable);
extern void imx_set_cpu_jump(int cpu, void *jump_addr);
+extern u32 imx_get_cpu_arg(int cpu);
+extern void imx_set_cpu_arg(int cpu, u32 arg);
extern void v7_cpu_resume(void);
extern u32 *pl310_get_save_ptr(void);
#ifdef CONFIG_SMP
diff --git a/arch/arm/mach-imx/headsmp.S b/arch/arm/mach-imx/headsmp.S
index 921fc155585..a58c8b0527c 100644
--- a/arch/arm/mach-imx/headsmp.S
+++ b/arch/arm/mach-imx/headsmp.S
@@ -26,16 +26,16 @@ ENDPROC(v7_secondary_startup)
#ifdef CONFIG_PM
/*
- * The following code is located into the .data section. This is to
- * allow phys_l2x0_saved_regs to be accessed with a relative load
- * as we are running on physical address here.
+ * The following code must assume it is running from physical address
+ * where absolute virtual addresses to the data section have to be
+ * turned into relative ones.
*/
- .data
- .align
#ifdef CONFIG_CACHE_L2X0
.macro pl310_resume
- ldr r2, phys_l2x0_saved_regs
+ adr r0, l2x0_saved_regs_offset
+ ldr r2, [r0]
+ add r2, r2, r0
ldr r0, [r2, #L2X0_R_PHY_BASE] @ get physical base of l2x0
ldr r1, [r2, #L2X0_R_AUX_CTRL] @ get aux_ctrl value
str r1, [r0, #L2X0_AUX_CTRL] @ restore aux_ctrl
@@ -43,9 +43,9 @@ ENDPROC(v7_secondary_startup)
str r1, [r0, #L2X0_CTRL] @ re-enable L2
.endm
- .globl phys_l2x0_saved_regs
-phys_l2x0_saved_regs:
- .long 0
+l2x0_saved_regs_offset:
+ .word l2x0_saved_regs - .
+
#else
.macro pl310_resume
.endm
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
index 7bc5fe15dda..361a253e2b6 100644
--- a/arch/arm/mach-imx/hotplug.c
+++ b/arch/arm/mach-imx/hotplug.c
@@ -46,11 +46,23 @@ static inline void cpu_enter_lowpower(void)
void imx_cpu_die(unsigned int cpu)
{
cpu_enter_lowpower();
+ /*
+ * We use the cpu jumping argument register to sync with
+ * imx_cpu_kill() which is running on cpu0 and waiting for
+ * the register being cleared to kill the cpu.
+ */
+ imx_set_cpu_arg(cpu, ~0);
cpu_do_idle();
}
int imx_cpu_kill(unsigned int cpu)
{
+ unsigned long timeout = jiffies + msecs_to_jiffies(50);
+
+ while (imx_get_cpu_arg(cpu) == 0)
+ if (time_after(jiffies, timeout))
+ return 0;
imx_enable_cpu(cpu, false);
+ imx_set_cpu_arg(cpu, 0);
return 1;
}
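
imx_cpu_die() and imx_cpu_kill() above use a spare per-cpu register as a handshake: the dying CPU writes a non-zero marker before idling, and the killing CPU spins with a 50 ms deadline until it sees that marker before cutting power, then clears it. A userspace sketch of the same pattern with an atomic flag and a wall-clock deadline; the thread and timing details here are invented for illustration.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <time.h>

    static atomic_uint cpu_arg;     /* stands in for the per-cpu "argument" reg */

    static void *dying_cpu(void *unused)
    {
        (void)unused;
        atomic_store(&cpu_arg, ~0u);        /* "I am ready to be killed" */
        return NULL;                        /* the real CPU would idle here */
    }

    /* Spin until the marker shows up or roughly 50 ms pass; 1 on success. */
    static int kill_cpu(void)
    {
        struct timespec start, now;
        clock_gettime(CLOCK_MONOTONIC, &start);

        while (atomic_load(&cpu_arg) == 0) {
            clock_gettime(CLOCK_MONOTONIC, &now);
            long ms = (now.tv_sec - start.tv_sec) * 1000 +
                      (now.tv_nsec - start.tv_nsec) / 1000000;
            if (ms > 50)
                return 0;                   /* gave up waiting */
        }
        atomic_store(&cpu_arg, 0);          /* clear for the next hotplug */
        return 1;                           /* safe to cut power */
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, dying_cpu, NULL);
        printf("kill result: %d\n", kill_cpu());
        pthread_join(&t, NULL);
        return 0;
    }
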
diff --git a/arch/arm/mach-imx/imx25-dt.c b/arch/arm/mach-imx/imx25-dt.c
index 03b65e5ea54..82348391582 100644
--- a/arch/arm/mach-imx/imx25-dt.c
+++ b/arch/arm/mach-imx/imx25-dt.c
@@ -27,6 +27,11 @@ static const char * const imx25_dt_board_compat[] __initconst = {
NULL
};
+static void __init imx25_timer_init(void)
+{
+ mx25_clocks_init_dt();
+}
+
DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)")
.map_io = mx25_map_io,
.init_early = imx25_init_early,
diff --git a/arch/arm/mach-imx/pm-imx6q.c b/arch/arm/mach-imx/pm-imx6q.c
index ee42d20cba1..5faba7a3c95 100644
--- a/arch/arm/mach-imx/pm-imx6q.c
+++ b/arch/arm/mach-imx/pm-imx6q.c
@@ -22,8 +22,6 @@
#include "common.h"
#include "hardware.h"
-extern unsigned long phys_l2x0_saved_regs;
-
static int imx6q_suspend_finish(unsigned long val)
{
cpu_do_idle();
@@ -57,18 +55,5 @@ static const struct platform_suspend_ops imx6q_pm_ops = {
void __init imx6q_pm_init(void)
{
- /*
- * The l2x0 core code provides an infrastucture to save and restore
- * l2x0 registers across suspend/resume cycle. But because imx6q
- * retains L2 content during suspend and needs to resume L2 before
- * MMU is enabled, it can only utilize register saving support and
- * have to take care of restoring on its own. So we save physical
- * address of the data structure used by l2x0 core to save registers,
- * and later restore the necessary ones in imx6q resume entry.
- */
-#ifdef CONFIG_CACHE_L2X0
- phys_l2x0_saved_regs = __pa(&l2x0_saved_regs);
-#endif
-
suspend_set_ops(&imx6q_pm_ops);
}
diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c
index e15f1555c59..09a742f8c7a 100644
--- a/arch/arm/mach-imx/src.c
+++ b/arch/arm/mach-imx/src.c
@@ -43,6 +43,18 @@ void imx_set_cpu_jump(int cpu, void *jump_addr)
src_base + SRC_GPR1 + cpu * 8);
}
+u32 imx_get_cpu_arg(int cpu)
+{
+ cpu = cpu_logical_map(cpu);
+ return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4);
+}
+
+void imx_set_cpu_arg(int cpu, u32 arg)
+{
+ cpu = cpu_logical_map(cpu);
+ writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4);
+}
+
void imx_src_prepare_restart(void)
{
u32 val;
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 1dbeb7c99d5..6600cff6bd9 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -29,6 +29,7 @@
#include <linux/io.h>
#include <linux/export.h>
#include <linux/gpio.h>
+#include <linux/cpu.h>
#include <mach/udc.h>
#include <mach/hardware.h>
@@ -239,7 +240,7 @@ void __init ixp4xx_init_irq(void)
* ixp4xx does not implement the XScale PWRMODE register
* so it must not call cpu_do_idle().
*/
- disable_hlt();
+ cpu_idle_poll_ctrl(true);
/* Route all sources to IRQ instead of FIQ */
*IXP4XX_ICLR = 0x0;
diff --git a/arch/arm/mach-ixp4xx/vulcan-setup.c b/arch/arm/mach-ixp4xx/vulcan-setup.c
index d42730a1d4a..d599e354ca5 100644
--- a/arch/arm/mach-ixp4xx/vulcan-setup.c
+++ b/arch/arm/mach-ixp4xx/vulcan-setup.c
@@ -163,6 +163,7 @@ static struct platform_device vulcan_max6369 = {
static struct w1_gpio_platform_data vulcan_w1_gpio_pdata = {
.pin = 14,
+ .ext_pullup_enable_pin = -EINVAL,
};
static struct platform_device vulcan_w1_gpio = {
diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
index 2e73e9d53f7..d367aa6b47b 100644
--- a/arch/arm/mach-kirkwood/board-dt.c
+++ b/arch/arm/mach-kirkwood/board-dt.c
@@ -41,16 +41,12 @@ static void __init kirkwood_legacy_clk_init(void)
struct device_node *np = of_find_compatible_node(
NULL, NULL, "marvell,kirkwood-gating-clock");
-
struct of_phandle_args clkspec;
+ struct clk *clk;
clkspec.np = np;
clkspec.args_count = 1;
- clkspec.args[0] = CGC_BIT_GE0;
- orion_clkdev_add(NULL, "mv643xx_eth_port.0",
- of_clk_get_from_provider(&clkspec));
-
clkspec.args[0] = CGC_BIT_PEX0;
orion_clkdev_add("0", "pcie",
of_clk_get_from_provider(&clkspec));
@@ -59,9 +55,24 @@ static void __init kirkwood_legacy_clk_init(void)
orion_clkdev_add("1", "pcie",
of_clk_get_from_provider(&clkspec));
- clkspec.args[0] = CGC_BIT_GE1;
- orion_clkdev_add(NULL, "mv643xx_eth_port.1",
+ clkspec.args[0] = CGC_BIT_SDIO;
+ orion_clkdev_add(NULL, "mvsdio",
of_clk_get_from_provider(&clkspec));
+
+ /*
+ * The ethernet interfaces forget the MAC address assigned by
+ * u-boot if the clocks are turned off. Until proper DT support
+ * is available we always enable them for now.
+ */
+ clkspec.args[0] = CGC_BIT_GE0;
+ clk = of_clk_get_from_provider(&clkspec);
+ orion_clkdev_add(NULL, "mv643xx_eth_port.0", clk);
+ clk_prepare_enable(clk);
+
+ clkspec.args[0] = CGC_BIT_GE1;
+ clk = of_clk_get_from_provider(&clkspec);
+ orion_clkdev_add(NULL, "mv643xx_eth_port.1", clk);
+ clk_prepare_enable(clk);
}
static void __init kirkwood_of_clk_init(void)
diff --git a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
index f655b2637b0..e5f70415905 100644
--- a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
+++ b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
@@ -20,10 +20,15 @@ static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = {
.duplex = DUPLEX_FULL,
};
+static struct mv643xx_eth_platform_data iomega_ix2_200_ge01_data = {
+ .phy_addr = MV643XX_ETH_PHY_ADDR(11),
+};
+
void __init iomega_ix2_200_init(void)
{
/*
* Basic setup. Needs to be called early.
*/
- kirkwood_ge01_init(&iomega_ix2_200_ge00_data);
+ kirkwood_ge00_init(&iomega_ix2_200_ge00_data);
+ kirkwood_ge01_init(&iomega_ix2_200_ge01_data);
}
diff --git a/arch/arm/mach-kirkwood/guruplug-setup.c b/arch/arm/mach-kirkwood/guruplug-setup.c
index 1c6e736cbbf..08dd739aa70 100644
--- a/arch/arm/mach-kirkwood/guruplug-setup.c
+++ b/arch/arm/mach-kirkwood/guruplug-setup.c
@@ -53,6 +53,8 @@ static struct mv_sata_platform_data guruplug_sata_data = {
static struct mvsdio_platform_data guruplug_mvsdio_data = {
/* unfortunately the CD signal has not been connected */
+ .gpio_card_detect = -1,
+ .gpio_write_protect = -1,
};
static struct gpio_led guruplug_led_pins[] = {
diff --git a/arch/arm/mach-kirkwood/openrd-setup.c b/arch/arm/mach-kirkwood/openrd-setup.c
index 8ddd69fdc93..6a6eb548307 100644
--- a/arch/arm/mach-kirkwood/openrd-setup.c
+++ b/arch/arm/mach-kirkwood/openrd-setup.c
@@ -55,6 +55,7 @@ static struct mv_sata_platform_data openrd_sata_data = {
static struct mvsdio_platform_data openrd_mvsdio_data = {
.gpio_card_detect = 29, /* MPP29 used as SD card detect */
+ .gpio_write_protect = -1,
};
static unsigned int openrd_mpp_config[] __initdata = {
diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c
index c7d93b48926..d24223166e0 100644
--- a/arch/arm/mach-kirkwood/rd88f6281-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c
@@ -69,6 +69,7 @@ static struct mv_sata_platform_data rd88f6281_sata_data = {
static struct mvsdio_platform_data rd88f6281_mvsdio_data = {
.gpio_card_detect = 28,
+ .gpio_write_protect = -1,
};
static unsigned int rd88f6281_mpp_config[] __initdata = {
diff --git a/arch/arm/mach-mmp/aspenite.c b/arch/arm/mach-mmp/aspenite.c
index 9f64d5632e0..76901f4ce61 100644
--- a/arch/arm/mach-mmp/aspenite.c
+++ b/arch/arm/mach-mmp/aspenite.c
@@ -223,13 +223,7 @@ static struct pxa27x_keypad_platform_data aspenite_keypad_info __initdata = {
};
#if defined(CONFIG_USB_EHCI_MV)
-static char *pxa168_sph_clock_name[] = {
- [0] = "PXA168-USBCLK",
-};
-
static struct mv_usb_platform_data pxa168_sph_pdata = {
- .clknum = 1,
- .clkname = pxa168_sph_clock_name,
.mode = MV_USB_MODE_HOST,
.phy_init = pxa_usb_phy_init,
.phy_deinit = pxa_usb_phy_deinit,
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c
index d1e2d595e79..f62b68d926f 100644
--- a/arch/arm/mach-mmp/gplugd.c
+++ b/arch/arm/mach-mmp/gplugd.c
@@ -9,6 +9,7 @@
*/
#include <linux/init.h>
+#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-mmp/ttc_dkb.c b/arch/arm/mach-mmp/ttc_dkb.c
index 22a9058f9f4..6528a5fa6a2 100644
--- a/arch/arm/mach-mmp/ttc_dkb.c
+++ b/arch/arm/mach-mmp/ttc_dkb.c
@@ -162,13 +162,7 @@ static struct i2c_board_info ttc_dkb_i2c_info[] = {
#ifdef CONFIG_USB_SUPPORT
#if defined(CONFIG_USB_MV_UDC) || defined(CONFIG_USB_EHCI_MV_U2O)
-static char *pxa910_usb_clock_name[] = {
- [0] = "U2OCLK",
-};
-
static struct mv_usb_platform_data ttc_usb_pdata = {
- .clknum = 1,
- .clkname = pxa910_usb_clock_name,
.vbus = NULL,
.mode = MV_USB_MODE_OTG,
.otg_force_a_bus_req = 1,
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 2969027f02f..f9fd77e8f1f 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -62,7 +62,10 @@ static int msm_timer_set_next_event(unsigned long cycles,
{
u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
- writel_relaxed(0, event_base + TIMER_CLEAR);
+ ctrl &= ~TIMER_ENABLE_EN;
+ writel_relaxed(ctrl, event_base + TIMER_ENABLE);
+
+ writel_relaxed(ctrl, event_base + TIMER_CLEAR);
writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);
writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
return 0;
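
msm_timer_set_next_event() above now disables the timer before clearing the counter and programming the match value, then re-enables it, apparently so the new match takes effect while the counter is stopped rather than racing against a running count. The sequence against a mocked register block (struct fields and names below are invented) looks like:

    #include <stdint.h>
    #include <stdio.h>

    /* Mock register block; only the fields needed for the sequence. */
    struct mock_timer {
        uint32_t enable;    /* bit 0: EN */
        uint32_t count;
        uint32_t match;
    };

    #define TIMER_ENABLE_EN 0x1u

    static void set_next_event(struct mock_timer *t, uint32_t cycles)
    {
        uint32_t ctrl = t->enable;

        ctrl &= ~TIMER_ENABLE_EN;
        t->enable = ctrl;                     /* 1. stop the timer        */
        t->count = 0;                         /* 2. clear the count       */
        t->match = cycles;                    /* 3. program the new match */
        t->enable = ctrl | TIMER_ENABLE_EN;   /* 4. restart               */
    }

    int main(void)
    {
        struct mock_timer t = { .enable = TIMER_ENABLE_EN, .count = 12345 };
        set_next_event(&t, 5000);
        printf("enable=%u count=%u match=%u\n", t.enable, t.count, t.match);
        return 0;
    }
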
diff --git a/arch/arm/mach-mvebu/irq-armada-370-xp.c b/arch/arm/mach-mvebu/irq-armada-370-xp.c
index 274ff58271d..d5970f5a1e8 100644
--- a/arch/arm/mach-mvebu/irq-armada-370-xp.c
+++ b/arch/arm/mach-mvebu/irq-armada-370-xp.c
@@ -44,6 +44,8 @@
#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28)
+#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5)
+
#define ACTIVE_DOORBELLS (8)
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
@@ -59,36 +61,26 @@ static struct irq_domain *armada_370_xp_mpic_domain;
*/
static void armada_370_xp_irq_mask(struct irq_data *d)
{
-#ifdef CONFIG_SMP
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
+ if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
writel(hwirq, main_int_base +
ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
else
writel(hwirq, per_cpu_int_base +
ARMADA_370_XP_INT_SET_MASK_OFFS);
-#else
- writel(irqd_to_hwirq(d),
- per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
-#endif
}
static void armada_370_xp_irq_unmask(struct irq_data *d)
{
-#ifdef CONFIG_SMP
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
+ if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
writel(hwirq, main_int_base +
ARMADA_370_XP_INT_SET_ENABLE_OFFS);
else
writel(hwirq, per_cpu_int_base +
ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
-#else
- writel(irqd_to_hwirq(d),
- per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
-#endif
}
#ifdef CONFIG_SMP
@@ -144,10 +136,14 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
unsigned int virq, irq_hw_number_t hw)
{
armada_370_xp_irq_mask(irq_get_irq_data(virq));
- writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+ if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
+ writel(hw, per_cpu_int_base +
+ ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ else
+ writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
irq_set_status_flags(virq, IRQ_LEVEL);
- if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) {
+ if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
irq_set_percpu_devid(virq);
irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
handle_percpu_devid_irq);
diff --git a/arch/arm/mach-mxs/icoll.c b/arch/arm/mach-mxs/icoll.c
index 8fb23af154b..e26eeba4659 100644
--- a/arch/arm/mach-mxs/icoll.c
+++ b/arch/arm/mach-mxs/icoll.c
@@ -100,7 +100,7 @@ static struct irq_domain_ops icoll_irq_domain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-void __init icoll_of_init(struct device_node *np,
+static void __init icoll_of_init(struct device_node *np,
struct device_node *interrupt_parent)
{
/*
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index 05218671334..e7b781d3788 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -41,8 +41,6 @@ static struct fb_videomode mx23evk_video_modes[] = {
.lower_margin = 4,
.hsync_len = 1,
.vsync_len = 1,
- .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT |
- FB_SYNC_DOTCLK_FAILING_ACT,
},
};
@@ -59,8 +57,6 @@ static struct fb_videomode mx28evk_video_modes[] = {
.lower_margin = 10,
.hsync_len = 10,
.vsync_len = 10,
- .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT |
- FB_SYNC_DOTCLK_FAILING_ACT,
},
};
@@ -77,7 +73,6 @@ static struct fb_videomode m28evk_video_modes[] = {
.lower_margin = 45,
.hsync_len = 1,
.vsync_len = 1,
- .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT,
},
};
@@ -94,9 +89,7 @@ static struct fb_videomode apx4devkit_video_modes[] = {
.lower_margin = 13,
.hsync_len = 48,
.vsync_len = 3,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT |
- FB_SYNC_DATA_ENABLE_HIGH_ACT |
- FB_SYNC_DOTCLK_FAILING_ACT,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
},
};
@@ -113,9 +106,7 @@ static struct fb_videomode apf28dev_video_modes[] = {
.lower_margin = 0x15,
.hsync_len = 64,
.vsync_len = 4,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT |
- FB_SYNC_DATA_ENABLE_HIGH_ACT |
- FB_SYNC_DOTCLK_FAILING_ACT,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
},
};
@@ -132,7 +123,6 @@ static struct fb_videomode cfa10049_video_modes[] = {
.lower_margin = 2,
.hsync_len = 15,
.vsync_len = 15,
- .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT
},
};
@@ -259,6 +249,8 @@ static void __init imx23_evk_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes);
mxsfb_pdata.default_bpp = 32;
mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+ mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+ MXSFB_SYNC_DOTCLK_FAILING_ACT;
}
static inline void enable_clk_enet_out(void)
@@ -278,6 +270,8 @@ static void __init imx28_evk_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes);
mxsfb_pdata.default_bpp = 32;
mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+ mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+ MXSFB_SYNC_DOTCLK_FAILING_ACT;
mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0);
}
@@ -297,6 +291,7 @@ static void __init m28evk_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes);
mxsfb_pdata.default_bpp = 16;
mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
+ mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
}
static void __init sc_sps1_init(void)
@@ -322,6 +317,8 @@ static void __init apx4devkit_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes);
mxsfb_pdata.default_bpp = 32;
mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+ mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+ MXSFB_SYNC_DOTCLK_FAILING_ACT;
}
#define ENET0_MDC__GPIO_4_0 MXS_GPIO_NR(4, 0)
@@ -402,17 +399,18 @@ static void __init cfa10049_init(void)
{
enable_clk_enet_out();
update_fec_mac_prop(OUI_CRYSTALFONTZ);
+
+ mxsfb_pdata.mode_list = cfa10049_video_modes;
+ mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes);
+ mxsfb_pdata.default_bpp = 32;
+ mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
+ mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
}
static void __init cfa10037_init(void)
{
enable_clk_enet_out();
update_fec_mac_prop(OUI_CRYSTALFONTZ);
-
- mxsfb_pdata.mode_list = cfa10049_video_modes;
- mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes);
- mxsfb_pdata.default_bpp = 32;
- mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
}
static void __init apf28_init(void)
@@ -423,6 +421,8 @@ static void __init apf28_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes);
mxsfb_pdata.default_bpp = 16;
mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT;
+ mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+ MXSFB_SYNC_DOTCLK_FAILING_ACT;
}
static void __init mxs_machine_init(void)
diff --git a/arch/arm/mach-mxs/mm.c b/arch/arm/mach-mxs/mm.c
index a4294aa9f30..e63b7d87acb 100644
--- a/arch/arm/mach-mxs/mm.c
+++ b/arch/arm/mach-mxs/mm.c
@@ -18,6 +18,7 @@
#include <mach/mx23.h>
#include <mach/mx28.h>
+#include <mach/common.h>
/*
* Define the MX23 memory map.
diff --git a/arch/arm/mach-mxs/ocotp.c b/arch/arm/mach-mxs/ocotp.c
index 54add60f94c..1dff4670375 100644
--- a/arch/arm/mach-mxs/ocotp.c
+++ b/arch/arm/mach-mxs/ocotp.c
@@ -19,6 +19,7 @@
#include <asm/processor.h> /* for cpu_relax() */
#include <mach/mxs.h>
+#include <mach/common.h>
#define OCOTP_WORD_OFFSET 0x20
#define OCOTP_WORD_COUNT 0x20
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c
index 27c2cb7ab81..1504b68f4c6 100644
--- a/arch/arm/mach-netx/generic.c
+++ b/arch/arm/mach-netx/generic.c
@@ -168,7 +168,7 @@ void __init netx_init_irq(void)
{
int irq;
- vic_init(io_p2v(NETX_PA_VIC), 0, ~0, 0);
+ vic_init(io_p2v(NETX_PA_VIC), NETX_IRQ_VIC_START, ~0, 0);
for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) {
irq_set_chip_and_handler(irq, &netx_hif_chip,
diff --git a/arch/arm/mach-netx/include/mach/irqs.h b/arch/arm/mach-netx/include/mach/irqs.h
index 6ce914d54a3..8f74a844a77 100644
--- a/arch/arm/mach-netx/include/mach/irqs.h
+++ b/arch/arm/mach-netx/include/mach/irqs.h
@@ -17,42 +17,42 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define NETX_IRQ_VIC_START 0
-#define NETX_IRQ_SOFTINT 0
-#define NETX_IRQ_TIMER0 1
-#define NETX_IRQ_TIMER1 2
-#define NETX_IRQ_TIMER2 3
-#define NETX_IRQ_SYSTIME_NS 4
-#define NETX_IRQ_SYSTIME_S 5
-#define NETX_IRQ_GPIO_15 6
-#define NETX_IRQ_WATCHDOG 7
-#define NETX_IRQ_UART0 8
-#define NETX_IRQ_UART1 9
-#define NETX_IRQ_UART2 10
-#define NETX_IRQ_USB 11
-#define NETX_IRQ_SPI 12
-#define NETX_IRQ_I2C 13
-#define NETX_IRQ_LCD 14
-#define NETX_IRQ_HIF 15
-#define NETX_IRQ_GPIO_0_14 16
-#define NETX_IRQ_XPEC0 17
-#define NETX_IRQ_XPEC1 18
-#define NETX_IRQ_XPEC2 19
-#define NETX_IRQ_XPEC3 20
-#define NETX_IRQ_XPEC(no) (17 + (no))
-#define NETX_IRQ_MSYNC0 21
-#define NETX_IRQ_MSYNC1 22
-#define NETX_IRQ_MSYNC2 23
-#define NETX_IRQ_MSYNC3 24
-#define NETX_IRQ_IRQ_PHY 25
-#define NETX_IRQ_ISO_AREA 26
+#define NETX_IRQ_VIC_START 64
+#define NETX_IRQ_SOFTINT (NETX_IRQ_VIC_START + 0)
+#define NETX_IRQ_TIMER0 (NETX_IRQ_VIC_START + 1)
+#define NETX_IRQ_TIMER1 (NETX_IRQ_VIC_START + 2)
+#define NETX_IRQ_TIMER2 (NETX_IRQ_VIC_START + 3)
+#define NETX_IRQ_SYSTIME_NS (NETX_IRQ_VIC_START + 4)
+#define NETX_IRQ_SYSTIME_S (NETX_IRQ_VIC_START + 5)
+#define NETX_IRQ_GPIO_15 (NETX_IRQ_VIC_START + 6)
+#define NETX_IRQ_WATCHDOG (NETX_IRQ_VIC_START + 7)
+#define NETX_IRQ_UART0 (NETX_IRQ_VIC_START + 8)
+#define NETX_IRQ_UART1 (NETX_IRQ_VIC_START + 9)
+#define NETX_IRQ_UART2 (NETX_IRQ_VIC_START + 10)
+#define NETX_IRQ_USB (NETX_IRQ_VIC_START + 11)
+#define NETX_IRQ_SPI (NETX_IRQ_VIC_START + 12)
+#define NETX_IRQ_I2C (NETX_IRQ_VIC_START + 13)
+#define NETX_IRQ_LCD (NETX_IRQ_VIC_START + 14)
+#define NETX_IRQ_HIF (NETX_IRQ_VIC_START + 15)
+#define NETX_IRQ_GPIO_0_14 (NETX_IRQ_VIC_START + 16)
+#define NETX_IRQ_XPEC0 (NETX_IRQ_VIC_START + 17)
+#define NETX_IRQ_XPEC1 (NETX_IRQ_VIC_START + 18)
+#define NETX_IRQ_XPEC2 (NETX_IRQ_VIC_START + 19)
+#define NETX_IRQ_XPEC3 (NETX_IRQ_VIC_START + 20)
+#define NETX_IRQ_XPEC(no) (NETX_IRQ_VIC_START + 17 + (no))
+#define NETX_IRQ_MSYNC0 (NETX_IRQ_VIC_START + 21)
+#define NETX_IRQ_MSYNC1 (NETX_IRQ_VIC_START + 22)
+#define NETX_IRQ_MSYNC2 (NETX_IRQ_VIC_START + 23)
+#define NETX_IRQ_MSYNC3 (NETX_IRQ_VIC_START + 24)
+#define NETX_IRQ_IRQ_PHY (NETX_IRQ_VIC_START + 25)
+#define NETX_IRQ_ISO_AREA (NETX_IRQ_VIC_START + 26)
/* int 27 is reserved */
/* int 28 is reserved */
-#define NETX_IRQ_TIMER3 29
-#define NETX_IRQ_TIMER4 30
+#define NETX_IRQ_TIMER3 (NETX_IRQ_VIC_START + 29)
+#define NETX_IRQ_TIMER4 (NETX_IRQ_VIC_START + 30)
/* int 31 is reserved */
-#define NETX_IRQS 32
+#define NETX_IRQS (NETX_IRQ_VIC_START + 32)
/* for multiplexed irqs on gpio 0..14 */
#define NETX_IRQ_GPIO(x) (NETX_IRQS + (x))
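
The header rewrite above shifts every VIC interrupt by NETX_IRQ_VIC_START = 64, so hardware IRQ n on the VIC maps to Linux IRQ 64 + n, presumably keeping IRQ 0 (which Linux treats as "no interrupt") away from real devices. The mapping is simple base arithmetic in both directions, as in this sketch; the constants are copied from the header, the function names are invented.

    #include <stdio.h>

    #define VIC_START   64      /* same idea as NETX_IRQ_VIC_START */
    #define VIC_NR_IRQS 32

    /* hardware IRQ number on the VIC -> Linux IRQ number */
    static int hwirq_to_irq(int hwirq)
    {
        return VIC_START + hwirq;
    }

    /* Linux IRQ number -> hardware IRQ number, or -1 if not a VIC IRQ */
    static int irq_to_hwirq(int irq)
    {
        if (irq < VIC_START || irq >= VIC_START + VIC_NR_IRQS)
            return -1;
        return irq - VIC_START;
    }

    int main(void)
    {
        printf("TIMER0 (hw 1) -> linux %d\n", hwirq_to_irq(1));
        printf("linux 95      -> hw %d\n", irq_to_hwirq(95));
        printf("linux 3       -> hw %d\n", irq_to_hwirq(3));
        return 0;
    }
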
diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c
index cb7c6ae2e3f..6c4f766365a 100644
--- a/arch/arm/mach-omap1/clock_data.c
+++ b/arch/arm/mach-omap1/clock_data.c
@@ -543,15 +543,6 @@ static struct clk usb_dc_ck = {
/* Direct from ULPD, no parent */
.rate = 48000000,
.enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
- .enable_bit = USB_REQ_EN_SHIFT,
-};
-
-static struct clk usb_dc_ck7xx = {
- .name = "usb_dc_ck",
- .ops = &clkops_generic,
- /* Direct from ULPD, no parent */
- .rate = 48000000,
- .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
.enable_bit = SOFT_USB_OTG_DPLL_REQ_SHIFT,
};
@@ -727,8 +718,7 @@ static struct omap_clk omap_clks[] = {
CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310),
CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX),
- CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX),
- CLK(NULL, "usb_dc_ck", &usb_dc_ck7xx, CK_7XX),
+ CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX | CK_7XX),
CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310),
CLK(NULL, "mclk", &mclk_16xx, CK_16XX),
CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310),
diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h
index fb18831e88a..14f7e992047 100644
--- a/arch/arm/mach-omap1/common.h
+++ b/arch/arm/mach-omap1/common.h
@@ -31,6 +31,8 @@
#include <plat/i2c.h>
+#include <mach/irqs.h>
+
#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
void omap7xx_map_io(void);
#else
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 7a7690ab6cb..db37f49da5a 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -43,6 +43,7 @@
#include <linux/module.h>
#include <linux/io.h>
#include <linux/atomic.h>
+#include <linux/cpu.h>
#include <asm/fncpy.h>
#include <asm/system_misc.h>
@@ -584,8 +585,7 @@ static void omap_pm_init_proc(void)
static int omap_pm_prepare(void)
{
/* We cannot sleep in idle until we have resumed */
- disable_hlt();
-
+ cpu_idle_poll_ctrl(true);
return 0;
}
@@ -621,7 +621,7 @@ static int omap_pm_enter(suspend_state_t state)
static void omap_pm_finish(void)
{
- enable_hlt();
+ cpu_idle_poll_ctrl(false);
}
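Editor's note: disable_hlt()/enable_hlt() were removed from the core; the replacement used throughout this merge is cpu_idle_poll_ctrl(true/false), which forces the idle loop to poll instead of entering low-power states while a suspend transition is in flight. The calls must be strictly paired, as in this sketch; the function names are illustrative, the wiring mirrors the hunk above:

#include <linux/cpu.h>

static int my_pm_prepare(void)
{
	/* keep CPUs out of low-power idle until resume has completed */
	cpu_idle_poll_ctrl(true);
	return 0;
}

static void my_pm_finish(void)
{
	/* re-enable normal idle states; must balance the prepare call */
	cpu_idle_poll_ctrl(false);
}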
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 49ac3dfebef..8111cd9ff3e 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -311,9 +311,6 @@ config MACH_OMAP_ZOOM2
default y
select OMAP_PACKAGE_CBB
select REGULATOR_FIXED_VOLTAGE if REGULATOR
- select SERIAL_8250
- select SERIAL_8250_CONSOLE
- select SERIAL_CORE_CONSOLE
config MACH_OMAP_ZOOM3
bool "OMAP3630 Zoom3 board"
@@ -321,9 +318,6 @@ config MACH_OMAP_ZOOM3
default y
select OMAP_PACKAGE_CBP
select REGULATOR_FIXED_VOLTAGE if REGULATOR
- select SERIAL_8250
- select SERIAL_8250_CONSOLE
- select SERIAL_CORE_CONSOLE
config MACH_CM_T35
bool "CompuLab CM-T35/CM-T3730 modules"
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 0274ff7a2a2..e54a4806019 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -102,6 +102,7 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
.init_irq = omap_intc_of_init,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap_generic_init,
+ .init_late = omap3_init_late,
.init_time = omap3_sync32k_timer_init,
.dt_compat = omap3_boards_compat,
.restart = omap3xxx_restart,
@@ -119,6 +120,7 @@ DT_MACHINE_START(OMAP3_GP_DT, "Generic OMAP3-GP (Flattened Device Tree)")
.init_irq = omap_intc_of_init,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap_generic_init,
+ .init_late = omap3_init_late,
.init_time = omap3_secure_sync32k_timer_init,
.dt_compat = omap3_gp_boards_compat,
.restart = omap3xxx_restart,
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index f7c4616cbb6..d2ea68ea678 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -17,6 +17,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/leds.h>
+#include <linux/usb/phy.h>
#include <linux/usb/musb.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
@@ -98,6 +99,7 @@ static void __init rx51_init(void)
sdrc_params = nokia_get_sdram_timings();
omap_sdrc_init(sdrc_params, sdrc_params);
+ usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(&musb_board_data);
rx51_peripherals_init();
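Editor's note: rx51 now binds the TWL4030 transceiver to the MUSB controller before registering it, so the USB PHY layer (usb_bind_phy(), declared in the newly included linux/usb/phy.h) can hand the controller its PHY at probe time. A minimal sketch of the call order; the device names are the ones used in the hunk, the surrounding function is hypothetical:

#include <linux/usb/phy.h>

static void __init my_board_usb_init(void)
{
	/* associate controller "musb-hdrc.0.auto" (PHY index 0) with the
	 * "twl4030_usb" transceiver before the controller is registered */
	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");

	/* ... then register the MUSB controller, e.g. usb_musb_init() ... */
}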
diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c
index 3d58f335f17..0c6834ae1fc 100644
--- a/arch/arm/mach-omap2/cclock44xx_data.c
+++ b/arch/arm/mach-omap2/cclock44xx_data.c
@@ -52,6 +52,13 @@
*/
#define OMAP4_DPLL_ABE_DEFFREQ 98304000
+/*
+ * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section
+ * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred
+ * locked frequency for the USB DPLL is 960MHz.
+ */
+#define OMAP4_DPLL_USB_DEFFREQ 960000000
+
/* Root clocks */
DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0);
@@ -1011,6 +1018,10 @@ DEFINE_CLK_OMAP_MUX(hsmmc2_fclk, "l3_init_clkdm", hsmmc1_fclk_sel,
OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK,
hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops);
+DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0,
+ OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
+ OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL);
+
DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0,
OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL,
OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
@@ -1538,6 +1549,7 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "per_mcbsp4_gfclk", &per_mcbsp4_gfclk, CK_443X),
CLK(NULL, "hsmmc1_fclk", &hsmmc1_fclk, CK_443X),
CLK(NULL, "hsmmc2_fclk", &hsmmc2_fclk, CK_443X),
+ CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m, CK_443X),
CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X),
CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X),
CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X),
@@ -1705,5 +1717,13 @@ int __init omap4xxx_clk_init(void)
if (rc)
pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+ /*
+ * Lock USB DPLL on OMAP4 devices so that the L3INIT power
+ * domain can transition to retention state when not in use.
+ */
+ rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure USB DPLL!\n", __func__);
+
return 0;
}
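Editor's note: besides the ABE DPLL, omap4xxx_clk_init() now also programs the USB DPLL to its preferred 960 MHz rate so the L3INIT power domain can reach retention when USB is idle. The pattern is simply clk_set_rate() plus an error report; a hedged standalone sketch using the common clk consumer API (the OMAP code above operates on the struct clk directly):

#include <linux/clk.h>
#include <linux/printk.h>

#define MY_DPLL_USB_DEFFREQ	960000000	/* 960 MHz, per OMAP4430 TRM */

static int my_lock_usb_dpll(struct clk *dpll_usb_ck)
{
	int rc;

	rc = clk_set_rate(dpll_usb_ck, MY_DPLL_USB_DEFFREQ);
	if (rc)
		pr_err("%s: failed to configure USB DPLL!\n", __func__);

	return rc;
}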
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 0a6b9c7a63d..d6ba13e1c54 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -108,7 +108,6 @@ void omap35xx_init_late(void);
void omap3630_init_late(void);
void am35xx_init_late(void);
void ti81xx_init_late(void);
-void omap4430_init_late(void);
int omap2_common_pm_late_init(void);
#if defined(CONFIG_SOC_OMAP2420) || defined(CONFIG_SOC_OMAP2430)
@@ -294,5 +293,8 @@ extern void omap_reserve(void);
struct omap_hwmod;
extern int omap_dss_reset(struct omap_hwmod *);
+/* SoC specific clock initializer */
+extern int (*omap_clk_init)(void);
+
#endif /* __ASSEMBLER__ */
#endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index e4b16c8efe8..410e1bac781 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1122,9 +1122,6 @@ int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
/* TODO: remove, see function definition */
gpmc_convert_ps_to_ns(gpmc_t);
- /* Now the GPMC is initialised, unreserve the chip-selects */
- gpmc_cs_map = 0;
-
return 0;
}
@@ -1383,6 +1380,9 @@ static int gpmc_probe(struct platform_device *pdev)
if (IS_ERR_VALUE(gpmc_setup_irq()))
dev_warn(gpmc_dev, "gpmc_setup_irq failed\n");
+ /* Now the GPMC is initialised, unreserve the chip-selects */
+ gpmc_cs_map = 0;
+
rc = gpmc_probe_dt(pdev);
if (rc < 0) {
clk_disable_unprepare(gpmc_l3_clk);
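Editor's note: the chip-select bitmap is now unreserved in gpmc_probe() rather than in gpmc_calc_timings(), i.e. only once the controller is actually up; clearing it from a timing-calculation helper allowed chip-selects to be handed out before the GPMC was ready. A sketch of the gating idea with hypothetical my_gpmc_* names:

#include <linux/errno.h>

/* Sketch: chip-selects stay reserved (all bits set) until probe clears
 * the map, after which requests may succeed. */
static unsigned long my_gpmc_cs_map = ~0UL;

static int my_gpmc_request_cs(int cs)
{
	if (my_gpmc_cs_map & (1UL << cs))
		return -EBUSY;		/* still reserved or already taken */
	my_gpmc_cs_map |= 1UL << cs;
	return 0;
}

static int my_gpmc_probe(void)
{
	/* ... map registers, enable clocks, set up the IRQ ... */
	my_gpmc_cs_map = 0;		/* now the GPMC is initialised */
	return 0;
}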
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 2c3fdd65387..5c445ca1e27 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -55,6 +55,12 @@
#include "prm44xx.h"
/*
+ * omap_clk_init: points to a function that does the SoC-specific
+ * clock initializations
+ */
+int (*omap_clk_init)(void);
+
+/*
* The machine specific code may provide the extra mapping besides the
* default mapping provided here.
*/
@@ -397,7 +403,7 @@ void __init omap2420_init_early(void)
omap242x_clockdomains_init();
omap2420_hwmod_init();
omap_hwmod_init_postsetup();
- omap2420_clk_init();
+ omap_clk_init = omap2420_clk_init;
}
void __init omap2420_init_late(void)
@@ -427,7 +433,7 @@ void __init omap2430_init_early(void)
omap243x_clockdomains_init();
omap2430_hwmod_init();
omap_hwmod_init_postsetup();
- omap2430_clk_init();
+ omap_clk_init = omap2430_clk_init;
}
void __init omap2430_init_late(void)
@@ -462,7 +468,7 @@ void __init omap3_init_early(void)
omap3xxx_clockdomains_init();
omap3xxx_hwmod_init();
omap_hwmod_init_postsetup();
- omap3xxx_clk_init();
+ omap_clk_init = omap3xxx_clk_init;
}
void __init omap3430_init_early(void)
@@ -500,7 +506,7 @@ void __init ti81xx_init_early(void)
omap3xxx_clockdomains_init();
omap3xxx_hwmod_init();
omap_hwmod_init_postsetup();
- omap3xxx_clk_init();
+ omap_clk_init = omap3xxx_clk_init;
}
void __init omap3_init_late(void)
@@ -568,7 +574,7 @@ void __init am33xx_init_early(void)
am33xx_clockdomains_init();
am33xx_hwmod_init();
omap_hwmod_init_postsetup();
- am33xx_clk_init();
+ omap_clk_init = am33xx_clk_init;
}
#endif
@@ -593,7 +599,7 @@ void __init omap4430_init_early(void)
omap44xx_clockdomains_init();
omap44xx_hwmod_init();
omap_hwmod_init_postsetup();
- omap4xxx_clk_init();
+ omap_clk_init = omap4xxx_clk_init;
}
void __init omap4430_init_late(void)
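Editor's note: instead of running the SoC-specific clock init directly from each *_init_early() function, the patch stores it in the omap_clk_init function pointer and lets the timer init (see the timer.c hunk further down) invoke it, deferring clock registration until timer setup time. The shape of the pattern, with hypothetical names:

/* Hedged sketch of the deferred-init pattern used above. */
static int (*my_clk_init)(void);

static int my_soc_a_clk_init(void)
{
	/* register SoC-A clocks */
	return 0;
}

static void my_soc_a_init_early(void)
{
	/* remember which init to run, but do not run it yet */
	my_clk_init = my_soc_a_clk_init;
}

static void my_timer_init(void)
{
	if (my_clk_init)
		my_clk_init();	/* clocks come up just before the timers */
	/* ... clockevent / clocksource setup ... */
}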
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 6a217c98db5..f82cf878d6a 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -211,8 +211,6 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
return -EINVAL;
}
- pr_err("%s: Could not find signal %s\n", __func__, muxname);
-
return -ENODEV;
}
@@ -234,6 +232,8 @@ int __init omap_mux_get_by_name(const char *muxname,
return mux_mode;
}
+ pr_err("%s: Could not find signal %s\n", __func__, muxname);
+
return -ENODEV;
}
@@ -739,8 +739,9 @@ static void __init omap_mux_dbg_create_entry(
list_for_each_entry(e, &partition->muxmodes, node) {
struct omap_mux *m = &e->mux;
- (void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir,
- m, &omap_mux_dbg_signal_fops);
+ (void)debugfs_create_file(m->muxnames[0], S_IWUSR | S_IRUGO,
+ mux_dbg_dir, m,
+ &omap_mux_dbg_signal_fops);
}
}
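Editor's note: the mux signal files were created write-only (S_IWUSR); adding S_IRUGO makes them readable as well, matching debugfs files whose fops implement both read and write. A minimal sketch of the call; the file name, directory and fops argument are assumptions, not the mux.c names:

#include <linux/debugfs.h>
#include <linux/stat.h>

static void my_create_mux_dbg_file(struct dentry *dir, void *mux,
				   const struct file_operations *fops)
{
	/* readable by everyone, writable by the owner (0644-style mode) */
	debugfs_create_file("signal", S_IWUSR | S_IRUGO, dir, mux, fops);
}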
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index c2c798c08c2..e512253601c 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -138,6 +138,7 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
+#include <linux/cpu.h>
#include <asm/system_misc.h>
@@ -1368,7 +1369,9 @@ static void _enable_sysc(struct omap_hwmod *oh)
}
if (sf & SYSC_HAS_MIDLEMODE) {
- if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
+ if (oh->flags & HWMOD_FORCE_MSTANDBY) {
+ idlemode = HWMOD_IDLEMODE_FORCE;
+ } else if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
idlemode = HWMOD_IDLEMODE_NO;
} else {
if (sf & SYSC_HAS_ENAWAKEUP)
@@ -1440,7 +1443,8 @@ static void _idle_sysc(struct omap_hwmod *oh)
}
if (sf & SYSC_HAS_MIDLEMODE) {
- if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
+ if ((oh->flags & HWMOD_SWSUP_MSTANDBY) ||
+ (oh->flags & HWMOD_FORCE_MSTANDBY)) {
idlemode = HWMOD_IDLEMODE_FORCE;
} else {
if (sf & SYSC_HAS_ENAWAKEUP)
@@ -2154,7 +2158,7 @@ static int _enable(struct omap_hwmod *oh)
if (soc_ops.enable_module)
soc_ops.enable_module(oh);
if (oh->flags & HWMOD_BLOCK_WFI)
- disable_hlt();
+ cpu_idle_poll_ctrl(true);
if (soc_ops.update_context_lost)
soc_ops.update_context_lost(oh);
@@ -2218,7 +2222,7 @@ static int _idle(struct omap_hwmod *oh)
_del_initiator_dep(oh, mpu_oh);
if (oh->flags & HWMOD_BLOCK_WFI)
- enable_hlt();
+ cpu_idle_poll_ctrl(false);
if (soc_ops.disable_module)
soc_ops.disable_module(oh);
@@ -2328,7 +2332,7 @@ static int _shutdown(struct omap_hwmod *oh)
_del_initiator_dep(oh, mpu_oh);
/* XXX what about the other system initiators here? dma, dsp */
if (oh->flags & HWMOD_BLOCK_WFI)
- enable_hlt();
+ cpu_idle_poll_ctrl(false);
if (soc_ops.disable_module)
soc_ops.disable_module(oh);
_disable_clocks(oh);
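Editor's note: _enable_sysc() and _idle_sysc() now check the new HWMOD_FORCE_MSTANDBY flag first and pin MIDLEMODE to force-standby in both directions, while HWMOD_SWSUP_MSTANDBY keeps meaning "no-standby while enabled, force-standby when idled". A condensed sketch of that selection logic; the flag values and idlemode constants are stand-ins and the smart-standby/wakeup handling is elided:

/* Sketch of the MIDLEMODE selection added above (names illustrative). */
#define MY_SWSUP_MSTANDBY	(1 << 1)
#define MY_FORCE_MSTANDBY	(1 << 11)

enum my_idlemode { MY_IDLEMODE_FORCE, MY_IDLEMODE_NO, MY_IDLEMODE_SMART };

static enum my_idlemode my_enable_midlemode(unsigned long flags)
{
	if (flags & MY_FORCE_MSTANDBY)
		return MY_IDLEMODE_FORCE;	/* forced even while enabled */
	if (flags & MY_SWSUP_MSTANDBY)
		return MY_IDLEMODE_NO;		/* software-supervised standby */
	return MY_IDLEMODE_SMART;
}

static enum my_idlemode my_idle_midlemode(unsigned long flags)
{
	if (flags & (MY_SWSUP_MSTANDBY | MY_FORCE_MSTANDBY))
		return MY_IDLEMODE_FORCE;
	return MY_IDLEMODE_SMART;
}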
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index d43d9b608ed..d5dc935f606 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -427,8 +427,8 @@ struct omap_hwmod_omap4_prcm {
*
* HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out
* of idle, rather than relying on module smart-idle
- * HWMOD_SWSUP_MSTDBY: omap_hwmod code should manually bring module in and out
- * of standby, rather than relying on module smart-standby
+ * HWMOD_SWSUP_MSTANDBY: omap_hwmod code should manually bring module in and
+ * out of standby, rather than relying on module smart-standby
* HWMOD_INIT_NO_RESET: don't reset this module at boot - important for
* SDRAM controller, etc. XXX probably belongs outside the main hwmod file
* XXX Should be HWMOD_SETUP_NO_RESET
@@ -459,6 +459,10 @@ struct omap_hwmod_omap4_prcm {
* correctly, or this is being abused to deal with some PM latency
* issues -- but we're currently suffering from a shortage of
* folks who are able to track these issues down properly.
+ * HWMOD_FORCE_MSTANDBY: Always keep MIDLEMODE bits cleared so that the
+ * device is kept in force-standby mode. Failing to do so causes PM problems
+ * with musb on OMAP3630 at least. Note that musb has a dedicated register
+ * to control the MSTANDBY signal when MIDLEMODE is set to force-standby.
*/
#define HWMOD_SWSUP_SIDLE (1 << 0)
#define HWMOD_SWSUP_MSTANDBY (1 << 1)
@@ -471,6 +475,7 @@ struct omap_hwmod_omap4_prcm {
#define HWMOD_16BIT_REG (1 << 8)
#define HWMOD_EXT_OPT_MAIN_CLK (1 << 9)
#define HWMOD_BLOCK_WFI (1 << 10)
+#define HWMOD_FORCE_MSTANDBY (1 << 11)
/*
* omap_hwmod._int_flags definitions
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index ac7e03ec952..5112d04e7b7 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1707,9 +1707,14 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
* Erratum ID: i479 idle_req / idle_ack mechanism potentially
* broken when autoidle is enabled
* workaround is to disable the autoidle bit at module level.
+ *
+ * Enabling the device in any MIDLEMODE setting other than force-idle
+ * prevents core_pwrdm from entering idle states, at least on OMAP3630.
+ * Note that musb has an OTG_FORCESTDBY register that controls the
+ * MSTANDBY signal when MIDLEMODE is set to force-idle.
*/
.flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE
- | HWMOD_SWSUP_MSTANDBY,
+ | HWMOD_FORCE_MSTANDBY,
};
/* usb_otg_hs */
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 0e47d2e1687..eaba9dc91a0 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2719,7 +2719,17 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
.name = "ocp2scp_usb_phy",
.class = &omap44xx_ocp2scp_hwmod_class,
.clkdm_name = "l3_init_clkdm",
- .main_clk = "func_48m_fclk",
+ /*
+ * ocp2scp_usb_phy_phy_48m is provided by the OMAP4 PRCM IP
+ * block as an "optional clock," and normally should never be
+ * specified as the main_clk for an OMAP IP block. However it
+ * turns out that this clock is actually the main clock for
+ * the ocp2scp_usb_phy IP block:
+ * http://lists.infradead.org/pipermail/linux-arm-kernel/2012-September/119943.html
+ * So listing ocp2scp_usb_phy_phy_48m as a main_clk here seems
+ * to be the best workaround.
+ */
+ .main_clk = "ocp2scp_usb_phy_phy_48m",
.prcm = {
.omap4 = {
.clkctrl_offs = OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET,
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 673a4c1d1d7..dec553349ae 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -218,7 +218,7 @@ static int omap_pm_enter(suspend_state_t suspend_state)
static int omap_pm_begin(suspend_state_t state)
{
- disable_hlt();
+ cpu_idle_poll_ctrl(true);
if (cpu_is_omap34xx())
omap_prcm_irq_prepare();
return 0;
@@ -226,8 +226,7 @@ static int omap_pm_begin(suspend_state_t state)
static void omap_pm_end(void)
{
- enable_hlt();
- return;
+ cpu_idle_poll_ctrl(false);
}
static void omap_pm_finish(void)
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 2bdd4cf17a8..f62b509ed08 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -547,6 +547,8 @@ static inline void __init realtime_counter_init(void)
clksrc_nr, clksrc_src) \
void __init omap##name##_gptimer_timer_init(void) \
{ \
+ if (omap_clk_init) \
+ omap_clk_init(); \
omap_dmtimer_init(); \
omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \
omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src); \
@@ -556,6 +558,8 @@ void __init omap##name##_gptimer_timer_init(void) \
clksrc_nr, clksrc_src) \
void __init omap##name##_sync32k_timer_init(void) \
{ \
+ if (omap_clk_init) \
+ omap_clk_init(); \
omap_dmtimer_init(); \
omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \
/* Enable the use of clocksource="gp_timer" kernel parameter */ \
diff --git a/arch/arm/mach-orion5x/board-dt.c b/arch/arm/mach-orion5x/board-dt.c
index 35a8014529c..94fbb815680 100644
--- a/arch/arm/mach-orion5x/board-dt.c
+++ b/arch/arm/mach-orion5x/board-dt.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/cpu.h>
#include <asm/system_misc.h>
#include <asm/mach/arch.h>
#include <mach/orion5x.h>
@@ -52,7 +53,7 @@ static void __init orion5x_dt_init(void)
*/
if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) {
printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n");
- disable_hlt();
+ cpu_idle_poll_ctrl(true);
}
if (of_machine_is_compatible("lacie,ethernet-disk-mini-v2"))
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index d068f1431c4..ad71c8a03ff 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -293,7 +293,7 @@ void __init orion5x_init(void)
*/
if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) {
printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n");
- disable_hlt();
+ cpu_idle_poll_ctrl(true);
}
/*
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index af41888acbd..969b0ba7fa7 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -505,6 +505,7 @@ static struct w1_gpio_platform_data w1_gpio_platform_data = {
.pin = GPIO_ONE_WIRE,
.is_open_drain = 0,
.enable_external_pullup = w1_enable_external_pullup,
+ .ext_pullup_enable_pin = -EINVAL,
};
struct platform_device raumfeld_w1_gpio_device = {
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2440.c b/arch/arm/mach-s3c24xx/clock-s3c2440.c
index 04b87ec9253..1069b568082 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2440.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2440.c
@@ -123,6 +123,11 @@ static struct clk s3c2440_clk_ac97 = {
.ctrlbit = S3C2440_CLKCON_AC97,
};
+#define S3C24XX_VA_UART0 (S3C_VA_UART)
+#define S3C24XX_VA_UART1 (S3C_VA_UART + 0x4000)
+#define S3C24XX_VA_UART2 (S3C_VA_UART + 0x8000)
+#define S3C24XX_VA_UART3 (S3C_VA_UART + 0xC000)
+
static unsigned long s3c2440_fclk_n_getrate(struct clk *clk)
{
unsigned long ucon0, ucon1, ucon2, divisor;
diff --git a/arch/arm/mach-s3c24xx/common.c b/arch/arm/mach-s3c24xx/common.c
index 6bcf87f65f9..92e609440c5 100644
--- a/arch/arm/mach-s3c24xx/common.c
+++ b/arch/arm/mach-s3c24xx/common.c
@@ -239,6 +239,11 @@ void __init s3c24xx_init_io(struct map_desc *mach_desc, int size)
/* Serial port registrations */
+#define S3C2410_PA_UART0 (S3C24XX_PA_UART)
+#define S3C2410_PA_UART1 (S3C24XX_PA_UART + 0x4000)
+#define S3C2410_PA_UART2 (S3C24XX_PA_UART + 0x8000)
+#define S3C2443_PA_UART3 (S3C24XX_PA_UART + 0xC000)
+
static struct resource s3c2410_uart0_resource[] = {
[0] = DEFINE_RES_MEM(S3C2410_PA_UART0, SZ_16K),
[1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX0, \
diff --git a/arch/arm/mach-s3c24xx/include/mach/irqs.h b/arch/arm/mach-s3c24xx/include/mach/irqs.h
index b7a9f4d469e..1e73f5fa865 100644
--- a/arch/arm/mach-s3c24xx/include/mach/irqs.h
+++ b/arch/arm/mach-s3c24xx/include/mach/irqs.h
@@ -188,10 +188,8 @@
#if defined(CONFIG_CPU_S3C2416)
#define NR_IRQS (IRQ_S3C2416_I2S1 + 1)
-#elif defined(CONFIG_CPU_S3C2443)
-#define NR_IRQS (IRQ_S3C2443_AC97+1)
#else
-#define NR_IRQS (IRQ_S3C2440_AC97+1)
+#define NR_IRQS (IRQ_S3C2443_AC97 + 1)
#endif
/* compatibility define. */
diff --git a/arch/arm/mach-s3c24xx/irq.c b/arch/arm/mach-s3c24xx/irq.c
index cb9f5e011e7..d8ba9bee4c7 100644
--- a/arch/arm/mach-s3c24xx/irq.c
+++ b/arch/arm/mach-s3c24xx/irq.c
@@ -500,7 +500,7 @@ struct s3c_irq_intc *s3c24xx_init_intc(struct device_node *np,
base = (void *)0xfd000000;
intc->reg_mask = base + 0xa4;
- intc->reg_pending = base + 0x08;
+ intc->reg_pending = base + 0xa8;
irq_num = 20;
irq_start = S3C2410_IRQ(32);
irq_offset = 4;
diff --git a/arch/arm/mach-s3c64xx/setup-usb-phy.c b/arch/arm/mach-s3c64xx/setup-usb-phy.c
index c8174d95339..ca960bda02f 100644
--- a/arch/arm/mach-s3c64xx/setup-usb-phy.c
+++ b/arch/arm/mach-s3c64xx/setup-usb-phy.c
@@ -76,7 +76,7 @@ static int s3c_usb_otgphy_exit(struct platform_device *pdev)
int s5p_usb_phy_init(struct platform_device *pdev, int type)
{
- if (type == S5P_USB_PHY_DEVICE)
+ if (type == USB_PHY_TYPE_DEVICE)
return s3c_usb_otgphy_init(pdev);
return -EINVAL;
@@ -84,7 +84,7 @@ int s5p_usb_phy_init(struct platform_device *pdev, int type)
int s5p_usb_phy_exit(struct platform_device *pdev, int type)
{
- if (type == S5P_USB_PHY_DEVICE)
+ if (type == USB_PHY_TYPE_DEVICE)
return s3c_usb_otgphy_exit(pdev);
return -EINVAL;
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index fcdf52dbcc4..f051f53e35b 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -214,11 +214,6 @@ static struct clk clk_pcmcdclk2 = {
.name = "pcmcdclk",
};
-static struct clk dummy_apb_pclk = {
- .name = "apb_pclk",
- .id = -1,
-};
-
static struct clk *clkset_vpllsrc_list[] = {
[0] = &clk_fin_vpll,
[1] = &clk_sclk_hdmi27m,
@@ -305,18 +300,6 @@ static struct clk_ops clk_fout_apll_ops = {
static struct clk init_clocks_off[] = {
{
- .name = "dma",
- .devname = "dma-pl330.0",
- .parent = &clk_hclk_psys.clk,
- .enable = s5pv210_clk_ip0_ctrl,
- .ctrlbit = (1 << 3),
- }, {
- .name = "dma",
- .devname = "dma-pl330.1",
- .parent = &clk_hclk_psys.clk,
- .enable = s5pv210_clk_ip0_ctrl,
- .ctrlbit = (1 << 4),
- }, {
.name = "rot",
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip0_ctrl,
@@ -573,6 +556,20 @@ static struct clk clk_hsmmc3 = {
.ctrlbit = (1<<19),
};
+static struct clk clk_pdma0 = {
+ .name = "pdma0",
+ .parent = &clk_hclk_psys.clk,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 3),
+};
+
+static struct clk clk_pdma1 = {
+ .name = "pdma1",
+ .parent = &clk_hclk_psys.clk,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 4),
+};
+
static struct clk *clkset_uart_list[] = {
[6] = &clk_mout_mpll.clk,
[7] = &clk_mout_epll.clk,
@@ -1075,6 +1072,8 @@ static struct clk *clk_cdev[] = {
&clk_hsmmc1,
&clk_hsmmc2,
&clk_hsmmc3,
+ &clk_pdma0,
+ &clk_pdma1,
};
/* Clock initialisation code */
@@ -1333,6 +1332,8 @@ static struct clk_lookup s5pv210_clk_lookup[] = {
CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
+ CLKDEV_INIT("dma-pl330.0", "apb_pclk", &clk_pdma0),
+ CLKDEV_INIT("dma-pl330.1", "apb_pclk", &clk_pdma1),
};
void __init s5pv210_register_clocks(void)
@@ -1361,6 +1362,5 @@ void __init s5pv210_register_clocks(void)
for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++)
s3c_disable_clocks(clk_cdev[ptr], 1);
- s3c24xx_register_clock(&dummy_apb_pclk);
s3c_pwmclk_init();
}
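Editor's note: the catch-all dummy_apb_pclk is replaced by real pdma0/pdma1 gate clocks, and CLKDEV_INIT entries bind each one to its PL330 device under the connection id "apb_pclk", so the AMBA bus code doing clk_get(dev, "apb_pclk") now enables the correct IP gate. A sketch of the lookup-registration side, assuming the clkdev API (the legacy Samsung clock type differs, but the idea is the same):

#include <linux/clkdev.h>

/* Hedged sketch: register a per-device "apb_pclk" lookup so that
 * clk_get(&pl330_dev, "apb_pclk") resolves to the real gate clock. */
static void my_register_pdma_pclk(struct clk *clk_pdma0)
{
	struct clk_lookup *cl;

	cl = clkdev_alloc(clk_pdma0, "apb_pclk", "dma-pl330.0");
	if (cl)
		clkdev_add(cl);
}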
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index 3a38f7b34b9..e373de44a8b 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -845,7 +845,7 @@ static struct fimc_source_info goni_camera_sensors[] = {
.mux_id = 0,
.flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
V4L2_MBUS_VSYNC_ACTIVE_LOW,
- .bus_type = FIMC_BUS_TYPE_ITU_601,
+ .fimc_bus_type = FIMC_BUS_TYPE_ITU_601,
.board_info = &noon010pc30_board_info,
.i2c_bus_num = 0,
.clk_frequency = 16000000UL,
diff --git a/arch/arm/mach-s5pv210/setup-usb-phy.c b/arch/arm/mach-s5pv210/setup-usb-phy.c
index 356a0900af0..b2ee5333f89 100644
--- a/arch/arm/mach-s5pv210/setup-usb-phy.c
+++ b/arch/arm/mach-s5pv210/setup-usb-phy.c
@@ -80,7 +80,7 @@ static int s5pv210_usb_otgphy_exit(struct platform_device *pdev)
int s5p_usb_phy_init(struct platform_device *pdev, int type)
{
- if (type == S5P_USB_PHY_DEVICE)
+ if (type == USB_PHY_TYPE_DEVICE)
return s5pv210_usb_otgphy_init(pdev);
return -EINVAL;
@@ -88,7 +88,7 @@ int s5p_usb_phy_init(struct platform_device *pdev, int type)
int s5p_usb_phy_exit(struct platform_device *pdev, int type)
{
- if (type == S5P_USB_PHY_DEVICE)
+ if (type == USB_PHY_TYPE_DEVICE)
return s5pv210_usb_otgphy_exit(pdev);
return -EINVAL;
diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c
index b63dec84819..15355572498 100644
--- a/arch/arm/mach-shark/core.c
+++ b/arch/arm/mach-shark/core.c
@@ -10,6 +10,7 @@
#include <linux/sched.h>
#include <linux/serial_8250.h>
#include <linux/io.h>
+#include <linux/cpu.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
@@ -130,7 +131,7 @@ static void __init shark_timer_init(void)
static void shark_init_early(void)
{
- disable_hlt();
+ cpu_idle_poll_ctrl(true);
}
MACHINE_START(SHARK, "Shark")
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index f2ec0777cfb..ff8b7ba9b93 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -169,7 +169,7 @@ static int usbhsf_get_id(struct platform_device *pdev)
return USBHS_GADGET;
}
-static void usbhsf_power_ctrl(struct platform_device *pdev,
+static int usbhsf_power_ctrl(struct platform_device *pdev,
void __iomem *base, int enable)
{
struct usbhsf_private *priv = usbhsf_get_priv(pdev);
@@ -223,6 +223,8 @@ static void usbhsf_power_ctrl(struct platform_device *pdev,
clk_disable(priv->pci); /* usb work around */
clk_disable(priv->usb24); /* usb work around */
}
+
+ return 0;
}
static int usbhsf_get_vbus(struct platform_device *pdev)
@@ -239,7 +241,7 @@ static irqreturn_t usbhsf_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-static void usbhsf_hardware_exit(struct platform_device *pdev)
+static int usbhsf_hardware_exit(struct platform_device *pdev)
{
struct usbhsf_private *priv = usbhsf_get_priv(pdev);
@@ -264,6 +266,8 @@ static void usbhsf_hardware_exit(struct platform_device *pdev)
priv->usbh_base = NULL;
free_irq(IRQ7, pdev);
+
+ return 0;
}
static int usbhsf_hardware_init(struct platform_device *pdev)
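Editor's note: the renesas_usbhs platform callbacks (power_ctrl, phy_reset, hardware_exit) changed from void to int, so every board file now returns 0 on success; the edits here and in the kzm9g and mackerel files below are the mechanical half of that conversion. A sketch of the new shape; the struct and member names are illustrative, not the driver's actual platform-data layout:

#include <linux/platform_device.h>
#include <linux/io.h>

/* Sketch of the updated callback signature: report success or failure
 * instead of returning void. */
struct my_usbhs_callbacks {
	int (*power_ctrl)(struct platform_device *pdev,
			  void __iomem *base, int enable);
	int (*hardware_exit)(struct platform_device *pdev);
};

static int my_power_ctrl(struct platform_device *pdev,
			 void __iomem *base, int enable)
{
	/* ... toggle clocks / PHY power ... */
	return 0;	/* 0 on success, negative errno on failure */
}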
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index 7f3a6b7e7b7..a385f570bbf 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -155,12 +155,14 @@ static int usbhs_get_vbus(struct platform_device *pdev)
return !((1 << 7) & __raw_readw(priv->cr2));
}
-static void usbhs_phy_reset(struct platform_device *pdev)
+static int usbhs_phy_reset(struct platform_device *pdev)
{
struct usbhs_private *priv = usbhs_get_priv(pdev);
/* init phy */
__raw_writew(0x8a0a, priv->cr2);
+
+ return 0;
}
static int usbhs_get_id(struct platform_device *pdev)
@@ -202,7 +204,7 @@ static int usbhs_hardware_init(struct platform_device *pdev)
return 0;
}
-static void usbhs_hardware_exit(struct platform_device *pdev)
+static int usbhs_hardware_exit(struct platform_device *pdev)
{
struct usbhs_private *priv = usbhs_get_priv(pdev);
@@ -210,6 +212,8 @@ static void usbhs_hardware_exit(struct platform_device *pdev)
__raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->phy);
free_irq(IRQ15, pdev);
+
+ return 0;
}
static u32 usbhs_pipe_cfg[] = {
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index db968a585ff..979237c18da 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -596,12 +596,14 @@ static int usbhs_get_vbus(struct platform_device *pdev)
return usbhs_is_connected(usbhs_get_priv(pdev));
}
-static void usbhs_phy_reset(struct platform_device *pdev)
+static int usbhs_phy_reset(struct platform_device *pdev)
{
struct usbhs_private *priv = usbhs_get_priv(pdev);
/* init phy */
__raw_writew(0x8a0a, priv->usbcrcaddr);
+
+ return 0;
}
static int usbhs0_get_id(struct platform_device *pdev)
@@ -628,11 +630,13 @@ static int usbhs0_hardware_init(struct platform_device *pdev)
return 0;
}
-static void usbhs0_hardware_exit(struct platform_device *pdev)
+static int usbhs0_hardware_exit(struct platform_device *pdev)
{
struct usbhs_private *priv = usbhs_get_priv(pdev);
cancel_delayed_work_sync(&priv->work);
+
+ return 0;
}
static struct usbhs_private usbhs0_private = {
@@ -735,7 +739,7 @@ static int usbhs1_hardware_init(struct platform_device *pdev)
return 0;
}
-static void usbhs1_hardware_exit(struct platform_device *pdev)
+static int usbhs1_hardware_exit(struct platform_device *pdev)
{
struct usbhs_private *priv = usbhs_get_priv(pdev);
@@ -743,6 +747,8 @@ static void usbhs1_hardware_exit(struct platform_device *pdev)
__raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
free_irq(IRQ8, pdev);
+
+ return 0;
}
static int usbhs1_get_id(struct platform_device *pdev)
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index cdcb799e802..fec49ebc359 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -32,6 +32,7 @@
#include <linux/smsc911x.h>
#include <linux/spi/spi.h>
#include <linux/spi/sh_hspi.h>
+#include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h>
#include <linux/usb/otg.h>
diff --git a/arch/arm/mach-shmobile/suspend.c b/arch/arm/mach-shmobile/suspend.c
index 47d83f7a70b..5d92b5dd486 100644
--- a/arch/arm/mach-shmobile/suspend.c
+++ b/arch/arm/mach-shmobile/suspend.c
@@ -12,6 +12,8 @@
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/err.h>
+#include <linux/cpu.h>
+
#include <asm/io.h>
#include <asm/system_misc.h>
@@ -23,13 +25,13 @@ static int shmobile_suspend_default_enter(suspend_state_t suspend_state)
static int shmobile_suspend_begin(suspend_state_t state)
{
- disable_hlt();
+ cpu_idle_poll_ctrl(true);
return 0;
}
static void shmobile_suspend_end(void)
{
- enable_hlt();
+ cpu_idle_poll_ctrl(false);
}
struct platform_suspend_ops shmobile_suspend_ops = {
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index f9d754f90c5..d2b3937c401 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -14,7 +14,7 @@
#define pr_fmt(fmt) "SPEAr3xx: " fmt
#include <linux/amba/pl022.h>
-#include <linux/amba/pl08x.h>
+#include <linux/amba/pl080.h>
#include <linux/io.h>
#include <plat/pl080.h>
#include <mach/generic.h>
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index d1c4893894c..dbc653ea851 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -18,8 +18,8 @@ config ARCH_TEGRA_2x_SOC
select PL310_ERRATA_727915 if CACHE_L2X0
select PL310_ERRATA_769419 if CACHE_L2X0
select USB_ARCH_HAS_EHCI if USB_SUPPORT
- select USB_ULPI if USB
- select USB_ULPI_VIEWPORT if USB_SUPPORT
+ select USB_ULPI if USB_PHY
+ select USB_ULPI_VIEWPORT if USB_PHY
help
Support for NVIDIA Tegra AP20 and T20 processors, based on the
ARM CortexA9MP CPU and the ARM PL310 L2 cache controller
@@ -37,8 +37,8 @@ config ARCH_TEGRA_3x_SOC
select PINCTRL_TEGRA30
select PL310_ERRATA_769419 if CACHE_L2X0
select USB_ARCH_HAS_EHCI if USB_SUPPORT
- select USB_ULPI if USB
- select USB_ULPI_VIEWPORT if USB_SUPPORT
+ select USB_ULPI if USB_PHY
+ select USB_ULPI_VIEWPORT if USB_PHY
help
Support for NVIDIA Tegra T30 processor family, based on the
ARM CortexA9MP CPU and the ARM PL310 L2 cache controller
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c
index 2a17bc506cf..ff3c9f01659 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.c
+++ b/arch/arm/mach-ux500/board-mop500-regulators.c
@@ -5,6 +5,7 @@
*
* Authors: Sundar Iyer <sundar.iyer@stericsson.com>
* Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ * Daniel Willerud <daniel.willerud@stericsson.com>
*
* MOP500 board specific initialization for regulators
*/
@@ -12,6 +13,7 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/ab8500.h>
#include "board-mop500-regulators.h"
+#include "id.h"
static struct regulator_consumer_supply gpio_en_3v3_consumers[] = {
REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
@@ -53,21 +55,37 @@ struct regulator_init_data tps61052_regulator = {
};
static struct regulator_consumer_supply ab8500_vaux1_consumers[] = {
- /* External displays, connector on board 2v5 power supply */
- REGULATOR_SUPPLY("vaux12v5", "mcde.0"),
+ /* Main display, u8500 R3 uib */
+ REGULATOR_SUPPLY("vddi", "mcde_disp_sony_acx424akp.0"),
+ /* Main display, u8500 uib and ST uib */
+ REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.0"),
+ /* Secondary display, ST uib */
+ REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.1"),
/* SFH7741 proximity sensor */
REGULATOR_SUPPLY("vcc", "gpio-keys.0"),
/* BH1780GLS ambient light sensor */
REGULATOR_SUPPLY("vcc", "2-0029"),
/* lsm303dlh accelerometer */
- REGULATOR_SUPPLY("vdd", "3-0018"),
+ REGULATOR_SUPPLY("vdd", "2-0018"),
+ /* lsm303dlhc accelerometer */
+ REGULATOR_SUPPLY("vdd", "2-0019"),
/* lsm303dlh magnetometer */
- REGULATOR_SUPPLY("vdd", "3-001e"),
+ REGULATOR_SUPPLY("vdd", "2-001e"),
/* Rohm BU21013 Touchscreen devices */
REGULATOR_SUPPLY("avdd", "3-005c"),
REGULATOR_SUPPLY("avdd", "3-005d"),
/* Synaptics RMI4 Touchscreen device */
REGULATOR_SUPPLY("vdd", "3-004b"),
+ /* L3G4200D Gyroscope device */
+ REGULATOR_SUPPLY("vdd", "2-0068"),
+ /* Ambient light sensor device */
+ REGULATOR_SUPPLY("vdd", "3-0029"),
+ /* Pressure sensor device */
+ REGULATOR_SUPPLY("vdd", "2-005c"),
+ /* Cypress TrueTouch Touchscreen device */
+ REGULATOR_SUPPLY("vcpin", "spi8.0"),
+ /* Camera device */
+ REGULATOR_SUPPLY("vaux12v5", "mmio_camera"),
};
static struct regulator_consumer_supply ab8500_vaux2_consumers[] = {
@@ -75,18 +93,50 @@ static struct regulator_consumer_supply ab8500_vaux2_consumers[] = {
REGULATOR_SUPPLY("vmmc", "sdi4"),
/* AB8500 audio codec */
REGULATOR_SUPPLY("vcc-N2158", "ab8500-codec.0"),
+ /* AB8500 accessory detect 1 */
+ REGULATOR_SUPPLY("vcc-N2158", "ab8500-acc-det.0"),
+ /* AB8500 Tv-out device */
+ REGULATOR_SUPPLY("vcc-N2158", "mcde_tv_ab8500.4"),
+ /* AV8100 HDMI device */
+ REGULATOR_SUPPLY("vcc-N2158", "av8100_hdmi.3"),
};
static struct regulator_consumer_supply ab8500_vaux3_consumers[] = {
+ REGULATOR_SUPPLY("v-SD-STM", "stm"),
/* External MMC slot power */
REGULATOR_SUPPLY("vmmc", "sdi0"),
};
+static struct regulator_consumer_supply ab8505_vaux4_consumers[] = {
+};
+
+static struct regulator_consumer_supply ab8505_vaux5_consumers[] = {
+};
+
+static struct regulator_consumer_supply ab8505_vaux6_consumers[] = {
+};
+
+static struct regulator_consumer_supply ab8505_vaux8_consumers[] = {
+ /* AB8500 audio codec device */
+ REGULATOR_SUPPLY("v-aux8", NULL),
+};
+
+static struct regulator_consumer_supply ab8505_vadc_consumers[] = {
+ /* Internal general-purpose ADC */
+ REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"),
+ /* ADC for charger */
+ REGULATOR_SUPPLY("vddadc", "ab8500-charger.0"),
+};
+
static struct regulator_consumer_supply ab8500_vtvout_consumers[] = {
/* TV-out DENC supply */
REGULATOR_SUPPLY("vtvout", "ab8500-denc.0"),
/* Internal general-purpose ADC */
REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"),
+ /* ADC for charger */
+ REGULATOR_SUPPLY("vddadc", "ab8500-charger.0"),
+ /* AB8500 Tv-out device */
+ REGULATOR_SUPPLY("vtvout", "mcde_tv_ab8500.4"),
};
static struct regulator_consumer_supply ab8500_vaud_consumers[] = {
@@ -114,77 +164,90 @@ static struct regulator_consumer_supply ab8500_vintcore_consumers[] = {
REGULATOR_SUPPLY("v-intcore", NULL),
/* USB Transceiver */
REGULATOR_SUPPLY("vddulpivio18", "ab8500-usb.0"),
+ /* Handled by abx500 clk driver */
+ REGULATOR_SUPPLY("v-intcore", "abx500-clk.0"),
+};
+
+static struct regulator_consumer_supply ab8505_usb_consumers[] = {
+ /* HS USB OTG physical interface */
+ REGULATOR_SUPPLY("v-ape", NULL),
};
static struct regulator_consumer_supply ab8500_vana_consumers[] = {
- /* External displays, connector on board, 1v8 power supply */
- REGULATOR_SUPPLY("vsmps2", "mcde.0"),
+ /* DB8500 DSI */
+ REGULATOR_SUPPLY("vdddsi1v2", "mcde"),
+ REGULATOR_SUPPLY("vdddsi1v2", "b2r2_core"),
+ REGULATOR_SUPPLY("vdddsi1v2", "b2r2_1_core"),
+ REGULATOR_SUPPLY("vdddsi1v2", "dsilink.0"),
+ REGULATOR_SUPPLY("vdddsi1v2", "dsilink.1"),
+ REGULATOR_SUPPLY("vdddsi1v2", "dsilink.2"),
+ /* DB8500 CSI */
+ REGULATOR_SUPPLY("vddcsi1v2", "mmio_camera"),
};
/* ab8500 regulator register initialization */
-struct ab8500_regulator_reg_init
-ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
+static struct ab8500_regulator_reg_init ab8500_reg_init[] = {
/*
* VanaRequestCtrl = HP/LP depending on VxRequest
* VextSupply1RequestCtrl = HP/LP depending on VxRequest
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0xf0, 0x00),
/*
* VextSupply2RequestCtrl = HP/LP depending on VxRequest
* VextSupply3RequestCtrl = HP/LP depending on VxRequest
* Vaux1RequestCtrl = HP/LP depending on VxRequest
* Vaux2RequestCtrl = HP/LP depending on VxRequest
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0xff, 0x00),
/*
* Vaux3RequestCtrl = HP/LP depending on VxRequest
* SwHPReq = Control through SWValid disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x07, 0x00),
/*
* VanaSysClkReq1HPValid = disabled
* Vaux1SysClkReq1HPValid = disabled
* Vaux2SysClkReq1HPValid = disabled
* Vaux3SysClkReq1HPValid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0xe8, 0x00),
/*
* VextSupply1SysClkReq1HPValid = disabled
* VextSupply2SysClkReq1HPValid = disabled
* VextSupply3SysClkReq1HPValid = SysClkReq1 controlled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x40),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x70, 0x40),
/*
* VanaHwHPReq1Valid = disabled
* Vaux1HwHPreq1Valid = disabled
* Vaux2HwHPReq1Valid = disabled
* Vaux3HwHPReqValid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0xe8, 0x00),
/*
* VextSupply1HwHPReq1Valid = disabled
* VextSupply2HwHPReq1Valid = disabled
* VextSupply3HwHPReq1Valid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x07, 0x00),
/*
* VanaHwHPReq2Valid = disabled
* Vaux1HwHPReq2Valid = disabled
* Vaux2HwHPReq2Valid = disabled
* Vaux3HwHPReq2Valid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0xe8, 0x00),
/*
* VextSupply1HwHPReq2Valid = disabled
* VextSupply2HwHPReq2Valid = disabled
* VextSupply3HwHPReq2Valid = HWReq2 controlled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x04),
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x07, 0x04),
/*
* VanaSwHPReqValid = disabled
* Vaux1SwHPReqValid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0xa0, 0x00),
/*
* Vaux2SwHPReqValid = disabled
* Vaux3SwHPReqValid = disabled
@@ -192,7 +255,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* VextSupply2SwHPReqValid = disabled
* VextSupply3SwHPReqValid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x1f, 0x00),
/*
* SysClkReq2Valid1 = SysClkReq2 controlled
* SysClkReq3Valid1 = disabled
@@ -202,7 +265,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* SysClkReq7Valid1 = disabled
* SysClkReq8Valid1 = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0x2a),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0xfe, 0x2a),
/*
* SysClkReq2Valid2 = disabled
* SysClkReq3Valid2 = disabled
@@ -212,7 +275,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* SysClkReq7Valid2 = disabled
* SysClkReq8Valid2 = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0x20),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0xfe, 0x20),
/*
* VTVoutEna = disabled
* Vintcore12Ena = disabled
@@ -220,66 +283,62 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* Vintcore12LP = inactive (HP)
* VTVoutLP = inactive (HP)
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0x10),
+ INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0xfe, 0x10),
/*
* VaudioEna = disabled
* VdmicEna = disabled
* Vamic1Ena = disabled
* Vamic2Ena = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x1e, 0x00),
/*
* Vamic1_dzout = high-Z when Vamic1 is disabled
* Vamic2_dzout = high-Z when Vamic2 is disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x03, 0x00),
/*
- * VPll = Hw controlled
+ * VPll = Hw controlled (NOTE! PRCMU bits)
* VanaRegu = force off
*/
- INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x02),
+ INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x0f, 0x02),
/*
* VrefDDREna = disabled
* VrefDDRSleepMode = inactive (no pulldown)
*/
- INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x03, 0x00),
/*
- * VextSupply1Regu = HW control
- * VextSupply2Regu = HW control
- * VextSupply3Regu = HW control
+ * VextSupply1Regu = force LP
+ * VextSupply2Regu = force OFF
+ * VextSupply3Regu = force HP (-> STBB2=LP and TPS=LP)
* ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0
* ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0
*/
- INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0x2a),
+ INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0xff, 0x13),
/*
* Vaux1Regu = force HP
* Vaux2Regu = force off
*/
- INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x01),
+ INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x0f, 0x01),
/*
- * Vaux3regu = force off
+ * Vaux3Regu = force off
*/
- INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x03, 0x00),
/*
- * Vsmps1 = 1.15V
+ * Vaux1Sel = 2.8 V
*/
- INIT_REGULATOR_REGISTER(AB8500_VSMPS1SEL1, 0x24),
- /*
- * Vaux1Sel = 2.5 V
- */
- INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x08),
+ INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x0f, 0x0C),
/*
* Vaux2Sel = 2.9 V
*/
- INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0d),
+ INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0f, 0x0d),
/*
* Vaux3Sel = 2.91 V
*/
- INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07),
+ INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07, 0x07),
/*
* VextSupply12LP = disabled (no LP)
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x01, 0x00),
/*
* Vaux1Disch = short discharge time
* Vaux2Disch = short discharge time
@@ -288,33 +347,26 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* VTVoutDisch = short discharge time
* VaudioDisch = short discharge time
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0xfc, 0x00),
/*
* VanaDisch = short discharge time
* VdmicPullDownEna = pulldown disabled when Vdmic is disabled
* VdmicDisch = short discharge time
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x16, 0x00),
};
/* AB8500 regulators */
-struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
+static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
/* supplies to the display/camera */
[AB8500_LDO_AUX1] = {
.constraints = {
.name = "V-DISPLAY",
- .min_uV = 2500000,
- .max_uV = 2900000,
+ .min_uV = 2800000,
+ .max_uV = 3300000,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
REGULATOR_CHANGE_STATUS,
.boot_on = 1, /* display is on at boot */
- /*
- * This voltage cannot be disabled right now because
- * it is somehow affecting the external MMC
- * functionality, though that typically will use
- * AUX3.
- */
- .always_on = 1,
},
.num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers),
.consumer_supplies = ab8500_vaux1_consumers,
@@ -326,7 +378,10 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
.min_uV = 1100000,
.max_uV = 3300000,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS,
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
},
.num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers),
.consumer_supplies = ab8500_vaux2_consumers,
@@ -338,7 +393,10 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
.min_uV = 1100000,
.max_uV = 3300000,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS,
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
},
.num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers),
.consumer_supplies = ab8500_vaux3_consumers,
@@ -392,18 +450,614 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
[AB8500_LDO_INTCORE] = {
.constraints = {
.name = "V-INTCORE",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .min_uV = 1250000,
+ .max_uV = 1350000,
+ .input_uV = 1800000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE |
+ REGULATOR_CHANGE_DRMS,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
},
.num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers),
.consumer_supplies = ab8500_vintcore_consumers,
},
- /* supply for U8500 CSI/DSI, VANA LDO */
+ /* supply for U8500 CSI-DSI, VANA LDO */
[AB8500_LDO_ANA] = {
.constraints = {
- .name = "V-CSI/DSI",
+ .name = "V-CSI-DSI",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers),
.consumer_supplies = ab8500_vana_consumers,
},
};
+
+/* supply for VextSupply3 */
+static struct regulator_consumer_supply ab8500_ext_supply3_consumers[] = {
+ /* SIM supply for 3 V SIM cards */
+ REGULATOR_SUPPLY("vinvsim", "sim-detect.0"),
+};
+
+/* extended configuration for VextSupply2, only used for HREFP_V20 boards */
+static struct ab8500_ext_regulator_cfg ab8500_ext_supply2 = {
+ .hwreq = true,
+};
+
+/*
+ * AB8500 external regulators
+ */
+static struct regulator_init_data ab8500_ext_regulators[] = {
+ /* fixed Vbat supplies VSMPS1_EXT_1V8 */
+ [AB8500_EXT_SUPPLY1] = {
+ .constraints = {
+ .name = "ab8500-ext-supply1",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .initial_mode = REGULATOR_MODE_IDLE,
+ .boot_on = 1,
+ .always_on = 1,
+ },
+ },
+ /* fixed Vbat supplies VSMPS2_EXT_1V36 and VSMPS5_EXT_1V15 */
+ [AB8500_EXT_SUPPLY2] = {
+ .constraints = {
+ .name = "ab8500-ext-supply2",
+ .min_uV = 1360000,
+ .max_uV = 1360000,
+ },
+ },
+ /* fixed Vbat supplies VSMPS3_EXT_3V4 and VSMPS4_EXT_3V4 */
+ [AB8500_EXT_SUPPLY3] = {
+ .constraints = {
+ .name = "ab8500-ext-supply3",
+ .min_uV = 3400000,
+ .max_uV = 3400000,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ },
+ .num_consumer_supplies =
+ ARRAY_SIZE(ab8500_ext_supply3_consumers),
+ .consumer_supplies = ab8500_ext_supply3_consumers,
+ },
+};
+
+/* ab8505 regulator register initialization */
+static struct ab8500_regulator_reg_init ab8505_reg_init[] = {
+ /*
+ * VarmRequestCtrl
+ * VsmpsCRequestCtrl
+ * VsmpsARequestCtrl
+ * VsmpsBRequestCtrl
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL1, 0x00, 0x00),
+ /*
+ * VsafeRequestCtrl
+ * VpllRequestCtrl
+ * VanaRequestCtrl = HP/LP depending on VxRequest
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL2, 0x30, 0x00),
+ /*
+ * Vaux1RequestCtrl = HP/LP depending on VxRequest
+ * Vaux2RequestCtrl = HP/LP depending on VxRequest
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL3, 0xf0, 0x00),
+ /*
+ * Vaux3RequestCtrl = HP/LP depending on VxRequest
+ * SwHPReq = Control through SWValid disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL4, 0x07, 0x00),
+ /*
+ * VsmpsASysClkReq1HPValid
+ * VsmpsBSysClkReq1HPValid
+ * VsafeSysClkReq1HPValid
+ * VanaSysClkReq1HPValid = disabled
+ * VpllSysClkReq1HPValid
+ * Vaux1SysClkReq1HPValid = disabled
+ * Vaux2SysClkReq1HPValid = disabled
+ * Vaux3SysClkReq1HPValid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQ1HPVALID1, 0xe8, 0x00),
+ /*
+ * VsmpsCSysClkReq1HPValid
+ * VarmSysClkReq1HPValid
+ * VbbSysClkReq1HPValid
+ * VsmpsMSysClkReq1HPValid
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQ1HPVALID2, 0x00, 0x00),
+ /*
+ * VsmpsAHwHPReq1Valid
+ * VsmpsBHwHPReq1Valid
+ * VsafeHwHPReq1Valid
+ * VanaHwHPReq1Valid = disabled
+ * VpllHwHPReq1Valid
+ * Vaux1HwHPreq1Valid = disabled
+ * Vaux2HwHPReq1Valid = disabled
+ * Vaux3HwHPReqValid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ1VALID1, 0xe8, 0x00),
+ /*
+ * VsmpsMHwHPReq1Valid
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ1VALID2, 0x00, 0x00),
+ /*
+ * VsmpsAHwHPReq2Valid
+ * VsmpsBHwHPReq2Valid
+ * VsafeHwHPReq2Valid
+ * VanaHwHPReq2Valid = disabled
+ * VpllHwHPReq2Valid
+ * Vaux1HwHPReq2Valid = disabled
+ * Vaux2HwHPReq2Valid = disabled
+ * Vaux3HwHPReq2Valid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ2VALID1, 0xe8, 0x00),
+ /*
+ * VsmpsMHwHPReq2Valid
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ2VALID2, 0x00, 0x00),
+ /**
+ * VsmpsCSwHPReqValid
+ * VarmSwHPReqValid
+ * VsmpsASwHPReqValid
+ * VsmpsBSwHPReqValid
+ * VsafeSwHPReqValid
+ * VanaSwHPReqValid = disabled
+ * VpllSwHPReqValid
+ * Vaux1SwHPReqValid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUSWHPREQVALID1, 0xa0, 0x00),
+ /*
+ * Vaux2SwHPReqValid = disabled
+ * Vaux3SwHPReqValid = disabled
+ * VsmpsMSwHPReqValid
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUSWHPREQVALID2, 0x03, 0x00),
+ /*
+ * SysClkReq2Valid1 = SysClkReq2 controlled
+ * SysClkReq3Valid1 = disabled
+ * SysClkReq4Valid1 = SysClkReq4 controlled
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQVALID1, 0x0e, 0x0a),
+ /*
+ * SysClkReq2Valid2 = disabled
+ * SysClkReq3Valid2 = disabled
+ * SysClkReq4Valid2 = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQVALID2, 0x0e, 0x00),
+ /*
+ * Vaux4SwHPReqValid
+ * Vaux4HwHPReq2Valid
+ * Vaux4HwHPReq1Valid
+ * Vaux4SysClkReq1HPValid
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUVAUX4REQVALID, 0x00, 0x00),
+ /*
+ * VadcEna = disabled
+ * VintCore12Ena = disabled
+ * VintCore12Sel = 1.25 V
+ * VintCore12LP = inactive (HP)
+ * VadcLP = inactive (HP)
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUMISC1, 0xfe, 0x10),
+ /*
+ * VaudioEna = disabled
+ * Vaux8Ena = disabled
+ * Vamic1Ena = disabled
+ * Vamic2Ena = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VAUDIOSUPPLY, 0x1e, 0x00),
+ /*
+ * Vamic1_dzout = high-Z when Vamic1 is disabled
+ * Vamic2_dzout = high-Z when Vamic2 is disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUCTRL1VAMIC, 0x03, 0x00),
+ /*
+ * VsmpsARegu
+ * VsmpsASelCtrl
+ * VsmpsAAutoMode
+ * VsmpsAPWMMode
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSMPSAREGU, 0x00, 0x00),
+ /*
+ * VsmpsBRegu
+ * VsmpsBSelCtrl
+ * VsmpsBAutoMode
+ * VsmpsBPWMMode
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSMPSBREGU, 0x00, 0x00),
+ /*
+ * VsafeRegu
+ * VsafeSelCtrl
+ * VsafeAutoMode
+ * VsafePWMMode
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSAFEREGU, 0x00, 0x00),
+ /*
+ * VPll = Hw controlled (NOTE! PRCMU bits)
+ * VanaRegu = force off
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VPLLVANAREGU, 0x0f, 0x02),
+ /*
+ * VextSupply1Regu = force OFF (OTP_ExtSupply12LPnPolarity 1)
+ * VextSupply2Regu = force OFF (OTP_ExtSupply12LPnPolarity 1)
+ * VextSupply3Regu = force OFF (OTP_ExtSupply3LPnPolarity 0)
+ * ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0
+ * ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0
+ */
+ INIT_REGULATOR_REGISTER(AB8505_EXTSUPPLYREGU, 0xff, 0x30),
+ /*
+ * Vaux1Regu = force HP
+ * Vaux2Regu = force off
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VAUX12REGU, 0x0f, 0x01),
+ /*
+ * Vaux3Regu = force off
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VRF1VAUX3REGU, 0x03, 0x00),
+ /*
+ * VsmpsASel1
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSMPSASEL1, 0x00, 0x00),
+ /*
+ * VsmpsASel2
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSMPSASEL2, 0x00, 0x00),
+ /*
+ * VsmpsASel3
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSMPSASEL3, 0x00, 0x00),
+ /*
+ * VsmpsBSel1
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSMPSBSEL1, 0x00, 0x00),
+ /*
+ * VsmpsBSel2
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSMPSBSEL2, 0x00, 0x00),
+ /*
+ * VsmpsBSel3
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSMPSBSEL3, 0x00, 0x00),
+ /*
+ * VsafeSel1
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSAFESEL1, 0x00, 0x00),
+ /*
+ * VsafeSel2
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSAFESEL2, 0x00, 0x00),
+ /*
+ * VsafeSel3
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSAFESEL3, 0x00, 0x00),
+ /*
+ * Vaux1Sel = 2.8 V
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VAUX1SEL, 0x0f, 0x0C),
+ /*
+ * Vaux2Sel = 2.9 V
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VAUX2SEL, 0x0f, 0x0d),
+ /*
+ * Vaux3Sel = 2.91 V
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VRF1VAUX3SEL, 0x07, 0x07),
+ /*
+ * Vaux4RequestCtrl
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VAUX4REQCTRL, 0x00, 0x00),
+ /*
+ * Vaux4Regu
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VAUX4REGU, 0x00, 0x00),
+ /*
+ * Vaux4Sel
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VAUX4SEL, 0x00, 0x00),
+ /*
+ * Vaux1Disch = short discharge time
+ * Vaux2Disch = short discharge time
+ * Vaux3Disch = short discharge time
+ * Vintcore12Disch = short discharge time
+ * VTVoutDisch = short discharge time
+ * VaudioDisch = short discharge time
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUCTRLDISCH, 0xfc, 0x00),
+ /*
+ * VanaDisch = short discharge time
+ * Vaux8PullDownEna = pulldown disabled when Vaux8 is disabled
+ * Vaux8Disch = short discharge time
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUCTRLDISCH2, 0x16, 0x00),
+ /*
+ * Vaux4Disch = short discharge time
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUCTRLDISCH3, 0x01, 0x00),
+ /*
+ * Vaux5Sel
+ * Vaux5LP
+ * Vaux5Ena
+ * Vaux5Disch
+ * Vaux5DisSfst
+ * Vaux5DisPulld
+ */
+ INIT_REGULATOR_REGISTER(AB8505_CTRLVAUX5, 0x00, 0x00),
+ /*
+ * Vaux6Sel
+ * Vaux6LP
+ * Vaux6Ena
+ * Vaux6DisPulld
+ */
+ INIT_REGULATOR_REGISTER(AB8505_CTRLVAUX6, 0x00, 0x00),
+};
+
+struct regulator_init_data ab8505_regulators[AB8505_NUM_REGULATORS] = {
+ /* supplies to the display/camera */
+ [AB8505_LDO_AUX1] = {
+ .constraints = {
+ .name = "V-DISPLAY",
+ .min_uV = 2800000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS,
+ .boot_on = 1, /* display is on at boot */
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers),
+ .consumer_supplies = ab8500_vaux1_consumers,
+ },
+ /* supplies to the on-board eMMC */
+ [AB8505_LDO_AUX2] = {
+ .constraints = {
+ .name = "V-eMMC1",
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers),
+ .consumer_supplies = ab8500_vaux2_consumers,
+ },
+ /* supply for VAUX3, supplies to SDcard slots */
+ [AB8505_LDO_AUX3] = {
+ .constraints = {
+ .name = "V-MMC-SD",
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers),
+ .consumer_supplies = ab8500_vaux3_consumers,
+ },
+ /* supply for VAUX4, supplies to NFC and standalone secure element */
+ [AB8505_LDO_AUX4] = {
+ .constraints = {
+ .name = "V-NFC-SE",
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux4_consumers),
+ .consumer_supplies = ab8505_vaux4_consumers,
+ },
+ /* supply for VAUX5, supplies to TBD */
+ [AB8505_LDO_AUX5] = {
+ .constraints = {
+ .name = "V-AUX5",
+ .min_uV = 1050000,
+ .max_uV = 2790000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux5_consumers),
+ .consumer_supplies = ab8505_vaux5_consumers,
+ },
+ /* supply for VAUX6, supplies to TBD */
+ [AB8505_LDO_AUX6] = {
+ .constraints = {
+ .name = "V-AUX6",
+ .min_uV = 1050000,
+ .max_uV = 2790000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux6_consumers),
+ .consumer_supplies = ab8505_vaux6_consumers,
+ },
+ /* supply for gpadc, ADC LDO */
+ [AB8505_LDO_ADC] = {
+ .constraints = {
+ .name = "V-ADC",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8505_vadc_consumers),
+ .consumer_supplies = ab8505_vadc_consumers,
+ },
+ /* supply for ab8500-vaudio, VAUDIO LDO */
+ [AB8505_LDO_AUDIO] = {
+ .constraints = {
+ .name = "V-AUD",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaud_consumers),
+ .consumer_supplies = ab8500_vaud_consumers,
+ },
+ /* supply for v-anamic1 VAMic1-LDO */
+ [AB8505_LDO_ANAMIC1] = {
+ .constraints = {
+ .name = "V-AMIC1",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic1_consumers),
+ .consumer_supplies = ab8500_vamic1_consumers,
+ },
+ /* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
+ [AB8505_LDO_ANAMIC2] = {
+ .constraints = {
+ .name = "V-AMIC2",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic2_consumers),
+ .consumer_supplies = ab8500_vamic2_consumers,
+ },
+ /* supply for v-aux8, VAUX8 LDO */
+ [AB8505_LDO_AUX8] = {
+ .constraints = {
+ .name = "V-AUX8",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux8_consumers),
+ .consumer_supplies = ab8505_vaux8_consumers,
+ },
+ /* supply for v-intcore12, VINTCORE12 LDO */
+ [AB8505_LDO_INTCORE] = {
+ .constraints = {
+ .name = "V-INTCORE",
+ .min_uV = 1250000,
+ .max_uV = 1350000,
+ .input_uV = 1800000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE |
+ REGULATOR_CHANGE_DRMS,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers),
+ .consumer_supplies = ab8500_vintcore_consumers,
+ },
+ /* supply for LDO USB */
+ [AB8505_LDO_USB] = {
+ .constraints = {
+ .name = "V-USB",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8505_usb_consumers),
+ .consumer_supplies = ab8505_usb_consumers,
+ },
+ /* supply for U8500 CSI-DSI, VANA LDO */
+ [AB8505_LDO_ANA] = {
+ .constraints = {
+ .name = "V-CSI-DSI",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers),
+ .consumer_supplies = ab8500_vana_consumers,
+ },
+};
+
+struct ab8500_regulator_platform_data ab8500_regulator_plat_data = {
+ .reg_init = ab8500_reg_init,
+ .num_reg_init = ARRAY_SIZE(ab8500_reg_init),
+ .regulator = ab8500_regulators,
+ .num_regulator = ARRAY_SIZE(ab8500_regulators),
+ .ext_regulator = ab8500_ext_regulators,
+ .num_ext_regulator = ARRAY_SIZE(ab8500_ext_regulators),
+};
+
+/* Use the AB8500 init settings for AB8505 as they are the same right now */
+struct ab8500_regulator_platform_data ab8505_regulator_plat_data = {
+ .reg_init = ab8505_reg_init,
+ .num_reg_init = ARRAY_SIZE(ab8505_reg_init),
+ .regulator = ab8505_regulators,
+ .num_regulator = ARRAY_SIZE(ab8505_regulators),
+};
+
+static void ab8500_modify_reg_init(int id, u8 mask, u8 value)
+{
+ int i;
+
+ if (cpu_is_u8520()) {
+ for (i = ARRAY_SIZE(ab8505_reg_init) - 1; i >= 0; i--) {
+ if (ab8505_reg_init[i].id == id) {
+ u8 initval = ab8505_reg_init[i].value;
+ initval = (initval & ~mask) | (value & mask);
+ ab8505_reg_init[i].value = initval;
+
+ BUG_ON(mask & ~ab8505_reg_init[i].mask);
+ return;
+ }
+ }
+ } else {
+ for (i = ARRAY_SIZE(ab8500_reg_init) - 1; i >= 0; i--) {
+ if (ab8500_reg_init[i].id == id) {
+ u8 initval = ab8500_reg_init[i].value;
+ initval = (initval & ~mask) | (value & mask);
+ ab8500_reg_init[i].value = initval;
+
+ BUG_ON(mask & ~ab8500_reg_init[i].mask);
+ return;
+ }
+ }
+ }
+
+ BUG_ON(1);
+}
+
+void mop500_regulator_init(void)
+{
+ struct regulator_init_data *regulator;
+
+ /*
+ * Temporarily turn on Vaux2 on 8520 machine
+ */
+ if (cpu_is_u8520()) {
+ /* Vaux2 initialized to be on */
+ ab8500_modify_reg_init(AB8505_VAUX12REGU, 0x0f, 0x05);
+ }
+
+ /*
+ * Handle AB8500_EXT_SUPPLY2 on HREFP_V20_V50 boards (do it for
+ * all HREFP_V20 boards)
+ */
+ if (cpu_is_u8500v20()) {
+ /* VextSupply2RequestCtrl = HP/OFF depending on VxRequest */
+ ab8500_modify_reg_init(AB8500_REGUREQUESTCTRL3, 0x01, 0x01);
+
+ /* VextSupply2SysClkReq1HPValid = SysClkReq1 controlled */
+ ab8500_modify_reg_init(AB8500_REGUSYSCLKREQ1HPVALID2,
+ 0x20, 0x20);
+
+ /* VextSupply2 = force HP at initialization */
+ ab8500_modify_reg_init(AB8500_EXTSUPPLYREGU, 0x0c, 0x04);
+
+ /* enable VextSupply2 during platform active */
+ regulator = &ab8500_ext_regulators[AB8500_EXT_SUPPLY2];
+ regulator->constraints.always_on = 1;
+
+ /* disable VextSupply2 in suspend */
+ regulator = &ab8500_ext_regulators[AB8500_EXT_SUPPLY2];
+ regulator->constraints.state_mem.disabled = 1;
+ regulator->constraints.state_standby.disabled = 1;
+
+ /* enable VextSupply2 HW control (used in suspend) */
+ regulator->driver_data = (void *)&ab8500_ext_supply2;
+ }
+}
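
The board-file logic above boils down to the mask/value merge inside ab8500_modify_reg_init(): only bits covered by the declared mask of a table entry may be rewritten. A minimal standalone sketch of that update rule (plain C with a hypothetical two-entry table, not kernel code):

/* Standalone sketch of the mask/value update rule used above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct reg_init { int id; uint8_t mask; uint8_t value; };

static struct reg_init table[] = {
	{ .id = 1, .mask = 0x0f, .value = 0x00 },
	{ .id = 2, .mask = 0xfc, .value = 0x00 },
};

static void modify_reg_init(int id, uint8_t mask, uint8_t value)
{
	for (int i = sizeof(table) / sizeof(table[0]) - 1; i >= 0; i--) {
		if (table[i].id == id) {
			/* Only bits covered by the declared mask may change. */
			assert((mask & ~table[i].mask) == 0);
			table[i].value = (table[i].value & ~mask) | (value & mask);
			return;
		}
	}
	assert(0);	/* unknown register id */
}

int main(void)
{
	modify_reg_init(1, 0x0f, 0x05);	/* e.g. "Vaux2 initialized to be on" */
	printf("reg 1 value: 0x%02x\n", table[0].value);
	return 0;
}
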
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.h b/arch/arm/mach-ux500/board-mop500-regulators.h
index 78a0642a220..9bece38fe93 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.h
+++ b/arch/arm/mach-ux500/board-mop500-regulators.h
@@ -14,10 +14,11 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/ab8500.h>
-extern struct ab8500_regulator_reg_init
-ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS];
-extern struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS];
+extern struct ab8500_regulator_platform_data ab8500_regulator_plat_data;
+extern struct ab8500_regulator_platform_data ab8505_regulator_plat_data;
extern struct regulator_init_data tps61052_regulator;
extern struct regulator_init_data gpio_en_3v3_regulator;
+void mop500_regulator_init(void);
+
#endif
diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c
index 051b62c2710..7f2cb6c5e2c 100644
--- a/arch/arm/mach-ux500/board-mop500-sdi.c
+++ b/arch/arm/mach-ux500/board-mop500-sdi.c
@@ -81,7 +81,6 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = {
#endif
struct mmci_platform_data mop500_sdi0_data = {
- .ios_handler = mop500_sdi0_ios_handler,
.ocr_mask = MMC_VDD_29_30,
.f_max = 50000000,
.capabilities = MMC_CAP_4_BIT_DATA |
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index b03457881c4..ce672378a83 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/platform_data/i2c-nomadik.h>
@@ -198,10 +199,7 @@ static struct platform_device snowball_sbnet_dev = {
struct ab8500_platform_data ab8500_platdata = {
.irq_base = MOP500_AB8500_IRQ_BASE,
- .regulator_reg_init = ab8500_regulator_reg_init,
- .num_regulator_reg_init = ARRAY_SIZE(ab8500_regulator_reg_init),
- .regulator = ab8500_regulators,
- .num_regulator = ARRAY_SIZE(ab8500_regulators),
+ .regulator = &ab8500_regulator_plat_data,
.gpio = &ab8500_gpio_pdata,
.codec = &ab8500_codec_pdata,
};
@@ -439,6 +437,15 @@ static void mop500_prox_deactivate(struct device *dev)
regulator_put(prox_regulator);
}
+void mop500_snowball_ethernet_clock_enable(void)
+{
+ struct clk *clk;
+
+ clk = clk_get_sys("fsmc", NULL);
+ if (!IS_ERR(clk))
+ clk_prepare_enable(clk);
+}
+
static struct cryp_platform_data u8500_cryp1_platform_data = {
.mem_to_engine = {
.dir = STEDMA40_MEM_TO_PERIPH,
@@ -683,6 +690,8 @@ static void __init snowball_init_machine(void)
mop500_audio_init(parent);
mop500_uart_init(parent);
+ mop500_snowball_ethernet_clock_enable();
+
/* This board has full regulator constraints */
regulator_has_full_constraints();
}
diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h
index eaa605f5d90..d38951be70d 100644
--- a/arch/arm/mach-ux500/board-mop500.h
+++ b/arch/arm/mach-ux500/board-mop500.h
@@ -104,6 +104,7 @@ void __init mop500_pinmaps_init(void);
void __init snowball_pinmaps_init(void);
void __init hrefv60_pinmaps_init(void);
void mop500_audio_init(struct device *parent);
+void mop500_snowball_ethernet_clock_enable(void);
int __init mop500_uib_init(void);
void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 19235cf7bbe..f1a58184437 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -312,9 +312,10 @@ static void __init u8500_init_machine(void)
/* Pinmaps must be in place before devices register */
if (of_machine_is_compatible("st-ericsson,mop500"))
mop500_pinmaps_init();
- else if (of_machine_is_compatible("calaosystems,snowball-a9500"))
+ else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
snowball_pinmaps_init();
- else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
+ mop500_snowball_ethernet_clock_enable();
+ } else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
hrefv60_pinmaps_init();
else if (of_machine_is_compatible("st-ericsson,ccu9540")) {}
/* TODO: Add pinmaps for ccu9540 board. */
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 915683cb67d..c5e20b52e3b 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -21,6 +21,8 @@
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/vexpress.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
#include <asm/arch_timer.h>
#include <asm/mach-types.h>
@@ -433,7 +435,7 @@ static void __init v2m_dt_timer_init(void)
{
struct device_node *node = NULL;
- vexpress_clk_of_init();
+ of_clk_init(NULL);
do {
node = of_find_compatible_node(node, NULL, "arm,sp804");
@@ -441,6 +443,10 @@ static void __init v2m_dt_timer_init(void)
if (node) {
pr_info("Using SP804 '%s' as a clock & events source\n",
node->full_name);
+ WARN_ON(clk_register_clkdev(of_clk_get_by_name(node,
+ "timclken1"), "v2m-timer0", "sp804"));
+ WARN_ON(clk_register_clkdev(of_clk_get_by_name(node,
+ "timclken2"), "v2m-timer1", "sp804"));
v2m_sp804_init(of_iomap(node, 0),
irq_of_parse_and_map(node, 0));
}
diff --git a/arch/arm/mach-w90x900/dev.c b/arch/arm/mach-w90x900/dev.c
index 7abdb9645c5..e65a80a1ac7 100644
--- a/arch/arm/mach-w90x900/dev.c
+++ b/arch/arm/mach-w90x900/dev.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/cpu.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/mtd.h>
@@ -531,7 +532,7 @@ static struct platform_device *nuc900_public_dev[] __initdata = {
void __init nuc900_board_init(struct platform_device **device, int size)
{
- disable_hlt();
+ cpu_idle_poll_ctrl(true);
platform_add_devices(device, size);
platform_add_devices(nuc900_public_dev, ARRAY_SIZE(nuc900_public_dev));
spi_register_board_info(nuc900_spi_board_info,
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 025d1732873..4045c4931a3 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -43,7 +43,7 @@ config CPU_ARM740T
depends on !MMU
select CPU_32v4T
select CPU_ABRT_LV4T
- select CPU_CACHE_V3 # although the core is v4t
+ select CPU_CACHE_V4
select CPU_CP15_MPU
select CPU_PABRT_LEGACY
help
@@ -469,9 +469,6 @@ config CPU_PABRT_V7
bool
# The cache model
-config CPU_CACHE_V3
- bool
-
config CPU_CACHE_V4
bool
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 4e333fa2756..9e51be96f63 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o
obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o
obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o
-obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o
obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o
obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o
obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index dd3d59122cc..48bc3c0a87c 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
outer_cache.inv_range = feroceon_l2_inv_range;
outer_cache.clean_range = feroceon_l2_clean_range;
outer_cache.flush_range = feroceon_l2_flush_range;
+ outer_cache.inv_all = l2_inv_all;
enable_l2();
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c2f37390308..c465faca51b 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id)
int lockregs;
int i;
- switch (cache_id) {
+ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
lockregs = 8;
break;
@@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
if (cache_id_part_number_from_dt)
cache_id = cache_id_part_number_from_dt;
else
- cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID)
- & L2X0_CACHE_ID_PART_MASK;
+ cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
aux |= aux_val;
/* Determine the number of ways */
- switch (cache_id) {
+ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
if (aux & (1 << 16))
ways = 16;
@@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = {
.flush_all = l2x0_flush_all,
.inv_all = l2x0_inv_all,
.disable = l2x0_disable,
- .set_debug = pl310_set_debug,
},
};
@@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
data->save();
of_init = true;
- l2x0_init(l2x0_base, aux_val, aux_mask);
-
memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
+ l2x0_init(l2x0_base, aux_val, aux_mask);
return 0;
}
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
deleted file mode 100644
index 8a3fadece8d..00000000000
--- a/arch/arm/mm/cache-v3.S
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * linux/arch/arm/mm/cache-v3.S
- *
- * Copyright (C) 1997-2002 Russell king
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/page.h>
-#include "proc-macros.S"
-
-/*
- * flush_icache_all()
- *
- * Unconditionally clean and invalidate the entire icache.
- */
-ENTRY(v3_flush_icache_all)
- mov pc, lr
-ENDPROC(v3_flush_icache_all)
-
-/*
- * flush_user_cache_all()
- *
- * Invalidate all cache entries in a particular address
- * space.
- *
- * - mm - mm_struct describing address space
- */
-ENTRY(v3_flush_user_cache_all)
- /* FALLTHROUGH */
-/*
- * flush_kern_cache_all()
- *
- * Clean and invalidate the entire cache.
- */
-ENTRY(v3_flush_kern_cache_all)
- /* FALLTHROUGH */
-
-/*
- * flush_user_cache_range(start, end, flags)
- *
- * Invalidate a range of cache entries in the specified
- * address space.
- *
- * - start - start address (may not be aligned)
- * - end - end address (exclusive, may not be aligned)
- * - flags - vma_area_struct flags describing address space
- */
-ENTRY(v3_flush_user_cache_range)
- mov ip, #0
- mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache
- mov pc, lr
-
-/*
- * coherent_kern_range(start, end)
- *
- * Ensure coherency between the Icache and the Dcache in the
- * region described by start. If you have non-snooping
- * Harvard caches, you need to implement this function.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_coherent_kern_range)
- /* FALLTHROUGH */
-
-/*
- * coherent_user_range(start, end)
- *
- * Ensure coherency between the Icache and the Dcache in the
- * region described by start. If you have non-snooping
- * Harvard caches, you need to implement this function.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_coherent_user_range)
- mov r0, #0
- mov pc, lr
-
-/*
- * flush_kern_dcache_area(void *page, size_t size)
- *
- * Ensure no D cache aliasing occurs, either with itself or
- * the I cache
- *
- * - addr - kernel address
- * - size - region size
- */
-ENTRY(v3_flush_kern_dcache_area)
- /* FALLTHROUGH */
-
-/*
- * dma_flush_range(start, end)
- *
- * Clean and invalidate the specified virtual address range.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_dma_flush_range)
- mov r0, #0
- mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
- mov pc, lr
-
-/*
- * dma_unmap_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-ENTRY(v3_dma_unmap_area)
- teq r2, #DMA_TO_DEVICE
- bne v3_dma_flush_range
- /* FALLTHROUGH */
-
-/*
- * dma_map_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-ENTRY(v3_dma_map_area)
- mov pc, lr
-ENDPROC(v3_dma_unmap_area)
-ENDPROC(v3_dma_map_area)
-
- .globl v3_flush_kern_cache_louis
- .equ v3_flush_kern_cache_louis, v3_flush_kern_cache_all
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v3
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 43e5d77be67..a7ba68f59f0 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all)
ENTRY(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
mov ip, #0
- mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache
+ mcr p15, 0, ip, c7, c7, 0 @ flush ID cache
mov pc, lr
#else
/* FALLTHROUGH */
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 7a0511191f6..2ac37372ef5 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
-static DEFINE_PER_CPU(atomic64_t, active_asids);
+DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
@@ -152,9 +152,9 @@ static int is_reserved_asid(u64 asid)
return 0;
}
-static void new_context(struct mm_struct *mm, unsigned int cpu)
+static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
- u64 asid = mm->context.id;
+ u64 asid = atomic64_read(&mm->context.id);
u64 generation = atomic64_read(&asid_generation);
if (asid != 0 && is_reserved_asid(asid)) {
@@ -181,13 +181,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
cpumask_clear(mm_cpumask(mm));
}
- mm->context.id = asid;
+ return asid;
}
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
unsigned long flags;
unsigned int cpu = smp_processor_id();
+ u64 asid;
if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
__check_vmalloc_seq(mm);
@@ -198,20 +199,27 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
*/
cpu_set_reserved_ttbr0();
- if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
- && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+ asid = atomic64_read(&mm->context.id);
+ if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+ && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
goto switch_mm_fastpath;
raw_spin_lock_irqsave(&cpu_asid_lock, flags);
/* Check that our ASID belongs to the current generation. */
- if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
- new_context(mm, cpu);
-
- atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
- cpumask_set_cpu(cpu, mm_cpumask(mm));
+ asid = atomic64_read(&mm->context.id);
+ if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
+ asid = new_context(mm, cpu);
+ atomic64_set(&mm->context.id, asid);
+ }
- if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+ if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
+ local_flush_bp_all();
local_flush_tlb_all();
+ dummy_flush_tlb_a15_erratum();
+ }
+
+ atomic64_set(&per_cpu(active_asids, cpu), asid);
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
switch_mm_fastpath:
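
The fast-path test in check_and_switch_context() above treats the 64-bit context id as a generation counter in the bits above ASID_BITS plus a hardware ASID below them; xor-ing with the current generation and shifting right by ASID_BITS is non-zero exactly when the ASID was handed out under an older generation. A standalone illustration (userspace C; the ASID_BITS value is chosen arbitrarily for the sketch):

/* Sketch of the ASID generation check, not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ASID_BITS	8	/* assumption for illustration */

static bool asid_is_stale(uint64_t asid, uint64_t generation)
{
	/* Non-zero high bits after the xor => allocated under an older generation. */
	return ((asid ^ generation) >> ASID_BITS) != 0;
}

int main(void)
{
	uint64_t generation = 3ULL << ASID_BITS;

	printf("%d\n", asid_is_stale((3ULL << ASID_BITS) | 0x42, generation)); /* 0: current */
	printf("%d\n", asid_is_stale((2ULL << ASID_BITS) | 0x42, generation)); /* 1: stale  */
	return 0;
}
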
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c7e3759f16d..e9db6b4bf65 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -342,6 +342,7 @@ static int __init atomic_pool_init(void)
{
struct dma_pool *pool = &atomic_pool;
pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+ gfp_t gfp = GFP_KERNEL | GFP_DMA;
unsigned long nr_pages = pool->size >> PAGE_SHIFT;
unsigned long *bitmap;
struct page *page;
@@ -361,8 +362,8 @@ static int __init atomic_pool_init(void)
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
atomic_pool_init);
else
- ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
- &page, atomic_pool_init);
+ ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
+ atomic_pool_init);
if (ptr) {
int i;
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 2dffc010cc4..5ee505c937d 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -141,6 +141,7 @@ void setup_mm_for_reboot(void)
{
/* Switch to the identity mapping. */
cpu_switch_mm(idmap_pgd, &init_mm);
+ local_flush_bp_all();
#ifdef CONFIG_CPU_HAS_ASID
/*
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ad722f1208a..9a5cdc01fcd 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -99,6 +99,9 @@ void show_mem(unsigned int filter)
printk("Mem-info:\n");
show_free_areas(filter);
+ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
+ return;
+
for_each_bank (i, mi) {
struct membank *bank = &mi->bank[i];
unsigned int pfn1, pfn2;
@@ -424,24 +427,6 @@ void __init bootmem_init(void)
max_pfn = max_high - PHYS_PFN_OFFSET;
}
-static inline int free_area(unsigned long pfn, unsigned long end, char *s)
-{
- unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
-
- for (; pfn < end; pfn++) {
- struct page *page = pfn_to_page(pfn);
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- pages++;
- }
-
- if (size && s)
- printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
-
- return pages;
-}
-
/*
* Poison init memory with an undefined instruction (ARM) or a branch to an
* undefined instruction (Thumb).
@@ -534,6 +519,14 @@ static void __init free_unused_memmap(struct meminfo *mi)
#endif
}
+#ifdef CONFIG_HIGHMEM
+static inline void free_area_high(unsigned long pfn, unsigned long end)
+{
+ for (; pfn < end; pfn++)
+ free_highmem_page(pfn_to_page(pfn));
+}
+#endif
+
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
@@ -569,8 +562,7 @@ static void __init free_highpages(void)
if (res_end > end)
res_end = end;
if (res_start != start)
- totalhigh_pages += free_area(start, res_start,
- NULL);
+ free_area_high(start, res_start);
start = res_end;
if (start == end)
break;
@@ -578,9 +570,8 @@ static void __init free_highpages(void)
/* And now free anything which remains */
if (start < end)
- totalhigh_pages += free_area(start, end, NULL);
+ free_area_high(start, end);
}
- totalram_pages += totalhigh_pages;
#endif
}
@@ -609,8 +600,7 @@ void __init mem_init(void)
#ifdef CONFIG_SA1111
/* now that our DMA memory is actually so designated, we can free it */
- totalram_pages += free_area(PHYS_PFN_OFFSET,
- __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
+ free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL);
#endif
free_highpages();
@@ -738,16 +728,12 @@ void free_initmem(void)
extern char __tcm_start, __tcm_end;
poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
- totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
- __phys_to_pfn(__pa(&__tcm_end)),
- "TCM link");
+ free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link");
#endif
poison_init_mem(__init_begin, __init_end - __init_begin);
if (!machine_is_integrator() && !machine_is_cintegrator())
- totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
- __phys_to_pfn(__pa(__init_end)),
- "init");
+ free_initmem_default(0);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -758,9 +744,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
{
if (!keep_initrd) {
poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
- totalram_pages += free_area(__phys_to_pfn(__pa(start)),
- __phys_to_pfn(__pa(end)),
- "initrd");
+ free_reserved_area(start, end, 0, "initrd");
}
}
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e95a996ab78..a84ff763ac3 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -34,6 +34,7 @@
#include <asm/mach/pci.h>
#include "mm.h"
+#include "tcm.h"
/*
* empty_zero_page is a special page that is used for
@@ -598,39 +599,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
} while (pte++, addr += PAGE_SIZE, addr != end);
}
-static void __init alloc_init_section(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys,
- const struct mem_type *type)
+static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+ unsigned long end, phys_addr_t phys,
+ const struct mem_type *type)
{
- pmd_t *pmd = pmd_offset(pud, addr);
-
+#ifndef CONFIG_ARM_LPAE
/*
- * Try a section mapping - end, addr and phys must all be aligned
- * to a section boundary. Note that PMDs refer to the individual
- * L1 entries, whereas PGDs refer to a group of L1 entries making
- * up one logical pointer to an L2 table.
+ * In classic MMU format, puds and pmds are folded into
+ * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
+ * group of L1 entries making up one logical pointer to
+ * an L2 table (2MB), whereas PMDs refer to the individual
+ * L1 entries (1MB). Hence increment to get the correct
+ * offset for odd 1MB sections.
+ * (See arch/arm/include/asm/pgtable-2level.h)
*/
- if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
- pmd_t *p = pmd;
-
-#ifndef CONFIG_ARM_LPAE
- if (addr & SECTION_SIZE)
- pmd++;
+ if (addr & SECTION_SIZE)
+ pmd++;
#endif
+ do {
+ *pmd = __pmd(phys | type->prot_sect);
+ phys += SECTION_SIZE;
+ } while (pmd++, addr += SECTION_SIZE, addr != end);
- do {
- *pmd = __pmd(phys | type->prot_sect);
- phys += SECTION_SIZE;
- } while (pmd++, addr += SECTION_SIZE, addr != end);
+ flush_pmd_entry(pmd);
+}
- flush_pmd_entry(p);
- } else {
+static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+ unsigned long end, phys_addr_t phys,
+ const struct mem_type *type)
+{
+ pmd_t *pmd = pmd_offset(pud, addr);
+ unsigned long next;
+
+ do {
/*
- * No need to loop; pte's aren't interested in the
- * individual L1 entries.
+ * With LPAE, we must loop over the range to map
+ * all the pmds it covers.
*/
- alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
- }
+ next = pmd_addr_end(addr, end);
+
+ /*
+ * Try a section mapping - addr, next and phys must all be
+ * aligned to a section boundary.
+ */
+ if (type->prot_sect &&
+ ((addr | next | phys) & ~SECTION_MASK) == 0) {
+ map_init_section(pmd, addr, next, phys, type);
+ } else {
+ alloc_init_pte(pmd, addr, next,
+ __phys_to_pfn(phys), type);
+ }
+
+ phys += next - addr;
+
+ } while (pmd++, addr = next, addr != end);
}
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -641,7 +663,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
do {
next = pud_addr_end(addr, end);
- alloc_init_section(pud, addr, next, phys, type);
+ alloc_init_pmd(pud, addr, next, phys, type);
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
@@ -1256,6 +1278,7 @@ void __init paging_init(struct machine_desc *mdesc)
dma_contiguous_remap();
devicemaps_init(mdesc);
kmap_init();
+ tcm_init();
top_pmd = pmd_off_k(0xffff0000);
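
alloc_init_pmd() above picks a section mapping only when the virtual range and the physical address all sit on a section boundary; otherwise it falls back to building a page table with alloc_init_pte(). The predicate on its own (plain C, assuming the classic 1MB ARM section size rather than the 2MB LPAE one):

/* Sketch of the "can we use a section mapping?" test, not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTION_SIZE	(1UL << 20)		/* 1MB, classic ARM assumption */
#define SECTION_MASK	(~(SECTION_SIZE - 1))

static bool can_map_section(uint64_t addr, uint64_t next, uint64_t phys)
{
	/* All three must sit on a section boundary, otherwise fall back to ptes. */
	return ((addr | next | phys) & ~SECTION_MASK) == 0;
}

int main(void)
{
	printf("%d\n", can_map_section(0xc0000000, 0xc0100000, 0x80000000)); /* 1 */
	printf("%d\n", can_map_section(0xc0000000, 0xc0100000, 0x80001000)); /* 0 */
	return 0;
}
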
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index dc5de5d53f2..fde2d2a794c 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -77,24 +77,27 @@ __arm740_setup:
mcr p15, 0, r0, c6, c0 @ set area 0, default
ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
- ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
- mov r2, #10 @ 11 is the minimum (4KB)
-1: add r2, r2, #1 @ area size *= 2
- mov r1, r1, lsr #1
+ ldr r3, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
+ mov r4, #10 @ 11 is the minimum (4KB)
+1: add r4, r4, #1 @ area size *= 2
+ movs r3, r3, lsr #1
bne 1b @ count not zero r-shift
- orr r0, r0, r2, lsl #1 @ the area register value
+ orr r0, r0, r4, lsl #1 @ the area register value
orr r0, r0, #1 @ set enable bit
mcr p15, 0, r0, c6, c1 @ set area 1, RAM
ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
- ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
- mov r2, #10 @ 11 is the minimum (4KB)
-1: add r2, r2, #1 @ area size *= 2
- mov r1, r1, lsr #1
+ ldr r3, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
+ cmp r3, #0
+ moveq r0, #0
+ beq 2f
+ mov r4, #10 @ 11 is the minimum (4KB)
+1: add r4, r4, #1 @ area size *= 2
+ movs r3, r3, lsr #1
bne 1b @ count not zero r-shift
- orr r0, r0, r2, lsl #1 @ the area register value
+ orr r0, r0, r4, lsl #1 @ the area register value
orr r0, r0, #1 @ set enable bit
- mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
+2: mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
mov r0, #0x06
mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable
@@ -137,13 +140,14 @@ __arm740_proc_info:
.long 0x41807400
.long 0xfffffff0
.long 0
+ .long 0
b __arm740_setup
.long cpu_arch_name
.long cpu_elf_name
- .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT
+ .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT
.long cpu_arm740_name
.long arm740_processor_functions
.long 0
.long 0
- .long v3_cache_fns @ cache model
+ .long v4_cache_fns @ cache model
.size __arm740_proc_info, . - __arm740_proc_info
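
The __arm740_setup changes above compute the MPU area-size field by counting how many times the region size (in 4KB units) can be halved; the patch switches the shift to movs so the result actually drives the loop exit, and adds a guard that leaves area 2 disabled when CONFIG_FLASH_SIZE is zero. The same encoding in C (illustrative only, not kernel code):

/* Sketch of the ARM740T MPU area-size encoding computed above:
 * roughly log2(size) - 1, with 4KB (encoding 11) as the minimum. */
#include <stdint.h>
#include <stdio.h>

static uint32_t area_size_field(uint32_t size_in_4k_pages)
{
	uint32_t enc = 10;		/* 11 is the minimum (4KB) */

	if (size_in_4k_pages == 0)
		return 0;		/* as in the patch: leave the area disabled */
	do {
		enc++;
		size_in_4k_pages >>= 1;
	} while (size_in_4k_pages);
	return enc;
}

int main(void)
{
	/* 64MB of DRAM = 16384 4KB pages -> encoding 25 (a 2^26-byte region). */
	printf("%u\n", area_size_field(64 * 1024 * 1024 >> 12));
	return 0;
}
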
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 2c3b9421ab5..2556cf1c2da 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm920_suspend_size
.equ cpu_arm920_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_arm920_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index f1803f7e297..344c8a548cc 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm926_suspend_size
.equ cpu_arm926_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_arm926_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 82f9cdc751d..0b60dd3d742 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
.globl cpu_mohawk_suspend_size
.equ cpu_mohawk_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_mohawk_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 3aa0da11fd8..d92dfd08142 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
.globl cpu_sa1100_suspend_size
.equ cpu_sa1100_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_sa1100_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c3, c0, 0 @ domain ID
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 3e6210b4d6d..054b491ff76 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -17,7 +17,9 @@
#ifndef MULTI_CPU
EXPORT_SYMBOL(cpu_dcache_clean_area);
+#ifdef CONFIG_MMU
EXPORT_SYMBOL(cpu_set_pte_ext);
+#endif
#else
EXPORT_SYMBOL(processor);
#endif
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index bcaaa8de932..5c07ee4fe3e 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
.globl cpu_v6_suspend_size
.equ cpu_v6_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v6_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 50bf1dafc9e..6ffd78c0f9a 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -48,7 +48,7 @@
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mmid r1, r1 @ get mm->context.id
- and r3, r1, #0xff
+ asid r3, r1
mov r3, r3, lsl #(48 - 32) @ ASID
mcrr p15, 0, r0, r3, c2 @ set TTB 0
isb
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 3a3c015f8d5..f584d3f5b37 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -420,7 +420,7 @@ __v7_pj4b_proc_info:
__v7_ca7mp_proc_info:
.long 0x410fc070
.long 0xff0ffff0
- __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
+ __v7_proc __v7_ca7mp_setup
.size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
/*
@@ -430,10 +430,25 @@ __v7_ca7mp_proc_info:
__v7_ca15mp_proc_info:
.long 0x410fc0f0
.long 0xff0ffff0
- __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV
+ __v7_proc __v7_ca15mp_setup
.size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
/*
+ * Qualcomm Inc. Krait processors.
+ */
+ .type __krait_proc_info, #object
+__krait_proc_info:
+ .long 0x510f0400 @ Required ID value
+ .long 0xff0ffc00 @ Mask for ID
+ /*
+ * Some Krait processors don't indicate support for SDIV and UDIV
+ * instructions in the ARM instruction set, even though they actually
+ * do support them.
+ */
+ __v7_proc __v7_setup, hwcaps = HWCAP_IDIV
+ .size __krait_proc_info, . - __krait_proc_info
+
+ /*
* Match any ARMv7 processor core.
*/
.type __v7_proc_info, #object
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index eb93d6487f3..e8efd83b6f2 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
.globl cpu_xsc3_suspend_size
.equ cpu_xsc3_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xsc3_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 25510361aa1..e766f889bfd 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)
.globl cpu_xscale_suspend_size
.equ cpu_xscale_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xscale_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/kernel/tcm.h b/arch/arm/mm/tcm.h
index 8015ad434a4..8015ad434a4 100644
--- a/arch/arm/kernel/tcm.h
+++ b/arch/arm/mm/tcm.h
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 6828ef6ce80..a0bd8a755bd 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -576,7 +576,7 @@ load_ind:
/* x = ((*(frame + k)) & 0xf) << 2; */
ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
/* the interpreter should deal with the negative K */
- if (k < 0)
+ if ((int)k < 0)
return -1;
/* offset in r1: we might have to take the slow path */
emit_mov_i(r_off, k, ctx);
diff --git a/arch/arm/plat-orion/addr-map.c b/arch/arm/plat-orion/addr-map.c
index febe3862873..807ac8e5cbc 100644
--- a/arch/arm/plat-orion/addr-map.c
+++ b/arch/arm/plat-orion/addr-map.c
@@ -157,9 +157,12 @@ void __init orion_setup_cpu_mbus_target(const struct orion_addr_map_cfg *cfg,
u32 size = readl(ddr_window_cpu_base + DDR_SIZE_CS_OFF(i));
/*
- * Chip select enabled?
+ * We only take care of entries for which the chip
+ * select is enabled, and that don't have high base
+ * address bits set (devices can only access the first
+ * 32 bits of the memory).
*/
- if (size & 1) {
+ if ((size & 1) && !(base & 0xF)) {
struct mbus_dram_window *w;
w = &orion_mbus_dram_info.cs[cs++];
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 51afedda9ab..03db14d8ace 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -10,6 +10,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/amba/pl330.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
@@ -1552,6 +1553,9 @@ void __init s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
pd.num_cs = num_cs;
pd.src_clk_nr = src_clk_nr;
pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio;
+#ifdef CONFIG_PL330_DMA
+ pd.filter = pl330_filter;
+#endif
s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0);
}
@@ -1590,6 +1594,9 @@ void __init s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
pd.num_cs = num_cs;
pd.src_clk_nr = src_clk_nr;
pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi1_cfg_gpio;
+#ifdef CONFIG_PL330_DMA
+ pd.filter = pl330_filter;
+#endif
s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi1);
}
@@ -1628,6 +1635,9 @@ void __init s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
pd.num_cs = num_cs;
pd.src_clk_nr = src_clk_nr;
pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi2_cfg_gpio;
+#ifdef CONFIG_PL330_DMA
+ pd.filter = pl330_filter;
+#endif
s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi2);
}
diff --git a/arch/arm/plat-samsung/include/plat/fb.h b/arch/arm/plat-samsung/include/plat/fb.h
index b885322717a..9ae50727078 100644
--- a/arch/arm/plat-samsung/include/plat/fb.h
+++ b/arch/arm/plat-samsung/include/plat/fb.h
@@ -15,55 +15,7 @@
#ifndef __PLAT_S3C_FB_H
#define __PLAT_S3C_FB_H __FILE__
-/* S3C_FB_MAX_WIN
- * Set to the maximum number of windows that any of the supported hardware
- * can use. Since the platform data uses this for an array size, having it
- * set to the maximum of any version of the hardware can do is safe.
- */
-#define S3C_FB_MAX_WIN (5)
-
-/**
- * struct s3c_fb_pd_win - per window setup data
- * @xres : The window X size.
- * @yres : The window Y size.
- * @virtual_x: The virtual X size.
- * @virtual_y: The virtual Y size.
- */
-struct s3c_fb_pd_win {
- unsigned short default_bpp;
- unsigned short max_bpp;
- unsigned short xres;
- unsigned short yres;
- unsigned short virtual_x;
- unsigned short virtual_y;
-};
-
-/**
- * struct s3c_fb_platdata - S3C driver platform specific information
- * @setup_gpio: Setup the external GPIO pins to the right state to transfer
- * the data from the display system to the connected display
- * device.
- * @vidcon0: The base vidcon0 values to control the panel data format.
- * @vidcon1: The base vidcon1 values to control the panel data output.
- * @vtiming: Video timing when connected to a RGB type panel.
- * @win: The setup data for each hardware window, or NULL for unused.
- * @display_mode: The LCD output display mode.
- *
- * The platform data supplies the video driver with all the information
- * it requires to work with the display(s) attached to the machine. It
- * controls the initial mode, the number of display windows (0 is always
- * the base framebuffer) that are initialised etc.
- *
- */
-struct s3c_fb_platdata {
- void (*setup_gpio)(void);
-
- struct s3c_fb_pd_win *win[S3C_FB_MAX_WIN];
- struct fb_videomode *vtiming;
-
- u32 vidcon0;
- u32 vidcon1;
-};
+#include <linux/platform_data/video_s3c.h>
/**
* s3c_fb_set_platdata() - Setup the FB device with platform data.
diff --git a/arch/arm/plat-samsung/include/plat/regs-serial.h b/arch/arm/plat-samsung/include/plat/regs-serial.h
index 29c26a81884..f05f2afa440 100644
--- a/arch/arm/plat-samsung/include/plat/regs-serial.h
+++ b/arch/arm/plat-samsung/include/plat/regs-serial.h
@@ -1,281 +1 @@
-/* arch/arm/plat-samsung/include/plat/regs-serial.h
- *
- * From linux/include/asm-arm/hardware/serial_s3c2410.h
- *
- * Internal header file for Samsung S3C2410 serial ports (UART0-2)
- *
- * Copyright (C) 2002 Shane Nay (shane@minirl.com)
- *
- * Additional defines, Copyright 2003 Simtec Electronics (linux@simtec.co.uk)
- *
- * Adapted from:
- *
- * Internal header file for MX1ADS serial ports (UART1 & 2)
- *
- * Copyright (C) 2002 Shane Nay (shane@minirl.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-#ifndef __ASM_ARM_REGS_SERIAL_H
-#define __ASM_ARM_REGS_SERIAL_H
-
-#define S3C24XX_VA_UART0 (S3C_VA_UART)
-#define S3C24XX_VA_UART1 (S3C_VA_UART + 0x4000 )
-#define S3C24XX_VA_UART2 (S3C_VA_UART + 0x8000 )
-#define S3C24XX_VA_UART3 (S3C_VA_UART + 0xC000 )
-
-#define S3C2410_PA_UART0 (S3C24XX_PA_UART)
-#define S3C2410_PA_UART1 (S3C24XX_PA_UART + 0x4000 )
-#define S3C2410_PA_UART2 (S3C24XX_PA_UART + 0x8000 )
-#define S3C2443_PA_UART3 (S3C24XX_PA_UART + 0xC000 )
-
-#define S3C2410_URXH (0x24)
-#define S3C2410_UTXH (0x20)
-#define S3C2410_ULCON (0x00)
-#define S3C2410_UCON (0x04)
-#define S3C2410_UFCON (0x08)
-#define S3C2410_UMCON (0x0C)
-#define S3C2410_UBRDIV (0x28)
-#define S3C2410_UTRSTAT (0x10)
-#define S3C2410_UERSTAT (0x14)
-#define S3C2410_UFSTAT (0x18)
-#define S3C2410_UMSTAT (0x1C)
-
-#define S3C2410_LCON_CFGMASK ((0xF<<3)|(0x3))
-
-#define S3C2410_LCON_CS5 (0x0)
-#define S3C2410_LCON_CS6 (0x1)
-#define S3C2410_LCON_CS7 (0x2)
-#define S3C2410_LCON_CS8 (0x3)
-#define S3C2410_LCON_CSMASK (0x3)
-
-#define S3C2410_LCON_PNONE (0x0)
-#define S3C2410_LCON_PEVEN (0x5 << 3)
-#define S3C2410_LCON_PODD (0x4 << 3)
-#define S3C2410_LCON_PMASK (0x7 << 3)
-
-#define S3C2410_LCON_STOPB (1<<2)
-#define S3C2410_LCON_IRM (1<<6)
-
-#define S3C2440_UCON_CLKMASK (3<<10)
-#define S3C2440_UCON_CLKSHIFT (10)
-#define S3C2440_UCON_PCLK (0<<10)
-#define S3C2440_UCON_UCLK (1<<10)
-#define S3C2440_UCON_PCLK2 (2<<10)
-#define S3C2440_UCON_FCLK (3<<10)
-#define S3C2443_UCON_EPLL (3<<10)
-
-#define S3C6400_UCON_CLKMASK (3<<10)
-#define S3C6400_UCON_CLKSHIFT (10)
-#define S3C6400_UCON_PCLK (0<<10)
-#define S3C6400_UCON_PCLK2 (2<<10)
-#define S3C6400_UCON_UCLK0 (1<<10)
-#define S3C6400_UCON_UCLK1 (3<<10)
-
-#define S3C2440_UCON2_FCLK_EN (1<<15)
-#define S3C2440_UCON0_DIVMASK (15 << 12)
-#define S3C2440_UCON1_DIVMASK (15 << 12)
-#define S3C2440_UCON2_DIVMASK (7 << 12)
-#define S3C2440_UCON_DIVSHIFT (12)
-
-#define S3C2412_UCON_CLKMASK (3<<10)
-#define S3C2412_UCON_CLKSHIFT (10)
-#define S3C2412_UCON_UCLK (1<<10)
-#define S3C2412_UCON_USYSCLK (3<<10)
-#define S3C2412_UCON_PCLK (0<<10)
-#define S3C2412_UCON_PCLK2 (2<<10)
-
-#define S3C2410_UCON_CLKMASK (1 << 10)
-#define S3C2410_UCON_CLKSHIFT (10)
-#define S3C2410_UCON_UCLK (1<<10)
-#define S3C2410_UCON_SBREAK (1<<4)
-
-#define S3C2410_UCON_TXILEVEL (1<<9)
-#define S3C2410_UCON_RXILEVEL (1<<8)
-#define S3C2410_UCON_TXIRQMODE (1<<2)
-#define S3C2410_UCON_RXIRQMODE (1<<0)
-#define S3C2410_UCON_RXFIFO_TOI (1<<7)
-#define S3C2443_UCON_RXERR_IRQEN (1<<6)
-#define S3C2443_UCON_LOOPBACK (1<<5)
-
-#define S3C2410_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
- S3C2410_UCON_RXILEVEL | \
- S3C2410_UCON_TXIRQMODE | \
- S3C2410_UCON_RXIRQMODE | \
- S3C2410_UCON_RXFIFO_TOI)
-
-#define S3C2410_UFCON_FIFOMODE (1<<0)
-#define S3C2410_UFCON_TXTRIG0 (0<<6)
-#define S3C2410_UFCON_RXTRIG8 (1<<4)
-#define S3C2410_UFCON_RXTRIG12 (2<<4)
-
-/* S3C2440 FIFO trigger levels */
-#define S3C2440_UFCON_RXTRIG1 (0<<4)
-#define S3C2440_UFCON_RXTRIG8 (1<<4)
-#define S3C2440_UFCON_RXTRIG16 (2<<4)
-#define S3C2440_UFCON_RXTRIG32 (3<<4)
-
-#define S3C2440_UFCON_TXTRIG0 (0<<6)
-#define S3C2440_UFCON_TXTRIG16 (1<<6)
-#define S3C2440_UFCON_TXTRIG32 (2<<6)
-#define S3C2440_UFCON_TXTRIG48 (3<<6)
-
-#define S3C2410_UFCON_RESETBOTH (3<<1)
-#define S3C2410_UFCON_RESETTX (1<<2)
-#define S3C2410_UFCON_RESETRX (1<<1)
-
-#define S3C2410_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
- S3C2410_UFCON_TXTRIG0 | \
- S3C2410_UFCON_RXTRIG8 )
-
-#define S3C2410_UMCOM_AFC (1<<4)
-#define S3C2410_UMCOM_RTS_LOW (1<<0)
-
-#define S3C2412_UMCON_AFC_63 (0<<5) /* same as s3c2443 */
-#define S3C2412_UMCON_AFC_56 (1<<5)
-#define S3C2412_UMCON_AFC_48 (2<<5)
-#define S3C2412_UMCON_AFC_40 (3<<5)
-#define S3C2412_UMCON_AFC_32 (4<<5)
-#define S3C2412_UMCON_AFC_24 (5<<5)
-#define S3C2412_UMCON_AFC_16 (6<<5)
-#define S3C2412_UMCON_AFC_8 (7<<5)
-
-#define S3C2410_UFSTAT_TXFULL (1<<9)
-#define S3C2410_UFSTAT_RXFULL (1<<8)
-#define S3C2410_UFSTAT_TXMASK (15<<4)
-#define S3C2410_UFSTAT_TXSHIFT (4)
-#define S3C2410_UFSTAT_RXMASK (15<<0)
-#define S3C2410_UFSTAT_RXSHIFT (0)
-
-/* UFSTAT S3C2443 same as S3C2440 */
-#define S3C2440_UFSTAT_TXFULL (1<<14)
-#define S3C2440_UFSTAT_RXFULL (1<<6)
-#define S3C2440_UFSTAT_TXSHIFT (8)
-#define S3C2440_UFSTAT_RXSHIFT (0)
-#define S3C2440_UFSTAT_TXMASK (63<<8)
-#define S3C2440_UFSTAT_RXMASK (63)
-
-#define S3C2410_UTRSTAT_TXE (1<<2)
-#define S3C2410_UTRSTAT_TXFE (1<<1)
-#define S3C2410_UTRSTAT_RXDR (1<<0)
-
-#define S3C2410_UERSTAT_OVERRUN (1<<0)
-#define S3C2410_UERSTAT_FRAME (1<<2)
-#define S3C2410_UERSTAT_BREAK (1<<3)
-#define S3C2443_UERSTAT_PARITY (1<<1)
-
-#define S3C2410_UERSTAT_ANY (S3C2410_UERSTAT_OVERRUN | \
- S3C2410_UERSTAT_FRAME | \
- S3C2410_UERSTAT_BREAK)
-
-#define S3C2410_UMSTAT_CTS (1<<0)
-#define S3C2410_UMSTAT_DeltaCTS (1<<2)
-
-#define S3C2443_DIVSLOT (0x2C)
-
-/* S3C64XX interrupt registers. */
-#define S3C64XX_UINTP 0x30
-#define S3C64XX_UINTSP 0x34
-#define S3C64XX_UINTM 0x38
-
-#define S3C64XX_UINTM_RXD (0)
-#define S3C64XX_UINTM_TXD (2)
-#define S3C64XX_UINTM_RXD_MSK (1 << S3C64XX_UINTM_RXD)
-#define S3C64XX_UINTM_TXD_MSK (1 << S3C64XX_UINTM_TXD)
-
-/* Following are specific to S5PV210 */
-#define S5PV210_UCON_CLKMASK (1<<10)
-#define S5PV210_UCON_CLKSHIFT (10)
-#define S5PV210_UCON_PCLK (0<<10)
-#define S5PV210_UCON_UCLK (1<<10)
-
-#define S5PV210_UFCON_TXTRIG0 (0<<8)
-#define S5PV210_UFCON_TXTRIG4 (1<<8)
-#define S5PV210_UFCON_TXTRIG8 (2<<8)
-#define S5PV210_UFCON_TXTRIG16 (3<<8)
-#define S5PV210_UFCON_TXTRIG32 (4<<8)
-#define S5PV210_UFCON_TXTRIG64 (5<<8)
-#define S5PV210_UFCON_TXTRIG128 (6<<8)
-#define S5PV210_UFCON_TXTRIG256 (7<<8)
-
-#define S5PV210_UFCON_RXTRIG1 (0<<4)
-#define S5PV210_UFCON_RXTRIG4 (1<<4)
-#define S5PV210_UFCON_RXTRIG8 (2<<4)
-#define S5PV210_UFCON_RXTRIG16 (3<<4)
-#define S5PV210_UFCON_RXTRIG32 (4<<4)
-#define S5PV210_UFCON_RXTRIG64 (5<<4)
-#define S5PV210_UFCON_RXTRIG128 (6<<4)
-#define S5PV210_UFCON_RXTRIG256 (7<<4)
-
-#define S5PV210_UFSTAT_TXFULL (1<<24)
-#define S5PV210_UFSTAT_RXFULL (1<<8)
-#define S5PV210_UFSTAT_TXMASK (255<<16)
-#define S5PV210_UFSTAT_TXSHIFT (16)
-#define S5PV210_UFSTAT_RXMASK (255<<0)
-#define S5PV210_UFSTAT_RXSHIFT (0)
-
-#define S3C2410_UCON_CLKSEL0 (1 << 0)
-#define S3C2410_UCON_CLKSEL1 (1 << 1)
-#define S3C2410_UCON_CLKSEL2 (1 << 2)
-#define S3C2410_UCON_CLKSEL3 (1 << 3)
-
-/* Default values for s5pv210 UCON and UFCON uart registers */
-#define S5PV210_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
- S3C2410_UCON_RXILEVEL | \
- S3C2410_UCON_TXIRQMODE | \
- S3C2410_UCON_RXIRQMODE | \
- S3C2410_UCON_RXFIFO_TOI | \
- S3C2443_UCON_RXERR_IRQEN)
-
-#define S5PV210_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
- S5PV210_UFCON_TXTRIG4 | \
- S5PV210_UFCON_RXTRIG4)
-
-#ifndef __ASSEMBLY__
-
-/* configuration structure for per-machine configurations for the
- * serial port
- *
- * the pointer is setup by the machine specific initialisation from the
- * arch/arm/mach-s3c2410/ directory.
-*/
-
-struct s3c2410_uartcfg {
- unsigned char hwport; /* hardware port number */
- unsigned char unused;
- unsigned short flags;
- upf_t uart_flags; /* default uart flags */
- unsigned int clk_sel;
-
- unsigned int has_fracval;
-
- unsigned long ucon; /* value of ucon for port */
- unsigned long ulcon; /* value of ulcon for port */
- unsigned long ufcon; /* value of ufcon for port */
-};
-
-/* s3c24xx_uart_devs
- *
- * this is exported from the core as we cannot use driver_register(),
- * or platform_add_device() before the console_initcall()
-*/
-
-extern struct platform_device *s3c24xx_uart_devs[4];
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __ASM_ARM_REGS_SERIAL_H */
-
+#include <linux/serial_s3c.h>
diff --git a/arch/arm/plat-samsung/include/plat/usb-phy.h b/arch/arm/plat-samsung/include/plat/usb-phy.h
index 959bcdb03a2..ab34dfadb7f 100644
--- a/arch/arm/plat-samsung/include/plat/usb-phy.h
+++ b/arch/arm/plat-samsung/include/plat/usb-phy.h
@@ -11,10 +11,7 @@
#ifndef __PLAT_SAMSUNG_USB_PHY_H
#define __PLAT_SAMSUNG_USB_PHY_H __FILE__
-enum s5p_usb_phy_type {
- S5P_USB_PHY_DEVICE,
- S5P_USB_PHY_HOST,
-};
+#include <linux/usb/samsung_usb_phy.h>
extern int s5p_usb_phy_init(struct platform_device *pdev, int type);
extern int s5p_usb_phy_exit(struct platform_device *pdev, int type);
diff --git a/arch/arm/plat-spear/Kconfig b/arch/arm/plat-spear/Kconfig
index 739d016eb27..8a08c31b5e2 100644
--- a/arch/arm/plat-spear/Kconfig
+++ b/arch/arm/plat-spear/Kconfig
@@ -10,7 +10,7 @@ choice
config ARCH_SPEAR13XX
bool "ST SPEAr13xx with Device Tree"
- select ARCH_HAVE_CPUFREQ
+ select ARCH_HAS_CPUFREQ
select ARM_GIC
select CPU_V7
select GPIO_SPEAR_SPICS
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index fd70a68387e..9b6d19f7407 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -9,7 +9,6 @@ config ARM64
select CLONE_BACKWARDS
select COMMON_CLK
select GENERIC_CLOCKEVENTS
- select GENERIC_HARDIRQS_NO_DEPRECATED
select GENERIC_IOMAP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 51493430f14..1a6bfe954d4 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -6,17 +6,6 @@ config FRAME_POINTER
bool
default y
-config DEBUG_ERRORS
- bool "Verbose kernel error messages"
- depends on DEBUG_KERNEL
- help
- This option controls verbose debugging information which can be
- printed when the kernel detects an internal error. This debugging
- information is useful to kernel hackers when tracking down problems,
- but mostly meaningless to other people. It's safe to say Y unless
- you are concerned with the code size or don't want to see these
- messages.
-
config DEBUG_STACK_USAGE
bool "Enable stack utilization instrumentation"
depends on DEBUG_KERNEL
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 9212c7880da..09bef29f3a0 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -82,4 +82,3 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO=y
# CONFIG_FTRACE is not set
CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_DEBUG_ERRORS=y
diff --git a/arch/arm64/include/asm/ucontext.h b/arch/arm64/include/asm/ucontext.h
index bde96072089..42e04c87742 100644
--- a/arch/arm64/include/asm/ucontext.h
+++ b/arch/arm64/include/asm/ucontext.h
@@ -22,7 +22,7 @@ struct ucontext {
stack_t uc_stack;
sigset_t uc_sigmask;
/* glibc uses a 1024-bit sigset_t */
- __u8 __unused[(1024 - sizeof(sigset_t)) / 8];
+ __u8 __unused[1024 / 8 - sizeof(sigset_t)];
/* last for future expansion */
struct sigcontext uc_mcontext;
};
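
The ucontext change above fixes the padding arithmetic: glibc expects a 1024-bit (128-byte) sigset_t, and with an 8-byte kernel sigset_t the old expression (1024 - sizeof(sigset_t)) / 8 reserves 127 bytes of padding (135 bytes in total) instead of the intended 120 (128 in total). A worked check of that arithmetic (the 8-byte kernel sigset_t is an assumption stated in the code):

/* Worked check of the padding arithmetic above, assuming an 8-byte
 * kernel sigset_t (64 signal bits) and a 1024-bit glibc sigset_t. */
#include <stdio.h>

#define KERNEL_SIGSET_BYTES	8
#define GLIBC_SIGSET_BITS	1024

int main(void)
{
	unsigned old_pad = (GLIBC_SIGSET_BITS - KERNEL_SIGSET_BYTES) / 8; /* 127: mixes bits and bytes */
	unsigned new_pad = GLIBC_SIGSET_BITS / 8 - KERNEL_SIGSET_BYTES;   /* 120 */

	printf("old: %u + pad %u = %u bytes\n", KERNEL_SIGSET_BYTES, old_pad,
	       KERNEL_SIGSET_BYTES + old_pad);	/* 135, does not match glibc */
	printf("new: %u + pad %u = %u bytes\n", KERNEL_SIGSET_BYTES, new_pad,
	       KERNEL_SIGSET_BYTES + new_pad);	/* 128, matches the glibc layout */
	return 0;
}
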
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index cef3925eaf6..aa3e948f788 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -40,7 +40,9 @@ EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);
/* bitops */
+#ifdef CONFIG_SMP
EXPORT_SYMBOL(__atomic_hash);
+#endif
/* physical memory */
EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 0337cdb0667..83a0ad5936a 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -84,11 +84,15 @@ EXPORT_SYMBOL_GPL(pm_power_off);
void (*pm_restart)(const char *cmd);
EXPORT_SYMBOL_GPL(pm_restart);
+void arch_cpu_idle_prepare(void)
+{
+ local_fiq_enable();
+}
/*
* This is our default idle handler.
*/
-static void default_idle(void)
+void arch_cpu_idle(void)
{
/*
* This should do all the clock switching and wait for interrupt
@@ -98,43 +102,6 @@ static void default_idle(void)
local_irq_enable();
}
-/*
- * The idle thread.
- * We always respect 'hlt_counter' to prevent low power idle.
- */
-void cpu_idle(void)
-{
- local_fiq_enable();
-
- /* endless idle loop with no priority at all */
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
- while (!need_resched()) {
- /*
- * We need to disable interrupts here to ensure
- * we don't miss a wakeup call.
- */
- local_irq_disable();
- if (!need_resched()) {
- stop_critical_timings();
- default_idle();
- start_critical_timings();
- /*
- * default_idle functions should always return
- * with IRQs enabled.
- */
- WARN_ON(irqs_disabled());
- } else {
- local_irq_enable();
- }
- }
- rcu_idle_exit();
- tick_nohz_idle_exit();
- schedule_preempt_disabled();
- }
-}
-
void machine_shutdown(void)
{
#ifdef CONFIG_SMP
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 7f4f3673f2b..e393174fe85 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -549,7 +549,6 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct compat_rt_sigframe __user *frame;
- compat_stack_t stack;
int err = 0;
frame = compat_get_sigframe(ka, regs, sizeof(*frame));
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index bdd34597254..261445c4666 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -216,7 +216,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
/*
* OK, it's off to the idle thread for us
*/
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
}
void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 800aac306a0..f497ca77925 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -197,24 +197,6 @@ void __init bootmem_init(void)
max_pfn = max_low_pfn = max;
}
-static inline int free_area(unsigned long pfn, unsigned long end, char *s)
-{
- unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
-
- for (; pfn < end; pfn++) {
- struct page *page = pfn_to_page(pfn);
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- pages++;
- }
-
- if (size && s)
- pr_info("Freeing %s memory: %dK\n", s, size);
-
- return pages;
-}
-
/*
* Poison init memory with an undefined instruction (0x0).
*/
@@ -405,9 +387,7 @@ void __init mem_init(void)
void free_initmem(void)
{
poison_init_mem(__init_begin, __init_end - __init_begin);
- totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
- __phys_to_pfn(__pa(__init_end)),
- "init");
+ free_initmem_default(0);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -418,9 +398,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
{
if (!keep_initrd) {
poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
- totalram_pages += free_area(__phys_to_pfn(__pa(start)),
- __phys_to_pfn(__pa(end)),
- "initrd");
+ free_reserved_area(start, end, 0, "initrd");
}
}
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 224b44ab534..eeecc9c8ed6 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -261,7 +261,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
{
unsigned long size, mask;
- bool page64k = IS_ENABLED(ARM64_64K_PAGES);
+ bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
@@ -391,17 +391,14 @@ int kern_addr_valid(unsigned long addr)
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
-int __meminit vmemmap_populate(struct page *start_page,
- unsigned long size, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
- return vmemmap_populate_basepages(start_page, size, node);
+ return vmemmap_populate_basepages(start, end, node);
}
#else /* !CONFIG_ARM64_64K_PAGES */
-int __meminit vmemmap_populate(struct page *start_page,
- unsigned long size, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
- unsigned long addr = (unsigned long)start_page;
- unsigned long end = (unsigned long)(start_page + size);
+ unsigned long addr = start;
unsigned long next;
pgd_t *pgd;
pud_t *pud;
@@ -434,7 +431,7 @@ int __meminit vmemmap_populate(struct page *start_page,
return 0;
}
#endif /* CONFIG_ARM64_64K_PAGES */
-void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
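Two things worth noting in this file. The vmemmap_populate()/vmemmap_free() hunks adopt the new start/end virtual-address prototypes used tree-wide in this merge. More subtly, the one-word fix of ARM64_64K_PAGES to CONFIG_ARM64_64K_PAGES matters because IS_ENABLED() works by token-pasting against the name it is given: a symbol without the CONFIG_ prefix does not break the build, it silently evaluates to 0, so the 64K-page branch was never taken. The self-contained userspace model below mirrors the mechanics of include/linux/kconfig.h to show why; it is only a model, not the kernel header.

#include <stdio.h>

/* Model of kconfig.h: an enabled CONFIG_FOO is #defined to 1. */
#define CONFIG_ARM64_64K_PAGES 1

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)		___is_defined(x)
#define ___is_defined(val)	____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option)	__is_defined(option)

int main(void)
{
	/* Correct spelling: CONFIG_ARM64_64K_PAGES expands to 1, result is 1. */
	printf("IS_ENABLED(CONFIG_ARM64_64K_PAGES) = %d\n",
	       IS_ENABLED(CONFIG_ARM64_64K_PAGES));
	/* Missing CONFIG_ prefix: the token is simply undefined, result is 0,
	 * and the compiler raises no error at all. */
	printf("IS_ENABLED(ARM64_64K_PAGES)        = %d\n",
	       IS_ENABLED(ARM64_64K_PAGES));
	return 0;
}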
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 9b89257b2cf..c1a868d398b 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -7,7 +7,7 @@ config AVR32
select HAVE_OPROFILE
select HAVE_KPROBES
select HAVE_GENERIC_HARDIRQS
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_ATOMIC64
select HARDIRQS_SW_RESEND
diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h
index cf60d0a9f17..fc6483f83cc 100644
--- a/arch/avr32/include/asm/io.h
+++ b/arch/avr32/include/asm/io.h
@@ -165,6 +165,10 @@ BUILDIO_IOPORT(l, u32)
#define readw_be __raw_readw
#define readl_be __raw_readl
+#define writeb_relaxed writeb
+#define writew_relaxed writew
+#define writel_relaxed writel
+
#define writeb_be __raw_writeb
#define writew_be __raw_writew
#define writel_be __raw_writel
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index fd78f58ea79..073c3c2fa52 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -30,18 +30,9 @@ EXPORT_SYMBOL(pm_power_off);
* This file handles the architecture-dependent parts of process handling..
*/
-void cpu_idle(void)
+void arch_cpu_idle(void)
{
- /* endless idle loop with no priority at all */
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
- while (!need_resched())
- cpu_idle_sleep();
- rcu_idle_exit();
- tick_nohz_idle_exit();
- schedule_preempt_disabled();
- }
+ cpu_enter_idle();
}
void machine_halt(void)
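The avr32 hunk above shows the shape of the idle conversion that recurs for nearly every architecture in this merge: the hand-rolled cpu_idle() loop (tick_nohz_idle_enter(), rcu_idle_enter(), the need_resched() polling, schedule_preempt_disabled()) moves into the generic idle loop, and the architecture keeps only an arch_cpu_idle() hook. As the converted hunks consistently show, the hook is entered with interrupts disabled and must return with them enabled, either because the sleep instruction wakes with interrupts on or via an explicit local_irq_enable(). A hedged kernel-context sketch, with the example_* calls standing in for arch-specific pieces:

/* Kernel-context sketch of the per-arch hook after the conversion. */
#include <linux/irqflags.h>

extern int example_can_sleep(void);		/* placeholder: arch-specific check */
extern void example_wait_for_interrupt(void);	/* placeholder: wfi / hlt / sleep */

void arch_cpu_idle(void)
{
	if (example_can_sleep())
		example_wait_for_interrupt();	/* wakes up with IRQs re-enabled */
	else
		local_irq_enable();		/* nothing to sleep on: just return */
}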
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 05ad29112ff..869a1c6ffee 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -12,6 +12,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/time.h>
+#include <linux/cpu.h>
#include <asm/sysreg.h>
@@ -87,13 +88,17 @@ static void comparator_mode(enum clock_event_mode mode,
pr_debug("%s: start\n", evdev->name);
/* FALLTHROUGH */
case CLOCK_EVT_MODE_RESUME:
- cpu_disable_idle_sleep();
+ /*
+ * If we're using the COUNT and COMPARE registers we
+ * need to force idle poll.
+ */
+ cpu_idle_poll_ctrl(true);
break;
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
sysreg_write(COMPARE, 0);
pr_debug("%s: stop\n", evdev->name);
- cpu_enable_idle_sleep();
+ cpu_idle_poll_ctrl(false);
break;
default:
BUG();
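cpu_idle_poll_ctrl() is the generic replacement for the private "do not sleep in idle" toggles that this merge deletes (disable_idle_sleep on avr32 here, cris_hlt_counter and update_pal_halt_status() further down). It is a reference count in the core idle code: while the count is non-zero the idle loop spins in cpu_relax() instead of calling arch_cpu_idle(). A minimal kernel-context usage sketch; the callback names are invented.

/* Kernel-context sketch: force polling while a clocksource that stops in the
 * sleep state is in use, and release the reference when it is shut down. */
#include <linux/cpu.h>

static void example_clockevent_resume(void)
{
	cpu_idle_poll_ctrl(true);	/* idle loop now polls with cpu_relax() */
}

static void example_clockevent_shutdown(void)
{
	cpu_idle_poll_ctrl(false);	/* drop the reference; sleeping idle again */
}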
diff --git a/arch/avr32/mach-at32ap/include/mach/pm.h b/arch/avr32/mach-at32ap/include/mach/pm.h
index 979b355b77b..f29ff2cd23d 100644
--- a/arch/avr32/mach-at32ap/include/mach/pm.h
+++ b/arch/avr32/mach-at32ap/include/mach/pm.h
@@ -21,30 +21,6 @@
extern void cpu_enter_idle(void);
extern void cpu_enter_standby(unsigned long sdramc_base);
-extern bool disable_idle_sleep;
-
-static inline void cpu_disable_idle_sleep(void)
-{
- disable_idle_sleep = true;
-}
-
-static inline void cpu_enable_idle_sleep(void)
-{
- disable_idle_sleep = false;
-}
-
-static inline void cpu_idle_sleep(void)
-{
- /*
- * If we're using the COUNT and COMPARE registers for
- * timekeeping, we can't use the IDLE state.
- */
- if (disable_idle_sleep)
- cpu_relax();
- else
- cpu_enter_idle();
-}
-
void intc_set_suspend_handler(unsigned long offset);
#endif
diff --git a/arch/avr32/mach-at32ap/pm-at32ap700x.S b/arch/avr32/mach-at32ap/pm-at32ap700x.S
index f868f4ce761..1c8e4e6bff0 100644
--- a/arch/avr32/mach-at32ap/pm-at32ap700x.S
+++ b/arch/avr32/mach-at32ap/pm-at32ap700x.S
@@ -18,13 +18,6 @@
/* Same as 0xfff00000 but fits in a 21 bit signed immediate */
#define PM_BASE -0x100000
- .section .bss, "wa", @nobits
- .global disable_idle_sleep
- .type disable_idle_sleep, @object
-disable_idle_sleep:
- .int 4
- .size disable_idle_sleep, . - disable_idle_sleep
-
/* Keep this close to the irq handlers */
.section .irq.text, "ax", @progbits
diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c
index 2798c2d4a1c..e66e8406f99 100644
--- a/arch/avr32/mm/init.c
+++ b/arch/avr32/mm/init.c
@@ -146,34 +146,14 @@ void __init mem_init(void)
initsize >> 10);
}
-static inline void free_area(unsigned long addr, unsigned long end, char *s)
-{
- unsigned int size = (end - addr) >> 10;
-
- for (; addr < end; addr += PAGE_SIZE) {
- struct page *page = virt_to_page(addr);
- ClearPageReserved(page);
- init_page_count(page);
- free_page(addr);
- totalram_pages++;
- }
-
- if (size && s)
- printk(KERN_INFO "Freeing %s memory: %dK (%lx - %lx)\n",
- s, size, end - (size << 10), end);
-}
-
void free_initmem(void)
{
- free_area((unsigned long)__init_begin, (unsigned long)__init_end,
- "init");
+ free_initmem_default(0);
}
#ifdef CONFIG_BLK_DEV_INITRD
-
void free_initrd_mem(unsigned long start, unsigned long end)
{
- free_area(start, end, "initrd");
+ free_reserved_area(start, end, 0, "initrd");
}
-
#endif
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 600494c70e9..c3f2e0bc644 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -33,7 +33,7 @@ config BLACKFIN
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_WANT_OPTIONAL_GPIOLIB
select HAVE_UID16
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select ARCH_WANT_IPC_PARSE_VERSION
select HAVE_GENERIC_HARDIRQS
select GENERIC_ATOMIC64
diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c
index 84ed8375113..61fbd2de993 100644
--- a/arch/blackfin/kernel/early_printk.c
+++ b/arch/blackfin/kernel/early_printk.c
@@ -25,8 +25,6 @@ extern struct console *bfin_earlyserial_init(unsigned int port,
extern struct console *bfin_jc_early_init(void);
#endif
-static struct console *early_console;
-
/* Default console */
#define DEFAULT_PORT 0
#define DEFAULT_CFLAG CS8|B57600
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 9782c0329c1..4aa5545c4fd 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -46,15 +46,14 @@ EXPORT_SYMBOL(pm_power_off);
* The idle loop on BFIN
*/
#ifdef CONFIG_IDLE_L1
-static void default_idle(void)__attribute__((l1_text));
-void cpu_idle(void)__attribute__((l1_text));
+void arch_cpu_idle(void)__attribute__((l1_text));
#endif
/*
* This is our default idle handler. We need to disable
* interrupts here to ensure we don't miss a wakeup call.
*/
-static void default_idle(void)
+void arch_cpu_idle(void)
{
#ifdef CONFIG_IPIPE
ipipe_suspend_domain();
@@ -66,31 +65,12 @@ static void default_idle(void)
hard_local_irq_enable();
}
-/*
- * The idle thread. We try to conserve power, while trying to keep
- * overall latency low. The architecture specific idle is passed
- * a value to indicate the level of "idleness" of the system.
- */
-void cpu_idle(void)
-{
- /* endless idle loop with no priority at all */
- while (1) {
-
#ifdef CONFIG_HOTPLUG_CPU
- if (cpu_is_offline(smp_processor_id()))
- cpu_die();
-#endif
- tick_nohz_idle_enter();
- rcu_idle_enter();
- while (!need_resched())
- default_idle();
- rcu_idle_exit();
- tick_nohz_idle_exit();
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- }
+void arch_cpu_idle_dead(void)
+{
+ cpu_die();
}
+#endif
/*
* Do necessary setup to start up a newly executed thread.
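CPU-hotplug handling moves out of the old per-arch idle loops as well: instead of testing cpu_is_offline() on every pass, the generic loop calls arch_cpu_idle_dead() once when the CPU has been marked offline, and the architecture parks or kills the CPU there (cpu_die() on Blackfin above, play_dead() on ia64 later in this merge). Kernel-context sketch, with the example_* call a placeholder:

/* Hook called by the generic idle loop once this CPU goes offline;
 * it is expected to never return normally. */
#ifdef CONFIG_HOTPLUG_CPU
extern void example_cpu_die(void);	/* placeholder: arch cpu_die()/play_dead() */

void arch_cpu_idle_dead(void)
{
	example_cpu_die();
}
#endif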
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index bb61ae4986e..1bc2ce6f3c9 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -335,7 +335,7 @@ void __cpuinit secondary_start_kernel(void)
*/
calibrate_delay();
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
}
void __init smp_prepare_boot_cpu(void)
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index 9cb85537bd2..82d01a71207 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -103,7 +103,7 @@ void __init mem_init(void)
max_mapnr = num_physpages = MAP_NR(high_memory);
printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", num_physpages);
- /* This will put all memory onto the freelists. */
+ /* This will put all low memory onto the freelists. */
totalram_pages = free_all_bootmem();
reservedpages = 0;
@@ -129,24 +129,11 @@ void __init mem_init(void)
initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10)));
}
-static void __init free_init_pages(const char *what, unsigned long begin, unsigned long end)
-{
- unsigned long addr;
- /* next to check that the page we free is not a partial page */
- for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
- printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
-}
-
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifndef CONFIG_MPU
- free_init_pages("initrd memory", start, end);
+ free_reserved_area(start, end, 0, "initrd");
#endif
}
#endif
@@ -154,10 +141,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
void __init_refok free_initmem(void)
{
#if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU
- free_init_pages("unused kernel memory",
- (unsigned long)(&__init_begin),
- (unsigned long)(&__init_end));
-
+ free_initmem_default(0);
if (memory_start == (unsigned long)(&__init_end))
memory_start = (unsigned long)(&__init_begin);
#endif
diff --git a/arch/c6x/include/asm/irqflags.h b/arch/c6x/include/asm/irqflags.h
index cf78e09e18c..2c71d5634ec 100644
--- a/arch/c6x/include/asm/irqflags.h
+++ b/arch/c6x/include/asm/irqflags.h
@@ -27,7 +27,7 @@ static inline unsigned long arch_local_save_flags(void)
/* set interrupt enabled status */
static inline void arch_local_irq_restore(unsigned long flags)
{
- asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags));
+ asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory");
}
/* unconditionally enable interrupts */
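The c6x change adds a "memory" clobber to the CSR-restore asm. Without it the compiler may keep memory values cached in registers or move loads and stores across the point where interrupts are re-enabled, which defeats the compiler-barrier semantics expected of arch_local_irq_restore(). The userspace model below uses the same constraint pattern with a deliberately empty asm body, since only the clobber is being illustrated; the c6x "b" register constraint is replaced by a generic "r".

#include <stdio.h>

static unsigned long flags_shadow;

/* Model: the "memory" clobber tells the compiler the asm may read or write
 * memory, so pending stores must be flushed before it and values reloaded
 * after it; in other words it acts as a compiler barrier. */
static inline void model_irq_restore(unsigned long flags)
{
	asm volatile("" : : "r"(flags) : "memory");
	flags_shadow = flags;
}

int main(void)
{
	model_irq_restore(1);
	printf("flags restored: %lu\n", flags_shadow);
	return 0;
}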
diff --git a/arch/c6x/kernel/process.c b/arch/c6x/kernel/process.c
index 6434df476f7..57d2ea8d197 100644
--- a/arch/c6x/kernel/process.c
+++ b/arch/c6x/kernel/process.c
@@ -33,7 +33,7 @@ extern asmlinkage void ret_from_kernel_thread(void);
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
-static void c6x_idle(void)
+void arch_cpu_idle(void)
{
unsigned long tmp;
@@ -49,32 +49,6 @@ static void c6x_idle(void)
: "=b"(tmp));
}
-/*
- * The idle loop for C64x
- */
-void cpu_idle(void)
-{
- /* endless idle loop with no priority at all */
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
- while (1) {
- local_irq_disable();
- if (need_resched()) {
- local_irq_enable();
- break;
- }
- c6x_idle(); /* enables local irqs */
- }
- rcu_idle_exit();
- tick_nohz_idle_exit();
-
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- }
-}
-
static void halt_loop(void)
{
printk(KERN_EMERG "System Halted, OK to turn off power\n");
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
index 89395f09648..a9fcd89b251 100644
--- a/arch/c6x/mm/init.c
+++ b/arch/c6x/mm/init.c
@@ -77,37 +77,11 @@ void __init mem_init(void)
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
- int pages = 0;
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- pages++;
- }
- printk(KERN_INFO "Freeing initrd memory: %luk freed\n",
- (pages * PAGE_SIZE) >> 10);
+ free_reserved_area(start, end, 0, "initrd");
}
#endif
void __init free_initmem(void)
{
- unsigned long addr;
-
- /*
- * The following code should be cool even if these sections
- * are not page aligned.
- */
- addr = PAGE_ALIGN((unsigned long)(__init_begin));
-
- /* next to check that the page we free is not a partial page */
- for (; addr + PAGE_SIZE < (unsigned long)(__init_end);
- addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
- printk(KERN_INFO "Freeing unused kernel memory: %dK freed\n",
- (int) ((addr - PAGE_ALIGN((long) &__init_begin)) >> 10));
+ free_initmem_default(0);
}
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index bb0ac66cf53..06dd026533e 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -43,7 +43,7 @@ config CRIS
select GENERIC_ATOMIC64
select HAVE_GENERIC_HARDIRQS
select HAVE_UID16
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_IRQ_SHOW
select GENERIC_IOMAP
diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c
index b1018750cff..2ba23c13df6 100644
--- a/arch/cris/arch-v10/kernel/process.c
+++ b/arch/cris/arch-v10/kernel/process.c
@@ -30,8 +30,9 @@ void etrax_gpio_wake_up_check(void); /* drivers/gpio.c */
void default_idle(void)
{
#ifdef CONFIG_ETRAX_GPIO
- etrax_gpio_wake_up_check();
+ etrax_gpio_wake_up_check();
#endif
+ local_irq_enable();
}
/*
diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c
index 2b23ef0e445..57451faa9b2 100644
--- a/arch/cris/arch-v32/kernel/process.c
+++ b/arch/cris/arch-v32/kernel/process.c
@@ -20,18 +20,12 @@
extern void stop_watchdog(void);
-extern int cris_hlt_counter;
-
/* We use this if we don't have any better idle routine. */
void default_idle(void)
{
- local_irq_disable();
- if (!need_resched() && !cris_hlt_counter) {
- /* Halt until exception. */
- __asm__ volatile("ei \n\t"
- "halt ");
- }
- local_irq_enable();
+ /* Halt until exception. */
+ __asm__ volatile("ei \n\t"
+ "halt ");
}
/*
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 04a16edd540..cdd12028de0 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -145,8 +145,6 @@ smp_boot_one_cpu(int cpuid, struct task_struct *idle)

* specific stuff such as the local timer and the MMU. */
void __init smp_callin(void)
{
- extern void cpu_idle(void);
-
int cpu = cpu_now_booting;
reg_intr_vect_rw_mask vect_mask = {0};
@@ -170,7 +168,7 @@ void __init smp_callin(void)
local_irq_enable();
set_cpu_online(cpu, true);
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
}
/* Stop execution on this CPU.*/
diff --git a/arch/cris/include/asm/processor.h b/arch/cris/include/asm/processor.h
index 675823f70c0..c0a29b96b92 100644
--- a/arch/cris/include/asm/processor.h
+++ b/arch/cris/include/asm/processor.h
@@ -65,13 +65,6 @@ static inline void release_thread(struct task_struct *dead_task)
#define cpu_relax() barrier()
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
-
void default_idle(void);
#endif /* __ASM_CRIS_PROCESSOR_H */
diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
index 104ff4dd9b9..b78498eb079 100644
--- a/arch/cris/kernel/process.c
+++ b/arch/cris/kernel/process.c
@@ -29,59 +29,14 @@
//#define DEBUG
-/*
- * The hlt_counter, disable_hlt and enable_hlt is just here as a hook if
- * there would ever be a halt sequence (for power save when idle) with
- * some largish delay when halting or resuming *and* a driver that can't
- * afford that delay. The hlt_counter would then be checked before
- * executing the halt sequence, and the driver marks the unhaltable
- * region by enable_hlt/disable_hlt.
- */
-
-int cris_hlt_counter=0;
-
-void disable_hlt(void)
-{
- cris_hlt_counter++;
-}
-
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
- cris_hlt_counter--;
-}
-
-EXPORT_SYMBOL(enable_hlt);
-
extern void default_idle(void);
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-
-void cpu_idle (void)
+void arch_cpu_idle(void)
{
- /* endless idle loop with no priority at all */
- while (1) {
- rcu_idle_enter();
- while (!need_resched()) {
- /*
- * Mark this as an RCU critical section so that
- * synchronize_kernel() in the unload path waits
- * for our completion.
- */
- default_idle();
- }
- rcu_idle_exit();
- schedule_preempt_disabled();
- }
+ default_idle();
}
void hard_reset_now (void);
diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c
index d72ab58fd83..9ac80946dad 100644
--- a/arch/cris/mm/init.c
+++ b/arch/cris/mm/init.c
@@ -12,12 +12,10 @@
#include <linux/init.h>
#include <linux/bootmem.h>
#include <asm/tlb.h>
+#include <asm/sections.h>
unsigned long empty_zero_page;
-extern char _stext, _edata, _etext; /* From linkerscript */
-extern char __init_begin, __init_end;
-
void __init
mem_init(void)
{
@@ -67,15 +65,5 @@ mem_init(void)
void
free_initmem(void)
{
- unsigned long addr;
-
- addr = (unsigned long)(&__init_begin);
- for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
- printk (KERN_INFO "Freeing unused kernel memory: %luk freed\n",
- (unsigned long)((&__init_end - &__init_begin) >> 10));
+ free_initmem_default(0);
}
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 12369b194c7..2ce731f9aa4 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -6,7 +6,7 @@ config FRV
select HAVE_PERF_EVENTS
select HAVE_UID16
select HAVE_GENERIC_HARDIRQS
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select GENERIC_IRQ_SHOW
select HAVE_DEBUG_BUGVERBOSE
select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 23916b2a12a..5d40aeb7712 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -59,29 +59,12 @@ static void core_sleep_idle(void)
mb();
}
-void (*idle)(void) = core_sleep_idle;
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
+void arch_cpu_idle(void)
{
- /* endless idle loop with no priority at all */
- while (1) {
- rcu_idle_enter();
- while (!need_resched()) {
- check_pgt_cache();
-
- if (!frv_dma_inprogress && idle)
- idle();
- }
- rcu_idle_exit();
-
- schedule_preempt_disabled();
- }
+ if (!frv_dma_inprogress)
+ core_sleep_idle();
+ else
+ local_irq_enable();
}
void machine_restart(char * __unused)
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c
index 92e97b0894a..dee354fa6b6 100644
--- a/arch/frv/mm/init.c
+++ b/arch/frv/mm/init.c
@@ -122,7 +122,7 @@ void __init mem_init(void)
#endif
int codek = 0, datak = 0;
- /* this will put all memory onto the freelists */
+ /* this will put all low memory onto the freelists */
totalram_pages = free_all_bootmem();
#ifdef CONFIG_MMU
@@ -131,14 +131,8 @@ void __init mem_init(void)
datapages++;
#ifdef CONFIG_HIGHMEM
- for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--) {
- struct page *page = &mem_map[pfn];
-
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalram_pages++;
- }
+ for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--)
+ free_highmem_page(&mem_map[pfn]);
#endif
codek = ((unsigned long) &_etext - (unsigned long) &_stext) >> 10;
@@ -168,21 +162,7 @@ void __init mem_init(void)
void free_initmem(void)
{
#if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL)
- unsigned long start, end, addr;
-
- start = PAGE_ALIGN((unsigned long) &__init_begin); /* round up */
- end = ((unsigned long) &__init_end) & PAGE_MASK; /* round down */
-
- /* next to check that the page we free is not a partial page */
- for (addr = start; addr < end; addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
-
- printk("Freeing unused kernel memory: %ldKiB freed (0x%lx - 0x%lx)\n",
- (end - start) >> 10, start, end);
+ free_initmem_default(0);
#endif
} /* end free_initmem() */
@@ -193,14 +173,6 @@ void free_initmem(void)
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
- int pages = 0;
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- pages++;
- }
- printk("Freeing initrd memory: %dKiB freed\n", (pages * PAGE_SIZE) >> 10);
+ free_reserved_area(start, end, 0, "initrd");
} /* end free_initrd_mem() */
#endif
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index ae8551eb373..79250de1b12 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -5,7 +5,7 @@ config H8300
select HAVE_GENERIC_HARDIRQS
select GENERIC_ATOMIC64
select HAVE_UID16
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index b609f63f159..a17d2cd463d 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -53,40 +53,13 @@ asmlinkage void ret_from_kernel_thread(void);
* The idle loop on an H8/300..
*/
#if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM)
-static void default_idle(void)
+void arch_cpu_idle(void)
{
- local_irq_disable();
- if (!need_resched()) {
- local_irq_enable();
- /* XXX: race here! What if need_resched() gets set now? */
- __asm__("sleep");
- } else
- local_irq_enable();
-}
-#else
-static void default_idle(void)
-{
- cpu_relax();
+ local_irq_enable();
+ /* XXX: race here! What if need_resched() gets set now? */
+ __asm__("sleep");
}
#endif
-void (*idle)(void) = default_idle;
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
-{
- while (1) {
- rcu_idle_enter();
- while (!need_resched())
- idle();
- rcu_idle_exit();
- schedule_preempt_disabled();
- }
-}
void machine_restart(char * __unused)
{
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index 981e25094b1..ff349d70a29 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -139,7 +139,7 @@ void __init mem_init(void)
start_mem = PAGE_ALIGN(start_mem);
max_mapnr = num_physpages = MAP_NR(high_memory);
- /* this will put all memory onto the freelists */
+ /* this will put all low memory onto the freelists */
totalram_pages = free_all_bootmem();
codek = (_etext - _stext) >> 10;
@@ -161,15 +161,7 @@ void __init mem_init(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- int pages = 0;
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- pages++;
- }
- printk ("Freeing initrd memory: %dk freed\n", pages);
+ free_reserved_area(start, end, 0, "initrd");
}
#endif
@@ -177,23 +169,7 @@ void
free_initmem(void)
{
#ifdef CONFIG_RAMKERNEL
- unsigned long addr;
-/*
- * the following code should be cool even if these sections
- * are not page aligned.
- */
- addr = PAGE_ALIGN((unsigned long)(__init_begin));
- /* next to check that the page we free is not a partial page */
- for (; addr + PAGE_SIZE < (unsigned long)__init_end; addr +=PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
- printk(KERN_INFO "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n",
- (addr - PAGE_ALIGN((long) __init_begin)) >> 10,
- (int)(PAGE_ALIGN((unsigned long)__init_begin)),
- (int)(addr - PAGE_SIZE));
+ free_initmem_default(0);
#endif
}
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index 06ae9ffcabd..9b948c619a0 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -51,28 +51,11 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
* If hardware or VM offer wait termination even though interrupts
* are disabled.
*/
-static void default_idle(void)
+void arch_cpu_idle(void)
{
__vmwait();
-}
-
-void (*idle_sleep)(void) = default_idle;
-
-void cpu_idle(void)
-{
- while (1) {
- tick_nohz_idle_enter();
- local_irq_disable();
- while (!need_resched()) {
- idle_sleep();
- /* interrupts wake us up, but aren't serviced */
- local_irq_enable(); /* service interrupt */
- local_irq_disable();
- }
- local_irq_enable();
- tick_nohz_idle_exit();
- schedule();
- }
+ /* interrupts wake us up, but irqs are still disabled */
+ local_irq_enable();
}
/*
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index 8e095dffd07..0e364ca4319 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -184,7 +184,7 @@ void __cpuinit start_secondary(void)
local_irq_enable();
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
}
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 33f3fdc0b21..e7e55a00f94 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -26,7 +26,7 @@ config IA64
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_VIRT_CPU_ACCOUNTING
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select ARCH_DISCARD_MEMBLOCK
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
@@ -187,7 +187,7 @@ config IA64_DIG
config IA64_DIG_VTD
bool "DIG+Intel+IOMMU"
- select DMAR
+ select INTEL_IOMMU
select PCI_MSI
config IA64_HP_ZX1
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index da2f319fb71..e70cadec7ce 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -142,8 +142,7 @@ static void transmit_chars(struct tty_struct *tty, struct serial_state *info,
goto out;
}
- if (info->xmit.head == info->xmit.tail || tty->stopped ||
- tty->hw_stopped) {
+ if (info->xmit.head == info->xmit.tail || tty->stopped) {
#ifdef SIMSERIAL_DEBUG
printk("transmit_chars: head=%d, tail=%d, stopped=%d\n",
info->xmit.head, info->xmit.tail, tty->stopped);
@@ -181,7 +180,7 @@ static void rs_flush_chars(struct tty_struct *tty)
struct serial_state *info = tty->driver_data;
if (info->xmit.head == info->xmit.tail || tty->stopped ||
- tty->hw_stopped || !info->xmit.buf)
+ !info->xmit.buf)
return;
transmit_chars(tty, info, NULL);
@@ -217,7 +216,7 @@ static int rs_write(struct tty_struct * tty,
* Hey, we transmit directly from here in our case
*/
if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) &&
- !tty->stopped && !tty->hw_stopped)
+ !tty->stopped)
transmit_chars(tty, info, NULL);
return ret;
@@ -325,14 +324,6 @@ static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
-static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
-{
- /* Handle turning off CRTSCTS */
- if ((old_termios->c_cflag & CRTSCTS) &&
- !(tty->termios.c_cflag & CRTSCTS)) {
- tty->hw_stopped = 0;
- }
-}
/*
* This routine will shutdown a serial port; interrupts are disabled, and
* DTR is dropped if the hangup on close termio flag is on.
@@ -481,7 +472,6 @@ static const struct tty_operations hp_ops = {
.throttle = rs_throttle,
.unthrottle = rs_unthrottle,
.send_xchar = rs_send_xchar,
- .set_termios = rs_set_termios,
.hangup = rs_hangup,
.proc_fops = &rs_proc_fops,
};
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index d2bf1fd5e44..76acbcd5c06 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -106,16 +106,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
{
- register unsigned long r8 __asm ("r8");
+ register unsigned long r8 __asm ("r8") = 0;
unsigned long prev;
__asm__ __volatile__(
" mf;; \n"
- " mov %0=r0 \n"
" mov ar.ccv=%4;; \n"
"[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n"
" .xdata4 \"__ex_table\", 1b-., 2f-. \n"
"[2:]"
- : "=r" (r8), "=r" (prev)
+ : "+r" (r8), "=&r" (prev)
: "r" (uaddr), "r" (newval),
"rO" ((long) (unsigned) oldval)
: "memory");
diff --git a/arch/ia64/include/asm/hugetlb.h b/arch/ia64/include/asm/hugetlb.h
index 94eaa5bd5d0..aa910054b8e 100644
--- a/arch/ia64/include/asm/hugetlb.h
+++ b/arch/ia64/include/asm/hugetlb.h
@@ -2,6 +2,7 @@
#define _ASM_IA64_HUGETLB_H
#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
diff --git a/arch/ia64/include/asm/irqflags.h b/arch/ia64/include/asm/irqflags.h
index 2b68d856dc7..1bf2cf2f4ab 100644
--- a/arch/ia64/include/asm/irqflags.h
+++ b/arch/ia64/include/asm/irqflags.h
@@ -89,6 +89,7 @@ static inline bool arch_irqs_disabled(void)
static inline void arch_safe_halt(void)
{
+ arch_local_irq_enable();
ia64_pal_halt_light(); /* PAL_HALT_LIGHT */
}
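With the generic idle loop calling safe_halt() directly (see the ia64 process.c hunk later in this merge), the interrupt enable that the old default_idle() performed around the halt has to live inside arch_safe_halt() itself, otherwise the halted CPU could never be woken. A tiny kernel-context sketch of that contract, with both calls as placeholders:

/* Sketch: safe_halt() semantics - enable interrupts, then halt in a way
 * that an arriving interrupt wakes the CPU out of the halt state. */
extern void example_irq_enable(void);	/* placeholder: arch_local_irq_enable() */
extern void example_halt(void);		/* placeholder: hlt / wfi / PAL_HALT_LIGHT */

static inline void example_arch_safe_halt(void)
{
	example_irq_enable();
	example_halt();
}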
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index 43f96ab18fa..8c709616871 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -143,6 +143,7 @@ extern unsigned long __per_cpu_mca[NR_CPUS];
extern int cpe_vector;
extern int ia64_cpe_irq;
extern void ia64_mca_init(void);
+extern void ia64_mca_irq_init(void);
extern void ia64_mca_cpu_init(void *);
extern void ia64_os_mca_dispatch(void);
extern void ia64_os_mca_dispatch_end(void);
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
index 2e27ef17565..2db0a6c6daa 100644
--- a/arch/ia64/include/asm/numa.h
+++ b/arch/ia64/include/asm/numa.h
@@ -67,14 +67,13 @@ extern int paddr_to_nid(unsigned long paddr);
extern void map_cpu_to_node(int cpu, int nid);
extern void unmap_cpu_from_node(int cpu, int nid);
-
+extern void numa_clear_node(int cpu);
#else /* !CONFIG_NUMA */
#define map_cpu_to_node(cpu, nid) do{}while(0)
#define unmap_cpu_from_node(cpu, nid) do{}while(0)
-
#define paddr_to_nid(addr) 0
-
+#define numa_clear_node(cpu) do { } while (0)
#endif /* CONFIG_NUMA */
#endif /* _ASM_IA64_NUMA_H */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index 020d655ed08..cade13dd029 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -131,8 +131,6 @@ struct thread_info {
#define TS_POLLING 1 /* true if in idle loop and not sleeping */
#define TS_RESTORE_SIGMASK 2 /* restore signal mask in do_signal() */
-#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
-
#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK 1
static inline void set_restore_sigmask(void)
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index c4cd45d9774..abc6dee3799 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -90,53 +90,6 @@ ENTRY(fsys_getpid)
FSYS_RETURN
END(fsys_getpid)
-ENTRY(fsys_getppid)
- .prologue
- .altrp b6
- .body
- add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
- ;;
- ld8 r17=[r17] // r17 = current->group_leader
- add r9=TI_FLAGS+IA64_TASK_SIZE,r16
- ;;
-
- ld4 r9=[r9]
- add r17=IA64_TASK_REAL_PARENT_OFFSET,r17 // r17 = &current->group_leader->real_parent
- ;;
- and r9=TIF_ALLWORK_MASK,r9
-
-1: ld8 r18=[r17] // r18 = current->group_leader->real_parent
- ;;
- cmp.ne p8,p0=0,r9
- add r8=IA64_TASK_TGID_OFFSET,r18 // r8 = &current->group_leader->real_parent->tgid
- ;;
-
- /*
- * The .acq is needed to ensure that the read of tgid has returned its data before
- * we re-check "real_parent".
- */
- ld4.acq r8=[r8] // r8 = current->group_leader->real_parent->tgid
-#ifdef CONFIG_SMP
- /*
- * Re-read current->group_leader->real_parent.
- */
- ld8 r19=[r17] // r19 = current->group_leader->real_parent
-(p8) br.spnt.many fsys_fallback_syscall
- ;;
- cmp.ne p6,p0=r18,r19 // did real_parent change?
- mov r19=0 // i must not leak kernel bits...
-(p6) br.cond.spnt.few 1b // yes -> redo the read of tgid and the check
- ;;
- mov r17=0 // i must not leak kernel bits...
- mov r18=0 // i must not leak kernel bits...
-#else
- mov r17=0 // i must not leak kernel bits...
- mov r18=0 // i must not leak kernel bits...
- mov r19=0 // i must not leak kernel bits...
-#endif
- FSYS_RETURN
-END(fsys_getppid)
-
ENTRY(fsys_set_tid_address)
.prologue
.altrp b6
@@ -614,7 +567,7 @@ paravirt_fsyscall_table:
data8 0 // chown
data8 0 // lseek // 1040
data8 fsys_getpid // getpid
- data8 fsys_getppid // getppid
+ data8 0 // getppid
data8 0 // mount
data8 0 // umount
data8 0 // setuid // 1045
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index ee33c3aaa2f..19f107be734 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -76,7 +76,7 @@
* PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
*
* Note: The term "IRQ" is loosely used everywhere in Linux kernel to
- * describeinterrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ
+ * describe interrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ
* (isa_irq) is the only exception in this source code.
*/
@@ -1010,6 +1010,26 @@ iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
return 0;
}
+static int
+iosapic_delete_rte(unsigned int irq, unsigned int gsi)
+{
+ struct iosapic_rte_info *rte, *temp;
+
+ list_for_each_entry_safe(rte, temp, &iosapic_intr_info[irq].rtes,
+ rte_list) {
+ if (rte->iosapic->gsi_base + rte->rte_index == gsi) {
+ if (rte->refcnt)
+ return -EBUSY;
+
+ list_del(&rte->rte_list);
+ kfree(rte);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
int iosapic_init(unsigned long phys_addr, unsigned int gsi_base)
{
int num_rte, err, index;
@@ -1069,7 +1089,7 @@ int iosapic_init(unsigned long phys_addr, unsigned int gsi_base)
int iosapic_remove(unsigned int gsi_base)
{
- int index, err = 0;
+ int i, irq, index, err = 0;
unsigned long flags;
spin_lock_irqsave(&iosapic_lock, flags);
@@ -1087,6 +1107,16 @@ int iosapic_remove(unsigned int gsi_base)
goto out;
}
+ for (i = gsi_base; i < gsi_base + iosapic_lists[index].num_rte; i++) {
+ irq = __gsi_to_irq(i);
+ if (irq < 0)
+ continue;
+
+ err = iosapic_delete_rte(irq, i);
+ if (err)
+ goto out;
+ }
+
iounmap(iosapic_lists[index].addr);
iosapic_free(index);
out:
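The new iosapic_delete_rte() walks the per-IRQ RTE list with list_for_each_entry_safe(), which keeps a prefetched pointer to the next element so the current one can be list_del()'d and kfree()'d without breaking the iteration. Kernel-context sketch of that pattern with an invented element type:

/* Deleting while iterating needs the _safe variant; the plain iterator
 * would dereference the node that was just freed. */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct example_rte {
	struct list_head rte_list;
	unsigned int gsi;
};

static int example_delete_rte(struct list_head *rtes, unsigned int gsi)
{
	struct example_rte *rte, *tmp;

	list_for_each_entry_safe(rte, tmp, rtes, rte_list) {
		if (rte->gsi == gsi) {
			list_del(&rte->rte_list);
			kfree(rte);
			return 0;
		}
	}
	return -EINVAL;
}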
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index ad69606613e..f2c41828113 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -23,6 +23,8 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
+#include <asm/mca.h>
+
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
@@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask)
#endif /* CONFIG_SMP */
+int __init arch_early_irq_init(void)
+{
+ ia64_mca_irq_init();
+ return 0;
+}
+
#ifdef CONFIG_HOTPLUG_CPU
unsigned int vectors_in_migration[NR_IRQS];
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 65bf9cd3904..d7396dbb07b 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -2074,22 +2074,16 @@ ia64_mca_init(void)
printk(KERN_INFO "MCA related initialization done\n");
}
+
/*
- * ia64_mca_late_init
- *
- * Opportunity to setup things that require initialization later
- * than ia64_mca_init. Setup a timer to poll for CPEs if the
- * platform doesn't support an interrupt driven mechanism.
- *
- * Inputs : None
- * Outputs : Status
+ * These pieces cannot be done in ia64_mca_init() because it is called before
+ * early_irq_init() which would wipe out our percpu irq registrations. But we
+ * cannot leave them until ia64_mca_late_init() because by then all the other
+ * processors have been brought online and have set their own CMC vectors to
+ * point at a non-existent action. Called from arch_early_irq_init().
*/
-static int __init
-ia64_mca_late_init(void)
+void __init ia64_mca_irq_init(void)
{
- if (!mca_init)
- return 0;
-
/*
* Configure the CMCI/P vector and handler. Interrupts for CMC are
* per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
@@ -2108,6 +2102,23 @@ ia64_mca_late_init(void)
/* Setup the CPEI/P handler */
register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
#endif
+}
+
+/*
+ * ia64_mca_late_init
+ *
+ * Opportunity to setup things that require initialization later
+ * than ia64_mca_init. Setup a timer to poll for CPEs if the
+ * platform doesn't support an interrupt driven mechanism.
+ *
+ * Inputs : None
+ * Outputs : Status
+ */
+static int __init
+ia64_mca_late_init(void)
+{
+ if (!mca_init)
+ return 0;
register_hotcpu_notifier(&mca_cpu_notifier);
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 9392e021c93..94f8bf777af 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -349,7 +349,7 @@ init_record_index_pools(void)
/* - 3 - */
slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1;
- slidx_pool.buffer = (slidx_list_t *)
+ slidx_pool.buffer =
kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);
return slidx_pool.buffer ? 0 : -ENOMEM;
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 77597e5ea60..79521d5499f 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -849,17 +849,6 @@ static palinfo_entry_t palinfo_entries[]={
#define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries)
-/*
- * this array is used to keep track of the proc entries we create. This is
- * required in the module mode when we need to remove all entries. The procfs code
- * does not do recursion of deletion
- *
- * Notes:
- * - +1 accounts for the cpuN directory entry in /proc/pal
- */
-#define NR_PALINFO_PROC_ENTRIES (NR_CPUS*(NR_PALINFO_ENTRIES+1))
-
-static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES];
static struct proc_dir_entry *palinfo_dir;
/*
@@ -971,60 +960,32 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi
static void __cpuinit
create_palinfo_proc_entries(unsigned int cpu)
{
-# define CPUSTR "cpu%d"
-
pal_func_cpu_u_t f;
- struct proc_dir_entry **pdir;
struct proc_dir_entry *cpu_dir;
int j;
- char cpustr[sizeof(CPUSTR)];
-
-
- /*
- * we keep track of created entries in a depth-first order for
- * cleanup purposes. Each entry is stored into palinfo_proc_entries
- */
- sprintf(cpustr,CPUSTR, cpu);
+ char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
+ sprintf(cpustr, "cpu%d", cpu);
cpu_dir = proc_mkdir(cpustr, palinfo_dir);
+ if (!cpu_dir)
+ return;
f.req_cpu = cpu;
- /*
- * Compute the location to store per cpu entries
- * We dont store the top level entry in this list, but
- * remove it finally after removing all cpu entries.
- */
- pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)];
- *pdir++ = cpu_dir;
for (j=0; j < NR_PALINFO_ENTRIES; j++) {
f.func_id = j;
- *pdir = create_proc_read_entry(
- palinfo_entries[j].name, 0, cpu_dir,
- palinfo_read_entry, (void *)f.value);
- pdir++;
+ create_proc_read_entry(
+ palinfo_entries[j].name, 0, cpu_dir,
+ palinfo_read_entry, (void *)f.value);
}
}
static void
remove_palinfo_proc_entries(unsigned int hcpu)
{
- int j;
- struct proc_dir_entry *cpu_dir, **pdir;
-
- pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)];
- cpu_dir = *pdir;
- *pdir++=NULL;
- for (j=0; j < (NR_PALINFO_ENTRIES); j++) {
- if ((*pdir)) {
- remove_proc_entry ((*pdir)->name, cpu_dir);
- *pdir ++= NULL;
- }
- }
-
- if (cpu_dir) {
- remove_proc_entry(cpu_dir->name, palinfo_dir);
- }
+ char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
+ sprintf(cpustr, "cpu%d", hcpu);
+ remove_proc_subtree(cpustr, palinfo_dir);
}
static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
@@ -1058,6 +1019,8 @@ palinfo_init(void)
printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
palinfo_dir = proc_mkdir("pal", NULL);
+ if (!palinfo_dir)
+ return -ENOMEM;
/* Create palinfo dirs in /proc for all online cpus */
for_each_online_cpu(i) {
@@ -1073,22 +1036,8 @@ palinfo_init(void)
static void __exit
palinfo_exit(void)
{
- int i = 0;
-
- /* remove all nodes: depth first pass. Could optimize this */
- for_each_online_cpu(i) {
- remove_palinfo_proc_entries(i);
- }
-
- /*
- * Remove the top level entry finally
- */
- remove_proc_entry(palinfo_dir->name, NULL);
-
- /*
- * Unregister from cpu notifier callbacks
- */
unregister_hotcpu_notifier(&palinfo_cpu_notifier);
+ remove_proc_subtree("pal", NULL);
}
module_init(palinfo_init);
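The palinfo rework leans on the then-new remove_proc_subtree(), which removes a /proc directory together with everything beneath it. That is what makes the palinfo_proc_entries[] bookkeeping array unnecessary: directories are created with proc_mkdir()/create_proc_read_entry() and torn down per CPU, or wholesale in palinfo_exit(), with a single call. Kernel-context sketch of the create/teardown pairing; the directory layout and names are illustrative.

/* Per-CPU proc directories without tracking every entry by hand,
 * relying on remove_proc_subtree() for recursive removal. */
#include <linux/proc_fs.h>
#include <linux/kernel.h>

static struct proc_dir_entry *example_root;

static void example_add_cpu(unsigned int cpu)
{
	char name[3 + 10 + 1];		/* "cpu" + decimal number + NUL */

	snprintf(name, sizeof(name), "cpu%u", cpu);
	proc_mkdir(name, example_root);	/* per-CPU entries would go inside */
}

static void example_remove_cpu(unsigned int cpu)
{
	char name[3 + 10 + 1];

	snprintf(name, sizeof(name), "cpu%u", cpu);
	remove_proc_subtree(name, example_root);	/* removes the whole subtree */
}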
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 433f5e8a2cd..9ea25fce06d 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -42,6 +42,7 @@
#include <linux/completion.h>
#include <linux/tracehook.h>
#include <linux/slab.h>
+#include <linux/cpu.h>
#include <asm/errno.h>
#include <asm/intrinsics.h>
@@ -619,6 +620,7 @@ static struct file_system_type pfm_fs_type = {
.mount = pfmfs_mount,
.kill_sb = kill_anon_super,
};
+MODULE_ALIAS_FS("pfmfs");
DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
@@ -1321,8 +1323,6 @@ out:
}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
-extern void update_pal_halt_status(int);
-
static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
@@ -1370,9 +1370,9 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
cpu));
/*
- * disable default_idle() to go to PAL_HALT
+ * Force idle() into poll mode
*/
- update_pal_halt_status(0);
+ cpu_idle_poll_ctrl(true);
UNLOCK_PFS(flags);
@@ -1429,11 +1429,8 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
is_syswide,
cpu));
- /*
- * if possible, enable default_idle() to go into PAL_HALT
- */
- if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
- update_pal_halt_status(1);
+ /* Undo forced polling. Last session reenables pal_halt */
+ cpu_idle_poll_ctrl(false);
UNLOCK_PFS(flags);
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index e34f565f595..a26fc640e4c 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -209,41 +209,13 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
local_irq_disable(); /* force interrupt disable */
}
-static int pal_halt = 1;
-static int can_do_pal_halt = 1;
-
static int __init nohalt_setup(char * str)
{
- pal_halt = can_do_pal_halt = 0;
+ cpu_idle_poll_ctrl(true);
return 1;
}
__setup("nohalt", nohalt_setup);
-void
-update_pal_halt_status(int status)
-{
- can_do_pal_halt = pal_halt && status;
-}
-
-/*
- * We use this if we don't have any better idle routine..
- */
-void
-default_idle (void)
-{
- local_irq_enable();
- while (!need_resched()) {
- if (can_do_pal_halt) {
- local_irq_disable();
- if (!need_resched()) {
- safe_halt();
- }
- local_irq_enable();
- } else
- cpu_relax();
- }
-}
-
#ifdef CONFIG_HOTPLUG_CPU
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
@@ -270,50 +242,29 @@ static inline void play_dead(void)
}
#endif /* CONFIG_HOTPLUG_CPU */
-void __attribute__((noreturn))
-cpu_idle (void)
+void arch_cpu_idle_dead(void)
+{
+ play_dead();
+}
+
+void arch_cpu_idle(void)
{
void (*mark_idle)(int) = ia64_mark_idle;
- int cpu = smp_processor_id();
-
- /* endless idle loop with no priority at all */
- while (1) {
- rcu_idle_enter();
- if (can_do_pal_halt) {
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we
- * test NEED_RESCHED:
- */
- smp_mb();
- } else {
- current_thread_info()->status |= TS_POLLING;
- }
- if (!need_resched()) {
- void (*idle)(void);
#ifdef CONFIG_SMP
- min_xtp();
+ min_xtp();
#endif
- rmb();
- if (mark_idle)
- (*mark_idle)(1);
-
- if (!idle)
- idle = default_idle;
- (*idle)();
- if (mark_idle)
- (*mark_idle)(0);
+ rmb();
+ if (mark_idle)
+ (*mark_idle)(1);
+
+ safe_halt();
+
+ if (mark_idle)
+ (*mark_idle)(0);
#ifdef CONFIG_SMP
- normal_xtp();
+ normal_xtp();
#endif
- }
- rcu_idle_exit();
- schedule_preempt_disabled();
- check_pgt_cache();
- if (cpu_is_offline(cpu))
- play_dead();
- }
}
void
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 500f1e4d9f9..8d87168d218 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -455,7 +455,7 @@ start_secondary (void *unused)
preempt_disable();
smp_callin();
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
return 0;
}
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 4332f7ee520..a7869f8f49a 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -256,7 +256,7 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
"srlz.d;;"
"ssm psr.i;;"
"srlz.d;;"
- : "=r"(ret) : "r"(iha), "r"(pte):"memory");
+ : "=&r"(ret) : "r"(iha), "r"(pte) : "memory");
return ret;
}
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 80dab509dfb..67c59ebec89 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -47,6 +47,8 @@ void show_mem(unsigned int filter)
printk(KERN_INFO "Mem-info:\n");
show_free_areas(filter);
printk(KERN_INFO "Node memory in pages:\n");
+ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
+ return;
for_each_online_pgdat(pgdat) {
unsigned long present;
unsigned long flags;
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index c2e955ee79a..ae4db4bd6d9 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -623,6 +623,8 @@ void show_mem(unsigned int filter)
printk(KERN_INFO "Mem-info:\n");
show_free_areas(filter);
+ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
+ return;
printk(KERN_INFO "Node memory in pages:\n");
for_each_online_pgdat(pgdat) {
unsigned long present;
@@ -817,13 +819,12 @@ void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
-int __meminit vmemmap_populate(struct page *start_page,
- unsigned long size, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
- return vmemmap_populate_basepages(start_page, size, node);
+ return vmemmap_populate_basepages(start, end, node);
}
-void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 20bc967c720..d1fe4b40260 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -154,25 +154,14 @@ ia64_init_addr_space (void)
void
free_initmem (void)
{
- unsigned long addr, eaddr;
-
- addr = (unsigned long) ia64_imva(__init_begin);
- eaddr = (unsigned long) ia64_imva(__init_end);
- while (addr < eaddr) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- ++totalram_pages;
- addr += PAGE_SIZE;
- }
- printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
- (__init_end - __init_begin) >> 10);
+ free_reserved_area((unsigned long)ia64_imva(__init_begin),
+ (unsigned long)ia64_imva(__init_end),
+ 0, "unused kernel");
}
void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
- struct page *page;
/*
* EFI uses 4KB pages while the kernel can use 4KB or bigger.
* Thus EFI and the kernel may have different page sizes. It is
@@ -213,11 +202,7 @@ free_initrd_mem (unsigned long start, unsigned long end)
for (; start < end; start += PAGE_SIZE) {
if (!virt_addr_valid(start))
continue;
- page = virt_to_page(start);
- ClearPageReserved(page);
- init_page_count(page);
- free_page(start);
- ++totalram_pages;
+ free_reserved_page(virt_to_page(start));
}
}
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 3dccdd8eb27..43964cde621 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -16,7 +16,7 @@
#include <asm/meminit.h>
static inline void __iomem *
-__ioremap (unsigned long phys_addr)
+__ioremap_uc(unsigned long phys_addr)
{
return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
}
@@ -24,7 +24,11 @@ __ioremap (unsigned long phys_addr)
void __iomem *
early_ioremap (unsigned long phys_addr, unsigned long size)
{
- return __ioremap(phys_addr);
+ u64 attr;
+ attr = kern_mem_attribute(phys_addr, size);
+ if (attr & EFI_MEMORY_WB)
+ return (void __iomem *) phys_to_virt(phys_addr);
+ return __ioremap_uc(phys_addr);
}
void __iomem *
@@ -47,7 +51,7 @@ ioremap (unsigned long phys_addr, unsigned long size)
if (attr & EFI_MEMORY_WB)
return (void __iomem *) phys_to_virt(phys_addr);
else if (attr & EFI_MEMORY_UC)
- return __ioremap(phys_addr);
+ return __ioremap_uc(phys_addr);
/*
* Some chipsets don't support UC access to memory. If
@@ -93,7 +97,7 @@ ioremap (unsigned long phys_addr, unsigned long size)
return (void __iomem *) (offset + (char __iomem *)addr);
}
- return __ioremap(phys_addr);
+ return __ioremap_uc(phys_addr);
}
EXPORT_SYMBOL(ioremap);
@@ -103,7 +107,7 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size)
if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
return NULL;
- return __ioremap(phys_addr);
+ return __ioremap_uc(phys_addr);
}
EXPORT_SYMBOL(ioremap_nocache);
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 3efea7d0a35..4248492b932 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -61,18 +61,36 @@ paddr_to_nid(unsigned long paddr)
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
+ /*
+ * NOTE: The following SMP-unsafe globals are only used early in boot
+ * when the kernel is running single-threaded.
+ */
+ static int __meminitdata last_ssec, last_esec;
+ static int __meminitdata last_nid;
+
+ if (section >= last_ssec && section < last_esec)
+ return last_nid;
for (i = 0; i < num_node_memblks; i++) {
ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
esec = (node_memblk[i].start_paddr + node_memblk[i].size +
((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
- if (section >= ssec && section < esec)
+ if (section >= ssec && section < esec) {
+ last_ssec = ssec;
+ last_esec = esec;
+ last_nid = node_memblk[i].nid;
return node_memblk[i].nid;
+ }
}
return -1;
}
+void __cpuinit numa_clear_node(int cpu)
+{
+ unmap_cpu_from_node(cpu, NUMA_NO_NODE);
+}
+
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* SRAT information is stored in node_memblk[], then we can use SRAT
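The __early_pfn_to_nid() change is a small boot-time optimisation: the function is called for every pfn in ascending order while the kernel is still single-threaded, so caching the last matching section range short-circuits the common case where consecutive pfns fall in the same memory block. The userspace model below shows the caching pattern; the block table contents are made up and stand in for node_memblk[].

#include <stdio.h>

struct blk { int start_sec, end_sec, nid; };

/* Made-up "node memory block" table. */
static const struct blk blks[] = {
	{ 0,   128, 0 },
	{ 128, 256, 1 },
};

/* Cached lookup; safe only while single-threaded, as in early boot. */
static int early_sec_to_nid(int sec)
{
	static int last_start = -1, last_end = -1, last_nid = -1;
	unsigned int i;

	if (sec >= last_start && sec < last_end)
		return last_nid;	/* fast path: same range as the last call */

	for (i = 0; i < sizeof(blks) / sizeof(blks[0]); i++) {
		if (sec >= blks[i].start_sec && sec < blks[i].end_sec) {
			last_start = blks[i].start_sec;
			last_end   = blks[i].end_sec;
			last_nid   = blks[i].nid;
			return last_nid;
		}
	}
	return -1;
}

int main(void)
{
	printf("%d %d %d\n",
	       early_sec_to_nid(5), early_sec_to_nid(6), early_sec_to_nid(200));
	return 0;
}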
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 60532ab2734..de1474ff0bc 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/pci-acpi.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
@@ -458,6 +459,16 @@ void pcibios_fixup_bus(struct pci_bus *b)
platform_pci_fixup_bus(b);
}
+void pcibios_add_bus(struct pci_bus *bus)
+{
+ acpi_pci_add_bus(bus);
+}
+
+void pcibios_remove_bus(struct pci_bus *bus)
+{
+ acpi_pci_remove_bus(bus);
+}
+
void pcibios_set_master (struct pci_dev *dev)
{
/* No special bus mastering setup handling */
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index 14c1711238c..e35f6485c1f 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -490,11 +490,14 @@ static int __init tiocx_init(void)
{
cnodeid_t cnodeid;
int found_tiocx_device = 0;
+ int err;
if (!ia64_platform_is("sn2"))
return 0;
- bus_register(&tiocx_bus_type);
+ err = bus_register(&tiocx_bus_type);
+ if (err)
+ return err;
for (cnodeid = 0; cnodeid < num_cnodes; cnodeid++) {
nasid_t nasid;
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 92623818a1f..bcd17b20657 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -10,7 +10,7 @@ config M32R
select ARCH_WANT_IPC_PARSE_VERSION
select HAVE_DEBUG_BUGVERBOSE
select HAVE_GENERIC_HARDIRQS
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_ATOMIC64
diff --git a/arch/m32r/include/uapi/asm/stat.h b/arch/m32r/include/uapi/asm/stat.h
index da4518f82d6..98470fe483b 100644
--- a/arch/m32r/include/uapi/asm/stat.h
+++ b/arch/m32r/include/uapi/asm/stat.h
@@ -63,10 +63,10 @@ struct stat64 {
long long st_size;
unsigned long st_blksize;
-#if defined(__BIG_ENDIAN)
+#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
unsigned long __pad4; /* future possible st_blocks high bits */
unsigned long st_blocks; /* Number 512-byte blocks allocated. */
-#elif defined(__LITTLE_ENDIAN)
+#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
unsigned long st_blocks; /* Number 512-byte blocks allocated. */
unsigned long __pad4; /* future possible st_blocks high bits */
#else
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index bde899e155d..e2d049018c3 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -47,24 +47,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle (void)
-{
- /* endless idle loop with no priority at all */
- while (1) {
- rcu_idle_enter();
- while (!need_resched())
- cpu_relax();
- rcu_idle_exit();
- schedule_preempt_disabled();
- }
-}
-
void machine_restart(char *__unused)
{
#if defined(CONFIG_PLAT_MAPPI3)
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 13168a769f8..0ac558adc60 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -432,7 +432,7 @@ int __init start_secondary(void *unused)
*/
local_flush_tlb_all();
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
return 0;
}
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index 78b660e903d..ab4cbce91a9 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -28,10 +28,7 @@
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/tlb.h>
-
-/* References to section boundaries */
-extern char _text, _etext, _edata;
-extern char __init_begin, __init_end;
+#include <asm/sections.h>
pgd_t swapper_pg_dir[1024];
@@ -184,17 +181,7 @@ void __init mem_init(void)
*======================================================================*/
void free_initmem(void)
{
- unsigned long addr;
-
- addr = (unsigned long)(&__init_begin);
- for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
- printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", \
- (int)(&__init_end - &__init_begin) >> 10);
+ free_initmem_default(0);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -204,13 +191,6 @@ void free_initmem(void)
*======================================================================*/
void free_initrd_mem(unsigned long start, unsigned long end)
{
- unsigned long p;
- for (p = start; p < end; p += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(p));
- init_page_count(virt_to_page(p));
- free_page(p);
- totalram_pages++;
- }
- printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+ free_reserved_area(start, end, 0, "initrd");
}
#endif
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 0e708c78e01..6de813370b8 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -8,7 +8,7 @@ config M68K
select GENERIC_IRQ_SHOW
select GENERIC_ATOMIC64
select HAVE_UID16
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
select GENERIC_CPU_DEVICES
select GENERIC_STRNCPY_FROM_USER if MMU
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index 7cdf6b01038..7240584d343 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -310,7 +310,6 @@ config COBRA5282
config SOM5282EM
bool "EMAC.Inc SOM5282EM board support"
depends on M528x
- select EMAC_INC
help
Support for the EMAC.Inc SOM5282EM module.
diff --git a/arch/m68k/include/asm/MC68328.h b/arch/m68k/include/asm/MC68328.h
index a337e56d09b..4ebf098b8a1 100644
--- a/arch/m68k/include/asm/MC68328.h
+++ b/arch/m68k/include/asm/MC68328.h
@@ -293,7 +293,7 @@
/*
* Here go the bitmasks themselves
*/
-#define IMR_MSPIM (1 << SPIM _IRQ_NUM) /* Mask SPI Master interrupt */
+#define IMR_MSPIM (1 << SPIM_IRQ_NUM) /* Mask SPI Master interrupt */
#define IMR_MTMR2 (1 << TMR2_IRQ_NUM) /* Mask Timer 2 interrupt */
#define IMR_MUART (1 << UART_IRQ_NUM) /* Mask UART interrupt */
#define IMR_MWDT (1 << WDT_IRQ_NUM) /* Mask Watchdog Timer interrupt */
@@ -327,7 +327,7 @@
#define IWR_ADDR 0xfffff308
#define IWR LONG_REF(IWR_ADDR)
-#define IWR_SPIM (1 << SPIM _IRQ_NUM) /* SPI Master interrupt */
+#define IWR_SPIM (1 << SPIM_IRQ_NUM) /* SPI Master interrupt */
#define IWR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */
#define IWR_UART (1 << UART_IRQ_NUM) /* UART interrupt */
#define IWR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */
@@ -357,7 +357,7 @@
#define ISR_ADDR 0xfffff30c
#define ISR LONG_REF(ISR_ADDR)
-#define ISR_SPIM (1 << SPIM _IRQ_NUM) /* SPI Master interrupt */
+#define ISR_SPIM (1 << SPIM_IRQ_NUM) /* SPI Master interrupt */
#define ISR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */
#define ISR_UART (1 << UART_IRQ_NUM) /* UART interrupt */
#define ISR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */
@@ -391,7 +391,7 @@
#define IPR_ADDR 0xfffff310
#define IPR LONG_REF(IPR_ADDR)
-#define IPR_SPIM (1 << SPIM _IRQ_NUM) /* SPI Master interrupt */
+#define IPR_SPIM (1 << SPIM_IRQ_NUM) /* SPI Master interrupt */
#define IPR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */
#define IPR_UART (1 << UART_IRQ_NUM) /* UART interrupt */
#define IPR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */
@@ -757,7 +757,7 @@
/* 'EZ328-compatible definitions */
#define TCN_ADDR TCN1_ADDR
-#define TCN TCN
+#define TCN TCN1
/*
* Timer Unit 1 and 2 Status Registers
diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h
index 4395ffc51fd..8cc83431805 100644
--- a/arch/m68k/include/asm/gpio.h
+++ b/arch/m68k/include/asm/gpio.h
@@ -86,4 +86,24 @@ static inline int gpio_cansleep(unsigned gpio)
return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio);
}
+static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
+{
+ int err;
+
+ err = gpio_request(gpio, label);
+ if (err)
+ return err;
+
+ if (flags & GPIOF_DIR_IN)
+ err = gpio_direction_input(gpio);
+ else
+ err = gpio_direction_output(gpio,
+ (flags & GPIOF_INIT_HIGH) ? 1 : 0);
+
+ if (err)
+ gpio_free(gpio);
+
+ return err;
+}
+
#endif
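
The gpio_request_one() helper added above folds gpio_request() plus the direction setup into one call and releases the pin again if the direction setup fails. A minimal usage sketch follows; the pin number and label are made up, and GPIOF_OUT_INIT_HIGH is the usual <linux/gpio.h> shorthand for GPIOF_DIR_OUT | GPIOF_INIT_HIGH.

/* Hypothetical caller: claim a made-up pin as an output driven high. */
static int __init example_led_init(void)
{
	int err;

	err = gpio_request_one(42, GPIOF_OUT_INIT_HIGH, "example-led");
	if (err)
		return err;	/* pin busy, or setting the direction failed */

	gpio_set_value(42, 0);	/* later: drive it low ... */
	gpio_free(42);		/* ... and release it on teardown */
	return 0;
}
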
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index d538694ad20..c55ff719fa7 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -51,40 +51,16 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return sw->retpc;
}
-/*
- * The idle loop on an m68k..
- */
-static void default_idle(void)
+void arch_cpu_idle(void)
{
- if (!need_resched())
#if defined(MACH_ATARI_ONLY)
- /* block out HSYNC on the atari (falcon) */
- __asm__("stop #0x2200" : : : "cc");
+ /* block out HSYNC on the atari (falcon) */
+ __asm__("stop #0x2200" : : : "cc");
#else
- __asm__("stop #0x2000" : : : "cc");
+ __asm__("stop #0x2000" : : : "cc");
#endif
}
-void (*idle)(void) = default_idle;
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
-{
- /* endless idle loop with no priority at all */
- while (1) {
- rcu_idle_enter();
- while (!need_resched())
- idle();
- rcu_idle_exit();
- schedule_preempt_disabled();
- }
-}
-
void machine_restart(char * __unused)
{
if (mach_reset)
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 71fb29938db..911ba472e6c 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -57,6 +57,9 @@ void (*mach_reset)(void);
void (*mach_halt)(void);
void (*mach_power_off)(void);
+#ifdef CONFIG_M68000
+#define CPU_NAME "MC68000"
+#endif
#ifdef CONFIG_M68328
#define CPU_NAME "MC68328"
#endif
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index afd8106fd83..1af2ca3411f 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -110,18 +110,7 @@ void __init paging_init(void)
void free_initmem(void)
{
#ifndef CONFIG_MMU_SUN3
- unsigned long addr;
-
- addr = (unsigned long) __init_begin;
- for (; addr < ((unsigned long) __init_end); addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
- pr_notice("Freeing unused kernel memory: %luk freed (0x%x - 0x%x)\n",
- (addr - (unsigned long) __init_begin) >> 10,
- (unsigned int) __init_begin, (unsigned int) __init_end);
+ free_initmem_default(0);
#endif /* CONFIG_MMU_SUN3 */
}
@@ -188,7 +177,7 @@ void __init mem_init(void)
}
}
-#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+#if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
/* insert pointer tables allocated so far into the tablelist */
init_pointer_table((unsigned long)kernel_pg_dir);
for (i = 0; i < PTRS_PER_PGD; i++) {
@@ -213,15 +202,6 @@ void __init mem_init(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- int pages = 0;
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- pages++;
- }
- pr_notice("Freeing initrd memory: %dk freed\n",
- pages << (PAGE_SHIFT - 10));
+ free_reserved_area(start, end, 0, "initrd");
}
#endif
diff --git a/arch/m68k/platform/coldfire/m528x.c b/arch/m68k/platform/coldfire/m528x.c
index 83b7dad7a84..b03a9d27183 100644
--- a/arch/m68k/platform/coldfire/m528x.c
+++ b/arch/m68k/platform/coldfire/m528x.c
@@ -69,7 +69,7 @@ static void __init m528x_uarts_init(void)
u8 port;
/* make sure PUAPAR is set for UART0 and UART1 */
- port = readb(MCF5282_GPIO_PUAPAR);
+ port = readb(MCFGPIO_PUAPAR);
port |= 0x03 | (0x03 << 2);
writeb(port, MCFGPIO_PUAPAR);
}
diff --git a/arch/metag/include/asm/elf.h b/arch/metag/include/asm/elf.h
index d63b9d0e57d..d2baf696179 100644
--- a/arch/metag/include/asm/elf.h
+++ b/arch/metag/include/asm/elf.h
@@ -100,9 +100,6 @@ typedef unsigned long elf_fpregset_t;
#define ELF_PLATFORM (NULL)
-#define SET_PERSONALITY(ex) \
- set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
#define STACK_RND_MASK (0)
#ifdef CONFIG_METAG_USER_TCM
diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h
index 0ecd34d8b5f..7c4a3300614 100644
--- a/arch/metag/include/asm/thread_info.h
+++ b/arch/metag/include/asm/thread_info.h
@@ -150,6 +150,4 @@ static inline int kstack_end(void *addr)
#define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
_TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP))
-#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
-
#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/metag/kernel/process.c b/arch/metag/kernel/process.c
index c6efe62e5b7..dc592354456 100644
--- a/arch/metag/kernel/process.c
+++ b/arch/metag/kernel/process.c
@@ -22,6 +22,7 @@
#include <linux/pm.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
+#include <linux/smp.h>
#include <asm/core_reg.h>
#include <asm/user_gateway.h>
#include <asm/tcm.h>
@@ -31,7 +32,7 @@
/*
* Wait for the next interrupt and enable local interrupts
*/
-static inline void arch_idle(void)
+void arch_cpu_idle(void)
{
int tmp;
@@ -59,36 +60,12 @@ static inline void arch_idle(void)
: "r" (get_trigger_mask()));
}
-void cpu_idle(void)
-{
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
-
- while (!need_resched()) {
- /*
- * We need to disable interrupts here to ensure we don't
- * miss a wakeup call.
- */
- local_irq_disable();
- if (!need_resched()) {
#ifdef CONFIG_HOTPLUG_CPU
- if (cpu_is_offline(smp_processor_id()))
- cpu_die();
-#endif
- arch_idle();
- } else {
- local_irq_enable();
- }
- }
-
- rcu_idle_exit();
- tick_nohz_idle_exit();
- schedule_preempt_disabled();
- }
+void arch_cpu_idle_dead(void)
+{
+ cpu_die();
}
+#endif
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index 4b6d1f14df3..4de8fc8e31a 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -297,7 +297,7 @@ asmlinkage void secondary_start_kernel(void)
/*
* OK, it's off to the idle thread for us
*/
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
}
void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/metag/mm/Kconfig b/arch/metag/mm/Kconfig
index cd7f2f2ad41..975f2f4e3ec 100644
--- a/arch/metag/mm/Kconfig
+++ b/arch/metag/mm/Kconfig
@@ -40,6 +40,7 @@ endchoice
config NUMA
bool "Non Uniform Memory Access (NUMA) Support"
+ select ARCH_WANT_NUMA_VARIABLE_LOCALITY
help
Some Meta systems have MMU-mappable on-chip memories with
lower latencies than main memory. This enables support for
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c
index 504a398d5f8..d05b8455c44 100644
--- a/arch/metag/mm/init.c
+++ b/arch/metag/mm/init.c
@@ -380,14 +380,8 @@ void __init mem_init(void)
#ifdef CONFIG_HIGHMEM
unsigned long tmp;
- for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
- struct page *page = pfn_to_page(tmp);
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalhigh_pages++;
- }
- totalram_pages += totalhigh_pages;
+ for (tmp = highstart_pfn; tmp < highend_pfn; tmp++)
+ free_highmem_page(pfn_to_page(tmp));
num_physpages += totalhigh_pages;
#endif /* CONFIG_HIGHMEM */
@@ -412,32 +406,15 @@ void __init mem_init(void)
return;
}
-static void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
- unsigned long addr;
-
- for (addr = begin; addr < end; addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
- free_page(addr);
- totalram_pages++;
- }
- pr_info("Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-}
-
void free_initmem(void)
{
- free_init_pages("unused kernel memory",
- (unsigned long)(&__init_begin),
- (unsigned long)(&__init_end));
+ free_initmem_default(POISON_FREE_INITMEM);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- end = end & PAGE_MASK;
- free_init_pages("initrd memory", start, end);
+ free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
}
#endif
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 7843d11156e..a827057c792 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -19,13 +19,14 @@ config MICROBLAZE
select HAVE_DEBUG_KMEMLEAK
select IRQ_DOMAIN
select HAVE_GENERIC_HARDIRQS
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
select GENERIC_CPU_DEVICES
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
+ select GENERIC_IDLE_POLL_SETUP
select MODULES_USE_ELF_RELA
select CLONE_BACKWARDS
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 0759153e811..d6e0ffea28b 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -22,7 +22,6 @@
extern const struct seq_operations cpuinfo_op;
# define cpu_relax() barrier()
-# define cpu_sleep() do {} while (0)
#define task_pt_regs(tsk) \
(((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)
@@ -160,10 +159,6 @@ unsigned long get_wchan(struct task_struct *p);
# define STACK_TOP TASK_SIZE
# define STACK_TOP_MAX STACK_TOP
-void disable_hlt(void);
-void enable_hlt(void);
-void default_idle(void);
-
#ifdef CONFIG_DEBUG_FS
extern struct dentry *of_debugfs_root;
#endif
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index 0e0b0a5ec75..f05df5630c8 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -46,7 +46,6 @@ void machine_shutdown(void);
void machine_halt(void);
void machine_power_off(void);
-void free_init_pages(char *what, unsigned long begin, unsigned long end);
extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 008f30433d2..de26ea6373d 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -182,7 +182,6 @@ static inline bool test_and_clear_restore_sigmask(void)
ti->status &= ~TS_RESTORE_SIGMASK;
return true;
}
-#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif
#endif /* __KERNEL__ */
diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c
index 60dcacc6803..365f2d53f1b 100644
--- a/arch/microblaze/kernel/early_printk.c
+++ b/arch/microblaze/kernel/early_printk.c
@@ -21,7 +21,6 @@
#include <asm/setup.h>
#include <asm/prom.h>
-static u32 early_console_initialized;
static u32 base_addr;
#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
@@ -109,27 +108,11 @@ static struct console early_serial_uart16550_console = {
};
#endif /* CONFIG_SERIAL_8250_CONSOLE */
-static struct console *early_console;
-
-void early_printk(const char *fmt, ...)
-{
- char buf[512];
- int n;
- va_list ap;
-
- if (early_console_initialized) {
- va_start(ap, fmt);
- n = vscnprintf(buf, 512, fmt, ap);
- early_console->write(early_console, buf, n);
- va_end(ap);
- }
-}
-
int __init setup_early_printk(char *opt)
{
int version = 0;
- if (early_console_initialized)
+ if (early_console)
return 1;
base_addr = of_early_console(&version);
@@ -159,7 +142,6 @@ int __init setup_early_printk(char *opt)
}
register_console(early_console);
- early_console_initialized = 1;
return 0;
}
return 1;
@@ -169,7 +151,7 @@ int __init setup_early_printk(char *opt)
* only for early console because of performance degression */
void __init remap_early_printk(void)
{
- if (!early_console_initialized || !early_console)
+ if (!early_console)
return;
pr_info("early_printk_console remapping from 0x%x to ", base_addr);
base_addr = (u32) ioremap(base_addr, PAGE_SIZE);
@@ -194,9 +176,9 @@ void __init remap_early_printk(void)
void __init disable_early_printk(void)
{
- if (!early_console_initialized || !early_console)
+ if (!early_console)
return;
pr_warn("disabling early console\n");
unregister_console(early_console);
- early_console_initialized = 0;
+ early_console = NULL;
}
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index fa0ea609137..7cce2e9c171 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -44,71 +44,6 @@ void show_regs(struct pt_regs *regs)
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
-static int hlt_counter = 1;
-
-void disable_hlt(void)
-{
- hlt_counter++;
-}
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
- hlt_counter--;
-}
-EXPORT_SYMBOL(enable_hlt);
-
-static int __init nohlt_setup(char *__unused)
-{
- hlt_counter = 1;
- return 1;
-}
-__setup("nohlt", nohlt_setup);
-
-static int __init hlt_setup(char *__unused)
-{
- hlt_counter = 0;
- return 1;
-}
-__setup("hlt", hlt_setup);
-
-void default_idle(void)
-{
- if (likely(hlt_counter)) {
- local_irq_disable();
- stop_critical_timings();
- cpu_relax();
- start_critical_timings();
- local_irq_enable();
- } else {
- clear_thread_flag(TIF_POLLING_NRFLAG);
- smp_mb__after_clear_bit();
- local_irq_disable();
- while (!need_resched())
- cpu_sleep();
- local_irq_enable();
- set_thread_flag(TIF_POLLING_NRFLAG);
- }
-}
-
-void cpu_idle(void)
-{
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- /* endless idle loop with no priority at all */
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
- while (!need_resched())
- default_idle();
- rcu_idle_exit();
- tick_nohz_idle_exit();
-
- schedule_preempt_disabled();
- check_pgt_cache();
- }
-}
-
void flush_thread(void)
{
}
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 8f8b367c079..4ec137d13ad 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -82,13 +82,9 @@ static unsigned long highmem_setup(void)
/* FIXME not sure about */
if (memblock_is_reserved(pfn << PAGE_SHIFT))
continue;
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalhigh_pages++;
+ free_highmem_page(page);
reservedpages++;
}
- totalram_pages += totalhigh_pages;
pr_info("High memory: %luk\n",
totalhigh_pages << (PAGE_SHIFT-10));
@@ -236,40 +232,16 @@ void __init setup_memory(void)
paging_init();
}
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
- unsigned long addr;
-
- for (addr = begin; addr < end; addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
- pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
-}
-
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- int pages = 0;
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- pages++;
- }
- pr_notice("Freeing initrd memory: %dk freed\n",
- (int)(pages * (PAGE_SIZE / 1024)));
+ free_reserved_area(start, end, 0, "initrd");
}
#endif
void free_initmem(void)
{
- free_init_pages("unused kernel memory",
- (unsigned long)(&__init_begin),
- (unsigned long)(&__init_end));
+ free_initmem_default(0);
}
void __init mem_init(void)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ae9c716c46b..3a7b3954ce1 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -18,7 +18,7 @@ config MIPS
select HAVE_KRETPROBES
select HAVE_DEBUG_KMEMLEAK
select ARCH_BINFMT_ELF_RANDOMIZE_PIE
- select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
select RTC_LIB if !MACH_LOONGSON
select GENERIC_ATOMIC64 if !64BIT
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
@@ -38,7 +38,7 @@ config MIPS
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE
select HAVE_MOD_ARCH_SPECIFIC
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select MODULES_USE_ELF_REL if MODULES
select MODULES_USE_ELF_RELA if MODULES && 64BIT
select CLONE_BACKWARDS
@@ -404,6 +404,8 @@ config PMC_MSP
select IRQ_CPU
select SERIAL_8250
select SERIAL_8250_CONSOLE
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ select USB_EHCI_BIG_ENDIAN_DESC
help
This adds support for the PMC-Sierra family of Multi-Service
Processor System-On-A-Chips. These parts include a number
@@ -657,7 +659,7 @@ config SNI_RM
bool "SNI RM200/300/400"
select FW_ARC if CPU_LITTLE_ENDIAN
select FW_ARC32 if CPU_LITTLE_ENDIAN
- select SNIPROM if CPU_BIG_ENDIAN
+ select FW_SNIPROM if CPU_BIG_ENDIAN
select ARCH_MAY_HAVE_PC_FDC
select BOOT_ELF32
select CEVT_R4K
@@ -1144,7 +1146,7 @@ config DEFAULT_SGI_PARTITION
config FW_ARC32
bool
-config SNIPROM
+config FW_SNIPROM
bool
config BOOT_ELF32
@@ -1433,6 +1435,7 @@ config CPU_CAVIUM_OCTEON
select CPU_SUPPORTS_HUGEPAGES
select LIBFDT
select USE_OF
+ select USB_EHCI_BIG_ENDIAN_MMIO
help
The Cavium Octeon processor is a highly integrated chip containing
many ethernet hardware widgets for networking tasks. The processor
@@ -1493,7 +1496,6 @@ config CPU_XLP
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
- select CPU_HAS_LLSC
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
select CPU_HAS_PREFETCH
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index ed1949c2950..9aa7d44898e 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -745,10 +745,7 @@ void __init board_prom_init(void)
strcpy(cfe_version, "unknown");
printk(KERN_INFO PFX "CFE version: %s\n", cfe_version);
- if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) {
- printk(KERN_ERR PFX "invalid nvram checksum\n");
- return;
- }
+ bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET);
board_name = bcm63xx_nvram_get_name();
/* find board by name */
diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c
index f1c9c3e2f67..e97fd60e92e 100644
--- a/arch/mips/bcm63xx/dev-spi.c
+++ b/arch/mips/bcm63xx/dev-spi.c
@@ -85,20 +85,9 @@ static struct platform_device bcm63xx_spi_device = {
int __init bcm63xx_spi_register(void)
{
- struct clk *periph_clk;
-
if (BCMCPU_IS_6328() || BCMCPU_IS_6345())
return -ENODEV;
- periph_clk = clk_get(NULL, "periph");
- if (IS_ERR(periph_clk)) {
- pr_err("unable to get periph clock\n");
- return -ENODEV;
- }
-
- /* Set bus frequency */
- spi_pdata.speed_hz = clk_get_rate(periph_clk);
-
spi_resources[0].start = bcm63xx_regset_address(RSET_SPI);
spi_resources[0].end = spi_resources[0].start;
spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);
diff --git a/arch/mips/bcm63xx/nvram.c b/arch/mips/bcm63xx/nvram.c
index 62061168083..a4b8864f930 100644
--- a/arch/mips/bcm63xx/nvram.c
+++ b/arch/mips/bcm63xx/nvram.c
@@ -38,7 +38,7 @@ struct bcm963xx_nvram {
static struct bcm963xx_nvram nvram;
static int mac_addr_used;
-int __init bcm63xx_nvram_init(void *addr)
+void __init bcm63xx_nvram_init(void *addr)
{
unsigned int check_len;
u32 crc, expected_crc;
@@ -60,9 +60,8 @@ int __init bcm63xx_nvram_init(void *addr)
crc = crc32_le(~0, (u8 *)&nvram, check_len);
if (crc != expected_crc)
- return -EINVAL;
-
- return 0;
+ pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
+ expected_crc, crc);
}
u8 *bcm63xx_nvram_get_name(void)
diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c
index 314231be788..35e18e98beb 100644
--- a/arch/mips/bcm63xx/setup.c
+++ b/arch/mips/bcm63xx/setup.c
@@ -157,4 +157,4 @@ int __init bcm63xx_register_devices(void)
return board_register_devices();
}
-device_initcall(bcm63xx_register_devices);
+arch_initcall(bcm63xx_register_devices);
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index c594a3d4f74..b0baa299f89 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -174,7 +174,10 @@ static int octeon_kexec_prepare(struct kimage *image)
static void octeon_generic_shutdown(void)
{
- int cpu, i;
+ int i;
+#ifdef CONFIG_SMP
+ int cpu;
+#endif
struct cvmx_bootmem_desc *bootmem_desc;
void *named_block_array_ptr;
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
index ef99db994c2..fe0d15d3266 100644
--- a/arch/mips/include/asm/hugetlb.h
+++ b/arch/mips/include/asm/hugetlb.h
@@ -10,6 +10,7 @@
#define __ASM_HUGETLB_H
#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
index c9bae136260..b0184cf0257 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
@@ -13,7 +13,6 @@ struct bcm63xx_spi_pdata {
unsigned int msg_ctl_width;
int bus_num;
int num_chipselect;
- u32 speed_hz;
};
enum bcm63xx_regs_spi {
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
index 62d6a3b4d3b..4e0b6bc1165 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
@@ -9,10 +9,8 @@
*
* Initialized the local nvram copy from the target address and checks
* its checksum.
- *
- * Returns 0 on success.
*/
-int __init bcm63xx_nvram_init(void *nvram);
+void bcm63xx_nvram_init(void *nvram);
/**
* bcm63xx_nvram_get_name() - returns the board name according to nvram
diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
index d9c82841903..193c0912d38 100644
--- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
@@ -28,11 +28,7 @@
/* #define cpu_has_prefetch ? */
#define cpu_has_mcheck 1
/* #define cpu_has_ejtag ? */
-#ifdef CONFIG_CPU_HAS_LLSC
#define cpu_has_llsc 1
-#else
-#define cpu_has_llsc 0
-#endif
/* #define cpu_has_vtag_icache ? */
/* #define cpu_has_dc_aliases ? */
/* #define cpu_has_ic_fills_f_dc ? */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 12b70c25906..0da44d422f5 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1166,7 +1166,10 @@ do { \
unsigned int __dspctl; \
\
__asm__ __volatile__( \
+ " .set push \n" \
+ " .set dsp \n" \
" rddsp %0, %x1 \n" \
+ " .set pop \n" \
: "=r" (__dspctl) \
: "i" (mask)); \
__dspctl; \
@@ -1175,30 +1178,198 @@ do { \
#define wrdsp(val, mask) \
do { \
__asm__ __volatile__( \
+ " .set push \n" \
+ " .set dsp \n" \
" wrdsp %0, %x1 \n" \
+ " .set pop \n" \
: \
: "r" (val), "i" (mask)); \
} while (0)
-#define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;})
-#define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;})
-#define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;})
-#define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;})
-
-#define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;})
-#define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;})
-#define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;})
-#define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;})
-
-#define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x))
-#define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x))
-#define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x))
-#define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x))
-
-#define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x))
-#define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x))
-#define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x))
-#define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x))
+#define mflo0() \
+({ \
+ long mflo0; \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mflo %0, $ac0 \n" \
+ " .set pop \n" \
+ : "=r" (mflo0)); \
+ mflo0; \
+})
+
+#define mflo1() \
+({ \
+ long mflo1; \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mflo %0, $ac1 \n" \
+ " .set pop \n" \
+ : "=r" (mflo1)); \
+ mflo1; \
+})
+
+#define mflo2() \
+({ \
+ long mflo2; \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mflo %0, $ac2 \n" \
+ " .set pop \n" \
+ : "=r" (mflo2)); \
+ mflo2; \
+})
+
+#define mflo3() \
+({ \
+ long mflo3; \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mflo %0, $ac3 \n" \
+ " .set pop \n" \
+ : "=r" (mflo3)); \
+ mflo3; \
+})
+
+#define mfhi0() \
+({ \
+ long mfhi0; \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mfhi %0, $ac0 \n" \
+ " .set pop \n" \
+ : "=r" (mfhi0)); \
+ mfhi0; \
+})
+
+#define mfhi1() \
+({ \
+ long mfhi1; \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mfhi %0, $ac1 \n" \
+ " .set pop \n" \
+ : "=r" (mfhi1)); \
+ mfhi1; \
+})
+
+#define mfhi2() \
+({ \
+ long mfhi2; \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mfhi %0, $ac2 \n" \
+ " .set pop \n" \
+ : "=r" (mfhi2)); \
+ mfhi2; \
+})
+
+#define mfhi3() \
+({ \
+ long mfhi3; \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mfhi %0, $ac3 \n" \
+ " .set pop \n" \
+ : "=r" (mfhi3)); \
+ mfhi3; \
+})
+
+
+#define mtlo0(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mtlo %0, $ac0 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mtlo1(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mtlo %0, $ac1 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mtlo2(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mtlo %0, $ac2 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mtlo3(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mtlo %0, $ac3 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mthi0(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mthi %0, $ac0 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mthi1(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mthi %0, $ac1 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mthi2(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mthi %0, $ac2 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mthi3(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mthi %0, $ac3 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
#else
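
The .set push / .set dsp / .set pop wrappers added around every accessor let the assembler accept the DSP ASE instructions locally, so the kernel no longer has to be built with -mdsp/-mdspr2 globally (see the Makefile change further down); callers are still expected to gate use on cpu_has_dsp at run time. A hedged sketch of how the accessors might be used to save and restore one accumulator; the struct and function names are made up, not kernel code.

/* Illustrative only: save/restore DSP accumulator 1 with the accessors
 * above.  Callers check cpu_has_dsp first.
 */
struct dsp_ac1_state {
	long hi, lo;
};

static inline void save_dsp_ac1(struct dsp_ac1_state *s)
{
	if (!cpu_has_dsp)
		return;
	s->hi = mfhi1();
	s->lo = mflo1();
}

static inline void restore_dsp_ac1(const struct dsp_ac1_state *s)
{
	if (!cpu_has_dsp)
		return;
	mthi1(s->hi);
	mtlo1(s->lo);
}
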
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 99fc547af9d..eab99e536b5 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -31,7 +31,7 @@
#define PAGE_SHIFT 16
#endif
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
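
PAGE_MASK is now built from a plain int rather than from the unsigned-long PAGE_SIZE. The likely intent (a reading of the change, not stated in the hunk) is that an int mask sign-extends when combined with values wider than long, for example 64-bit physical addresses on a 32-bit kernel, whereas the old unsigned long mask zero-extends and silently clears the high bits. A standalone demonstration, using fixed-width types to model a 32-bit long:

/* Models the difference on a 32-bit kernel masking a >32-bit physical
 * address: uint32_t stands in for the old unsigned-long mask, int32_t
 * for the new int-typed one.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t phys = 0x12345678abcdULL;		/* 48-bit physical address */
	uint32_t mask_old = ~((UINT32_C(1) << 12) - 1);	/* widens to 0x00000000fffff000 */
	int32_t  mask_new = ~((1 << 12) - 1);		/* widens to 0xfffffffffffff000 */

	printf("old: %llx\n", (unsigned long long)(phys & mask_old));	/* 5678a000: high bits lost */
	printf("new: %llx\n", (unsigned long long)(phys & mask_new));	/* 12345678a000: high bits kept */
	return 0;
}
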
diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h
index 197f6367c20..8efe5a9e2c3 100644
--- a/arch/mips/include/asm/signal.h
+++ b/arch/mips/include/asm/signal.h
@@ -21,6 +21,6 @@
#include <asm/sigcontext.h>
#include <asm/siginfo.h>
-#define __ARCH_HAS_ODD_SIGACTION
+#define __ARCH_HAS_IRIX_SIGACTION
#endif /* _ASM_SIGNAL_H */
diff --git a/arch/mips/include/uapi/asm/signal.h b/arch/mips/include/uapi/asm/signal.h
index d6b18b4d0f3..addb9f556b7 100644
--- a/arch/mips/include/uapi/asm/signal.h
+++ b/arch/mips/include/uapi/asm/signal.h
@@ -72,6 +72,12 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
*
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
* Unix names RESETHAND and NODEFER respectively.
+ *
+ * SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever
+ * supported its use and no libc was using it, so the entire sa-restorer
+ * functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48
+ * retaining only the SA_RESTORER definition as a reminder to avoid
+ * accidental reuse of the mask bit.
*/
#define SA_ONSTACK 0x08000000
#define SA_RESETHAND 0x80000000
@@ -84,8 +90,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
-#define SA_RESTORER 0x04000000 /* Only for o32 */
-
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index f81d98f6184..de75fb50562 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -100,29 +100,16 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
#
-# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe
-# to enable DSP assembler support here even if the MIPS Release 2 CPU we
-# are targetting does not support DSP because all code-paths making use of
-# it properly check that the running CPU *actually does* support these
-# instructions.
+# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not

+# safe to unconditionally use the assembler -mdsp / -mdspr2 switches
+# here because the compiler may use DSP ASE instructions (such as lwx) in
+# code paths where we cannot check that the CPU we are running on supports it.
+# Proper abstraction using HAVE_AS_DSP and macros is done in
+# arch/mips/include/asm/mipsregs.h.
#
ifeq ($(CONFIG_CPU_MIPSR2), y)
CFLAGS_DSP = -DHAVE_AS_DSP
-#
-# Check if assembler supports DSP ASE
-#
-ifeq ($(call cc-option-yn,-mdsp), y)
-CFLAGS_DSP += -mdsp
-endif
-
-#
-# Check if assembler supports DSP ASE Rev2
-#
-ifeq ($(call cc-option-yn,-mdspr2), y)
-CFLAGS_DSP += -mdspr2
-endif
-
CFLAGS_signal.o = $(CFLAGS_DSP)
CFLAGS_signal32.o = $(CFLAGS_DSP)
CFLAGS_process.o = $(CFLAGS_DSP)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 6bfccc227a9..5fe66a0c322 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -580,6 +580,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->tlbsize = 48;
break;
case PRID_IMP_VR41XX:
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->options = R4K_OPTS;
+ c->tlbsize = 32;
switch (c->processor_id & 0xf0) {
case PRID_REV_VR4111:
c->cputype = CPU_VR4111;
@@ -604,6 +607,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "NEC VR4131";
} else {
c->cputype = CPU_VR4133;
+ c->options |= MIPS_CPU_LLSC;
__cpu_name[cpu] = "NEC VR4133";
}
break;
@@ -613,9 +617,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "NEC Vr41xx";
break;
}
- set_isa(c, MIPS_CPU_ISA_III);
- c->options = R4K_OPTS;
- c->tlbsize = 32;
break;
case PRID_IMP_R4300:
c->cputype = CPU_R4300;
@@ -1226,10 +1227,8 @@ __cpuinit void cpu_probe(void)
if (c->options & MIPS_CPU_FPU) {
c->fpu_id = cpu_get_fpu_id();
- if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
- c->isa_level == MIPS_CPU_ISA_M32R2 ||
- c->isa_level == MIPS_CPU_ISA_M64R1 ||
- c->isa_level == MIPS_CPU_ISA_M64R2) {
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
+ MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
if (c->fpu_id & MIPS_FPIR_3D)
c->ases |= MIPS_ASE_MIPS3D;
}
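
This hunk, and the matching ones in c-r4k.c, sc-mips.c and traps.c further down, replaces chains of equality tests against individual MIPS_CPU_ISA_* values with a single AND. That only works because the ISA levels are encoded as distinct bits, which is what this series presumes. The pattern in isolation, with made-up flag names:

/* Membership test via bit flags: a set of levels is the OR of its
 * members, so "is one of these four" becomes one AND.  The names are
 * illustrative, not the kernel's definitions.
 */
#define ISA_M32R1	0x01
#define ISA_M32R2	0x02
#define ISA_M64R1	0x04
#define ISA_M64R2	0x08

static int isa_is_mips32_or_mips64(unsigned int isa_level)
{
	return (isa_level & (ISA_M32R1 | ISA_M32R2 | ISA_M64R1 | ISA_M64R2)) != 0;
}
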
diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c
index 9e6440eaa45..505cb77d128 100644
--- a/arch/mips/kernel/early_printk.c
+++ b/arch/mips/kernel/early_printk.c
@@ -7,7 +7,9 @@
* Copyright (C) 2007 MIPS Technologies, Inc.
* written by Ralf Baechle (ralf@linux-mips.org)
*/
+#include <linux/kernel.h>
#include <linux/console.h>
+#include <linux/printk.h>
#include <linux/init.h>
#include <asm/setup.h>
@@ -24,20 +26,18 @@ static void early_console_write(struct console *con, const char *s, unsigned n)
}
}
-static struct console early_console = {
+static struct console early_console_prom = {
.name = "early",
.write = early_console_write,
.flags = CON_PRINTBUFFER | CON_BOOT,
.index = -1
};
-static int early_console_initialized __initdata;
-
void __init setup_early_printk(void)
{
- if (early_console_initialized)
+ if (early_console)
return;
- early_console_initialized = 1;
+ early_console = &early_console_prom;
- register_console(&early_console);
+ register_console(&early_console_prom);
}
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 8eeee1c860c..db9655f0889 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -171,7 +171,7 @@ SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third,
err = compat_sys_shmctl(first, second, compat_ptr(ptr));
break;
default:
- err = -EINVAL;
+ err = -ENOSYS;
break;
}
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 16586767335..33d067148e6 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -46,10 +46,9 @@
PTR_L a5, PT_R9(sp)
PTR_L a6, PT_R10(sp)
PTR_L a7, PT_R11(sp)
-#else
- PTR_ADDIU sp, PT_SIZE
#endif
-.endm
+ PTR_ADDIU sp, PT_SIZE
+ .endm
.macro RETURN_BACK
jr ra
@@ -68,7 +67,11 @@ NESTED(ftrace_caller, PT_SIZE, ra)
.globl _mcount
_mcount:
b ftrace_stub
- addiu sp,sp,8
+#ifdef CONFIG_32BIT
+ addiu sp,sp,8
+#else
+ nop
+#endif
/* When tracing is activated, it calls ftrace_caller+8 (aka here) */
lw t1, function_trace_stop
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 135c4aadccb..7a54f74b781 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -67,7 +67,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_mips_r) {
seq_printf(m, "isa\t\t\t:");
if (cpu_has_mips_1)
- seq_printf(m, "%s", "mips1");
+ seq_printf(m, "%s", " mips1");
if (cpu_has_mips_2)
seq_printf(m, "%s", " mips2");
if (cpu_has_mips_3)
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 3be4405c2d1..cfc742d75b7 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -41,44 +41,26 @@
#include <asm/inst.h>
#include <asm/stacktrace.h>
-/*
- * The idle thread. There's no useful work to be done, so just try to conserve
- * power and have a low exit latency (ie sit in a loop waiting for somebody to
- * say that they'd like to reschedule)
- */
-void __noreturn cpu_idle(void)
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
{
- int cpu;
-
- /* CPU is going idle. */
- cpu = smp_processor_id();
+ /* What the heck is this check doing ? */
+ if (!cpu_isset(smp_processor_id(), cpu_callin_map))
+ play_dead();
+}
+#endif
- /* endless idle loop with no priority at all */
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
- while (!need_resched() && cpu_online(cpu)) {
+void arch_cpu_idle(void)
+{
#ifdef CONFIG_MIPS_MT_SMTC
- extern void smtc_idle_loop_hook(void);
+ extern void smtc_idle_loop_hook(void);
- smtc_idle_loop_hook();
+ smtc_idle_loop_hook();
#endif
-
- if (cpu_wait) {
- /* Don't trace irqs off for idle */
- stop_critical_timings();
- (*cpu_wait)();
- start_critical_timings();
- }
- }
-#ifdef CONFIG_HOTPLUG_CPU
- if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map))
- play_dead();
-#endif
- rcu_idle_exit();
- tick_nohz_idle_exit();
- schedule_preempt_disabled();
- }
+ if (cpu_wait)
+ (*cpu_wait)();
+ else
+ local_irq_enable();
}
asmlinkage void ret_from_fork(void);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 66bf4e22d9b..aee04af213c 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -139,7 +139,7 @@ asmlinkage __cpuinit void start_secondary(void)
WARN_ON_ONCE(!irqs_disabled());
mp_ops->smp_finish();
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
}
/*
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index a200b5bdbb8..c3abb88170f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1571,7 +1571,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
#ifdef CONFIG_64BIT
status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
- if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
+ if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
status_set |= ST0_XX;
if (cpu_has_dsp)
status_set |= ST0_MX;
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index 81f1dcfdcab..a64daee740e 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -90,12 +90,12 @@ int __mips_test_and_set_bit(unsigned long nr,
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
- unsigned long res;
+ int res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
- res = (mask & *a);
+ res = (mask & *a) != 0;
*a |= mask;
raw_local_irq_restore(flags);
return res;
@@ -116,12 +116,12 @@ int __mips_test_and_set_bit_lock(unsigned long nr,
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
- unsigned long res;
+ int res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
- res = (mask & *a);
+ res = (mask & *a) != 0;
*a |= mask;
raw_local_irq_restore(flags);
return res;
@@ -141,12 +141,12 @@ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
- unsigned long res;
+ int res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
- res = (mask & *a);
+ res = (mask & *a) != 0;
*a &= ~mask;
raw_local_irq_restore(flags);
return res;
@@ -166,12 +166,12 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
- unsigned long res;
+ int res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
- res = (mask & *a);
+ res = (mask & *a) != 0;
*a ^= mask;
raw_local_irq_restore(flags);
return res;
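
All four helpers in this file return int but previously computed the result in an unsigned long; on a 64-bit kernel, for bit numbers 32 to 63 the only set bit in res lives above bit 31, so the implicit conversion to int on return drops it and the test_and_* helpers could claim a set bit was clear. Normalizing with != 0, with res made an int, avoids that. A standalone demonstration of the truncation on a 64-bit build:

/* Demonstrates the truncation the patch fixes (64-bit unsigned long
 * assumed; bit 40 is set, yet the old-style return collapses to 0).
 */
#include <stdio.h>

static int old_style(unsigned long word, unsigned int bit)
{
	unsigned long res = word & (1UL << bit);	/* e.g. 0x0000010000000000 */

	return res;			/* converted to int: low 32 bits only -> 0 */
}

static int new_style(unsigned long word, unsigned int bit)
{
	return (word & (1UL << bit)) != 0;	/* always 0 or 1 */
}

int main(void)
{
	unsigned long word = 1UL << 40;

	printf("old: %d  new: %d\n", old_style(word, 40), new_style(word, 40));
	return 0;
}
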
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 507147aebd4..a6adffbb4e5 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -270,7 +270,7 @@ LEAF(csum_partial)
#endif
/* odd buffer alignment? */
-#ifdef CPU_MIPSR2
+#ifdef CONFIG_CPU_MIPSR2
wsbh v1, sum
movn sum, v1, t7
#else
@@ -670,7 +670,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc)
addu sum, v1
#endif
-#ifdef CPU_MIPSR2
+#ifdef CONFIG_CPU_MIPSR2
wsbh v1, sum
movn sum, v1, odd
#else
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index ecca559b8d7..2078915eacb 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1247,10 +1247,8 @@ static void __cpuinit setup_scache(void)
return;
default:
- if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
- c->isa_level == MIPS_CPU_ISA_M32R2 ||
- c->isa_level == MIPS_CPU_ISA_M64R1 ||
- c->isa_level == MIPS_CPU_ISA_M64R2) {
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
+ MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
if (mips_sc_init ()) {
scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 67929251286..3d0346dbccf 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -77,10 +77,9 @@ EXPORT_SYMBOL_GPL(empty_zero_page);
/*
* Not static inline because used by IP27 special magic initialization code
*/
-unsigned long setup_zero_pages(void)
+void setup_zero_pages(void)
{
- unsigned int order;
- unsigned long size;
+ unsigned int order, i;
struct page *page;
if (cpu_has_vce)
@@ -94,15 +93,10 @@ unsigned long setup_zero_pages(void)
page = virt_to_page((void *)empty_zero_page);
split_page(page, order);
- while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
- SetPageReserved(page);
- page++;
- }
-
- size = PAGE_SIZE << order;
- zero_page_mask = (size - 1) & PAGE_MASK;
+ for (i = 0; i < (1 << order); i++, page++)
+ mark_page_reserved(page);
- return 1UL << order;
+ zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
#ifdef CONFIG_MIPS_MT_SMTC
@@ -380,7 +374,7 @@ void __init mem_init(void)
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
totalram_pages += free_all_bootmem();
- totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
+ setup_zero_pages(); /* Setup zeroed pages. */
reservedpages = ram = 0;
for (tmp = 0; tmp < max_low_pfn; tmp++)
@@ -399,12 +393,8 @@ void __init mem_init(void)
SetPageReserved(page);
continue;
}
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalhigh_pages++;
+ free_highmem_page(page);
}
- totalram_pages += totalhigh_pages;
num_physpages += totalhigh_pages;
#endif
@@ -440,11 +430,8 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
struct page *page = pfn_to_page(pfn);
void *addr = phys_to_virt(PFN_PHYS(pfn));
- ClearPageReserved(page);
- init_page_count(page);
memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
- __free_page(page);
- totalram_pages++;
+ free_reserved_page(page);
}
printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
@@ -452,18 +439,14 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- free_init_pages("initrd memory",
- virt_to_phys((void *)start),
- virt_to_phys((void *)end));
+ free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
}
#endif
void __init_refok free_initmem(void)
{
prom_free_prom_memory();
- free_init_pages("unused kernel memory",
- __pa_symbol(&__init_begin),
- __pa_symbol(&__init_end));
+ free_initmem_default(POISON_FREE_INITMEM);
}
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 93d937b4b1b..df96da7e939 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -98,10 +98,8 @@ static inline int __init mips_sc_probe(void)
c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
/* Ignore anything but MIPSxx processors */
- if (c->isa_level != MIPS_CPU_ISA_M32R1 &&
- c->isa_level != MIPS_CPU_ISA_M32R2 &&
- c->isa_level != MIPS_CPU_ISA_M64R1 &&
- c->isa_level != MIPS_CPU_ISA_M64R2)
+ if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
+ MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)))
return 0;
/* Does this MIPS32/MIPS64 CPU have a config2 register? */
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c
index 38a80c83fd6..d1faece21b6 100644
--- a/arch/mips/pci/pci-alchemy.c
+++ b/arch/mips/pci/pci-alchemy.c
@@ -19,7 +19,7 @@
#include <asm/mach-au1x00/au1000.h>
#include <asm/tlbmisc.h>
-#ifdef CONFIG_DEBUG_PCI
+#ifdef CONFIG_PCI_DEBUG
#define DBG(x...) printk(KERN_DEBUG x)
#else
#define DBG(x...) do {} while (0)
@@ -162,7 +162,7 @@ static int config_access(unsigned char access_type, struct pci_bus *bus,
if (status & (1 << 29)) {
*data = 0xffffffff;
error = -1;
- DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d",
+ DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n",
access_type, bus->number, device);
} else if ((status >> 28) & 0xf) {
DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n",
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 0872f12f268..594e60d6a43 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -115,7 +115,6 @@ static void pcibios_scanbus(struct pci_controller *hose)
pci_bus_assign_resources(bus);
pci_enable_bridges(bus);
}
- bus->dev.of_node = hose->of_node;
}
}
@@ -169,6 +168,13 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
}
}
}
+
+struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
+{
+ struct pci_controller *hose = bus->sysdata;
+
+ return of_node_get(hose->of_node);
+}
#endif
static DEFINE_MUTEX(pci_scan_mutex);
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 3505d08ff2f..5f2bddb1860 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -457,7 +457,7 @@ void __init prom_free_prom_memory(void)
/* We got nothing to free here ... */
}
-extern unsigned long setup_zero_pages(void);
+extern void setup_zero_pages(void);
void __init paging_init(void)
{
@@ -492,7 +492,7 @@ void __init mem_init(void)
totalram_pages += free_all_bootmem_node(NODE_DATA(node));
}
- totalram_pages -= setup_zero_pages(); /* This comes from node 0 */
+ setup_zero_pages(); /* This comes from node 0 */
codesize = (unsigned long) &_etext - (unsigned long) &_text;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index b06c7360b1c..428da175d07 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -8,7 +8,7 @@ config MN10300
select HAVE_ARCH_KGDB
select GENERIC_ATOMIC64
select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select GENERIC_CLOCKEVENTS
select MODULES_USE_ELF_RELA
select OLD_SIGSUSPEND3
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index f90062b0622..224b4262486 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -165,8 +165,6 @@ void arch_release_thread_info(struct thread_info *ti);
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
-#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
-
#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index 84f4e97e307..2da39fb8b3b 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -50,77 +50,19 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
-#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
-/*
- * we use this if we don't have any better idle routine
- */
-static void default_idle(void)
-{
- local_irq_disable();
- if (!need_resched())
- safe_halt();
- else
- local_irq_enable();
-}
-
-#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
/*
* On SMP it's slightly faster (but much more power-consuming!)
* to poll the ->work.need_resched flag instead of waiting for the
* cross-CPU IPI to arrive. Use this option with caution.
+ *
+ * tglx: No idea why this depends on HOTPLUG_CPU !?!
*/
-static inline void poll_idle(void)
-{
- int oldval;
-
- local_irq_enable();
-
- /*
- * Deal with another CPU just having chosen a thread to
- * run here:
- */
- oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
-
- if (!oldval) {
- set_thread_flag(TIF_POLLING_NRFLAG);
- while (!need_resched())
- cpu_relax();
- clear_thread_flag(TIF_POLLING_NRFLAG);
- } else {
- set_need_resched();
- }
-}
-#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
-
-/*
- * the idle thread
- * - there's no useful work to be done, so just try to conserve power and have
- * a low exit latency (ie sit in a loop waiting for somebody to say that
- * they'd like to reschedule)
- */
-void cpu_idle(void)
+#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
+void arch_cpu_idle(void)
{
- /* endless idle loop with no priority at all */
- for (;;) {
- rcu_idle_enter();
- while (!need_resched()) {
- void (*idle)(void);
-
- smp_rmb();
- if (!idle) {
-#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
- idle = poll_idle;
-#else /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
- idle = default_idle;
-#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
- }
- idle();
- }
- rcu_idle_exit();
-
- schedule_preempt_disabled();
- }
+ safe_halt();
}
+#endif
void release_segments(struct mm_struct *mm)
{
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
index 5d7e152a23b..a17f9c9c14c 100644
--- a/arch/mn10300/kernel/smp.c
+++ b/arch/mn10300/kernel/smp.c
@@ -675,7 +675,7 @@ int __init start_secondary(void *unused)
#ifdef CONFIG_GENERIC_CLOCKEVENTS
init_clockevents();
#endif
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
return 0;
}
@@ -935,8 +935,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
int timeout;
#ifdef CONFIG_HOTPLUG_CPU
- if (num_online_cpus() == 1)
- disable_hlt();
if (sleep_mode[cpu])
run_wakeup_cpu(cpu);
#endif /* CONFIG_HOTPLUG_CPU */
@@ -1003,9 +1001,6 @@ int __cpu_disable(void)
void __cpu_die(unsigned int cpu)
{
run_sleep_cpu(cpu);
-
- if (num_online_cpus() == 1)
- enable_hlt();
}
#ifdef CONFIG_MN10300_CACHE_ENABLED
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index e57e5bc2356..5a8ace63a6b 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -139,30 +139,11 @@ void __init mem_init(void)
}
/*
- *
- */
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
- unsigned long addr;
-
- for (addr = begin; addr < end; addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- memset((void *) addr, 0xcc, PAGE_SIZE);
- free_page(addr);
- totalram_pages++;
- }
- printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
-}
-
-/*
* recycle memory containing stuff only required for initialisation
*/
void free_initmem(void)
{
- free_init_pages("unused kernel memory",
- (unsigned long) &__init_begin,
- (unsigned long) &__init_end);
+ free_initmem_default(POISON_FREE_INITMEM);
}
/*
@@ -171,6 +152,6 @@ void free_initmem(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- free_init_pages("initrd memory", start, end);
+ free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
}
#endif
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 014a6482ed4..9ab3bf2eca8 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -9,10 +9,9 @@ config OPENRISC
select OF_EARLY_FLATTREE
select IRQ_DOMAIN
select HAVE_MEMBLOCK
- select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARCH_REQUIRE_GPIOLIB
select HAVE_ARCH_TRACEHOOK
select HAVE_GENERIC_HARDIRQS
- select HAVE_VIRT_TO_BUS
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h
index 07f3212422a..d797acc901e 100644
--- a/arch/openrisc/include/asm/thread_info.h
+++ b/arch/openrisc/include/asm/thread_info.h
@@ -128,8 +128,6 @@ register struct thread_info *current_thread_info_reg asm("r10");
/* For OpenRISC, this is anything in the LSW other than syscall trace */
#define _TIF_WORK_MASK (0xff & ~(_TIF_SYSCALL_TRACE|_TIF_SINGLESTEP))
-#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
-
#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/openrisc/kernel/Makefile b/arch/openrisc/kernel/Makefile
index 35f92ce51c2..ec6d9d37cef 100644
--- a/arch/openrisc/kernel/Makefile
+++ b/arch/openrisc/kernel/Makefile
@@ -4,7 +4,7 @@
extra-y := head.o vmlinux.lds
-obj-y := setup.o idle.o or32_ksyms.o process.o dma.o \
+obj-y := setup.o or32_ksyms.o process.o dma.o \
traps.o time.o irq.o entry.o ptrace.o signal.o \
sys_call_table.o
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c
deleted file mode 100644
index 5e8a3b6d6bc..00000000000
--- a/arch/openrisc/kernel/idle.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * OpenRISC idle.c
- *
- * Linux architectural port borrowing liberally from similar works of
- * others. All original copyrights apply as per the original source
- * declaration.
- *
- * Modifications for the OpenRISC architecture:
- * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
- * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * Idle daemon for or32. Idle daemon will handle any action
- * that needs to be taken when the system becomes idle.
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/tick.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <asm/mmu.h>
-#include <asm/cache.h>
-#include <asm/pgalloc.h>
-
-void (*powersave) (void) = NULL;
-
-void cpu_idle(void)
-{
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- /* endless idle loop with no priority at all */
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
-
- while (!need_resched()) {
- check_pgt_cache();
- rmb();
-
- clear_thread_flag(TIF_POLLING_NRFLAG);
-
- local_irq_disable();
- /* Don't trace irqs off for idle */
- stop_critical_timings();
- if (!need_resched() && powersave != NULL)
- powersave();
- start_critical_timings();
- local_irq_enable();
- set_thread_flag(TIF_POLLING_NRFLAG);
- }
-
- rcu_idle_exit();
- tick_nohz_idle_exit();
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- }
-}
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index e7fdc50c4bf..b3cbc670383 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -43,6 +43,7 @@
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
+#include <asm/sections.h>
int mem_init_done;
@@ -201,9 +202,6 @@ void __init paging_init(void)
/* References to section boundaries */
-extern char _stext, _etext, _edata, __bss_start, _end;
-extern char __init_begin, __init_end;
-
static int __init free_pages_init(void)
{
int reservedpages, pfn;
@@ -263,30 +261,11 @@ void __init mem_init(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
- (end - start) >> 10);
-
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- }
+ free_reserved_area(start, end, 0, "initrd");
}
#endif
void free_initmem(void)
{
- unsigned long addr;
-
- addr = (unsigned long)(&__init_begin);
- for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
- printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
- ((unsigned long)&__init_end -
- (unsigned long)&__init_begin) >> 10);
+ free_initmem_default(0);
}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index a9ff712a286..0339181bf3a 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -21,7 +21,7 @@ config PARISC
select GENERIC_STRNCPY_FROM_USER
select SYSCTL_ARCH_UNALIGN_ALLOW
select HAVE_MOD_ARCH_SPECIFIC
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select MODULES_USE_ELF_RELA
select CLONE_BACKWARDS
select TTY # Needed for pdc_cons.c
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 01d95e2f058..113e2820650 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -65,8 +65,10 @@ ifndef CONFIG_FUNCTION_TRACER
endif
# Use long jumps instead of long branches (needed if your linker fails to
-# link a too big vmlinux executable)
-cflags-$(CONFIG_MLONGCALLS) += -mlong-calls
+# link a too big vmlinux executable). Not enabled for building modules.
+ifdef CONFIG_MLONGCALLS
+KBUILD_CFLAGS_KERNEL += -mlong-calls
+endif
# select which processor to optimise for
cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 79f694f3ad9..f0e2784e7cc 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -140,7 +140,10 @@ static inline void *kmap(struct page *page)
return page_address(page);
}
-#define kunmap(page) kunmap_parisc(page_address(page))
+static inline void kunmap(struct page *page)
+{
+ kunmap_parisc(page_address(page));
+}
static inline void *kmap_atomic(struct page *page)
{
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 7df49fad29f..1e40d7f86be 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -16,6 +16,8 @@
#include <asm/processor.h>
#include <asm/cache.h>
+extern spinlock_t pa_dbit_lock;
+
/*
* kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
* memory. For the return value to be meaningful, ADDR must be >=
@@ -44,8 +46,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
#define set_pte_at(mm, addr, ptep, pteval) \
do { \
+ unsigned long flags; \
+ spin_lock_irqsave(&pa_dbit_lock, flags); \
set_pte(ptep, pteval); \
purge_tlb_entries(mm, addr); \
+ spin_unlock_irqrestore(&pa_dbit_lock, flags); \
} while (0)
#endif /* !__ASSEMBLY__ */
@@ -435,48 +440,46 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
-#ifdef CONFIG_SMP
+ pte_t pte;
+ unsigned long flags;
+
if (!pte_young(*ptep))
return 0;
- return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
-#else
- pte_t pte = *ptep;
- if (!pte_young(pte))
+
+ spin_lock_irqsave(&pa_dbit_lock, flags);
+ pte = *ptep;
+ if (!pte_young(pte)) {
+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
return 0;
- set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
+ }
+ set_pte(ptep, pte_mkold(pte));
+ purge_tlb_entries(vma->vm_mm, addr);
+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
return 1;
-#endif
}
-extern spinlock_t pa_dbit_lock;
-
struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t old_pte;
+ unsigned long flags;
- spin_lock(&pa_dbit_lock);
+ spin_lock_irqsave(&pa_dbit_lock, flags);
old_pte = *ptep;
pte_clear(mm,addr,ptep);
- spin_unlock(&pa_dbit_lock);
+ purge_tlb_entries(mm, addr);
+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
return old_pte;
}
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
-#ifdef CONFIG_SMP
- unsigned long new, old;
-
- do {
- old = pte_val(*ptep);
- new = pte_val(pte_wrprotect(__pte (old)));
- } while (cmpxchg((unsigned long *) ptep, old, new) != old);
+ unsigned long flags;
+ spin_lock_irqsave(&pa_dbit_lock, flags);
+ set_pte(ptep, pte_wrprotect(*ptep));
purge_tlb_entries(mm, addr);
-#else
- pte_t old_pte = *ptep;
- set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
-#endif
+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
}
#define pte_same(A,B) (pte_val(A) == pte_val(B))
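
The pgtable.h changes above make every PTE update and its TLB purge an atomic unit under pa_dbit_lock with interrupts disabled. A minimal sketch of that pattern, reusing the names from the hunk (it mirrors the new set_pte_at() body rather than adding anything beyond it):

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/pgtable.h>

extern spinlock_t pa_dbit_lock;

static inline void sketch_locked_pte_update(struct mm_struct *mm, unsigned long addr,
					    pte_t *ptep, pte_t val)
{
	unsigned long flags;

	spin_lock_irqsave(&pa_dbit_lock, flags);	/* irqs off: PTE write + TLB purge stay paired */
	set_pte(ptep, val);				/* update the entry itself */
	purge_tlb_entries(mm, addr);			/* flush any stale translation for addr */
	spin_unlock_irqrestore(&pa_dbit_lock, flags);
}
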
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index d1fb79a36f3..6182832e5b6 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -77,8 +77,6 @@ struct thread_info {
#define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
_TIF_BLOCKSTEP)
-#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
-
#endif /* __KERNEL__ */
#endif /* _ASM_PARISC_THREAD_INFO_H */
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 4ba2c93770f..e0a82358517 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -181,30 +181,24 @@ struct exception_data {
#if !defined(CONFIG_64BIT)
#define __put_kernel_asm64(__val,ptr) do { \
- u64 __val64 = (u64)(__val); \
- u32 hi = (__val64) >> 32; \
- u32 lo = (__val64) & 0xffffffff; \
__asm__ __volatile__ ( \
"\n1:\tstw %2,0(%1)" \
- "\n2:\tstw %3,4(%1)\n\t" \
+ "\n2:\tstw %R2,4(%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
: "=r"(__pu_err) \
- : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
+ : "r"(ptr), "r"(__val), "0"(__pu_err) \
: "r1"); \
} while (0)
#define __put_user_asm64(__val,ptr) do { \
- u64 __val64 = (u64)(__val); \
- u32 hi = (__val64) >> 32; \
- u32 lo = (__val64) & 0xffffffff; \
__asm__ __volatile__ ( \
"\n1:\tstw %2,0(%%sr3,%1)" \
- "\n2:\tstw %3,4(%%sr3,%1)\n\t" \
+ "\n2:\tstw %R2,4(%%sr3,%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
: "=r"(__pu_err) \
- : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
+ : "r"(ptr), "r"(__val), "0"(__pu_err) \
: "r1"); \
} while (0)
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 4b12890642e..83ded26cad0 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -421,14 +421,11 @@ void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
/* Note: purge_tlb_entries can be called at startup with
no context. */
- /* Disable preemption while we play with %sr1. */
- preempt_disable();
- mtsp(mm->context, 1);
purge_tlb_start(flags);
+ mtsp(mm->context, 1);
pdtlb(addr);
pitlb(addr);
purge_tlb_end(flags);
- preempt_enable();
}
EXPORT_SYMBOL(purge_tlb_entries);
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 6795dc6c995..568b2c61ea0 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -120,11 +120,13 @@ extern void __ashrdi3(void);
extern void __ashldi3(void);
extern void __lshrdi3(void);
extern void __muldi3(void);
+extern void __ucmpdi2(void);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__ucmpdi2);
asmlinkage void * __canonicalize_funcptr_for_compare(void *);
EXPORT_SYMBOL(__canonicalize_funcptr_for_compare);
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index d13507246c5..55f92b61418 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -59,28 +59,6 @@
#include <asm/unwind.h>
#include <asm/sections.h>
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
-{
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- /* endless idle loop with no priority at all */
- while (1) {
- rcu_idle_enter();
- while (!need_resched())
- barrier();
- rcu_idle_exit();
- schedule_preempt_disabled();
- check_pgt_cache();
- }
-}
-
-
#define COMMAND_GLOBAL F_EXTEND(0xfffe0030)
#define CMD_RESET 5 /* reset any module */
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 6266730efd6..fd1bb1519c2 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -329,7 +329,7 @@ void __init smp_callin(void)
local_irq_enable(); /* Interrupts have been off until now */
- cpu_idle(); /* Wait for timer to schedule some work */
+ cpu_startup_entry(CPUHP_ONLINE);
/* NOTREACHED */
panic("smp_callin() AAAAaaaaahhhh....\n");
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile
index 5f2e6904d14..5651536ac73 100644
--- a/arch/parisc/lib/Makefile
+++ b/arch/parisc/lib/Makefile
@@ -2,6 +2,7 @@
# Makefile for parisc-specific library files
#
-lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o
+lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
+ ucmpdi2.o
obj-y := iomap.o
diff --git a/arch/parisc/lib/ucmpdi2.c b/arch/parisc/lib/ucmpdi2.c
new file mode 100644
index 00000000000..149c016f32c
--- /dev/null
+++ b/arch/parisc/lib/ucmpdi2.c
@@ -0,0 +1,25 @@
+#include <linux/module.h>
+
+union ull_union {
+ unsigned long long ull;
+ struct {
+ unsigned int high;
+ unsigned int low;
+ } ui;
+};
+
+int __ucmpdi2(unsigned long long a, unsigned long long b)
+{
+ union ull_union au = {.ull = a};
+ union ull_union bu = {.ull = b};
+
+ if (au.ui.high < bu.ui.high)
+ return 0;
+ else if (au.ui.high > bu.ui.high)
+ return 2;
+ if (au.ui.low < bu.ui.low)
+ return 0;
+ else if (au.ui.low > bu.ui.low)
+ return 2;
+ return 1;
+}
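
__ucmpdi2() is the libgcc-style helper the compiler emits for unsigned 64-bit comparisons on 32-bit parisc; its return convention is 0 for less-than, 1 for equal, 2 for greater-than. A purely illustrative caller (in practice gcc generates the calls, which is why only the export and the Makefile entry are needed):

int __ucmpdi2(unsigned long long a, unsigned long long b);

static int sketch_u64_less_than(unsigned long long a, unsigned long long b)
{
	return __ucmpdi2(a, b) == 0;	/* 0: a < b, 1: a == b, 2: a > b */
}
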
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 3ac462de53a..157b931e7b0 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -505,7 +505,6 @@ static void __init map_pages(unsigned long start_vaddr,
void free_initmem(void)
{
- unsigned long addr;
unsigned long init_begin = (unsigned long)__init_begin;
unsigned long init_end = (unsigned long)__init_end;
@@ -533,19 +532,10 @@ void free_initmem(void)
* pages are no-longer executable */
flush_icache_range(init_begin, init_end);
- for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- num_physpages++;
- totalram_pages++;
- }
+ num_physpages += free_initmem_default(0);
/* set up a new led state on systems shipped LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
-
- printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
- (init_end - init_begin) >> 10);
}
@@ -697,6 +687,8 @@ void show_mem(unsigned int filter)
printk(KERN_INFO "Mem-info:\n");
show_free_areas(filter);
+ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
+ return;
#ifndef CONFIG_DISCONTIGMEM
i = max_mapnr;
while (i-- > 0) {
@@ -1107,15 +1099,6 @@ void flush_tlb_all(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (start >= end)
- return;
- printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- num_physpages++;
- totalram_pages++;
- }
+ num_physpages += free_reserved_area(start, end, 0, "initrd");
}
#endif
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b89d7eb730a..ea5bb045983 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -90,6 +90,7 @@ config GENERIC_GPIO
config PPC
bool
default y
+ select BINFMT_ELF
select OF
select OF_EARLY_FLATTREE
select HAVE_FTRACE_MCOUNT_RECORD
@@ -98,7 +99,7 @@ config PPC
select HAVE_FUNCTION_GRAPH_TRACER
select SYSCTL_EXCEPTION_TRACE
select ARCH_WANT_OPTIONAL_GPIOLIB
- select HAVE_VIRT_TO_BUS if !PPC64
+ select VIRT_TO_BUS if !PPC64
select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_EFFICIENT_UNALIGNED_ACCESS
diff --git a/arch/powerpc/crypto/sha1-powerpc-asm.S b/arch/powerpc/crypto/sha1-powerpc-asm.S
index a5f8264d2d3..125e1652006 100644
--- a/arch/powerpc/crypto/sha1-powerpc-asm.S
+++ b/arch/powerpc/crypto/sha1-powerpc-asm.S
@@ -113,7 +113,7 @@
STEPUP4((t)+16, fn)
_GLOBAL(powerpc_sha_transform)
- PPC_STLU r1,-STACKFRAMESIZE(r1)
+ PPC_STLU r1,-INT_FRAME_SIZE(r1)
SAVE_8GPRS(14, r1)
SAVE_10GPRS(22, r1)
@@ -175,5 +175,5 @@ _GLOBAL(powerpc_sha_transform)
REST_8GPRS(14, r1)
REST_10GPRS(22, r1)
- addi r1,r1,STACKFRAMESIZE
+ addi r1,r1,INT_FRAME_SIZE
blr
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index ef918a2328b..08bd299c75b 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -52,8 +52,6 @@
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
-#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
-
/* Macro for generating the ***_bits() functions */
#define DEFINE_BITOP(fn, op, prefix, postfix) \
static __inline__ void fn(unsigned long mask, \
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 62e11a32c4c..4fcbd6b14a3 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -3,6 +3,7 @@
#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
extern struct kmem_cache *hugepte_cache;
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 2fdb47a19ef..b59e06f507e 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -343,17 +343,16 @@ extern void slb_set_size(u16 size);
/*
* VSID allocation (256MB segment)
*
- * We first generate a 38-bit "proto-VSID". For kernel addresses this
- * is equal to the ESID | 1 << 37, for user addresses it is:
- * (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1)
+ * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
+ * from mmu context id and effective segment id of the address.
*
- * This splits the proto-VSID into the below range
- * 0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range
- * 2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range
- *
- * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1
- * That is, we assign half of the space to user processes and half
- * to the kernel.
+ * For user processes max context id is limited to ((1ul << 19) - 5)
+ * for kernel space, we use the top 4 context ids to map address as below
+ * NOTE: each context only supports 64TB now.
+ * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
+ * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
+ * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
+ * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
*
* The proto-VSIDs are then scrambled into real VSIDs with the
* multiplicative hash:
@@ -363,41 +362,49 @@ extern void slb_set_size(u16 size);
* VSID_MULTIPLIER is prime, so in particular it is
* co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
* Because the modulus is 2^n-1 we can compute it efficiently without
- * a divide or extra multiply (see below).
- *
- * This scheme has several advantages over older methods:
- *
- * - We have VSIDs allocated for every kernel address
- * (i.e. everything above 0xC000000000000000), except the very top
- * segment, which simplifies several things.
+ * a divide or extra multiply (see below). The scramble function gives
+ * robust scattering in the hash table (at least based on some initial
+ * results).
*
- * - We allow for USER_ESID_BITS significant bits of ESID and
- * CONTEXT_BITS bits of context for user addresses.
- * i.e. 64T (46 bits) of address space for up to half a million contexts.
+ * We also consider VSID 0 special. We use VSID 0 for slb entries mapping
+ * bad address. This enables us to consolidate bad address handling in
+ * hash_page.
*
- * - The scramble function gives robust scattering in the hash
- * table (at least based on some initial results). The previous
- * method was more susceptible to pathological cases giving excessive
- * hash collisions.
+ * We also need to avoid the last segment of the last context, because that
+ * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
+ * because of the modulo operation in vsid scramble. But the vmemmap
+ * (which is what uses region 0xf) will never be close to 64TB in size
+ * (it's 56 bytes per page of system memory).
*/
+#define CONTEXT_BITS 19
+#define ESID_BITS 18
+#define ESID_BITS_1T 6
+
+/*
+ * 256MB segment
+ * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
+ * available for user + kernel mapping. The top 4 contexts are used for
+ * kernel mapping. Each segment contains 2^28 bytes. Each
+ * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
+ * (19 == 37 + 28 - 46).
+ */
+#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5)
+
/*
* This should be computed such that protovosid * vsid_mulitplier
* doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
*/
#define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */
-#define VSID_BITS_256M 38
+#define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)
#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
-#define VSID_BITS_1T 26
+#define VSID_BITS_1T (CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)
-#define CONTEXT_BITS 19
-#define USER_ESID_BITS 18
-#define USER_ESID_BITS_1T 6
-#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
+#define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))
/*
* This macro generates asm code to compute the VSID scramble
@@ -421,7 +428,8 @@ extern void slb_set_size(u16 size);
srdi rx,rt,VSID_BITS_##size; \
clrldi rt,rt,(64-VSID_BITS_##size); \
add rt,rt,rx; /* add high and low bits */ \
- /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
+ /* NOTE: explanation based on VSID_BITS_##size = 36 \
+ * Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
* 2^36-1+2^28-1. That in particular means that if r3 >= \
* 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \
* the bit clear, r3 already has the answer we want, if it \
@@ -513,34 +521,6 @@ typedef struct {
})
#endif /* 1 */
-/*
- * This is only valid for addresses >= PAGE_OFFSET
- * The proto-VSID space is divided into two class
- * User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1
- * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1
- *
- * With KERNEL_START at 0xc000000000000000, the proto vsid for
- * the kernel ends up with 0xc00000000 (36 bits). With 64TB
- * support we need to have kernel proto-VSID in the
- * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
- */
-static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
-{
- unsigned long proto_vsid;
- /*
- * We need to make sure proto_vsid for the kernel is
- * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
- */
- if (ssize == MMU_SEGSIZE_256M) {
- proto_vsid = ea >> SID_SHIFT;
- proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
- return vsid_scramble(proto_vsid, 256M);
- }
- proto_vsid = ea >> SID_SHIFT_1T;
- proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
- return vsid_scramble(proto_vsid, 1T);
-}
-
/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
@@ -550,17 +530,41 @@ static inline int user_segment_size(unsigned long addr)
return MMU_SEGSIZE_256M;
}
-/* This is only valid for user addresses (which are below 2^44) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
int ssize)
{
+ /*
+ * Bad address. We return VSID 0 for that
+ */
+ if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
+ return 0;
+
if (ssize == MMU_SEGSIZE_256M)
- return vsid_scramble((context << USER_ESID_BITS)
+ return vsid_scramble((context << ESID_BITS)
| (ea >> SID_SHIFT), 256M);
- return vsid_scramble((context << USER_ESID_BITS_1T)
+ return vsid_scramble((context << ESID_BITS_1T)
| (ea >> SID_SHIFT_1T), 1T);
}
+/*
+ * This is only valid for addresses >= PAGE_OFFSET
+ *
+ * For kernel space, we use the top 4 context ids to map address as below
+ * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
+ * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
+ * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
+ * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
+ */
+static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
+{
+ unsigned long context;
+
+ /*
+ * the kernel takes the top 4 contexts from the available range
+ */
+ context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
+ return get_vsid(context, ea, ssize);
+}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_MMU_HASH64_H_ */
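
The new get_kernel_vsid() no longer ORs a high bit into the proto-VSID; it derives a per-region context id from the top nibble of the effective address. A small userspace-style check of that arithmetic, using the MAX_USER_CONTEXT value defined in the hunk above (illustration only, not kernel code):

#include <stdio.h>

#define SKETCH_MAX_USER_CONTEXT ((1UL << 19) - 5)	/* 0x7fffb, as in the hunk above */

static unsigned long sketch_kernel_context(unsigned long long ea)
{
	/* context = MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1 */
	return SKETCH_MAX_USER_CONTEXT + (unsigned long)((ea >> 60) - 0xc) + 1;
}

int main(void)
{
	/* Prints 7fffc 7fffd 7fffe 7ffff, matching the four kernel ranges listed above. */
	printf("%lx %lx %lx %lx\n",
	       sketch_kernel_context(0xc000000000000000ULL),
	       sketch_kernel_context(0xd000000000000000ULL),
	       sketch_kernel_context(0xe000000000000000ULL),
	       sketch_kernel_context(0xf000000000000000ULL));
	return 0;
}
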
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e6658612203..c9c67fc888c 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -266,7 +266,8 @@
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
-#define FSCR_TAR (1<<8) /* Enable Target Adress Register */
+#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
+#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 (1ul << (63-0))
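
The FSCR_TAR rewrite is purely notational: 1 << (63-55) equals the old 1 << 8, and the new FSCR_DSCR bit is 1 << (63-61), i.e. 1 << 2. The 63-n form matches the IBM big-endian bit numbering already used for LPCR_VPM0 above. A trivial check of the arithmetic:

#include <assert.h>

int main(void)
{
	assert((1UL << (63 - 55)) == (1UL << 8));	/* FSCR_TAR: IBM bit 55 == value bit 8 */
	assert((1UL << (63 - 61)) == (1UL << 2));	/* FSCR_DSCR: IBM bit 61 == value bit 2 */
	return 0;
}
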
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 535b6d8a41c..ebbec52d21b 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -358,3 +358,4 @@ SYSCALL_SPU(setns)
COMPAT_SYS(process_vm_readv)
COMPAT_SYS(process_vm_writev)
SYSCALL(finit_module)
+SYSCALL(ni_syscall) /* sys_kcmp */
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 406b7b9a134..8ceea14d6fe 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -182,8 +182,6 @@ static inline bool test_thread_local_flags(unsigned int flags)
#define is_32bit_task() (1)
#endif
-#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
-
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index f25b5c45c43..1487f0f1229 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define __NR_syscalls 354
+#define __NR_syscalls 355
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index b532060d091..23016020915 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -51,4 +51,5 @@ extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
#endif /* _ASM_UPROBES_H */
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 8c478c6c6b1..74cb4d72d67 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -376,6 +376,7 @@
#define __NR_process_vm_readv 351
#define __NR_process_vm_writev 352
#define __NR_finit_module 353
+#define __NR_kcmp 354
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index d29facbf9a2..ea847abb0d0 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -48,6 +48,7 @@ _GLOBAL(__restore_cpu_power7)
_GLOBAL(__setup_cpu_power8)
mflr r11
+ bl __init_FSCR
bl __init_hvmode_206
mtlr r11
beqlr
@@ -56,13 +57,13 @@ _GLOBAL(__setup_cpu_power8)
mfspr r3,SPRN_LPCR
oris r3, r3, LPCR_AIL_3@h
bl __init_LPCR
- bl __init_FSCR
bl __init_TLB
mtlr r11
blr
_GLOBAL(__restore_cpu_power8)
mflr r11
+ bl __init_FSCR
mfmsr r3
rldicl. r0,r3,4,63
beqlr
@@ -115,7 +116,7 @@ __init_LPCR:
__init_FSCR:
mfspr r3,SPRN_FSCR
- ori r3,r3,FSCR_TAR
+ ori r3,r3,FSCR_TAR|FSCR_DSCR
mtspr SPRN_FSCR,r3
blr
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 75a3d71b895..19599ef352b 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -275,7 +275,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTRS_PPC970,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index b3ba5163eae..9ec3fe174cb 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -150,10 +150,7 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
continue;
- ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
- init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
- free_page((unsigned long)__va(addr));
- totalram_pages++;
+ free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
}
}
#endif
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 256c5bf0adb..04d69c4a5ac 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -304,7 +304,7 @@ syscall_exit_work:
subi r12,r12,TI_FLAGS
4: /* Anything else left to do? */
- SET_DEFAULT_THREAD_PPR(r3, r9) /* Set thread.ppr = 3 */
+ SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */
andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
beq .ret_from_except_lite
@@ -657,7 +657,7 @@ resume_kernel:
/* Clear _TIF_EMULATE_STACK_STORE flag */
lis r11,_TIF_EMULATE_STACK_STORE@h
addi r5,r9,TI_FLAGS
- ldarx r4,0,r5
+0: ldarx r4,0,r5
andc r4,r4,r11
stdcx. r4,0,r5
bne- 0b
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index f3eab8594d9..d44a571e45a 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -23,8 +23,10 @@
#include <asm/code-patching.h>
#include <asm/machdep.h>
+#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
extern void epapr_ev_idle(void);
extern u32 epapr_ev_idle_start[];
+#endif
bool epapr_paravirt_enabled;
@@ -47,11 +49,15 @@ static int __init epapr_paravirt_init(void)
for (i = 0; i < (len / 4); i++) {
patch_instruction(epapr_hypercall_start + i, insts[i]);
+#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
patch_instruction(epapr_ev_idle_start + i, insts[i]);
+#endif
}
+#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
if (of_get_property(hyper_node, "has-idle", NULL))
ppc_md.power_save = epapr_ev_idle;
+#endif
epapr_paravirt_enabled = true;
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a8a5361fb70..56bd92362ce 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -74,13 +74,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
mflr r10 ; \
ld r12,PACAKBASE(r13) ; \
LOAD_HANDLER(r12, system_call_entry_direct) ; \
- mtlr r12 ; \
+ mtctr r12 ; \
mfspr r12,SPRN_SRR1 ; \
/* Re-use of r13... No spare regs to do this */ \
li r13,MSR_RI ; \
mtmsrd r13,1 ; \
GET_PACA(r13) ; /* get r13 back */ \
- blr ;
+ bctr ;
#else
/* We can branch directly */
#define SYSCALL_PSERIES_2_DIRECT \
@@ -1066,78 +1066,6 @@ unrecov_user_slb:
#endif /* __DISABLED__ */
-/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r12 contain the saved SRR1, SRR0 is still ready for return
- * r3 has the faulting address
- * r9 - r13 are saved in paca->exslb.
- * r3 is saved in paca->slb_r3
- * We assume we aren't going to take any exceptions during this procedure.
- */
-_GLOBAL(slb_miss_realmode)
- mflr r10
-#ifdef CONFIG_RELOCATABLE
- mtctr r11
-#endif
-
- stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
- std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
-
- bl .slb_allocate_realmode
-
- /* All done -- return from exception. */
-
- ld r10,PACA_EXSLB+EX_LR(r13)
- ld r3,PACA_EXSLB+EX_R3(r13)
- lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
-
- mtlr r10
-
- andi. r10,r12,MSR_RI /* check for unrecoverable exception */
- beq- 2f
-
-.machine push
-.machine "power4"
- mtcrf 0x80,r9
- mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
-.machine pop
-
- RESTORE_PPR_PACA(PACA_EXSLB, r9)
- ld r9,PACA_EXSLB+EX_R9(r13)
- ld r10,PACA_EXSLB+EX_R10(r13)
- ld r11,PACA_EXSLB+EX_R11(r13)
- ld r12,PACA_EXSLB+EX_R12(r13)
- ld r13,PACA_EXSLB+EX_R13(r13)
- rfid
- b . /* prevent speculative execution */
-
-2: mfspr r11,SPRN_SRR0
- ld r10,PACAKBASE(r13)
- LOAD_HANDLER(r10,unrecov_slb)
- mtspr SPRN_SRR0,r10
- ld r10,PACAKMSR(r13)
- mtspr SPRN_SRR1,r10
- rfid
- b .
-
-unrecov_slb:
- EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
- DISABLE_INTS
- bl .save_nvgprs
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- bl .unrecoverable_exception
- b 1b
-
-
-#ifdef CONFIG_PPC_970_NAP
-power4_fixup_nap:
- andc r9,r9,r10
- std r9,TI_LOCAL_FLAGS(r11)
- ld r10,_LINK(r1) /* make idle task do the */
- std r10,_NIP(r1) /* equivalent of a blr */
- blr
-#endif
-
.align 7
.globl alignment_common
alignment_common:
@@ -1336,6 +1264,78 @@ _GLOBAL(opal_mc_secondary_handler)
/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r12 contain the saved SRR1, SRR0 is still ready for return
+ * r3 has the faulting address
+ * r9 - r13 are saved in paca->exslb.
+ * r3 is saved in paca->slb_r3
+ * We assume we aren't going to take any exceptions during this procedure.
+ */
+_GLOBAL(slb_miss_realmode)
+ mflr r10
+#ifdef CONFIG_RELOCATABLE
+ mtctr r11
+#endif
+
+ stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
+ std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
+
+ bl .slb_allocate_realmode
+
+ /* All done -- return from exception. */
+
+ ld r10,PACA_EXSLB+EX_LR(r13)
+ ld r3,PACA_EXSLB+EX_R3(r13)
+ lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
+
+ mtlr r10
+
+ andi. r10,r12,MSR_RI /* check for unrecoverable exception */
+ beq- 2f
+
+.machine push
+.machine "power4"
+ mtcrf 0x80,r9
+ mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
+.machine pop
+
+ RESTORE_PPR_PACA(PACA_EXSLB, r9)
+ ld r9,PACA_EXSLB+EX_R9(r13)
+ ld r10,PACA_EXSLB+EX_R10(r13)
+ ld r11,PACA_EXSLB+EX_R11(r13)
+ ld r12,PACA_EXSLB+EX_R12(r13)
+ ld r13,PACA_EXSLB+EX_R13(r13)
+ rfid
+ b . /* prevent speculative execution */
+
+2: mfspr r11,SPRN_SRR0
+ ld r10,PACAKBASE(r13)
+ LOAD_HANDLER(r10,unrecov_slb)
+ mtspr SPRN_SRR0,r10
+ ld r10,PACAKMSR(r13)
+ mtspr SPRN_SRR1,r10
+ rfid
+ b .
+
+unrecov_slb:
+ EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
+ DISABLE_INTS
+ bl .save_nvgprs
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unrecoverable_exception
+ b 1b
+
+
+#ifdef CONFIG_PPC_970_NAP
+power4_fixup_nap:
+ andc r9,r9,r10
+ std r9,TI_LOCAL_FLAGS(r11)
+ ld r10,_LINK(r1) /* make idle task do the */
+ std r10,_NIP(r1) /* equivalent of a blr */
+ blr
+#endif
+
+/*
* Hash table stuff
*/
.align 7
@@ -1452,20 +1452,36 @@ do_ste_alloc:
_GLOBAL(do_stab_bolted)
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
+ mfspr r11,SPRN_DAR /* ea */
+ /*
+ * check for bad kernel/user address
+ * (ea & ~REGION_MASK) >= PGTABLE_RANGE
+ */
+ rldicr. r9,r11,4,(63 - 46 - 4)
+ li r9,0 /* VSID = 0 for bad address */
+ bne- 0f
+
+ /*
+ * Calculate VSID:
+ * This is the kernel vsid, we take the top for context from
+ * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+ * Here we know that (ea >> 60) == 0xc
+ */
+ lis r9,(MAX_USER_CONTEXT + 1)@ha
+ addi r9,r9,(MAX_USER_CONTEXT + 1)@l
+
+ srdi r10,r11,SID_SHIFT
+ rldimi r10,r9,ESID_BITS,0 /* proto vsid */
+ ASM_VSID_SCRAMBLE(r10, r9, 256M)
+ rldic r9,r10,12,16 /* r9 = vsid << 12 */
+
+0:
/* Hash to the primary group */
ld r10,PACASTABVIRT(r13)
- mfspr r11,SPRN_DAR
- srdi r11,r11,28
+ srdi r11,r11,SID_SHIFT
rldimi r10,r11,7,52 /* r10 = first ste of the group */
- /* Calculate VSID */
- /* This is a kernel address, so protovsid = ESID | 1 << 37 */
- li r9,0x1
- rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
- ASM_VSID_SCRAMBLE(r11, r9, 256M)
- rldic r9,r11,12,16 /* r9 = vsid << 12 */
-
/* Search the primary group for a free entry */
1: ld r11,0(r10) /* Test valid bit of the current ste */
andi. r11,r11,0x80
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 06c8202a69c..2230fd0ca3e 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -1045,10 +1045,7 @@ static void fadump_release_memory(unsigned long begin, unsigned long end)
if (addr <= ra_end && ((addr + PAGE_SIZE) > ra_start))
continue;
- ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
- init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
- free_page((unsigned long)__va(addr));
- totalram_pages++;
+ free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
}
}
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index ea78761aa16..939ea7ef0dc 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -33,11 +33,6 @@
#include <asm/runlatch.h>
#include <asm/smp.h>
-#ifdef CONFIG_HOTPLUG_CPU
-#define cpu_should_die() cpu_is_offline(smp_processor_id())
-#else
-#define cpu_should_die() 0
-#endif
unsigned long cpuidle_disable = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(cpuidle_disable);
@@ -50,64 +45,38 @@ static int __init powersave_off(char *arg)
}
__setup("powersave=off", powersave_off);
-/*
- * The body of the idle task.
- */
-void cpu_idle(void)
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
{
- set_thread_flag(TIF_POLLING_NRFLAG);
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
-
- while (!need_resched() && !cpu_should_die()) {
- ppc64_runlatch_off();
-
- if (ppc_md.power_save) {
- clear_thread_flag(TIF_POLLING_NRFLAG);
- /*
- * smp_mb is so clearing of TIF_POLLING_NRFLAG
- * is ordered w.r.t. need_resched() test.
- */
- smp_mb();
- local_irq_disable();
-
- /* Don't trace irqs off for idle */
- stop_critical_timings();
-
- /* check again after disabling irqs */
- if (!need_resched() && !cpu_should_die())
- ppc_md.power_save();
-
- start_critical_timings();
-
- /* Some power_save functions return with
- * interrupts enabled, some don't.
- */
- if (irqs_disabled())
- local_irq_enable();
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- } else {
- /*
- * Go into low thread priority and possibly
- * low power mode.
- */
- HMT_low();
- HMT_very_low();
- }
- }
+ sched_preempt_enable_no_resched();
+ cpu_die();
+}
+#endif
- HMT_medium();
- ppc64_runlatch_on();
- rcu_idle_exit();
- tick_nohz_idle_exit();
- if (cpu_should_die()) {
- sched_preempt_enable_no_resched();
- cpu_die();
- }
- schedule_preempt_disabled();
+void arch_cpu_idle(void)
+{
+ ppc64_runlatch_off();
+
+ if (ppc_md.power_save) {
+ ppc_md.power_save();
+ /*
+ * Some power_save functions return with
+ * interrupts enabled, some don't.
+ */
+ if (irqs_disabled())
+ local_irq_enable();
+ } else {
+ local_irq_enable();
+ /*
+ * Go into low thread priority and possibly
+ * low power mode.
+ */
+ HMT_low();
+ HMT_very_low();
}
+
+ HMT_medium();
+ ppc64_runlatch_on();
}
int powersave_nap;
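
The idle.c rewrite deletes the arch-private cpu_idle() loop and keeps only the hooks: arch_cpu_idle() does the actual low-power wait and arch_cpu_idle_dead() handles offlined CPUs, while the tick/RCU/reschedule bookkeeping the old loop carried moves into the generic idle code that calls these hooks. A standalone toy sketch of that split; every name here is an illustrative stand-in, not the generic scheduler implementation:

#include <stdbool.h>
#include <stdio.h>

static int sketch_wakeups_left = 3;		/* pretend three interrupts arrive before work does */

static bool sketch_need_resched(void)
{
	return sketch_wakeups_left == 0;
}

static void sketch_arch_cpu_idle(void)		/* arch side: *how* to wait (nap, HMT_low, ...) */
{
	printf("arch hook: low-power wait\n");
	sketch_wakeups_left--;
}

int main(void)					/* generic side: *when* to wait and when to schedule */
{
	while (!sketch_need_resched())
		sketch_arch_cpu_idle();
	printf("generic loop: reschedule\n");
	return 0;
}
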
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index a61b133c4f9..6782221d49b 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -756,12 +756,7 @@ static __init void kvm_free_tmp(void)
end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;
/* Free the tmp space we don't need */
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- }
+ free_reserved_area(start, end, 0, NULL);
}
static int __init kvm_guest_init(void)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 59dd545fdde..16e77a81ab4 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -555,10 +555,12 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
new->thread.regs->msr |=
(MSR_FP | new->thread.fpexc_mode);
}
+#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
do_load_up_transact_altivec(&new->thread);
new->thread.regs->msr |= MSR_VEC;
}
+#endif
/* We may as well turn on VSX too since all the state is restored now */
if (msr & MSR_VSX)
new->thread.regs->msr |= MSR_VSX;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 7f7fb7fd991..13f8d168b3f 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2832,11 +2832,13 @@ static void unreloc_toc(void)
{
}
#else
-static void __reloc_toc(void *tocstart, unsigned long offset,
- unsigned long nr_entries)
+static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
{
unsigned long i;
- unsigned long *toc_entry = (unsigned long *)tocstart;
+ unsigned long *toc_entry;
+
+ /* Get the start of the TOC by using r2 directly. */
+ asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
for (i = 0; i < nr_entries; i++) {
*toc_entry = *toc_entry + offset;
@@ -2850,8 +2852,7 @@ static void reloc_toc(void)
unsigned long nr_entries =
(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
- /* Need to add offset to get at __prom_init_toc_start */
- __reloc_toc(__prom_init_toc_start + offset, offset, nr_entries);
+ __reloc_toc(offset, nr_entries);
mb();
}
@@ -2864,8 +2865,7 @@ static void unreloc_toc(void)
mb();
- /* __prom_init_toc_start has been relocated, no need to add offset */
- __reloc_toc(__prom_init_toc_start, -offset, nr_entries);
+ __reloc_toc(-offset, nr_entries);
}
#endif
#endif
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 245c1b6a085..f9b30c68ba4 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1428,6 +1428,7 @@ static long ppc_set_hwdebug(struct task_struct *child,
brk.address = bp_info->addr & ~7UL;
brk.type = HW_BRK_TYPE_TRANSLATE;
+ brk.len = 8;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
brk.type |= HW_BRK_TYPE_READ;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 3acb28e245b..95068bf569a 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -866,10 +866,12 @@ static long restore_tm_user_regs(struct pt_regs *regs,
do_load_up_transact_fpu(&current->thread);
regs->msr |= (MSR_FP | current->thread.fpexc_mode);
}
+#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
do_load_up_transact_altivec(&current->thread);
regs->msr |= MSR_VEC;
}
+#endif
return 0;
}
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 995f8543cb5..c1794286098 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -522,10 +522,12 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
do_load_up_transact_fpu(&current->thread);
regs->msr |= (MSR_FP | current->thread.fpexc_mode);
}
+#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
do_load_up_transact_altivec(&current->thread);
regs->msr |= MSR_VEC;
}
+#endif
return err;
}
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 76bd9da8cb7..ee7ac5e6e28 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -669,7 +669,7 @@ __cpuinit void start_secondary(void *unused)
local_irq_enable();
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
BUG();
}
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 84dbace657c..2da67e7a16d 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -309,6 +309,7 @@ _GLOBAL(tm_recheckpoint)
or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */
mtmsr r5
+#ifdef CONFIG_ALTIVEC
/* FP and VEC registers: These are recheckpointed from thread.fpr[]
* and thread.vr[] respectively. The thread.transact_fpr[] version
* is more modern, and will be loaded subsequently by any FPUnavailable
@@ -323,6 +324,7 @@ _GLOBAL(tm_recheckpoint)
REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */
ld r5, THREAD_VRSAVE(r3)
mtspr SPRN_VRSAVE, r5
+#endif
dont_restore_vec:
andi. r0, r4, MSR_FP
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index f9748498fe5..13b86709349 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -156,15 +156,13 @@ static struct console udbg_console = {
.index = 0,
};
-static int early_console_initialized;
-
/*
* Called by setup_system after ppc_md->probe and ppc_md->early_init.
* Call it again after setting udbg_putc in ppc_md->setup_arch.
*/
void __init register_early_udbg_console(void)
{
- if (early_console_initialized)
+ if (early_console)
return;
if (!udbg_putc)
@@ -174,7 +172,7 @@ void __init register_early_udbg_console(void)
printk(KERN_INFO "early console immortal !\n");
udbg_console.flags &= ~CON_BOOT;
}
- early_console_initialized = 1;
+ early_console = &udbg_console;
register_console(&udbg_console);
}
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index bc77834dbf4..59f419b935f 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -31,6 +31,16 @@
#define UPROBE_TRAP_NR UINT_MAX
/**
+ * is_trap_insn - check if the instruction is a trap variant
+ * @insn: instruction to be checked.
+ * Returns true if @insn is a trap variant.
+ */
+bool is_trap_insn(uprobe_opcode_t *insn)
+{
+ return (is_trap(*insn));
+}
+
+/**
* arch_uprobe_analyze_insn
* @mm: the probed address space.
* @arch_uprobe: the probepoint information.
@@ -43,12 +53,6 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
if (addr & 0x03)
return -EINVAL;
- /*
- * We currently don't support a uprobe on an already
- * existing breakpoint instruction underneath
- */
- if (is_trap(auprobe->ainsn))
- return -ENOTSUPP;
return 0;
}
@@ -188,3 +192,16 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
return false;
}
+
+unsigned long
+arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
+{
+ unsigned long orig_ret_vaddr;
+
+ orig_ret_vaddr = regs->link;
+
+ /* Replace the return addr with trampoline addr */
+ regs->link = trampoline_vaddr;
+
+ return orig_ret_vaddr;
+}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index ead58e31729..5d7d29a313e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
vcpu3s->context_id[0] = err;
vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
- << USER_ESID_BITS) - 1;
- vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
+ << ESID_BITS) - 1;
+ vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS;
vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
kvmppc_mmu_hpte_init(vcpu);
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index 41cefd43655..33db48a8ce2 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -26,17 +26,20 @@
#define E500_PID_NUM 3
#define E500_TLB_NUM 2
-#define E500_TLB_VALID 1
-#define E500_TLB_BITMAP 2
+/* entry is mapped somewhere in host TLB */
+#define E500_TLB_VALID (1 << 0)
+/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
+#define E500_TLB_BITMAP (1 << 1)
+/* TLB1 entry is mapped by host TLB0 */
#define E500_TLB_TLB0 (1 << 2)
struct tlbe_ref {
- pfn_t pfn;
- unsigned int flags; /* E500_TLB_* */
+ pfn_t pfn; /* valid only for TLB0, except briefly */
+ unsigned int flags; /* E500_TLB_* */
};
struct tlbe_priv {
- struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
+ struct tlbe_ref ref;
};
#ifdef CONFIG_KVM_E500V2
@@ -63,17 +66,6 @@ struct kvmppc_vcpu_e500 {
unsigned int gtlb_nv[E500_TLB_NUM];
- /*
- * information associated with each host TLB entry --
- * TLB1 only for now. If/when guest TLB1 entries can be
- * mapped with host TLB0, this will be used for that too.
- *
- * We don't want to use this for guest TLB0 because then we'd
- * have the overhead of doing the translation again even if
- * the entry is still in the guest TLB (e.g. we swapped out
- * and back, and our host TLB entries got evicted).
- */
- struct tlbe_ref *tlb_refs[E500_TLB_NUM];
unsigned int host_tlb1_nv;
u32 svr;
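
The e500.h hunk makes the flag values explicit single-bit masks, so a tlbe_ref can carry E500_TLB_VALID together with E500_TLB_BITMAP or E500_TLB_TLB0 -- which the later "ref->flags |= E500_TLB_VALID" change in e500_mmu_host.c depends on. A minimal illustration with the values from the diff:

#define SKETCH_E500_TLB_VALID	(1 << 0)	/* mapped somewhere in the host TLB */
#define SKETCH_E500_TLB_BITMAP	(1 << 1)	/* TLB1 entry tracked via the bitmap */
#define SKETCH_E500_TLB_TLB0	(1 << 2)	/* TLB1 entry mapped through host TLB0 */

int main(void)
{
	unsigned int flags = 0;

	flags |= SKETCH_E500_TLB_VALID;		/* the entry is mapped ...             */
	flags |= SKETCH_E500_TLB_BITMAP;	/* ... and recorded in the TLB1 bitmap */

	/* Non-overlapping masks keep both facts independently testable. */
	return (flags & SKETCH_E500_TLB_VALID) && (flags & SKETCH_E500_TLB_BITMAP) ? 0 : 1;
}
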
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index a222edfb9a9..1c6a9d729df 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
/* Don't bother with unmapped entries */
- if (!(ref->flags & E500_TLB_VALID))
- return;
+ if (!(ref->flags & E500_TLB_VALID)) {
+ WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
+ "%s: flags %x\n", __func__, ref->flags);
+ WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
+ }
if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
@@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
pfn_t pfn)
{
ref->pfn = pfn;
- ref->flags = E500_TLB_VALID;
+ ref->flags |= E500_TLB_VALID;
if (tlbe_is_writable(gtlbe))
kvm_set_pfn_dirty(pfn);
@@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
if (ref->flags & E500_TLB_VALID) {
+ /* FIXME: don't log bogus pfn for TLB1 */
trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
ref->flags = 0;
}
@@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
- int tlbsel = 0;
- int i;
-
- for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
- struct tlbe_ref *ref =
- &vcpu_e500->gtlb_priv[tlbsel][i].ref;
- kvmppc_e500_ref_release(ref);
- }
-}
-
-static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- int stlbsel = 1;
+ int tlbsel;
int i;
- kvmppc_e500_tlbil_all(vcpu_e500);
-
- for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
- struct tlbe_ref *ref =
- &vcpu_e500->tlb_refs[stlbsel][i];
- kvmppc_e500_ref_release(ref);
+ for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
+ for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
+ struct tlbe_ref *ref =
+ &vcpu_e500->gtlb_priv[tlbsel][i].ref;
+ kvmppc_e500_ref_release(ref);
+ }
}
-
- clear_tlb_privs(vcpu_e500);
}
void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- clear_tlb_refs(vcpu_e500);
+ kvmppc_e500_tlbil_all(vcpu_e500);
+ clear_tlb_privs(vcpu_e500);
clear_tlb1_bitmap(vcpu_e500);
}
@@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
}
- /* Drop old ref and setup new one. */
- kvmppc_e500_ref_release(ref);
kvmppc_e500_ref_setup(ref, gtlbe, pfn);
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -507,14 +496,15 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
vcpu_e500->host_tlb1_nv = 0;
- vcpu_e500->tlb_refs[1][sesel] = *ref;
- vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
- vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
- unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
+ unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
}
- vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
+
+ vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+ vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
+ vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
+ WARN_ON(!(ref->flags & E500_TLB_VALID));
return sesel;
}
@@ -526,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
- struct tlbe_ref ref;
+ struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
int sesel;
int r;
- ref.flags = 0;
r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
- &ref);
+ ref);
if (r)
return r;
@@ -544,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
}
/* Otherwise map into TLB1 */
- sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
+ sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
return 0;
@@ -565,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
case 0:
priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
- /* Triggers after clear_tlb_refs or on initial mapping */
+ /* Triggers after clear_tlb_privs or on initial mapping */
if (!(priv->ref.flags & E500_TLB_VALID)) {
kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
} else {
@@ -665,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
host_tlb_params[0].entries / host_tlb_params[0].ways;
host_tlb_params[1].sets = 1;
- vcpu_e500->tlb_refs[0] =
- kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
- GFP_KERNEL);
- if (!vcpu_e500->tlb_refs[0])
- goto err;
-
- vcpu_e500->tlb_refs[1] =
- kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
- GFP_KERNEL);
- if (!vcpu_e500->tlb_refs[1])
- goto err;
-
vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
host_tlb_params[1].entries,
GFP_KERNEL);
if (!vcpu_e500->h2g_tlb1_rmap)
- goto err;
+ return -EINVAL;
return 0;
-
-err:
- kfree(vcpu_e500->tlb_refs[0]);
- kfree(vcpu_e500->tlb_refs[1]);
- return -EINVAL;
}
void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
kfree(vcpu_e500->h2g_tlb1_rmap);
- kfree(vcpu_e500->tlb_refs[0]);
- kfree(vcpu_e500->tlb_refs[1]);
}
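
One subtle detail in the e500_mmu_host.c hunk above: h2g_tlb1_rmap now stores esel + 1 and readers subtract 1, so the zero left by kzalloc() unambiguously means "this host slot maps no guest entry". A small sketch of that bias with placeholder names:

#define SKETCH_NSLOTS 4

static unsigned int sketch_rmap[SKETCH_NSLOTS];		/* zero-initialised, like kzalloc() */

static void sketch_record(unsigned int sesel, unsigned int esel)
{
	sketch_rmap[sesel] = esel + 1;			/* biased by one: 0 keeps meaning "empty" */
}

static int sketch_lookup(unsigned int sesel)
{
	if (!sketch_rmap[sesel])
		return -1;				/* never confused with guest entry 0 */
	return sketch_rmap[sesel] - 1;			/* undo the bias */
}
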
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 1f89d26e65f..2f4baa074b2 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -108,6 +108,8 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
}
+static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
+
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -136,8 +138,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
mtspr(SPRN_GESR, vcpu->arch.shared->esr);
- if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
+ if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
+ __get_cpu_var(last_vcpu_on_cpu) != vcpu) {
kvmppc_e500_tlbil_all(vcpu_e500);
+ __get_cpu_var(last_vcpu_on_cpu) = vcpu;
+ }
kvmppc_load_guest_fp(vcpu);
}
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 1b6e1271719..f410c3e12c1 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -195,6 +195,11 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
unsigned long tprot = prot;
+ /*
+ * If we hit a bad address return error.
+ */
+ if (!vsid)
+ return -1;
/* Make kernel text executable */
if (overlaps_kernel_text(vaddr, vaddr + step))
tprot &= ~HPTE_R_N;
@@ -759,6 +764,8 @@ void __init early_init_mmu(void)
/* Initialize stab / SLB management */
if (mmu_has_feature(MMU_FTR_SLB))
slb_initialize();
+ else
+ stab_initialize(get_paca()->stab_real);
}
#ifdef CONFIG_SMP
@@ -922,11 +929,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
ea, access, trap);
- if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
- DBG_LOW(" out of pgtable range !\n");
- return 1;
- }
-
/* Get region & vsid */
switch (REGION_ID(ea)) {
case USER_REGION_ID:
@@ -957,6 +959,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
}
DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
+ /* Bad address. */
+ if (!vsid) {
+ DBG_LOW("Bad address!\n");
+ return 1;
+ }
/* Get pgdir */
pgdir = mm->pgd;
if (pgdir == NULL)
@@ -1126,6 +1133,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
/* Get VSID */
ssize = user_segment_size(ea);
vsid = get_vsid(mm->context.id, ea, ssize);
+ if (!vsid)
+ return;
/* Hash doesn't like irqs */
local_irq_save(flags);
@@ -1233,6 +1242,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
+ /* Don't create HPTE entries for bad address */
+ if (!vsid)
+ return;
ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
mode, HPTE_V_BOLTED,
mmu_linear_psize, mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 7e2246fb2f3..5a535b73ea1 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -263,19 +263,14 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
vmemmap_list = vmem_back;
}
-int __meminit vmemmap_populate(struct page *start_page,
- unsigned long nr_pages, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
- unsigned long start = (unsigned long)start_page;
- unsigned long end = (unsigned long)(start_page + nr_pages);
unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
/* Align to the page size of the linear mapping. */
start = _ALIGN_DOWN(start, page_size);
- pr_debug("vmemmap_populate page %p, %ld pages, node %d\n",
- start_page, nr_pages, node);
- pr_debug(" -> map %lx..%lx\n", start, end);
+ pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
for (; start < end; start += page_size) {
void *p;
@@ -298,7 +293,7 @@ int __meminit vmemmap_populate(struct page *start_page,
return 0;
}
-void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void vmemmap_free(unsigned long start, unsigned long end)
{
}
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index f1f7409a418..cd76c454942 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -352,13 +352,9 @@ void __init mem_init(void)
struct page *page = pfn_to_page(pfn);
if (memblock_is_reserved(paddr))
continue;
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalhigh_pages++;
+ free_highmem_page(page);
reservedpages--;
}
- totalram_pages += totalhigh_pages;
printk(KERN_DEBUG "High memory: %luk\n",
totalhigh_pages << (PAGE_SHIFT-10));
}
@@ -405,39 +401,14 @@ void __init mem_init(void)
void free_initmem(void)
{
- unsigned long addr;
-
ppc_md.progress = ppc_printk_progress;
-
- addr = (unsigned long)__init_begin;
- for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
- memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
- pr_info("Freeing unused kernel memory: %luk freed\n",
- ((unsigned long)__init_end -
- (unsigned long)__init_begin) >> 10);
+ free_initmem_default(POISON_FREE_INITMEM);
}
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
- if (start >= end)
- return;
-
- start = _ALIGN_DOWN(start, PAGE_SIZE);
- end = _ALIGN_UP(end, PAGE_SIZE);
- pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- }
+ free_reserved_area(start, end, 0, "initrd");
}
#endif
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 40bc5b0ace5..d1d1b92c5b9 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -29,15 +29,6 @@
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);
-/*
- * 256MB segment
- * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
- * available for user mappings. Each segment contains 2^28 bytes. Each
- * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
- * (19 == 37 + 28 - 46).
- */
-#define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1)
-
int __init_new_context(void)
{
int index;
@@ -56,7 +47,7 @@ again:
else if (err)
return err;
- if (index > MAX_CONTEXT) {
+ if (index > MAX_USER_CONTEXT) {
spin_lock(&mmu_context_lock);
ida_remove(&mmu_context_ida, index);
spin_unlock(&mmu_context_lock);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index bba87ca2b4d..fa33c546e77 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -22,6 +22,7 @@
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
+#include <linux/slab.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
@@ -62,14 +63,11 @@ static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
*/
static void __init setup_node_to_cpumask_map(void)
{
- unsigned int node, num = 0;
+ unsigned int node;
/* setup nr_node_ids if not done yet */
- if (nr_node_ids == MAX_NUMNODES) {
- for_each_node_mask(node, node_possible_map)
- num = node;
- nr_node_ids = num + 1;
- }
+ if (nr_node_ids == MAX_NUMNODES)
+ setup_nr_node_ids();
/* allocate the map */
for (node = 0; node < nr_node_ids; node++)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index e212a271c7a..654258f165a 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -61,7 +61,7 @@
#endif
#ifdef CONFIG_PPC_STD_MMU_64
-#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 1a16ca22775..17aa6dfceb3 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -31,10 +31,15 @@
* No other registers are examined or changed.
*/
_GLOBAL(slb_allocate_realmode)
- /* r3 = faulting address */
+ /*
+ * check for bad kernel/user address
+ * (ea & ~REGION_MASK) >= PGTABLE_RANGE
+ */
+ rldicr. r9,r3,4,(63 - 46 - 4)
+ bne- 8f
srdi r9,r3,60 /* get region */
- srdi r10,r3,28 /* get esid */
+ srdi r10,r3,SID_SHIFT /* get esid */
cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */
/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
@@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode)
*/
_GLOBAL(slb_miss_kernel_load_linear)
li r11,0
- li r9,0x1
/*
- * for 1T we shift 12 bits more. slb_finish_load_1T will do
- * the necessary adjustment
+ * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+ * r9 = region id.
*/
- rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+ addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
+ addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+
+
BEGIN_FTR_SECTION
b slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
_GLOBAL(slb_miss_kernel_load_io)
li r11,0
6:
- li r9,0x1
/*
- * for 1T we shift 12 bits more. slb_finish_load_1T will do
- * the necessary adjustment
+ * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+ * r9 = region id.
*/
- rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+ addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
+ addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+
BEGIN_FTR_SECTION
b slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
b slb_finish_load_1T
-0: /* user address: proto-VSID = context << 15 | ESID. First check
- * if the address is within the boundaries of the user region
- */
- srdi. r9,r10,USER_ESID_BITS
- bne- 8f /* invalid ea bits set */
-
-
+0:
/* when using slices, we extract the psize off the slice bitmaps
* and then we need to get the sllp encoding off the mmu_psize_defs
* array.
@@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
ld r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
cmpldi r10,0x1000
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
- rldimi r10,r9,USER_ESID_BITS,0
-BEGIN_FTR_SECTION
bge slb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
b slb_finish_load
8: /* invalid EA */
li r10,0 /* BAD_VSID */
+ li r9,0 /* BAD_VSID */
li r11,SLB_VSID_USER /* flags don't much matter */
b slb_finish_load
@@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user)
/* get context to calculate proto-VSID */
ld r9,PACACONTEXTID(r13)
- rldimi r10,r9,USER_ESID_BITS,0
-
/* fall through slb_finish_load */
#endif /* __DISABLED__ */
@@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user)
/*
* Finish loading of an SLB entry and return
*
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
+ * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
*/
slb_finish_load:
+ rldimi r10,r9,ESID_BITS,0
ASM_VSID_SCRAMBLE(r10,r9,256M)
/*
* bits above VSID_BITS_256M need to be ignored from r10
@@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size)
/*
* Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
*
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
+ * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
*/
slb_finish_load_1T:
- srdi r10,r10,40-28 /* get 1T ESID */
+ srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */
+ rldimi r10,r9,ESID_BITS_1T,0
ASM_VSID_SCRAMBLE(r10,r9,1T)
/*
* bits above VSID_BITS_1T need to be ignored from r10
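The new slb_low.S comments state that kernel regions get context = MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1. A small C sketch of that arithmetic for the four kernel regions; the MAX_USER_CONTEXT value below is a placeholder for illustration only (the real constant comes from the mmu headers):

#include <stdio.h>

/* Illustrative value only; not the real kernel definition. */
#define MAX_USER_CONTEXT 0x7ffffUL

/* context = MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1, per the added comment. */
static unsigned long kernel_context(unsigned long ea)
{
	unsigned long region = ea >> 60;

	return MAX_USER_CONTEXT + (region - 0xc) + 1;
}

int main(void)
{
	unsigned long eas[] = { 0xc000000000000000UL, 0xd000000000000000UL,
				0xe000000000000000UL, 0xf000000000000000UL };
	int i;

	for (i = 0; i < 4; i++)
		printf("region 0x%lx -> context 0x%lx\n",
		       eas[i] >> 60, kernel_context(eas[i]));
	return 0;
}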
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 0d82ef50dc3..023ec8a13f3 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
if (!is_kernel_addr(addr)) {
ssize = user_segment_size(addr);
vsid = get_vsid(mm->context.id, addr, ssize);
- WARN_ON(vsid == 0);
} else {
vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
ssize = mmu_kernel_ssize;
}
+ WARN_ON(vsid == 0);
vpn = hpt_vpn(addr, vsid, ssize);
rpte = __real_pte(__pte(pte), ptep);
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index b554879bd31..3c475d6267c 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -420,7 +420,20 @@ static struct attribute_group power7_pmu_events_group = {
.attrs = power7_events_attr,
};
+PMU_FORMAT_ATTR(event, "config:0-19");
+
+static struct attribute *power7_pmu_format_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+struct attribute_group power7_pmu_format_group = {
+ .name = "format",
+ .attrs = power7_pmu_format_attr,
+};
+
static const struct attribute_group *power7_pmu_attr_groups[] = {
+ &power7_pmu_format_group,
&power7_pmu_events_group,
NULL,
};
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 0effe9f5a1e..7be93367d92 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -274,6 +274,8 @@ config 440EPX
select IBM_EMAC_EMAC4
select IBM_EMAC_RGMII
select IBM_EMAC_ZMII
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ select USB_EHCI_BIG_ENDIAN_DESC
config 440GRX
bool
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index c16999802ec..381a592826a 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -7,6 +7,8 @@ config PPC_MPC512x
select PPC_PCI_CHOICE
select FSL_PCI if PCI
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ select USB_EHCI_BIG_ENDIAN_DESC
config MPC5121_ADS
bool "Freescale MPC5121E ADS"
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index d30235b7e3f..db6ac389ef8 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -172,12 +172,9 @@ static struct fsl_diu_shared_fb __attribute__ ((__aligned__(8))) diu_shared_fb;
static inline void mpc512x_free_bootmem(struct page *page)
{
- __ClearPageReserved(page);
BUG_ON(PageTail(page));
BUG_ON(atomic_read(&page->_count) > 1);
- atomic_set(&page->_count, 1);
- __free_page(page);
- totalram_pages++;
+ free_reserved_page(page);
}
void mpc512x_release_bootmem(void)
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index 611e92f291c..7179726ba5c 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -69,7 +69,7 @@ static irqreturn_t gpio_halt_irq(int irq, void *__data)
return IRQ_HANDLED;
};
-static int __devinit gpio_halt_probe(struct platform_device *pdev)
+static int gpio_halt_probe(struct platform_device *pdev)
{
enum of_gpio_flags flags;
struct device_node *node = pdev->dev.of_node;
@@ -128,7 +128,7 @@ static int __devinit gpio_halt_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit gpio_halt_remove(struct platform_device *pdev)
+static int gpio_halt_remove(struct platform_device *pdev)
{
if (halt_node) {
int gpio = of_get_gpio(halt_node, 0);
@@ -165,7 +165,7 @@ static struct platform_driver gpio_halt_driver = {
.of_match_table = gpio_halt_match,
},
.probe = gpio_halt_probe,
- .remove = __devexit_p(gpio_halt_remove),
+ .remove = gpio_halt_remove,
};
module_platform_driver(gpio_halt_driver);
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index cea2f09c424..18e3b76c78d 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -124,9 +124,8 @@ config 6xx
select PPC_HAVE_PMU_SUPPORT
config POWER3
- bool
depends on PPC64 && PPC_BOOK3S
- default y if !POWER4_ONLY
+ def_bool y
config POWER4
depends on PPC64 && PPC_BOOK3S
@@ -145,8 +144,7 @@ config TUNE_CELL
but somewhat slower on other machines. This option only changes
the scheduling of instructions, not the selection of instructions
itself, so the resulting kernel will keep running on all other
- machines. When building a kernel that is supposed to run only
- on Cell, you should also select the POWER4_ONLY option.
+ machines.
# this is temp to handle compat with arch=ppc
config 8xx
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 863184b182f..3f3bb4cdbbe 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -749,6 +749,7 @@ static struct file_system_type spufs_type = {
.mount = spufs_mount,
.kill_sb = kill_litter_super,
};
+MODULE_ALIAS_FS("spufs");
static int __init spufs_init(void)
{
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 2372c609fa2..9a432de363b 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -72,6 +72,7 @@ unsigned long memory_block_size_bytes(void)
return get_memblock_size();
}
+#ifdef CONFIG_MEMORY_HOTREMOVE
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
unsigned long start, start_pfn;
@@ -153,6 +154,17 @@ static int pseries_remove_memory(struct device_node *np)
ret = pseries_remove_memblock(base, lmb_size);
return ret;
}
+#else
+static inline int pseries_remove_memblock(unsigned long base,
+ unsigned int memblock_size)
+{
+ return -EOPNOTSUPP;
+}
+static inline int pseries_remove_memory(struct device_node *np)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_MEMORY_HOTREMOVE */
static int pseries_add_memory(struct device_node *np)
{
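The hunk above adds the usual Kconfig fallback: when CONFIG_MEMORY_HOTREMOVE is off, inline stubs with identical signatures return -EOPNOTSUPP so callers build unchanged. A compilable sketch of the same pattern, with an illustrative toggle macro and a POSIX errno.h standing in for the kernel's:

#include <errno.h>
#include <stdio.h>

#define CONFIG_MEMORY_HOTREMOVE 0	/* illustrative build-time toggle */

#if CONFIG_MEMORY_HOTREMOVE
static int remove_memblock(unsigned long base, unsigned int size)
{
	/* the real removal work would go here */
	return 0;
}
#else
/* Same signature, so call sites compile whether or not the feature is built. */
static inline int remove_memblock(unsigned long base, unsigned int size)
{
	return -EOPNOTSUPP;
}
#endif

int main(void)
{
	printf("remove_memblock() -> %d\n", remove_memblock(0, 0));
	return 0;
}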
diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c
index fcf4b4cbeaf..4557e91626c 100644
--- a/arch/powerpc/platforms/pseries/hvcserver.c
+++ b/arch/powerpc/platforms/pseries/hvcserver.c
@@ -23,6 +23,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <asm/hvcall.h>
#include <asm/hvcserver.h>
@@ -188,9 +189,9 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
= (unsigned int)last_p_partition_ID;
/* copy the Null-term char too */
- strncpy(&next_partner_info->location_code[0],
+ strlcpy(&next_partner_info->location_code[0],
(char *)&pi_buff[2],
- strlen((char *)&pi_buff[2]) + 1);
+ sizeof(next_partner_info->location_code));
list_add_tail(&(next_partner_info->node), head);
next_partner_info = NULL;
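The hvcserver change bounds the copy by the destination rather than the source: strlcpy(dst, src, sizeof(dst)) truncates and always NUL-terminates, where the old strncpy call sized by strlen(src) + 1 could overrun location_code. A userspace sketch of the destination-bounded behaviour, using snprintf as a portable stand-in for strlcpy and an illustrative buffer size:

#include <stdio.h>
#include <string.h>

struct partner_info {
	char location_code[16];	/* illustrative size */
};

int main(void)
{
	struct partner_info pi;
	const char *src = "U9117.MMB.1234567-V2-C2";	/* illustrative, longer than the buffer */

	/* Destination-bounded copy: truncates but always NUL-terminates,
	 * which is what strlcpy(dst, src, sizeof(dst)) guarantees. */
	snprintf(pi.location_code, sizeof(pi.location_code), "%s", src);
	printf("copied: \"%s\" (len %zu of %zu)\n",
	       pi.location_code, strlen(pi.location_code),
	       sizeof(pi.location_code));
	return 0;
}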
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 0da39fed355..299731e9036 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -186,7 +186,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
(0x1UL << 4), &dummy1, &dummy2);
if (lpar_rc == H_SUCCESS)
return i;
- BUG_ON(lpar_rc != H_NOT_FOUND);
+
+ /*
+ * The test for adjunct partition is performed before the
+ * ANDCOND test. H_RESOURCE may be returned, so we need to
+ * check for that as well.
+ */
+ BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);
slot_offset++;
slot_offset &= 0x7;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 4b505370a1d..bda6ba6f3cf 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -134,7 +134,7 @@ config S390
select HAVE_SYSCALL_WRAPPERS
select HAVE_UID16 if 32BIT
select HAVE_VIRT_CPU_ACCOUNTING
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select INIT_ALL_POSSIBLE
select KTIME_SCALAR if 32BIT
select MODULES_USE_ELF_RELA
@@ -375,19 +375,6 @@ config PACK_STACK
Say Y if you are unsure.
-config SMALL_STACK
- def_bool n
- prompt "Use 8kb for kernel stack instead of 16kb"
- depends on PACK_STACK && 64BIT && !LOCKDEP
- help
- If you say Y here and the compiler supports the -mkernel-backchain
- option the kernel will use a smaller kernel stack size. The reduced
- size is 8kb instead of 16kb. This allows to run more threads on a
- system and reduces the pressure on the memory management for higher
- order page allocations.
-
- Say N if you are unsure.
-
config CHECK_STACK
def_bool y
prompt "Detect kernel stack overflow"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 7e3ce78d429..a7d68a467ce 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -55,22 +55,12 @@ cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls
ifeq ($(call cc-option-yn,-mkernel-backchain),y)
cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK
aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
-cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
-aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
-ifdef CONFIG_SMALL_STACK
-STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) )
-endif
endif
# new style option for packed stacks
ifeq ($(call cc-option-yn,-mpacked-stack),y)
cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK
aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
-cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
-aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
-ifdef CONFIG_SMALL_STACK
-STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) )
-endif
endif
ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y)
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index 9fd4a40c675..bb5dd496614 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -105,9 +105,7 @@ void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
int hypfs_dbfs_init(void)
{
dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
- if (IS_ERR(dbfs_dir))
- return PTR_ERR(dbfs_dir);
- return 0;
+ return PTR_RET(dbfs_dir);
}
void hypfs_dbfs_exit(void)
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 8538015ed4a..5f7d7ba2874 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -456,6 +456,7 @@ static struct file_system_type hypfs_type = {
.mount = hypfs_mount,
.kill_sb = hypfs_kill_super
};
+MODULE_ALIAS_FS("s390_hypfs");
static const struct super_operations hypfs_s_ops = {
.statfs = simple_statfs,
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 15422933c60..4d8604e311f 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -61,8 +61,6 @@ extern const char _sb_findmap[];
#ifndef CONFIG_64BIT
-#define __BITOPS_ALIGN 3
-#define __BITOPS_WORDSIZE 32
#define __BITOPS_OR "or"
#define __BITOPS_AND "nr"
#define __BITOPS_XOR "xr"
@@ -81,8 +79,6 @@ extern const char _sb_findmap[];
#else /* CONFIG_64BIT */
-#define __BITOPS_ALIGN 7
-#define __BITOPS_WORDSIZE 64
#define __BITOPS_OR "ogr"
#define __BITOPS_AND "ngr"
#define __BITOPS_XOR "xgr"
@@ -101,8 +97,7 @@ extern const char _sb_findmap[];
#endif /* CONFIG_64BIT */
-#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
-#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
+#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#ifdef CONFIG_SMP
/*
@@ -114,9 +109,9 @@ static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make OR mask */
- mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}
@@ -130,9 +125,9 @@ static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make AND mask */
- mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
+ mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}
@@ -146,9 +141,9 @@ static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make XOR mask */
- mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}
@@ -163,12 +158,12 @@ test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make OR/test mask */
- mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
- __BITOPS_BARRIER();
+ barrier();
return (old & mask) != 0;
}
@@ -182,12 +177,12 @@ test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make AND/test mask */
- mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
+ mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
- __BITOPS_BARRIER();
+ barrier();
return (old ^ new) != 0;
}
@@ -201,12 +196,12 @@ test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make XOR/test mask */
- mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
- __BITOPS_BARRIER();
+ barrier();
return (old & mask) != 0;
}
#endif /* CONFIG_SMP */
@@ -218,7 +213,7 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
asm volatile(
" oc %O0(1,%R0),%1"
: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
@@ -229,7 +224,7 @@ __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
*(unsigned char *) addr |= 1 << (nr & 7);
}
@@ -246,7 +241,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
asm volatile(
" nc %O0(1,%R0),%1"
: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
@@ -257,7 +252,7 @@ __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
*(unsigned char *) addr &= ~(1 << (nr & 7));
}
@@ -273,7 +268,7 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
asm volatile(
" xc %O0(1,%R0),%1"
: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
@@ -284,7 +279,7 @@ __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
*(unsigned char *) addr ^= 1 << (nr & 7);
}
@@ -302,7 +297,7 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr;
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(unsigned char *) addr;
asm volatile(
" oc %O0(1,%R0),%1"
@@ -321,7 +316,7 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr;
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(unsigned char *) addr;
asm volatile(
" nc %O0(1,%R0),%1"
@@ -340,7 +335,7 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr;
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(unsigned char *) addr;
asm volatile(
" xc %O0(1,%R0),%1"
@@ -376,7 +371,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr
unsigned long addr;
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(volatile unsigned char *) addr;
return (ch >> (nr & 7)) & 1;
}
@@ -384,7 +379,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr
static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
return (((volatile char *) addr)
- [(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0;
+ [(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0;
}
#define test_bit(nr,addr) \
@@ -693,18 +688,18 @@ static inline int find_next_bit_left(const unsigned long *addr,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
set = __flo_word(0, *p & (~0UL << bit));
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_bit_left(p, size);
@@ -736,22 +731,22 @@ static inline int find_next_zero_bit (const unsigned long * addr,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
- * __ffz_word returns __BITOPS_WORDSIZE
+ * __ffz_word returns BITS_PER_LONG
* if no zero bit is present in the word.
*/
set = __ffz_word(bit, *p >> bit);
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_zero_bit(p, size);
@@ -773,22 +768,22 @@ static inline int find_next_bit (const unsigned long * addr,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
- * __ffs_word returns __BITOPS_WORDSIZE
+ * __ffs_word returns BITS_PER_LONG
* if no one bit is present in the word.
*/
set = __ffs_word(0, *p & (~0UL << bit));
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_bit(p, size);
@@ -843,22 +838,22 @@ static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
- * s390 version of ffz returns __BITOPS_WORDSIZE
+ * s390 version of ffz returns BITS_PER_LONG
* if no zero bit is present in the word.
*/
set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_zero_bit_le(p, size);
@@ -885,22 +880,22 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
- * s390 version of ffz returns __BITOPS_WORDSIZE
+ * s390 version of ffz returns BITS_PER_LONG
* if no zero bit is present in the word.
*/
set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_bit_le(p, size);
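Throughout the bitops conversion the word address and bit mask are derived the same way, just spelled with BITS_PER_LONG now. A standalone sketch of that derivation for one bit number, reproducing the expressions used in set_bit_cs() and friends:

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

int main(void)
{
	unsigned long nr = 200;	/* arbitrary bit number */

	/* Byte offset of the word containing bit nr:
	 * (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3 == (nr / BITS_PER_LONG) * sizeof(long)
	 */
	unsigned long offset = (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
	unsigned long mask = 1UL << (nr & (BITS_PER_LONG - 1));

	printf("bit %lu -> word byte offset %lu, mask 0x%lx\n", nr, offset, mask);
	return 0;
}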
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index e6061617a50..f201af8be58 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -220,7 +220,8 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
extern struct ccw_device *ccw_device_probe_console(void);
-extern int ccw_device_force_console(void);
+extern void ccw_device_wait_idle(struct ccw_device *);
+extern int ccw_device_force_console(struct ccw_device *);
int ccw_device_siosl(struct ccw_device *);
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index ad2b924167d..ffb898961c8 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -296,8 +296,6 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1,
return 0;
}
-extern void wait_cons_dev(void);
-
extern void css_schedule_reprobe(void);
extern void reipl_ccw_dev(struct ccw_dev_id *id);
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index f8c6df6cd1f..c1e7c646727 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -70,6 +70,22 @@ typedef u32 compat_ulong_t;
typedef u64 compat_u64;
typedef u32 compat_uptr_t;
+typedef struct {
+ u32 mask;
+ u32 addr;
+} __aligned(8) psw_compat_t;
+
+typedef struct {
+ psw_compat_t psw;
+ u32 gprs[NUM_GPRS];
+ u32 acrs[NUM_ACRS];
+ u32 orig_gpr2;
+} s390_compat_regs;
+
+typedef struct {
+ u32 gprs_high[NUM_GPRS];
+} s390_compat_regs_high;
+
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
@@ -124,18 +140,33 @@ struct compat_flock64 {
};
struct compat_statfs {
- s32 f_type;
- s32 f_bsize;
- s32 f_blocks;
- s32 f_bfree;
- s32 f_bavail;
- s32 f_files;
- s32 f_ffree;
+ u32 f_type;
+ u32 f_bsize;
+ u32 f_blocks;
+ u32 f_bfree;
+ u32 f_bavail;
+ u32 f_files;
+ u32 f_ffree;
+ compat_fsid_t f_fsid;
+ u32 f_namelen;
+ u32 f_frsize;
+ u32 f_flags;
+ u32 f_spare[4];
+};
+
+struct compat_statfs64 {
+ u32 f_type;
+ u32 f_bsize;
+ u64 f_blocks;
+ u64 f_bfree;
+ u64 f_bavail;
+ u64 f_files;
+ u64 f_ffree;
compat_fsid_t f_fsid;
- s32 f_namelen;
- s32 f_frsize;
- s32 f_flags;
- s32 f_spare[5];
+ u32 f_namelen;
+ u32 f_frsize;
+ u32 f_flags;
+ u32 f_spare[4];
};
#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
@@ -248,8 +279,6 @@ static inline int is_compat_task(void)
return is_32bit_task();
}
-#endif
-
static inline void __user *arch_compat_alloc_user_space(long len)
{
unsigned long stack;
@@ -260,6 +289,8 @@ static inline void __user *arch_compat_alloc_user_space(long len)
return (void __user *) (stack - len);
}
+#endif
+
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index f1eddd150dd..c879fad404c 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -12,6 +12,7 @@
#ifndef _ASM_S390_CPU_MF_H
#define _ASM_S390_CPU_MF_H
+#include <linux/errno.h>
#include <asm/facility.h>
#define CPU_MF_INT_SF_IAE (1 << 31) /* invalid entry address */
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h
index 8d4847191ec..dc9200ca32e 100644
--- a/arch/s390/include/asm/eadm.h
+++ b/arch/s390/include/asm/eadm.h
@@ -34,6 +34,8 @@ struct arsb {
u32 reserved[4];
} __packed;
+#define EQC_WR_PROHIBIT 22
+
struct msb {
u8 fmt:4;
u8 oc:4;
@@ -96,11 +98,13 @@ struct scm_device {
#define OP_STATE_TEMP_ERR 2
#define OP_STATE_PERM_ERR 3
+enum scm_event {SCM_CHANGE, SCM_AVAIL};
+
struct scm_driver {
struct device_driver drv;
int (*probe) (struct scm_device *scmdev);
int (*remove) (struct scm_device *scmdev);
- void (*notify) (struct scm_device *scmdev);
+ void (*notify) (struct scm_device *scmdev, enum scm_event event);
void (*handler) (struct scm_device *scmdev, void *data, int error);
};
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1bfdf24b85a..78f4f8711d5 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -119,6 +119,8 @@
*/
#include <asm/ptrace.h>
+#include <asm/compat.h>
+#include <asm/syscall.h>
#include <asm/user.h>
typedef s390_fp_regs elf_fpregset_t;
@@ -180,18 +182,31 @@ extern unsigned long elf_hwcap;
extern char elf_platform[];
#define ELF_PLATFORM (elf_platform)
-#ifdef CONFIG_64BIT
+#ifndef CONFIG_COMPAT
+#define SET_PERSONALITY(ex) \
+do { \
+ set_personality(PER_LINUX | \
+ (current->personality & (~PER_MASK))); \
+ current_thread_info()->sys_call_table = \
+ (unsigned long) &sys_call_table; \
+} while (0)
+#else /* CONFIG_COMPAT */
#define SET_PERSONALITY(ex) \
do { \
if (personality(current->personality) != PER_LINUX32) \
set_personality(PER_LINUX | \
(current->personality & ~PER_MASK)); \
- if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
+ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \
set_thread_flag(TIF_31BIT); \
- else \
+ current_thread_info()->sys_call_table = \
+ (unsigned long) &sys_call_table_emu; \
+ } else { \
clear_thread_flag(TIF_31BIT); \
+ current_thread_info()->sys_call_table = \
+ (unsigned long) &sys_call_table; \
+ } \
} while (0)
-#endif /* CONFIG_64BIT */
+#endif /* CONFIG_COMPAT */
#define STACK_RND_MASK 0x7ffUL
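The elf.h and thread_info changes record the syscall table per thread and pick it in SET_PERSONALITY: 31-bit tasks get sys_call_table_emu, everything else sys_call_table. A userspace sketch of the selection mechanism; table contents and helper names are illustrative only:

#include <stdio.h>

typedef long (*syscall_fn)(void);

static long sys_native(void) { return 64; }
static long sys_compat(void) { return 31; }

/* Two tables, as in the diff; entries here are placeholders. */
static syscall_fn sys_call_table[]     = { sys_native };
static syscall_fn sys_call_table_emu[] = { sys_compat };

/* Mirrors the new thread_info->sys_call_table field. */
struct thread_info {
	syscall_fn *sys_call_table;
};

/* Illustrative stand-in for the SET_PERSONALITY() selection. */
static void select_syscall_table(struct thread_info *ti, int is_31bit)
{
	ti->sys_call_table = is_31bit ? sys_call_table_emu : sys_call_table;
}

int main(void)
{
	struct thread_info ti;

	select_syscall_table(&ti, 1);
	printf("31-bit task, syscall 0 -> %ld\n", ti.sys_call_table[0]());
	select_syscall_table(&ti, 0);
	printf("64-bit task, syscall 0 -> %ld\n", ti.sys_call_table[0]());
	return 0;
}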
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 593753ee07f..bd90359d6d2 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -114,7 +114,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
#define huge_ptep_set_wrprotect(__mm, __addr, __ptep) \
({ \
pte_t __pte = huge_ptep_get(__ptep); \
- if (pte_write(__pte)) { \
+ if (huge_pte_write(__pte)) { \
huge_ptep_invalidate(__mm, __addr, __ptep); \
set_huge_pte_at(__mm, __addr, __ptep, \
huge_pte_wrprotect(__pte)); \
@@ -127,4 +127,58 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
huge_ptep_invalidate(vma->vm_mm, address, ptep);
}
+static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+{
+ pte_t pte;
+ pmd_t pmd;
+
+ pmd = mk_pmd_phys(page_to_phys(page), pgprot);
+ pte_val(pte) = pmd_val(pmd);
+ return pte;
+}
+
+static inline int huge_pte_write(pte_t pte)
+{
+ pmd_t pmd;
+
+ pmd_val(pmd) = pte_val(pte);
+ return pmd_write(pmd);
+}
+
+static inline int huge_pte_dirty(pte_t pte)
+{
+ /* No dirty bit in the segment table entry. */
+ return 0;
+}
+
+static inline pte_t huge_pte_mkwrite(pte_t pte)
+{
+ pmd_t pmd;
+
+ pmd_val(pmd) = pte_val(pte);
+ pte_val(pte) = pmd_val(pmd_mkwrite(pmd));
+ return pte;
+}
+
+static inline pte_t huge_pte_mkdirty(pte_t pte)
+{
+ /* No dirty bit in the segment table entry. */
+ return pte;
+}
+
+static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pmd_t pmd;
+
+ pmd_val(pmd) = pte_val(pte);
+ pte_val(pte) = pmd_val(pmd_modify(pmd, newprot));
+ return pte;
+}
+
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pmd_clear((pmd_t *) ptep);
+}
+
#endif /* _ASM_S390_HUGETLB_H */
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 27cb32185ce..379d96e2105 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -50,10 +50,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
#define ioremap_nocache(addr, size) ioremap(addr, size)
#define ioremap_wc ioremap_nocache
-/* TODO: s390 cannot support io_remap_pfn_range... */
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
-
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
return (void __iomem *) offset;
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 05333b7f046..6c1801235db 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -140,6 +140,7 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
struct zpci_dev *zpci_alloc_device(void);
int zpci_create_device(struct zpci_dev *);
int zpci_enable_device(struct zpci_dev *);
+int zpci_disable_device(struct zpci_dev *);
void zpci_stop_device(struct zpci_dev *);
void zpci_free_device(struct zpci_dev *);
int zpci_scan_device(struct zpci_dev *);
diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h
index 6bbec4265b6..1ca5d1047c7 100644
--- a/arch/s390/include/asm/pci_debug.h
+++ b/arch/s390/include/asm/pci_debug.h
@@ -7,14 +7,11 @@ extern debug_info_t *pci_debug_msg_id;
extern debug_info_t *pci_debug_err_id;
#ifdef CONFIG_PCI_DEBUG
-#define zpci_dbg(fmt, args...) \
- do { \
- if (pci_debug_msg_id->level >= 2) \
- debug_sprintf_event(pci_debug_msg_id, 2, fmt , ## args);\
- } while (0)
+#define zpci_dbg(imp, fmt, args...) \
+ debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args)
#else /* !CONFIG_PCI_DEBUG */
-#define zpci_dbg(fmt, args...) do { } while (0)
+#define zpci_dbg(imp, fmt, args...) do { } while (0)
#endif
#define zpci_err(text...) \
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index 1486a98d5da..e6a2bdd4d70 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -1,10 +1,6 @@
#ifndef _ASM_S390_PCI_INSN_H
#define _ASM_S390_PCI_INSN_H
-#include <linux/delay.h>
-
-#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
-
/* Load/Store status codes */
#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
#define ZPCI_PCI_ST_FUNC_IN_ERR 8
@@ -82,199 +78,12 @@ struct zpci_fib {
u64 reserved7;
} __packed;
-/* Modify PCI Function Controls */
-static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
-{
- u8 cc;
-
- asm volatile (
- " .insn rxy,0xe300000000d0,%[req],%[fib]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
- : : "cc");
- *status = req >> 24 & 0xff;
- return cc;
-}
-
-static inline int mpcifc_instr(u64 req, struct zpci_fib *fib)
-{
- u8 cc, status;
-
- do {
- cc = __mpcifc(req, fib, &status);
- if (cc == 2)
- msleep(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
-
- if (cc)
- printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
- __func__, cc, status);
- return (cc) ? -EIO : 0;
-}
-
-/* Refresh PCI Translations */
-static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
-{
- register u64 __addr asm("2") = addr;
- register u64 __range asm("3") = range;
- u8 cc;
-
- asm volatile (
- " .insn rre,0xb9d30000,%[fn],%[addr]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [fn] "+d" (fn)
- : [addr] "d" (__addr), "d" (__range)
- : "cc");
- *status = fn >> 24 & 0xff;
- return cc;
-}
-
-static inline int rpcit_instr(u64 fn, u64 addr, u64 range)
-{
- u8 cc, status;
-
- do {
- cc = __rpcit(fn, addr, range, &status);
- if (cc == 2)
- udelay(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
-
- if (cc)
- printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
- __func__, cc, status, addr, range);
- return (cc) ? -EIO : 0;
-}
-
-/* Store PCI function controls */
-static inline u8 __stpcifc(u32 handle, u8 space, struct zpci_fib *fib, u8 *status)
-{
- u64 fn = (u64) handle << 32 | space << 16;
- u8 cc;
-
- asm volatile (
- " .insn rxy,0xe300000000d4,%[fn],%[fib]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [fn] "+d" (fn), [fib] "=m" (*fib)
- : : "cc");
- *status = fn >> 24 & 0xff;
- return cc;
-}
-
-/* Set Interruption Controls */
-static inline void sic_instr(u16 ctl, char *unused, u8 isc)
-{
- asm volatile (
- " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
- : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
-}
-
-/* PCI Load */
-static inline u8 __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
-{
- register u64 __req asm("2") = req;
- register u64 __offset asm("3") = offset;
- u64 __data;
- u8 cc;
-
- asm volatile (
- " .insn rre,0xb9d20000,%[data],%[req]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [data] "=d" (__data), [req] "+d" (__req)
- : "d" (__offset)
- : "cc");
- *status = __req >> 24 & 0xff;
- *data = __data;
- return cc;
-}
-
-static inline int pcilg_instr(u64 *data, u64 req, u64 offset)
-{
- u8 cc, status;
-
- do {
- cc = __pcilg(data, req, offset, &status);
- if (cc == 2)
- udelay(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
-
- if (cc) {
- printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
- __func__, cc, status, req, offset);
- /* TODO: on IO errors set data to 0xff...
- * here or in users of pcilg (le conversion)?
- */
- }
- return (cc) ? -EIO : 0;
-}
-
-/* PCI Store */
-static inline u8 __pcistg(u64 data, u64 req, u64 offset, u8 *status)
-{
- register u64 __req asm("2") = req;
- register u64 __offset asm("3") = offset;
- u8 cc;
-
- asm volatile (
- " .insn rre,0xb9d00000,%[data],%[req]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [req] "+d" (__req)
- : "d" (__offset), [data] "d" (data)
- : "cc");
- *status = __req >> 24 & 0xff;
- return cc;
-}
-
-static inline int pcistg_instr(u64 data, u64 req, u64 offset)
-{
- u8 cc, status;
-
- do {
- cc = __pcistg(data, req, offset, &status);
- if (cc == 2)
- udelay(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
-
- if (cc)
- printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
- __func__, cc, status, req, offset);
- return (cc) ? -EIO : 0;
-}
-
-/* PCI Store Block */
-static inline u8 __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
-{
- u8 cc;
-
- asm volatile (
- " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [req] "+d" (req)
- : [offset] "d" (offset), [data] "Q" (*data)
- : "cc");
- *status = req >> 24 & 0xff;
- return cc;
-}
-
-static inline int pcistb_instr(const u64 *data, u64 req, u64 offset)
-{
- u8 cc, status;
-
- do {
- cc = __pcistb(data, req, offset, &status);
- if (cc == 2)
- udelay(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
- if (cc)
- printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
- __func__, cc, status, req, offset);
- return (cc) ? -EIO : 0;
-}
+int s390pci_mod_fc(u64 req, struct zpci_fib *fib);
+int s390pci_refresh_trans(u64 fn, u64 addr, u64 range);
+int s390pci_load(u64 *data, u64 req, u64 offset);
+int s390pci_store(u64 data, u64 req, u64 offset);
+int s390pci_store_block(const u64 *data, u64 req, u64 offset);
+void set_irq_ctrl(u16 ctl, char *unused, u8 isc);
#endif
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index 5fd81f31d6c..83a9caa6ae5 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -36,7 +36,7 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
u64 data; \
int rc; \
\
- rc = pcilg_instr(&data, req, ZPCI_OFFSET(addr)); \
+ rc = s390pci_load(&data, req, ZPCI_OFFSET(addr)); \
if (rc) \
data = -1ULL; \
return (RETTYPE) data; \
@@ -50,7 +50,7 @@ static inline void zpci_write_##VALTYPE(VALTYPE val, \
u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
u64 data = (VALTYPE) val; \
\
- pcistg_instr(data, req, ZPCI_OFFSET(addr)); \
+ s390pci_store(data, req, ZPCI_OFFSET(addr)); \
}
zpci_read(8, u64)
@@ -83,15 +83,18 @@ static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len
val = 0; /* let FW report error */
break;
}
- return pcistg_instr(val, req, offset);
+ return s390pci_store(val, req, offset);
}
static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
{
u64 data;
- u8 cc;
+ int cc;
+
+ cc = s390pci_load(&data, req, offset);
+ if (cc)
+ goto out;
- cc = pcilg_instr(&data, req, offset);
switch (len) {
case 1:
*((u8 *) dst) = (u8) data;
@@ -106,12 +109,13 @@ static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
*((u64 *) dst) = (u64) data;
break;
}
+out:
return cc;
}
static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
{
- return pcistb_instr(data, req, offset);
+ return s390pci_store_block(data, req, offset);
}
static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 4a2930844d4..b4622915bd1 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -57,6 +57,10 @@ extern unsigned long zero_page_mask;
(((unsigned long)(vaddr)) &zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
+/* TODO: s390 cannot support io_remap_pfn_range... */
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
#endif /* !__ASSEMBLY__ */
/*
@@ -344,6 +348,7 @@ extern unsigned long MODULES_END;
#define _REGION3_ENTRY_CO 0x100 /* change-recording override */
/* Bits in the segment table entry */
+#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
@@ -419,6 +424,13 @@ extern unsigned long MODULES_END;
#define __S110 PAGE_RW
#define __S111 PAGE_RW
+/*
+ * Segment entry (large page) protection definitions.
+ */
+#define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE)
+#define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO)
+#define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW)
+
static inline int mm_exclusive(struct mm_struct *mm)
{
return likely(mm == current->active_mm &&
@@ -759,6 +771,8 @@ void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
unsigned long to, unsigned long length);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
+unsigned long __gmap_translate(unsigned long address, struct gmap *);
+unsigned long gmap_translate(unsigned long address, struct gmap *);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
@@ -907,26 +921,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
- /*
- * PROT_NONE needs to be remapped from the pte type to the ste type.
- * The HW invalid bit is also different for pte and ste. The pte
- * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
- * bit, so we don't have to clear it.
- */
- if (pte_val(pte) & _PAGE_INVALID) {
- if (pte_val(pte) & _PAGE_SWT)
- pte_val(pte) |= _HPAGE_TYPE_NONE;
- pte_val(pte) |= _SEGMENT_ENTRY_INV;
- }
- /*
- * Clear SW pte bits, there are no SW bits in a segment table entry.
- */
- pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX | _PAGE_SWC |
- _PAGE_SWR | _PAGE_SWW);
- /*
- * Also set the change-override bit because we don't need dirty bit
- * tracking for hugetlbfs pages.
- */
pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
return pte;
}
@@ -1271,31 +1265,7 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
}
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-
-#define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE)
-#define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO)
-#define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW)
-
-#define __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
-
-#define __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
-
-static inline int pmd_trans_splitting(pmd_t pmd)
-{
- return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
-}
-
-static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmdp, pmd_t entry)
-{
- if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
- pmd_val(entry) |= _SEGMENT_ENTRY_CO;
- *pmdp = entry;
-}
-
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
/*
@@ -1316,10 +1286,11 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
return pmd;
}
-static inline pmd_t pmd_mkhuge(pmd_t pmd)
+static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
- pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
- return pmd;
+ pmd_t __pmd;
+ pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
+ return __pmd;
}
static inline pmd_t pmd_mkwrite(pmd_t pmd)
@@ -1329,6 +1300,34 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
return pmd;
}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
+
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
+
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+ return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t entry)
+{
+ if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
+ pmd_val(entry) |= _SEGMENT_ENTRY_CO;
+ *pmdp = entry;
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+ pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
+ return pmd;
+}
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
@@ -1425,13 +1424,6 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
}
}
-static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
-{
- pmd_t __pmd;
- pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
- return __pmd;
-}
-
#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
@@ -1531,7 +1523,8 @@ extern int s390_enable_sie(void);
/*
* No page table caches to initialise
*/
-#define pgtable_cache_init() do { } while (0)
+static inline void pgtable_cache_init(void) { }
+static inline void check_pgt_cache(void) { }
#include <asm-generic/pgtable.h>
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 94e749c9023..6b499870662 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -161,7 +161,8 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
extern void show_code(struct pt_regs *regs);
extern void print_fn_code(unsigned char *code, unsigned long len);
-extern int insn_to_mnemonic(unsigned char *instruction, char buf[8]);
+extern int insn_to_mnemonic(unsigned char *instruction, char *buf,
+ unsigned int len);
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 3ee5da3bc10..559512a455d 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -9,9 +9,7 @@
#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
-#ifndef __s390x__
-#else /* __s390x__ */
-#endif /* __s390x__ */
+
extern long psw_kernel_bits;
extern long psw_user_bits;
@@ -77,8 +75,6 @@ struct per_struct_kernel {
#define PER_CONTROL_SUSPENSION 0x00400000UL
#define PER_CONTROL_ALTERATION 0x00200000UL
-#ifdef __s390x__
-#endif /* __s390x__ */
/*
* These are defined as per linux/ptrace.h, which see.
*/
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index fe7b99759e1..cd29d2f4e4f 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -23,6 +23,7 @@
* type here is what we want [need] for both 32 bit and 64 bit systems.
*/
extern const unsigned int sys_call_table[];
+extern const unsigned int sys_call_table_emu[];
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 9e2cfe0349c..eb5f64d26d0 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -14,13 +14,8 @@
#define THREAD_ORDER 1
#define ASYNC_ORDER 1
#else /* CONFIG_64BIT */
-#ifndef __SMALL_STACK
#define THREAD_ORDER 2
#define ASYNC_ORDER 2
-#else
-#define THREAD_ORDER 1
-#define ASYNC_ORDER 1
-#endif
#endif /* CONFIG_64BIT */
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
@@ -41,6 +36,7 @@ struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
+ unsigned long sys_call_table; /* System call table address */
unsigned int cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
struct restart_block restart_block;
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 1d8fe2b17ef..6b32af30878 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -74,8 +74,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
static inline void __tlb_flush_mm(struct mm_struct * mm)
{
- if (unlikely(cpumask_empty(mm_cpumask(mm))))
- return;
/*
* If the machine has IDTE we prefer to do a per mm flush
* on all cpus instead of doing a local flush if the mm
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index a5ca214b34f..3aa9f1ec5b2 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -215,12 +215,6 @@ typedef struct
unsigned long addr;
} __attribute__ ((aligned(8))) psw_t;
-typedef struct
-{
- __u32 mask;
- __u32 addr;
-} __attribute__ ((aligned(8))) psw_compat_t;
-
#ifndef __s390x__
#define PSW_MASK_PER 0x40000000UL
@@ -295,20 +289,6 @@ typedef struct
unsigned long orig_gpr2;
} s390_regs;
-typedef struct
-{
- psw_compat_t psw;
- __u32 gprs[NUM_GPRS];
- __u32 acrs[NUM_ACRS];
- __u32 orig_gpr2;
-} s390_compat_regs;
-
-typedef struct
-{
- __u32 gprs_high[NUM_GPRS];
-} s390_compat_regs_high;
-
-
/*
* Now for the user space program event recording (trace) definitions.
* The following structures are used only for the ptrace interface, don't
diff --git a/arch/s390/include/uapi/asm/statfs.h b/arch/s390/include/uapi/asm/statfs.h
index 5acca0a34c2..a61d538756f 100644
--- a/arch/s390/include/uapi/asm/statfs.h
+++ b/arch/s390/include/uapi/asm/statfs.h
@@ -7,9 +7,6 @@
#ifndef _S390_STATFS_H
#define _S390_STATFS_H
-#ifndef __s390x__
-#include <asm-generic/statfs.h>
-#else
/*
* We can't use <asm-generic/statfs.h> because in 64-bit mode
* we mix ints of different sizes in our struct statfs.
@@ -21,49 +18,33 @@ typedef __kernel_fsid_t fsid_t;
#endif
struct statfs {
- int f_type;
- int f_bsize;
- long f_blocks;
- long f_bfree;
- long f_bavail;
- long f_files;
- long f_ffree;
+ unsigned int f_type;
+ unsigned int f_bsize;
+ unsigned long f_blocks;
+ unsigned long f_bfree;
+ unsigned long f_bavail;
+ unsigned long f_files;
+ unsigned long f_ffree;
__kernel_fsid_t f_fsid;
- int f_namelen;
- int f_frsize;
- int f_flags;
- int f_spare[4];
+ unsigned int f_namelen;
+ unsigned int f_frsize;
+ unsigned int f_flags;
+ unsigned int f_spare[4];
};
struct statfs64 {
- int f_type;
- int f_bsize;
- long f_blocks;
- long f_bfree;
- long f_bavail;
- long f_files;
- long f_ffree;
+ unsigned int f_type;
+ unsigned int f_bsize;
+ unsigned long f_blocks;
+ unsigned long f_bfree;
+ unsigned long f_bavail;
+ unsigned long f_files;
+ unsigned long f_ffree;
__kernel_fsid_t f_fsid;
- int f_namelen;
- int f_frsize;
- int f_flags;
- int f_spare[4];
+ unsigned int f_namelen;
+ unsigned int f_frsize;
+ unsigned int f_flags;
+ unsigned int f_spare[4];
};
-struct compat_statfs64 {
- __u32 f_type;
- __u32 f_bsize;
- __u64 f_blocks;
- __u64 f_bfree;
- __u64 f_bavail;
- __u64 f_files;
- __u64 f_ffree;
- __kernel_fsid_t f_fsid;
- __u32 f_namelen;
- __u32 f_frsize;
- __u32 f_flags;
- __u32 f_spare[4];
-};
-
-#endif /* __s390x__ */
#endif
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 2ac311ef5c9..1386fcaf4ef 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -14,16 +14,25 @@ endif
CFLAGS_smp.o := -Wno-nonnull
#
+# Disable tailcall optimizations for stack / callchain walking functions
+# since this might generate broken code when accessing register 15 and
+# passing its content to other functions.
+#
+CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
+CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
+
+#
# Pass UTS_MACHINE for user_regset definition
#
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
-obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
- processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
- debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
- sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
+obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o
+obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
+obj-y += debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o
+obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
+obj-y += dumpstack.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index fface87056e..7a82f9f7010 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -35,6 +35,7 @@ int main(void)
DEFINE(__TI_task, offsetof(struct thread_info, task));
DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
DEFINE(__TI_flags, offsetof(struct thread_info, flags));
+ DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table));
DEFINE(__TI_cpu, offsetof(struct thread_info, cpu));
DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer));
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 6de049fbe62..c439ac9ced0 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -362,6 +362,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
/* set extra registers only for synchronous signals */
regs->gprs[4] = regs->int_code & 127;
regs->gprs[5] = regs->int_parm_long;
+ regs->gprs[6] = task_thread_info(current)->last_break;
}
/* Place signal number on stack to allow backtrace from handler. */
@@ -421,6 +422,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->gprs[2] = map_signal(sig);
regs->gprs[3] = (__force __u64) &frame->info;
regs->gprs[4] = (__force __u64) &frame->uc;
+ regs->gprs[5] = task_thread_info(current)->last_break;
return 0;
give_sigsegv:
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 3ad5e954016..7f4a4a8c847 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1696,14 +1696,15 @@ static struct insn *find_insn(unsigned char *code)
* insn_to_mnemonic - decode an s390 instruction
* @instruction: instruction to decode
* @buf: buffer to fill with mnemonic
+ * @len: length of buffer
*
* Decode the instruction at @instruction and store the corresponding
- * mnemonic into @buf.
+ * mnemonic into @buf of length @len.
* @buf is left unchanged if the instruction could not be decoded.
* Returns:
* %0 on success, %-ENOENT if the instruction was not found.
*/
-int insn_to_mnemonic(unsigned char *instruction, char buf[8])
+int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len)
{
struct insn *insn;
@@ -1711,10 +1712,10 @@ int insn_to_mnemonic(unsigned char *instruction, char buf[8])
if (!insn)
return -ENOENT;
if (insn->name[0] == '\0')
- snprintf(buf, 8, "%s",
+ snprintf(buf, len, "%s",
long_insn_name[(int) insn->name[1]]);
else
- snprintf(buf, 8, "%.5s", insn->name);
+ snprintf(buf, len, "%.5s", insn->name);
return 0;
}
EXPORT_SYMBOL_GPL(insn_to_mnemonic);
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
new file mode 100644
index 00000000000..03dce39d01e
--- /dev/null
+++ b/arch/s390/kernel/dumpstack.c
@@ -0,0 +1,236 @@
+/*
+ * Stack dumping functions
+ *
+ * Copyright IBM Corp. 1999, 2013
+ */
+
+#include <linux/kallsyms.h>
+#include <linux/hardirq.h>
+#include <linux/kprobes.h>
+#include <linux/utsname.h>
+#include <linux/export.h>
+#include <linux/kdebug.h>
+#include <linux/ptrace.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <asm/processor.h>
+#include <asm/debug.h>
+#include <asm/ipl.h>
+
+#ifndef CONFIG_64BIT
+#define LONG "%08lx "
+#define FOURLONG "%08lx %08lx %08lx %08lx\n"
+static int kstack_depth_to_print = 12;
+#else /* CONFIG_64BIT */
+#define LONG "%016lx "
+#define FOURLONG "%016lx %016lx %016lx %016lx\n"
+static int kstack_depth_to_print = 20;
+#endif /* CONFIG_64BIT */
+
+/*
+ * For show_trace we have three different stacks to consider:
+ *   - the panic stack which is used if the kernel stack has overflowed
+ *   - the asynchronous interrupt stack (cpu related)
+ *   - the synchronous kernel stack (process related)
+ * The stack trace can start at any of the three stacks and can potentially
+ * touch all of them. The order is: panic stack, async stack, sync stack.
+ */
+static unsigned long
+__show_trace(unsigned long sp, unsigned long low, unsigned long high)
+{
+ struct stack_frame *sf;
+ struct pt_regs *regs;
+
+ while (1) {
+ sp = sp & PSW_ADDR_INSN;
+ if (sp < low || sp > high - sizeof(*sf))
+ return sp;
+ sf = (struct stack_frame *) sp;
+ printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
+ print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
+ /* Follow the backchain. */
+ while (1) {
+ low = sp;
+ sp = sf->back_chain & PSW_ADDR_INSN;
+ if (!sp)
+ break;
+ if (sp <= low || sp > high - sizeof(*sf))
+ return sp;
+ sf = (struct stack_frame *) sp;
+ printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
+ print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
+ }
+ /* Zero backchain detected, check for interrupt frame. */
+ sp = (unsigned long) (sf + 1);
+ if (sp <= low || sp > high - sizeof(*regs))
+ return sp;
+ regs = (struct pt_regs *) sp;
+ printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
+ print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
+ low = sp;
+ sp = regs->gprs[15];
+ }
+}
+
+static void show_trace(struct task_struct *task, unsigned long *stack)
+{
+ register unsigned long __r15 asm ("15");
+ unsigned long sp;
+
+ sp = (unsigned long) stack;
+ if (!sp)
+ sp = task ? task->thread.ksp : __r15;
+ printk("Call Trace:\n");
+#ifdef CONFIG_CHECK_STACK
+ sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
+ S390_lowcore.panic_stack);
+#endif
+ sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
+ S390_lowcore.async_stack);
+ if (task)
+ __show_trace(sp, (unsigned long) task_stack_page(task),
+ (unsigned long) task_stack_page(task) + THREAD_SIZE);
+ else
+ __show_trace(sp, S390_lowcore.thread_info,
+ S390_lowcore.thread_info + THREAD_SIZE);
+ if (!task)
+ task = current;
+ debug_show_held_locks(task);
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+ register unsigned long *__r15 asm ("15");
+ unsigned long *stack;
+ int i;
+
+ if (!sp)
+ stack = task ? (unsigned long *) task->thread.ksp : __r15;
+ else
+ stack = sp;
+
+ for (i = 0; i < kstack_depth_to_print; i++) {
+ if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
+ break;
+ if ((i * sizeof(long) % 32) == 0)
+ printk("%s ", i == 0 ? "" : "\n");
+ printk(LONG, *stack++);
+ }
+ printk("\n");
+ show_trace(task, sp);
+}
+
+static void show_last_breaking_event(struct pt_regs *regs)
+{
+#ifdef CONFIG_64BIT
+ printk("Last Breaking-Event-Address:\n");
+ printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
+ print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
+#endif
+}
+
+/*
+ * The architecture-independent dump_stack generator
+ */
+void dump_stack(void)
+{
+ printk("CPU: %d %s %s %.*s\n",
+ task_thread_info(current)->cpu, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+ printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
+ current->comm, current->pid, current,
+ (void *) current->thread.ksp);
+ show_stack(NULL, NULL);
+}
+EXPORT_SYMBOL(dump_stack);
+
+static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
+{
+ return (regs->psw.mask & bits) / ((~bits + 1) & bits);
+}
+
+void show_registers(struct pt_regs *regs)
+{
+ char *mode;
+
+ mode = user_mode(regs) ? "User" : "Krnl";
+ printk("%s PSW : %p %p",
+ mode, (void *) regs->psw.mask,
+ (void *) regs->psw.addr);
+ print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
+ printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
+ "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
+ mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
+ mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
+ mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
+ mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
+ mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
+#ifdef CONFIG_64BIT
+ printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
+#endif
+ printk("\n%s GPRS: " FOURLONG, mode,
+ regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
+ printk(" " FOURLONG,
+ regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
+ printk(" " FOURLONG,
+ regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
+ printk(" " FOURLONG,
+ regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
+ show_code(regs);
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ printk("CPU: %d %s %s %.*s\n",
+ task_thread_info(current)->cpu, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+ printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
+ current->comm, current->pid, current,
+ (void *) current->thread.ksp);
+ show_registers(regs);
+ /* Show stack backtrace if pt_regs is from kernel mode */
+ if (!user_mode(regs))
+ show_trace(NULL, (unsigned long *) regs->gprs[15]);
+ show_last_breaking_event(regs);
+}
+
+static DEFINE_SPINLOCK(die_lock);
+
+void die(struct pt_regs *regs, const char *str)
+{
+ static int die_counter;
+
+ oops_enter();
+ lgr_info_log();
+ debug_stop_all();
+ console_verbose();
+ spin_lock_irq(&die_lock);
+ bust_spinlocks(1);
+ printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
+#ifdef CONFIG_PREEMPT
+ printk("PREEMPT ");
+#endif
+#ifdef CONFIG_SMP
+ printk("SMP ");
+#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ printk("DEBUG_PAGEALLOC");
+#endif
+ printk("\n");
+ notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
+ print_modules();
+ show_regs(regs);
+ bust_spinlocks(0);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ spin_unlock_irq(&die_lock);
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception: panic_on_oops");
+ oops_exit();
+ do_exit(SIGSEGV);
+}
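
The new dumpstack.c centralizes the backchain walk: each frame stores its caller's frame in back_chain and the saved return address in gprs[8], and the walk stops on a zero backchain or once the pointer leaves the low/high window of the current stack. A hedged user-space sketch of that loop follows; the struct below is an illustrative stand-in, not the real struct stack_frame.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative frame: a back pointer plus a saved return address. */
    struct frame {
        struct frame *back_chain;   /* NULL terminates the chain */
        unsigned long ret_addr;     /* what dumpstack reads from gprs[8] */
    };

    static void walk(struct frame *sf, uintptr_t low, uintptr_t high)
    {
        while (sf) {
            uintptr_t sp = (uintptr_t)sf;

            if (sp < low || sp + sizeof(*sf) > high)
                return;             /* left the current stack, give up */
            printf(" [<%016lx>]\n", sf->ret_addr);
            sf = sf->back_chain;    /* follow the backchain */
        }
    }

    int main(void)
    {
        struct frame frames[3];

        frames[2] = (struct frame){ NULL, 0x3000 };
        frames[1] = (struct frame){ &frames[2], 0x2000 };
        frames[0] = (struct frame){ &frames[1], 0x1000 };

        walk(&frames[0], (uintptr_t)frames,
             (uintptr_t)frames + sizeof(frames));
        return 0;
    }
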
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 55022852326..4d5e6f8a797 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -45,6 +45,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
+STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
#define BASED(name) name-system_call(%r13)
@@ -97,10 +98,10 @@ STACK_SIZE = 1 << STACK_SHIFT
sra %r14,\shift
jnz 1f
CHECK_STACK 1<<\shift,\savearea
+ ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: l %r15,\stack # load target stack
-2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
+2: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
.macro ADD64 high,low,timer
@@ -150,7 +151,7 @@ ENTRY(__switch_to)
l %r4,__THREAD_info(%r2) # get thread_info of prev
l %r5,__THREAD_info(%r3) # get thread_info of next
lr %r15,%r5
- ahi %r15,STACK_SIZE # end of kernel stack of next
+ ahi %r15,STACK_INIT # end of kernel stack of next
st %r3,__LC_CURRENT # store task struct of next
st %r5,__LC_THREAD_INFO # store thread info of next
st %r15,__LC_KERNEL_STACK # store end of kernel stack
@@ -178,7 +179,6 @@ sysc_stm:
l %r13,__LC_SVC_NEW_PSW+4
sysc_per:
l %r15,__LC_KERNEL_STACK
- ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
sysc_vtime:
UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
@@ -188,6 +188,7 @@ sysc_vtime:
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
sysc_do_svc:
oi __TI_flags+3(%r12),_TIF_SYSCALL
+ l %r10,__TI_sysc_table(%r12) # 31 bit system call table
lh %r8,__PT_INT_CODE+2(%r11)
sla %r8,2 # shift and test for svc0
jnz sysc_nr_ok
@@ -198,7 +199,6 @@ sysc_do_svc:
lr %r8,%r1
sla %r8,2
sysc_nr_ok:
- l %r10,BASED(.Lsys_call_table) # 31 bit system call table
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
st %r2,__PT_ORIG_GPR2(%r11)
st %r7,STACK_FRAME_OVERHEAD(%r15)
@@ -359,11 +359,11 @@ ENTRY(pgm_check_handler)
tm __LC_PGM_ILC+3,0x80 # check for per exception
jnz pgm_svcper # -> single stepped svc
0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+ ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
l %r15,__LC_KERNEL_STACK
-2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
+2: la %r11,STACK_FRAME_OVERHEAD(%r15)
stm %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
stm %r8,%r9,__PT_PSW(%r11)
@@ -485,7 +485,6 @@ io_work:
#
io_work_user:
l %r1,__LC_KERNEL_STACK
- ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
@@ -636,7 +635,8 @@ ENTRY(mcck_int_handler)
UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
mcck_skip:
SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
- mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA
+ stm %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
stm %r8,%r9,__PT_PSW(%r11)
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
l %r1,BASED(.Ldo_machine_check)
@@ -645,7 +645,6 @@ mcck_skip:
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno mcck_return
l %r1,__LC_KERNEL_STACK # switch to kernel stack
- ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r15)
@@ -673,6 +672,7 @@ mcck_panic:
sra %r14,PAGE_SHIFT
jz 0f
l %r15,__LC_PANIC_STACK
+ j mcck_skip
0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j mcck_skip
@@ -713,12 +713,10 @@ ENTRY(restart_int_handler)
*/
stack_overflow:
l %r15,__LC_PANIC_STACK # change to panic stack
- ahi %r15,-__PT_SIZE # create pt_regs
- stm %r0,%r7,__PT_R0(%r15)
- stm %r8,%r9,__PT_PSW(%r15)
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
+ stm %r0,%r7,__PT_R0(%r11)
+ stm %r8,%r9,__PT_PSW(%r11)
mvc __PT_R8(32,%r11),0(%r14)
- lr %r15,%r11
- ahi %r15,-STACK_FRAME_OVERHEAD
l %r1,BASED(1f)
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
lr %r2,%r11 # pass pointer to pt_regs
@@ -798,15 +796,14 @@ cleanup_system_call:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
# set up saved register 11
l %r15,__LC_KERNEL_STACK
- ahi %r15,-__PT_SIZE
- st %r15,12(%r11) # r11 pt_regs pointer
+ la %r9,STACK_FRAME_OVERHEAD(%r15)
+ st %r9,12(%r11) # r11 pt_regs pointer
# fill pt_regs
- mvc __PT_R8(32,%r15),__LC_SAVE_AREA_SYNC
- stm %r0,%r7,__PT_R0(%r15)
- mvc __PT_PSW(8,%r15),__LC_SVC_OLD_PSW
- mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC
+ mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC
+ stm %r0,%r7,__PT_R0(%r9)
+ mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW
+ mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
# setup saved register 15
- ahi %r15,-STACK_FRAME_OVERHEAD
st %r15,28(%r11) # r15 stack pointer
# set new psw address and exit
l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000
@@ -909,7 +906,6 @@ cleanup_idle_wait:
.Ltrace_enter: .long do_syscall_trace_enter
.Ltrace_exit: .long do_syscall_trace_exit
.Lschedule_tail: .long schedule_tail
-.Lsys_call_table: .long sys_call_table
.Lsysc_per: .long sysc_per + 0x80000000
#ifdef CONFIG_TRACE_IRQFLAGS
.Lhardirqs_on: .long trace_hardirqs_on_caller
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index c3a736a3ed4..aa0ab02e959 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -7,6 +7,7 @@
#include <asm/cputime.h>
extern void *restart_stack;
+extern unsigned long suspend_zero_pages;
void system_call(void);
void pgm_check_handler(void);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 9c837c10129..4c17eece707 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -39,6 +39,7 @@ __PT_R15 = __PT_GPRS + 120
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
+STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING | _TIF_PER_TRAP )
@@ -124,10 +125,10 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
srag %r14,%r14,\shift
jnz 1f
CHECK_STACK 1<<\shift,\savearea
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: lg %r15,\stack # load target stack
-2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
+2: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
.macro UPDATE_VTIME scratch,enter_timer
@@ -177,7 +178,7 @@ ENTRY(__switch_to)
lg %r4,__THREAD_info(%r2) # get thread_info of prev
lg %r5,__THREAD_info(%r3) # get thread_info of next
lgr %r15,%r5
- aghi %r15,STACK_SIZE # end of kernel stack of next
+ aghi %r15,STACK_INIT # end of kernel stack of next
stg %r3,__LC_CURRENT # store task struct of next
stg %r5,__LC_THREAD_INFO # store thread info of next
stg %r15,__LC_KERNEL_STACK # store end of kernel stack
@@ -203,10 +204,8 @@ sysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
- larl %r13,system_call
sysc_per:
lg %r15,__LC_KERNEL_STACK
- aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
sysc_vtime:
UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
@@ -217,6 +216,7 @@ sysc_vtime:
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
sysc_do_svc:
oi __TI_flags+7(%r12),_TIF_SYSCALL
+ lg %r10,__TI_sysc_table(%r12) # address of system call table
llgh %r8,__PT_INT_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0
jnz sysc_nr_ok
@@ -227,13 +227,6 @@ sysc_do_svc:
sth %r1,__PT_INT_CODE+2(%r11)
slag %r8,%r1,2
sysc_nr_ok:
- larl %r10,sys_call_table # 64 bit system call table
-#ifdef CONFIG_COMPAT
- tm __TI_flags+5(%r12),(_TIF_31BIT>>16)
- jno sysc_noemu
- larl %r10,sys_call_table_emu # 31 bit system call table
-sysc_noemu:
-#endif
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stg %r2,__PT_ORIG_GPR2(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
@@ -389,6 +382,7 @@ ENTRY(pgm_check_handler)
tm __LC_PGM_ILC+3,0x80 # check for per exception
jnz pgm_svcper # -> single stepped svc
0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
LAST_BREAK %r14
@@ -398,8 +392,7 @@ ENTRY(pgm_check_handler)
tm __LC_PGM_ILC+2,0x02 # check for transaction abort
jz 2f
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
-2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
+2: la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
stmg %r8,%r9,__PT_PSW(%r11)
@@ -526,7 +519,6 @@ io_work:
#
io_work_user:
lg %r1,__LC_KERNEL_STACK
- aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
@@ -678,8 +670,9 @@ ENTRY(mcck_int_handler)
UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
LAST_BREAK %r14
mcck_skip:
- lghi %r14,__LC_GPREGS_SAVE_AREA
- mvc __PT_R0(128,%r11),0(%r14)
+ lghi %r14,__LC_GPREGS_SAVE_AREA+64
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),0(%r14)
stmg %r8,%r9,__PT_PSW(%r11)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
@@ -687,7 +680,6 @@ mcck_skip:
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno mcck_return
lg %r1,__LC_KERNEL_STACK # switch to kernel stack
- aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
@@ -754,14 +746,12 @@ ENTRY(restart_int_handler)
* Setup a pt_regs so that show_trace can provide a good call trace.
*/
stack_overflow:
- lg %r11,__LC_PANIC_STACK # change to panic stack
- aghi %r11,-__PT_SIZE # create pt_regs
+ lg %r15,__LC_PANIC_STACK # change to panic stack
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_R8(64,%r11),0(%r14)
stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
- lgr %r15,%r11
- aghi %r15,-STACK_FRAME_OVERHEAD
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
jg kernel_stack_overflow
@@ -845,15 +835,14 @@ cleanup_system_call:
mvc __TI_last_break(8,%r12),16(%r11)
0: # set up saved register r11
lg %r15,__LC_KERNEL_STACK
- aghi %r15,-__PT_SIZE
- stg %r15,24(%r11) # r11 pt_regs pointer
+ la %r9,STACK_FRAME_OVERHEAD(%r15)
+ stg %r9,24(%r11) # r11 pt_regs pointer
# fill pt_regs
- mvc __PT_R8(64,%r15),__LC_SAVE_AREA_SYNC
- stmg %r0,%r7,__PT_R0(%r15)
- mvc __PT_PSW(16,%r15),__LC_SVC_OLD_PSW
- mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC
+ mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
+ stmg %r0,%r7,__PT_R0(%r9)
+ mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
+ mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
# setup saved register r15
- aghi %r15,-STACK_FRAME_OVERHEAD
stg %r15,56(%r11) # r15 stack pointer
# set new psw address and exit
larl %r9,sysc_do_svc
@@ -1010,6 +999,7 @@ sys_call_table:
#ifdef CONFIG_COMPAT
#define SYSCALL(esa,esame,emu) .long emu
+ .globl sys_call_table_emu
sys_call_table_emu:
#include "syscalls.S"
#undef SYSCALL
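
With __TI_sysc_table, both entry paths load the system call table from thread_info instead of hard-coding sys_call_table, so a 31-bit compat task and a 64-bit task dispatch through different tables from the same entry code. A rough sketch of that per-task indirection follows, with a made-up thread_info layout and toy tables; only the indirection itself mirrors the patch.

    #include <stdio.h>

    typedef long (*syscall_fn)(long);

    static long sys_hello64(long arg) { return arg + 64; }
    static long sys_hello31(long arg) { return arg + 31; }

    static syscall_fn table64[] = { sys_hello64 };
    static syscall_fn table31[] = { sys_hello31 };

    /* Stand-in for struct thread_info with a per-task sys_call_table pointer. */
    struct thread_info {
        syscall_fn *sys_call_table;
    };

    static long dispatch(struct thread_info *ti, int nr, long arg)
    {
        /* entry code: load the table from thread_info, index by syscall nr */
        return ti->sys_call_table[nr](arg);
    }

    int main(void)
    {
        struct thread_info task64 = { table64 };
        struct thread_info task31 = { table31 };

        printf("%ld %ld\n", dispatch(&task64, 0, 0), dispatch(&task31, 0, 0));
        return 0;
    }
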
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index b3de2770001..ac2178161ec 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -13,6 +13,7 @@
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
+#include <linux/suspend.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
@@ -67,6 +68,35 @@ void setup_regs(void)
memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
}
+/*
+ * PM notifier callback for kdump
+ */
+static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action,
+ void *ptr)
+{
+ switch (action) {
+ case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+ if (crashk_res.start)
+ crash_map_reserved_pages();
+ break;
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ if (crashk_res.start)
+ crash_unmap_reserved_pages();
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static int __init machine_kdump_pm_init(void)
+{
+ pm_notifier(machine_kdump_pm_cb, 0);
+ return 0;
+}
+arch_initcall(machine_kdump_pm_init);
#endif
/*
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 536d64579d9..2bc3eddae34 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -61,18 +61,8 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return sf->gprs[8];
}
-/*
- * The idle loop on a S390...
- */
-static void default_idle(void)
+void arch_cpu_idle(void)
{
- if (cpu_is_offline(smp_processor_id()))
- cpu_die();
- local_irq_disable();
- if (need_resched()) {
- local_irq_enable();
- return;
- }
local_mcck_disable();
if (test_thread_flag(TIF_MCCK_PENDING)) {
local_mcck_enable();
@@ -83,19 +73,15 @@ static void default_idle(void)
vtime_stop_cpu();
}
-void cpu_idle(void)
+void arch_cpu_idle_exit(void)
{
- for (;;) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
- while (!need_resched() && !test_thread_flag(TIF_MCCK_PENDING))
- default_idle();
- rcu_idle_exit();
- tick_nohz_idle_exit();
- if (test_thread_flag(TIF_MCCK_PENDING))
- s390_handle_mcck();
- schedule_preempt_disabled();
- }
+ if (test_thread_flag(TIF_MCCK_PENDING))
+ s390_handle_mcck();
+}
+
+void arch_cpu_idle_dead(void)
+{
+ cpu_die();
}
extern void __kprobes kernel_thread_starter(void);
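
process.c no longer owns the idle loop; it only provides the arch hooks that the generic cpu_startup_entry() loop invokes. A simplified, hedged analog of that inversion of control follows; the loop and hook names here are illustrative, not the generic kernel code.

    #include <stdio.h>
    #include <stdbool.h>

    /* Architecture hooks, analogous to arch_cpu_idle() and arch_cpu_idle_exit(). */
    static void arch_idle(void)      { puts("wait for interrupt"); }
    static void arch_idle_exit(void) { puts("handle pending machine checks"); }

    /* Simplified stand-in for the generic idle loop. */
    static void idle_loop(bool (*need_resched)(void))
    {
        while (!need_resched()) {
            arch_idle();
            arch_idle_exit();
        }
        puts("schedule()");
    }

    static int ticks;
    static bool fake_need_resched(void) { return ++ticks > 3; }

    int main(void)
    {
        idle_loop(fake_need_resched);
        return 0;
    }
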
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index a5360de85ec..0f419c5765c 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -377,11 +377,14 @@ static void __init setup_lowcore(void)
PSW_MASK_DAT | PSW_MASK_MCHECK;
lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
lc->clock_comparator = -1ULL;
- lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
+ lc->kernel_stack = ((unsigned long) &init_thread_union)
+ + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->async_stack = (unsigned long)
- __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
+ __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
+ + ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->panic_stack = (unsigned long)
- __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
+ __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
+ + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->current_task = (unsigned long) init_thread_union.thread_info.task;
lc->thread_info = (unsigned long) &init_thread_union;
lc->machine_flags = S390_lowcore.machine_flags;
@@ -571,6 +574,8 @@ static void __init setup_memory_end(void)
/* Split remaining virtual space between 1:1 mapping & vmemmap array */
tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
+ /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
+ tmp = SECTION_ALIGN_UP(tmp);
tmp = VMALLOC_START - tmp * sizeof(struct page);
tmp &= ~((vmax >> 11) - 1); /* align to page table level */
tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
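
The repeated "- STACK_FRAME_OVERHEAD - sizeof(struct pt_regs)" terms mean the lowcore stack pointers are now stored pre-biased: room for one register save frame plus a pt_regs is reserved once at setup, matching STACK_INIT in entry.S/entry64.S, so the entry macros no longer subtract it on every interrupt. A small sketch of the arithmetic with made-up sizes follows; the real STACK_FRAME_OVERHEAD and pt_regs size differ.

    #include <stdio.h>

    #define THREAD_SIZE          (16 * 1024)   /* illustrative */
    #define STACK_FRAME_OVERHEAD 160           /* illustrative */
    #define PT_REGS_SIZE         336           /* illustrative */

    int main(void)
    {
        unsigned long stack_base = 0x100000;   /* hypothetical allocation */

        /* old scheme: pointer at the very top, biased at entry time */
        unsigned long old_top = stack_base + THREAD_SIZE;

        /* new scheme: pre-biased, leaves room for frame + pt_regs up front */
        unsigned long stack_init = stack_base + THREAD_SIZE
            - STACK_FRAME_OVERHEAD - PT_REGS_SIZE;

        printf("stack top                 %#lx\n", old_top);
        printf("kernel_stack (pre-biased) %#lx\n", stack_init);
        printf("pt_regs would live at     %#lx\n",
               stack_init + STACK_FRAME_OVERHEAD);
        return 0;
    }
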
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 549c9d173c0..8074cb4b7cb 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -181,8 +181,10 @@ static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc = pcpu->lowcore;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
- lc->async_stack = pcpu->async_stack + ASYNC_SIZE;
- lc->panic_stack = pcpu->panic_stack + PAGE_SIZE;
+ lc->async_stack = pcpu->async_stack + ASYNC_SIZE
+ - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
+ lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
+ - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->cpu_nr = cpu;
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE) {
@@ -253,7 +255,8 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
struct _lowcore *lc = pcpu->lowcore;
struct thread_info *ti = task_thread_info(tsk);
- lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE;
+ lc->kernel_stack = (unsigned long) task_stack_page(tsk)
+ + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->thread_info = (unsigned long) task_thread_info(tsk);
lc->current_task = (unsigned long) tsk;
lc->user_timer = ti->user_timer;
@@ -711,8 +714,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
set_cpu_online(smp_processor_id(), true);
inc_irq_stat(CPU_RST);
local_irq_enable();
- /* cpu_idle will call schedule for us */
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
}
/* Upping and downing of CPUs */
@@ -810,8 +812,10 @@ void __init smp_prepare_boot_cpu(void)
pcpu->state = CPU_STATE_CONFIGURED;
pcpu->address = boot_cpu_address;
pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
- pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
- pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
+ pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
+ + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+ pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
+ + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
S390_lowcore.percpu_offset = __per_cpu_offset[0];
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
set_cpu_present(0, true);
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index aa1494d0e38..c479d2f9605 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -41,6 +41,7 @@ struct page_key_data {
static struct page_key_data *page_key_data;
static struct page_key_data *page_key_rp, *page_key_wp;
static unsigned long page_key_rx, page_key_wx;
+unsigned long suspend_zero_pages;
/*
* For each page in the hibernation image one additional byte is
@@ -149,6 +150,36 @@ int pfn_is_nosave(unsigned long pfn)
return 0;
}
+/*
+ * PM notifier callback for suspend
+ */
+static int suspend_pm_cb(struct notifier_block *nb, unsigned long action,
+ void *ptr)
+{
+ switch (action) {
+ case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+ suspend_zero_pages = __get_free_pages(GFP_KERNEL, LC_ORDER);
+ if (!suspend_zero_pages)
+ return NOTIFY_BAD;
+ break;
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ free_pages(suspend_zero_pages, LC_ORDER);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static int __init suspend_pm_init(void)
+{
+ pm_notifier(suspend_pm_cb, 0);
+ return 0;
+}
+arch_initcall(suspend_pm_init);
+
void save_processor_state(void)
{
/* swsusp_arch_suspend() actually saves all cpu register contents.
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index d4ca4e0617b..c487be4cfc8 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -36,8 +36,8 @@ ENTRY(swsusp_arch_suspend)
/* Store prefix register on stack */
stpx __SF_EMPTY(%r15)
- /* Save prefix register contents for lowcore */
- llgf %r4,__SF_EMPTY(%r15)
+ /* Save prefix register contents for lowcore copy */
+ llgf %r10,__SF_EMPTY(%r15)
/* Get pointer to save area */
lghi %r1,0x1000
@@ -91,7 +91,18 @@ ENTRY(swsusp_arch_suspend)
xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
spx __SF_EMPTY(%r15)
+ /* Save absolute zero pages */
+ larl %r2,suspend_zero_pages
+ lg %r2,0(%r2)
+ lghi %r4,0
+ lghi %r3,2*PAGE_SIZE
+ lghi %r5,2*PAGE_SIZE
+1: mvcle %r2,%r4,0
+ jo 1b
+
+ /* Copy lowcore to absolute zero lowcore */
lghi %r2,0
+ lgr %r4,%r10
lghi %r3,2*PAGE_SIZE
lghi %r5,2*PAGE_SIZE
1: mvcle %r2,%r4,0
@@ -248,8 +259,20 @@ restore_registers:
/* Load old stack */
lg %r15,0x2f8(%r13)
+ /* Save prefix register */
+ mvc __SF_EMPTY(4,%r15),0x318(%r13)
+
+ /* Restore absolute zero pages */
+ lghi %r2,0
+ larl %r4,suspend_zero_pages
+ lg %r4,0(%r4)
+ lghi %r3,2*PAGE_SIZE
+ lghi %r5,2*PAGE_SIZE
+1: mvcle %r2,%r4,0
+ jo 1b
+
/* Restore prefix register */
- spx 0x318(%r13)
+ spx __SF_EMPTY(%r15)
/* Activate DAT */
stosm __SF_EMPTY(%r15),0x04
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 13dd63fba36..c5762324d9e 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -12,49 +12,16 @@
* 'Traps.c' handles hardware traps and faults after we have saved some
* state in 'asm.s'.
*/
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
#include <linux/ptrace.h>
-#include <linux/timer.h>
+#include <linux/sched.h>
#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/seq_file.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/kdebug.h>
-#include <linux/kallsyms.h>
-#include <linux/reboot.h>
-#include <linux/kprobes.h>
-#include <linux/bug.h>
-#include <linux/utsname.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <linux/atomic.h>
-#include <asm/mathemu.h>
-#include <asm/cpcmd.h>
-#include <asm/lowcore.h>
-#include <asm/debug.h>
-#include <asm/ipl.h>
#include "entry.h"
int show_unhandled_signals = 1;
-#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
-
-#ifndef CONFIG_64BIT
-#define LONG "%08lx "
-#define FOURLONG "%08lx %08lx %08lx %08lx\n"
-static int kstack_depth_to_print = 12;
-#else /* CONFIG_64BIT */
-#define LONG "%016lx "
-#define FOURLONG "%016lx %016lx %016lx %016lx\n"
-static int kstack_depth_to_print = 20;
-#endif /* CONFIG_64BIT */
-
static inline void __user *get_trap_ip(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
@@ -72,215 +39,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
#endif
}
-/*
- * For show_trace we have tree different stack to consider:
- * - the panic stack which is used if the kernel stack has overflown
- * - the asynchronous interrupt stack (cpu related)
- * - the synchronous kernel stack (process related)
- * The stack trace can start at any of the three stack and can potentially
- * touch all of them. The order is: panic stack, async stack, sync stack.
- */
-static unsigned long
-__show_trace(unsigned long sp, unsigned long low, unsigned long high)
-{
- struct stack_frame *sf;
- struct pt_regs *regs;
-
- while (1) {
- sp = sp & PSW_ADDR_INSN;
- if (sp < low || sp > high - sizeof(*sf))
- return sp;
- sf = (struct stack_frame *) sp;
- printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
- print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
- /* Follow the backchain. */
- while (1) {
- low = sp;
- sp = sf->back_chain & PSW_ADDR_INSN;
- if (!sp)
- break;
- if (sp <= low || sp > high - sizeof(*sf))
- return sp;
- sf = (struct stack_frame *) sp;
- printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
- print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
- }
- /* Zero backchain detected, check for interrupt frame. */
- sp = (unsigned long) (sf + 1);
- if (sp <= low || sp > high - sizeof(*regs))
- return sp;
- regs = (struct pt_regs *) sp;
- printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
- print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
- low = sp;
- sp = regs->gprs[15];
- }
-}
-
-static void show_trace(struct task_struct *task, unsigned long *stack)
-{
- register unsigned long __r15 asm ("15");
- unsigned long sp;
-
- sp = (unsigned long) stack;
- if (!sp)
- sp = task ? task->thread.ksp : __r15;
- printk("Call Trace:\n");
-#ifdef CONFIG_CHECK_STACK
- sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
- S390_lowcore.panic_stack);
-#endif
- sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
- S390_lowcore.async_stack);
- if (task)
- __show_trace(sp, (unsigned long) task_stack_page(task),
- (unsigned long) task_stack_page(task) + THREAD_SIZE);
- else
- __show_trace(sp, S390_lowcore.thread_info,
- S390_lowcore.thread_info + THREAD_SIZE);
- if (!task)
- task = current;
- debug_show_held_locks(task);
-}
-
-void show_stack(struct task_struct *task, unsigned long *sp)
-{
- register unsigned long * __r15 asm ("15");
- unsigned long *stack;
- int i;
-
- if (!sp)
- stack = task ? (unsigned long *) task->thread.ksp : __r15;
- else
- stack = sp;
-
- for (i = 0; i < kstack_depth_to_print; i++) {
- if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
- break;
- if ((i * sizeof(long) % 32) == 0)
- printk("%s ", i == 0 ? "" : "\n");
- printk(LONG, *stack++);
- }
- printk("\n");
- show_trace(task, sp);
-}
-
-static void show_last_breaking_event(struct pt_regs *regs)
-{
-#ifdef CONFIG_64BIT
- printk("Last Breaking-Event-Address:\n");
- printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
- print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
-#endif
-}
-
-/*
- * The architecture-independent dump_stack generator
- */
-void dump_stack(void)
-{
- printk("CPU: %d %s %s %.*s\n",
- task_thread_info(current)->cpu, print_tainted(),
- init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
- printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
- current->comm, current->pid, current,
- (void *) current->thread.ksp);
- show_stack(NULL, NULL);
-}
-EXPORT_SYMBOL(dump_stack);
-
-static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
-{
- return (regs->psw.mask & bits) / ((~bits + 1) & bits);
-}
-
-void show_registers(struct pt_regs *regs)
-{
- char *mode;
-
- mode = user_mode(regs) ? "User" : "Krnl";
- printk("%s PSW : %p %p",
- mode, (void *) regs->psw.mask,
- (void *) regs->psw.addr);
- print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
- printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
- "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
- mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
- mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
- mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
- mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
- mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
-#ifdef CONFIG_64BIT
- printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
-#endif
- printk("\n%s GPRS: " FOURLONG, mode,
- regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
- printk(" " FOURLONG,
- regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
- printk(" " FOURLONG,
- regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
- printk(" " FOURLONG,
- regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
-
- show_code(regs);
-}
-
-void show_regs(struct pt_regs *regs)
-{
- printk("CPU: %d %s %s %.*s\n",
- task_thread_info(current)->cpu, print_tainted(),
- init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
- printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
- current->comm, current->pid, current,
- (void *) current->thread.ksp);
- show_registers(regs);
- /* Show stack backtrace if pt_regs is from kernel mode */
- if (!user_mode(regs))
- show_trace(NULL, (unsigned long *) regs->gprs[15]);
- show_last_breaking_event(regs);
-}
-
-static DEFINE_SPINLOCK(die_lock);
-
-void die(struct pt_regs *regs, const char *str)
-{
- static int die_counter;
-
- oops_enter();
- lgr_info_log();
- debug_stop_all();
- console_verbose();
- spin_lock_irq(&die_lock);
- bust_spinlocks(1);
- printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
-#ifdef CONFIG_PREEMPT
- printk("PREEMPT ");
-#endif
-#ifdef CONFIG_SMP
- printk("SMP ");
-#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
- printk("DEBUG_PAGEALLOC");
-#endif
- printk("\n");
- notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
- print_modules();
- show_regs(regs);
- bust_spinlocks(0);
- add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
- spin_unlock_irq(&die_lock);
- if (in_interrupt())
- panic("Fatal exception in interrupt");
- if (panic_on_oops)
- panic("Fatal exception: panic_on_oops");
- oops_exit();
- do_exit(SIGSEGV);
-}
-
static inline void report_user_fault(struct pt_regs *regs, int signr)
{
if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index a0042acbd98..3fb09359eda 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -158,8 +158,6 @@ void __kprobes vtime_stop_cpu(void)
unsigned long psw_mask;
trace_hardirqs_on();
- /* Don't trace preempt off for idle. */
- stop_critical_timings();
/* Wait for external, I/O or machine check interrupt. */
psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
@@ -169,9 +167,6 @@ void __kprobes vtime_stop_cpu(void)
/* Call the assembler magic in entry.S */
psw_idle(idle, psw_mask);
- /* Reenable preemption tracer. */
- start_critical_timings();
-
/* Account time spent with enabled wait psw loaded as idle time. */
idle->sequence++;
smp_wmb();
diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/trace.h
index 2b29e62351d..53252d2d472 100644
--- a/arch/s390/kvm/trace.h
+++ b/arch/s390/kvm/trace.h
@@ -117,7 +117,7 @@ TRACE_EVENT(kvm_s390_intercept_instruction,
__entry->instruction,
insn_to_mnemonic((unsigned char *)
&__entry->instruction,
- __entry->insn) ?
+ __entry->insn, sizeof(__entry->insn)) ?
"unknown" : __entry->insn)
);
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index dff631d34b4..466fb338396 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -77,42 +77,69 @@ static size_t copy_in_kernel(size_t count, void __user *to,
* >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the address
* contains the (negative) exception code.
*/
-static __always_inline unsigned long follow_table(struct mm_struct *mm,
- unsigned long addr, int write)
+#ifdef CONFIG_64BIT
+static unsigned long follow_table(struct mm_struct *mm,
+ unsigned long address, int write)
{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *ptep;
+ unsigned long *table = (unsigned long *)__pa(mm->pgd);
+
+ switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
+ case _ASCE_TYPE_REGION1:
+ table = table + ((address >> 53) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INV))
+ return -0x39UL;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ case _ASCE_TYPE_REGION2:
+ table = table + ((address >> 42) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INV))
+ return -0x3aUL;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ case _ASCE_TYPE_REGION3:
+ table = table + ((address >> 31) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INV))
+ return -0x3bUL;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ case _ASCE_TYPE_SEGMENT:
+ table = table + ((address >> 20) & 0x7ff);
+ if (unlikely(*table & _SEGMENT_ENTRY_INV))
+ return -0x10UL;
+ if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
+ if (write && (*table & _SEGMENT_ENTRY_RO))
+ return -0x04UL;
+ return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
+ (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
+ }
+ table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+ }
+ table = table + ((address >> 12) & 0xff);
+ if (unlikely(*table & _PAGE_INVALID))
+ return -0x11UL;
+ if (write && (*table & _PAGE_RO))
+ return -0x04UL;
+ return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
+}
- pgd = pgd_offset(mm, addr);
- if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
- return -0x3aUL;
+#else /* CONFIG_64BIT */
- pud = pud_offset(pgd, addr);
- if (pud_none(*pud) || unlikely(pud_bad(*pud)))
- return -0x3bUL;
+static unsigned long follow_table(struct mm_struct *mm,
+ unsigned long address, int write)
+{
+ unsigned long *table = (unsigned long *)__pa(mm->pgd);
- pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd))
+ table = table + ((address >> 20) & 0x7ff);
+ if (unlikely(*table & _SEGMENT_ENTRY_INV))
return -0x10UL;
- if (pmd_large(*pmd)) {
- if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
- return -0x04UL;
- return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
- }
- if (unlikely(pmd_bad(*pmd)))
- return -0x10UL;
-
- ptep = pte_offset_map(pmd, addr);
- if (!pte_present(*ptep))
+ table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+ table = table + ((address >> 12) & 0xff);
+ if (unlikely(*table & _PAGE_INVALID))
return -0x11UL;
- if (write && (!pte_write(*ptep) || !pte_dirty(*ptep)))
+ if (write && (*table & _PAGE_RO))
return -0x04UL;
-
- return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
+ return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}
+#endif /* CONFIG_64BIT */
+
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
size_t n, int write_user)
{
@@ -197,7 +224,7 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
static size_t clear_user_pt(size_t n, void __user *to)
{
- void *zpage = &empty_zero_page;
+ void *zpage = (void *) empty_zero_page;
long done, size, ret;
done = 0;
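
As the comment above the hunk says, follow_table() returns either a usable address or a value >= -4095 holding the negative DAT exception code, so callers separate the two cases with an IS_ERR_VALUE-style range check instead of a status flag. A minimal sketch of that convention follows; the walker below is hypothetical and collapses the real table walk into a lookup.

    #include <stdio.h>

    #define MAX_ERRNO 4095UL
    #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    /* Hypothetical walker: even addresses "translate", odd ones "fault". */
    static unsigned long follow_table_sketch(unsigned long addr)
    {
        if (addr & 1)
            return -0x11UL;             /* page-translation exception code */
        return 0x200000UL + (addr & 0xfff);
    }

    int main(void)
    {
        unsigned long addrs[] = { 0x1234UL, 0x1235UL };

        for (int i = 0; i < 2; i++) {
            unsigned long r = follow_table_sketch(addrs[i]);

            if (IS_ERR_VALUE(r))
                printf("addr %#lx: fault, code %#lx\n", addrs[i], -r);
            else
                printf("addr %#lx -> %#lx\n", addrs[i], r);
        }
        return 0;
    }
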
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 479e9428291..9d84a1feefe 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -458,12 +458,10 @@ static int __init cmm_init(void)
if (rc)
goto out_pm;
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
- rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
- if (rc)
- goto out_kthread;
- return 0;
+ if (!IS_ERR(cmm_thread_ptr))
+ return 0;
-out_kthread:
+ rc = PTR_ERR(cmm_thread_ptr);
unregister_pm_notifier(&cmm_power_notifier);
out_pm:
unregister_oom_notifier(&cmm_oom_nb);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 2fb9e63b8fc..047c3e4c59a 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -395,8 +395,13 @@ void __kprobes do_protection_exception(struct pt_regs *regs)
int fault;
trans_exc_code = regs->int_parm_long;
- /* Protection exception is suppressing, decrement psw address. */
- regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
+ /*
+ * Protection exceptions are suppressing, decrement psw address.
+ * The exceptions to this rule are aborted transactions, for which
+ * the PSW already points to the correct location.
+ */
+ if (!(regs->int_code & 0x200))
+ regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
/*
* Check for low-address protection. This needs to be treated
* as a special case because the translation exception code
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 532525ec88c..121089d5780 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -39,7 +39,7 @@ int arch_prepare_hugepage(struct page *page)
if (!ptep)
return -ENOMEM;
- pte = mk_pte(page, PAGE_RW);
+ pte_val(pte) = addr;
for (i = 0; i < PTRS_PER_PTE; i++) {
set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
pte_val(pte) += PAGE_SIZE;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 49ce6bb2c64..0b09b234230 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -42,11 +42,10 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
-static unsigned long __init setup_zero_pages(void)
+static void __init setup_zero_pages(void)
{
struct cpuid cpu_id;
unsigned int order;
- unsigned long size;
struct page *page;
int i;
@@ -63,10 +62,18 @@ static unsigned long __init setup_zero_pages(void)
break;
case 0x2097: /* z10 */
case 0x2098: /* z10 */
- default:
+ case 0x2817: /* z196 */
+ case 0x2818: /* z196 */
order = 2;
break;
+ case 0x2827: /* zEC12 */
+ default:
+ order = 5;
+ break;
}
+ /* Limit number of empty zero pages for small memory sizes */
+ if (order > 2 && totalram_pages <= 16384)
+ order = 2;
empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!empty_zero_page)
@@ -75,14 +82,11 @@ static unsigned long __init setup_zero_pages(void)
page = virt_to_page((void *) empty_zero_page);
split_page(page, order);
for (i = 1 << order; i > 0; i--) {
- SetPageReserved(page);
+ mark_page_reserved(page);
page++;
}
- size = PAGE_SIZE << order;
- zero_page_mask = (size - 1) & PAGE_MASK;
-
- return 1UL << order;
+ zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
/*
@@ -139,7 +143,7 @@ void __init mem_init(void)
/* this will put all low memory onto the freelists */
totalram_pages += free_all_bootmem();
- totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
+ setup_zero_pages(); /* Setup zeroed pages. */
reservedpages = 0;
@@ -158,34 +162,15 @@ void __init mem_init(void)
PFN_ALIGN((unsigned long)&_eshared) - 1);
}
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
- unsigned long addr = begin;
-
- if (begin >= end)
- return;
- for (; addr < end; addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
- PAGE_SIZE);
- free_page(addr);
- totalram_pages++;
- }
- printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-}
-
void free_initmem(void)
{
- free_init_pages("unused kernel memory",
- (unsigned long)&__init_begin,
- (unsigned long)&__init_end);
+ free_initmem_default(0);
}
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
- free_init_pages("initrd memory", start, end);
+ free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
}
#endif
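
setup_zero_pages() now allocates up to 2^order zero pages (order 5 on zEC12, clamped for small memory) and derives zero_page_mask so the zero page handed out can be picked from the faulting virtual address, spreading read faults across cache colors (this is how the existing ZERO_PAGE() selection on s390 appears to use the mask; simplified here). A hedged sketch of the selection arithmetic with simplified constants:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned int order = 2;                        /* 4 zero pages, as on z10 */
        unsigned long empty_zero_page = 0x80000000UL;  /* hypothetical base */
        unsigned long zero_page_mask =
            ((PAGE_SIZE << order) - 1) & PAGE_MASK;    /* from the hunk */

        /* Pick a colored zero page based on the faulting virtual address. */
        unsigned long vaddrs[] = { 0x1000, 0x2000, 0x3000, 0x4000, 0x5000 };
        for (unsigned int i = 0; i < 5; i++)
            printf("vaddr %#lx -> zero page %#lx\n", vaddrs[i],
                   empty_zero_page + (vaddrs[i] & zero_page_mask));
        return 0;
    }
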
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index d21040ed5e5..80adfbf7506 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -9,31 +9,25 @@
#include <asm/pgtable.h>
#include <asm/page.h>
+static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
+{
+ asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
+ : [addr] "+a" (addr) : [skey] "d" (skey));
+ return addr;
+}
+
void storage_key_init_range(unsigned long start, unsigned long end)
{
- unsigned long boundary, function, size;
+ unsigned long boundary, size;
while (start < end) {
- if (MACHINE_HAS_EDAT2) {
- /* set storage keys for a 2GB frame */
- function = 0x22000 | PAGE_DEFAULT_KEY;
- size = 1UL << 31;
- boundary = (start + size) & ~(size - 1);
- if (boundary <= end) {
- do {
- start = pfmf(function, start);
- } while (start < boundary);
- continue;
- }
- }
if (MACHINE_HAS_EDAT1) {
/* set storage keys for a 1MB frame */
- function = 0x21000 | PAGE_DEFAULT_KEY;
size = 1UL << 20;
boundary = (start + size) & ~(size - 1);
if (boundary <= end) {
do {
- start = pfmf(function, start);
+ start = sske_frame(start, PAGE_DEFAULT_KEY);
} while (start < boundary);
continue;
}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index ae44d2a3431..bd954e96f51 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -379,75 +379,183 @@ out_unmap:
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
-/*
- * this function is assumed to be called with mmap_sem held
- */
-unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
{
- unsigned long *table, vmaddr, segment;
- struct mm_struct *mm;
- struct gmap_pgtable *mp;
- struct gmap_rmap *rmap;
- struct vm_area_struct *vma;
- struct page *page;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
+ unsigned long *table;
- current->thread.gmap_addr = address;
- mm = gmap->mm;
- /* Walk the gmap address space page table */
table = gmap->table + ((address >> 53) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
- return -EFAULT;
+ return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 42) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
- return -EFAULT;
+ return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 31) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
- return -EFAULT;
+ return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 20) & 0x7ff);
+ return table;
+}
+
+/**
+ * __gmap_translate - translate a guest address to a user space address
+ * @address: guest address
+ * @gmap: pointer to guest mapping meta data structure
+ *
+ * Returns user space address which corresponds to the guest address or
+ * -EFAULT if no such mapping exists.
+ * This function does not establish potentially missing page table entries.
+ * The mmap_sem of the mm that belongs to the address space must be held
+ * when this function gets called.
+ */
+unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
+{
+ unsigned long *segment_ptr, vmaddr, segment;
+ struct gmap_pgtable *mp;
+ struct page *page;
+ current->thread.gmap_addr = address;
+ segment_ptr = gmap_table_walk(address, gmap);
+ if (IS_ERR(segment_ptr))
+ return PTR_ERR(segment_ptr);
/* Convert the gmap address to an mm address. */
- segment = *table;
- if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
+ segment = *segment_ptr;
+ if (!(segment & _SEGMENT_ENTRY_INV)) {
page = pfn_to_page(segment >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index;
return mp->vmaddr | (address & ~PMD_MASK);
} else if (segment & _SEGMENT_ENTRY_RO) {
vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
- vma = find_vma(mm, vmaddr);
- if (!vma || vma->vm_start > vmaddr)
- return -EFAULT;
-
- /* Walk the parent mm page table */
- pgd = pgd_offset(mm, vmaddr);
- pud = pud_alloc(mm, pgd, vmaddr);
- if (!pud)
- return -ENOMEM;
- pmd = pmd_alloc(mm, pud, vmaddr);
- if (!pmd)
- return -ENOMEM;
- if (!pmd_present(*pmd) &&
- __pte_alloc(mm, vma, pmd, vmaddr))
- return -ENOMEM;
- /* pmd now points to a valid segment table entry. */
- rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
- if (!rmap)
- return -ENOMEM;
- /* Link gmap segment table entry location to page table. */
- page = pmd_page(*pmd);
- mp = (struct gmap_pgtable *) page->index;
- rmap->entry = table;
- spin_lock(&mm->page_table_lock);
+ return vmaddr | (address & ~PMD_MASK);
+ }
+ return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(__gmap_translate);
+
+/**
+ * gmap_translate - translate a guest address to a user space address
+ * @address: guest address
+ * @gmap: pointer to guest mapping meta data structure
+ *
+ * Returns user space address which corresponds to the guest address or
+ * -EFAULT if no such mapping exists.
+ * This function does not establish potentially missing page table entries.
+ */
+unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
+{
+ unsigned long rc;
+
+ down_read(&gmap->mm->mmap_sem);
+ rc = __gmap_translate(address, gmap);
+ up_read(&gmap->mm->mmap_sem);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_translate);
+
+static int gmap_connect_pgtable(unsigned long segment,
+ unsigned long *segment_ptr,
+ struct gmap *gmap)
+{
+ unsigned long vmaddr;
+ struct vm_area_struct *vma;
+ struct gmap_pgtable *mp;
+ struct gmap_rmap *rmap;
+ struct mm_struct *mm;
+ struct page *page;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ mm = gmap->mm;
+ vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
+ vma = find_vma(mm, vmaddr);
+ if (!vma || vma->vm_start > vmaddr)
+ return -EFAULT;
+ /* Walk the parent mm page table */
+ pgd = pgd_offset(mm, vmaddr);
+ pud = pud_alloc(mm, pgd, vmaddr);
+ if (!pud)
+ return -ENOMEM;
+ pmd = pmd_alloc(mm, pud, vmaddr);
+ if (!pmd)
+ return -ENOMEM;
+ if (!pmd_present(*pmd) &&
+ __pte_alloc(mm, vma, pmd, vmaddr))
+ return -ENOMEM;
+ /* pmd now points to a valid segment table entry. */
+ rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
+ if (!rmap)
+ return -ENOMEM;
+ /* Link gmap segment table entry location to page table. */
+ page = pmd_page(*pmd);
+ mp = (struct gmap_pgtable *) page->index;
+ rmap->entry = segment_ptr;
+ spin_lock(&mm->page_table_lock);
+ if (*segment_ptr == segment) {
list_add(&rmap->list, &mp->mapper);
- spin_unlock(&mm->page_table_lock);
/* Set gmap segment table entry to page table. */
- *table = pmd_val(*pmd) & PAGE_MASK;
- return vmaddr | (address & ~PMD_MASK);
+ *segment_ptr = pmd_val(*pmd) & PAGE_MASK;
+ rmap = NULL;
+ }
+ spin_unlock(&mm->page_table_lock);
+ kfree(rmap);
+ return 0;
+}
+
+static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
+{
+ struct gmap_rmap *rmap, *next;
+ struct gmap_pgtable *mp;
+ struct page *page;
+ int flush;
+
+ flush = 0;
+ spin_lock(&mm->page_table_lock);
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
+ *rmap->entry =
+ _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+ list_del(&rmap->list);
+ kfree(rmap);
+ flush = 1;
+ }
+ spin_unlock(&mm->page_table_lock);
+ if (flush)
+ __tlb_flush_global();
+}
+
+/*
+ * this function is assumed to be called with mmap_sem held
+ */
+unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+{
+ unsigned long *segment_ptr, segment;
+ struct gmap_pgtable *mp;
+ struct page *page;
+ int rc;
+
+ current->thread.gmap_addr = address;
+ segment_ptr = gmap_table_walk(address, gmap);
+ if (IS_ERR(segment_ptr))
+ return -EFAULT;
+ /* Convert the gmap address to an mm address. */
+ while (1) {
+ segment = *segment_ptr;
+ if (!(segment & _SEGMENT_ENTRY_INV)) {
+ /* Page table is present */
+ page = pfn_to_page(segment >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ return mp->vmaddr | (address & ~PMD_MASK);
+ }
+ if (!(segment & _SEGMENT_ENTRY_RO))
+ /* Nothing mapped in the gmap address space. */
+ break;
+ rc = gmap_connect_pgtable(segment, segment_ptr, gmap);
+ if (rc)
+ return rc;
}
return -EFAULT;
}
@@ -511,29 +619,6 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
}
EXPORT_SYMBOL_GPL(gmap_discard);
-void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
-{
- struct gmap_rmap *rmap, *next;
- struct gmap_pgtable *mp;
- struct page *page;
- int flush;
-
- flush = 0;
- spin_lock(&mm->page_table_lock);
- page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
- mp = (struct gmap_pgtable *) page->index;
- list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
- *rmap->entry =
- _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
- list_del(&rmap->list);
- kfree(rmap);
- flush = 1;
- }
- spin_unlock(&mm->page_table_lock);
- if (flush)
- __tlb_flush_global();
-}
-
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
unsigned long vmaddr)
{
@@ -586,8 +671,8 @@ static inline void page_table_free_pgste(unsigned long *table)
{
}
-static inline void gmap_unmap_notifier(struct mm_struct *mm,
- unsigned long *table)
+static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
+ unsigned long *table)
{
}
@@ -653,7 +738,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
unsigned int bit, mask;
if (mm_has_pgste(mm)) {
- gmap_unmap_notifier(mm, table);
+ gmap_disconnect_pgtable(mm, table);
return page_table_free_pgste(table);
}
/* Free 1K/2K page table fragment of a 4K page */
@@ -696,7 +781,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
mm = tlb->mm;
if (mm_has_pgste(mm)) {
- gmap_unmap_notifier(mm, table);
+ gmap_disconnect_pgtable(mm, table);
table = (unsigned long *) (__pa(table) | FRAG_MASK);
tlb_remove_table(tlb, table);
return;
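
The new __gmap_translate()/gmap_translate() pair resolve a guest address to the corresponding user space address without establishing missing entries; the locked variant takes mmap_sem for reading and the result is either an address or -EFAULT. A hedged, self-contained sketch of a caller honoring that contract follows; the translation function below is a stand-in, not the real gmap walk.

    #include <stdio.h>

    #define EFAULT 14
    #define MAX_ERRNO 4095UL
    #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    /* Hypothetical stand-in for gmap_translate(): guest -> host address. */
    static unsigned long gmap_translate_sketch(unsigned long guest_addr)
    {
        if (guest_addr >= 0x100000UL)          /* pretend only 1M is mapped */
            return -(unsigned long)EFAULT;
        return 0x40000000UL + guest_addr;      /* fake host mapping base */
    }

    int main(void)
    {
        unsigned long addrs[] = { 0x1000UL, 0x200000UL };

        for (int i = 0; i < 2; i++) {
            unsigned long host = gmap_translate_sketch(addrs[i]);

            if (IS_ERR_VALUE(host))
                printf("guest %#lx: -EFAULT\n", addrs[i]);
            else
                printf("guest %#lx -> host %#lx\n", addrs[i], host);
        }
        return 0;
    }
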
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index ffab84db690..35837054f73 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -191,19 +191,16 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
/*
* Add a backed mem_map array to the virtual mem_map array.
*/
-int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
- unsigned long address, start_addr, end_addr;
+ unsigned long address = start;
pgd_t *pg_dir;
pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
int ret = -ENOMEM;
- start_addr = (unsigned long) start;
- end_addr = (unsigned long) (start + nr);
-
- for (address = start_addr; address < end_addr;) {
+ for (address = start; address < end;) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
pu_dir = vmem_pud_alloc();
@@ -262,14 +259,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
}
address += PAGE_SIZE;
}
- memset(start, 0, nr * sizeof(struct page));
+ memset((void *)start, 0, end - start);
ret = 0;
out:
- flush_tlb_kernel_range(start_addr, end_addr);
+ flush_tlb_kernel_range(start, end);
return ret;
}
-void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void vmemmap_free(unsigned long start, unsigned long end)
{
}
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 0972e91cced..82f165f8078 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -747,10 +747,9 @@ void bpf_jit_compile(struct sk_filter *fp)
if (!bpf_jit_enable)
return;
- addrs = kmalloc(fp->len * sizeof(*addrs), GFP_KERNEL);
+ addrs = kcalloc(fp->len, sizeof(*addrs), GFP_KERNEL);
if (addrs == NULL)
return;
- memset(addrs, 0, fp->len * sizeof(*addrs));
memset(&jit, 0, sizeof(cjit));
memset(&cjit, 0, sizeof(cjit));
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 584b93674ea..ffeb17ce7f3 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -440,6 +440,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
switch (id.machine) {
case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
+ case 0x2827: ops->cpu_type = "s390/zEC12"; break;
default: return -ENODEV;
}
}
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index f0f426a113c..086a2e37935 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -2,5 +2,5 @@
# Makefile for the s390 PCI subsystem.
#
-obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o \
- pci_sysfs.o pci_event.o pci_debug.o
+obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o pci_sysfs.o \
+ pci_event.o pci_debug.o pci_insn.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 27b4c17855b..e6f15b5d8b7 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -99,9 +99,6 @@ static int __read_mostly aisb_max;
static struct kmem_cache *zdev_irq_cache;
static struct kmem_cache *zdev_fmb_cache;
-debug_info_t *pci_debug_msg_id;
-debug_info_t *pci_debug_err_id;
-
static inline int irq_to_msi_nr(unsigned int irq)
{
return irq & ZPCI_MSI_MASK;
@@ -179,7 +176,7 @@ static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
fib->aisb = (u64) bucket->aisb + aisb / 8;
fib->aisbo = aisb & ZPCI_MSI_MASK;
- rc = mpcifc_instr(req, fib);
+ rc = s390pci_mod_fc(req, fib);
pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
free_page((unsigned long) fib);
@@ -209,7 +206,7 @@ static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args
fib->iota = args->iota;
fib->fmb_addr = args->fmb_addr;
- rc = mpcifc_instr(req, fib);
+ rc = s390pci_mod_fc(req, fib);
free_page((unsigned long) fib);
return rc;
}
@@ -249,10 +246,9 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
if (zdev->fmb)
return -EINVAL;
- zdev->fmb = kmem_cache_alloc(zdev_fmb_cache, GFP_KERNEL);
+ zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
if (!zdev->fmb)
return -ENOMEM;
- memset(zdev->fmb, 0, sizeof(*zdev->fmb));
WARN_ON((u64) zdev->fmb & 0xf);
args.fmb_addr = virt_to_phys(zdev->fmb);
@@ -284,12 +280,12 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
u64 data;
int rc;
- rc = pcilg_instr(&data, req, offset);
- data = data << ((8 - len) * 8);
- data = le64_to_cpu(data);
- if (!rc)
+ rc = s390pci_load(&data, req, offset);
+ if (!rc) {
+ data = data << ((8 - len) * 8);
+ data = le64_to_cpu(data);
*val = (u32) data;
- else
+ } else
*val = 0xffffffff;
return rc;
}
@@ -302,7 +298,7 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
data = cpu_to_le64(data);
data = data >> ((8 - len) * 8);
- rc = pcistg_instr(data, req, offset);
+ rc = s390pci_store(data, req, offset);
return rc;
}
@@ -409,20 +405,28 @@ static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
{
struct zpci_dev *zdev = get_zdev_by_bus(bus);
+ int ret;
if (!zdev || devfn != ZPCI_DEVFN)
- return 0;
- return zpci_cfg_load(zdev, where, val, size);
+ ret = -ENODEV;
+ else
+ ret = zpci_cfg_load(zdev, where, val, size);
+
+ return ret;
}
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 val)
{
struct zpci_dev *zdev = get_zdev_by_bus(bus);
+ int ret;
if (!zdev || devfn != ZPCI_DEVFN)
- return 0;
- return zpci_cfg_store(zdev, where, val, size);
+ ret = -ENODEV;
+ else
+ ret = zpci_cfg_store(zdev, where, val, size);
+
+ return ret;
}
static struct pci_ops pci_root_ops = {
@@ -474,7 +478,7 @@ scan:
}
/* enable interrupts again */
- sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+ set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
/* check again to not lose initiative */
rmb();
@@ -596,19 +600,6 @@ static void zpci_map_resources(struct zpci_dev *zdev)
}
};
-static void zpci_unmap_resources(struct pci_dev *pdev)
-{
- resource_size_t len;
- int i;
-
- for (i = 0; i < PCI_BAR_COUNT; i++) {
- len = pci_resource_len(pdev, i);
- if (!len)
- continue;
- pci_iounmap(pdev, (void *) pdev->resource[i].start);
- }
-};
-
struct zpci_dev *zpci_alloc_device(void)
{
struct zpci_dev *zdev;
@@ -636,32 +627,6 @@ void zpci_free_device(struct zpci_dev *zdev)
kfree(zdev);
}
-/* Called on removal of pci_dev, leaves zpci and bus device */
-static void zpci_remove_device(struct pci_dev *pdev)
-{
- struct zpci_dev *zdev = get_zdev(pdev);
-
- dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
- zdev->state = ZPCI_FN_STATE_CONFIGURED;
- zpci_dma_exit_device(zdev);
- zpci_fmb_disable_device(zdev);
- zpci_sysfs_remove_device(&pdev->dev);
- zpci_unmap_resources(pdev);
- list_del(&zdev->entry); /* can be called from init */
- zdev->pdev = NULL;
-}
-
-static void zpci_scan_devices(void)
-{
- struct zpci_dev *zdev;
-
- mutex_lock(&zpci_list_lock);
- list_for_each_entry(zdev, &zpci_list, entry)
- if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
- zpci_scan_device(zdev);
- mutex_unlock(&zpci_list_lock);
-}
-
/*
* Too late for any s390 specific setup, since interrupts must be set up
* already which requires DMA setup too and the pci scan will access the
@@ -688,12 +653,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
return 0;
}
-void pcibios_disable_device(struct pci_dev *pdev)
-{
- zpci_remove_device(pdev);
- pdev->sysdata = NULL;
-}
-
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
return zpci_sysfs_add_device(&pdev->dev);
@@ -789,7 +748,7 @@ static int __init zpci_irq_init(void)
spin_lock_init(&bucket->lock);
/* set summary to 1 to be called every time for the ISC */
*zpci_irq_si = 1;
- sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+ set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
return 0;
out_ai:
@@ -872,7 +831,19 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
spin_unlock(&zpci_iomap_lock);
}
-static int zpci_create_device_bus(struct zpci_dev *zdev)
+int pcibios_add_device(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = get_zdev(pdev);
+
+ zdev->pdev = pdev;
+ zpci_debug_init_device(zdev);
+ zpci_fmb_enable_device(zdev);
+ zpci_map_resources(zdev);
+
+ return 0;
+}
+
+static int zpci_scan_bus(struct zpci_dev *zdev)
{
struct resource *res;
LIST_HEAD(resources);
@@ -909,8 +880,8 @@ static int zpci_create_device_bus(struct zpci_dev *zdev)
pci_add_resource(&resources, res);
}
- zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
- zdev, &resources);
+ zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
+ zdev, &resources);
if (!zdev->bus)
return -EIO;
@@ -959,6 +930,13 @@ out:
}
EXPORT_SYMBOL_GPL(zpci_enable_device);
+int zpci_disable_device(struct zpci_dev *zdev)
+{
+ zpci_dma_exit_device(zdev);
+ return clp_disable_fh(zdev);
+}
+EXPORT_SYMBOL_GPL(zpci_disable_device);
+
int zpci_create_device(struct zpci_dev *zdev)
{
int rc;
@@ -967,9 +945,16 @@ int zpci_create_device(struct zpci_dev *zdev)
if (rc)
goto out;
- rc = zpci_create_device_bus(zdev);
+ if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
+ rc = zpci_enable_device(zdev);
+ if (rc)
+ goto out_free;
+
+ zdev->state = ZPCI_FN_STATE_ONLINE;
+ }
+ rc = zpci_scan_bus(zdev);
if (rc)
- goto out_bus;
+ goto out_disable;
mutex_lock(&zpci_list_lock);
list_add_tail(&zdev->entry, &zpci_list);
@@ -977,21 +962,12 @@ int zpci_create_device(struct zpci_dev *zdev)
hotplug_ops->create_slot(zdev);
mutex_unlock(&zpci_list_lock);
- if (zdev->state == ZPCI_FN_STATE_STANDBY)
- return 0;
-
- rc = zpci_enable_device(zdev);
- if (rc)
- goto out_start;
return 0;
-out_start:
- mutex_lock(&zpci_list_lock);
- list_del(&zdev->entry);
- if (hotplug_ops)
- hotplug_ops->remove_slot(zdev);
- mutex_unlock(&zpci_list_lock);
-out_bus:
+out_disable:
+ if (zdev->state == ZPCI_FN_STATE_ONLINE)
+ zpci_disable_device(zdev);
+out_free:
zpci_free_domain(zdev);
out:
return rc;
@@ -1016,15 +992,9 @@ int zpci_scan_device(struct zpci_dev *zdev)
goto out;
}
- zpci_debug_init_device(zdev);
- zpci_fmb_enable_device(zdev);
- zpci_map_resources(zdev);
pci_bus_add_devices(zdev->bus);
- /* now that pdev was added to the bus mark it as used */
- zdev->state = ZPCI_FN_STATE_ONLINE;
return 0;
-
out:
zpci_dma_exit_device(zdev);
clp_disable_fh(zdev);
@@ -1087,13 +1057,13 @@ void zpci_deregister_hp_ops(void)
}
EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops);
-unsigned int s390_pci_probe = 1;
+unsigned int s390_pci_probe;
EXPORT_SYMBOL_GPL(s390_pci_probe);
char * __init pcibios_setup(char *str)
{
- if (!strcmp(str, "off")) {
- s390_pci_probe = 0;
+ if (!strcmp(str, "on")) {
+ s390_pci_probe = 1;
return NULL;
}
return str;
@@ -1138,7 +1108,6 @@ static int __init pci_base_init(void)
if (rc)
goto out_find;
- zpci_scan_devices();
return 0;
out_find:
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index f339fe2feb1..bd34359d154 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
+#include <asm/pci_debug.h>
#include <asm/pci_clp.h>
/*
@@ -144,6 +145,7 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured)
struct zpci_dev *zdev;
int rc;
+ zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
zdev = zpci_alloc_device();
if (IS_ERR(zdev))
return PTR_ERR(zdev);
@@ -204,8 +206,8 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
*fh = rrb->response.fh;
else {
- pr_err("Set PCI FN failed with response: %x cc: %d\n",
- rrb->response.hdr.rsp, rc);
+ zpci_dbg(0, "SPF fh:%x, cc:%d, resp:%x\n", *fh, rc,
+ rrb->response.hdr.rsp);
rc = -EIO;
}
clp_free_block(rrb);
@@ -221,6 +223,8 @@ int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
if (!rc)
/* Success -> store enabled handle in zdev */
zdev->fh = fh;
+
+ zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
return rc;
}
@@ -237,9 +241,8 @@ int clp_disable_fh(struct zpci_dev *zdev)
if (!rc)
/* Success -> store disabled handle in zdev */
zdev->fh = fh;
- else
- dev_err(&zdev->pdev->dev,
- "Failed to disable fn handle: 0x%x\n", fh);
+
+ zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
return rc;
}
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index a5d07bc2a54..771b82359af 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -11,12 +11,17 @@
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <asm/debug.h>
#include <asm/pci_dma.h>
static struct dentry *debugfs_root;
+debug_info_t *pci_debug_msg_id;
+EXPORT_SYMBOL_GPL(pci_debug_msg_id);
+debug_info_t *pci_debug_err_id;
+EXPORT_SYMBOL_GPL(pci_debug_err_id);
static char *pci_perf_names[] = {
/* hardware counters */
@@ -168,7 +173,6 @@ int __init zpci_debug_init(void)
return -EINVAL;
debug_register_view(pci_debug_msg_id, &debug_sprintf_view);
debug_set_level(pci_debug_msg_id, 3);
- zpci_dbg("Debug view initialized\n");
/* error log */
pci_debug_err_id = debug_register("pci_error", 2, 1, 16);
@@ -176,7 +180,6 @@ int __init zpci_debug_init(void)
return -EINVAL;
debug_register_view(pci_debug_err_id, &debug_hex_ascii_view);
debug_set_level(pci_debug_err_id, 6);
- zpci_err("Debug view initialized\n");
debugfs_root = debugfs_create_dir("pci", NULL);
return 0;
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index a547419907c..f8e69d5bc0a 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -169,8 +169,9 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
* needs to be redone!
*/
goto no_refresh;
- rc = rpcit_instr((u64) zdev->fh << 32, start_dma_addr,
- nr_pages * PAGE_SIZE);
+
+ rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
+ nr_pages * PAGE_SIZE);
no_refresh:
spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
@@ -268,8 +269,6 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
int flags = ZPCI_PTE_VALID;
dma_addr_t dma_addr;
- WARN_ON_ONCE(offset > PAGE_SIZE);
-
/* This rounds up number of pages based on size and offset */
nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
@@ -291,7 +290,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
- return dma_addr + offset;
+ return dma_addr + (offset & ~PAGE_MASK);
}
out_free:
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
new file mode 100644
index 00000000000..22eeb9d7ffe
--- /dev/null
+++ b/arch/s390/pci/pci_insn.c
@@ -0,0 +1,202 @@
+/*
+ * s390 specific pci instructions
+ *
+ * Copyright IBM Corp. 2013
+ */
+
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <asm/pci_insn.h>
+#include <asm/processor.h>
+
+#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
+
+/* Modify PCI Function Controls */
+static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
+{
+ u8 cc;
+
+ asm volatile (
+ " .insn rxy,0xe300000000d0,%[req],%[fib]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
+ : : "cc");
+ *status = req >> 24 & 0xff;
+ return cc;
+}
+
+int s390pci_mod_fc(u64 req, struct zpci_fib *fib)
+{
+ u8 cc, status;
+
+ do {
+ cc = __mpcifc(req, fib, &status);
+ if (cc == 2)
+ msleep(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
+ __func__, cc, status);
+ return (cc) ? -EIO : 0;
+}
+
+/* Refresh PCI Translations */
+static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
+{
+ register u64 __addr asm("2") = addr;
+ register u64 __range asm("3") = range;
+ u8 cc;
+
+ asm volatile (
+ " .insn rre,0xb9d30000,%[fn],%[addr]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [fn] "+d" (fn)
+ : [addr] "d" (__addr), "d" (__range)
+ : "cc");
+ *status = fn >> 24 & 0xff;
+ return cc;
+}
+
+int s390pci_refresh_trans(u64 fn, u64 addr, u64 range)
+{
+ u8 cc, status;
+
+ do {
+ cc = __rpcit(fn, addr, range, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
+ __func__, cc, status, addr, range);
+ return (cc) ? -EIO : 0;
+}
+
+/* Set Interruption Controls */
+void set_irq_ctrl(u16 ctl, char *unused, u8 isc)
+{
+ asm volatile (
+ " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
+ : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
+}
+
+/* PCI Load */
+static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
+{
+ register u64 __req asm("2") = req;
+ register u64 __offset asm("3") = offset;
+ int cc = -ENXIO;
+ u64 __data;
+
+ asm volatile (
+ " .insn rre,0xb9d20000,%[data],%[req]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [data] "=d" (__data), [req] "+d" (__req)
+ : "d" (__offset)
+ : "cc");
+ *status = __req >> 24 & 0xff;
+ if (!cc)
+ *data = __data;
+
+ return cc;
+}
+
+int s390pci_load(u64 *data, u64 req, u64 offset)
+{
+ u8 status;
+ int cc;
+
+ do {
+ cc = __pcilg(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
+ __func__, cc, status, req, offset);
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(s390pci_load);
+
+/* PCI Store */
+static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
+{
+ register u64 __req asm("2") = req;
+ register u64 __offset asm("3") = offset;
+ int cc = -ENXIO;
+
+ asm volatile (
+ " .insn rre,0xb9d00000,%[data],%[req]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [req] "+d" (__req)
+ : "d" (__offset), [data] "d" (data)
+ : "cc");
+ *status = __req >> 24 & 0xff;
+ return cc;
+}
+
+int s390pci_store(u64 data, u64 req, u64 offset)
+{
+ u8 status;
+ int cc;
+
+ do {
+ cc = __pcistg(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
+ __func__, cc, status, req, offset);
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(s390pci_store);
+
+/* PCI Store Block */
+static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
+{
+ int cc = -ENXIO;
+
+ asm volatile (
+ " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [req] "+d" (req)
+ : [offset] "d" (offset), [data] "Q" (*data)
+ : "cc");
+ *status = req >> 24 & 0xff;
+ return cc;
+}
+
+int s390pci_store_block(const u64 *data, u64 req, u64 offset)
+{
+ u8 status;
+ int cc;
+
+ do {
+ cc = __pcistb(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
+ __func__, cc, status, req, offset);
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(s390pci_store_block);
diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c
index 0297931335e..b097aed05a9 100644
--- a/arch/s390/pci/pci_msi.c
+++ b/arch/s390/pci/pci_msi.c
@@ -18,8 +18,9 @@
/* mapping of irq numbers to msi_desc */
static struct hlist_head *msi_hash;
-static unsigned int msihash_shift = 6;
-#define msi_hashfn(nr) hash_long(nr, msihash_shift)
+static const unsigned int msi_hash_bits = 8;
+#define MSI_HASH_BUCKETS (1U << msi_hash_bits)
+#define msi_hashfn(nr) hash_long(nr, msi_hash_bits)
static DEFINE_SPINLOCK(msi_map_lock);
@@ -74,6 +75,7 @@ int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
map->irq = nr;
map->msi = msi;
zdev->msi_map[nr & ZPCI_MSI_MASK] = map;
+ INIT_HLIST_NODE(&map->msi_chain);
pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
__func__, nr, msi_hashfn(nr));
@@ -125,11 +127,11 @@ int __init zpci_msihash_init(void)
{
unsigned int i;
- msi_hash = kmalloc(256 * sizeof(*msi_hash), GFP_KERNEL);
+ msi_hash = kmalloc(MSI_HASH_BUCKETS * sizeof(*msi_hash), GFP_KERNEL);
if (!msi_hash)
return -ENOMEM;
- for (i = 0; i < (1U << msihash_shift); i++)
+ for (i = 0; i < MSI_HASH_BUCKETS; i++)
INIT_HLIST_HEAD(&msi_hash[i]);
return 0;
}
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index e569aa1fd2b..c8def8bc902 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -12,7 +12,7 @@ config SCORE
select GENERIC_CPU_DEVICES
select GENERIC_CLOCKEVENTS
select HAVE_MOD_ARCH_SPECIFIC
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select MODULES_USE_ELF_REL
select CLONE_BACKWARDS
diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
index 79568466b57..f4c6d02421d 100644
--- a/arch/score/kernel/process.c
+++ b/arch/score/kernel/process.c
@@ -41,24 +41,6 @@ void machine_halt(void) {}
/* If or when software machine-power-off is implemented, add code here. */
void machine_power_off(void) {}
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void __noreturn cpu_idle(void)
-{
- /* endless idle loop with no priority at all */
- while (1) {
- rcu_idle_enter();
- while (!need_resched())
- barrier();
- rcu_idle_exit();
- schedule_preempt_disabled();
- }
-}
-
void ret_from_fork(void);
void ret_from_kernel_thread(void);
diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c
index cee6bce1e30..1592aad7dbc 100644
--- a/arch/score/mm/init.c
+++ b/arch/score/mm/init.c
@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(empty_zero_page);
static struct kcore_list kcore_mem, kcore_vmalloc;
-static unsigned long setup_zero_page(void)
+static void setup_zero_page(void)
{
struct page *page;
@@ -52,9 +52,7 @@ static unsigned long setup_zero_page(void)
panic("Oh boy, that early out of memory?");
page = virt_to_page((void *) empty_zero_page);
- SetPageReserved(page);
-
- return 1UL;
+ mark_page_reserved(page);
}
#ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -84,7 +82,7 @@ void __init mem_init(void)
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
totalram_pages += free_all_bootmem();
- totalram_pages -= setup_zero_page(); /* Setup zeroed pages. */
+ setup_zero_page(); /* Setup zeroed pages. */
reservedpages = 0;
for (tmp = 0; tmp < max_low_pfn; tmp++)
@@ -109,37 +107,16 @@ void __init mem_init(void)
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
-static void free_init_pages(const char *what, unsigned long begin, unsigned long end)
-{
- unsigned long pfn;
-
- for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
- struct page *page = pfn_to_page(pfn);
- void *addr = phys_to_virt(PFN_PHYS(pfn));
-
- ClearPageReserved(page);
- init_page_count(page);
- memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
- __free_page(page);
- totalram_pages++;
- }
- printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
-}
-
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- free_init_pages("initrd memory",
- virt_to_phys((void *) start),
- virt_to_phys((void *) end));
+ free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
}
#endif
void __init_refok free_initmem(void)
{
- free_init_pages("unused kernel memory",
- __pa(&__init_begin),
- __pa(&__init_end));
+ free_initmem_default(POISON_FREE_INITMEM);
}
unsigned long pgd_current;
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 5e859633ce6..1ea597c6497 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -33,6 +33,7 @@ config SUPERH
select GENERIC_ATOMIC64
select GENERIC_IRQ_SHOW
select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_IDLE_POLL_SETUP
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
select GENERIC_STRNCPY_FROM_USER
@@ -148,9 +149,6 @@ config ARCH_HAS_ILOG2_U32
config ARCH_HAS_ILOG2_U64
def_bool n
-config ARCH_HAS_DEFAULT_IDLE
- def_bool y
-
config NO_IOPORT
def_bool !PCI
depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN && \
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index aaff7671101..764530c85aa 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -254,11 +254,13 @@ static int usbhs_get_id(struct platform_device *pdev)
return gpio_get_value(GPIO_PTB3);
}
-static void usbhs_phy_reset(struct platform_device *pdev)
+static int usbhs_phy_reset(struct platform_device *pdev)
{
/* enable vbus if HOST */
if (!gpio_get_value(GPIO_PTB3))
gpio_set_value(GPIO_PTB5, 1);
+
+ return 0;
}
static struct renesas_usbhs_platform_info usbhs_info = {
diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c
index c2c85f6cd73..a162a7f86b2 100644
--- a/arch/sh/drivers/pci/pcie-sh7786.c
+++ b/arch/sh/drivers/pci/pcie-sh7786.c
@@ -35,7 +35,7 @@ static unsigned int nr_ports;
static struct sh7786_pcie_hwops {
int (*core_init)(void);
- async_func_ptr *port_init_hw;
+ async_func_t port_init_hw;
} *sh7786_pcie_hwops;
static struct resource sh7786_pci0_resources[] = {
diff --git a/arch/sh/include/asm/hugetlb.h b/arch/sh/include/asm/hugetlb.h
index b3808c7d67b..699255d6d1c 100644
--- a/arch/sh/include/asm/hugetlb.h
+++ b/arch/sh/include/asm/hugetlb.h
@@ -3,6 +3,7 @@
#include <asm/cacheflush.h>
#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 7d5ac4e4848..45a93669289 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -207,8 +207,6 @@ static inline bool test_and_clear_restore_sigmask(void)
return true;
}
-#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
-
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 3d5a1b387cc..2ea4483fd72 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -24,98 +24,24 @@
static void (*sh_idle)(void);
-static int hlt_counter;
-
-static int __init nohlt_setup(char *__unused)
-{
- hlt_counter = 1;
- return 1;
-}
-__setup("nohlt", nohlt_setup);
-
-static int __init hlt_setup(char *__unused)
-{
- hlt_counter = 0;
- return 1;
-}
-__setup("hlt", hlt_setup);
-
-static inline int hlt_works(void)
-{
- return !hlt_counter;
-}
-
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
+void default_idle(void)
{
+ set_bl_bit();
local_irq_enable();
- while (!need_resched())
- cpu_relax();
+ /* Isn't this racy ? */
+ cpu_sleep();
+ clear_bl_bit();
}
-void default_idle(void)
+void arch_cpu_idle_dead(void)
{
- if (hlt_works()) {
- clear_thread_flag(TIF_POLLING_NRFLAG);
- smp_mb__after_clear_bit();
-
- set_bl_bit();
- if (!need_resched()) {
- local_irq_enable();
- cpu_sleep();
- } else
- local_irq_enable();
-
- set_thread_flag(TIF_POLLING_NRFLAG);
- clear_bl_bit();
- } else
- poll_idle();
+ play_dead();
}
-/*
- * The idle thread. There's no useful work to be done, so just try to conserve
- * power and have a low exit latency (ie sit in a loop waiting for somebody to
- * say that they'd like to reschedule)
- */
-void cpu_idle(void)
+void arch_cpu_idle(void)
{
- unsigned int cpu = smp_processor_id();
-
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- /* endless idle loop with no priority at all */
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
-
- while (!need_resched()) {
- check_pgt_cache();
- rmb();
-
- if (cpu_is_offline(cpu))
- play_dead();
-
- local_irq_disable();
- /* Don't trace irqs off for idle */
- stop_critical_timings();
- if (cpuidle_idle_call())
- sh_idle();
- /*
- * Sanity check to ensure that sh_idle() returns
- * with IRQs enabled
- */
- WARN_ON(irqs_disabled());
- start_critical_timings();
- }
-
- rcu_idle_exit();
- tick_nohz_idle_exit();
- schedule_preempt_disabled();
- }
+ if (cpuidle_idle_call())
+ sh_idle();
}
void __init select_idle_routine(void)
@@ -123,13 +49,8 @@ void __init select_idle_routine(void)
/*
* If a platform has set its own idle routine, leave it alone.
*/
- if (sh_idle)
- return;
-
- if (hlt_works())
+ if (!sh_idle)
sh_idle = default_idle;
- else
- sh_idle = poll_idle;
}
void stop_this_cpu(void *unused)
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c
index 47475cca068..fe584e51696 100644
--- a/arch/sh/kernel/sh_bios.c
+++ b/arch/sh/kernel/sh_bios.c
@@ -104,6 +104,7 @@ void sh_bios_vbr_reload(void)
);
}
+#ifdef CONFIG_EARLY_PRINTK
/*
* Print a string through the BIOS
*/
@@ -144,8 +145,6 @@ static struct console bios_console = {
.index = -1,
};
-static struct console *early_console;
-
static int __init setup_early_printk(char *buf)
{
int keep_early = 0;
@@ -170,3 +169,4 @@ static int __init setup_early_printk(char *buf)
return 0;
}
early_param("earlyprintk", setup_early_printk);
+#endif
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 2062aa88af4..45696451f0e 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -203,7 +203,7 @@ asmlinkage void __cpuinit start_secondary(void)
set_cpu_online(cpu, true);
per_cpu(cpu_state, cpu) = CPU_ONLINE;
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
}
extern struct {
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 10579403714..20f9ead650d 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -417,15 +417,13 @@ void __init mem_init(void)
for_each_online_node(nid) {
pg_data_t *pgdat = NODE_DATA(nid);
- unsigned long node_pages = 0;
void *node_high_memory;
num_physpages += pgdat->node_present_pages;
if (pgdat->node_spanned_pages)
- node_pages = free_all_bootmem_node(pgdat);
+ totalram_pages += free_all_bootmem_node(pgdat);
- totalram_pages += node_pages;
node_high_memory = (void *)__va((pgdat->node_start_pfn +
pgdat->node_spanned_pages) <<
@@ -501,31 +499,13 @@ void __init mem_init(void)
void free_initmem(void)
{
- unsigned long addr;
-
- addr = (unsigned long)(&__init_begin);
- for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- totalram_pages++;
- }
- printk("Freeing unused kernel memory: %ldk freed\n",
- ((unsigned long)&__init_end -
- (unsigned long)&__init_begin) >> 10);
+ free_initmem_default(0);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- unsigned long p;
- for (p = start; p < end; p += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(p));
- init_page_count(virt_to_page(p));
- free_page(p);
- totalram_pages++;
- }
- printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+ free_reserved_area(start, end, 0, "initrd");
}
#endif
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 289127d5241..66dc562950a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -84,12 +84,6 @@ config ARCH_DEFCONFIG
default "arch/sparc/configs/sparc32_defconfig" if SPARC32
default "arch/sparc/configs/sparc64_defconfig" if SPARC64
-# CONFIG_BITS can be used at source level to get 32/64 bits
-config BITS
- int
- default 32 if SPARC32
- default 64 if SPARC64
-
config IOMMU_HELPER
bool
default y if SPARC64
@@ -197,7 +191,7 @@ config RWSEM_XCHGADD_ALGORITHM
config GENERIC_HWEIGHT
bool
- default y if !ULTRA_HAS_POPULATION_COUNT
+ default y
config GENERIC_CALIBRATE_DELAY
bool
@@ -413,6 +407,8 @@ config SERIAL_CONSOLE
config SPARC_LEON
bool "Sparc Leon processor family"
depends on SPARC32
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ select USB_EHCI_BIG_ENDIAN_DESC
---help---
If you say Y here if you are running on a SPARC-LEON processor.
The LEON processor is a synthesizable VHDL model of the
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index e26d430ce2f..ff18e3cfb6b 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -2,11 +2,16 @@
generic-y += clkdev.h
+generic-y += cputime.h
generic-y += div64.h
+generic-y += emergency-restart.h
generic-y += exec.h
generic-y += local64.h
+generic-y += mutex.h
generic-y += irq_regs.h
generic-y += local.h
generic-y += module.h
+generic-y += serial.h
generic-y += trace_clock.h
+generic-y += types.h
generic-y += word-at-a-time.h
diff --git a/arch/sparc/include/asm/cputime.h b/arch/sparc/include/asm/cputime.h
deleted file mode 100644
index 1a642b81e01..00000000000
--- a/arch/sparc/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __SPARC_CPUTIME_H
-#define __SPARC_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __SPARC_CPUTIME_H */
diff --git a/arch/sparc/include/asm/emergency-restart.h b/arch/sparc/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c48e42..00000000000
--- a/arch/sparc/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index 7eb57d24504..e4cab465b81 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -2,6 +2,7 @@
#define _ASM_SPARC64_HUGETLB_H
#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/sparc/include/asm/mutex.h b/arch/sparc/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc1..00000000000
--- a/arch/sparc/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 08fcce90316..7619f2f792a 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
+#include <asm/tlbflush.h>
#include <asm-generic/pgtable.h>
/* We provide our own get_unmapped_area to cope with VA holes and
diff --git a/arch/sparc/include/asm/serial.h b/arch/sparc/include/asm/serial.h
deleted file mode 100644
index f90d61c2805..00000000000
--- a/arch/sparc/include/asm/serial.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __SPARC_SERIAL_H
-#define __SPARC_SERIAL_H
-
-#define BASE_BAUD ( 1843200 / 16 )
-
-#endif /* __SPARC_SERIAL_H */
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h
index b73da3c5f10..3c8917f054d 100644
--- a/arch/sparc/include/asm/smp_32.h
+++ b/arch/sparc/include/asm/smp_32.h
@@ -36,7 +36,6 @@ typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long);
void cpu_panic(void);
-extern void smp4m_irq_rotate(int cpu);
/*
* General functions that each host system must provide.
@@ -46,7 +45,6 @@ void sun4m_init_smp(void);
void sun4d_init_smp(void);
void smp_callin(void);
-void smp_boot_cpus(void);
void smp_store_cpu_info(int);
void smp_resched_interrupt(void);
@@ -107,9 +105,6 @@ extern int hard_smp_processor_id(void);
#define raw_smp_processor_id() (current_thread_info()->cpu)
-#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
-#define prof_counter(__cpu) cpu_data(__cpu).counter
-
void smp_setup_cpu_possible_map(void);
#endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
index d06a2660175..6b67e50fb9b 100644
--- a/arch/sparc/include/asm/spitfire.h
+++ b/arch/sparc/include/asm/spitfire.h
@@ -45,6 +45,7 @@
#define SUN4V_CHIP_NIAGARA3 0x03
#define SUN4V_CHIP_NIAGARA4 0x04
#define SUN4V_CHIP_NIAGARA5 0x05
+#define SUN4V_CHIP_SPARC64X 0x8a
#define SUN4V_CHIP_UNKNOWN 0xff
#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h
index cad36f56fa0..c7de3323819 100644
--- a/arch/sparc/include/asm/switch_to_64.h
+++ b/arch/sparc/include/asm/switch_to_64.h
@@ -18,8 +18,7 @@ do { \
* and 2 stores in this critical code path. -DaveM
*/
#define switch_to(prev, next, last) \
-do { flush_tlb_pending(); \
- save_and_clear_fpu(); \
+do { save_and_clear_fpu(); \
/* If you are tempted to conditionalize the following */ \
/* so that ASI is only written if it changes, think again. */ \
__asm__ __volatile__("wr %%g0, %0, %%asi" \
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index 25849ae3e90..dd3807599bb 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -132,8 +132,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \
_TIF_SIGPENDING)
-#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
-
#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 269bd92313d..d5e50425107 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -256,8 +256,6 @@ static inline bool test_and_clear_restore_sigmask(void)
return true;
}
-#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
-
#define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0)
#define test_thread_64bit_stack(__SP) \
((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index 2ef46349415..f0d6a9700f4 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -11,24 +11,40 @@
struct tlb_batch {
struct mm_struct *mm;
unsigned long tlb_nr;
+ unsigned long active;
unsigned long vaddrs[TLB_BATCH_NR];
};
extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
extern void flush_tsb_user(struct tlb_batch *tb);
+extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
/* TLB flush operations. */
-extern void flush_tlb_pending(void);
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+}
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define flush_tlb_range(vma,start,end) \
- do { (void)(start); flush_tlb_pending(); } while (0)
-#define flush_tlb_page(vma,addr) flush_tlb_pending()
-#define flush_tlb_mm(mm) flush_tlb_pending()
+extern void flush_tlb_pending(void);
+extern void arch_enter_lazy_mmu_mode(void);
+extern void arch_leave_lazy_mmu_mode(void);
+#define arch_flush_lazy_mmu_mode() do {} while (0)
/* Local cpu only. */
extern void __flush_tlb_all(void);
-
+extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
#ifndef CONFIG_SMP
@@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \
__flush_tlb_kernel_range(start,end); \
} while (0)
+static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+{
+ __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
+}
+
#else /* CONFIG_SMP */
extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
#define flush_tlb_kernel_range(start, end) \
do { flush_tsb_kernel_range(start,end); \
smp_flush_tlb_kernel_range(start, end); \
} while (0)
+#define global_flush_tlb_page(mm, vaddr) \
+ smp_flush_tlb_page(mm, vaddr)
+
#endif /* ! CONFIG_SMP */
#endif /* _SPARC64_TLBFLUSH_H */
diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild
index ce175aff71b..b5843ee09fb 100644
--- a/arch/sparc/include/uapi/asm/Kbuild
+++ b/arch/sparc/include/uapi/asm/Kbuild
@@ -44,7 +44,6 @@ header-y += swab.h
header-y += termbits.h
header-y += termios.h
header-y += traps.h
-header-y += types.h
header-y += uctx.h
header-y += unistd.h
header-y += utrap.h
diff --git a/arch/sparc/include/uapi/asm/types.h b/arch/sparc/include/uapi/asm/types.h
deleted file mode 100644
index 383d156cde9..00000000000
--- a/arch/sparc/include/uapi/asm/types.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _SPARC_TYPES_H
-#define _SPARC_TYPES_H
-/*
- * This file is never included by application software unless
- * explicitly requested (e.g., via linux/types.h) in which case the
- * application is Linux specific so (user-) name space pollution is
- * not a major issue. However, for interoperability, libraries still
- * need to be careful to avoid a name clashes.
- */
-
-#if defined(__sparc__)
-
-#include <asm-generic/int-ll64.h>
-
-#endif /* defined(__sparc__) */
-
-#endif /* defined(_SPARC_TYPES_H) */
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index a6c94a2bf9d..5c5125895db 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -493,6 +493,12 @@ static void __init sun4v_cpu_probe(void)
sparc_pmu_type = "niagara5";
break;
+ case SUN4V_CHIP_SPARC64X:
+ sparc_cpu_type = "SPARC64-X";
+ sparc_fpu_type = "SPARC64-X integrated FPU";
+ sparc_pmu_type = "sparc64-x";
+ break;
+
default:
printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n",
prom_cpu_compatible);
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 2feb15c35d9..26b706a1867 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -134,6 +134,8 @@ prom_niagara_prefix:
.asciz "SUNW,UltraSPARC-T"
prom_sparc_prefix:
.asciz "SPARC-"
+prom_sparc64x_prefix:
+ .asciz "SPARC64-X"
.align 4
prom_root_compatible:
.skip 64
@@ -412,7 +414,7 @@ sun4v_chip_type:
cmp %g2, 'T'
be,pt %xcc, 70f
cmp %g2, 'M'
- bne,pn %xcc, 4f
+ bne,pn %xcc, 49f
nop
70: ldub [%g1 + 7], %g2
@@ -425,7 +427,7 @@ sun4v_chip_type:
cmp %g2, '5'
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA5, %g4
- ba,pt %xcc, 4f
+ ba,pt %xcc, 49f
nop
91: sethi %hi(prom_cpu_compatible), %g1
@@ -439,6 +441,25 @@ sun4v_chip_type:
mov SUN4V_CHIP_NIAGARA2, %g4
4:
+ /* Athena */
+ sethi %hi(prom_cpu_compatible), %g1
+ or %g1, %lo(prom_cpu_compatible), %g1
+ sethi %hi(prom_sparc64x_prefix), %g7
+ or %g7, %lo(prom_sparc64x_prefix), %g7
+ mov 9, %g3
+41: ldub [%g7], %g2
+ ldub [%g1], %g4
+ cmp %g2, %g4
+ bne,pn %icc, 49f
+ add %g7, 1, %g7
+ subcc %g3, 1, %g3
+ bne,pt %xcc, 41b
+ add %g1, 1, %g1
+ mov SUN4V_CHIP_SPARC64X, %g4
+ ba,pt %xcc, 5f
+ nop
+
+49:
mov SUN4V_CHIP_UNKNOWN, %g4
5: sethi %hi(sun4v_chip_type), %g2
or %g2, %lo(sun4v_chip_type), %g2
diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
index 9365432904d..605c960b2fa 100644
--- a/arch/sparc/kernel/hvtramp.S
+++ b/arch/sparc/kernel/hvtramp.S
@@ -128,8 +128,7 @@ hv_cpu_startup:
call smp_callin
nop
- call cpu_idle
- mov 0, %o0
+
call cpu_panic
nop
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
index fc4320886a3..4d1487138d2 100644
--- a/arch/sparc/kernel/leon_pci_grpci2.c
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
@@ -186,6 +186,8 @@ struct grpci2_cap_first {
#define CAP9_IOMAP_OFS 0x20
#define CAP9_BARSIZE_OFS 0x24
+#define TGT 256
+
struct grpci2_priv {
struct leon_pci_info info; /* must be on top of this structure */
struct grpci2_regs *regs;
@@ -237,8 +239,12 @@ static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus,
if (where & 0x3)
return -EINVAL;
- if (bus == 0 && PCI_SLOT(devfn) != 0)
- devfn += (0x8 * 6);
+ if (bus == 0) {
+ devfn += (0x8 * 6); /* start at AD16=Device0 */
+ } else if (bus == TGT) {
+ bus = 0;
+ devfn = 0; /* special case: bridge controller itself */
+ }
/* Select bus */
spin_lock_irqsave(&grpci2_dev_lock, flags);
@@ -303,8 +309,12 @@ static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus,
if (where & 0x3)
return -EINVAL;
- if (bus == 0 && PCI_SLOT(devfn) != 0)
- devfn += (0x8 * 6);
+ if (bus == 0) {
+ devfn += (0x8 * 6); /* start at AD16=Device0 */
+ } else if (bus == TGT) {
+ bus = 0;
+ devfn = 0; /* special case: bridge controller itself */
+ }
/* Select bus */
spin_lock_irqsave(&grpci2_dev_lock, flags);
@@ -368,7 +378,7 @@ static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn,
unsigned int busno = bus->number;
int ret;
- if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) {
+ if (PCI_SLOT(devfn) > 15 || busno > 255) {
*val = ~0;
return 0;
}
@@ -406,7 +416,7 @@ static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn,
struct grpci2_priv *priv = grpci2priv;
unsigned int busno = bus->number;
- if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0))
+ if (PCI_SLOT(devfn) > 15 || busno > 255)
return 0;
#ifdef GRPCI2_DEBUG_CFGACCESS
@@ -578,15 +588,15 @@ void grpci2_hw_init(struct grpci2_priv *priv)
REGSTORE(regs->ahbmst_map[i], priv->pci_area);
/* Get the GRPCI2 Host PCI ID */
- grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid);
+ grpci2_cfg_r32(priv, TGT, 0, PCI_VENDOR_ID, &priv->pciid);
/* Get address to first (always defined) capability structure */
- grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr);
+ grpci2_cfg_r8(priv, TGT, 0, PCI_CAPABILITY_LIST, &capptr);
/* Enable/Disable Byte twisting */
- grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map);
+ grpci2_cfg_r32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, &io_map);
io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0);
- grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map);
+ grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, io_map);
/* Setup the Host's PCI Target BARs for other peripherals to access,
* and do DMA to the host's memory. The target BARs can be sized and
@@ -617,17 +627,18 @@ void grpci2_hw_init(struct grpci2_priv *priv)
pciadr = 0;
}
}
- grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz);
- grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr);
- grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr);
+ grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BARSIZE_OFS+i*4,
+ bar_sz);
+ grpci2_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0+i*4, pciadr);
+ grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr);
printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n",
i, pciadr, ahbadr);
}
/* set as bus master and enable pci memory responses */
- grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data);
+ grpci2_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data);
data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
- grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data);
+ grpci2_cfg_w32(priv, TGT, 0, PCI_COMMAND, data);
/* Enable Error respone (CPU-TRAP) on illegal memory access. */
REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE);
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 62eede13831..c85241006e3 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -64,23 +64,12 @@ extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
struct task_struct *last_task_used_math = NULL;
struct thread_info *current_set[NR_CPUS];
-/*
- * the idle loop on a Sparc... ;)
- */
-void cpu_idle(void)
+/* Idle loop support. */
+void arch_cpu_idle(void)
{
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- /* endless idle loop with no priority at all */
- for (;;) {
- while (!need_resched()) {
- if (sparc_idle)
- (*sparc_idle)();
- else
- cpu_relax();
- }
- schedule_preempt_disabled();
- }
+ if (sparc_idle)
+ (*sparc_idle)();
+ local_irq_enable();
}
/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index cdb80b2adbe..9fbf0d14a36 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -52,20 +52,17 @@
#include "kstack.h"
-static void sparc64_yield(int cpu)
+/* Idle loop support on sparc64. */
+void arch_cpu_idle(void)
{
if (tlb_type != hypervisor) {
touch_nmi_watchdog();
- return;
- }
-
- clear_thread_flag(TIF_POLLING_NRFLAG);
- smp_mb__after_clear_bit();
-
- while (!need_resched() && !cpu_is_offline(cpu)) {
+ } else {
unsigned long pstate;
- /* Disable interrupts. */
+ /* The sun4v sleeping code requires that we have PSTATE.IE cleared over
+ * the cpu sleep hypervisor call.
+ */
__asm__ __volatile__(
"rdpr %%pstate, %0\n\t"
"andn %0, %1, %0\n\t"
@@ -73,7 +70,7 @@ static void sparc64_yield(int cpu)
: "=&r" (pstate)
: "i" (PSTATE_IE));
- if (!need_resched() && !cpu_is_offline(cpu))
+ if (!need_resched() && !cpu_is_offline(smp_processor_id()))
sun4v_cpu_yield();
/* Re-enable interrupts. */
@@ -84,36 +81,16 @@ static void sparc64_yield(int cpu)
: "=&r" (pstate)
: "i" (PSTATE_IE));
}
-
- set_thread_flag(TIF_POLLING_NRFLAG);
+ local_irq_enable();
}
-/* The idle loop on sparc64. */
-void cpu_idle(void)
-{
- int cpu = smp_processor_id();
-
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- while(1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
-
- while (!need_resched() && !cpu_is_offline(cpu))
- sparc64_yield(cpu);
-
- rcu_idle_exit();
- tick_nohz_idle_exit();
-
#ifdef CONFIG_HOTPLUG_CPU
- if (cpu_is_offline(cpu)) {
- sched_preempt_enable_no_resched();
- cpu_play_dead();
- }
-#endif
- schedule_preempt_disabled();
- }
+void arch_cpu_idle_dead()
+{
+ sched_preempt_enable_no_resched();
+ cpu_play_dead();
}
+#endif
#ifdef CONFIG_COMPAT
static void show_regwindow32(struct pt_regs *regs)
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 9e7e6d71836..e3f2b81c23f 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -369,7 +369,7 @@ void __cpuinit sparc_start_secondary(void *arg)
local_irq_enable();
wmb();
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
/* We should never reach here! */
BUG();
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 537eb66abd0..77539eda928 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -127,6 +127,8 @@ void __cpuinit smp_callin(void)
/* idle thread is expected to have preempt disabled */
preempt_disable();
+
+ cpu_startup_entry(CPUHP_ONLINE);
}
void cpu_panic(void)
@@ -849,7 +851,7 @@ void smp_tsb_sync(struct mm_struct *mm)
}
extern unsigned long xcall_flush_tlb_mm;
-extern unsigned long xcall_flush_tlb_pending;
+extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
@@ -1074,23 +1076,56 @@ local_flush_and_out:
put_cpu();
}
+struct tlb_pending_info {
+ unsigned long ctx;
+ unsigned long nr;
+ unsigned long *vaddrs;
+};
+
+static void tlb_pending_func(void *info)
+{
+ struct tlb_pending_info *t = info;
+
+ __flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
+}
+
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
u32 ctx = CTX_HWBITS(mm->context);
+ struct tlb_pending_info info;
int cpu = get_cpu();
+ info.ctx = ctx;
+ info.nr = nr;
+ info.vaddrs = vaddrs;
+
if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
else
- smp_cross_call_masked(&xcall_flush_tlb_pending,
- ctx, nr, (unsigned long) vaddrs,
- mm_cpumask(mm));
+ smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
+ &info, 1);
__flush_tlb_pending(ctx, nr, vaddrs);
put_cpu();
}
+void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+{
+ unsigned long context = CTX_HWBITS(mm->context);
+ int cpu = get_cpu();
+
+ if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+ cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
+ else
+ smp_cross_call_masked(&xcall_flush_tlb_page,
+ context, vaddr, 0,
+ mm_cpumask(mm));
+ __flush_tlb_page(context, vaddr);
+
+ put_cpu();
+}
+
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
start &= PAGE_MASK;
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
index da1b781b5e6..2e973a26fbd 100644
--- a/arch/sparc/kernel/trampoline_64.S
+++ b/arch/sparc/kernel/trampoline_64.S
@@ -407,8 +407,7 @@ after_lock_tlb:
call smp_callin
nop
- call cpu_idle
- mov 0, %o0
+
call cpu_panic
nop
1: b,a,pt %xcc, 1b
diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c
index 48d00e72ce1..8ec4e9c0251 100644
--- a/arch/sparc/lib/bitext.c
+++ b/arch/sparc/lib/bitext.c
@@ -119,11 +119,7 @@ void bit_map_clear(struct bit_map *t, int offset, int len)
void bit_map_init(struct bit_map *t, unsigned long *map, int size)
{
-
- if ((size & 07) != 0)
- BUG();
- memset(map, 0, size>>3);
-
+ bitmap_zero(map, size);
memset(t, 0, sizeof *t);
spin_lock_init(&t->lock);
t->map = map;
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 48e0c030e8f..4490c397bb5 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -282,14 +282,8 @@ static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);
#endif
- for (tmp = start_pfn; tmp < end_pfn; tmp++) {
- struct page *page = pfn_to_page(tmp);
-
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalhigh_pages++;
- }
+ for (tmp = start_pfn; tmp < end_pfn; tmp++)
+ free_highmem_page(pfn_to_page(tmp));
}
void __init mem_init(void)
@@ -347,8 +341,6 @@ void __init mem_init(void)
map_high_region(start_pfn, end_pfn);
}
- totalram_pages += totalhigh_pages;
-
codepages = (((unsigned long) &_etext) - ((unsigned long)&_start));
codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
datapages = (((unsigned long) &_edata) - ((unsigned long)&_etext));
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 1588d33d549..6ac99d64a13 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2181,10 +2181,9 @@ unsigned long vmemmap_table[VMEMMAP_SIZE];
static long __meminitdata addr_start, addr_end;
static int __meminitdata node_start;
-int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
+int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
+ int node)
{
- unsigned long vstart = (unsigned long) start;
- unsigned long vend = (unsigned long) (start + nr);
unsigned long phys_start = (vstart - VMEMMAP_BASE);
unsigned long phys_end = (vend - VMEMMAP_BASE);
unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
@@ -2236,7 +2235,7 @@ void __meminit vmemmap_populate_print_last(void)
}
}
-void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void vmemmap_free(unsigned long start, unsigned long end)
{
}
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 0f4f7191fbb..28f96f27c76 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -34,7 +34,7 @@
#define IOMMU_RNGE IOMMU_RNGE_256MB
#define IOMMU_START 0xF0000000
#define IOMMU_WINSIZE (256*1024*1024U)
-#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 265KB */
+#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 256KB */
#define IOMMU_ORDER 6 /* 4096 * (1<<6) */
/* srmmu.c */
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index c38bb72e3e8..036c2797dec 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -280,7 +280,9 @@ static void __init srmmu_nocache_init(void)
SRMMU_NOCACHE_ALIGN_MAX, 0UL);
memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
- srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
+ srmmu_nocache_bitmap =
+ __alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+ SMP_CACHE_BYTES, 0UL);
bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index ba6ae7ffdc2..83d89bcb44a 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
void flush_tlb_pending(void)
{
struct tlb_batch *tb = &get_cpu_var(tlb_batch);
+ struct mm_struct *mm = tb->mm;
- if (tb->tlb_nr) {
- flush_tsb_user(tb);
+ if (!tb->tlb_nr)
+ goto out;
- if (CTX_VALID(tb->mm->context)) {
+ flush_tsb_user(tb);
+
+ if (CTX_VALID(mm->context)) {
+ if (tb->tlb_nr == 1) {
+ global_flush_tlb_page(mm, tb->vaddrs[0]);
+ } else {
#ifdef CONFIG_SMP
smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
&tb->vaddrs[0]);
@@ -37,12 +43,30 @@ void flush_tlb_pending(void)
tb->tlb_nr, &tb->vaddrs[0]);
#endif
}
- tb->tlb_nr = 0;
}
+ tb->tlb_nr = 0;
+
+out:
put_cpu_var(tlb_batch);
}
+void arch_enter_lazy_mmu_mode(void)
+{
+ struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+
+ tb->active = 1;
+}
+
+void arch_leave_lazy_mmu_mode(void)
+{
+ struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+
+ if (tb->tlb_nr)
+ flush_tlb_pending();
+ tb->active = 0;
+}
+
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
bool exec)
{
@@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
nr = 0;
}
+ if (!tb->active) {
+ global_flush_tlb_page(mm, vaddr);
+ flush_tsb_user_page(mm, vaddr);
+ goto out;
+ }
+
if (nr == 0)
tb->mm = mm;
@@ -68,6 +98,7 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
if (nr >= TLB_BATCH_NR)
flush_tlb_pending();
+out:
put_cpu_var(tlb_batch);
}
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 428982b9bec..2cc3bce5ee9 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -7,11 +7,10 @@
#include <linux/preempt.h>
#include <linux/slab.h>
#include <asm/page.h>
-#include <asm/tlbflush.h>
-#include <asm/tlb.h>
-#include <asm/mmu_context.h>
#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
#include <asm/tsb.h>
+#include <asm/tlb.h>
#include <asm/oplib.h>
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
@@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
}
}
-static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
- unsigned long tsb, unsigned long nentries)
+static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
+ unsigned long hash_shift,
+ unsigned long nentries)
{
- unsigned long i;
+ unsigned long tag, ent, hash;
- for (i = 0; i < tb->tlb_nr; i++) {
- unsigned long v = tb->vaddrs[i];
- unsigned long tag, ent, hash;
+ v &= ~0x1UL;
+ hash = tsb_hash(v, hash_shift, nentries);
+ ent = tsb + (hash * sizeof(struct tsb));
+ tag = (v >> 22UL);
- v &= ~0x1UL;
+ tsb_flush(ent, tag);
+}
- hash = tsb_hash(v, hash_shift, nentries);
- ent = tsb + (hash * sizeof(struct tsb));
- tag = (v >> 22UL);
+static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+ unsigned long tsb, unsigned long nentries)
+{
+ unsigned long i;
- tsb_flush(ent, tag);
- }
+ for (i = 0; i < tb->tlb_nr; i++)
+ __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
}
void flush_tsb_user(struct tlb_batch *tb)
@@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb)
spin_unlock_irqrestore(&mm->context.lock, flags);
}
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
+{
+ unsigned long nentries, base, flags;
+
+ spin_lock_irqsave(&mm->context.lock, flags);
+
+ base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+ nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+ base = __pa(base);
+ __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+ if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+ base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+ nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+ base = __pa(base);
+ __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
+ }
+#endif
+ spin_unlock_irqrestore(&mm->context.lock, flags);
+}
+
#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index f8e13d421fc..432aa0cb1b3 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -53,6 +53,33 @@ __flush_tlb_mm: /* 18 insns */
nop
.align 32
+ .globl __flush_tlb_page
+__flush_tlb_page: /* 22 insns */
+ /* %o0 = context, %o1 = vaddr */
+ rdpr %pstate, %g7
+ andn %g7, PSTATE_IE, %g2
+ wrpr %g2, %pstate
+ mov SECONDARY_CONTEXT, %o4
+ ldxa [%o4] ASI_DMMU, %g2
+ stxa %o0, [%o4] ASI_DMMU
+ andcc %o1, 1, %g0
+ andn %o1, 1, %o3
+ be,pn %icc, 1f
+ or %o3, 0x10, %o3
+ stxa %g0, [%o3] ASI_IMMU_DEMAP
+1: stxa %g0, [%o3] ASI_DMMU_DEMAP
+ membar #Sync
+ stxa %g2, [%o4] ASI_DMMU
+ sethi %hi(KERNBASE), %o4
+ flush %o4
+ retl
+ wrpr %g7, 0x0, %pstate
+ nop
+ nop
+ nop
+ nop
+
+ .align 32
.globl __flush_tlb_pending
__flush_tlb_pending: /* 26 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
@@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */
retl
wrpr %g7, 0x0, %pstate
+__cheetah_flush_tlb_page: /* 22 insns */
+ /* %o0 = context, %o1 = vaddr */
+ rdpr %pstate, %g7
+ andn %g7, PSTATE_IE, %g2
+ wrpr %g2, 0x0, %pstate
+ wrpr %g0, 1, %tl
+ mov PRIMARY_CONTEXT, %o4
+ ldxa [%o4] ASI_DMMU, %g2
+ srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
+ sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
+ or %o0, %o3, %o0 /* Preserve nucleus page size fields */
+ stxa %o0, [%o4] ASI_DMMU
+ andcc %o1, 1, %g0
+ be,pn %icc, 1f
+ andn %o1, 1, %o3
+ stxa %g0, [%o3] ASI_IMMU_DEMAP
+1: stxa %g0, [%o3] ASI_DMMU_DEMAP
+ membar #Sync
+ stxa %g2, [%o4] ASI_DMMU
+ sethi %hi(KERNBASE), %o4
+ flush %o4
+ wrpr %g0, 0, %tl
+ retl
+ wrpr %g7, 0x0, %pstate
+
__cheetah_flush_tlb_pending: /* 27 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
rdpr %pstate, %g7
@@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */
retl
nop
+__hypervisor_flush_tlb_page: /* 11 insns */
+ /* %o0 = context, %o1 = vaddr */
+ mov %o0, %g2
+ mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
+ mov %g2, %o1 /* ARG1: mmu context */
+ mov HV_MMU_ALL, %o2 /* ARG2: flags */
+ srlx %o0, PAGE_SHIFT, %o0
+ sllx %o0, PAGE_SHIFT, %o0
+ ta HV_MMU_UNMAP_ADDR_TRAP
+ brnz,pn %o0, __hypervisor_tlb_tl0_error
+ mov HV_MMU_UNMAP_ADDR_TRAP, %o1
+ retl
+ nop
+
__hypervisor_flush_tlb_pending: /* 16 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
sllx %o1, 3, %g1
@@ -339,6 +405,13 @@ cheetah_patch_cachetlbops:
call tlb_patch_one
mov 19, %o2
+ sethi %hi(__flush_tlb_page), %o0
+ or %o0, %lo(__flush_tlb_page), %o0
+ sethi %hi(__cheetah_flush_tlb_page), %o1
+ or %o1, %lo(__cheetah_flush_tlb_page), %o1
+ call tlb_patch_one
+ mov 22, %o2
+
sethi %hi(__flush_tlb_pending), %o0
or %o0, %lo(__flush_tlb_pending), %o0
sethi %hi(__cheetah_flush_tlb_pending), %o1
@@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */
nop
nop
- .globl xcall_flush_tlb_pending
-xcall_flush_tlb_pending: /* 21 insns */
- /* %g5=context, %g1=nr, %g7=vaddrs[] */
- sllx %g1, 3, %g1
+ .globl xcall_flush_tlb_page
+xcall_flush_tlb_page: /* 17 insns */
+ /* %g5=context, %g1=vaddr */
mov PRIMARY_CONTEXT, %g4
ldxa [%g4] ASI_DMMU, %g2
srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */
or %g5, %g4, %g5
mov PRIMARY_CONTEXT, %g4
stxa %g5, [%g4] ASI_DMMU
-1: sub %g1, (1 << 3), %g1
- ldx [%g7 + %g1], %g5
- andcc %g5, 0x1, %g0
+ andcc %g1, 0x1, %g0
be,pn %icc, 2f
-
- andn %g5, 0x1, %g5
+ andn %g1, 0x1, %g5
stxa %g0, [%g5] ASI_IMMU_DEMAP
2: stxa %g0, [%g5] ASI_DMMU_DEMAP
membar #Sync
- brnz,pt %g1, 1b
- nop
stxa %g2, [%g4] ASI_DMMU
retry
nop
+ nop
.globl xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range: /* 25 insns */
@@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
membar #Sync
retry
- .globl __hypervisor_xcall_flush_tlb_pending
-__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
- /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
- sllx %g1, 3, %g1
+ .globl __hypervisor_xcall_flush_tlb_page
+__hypervisor_xcall_flush_tlb_page: /* 17 insns */
+ /* %g5=ctx, %g1=vaddr */
mov %o0, %g2
mov %o1, %g3
mov %o2, %g4
-1: sub %g1, (1 << 3), %g1
- ldx [%g7 + %g1], %o0 /* ARG0: virtual address */
+ mov %g1, %o0 /* ARG0: virtual address */
mov %g5, %o1 /* ARG1: mmu context */
mov HV_MMU_ALL, %o2 /* ARG2: flags */
srlx %o0, PAGE_SHIFT, %o0
@@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */
mov HV_MMU_UNMAP_ADDR_TRAP, %g6
brnz,a,pn %o0, __hypervisor_tlb_xcall_error
mov %o0, %g5
- brnz,pt %g1, 1b
- nop
mov %g2, %o0
mov %g3, %o1
mov %g4, %o2
@@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops:
call tlb_patch_one
mov 10, %o2
+ sethi %hi(__flush_tlb_page), %o0
+ or %o0, %lo(__flush_tlb_page), %o0
+ sethi %hi(__hypervisor_flush_tlb_page), %o1
+ or %o1, %lo(__hypervisor_flush_tlb_page), %o1
+ call tlb_patch_one
+ mov 11, %o2
+
sethi %hi(__flush_tlb_pending), %o0
or %o0, %lo(__flush_tlb_pending), %o0
sethi %hi(__hypervisor_flush_tlb_pending), %o1
@@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops:
call tlb_patch_one
mov 21, %o2
- sethi %hi(xcall_flush_tlb_pending), %o0
- or %o0, %lo(xcall_flush_tlb_pending), %o0
- sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1
- or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
+ sethi %hi(xcall_flush_tlb_page), %o0
+ or %o0, %lo(xcall_flush_tlb_page), %o0
+ sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
+ or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
call tlb_patch_one
- mov 21, %o2
+ mov 17, %o2
sethi %hi(xcall_flush_tlb_kernel_range), %o0
or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index ff496ab1e79..25877aebc68 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -17,7 +17,7 @@ config TILE
select GENERIC_IRQ_SHOW
select HAVE_DEBUG_BUGVERBOSE
select HAVE_SYSCALL_WRAPPERS if TILEGX
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select SYS_HYPERVISOR
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_CLOCKEVENTS
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
index 8c5eff6d6df..47684815e5c 100644
--- a/arch/tile/configs/tilegx_defconfig
+++ b/arch/tile/configs/tilegx_defconfig
@@ -330,7 +330,6 @@ CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=m
-CONFIG_MULTICORE_RAID456=y
CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_DEBUG=y
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
index e7a3dfcbcda..dd2b8f0c631 100644
--- a/arch/tile/configs/tilepro_defconfig
+++ b/arch/tile/configs/tilepro_defconfig
@@ -324,7 +324,6 @@ CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=m
-CONFIG_MULTICORE_RAID456=y
CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_DEBUG=y
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 001d418a895..78f1f2ded86 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -288,6 +288,9 @@ long compat_sys_sync_file_range2(int fd, unsigned int flags,
long compat_sys_fallocate(int fd, int mode,
u32 offset_lo, u32 offset_hi,
u32 len_lo, u32 len_hi);
+long compat_sys_llseek(unsigned int fd, unsigned int offset_high,
+ unsigned int offset_low, loff_t __user * result,
+ unsigned int origin);
/* Assembly trampoline to avoid clobbering r0. */
long _compat_sys_rt_sigreturn(void);
diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h
index 0f885af2b62..3257733003f 100644
--- a/arch/tile/include/asm/hugetlb.h
+++ b/arch/tile/include/asm/hugetlb.h
@@ -16,6 +16,7 @@
#define _ASM_TILE_HUGETLB_H
#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 241c0bb60b1..c96f9bbb760 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -40,7 +40,15 @@
#include <asm/percpu.h>
#include <arch/spr_def.h>
-/* Set and clear kernel interrupt masks. */
+/*
+ * Set and clear kernel interrupt masks.
+ *
+ * NOTE: __insn_mtspr() is a compiler builtin marked as a memory
+ * clobber. We rely on it being equivalent to a compiler barrier in
+ * this code since arch_local_irq_save() and friends must act as
+ * compiler barriers. This compiler semantic is baked into enough
+ * places that the compiler will maintain it going forward.
+ */
#if CHIP_HAS_SPLIT_INTR_MASK()
#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
# error Fix assumptions about which word various interrupts are in
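
The expanded comment above relies on __insn_mtspr() being a builtin with a memory clobber, which makes it act as a compiler barrier. As a rough plain-C illustration (not the tile intrinsic), an empty asm statement with a "memory" clobber gives the same compiler-level guarantee: loads and stores are neither cached in registers across it nor reordered around it. Note this is only a compiler barrier, not a CPU memory barrier; demo_barrier() and demo_publish() are hypothetical names.

    /* Minimal sketch of a compiler barrier via a memory clobber. */
    static inline void demo_barrier(void)
    {
            __asm__ __volatile__("" : : : "memory");
    }

    static int flag;

    void demo_publish(int *data)
    {
            *data = 42;
            demo_barrier();   /* compiler may not sink the store below this */
            flag = 1;
    }
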
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index e9c670d7a7f..ccc8ef37235 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -153,8 +153,6 @@ extern void _cpu_idle(void);
#define TS_POLLING 0x0004 /* in idle loop but not sleeping */
#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */
-#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
-
#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK 1
static inline void set_restore_sigmask(void)
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index 7f72401b4f4..6ea4cdb3c6a 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -32,50 +32,65 @@
* adapt the usual convention.
*/
-long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high)
+COMPAT_SYSCALL_DEFINE4(truncate64, char __user *, filename, u32, dummy,
+ u32, low, u32, high)
{
return sys_truncate(filename, ((loff_t)high << 32) | low);
}
-long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high)
+COMPAT_SYSCALL_DEFINE4(ftruncate64, unsigned int, fd, u32, dummy,
+ u32, low, u32, high)
{
return sys_ftruncate(fd, ((loff_t)high << 32) | low);
}
-long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count,
- u32 dummy, u32 low, u32 high)
+COMPAT_SYSCALL_DEFINE6(pread64, unsigned int, fd, char __user *, ubuf,
+ size_t, count, u32, dummy, u32, low, u32, high)
{
return sys_pread64(fd, ubuf, count, ((loff_t)high << 32) | low);
}
-long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count,
- u32 dummy, u32 low, u32 high)
+COMPAT_SYSCALL_DEFINE6(pwrite64, unsigned int, fd, char __user *, ubuf,
+ size_t, count, u32, dummy, u32, low, u32, high)
{
return sys_pwrite64(fd, ubuf, count, ((loff_t)high << 32) | low);
}
-long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len)
+COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, low, u32, high,
+ char __user *, buf, size_t, len)
{
return sys_lookup_dcookie(((loff_t)high << 32) | low, buf, len);
}
-long compat_sys_sync_file_range2(int fd, unsigned int flags,
- u32 offset_lo, u32 offset_hi,
- u32 nbytes_lo, u32 nbytes_hi)
+COMPAT_SYSCALL_DEFINE6(sync_file_range2, int, fd, unsigned int, flags,
+ u32, offset_lo, u32, offset_hi,
+ u32, nbytes_lo, u32, nbytes_hi)
{
return sys_sync_file_range(fd, ((loff_t)offset_hi << 32) | offset_lo,
((loff_t)nbytes_hi << 32) | nbytes_lo,
flags);
}
-long compat_sys_fallocate(int fd, int mode,
- u32 offset_lo, u32 offset_hi,
- u32 len_lo, u32 len_hi)
+COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode,
+ u32, offset_lo, u32, offset_hi,
+ u32, len_lo, u32, len_hi)
{
return sys_fallocate(fd, mode, ((loff_t)offset_hi << 32) | offset_lo,
((loff_t)len_hi << 32) | len_lo);
}
+/*
+ * Avoid bug in generic sys_llseek() that specifies offset_high and
+ * offset_low as "unsigned long", thus making it possible to pass
+ * a sign-extended high 32 bits in offset_low.
+ */
+COMPAT_SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned int, offset_high,
+ unsigned int, offset_low, loff_t __user *, result,
+ unsigned int, origin)
+{
+ return sys_llseek(fd, offset_high, offset_low, result, origin);
+}
+
/* Provide the compat syscall number to call mapping. */
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
@@ -83,6 +98,7 @@ long compat_sys_fallocate(int fd, int mode,
/* See comments in sys.c */
#define compat_sys_fadvise64_64 sys32_fadvise64_64
#define compat_sys_readahead sys32_readahead
+#define sys_llseek compat_sys_llseek
/* Call the assembly trampolines where necessary. */
#define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn
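
The compat_sys_llseek() wrapper added above exists because the generic sys_llseek() declares offset_high/offset_low as "unsigned long": on a 64-bit kernel a sign-extended 32-bit value can then land in the low half and corrupt the assembled offset. The small userspace demo below shows that failure mode with arbitrary values; it is an illustration, not kernel code.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* 32-bit halves as a compat task would pass them. */
            uint32_t high32 = 0x00000001u;
            int32_t  low32  = (int32_t)0x80000000u;   /* top bit set */

            /* A 64-bit "unsigned long" parameter may receive low32 sign-extended. */
            uint64_t low_sign_extended = (uint64_t)(int64_t)low32;
            uint64_t bad  = ((uint64_t)high32 << 32) | low_sign_extended;

            /* An "unsigned int" parameter receives only the low 32 bits. */
            uint64_t good = ((uint64_t)high32 << 32) | (uint32_t)low32;

            printf("sign-extended low: %#llx\n", (unsigned long long)bad);
            printf("clean low:         %#llx\n", (unsigned long long)good);
            return 0;
    }
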
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
index afb9c9a0d88..34d72a151bf 100644
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/irqflags.h>
+#include <linux/printk.h>
#include <asm/setup.h>
#include <hv/hypervisor.h>
@@ -33,25 +34,8 @@ static struct console early_hv_console = {
};
/* Direct interface for emergencies */
-static struct console *early_console = &early_hv_console;
-static int early_console_initialized;
static int early_console_complete;
-static void early_vprintk(const char *fmt, va_list ap)
-{
- char buf[512];
- int n = vscnprintf(buf, sizeof(buf), fmt, ap);
- early_console->write(early_console, buf, n);
-}
-
-void early_printk(const char *fmt, ...)
-{
- va_list ap;
- va_start(ap, fmt);
- early_vprintk(fmt, ap);
- va_end(ap);
-}
-
void early_panic(const char *fmt, ...)
{
va_list ap;
@@ -69,14 +53,13 @@ static int __initdata keep_early;
static int __init setup_early_printk(char *str)
{
- if (early_console_initialized)
+ if (early_console)
return 1;
if (str != NULL && strncmp(str, "keep", 4) == 0)
keep_early = 1;
early_console = &early_hv_console;
- early_console_initialized = 1;
register_console(early_console);
return 0;
@@ -85,12 +68,12 @@ static int __init setup_early_printk(char *str)
void __init disable_early_printk(void)
{
early_console_complete = 1;
- if (!early_console_initialized || !early_console)
+ if (!early_console)
return;
if (!keep_early) {
early_printk("disabling early console\n");
unregister_console(early_console);
- early_console_initialized = 0;
+ early_console = NULL;
} else {
early_printk("keeping early console\n");
}
@@ -98,7 +81,7 @@ void __init disable_early_printk(void)
void warn_early_printk(void)
{
- if (early_console_complete || early_console_initialized)
+ if (early_console_complete || early_console)
return;
early_printk("\
Machine shutting down before console output is fully initialized.\n\
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index caf93ae1179..80b2a18deb8 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -40,13 +40,11 @@
#include <arch/abi.h>
#include <arch/sim_def.h>
-
/*
* Use the (x86) "idle=poll" option to prefer low latency when leaving the
* idle loop over low power while in the idle loop, e.g. if we have
* one thread per core and we want to get threads out of futex waits fast.
*/
-static int no_idle_nap;
static int __init idle_setup(char *str)
{
if (!str)
@@ -54,64 +52,19 @@ static int __init idle_setup(char *str)
if (!strcmp(str, "poll")) {
pr_info("using polling idle threads.\n");
- no_idle_nap = 1;
- } else if (!strcmp(str, "halt"))
- no_idle_nap = 0;
- else
- return -1;
-
- return 0;
+ cpu_idle_poll_ctrl(true);
+ return 0;
+ } else if (!strcmp(str, "halt")) {
+ return 0;
+ }
+ return -1;
}
early_param("idle", idle_setup);
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
+void arch_cpu_idle(void)
{
- int cpu = smp_processor_id();
-
-
- current_thread_info()->status |= TS_POLLING;
-
- if (no_idle_nap) {
- while (1) {
- while (!need_resched())
- cpu_relax();
- schedule();
- }
- }
-
- /* endless idle loop with no priority at all */
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
- while (!need_resched()) {
- if (cpu_is_offline(cpu))
- BUG(); /* no HOTPLUG_CPU */
-
- local_irq_disable();
- __get_cpu_var(irq_stat).idle_timestamp = jiffies;
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we
- * test NEED_RESCHED:
- */
- smp_mb();
-
- if (!need_resched())
- _cpu_idle();
- else
- local_irq_enable();
- current_thread_info()->status |= TS_POLLING;
- }
- rcu_idle_exit();
- tick_nohz_idle_exit();
- schedule_preempt_disabled();
- }
+ __get_cpu_var(irq_stat).idle_timestamp = jiffies;
+ _cpu_idle();
}
/*
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index d1e15f7b59c..7a5aa1a7864 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1004,15 +1004,8 @@ void __cpuinit setup_cpu(int boot)
#ifdef CONFIG_BLK_DEV_INITRD
-/*
- * Note that the kernel can potentially support other compression
- * techniques than gz, though we don't do so by default. If we ever
- * decide to do so we can either look for other filename extensions,
- * or just allow a file with this name to be compressed with an
- * arbitrary compressor (somewhat counterintuitively).
- */
static int __initdata set_initramfs_file;
-static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
+static char __initdata initramfs_file[128] = "initramfs";
static int __init setup_initramfs_file(char *str)
{
@@ -1026,9 +1019,9 @@ static int __init setup_initramfs_file(char *str)
early_param("initramfs_file", setup_initramfs_file);
/*
- * We look for an "initramfs.cpio.gz" file in the hvfs.
- * If there is one, we allocate some memory for it and it will be
- * unpacked to the initramfs.
+ * We look for a file called "initramfs" in the hvfs. If there is one, we
+ * allocate some memory for it and it will be unpacked to the initramfs.
+ * If it's compressed, the initrd code will uncompress it first.

*/
static void __init load_hv_initrd(void)
{
@@ -1038,10 +1031,16 @@ static void __init load_hv_initrd(void)
fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
if (fd == HV_ENOENT) {
- if (set_initramfs_file)
+ if (set_initramfs_file) {
pr_warning("No such hvfs initramfs file '%s'\n",
initramfs_file);
- return;
+ return;
+ } else {
+ /* Try old backwards-compatible name. */
+ fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
+ if (fd == HV_ENOENT)
+ return;
+ }
}
BUG_ON(fd < 0);
stat = hv_fs_fstat(fd);
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index e686c5ac90b..44bab29bf2f 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -207,9 +207,7 @@ void __cpuinit online_secondary(void)
/* Set up tile-timer clock-event device on this cpu */
setup_tile_timer();
- preempt_enable();
-
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
}
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index b3b4972c245..dfd63ce8732 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in)
in parallel. Reuse of the virtual address is prevented by
leaving it in the global lists until we're done with it.
cpa takes care of the direct mappings. */
- read_lock(&vmlist_lock);
- for (p = vmlist; p; p = p->next) {
- if (p->addr == addr)
- break;
- }
- read_unlock(&vmlist_lock);
+ p = find_vm_area((void *)addr);
if (!p) {
pr_err("iounmap: bad address %p\n", addr);
diff --git a/arch/um/drivers/chan.h b/arch/um/drivers/chan.h
index 78f1b899996..c512b0306dd 100644
--- a/arch/um/drivers/chan.h
+++ b/arch/um/drivers/chan.h
@@ -37,7 +37,7 @@ extern int console_write_chan(struct chan *chan, const char *buf,
extern int console_open_chan(struct line *line, struct console *co);
extern void deactivate_chan(struct chan *chan, int irq);
extern void reactivate_chan(struct chan *chan, int irq);
-extern void chan_enable_winch(struct chan *chan, struct tty_struct *tty);
+extern void chan_enable_winch(struct chan *chan, struct tty_port *port);
extern int enable_chan(struct line *line);
extern void close_chan(struct line *line);
extern int chan_window_size(struct line *line,
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 15c553c239a..acbe6c67afb 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -122,10 +122,10 @@ static int open_chan(struct list_head *chans)
return err;
}
-void chan_enable_winch(struct chan *chan, struct tty_struct *tty)
+void chan_enable_winch(struct chan *chan, struct tty_port *port)
{
if (chan && chan->primary && chan->ops->winch)
- register_winch(chan->fd, tty);
+ register_winch(chan->fd, port);
}
static void line_timer_cb(struct work_struct *work)
@@ -568,11 +568,7 @@ void chan_interrupt(struct line *line, int irq)
reactivate_fd(chan->fd, irq);
if (err == -EIO) {
if (chan->primary) {
- struct tty_struct *tty = tty_port_tty_get(&line->port);
- if (tty != NULL) {
- tty_hangup(tty);
- tty_kref_put(tty);
- }
+ tty_port_tty_hangup(&line->port, false);
if (line->chan_out != chan)
close_one_chan(line->chan_out, 1);
}
diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c
index 9be670ad23b..3fd7c3efdb1 100644
--- a/arch/um/drivers/chan_user.c
+++ b/arch/um/drivers/chan_user.c
@@ -216,7 +216,7 @@ static int winch_thread(void *arg)
}
}
-static int winch_tramp(int fd, struct tty_struct *tty, int *fd_out,
+static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
unsigned long *stack_out)
{
struct winch_data data;
@@ -271,7 +271,7 @@ static int winch_tramp(int fd, struct tty_struct *tty, int *fd_out,
return err;
}
-void register_winch(int fd, struct tty_struct *tty)
+void register_winch(int fd, struct tty_port *port)
{
unsigned long stack;
int pid, thread, count, thread_fd = -1;
@@ -281,17 +281,17 @@ void register_winch(int fd, struct tty_struct *tty)
return;
pid = tcgetpgrp(fd);
- if (is_skas_winch(pid, fd, tty)) {
- register_winch_irq(-1, fd, -1, tty, 0);
+ if (is_skas_winch(pid, fd, port)) {
+ register_winch_irq(-1, fd, -1, port, 0);
return;
}
if (pid == -1) {
- thread = winch_tramp(fd, tty, &thread_fd, &stack);
+ thread = winch_tramp(fd, port, &thread_fd, &stack);
if (thread < 0)
return;
- register_winch_irq(thread_fd, fd, thread, tty, stack);
+ register_winch_irq(thread_fd, fd, thread, port, stack);
count = write(thread_fd, &c, sizeof(c));
if (count != sizeof(c))
diff --git a/arch/um/drivers/chan_user.h b/arch/um/drivers/chan_user.h
index dc693298eb8..03f1b565c5f 100644
--- a/arch/um/drivers/chan_user.h
+++ b/arch/um/drivers/chan_user.h
@@ -38,10 +38,10 @@ extern int generic_window_size(int fd, void *unused, unsigned short *rows_out,
unsigned short *cols_out);
extern void generic_free(void *data);
-struct tty_struct;
-extern void register_winch(int fd, struct tty_struct *tty);
+struct tty_port;
+extern void register_winch(int fd, struct tty_port *port);
extern void register_winch_irq(int fd, int tty_fd, int pid,
- struct tty_struct *tty, unsigned long stack);
+ struct tty_port *port, unsigned long stack);
#define __channel_help(fn, prefix) \
__uml_help(fn, prefix "[0-9]*=<channel description>\n" \
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index f1b38571f94..8035145f043 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -248,7 +248,6 @@ static irqreturn_t line_write_interrupt(int irq, void *data)
{
struct chan *chan = data;
struct line *line = chan->line;
- struct tty_struct *tty;
int err;
/*
@@ -267,12 +266,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data)
}
spin_unlock(&line->lock);
- tty = tty_port_tty_get(&line->port);
- if (tty == NULL)
- return IRQ_NONE;
-
- tty_wakeup(tty);
- tty_kref_put(tty);
+ tty_port_tty_wakeup(&line->port);
return IRQ_HANDLED;
}
@@ -305,7 +299,7 @@ static int line_activate(struct tty_port *port, struct tty_struct *tty)
return ret;
if (!line->sigio) {
- chan_enable_winch(line->chan_out, tty);
+ chan_enable_winch(line->chan_out, port);
line->sigio = 1;
}
@@ -315,8 +309,22 @@ static int line_activate(struct tty_port *port, struct tty_struct *tty)
return 0;
}
+static void unregister_winch(struct tty_struct *tty);
+
+static void line_destruct(struct tty_port *port)
+{
+ struct tty_struct *tty = tty_port_tty_get(port);
+ struct line *line = tty->driver_data;
+
+ if (line->sigio) {
+ unregister_winch(tty);
+ line->sigio = 0;
+ }
+}
+
static const struct tty_port_operations line_port_ops = {
.activate = line_activate,
+ .destruct = line_destruct,
};
int line_open(struct tty_struct *tty, struct file *filp)
@@ -340,18 +348,6 @@ int line_install(struct tty_driver *driver, struct tty_struct *tty,
return 0;
}
-static void unregister_winch(struct tty_struct *tty);
-
-void line_cleanup(struct tty_struct *tty)
-{
- struct line *line = tty->driver_data;
-
- if (line->sigio) {
- unregister_winch(tty);
- line->sigio = 0;
- }
-}
-
void line_close(struct tty_struct *tty, struct file * filp)
{
struct line *line = tty->driver_data;
@@ -601,7 +597,7 @@ struct winch {
int fd;
int tty_fd;
int pid;
- struct tty_struct *tty;
+ struct tty_port *port;
unsigned long stack;
struct work_struct work;
};
@@ -655,7 +651,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
goto out;
}
}
- tty = winch->tty;
+ tty = tty_port_tty_get(winch->port);
if (tty != NULL) {
line = tty->driver_data;
if (line != NULL) {
@@ -663,6 +659,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
&tty->winsize.ws_col);
kill_pgrp(tty->pgrp, SIGWINCH, 1);
}
+ tty_kref_put(tty);
}
out:
if (winch->fd != -1)
@@ -670,7 +667,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty,
+void register_winch_irq(int fd, int tty_fd, int pid, struct tty_port *port,
unsigned long stack)
{
struct winch *winch;
@@ -685,7 +682,7 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty,
.fd = fd,
.tty_fd = tty_fd,
.pid = pid,
- .tty = tty,
+ .port = port,
.stack = stack });
if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
@@ -714,15 +711,18 @@ static void unregister_winch(struct tty_struct *tty)
{
struct list_head *ele, *next;
struct winch *winch;
+ struct tty_struct *wtty;
spin_lock(&winch_handler_lock);
list_for_each_safe(ele, next, &winch_handlers) {
winch = list_entry(ele, struct winch, list);
- if (winch->tty == tty) {
+ wtty = tty_port_tty_get(winch->port);
+ if (wtty == tty) {
free_winch(winch);
break;
}
+ tty_kref_put(wtty);
}
spin_unlock(&winch_handler_lock);
}
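
The line.c changes above stop caching a struct tty_struct pointer in struct winch and instead keep the longer-lived struct tty_port, looking the tty up only for the moment it is needed. A hedged kernel-style sketch of that look-up/use/release pattern follows (assumes <linux/tty.h>; demo_signal_winch() is a hypothetical name, error handling trimmed), using only calls that appear in the hunk above:

    /* Sketch only: the tty_port_tty_get()/tty_kref_put() pattern. */
    static void demo_signal_winch(struct tty_port *port)
    {
            struct tty_struct *tty = tty_port_tty_get(port);  /* takes a kref */

            if (!tty)
                    return;                 /* nothing attached right now */

            if (tty->pgrp)
                    kill_pgrp(tty->pgrp, SIGWINCH, 1);

            tty_kref_put(tty);              /* drop the reference taken above */
    }
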
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index d8926c30362..39f186252e0 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -218,6 +218,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
len = (*lp->write)(lp->fd, skb, lp);
+ skb_tx_timestamp(skb);
if (len == skb->len) {
dev->stats.tx_packets++;
@@ -281,6 +282,7 @@ static void uml_net_get_drvinfo(struct net_device *dev,
static const struct ethtool_ops uml_net_ethtool_ops = {
.get_drvinfo = uml_net_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_ts_info = ethtool_op_get_ts_info,
};
static void uml_net_user_timer_expire(unsigned long _conn)
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c
index 16fdd0a0f9d..b8d14fa5205 100644
--- a/arch/um/drivers/ssl.c
+++ b/arch/um/drivers/ssl.c
@@ -105,7 +105,6 @@ static const struct tty_operations ssl_ops = {
.throttle = line_throttle,
.unthrottle = line_unthrottle,
.install = ssl_install,
- .cleanup = line_cleanup,
.hangup = line_hangup,
};
diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c
index 827777af3f6..7b361f36ca9 100644
--- a/arch/um/drivers/stdio_console.c
+++ b/arch/um/drivers/stdio_console.c
@@ -110,7 +110,6 @@ static const struct tty_operations console_ops = {
.set_termios = line_set_termios,
.throttle = line_throttle,
.unthrottle = line_unthrottle,
- .cleanup = line_cleanup,
.hangup = line_hangup,
};
diff --git a/arch/um/kernel/early_printk.c b/arch/um/kernel/early_printk.c
index 49480f09245..4a0800bc37b 100644
--- a/arch/um/kernel/early_printk.c
+++ b/arch/um/kernel/early_printk.c
@@ -16,7 +16,7 @@ static void early_console_write(struct console *con, const char *s, unsigned int
um_early_printk(s, n);
}
-static struct console early_console = {
+static struct console early_console_dev = {
.name = "earlycon",
.write = early_console_write,
.flags = CON_BOOT,
@@ -25,8 +25,10 @@ static struct console early_console = {
static int __init setup_early_printk(char *buf)
{
- register_console(&early_console);
-
+ if (!early_console) {
+ early_console = &early_console_dev;
+ register_console(&early_console_dev);
+ }
return 0;
}
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 5abcbfbe7e2..9df292b270a 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -42,17 +42,12 @@ static unsigned long brk_end;
static void setup_highmem(unsigned long highmem_start,
unsigned long highmem_len)
{
- struct page *page;
unsigned long highmem_pfn;
int i;
highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT;
- for (i = 0; i < highmem_len >> PAGE_SHIFT; i++) {
- page = &mem_map[highmem_pfn + i];
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- }
+ for (i = 0; i < highmem_len >> PAGE_SHIFT; i++)
+ free_highmem_page(&mem_map[highmem_pfn + i]);
}
#endif
@@ -73,18 +68,13 @@ void __init mem_init(void)
totalram_pages = free_all_bootmem();
max_low_pfn = totalram_pages;
#ifdef CONFIG_HIGHMEM
- totalhigh_pages = highmem >> PAGE_SHIFT;
- totalram_pages += totalhigh_pages;
+ setup_highmem(end_iomem, highmem);
#endif
num_physpages = totalram_pages;
max_pfn = totalram_pages;
printk(KERN_INFO "Memory: %luk available\n",
nr_free_pages() << (PAGE_SHIFT-10));
kmalloc_ok = 1;
-
-#ifdef CONFIG_HIGHMEM
- setup_highmem(end_iomem, highmem);
-#endif
}
/*
@@ -254,15 +244,7 @@ void free_initmem(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (start < end)
- printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
- (end - start) >> 10);
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page(start);
- totalram_pages++;
- }
+ free_reserved_area(start, end, 0, "initrd");
}
#endif
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index b462b13c5ba..bbcef522bcb 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -210,33 +210,14 @@ void initial_thread_cb(void (*proc)(void *), void *arg)
kmalloc_ok = save_kmalloc_ok;
}
-void default_idle(void)
+void arch_cpu_idle(void)
{
unsigned long long nsecs;
- while (1) {
- /* endless idle loop with no priority at all */
-
- /*
- * although we are an idle CPU, we do not want to
- * get into the scheduler unnecessarily.
- */
- if (need_resched())
- schedule();
-
- tick_nohz_idle_enter();
- rcu_idle_enter();
- nsecs = disable_timer();
- idle_sleep(nsecs);
- rcu_idle_exit();
- tick_nohz_idle_exit();
- }
-}
-
-void cpu_idle(void)
-{
cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
- default_idle();
+ nsecs = disable_timer();
+ idle_sleep(nsecs);
+ local_irq_enable();
}
int __cant_sleep(void) {
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index b1469fe9329..9d9f1b4bf82 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -15,7 +15,7 @@
#include <sysdep/mcontext.h>
#include "internal.h"
-void (*sig_info[NSIG])(int, siginfo_t *, struct uml_pt_regs *) = {
+void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
[SIGTRAP] = relay_signal,
[SIGFPE] = relay_signal,
[SIGILL] = relay_signal,
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index da4b9e9999f..337518c5042 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -15,6 +15,8 @@
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/wait.h>
+#include <sys/time.h>
+#include <sys/resource.h>
#include <asm/unistd.h>
#include <init.h>
#include <os.h>
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index dc50b157fc8..2943e3acdf0 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -9,7 +9,7 @@ config UNICORE32
select GENERIC_ATOMIC64
select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select ARCH_HAVE_CUSTOM_GPIO_H
select GENERIC_FIND_FIRST_BIT
select GENERIC_IRQ_PROBE
diff --git a/arch/unicore32/kernel/early_printk.c b/arch/unicore32/kernel/early_printk.c
index 3922255f1fa..9be0d5d02a9 100644
--- a/arch/unicore32/kernel/early_printk.c
+++ b/arch/unicore32/kernel/early_printk.c
@@ -33,21 +33,17 @@ static struct console early_ocd_console = {
.index = -1,
};
-/* Direct interface for emergencies */
-static struct console *early_console = &early_ocd_console;
-
-static int __initdata keep_early;
-
static int __init setup_early_printk(char *buf)
{
- if (!buf)
+ int keep_early = 0;

+
+ if (!buf || early_console)
return 0;
if (strstr(buf, "keep"))
keep_early = 1;
- if (!strncmp(buf, "ocd", 3))
- early_console = &early_ocd_console;
+ early_console = &early_ocd_console;
if (keep_early)
early_console->flags &= ~CON_BOOT;
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
index 872d7e22d84..7fab86d7c5d 100644
--- a/arch/unicore32/kernel/process.c
+++ b/arch/unicore32/kernel/process.c
@@ -45,25 +45,10 @@ static const char * const processor_modes[] = {
"UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR"
};
-void cpu_idle(void)
+void arch_cpu_idle(void)
{
- /* endless idle loop with no priority at all */
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
- while (!need_resched()) {
- local_irq_disable();
- stop_critical_timings();
- cpu_do_idle();
- local_irq_enable();
- start_critical_timings();
- }
- rcu_idle_exit();
- tick_nohz_idle_exit();
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- }
+ cpu_do_idle();
+ local_irq_enable();
}
static char reboot_mode = 'h';
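
All three idle-loop conversions in this merge (tile, um, unicore32) have the same shape: the per-arch cpu_idle() loop is deleted, the generic idle loop calls arch_cpu_idle() with interrupts disabled, and the hook performs one low-power wait and returns with interrupts enabled. A hedged sketch of that contract is below; demo_wait_for_interrupt() is a placeholder for the arch's real primitive (cpu_do_idle(), _cpu_idle(), idle_sleep(), ...), and this is not meant as a drop-in implementation.

    /* Sketch: the per-arch hook invoked by the generic idle loop. */
    void arch_cpu_idle(void)
    {
            /* Entered with IRQs disabled by the core idle code. */
            demo_wait_for_interrupt();      /* placeholder low-power wait */

            /* The hook must return with interrupts enabled again. */
            local_irq_enable();
    }
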
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
index de186bde897..63df12d71ce 100644
--- a/arch/unicore32/mm/init.c
+++ b/arch/unicore32/mm/init.c
@@ -66,6 +66,9 @@ void show_mem(unsigned int filter)
printk(KERN_DEFAULT "Mem-info:\n");
show_free_areas(filter);
+ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
+ return;
+
for_each_bank(i, mi) {
struct membank *bank = &mi->bank[i];
unsigned int pfn1, pfn2;
@@ -313,24 +316,6 @@ void __init bootmem_init(void)
max_pfn = max_high - PHYS_PFN_OFFSET;
}
-static inline int free_area(unsigned long pfn, unsigned long end, char *s)
-{
- unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
-
- for (; pfn < end; pfn++) {
- struct page *page = pfn_to_page(pfn);
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- pages++;
- }
-
- if (size && s)
- printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
-
- return pages;
-}
-
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
@@ -404,9 +389,9 @@ void __init mem_init(void)
max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
- /* this will put all unused low memory onto the freelists */
free_unused_memmap(&meminfo);
+ /* this will put all unused low memory onto the freelists */
totalram_pages += free_all_bootmem();
reserved_pages = free_pages = 0;
@@ -491,9 +476,7 @@ void __init mem_init(void)
void free_initmem(void)
{
- totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
- __phys_to_pfn(__pa(__init_end)),
- "init");
+ free_initmem_default(0);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -503,9 +486,7 @@ static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
if (!keep_initrd)
- totalram_pages += free_area(__phys_to_pfn(__pa(start)),
- __phys_to_pfn(__pa(end)),
- "initrd");
+ free_reserved_area(start, end, 0, "initrd");
}
static int __init keepinitrd_setup(char *__unused)
diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
index b7a605597b0..13068ee22f3 100644
--- a/arch/unicore32/mm/ioremap.c
+++ b/arch/unicore32/mm/ioremap.c
@@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached);
void __uc32_iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
- struct vm_struct **p, *tmp;
+ struct vm_struct *vm;
/*
* If this is a section based mapping we need to handle it
@@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr)
* all the mappings before the area can be reclaimed
* by someone else.
*/
- write_lock(&vmlist_lock);
- for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
- if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
- if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
- unmap_area_sections((unsigned long)tmp->addr,
- tmp->size);
- }
- break;
- }
- }
- write_unlock(&vmlist_lock);
+ vm = find_vm_area(addr);
+ if (vm && (vm->flags & VM_IOREMAP) &&
+ (vm->flags & VM_UNICORE_SECTION_MAPPING))
+ unmap_area_sections((unsigned long)vm->addr, vm->size);
vunmap(addr);
}
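
Both iounmap() conversions above (tile and unicore32) replace an open-coded walk of the old vmlist under vmlist_lock with a single find_vm_area() lookup. Roughly, the new code has the shape sketched below (demo_iounmap() is a hypothetical name, error handling and the arch-specific section-mapping teardown trimmed):

    /* Sketch of the find_vm_area()-based iounmap shape used above. */
    static void demo_iounmap(volatile void __iomem *io_addr)
    {
            void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
            struct vm_struct *vm;

            vm = find_vm_area(addr);        /* replaces the vmlist walk */
            if (vm && (vm->flags & VM_IOREMAP)) {
                    /* section mappings etc. would be handled here */
            }

            vunmap(addr);
    }
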
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a4f24f5b121..d75b48c11be 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -112,7 +112,7 @@ config X86
select GENERIC_STRNLEN_USER
select HAVE_CONTEXT_TRACKING if X86_64
select HAVE_IRQ_TIME_ACCOUNTING
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select MODULES_USE_ELF_REL if X86_32
select MODULES_USE_ELF_RELA if X86_64
select CLONE_BACKWARDS if X86_32
@@ -188,9 +188,6 @@ config GENERIC_CALIBRATE_DELAY
config ARCH_HAS_CPU_RELAX
def_bool y
-config ARCH_HAS_DEFAULT_IDLE
- def_bool y
-
config ARCH_HAS_CACHE_LINE_SIZE
def_bool y
@@ -1549,6 +1546,7 @@ config X86_SMAP
config EFI
bool "EFI runtime service support"
depends on ACPI
+ select UCS2_STRING
---help---
This enables the kernel to use EFI runtime services that are
available (such as the EFI variable services).
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 8a84501acb1..5ef205c5f37 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -4,7 +4,7 @@
# create a compressed vmlinux image from the original vmlinux
#
-targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o
+targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo
KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
@@ -29,7 +29,6 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
$(obj)/piggy.o
$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
-$(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone
ifeq ($(CONFIG_EFI_STUB), y)
VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o
@@ -43,7 +42,7 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
-targets += vmlinux.bin.all vmlinux.relocs
+targets += $(patsubst $(obj)/%,%,$(VMLINUX_OBJS)) vmlinux.bin.all vmlinux.relocs
CMD_RELOCS = arch/x86/tools/relocs
quiet_cmd_relocs = RELOCS $@
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index c205035a6b9..35ee62fccf9 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -251,6 +251,51 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
*size = len;
}
+static efi_status_t setup_efi_vars(struct boot_params *params)
+{
+ struct setup_data *data;
+ struct efi_var_bootdata *efidata;
+ u64 store_size, remaining_size, var_size;
+ efi_status_t status;
+
+ if (sys_table->runtime->hdr.revision < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+ while (data && data->next)
+ data = (struct setup_data *)(unsigned long)data->next;
+
+ status = efi_call_phys4((void *)sys_table->runtime->query_variable_info,
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS, &store_size,
+ &remaining_size, &var_size);
+
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_call_phys3(sys_table->boottime->allocate_pool,
+ EFI_LOADER_DATA, sizeof(*efidata), &efidata);
+
+ if (status != EFI_SUCCESS)
+ return status;
+
+ efidata->data.type = SETUP_EFI_VARS;
+ efidata->data.len = sizeof(struct efi_var_bootdata) -
+ sizeof(struct setup_data);
+ efidata->data.next = 0;
+ efidata->store_size = store_size;
+ efidata->remaining_size = remaining_size;
+ efidata->max_var_size = var_size;
+
+ if (data)
+ data->next = (unsigned long)efidata;
+ else
+ params->hdr.setup_data = (unsigned long)efidata;
+
+ return EFI_SUCCESS;
+}
+
static efi_status_t setup_efi_pci(struct boot_params *params)
{
efi_pci_io_protocol *pci;
@@ -1157,6 +1202,8 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
setup_graphics(boot_params);
+ setup_efi_vars(boot_params);
+
setup_efi_pci(boot_params);
status = efi_call_phys3(sys_table->boottime->allocate_pool,
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index 5b5e9cb774b..653668d140f 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -14,13 +14,29 @@
* analysis of kexec-tools; if other broken bootloaders initialize a
* different set of fields we will need to figure out how to disambiguate.
*
+ * Note: efi_info is commonly left uninitialized, but that field has a
+ * private magic, so it is better to leave it unchanged.
*/
static void sanitize_boot_params(struct boot_params *boot_params)
{
+ /*
+ * IMPORTANT NOTE TO BOOTLOADER AUTHORS: do not simply clear
+ * this field. The purpose of this field is to guarantee
+ * compliance with the x86 boot spec located in
+ * Documentation/x86/boot.txt . That spec says that the
+ * *whole* structure should be cleared, after which only the
+ * portion defined by struct setup_header (boot_params->hdr)
+ * should be copied in.
+ *
+ * If you're having an issue because the sentinel is set, you
+ * need to change the whole structure to be cleared, not this
+ * (or any other) individual field, or you will soon have
+ * problems again.
+ */
if (boot_params->sentinel) {
- /*fields in boot_params are not valid, clear them */
+ /* fields in boot_params are left uninitialized, clear them */
memset(&boot_params->olpc_ofw_header, 0,
- (char *)&boot_params->alt_mem_k -
+ (char *)&boot_params->efi_info -
(char *)&boot_params->olpc_ofw_header);
memset(&boot_params->kbd_status, 0,
(char *)&boot_params->hdr -
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 93fe929d1ce..ac10df72925 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -168,6 +168,7 @@
#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */
#define X86_FEATURE_PERFCTR_NB (6*32+24) /* NB performance counter extensions */
+#define X86_FEATURE_PERFCTR_L2 (6*32+28) /* L2 performance counter extensions */
/*
* Auxiliary flags: Linux defined - For features scattered in various
@@ -311,6 +312,7 @@ extern const char * const x86_power_flags[32];
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB)
+#define cpu_has_perfctr_l2 boot_cpu_has(X86_FEATURE_PERFCTR_L2)
#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 60c89f30c72..2fb5d5884e2 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -102,6 +102,13 @@ extern void efi_call_phys_epilog(void);
extern void efi_unmap_memmap(void);
extern void efi_memory_uc(u64 addr, unsigned long size);
+struct efi_var_bootdata {
+ struct setup_data data;
+ u64 store_size;
+ u64 remaining_size;
+ u64 max_var_size;
+};
+
#ifdef CONFIG_EFI
static inline bool efi_is_native(void)
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index bdd35dbd060..a8091216963 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -2,6 +2,7 @@
#define _ASM_X86_HUGETLB_H
#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index d3ddd17405d..5a6d2873f80 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -77,6 +77,7 @@ struct arch_specific_insn {
* a post_handler or break_handler).
*/
int boostable;
+ bool if_modifier;
};
struct arch_optimized_insn {
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 635a74d2240..4979778cc7f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -414,8 +414,8 @@ struct kvm_vcpu_arch {
gpa_t time;
struct pvclock_vcpu_time_info hv_clock;
unsigned int hw_tsc_khz;
- unsigned int time_offset;
- struct page *time_page;
+ struct gfn_to_hva_cache pv_time;
+ bool pv_time_enabled;
/* set guest stopped flag in pvclock flags field */
bool pvclock_set_guest_stopped_request;
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 5edd1742cfd..7361e47db79 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -703,7 +703,10 @@ static inline void arch_leave_lazy_mmu_mode(void)
PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}
-void arch_flush_lazy_mmu_mode(void);
+static inline void arch_flush_lazy_mmu_mode(void)
+{
+ PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
+}
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 142236ed83a..b3b0ec1dac8 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -91,6 +91,7 @@ struct pv_lazy_ops {
/* Set deferred update mode, used for batching operations. */
void (*enter)(void);
void (*leave)(void);
+ void (*flush)(void);
};
struct pv_time_ops {
@@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct task_struct *next);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
+void paravirt_flush_lazy_mmu(void);
void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index 4f7e67e2345..85e13ccf15c 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -24,45 +24,45 @@
#define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
#define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))
-#define P4_ESCR_EVENT_MASK 0x7e000000U
+#define P4_ESCR_EVENT_MASK 0x7e000000ULL
#define P4_ESCR_EVENT_SHIFT 25
-#define P4_ESCR_EVENTMASK_MASK 0x01fffe00U
+#define P4_ESCR_EVENTMASK_MASK 0x01fffe00ULL
#define P4_ESCR_EVENTMASK_SHIFT 9
-#define P4_ESCR_TAG_MASK 0x000001e0U
+#define P4_ESCR_TAG_MASK 0x000001e0ULL
#define P4_ESCR_TAG_SHIFT 5
-#define P4_ESCR_TAG_ENABLE 0x00000010U
-#define P4_ESCR_T0_OS 0x00000008U
-#define P4_ESCR_T0_USR 0x00000004U
-#define P4_ESCR_T1_OS 0x00000002U
-#define P4_ESCR_T1_USR 0x00000001U
+#define P4_ESCR_TAG_ENABLE 0x00000010ULL
+#define P4_ESCR_T0_OS 0x00000008ULL
+#define P4_ESCR_T0_USR 0x00000004ULL
+#define P4_ESCR_T1_OS 0x00000002ULL
+#define P4_ESCR_T1_USR 0x00000001ULL
#define P4_ESCR_EVENT(v) ((v) << P4_ESCR_EVENT_SHIFT)
#define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT)
#define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT)
-#define P4_CCCR_OVF 0x80000000U
-#define P4_CCCR_CASCADE 0x40000000U
-#define P4_CCCR_OVF_PMI_T0 0x04000000U
-#define P4_CCCR_OVF_PMI_T1 0x08000000U
-#define P4_CCCR_FORCE_OVF 0x02000000U
-#define P4_CCCR_EDGE 0x01000000U
-#define P4_CCCR_THRESHOLD_MASK 0x00f00000U
+#define P4_CCCR_OVF 0x80000000ULL
+#define P4_CCCR_CASCADE 0x40000000ULL
+#define P4_CCCR_OVF_PMI_T0 0x04000000ULL
+#define P4_CCCR_OVF_PMI_T1 0x08000000ULL
+#define P4_CCCR_FORCE_OVF 0x02000000ULL
+#define P4_CCCR_EDGE 0x01000000ULL
+#define P4_CCCR_THRESHOLD_MASK 0x00f00000ULL
#define P4_CCCR_THRESHOLD_SHIFT 20
-#define P4_CCCR_COMPLEMENT 0x00080000U
-#define P4_CCCR_COMPARE 0x00040000U
-#define P4_CCCR_ESCR_SELECT_MASK 0x0000e000U
+#define P4_CCCR_COMPLEMENT 0x00080000ULL
+#define P4_CCCR_COMPARE 0x00040000ULL
+#define P4_CCCR_ESCR_SELECT_MASK 0x0000e000ULL
#define P4_CCCR_ESCR_SELECT_SHIFT 13
-#define P4_CCCR_ENABLE 0x00001000U
-#define P4_CCCR_THREAD_SINGLE 0x00010000U
-#define P4_CCCR_THREAD_BOTH 0x00020000U
-#define P4_CCCR_THREAD_ANY 0x00030000U
-#define P4_CCCR_RESERVED 0x00000fffU
+#define P4_CCCR_ENABLE 0x00001000ULL
+#define P4_CCCR_THREAD_SINGLE 0x00010000ULL
+#define P4_CCCR_THREAD_BOTH 0x00020000ULL
+#define P4_CCCR_THREAD_ANY 0x00030000ULL
+#define P4_CCCR_RESERVED 0x00000fffULL
#define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT)
#define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT)
#define P4_GEN_ESCR_EMASK(class, name, bit) \
- class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT)
+ class##__##name = ((1ULL << bit) << P4_ESCR_EVENTMASK_SHIFT)
#define P4_ESCR_EMASK_BIT(class, name) class##__##name
/*
@@ -107,7 +107,7 @@
* P4_PEBS_CONFIG_MASK and related bits on
* modification.)
*/
-#define P4_CONFIG_ALIASABLE (1 << 9)
+#define P4_CONFIG_ALIASABLE (1ULL << 9)
/*
* The bits we allow to pass for RAW events
@@ -784,17 +784,17 @@ enum P4_ESCR_EMASKS {
* Note we have UOP and PEBS bits reserved for now
* just in case if we will need them once
*/
-#define P4_PEBS_CONFIG_ENABLE (1 << 7)
-#define P4_PEBS_CONFIG_UOP_TAG (1 << 8)
-#define P4_PEBS_CONFIG_METRIC_MASK 0x3f
-#define P4_PEBS_CONFIG_MASK 0xff
+#define P4_PEBS_CONFIG_ENABLE (1ULL << 7)
+#define P4_PEBS_CONFIG_UOP_TAG (1ULL << 8)
+#define P4_PEBS_CONFIG_METRIC_MASK 0x3FLL
+#define P4_PEBS_CONFIG_MASK 0xFFLL
/*
* mem: Only counters MSR_IQ_COUNTER4 (16) and
* MSR_IQ_COUNTER5 (17) are allowed for PEBS sampling
*/
-#define P4_PEBS_ENABLE 0x02000000U
-#define P4_PEBS_ENABLE_UOP_TAG 0x01000000U
+#define P4_PEBS_ENABLE 0x02000000ULL
+#define P4_PEBS_ENABLE_UOP_TAG 0x01000000ULL
#define p4_config_unpack_metric(v) (((u64)(v)) & P4_PEBS_CONFIG_METRIC_MASK)
#define p4_config_unpack_pebs(v) (((u64)(v)) & P4_PEBS_CONFIG_MASK)
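
The U-to-ULL widening above matters because these masks are combined with 64-bit event config words: complementing or shifting a 32-bit constant yields a 32-bit result whose promotion zero-fills the upper half, silently clearing the top 32 bits of the config. The small userspace demo below shows the effect with an arbitrary config value and hypothetical RESERVED_U/RESERVED_ULL names; it is an illustration, not the perf code.

    #include <stdio.h>
    #include <stdint.h>

    #define RESERVED_U   0x00000fffU    /* 32-bit, like the old defines */
    #define RESERVED_ULL 0x00000fffULL  /* 64-bit, like the new defines */

    int main(void)
    {
            uint64_t config = 0x1234567800013abcULL;

            /* ~RESERVED_U is 32-bit; promotion zero-fills the top half. */
            printf("32-bit mask: %#018llx\n",
                   (unsigned long long)(config & ~RESERVED_U));

            /* The ULL variant keeps the upper half of config intact. */
            printf("64-bit mask: %#018llx\n",
                   (unsigned long long)(config & ~RESERVED_ULL));
            return 0;
    }
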
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 1ace47b6259..2e188d68397 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -29,13 +29,13 @@ extern const unsigned long sys_call_table[];
*/
static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{
- return regs->orig_ax & __SYSCALL_MASK;
+ return regs->orig_ax;
}
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
- regs->ax = regs->orig_ax & __SYSCALL_MASK;
+ regs->ax = regs->orig_ax;
}
static inline long syscall_get_error(struct task_struct *task,
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 2cd056e3ada..a1df6e84691 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -241,8 +241,6 @@ static inline struct thread_info *current_thread_info(void)
skip sending interrupt */
#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
-#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
-
#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK 1
static inline void set_restore_sigmask(void)
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 4fef20773b8..c7797307fc2 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -7,7 +7,7 @@
#define tlb_flush(tlb) \
{ \
- if (tlb->fullmm == 0) \
+ if (!tlb->fullmm && !tlb->need_flush_all) \
flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
else \
flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 8ff8be7835a..6e5197910fd 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -55,4 +55,5 @@ extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
#endif /* _ASM_UPROBES_H */
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index c20d1ce62dc..e709884d0ef 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -382,14 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str)
return _hypercall3(int, console_io, cmd, count, str);
}
-extern int __must_check HYPERVISOR_physdev_op_compat(int, void *);
+extern int __must_check xen_physdev_op_compat(int, void *);
static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
int rc = _hypercall2(int, physdev_op, cmd, arg);
if (unlikely(rc == -ENOSYS))
- rc = HYPERVISOR_physdev_op_compat(cmd, arg);
+ rc = xen_physdev_op_compat(cmd, arg);
return rc;
}
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index c15ddaf9071..08744242b8d 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -6,6 +6,7 @@
#define SETUP_E820_EXT 1
#define SETUP_DTB 2
#define SETUP_PCI 3
+#define SETUP_EFI_VARS 4
/* ram_size flags */
#define RAMDISK_IMAGE_START_MASK 0x07FF
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 892ce40a747..b5757885d7a 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -44,6 +44,7 @@
#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
+#define MSR_PLATFORM_INFO 0x000000ce
#define MSR_MTRRcap 0x000000fe
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e
@@ -71,6 +72,7 @@
#define MSR_IA32_PEBS_ENABLE 0x000003f1
#define MSR_IA32_DS_AREA 0x00000600
#define MSR_IA32_PERF_CAPABILITIES 0x00000345
+#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6
#define MSR_MTRRfix64K_00000 0x00000250
#define MSR_MTRRfix16K_80000 0x00000258
@@ -194,6 +196,10 @@
#define MSR_AMD64_IBSBRTARGET 0xc001103b
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
+/* Fam 16h MSRs */
+#define MSR_F16H_L2I_PERF_CTL 0xc0010230
+#define MSR_F16H_L2I_PERF_CTR 0xc0010231
+
/* Fam 15h MSRs */
#define MSR_F15H_PERF_CTL 0xc0010200
#define MSR_F15H_PERF_CTR 0xc0010201
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index a0e067d3d96..deef0399fc7 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -31,7 +31,7 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
ifdef CONFIG_PERF_EVENTS
-obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o
+obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o perf_event_amd_uncore.o
obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o
@@ -43,10 +43,10 @@ obj-$(CONFIG_MTRR) += mtrr/
obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o perf_event_amd_ibs.o
quiet_cmd_mkcapflags = MKCAP $@
- cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@
+ cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $< $@
cpufeature = $(src)/../../include/asm/cpufeature.h
targets += capflags.c
-$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.pl FORCE
+$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
$(call if_changed,mkcapflags)
diff --git a/arch/x86/kernel/cpu/mkcapflags.pl b/arch/x86/kernel/cpu/mkcapflags.pl
deleted file mode 100644
index 091972ef49d..00000000000
--- a/arch/x86/kernel/cpu/mkcapflags.pl
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/perl -w
-#
-# Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h
-#
-
-($in, $out) = @ARGV;
-
-open(IN, "< $in\0") or die "$0: cannot open: $in: $!\n";
-open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n";
-
-print OUT "#ifndef _ASM_X86_CPUFEATURE_H\n";
-print OUT "#include <asm/cpufeature.h>\n";
-print OUT "#endif\n";
-print OUT "\n";
-print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n";
-
-%features = ();
-$err = 0;
-
-while (defined($line = <IN>)) {
- if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) {
- $macro = $1;
- $feature = "\L$2";
- $tail = $3;
- if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) {
- $feature = "\L$1";
- }
-
- next if ($feature eq '');
-
- if ($features{$feature}++) {
- print STDERR "$in: duplicate feature name: $feature\n";
- $err++;
- }
- printf OUT "\t%-32s = \"%s\",\n", "[$macro]", $feature;
- }
-}
-print OUT "};\n";
-
-close(IN);
-close(OUT);
-
-if ($err) {
- unlink($out);
- exit(1);
-}
-
-exit(0);
diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
new file mode 100644
index 00000000000..2bf61650549
--- /dev/null
+++ b/arch/x86/kernel/cpu/mkcapflags.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+#
+# Generate the x86_cap_flags[] array from include/asm/cpufeature.h
+#
+
+IN=$1
+OUT=$2
+
+TABS="$(printf '\t\t\t\t\t')"
+trap 'rm "$OUT"' EXIT
+
+(
+ echo "#ifndef _ASM_X86_CPUFEATURE_H"
+ echo "#include <asm/cpufeature.h>"
+ echo "#endif"
+ echo ""
+ echo "const char * const x86_cap_flags[NCAPINTS*32] = {"
+
+ # Iterate through any input lines starting with #define X86_FEATURE_
+ sed -n -e 's/\t/ /g' -e 's/^ *# *define *X86_FEATURE_//p' $IN |
+ while read i
+ do
+ # Name is everything up to the first whitespace
+ NAME="$(echo "$i" | sed 's/ .*//')"
+
+ # If the /* comment */ starts with a quote string, grab that.
+ VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')"
+ [ -z "$VALUE" ] && VALUE="\"$NAME\""
+ [ "$VALUE" == '""' ] && continue
+
+ # Name is uppercase, VALUE is all lowercase
+ VALUE="$(echo "$VALUE" | tr A-Z a-z)"
+
+ TABCOUNT=$(( ( 5*8 - 14 - $(echo "$NAME" | wc -c) ) / 8 ))
+ printf "\t[%s]%.*s = %s,\n" \
+ "X86_FEATURE_$NAME" "$TABCOUNT" "$TABS" "$VALUE"
+ done
+ echo "};"
+) > $OUT
+
+trap - EXIT
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index a7d26d83fb7..8f4be53ea04 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -35,13 +35,6 @@ static bool __init ms_hyperv_platform(void)
if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
return false;
- /*
- * Xen emulates Hyper-V to support enlightened Windows.
- * Check to see first if we are on a Xen Hypervisor.
- */
- if (xen_cpuid_base())
- return false;
-
cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
&eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
@@ -82,12 +75,6 @@ static void __init ms_hyperv_init_platform(void)
if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
-#if IS_ENABLED(CONFIG_HYPERV)
- /*
- * Setup the IDT for hypervisor callback.
- */
- alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
-#endif
}
const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
@@ -103,6 +90,11 @@ static irq_handler_t vmbus_isr;
void hv_register_vmbus_handler(int irq, irq_handler_t handler)
{
+ /*
+ * Setup the IDT for hypervisor callback.
+ */
+ alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
+
vmbus_irq = irq;
vmbus_isr = handler;
}
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index bf0f01aea99..1025f3c99d2 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -180,8 +180,9 @@ static void release_pmc_hardware(void) {}
static bool check_hw_exists(void)
{
- u64 val, val_new = ~0;
- int i, reg, ret = 0;
+ u64 val, val_fail, val_new = ~0;
+ int i, reg, reg_fail, ret = 0;
+ int bios_fail = 0;
/*
* Check to see if the BIOS enabled any of the counters, if so
@@ -192,8 +193,11 @@ static bool check_hw_exists(void)
ret = rdmsrl_safe(reg, &val);
if (ret)
goto msr_fail;
- if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
- goto bios_fail;
+ if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
+ bios_fail = 1;
+ val_fail = val;
+ reg_fail = reg;
+ }
}
if (x86_pmu.num_counters_fixed) {
@@ -202,8 +206,11 @@ static bool check_hw_exists(void)
if (ret)
goto msr_fail;
for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
- if (val & (0x03 << i*4))
- goto bios_fail;
+ if (val & (0x03 << i*4)) {
+ bios_fail = 1;
+ val_fail = val;
+ reg_fail = reg;
+ }
}
}
@@ -221,14 +228,13 @@ static bool check_hw_exists(void)
if (ret || val != val_new)
goto msr_fail;
- return true;
-
-bios_fail:
/*
* We still allow the PMU driver to operate:
*/
- printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
- printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);
+ if (bios_fail) {
+ printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
+ printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg_fail, val_fail);
+ }
return true;
@@ -1316,9 +1322,16 @@ static struct attribute_group x86_pmu_format_group = {
*/
static void __init filter_events(struct attribute **attrs)
{
+ struct device_attribute *d;
+ struct perf_pmu_events_attr *pmu_attr;
int i, j;
for (i = 0; attrs[i]; i++) {
+ d = (struct device_attribute *)attrs[i];
+ pmu_attr = container_of(d, struct perf_pmu_events_attr, attr);
+ /* str trumps id */
+ if (pmu_attr->event_str)
+ continue;
if (x86_pmu.event_map(i))
continue;
@@ -1330,22 +1343,45 @@ static void __init filter_events(struct attribute **attrs)
}
}
-static ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
+/* Merge two pointer arrays */
+static __init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
+{
+ struct attribute **new;
+ int j, i;
+
+ for (j = 0; a[j]; j++)
+ ;
+ for (i = 0; b[i]; i++)
+ j++;
+ j++;
+
+ new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ j = 0;
+ for (i = 0; a[i]; i++)
+ new[j++] = a[i];
+ for (i = 0; b[i]; i++)
+ new[j++] = b[i];
+ new[j] = NULL;
+
+ return new;
+}
+
+ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page)
{
struct perf_pmu_events_attr *pmu_attr = \
container_of(attr, struct perf_pmu_events_attr, attr);
-
u64 config = x86_pmu.event_map(pmu_attr->id);
- return x86_pmu.events_sysfs_show(page, config);
-}
-#define EVENT_VAR(_id) event_attr_##_id
-#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
+ /* string trumps id */
+ if (pmu_attr->event_str)
+ return sprintf(page, "%s", pmu_attr->event_str);
-#define EVENT_ATTR(_name, _id) \
- PMU_EVENT_ATTR(_name, EVENT_VAR(_id), PERF_COUNT_HW_##_id, \
- events_sysfs_show)
+ return x86_pmu.events_sysfs_show(page, config);
+}
EVENT_ATTR(cpu-cycles, CPU_CYCLES );
EVENT_ATTR(instructions, INSTRUCTIONS );
@@ -1459,16 +1495,27 @@ static int __init init_hw_perf_events(void)
unconstrained = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
- 0, x86_pmu.num_counters, 0);
+ 0, x86_pmu.num_counters, 0, 0);
x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
x86_pmu_format_group.attrs = x86_pmu.format_attrs;
+ if (x86_pmu.event_attrs)
+ x86_pmu_events_group.attrs = x86_pmu.event_attrs;
+
if (!x86_pmu.events_sysfs_show)
x86_pmu_events_group.attrs = &empty_attrs;
else
filter_events(x86_pmu_events_group.attrs);
+ if (x86_pmu.cpu_events) {
+ struct attribute **tmp;
+
+ tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events);
+ if (!WARN_ON(!tmp))
+ x86_pmu_events_group.attrs = tmp;
+ }
+
pr_info("... version: %d\n", x86_pmu.version);
pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
pr_info("... generic registers: %d\n", x86_pmu.num_counters);
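
The practical effect of the events_sysfs_show()/merge_attr() changes above is that string-based event aliases registered through cpu_events appear alongside the generic ones in the cpu PMU's sysfs events directory. A minimal user-space sketch of how they are consumed follows; the path and contents depend on the running kernel and CPU model, and the mem-loads string shown is the Sandy Bridge one defined further down in this diff:

#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/bus/event_source/devices/cpu/events/mem-loads", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("%s", buf);	/* e.g. "event=0xcd,umask=0x1,ldlat=3" */
	fclose(f);
	return 0;
}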
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 7f5c75c2afd..ba9aadfa683 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -46,6 +46,7 @@ enum extra_reg_type {
EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */
EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */
EXTRA_REG_LBR = 2, /* lbr_select */
+ EXTRA_REG_LDLAT = 3, /* ld_lat_threshold */
EXTRA_REG_MAX /* number of entries needed */
};
@@ -59,7 +60,13 @@ struct event_constraint {
u64 cmask;
int weight;
int overlap;
+ int flags;
};
+/*
+ * struct event_constraint flags
+ */
+#define PERF_X86_EVENT_PEBS_LDLAT 0x1 /* ld+ldlat data address sampling */
+#define PERF_X86_EVENT_PEBS_ST 0x2 /* st data address sampling */
struct amd_nb {
int nb_id; /* NorthBridge id */
@@ -170,16 +177,17 @@ struct cpu_hw_events {
void *kfree_on_online;
};
-#define __EVENT_CONSTRAINT(c, n, m, w, o) {\
+#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
{ .idxmsk64 = (n) }, \
.code = (c), \
.cmask = (m), \
.weight = (w), \
.overlap = (o), \
+ .flags = f, \
}
#define EVENT_CONSTRAINT(c, n, m) \
- __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0)
+ __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
/*
* The overlap flag marks event constraints with overlapping counter
@@ -203,7 +211,7 @@ struct cpu_hw_events {
* and its counter masks must be kept at a minimum.
*/
#define EVENT_CONSTRAINT_OVERLAP(c, n, m) \
- __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1)
+ __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)
/*
* Constraint on the Event code.
@@ -231,6 +239,14 @@ struct cpu_hw_events {
#define INTEL_UEVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
+#define INTEL_PLD_CONSTRAINT(c, n) \
+ __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
+ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)
+
+#define INTEL_PST_CONSTRAINT(c, n) \
+ __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
+ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)
+
#define EVENT_CONSTRAINT_END \
EVENT_CONSTRAINT(0, 0, 0)
@@ -260,12 +276,22 @@ struct extra_reg {
.msr = (ms), \
.config_mask = (m), \
.valid_mask = (vm), \
- .idx = EXTRA_REG_##i \
+ .idx = EXTRA_REG_##i, \
}
#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
+#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
+ EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
+ ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)
+
+#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
+ INTEL_UEVENT_EXTRA_REG(c, \
+ MSR_PEBS_LD_LAT_THRESHOLD, \
+ 0xffff, \
+ LDLAT)
+
#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
union perf_capabilities {
@@ -355,8 +381,10 @@ struct x86_pmu {
*/
int attr_rdpmc;
struct attribute **format_attrs;
+ struct attribute **event_attrs;
ssize_t (*events_sysfs_show)(char *page, u64 config);
+ struct attribute **cpu_events;
/*
* CPU Hotplug hooks
@@ -421,6 +449,23 @@ do { \
#define ERF_NO_HT_SHARING 1
#define ERF_HAS_RSP_1 2
+#define EVENT_VAR(_id) event_attr_##_id
+#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
+
+#define EVENT_ATTR(_name, _id) \
+static struct perf_pmu_events_attr EVENT_VAR(_id) = { \
+ .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \
+ .id = PERF_COUNT_HW_##_id, \
+ .event_str = NULL, \
+};
+
+#define EVENT_ATTR_STR(_name, v, str) \
+static struct perf_pmu_events_attr event_attr_##v = { \
+ .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \
+ .id = 0, \
+ .event_str = str, \
+};
+
extern struct x86_pmu x86_pmu __read_mostly;
DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
@@ -628,6 +673,9 @@ int p6_pmu_init(void);
int knc_pmu_init(void);
+ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
+ char *page);
+
#else /* CONFIG_CPU_SUP_INTEL */
static inline void reserve_ds_buffers(void)
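
To make the new constraint flags concrete, here is a minimal, self-contained sketch of what INTEL_PLD_CONSTRAINT(0x01cd, 0x8) boils down to after __EVENT_CONSTRAINT() expansion. The struct and the INTEL_ARCH_EVENT_MASK value are simplified stand-ins, used only so the example compiles on its own:

#include <stdio.h>
#include <stdint.h>

#define INTEL_ARCH_EVENT_MASK_SKETCH	0xffffULL	/* event | umask bits (stand-in) */
#define PERF_X86_EVENT_PEBS_LDLAT	0x1		/* value from the patch above */

/* pared-down stand-in for struct event_constraint */
struct constraint_sketch {
	uint64_t idxmsk64;
	uint64_t code;
	uint64_t cmask;
	int weight, overlap, flags;
};

/* what INTEL_PLD_CONSTRAINT(0x01cd, 0x8) expands to, field by field */
static const struct constraint_sketch pld_01cd = {
	.idxmsk64 = 0x8,			/* counter mask: PMC3 only */
	.code	  = 0x01cd,			/* event 0xcd, umask 0x01 */
	.cmask	  = INTEL_ARCH_EVENT_MASK_SKETCH,
	.weight	  = 1,				/* HWEIGHT(0x8) */
	.overlap  = 0,
	.flags	  = PERF_X86_EVENT_PEBS_LDLAT,	/* marks the event as PEBS-LL */
};

int main(void)
{
	printf("code=%#llx weight=%d flags=%d\n",
	       (unsigned long long)pld_01cd.code, pld_01cd.weight, pld_01cd.flags);
	return 0;
}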
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index dfdab42aed2..7e28d9467bb 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -132,14 +132,11 @@ static u64 amd_pmu_event_map(int hw_event)
return amd_perfmon_event_map[hw_event];
}
-static struct event_constraint *amd_nb_event_constraint;
-
/*
* Previously calculated offsets
*/
static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
-static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;
/*
* Legacy CPUs:
@@ -147,14 +144,10 @@ static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;
*
* CPUs with core performance counter extensions:
* 6 counters starting at 0xc0010200 each offset by 2
- *
- * CPUs with north bridge performance counter extensions:
- * 4 additional counters starting at 0xc0010240 each offset by 2
- * (indexed right above either one of the above core counters)
*/
static inline int amd_pmu_addr_offset(int index, bool eventsel)
{
- int offset, first, base;
+ int offset;
if (!index)
return index;
@@ -167,23 +160,7 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
if (offset)
return offset;
- if (amd_nb_event_constraint &&
- test_bit(index, amd_nb_event_constraint->idxmsk)) {
- /*
- * calculate the offset of NB counters with respect to
- * base eventsel or perfctr
- */
-
- first = find_first_bit(amd_nb_event_constraint->idxmsk,
- X86_PMC_IDX_MAX);
-
- if (eventsel)
- base = MSR_F15H_NB_PERF_CTL - x86_pmu.eventsel;
- else
- base = MSR_F15H_NB_PERF_CTR - x86_pmu.perfctr;
-
- offset = base + ((index - first) << 1);
- } else if (!cpu_has_perfctr_core)
+ if (!cpu_has_perfctr_core)
offset = index;
else
offset = index << 1;
@@ -196,36 +173,6 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
return offset;
}
-static inline int amd_pmu_rdpmc_index(int index)
-{
- int ret, first;
-
- if (!index)
- return index;
-
- ret = rdpmc_indexes[index];
-
- if (ret)
- return ret;
-
- if (amd_nb_event_constraint &&
- test_bit(index, amd_nb_event_constraint->idxmsk)) {
- /*
- * according to the mnual, ECX value of the NB counters is
- * the index of the NB counter (0, 1, 2 or 3) plus 6
- */
-
- first = find_first_bit(amd_nb_event_constraint->idxmsk,
- X86_PMC_IDX_MAX);
- ret = index - first + 6;
- } else
- ret = index;
-
- rdpmc_indexes[index] = ret;
-
- return ret;
-}
-
static int amd_core_hw_config(struct perf_event *event)
{
if (event->attr.exclude_host && event->attr.exclude_guest)
@@ -245,34 +192,6 @@ static int amd_core_hw_config(struct perf_event *event)
}
/*
- * NB counters do not support the following event select bits:
- * Host/Guest only
- * Counter mask
- * Invert counter mask
- * Edge detect
- * OS/User mode
- */
-static int amd_nb_hw_config(struct perf_event *event)
-{
- /* for NB, we only allow system wide counting mode */
- if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
- return -EINVAL;
-
- if (event->attr.exclude_user || event->attr.exclude_kernel ||
- event->attr.exclude_host || event->attr.exclude_guest)
- return -EINVAL;
-
- event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
- ARCH_PERFMON_EVENTSEL_OS);
-
- if (event->hw.config & ~(AMD64_RAW_EVENT_MASK_NB |
- ARCH_PERFMON_EVENTSEL_INT))
- return -EINVAL;
-
- return 0;
-}
-
-/*
* AMD64 events are detected based on their event codes.
*/
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
@@ -285,11 +204,6 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
return (hwc->config & 0xe0) == 0xe0;
}
-static inline int amd_is_perfctr_nb_event(struct hw_perf_event *hwc)
-{
- return amd_nb_event_constraint && amd_is_nb_event(hwc);
-}
-
static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
struct amd_nb *nb = cpuc->amd_nb;
@@ -315,9 +229,6 @@ static int amd_pmu_hw_config(struct perf_event *event)
if (event->attr.type == PERF_TYPE_RAW)
event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
- if (amd_is_perfctr_nb_event(&event->hw))
- return amd_nb_hw_config(event);
-
return amd_core_hw_config(event);
}
@@ -341,19 +252,6 @@ static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
}
}
-static void amd_nb_interrupt_hw_config(struct hw_perf_event *hwc)
-{
- int core_id = cpu_data(smp_processor_id()).cpu_core_id;
-
- /* deliver interrupts only to this core */
- if (hwc->config & ARCH_PERFMON_EVENTSEL_INT) {
- hwc->config |= AMD64_EVENTSEL_INT_CORE_ENABLE;
- hwc->config &= ~AMD64_EVENTSEL_INT_CORE_SEL_MASK;
- hwc->config |= (u64)(core_id) <<
- AMD64_EVENTSEL_INT_CORE_SEL_SHIFT;
- }
-}
-
/*
* AMD64 NorthBridge events need special treatment because
* counter access needs to be synchronized across all cores
@@ -441,9 +339,6 @@ __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *ev
if (new == -1)
return &emptyconstraint;
- if (amd_is_perfctr_nb_event(hwc))
- amd_nb_interrupt_hw_config(hwc);
-
return &nb->event_constraints[new];
}
@@ -543,8 +438,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
return &unconstrained;
- return __amd_get_nb_event_constraints(cpuc, event,
- amd_nb_event_constraint);
+ return __amd_get_nb_event_constraints(cpuc, event, NULL);
}
static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
@@ -643,9 +537,6 @@ static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09,
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
-static struct event_constraint amd_NBPMC96 = EVENT_CONSTRAINT(0, 0x3C0, 0);
-static struct event_constraint amd_NBPMC74 = EVENT_CONSTRAINT(0, 0xF0, 0);
-
static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
@@ -711,8 +602,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
return &amd_f15_PMC20;
}
case AMD_EVENT_NB:
- return __amd_get_nb_event_constraints(cpuc, event,
- amd_nb_event_constraint);
+ /* moved to perf_event_amd_uncore.c */
+ return &emptyconstraint;
default:
return &emptyconstraint;
}
@@ -738,7 +629,6 @@ static __initconst const struct x86_pmu amd_pmu = {
.eventsel = MSR_K7_EVNTSEL0,
.perfctr = MSR_K7_PERFCTR0,
.addr_offset = amd_pmu_addr_offset,
- .rdpmc_index = amd_pmu_rdpmc_index,
.event_map = amd_pmu_event_map,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
.num_counters = AMD64_NUM_COUNTERS,
@@ -790,23 +680,6 @@ static int setup_perfctr_core(void)
return 0;
}
-static int setup_perfctr_nb(void)
-{
- if (!cpu_has_perfctr_nb)
- return -ENODEV;
-
- x86_pmu.num_counters += AMD64_NUM_COUNTERS_NB;
-
- if (cpu_has_perfctr_core)
- amd_nb_event_constraint = &amd_NBPMC96;
- else
- amd_nb_event_constraint = &amd_NBPMC74;
-
- printk(KERN_INFO "perf: AMD northbridge performance counters detected\n");
-
- return 0;
-}
-
__init int amd_pmu_init(void)
{
/* Performance-monitoring supported from K7 and later: */
@@ -817,7 +690,6 @@ __init int amd_pmu_init(void)
setup_event_constraints();
setup_perfctr_core();
- setup_perfctr_nb();
/* Events are common for all AMDs */
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
new file mode 100644
index 00000000000..c0c661adf03
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -0,0 +1,547 @@
+/*
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Jacob Shin <jacob.shin@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/perf_event.h>
+#include <linux/percpu.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+
+#include <asm/cpufeature.h>
+#include <asm/perf_event.h>
+#include <asm/msr.h>
+
+#define NUM_COUNTERS_NB 4
+#define NUM_COUNTERS_L2 4
+#define MAX_COUNTERS NUM_COUNTERS_NB
+
+#define RDPMC_BASE_NB 6
+#define RDPMC_BASE_L2 10
+
+#define COUNTER_SHIFT 16
+
+struct amd_uncore {
+ int id;
+ int refcnt;
+ int cpu;
+ int num_counters;
+ int rdpmc_base;
+ u32 msr_base;
+ cpumask_t *active_mask;
+ struct pmu *pmu;
+ struct perf_event *events[MAX_COUNTERS];
+ struct amd_uncore *free_when_cpu_online;
+};
+
+static struct amd_uncore * __percpu *amd_uncore_nb;
+static struct amd_uncore * __percpu *amd_uncore_l2;
+
+static struct pmu amd_nb_pmu;
+static struct pmu amd_l2_pmu;
+
+static cpumask_t amd_nb_active_mask;
+static cpumask_t amd_l2_active_mask;
+
+static bool is_nb_event(struct perf_event *event)
+{
+ return event->pmu->type == amd_nb_pmu.type;
+}
+
+static bool is_l2_event(struct perf_event *event)
+{
+ return event->pmu->type == amd_l2_pmu.type;
+}
+
+static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
+{
+ if (is_nb_event(event) && amd_uncore_nb)
+ return *per_cpu_ptr(amd_uncore_nb, event->cpu);
+ else if (is_l2_event(event) && amd_uncore_l2)
+ return *per_cpu_ptr(amd_uncore_l2, event->cpu);
+
+ return NULL;
+}
+
+static void amd_uncore_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev, new;
+ s64 delta;
+
+ /*
+ * since we do not enable counter overflow interrupts,
+ * we do not have to worry about prev_count changing on us
+ */
+
+ prev = local64_read(&hwc->prev_count);
+ rdpmcl(hwc->event_base_rdpmc, new);
+ local64_set(&hwc->prev_count, new);
+ delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
+ delta >>= COUNTER_SHIFT;
+ local64_add(delta, &event->count);
+}
+
+static void amd_uncore_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (flags & PERF_EF_RELOAD)
+ wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));
+
+ hwc->state = 0;
+ wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
+ perf_event_update_userpage(event);
+}
+
+static void amd_uncore_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ wrmsrl(hwc->config_base, hwc->config);
+ hwc->state |= PERF_HES_STOPPED;
+
+ if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+ amd_uncore_read(event);
+ hwc->state |= PERF_HES_UPTODATE;
+ }
+}
+
+static int amd_uncore_add(struct perf_event *event, int flags)
+{
+ int i;
+ struct amd_uncore *uncore = event_to_amd_uncore(event);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* are we already assigned? */
+ if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
+ goto out;
+
+ for (i = 0; i < uncore->num_counters; i++) {
+ if (uncore->events[i] == event) {
+ hwc->idx = i;
+ goto out;
+ }
+ }
+
+ /* if not, take the first available counter */
+ hwc->idx = -1;
+ for (i = 0; i < uncore->num_counters; i++) {
+ if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
+ hwc->idx = i;
+ break;
+ }
+ }
+
+out:
+ if (hwc->idx == -1)
+ return -EBUSY;
+
+ hwc->config_base = uncore->msr_base + (2 * hwc->idx);
+ hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
+ hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ amd_uncore_start(event, PERF_EF_RELOAD);
+
+ return 0;
+}
+
+static void amd_uncore_del(struct perf_event *event, int flags)
+{
+ int i;
+ struct amd_uncore *uncore = event_to_amd_uncore(event);
+ struct hw_perf_event *hwc = &event->hw;
+
+ amd_uncore_stop(event, PERF_EF_UPDATE);
+
+ for (i = 0; i < uncore->num_counters; i++) {
+ if (cmpxchg(&uncore->events[i], event, NULL) == event)
+ break;
+ }
+
+ hwc->idx = -1;
+}
+
+static int amd_uncore_event_init(struct perf_event *event)
+{
+ struct amd_uncore *uncore;
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /*
+ * NB and L2 counters (MSRs) are shared across all cores that share the
+ * same NB / L2 cache. Interrupts can be directed to a single target
+ * core, however, event counts generated by processes running on other
+ * cores cannot be masked out. So we do not support sampling and
+ * per-thread events.
+ */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EINVAL;
+
+ /* NB and L2 counters do not have usr/os/guest/host bits */
+ if (event->attr.exclude_user || event->attr.exclude_kernel ||
+ event->attr.exclude_host || event->attr.exclude_guest)
+ return -EINVAL;
+
+ /* and we do not enable counter overflow interrupts */
+ hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
+ hwc->idx = -1;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ uncore = event_to_amd_uncore(event);
+ if (!uncore)
+ return -ENODEV;
+
+ /*
+ * since request can come in to any of the shared cores, we will remap
+ * to a single common cpu.
+ */
+ event->cpu = uncore->cpu;
+
+ return 0;
+}
+
+static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int n;
+ cpumask_t *active_mask;
+ struct pmu *pmu = dev_get_drvdata(dev);
+
+ if (pmu->type == amd_nb_pmu.type)
+ active_mask = &amd_nb_active_mask;
+ else if (pmu->type == amd_l2_pmu.type)
+ active_mask = &amd_l2_active_mask;
+ else
+ return 0;
+
+ n = cpulist_scnprintf(buf, PAGE_SIZE - 2, active_mask);
+ buf[n++] = '\n';
+ buf[n] = '\0';
+ return n;
+}
+static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);
+
+static struct attribute *amd_uncore_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group amd_uncore_attr_group = {
+ .attrs = amd_uncore_attrs,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-7,32-35");
+PMU_FORMAT_ATTR(umask, "config:8-15");
+
+static struct attribute *amd_uncore_format_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ NULL,
+};
+
+static struct attribute_group amd_uncore_format_group = {
+ .name = "format",
+ .attrs = amd_uncore_format_attr,
+};
+
+static const struct attribute_group *amd_uncore_attr_groups[] = {
+ &amd_uncore_attr_group,
+ &amd_uncore_format_group,
+ NULL,
+};
+
+static struct pmu amd_nb_pmu = {
+ .attr_groups = amd_uncore_attr_groups,
+ .name = "amd_nb",
+ .event_init = amd_uncore_event_init,
+ .add = amd_uncore_add,
+ .del = amd_uncore_del,
+ .start = amd_uncore_start,
+ .stop = amd_uncore_stop,
+ .read = amd_uncore_read,
+};
+
+static struct pmu amd_l2_pmu = {
+ .attr_groups = amd_uncore_attr_groups,
+ .name = "amd_l2",
+ .event_init = amd_uncore_event_init,
+ .add = amd_uncore_add,
+ .del = amd_uncore_del,
+ .start = amd_uncore_start,
+ .stop = amd_uncore_stop,
+ .read = amd_uncore_read,
+};
+
+static struct amd_uncore * __cpuinit amd_uncore_alloc(unsigned int cpu)
+{
+ return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
+ cpu_to_node(cpu));
+}
+
+static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu)
+{
+ struct amd_uncore *uncore;
+
+ if (amd_uncore_nb) {
+ uncore = amd_uncore_alloc(cpu);
+ uncore->cpu = cpu;
+ uncore->num_counters = NUM_COUNTERS_NB;
+ uncore->rdpmc_base = RDPMC_BASE_NB;
+ uncore->msr_base = MSR_F15H_NB_PERF_CTL;
+ uncore->active_mask = &amd_nb_active_mask;
+ uncore->pmu = &amd_nb_pmu;
+ *per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
+ }
+
+ if (amd_uncore_l2) {
+ uncore = amd_uncore_alloc(cpu);
+ uncore->cpu = cpu;
+ uncore->num_counters = NUM_COUNTERS_L2;
+ uncore->rdpmc_base = RDPMC_BASE_L2;
+ uncore->msr_base = MSR_F16H_L2I_PERF_CTL;
+ uncore->active_mask = &amd_l2_active_mask;
+ uncore->pmu = &amd_l2_pmu;
+ *per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
+ }
+}
+
+static struct amd_uncore *
+__cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this,
+ struct amd_uncore * __percpu *uncores)
+{
+ unsigned int cpu;
+ struct amd_uncore *that;
+
+ for_each_online_cpu(cpu) {
+ that = *per_cpu_ptr(uncores, cpu);
+
+ if (!that)
+ continue;
+
+ if (this == that)
+ continue;
+
+ if (this->id == that->id) {
+ that->free_when_cpu_online = this;
+ this = that;
+ break;
+ }
+ }
+
+ this->refcnt++;
+ return this;
+}
+
+static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu)
+{
+ unsigned int eax, ebx, ecx, edx;
+ struct amd_uncore *uncore;
+
+ if (amd_uncore_nb) {
+ uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
+ cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+ uncore->id = ecx & 0xff;
+
+ uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
+ *per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
+ }
+
+ if (amd_uncore_l2) {
+ unsigned int apicid = cpu_data(cpu).apicid;
+ unsigned int nshared;
+
+ uncore = *per_cpu_ptr(amd_uncore_l2, cpu);
+ cpuid_count(0x8000001d, 2, &eax, &ebx, &ecx, &edx);
+ nshared = ((eax >> 14) & 0xfff) + 1;
+ uncore->id = apicid - (apicid % nshared);
+
+ uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2);
+ *per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
+ }
+}
+
+static void __cpuinit uncore_online(unsigned int cpu,
+ struct amd_uncore * __percpu *uncores)
+{
+ struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
+
+ kfree(uncore->free_when_cpu_online);
+ uncore->free_when_cpu_online = NULL;
+
+ if (cpu == uncore->cpu)
+ cpumask_set_cpu(cpu, uncore->active_mask);
+}
+
+static void __cpuinit amd_uncore_cpu_online(unsigned int cpu)
+{
+ if (amd_uncore_nb)
+ uncore_online(cpu, amd_uncore_nb);
+
+ if (amd_uncore_l2)
+ uncore_online(cpu, amd_uncore_l2);
+}
+
+static void __cpuinit uncore_down_prepare(unsigned int cpu,
+ struct amd_uncore * __percpu *uncores)
+{
+ unsigned int i;
+ struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);
+
+ if (this->cpu != cpu)
+ return;
+
+ /* this cpu is going down, migrate to a shared sibling if possible */
+ for_each_online_cpu(i) {
+ struct amd_uncore *that = *per_cpu_ptr(uncores, i);
+
+ if (cpu == i)
+ continue;
+
+ if (this == that) {
+ perf_pmu_migrate_context(this->pmu, cpu, i);
+ cpumask_clear_cpu(cpu, that->active_mask);
+ cpumask_set_cpu(i, that->active_mask);
+ that->cpu = i;
+ break;
+ }
+ }
+}
+
+static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu)
+{
+ if (amd_uncore_nb)
+ uncore_down_prepare(cpu, amd_uncore_nb);
+
+ if (amd_uncore_l2)
+ uncore_down_prepare(cpu, amd_uncore_l2);
+}
+
+static void __cpuinit uncore_dead(unsigned int cpu,
+ struct amd_uncore * __percpu *uncores)
+{
+ struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
+
+ if (cpu == uncore->cpu)
+ cpumask_clear_cpu(cpu, uncore->active_mask);
+
+ if (!--uncore->refcnt)
+ kfree(uncore);
+ *per_cpu_ptr(uncores, cpu) = NULL;
+}
+
+static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu)
+{
+ if (amd_uncore_nb)
+ uncore_dead(cpu, amd_uncore_nb);
+
+ if (amd_uncore_l2)
+ uncore_dead(cpu, amd_uncore_l2);
+}
+
+static int __cpuinit
+amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
+ void *hcpu)
+{
+ unsigned int cpu = (long)hcpu;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ amd_uncore_cpu_up_prepare(cpu);
+ break;
+
+ case CPU_STARTING:
+ amd_uncore_cpu_starting(cpu);
+ break;
+
+ case CPU_ONLINE:
+ amd_uncore_cpu_online(cpu);
+ break;
+
+ case CPU_DOWN_PREPARE:
+ amd_uncore_cpu_down_prepare(cpu);
+ break;
+
+ case CPU_UP_CANCELED:
+ case CPU_DEAD:
+ amd_uncore_cpu_dead(cpu);
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block amd_uncore_cpu_notifier_block __cpuinitdata = {
+ .notifier_call = amd_uncore_cpu_notifier,
+ .priority = CPU_PRI_PERF + 1,
+};
+
+static void __init init_cpu_already_online(void *dummy)
+{
+ unsigned int cpu = smp_processor_id();
+
+ amd_uncore_cpu_starting(cpu);
+ amd_uncore_cpu_online(cpu);
+}
+
+static int __init amd_uncore_init(void)
+{
+ unsigned int cpu;
+ int ret = -ENODEV;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return -ENODEV;
+
+ if (!cpu_has_topoext)
+ return -ENODEV;
+
+ if (cpu_has_perfctr_nb) {
+ amd_uncore_nb = alloc_percpu(struct amd_uncore *);
+ perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
+
+ printk(KERN_INFO "perf: AMD NB counters detected\n");
+ ret = 0;
+ }
+
+ if (cpu_has_perfctr_l2) {
+ amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
+ perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);
+
+ printk(KERN_INFO "perf: AMD L2I counters detected\n");
+ ret = 0;
+ }
+
+ if (ret)
+ return -ENODEV;
+
+ get_online_cpus();
+ /* init cpus already online before registering for hotplug notifier */
+ for_each_online_cpu(cpu) {
+ amd_uncore_cpu_up_prepare(cpu);
+ smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
+ }
+
+ register_cpu_notifier(&amd_uncore_cpu_notifier_block);
+ put_online_cpus();
+
+ return 0;
+}
+device_initcall(amd_uncore_init);
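
A standalone sketch of the 48-bit counter arithmetic used by amd_uncore_read() above: shifting both samples up by COUNTER_SHIFT before subtracting makes the difference wrap at the counter width, and shifting back down recovers the delta even across a counter rollover. The values below are only a worked example:

#include <stdio.h>
#include <stdint.h>

#define COUNTER_SHIFT	16	/* from the patch above: 64 - 48 for 48-bit counters */

static int64_t counter_delta(uint64_t prev, uint64_t new)
{
	int64_t delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);

	return delta >> COUNTER_SHIFT;	/* arithmetic shift back down */
}

int main(void)
{
	/* counter rolls over from 0xffffffffffff to 0x5: the delta is 6 */
	printf("%lld\n", (long long)counter_delta(0xffffffffffffULL, 0x5ULL));
	return 0;
}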
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 529c8931fc0..ffd6050a1de 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -81,6 +81,7 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
+ INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
EVENT_EXTRA_END
};
@@ -101,9 +102,15 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
+ INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
+ INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
+ INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
+ INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
+ INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
+ INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
EVENT_CONSTRAINT_END
};
@@ -132,6 +139,7 @@ static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
+ INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
EVENT_EXTRA_END
};
@@ -149,11 +157,34 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
};
static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
- INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
- INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
+ INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
+ INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
+ INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+ INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
EVENT_EXTRA_END
};
+static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
+ INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
+ INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
+ EVENT_EXTRA_END
+};
+
+EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
+EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
+EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
+
+struct attribute *nhm_events_attrs[] = {
+ EVENT_PTR(mem_ld_nhm),
+ NULL,
+};
+
+struct attribute *snb_events_attrs[] = {
+ EVENT_PTR(mem_ld_snb),
+ EVENT_PTR(mem_st_snb),
+ NULL,
+};
+
static u64 intel_pmu_event_map(int hw_event)
{
return intel_perfmon_event_map[hw_event];
@@ -1388,8 +1419,11 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
if (x86_pmu.event_constraints) {
for_each_event_constraint(c, x86_pmu.event_constraints) {
- if ((event->hw.config & c->cmask) == c->code)
+ if ((event->hw.config & c->cmask) == c->code) {
+ /* hw.flags zeroed at initialization */
+ event->hw.flags |= c->flags;
return c;
+ }
}
}
@@ -1434,6 +1468,7 @@ intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
struct perf_event *event)
{
+ event->hw.flags = 0;
intel_put_shared_regs_event_constraints(cpuc, event);
}
@@ -1757,6 +1792,8 @@ static void intel_pmu_flush_branch_stack(void)
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
+PMU_FORMAT_ATTR(ldlat, "config1:0-15");
+
static struct attribute *intel_arch3_formats_attr[] = {
&format_attr_event.attr,
&format_attr_umask.attr,
@@ -1767,6 +1804,7 @@ static struct attribute *intel_arch3_formats_attr[] = {
&format_attr_cmask.attr,
&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
+ &format_attr_ldlat.attr, /* PEBS load latency */
NULL,
};
@@ -2027,6 +2065,8 @@ __init int intel_pmu_init(void)
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.extra_regs = intel_nehalem_extra_regs;
+ x86_pmu.cpu_events = nhm_events_attrs;
+
/* UOPS_ISSUED.STALLED_CYCLES */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
@@ -2070,6 +2110,8 @@ __init int intel_pmu_init(void)
x86_pmu.extra_regs = intel_westmere_extra_regs;
x86_pmu.er_flags |= ERF_HAS_RSP_1;
+ x86_pmu.cpu_events = nhm_events_attrs;
+
/* UOPS_ISSUED.STALLED_CYCLES */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
@@ -2093,11 +2135,16 @@ __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_snb_event_constraints;
x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
- x86_pmu.extra_regs = intel_snb_extra_regs;
+ if (boot_cpu_data.x86_model == 45)
+ x86_pmu.extra_regs = intel_snbep_extra_regs;
+ else
+ x86_pmu.extra_regs = intel_snb_extra_regs;
/* all extra regs are per-cpu when HT is on */
x86_pmu.er_flags |= ERF_HAS_RSP_1;
x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+ x86_pmu.cpu_events = snb_events_attrs;
+
/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
@@ -2119,11 +2166,16 @@ __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_ivb_event_constraints;
x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
- x86_pmu.extra_regs = intel_snb_extra_regs;
+ if (boot_cpu_data.x86_model == 62)
+ x86_pmu.extra_regs = intel_snbep_extra_regs;
+ else
+ x86_pmu.extra_regs = intel_snb_extra_regs;
/* all extra regs are per-cpu when HT is on */
x86_pmu.er_flags |= ERF_HAS_RSP_1;
x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+ x86_pmu.cpu_events = snb_events_attrs;
+
/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 826054a4f2e..60250f68705 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -24,6 +24,130 @@ struct pebs_record_32 {
*/
+union intel_x86_pebs_dse {
+ u64 val;
+ struct {
+ unsigned int ld_dse:4;
+ unsigned int ld_stlb_miss:1;
+ unsigned int ld_locked:1;
+ unsigned int ld_reserved:26;
+ };
+ struct {
+ unsigned int st_l1d_hit:1;
+ unsigned int st_reserved1:3;
+ unsigned int st_stlb_miss:1;
+ unsigned int st_locked:1;
+ unsigned int st_reserved2:26;
+ };
+};
+
+
+/*
+ * Map PEBS Load Latency Data Source encodings to generic
+ * memory data source information
+ */
+#define P(a, b) PERF_MEM_S(a, b)
+#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
+#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
+
+static const u64 pebs_data_source[] = {
+ P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | P(SNOOP, NA),/* 0x00: unknown L3 */
+ OP_LH | P(LVL, L1) | P(SNOOP, NONE), /* 0x01: L1 local */
+ OP_LH | P(LVL, LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
+ OP_LH | P(LVL, L2) | P(SNOOP, NONE), /* 0x03: L2 hit */
+ OP_LH | P(LVL, L3) | P(SNOOP, NONE), /* 0x04: L3 hit */
+ OP_LH | P(LVL, L3) | P(SNOOP, MISS), /* 0x05: L3 hit, snoop miss */
+ OP_LH | P(LVL, L3) | P(SNOOP, HIT), /* 0x06: L3 hit, snoop hit */
+ OP_LH | P(LVL, L3) | P(SNOOP, HITM), /* 0x07: L3 hit, snoop hitm */
+ OP_LH | P(LVL, REM_CCE1) | P(SNOOP, HIT), /* 0x08: L3 miss snoop hit */
+ OP_LH | P(LVL, REM_CCE1) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm*/
+ OP_LH | P(LVL, LOC_RAM) | P(SNOOP, HIT), /* 0x0a: L3 miss, shared */
+ OP_LH | P(LVL, REM_RAM1) | P(SNOOP, HIT), /* 0x0b: L3 miss, shared */
+ OP_LH | P(LVL, LOC_RAM) | SNOOP_NONE_MISS,/* 0x0c: L3 miss, excl */
+ OP_LH | P(LVL, REM_RAM1) | SNOOP_NONE_MISS,/* 0x0d: L3 miss, excl */
+ OP_LH | P(LVL, IO) | P(SNOOP, NONE), /* 0x0e: I/O */
+ OP_LH | P(LVL, UNC) | P(SNOOP, NONE), /* 0x0f: uncached */
+};
+
+static u64 precise_store_data(u64 status)
+{
+ union intel_x86_pebs_dse dse;
+ u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);
+
+ dse.val = status;
+
+ /*
+ * bit 4: TLB access
+ * 1 = store missed the 2nd level TLB,
+ *     i.e. it was satisfied by the page walker or the OS
+ * 0 = store hit the 2nd level TLB
+ */
+ if (dse.st_stlb_miss)
+ val |= P(TLB, MISS);
+ else
+ val |= P(TLB, HIT);
+
+ /*
+ * bit 0: hit L1 data cache
+ * if not set, then all we know is that
+ * it missed L1D
+ */
+ if (dse.st_l1d_hit)
+ val |= P(LVL, HIT);
+ else
+ val |= P(LVL, MISS);
+
+ /*
+ * bit 5: Locked prefix
+ */
+ if (dse.st_locked)
+ val |= P(LOCK, LOCKED);
+
+ return val;
+}
+
+static u64 load_latency_data(u64 status)
+{
+ union intel_x86_pebs_dse dse;
+ u64 val;
+ int model = boot_cpu_data.x86_model;
+ int fam = boot_cpu_data.x86;
+
+ dse.val = status;
+
+ /*
+ * use the mapping table for bit 0-3
+ */
+ val = pebs_data_source[dse.ld_dse];
+
+ /*
+ * Nehalem models do not report TLB or lock information
+ */
+ if (fam == 0x6 && (model == 26 || model == 30
+ || model == 31 || model == 46)) {
+ val |= P(TLB, NA) | P(LOCK, NA);
+ return val;
+ }
+ /*
+ * bit 4: TLB access
+ * 0 = did not miss 2nd level TLB
+ * 1 = missed 2nd level TLB
+ */
+ if (dse.ld_stlb_miss)
+ val |= P(TLB, MISS) | P(TLB, L2);
+ else
+ val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);
+
+ /*
+ * bit 5: locked prefix
+ */
+ if (dse.ld_locked)
+ val |= P(LOCK, LOCKED);
+
+ return val;
+}
+
struct pebs_record_core {
u64 flags, ip;
u64 ax, bx, cx, dx;
@@ -314,10 +438,11 @@ int intel_pmu_drain_bts_buffer(void)
if (top <= at)
return 0;
+ memset(&regs, 0, sizeof(regs));
+
ds->bts_index = ds->bts_buffer_base;
perf_sample_data_init(&data, 0, event->hw.last_period);
- regs.ip = 0;
/*
* Prepare a generic sample, i.e. fill in the invariant fields.
@@ -364,7 +489,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
};
struct event_constraint intel_nehalem_pebs_event_constraints[] = {
- INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
+ INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */
@@ -379,7 +504,7 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
};
struct event_constraint intel_westmere_pebs_event_constraints[] = {
- INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
+ INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */
@@ -399,7 +524,8 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */
+ INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
+ INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -413,7 +539,8 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */
+ INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
+ INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -430,8 +557,10 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
if (x86_pmu.pebs_constraints) {
for_each_event_constraint(c, x86_pmu.pebs_constraints) {
- if ((event->hw.config & c->cmask) == c->code)
+ if ((event->hw.config & c->cmask) == c->code) {
+ event->hw.flags |= c->flags;
return c;
+ }
}
}
@@ -446,6 +575,11 @@ void intel_pmu_pebs_enable(struct perf_event *event)
hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
cpuc->pebs_enabled |= 1ULL << hwc->idx;
+
+ if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
+ cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
+ else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
+ cpuc->pebs_enabled |= 1ULL << 63;
}
void intel_pmu_pebs_disable(struct perf_event *event)
@@ -558,20 +692,51 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
struct pt_regs *iregs, void *__pebs)
{
/*
- * We cast to pebs_record_core since that is a subset of
- * both formats and we don't use the other fields in this
- * routine.
+ * We cast to pebs_record_nhm to get the load latency data
+ * if the MSR_PEBS_LD_LAT_THRESHOLD extra_reg is used
*/
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct pebs_record_core *pebs = __pebs;
+ struct pebs_record_nhm *pebs = __pebs;
struct perf_sample_data data;
struct pt_regs regs;
+ u64 sample_type;
+ int fll, fst;
if (!intel_pmu_save_and_restart(event))
return;
+ fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
+ fst = event->hw.flags & PERF_X86_EVENT_PEBS_ST;
+
perf_sample_data_init(&data, 0, event->hw.last_period);
+ data.period = event->hw.last_period;
+ sample_type = event->attr.sample_type;
+
+ /*
+ * if PEBS-LL or PreciseStore
+ */
+ if (fll || fst) {
+ if (sample_type & PERF_SAMPLE_ADDR)
+ data.addr = pebs->dla;
+
+ /*
+ * Use latency for weight (only avail with PEBS-LL)
+ */
+ if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
+ data.weight = pebs->lat;
+
+ /*
+ * data.data_src encodes the data source
+ */
+ if (sample_type & PERF_SAMPLE_DATA_SRC) {
+ if (fll)
+ data.data_src.val = load_latency_data(pebs->dse);
+ else
+ data.data_src.val = precise_store_data(pebs->dse);
+ }
+ }
+
/*
* We use the interrupt regs as a base because the PEBS record
* does not contain a full regs set, specifically it seems to
@@ -729,3 +894,13 @@ void intel_ds_init(void)
}
}
}
+
+void perf_restore_debug_store(void)
+{
+ struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
+
+ if (!x86_pmu.bts && !x86_pmu.pebs)
+ return;
+
+ wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
+}
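
A self-contained sketch of how the dse status bits consumed by load_latency_data() above decode; the union mirrors the load-side layout of intel_x86_pebs_dse from this patch, and 0x11 is just an example status (source encoding 0x01 = L1 local hit, bit 4 = STLB miss):

#include <stdio.h>
#include <stdint.h>

/* copy of the load-side layout from intel_x86_pebs_dse above */
union pebs_dse_sketch {
	uint64_t val;
	struct {
		unsigned int ld_dse:4;		/* index into pebs_data_source[] */
		unsigned int ld_stlb_miss:1;
		unsigned int ld_locked:1;
		unsigned int ld_reserved:26;
	};
};

int main(void)
{
	union pebs_dse_sketch dse = { .val = 0x11 };

	/* prints: source=1 stlb_miss=1 locked=0 */
	printf("source=%u stlb_miss=%u locked=%u\n",
	       dse.ld_dse, dse.ld_stlb_miss, dse.ld_locked);
	return 0;
}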
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index b43200dbfe7..d0f9e5aa215 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -17,6 +17,9 @@ static struct event_constraint constraint_fixed =
static struct event_constraint constraint_empty =
EVENT_CONSTRAINT(0, 0, 0);
+#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
+ ((1ULL << (n)) - 1)))
+
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
@@ -31,9 +34,13 @@ DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
+DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
+DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
+DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
+DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
@@ -110,6 +117,21 @@ static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_even
reg1->alloc = 0;
}
+static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
+{
+ struct intel_uncore_extra_reg *er;
+ unsigned long flags;
+ u64 config;
+
+ er = &box->shared_regs[idx];
+
+ raw_spin_lock_irqsave(&er->lock, flags);
+ config = er->config;
+ raw_spin_unlock_irqrestore(&er->lock, flags);
+
+ return config;
+}
+
/* Sandy Bridge-EP uncore support */
static struct intel_uncore_type snbep_uncore_cbox;
static struct intel_uncore_type snbep_uncore_pcu;
@@ -205,7 +227,7 @@ static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct p
struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
if (reg1->idx != EXTRA_REG_NONE)
- wrmsrl(reg1->reg, reg1->config);
+ wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
@@ -226,29 +248,6 @@ static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}
-static int snbep_uncore_hw_config(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
-
- if (box->pmu->type == &snbep_uncore_cbox) {
- reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
- SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
- reg1->config = event->attr.config1 &
- SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK;
- } else {
- if (box->pmu->type == &snbep_uncore_pcu) {
- reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
- reg1->config = event->attr.config1 & SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
- } else {
- return 0;
- }
- }
- reg1->idx = 0;
-
- return 0;
-}
-
static struct attribute *snbep_uncore_formats_attr[] = {
&format_attr_event.attr,
&format_attr_umask.attr,
@@ -345,16 +344,16 @@ static struct attribute_group snbep_uncore_qpi_format_group = {
.attrs = snbep_uncore_qpi_formats_attr,
};
+#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
+ .init_box = snbep_uncore_msr_init_box, \
+ .disable_box = snbep_uncore_msr_disable_box, \
+ .enable_box = snbep_uncore_msr_enable_box, \
+ .disable_event = snbep_uncore_msr_disable_event, \
+ .enable_event = snbep_uncore_msr_enable_event, \
+ .read_counter = uncore_msr_read_counter
+
static struct intel_uncore_ops snbep_uncore_msr_ops = {
- .init_box = snbep_uncore_msr_init_box,
- .disable_box = snbep_uncore_msr_disable_box,
- .enable_box = snbep_uncore_msr_enable_box,
- .disable_event = snbep_uncore_msr_disable_event,
- .enable_event = snbep_uncore_msr_enable_event,
- .read_counter = uncore_msr_read_counter,
- .get_constraint = uncore_get_constraint,
- .put_constraint = uncore_put_constraint,
- .hw_config = snbep_uncore_hw_config,
+ SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
static struct intel_uncore_ops snbep_uncore_pci_ops = {
@@ -372,6 +371,7 @@ static struct event_constraint snbep_uncore_cbox_constraints[] = {
UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
@@ -421,6 +421,14 @@ static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
@@ -428,6 +436,8 @@ static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
EVENT_CONSTRAINT_END
};
@@ -446,6 +456,145 @@ static struct intel_uncore_type snbep_uncore_ubox = {
.format_group = &snbep_uncore_ubox_format_group,
};
+static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
+ SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
+ SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
+ EVENT_EXTRA_END
+};
+
+static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct intel_uncore_extra_reg *er = &box->shared_regs[0];
+ int i;
+
+ if (uncore_box_is_fake(box))
+ return;
+
+ for (i = 0; i < 5; i++) {
+ if (reg1->alloc & (0x1 << i))
+ atomic_sub(1 << (i * 6), &er->ref);
+ }
+ reg1->alloc = 0;
+}
+
+static struct event_constraint *
+__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
+ u64 (*cbox_filter_mask)(int fields))
+{
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct intel_uncore_extra_reg *er = &box->shared_regs[0];
+ int i, alloc = 0;
+ unsigned long flags;
+ u64 mask;
+
+ if (reg1->idx == EXTRA_REG_NONE)
+ return NULL;
+
+ raw_spin_lock_irqsave(&er->lock, flags);
+ for (i = 0; i < 5; i++) {
+ if (!(reg1->idx & (0x1 << i)))
+ continue;
+ if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
+ continue;
+
+ mask = cbox_filter_mask(0x1 << i);
+ if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
+ !((reg1->config ^ er->config) & mask)) {
+ atomic_add(1 << (i * 6), &er->ref);
+ er->config &= ~mask;
+ er->config |= reg1->config & mask;
+ alloc |= (0x1 << i);
+ } else {
+ break;
+ }
+ }
+ raw_spin_unlock_irqrestore(&er->lock, flags);
+ if (i < 5)
+ goto fail;
+
+ if (!uncore_box_is_fake(box))
+ reg1->alloc |= alloc;
+
+ return 0;
+fail:
+ for (; i >= 0; i--) {
+ if (alloc & (0x1 << i))
+ atomic_sub(1 << (i * 6), &er->ref);
+ }
+ return &constraint_empty;
+}
+
+static u64 snbep_cbox_filter_mask(int fields)
+{
+ u64 mask = 0;
+
+ if (fields & 0x1)
+ mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
+ if (fields & 0x2)
+ mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
+ if (fields & 0x4)
+ mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
+ if (fields & 0x8)
+ mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
+
+ return mask;
+}
+
+static struct event_constraint *
+snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
+}
+
+static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct extra_reg *er;
+ int idx = 0;
+
+ for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
+ if (er->event != (event->hw.config & er->config_mask))
+ continue;
+ idx |= er->idx;
+ }
+
+ if (idx) {
+ reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
+ SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
+ reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
+ reg1->idx = idx;
+ }
+ return 0;
+}
+
+static struct intel_uncore_ops snbep_uncore_cbox_ops = {
+ SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
+ .hw_config = snbep_cbox_hw_config,
+ .get_constraint = snbep_cbox_get_constraint,
+ .put_constraint = snbep_cbox_put_constraint,
+};
+
static struct intel_uncore_type snbep_uncore_cbox = {
.name = "cbox",
.num_counters = 4,
@@ -458,10 +607,104 @@ static struct intel_uncore_type snbep_uncore_cbox = {
.msr_offset = SNBEP_CBO_MSR_OFFSET,
.num_shared_regs = 1,
.constraints = snbep_uncore_cbox_constraints,
- .ops = &snbep_uncore_msr_ops,
+ .ops = &snbep_uncore_cbox_ops,
.format_group = &snbep_uncore_cbox_format_group,
};
+static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ u64 config = reg1->config;
+
+ if (new_idx > reg1->idx)
+ config <<= 8 * (new_idx - reg1->idx);
+ else
+ config >>= 8 * (reg1->idx - new_idx);
+
+ if (modify) {
+ hwc->config += new_idx - reg1->idx;
+ reg1->config = config;
+ reg1->idx = new_idx;
+ }
+ return config;
+}
+
+static struct event_constraint *
+snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct intel_uncore_extra_reg *er = &box->shared_regs[0];
+ unsigned long flags;
+ int idx = reg1->idx;
+ u64 mask, config1 = reg1->config;
+ bool ok = false;
+
+ if (reg1->idx == EXTRA_REG_NONE ||
+ (!uncore_box_is_fake(box) && reg1->alloc))
+ return NULL;
+again:
+ mask = 0xff << (idx * 8);
+ raw_spin_lock_irqsave(&er->lock, flags);
+ if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
+ !((config1 ^ er->config) & mask)) {
+ atomic_add(1 << (idx * 8), &er->ref);
+ er->config &= ~mask;
+ er->config |= config1 & mask;
+ ok = true;
+ }
+ raw_spin_unlock_irqrestore(&er->lock, flags);
+
+ if (!ok) {
+ idx = (idx + 1) % 4;
+ if (idx != reg1->idx) {
+ config1 = snbep_pcu_alter_er(event, idx, false);
+ goto again;
+ }
+ return &constraint_empty;
+ }
+
+ if (!uncore_box_is_fake(box)) {
+ if (idx != reg1->idx)
+ snbep_pcu_alter_er(event, idx, true);
+ reg1->alloc = 1;
+ }
+ return NULL;
+}
+
+static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct intel_uncore_extra_reg *er = &box->shared_regs[0];
+
+ if (uncore_box_is_fake(box) || !reg1->alloc)
+ return;
+
+ atomic_sub(1 << (reg1->idx * 8), &er->ref);
+ reg1->alloc = 0;
+}
+
+static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
+
+ if (ev_sel >= 0xb && ev_sel <= 0xe) {
+ reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
+ reg1->idx = ev_sel - 0xb;
+ reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
+ }
+ return 0;
+}
+
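snbep_pcu_get_constraint() and snbep_pcu_alter_er() above treat the PCU filter register as four independent 8-bit band fields, one byte lane per event select 0xb-0xe; when an event's own lane is already claimed with a different value, its band value is simply shifted into another lane. A small userspace sketch of that byte-lane move (illustrative only, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* Move an 8-bit band value from byte lane old_idx to byte lane new_idx. */
	static uint64_t move_band(uint64_t config, int old_idx, int new_idx)
	{
		if (new_idx > old_idx)
			return config << (8 * (new_idx - old_idx));
		return config >> (8 * (old_idx - new_idx));
	}

	int main(void)
	{
		uint64_t band = 0x7fULL << (8 * 1);	/* band value 0x7f in lane 1 */

		printf("lane 1 -> lane 3: 0x%llx\n",
		       (unsigned long long)move_band(band, 1, 3));
		return 0;
	}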
+static struct intel_uncore_ops snbep_uncore_pcu_ops = {
+ SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
+ .hw_config = snbep_pcu_hw_config,
+ .get_constraint = snbep_pcu_get_constraint,
+ .put_constraint = snbep_pcu_put_constraint,
+};
+
static struct intel_uncore_type snbep_uncore_pcu = {
.name = "pcu",
.num_counters = 4,
@@ -472,7 +715,7 @@ static struct intel_uncore_type snbep_uncore_pcu = {
.event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
.box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
.num_shared_regs = 1,
- .ops = &snbep_uncore_msr_ops,
+ .ops = &snbep_uncore_pcu_ops,
.format_group = &snbep_uncore_pcu_format_group,
};
@@ -544,55 +787,63 @@ static struct intel_uncore_type snbep_uncore_r3qpi = {
SNBEP_UNCORE_PCI_COMMON_INIT(),
};
+enum {
+ SNBEP_PCI_UNCORE_HA,
+ SNBEP_PCI_UNCORE_IMC,
+ SNBEP_PCI_UNCORE_QPI,
+ SNBEP_PCI_UNCORE_R2PCIE,
+ SNBEP_PCI_UNCORE_R3QPI,
+};
+
static struct intel_uncore_type *snbep_pci_uncores[] = {
- &snbep_uncore_ha,
- &snbep_uncore_imc,
- &snbep_uncore_qpi,
- &snbep_uncore_r2pcie,
- &snbep_uncore_r3qpi,
+ [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha,
+ [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc,
+ [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi,
+ [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie,
+ [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi,
NULL,
};
static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
{ /* Home Agent */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
- .driver_data = (unsigned long)&snbep_uncore_ha,
+ .driver_data = SNBEP_PCI_UNCORE_HA,
},
{ /* MC Channel 0 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
- .driver_data = (unsigned long)&snbep_uncore_imc,
+ .driver_data = SNBEP_PCI_UNCORE_IMC,
},
{ /* MC Channel 1 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
- .driver_data = (unsigned long)&snbep_uncore_imc,
+ .driver_data = SNBEP_PCI_UNCORE_IMC,
},
{ /* MC Channel 2 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
- .driver_data = (unsigned long)&snbep_uncore_imc,
+ .driver_data = SNBEP_PCI_UNCORE_IMC,
},
{ /* MC Channel 3 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
- .driver_data = (unsigned long)&snbep_uncore_imc,
+ .driver_data = SNBEP_PCI_UNCORE_IMC,
},
{ /* QPI Port 0 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
- .driver_data = (unsigned long)&snbep_uncore_qpi,
+ .driver_data = SNBEP_PCI_UNCORE_QPI,
},
{ /* QPI Port 1 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
- .driver_data = (unsigned long)&snbep_uncore_qpi,
+ .driver_data = SNBEP_PCI_UNCORE_QPI,
},
- { /* P2PCIe */
+ { /* R2PCIe */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
- .driver_data = (unsigned long)&snbep_uncore_r2pcie,
+ .driver_data = SNBEP_PCI_UNCORE_R2PCIE,
},
{ /* R3QPI Link 0 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
- .driver_data = (unsigned long)&snbep_uncore_r3qpi,
+ .driver_data = SNBEP_PCI_UNCORE_R3QPI,
},
{ /* R3QPI Link 1 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
- .driver_data = (unsigned long)&snbep_uncore_r3qpi,
+ .driver_data = SNBEP_PCI_UNCORE_R3QPI,
},
{ /* end: all zeroes */ }
};
@@ -605,7 +856,7 @@ static struct pci_driver snbep_uncore_pci_driver = {
/*
* build pci bus to socket mapping
*/
-static int snbep_pci2phy_map_init(void)
+static int snbep_pci2phy_map_init(int devid)
{
struct pci_dev *ubox_dev = NULL;
int i, bus, nodeid;
@@ -614,9 +865,7 @@ static int snbep_pci2phy_map_init(void)
while (1) {
/* find the UBOX device */
- ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX,
- ubox_dev);
+ ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
if (!ubox_dev)
break;
bus = ubox_dev->bus->number;
@@ -639,7 +888,7 @@ static int snbep_pci2phy_map_init(void)
break;
}
}
- };
+ }
if (ubox_dev)
pci_dev_put(ubox_dev);
@@ -648,6 +897,440 @@ static int snbep_pci2phy_map_init(void)
}
/* end of Sandy Bridge-EP uncore support */
+/* IvyTown uncore support */
+static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+ unsigned msr = uncore_msr_box_ctl(box);
+ if (msr)
+ wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
+}
+
+static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
+{
+ struct pci_dev *pdev = box->pci_dev;
+
+ pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
+}
+
+#define IVT_UNCORE_MSR_OPS_COMMON_INIT() \
+ .init_box = ivt_uncore_msr_init_box, \
+ .disable_box = snbep_uncore_msr_disable_box, \
+ .enable_box = snbep_uncore_msr_enable_box, \
+ .disable_event = snbep_uncore_msr_disable_event, \
+ .enable_event = snbep_uncore_msr_enable_event, \
+ .read_counter = uncore_msr_read_counter
+
+static struct intel_uncore_ops ivt_uncore_msr_ops = {
+ IVT_UNCORE_MSR_OPS_COMMON_INIT(),
+};
+
+static struct intel_uncore_ops ivt_uncore_pci_ops = {
+ .init_box = ivt_uncore_pci_init_box,
+ .disable_box = snbep_uncore_pci_disable_box,
+ .enable_box = snbep_uncore_pci_enable_box,
+ .disable_event = snbep_uncore_pci_disable_event,
+ .enable_event = snbep_uncore_pci_enable_event,
+ .read_counter = snbep_uncore_pci_read_counter,
+};
+
+#define IVT_UNCORE_PCI_COMMON_INIT() \
+ .perf_ctr = SNBEP_PCI_PMON_CTR0, \
+ .event_ctl = SNBEP_PCI_PMON_CTL0, \
+ .event_mask = IVT_PMON_RAW_EVENT_MASK, \
+ .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
+ .ops = &ivt_uncore_pci_ops, \
+ .format_group = &ivt_uncore_format_group
+
+static struct attribute *ivt_uncore_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh8.attr,
+ NULL,
+};
+
+static struct attribute *ivt_uncore_ubox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh5.attr,
+ NULL,
+};
+
+static struct attribute *ivt_uncore_cbox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_tid_en.attr,
+ &format_attr_thresh8.attr,
+ &format_attr_filter_tid.attr,
+ &format_attr_filter_link.attr,
+ &format_attr_filter_state2.attr,
+ &format_attr_filter_nid2.attr,
+ &format_attr_filter_opc2.attr,
+ NULL,
+};
+
+static struct attribute *ivt_uncore_pcu_formats_attr[] = {
+ &format_attr_event_ext.attr,
+ &format_attr_occ_sel.attr,
+ &format_attr_edge.attr,
+ &format_attr_thresh5.attr,
+ &format_attr_occ_invert.attr,
+ &format_attr_occ_edge.attr,
+ &format_attr_filter_band0.attr,
+ &format_attr_filter_band1.attr,
+ &format_attr_filter_band2.attr,
+ &format_attr_filter_band3.attr,
+ NULL,
+};
+
+static struct attribute *ivt_uncore_qpi_formats_attr[] = {
+ &format_attr_event_ext.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_thresh8.attr,
+ NULL,
+};
+
+static struct attribute_group ivt_uncore_format_group = {
+ .name = "format",
+ .attrs = ivt_uncore_formats_attr,
+};
+
+static struct attribute_group ivt_uncore_ubox_format_group = {
+ .name = "format",
+ .attrs = ivt_uncore_ubox_formats_attr,
+};
+
+static struct attribute_group ivt_uncore_cbox_format_group = {
+ .name = "format",
+ .attrs = ivt_uncore_cbox_formats_attr,
+};
+
+static struct attribute_group ivt_uncore_pcu_format_group = {
+ .name = "format",
+ .attrs = ivt_uncore_pcu_formats_attr,
+};
+
+static struct attribute_group ivt_uncore_qpi_format_group = {
+ .name = "format",
+ .attrs = ivt_uncore_qpi_formats_attr,
+};
+
+static struct intel_uncore_type ivt_uncore_ubox = {
+ .name = "ubox",
+ .num_counters = 2,
+ .num_boxes = 1,
+ .perf_ctr_bits = 44,
+ .fixed_ctr_bits = 48,
+ .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
+ .event_ctl = SNBEP_U_MSR_PMON_CTL0,
+ .event_mask = IVT_U_MSR_PMON_RAW_EVENT_MASK,
+ .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
+ .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
+ .ops = &ivt_uncore_msr_ops,
+ .format_group = &ivt_uncore_ubox_format_group,
+};
+
+static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
+ SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
+ SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
+ EVENT_EXTRA_END
+};
+
+static u64 ivt_cbox_filter_mask(int fields)
+{
+ u64 mask = 0;
+
+ if (fields & 0x1)
+ mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
+ if (fields & 0x2)
+ mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
+ if (fields & 0x4)
+ mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
+ if (fields & 0x8)
+ mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
+ if (fields & 0x10)
+ mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;
+
+ return mask;
+}
+
+static struct event_constraint *
+ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
+}
+
+static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct extra_reg *er;
+ int idx = 0;
+
+ for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
+ if (er->event != (event->hw.config & er->config_mask))
+ continue;
+ idx |= er->idx;
+ }
+
+ if (idx) {
+ reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
+ SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
+ reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
+ reg1->idx = idx;
+ }
+ return 0;
+}
+
+static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+
+ if (reg1->idx != EXTRA_REG_NONE) {
+ u64 filter = uncore_shared_reg_config(box, 0);
+ wrmsrl(reg1->reg, filter & 0xffffffff);
+ wrmsrl(reg1->reg + 6, filter >> 32);
+ }
+
+ wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
+}
+
+static struct intel_uncore_ops ivt_uncore_cbox_ops = {
+ .init_box = ivt_uncore_msr_init_box,
+ .disable_box = snbep_uncore_msr_disable_box,
+ .enable_box = snbep_uncore_msr_enable_box,
+ .disable_event = snbep_uncore_msr_disable_event,
+ .enable_event = ivt_cbox_enable_event,
+ .read_counter = uncore_msr_read_counter,
+ .hw_config = ivt_cbox_hw_config,
+ .get_constraint = ivt_cbox_get_constraint,
+ .put_constraint = snbep_cbox_put_constraint,
+};
+
+static struct intel_uncore_type ivt_uncore_cbox = {
+ .name = "cbox",
+ .num_counters = 4,
+ .num_boxes = 15,
+ .perf_ctr_bits = 44,
+ .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
+ .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
+ .event_mask = IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
+ .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
+ .msr_offset = SNBEP_CBO_MSR_OFFSET,
+ .num_shared_regs = 1,
+ .constraints = snbep_uncore_cbox_constraints,
+ .ops = &ivt_uncore_cbox_ops,
+ .format_group = &ivt_uncore_cbox_format_group,
+};
+
+static struct intel_uncore_ops ivt_uncore_pcu_ops = {
+ IVT_UNCORE_MSR_OPS_COMMON_INIT(),
+ .hw_config = snbep_pcu_hw_config,
+ .get_constraint = snbep_pcu_get_constraint,
+ .put_constraint = snbep_pcu_put_constraint,
+};
+
+static struct intel_uncore_type ivt_uncore_pcu = {
+ .name = "pcu",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
+ .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
+ .event_mask = IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
+ .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
+ .num_shared_regs = 1,
+ .ops = &ivt_uncore_pcu_ops,
+ .format_group = &ivt_uncore_pcu_format_group,
+};
+
+static struct intel_uncore_type *ivt_msr_uncores[] = {
+ &ivt_uncore_ubox,
+ &ivt_uncore_cbox,
+ &ivt_uncore_pcu,
+ NULL,
+};
+
+static struct intel_uncore_type ivt_uncore_ha = {
+ .name = "ha",
+ .num_counters = 4,
+ .num_boxes = 2,
+ .perf_ctr_bits = 48,
+ IVT_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type ivt_uncore_imc = {
+ .name = "imc",
+ .num_counters = 4,
+ .num_boxes = 8,
+ .perf_ctr_bits = 48,
+ .fixed_ctr_bits = 48,
+ .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
+ .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
+ IVT_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type ivt_uncore_qpi = {
+ .name = "qpi",
+ .num_counters = 4,
+ .num_boxes = 3,
+ .perf_ctr_bits = 48,
+ .perf_ctr = SNBEP_PCI_PMON_CTR0,
+ .event_ctl = SNBEP_PCI_PMON_CTL0,
+ .event_mask = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
+ .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
+ .ops = &ivt_uncore_pci_ops,
+ .format_group = &ivt_uncore_qpi_format_group,
+};
+
+static struct intel_uncore_type ivt_uncore_r2pcie = {
+ .name = "r2pcie",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 44,
+ .constraints = snbep_uncore_r2pcie_constraints,
+ IVT_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type ivt_uncore_r3qpi = {
+ .name = "r3qpi",
+ .num_counters = 3,
+ .num_boxes = 2,
+ .perf_ctr_bits = 44,
+ .constraints = snbep_uncore_r3qpi_constraints,
+ IVT_UNCORE_PCI_COMMON_INIT(),
+};
+
+enum {
+ IVT_PCI_UNCORE_HA,
+ IVT_PCI_UNCORE_IMC,
+ IVT_PCI_UNCORE_QPI,
+ IVT_PCI_UNCORE_R2PCIE,
+ IVT_PCI_UNCORE_R3QPI,
+};
+
+static struct intel_uncore_type *ivt_pci_uncores[] = {
+ [IVT_PCI_UNCORE_HA] = &ivt_uncore_ha,
+ [IVT_PCI_UNCORE_IMC] = &ivt_uncore_imc,
+ [IVT_PCI_UNCORE_QPI] = &ivt_uncore_qpi,
+ [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie,
+ [IVT_PCI_UNCORE_R3QPI] = &ivt_uncore_r3qpi,
+ NULL,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
+ { /* Home Agent 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
+ .driver_data = IVT_PCI_UNCORE_HA,
+ },
+ { /* Home Agent 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
+ .driver_data = IVT_PCI_UNCORE_HA,
+ },
+ { /* MC0 Channel 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
+ .driver_data = IVT_PCI_UNCORE_IMC,
+ },
+ { /* MC0 Channel 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
+ .driver_data = IVT_PCI_UNCORE_IMC,
+ },
+ { /* MC0 Channel 3 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
+ .driver_data = IVT_PCI_UNCORE_IMC,
+ },
+ { /* MC0 Channel 4 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
+ .driver_data = IVT_PCI_UNCORE_IMC,
+ },
+ { /* MC1 Channel 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
+ .driver_data = IVT_PCI_UNCORE_IMC,
+ },
+ { /* MC1 Channel 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
+ .driver_data = IVT_PCI_UNCORE_IMC,
+ },
+ { /* MC1 Channel 3 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
+ .driver_data = IVT_PCI_UNCORE_IMC,
+ },
+ { /* MC1 Channel 4 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
+ .driver_data = IVT_PCI_UNCORE_IMC,
+ },
+ { /* QPI0 Port 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
+ .driver_data = IVT_PCI_UNCORE_QPI,
+ },
+ { /* QPI0 Port 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
+ .driver_data = IVT_PCI_UNCORE_QPI,
+ },
+ { /* QPI1 Port 2 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
+ .driver_data = IVT_PCI_UNCORE_QPI,
+ },
+ { /* R2PCIe */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
+ .driver_data = IVT_PCI_UNCORE_R2PCIE,
+ },
+ { /* R3QPI0 Link 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
+ .driver_data = IVT_PCI_UNCORE_R3QPI,
+ },
+ { /* R3QPI0 Link 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
+ .driver_data = IVT_PCI_UNCORE_R3QPI,
+ },
+ { /* R3QPI1 Link 2 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
+ .driver_data = IVT_PCI_UNCORE_R3QPI,
+ },
+ { /* end: all zeroes */ }
+};
+
+static struct pci_driver ivt_uncore_pci_driver = {
+ .name = "ivt_uncore",
+ .id_table = ivt_uncore_pci_ids,
+};
+/* end of IvyTown uncore support */
+
/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
@@ -808,9 +1491,6 @@ static struct intel_uncore_type *nhm_msr_uncores[] = {
/* end of Nehalem uncore support */
/* Nehalem-EX uncore support */
-#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
- ((1ULL << (n)) - 1)))
-
DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
@@ -1161,7 +1841,7 @@ static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
};
/* Nehalem-EX or Westmere-EX ? */
-bool uncore_nhmex;
+static bool uncore_nhmex;
static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
{
@@ -1239,7 +1919,7 @@ static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
atomic_sub(1 << (idx * 8), &er->ref);
}
-u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
+static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
{
struct hw_perf_event *hwc = &event->hw;
struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
@@ -1554,7 +2234,7 @@ static struct intel_uncore_type nhmex_uncore_mbox = {
.format_group = &nhmex_uncore_mbox_format_group,
};
-void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
+static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
@@ -1724,21 +2404,6 @@ static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event
return 0;
}
-static u64 nhmex_rbox_shared_reg_config(struct intel_uncore_box *box, int idx)
-{
- struct intel_uncore_extra_reg *er;
- unsigned long flags;
- u64 config;
-
- er = &box->shared_regs[idx];
-
- raw_spin_lock_irqsave(&er->lock, flags);
- config = er->config;
- raw_spin_unlock_irqrestore(&er->lock, flags);
-
- return config;
-}
-
static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -1759,7 +2424,7 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per
case 2:
case 3:
wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
- nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5));
+ uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
break;
case 4:
wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
@@ -2285,7 +2950,7 @@ out:
return ret;
}
-int uncore_pmu_event_init(struct perf_event *event)
+static int uncore_pmu_event_init(struct perf_event *event)
{
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
@@ -2438,7 +3103,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
type->unconstrainted = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
- 0, type->num_counters, 0);
+ 0, type->num_counters, 0, 0);
for (i = 0; i < type->num_boxes; i++) {
pmus[i].func_id = -1;
@@ -2556,6 +3221,8 @@ static void uncore_pci_remove(struct pci_dev *pdev)
if (WARN_ON_ONCE(phys_id != box->phys_id))
return;
+ pci_set_drvdata(pdev, NULL);
+
raw_spin_lock(&uncore_box_lock);
list_del(&box->list);
raw_spin_unlock(&uncore_box_lock);
@@ -2574,11 +3241,7 @@ static void uncore_pci_remove(struct pci_dev *pdev)
static int uncore_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- struct intel_uncore_type *type;
-
- type = (struct intel_uncore_type *)id->driver_data;
-
- return uncore_pci_add(type, pdev);
+ return uncore_pci_add(pci_uncores[id->driver_data], pdev);
}
static int __init uncore_pci_init(void)
@@ -2587,12 +3250,19 @@ static int __init uncore_pci_init(void)
switch (boot_cpu_data.x86_model) {
case 45: /* Sandy Bridge-EP */
- ret = snbep_pci2phy_map_init();
+ ret = snbep_pci2phy_map_init(0x3ce0);
if (ret)
return ret;
pci_uncores = snbep_pci_uncores;
uncore_pci_driver = &snbep_uncore_pci_driver;
break;
+ case 62: /* IvyTown */
+ ret = snbep_pci2phy_map_init(0x0e1e);
+ if (ret)
+ return ret;
+ pci_uncores = ivt_pci_uncores;
+ uncore_pci_driver = &ivt_uncore_pci_driver;
+ break;
default:
return 0;
}
@@ -2622,6 +3292,21 @@ static void __init uncore_pci_exit(void)
}
}
+/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
+static LIST_HEAD(boxes_to_free);
+
+static void __cpuinit uncore_kfree_boxes(void)
+{
+ struct intel_uncore_box *box;
+
+ while (!list_empty(&boxes_to_free)) {
+ box = list_entry(boxes_to_free.next,
+ struct intel_uncore_box, list);
+ list_del(&box->list);
+ kfree(box);
+ }
+}
+
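uncore_kfree_boxes() and the boxes_to_free list exist so that boxes dropped from the CPU_DYING notifier (see the hunk below) are not freed on the spot but parked and reclaimed later from CPU_ONLINE/CPU_DEAD, presumably because the dying path runs in a context where an immediate kfree() is undesirable. A toy userspace analogue of that deferred-free pattern (illustrative only, not kernel code):

	#include <stdlib.h>
	#include <stdio.h>

	struct box {
		struct box *next;
		int id;
	};

	static struct box *boxes_to_free;

	/* Called from the "dying" path: park the object instead of freeing it. */
	static void defer_free(struct box *b)
	{
		b->next = boxes_to_free;
		boxes_to_free = b;
	}

	/* Called later from a safe point: actually release parked objects. */
	static void flush_deferred(void)
	{
		while (boxes_to_free) {
			struct box *b = boxes_to_free;

			boxes_to_free = b->next;
			printf("freeing box %d\n", b->id);
			free(b);
		}
	}

	int main(void)
	{
		struct box *b = malloc(sizeof(*b));

		if (!b)
			return 1;
		b->id = 42;
		defer_free(b);
		flush_deferred();
		return 0;
	}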
static void __cpuinit uncore_cpu_dying(int cpu)
{
struct intel_uncore_type *type;
@@ -2636,7 +3321,7 @@ static void __cpuinit uncore_cpu_dying(int cpu)
box = *per_cpu_ptr(pmu->box, cpu);
*per_cpu_ptr(pmu->box, cpu) = NULL;
if (box && atomic_dec_and_test(&box->refcnt))
- kfree(box);
+ list_add(&box->list, &boxes_to_free);
}
}
}
@@ -2666,8 +3351,11 @@ static int __cpuinit uncore_cpu_starting(int cpu)
if (exist && exist->phys_id == phys_id) {
atomic_inc(&exist->refcnt);
*per_cpu_ptr(pmu->box, cpu) = exist;
- kfree(box);
- box = NULL;
+ if (box) {
+ list_add(&box->list,
+ &boxes_to_free);
+ box = NULL;
+ }
break;
}
}
@@ -2806,6 +3494,10 @@ static int
case CPU_DYING:
uncore_cpu_dying(cpu);
break;
+ case CPU_ONLINE:
+ case CPU_DEAD:
+ uncore_kfree_boxes();
+ break;
default:
break;
}
@@ -2871,6 +3563,12 @@ static int __init uncore_cpu_init(void)
nhmex_uncore_cbox.num_boxes = max_cores;
msr_uncores = nhmex_msr_uncores;
break;
+ case 62: /* IvyTown */
+ if (ivt_uncore_cbox.num_boxes > max_cores)
+ ivt_uncore_cbox.num_boxes = max_cores;
+ msr_uncores = ivt_msr_uncores;
+ break;
+
default:
return 0;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index e68a4550e95..f9528917f6e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -76,7 +76,7 @@
#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
#define SNBEP_PMON_CTL_RST (1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
-#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21) /* only for QPI */
+#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21)
#define SNBEP_PMON_CTL_EN (1 << 22)
#define SNBEP_PMON_CTL_INVERT (1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
@@ -148,9 +148,20 @@
#define SNBEP_C0_MSR_PMON_CTL0 0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
-#define SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK 0xfffffc1f
#define SNBEP_CBO_MSR_OFFSET 0x20
+#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f
+#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00
+#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000
+#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000
+
+#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) { \
+ .event = (e), \
+ .msr = SNBEP_C0_MSR_PMON_BOX_FILTER, \
+ .config_mask = (m), \
+ .idx = (i) \
+}
+
/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0 0xc36
#define SNBEP_PCU_MSR_PMON_CTL0 0xc30
@@ -160,6 +171,55 @@
#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
+/* IVT event control */
+#define IVT_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
+ SNBEP_PMON_BOX_CTL_RST_CTRS)
+#define IVT_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
+ SNBEP_PMON_CTL_UMASK_MASK | \
+ SNBEP_PMON_CTL_EDGE_DET | \
+ SNBEP_PMON_CTL_TRESH_MASK)
+/* IVT Ubox */
+#define IVT_U_MSR_PMON_GLOBAL_CTL 0xc00
+#define IVT_U_PMON_GLOBAL_FRZ_ALL (1 << 31)
+#define IVT_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29)
+
+#define IVT_U_MSR_PMON_RAW_EVENT_MASK \
+ (SNBEP_PMON_CTL_EV_SEL_MASK | \
+ SNBEP_PMON_CTL_UMASK_MASK | \
+ SNBEP_PMON_CTL_EDGE_DET | \
+ SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
+/* IVT Cbo */
+#define IVT_CBO_MSR_PMON_RAW_EVENT_MASK (IVT_PMON_RAW_EVENT_MASK | \
+ SNBEP_CBO_PMON_CTL_TID_EN)
+
+#define IVT_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0)
+#define IVT_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5)
+#define IVT_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17)
+#define IVT_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
+#define IVT_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
+#define IVT_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
+#define IVT_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
+#define IVT_CB0_MSR_PMON_BOX_FILTER_IOSC (0x1ULL << 63)
+
+/* IVT home agent */
+#define IVT_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16)
+#define IVT_HA_PCI_PMON_RAW_EVENT_MASK \
+ (IVT_PMON_RAW_EVENT_MASK | \
+ IVT_HA_PCI_PMON_CTL_Q_OCC_RST)
+/* IVT PCU */
+#define IVT_PCU_MSR_PMON_RAW_EVENT_MASK \
+ (SNBEP_PMON_CTL_EV_SEL_MASK | \
+ SNBEP_PMON_CTL_EV_SEL_EXT | \
+ SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
+ SNBEP_PMON_CTL_EDGE_DET | \
+ SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
+ SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
+ SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
+/* IVT QPI */
+#define IVT_QPI_PCI_PMON_RAW_EVENT_MASK \
+ (IVT_PMON_RAW_EVENT_MASK | \
+ SNBEP_PMON_CTL_EV_SEL_EXT)
+
/* NHM-EX event control */
#define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff
#define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 92c7e39a079..3486e666035 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -895,8 +895,8 @@ static void p4_pmu_disable_pebs(void)
* So at moment let leave metrics turned on forever -- it's
* ok for now but need to be revisited!
*
- * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)0);
- * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
+ * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, 0);
+ * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, 0);
*/
}
@@ -910,8 +910,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
* asserted again and again
*/
(void)wrmsrl_safe(hwc->config_base,
- (u64)(p4_config_unpack_cccr(hwc->config)) &
- ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
+ p4_config_unpack_cccr(hwc->config) & ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
}
static void p4_pmu_disable_all(void)
@@ -957,7 +956,7 @@ static void p4_pmu_enable_event(struct perf_event *event)
u64 escr_addr, cccr;
bind = &p4_event_bind_map[idx];
- escr_addr = (u64)bind->escr_msr[thread];
+ escr_addr = bind->escr_msr[thread];
/*
* - we dont support cascaded counters yet
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 9b9f18b4991..d15f575a861 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -169,25 +169,9 @@ static struct console early_serial_console = {
.index = -1,
};
-/* Direct interface for emergencies */
-static struct console *early_console = &early_vga_console;
-static int __initdata early_console_initialized;
-
-asmlinkage void early_printk(const char *fmt, ...)
-{
- char buf[512];
- int n;
- va_list ap;
-
- va_start(ap, fmt);
- n = vscnprintf(buf, sizeof(buf), fmt, ap);
- early_console->write(early_console, buf, n);
- va_end(ap);
-}
-
static inline void early_console_register(struct console *con, int keep_early)
{
- if (early_console->index != -1) {
+ if (con->index != -1) {
printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n",
con->name);
return;
@@ -207,9 +191,8 @@ static int __init setup_early_printk(char *buf)
if (!buf)
return 0;
- if (early_console_initialized)
+ if (early_console)
return 0;
- early_console_initialized = 1;
keep = (strstr(buf, "keep") != NULL);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 3f06e614998..9895a9a4138 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -353,7 +353,11 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
* have given.
*/
newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
- BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
+ if ((s64) (s32) newdisp != newdisp) {
+ pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
+ pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value);
+ return 0;
+ }
disp = (u8 *) dest + insn_offset_displacement(&insn);
*(s32 *) disp = (s32) newdisp;
}
@@ -375,6 +379,9 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
else
p->ainsn.boostable = -1;
+ /* Check whether the instruction modifies Interrupt Flag or not */
+ p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);
+
/* Also, displacement change doesn't affect the first byte */
p->opcode = p->ainsn.insn[0];
}
@@ -434,7 +441,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
__this_cpu_write(current_kprobe, p);
kcb->kprobe_saved_flags = kcb->kprobe_old_flags
= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
- if (is_IF_modifier(p->ainsn.insn))
+ if (p->ainsn.if_modifier)
kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}
diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/microcode_core_early.c
index 577db8417d1..833d51d6ee0 100644
--- a/arch/x86/kernel/microcode_core_early.c
+++ b/arch/x86/kernel/microcode_core_early.c
@@ -45,9 +45,6 @@ static int __cpuinit x86_vendor(void)
u32 eax = 0x00000000;
u32 ebx, ecx = 0, edx;
- if (!have_cpuid_p())
- return X86_VENDOR_UNKNOWN;
-
native_cpuid(&eax, &ebx, &ecx, &edx);
if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
@@ -59,18 +56,45 @@ static int __cpuinit x86_vendor(void)
return X86_VENDOR_UNKNOWN;
}
+static int __cpuinit x86_family(void)
+{
+ u32 eax = 0x00000001;
+ u32 ebx, ecx = 0, edx;
+ int x86;
+
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+
+ x86 = (eax >> 8) & 0xf;
+ if (x86 == 15)
+ x86 += (eax >> 20) & 0xff;
+
+ return x86;
+}
+
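x86_family() above is the standard CPUID family decoding: leaf 1 returns the base family in EAX bits 11:8, and the extended family in bits 27:20 is added only when the base family is 0xf. The same computation from userspace, using the compiler-provided cpuid.h helper (illustrative only, not kernel code):

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;
		int family;

		if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
			return 1;	/* CPUID leaf 1 not available */

		family = (eax >> 8) & 0xf;
		if (family == 15)
			family += (eax >> 20) & 0xff;

		printf("x86 family: %d\n", family);
		return 0;
	}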
void __init load_ucode_bsp(void)
{
- int vendor = x86_vendor();
+ int vendor, x86;
+
+ if (!have_cpuid_p())
+ return;
- if (vendor == X86_VENDOR_INTEL)
+ vendor = x86_vendor();
+ x86 = x86_family();
+
+ if (vendor == X86_VENDOR_INTEL && x86 >= 6)
load_ucode_intel_bsp();
}
void __cpuinit load_ucode_ap(void)
{
- int vendor = x86_vendor();
+ int vendor, x86;
+
+ if (!have_cpuid_p())
+ return;
+
+ vendor = x86_vendor();
+ x86 = x86_family();
- if (vendor == X86_VENDOR_INTEL)
+ if (vendor == X86_VENDOR_INTEL && x86 >= 6)
load_ucode_intel_ap();
}
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
index 7890bc83895..d893e8ed8ac 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/microcode_intel_early.c
@@ -90,13 +90,13 @@ microcode_phys(struct microcode_intel **mc_saved_tmp,
struct microcode_intel ***mc_saved;
mc_saved = (struct microcode_intel ***)
- __pa_symbol(&mc_saved_data->mc_saved);
+ __pa_nodebug(&mc_saved_data->mc_saved);
for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
struct microcode_intel *p;
p = *(struct microcode_intel **)
- __pa(mc_saved_data->mc_saved + i);
- mc_saved_tmp[i] = (struct microcode_intel *)__pa(p);
+ __pa_nodebug(mc_saved_data->mc_saved + i);
+ mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
}
}
#endif
@@ -562,7 +562,7 @@ scan_microcode(unsigned long start, unsigned long end,
struct cpio_data cd;
long offset = 0;
#ifdef CONFIG_X86_32
- char *p = (char *)__pa_symbol(ucode_name);
+ char *p = (char *)__pa_nodebug(ucode_name);
#else
char *p = ucode_name;
#endif
@@ -630,8 +630,8 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
if (mc_intel == NULL)
return;
- delay_ucode_info_p = (int *)__pa_symbol(&delay_ucode_info);
- current_mc_date_p = (int *)__pa_symbol(&current_mc_date);
+ delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
+ current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
*delay_ucode_info_p = 1;
*current_mc_date_p = mc_intel->hdr.date;
@@ -659,8 +659,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
}
#endif
-static int apply_microcode_early(struct mc_saved_data *mc_saved_data,
- struct ucode_cpu_info *uci)
+static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data,
+ struct ucode_cpu_info *uci)
{
struct microcode_intel *mc_intel;
unsigned int val[2];
@@ -741,15 +741,15 @@ load_ucode_intel_bsp(void)
#ifdef CONFIG_X86_32
struct boot_params *boot_params_p;
- boot_params_p = (struct boot_params *)__pa_symbol(&boot_params);
+ boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params);
ramdisk_image = boot_params_p->hdr.ramdisk_image;
ramdisk_size = boot_params_p->hdr.ramdisk_size;
initrd_start_early = ramdisk_image;
initrd_end_early = initrd_start_early + ramdisk_size;
_load_ucode_intel_bsp(
- (struct mc_saved_data *)__pa_symbol(&mc_saved_data),
- (unsigned long *)__pa_symbol(&mc_saved_in_initrd),
+ (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
+ (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
initrd_start_early, initrd_end_early, &uci);
#else
ramdisk_image = boot_params.hdr.ramdisk_image;
@@ -772,10 +772,10 @@ void __cpuinit load_ucode_intel_ap(void)
unsigned long *initrd_start_p;
mc_saved_in_initrd_p =
- (unsigned long *)__pa_symbol(mc_saved_in_initrd);
- mc_saved_data_p = (struct mc_saved_data *)__pa_symbol(&mc_saved_data);
- initrd_start_p = (unsigned long *)__pa_symbol(&initrd_start);
- initrd_start_addr = (unsigned long)__pa_symbol(*initrd_start_p);
+ (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
+ mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
+ initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
+ initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
#else
mc_saved_data_p = &mc_saved_data;
mc_saved_in_initrd_p = mc_saved_in_initrd;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 17fff18a103..8bfb335f74b 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void)
leave_lazy(PARAVIRT_LAZY_MMU);
}
+void paravirt_flush_lazy_mmu(void)
+{
+ preempt_disable();
+
+ if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+ arch_leave_lazy_mmu_mode();
+ arch_enter_lazy_mmu_mode();
+ }
+
+ preempt_enable();
+}
+
void paravirt_start_context_switch(struct task_struct *prev)
{
BUG_ON(preemptible());
@@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return this_cpu_read(paravirt_lazy_mode);
}
-void arch_flush_lazy_mmu_mode(void)
-{
- preempt_disable();
-
- if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
- arch_leave_lazy_mmu_mode();
- arch_enter_lazy_mmu_mode();
- }
-
- preempt_enable();
-}
-
struct pv_info pv_info = {
.name = "bare hardware",
.paravirt_enabled = 0,
@@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = {
.lazy_mode = {
.enter = paravirt_nop,
.leave = paravirt_nop,
+ .flush = paravirt_nop,
},
.set_fixmap = native_set_fixmap,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 14ae10031ff..6833bffaadb 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -301,13 +301,7 @@ void exit_idle(void)
}
#endif
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
+void arch_cpu_idle_prepare(void)
{
/*
* If we're the non-boot CPU, nothing set the stack canary up
@@ -317,71 +311,40 @@ void cpu_idle(void)
* canaries already on the stack wont ever trigger).
*/
boot_init_stack_canary();
- current_thread_info()->status |= TS_POLLING;
-
- while (1) {
- tick_nohz_idle_enter();
-
- while (!need_resched()) {
- rmb();
-
- if (cpu_is_offline(smp_processor_id()))
- play_dead();
-
- /*
- * Idle routines should keep interrupts disabled
- * from here on, until they go to idle.
- * Otherwise, idle callbacks can misfire.
- */
- local_touch_nmi();
- local_irq_disable();
-
- enter_idle();
-
- /* Don't trace irqs off for idle */
- stop_critical_timings();
-
- /* enter_idle() needs rcu for notifiers */
- rcu_idle_enter();
+}
- if (cpuidle_idle_call())
- x86_idle();
+void arch_cpu_idle_enter(void)
+{
+ local_touch_nmi();
+ enter_idle();
+}
- rcu_idle_exit();
- start_critical_timings();
+void arch_cpu_idle_exit(void)
+{
+ __exit_idle();
+}
- /* In many cases the interrupt that ended idle
- has already called exit_idle. But some idle
- loops can be woken up without interrupt. */
- __exit_idle();
- }
+void arch_cpu_idle_dead(void)
+{
+ play_dead();
+}
- tick_nohz_idle_exit();
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- }
+/*
+ * Called from the generic idle code.
+ */
+void arch_cpu_idle(void)
+{
+ if (cpuidle_idle_call())
+ x86_idle();
}
/*
- * We use this if we don't have any better
- * idle routine..
+ * We use this if we don't have any better idle routine..
*/
void default_idle(void)
{
trace_cpu_idle_rcuidle(1, smp_processor_id());
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we
- * test NEED_RESCHED:
- */
- smp_mb();
-
- if (!need_resched())
- safe_halt(); /* enables interrupts racelessly */
- else
- local_irq_enable();
- current_thread_info()->status |= TS_POLLING;
+ safe_halt();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
@@ -411,20 +374,6 @@ void stop_this_cpu(void *dummy)
halt();
}
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
-{
- trace_cpu_idle_rcuidle(0, smp_processor_id());
- local_irq_enable();
- while (!need_resched())
- cpu_relax();
- trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
-}
-
bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);
@@ -489,10 +438,10 @@ static void amd_e400_idle(void)
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
- if (x86_idle == poll_idle && smp_num_siblings > 1)
+ if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
- if (x86_idle)
+ if (x86_idle || boot_option_idle_override == IDLE_POLL)
return;
if (cpu_has_amd_erratum(amd_erratum_400)) {
@@ -517,8 +466,8 @@ static int __init idle_setup(char *str)
if (!strcmp(str, "poll")) {
pr_info("using polling idle threads\n");
- x86_idle = poll_idle;
boot_option_idle_override = IDLE_POLL;
+ cpu_idle_poll_ctrl(true);
} else if (!strcmp(str, "halt")) {
/*
* When the boot option of idle=halt is added, halt is
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 84d32855f65..fae9134a2de 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -171,9 +171,15 @@ static struct resource bss_resource = {
#ifdef CONFIG_X86_32
/* cpu data as detected by the assembly code in head.S */
-struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1};
+struct cpuinfo_x86 new_cpu_data __cpuinitdata = {
+ .wp_works_ok = -1,
+ .fdiv_bug = -1,
+};
/* common cpu data for all cpus */
-struct cpuinfo_x86 boot_cpu_data __read_mostly = {0, 0, 0, 0, -1, 1, 0, 0, -1};
+struct cpuinfo_x86 boot_cpu_data __read_mostly = {
+ .wp_works_ok = -1,
+ .fdiv_bug = -1,
+};
EXPORT_SYMBOL(boot_cpu_data);
unsigned int def_to_bigsmp;
@@ -501,11 +507,14 @@ static void __init memblock_x86_reserve_range_setup_data(void)
/*
* Keep the crash kernel below this limit. On 32 bits earlier kernels
* would limit the kernel to the low 512 MiB due to mapping restrictions.
+ * On 64-bit, old kexec-tools need it to be below 896 MiB.
*/
#ifdef CONFIG_X86_32
-# define CRASH_KERNEL_ADDR_MAX (512 << 20)
+# define CRASH_KERNEL_ADDR_LOW_MAX (512 << 20)
+# define CRASH_KERNEL_ADDR_HIGH_MAX (512 << 20)
#else
-# define CRASH_KERNEL_ADDR_MAX MAXMEM
+# define CRASH_KERNEL_ADDR_LOW_MAX (896UL<<20)
+# define CRASH_KERNEL_ADDR_HIGH_MAX MAXMEM
#endif
static void __init reserve_crashkernel_low(void)
@@ -515,19 +524,35 @@ static void __init reserve_crashkernel_low(void)
unsigned long long low_base = 0, low_size = 0;
unsigned long total_low_mem;
unsigned long long base;
+ bool auto_set = false;
int ret;
total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT));
+ /* crashkernel=Y,low */
ret = parse_crashkernel_low(boot_command_line, total_low_mem,
&low_size, &base);
- if (ret != 0 || low_size <= 0)
- return;
+ if (ret != 0) {
+ /*
+ * two parts from lib/swiotlb.c:
+ * swiotlb size: user specified with swiotlb= or default.
+ * swiotlb overflow buffer: now is hardcoded to 32k.
+ * We round it to 8M for other buffers that
+ * may need to stay low too.
+ */
+ low_size = swiotlb_size_or_default() + (8UL<<20);
+ auto_set = true;
+ } else {
+ /* passed with crashkernel=0,low ? */
+ if (!low_size)
+ return;
+ }
low_base = memblock_find_in_range(low_size, (1ULL<<32),
low_size, alignment);
if (!low_base) {
- pr_info("crashkernel low reservation failed - No suitable area found.\n");
+ if (!auto_set)
+ pr_info("crashkernel low reservation failed - No suitable area found.\n");
return;
}
@@ -548,14 +573,22 @@ static void __init reserve_crashkernel(void)
const unsigned long long alignment = 16<<20; /* 16M */
unsigned long long total_mem;
unsigned long long crash_size, crash_base;
+ bool high = false;
int ret;
total_mem = memblock_phys_mem_size();
+ /* crashkernel=XM */
ret = parse_crashkernel(boot_command_line, total_mem,
&crash_size, &crash_base);
- if (ret != 0 || crash_size <= 0)
- return;
+ if (ret != 0 || crash_size <= 0) {
+ /* crashkernel=X,high */
+ ret = parse_crashkernel_high(boot_command_line, total_mem,
+ &crash_size, &crash_base);
+ if (ret != 0 || crash_size <= 0)
+ return;
+ high = true;
+ }
/* 0 means: find the address automatically */
if (crash_base <= 0) {
@@ -563,7 +596,9 @@ static void __init reserve_crashkernel(void)
* kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
*/
crash_base = memblock_find_in_range(alignment,
- CRASH_KERNEL_ADDR_MAX, crash_size, alignment);
+ high ? CRASH_KERNEL_ADDR_HIGH_MAX :
+ CRASH_KERNEL_ADDR_LOW_MAX,
+ crash_size, alignment);
if (!crash_base) {
pr_info("crashkernel reservation failed - No suitable area found.\n");
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a6ceaedc396..9c73b51817e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -284,7 +284,7 @@ notrace static void __cpuinit start_secondary(void *unused)
x86_cpuinit.setup_percpu_clockev();
wmb();
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
}
void __init smp_store_boot_cpu_info(void)
@@ -1365,9 +1365,8 @@ static inline void mwait_play_dead(void)
unsigned int eax, ebx, ecx, edx;
unsigned int highest_cstate = 0;
unsigned int highest_subcstate = 0;
- int i;
void *mwait_ptr;
- struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
+ int i;
if (!this_cpu_has(X86_FEATURE_MWAIT))
return;
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 0ba4cfb4f41..2ed845928b5 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -697,3 +697,32 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
send_sig(SIGTRAP, current, 0);
return ret;
}
+
+unsigned long
+arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
+{
+ int rasize, ncopied;
+ unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
+
+ rasize = is_ia32_task() ? 4 : 8;
+ ncopied = copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize);
+ if (unlikely(ncopied))
+ return -1;
+
+ /* check whether address has been already hijacked */
+ if (orig_ret_vaddr == trampoline_vaddr)
+ return orig_ret_vaddr;
+
+ ncopied = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
+ if (likely(!ncopied))
+ return orig_ret_vaddr;
+
+ if (ncopied != rasize) {
+ pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
+ "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
+
+ force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
+ }
+
+ return -1;
+}
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 02b51dd4e4a..f77df1c5de6 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
if (!pv_eoi_enabled(vcpu))
return 0;
return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
- addr);
+ addr, sizeof(u8));
}
void kvm_lapic_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f71500af1f8..e1721324c27 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1406,25 +1406,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
kernel_ns = 0;
host_tsc = 0;
- /* Keep irq disabled to prevent changes to the clock */
- local_irq_save(flags);
- this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
- if (unlikely(this_tsc_khz == 0)) {
- local_irq_restore(flags);
- kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
- return 1;
- }
-
/*
* If the host uses TSC clock, then passthrough TSC as stable
* to the guest.
@@ -1436,6 +1426,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
kernel_ns = ka->master_kernel_ns;
}
spin_unlock(&ka->pvclock_gtod_sync_lock);
+
+ /* Keep irq disabled to prevent changes to the clock */
+ local_irq_save(flags);
+ this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
+ if (unlikely(this_tsc_khz == 0)) {
+ local_irq_restore(flags);
+ kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
+ return 1;
+ }
if (!use_master_clock) {
host_tsc = native_read_tsc();
kernel_ns = get_kernel_ns();
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1827,7 +1823,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
return 0;
}
- if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
+ sizeof(u32)))
return 1;
vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
@@ -1837,10 +1834,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1941,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,14 +1951,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
-
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+ gpa_offset = data & ~(PAGE_MASK | 1);
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL,
+ sizeof(struct pvclock_vcpu_time_info)))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -1980,7 +1975,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
- data & KVM_STEAL_VALID_BITS))
+ data & KVM_STEAL_VALID_BITS,
+ sizeof(struct kvm_steal_time)))
return 1;
vcpu->arch.st.msr_val = data;
@@ -2967,7 +2963,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6718,6 +6714,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 1cbd89ca556..7114c63f047 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1334,6 +1334,7 @@ __init void lguest_init(void)
pv_mmu_ops.read_cr3 = lguest_read_cr3;
pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
+ pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
pv_mmu_ops.pte_update = lguest_pte_update;
pv_mmu_ops.pte_update_defer = lguest_pte_update;
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 05928aae911..906fea31579 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -74,10 +74,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
char c;
unsigned zero_len;
- for (; len; --len) {
+ for (; len; --len, to++) {
if (__get_user_nocheck(c, from++, sizeof(char)))
break;
- if (__put_user_nocheck(c, to++, sizeof(char)))
+ if (__put_user_nocheck(c, to, sizeof(char)))
break;
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index fa8c02de0d2..022a9a0a3c6 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -378,10 +378,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
if (pgd_none(*pgd_ref))
return -1;
- if (pgd_none(*pgd))
+ if (pgd_none(*pgd)) {
set_pgd(pgd, *pgd_ref);
- else
+ arch_flush_lazy_mmu_mode();
+ } else {
BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+ }
/*
* Below here mismatches are bugs because these lower tables
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 6f31ee56c00..252b8f5489b 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -137,5 +137,4 @@ void __init set_highmem_pages_init(void)
add_highpages_with_active_regions(nid, zone_start_pfn,
zone_end_pfn);
}
- totalram_pages += totalhigh_pages;
}
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 4903a03ae87..fdc5dca14fb 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -410,9 +410,8 @@ void __init init_mem_mapping(void)
/* the ISA range is always mapped regardless of memory holes */
init_memory_mapping(0, ISA_END_ADDRESS);
- /* xen has big range in reserved near end of ram, skip it at first */
- addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE,
- PAGE_SIZE);
+ /* xen has big range in reserved near end of ram, skip it at first.*/
+ addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, PMD_SIZE);
real_end = addr + PMD_SIZE;
/* step_size need to be small so pgt_buf from BRK could cover it */
@@ -516,11 +515,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
for (; addr < end; addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
- free_page(addr);
- totalram_pages++;
+ free_reserved_page(virt_to_page(addr));
}
#endif
}
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 2d19001151d..3ac7e319918 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -427,14 +427,6 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
pkmap_page_table = pte;
}
-static void __init add_one_highpage_init(struct page *page)
-{
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalhigh_pages++;
-}
-
void __init add_highpages_with_active_regions(int nid,
unsigned long start_pfn, unsigned long end_pfn)
{
@@ -448,7 +440,7 @@ void __init add_highpages_with_active_regions(int nid,
start_pfn, end_pfn);
for ( ; pfn < e_pfn; pfn++)
if (pfn_valid(pfn))
- add_one_highpage_init(pfn_to_page(pfn));
+ free_highmem_page(pfn_to_page(pfn));
}
}
#else
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 474e28f1081..71ff55a1b28 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1011,11 +1011,8 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct)
flush_tlb_all();
}
-void __ref vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void __ref vmemmap_free(unsigned long start, unsigned long end)
{
- unsigned long start = (unsigned long)memmap;
- unsigned long end = (unsigned long)(memmap + nr_pages);
-
remove_pagetable(start, end, false);
}
@@ -1067,10 +1064,9 @@ void __init mem_init(void)
/* clear_bss() already clear the empty_zero_page */
- reservedpages = 0;
-
- /* this will put all low memory onto the freelists */
register_page_bootmem_info();
+
+ /* this will put all memory onto the freelists */
totalram_pages = free_all_bootmem();
absent_pages = absent_pages_in_range(0, max_pfn);
@@ -1285,18 +1281,17 @@ static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;
-int __meminit
-vmemmap_populate(struct page *start_page, unsigned long size, int node)
+static int __meminit vmemmap_populate_hugepages(unsigned long start,
+ unsigned long end, int node)
{
- unsigned long addr = (unsigned long)start_page;
- unsigned long end = (unsigned long)(start_page + size);
+ unsigned long addr;
unsigned long next;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
- for (; addr < end; addr = next) {
- void *p = NULL;
+ for (addr = start; addr < end; addr = next) {
+ next = pmd_addr_end(addr, end);
pgd = vmemmap_pgd_populate(addr, node);
if (!pgd)
@@ -1306,31 +1301,14 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
if (!pud)
return -ENOMEM;
- if (!cpu_has_pse) {
- next = (addr + PAGE_SIZE) & PAGE_MASK;
- pmd = vmemmap_pmd_populate(pud, addr, node);
-
- if (!pmd)
- return -ENOMEM;
-
- p = vmemmap_pte_populate(pmd, addr, node);
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd)) {
+ void *p;
- if (!p)
- return -ENOMEM;
-
- addr_end = addr + PAGE_SIZE;
- p_end = p + PAGE_SIZE;
- } else {
- next = pmd_addr_end(addr, end);
-
- pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd)) {
+ p = vmemmap_alloc_block_buf(PMD_SIZE, node);
+ if (p) {
pte_t entry;
- p = vmemmap_alloc_block_buf(PMD_SIZE, node);
- if (!p)
- return -ENOMEM;
-
entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
PAGE_KERNEL_LARGE);
set_pmd(pmd, __pmd(pte_val(entry)));
@@ -1347,15 +1325,32 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
addr_end = addr + PMD_SIZE;
p_end = p + PMD_SIZE;
- } else
- vmemmap_verify((pte_t *)pmd, node, addr, next);
+ continue;
+ }
+ } else if (pmd_large(*pmd)) {
+ vmemmap_verify((pte_t *)pmd, node, addr, next);
+ continue;
}
-
+ pr_warn_once("vmemmap: falling back to regular page backing\n");
+ if (vmemmap_populate_basepages(addr, next, node))
+ return -ENOMEM;
}
- sync_global_pgds((unsigned long)start_page, end - 1);
return 0;
}
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+{
+ int err;
+
+ if (cpu_has_pse)
+ err = vmemmap_populate_hugepages(start, end, node);
+ else
+ err = vmemmap_populate_basepages(start, end, node);
+ if (!err)
+ sync_global_pgds(start, end - 1);
+ return err;
+}
+
#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
void register_page_bootmem_memmap(unsigned long section_nr,
struct page *start_page, unsigned long size)
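
The init_64.c hunks change the x86-64 vmemmap hooks to take a [start, end) virtual byte range instead of a (struct page *, count) pair, and split population into vmemmap_populate_hugepages() on PSE-capable CPUs and the generic vmemmap_populate_basepages() otherwise, with sync_global_pgds() done once in the wrapper. A hedged sketch of a caller of the new interface; the function name is hypothetical, the constants come from generic sparsemem code:

#include <linux/mm.h>
#include <linux/mmzone.h>

/*
 * Map the vmemmap backing one sparsemem section on node 'nid'.
 * The new interface wants the byte range covered by the struct pages,
 * not a struct page pointer plus a page count.
 */
static struct page *example_populate_section_map(unsigned long pnum, int nid)
{
        struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
        unsigned long start = (unsigned long)map;
        unsigned long end = start + sizeof(struct page) * PAGES_PER_SECTION;

        if (vmemmap_populate(start, end, nid))
                return NULL;            /* allocation failure */
        return map;
}
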
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 78fe3f1ac49..9a1e6583910 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr)
in parallel. Reuse of the virtual address is prevented by
leaving it in the global lists until we're done with it.
cpa takes care of the direct mappings. */
- read_lock(&vmlist_lock);
- for (p = vmlist; p; p = p->next) {
- if (p->addr == (void __force *)addr)
- break;
- }
- read_unlock(&vmlist_lock);
+ p = find_vm_area((void __force *)addr);
if (!p) {
printk(KERN_ERR "iounmap: bad address %p\n", addr);
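
The iounmap() change swaps the open-coded walk of the vmlist (which is on its way to becoming private to mm/vmalloc.c) for find_vm_area(), which does the lookup under the vmalloc code's own locking. A small usage sketch with an illustrative helper name:

#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/printk.h>

/* Report the size recorded for an ioremap()ed mapping, or 0 if unknown. */
static unsigned long example_iomap_size(volatile void __iomem *addr)
{
        void *va = (void __force *)addr;
        struct vm_struct *area = find_vm_area(va);

        if (!area) {
                pr_err("no vm_struct found for %p\n", va);
                return 0;
        }
        return area->size;
}
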
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 72fe01e9e41..a71c4e20767 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -114,14 +114,11 @@ void numa_clear_node(int cpu)
*/
void __init setup_node_to_cpumask_map(void)
{
- unsigned int node, num = 0;
+ unsigned int node;
/* setup nr_node_ids if not done yet */
- if (nr_node_ids == MAX_NUMNODES) {
- for_each_node_mask(node, node_possible_map)
- num = node;
- nr_node_ids = num + 1;
- }
+ if (nr_node_ids == MAX_NUMNODES)
+ setup_nr_node_ids();
/* allocate the map */
for (node = 0; node < nr_node_ids; node++)
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index b0086567271..d0b1773d9d2 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -68,7 +68,7 @@ static int print_split(struct split_state *s)
s->gpg++;
i += GPS/PAGE_SIZE;
} else if (level == PG_LEVEL_2M) {
- if (!(pte_val(*pte) & _PAGE_PSE)) {
+ if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) {
printk(KERN_ERR
"%lx level %d but not PSE %Lx\n",
addr, level, (u64)pte_val(*pte));
@@ -130,13 +130,12 @@ static int pageattr_test(void)
}
failed += print_split(&sa);
- srandom32(100);
for (i = 0; i < NTEST; i++) {
- unsigned long pfn = random32() % max_pfn_mapped;
+ unsigned long pfn = prandom_u32() % max_pfn_mapped;
addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
- len[i] = random32() % 100;
+ len[i] = prandom_u32() % 100;
len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);
if (len[i] == 0)
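
The pageattr-test change is the mechanical rename from random32()/srandom32() to the prandom_*() family (the explicit srandom32(100) seeding is simply dropped). A hedged sketch of the renamed API as the test uses it; the function and parameter names are illustrative:

#include <linux/random.h>

/*
 * Pick a random pfn below 'max_pfn' and a small random length, the way
 * the test above does. Modulo bias is irrelevant for a self-test.
 */
static void example_pick_range(unsigned long max_pfn,
                               unsigned long *pfn, unsigned long *len)
{
        *pfn = prandom_u32() % max_pfn;
        *len = prandom_u32() % 100;
}
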
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 091934e1d0d..fb4e73ec24d 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -467,7 +467,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* We are safe now. Check whether the new pgprot is the same:
*/
old_pte = *kpte;
- old_prot = new_prot = req_prot = pte_pgprot(old_pte);
+ old_prot = req_prot = pte_pgprot(old_pte);
pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
@@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
* for the ancient hardware that doesn't support it.
*/
- if (pgprot_val(new_prot) & _PAGE_PRESENT)
- pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
+ if (pgprot_val(req_prot) & _PAGE_PRESENT)
+ pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
else
- pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
+ pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
- new_prot = canon_pgprot(new_prot);
+ req_prot = canon_pgprot(req_prot);
/*
* old_pte points to the large page base address. So we need
@@ -1413,6 +1413,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
* but that can deadlock->flush only current cpu:
*/
__flush_tlb_all();
+
+ arch_flush_lazy_mmu_mode();
}
#ifdef CONFIG_HIBERNATION
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 2610bd93c89..657438858e8 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -563,6 +563,13 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
if (base > __pa(high_memory-1))
return 0;
+ /*
+ * some areas in the middle of the kernel identity range
+ * are not mapped, like the PCI space.
+ */
+ if (!page_is_ram(base >> PAGE_SHIFT))
+ return 0;
+
id_sz = (__pa(high_memory-1) <= base + size) ?
__pa(high_memory) - base :
size;
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 193350b51f9..17fda6a8b3c 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
+ /*
+ * NOTE! For PAE, any changes to the top page-directory-pointer-table
+ * entries need a full cr3 reload to flush.
+ */
+#ifdef CONFIG_X86_PAE
+ tlb->need_flush_all = 1;
+#endif
tlb_remove_page(tlb, virt_to_page(pmd));
}
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 901177d75ff..305c68b8d53 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include <linux/pci.h>
+#include <linux/pci-acpi.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/dmi.h>
@@ -170,6 +171,16 @@ void pcibios_fixup_bus(struct pci_bus *b)
pcibios_fixup_device_resources(dev);
}
+void pcibios_add_bus(struct pci_bus *bus)
+{
+ acpi_pci_add_bus(bus);
+}
+
+void pcibios_remove_bus(struct pci_bus *bus)
+{
+ acpi_pci_remove_bus(bus);
+}
+
/*
* Only use DMI information to set this if nothing was passed
* on the kernel command line (which was parsed earlier).
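
pcibios_add_bus()/pcibios_remove_bus() are weak hooks provided by the PCI core so an architecture can react when a bus is added or removed; x86 now uses them to tie each bus to its ACPI companion via acpi_pci_add_bus()/acpi_pci_remove_bus(). A sketch of the override pattern for some other, hypothetical arch (the log lines are illustrative only):

#include <linux/pci.h>
#include <linux/printk.h>

/* Hypothetical arch override: just log bus creation and removal. */
void pcibios_add_bus(struct pci_bus *bus)
{
        pr_info("pci: bus %04x:%02x added\n",
                pci_domain_nr(bus), bus->number);
}

void pcibios_remove_bus(struct pci_bus *bus)
{
        pr_info("pci: bus %04x:%02x removed\n",
                pci_domain_nr(bus), bus->number);
}
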
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 94e76620460..4a9be6ddf05 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -177,7 +177,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
goto error;
i = 0;
list_for_each_entry(msidesc, &dev->msi_list, list) {
- irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0,
+ irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
(type == PCI_CAP_ID_MSIX) ?
"pcifront-msi-x" :
"pcifront-msi",
@@ -244,7 +244,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
dev_dbg(&dev->dev,
"xen: msi already bound to pirq=%d\n", pirq);
}
- irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0,
+ irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
(type == PCI_CAP_ID_MSIX) ?
"msi-x" : "msi",
DOMID_SELF);
@@ -326,7 +326,7 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
}
ret = xen_bind_pirq_msi_to_irq(dev, msidesc,
- map_irq.pirq, map_irq.index,
+ map_irq.pirq,
(type == PCI_CAP_ID_MSIX) ?
"msi-x" : "msi",
domid);
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 5f2ecaf3f9d..e4a86a677ce 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -41,6 +41,7 @@
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/bcd.h>
+#include <linux/ucs2_string.h>
#include <asm/setup.h>
#include <asm/efi.h>
@@ -51,6 +52,13 @@
#define EFI_DEBUG 1
+/*
+ * There's some additional metadata associated with each
+ * variable. Intel's reference implementation is 60 bytes - bump that
+ * to account for potential alignment constraints
+ */
+#define VAR_METADATA_SIZE 64
+
struct efi __read_mostly efi = {
.mps = EFI_INVALID_TABLE_ADDR,
.acpi = EFI_INVALID_TABLE_ADDR,
@@ -69,6 +77,13 @@ struct efi_memory_map memmap;
static struct efi efi_phys __initdata;
static efi_system_table_t efi_systab __initdata;
+static u64 efi_var_store_size;
+static u64 efi_var_remaining_size;
+static u64 efi_var_max_var_size;
+static u64 boot_used_size;
+static u64 boot_var_size;
+static u64 active_size;
+
unsigned long x86_efi_facility;
/*
@@ -98,6 +113,15 @@ static int __init setup_add_efi_memmap(char *arg)
}
early_param("add_efi_memmap", setup_add_efi_memmap);
+static bool efi_no_storage_paranoia;
+
+static int __init setup_storage_paranoia(char *arg)
+{
+ efi_no_storage_paranoia = true;
+ return 0;
+}
+early_param("efi_no_storage_paranoia", setup_storage_paranoia);
+
static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
@@ -162,8 +186,53 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
efi_char16_t *name,
efi_guid_t *vendor)
{
- return efi_call_virt3(get_next_variable,
- name_size, name, vendor);
+ efi_status_t status;
+ static bool finished = false;
+ static u64 var_size;
+
+ status = efi_call_virt3(get_next_variable,
+ name_size, name, vendor);
+
+ if (status == EFI_NOT_FOUND) {
+ finished = true;
+ if (var_size < boot_used_size) {
+ boot_var_size = boot_used_size - var_size;
+ active_size += boot_var_size;
+ } else {
+ printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n");
+ }
+ }
+
+ if (boot_used_size && !finished) {
+ unsigned long size;
+ u32 attr;
+ efi_status_t s;
+ void *tmp;
+
+ s = virt_efi_get_variable(name, vendor, &attr, &size, NULL);
+
+ if (s != EFI_BUFFER_TOO_SMALL || !size)
+ return status;
+
+ tmp = kmalloc(size, GFP_ATOMIC);
+
+ if (!tmp)
+ return status;
+
+ s = virt_efi_get_variable(name, vendor, &attr, &size, tmp);
+
+ if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) {
+ var_size += size;
+ var_size += ucs2_strsize(name, 1024);
+ active_size += size;
+ active_size += VAR_METADATA_SIZE;
+ active_size += ucs2_strsize(name, 1024);
+ }
+
+ kfree(tmp);
+ }
+
+ return status;
}
static efi_status_t virt_efi_set_variable(efi_char16_t *name,
@@ -172,9 +241,34 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
unsigned long data_size,
void *data)
{
- return efi_call_virt5(set_variable,
- name, vendor, attr,
- data_size, data);
+ efi_status_t status;
+ u32 orig_attr = 0;
+ unsigned long orig_size = 0;
+
+ status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size,
+ NULL);
+
+ if (status != EFI_BUFFER_TOO_SMALL)
+ orig_size = 0;
+
+ status = efi_call_virt5(set_variable,
+ name, vendor, attr,
+ data_size, data);
+
+ if (status == EFI_SUCCESS) {
+ if (orig_size) {
+ active_size -= orig_size;
+ active_size -= ucs2_strsize(name, 1024);
+ active_size -= VAR_METADATA_SIZE;
+ }
+ if (data_size) {
+ active_size += data_size;
+ active_size += ucs2_strsize(name, 1024);
+ active_size += VAR_METADATA_SIZE;
+ }
+ }
+
+ return status;
}
static efi_status_t virt_efi_query_variable_info(u32 attr,
@@ -682,6 +776,9 @@ void __init efi_init(void)
char vendor[100] = "unknown";
int i = 0;
void *tmp;
+ struct setup_data *data;
+ struct efi_var_bootdata *efi_var_data;
+ u64 pa_data;
#ifdef CONFIG_X86_32
if (boot_params.efi_info.efi_systab_hi ||
@@ -699,6 +796,22 @@ void __init efi_init(void)
if (efi_systab_init(efi_phys.systab))
return;
+ pa_data = boot_params.hdr.setup_data;
+ while (pa_data) {
+ data = early_ioremap(pa_data, sizeof(*efi_var_data));
+ if (data->type == SETUP_EFI_VARS) {
+ efi_var_data = (struct efi_var_bootdata *)data;
+
+ efi_var_store_size = efi_var_data->store_size;
+ efi_var_remaining_size = efi_var_data->remaining_size;
+ efi_var_max_var_size = efi_var_data->max_var_size;
+ }
+ pa_data = data->next;
+ early_iounmap(data, sizeof(*efi_var_data));
+ }
+
+ boot_used_size = efi_var_store_size - efi_var_remaining_size;
+
set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
/*
@@ -999,3 +1112,48 @@ u64 efi_mem_attributes(unsigned long phys_addr)
}
return 0;
}
+
+/*
+ * Some firmware has serious problems when using more than 50% of the EFI
+ * variable store, i.e. it triggers bugs that can brick machines. Ensure that
+ * we never use more than this safe limit.
+ *
+ * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable
+ * store.
+ */
+efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
+{
+ efi_status_t status;
+ u64 storage_size, remaining_size, max_size;
+
+ status = efi.query_variable_info(attributes, &storage_size,
+ &remaining_size, &max_size);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ if (!max_size && remaining_size > size)
+ printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
+ " is returning MaxVariableSize=0\n");
+ /*
+ * Some firmware implementations refuse to boot if there's insufficient
+ * space in the variable store. We account for that by refusing the
+ * write if permitting it would reduce the available space to under
+ * 50%. However, some firmware won't reclaim variable space until
+ * after the used (not merely the actively used) space drops below
+ * a threshold. We can approximate that case with the value calculated
+ * above. If both the firmware and our calculations indicate that the
+ * available space would drop below 50%, refuse the write.
+ */
+
+ if (!storage_size || size > remaining_size ||
+ (max_size && size > max_size))
+ return EFI_OUT_OF_RESOURCES;
+
+ if (!efi_no_storage_paranoia &&
+ ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) &&
+ (remaining_size - size < storage_size / 2)))
+ return EFI_OUT_OF_RESOURCES;
+
+ return EFI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(efi_query_variable_store);
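
As a worked example of the paranoia check above (all numbers hypothetical): with storage_size = 64 KiB, remaining_size = 40 KiB and active_size = 20 KiB, a 16 KiB SetVariable request passes the hard limits (16 KiB <= 40 KiB remaining), but active_size + size + VAR_METADATA_SIZE comes to roughly 36 KiB, which is above storage_size/2 = 32 KiB, and remaining_size - size = 24 KiB also falls below 32 KiB, so the write is refused with EFI_OUT_OF_RESOURCES unless efi_no_storage_paranoia was passed on the command line.
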
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 120cee1c3f8..3c68768d7a7 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -11,6 +11,7 @@
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
+#include <linux/perf_event.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
@@ -228,6 +229,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
do_fpu_end();
x86_platform.restore_sched_clock_state();
mtrr_bp_restore();
+ perf_restore_debug_store();
}
/* Needed by apm.c */
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c8e1c7b95c3..ddbd54a9b84 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -31,6 +31,7 @@
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
+#include <linux/edd.h>
#include <xen/xen.h>
#include <xen/events.h>
@@ -1306,6 +1307,55 @@ static const struct machine_ops xen_machine_ops __initconst = {
.emergency_restart = xen_emergency_restart,
};
+static void __init xen_boot_params_init_edd(void)
+{
+#if IS_ENABLED(CONFIG_EDD)
+ struct xen_platform_op op;
+ struct edd_info *edd_info;
+ u32 *mbr_signature;
+ unsigned nr;
+ int ret;
+
+ edd_info = boot_params.eddbuf;
+ mbr_signature = boot_params.edd_mbr_sig_buffer;
+
+ op.cmd = XENPF_firmware_info;
+
+ op.u.firmware_info.type = XEN_FW_DISK_INFO;
+ for (nr = 0; nr < EDDMAXNR; nr++) {
+ struct edd_info *info = edd_info + nr;
+
+ op.u.firmware_info.index = nr;
+ info->params.length = sizeof(info->params);
+ set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
+ &info->params);
+ ret = HYPERVISOR_dom0_op(&op);
+ if (ret)
+ break;
+
+#define C(x) info->x = op.u.firmware_info.u.disk_info.x
+ C(device);
+ C(version);
+ C(interface_support);
+ C(legacy_max_cylinder);
+ C(legacy_max_head);
+ C(legacy_sectors_per_track);
+#undef C
+ }
+ boot_params.eddbuf_entries = nr;
+
+ op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
+ for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
+ op.u.firmware_info.index = nr;
+ ret = HYPERVISOR_dom0_op(&op);
+ if (ret)
+ break;
+ mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
+ }
+ boot_params.edd_mbr_sig_buf_entries = nr;
+#endif
+}
+
/*
* Set up the GDT and segment registers for -fstack-protector. Until
* we do this, we have to be careful not to call any stack-protected
@@ -1508,6 +1558,8 @@ asmlinkage void __init xen_start_kernel(void)
/* Avoid searching for BIOS MP tables */
x86_init.mpparse.find_smp_config = x86_init_noop;
x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+
+ xen_boot_params_init_edd();
}
#ifdef CONFIG_PCI
/* PCI BIOS service won't work from a PV guest. */
@@ -1589,8 +1641,11 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
switch (action) {
case CPU_UP_PREPARE:
xen_vcpu_setup(cpu);
- if (xen_have_vector_callback)
+ if (xen_have_vector_callback) {
xen_init_lock_cpu(cpu);
+ if (xen_feature(XENFEAT_hvm_safe_pvclock))
+ xen_setup_timer(cpu);
+ }
break;
default:
break;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index e8e34938c57..e006c18d288 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1467,8 +1467,6 @@ static void __init xen_write_cr3_init(unsigned long cr3)
__xen_write_cr3(true, cr3);
xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
-
- pv_mmu_ops.write_cr3 = &xen_write_cr3;
}
#endif
@@ -1750,14 +1748,18 @@ static void *m2v(phys_addr_t maddr)
}
/* Set the page permissions on an identity-mapped pages */
-static void set_page_prot(void *addr, pgprot_t prot)
+static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
{
unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
pte_t pte = pfn_pte(pfn, prot);
- if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
+ if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
BUG();
}
+static void set_page_prot(void *addr, pgprot_t prot)
+{
+ return set_page_prot_flags(addr, prot, UVMF_NONE);
+}
#ifdef CONFIG_X86_32
static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
@@ -1841,12 +1843,12 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
unsigned long addr)
{
if (*pt_base == PFN_DOWN(__pa(addr))) {
- set_page_prot((void *)addr, PAGE_KERNEL);
+ set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
clear_page((void *)addr);
(*pt_base)++;
}
if (*pt_end == PFN_DOWN(__pa(addr))) {
- set_page_prot((void *)addr, PAGE_KERNEL);
+ set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
clear_page((void *)addr);
(*pt_end)--;
}
@@ -2122,6 +2124,7 @@ static void __init xen_post_allocator_init(void)
#endif
#ifdef CONFIG_X86_64
+ pv_mmu_ops.write_cr3 = &xen_write_cr3;
SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
xen_mark_init_mm_pinned();
@@ -2197,6 +2200,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.lazy_mode = {
.enter = paravirt_enter_lazy_mmu,
.leave = xen_leave_lazy_mmu,
+ .flush = paravirt_flush_lazy_mmu,
},
.set_fixmap = xen_set_fixmap,
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 09ea61d2e02..8ff37995d54 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -95,7 +95,7 @@ static void __cpuinit cpu_bringup(void)
static void __cpuinit cpu_bringup_and_idle(void)
{
cpu_bringup();
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
}
static int xen_smp_intr_init(unsigned int cpu)
@@ -144,6 +144,13 @@ static int xen_smp_intr_init(unsigned int cpu)
goto fail;
per_cpu(xen_callfuncsingle_irq, cpu) = rc;
+ /*
+ * The IRQ worker on PVHVM goes through the native path and uses the
+ * IPI mechanism.
+ */
+ if (xen_hvm_domain())
+ return 0;
+
callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
cpu,
@@ -167,6 +174,9 @@ static int xen_smp_intr_init(unsigned int cpu)
if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
NULL);
+ if (xen_hvm_domain())
+ return rc;
+
if (per_cpu(xen_irq_work, cpu) >= 0)
unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
@@ -418,7 +428,7 @@ static int xen_cpu_disable(void)
static void xen_cpu_die(unsigned int cpu)
{
- while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
+ while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ/10);
}
@@ -426,7 +436,8 @@ static void xen_cpu_die(unsigned int cpu)
unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
+ if (!xen_hvm_domain())
+ unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
xen_uninit_lock_cpu(cpu);
xen_teardown_timer(cpu);
}
@@ -657,11 +668,7 @@ static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
static void xen_hvm_cpu_die(unsigned int cpu)
{
- unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
+ xen_cpu_die(cpu);
native_cpu_die(cpu);
}
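
cpu_bringup_and_idle() switching from cpu_idle() to cpu_startup_entry(CPUHP_ONLINE) reflects the generic idle loop that architectures now share: per-arch code only supplies hooks such as arch_cpu_idle() (see the xtensa hunk further down), while the loop itself lives in common code. A hedged sketch of the resulting secondary-CPU entry shape; the function name and the setup placeholder are illustrative:

#include <linux/init.h>
#include <linux/cpu.h>

/* Entry point run by a freshly booted secondary CPU. */
static void __cpuinit example_secondary_start(void)
{
        /* ... arch/platform per-CPU setup (IRQs, timer, ...) ... */

        /* Hand the CPU to the common idle loop; this does not return. */
        cpu_startup_entry(CPUHP_ONLINE);
}
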
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index f7a080ef035..8b54603ce81 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -364,6 +364,16 @@ void __cpuinit xen_init_lock_cpu(int cpu)
int irq;
const char *name;
+ WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n",
+ cpu, per_cpu(lock_kicker_irq, cpu));
+
+ /*
+ * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
+ * (xen: disable PV spinlocks on HVM)
+ */
+ if (xen_hvm_domain())
+ return;
+
name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
cpu,
@@ -382,11 +392,26 @@ void __cpuinit xen_init_lock_cpu(int cpu)
void xen_uninit_lock_cpu(int cpu)
{
+ /*
+ * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
+ * (xen: disable PV spinlocks on HVM)
+ */
+ if (xen_hvm_domain())
+ return;
+
unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
+ per_cpu(lock_kicker_irq, cpu) = -1;
}
void __init xen_init_spinlocks(void)
{
+ /*
+ * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
+ * (xen: disable PV spinlocks on HVM)
+ */
+ if (xen_hvm_domain())
+ return;
+
BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));
pv_lock_ops.spin_is_locked = xen_spin_is_locked;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 0296a952250..3d88bfdf9e1 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -377,7 +377,7 @@ static const struct clock_event_device xen_vcpuop_clockevent = {
static const struct clock_event_device *xen_clockevent =
&xen_timerop_clockevent;
-static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events);
+static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events) = { .irq = -1 };
static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
@@ -401,6 +401,9 @@ void xen_setup_timer(int cpu)
struct clock_event_device *evt;
int irq;
+ evt = &per_cpu(xen_clock_events, cpu);
+ WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
+
printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
name = kasprintf(GFP_KERNEL, "timer%d", cpu);
@@ -413,7 +416,6 @@ void xen_setup_timer(int cpu)
IRQF_FORCE_RESUME,
name, NULL);
- evt = &per_cpu(xen_clock_events, cpu);
memcpy(evt, xen_clockevent, sizeof(*evt));
evt->cpumask = cpumask_of(cpu);
@@ -426,6 +428,7 @@ void xen_teardown_timer(int cpu)
BUG_ON(cpu == 0);
evt = &per_cpu(xen_clock_events, cpu);
unbind_from_irqhandler(evt->irq, NULL);
+ evt->irq = -1;
}
void xen_setup_cpu_clockevents(void)
@@ -497,7 +500,11 @@ static void xen_hvm_setup_cpu_clockevents(void)
{
int cpu = smp_processor_id();
xen_setup_runstate_info(cpu);
- xen_setup_timer(cpu);
+ /*
+ * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence
+ * it is done in xen_hvm_cpu_notify (which gets called by smp_init during
+ * early bootup and also during CPU hotplug events).

+ */
xen_setup_cpu_clockevents();
}
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 35876ffac11..b09de49dbec 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -9,7 +9,7 @@ config XTENSA
select HAVE_IDE
select GENERIC_ATOMIC64
select HAVE_GENERIC_HARDIRQS
- select HAVE_VIRT_TO_BUS
+ select VIRT_TO_BUS
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select MODULES_USE_ELF_RELA
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 5cd82e9f601..1c85323f01d 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -105,19 +105,9 @@ void coprocessor_flush_all(struct thread_info *ti)
/*
* Powermanagement idle function, if any is provided by the platform.
*/
-
-void cpu_idle(void)
+void arch_cpu_idle(void)
{
- local_irq_enable();
-
- /* endless idle loop with no priority at all */
- while (1) {
- rcu_idle_enter();
- while (!need_resched())
- platform_idle();
- rcu_idle_exit();
- schedule_preempt_disabled();
- }
+ platform_idle();
}
/*
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 7a5156ffebb..bba125b4bb0 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -208,32 +208,17 @@ void __init mem_init(void)
highmemsize >> 10);
}
-void
-free_reserved_mem(void *start, void *end)
-{
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- init_page_count(virt_to_page(start));
- free_page((unsigned long)start);
- totalram_pages++;
- }
-}
-
#ifdef CONFIG_BLK_DEV_INITRD
extern int initrd_is_mapped;
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (initrd_is_mapped) {
- free_reserved_mem((void*)start, (void*)end);
- printk ("Freeing initrd memory: %ldk freed\n",(end-start)>>10);
- }
+ if (initrd_is_mapped)
+ free_reserved_area(start, end, 0, "initrd");
}
#endif
void free_initmem(void)
{
- free_reserved_mem(__init_begin, __init_end);
- printk("Freeing unused kernel memory: %zuk freed\n",
- (__init_end - __init_begin) >> 10);
+ free_initmem_default(0);
}
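
Both xtensa hunks move to the generic reclaim helpers: free_reserved_area() releases an arbitrary reserved virtual range (and prints the familiar "Freeing ... memory: ...K" line itself, which is why the per-arch printk goes away), while free_initmem_default() applies the same treatment to the __init section. A sketch of the resulting arch glue, mirroring the shape of the patch rather than any particular tree:

#include <linux/mm.h>
#include <linux/init.h>

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area(start, end, 0, "initrd");
}
#endif

void free_initmem(void)
{
        free_initmem_default(0);
}
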