Diffstat (limited to 'arch')
-rw-r--r--arch/Kconfig14
-rw-r--r--arch/alpha/Kconfig4
-rw-r--r--arch/alpha/kernel/pci_iommu.c4
-rw-r--r--arch/alpha/kernel/ptrace.c7
-rw-r--r--arch/arm/Kconfig41
-rw-r--r--arch/arm/common/gic.c28
-rw-r--r--arch/arm/include/asm/hardware/it8152.h2
-rw-r--r--arch/arm/include/asm/kgdb.h5
-rw-r--r--arch/arm/include/asm/memblock.h7
-rw-r--r--arch/arm/kernel/hw_breakpoint.c3
-rw-r--r--arch/arm/kernel/kgdb.c2
-rw-r--r--arch/arm/kernel/perf_event.c2
-rw-r--r--arch/arm/kernel/ptrace.c28
-rw-r--r--arch/arm/kernel/stacktrace.c2
-rw-r--r--arch/arm/kernel/traps.c5
-rw-r--r--arch/arm/kernel/unwind.c2
-rw-r--r--arch/arm/kernel/vmlinux.lds.S1
-rw-r--r--arch/arm/mach-ep93xx/include/mach/dma.h111
-rw-r--r--arch/arm/mach-imx/include/mach/dma-v1.h8
-rw-r--r--arch/arm/mach-ixp2000/core.c2
-rw-r--r--arch/arm/mach-kirkwood/common.c7
-rw-r--r--arch/arm/mach-kirkwood/d2net_v2-setup.c2
-rw-r--r--arch/arm/mach-kirkwood/lacie_v2-common.c14
-rw-r--r--arch/arm/mach-kirkwood/lacie_v2-common.h2
-rw-r--r--arch/arm/mach-kirkwood/mpp.c4
-rw-r--r--arch/arm/mach-kirkwood/netspace_v2-setup.c6
-rw-r--r--arch/arm/mach-kirkwood/netxbig_v2-setup.c4
-rw-r--r--arch/arm/mach-kirkwood/ts41x-setup.c14
-rw-r--r--arch/arm/mach-mmp/include/mach/cputype.h3
-rw-r--r--arch/arm/mach-msm/Kconfig9
-rw-r--r--arch/arm/mach-msm/board-halibut.c1
-rw-r--r--arch/arm/mach-msm/include/mach/debug-macro.S15
-rw-r--r--arch/arm/mach-msm/iommu_dev.c22
-rw-r--r--arch/arm/mach-msm/timer.c2
-rw-r--r--arch/arm/mach-mv78xx0/mpp.c4
-rw-r--r--arch/arm/mach-mx3/mach-pcm037.c2
-rw-r--r--arch/arm/mach-mx3/mx31moboard-marxbot.c1
-rw-r--r--arch/arm/mach-mx3/mx31moboard-smartbot.c1
-rw-r--r--arch/arm/mach-omap1/devices.c5
-rw-r--r--arch/arm/mach-omap1/include/mach/camera.h2
-rw-r--r--arch/arm/mach-omap2/Makefile4
-rw-r--r--arch/arm/mach-omap2/board-4430sdp.c7
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c3
-rw-r--r--arch/arm/mach-omap2/board-omap4panda.c15
-rw-r--r--arch/arm/mach-omap2/dsp.c85
-rw-r--r--arch/arm/mach-orion5x/mpp.c4
-rw-r--r--arch/arm/mach-orion5x/ts78xx-setup.c2
-rw-r--r--arch/arm/mach-pxa/cm-x2xx.c2
-rw-r--r--arch/arm/mach-pxa/em-x270.c1
-rw-r--r--arch/arm/mach-pxa/ezx.c2
-rw-r--r--arch/arm/mach-pxa/mioa701.c1
-rw-r--r--arch/arm/mach-pxa/pcm990-baseboard.c2
-rw-r--r--arch/arm/mach-pxa/saar.c2
-rw-r--r--arch/arm/mach-s3c2410/h1940-bluetooth.c13
-rw-r--r--arch/arm/mach-s3c2410/include/mach/h1940-latch.h57
-rw-r--r--arch/arm/mach-s3c2410/mach-h1940.c169
-rw-r--r--arch/arm/mach-s3c2440/Kconfig7
-rw-r--r--arch/arm/mach-s3c2440/mach-rx1950.c218
-rw-r--r--arch/arm/mach-s3c64xx/Kconfig3
-rw-r--r--arch/arm/mach-sa1100/cpu-sa1100.c5
-rw-r--r--arch/arm/mach-shmobile/Kconfig2
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c387
-rw-r--r--arch/arm/mach-shmobile/clock-sh7367.c2
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c180
-rw-r--r--arch/arm/mach-shmobile/clock-sh7377.c2
-rw-r--r--arch/arm/mach-shmobile/include/mach/gpio.h4
-rw-r--r--arch/arm/mach-shmobile/include/mach/sh7372.h12
-rw-r--r--arch/arm/mach-shmobile/intc-sh7372.c30
-rw-r--r--arch/arm/mach-shmobile/pfc-sh7372.c8
-rw-r--r--arch/arm/mach-shmobile/setup-sh7367.c1
-rw-r--r--arch/arm/mach-shmobile/setup-sh7372.c94
-rw-r--r--arch/arm/mach-shmobile/setup-sh7377.c1
-rw-r--r--arch/arm/mach-tegra/timer.c1
-rw-r--r--arch/arm/mach-u300/clock.c6
-rw-r--r--arch/arm/mach-u300/core.c47
-rw-r--r--arch/arm/mach-u300/include/mach/u300-regs.h2
-rw-r--r--arch/arm/mach-u300/spi.c2
-rw-r--r--arch/arm/mach-ux500/devices-db8500.c13
-rw-r--r--arch/arm/mach-vexpress/ct-ca9x4.c2
-rw-r--r--arch/arm/mm/cache-fa.S12
-rw-r--r--arch/arm/mm/cache-v3.S10
-rw-r--r--arch/arm/mm/cache-v4.S10
-rw-r--r--arch/arm/mm/cache-v4wb.S12
-rw-r--r--arch/arm/mm/cache-v4wt.S12
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/mm/fault-armv.c28
-rw-r--r--arch/arm/mm/highmem.c3
-rw-r--r--arch/arm/mm/init.c155
-rw-r--r--arch/arm/mm/mmu.c73
-rw-r--r--arch/arm/mm/proc-arm1020.S15
-rw-r--r--arch/arm/mm/proc-arm1020e.S15
-rw-r--r--arch/arm/mm/proc-arm1022.S15
-rw-r--r--arch/arm/mm/proc-arm1026.S15
-rw-r--r--arch/arm/mm/proc-arm920.S12
-rw-r--r--arch/arm/mm/proc-arm922.S12
-rw-r--r--arch/arm/mm/proc-arm925.S12
-rw-r--r--arch/arm/mm/proc-arm926.S12
-rw-r--r--arch/arm/mm/proc-arm940.S12
-rw-r--r--arch/arm/mm/proc-arm946.S12
-rw-r--r--arch/arm/mm/proc-feroceon.S13
-rw-r--r--arch/arm/mm/proc-xsc3.S12
-rw-r--r--arch/arm/mm/proc-xscale.S12
-rw-r--r--arch/arm/plat-mxc/include/mach/dma.h67
-rw-r--r--arch/arm/plat-mxc/include/mach/sdma.h17
-rw-r--r--arch/arm/plat-nomadik/include/plat/ste_dma40.h135
-rw-r--r--arch/arm/plat-omap/common.c2
-rw-r--r--arch/arm/plat-omap/devices.c72
-rw-r--r--arch/arm/plat-omap/dma.c2
-rw-r--r--arch/arm/plat-omap/include/plat/dsp.h31
-rw-r--r--arch/arm/plat-orion/include/plat/pcie.h3
-rw-r--r--arch/arm/plat-orion/pcie.c5
-rw-r--r--arch/arm/plat-pxa/include/plat/pxa3xx_nand.h18
-rw-r--r--arch/arm/plat-pxa/include/plat/sdhci.h32
-rw-r--r--arch/arm/plat-s3c24xx/Kconfig1
-rw-r--r--arch/arm/plat-s3c24xx/gpiolib.c2
-rw-r--r--arch/avr32/Kconfig7
-rw-r--r--arch/avr32/kernel/ptrace.c11
-rw-r--r--arch/blackfin/Kconfig7
-rw-r--r--arch/blackfin/kernel/kgdb.c3
-rw-r--r--arch/blackfin/kernel/process.c1
-rw-r--r--arch/blackfin/kernel/ptrace.c16
-rw-r--r--arch/cris/Kconfig7
-rw-r--r--arch/cris/arch-v10/kernel/ptrace.c20
-rw-r--r--arch/cris/arch-v32/kernel/ptrace.c16
-rw-r--r--arch/frv/Kconfig6
-rw-r--r--arch/frv/kernel/process.c1
-rw-r--r--arch/frv/kernel/ptrace.c32
-rw-r--r--arch/frv/mm/highmem.c3
-rw-r--r--arch/h8300/Kconfig7
-rw-r--r--arch/h8300/kernel/process.c1
-rw-r--r--arch/h8300/kernel/ptrace.c33
-rw-r--r--arch/ia64/Kconfig7
-rw-r--r--arch/ia64/hp/sim/simscsi.c4
-rw-r--r--arch/ia64/include/asm/cputime.h6
-rw-r--r--arch/ia64/kernel/perfmon.c9
-rw-r--r--arch/ia64/kernel/ptrace.c3
-rw-r--r--arch/m32r/Kconfig7
-rw-r--r--arch/m32r/kernel/ptrace.c11
-rw-r--r--arch/m68k/Kconfig6
-rw-r--r--arch/m68k/include/asm/irqflags.h2
-rw-r--r--arch/m68k/include/asm/machdep.h1
-rw-r--r--arch/m68k/kernel/process.c1
-rw-r--r--arch/m68k/kernel/ptrace.c51
-rw-r--r--arch/m68knommu/Kconfig7
-rw-r--r--arch/m68knommu/kernel/process.c1
-rw-r--r--arch/m68knommu/kernel/ptrace.c57
-rw-r--r--arch/microblaze/Kconfig5
-rw-r--r--arch/microblaze/kernel/ptrace.c5
-rw-r--r--arch/mips/Kconfig81
-rw-r--r--arch/mips/Kconfig.debug9
-rw-r--r--arch/mips/Makefile4
-rw-r--r--arch/mips/ar7/gpio.c243
-rw-r--r--arch/mips/ar7/platform.c56
-rw-r--r--arch/mips/ar7/prom.c2
-rw-r--r--arch/mips/ar7/setup.c14
-rw-r--r--arch/mips/bcm63xx/cpu.c30
-rw-r--r--arch/mips/cavium-octeon/Kconfig23
-rw-r--r--arch/mips/cavium-octeon/csrc-octeon.c34
-rw-r--r--arch/mips/cavium-octeon/dma-octeon.c581
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-l2c.c810
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c112
-rw-r--r--arch/mips/cavium-octeon/serial.c2
-rw-r--r--arch/mips/cavium-octeon/setup.c120
-rw-r--r--arch/mips/include/asm/atomic.h208
-rw-r--r--arch/mips/include/asm/bitops.h270
-rw-r--r--arch/mips/include/asm/bootinfo.h12
-rw-r--r--arch/mips/include/asm/cmpxchg.h7
-rw-r--r--arch/mips/include/asm/cpu.h26
-rw-r--r--arch/mips/include/asm/device.h15
-rw-r--r--arch/mips/include/asm/dma-mapping.h96
-rw-r--r--arch/mips/include/asm/dma.h3
-rw-r--r--arch/mips/include/asm/local.h2
-rw-r--r--arch/mips/include/asm/mach-ar7/ar7.h47
-rw-r--r--arch/mips/include/asm/mach-ar7/gpio.h3
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h97
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h8
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h24
-rw-r--r--arch/mips/include/asm/mach-ip27/dma-coherence.h5
-rw-r--r--arch/mips/include/asm/mach-ip32/dma-coherence.h5
-rw-r--r--arch/mips/include/asm/mach-jazz/dma-coherence.h9
-rw-r--r--arch/mips/include/asm/mipsregs.h51
-rw-r--r--arch/mips/include/asm/octeon/cvmx-agl-defs.h616
-rw-r--r--arch/mips/include/asm/octeon/cvmx-asm.h11
-rw-r--r--arch/mips/include/asm/octeon/cvmx-ciu-defs.h857
-rw-r--r--arch/mips/include/asm/octeon/cvmx-gpio-defs.h74
-rw-r--r--arch/mips/include/asm/octeon/cvmx-iob-defs.h242
-rw-r--r--arch/mips/include/asm/octeon/cvmx-ipd-defs.h314
-rw-r--r--arch/mips/include/asm/octeon/cvmx-l2c-defs.h738
-rw-r--r--arch/mips/include/asm/octeon/cvmx-l2c.h225
-rw-r--r--arch/mips/include/asm/octeon/cvmx-l2d-defs.h38
-rw-r--r--arch/mips/include/asm/octeon/cvmx-l2t-defs.h5
-rw-r--r--arch/mips/include/asm/octeon/cvmx-led-defs.h41
-rw-r--r--arch/mips/include/asm/octeon/cvmx-mio-defs.h807
-rw-r--r--arch/mips/include/asm/octeon/cvmx-mixx-defs.h200
-rw-r--r--arch/mips/include/asm/octeon/cvmx-npei-defs.h681
-rw-r--r--arch/mips/include/asm/octeon/cvmx-npi-defs.h362
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pci-defs.h265
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pciercx-defs.h435
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pescx-defs.h50
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pexp-defs.h378
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pow-defs.h157
-rw-r--r--arch/mips/include/asm/octeon/cvmx-rnm-defs.h67
-rw-r--r--arch/mips/include/asm/octeon/cvmx-smix-defs.h46
-rw-r--r--arch/mips/include/asm/octeon/cvmx-uctlx-defs.h261
-rw-r--r--arch/mips/include/asm/octeon/octeon-model.h36
-rw-r--r--arch/mips/include/asm/octeon/octeon.h1
-rw-r--r--arch/mips/include/asm/octeon/pci-octeon.h10
-rw-r--r--arch/mips/include/asm/pci/bridge.h2
-rw-r--r--arch/mips/include/asm/perf_event.h25
-rw-r--r--arch/mips/include/asm/pgtable-64.h4
-rw-r--r--arch/mips/include/asm/processor.h40
-rw-r--r--arch/mips/include/asm/system.h52
-rw-r--r--arch/mips/include/asm/thread_info.h2
-rw-r--r--arch/mips/include/asm/uaccess.h4
-rw-r--r--arch/mips/kernel/Makefile2
-rw-r--r--arch/mips/kernel/cpu-probe.c82
-rw-r--r--arch/mips/kernel/irq.c24
-rw-r--r--arch/mips/kernel/perf_event.c601
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c1052
-rw-r--r--arch/mips/kernel/ptrace.c25
-rw-r--r--arch/mips/kernel/setup.c1
-rw-r--r--arch/mips/kernel/traps.c31
-rw-r--r--arch/mips/kernel/unaligned.c7
-rw-r--r--arch/mips/loongson/Kconfig2
-rw-r--r--arch/mips/math-emu/cp1emu.c3
-rw-r--r--arch/mips/mm/c-octeon.c16
-rw-r--r--arch/mips/mm/c-r4k.c21
-rw-r--r--arch/mips/mm/dma-default.c165
-rw-r--r--arch/mips/mm/fault.c11
-rw-r--r--arch/mips/mm/highmem.c3
-rw-r--r--arch/mips/mm/sc-mips.c34
-rw-r--r--arch/mips/mm/tlbex.c11
-rw-r--r--arch/mips/mm/uasm.c20
-rw-r--r--arch/mips/pci/pci-octeon.c60
-rw-r--r--arch/mips/pci/pcie-octeon.c5
-rw-r--r--arch/mn10300/Kconfig286
-rw-r--r--arch/mn10300/Makefile6
-rw-r--r--arch/mn10300/boot/compressed/head.S69
-rw-r--r--arch/mn10300/configs/asb2303_defconfig2
-rw-r--r--arch/mn10300/configs/asb2364_defconfig98
-rw-r--r--arch/mn10300/include/asm/atomic.h350
-rw-r--r--arch/mn10300/include/asm/bitops.h14
-rw-r--r--arch/mn10300/include/asm/cache.h14
-rw-r--r--arch/mn10300/include/asm/cacheflush.h164
-rw-r--r--arch/mn10300/include/asm/cpu-regs.h91
-rw-r--r--arch/mn10300/include/asm/dmactl-regs.h87
-rw-r--r--arch/mn10300/include/asm/elf.h12
-rw-r--r--arch/mn10300/include/asm/exceptions.h8
-rw-r--r--arch/mn10300/include/asm/fpu.h157
-rw-r--r--arch/mn10300/include/asm/frame.inc20
-rw-r--r--arch/mn10300/include/asm/gdb-stub.h2
-rw-r--r--arch/mn10300/include/asm/hardirq.h3
-rw-r--r--arch/mn10300/include/asm/highmem.h8
-rw-r--r--arch/mn10300/include/asm/intctl-regs.h37
-rw-r--r--arch/mn10300/include/asm/io.h13
-rw-r--r--arch/mn10300/include/asm/irq.h12
-rw-r--r--arch/mn10300/include/asm/irq_regs.h6
-rw-r--r--arch/mn10300/include/asm/irqflags.h111
-rw-r--r--arch/mn10300/include/asm/mmu_context.h73
-rw-r--r--arch/mn10300/include/asm/pgalloc.h1
-rw-r--r--arch/mn10300/include/asm/pgtable.h88
-rw-r--r--arch/mn10300/include/asm/processor.h66
-rw-r--r--arch/mn10300/include/asm/ptrace.h13
-rw-r--r--arch/mn10300/include/asm/reset-regs.h2
-rw-r--r--arch/mn10300/include/asm/rtc.h11
-rw-r--r--arch/mn10300/include/asm/rwlock.h125
-rw-r--r--arch/mn10300/include/asm/serial-regs.h51
-rw-r--r--arch/mn10300/include/asm/serial.h8
-rw-r--r--arch/mn10300/include/asm/smp.h91
-rw-r--r--arch/mn10300/include/asm/smsc911x.h1
-rw-r--r--arch/mn10300/include/asm/spinlock.h179
-rw-r--r--arch/mn10300/include/asm/spinlock_types.h20
-rw-r--r--arch/mn10300/include/asm/system.h73
-rw-r--r--arch/mn10300/include/asm/thread_info.h18
-rw-r--r--arch/mn10300/include/asm/timer-regs.h191
-rw-r--r--arch/mn10300/include/asm/timex.h20
-rw-r--r--arch/mn10300/include/asm/tlbflush.h174
-rw-r--r--arch/mn10300/include/asm/uaccess.h6
-rw-r--r--arch/mn10300/kernel/Makefile15
-rw-r--r--arch/mn10300/kernel/asm-offsets.c11
-rw-r--r--arch/mn10300/kernel/cevt-mn10300.c131
-rw-r--r--arch/mn10300/kernel/csrc-mn10300.c35
-rw-r--r--arch/mn10300/kernel/entry.S146
-rw-r--r--arch/mn10300/kernel/fpu-low.S265
-rw-r--r--arch/mn10300/kernel/fpu-nofpu-low.S39
-rw-r--r--arch/mn10300/kernel/fpu-nofpu.c30
-rw-r--r--arch/mn10300/kernel/fpu.c141
-rw-r--r--arch/mn10300/kernel/gdb-io-serial-low.S5
-rw-r--r--arch/mn10300/kernel/gdb-io-serial.c37
-rw-r--r--arch/mn10300/kernel/gdb-io-ttysm.c24
-rw-r--r--arch/mn10300/kernel/gdb-stub.c17
-rw-r--r--arch/mn10300/kernel/head.S202
-rw-r--r--arch/mn10300/kernel/internal.h25
-rw-r--r--arch/mn10300/kernel/irq.c276
-rw-r--r--arch/mn10300/kernel/kprobes.c4
-rw-r--r--arch/mn10300/kernel/mn10300-serial-low.S6
-rw-r--r--arch/mn10300/kernel/mn10300-serial.c210
-rw-r--r--arch/mn10300/kernel/mn10300-watchdog-low.S9
-rw-r--r--arch/mn10300/kernel/mn10300-watchdog.c100
-rw-r--r--arch/mn10300/kernel/process.c62
-rw-r--r--arch/mn10300/kernel/profile.c2
-rw-r--r--arch/mn10300/kernel/ptrace.c20
-rw-r--r--arch/mn10300/kernel/rtc.c41
-rw-r--r--arch/mn10300/kernel/setup.c79
-rw-r--r--arch/mn10300/kernel/signal.c20
-rw-r--r--arch/mn10300/kernel/smp-low.S97
-rw-r--r--arch/mn10300/kernel/smp.c1152
-rw-r--r--arch/mn10300/kernel/switch_to.S7
-rw-r--r--arch/mn10300/kernel/time.c112
-rw-r--r--arch/mn10300/kernel/traps.c42
-rw-r--r--arch/mn10300/lib/bitops.c4
-rw-r--r--arch/mn10300/lib/delay.c8
-rw-r--r--arch/mn10300/lib/do_csum.S49
-rw-r--r--arch/mn10300/mm/Kconfig.cache101
-rw-r--r--arch/mn10300/mm/Makefile14
-rw-r--r--arch/mn10300/mm/cache-flush-by-reg.S308
-rw-r--r--arch/mn10300/mm/cache-flush-by-tag.S251
-rw-r--r--arch/mn10300/mm/cache-flush-icache.c155
-rw-r--r--arch/mn10300/mm/cache-flush-mn10300.S192
-rw-r--r--arch/mn10300/mm/cache-inv-by-reg.S356
-rw-r--r--arch/mn10300/mm/cache-inv-by-tag.S348
-rw-r--r--arch/mn10300/mm/cache-inv-icache.c129
-rw-r--r--arch/mn10300/mm/cache-mn10300.S289
-rw-r--r--arch/mn10300/mm/cache-smp-flush.c156
-rw-r--r--arch/mn10300/mm/cache-smp-inv.c153
-rw-r--r--arch/mn10300/mm/cache-smp.c105
-rw-r--r--arch/mn10300/mm/cache-smp.h69
-rw-r--r--arch/mn10300/mm/cache.c95
-rw-r--r--arch/mn10300/mm/fault.c17
-rw-r--r--arch/mn10300/mm/init.c26
-rw-r--r--arch/mn10300/mm/misalignment.c3
-rw-r--r--arch/mn10300/mm/mmu-context.c41
-rw-r--r--arch/mn10300/mm/pgtable.c2
-rw-r--r--arch/mn10300/mm/tlb-mn10300.S59
-rw-r--r--arch/mn10300/mm/tlb-smp.c214
-rw-r--r--arch/mn10300/proc-mn103e010/include/proc/cache.h9
-rw-r--r--arch/mn10300/proc-mn103e010/include/proc/clock.h2
-rw-r--r--arch/mn10300/proc-mn103e010/include/proc/dmactl-regs.h102
-rw-r--r--arch/mn10300/proc-mn103e010/include/proc/intctl-regs.h29
-rw-r--r--arch/mn10300/proc-mn103e010/include/proc/proc.h2
-rw-r--r--arch/mn10300/proc-mn103e010/proc-init.c37
-rw-r--r--arch/mn10300/proc-mn2ws0050/Makefile5
-rw-r--r--arch/mn10300/proc-mn2ws0050/include/proc/cache.h48
-rw-r--r--arch/mn10300/proc-mn2ws0050/include/proc/clock.h20
-rw-r--r--arch/mn10300/proc-mn2ws0050/include/proc/dmactl-regs.h103
-rw-r--r--arch/mn10300/proc-mn2ws0050/include/proc/intctl-regs.h29
-rw-r--r--arch/mn10300/proc-mn2ws0050/include/proc/irq.h49
-rw-r--r--arch/mn10300/proc-mn2ws0050/include/proc/nand-regs.h120
-rw-r--r--arch/mn10300/proc-mn2ws0050/include/proc/proc.h18
-rw-r--r--arch/mn10300/proc-mn2ws0050/include/proc/smp-regs.h51
-rw-r--r--arch/mn10300/proc-mn2ws0050/proc-init.c134
-rw-r--r--arch/mn10300/unit-asb2303/include/unit/clock.h25
-rw-r--r--arch/mn10300/unit-asb2303/include/unit/serial.h5
-rw-r--r--arch/mn10300/unit-asb2303/include/unit/timex.h109
-rw-r--r--arch/mn10300/unit-asb2303/unit-init.c10
-rw-r--r--arch/mn10300/unit-asb2305/include/unit/clock.h25
-rw-r--r--arch/mn10300/unit-asb2305/include/unit/serial.h5
-rw-r--r--arch/mn10300/unit-asb2305/include/unit/timex.h109
-rw-r--r--arch/mn10300/unit-asb2305/pci-asb2305.c16
-rw-r--r--arch/mn10300/unit-asb2305/pci.c2
-rw-r--r--arch/mn10300/unit-asb2305/unit-init.c6
-rw-r--r--arch/mn10300/unit-asb2364/Makefile12
-rw-r--r--arch/mn10300/unit-asb2364/include/unit/clock.h29
-rw-r--r--arch/mn10300/unit-asb2364/include/unit/fpga-regs.h52
-rw-r--r--arch/mn10300/unit-asb2364/include/unit/irq.h35
-rw-r--r--arch/mn10300/unit-asb2364/include/unit/leds.h54
-rw-r--r--arch/mn10300/unit-asb2364/include/unit/serial.h151
-rw-r--r--arch/mn10300/unit-asb2364/include/unit/smsc911x.h171
-rw-r--r--arch/mn10300/unit-asb2364/include/unit/timex.h159
-rw-r--r--arch/mn10300/unit-asb2364/irq-fpga.c96
-rw-r--r--arch/mn10300/unit-asb2364/leds.c98
-rw-r--r--arch/mn10300/unit-asb2364/smsc911x.c58
-rw-r--r--arch/mn10300/unit-asb2364/unit-init.c88
-rw-r--r--arch/parisc/Kconfig11
-rw-r--r--arch/parisc/hpux/sys_hpux.c1
-rw-r--r--arch/parisc/include/asm/cache.h2
-rw-r--r--arch/parisc/include/asm/cacheflush.h8
-rw-r--r--arch/parisc/include/asm/irq.h2
-rw-r--r--arch/parisc/include/asm/unistd.h3
-rw-r--r--arch/parisc/kernel/irq.c41
-rw-r--r--arch/parisc/kernel/pdc_cons.c141
-rw-r--r--arch/parisc/kernel/ptrace.c13
-rw-r--r--arch/parisc/kernel/sys_parisc32.c1
-rw-r--r--arch/parisc/kernel/syscall_table.S1
-rw-r--r--arch/parisc/kernel/unaligned.c3
-rw-r--r--arch/parisc/kernel/unwind.c5
-rw-r--r--arch/parisc/math-emu/Makefile2
-rw-r--r--arch/powerpc/Kconfig17
-rw-r--r--arch/powerpc/boot/div64.S3
-rw-r--r--arch/powerpc/include/asm/cputime.h12
-rw-r--r--arch/powerpc/include/asm/fsl_lbc.h34
-rw-r--r--arch/powerpc/include/asm/fsldma.h137
-rw-r--r--arch/powerpc/include/asm/kgdb.h1
-rw-r--r--arch/powerpc/kernel/kgdb.c188
-rw-r--r--arch/powerpc/kernel/kvm.c2
-rw-r--r--arch/powerpc/kernel/ptrace.c66
-rw-r--r--arch/powerpc/kernel/setup_64.c5
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c1
-rw-r--r--arch/powerpc/kvm/booke_interrupts.S2
-rw-r--r--arch/powerpc/kvm/e500.c2
-rw-r--r--arch/powerpc/kvm/powerpc.c1
-rw-r--r--arch/powerpc/kvm/timing.c2
-rw-r--r--arch/powerpc/mm/hash_utils_64.c2
-rw-r--r--arch/powerpc/mm/highmem.c4
-rw-r--r--arch/powerpc/mm/tlb_low_64e.S5
-rw-r--r--arch/powerpc/mm/tlb_nohash.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c10
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig6
-rw-r--r--arch/powerpc/platforms/pseries/eeh.c2
-rw-r--r--arch/powerpc/platforms/pseries/pci_dlpar.c2
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c244
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c76
-rw-r--r--arch/s390/Kconfig68
-rw-r--r--arch/s390/Kconfig.debug12
-rw-r--r--arch/s390/hypfs/hypfs_diag.c19
-rw-r--r--arch/s390/hypfs/inode.c8
-rw-r--r--arch/s390/include/asm/cputime.h10
-rw-r--r--arch/s390/include/asm/dasd.h40
-rw-r--r--arch/s390/include/asm/page.h5
-rw-r--r--arch/s390/kernel/asm-offsets.c6
-rw-r--r--arch/s390/kernel/compat_linux.c1
-rw-r--r--arch/s390/kernel/early.c2
-rw-r--r--arch/s390/kernel/entry.S1
-rw-r--r--arch/s390/kernel/entry64.S1
-rw-r--r--arch/s390/kernel/kprobes.c67
-rw-r--r--arch/s390/kernel/ptrace.c3
-rw-r--r--arch/s390/kernel/setup.c3
-rw-r--r--arch/s390/kernel/sysinfo.c2
-rw-r--r--arch/s390/kernel/topology.c6
-rw-r--r--arch/s390/kernel/vdso32/clock_getres.S6
-rw-r--r--arch/s390/kernel/vdso32/clock_gettime.S4
-rw-r--r--arch/s390/kernel/vdso64/clock_getres.S6
-rw-r--r--arch/s390/kernel/vdso64/clock_gettime.S4
-rw-r--r--arch/s390/mm/gup.c7
-rw-r--r--arch/score/Kconfig5
-rw-r--r--arch/score/kernel/ptrace.c7
-rw-r--r--arch/sh/Kconfig22
-rw-r--r--arch/sh/Makefile3
-rw-r--r--arch/sh/boards/Kconfig7
-rw-r--r--arch/sh/boards/Makefile2
-rw-r--r--arch/sh/boards/board-edosk7705.c78
-rw-r--r--arch/sh/boards/board-secureedge5410.c (renamed from arch/sh/boards/mach-snapgear/setup.c)38
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c30
-rw-r--r--arch/sh/boards/mach-cayman/irq.c16
-rw-r--r--arch/sh/boards/mach-dreamcast/irq.c17
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c64
-rw-r--r--arch/sh/boards/mach-edosk7705/Makefile5
-rw-r--r--arch/sh/boards/mach-edosk7705/io.c71
-rw-r--r--arch/sh/boards/mach-edosk7705/setup.c36
-rw-r--r--arch/sh/boards/mach-kfr2r09/setup.c30
-rw-r--r--arch/sh/boards/mach-landisk/irq.c15
-rw-r--r--arch/sh/boards/mach-microdev/io.c246
-rw-r--r--arch/sh/boards/mach-microdev/irq.c30
-rw-r--r--arch/sh/boards/mach-microdev/setup.c23
-rw-r--r--arch/sh/boards/mach-migor/setup.c60
-rw-r--r--arch/sh/boards/mach-se/7206/Makefile2
-rw-r--r--arch/sh/boards/mach-se/7206/io.c104
-rw-r--r--arch/sh/boards/mach-se/7206/irq.c24
-rw-r--r--arch/sh/boards/mach-se/7206/setup.c15
-rw-r--r--arch/sh/boards/mach-se/7343/irq.c15
-rw-r--r--arch/sh/boards/mach-se/770x/Makefile2
-rw-r--r--arch/sh/boards/mach-se/770x/io.c156
-rw-r--r--arch/sh/boards/mach-se/770x/setup.c22
-rw-r--r--arch/sh/boards/mach-se/7722/irq.c15
-rw-r--r--arch/sh/boards/mach-se/7724/irq.c13
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c55
-rw-r--r--arch/sh/boards/mach-se/7751/Makefile2
-rw-r--r--arch/sh/boards/mach-se/7751/io.c119
-rw-r--r--arch/sh/boards/mach-se/7751/setup.c18
-rw-r--r--arch/sh/boards/mach-snapgear/Makefile5
-rw-r--r--arch/sh/boards/mach-snapgear/io.c121
-rw-r--r--arch/sh/boards/mach-systemh/Makefile13
-rw-r--r--arch/sh/boards/mach-systemh/io.c158
-rw-r--r--arch/sh/boards/mach-systemh/irq.c76
-rw-r--r--arch/sh/boards/mach-systemh/setup.c57
-rw-r--r--arch/sh/boards/mach-x3proto/gpio.c7
-rw-r--r--arch/sh/cchips/hd6446x/hd64461.c19
-rw-r--r--arch/sh/configs/secureedge5410_defconfig (renamed from arch/sh/configs/snapgear_defconfig)0
-rw-r--r--arch/sh/configs/systemh_defconfig28
-rw-r--r--arch/sh/include/asm/addrspace.h8
-rw-r--r--arch/sh/include/asm/pgtable.h14
-rw-r--r--arch/sh/include/asm/pgtable_32.h2
-rw-r--r--arch/sh/include/asm/pgtable_64.h9
-rw-r--r--arch/sh/include/asm/processor.h1
-rw-r--r--arch/sh/include/asm/system.h4
-rw-r--r--arch/sh/include/asm/system_32.h36
-rw-r--r--arch/sh/include/asm/system_64.h3
-rw-r--r--arch/sh/include/asm/uncached.h40
-rw-r--r--arch/sh/include/mach-common/mach/edosk7705.h7
-rw-r--r--arch/sh/include/mach-common/mach/microdev.h9
-rw-r--r--arch/sh/include/mach-common/mach/secureedge5410.h (renamed from arch/sh/include/mach-common/mach/snapgear.h)22
-rw-r--r--arch/sh/include/mach-common/mach/systemh7751.h71
-rw-r--r--arch/sh/kernel/cpu/init.c2
-rw-r--r--arch/sh/kernel/cpu/irq/imask.c14
-rw-r--r--arch/sh/kernel/cpu/irq/intc-sh5.c49
-rw-r--r--arch/sh/kernel/cpu/irq/ipr.c29
-rw-r--r--arch/sh/kernel/cpu/sh4/perf_event.c2
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7724.c6
-rw-r--r--arch/sh/kernel/cpu/sh4a/perf_event.c2
-rw-r--r--arch/sh/kernel/irq.c37
-rw-r--r--arch/sh/kernel/irq_64.c16
-rw-r--r--arch/sh/kernel/ptrace_32.c45
-rw-r--r--arch/sh/kernel/ptrace_64.c25
-rw-r--r--arch/sh/kernel/setup.c4
-rw-r--r--arch/sh/mm/Kconfig2
-rw-r--r--arch/sh/mm/Makefile2
-rw-r--r--arch/sh/mm/consistent.c15
-rw-r--r--arch/sh/mm/gup.c273
-rw-r--r--arch/sh/mm/uncached.c2
-rw-r--r--arch/sh/oprofile/Makefile2
-rw-r--r--arch/sh/oprofile/backtrace.c2
-rw-r--r--arch/sh/oprofile/common.c35
-rw-r--r--arch/sh/tools/mach-types1
-rw-r--r--arch/sparc/Kconfig9
-rw-r--r--arch/sparc/include/asm/io_32.h31
-rw-r--r--arch/sparc/include/asm/io_64.h31
-rw-r--r--arch/sparc/include/asm/jump_label.h2
-rw-r--r--arch/sparc/include/asm/pci_64.h2
-rw-r--r--arch/sparc/kernel/irq_32.c4
-rw-r--r--arch/sparc/kernel/leon_smp.c5
-rw-r--r--arch/sparc/kernel/ptrace_32.c57
-rw-r--r--arch/sparc/kernel/ptrace_64.c15
-rw-r--r--arch/sparc/kernel/rtrap_32.S6
-rw-r--r--arch/sparc/kernel/rtrap_64.S36
-rw-r--r--arch/sparc/kernel/sys_sparc32.c1
-rw-r--r--arch/sparc/kernel/sys_sparc_32.c1
-rw-r--r--arch/sparc/kernel/unaligned_32.c1
-rw-r--r--arch/sparc/kernel/windows.c1
-rw-r--r--arch/sparc/mm/fault_32.c12
-rw-r--r--arch/sparc/mm/highmem.c4
-rw-r--r--arch/tile/Kconfig5
-rw-r--r--arch/tile/include/asm/highmem.h1
-rw-r--r--arch/tile/include/asm/kmap_types.h34
-rw-r--r--arch/tile/include/asm/pgtable.h6
-rw-r--r--arch/tile/include/asm/stat.h3
-rw-r--r--arch/tile/include/asm/unistd.h1
-rw-r--r--arch/tile/kernel/compat.c11
-rw-r--r--arch/tile/kernel/compat_signal.c1
-rw-r--r--arch/tile/kernel/early_printk.c2
-rw-r--r--arch/tile/kernel/hardwall.c6
-rw-r--r--arch/tile/kernel/irq.c4
-rw-r--r--arch/tile/kernel/machine_kexec.c6
-rw-r--r--arch/tile/kernel/messaging.c2
-rw-r--r--arch/tile/kernel/ptrace.c44
-rw-r--r--arch/tile/kernel/reboot.c6
-rw-r--r--arch/tile/kernel/setup.c10
-rw-r--r--arch/tile/kernel/signal.c10
-rw-r--r--arch/tile/kernel/smp.c2
-rw-r--r--arch/tile/kernel/smpboot.c1
-rw-r--r--arch/tile/kernel/sys.c1
-rw-r--r--arch/tile/kernel/time.c8
-rw-r--r--arch/tile/lib/memcpy_tile64.c11
-rw-r--r--arch/tile/mm/fault.c1
-rw-r--r--arch/tile/mm/highmem.c5
-rw-r--r--arch/tile/mm/hugetlbpage.c1
-rw-r--r--arch/tile/mm/init.c8
-rw-r--r--arch/tile/mm/pgtable.c4
-rw-r--r--arch/um/Kconfig.common2
-rw-r--r--arch/um/include/asm/ptrace-generic.h4
-rw-r--r--arch/um/kernel/exec.c1
-rw-r--r--arch/um/kernel/ptrace.c23
-rw-r--r--arch/um/sys-i386/ptrace.c4
-rw-r--r--arch/um/sys-x86_64/ptrace.c11
-rw-r--r--arch/x86/Kbuild1
-rw-r--r--arch/x86/Kconfig9
-rw-r--r--arch/x86/Makefile_32.cpu13
-rw-r--r--arch/x86/ia32/sys_ia32.c1
-rw-r--r--arch/x86/include/asm/acpi.h3
-rw-r--r--arch/x86/include/asm/apic.h10
-rw-r--r--arch/x86/include/asm/io.h13
-rw-r--r--arch/x86/include/asm/io_apic.h1
-rw-r--r--arch/x86/include/asm/irq.h2
-rw-r--r--arch/x86/include/asm/msr-index.h1
-rw-r--r--arch/x86/include/asm/pci.h33
-rw-r--r--arch/x86/include/asm/pci_x86.h1
-rw-r--r--arch/x86/include/asm/perf_event.h19
-rw-r--r--arch/x86/include/asm/smp.h9
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h21
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h189
-rw-r--r--arch/x86/include/asm/x86_init.h9
-rw-r--r--arch/x86/include/asm/xen/pci.h65
-rw-r--r--arch/x86/kernel/Makefile12
-rw-r--r--arch/x86/kernel/acpi/boot.c60
-rw-r--r--arch/x86/kernel/alternative.c71
-rw-r--r--arch/x86/kernel/apic/apic.c1
-rw-r--r--arch/x86/kernel/apic/io_apic.c11
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c36
-rw-r--r--arch/x86/kernel/cpu/perf_event.c21
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c216
-rw-r--r--arch/x86/kernel/cpuid.c1
-rw-r--r--arch/x86/kernel/dumpstack_32.c6
-rw-r--r--arch/x86/kernel/dumpstack_64.c8
-rw-r--r--arch/x86/kernel/irq_32.c17
-rw-r--r--arch/x86/kernel/kgdb.c15
-rw-r--r--arch/x86/kernel/microcode_amd.c2
-rw-r--r--arch/x86/kernel/mmconf-fam10h_64.c7
-rw-r--r--arch/x86/kernel/msr.c1
-rw-r--r--arch/x86/kernel/ptrace.c17
-rw-r--r--arch/x86/kernel/pvclock.c38
-rw-r--r--arch/x86/kernel/reboot.c2
-rw-r--r--arch/x86/kernel/setup.c2
-rw-r--r--arch/x86/kernel/smp.c15
-rw-r--r--arch/x86/kernel/smpboot.c1
-rw-r--r--arch/x86/kernel/x86_init.c7
-rw-r--r--arch/x86/kvm/mmu.c9
-rw-r--r--arch/x86/kvm/svm.c2
-rw-r--r--arch/x86/kvm/vmx.c19
-rw-r--r--arch/x86/kvm/x86.c16
-rw-r--r--arch/x86/mm/highmem_32.c3
-rw-r--r--arch/x86/mm/init_64.c1
-rw-r--r--arch/x86/mm/iomap_32.c3
-rw-r--r--arch/x86/mm/numa_64.c7
-rw-r--r--arch/x86/mm/tlb.c2
-rw-r--r--arch/x86/oprofile/nmi_int.c6
-rw-r--r--arch/x86/oprofile/op_model_amd.c146
-rw-r--r--arch/x86/pci/Makefile1
-rw-r--r--arch/x86/pci/acpi.c103
-rw-r--r--arch/x86/pci/common.c17
-rw-r--r--arch/x86/pci/i386.c19
-rw-r--r--arch/x86/pci/irq.c11
-rw-r--r--arch/x86/pci/mmconfig-shared.c4
-rw-r--r--arch/x86/pci/xen.c416
-rw-r--r--arch/x86/platform/Makefile8
-rw-r--r--arch/x86/platform/efi/Makefile1
-rw-r--r--arch/x86/platform/efi/efi.c (renamed from arch/x86/kernel/efi.c)0
-rw-r--r--arch/x86/platform/efi/efi_32.c (renamed from arch/x86/kernel/efi_32.c)0
-rw-r--r--arch/x86/platform/efi/efi_64.c (renamed from arch/x86/kernel/efi_64.c)0
-rw-r--r--arch/x86/platform/efi/efi_stub_32.S (renamed from arch/x86/kernel/efi_stub_32.S)0
-rw-r--r--arch/x86/platform/efi/efi_stub_64.S (renamed from arch/x86/kernel/efi_stub_64.S)0
-rw-r--r--arch/x86/platform/mrst/Makefile1
-rw-r--r--arch/x86/platform/mrst/mrst.c (renamed from arch/x86/kernel/mrst.c)0
-rw-r--r--arch/x86/platform/olpc/Makefile3
-rw-r--r--arch/x86/platform/olpc/olpc-xo1.c (renamed from arch/x86/kernel/olpc-xo1.c)0
-rw-r--r--arch/x86/platform/olpc/olpc.c (renamed from arch/x86/kernel/olpc.c)0
-rw-r--r--arch/x86/platform/olpc/olpc_ofw.c (renamed from arch/x86/kernel/olpc_ofw.c)0
-rw-r--r--arch/x86/platform/scx200/Makefile2
-rw-r--r--arch/x86/platform/scx200/scx200_32.c (renamed from arch/x86/kernel/scx200_32.c)0
-rw-r--r--arch/x86/platform/sfi/Makefile1
-rw-r--r--arch/x86/platform/sfi/sfi.c (renamed from arch/x86/kernel/sfi.c)0
-rw-r--r--arch/x86/platform/uv/Makefile1
-rw-r--r--arch/x86/platform/uv/bios_uv.c (renamed from arch/x86/kernel/bios_uv.c)0
-rw-r--r--arch/x86/platform/uv/tlb_uv.c (renamed from arch/x86/kernel/tlb_uv.c)13
-rw-r--r--arch/x86/platform/uv/uv_irq.c (renamed from arch/x86/kernel/uv_irq.c)0
-rw-r--r--arch/x86/platform/uv/uv_sysfs.c (renamed from arch/x86/kernel/uv_sysfs.c)0
-rw-r--r--arch/x86/platform/uv/uv_time.c (renamed from arch/x86/kernel/uv_time.c)0
-rw-r--r--arch/x86/platform/visws/Makefile1
-rw-r--r--arch/x86/platform/visws/visws_quirks.c (renamed from arch/x86/kernel/visws_quirks.c)0
-rw-r--r--arch/x86/xen/Kconfig10
-rw-r--r--arch/x86/xen/enlighten.c8
-rw-r--r--arch/x86/xen/mmu.c49
-rw-r--r--arch/x86/xen/pci-swiotlb-xen.c4
-rw-r--r--arch/x86/xen/setup.c19
-rw-r--r--arch/x86/xen/smp.c32
-rw-r--r--arch/xtensa/Kconfig5
-rw-r--r--arch/xtensa/kernel/ptrace.c14
656 files changed, 23604 insertions, 9378 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 53d7f619a1b..8bf0fa652eb 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -42,6 +42,20 @@ config KPROBES
for kernel debugging, non-intrusive instrumentation and testing.
If in doubt, say "N".
+config JUMP_LABEL
+ bool "Optimize trace point call sites"
+ depends on HAVE_ARCH_JUMP_LABEL
+ help
+ If it is detected that the compiler has support for "asm goto",
+ the kernel will compile trace point locations with just a
+ nop instruction. When trace points are enabled, the nop will
+ be converted to a jump to the trace function. This technique
+ lowers overhead and stress on the branch prediction of the
+ processor.
+
+ On i386, options added to the compiler flags may increase
+ the size of the kernel slightly.
+
config OPTPROBES
def_bool y
depends on KPROBES && HAVE_OPTPROBES
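
[Editor's note] The JUMP_LABEL help text added above describes the technique: each trace point site is compiled down to a single nop, and the nop is rewritten into a jump to the trace code when the point is enabled. As a hedged illustration only — this is not the kernel's jump_label implementation, and the section name, function name and label below are invented for the sketch — an "asm goto" based site looks roughly like this:

/*
 * Illustrative sketch of the "asm goto" pattern the JUMP_LABEL help text
 * refers to.  The inline asm emits a nop plus a table entry recording where
 * the nop lives and where the traced branch target is; enabling the trace
 * point rewrites the nop into a jump.  "__example_jump_table",
 * "example_trace_site_enabled" and "do_trace" are placeholders, not kernel API.
 */
static inline int example_trace_site_enabled(void)
{
	asm goto("1:	nop\n\t"
		 ".pushsection __example_jump_table, \"aw\"\n\t"
		 ".long 1b, %l[do_trace]\n\t"
		 ".popsection"
		 : : : : do_trace);
	return 0;		/* disabled: cost is the single nop above */
do_trace:
	return 1;		/* enabled: reached once the nop is patched to a jump */
}

A caller would then write "if (example_trace_site_enabled()) trace_fn();", paying only the nop when tracing is off.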
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 28f93a6c0fd..943fe6930f7 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -1,7 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
config ALPHA
bool
default y
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index d1dbd9acd1d..022c2748fa4 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -223,7 +223,7 @@ iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
*/
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
- dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
+ dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
int ok = 1;
/* If this is not set, the machine doesn't support DAC at all. */
@@ -756,7 +756,7 @@ static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
spin_lock_irqsave(&arena->lock, flags);
for (end = sg + nents; sg < end; ++sg) {
- dma64_addr_t addr;
+ dma_addr_t addr;
size_t size;
long npages, ofs;
dma_addr_t tend;
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index baa903602f6..e2af5eb59bb 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -269,7 +269,8 @@ void ptrace_disable(struct task_struct *child)
user_disable_single_step(child);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long tmp;
size_t copied;
@@ -292,7 +293,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_PEEKUSR:
force_successful_syscall_return();
ret = get_reg(child, addr);
- DBG(DBG_MEM, ("peek $%ld->%#lx\n", addr, ret));
+ DBG(DBG_MEM, ("peek $%lu->%#lx\n", addr, ret));
break;
/* When I and D space are separate, this will have to be fixed. */
@@ -302,7 +303,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_POKEUSR: /* write the specified register */
- DBG(DBG_MEM, ("poke $%ld<-%#lx\n", addr, data));
+ DBG(DBG_MEM, ("poke $%lu<-%#lx\n", addr, data));
ret = put_reg(child, addr, data);
break;
default:
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c0d6bdae263..db524e75c4a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Linux Kernel Configuration"
-
config ARM
bool
default y
@@ -13,7 +6,7 @@ config ARM
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
- select GENERIC_ATOMIC64 if (!CPU_32v6K)
+ select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI)
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_ARCH_KGDB
select HAVE_KPROBES if (!XIP_KERNEL)
@@ -653,7 +646,7 @@ config ARCH_S3C2410
select ARCH_HAS_CPUFREQ
select HAVE_CLK
select ARCH_USES_GETTIMEOFFSET
- select HAVE_S3C2410_I2C
+ select HAVE_S3C2410_I2C if I2C
help
Samsung S3C2410X CPU based systems, such as the Simtec Electronics
BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or
@@ -683,8 +676,8 @@ config ARCH_S3C64XX
select S3C_DEV_NAND
select USB_ARCH_HAS_OHCI
select SAMSUNG_GPIOLIB_4BIT
- select HAVE_S3C2410_I2C
- select HAVE_S3C2410_WATCHDOG
+ select HAVE_S3C2410_I2C if I2C
+ select HAVE_S3C2410_WATCHDOG if WATCHDOG
help
Samsung S3C64XX series based systems
@@ -693,10 +686,10 @@ config ARCH_S5P64X0
select CPU_V6
select GENERIC_GPIO
select HAVE_CLK
- select HAVE_S3C2410_WATCHDOG
+ select HAVE_S3C2410_WATCHDOG if WATCHDOG
select ARCH_USES_GETTIMEOFFSET
- select HAVE_S3C2410_I2C
- select HAVE_S3C_RTC
+ select HAVE_S3C2410_I2C if I2C
+ select HAVE_S3C_RTC if RTC_CLASS
help
Samsung S5P64X0 CPU based systems, such as the Samsung SMDK6440,
SMDK6450.
@@ -707,7 +700,7 @@ config ARCH_S5P6442
select GENERIC_GPIO
select HAVE_CLK
select ARCH_USES_GETTIMEOFFSET
- select HAVE_S3C2410_WATCHDOG
+ select HAVE_S3C2410_WATCHDOG if WATCHDOG
help
Samsung S5P6442 CPU based systems
@@ -718,9 +711,9 @@ config ARCH_S5PC100
select CPU_V7
select ARM_L1_CACHE_SHIFT_6
select ARCH_USES_GETTIMEOFFSET
- select HAVE_S3C2410_I2C
- select HAVE_S3C_RTC
- select HAVE_S3C2410_WATCHDOG
+ select HAVE_S3C2410_I2C if I2C
+ select HAVE_S3C_RTC if RTC_CLASS
+ select HAVE_S3C2410_WATCHDOG if WATCHDOG
help
Samsung S5PC100 series based systems
@@ -733,9 +726,9 @@ config ARCH_S5PV210
select ARM_L1_CACHE_SHIFT_6
select ARCH_HAS_CPUFREQ
select ARCH_USES_GETTIMEOFFSET
- select HAVE_S3C2410_I2C
- select HAVE_S3C_RTC
- select HAVE_S3C2410_WATCHDOG
+ select HAVE_S3C2410_I2C if I2C
+ select HAVE_S3C_RTC if RTC_CLASS
+ select HAVE_S3C2410_WATCHDOG if WATCHDOG
help
Samsung S5PV210/S5PC110 series based systems
@@ -746,9 +739,9 @@ config ARCH_S5PV310
select GENERIC_GPIO
select HAVE_CLK
select GENERIC_CLOCKEVENTS
- select HAVE_S3C_RTC
- select HAVE_S3C2410_I2C
- select HAVE_S3C2410_WATCHDOG
+ select HAVE_S3C_RTC if RTC_CLASS
+ select HAVE_S3C2410_I2C if I2C
+ select HAVE_S3C2410_WATCHDOG if WATCHDOG
help
Samsung S5PV310 series based systems
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index ada6359160e..772f95f1aec 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -251,15 +251,16 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
/*
- * Set priority on all interrupts.
+ * Set priority on all global interrupts.
*/
- for (i = 0; i < max_irq; i += 4)
+ for (i = 32; i < max_irq; i += 4)
writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
/*
- * Disable all interrupts.
+ * Disable all interrupts. Leave the PPI and SGIs alone
+ * as these enables are banked registers.
*/
- for (i = 0; i < max_irq; i += 32)
+ for (i = 32; i < max_irq; i += 32)
writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
/*
@@ -277,11 +278,30 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
{
+ void __iomem *dist_base;
+ int i;
+
if (gic_nr >= MAX_GIC_NR)
BUG();
+ dist_base = gic_data[gic_nr].dist_base;
+ BUG_ON(!dist_base);
+
gic_data[gic_nr].cpu_base = base;
+ /*
+ * Deal with the banked PPI and SGI interrupts - disable all
+ * PPI interrupts, ensure all SGI interrupts are enabled.
+ */
+ writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
+ writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
+
+ /*
+ * Set priority on PPI and SGI interrupts
+ */
+ for (i = 0; i < 32; i += 4)
+ writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
+
writel(0xf0, base + GIC_CPU_PRIMASK);
writel(1, base + GIC_CPU_CTRL);
}
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index 6700c7fc7eb..21fa272301f 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -75,7 +75,7 @@ extern unsigned long it8152_base_address;
IT8152_PD_IRQ(1) USB (USBR)
IT8152_PD_IRQ(0) Audio controller (ACR)
*/
-#define IT8152_IRQ(x) (IRQ_BOARD_END + (x))
+#define IT8152_IRQ(x) (IRQ_BOARD_START + (x))
/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
#define IT8152_LD_IRQ_COUNT 9
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 08265993227..48066ce9ea3 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -70,7 +70,8 @@ extern int kgdb_fault_expected;
#define _GP_REGS 16
#define _FP_REGS 8
#define _EXTRA_REGS 2
-#define DBG_MAX_REG_NUM (_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
+#define GDB_MAX_REGS (_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS)
+#define DBG_MAX_REG_NUM (_GP_REGS + _FP_REGS + _EXTRA_REGS)
#define KGDB_MAX_NO_CPUS 1
#define BUFMAX 400
@@ -93,7 +94,7 @@ extern int kgdb_fault_expected;
#define _SPT 13
#define _LR 14
#define _PC 15
-#define _CPSR (DBG_MAX_REG_NUM - 1)
+#define _CPSR (GDB_MAX_REGS - 1)
/*
* So that we can denote the end of a frame for tracing,
diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h
index fdbc43b2e6c..b8da2e415e4 100644
--- a/arch/arm/include/asm/memblock.h
+++ b/arch/arm/include/asm/memblock.h
@@ -1,13 +1,6 @@
#ifndef _ASM_ARM_MEMBLOCK_H
#define _ASM_ARM_MEMBLOCK_H
-#ifdef CONFIG_MMU
-extern phys_addr_t lowmem_end_addr;
-#define MEMBLOCK_REAL_LIMIT lowmem_end_addr
-#else
-#define MEMBLOCK_REAL_LIMIT 0
-#endif
-
struct meminfo;
struct machine_desc;
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 54593b0c241..21e3a4ab3b8 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -748,8 +748,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
breakpoint_handler(addr, regs);
break;
case ARM_ENTRY_ASYNC_WATCHPOINT:
- WARN_ON("Asynchronous watchpoint exception taken. "
- "Debugging results may be unreliable");
+ WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
case ARM_ENTRY_SYNC_WATCHPOINT:
watchpoint_handler(addr, regs);
break;
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index d6e8b4d2e60..778c2f7024f 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -79,7 +79,7 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
return;
/* Initialize to zero */
- for (regno = 0; regno < DBG_MAX_REG_NUM; regno++)
+ for (regno = 0; regno < GDB_MAX_REGS; regno++)
gdb_regs[regno] = 0;
/* Otherwise, we have only some registers from switch_to() */
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 49643b1467e..07a50357492 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -1749,7 +1749,7 @@ static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
enum armv7_counters counter)
{
- int ret;
+ int ret = 0;
if (counter == ARMV7_CYCLE_COUNTER)
ret = pmnc & ARMV7_FLAG_C;
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index e0cb6370ed1..3e97483abcf 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -1075,13 +1075,15 @@ out:
}
#endif
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
case PTRACE_PEEKUSR:
- ret = ptrace_read_user(child, addr, (unsigned long __user *)data);
+ ret = ptrace_read_user(child, addr, datap);
break;
case PTRACE_POKEUSR:
@@ -1089,34 +1091,34 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (void __user *)data);
+ ret = ptrace_getregs(child, datap);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (void __user *)data);
+ ret = ptrace_setregs(child, datap);
break;
case PTRACE_GETFPREGS:
- ret = ptrace_getfpregs(child, (void __user *)data);
+ ret = ptrace_getfpregs(child, datap);
break;
case PTRACE_SETFPREGS:
- ret = ptrace_setfpregs(child, (void __user *)data);
+ ret = ptrace_setfpregs(child, datap);
break;
#ifdef CONFIG_IWMMXT
case PTRACE_GETWMMXREGS:
- ret = ptrace_getwmmxregs(child, (void __user *)data);
+ ret = ptrace_getwmmxregs(child, datap);
break;
case PTRACE_SETWMMXREGS:
- ret = ptrace_setwmmxregs(child, (void __user *)data);
+ ret = ptrace_setwmmxregs(child, datap);
break;
#endif
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value,
- (unsigned long __user *) data);
+ datap);
break;
case PTRACE_SET_SYSCALL:
@@ -1126,21 +1128,21 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#ifdef CONFIG_CRUNCH
case PTRACE_GETCRUNCHREGS:
- ret = ptrace_getcrunchregs(child, (void __user *)data);
+ ret = ptrace_getcrunchregs(child, datap);
break;
case PTRACE_SETCRUNCHREGS:
- ret = ptrace_setcrunchregs(child, (void __user *)data);
+ ret = ptrace_setcrunchregs(child, datap);
break;
#endif
#ifdef CONFIG_VFP
case PTRACE_GETVFPREGS:
- ret = ptrace_getvfpregs(child, (void __user *)data);
+ ret = ptrace_getvfpregs(child, datap);
break;
case PTRACE_SETVFPREGS:
- ret = ptrace_setvfpregs(child, (void __user *)data);
+ ret = ptrace_setvfpregs(child, datap);
break;
#endif
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 20b7411e47f..c2e112e1a05 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -28,7 +28,7 @@ int notrace unwind_frame(struct stackframe *frame)
/* only go to a higher address on the stack */
low = frame->sp;
- high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE;
+ high = ALIGN(low, THREAD_SIZE);
/* check current frame pointer is within bounds */
if (fp < (low + 12) || fp + 4 >= high)
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cda78d59aa3..446aee97436 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -53,10 +53,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
- char sym1[KSYM_SYMBOL_LEN], sym2[KSYM_SYMBOL_LEN];
- sprint_symbol(sym1, where);
- sprint_symbol(sym2, from);
- printk("[<%08lx>] (%s) from [<%08lx>] (%s)\n", where, sym1, from, sym2);
+ printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
#else
printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 2a161765f6d..d2cb0b3c987 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -279,7 +279,7 @@ int unwind_frame(struct stackframe *frame)
/* only go to a higher address on the stack */
low = frame->sp;
- high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE;
+ high = ALIGN(low, THREAD_SIZE);
pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
frame->pc, frame->lr, frame->sp);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 1953e3d21ab..cead8893b46 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -113,6 +113,7 @@ SECTIONS
*(.rodata.*)
*(.glue_7)
*(.glue_7t)
+ . = ALIGN(4);
*(.got) /* Global offset table */
ARM_CPU_KEEP(PROC_INFO)
}
diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h
index 3a5961d3f3b..5e31b2b25da 100644
--- a/arch/arm/mach-ep93xx/include/mach/dma.h
+++ b/arch/arm/mach-ep93xx/include/mach/dma.h
@@ -1,5 +1,13 @@
-/*
- * arch/arm/mach-ep93xx/include/mach/dma.h
+/**
+ * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine
+ *
+ * The EP93xx DMA M2P subsystem handles DMA transfers between memory and
+ * peripherals. DMA M2P channels are available for audio, UARTs and IrDA.
+ * See chapter 10 of the EP93xx users guide for full details on the DMA M2P
+ * engine.
+ *
+ * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code.
+ *
*/
#ifndef __ASM_ARCH_DMA_H
@@ -8,12 +16,34 @@
#include <linux/list.h>
#include <linux/types.h>
+/**
+ * struct ep93xx_dma_buffer - Information about a buffer to be transferred
+ * using the DMA M2P engine
+ *
+ * @list: Entry in DMA buffer list
+ * @bus_addr: Physical address of the buffer
+ * @size: Size of the buffer in bytes
+ */
struct ep93xx_dma_buffer {
struct list_head list;
u32 bus_addr;
u16 size;
};
+/**
+ * struct ep93xx_dma_m2p_client - Information about a DMA M2P client
+ *
+ * @name: Unique name for this client
+ * @flags: Client flags
+ * @cookie: User data to pass to callback functions
+ * @buffer_started: Non NULL function to call when a transfer is started.
+ * The arguments are the user data cookie and the DMA
+ * buffer which is starting.
+ * @buffer_finished: Non NULL function to call when a transfer is completed.
+ * The arguments are the user data cookie, the DMA buffer
+ * which has completed, and a boolean flag indicating if
+ * the transfer had an error.
+ */
struct ep93xx_dma_m2p_client {
char *name;
u8 flags;
@@ -24,10 +54,11 @@ struct ep93xx_dma_m2p_client {
struct ep93xx_dma_buffer *buf,
int bytes, int error);
- /* Internal to the DMA code. */
+ /* private: Internal use only */
void *channel;
};
+/* DMA M2P ports */
#define EP93XX_DMA_M2P_PORT_I2S1 0x00
#define EP93XX_DMA_M2P_PORT_I2S2 0x01
#define EP93XX_DMA_M2P_PORT_AAC1 0x02
@@ -39,18 +70,80 @@ struct ep93xx_dma_m2p_client {
#define EP93XX_DMA_M2P_PORT_UART3 0x08
#define EP93XX_DMA_M2P_PORT_IRDA 0x09
#define EP93XX_DMA_M2P_PORT_MASK 0x0f
-#define EP93XX_DMA_M2P_TX 0x00
-#define EP93XX_DMA_M2P_RX 0x10
-#define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20
-#define EP93XX_DMA_M2P_IGNORE_ERROR 0x40
-#define EP93XX_DMA_M2P_ERROR_MASK 0x60
-int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+/* DMA M2P client flags */
+#define EP93XX_DMA_M2P_TX 0x00 /* Memory to peripheral */
+#define EP93XX_DMA_M2P_RX 0x10 /* Peripheral to memory */
+
+/*
+ * DMA M2P client error handling flags. See the EP93xx users guide
+ * documentation on the DMA M2P CONTROL register for more details
+ */
+#define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20 /* Abort on peripheral error */
+#define EP93XX_DMA_M2P_IGNORE_ERROR 0x40 /* Ignore peripheral errors */
+#define EP93XX_DMA_M2P_ERROR_MASK 0x60 /* Mask of error bits */
+
+/**
+ * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P
+ * subsystem
+ *
+ * @m2p: Client information to register
+ * returns 0 on success
+ *
+ * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA
+ * client
+ */
+int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+
+/**
+ * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P
+ * subsystem
+ *
+ * @m2p: Client to unregister
+ *
+ * Any transfers currently in progress will be completed in hardware, but
+ * ignored in software.
+ */
void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p);
+
+/**
+ * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer
+ *
+ * @m2p: DMA Client to submit the transfer on
+ * @buf: DMA Buffer to submit
+ *
+ * If the current or next transfer positions are free on the M2P client then
+ * the transfer is started immediately. If not, the transfer is added to the
+ * list of pending transfers. This function must not be called from the
+ * buffer_finished callback for an M2P channel.
+ *
+ */
void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p,
struct ep93xx_dma_buffer *buf);
+
+/**
+ * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list
+ * for an M2P channel
+ *
+ * @m2p: DMA Client to submit the transfer on
+ * @buf: DMA Buffer to submit
+ *
+ * This function must only be called from the buffer_finished callback for an
+ * M2P channel. It is commonly used to add the next transfer in a chained list
+ * of DMA transfers.
+ */
void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p,
struct ep93xx_dma_buffer *buf);
+
+/**
+ * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client
+ *
+ * @m2p: DMA client to flush transfers on
+ *
+ * Any transfers currently in progress will be completed in hardware, but
+ * ignored in software.
+ *
+ */
void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p);
#endif /* __ASM_ARCH_DMA_H */
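
[Editor's note] The kernel-doc added above documents the EP93xx DMA M2P client API. Below is a minimal, hedged usage sketch based only on the declarations and comments in this hunk; the callback signatures are assumed from the struct layout shown, and the names, port choice and callback bodies are placeholders rather than code from an in-tree driver:

#include <mach/dma.h>	/* the header shown in the hunk above */

/* Hypothetical M2P client: I2S1 transmit, ignoring peripheral errors. */
static void example_buffer_started(void *cookie, struct ep93xx_dma_buffer *buf)
{
	/* transfer of 'buf' has been handed to the hardware */
}

static void example_buffer_finished(void *cookie, struct ep93xx_dma_buffer *buf,
				    int bytes, int error)
{
	/*
	 * Per the comments above, ep93xx_dma_m2p_submit() must not be called
	 * from here; ep93xx_dma_m2p_submit_recursive() would queue the next
	 * buffer of a chained transfer.
	 */
}

static struct ep93xx_dma_m2p_client example_client = {
	.name			= "example-i2s-tx",
	.flags			= EP93XX_DMA_M2P_PORT_I2S1 |
				  EP93XX_DMA_M2P_TX |
				  EP93XX_DMA_M2P_IGNORE_ERROR,
	.buffer_started		= example_buffer_started,
	.buffer_finished	= example_buffer_finished,
};

static int example_start(struct ep93xx_dma_buffer *buf)
{
	int err;

	err = ep93xx_dma_m2p_client_register(&example_client);
	if (err)
		return err;

	/* buf->bus_addr and buf->size are assumed to be filled in already */
	ep93xx_dma_m2p_submit(&example_client, buf);
	return 0;
}

Teardown would be ep93xx_dma_m2p_flush() followed by ep93xx_dma_m2p_client_unregister(), as described in the kernel-doc above.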
diff --git a/arch/arm/mach-imx/include/mach/dma-v1.h b/arch/arm/mach-imx/include/mach/dma-v1.h
index 287431cc13e..ac6fd713828 100644
--- a/arch/arm/mach-imx/include/mach/dma-v1.h
+++ b/arch/arm/mach-imx/include/mach/dma-v1.h
@@ -27,6 +27,8 @@
#define imx_has_dma_v1() (cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())
+#include <mach/dma.h>
+
#define IMX_DMA_CHANNELS 16
#define DMA_MODE_READ 0
@@ -96,12 +98,6 @@ int imx_dma_request(int channel, const char *name);
void imx_dma_free(int channel);
-enum imx_dma_prio {
- DMA_PRIO_HIGH = 0,
- DMA_PRIO_MEDIUM = 1,
- DMA_PRIO_LOW = 2
-};
-
int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio);
#endif /* __MACH_DMA_V1_H__ */
diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c
index babb2259716..e24e3d05397 100644
--- a/arch/arm/mach-ixp2000/core.c
+++ b/arch/arm/mach-ixp2000/core.c
@@ -197,7 +197,7 @@ unsigned long ixp2000_gettimeoffset (void)
return offset / ticks_per_usec;
}
-static int ixp2000_timer_interrupt(int irq, void *dev_id)
+static irqreturn_t ixp2000_timer_interrupt(int irq, void *dev_id)
{
/* clear timer 1 */
ixp2000_reg_wrb(IXP2000_T1_CLR, 1);
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 51ff23b72d3..3688123b5ad 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -854,10 +854,9 @@ int __init kirkwood_find_tclk(void)
kirkwood_pcie_id(&dev, &rev);
- if ((dev == MV88F6281_DEV_ID && (rev == MV88F6281_REV_A0 ||
- rev == MV88F6281_REV_A1)) ||
- (dev == MV88F6282_DEV_ID))
- return 200000000;
+ if (dev == MV88F6281_DEV_ID || dev == MV88F6282_DEV_ID)
+ if (((readl(SAMPLE_AT_RESET) >> 21) & 1) == 0)
+ return 200000000;
return 166666667;
}
diff --git a/arch/arm/mach-kirkwood/d2net_v2-setup.c b/arch/arm/mach-kirkwood/d2net_v2-setup.c
index 4aa86e4a152..a31c9499ab3 100644
--- a/arch/arm/mach-kirkwood/d2net_v2-setup.c
+++ b/arch/arm/mach-kirkwood/d2net_v2-setup.c
@@ -225,5 +225,5 @@ MACHINE_START(D2NET_V2, "LaCie d2 Network v2")
.init_machine = d2net_v2_init,
.map_io = kirkwood_map_io,
.init_irq = kirkwood_init_irq,
- .timer = &lacie_v2_timer,
+ .timer = &kirkwood_timer,
MACHINE_END
diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.c b/arch/arm/mach-kirkwood/lacie_v2-common.c
index d3ea1b6c8a0..285edab776e 100644
--- a/arch/arm/mach-kirkwood/lacie_v2-common.c
+++ b/arch/arm/mach-kirkwood/lacie_v2-common.c
@@ -111,17 +111,3 @@ void __init lacie_v2_hdd_power_init(int hdd_num)
pr_err("Failed to power up HDD%d\n", i + 1);
}
}
-
-/*****************************************************************************
- * Timer
- ****************************************************************************/
-
-static void lacie_v2_timer_init(void)
-{
- kirkwood_tclk = 166666667;
- orion_time_init(IRQ_KIRKWOOD_BRIDGE, kirkwood_tclk);
-}
-
-struct sys_timer lacie_v2_timer = {
- .init = lacie_v2_timer_init,
-};
diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.h b/arch/arm/mach-kirkwood/lacie_v2-common.h
index af521315b87..fc64f578536 100644
--- a/arch/arm/mach-kirkwood/lacie_v2-common.h
+++ b/arch/arm/mach-kirkwood/lacie_v2-common.h
@@ -13,6 +13,4 @@ void lacie_v2_register_flash(void);
void lacie_v2_register_i2c_devices(void);
void lacie_v2_hdd_power_init(int hdd_num);
-extern struct sys_timer lacie_v2_timer;
-
#endif
diff --git a/arch/arm/mach-kirkwood/mpp.c b/arch/arm/mach-kirkwood/mpp.c
index 065187d177c..27901f702fe 100644
--- a/arch/arm/mach-kirkwood/mpp.c
+++ b/arch/arm/mach-kirkwood/mpp.c
@@ -59,7 +59,7 @@ void __init kirkwood_mpp_conf(unsigned int *mpp_list)
}
printk("\n");
- while (*mpp_list) {
+ for ( ; *mpp_list; mpp_list++) {
unsigned int num = MPP_NUM(*mpp_list);
unsigned int sel = MPP_SEL(*mpp_list);
int shift, gpio_mode;
@@ -88,8 +88,6 @@ void __init kirkwood_mpp_conf(unsigned int *mpp_list)
if (sel != 0)
gpio_mode = 0;
orion_gpio_set_valid(num, gpio_mode);
-
- mpp_list++;
}
printk(KERN_DEBUG " final MPP regs:");
diff --git a/arch/arm/mach-kirkwood/netspace_v2-setup.c b/arch/arm/mach-kirkwood/netspace_v2-setup.c
index 5ea66f1f417..65ee21fd2f3 100644
--- a/arch/arm/mach-kirkwood/netspace_v2-setup.c
+++ b/arch/arm/mach-kirkwood/netspace_v2-setup.c
@@ -262,7 +262,7 @@ MACHINE_START(NETSPACE_V2, "LaCie Network Space v2")
.init_machine = netspace_v2_init,
.map_io = kirkwood_map_io,
.init_irq = kirkwood_init_irq,
- .timer = &lacie_v2_timer,
+ .timer = &kirkwood_timer,
MACHINE_END
#endif
@@ -272,7 +272,7 @@ MACHINE_START(INETSPACE_V2, "LaCie Internet Space v2")
.init_machine = netspace_v2_init,
.map_io = kirkwood_map_io,
.init_irq = kirkwood_init_irq,
- .timer = &lacie_v2_timer,
+ .timer = &kirkwood_timer,
MACHINE_END
#endif
@@ -282,6 +282,6 @@ MACHINE_START(NETSPACE_MAX_V2, "LaCie Network Space Max v2")
.init_machine = netspace_v2_init,
.map_io = kirkwood_map_io,
.init_irq = kirkwood_init_irq,
- .timer = &lacie_v2_timer,
+ .timer = &kirkwood_timer,
MACHINE_END
#endif
diff --git a/arch/arm/mach-kirkwood/netxbig_v2-setup.c b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
index a1b45d501ae..93afd3c8bfd 100644
--- a/arch/arm/mach-kirkwood/netxbig_v2-setup.c
+++ b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
@@ -403,7 +403,7 @@ MACHINE_START(NET2BIG_V2, "LaCie 2Big Network v2")
.init_machine = netxbig_v2_init,
.map_io = kirkwood_map_io,
.init_irq = kirkwood_init_irq,
- .timer = &lacie_v2_timer,
+ .timer = &kirkwood_timer,
MACHINE_END
#endif
@@ -413,6 +413,6 @@ MACHINE_START(NET5BIG_V2, "LaCie 5Big Network v2")
.init_machine = netxbig_v2_init,
.map_io = kirkwood_map_io,
.init_irq = kirkwood_init_irq,
- .timer = &lacie_v2_timer,
+ .timer = &kirkwood_timer,
MACHINE_END
#endif
diff --git a/arch/arm/mach-kirkwood/ts41x-setup.c b/arch/arm/mach-kirkwood/ts41x-setup.c
index 8be09a0ce4a..3587a281d99 100644
--- a/arch/arm/mach-kirkwood/ts41x-setup.c
+++ b/arch/arm/mach-kirkwood/ts41x-setup.c
@@ -27,6 +27,10 @@
#include "mpp.h"
#include "tsx1x-common.h"
+/* for the PCIe reset workaround */
+#include <plat/pcie.h>
+
+
#define QNAP_TS41X_JUMPER_JP1 45
static struct i2c_board_info __initdata qnap_ts41x_i2c_rtc = {
@@ -140,8 +144,16 @@ static void __init qnap_ts41x_init(void)
static int __init ts41x_pci_init(void)
{
- if (machine_is_ts41x())
+ if (machine_is_ts41x()) {
+ /*
+ * Without this explicit reset, the PCIe SATA controller
+ * (Marvell 88sx7042/sata_mv) is known to stop working
+ * after a few minutes.
+ */
+ orion_pcie_reset((void __iomem *)PCIE_VIRT_BASE);
+
kirkwood_pcie_init(KW_PCIE0);
+ }
return 0;
}
diff --git a/arch/arm/mach-mmp/include/mach/cputype.h b/arch/arm/mach-mmp/include/mach/cputype.h
index f43a68b213f..8a3b56dfd35 100644
--- a/arch/arm/mach-mmp/include/mach/cputype.h
+++ b/arch/arm/mach-mmp/include/mach/cputype.h
@@ -46,7 +46,8 @@ static inline int cpu_is_pxa910(void)
#ifdef CONFIG_CPU_MMP2
static inline int cpu_is_mmp2(void)
{
- return (((cpu_readid_id() >> 8) & 0xff) == 0x58);
+ return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
+}
#else
#define cpu_is_mmp2() (0)
#endif
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 3115a29dec4..dbbcfeb919d 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -6,6 +6,7 @@ choice
config ARCH_MSM7X00A
bool "MSM7x00A / MSM7x01A"
+ select MACH_TROUT if !MACH_HALIBUT
select ARCH_MSM_ARM11
select MSM_SMD
select MSM_SMD_PKG3
@@ -15,34 +16,34 @@ config ARCH_MSM7X00A
config ARCH_MSM7X30
bool "MSM7x30"
+ select MACH_MSM7X30_SURF # if !
select ARCH_MSM_SCORPION
select MSM_SMD
select MSM_VIC
select CPU_V7
- select MSM_REMOTE_SPINLOCK_DEKKERS
select MSM_GPIOMUX
select MSM_PROC_COMM
select HAS_MSM_DEBUG_UART_PHYS
config ARCH_QSD8X50
bool "QSD8X50"
+ select MACH_QSD8X50_SURF if !MACH_QSD8X50A_ST1_5
select ARCH_MSM_SCORPION
select MSM_SMD
select MSM_VIC
select CPU_V7
- select MSM_REMOTE_SPINLOCK_LDREX
select MSM_GPIOMUX
select MSM_PROC_COMM
select HAS_MSM_DEBUG_UART_PHYS
config ARCH_MSM8X60
bool "MSM8X60"
+ select MACH_MSM8X60_SURF if (!MACH_MSM8X60_RUMI3 && !MACH_MSM8X60_SIM \
+ && !MACH_MSM8X60_FFA)
select ARM_GIC
select CPU_V7
select MSM_V2_TLMM
select MSM_GPIOMUX
- select MACH_MSM8X60_SURF if (!MACH_MSM8X60_RUMI3 && !MACH_MSM8X60_SIM \
- && !MACH_MSM8X60_FFA)
endchoice
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index 59edecbe126..75dabb16c80 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -83,7 +83,6 @@ static void __init halibut_fixup(struct machine_desc *desc, struct tag *tags,
{
mi->nr_banks=1;
mi->bank[0].start = PHYS_OFFSET;
- mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
mi->bank[0].size = (101*1024*1024);
}
diff --git a/arch/arm/mach-msm/include/mach/debug-macro.S b/arch/arm/mach-msm/include/mach/debug-macro.S
index fbd5d90dcc8..646b99ebc77 100644
--- a/arch/arm/mach-msm/include/mach/debug-macro.S
+++ b/arch/arm/mach-msm/include/mach/debug-macro.S
@@ -19,7 +19,7 @@
#include <mach/hardware.h>
#include <mach/msm_iomap.h>
-#ifdef CONFIG_HAS_MSM_DEBUG_UART_PHYS
+#if defined(CONFIG_HAS_MSM_DEBUG_UART_PHYS) && !defined(CONFIG_MSM_DEBUG_UART_NONE)
.macro addruart, rp, rv
ldr \rp, =MSM_DEBUG_UART_PHYS
ldr \rv, =MSM_DEBUG_UART_BASE
@@ -36,7 +36,18 @@
tst \rd, #0x04
beq 1001b
.endm
+#else
+ .macro addruart, rp, rv
+ mov \rv, #0xff000000
+ orr \rv, \rv, #0x00f00000
+ .endm
- .macro busyuart,rd,rx
+ .macro senduart,rd,rx
+ .endm
+
+ .macro waituart,rd,rx
.endm
#endif
+
+ .macro busyuart,rd,rx
+ .endm
diff --git a/arch/arm/mach-msm/iommu_dev.c b/arch/arm/mach-msm/iommu_dev.c
index c33ae786c41..9019cee2907 100644
--- a/arch/arm/mach-msm/iommu_dev.c
+++ b/arch/arm/mach-msm/iommu_dev.c
@@ -128,7 +128,7 @@ static void msm_iommu_reset(void __iomem *base)
static int msm_iommu_probe(struct platform_device *pdev)
{
- struct resource *r;
+ struct resource *r, *r2;
struct clk *iommu_clk;
struct msm_iommu_drvdata *drvdata;
struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data;
@@ -183,27 +183,27 @@ static int msm_iommu_probe(struct platform_device *pdev)
len = r->end - r->start + 1;
- r = request_mem_region(r->start, len, r->name);
- if (!r) {
+ r2 = request_mem_region(r->start, len, r->name);
+ if (!r2) {
pr_err("Could not request memory region: "
"start=%p, len=%d\n", (void *) r->start, len);
ret = -EBUSY;
goto fail;
}
- regs_base = ioremap(r->start, len);
+ regs_base = ioremap(r2->start, len);
if (!regs_base) {
pr_err("Could not ioremap: start=%p, len=%d\n",
- (void *) r->start, len);
+ (void *) r2->start, len);
ret = -EBUSY;
- goto fail;
+ goto fail_mem;
}
irq = platform_get_irq_byname(pdev, "secure_irq");
if (irq < 0) {
ret = -ENODEV;
- goto fail;
+ goto fail_io;
}
mb();
@@ -211,14 +211,14 @@ static int msm_iommu_probe(struct platform_device *pdev)
if (GET_IDR(regs_base) == 0) {
pr_err("Invalid IDR value detected\n");
ret = -ENODEV;
- goto fail;
+ goto fail_io;
}
ret = request_irq(irq, msm_iommu_fault_handler, 0,
"msm_iommu_secure_irpt_handler", drvdata);
if (ret) {
pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
- goto fail;
+ goto fail_io;
}
msm_iommu_reset(regs_base);
@@ -237,6 +237,10 @@ static int msm_iommu_probe(struct platform_device *pdev)
return 0;
+fail_io:
+ iounmap(regs_base);
+fail_mem:
+ release_mem_region(r->start, len);
fail:
kfree(drvdata);
return ret;
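The error-path rework above introduces ordered labels so each failure releases exactly what was acquired before it, in reverse order. A standalone sketch of that unwind idiom, using ordinary userspace resources as stand-ins for request_mem_region()/ioremap():

#include <stdio.h>
#include <stdlib.h>

static int do_work(FILE *f)
{
	return f ? 0 : -1;	/* placeholder for the real work */
}

int main(void)
{
	char *buf;
	FILE *f;
	int ret;

	buf = malloc(64);		/* first resource */
	if (!buf) {
		ret = -1;
		goto fail;
	}

	f = fopen("/dev/null", "r");	/* second resource */
	if (!f) {
		ret = -1;
		goto fail_mem;
	}

	ret = do_work(f);		/* a later failure must undo both */
	if (ret)
		goto fail_io;

	fclose(f);
	free(buf);
	return 0;

fail_io:	/* fopen() had succeeded: undo it */
	fclose(f);
fail_mem:	/* malloc() had succeeded: undo it */
	free(buf);
fail:
	return ret;
}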
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 7689848ec68..950100f19d0 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -137,7 +137,7 @@ static struct msm_clock msm_clocks[] = {
.rating = 200,
.read = msm_gpt_read,
.mask = CLOCKSOURCE_MASK(32),
- .shift = 24,
+ .shift = 17,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
},
.irq = {
diff --git a/arch/arm/mach-mv78xx0/mpp.c b/arch/arm/mach-mv78xx0/mpp.c
index 354ac514eb8..84db2dfc475 100644
--- a/arch/arm/mach-mv78xx0/mpp.c
+++ b/arch/arm/mach-mv78xx0/mpp.c
@@ -54,7 +54,7 @@ void __init mv78xx0_mpp_conf(unsigned int *mpp_list)
}
printk("\n");
- while (*mpp_list) {
+ for ( ; *mpp_list; mpp_list++) {
unsigned int num = MPP_NUM(*mpp_list);
unsigned int sel = MPP_SEL(*mpp_list);
int shift, gpio_mode;
@@ -83,8 +83,6 @@ void __init mv78xx0_mpp_conf(unsigned int *mpp_list)
if (sel != 0)
gpio_mode = 0;
orion_gpio_set_valid(num, gpio_mode);
-
- mpp_list++;
}
printk(KERN_DEBUG " final MPP regs:");
diff --git a/arch/arm/mach-mx3/mach-pcm037.c b/arch/arm/mach-mx3/mach-pcm037.c
index cb657692b80..b752f6bc20a 100644
--- a/arch/arm/mach-mx3/mach-pcm037.c
+++ b/arch/arm/mach-mx3/mach-pcm037.c
@@ -308,7 +308,6 @@ static struct soc_camera_link iclink_mt9v022 = {
.bus_id = 0, /* Must match with the camera ID */
.board_info = &pcm037_i2c_camera[1],
.i2c_adapter_id = 2,
- .module_name = "mt9v022",
};
static struct soc_camera_link iclink_mt9t031 = {
@@ -316,7 +315,6 @@ static struct soc_camera_link iclink_mt9t031 = {
.power = pcm037_camera_power,
.board_info = &pcm037_i2c_camera[0],
.i2c_adapter_id = 2,
- .module_name = "mt9t031",
};
static struct i2c_board_info pcm037_i2c_devices[] = {
diff --git a/arch/arm/mach-mx3/mx31moboard-marxbot.c b/arch/arm/mach-mx3/mx31moboard-marxbot.c
index a35e8c9a0a0..f449a97ae1a 100644
--- a/arch/arm/mach-mx3/mx31moboard-marxbot.c
+++ b/arch/arm/mach-mx3/mx31moboard-marxbot.c
@@ -177,7 +177,6 @@ static struct soc_camera_link base_iclink = {
.reset = marxbot_basecam_reset,
.board_info = &marxbot_i2c_devices[0],
.i2c_adapter_id = 0,
- .module_name = "mt9t031",
};
static struct platform_device marxbot_camera[] = {
diff --git a/arch/arm/mach-mx3/mx31moboard-smartbot.c b/arch/arm/mach-mx3/mx31moboard-smartbot.c
index 8e989e5b3af..bbec3c82264 100644
--- a/arch/arm/mach-mx3/mx31moboard-smartbot.c
+++ b/arch/arm/mach-mx3/mx31moboard-smartbot.c
@@ -86,7 +86,6 @@ static struct soc_camera_link base_iclink = {
.reset = smartbot_cam_reset,
.board_info = &smartbot_i2c_devices[0],
.i2c_adapter_id = 0,
- .module_name = "mt9t031",
};
static struct platform_device smartbot_camera[] = {
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index ea0d80a89da..e7f9ee63dce 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -321,10 +321,9 @@ static struct platform_device omap_wdt_device = {
static int __init omap_init_wdt(void)
{
if (!cpu_is_omap16xx())
- return;
+ return -ENODEV;
- platform_device_register(&omap_wdt_device);
- return 0;
+ return platform_device_register(&omap_wdt_device);
}
subsys_initcall(omap_init_wdt);
#endif
diff --git a/arch/arm/mach-omap1/include/mach/camera.h b/arch/arm/mach-omap1/include/mach/camera.h
index fd54b452eb2..847d00f0bb0 100644
--- a/arch/arm/mach-omap1/include/mach/camera.h
+++ b/arch/arm/mach-omap1/include/mach/camera.h
@@ -1,6 +1,8 @@
#ifndef __ASM_ARCH_CAMERA_H_
#define __ASM_ARCH_CAMERA_H_
+#include <media/omap1_camera.h>
+
void omap1_camera_init(void *);
static inline void omap1_set_camera_info(struct omap1_cam_platform_data *info)
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 7352412e491..60e51bcf53b 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -108,6 +108,10 @@ obj-y += $(iommu-m) $(iommu-y)
i2c-omap-$(CONFIG_I2C_OMAP) := i2c.o
obj-y += $(i2c-omap-m) $(i2c-omap-y)
+ifneq ($(CONFIG_TIDSPBRIDGE),)
+obj-y += dsp.o
+endif
+
# Specific board support
obj-$(CONFIG_MACH_OMAP_GENERIC) += board-generic.o
obj-$(CONFIG_MACH_OMAP_H4) += board-h4.o
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 69a4ae971e4..df5a425a49d 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -269,9 +269,14 @@ static int omap4_twl6030_hsmmc_late_init(struct device *dev)
struct omap_mmc_platform_data *pdata = dev->platform_data;
/* Setting MMC1 Card detect Irq */
- if (pdev->id == 0)
+ if (pdev->id == 0) {
+ ret = twl6030_mmc_card_detect_config();
+ if (ret)
+ pr_err("Failed configuring MMC1 card detect\n");
pdata->slots[0].card_detect_irq = TWL6030_IRQ_BASE +
MMCDETECT_INTR_OFFSET;
+ pdata->slots[0].card_detect = twl6030_mmc_card_detect;
+ }
return ret;
}
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 067f4379c87..53ac762518b 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -242,9 +242,6 @@ static int devkit8000_twl_gpio_setup(struct device *dev,
mmc[0].gpio_cd = gpio + 0;
omap2_hsmmc_init(mmc);
- /* link regulators to MMC adapters */
- devkit8000_vmmc1_supply.dev = mmc[0].dev;
-
/* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 702f2a63f2c..1ecd0a6cefb 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -160,10 +160,19 @@ static int omap4_twl6030_hsmmc_late_init(struct device *dev)
struct platform_device, dev);
struct omap_mmc_platform_data *pdata = dev->platform_data;
+ if (!pdata) {
+ dev_err(dev, "%s: NULL platform data\n", __func__);
+ return -EINVAL;
+ }
/* Setting MMC1 Card detect Irq */
- if (pdev->id == 0)
- pdata->slots[0].card_detect_irq = TWL6030_IRQ_BASE +
- MMCDETECT_INTR_OFFSET;
+ if (pdev->id == 0) {
+ ret = twl6030_mmc_card_detect_config();
+ if (ret)
+ dev_err(dev, "%s: Error card detect config(%d)\n",
+ __func__, ret);
+ else
+ pdata->slots[0].card_detect = twl6030_mmc_card_detect;
+ }
return ret;
}
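Both MMC late-init hunks (board-4430sdp.c and board-omap4panda.c) share the same shape: check that platform data exists, then wire an optional card-detect callback only for the controller that actually has the line routed. A standalone sketch of that shape, with hypothetical stand-in types rather than the OMAP structures:

#include <stdio.h>

struct slot { int (*card_detect)(void); };
struct mmc_pdata { struct slot slots[1]; };

static int fake_card_detect(void)
{
	return 1;	/* pretend a card is inserted */
}

static int late_init(struct mmc_pdata *pdata, int controller_id)
{
	if (!pdata) {
		fprintf(stderr, "late_init: NULL platform data\n");
		return -1;
	}
	if (controller_id == 0)		/* only the first controller has card detect wired */
		pdata->slots[0].card_detect = fake_card_detect;
	return 0;
}

int main(void)
{
	struct mmc_pdata pdata = { { { NULL } } };

	if (!late_init(&pdata, 0) && pdata.slots[0].card_detect)
		printf("card present: %d\n", pdata.slots[0].card_detect());
	return 0;
}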
diff --git a/arch/arm/mach-omap2/dsp.c b/arch/arm/mach-omap2/dsp.c
new file mode 100644
index 00000000000..6feeeae6c21
--- /dev/null
+++ b/arch/arm/mach-omap2/dsp.c
@@ -0,0 +1,85 @@
+/*
+ * TI's OMAP DSP platform device registration
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include "prm.h"
+#include "cm.h"
+#ifdef CONFIG_BRIDGE_DVFS
+#include <plat/omap-pm.h>
+#endif
+
+#include <plat/dsp.h>
+
+extern phys_addr_t omap_dsp_get_mempool_base(void);
+
+static struct platform_device *omap_dsp_pdev;
+
+static struct omap_dsp_platform_data omap_dsp_pdata __initdata = {
+#ifdef CONFIG_BRIDGE_DVFS
+ .dsp_set_min_opp = omap_pm_dsp_set_min_opp,
+ .dsp_get_opp = omap_pm_dsp_get_opp,
+ .cpu_set_freq = omap_pm_cpu_set_freq,
+ .cpu_get_freq = omap_pm_cpu_get_freq,
+#endif
+ .dsp_prm_read = prm_read_mod_reg,
+ .dsp_prm_write = prm_write_mod_reg,
+ .dsp_prm_rmw_bits = prm_rmw_mod_reg_bits,
+ .dsp_cm_read = cm_read_mod_reg,
+ .dsp_cm_write = cm_write_mod_reg,
+ .dsp_cm_rmw_bits = cm_rmw_mod_reg_bits,
+};
+
+static int __init omap_dsp_init(void)
+{
+ struct platform_device *pdev;
+ int err = -ENOMEM;
+ struct omap_dsp_platform_data *pdata = &omap_dsp_pdata;
+
+ pdata->phys_mempool_base = omap_dsp_get_mempool_base();
+
+ if (pdata->phys_mempool_base) {
+ pdata->phys_mempool_size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
+ pr_info("%s: %x bytes @ %x\n", __func__,
+ pdata->phys_mempool_size, pdata->phys_mempool_base);
+ }
+
+ pdev = platform_device_alloc("omap-dsp", -1);
+ if (!pdev)
+ goto err_out;
+
+ err = platform_device_add_data(pdev, pdata, sizeof(*pdata));
+ if (err)
+ goto err_out;
+
+ err = platform_device_add(pdev);
+ if (err)
+ goto err_out;
+
+ omap_dsp_pdev = pdev;
+ return 0;
+
+err_out:
+ platform_device_put(pdev);
+ return err;
+}
+module_init(omap_dsp_init);
+
+static void __exit omap_dsp_exit(void)
+{
+ platform_device_unregister(omap_dsp_pdev);
+}
+module_exit(omap_dsp_exit);
+
+MODULE_AUTHOR("Hiroshi DOYU");
+MODULE_DESCRIPTION("TI's OMAP DSP platform device registration");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-orion5x/mpp.c b/arch/arm/mach-orion5x/mpp.c
index bc4c3b9aaf8..db485d3b814 100644
--- a/arch/arm/mach-orion5x/mpp.c
+++ b/arch/arm/mach-orion5x/mpp.c
@@ -127,7 +127,7 @@ void __init orion5x_mpp_conf(struct orion5x_mpp_mode *mode)
/* Initialize gpiolib. */
orion_gpio_init();
- while (mode->mpp >= 0) {
+ for ( ; mode->mpp >= 0; mode++) {
u32 *reg;
int num_type;
int shift;
@@ -160,8 +160,6 @@ void __init orion5x_mpp_conf(struct orion5x_mpp_mode *mode)
orion_gpio_set_unused(mode->mpp);
orion_gpio_set_valid(mode->mpp, !!(mode->type == MPP_GPIO));
-
- mode++;
}
writel(mpp_0_7_ctrl, MPP_0_7_CTRL);
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index 16f1bd5324b..c1c1cd04bdd 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -239,7 +239,7 @@ static struct platform_nand_data ts78xx_ts_nand_data = {
static struct resource ts78xx_ts_nand_resources = {
.start = TS_NAND_DATA,
.end = TS_NAND_DATA + 4,
- .flags = IORESOURCE_IO,
+ .flags = IORESOURCE_MEM,
};
static struct platform_device ts78xx_ts_nand_device = {
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c
index ac5598ce972..d34b99febeb 100644
--- a/arch/arm/mach-pxa/cm-x2xx.c
+++ b/arch/arm/mach-pxa/cm-x2xx.c
@@ -476,8 +476,6 @@ static void __init cmx2xx_init(void)
static void __init cmx2xx_init_irq(void)
{
- pxa27x_init_irq();
-
if (cpu_is_pxa25x()) {
pxa25x_init_irq();
cmx2xx_pci_init_irq(CMX255_GPIO_IT8152_IRQ);
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index ab48bb81b57..ed0dbfdb22e 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -1015,7 +1015,6 @@ static struct soc_camera_link iclink = {
.power = em_x270_sensor_power,
.board_info = &em_x270_i2c_cam_info[0],
.i2c_adapter_id = 0,
- .module_name = "mt9m111",
};
static struct platform_device em_x270_camera = {
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c
index 80a9352d43f..142c711f4cd 100644
--- a/arch/arm/mach-pxa/ezx.c
+++ b/arch/arm/mach-pxa/ezx.c
@@ -755,7 +755,6 @@ static struct soc_camera_link a780_iclink = {
.flags = SOCAM_SENSOR_INVERT_PCLK,
.i2c_adapter_id = 0,
.board_info = &a780_camera_i2c_board_info,
- .module_name = "mt9m111",
.power = a780_camera_power,
.reset = a780_camera_reset,
};
@@ -1024,7 +1023,6 @@ static struct soc_camera_link a910_iclink = {
.bus_id = 0,
.i2c_adapter_id = 0,
.board_info = &a910_camera_i2c_board_info,
- .module_name = "mt9m111",
.power = a910_camera_power,
.reset = a910_camera_reset,
};
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index 0c31fabfc7f..f5fb915e131 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -711,7 +711,6 @@ static struct soc_camera_link iclink = {
.bus_id = 0, /* Match id in pxa27x_device_camera in device.c */
.board_info = &mioa701_i2c_devices[0],
.i2c_adapter_id = 0,
- .module_name = "mt9m111",
};
struct i2c_pxa_platform_data i2c_pdata = {
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index f56ae100875..f33647a8e0b 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -453,7 +453,6 @@ static struct soc_camera_link iclink[] = {
.query_bus_param = pcm990_camera_query_bus_param,
.set_bus_param = pcm990_camera_set_bus_param,
.free_bus = pcm990_camera_free_bus,
- .module_name = "mt9v022",
}, {
.bus_id = 0, /* Must match with the camera ID */
.board_info = &pcm990_camera_i2c[1],
@@ -461,7 +460,6 @@ static struct soc_camera_link iclink[] = {
.query_bus_param = pcm990_camera_query_bus_param,
.set_bus_param = pcm990_camera_set_bus_param,
.free_bus = pcm990_camera_free_bus,
- .module_name = "mt9m001",
},
};
diff --git a/arch/arm/mach-pxa/saar.c b/arch/arm/mach-pxa/saar.c
index 4b521e045d7..ffa50e633ee 100644
--- a/arch/arm/mach-pxa/saar.c
+++ b/arch/arm/mach-pxa/saar.c
@@ -116,7 +116,7 @@ static struct platform_device smc91x_device = {
},
};
-#if defined(CONFIG_FB_PXA) || (CONFIG_FB_PXA_MODULE)
+#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
static uint16_t lcd_power_on[] = {
/* single frame */
SMART_CMD_NOOP,
diff --git a/arch/arm/mach-s3c2410/h1940-bluetooth.c b/arch/arm/mach-s3c2410/h1940-bluetooth.c
index 8cdeb14af59..8aa2f1902a9 100644
--- a/arch/arm/mach-s3c2410/h1940-bluetooth.c
+++ b/arch/arm/mach-s3c2410/h1940-bluetooth.c
@@ -30,7 +30,7 @@ static void h1940bt_enable(int on)
{
if (on) {
/* Power on the chip */
- h1940_latch_control(0, H1940_LATCH_BLUETOOTH_POWER);
+ gpio_set_value(H1940_LATCH_BLUETOOTH_POWER, 1);
/* Reset the chip */
mdelay(10);
@@ -43,7 +43,7 @@ static void h1940bt_enable(int on)
mdelay(10);
gpio_set_value(S3C2410_GPH(1), 0);
mdelay(10);
- h1940_latch_control(H1940_LATCH_BLUETOOTH_POWER, 0);
+ gpio_set_value(H1940_LATCH_BLUETOOTH_POWER, 0);
}
}
@@ -64,7 +64,14 @@ static int __devinit h1940bt_probe(struct platform_device *pdev)
ret = gpio_request(S3C2410_GPH(1), dev_name(&pdev->dev));
if (ret) {
- dev_err(&pdev->dev, "could not get GPH1\n");\
+ dev_err(&pdev->dev, "could not get GPH1\n");
+ return ret;
+ }
+
+ ret = gpio_request(H1940_LATCH_BLUETOOTH_POWER, dev_name(&pdev->dev));
+ if (ret) {
+ gpio_free(S3C2410_GPH(1));
+ dev_err(&pdev->dev, "could not get BT_POWER\n");
return ret;
}
diff --git a/arch/arm/mach-s3c2410/include/mach/h1940-latch.h b/arch/arm/mach-s3c2410/include/mach/h1940-latch.h
index d8a832729a8..97e42bfce81 100644
--- a/arch/arm/mach-s3c2410/include/mach/h1940-latch.h
+++ b/arch/arm/mach-s3c2410/include/mach/h1940-latch.h
@@ -14,51 +14,30 @@
#ifndef __ASM_ARCH_H1940_LATCH_H
#define __ASM_ARCH_H1940_LATCH_H
+#include <mach/gpio.h>
-#ifndef __ASSEMBLY__
-#define H1940_LATCH ((void __force __iomem *)0xF8000000)
-#else
-#define H1940_LATCH 0xF8000000
-#endif
-
-#define H1940_PA_LATCH (S3C2410_CS2)
+#define H1940_LATCH_GPIO(x) (S3C_GPIO_END + (x))
/* SD layer latch */
-#define H1940_LATCH_SDQ1 (1<<16)
-#define H1940_LATCH_LCD_P1 (1<<17)
-#define H1940_LATCH_LCD_P2 (1<<18)
-#define H1940_LATCH_LCD_P3 (1<<19)
-#define H1940_LATCH_MAX1698_nSHUTDOWN (1<<20) /* LCD backlight */
-#define H1940_LATCH_LED_RED (1<<21)
-#define H1940_LATCH_SDQ7 (1<<22)
-#define H1940_LATCH_USB_DP (1<<23)
+#define H1940_LATCH_LCD_P0 H1940_LATCH_GPIO(0)
+#define H1940_LATCH_LCD_P1 H1940_LATCH_GPIO(1)
+#define H1940_LATCH_LCD_P2 H1940_LATCH_GPIO(2)
+#define H1940_LATCH_LCD_P3 H1940_LATCH_GPIO(3)
+#define H1940_LATCH_MAX1698_nSHUTDOWN H1940_LATCH_GPIO(4)
+#define H1940_LATCH_LED_RED H1940_LATCH_GPIO(5)
+#define H1940_LATCH_SDQ7 H1940_LATCH_GPIO(6)
+#define H1940_LATCH_USB_DP H1940_LATCH_GPIO(7)
/* CPU layer latch */
-#define H1940_LATCH_UDA_POWER (1<<24)
-#define H1940_LATCH_AUDIO_POWER (1<<25)
-#define H1940_LATCH_SM803_ENABLE (1<<26)
-#define H1940_LATCH_LCD_P4 (1<<27)
-#define H1940_LATCH_CPUQ5 (1<<28) /* untraced */
-#define H1940_LATCH_BLUETOOTH_POWER (1<<29) /* active high */
-#define H1940_LATCH_LED_GREEN (1<<30)
-#define H1940_LATCH_LED_FLASH (1<<31)
-
-/* default settings */
-
-#define H1940_LATCH_DEFAULT \
- H1940_LATCH_LCD_P4 | \
- H1940_LATCH_SM803_ENABLE | \
- H1940_LATCH_SDQ1 | \
- H1940_LATCH_LCD_P1 | \
- H1940_LATCH_LCD_P2 | \
- H1940_LATCH_LCD_P3 | \
- H1940_LATCH_MAX1698_nSHUTDOWN | \
- H1940_LATCH_CPUQ5
-
-/* control functions */
-
-extern void h1940_latch_control(unsigned int clear, unsigned int set);
+#define H1940_LATCH_UDA_POWER H1940_LATCH_GPIO(8)
+#define H1940_LATCH_AUDIO_POWER H1940_LATCH_GPIO(9)
+#define H1940_LATCH_SM803_ENABLE H1940_LATCH_GPIO(10)
+#define H1940_LATCH_LCD_P4 H1940_LATCH_GPIO(11)
+#define H1940_LATCH_SD_POWER H1940_LATCH_GPIO(12)
+#define H1940_LATCH_BLUETOOTH_POWER H1940_LATCH_GPIO(13)
+#define H1940_LATCH_LED_GREEN H1940_LATCH_GPIO(14)
+#define H1940_LATCH_LED_FLASH H1940_LATCH_GPIO(15)
#endif /* __ASM_ARCH_H1940_LATCH_H */
diff --git a/arch/arm/mach-s3c2410/mach-h1940.c b/arch/arm/mach-s3c2410/mach-h1940.c
index 98c5c9e81ee..d7ada8c7e41 100644
--- a/arch/arm/mach-s3c2410/mach-h1940.c
+++ b/arch/arm/mach-s3c2410/mach-h1940.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/pwm_backlight.h>
+#include <linux/i2c.h>
#include <video/platform_lcd.h>
#include <linux/mmc/host.h>
@@ -59,6 +60,14 @@
#include <plat/mci.h>
#include <plat/ts.h>
+#include <sound/uda1380.h>
+
+#define H1940_LATCH ((void __force __iomem *)0xF8000000)
+
+#define H1940_PA_LATCH S3C2410_CS2
+
+#define H1940_LATCH_BIT(x) (1 << ((x) + 16 - S3C_GPIO_END))
+
static struct map_desc h1940_iodesc[] __initdata = {
[0] = {
.virtual = (unsigned long)H1940_LATCH,
@@ -100,9 +109,9 @@ static struct s3c2410_uartcfg h1940_uartcfgs[] __initdata = {
/* Board control latch control */
-static unsigned int latch_state = H1940_LATCH_DEFAULT;
+static unsigned int latch_state;
-void h1940_latch_control(unsigned int clear, unsigned int set)
+static void h1940_latch_control(unsigned int clear, unsigned int set)
{
unsigned long flags;
@@ -116,7 +125,42 @@ void h1940_latch_control(unsigned int clear, unsigned int set)
local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(h1940_latch_control);
+static inline int h1940_gpiolib_to_latch(int offset)
+{
+ return 1 << (offset + 16);
+}
+
+static void h1940_gpiolib_latch_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int latch_bit = h1940_gpiolib_to_latch(offset);
+
+ h1940_latch_control(value ? 0 : latch_bit,
+ value ? latch_bit : 0);
+}
+
+static int h1940_gpiolib_latch_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ h1940_gpiolib_latch_set(chip, offset, value);
+ return 0;
+}
+
+static int h1940_gpiolib_latch_get(struct gpio_chip *chip,
+ unsigned offset)
+{
+ return (latch_state >> (offset + 16)) & 1;
+}
+
+struct gpio_chip h1940_latch_gpiochip = {
+ .base = H1940_LATCH_GPIO(0),
+ .owner = THIS_MODULE,
+ .label = "H1940_LATCH",
+ .ngpio = 16,
+ .direction_output = h1940_gpiolib_latch_output,
+ .set = h1940_gpiolib_latch_set,
+ .get = h1940_gpiolib_latch_get,
+};
static void h1940_udc_pullup(enum s3c2410_udc_cmd_e cmd)
{
@@ -125,10 +169,10 @@ static void h1940_udc_pullup(enum s3c2410_udc_cmd_e cmd)
switch (cmd)
{
case S3C2410_UDC_P_ENABLE :
- h1940_latch_control(0, H1940_LATCH_USB_DP);
+ gpio_set_value(H1940_LATCH_USB_DP, 1);
break;
case S3C2410_UDC_P_DISABLE :
- h1940_latch_control(H1940_LATCH_USB_DP, 0);
+ gpio_set_value(H1940_LATCH_USB_DP, 0);
break;
case S3C2410_UDC_P_RESET :
break;
@@ -199,10 +243,25 @@ static struct platform_device h1940_device_bluetooth = {
.id = -1,
};
+static void h1940_set_mmc_power(unsigned char power_mode, unsigned short vdd)
+{
+ switch (power_mode) {
+ case MMC_POWER_OFF:
+ gpio_set_value(H1940_LATCH_SD_POWER, 0);
+ break;
+ case MMC_POWER_UP:
+ case MMC_POWER_ON:
+ gpio_set_value(H1940_LATCH_SD_POWER, 1);
+ break;
+ default:
+ break;
+ };
+}
+
static struct s3c24xx_mci_pdata h1940_mmc_cfg __initdata = {
.gpio_detect = S3C2410_GPF(5),
.gpio_wprotect = S3C2410_GPH(8),
- .set_power = NULL,
+ .set_power = h1940_set_mmc_power,
.ocr_avail = MMC_VDD_32_33,
};
@@ -213,15 +272,32 @@ static int h1940_backlight_init(struct device *dev)
gpio_direction_output(S3C2410_GPB(0), 0);
s3c_gpio_setpull(S3C2410_GPB(0), S3C_GPIO_PULL_NONE);
s3c_gpio_cfgpin(S3C2410_GPB(0), S3C2410_GPB0_TOUT0);
+ gpio_set_value(H1940_LATCH_MAX1698_nSHUTDOWN, 1);
return 0;
}
+static int h1940_backlight_notify(struct device *dev, int brightness)
+{
+ if (!brightness) {
+ gpio_direction_output(S3C2410_GPB(0), 1);
+ gpio_set_value(H1940_LATCH_MAX1698_nSHUTDOWN, 0);
+ } else {
+ gpio_direction_output(S3C2410_GPB(0), 0);
+ s3c_gpio_setpull(S3C2410_GPB(0), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpin(S3C2410_GPB(0), S3C2410_GPB0_TOUT0);
+ gpio_set_value(H1940_LATCH_MAX1698_nSHUTDOWN, 1);
+ }
+ return brightness;
+}
+
static void h1940_backlight_exit(struct device *dev)
{
gpio_direction_output(S3C2410_GPB(0), 1);
+ gpio_set_value(H1940_LATCH_MAX1698_nSHUTDOWN, 0);
}
+
static struct platform_pwm_backlight_data backlight_data = {
.pwm_id = 0,
.max_brightness = 100,
@@ -229,6 +305,7 @@ static struct platform_pwm_backlight_data backlight_data = {
/* tcnt = 0x31 */
.pwm_period_ns = 36296,
.init = h1940_backlight_init,
+ .notify = h1940_backlight_notify,
.exit = h1940_backlight_exit,
};
@@ -247,19 +324,37 @@ static void h1940_lcd_power_set(struct plat_lcd_data *pd,
int value;
if (!power) {
- /* set to 3ec */
- gpio_direction_output(S3C2410_GPC(0), 0);
+ gpio_set_value(S3C2410_GPC(0), 0);
/* wait for 3ac */
do {
value = gpio_get_value(S3C2410_GPC(6));
} while (value);
- /* set to 38c */
- gpio_direction_output(S3C2410_GPC(5), 0);
+
+ gpio_set_value(H1940_LATCH_LCD_P2, 0);
+ gpio_set_value(H1940_LATCH_LCD_P3, 0);
+ gpio_set_value(H1940_LATCH_LCD_P4, 0);
+
+ gpio_direction_output(S3C2410_GPC(1), 0);
+ gpio_direction_output(S3C2410_GPC(4), 0);
+
+ gpio_set_value(H1940_LATCH_LCD_P1, 0);
+ gpio_set_value(H1940_LATCH_LCD_P0, 0);
+
+ gpio_set_value(S3C2410_GPC(5), 0);
+
} else {
- /* Set to 3ac */
- gpio_direction_output(S3C2410_GPC(5), 1);
- /* Set to 3ad */
- gpio_direction_output(S3C2410_GPC(0), 1);
+ gpio_set_value(H1940_LATCH_LCD_P0, 1);
+ gpio_set_value(H1940_LATCH_LCD_P1, 1);
+
+ s3c_gpio_cfgpin(S3C2410_GPC(1), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S3C2410_GPC(4), S3C_GPIO_SFN(2));
+
+ gpio_set_value(S3C2410_GPC(5), 1);
+ gpio_set_value(S3C2410_GPC(0), 1);
+
+ gpio_set_value(H1940_LATCH_LCD_P3, 1);
+ gpio_set_value(H1940_LATCH_LCD_P2, 1);
+ gpio_set_value(H1940_LATCH_LCD_P4, 1);
}
}
@@ -273,12 +368,26 @@ static struct platform_device h1940_lcd_powerdev = {
.dev.platform_data = &h1940_lcd_power_data,
};
+static struct uda1380_platform_data uda1380_info = {
+ .gpio_power = H1940_LATCH_UDA_POWER,
+ .gpio_reset = S3C2410_GPA(12),
+ .dac_clk = UDA1380_DAC_CLK_SYSCLK,
+};
+
+static struct i2c_board_info h1940_i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("uda1380", 0x1a),
+ .platform_data = &uda1380_info,
+ },
+};
+
static struct platform_device *h1940_devices[] __initdata = {
&s3c_device_ohci,
&s3c_device_lcd,
&s3c_device_wdt,
&s3c_device_i2c0,
&s3c_device_iis,
+ &s3c_device_pcm,
&s3c_device_usbgadget,
&h1940_device_leds,
&h1940_device_bluetooth,
@@ -303,6 +412,10 @@ static void __init h1940_map_io(void)
memcpy(phys_to_virt(H1940_SUSPEND_RESUMEAT), h1940_pm_return, 1024);
#endif
s3c_pm_init();
+
+ /* Add latch gpio chip, set latch initial value */
+ h1940_latch_control(0, 0);
+ WARN_ON(gpiochip_add(&h1940_latch_gpiochip));
}
/* H1940 and RX3715 need to reserve this for suspend */
@@ -340,12 +453,38 @@ static void __init h1940_init(void)
writel(tmp, S3C2410_UPLLCON);
gpio_request(S3C2410_GPC(0), "LCD power");
+ gpio_request(S3C2410_GPC(1), "LCD power");
+ gpio_request(S3C2410_GPC(4), "LCD power");
gpio_request(S3C2410_GPC(5), "LCD power");
gpio_request(S3C2410_GPC(6), "LCD power");
-
+ gpio_request(H1940_LATCH_LCD_P0, "LCD power");
+ gpio_request(H1940_LATCH_LCD_P1, "LCD power");
+ gpio_request(H1940_LATCH_LCD_P2, "LCD power");
+ gpio_request(H1940_LATCH_LCD_P3, "LCD power");
+ gpio_request(H1940_LATCH_LCD_P4, "LCD power");
+ gpio_request(H1940_LATCH_MAX1698_nSHUTDOWN, "LCD power");
+ gpio_direction_output(S3C2410_GPC(0), 0);
+ gpio_direction_output(S3C2410_GPC(1), 0);
+ gpio_direction_output(S3C2410_GPC(4), 0);
+ gpio_direction_output(S3C2410_GPC(5), 0);
gpio_direction_input(S3C2410_GPC(6));
+ gpio_direction_output(H1940_LATCH_LCD_P0, 0);
+ gpio_direction_output(H1940_LATCH_LCD_P1, 0);
+ gpio_direction_output(H1940_LATCH_LCD_P2, 0);
+ gpio_direction_output(H1940_LATCH_LCD_P3, 0);
+ gpio_direction_output(H1940_LATCH_LCD_P4, 0);
+ gpio_direction_output(H1940_LATCH_MAX1698_nSHUTDOWN, 0);
+
+ gpio_request(H1940_LATCH_USB_DP, "USB pullup");
+ gpio_direction_output(H1940_LATCH_USB_DP, 0);
+
+ gpio_request(H1940_LATCH_SD_POWER, "SD power");
+ gpio_direction_output(H1940_LATCH_SD_POWER, 0);
platform_add_devices(h1940_devices, ARRAY_SIZE(h1940_devices));
+
+ i2c_register_board_info(0, h1940_i2c_devices,
+ ARRAY_SIZE(h1940_i2c_devices));
}
MACHINE_START(H1940, "IPAQ-H1940")
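The conversion above exposes the write-only board latch as a gpio_chip backed by a software shadow. A standalone sketch of that shadow-latch idea, detached from the gpiolib types used in the patch (the bit layout mirrors the hunk's offset + 16 mapping):

#include <stdio.h>

static unsigned int latch_state;	/* shadow of the write-only latch */

static void latch_write_hw(unsigned int value)
{
	/* stand-in for writing the memory-mapped latch; just show the value */
	printf("latch <= 0x%08x\n", value);
}

static void latch_set_bit(int offset, int value)
{
	unsigned int bit = 1u << (offset + 16);	/* latch bits live in the upper half */

	if (value)
		latch_state |= bit;
	else
		latch_state &= ~bit;
	latch_write_hw(latch_state);
}

static int latch_get_bit(int offset)
{
	return (latch_state >> (offset + 16)) & 1;	/* read back from the shadow */
}

int main(void)
{
	latch_set_bit(13, 1);	/* e.g. bluetooth power on */
	printf("bit 13 = %d\n", latch_get_bit(13));
	latch_set_bit(13, 0);
	return 0;
}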
diff --git a/arch/arm/mach-s3c2440/Kconfig b/arch/arm/mach-s3c2440/Kconfig
index cd8e7de388f..ff024a6c0f8 100644
--- a/arch/arm/mach-s3c2440/Kconfig
+++ b/arch/arm/mach-s3c2440/Kconfig
@@ -4,7 +4,6 @@
config CPU_S3C2440
bool
- depends on ARCH_S3C2410
select CPU_ARM920T
select S3C_GPIO_PULL_UP
select S3C2410_CLOCK
@@ -18,7 +17,6 @@ config CPU_S3C2440
config CPU_S3C2442
bool
- depends on ARCH_S3C2410
select CPU_ARM920T
select S3C2410_CLOCK
select S3C2410_GPIO
@@ -30,7 +28,7 @@ config CPU_S3C2442
config CPU_S3C244X
bool
- depends on ARCH_S3C2410 && (CPU_S3C2440 || CPU_S3C2442)
+ depends on CPU_S3C2440 || CPU_S3C2442
help
Support for S3C2440 and S3C2442 Samsung Mobile CPU based systems.
@@ -72,7 +70,7 @@ config S3C2440_PLL_16934400
config S3C2440_DMA
bool
- depends on ARCH_S3C2410 && CPU_S3C24405B
+ depends on CPU_S3C2440
help
Support for S3C2440 specific DMA code5A
@@ -181,7 +179,6 @@ config MACH_MINI2440
select CPU_S3C2440
select EEPROM_AT24
select LEDS_TRIGGER_BACKLIGHT
- select SND_S3C24XX_SOC_S3C24XX_UDA134X
select S3C_DEV_NAND
select S3C_DEV_USB_HOST
help
diff --git a/arch/arm/mach-s3c2440/mach-rx1950.c b/arch/arm/mach-s3c2440/mach-rx1950.c
index 32019bd9db3..e0622bbb6df 100644
--- a/arch/arm/mach-s3c2440/mach-rx1950.c
+++ b/arch/arm/mach-s3c2440/mach-rx1950.c
@@ -25,8 +25,12 @@
#include <linux/input.h>
#include <linux/gpio_keys.h>
#include <linux/sysdev.h>
+#include <linux/pda_power.h>
#include <linux/pwm_backlight.h>
#include <linux/pwm.h>
+#include <linux/s3c_adc_battery.h>
+#include <linux/leds.h>
+#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -55,6 +59,8 @@
#include <plat/irq.h>
#include <plat/ts.h>
+#include <sound/uda1380.h>
+
#define LCD_PWM_PERIOD 192960
#define LCD_PWM_DUTY 127353
@@ -127,6 +133,193 @@ static struct s3c2410fb_display rx1950_display = {
};
+static int power_supply_init(struct device *dev)
+{
+ return gpio_request(S3C2410_GPF(2), "cable plugged");
+}
+
+static int rx1950_is_ac_online(void)
+{
+ return !gpio_get_value(S3C2410_GPF(2));
+}
+
+static void power_supply_exit(struct device *dev)
+{
+ gpio_free(S3C2410_GPF(2));
+}
+
+static char *rx1950_supplicants[] = {
+ "main-battery"
+};
+
+static struct pda_power_pdata power_supply_info = {
+ .init = power_supply_init,
+ .is_ac_online = rx1950_is_ac_online,
+ .exit = power_supply_exit,
+ .supplied_to = rx1950_supplicants,
+ .num_supplicants = ARRAY_SIZE(rx1950_supplicants),
+};
+
+static struct resource power_supply_resources[] = {
+ [0] = {
+ .name = "ac",
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE |
+ IORESOURCE_IRQ_HIGHEDGE,
+ .start = IRQ_EINT2,
+ .end = IRQ_EINT2,
+ },
+};
+
+static struct platform_device power_supply = {
+ .name = "pda-power",
+ .id = -1,
+ .dev = {
+ .platform_data =
+ &power_supply_info,
+ },
+ .resource = power_supply_resources,
+ .num_resources = ARRAY_SIZE(power_supply_resources),
+};
+
+static const struct s3c_adc_bat_thresh bat_lut_noac[] = {
+ { .volt = 4100, .cur = 156, .level = 100},
+ { .volt = 4050, .cur = 156, .level = 95},
+ { .volt = 4025, .cur = 141, .level = 90},
+ { .volt = 3995, .cur = 144, .level = 85},
+ { .volt = 3957, .cur = 162, .level = 80},
+ { .volt = 3931, .cur = 147, .level = 75},
+ { .volt = 3902, .cur = 147, .level = 70},
+ { .volt = 3863, .cur = 153, .level = 65},
+ { .volt = 3838, .cur = 150, .level = 60},
+ { .volt = 3800, .cur = 153, .level = 55},
+ { .volt = 3765, .cur = 153, .level = 50},
+ { .volt = 3748, .cur = 172, .level = 45},
+ { .volt = 3740, .cur = 153, .level = 40},
+ { .volt = 3714, .cur = 175, .level = 35},
+ { .volt = 3710, .cur = 156, .level = 30},
+ { .volt = 3963, .cur = 156, .level = 25},
+ { .volt = 3672, .cur = 178, .level = 20},
+ { .volt = 3651, .cur = 178, .level = 15},
+ { .volt = 3629, .cur = 178, .level = 10},
+ { .volt = 3612, .cur = 162, .level = 5},
+ { .volt = 3605, .cur = 162, .level = 0},
+};
+
+static const struct s3c_adc_bat_thresh bat_lut_acin[] = {
+ { .volt = 4200, .cur = 0, .level = 100},
+ { .volt = 4190, .cur = 0, .level = 99},
+ { .volt = 4178, .cur = 0, .level = 95},
+ { .volt = 4110, .cur = 0, .level = 70},
+ { .volt = 4076, .cur = 0, .level = 65},
+ { .volt = 4046, .cur = 0, .level = 60},
+ { .volt = 4021, .cur = 0, .level = 55},
+ { .volt = 3999, .cur = 0, .level = 50},
+ { .volt = 3982, .cur = 0, .level = 45},
+ { .volt = 3965, .cur = 0, .level = 40},
+ { .volt = 3957, .cur = 0, .level = 35},
+ { .volt = 3948, .cur = 0, .level = 30},
+ { .volt = 3936, .cur = 0, .level = 25},
+ { .volt = 3927, .cur = 0, .level = 20},
+ { .volt = 3906, .cur = 0, .level = 15},
+ { .volt = 3880, .cur = 0, .level = 10},
+ { .volt = 3829, .cur = 0, .level = 5},
+ { .volt = 3820, .cur = 0, .level = 0},
+};
+
+int rx1950_bat_init(void)
+{
+ int ret;
+
+ ret = gpio_request(S3C2410_GPJ(2), "rx1950-charger-enable-1");
+ if (ret)
+ goto err_gpio1;
+ ret = gpio_request(S3C2410_GPJ(3), "rx1950-charger-enable-2");
+ if (ret)
+ goto err_gpio2;
+
+ return 0;
+
+err_gpio2:
+ gpio_free(S3C2410_GPJ(2));
+err_gpio1:
+ return ret;
+}
+
+void rx1950_bat_exit(void)
+{
+ gpio_free(S3C2410_GPJ(2));
+ gpio_free(S3C2410_GPJ(3));
+}
+
+void rx1950_enable_charger(void)
+{
+ gpio_direction_output(S3C2410_GPJ(2), 1);
+ gpio_direction_output(S3C2410_GPJ(3), 1);
+}
+
+void rx1950_disable_charger(void)
+{
+ gpio_direction_output(S3C2410_GPJ(2), 0);
+ gpio_direction_output(S3C2410_GPJ(3), 0);
+}
+
+static struct gpio_led rx1950_leds_desc[] = {
+ {
+ .name = "Green",
+ .default_trigger = "main-battery-charging-or-full",
+ .gpio = S3C2410_GPA(6),
+ },
+ {
+ .name = "Red",
+ .default_trigger = "main-battery-full",
+ .gpio = S3C2410_GPA(7),
+ },
+ {
+ .name = "Blue",
+ .default_trigger = "rx1950-acx-mem",
+ .gpio = S3C2410_GPA(11),
+ },
+};
+
+static struct gpio_led_platform_data rx1950_leds_pdata = {
+ .num_leds = ARRAY_SIZE(rx1950_leds_desc),
+ .leds = rx1950_leds_desc,
+};
+
+static struct platform_device rx1950_leds = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &rx1950_leds_pdata,
+ },
+};
+
+static struct s3c_adc_bat_pdata rx1950_bat_cfg = {
+ .init = rx1950_bat_init,
+ .exit = rx1950_bat_exit,
+ .enable_charger = rx1950_enable_charger,
+ .disable_charger = rx1950_disable_charger,
+ .gpio_charge_finished = S3C2410_GPF(3),
+ .lut_noac = bat_lut_noac,
+ .lut_noac_cnt = ARRAY_SIZE(bat_lut_noac),
+ .lut_acin = bat_lut_acin,
+ .lut_acin_cnt = ARRAY_SIZE(bat_lut_acin),
+ .volt_channel = 0,
+ .current_channel = 1,
+ .volt_mult = 4235,
+ .current_mult = 2900,
+ .internal_impedance = 200,
+};
+
+static struct platform_device rx1950_battery = {
+ .name = "s3c-adc-battery",
+ .id = -1,
+ .dev = {
+ .parent = &s3c_device_adc.dev,
+ .platform_data = &rx1950_bat_cfg,
+ },
+};
+
static struct s3c2410fb_mach_info rx1950_lcd_cfg = {
.displays = &rx1950_display,
.num_displays = 1,
@@ -481,11 +674,17 @@ static struct platform_device rx1950_device_gpiokeys = {
.dev.platform_data = &rx1950_gpio_keys_data,
};
-static struct s3c2410_platform_i2c rx1950_i2c_data = {
- .flags = 0,
- .slave_addr = 0x42,
- .frequency = 400 * 1000,
- .sda_delay = S3C2410_IICLC_SDA_DELAY5 | S3C2410_IICLC_FILTER_ON,
+static struct uda1380_platform_data uda1380_info = {
+ .gpio_power = S3C2410_GPJ(0),
+ .gpio_reset = S3C2410_GPD(0),
+ .dac_clk = UDA1380_DAC_CLK_SYSCLK,
+};
+
+static struct i2c_board_info rx1950_i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("uda1380", 0x1a),
+ .platform_data = &uda1380_info,
+ },
};
static struct platform_device *rx1950_devices[] __initdata = {
@@ -493,6 +692,7 @@ static struct platform_device *rx1950_devices[] __initdata = {
&s3c_device_wdt,
&s3c_device_i2c0,
&s3c_device_iis,
+ &s3c_device_pcm,
&s3c_device_usbgadget,
&s3c_device_rtc,
&s3c_device_nand,
@@ -503,6 +703,9 @@ static struct platform_device *rx1950_devices[] __initdata = {
&s3c_device_timer[1],
&rx1950_backlight,
&rx1950_device_gpiokeys,
+ &power_supply,
+ &rx1950_battery,
+ &rx1950_leds,
};
static struct clk *rx1950_clocks[] __initdata = {
@@ -538,7 +741,7 @@ static void __init rx1950_init_machine(void)
s3c24xx_udc_set_platdata(&rx1950_udc_cfg);
s3c24xx_ts_set_platdata(&rx1950_ts_cfg);
s3c24xx_mci_set_platdata(&rx1950_mmc_cfg);
- s3c_i2c0_set_platdata(&rx1950_i2c_data);
+ s3c_i2c0_set_platdata(NULL);
s3c_nand_set_platdata(&rx1950_nand_info);
/* Turn off suspend on both USB ports, and switch the
@@ -569,6 +772,9 @@ static void __init rx1950_init_machine(void)
WARN_ON(gpio_request(S3C2410_GPB(1), "LCD power"));
platform_add_devices(rx1950_devices, ARRAY_SIZE(rx1950_devices));
+
+ i2c_register_board_info(0, rx1950_i2c_devices,
+ ARRAY_SIZE(rx1950_i2c_devices));
}
/* H1940 and RX3715 need to reserve this for suspend */
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig
index fe1fd3007ae..579d2f0f4dd 100644
--- a/arch/arm/mach-s3c64xx/Kconfig
+++ b/arch/arm/mach-s3c64xx/Kconfig
@@ -143,7 +143,7 @@ config MACH_SMDK6410
select S3C_DEV_USB_HSOTG
select S3C_DEV_WDT
select SAMSUNG_DEV_KEYPAD
- select HAVE_S3C2410_WATCHDOG
+ select HAVE_S3C2410_WATCHDOG if WATCHDOG
select S3C64XX_SETUP_SDHCI
select S3C64XX_SETUP_I2C1
select S3C64XX_SETUP_IDE
@@ -206,6 +206,7 @@ config SMDK6410_WM1192_EV1
select REGULATOR_WM831X
select S3C24XX_GPIO_EXTRA64
select MFD_WM831X
+ select MFD_WM831X_I2C
help
The Wolfson Microelectronics 1192-EV1 is a WM831x based PMIC
daughtercard for the Samsung SMDK6410 reference platform.
diff --git a/arch/arm/mach-sa1100/cpu-sa1100.c b/arch/arm/mach-sa1100/cpu-sa1100.c
index c0a13ef5436..96f7dc103b5 100644
--- a/arch/arm/mach-sa1100/cpu-sa1100.c
+++ b/arch/arm/mach-sa1100/cpu-sa1100.c
@@ -184,16 +184,15 @@ static int sa1100_target(struct cpufreq_policy *policy,
{
unsigned int cur = sa11x0_getspeed(0);
unsigned int new_ppcr;
-
struct cpufreq_freqs freqs;
+
+ new_ppcr = sa11x0_freq_to_ppcr(target_freq);
switch(relation){
case CPUFREQ_RELATION_L:
- new_ppcr = sa11x0_freq_to_ppcr(target_freq);
if (sa11x0_ppcr_to_freq(new_ppcr) > policy->max)
new_ppcr--;
break;
case CPUFREQ_RELATION_H:
- new_ppcr = sa11x0_freq_to_ppcr(target_freq);
if ((sa11x0_ppcr_to_freq(new_ppcr) > target_freq) &&
(sa11x0_ppcr_to_freq(new_ppcr - 1) >= policy->min))
new_ppcr--;
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 54b479c35ee..51dcd59eda6 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -116,4 +116,6 @@ endmenu
config SH_CLK_CPG
bool
+source "drivers/sh/Kconfig"
+
endif
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 14923989ea0..d3260542b94 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -30,7 +30,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
-#include <linux/mmc/host.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/i2c.h>
#include <linux/i2c/tsc2007.h>
@@ -44,6 +43,10 @@
#include <linux/input/sh_keysc.h>
#include <linux/usb/r8a66597.h>
+#include <media/sh_mobile_ceu.h>
+#include <media/sh_mobile_csi2.h>
+#include <media/soc_camera.h>
+
#include <sound/sh_fsi.h>
#include <video/sh_mobile_hdmi.h>
@@ -160,11 +163,13 @@ static struct mtd_partition nor_flash_partitions[] = {
.name = "loader",
.offset = 0x00000000,
.size = 512 * 1024,
+ .mask_flags = MTD_WRITEABLE,
},
{
.name = "bootenv",
.offset = MTDPART_OFS_APPEND,
.size = 512 * 1024,
+ .mask_flags = MTD_WRITEABLE,
},
{
.name = "kernel_ro",
@@ -235,10 +240,22 @@ static struct platform_device smc911x_device = {
},
};
+/*
+ * The card detect pin of the top SD/MMC slot (CN7) is active low and is
+ * connected to GPIO A22 of SH7372 (GPIO_PORT41).
+ */
+static int slot_cn7_get_cd(struct platform_device *pdev)
+{
+ if (gpio_is_valid(GPIO_PORT41))
+ return !gpio_get_value(GPIO_PORT41);
+ else
+ return -ENXIO;
+}
+
/* SH_MMCIF */
static struct resource sh_mmcif_resources[] = {
[0] = {
- .name = "SH_MMCIF",
+ .name = "MMCIF",
.start = 0xE6BD0000,
.end = 0xE6BD00FF,
.flags = IORESOURCE_MEM,
@@ -261,6 +278,7 @@ static struct sh_mmcif_plat_data sh_mmcif_plat = {
.caps = MMC_CAP_4_BIT_DATA |
MMC_CAP_8_BIT_DATA |
MMC_CAP_NEEDS_POLL,
+ .get_cd = slot_cn7_get_cd,
};
static struct platform_device sh_mmcif_device = {
@@ -310,6 +328,8 @@ static struct sh_mobile_sdhi_info sdhi1_info = {
.dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
.tmio_ocr_mask = MMC_VDD_165_195,
.tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
+ .tmio_caps = MMC_CAP_NEEDS_POLL,
+ .get_cd = slot_cn7_get_cd,
};
static struct resource sdhi1_resources[] = {
@@ -375,10 +395,40 @@ static struct platform_device usb1_host_device = {
.resource = usb1_host_resources,
};
+const static struct fb_videomode ap4evb_lcdc_modes[] = {
+ {
+#ifdef CONFIG_AP4EVB_QHD
+ .name = "R63302(QHD)",
+ .xres = 544,
+ .yres = 961,
+ .left_margin = 72,
+ .right_margin = 600,
+ .hsync_len = 16,
+ .upper_margin = 8,
+ .lower_margin = 8,
+ .vsync_len = 2,
+ .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT,
+#else
+ .name = "WVGA Panel",
+ .xres = 800,
+ .yres = 480,
+ .left_margin = 220,
+ .right_margin = 110,
+ .hsync_len = 70,
+ .upper_margin = 20,
+ .lower_margin = 5,
+ .vsync_len = 5,
+ .sync = 0,
+#endif
+ },
+};
+
static struct sh_mobile_lcdc_info lcdc_info = {
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
.bpp = 16,
+ .lcd_cfg = ap4evb_lcdc_modes,
+ .num_cfg = ARRAY_SIZE(ap4evb_lcdc_modes),
}
};
@@ -517,26 +567,41 @@ static struct platform_device *qhd_devices[] __initdata = {
/* FSI */
#define IRQ_FSI evt2irq(0x1840)
-#define FSIACKCR 0xE6150018
-static void fsiackcr_init(struct clk *clk)
+
+static int fsi_set_rate(int is_porta, int rate)
{
- u32 status = __raw_readl(clk->enable_reg);
+ struct clk *fsib_clk;
+ struct clk *fdiv_clk = &sh7372_fsidivb_clk;
+ int ret;
- /* use external clock */
- status &= ~0x000000ff;
- status |= 0x00000080;
- __raw_writel(status, clk->enable_reg);
-}
+	/* set_rate is not needed for port A */
+ if (is_porta)
+ return 0;
+
+ fsib_clk = clk_get(NULL, "fsib_clk");
+ if (IS_ERR(fsib_clk))
+ return -EINVAL;
+
+ switch (rate) {
+ case 44100:
+ clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 11283000));
+ ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
+ break;
+ case 48000:
+ clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 85428000));
+ clk_set_rate(fdiv_clk, clk_round_rate(fdiv_clk, 12204000));
+ ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
+ break;
+ default:
+ pr_err("unsupported rate in FSI2 port B\n");
+ ret = -EINVAL;
+ break;
+ }
-static struct clk_ops fsiackcr_clk_ops = {
- .init = fsiackcr_init,
-};
+ clk_put(fsib_clk);
-static struct clk fsiackcr_clk = {
- .ops = &fsiackcr_clk_ops,
- .enable_reg = (void __iomem *)FSIACKCR,
- .rate = 0, /* unknown */
-};
+ return ret;
+}
static struct sh_fsi_platform_info fsi_info = {
.porta_flags = SH_FSI_BRS_INV |
@@ -544,6 +609,12 @@ static struct sh_fsi_platform_info fsi_info = {
SH_FSI_IN_SLAVE_MODE |
SH_FSI_OFMT(PCM) |
SH_FSI_IFMT(PCM),
+
+ .portb_flags = SH_FSI_BRS_INV |
+ SH_FSI_BRM_INV |
+ SH_FSI_LRS_INV |
+ SH_FSI_OFMT(SPDIF),
+ .set_rate = fsi_set_rate,
};
static struct resource fsi_resources[] = {
@@ -577,26 +648,6 @@ static struct sh_mobile_lcdc_info sh_mobile_lcdc1_info = {
.interface_type = RGB24,
.clock_divider = 1,
.flags = LCDC_FLAGS_DWPOL,
- .lcd_cfg = {
- .name = "HDMI",
- /* So far only 720p is supported */
- .xres = 1280,
- .yres = 720,
- /*
- * If left and right margins are not multiples of 8,
- * LDHAJR will be adjusted accordingly by the LCDC
- * driver. Until we start using EDID, these values
- * might have to be adjusted for different monitors.
- */
- .left_margin = 200,
- .right_margin = 88,
- .hsync_len = 48,
- .upper_margin = 20,
- .lower_margin = 5,
- .vsync_len = 5,
- .pixclock = 13468,
- .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT,
- },
}
};
@@ -608,7 +659,7 @@ static struct resource lcdc1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = intcs_evt2irq(0x17a0),
+ .start = intcs_evt2irq(0x1780),
.flags = IORESOURCE_IRQ,
},
};
@@ -627,6 +678,7 @@ static struct platform_device lcdc1_device = {
static struct sh_mobile_hdmi_info hdmi_info = {
.lcd_chan = &sh_mobile_lcdc1_info.ch[0],
.lcd_dev = &lcdc1_device.dev,
+ .flags = HDMI_SND_SRC_SPDIF,
};
static struct resource hdmi_resources[] = {
@@ -689,6 +741,95 @@ static struct platform_device leds_device = {
},
};
+static struct i2c_board_info imx074_info = {
+ I2C_BOARD_INFO("imx074", 0x1a),
+};
+
+struct soc_camera_link imx074_link = {
+ .bus_id = 0,
+ .board_info = &imx074_info,
+ .i2c_adapter_id = 0,
+ .module_name = "imx074",
+};
+
+static struct platform_device ap4evb_camera = {
+ .name = "soc-camera-pdrv",
+ .id = 0,
+ .dev = {
+ .platform_data = &imx074_link,
+ },
+};
+
+static struct sh_csi2_client_config csi2_clients[] = {
+ {
+ .phy = SH_CSI2_PHY_MAIN,
+ .lanes = 3,
+ .channel = 0,
+ .pdev = &ap4evb_camera,
+ },
+};
+
+static struct sh_csi2_pdata csi2_info = {
+ .type = SH_CSI2C,
+ .clients = csi2_clients,
+ .num_clients = ARRAY_SIZE(csi2_clients),
+ .flags = SH_CSI2_ECC | SH_CSI2_CRC,
+};
+
+static struct resource csi2_resources[] = {
+ [0] = {
+ .name = "CSI2",
+ .start = 0xffc90000,
+ .end = 0xffc90fff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = intcs_evt2irq(0x17a0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device csi2_device = {
+ .name = "sh-mobile-csi2",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(csi2_resources),
+ .resource = csi2_resources,
+ .dev = {
+ .platform_data = &csi2_info,
+ },
+};
+
+static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
+ .flags = SH_CEU_FLAG_USE_8BIT_BUS,
+ .csi2_dev = &csi2_device.dev,
+};
+
+static struct resource ceu_resources[] = {
+ [0] = {
+ .name = "CEU",
+ .start = 0xfe910000,
+ .end = 0xfe91009f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = intcs_evt2irq(0x880),
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+		/* placeholder for contiguous memory */
+ },
+};
+
+static struct platform_device ceu_device = {
+ .name = "sh_mobile_ceu",
+ .id = 0, /* "ceu0" clock */
+ .num_resources = ARRAY_SIZE(ceu_resources),
+ .resource = ceu_resources,
+ .dev = {
+ .platform_data = &sh_mobile_ceu_info,
+ },
+};
+
static struct platform_device *ap4evb_devices[] __initdata = {
&leds_device,
&nor_flash_device,
@@ -701,6 +842,9 @@ static struct platform_device *ap4evb_devices[] __initdata = {
&lcdc1_device,
&lcdc_device,
&hdmi_device,
+ &csi2_device,
+ &ceu_device,
+ &ap4evb_camera,
};
static int __init hdmi_init_pm_clock(void)
@@ -715,22 +859,22 @@ static int __init hdmi_init_pm_clock(void)
goto out;
}
- ret = clk_set_parent(&pllc2_clk, &dv_clki_div2_clk);
+ ret = clk_set_parent(&sh7372_pllc2_clk, &sh7372_dv_clki_div2_clk);
if (ret < 0) {
- pr_err("Cannot set PLLC2 parent: %d, %d users\n", ret, pllc2_clk.usecount);
+ pr_err("Cannot set PLLC2 parent: %d, %d users\n", ret, sh7372_pllc2_clk.usecount);
goto out;
}
- pr_debug("PLLC2 initial frequency %lu\n", clk_get_rate(&pllc2_clk));
+ pr_debug("PLLC2 initial frequency %lu\n", clk_get_rate(&sh7372_pllc2_clk));
- rate = clk_round_rate(&pllc2_clk, 594000000);
+ rate = clk_round_rate(&sh7372_pllc2_clk, 594000000);
if (rate < 0) {
pr_err("Cannot get suitable rate: %ld\n", rate);
ret = rate;
goto out;
}
- ret = clk_set_rate(&pllc2_clk, rate);
+ ret = clk_set_rate(&sh7372_pllc2_clk, rate);
if (ret < 0) {
pr_err("Cannot set rate %ld: %d\n", rate, ret);
goto out;
@@ -738,7 +882,7 @@ static int __init hdmi_init_pm_clock(void)
pr_debug("PLLC2 set frequency %lu\n", rate);
- ret = clk_set_parent(hdmi_ick, &pllc2_clk);
+ ret = clk_set_parent(hdmi_ick, &sh7372_pllc2_clk);
if (ret < 0) {
pr_err("Cannot set HDMI parent: %d\n", ret);
goto out;
@@ -752,11 +896,51 @@ out:
device_initcall(hdmi_init_pm_clock);
+#define FSIACK_DUMMY_RATE 48000
+static int __init fsi_init_pm_clock(void)
+{
+ struct clk *fsia_ick;
+ int ret;
+
+ /*
+	 * FSIACK is connected to AK4642,
+	 * and its rate depends on the sample rate being played,
+	 * so set a dummy rate (= 48 kHz) here.
+ */
+ ret = clk_set_rate(&sh7372_fsiack_clk, FSIACK_DUMMY_RATE);
+ if (ret < 0) {
+ pr_err("Cannot set FSIACK dummy rate: %d\n", ret);
+ return ret;
+ }
+
+ fsia_ick = clk_get(&fsi_device.dev, "icka");
+ if (IS_ERR(fsia_ick)) {
+ ret = PTR_ERR(fsia_ick);
+ pr_err("Cannot get FSI ICK: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_set_parent(fsia_ick, &sh7372_fsiack_clk);
+ if (ret < 0) {
+ pr_err("Cannot set FSI-A parent: %d\n", ret);
+ goto out;
+ }
+
+ ret = clk_set_rate(fsia_ick, FSIACK_DUMMY_RATE);
+ if (ret < 0)
+ pr_err("Cannot set FSI-A rate: %d\n", ret);
+
+out:
+ clk_put(fsia_ick);
+
+ return ret;
+}
+device_initcall(fsi_init_pm_clock);
+
/*
* FIXME !!
*
* gpio_no_direction
- * gpio_pull_up
* are quick_hack.
*
* current gpio frame work doesn't have
@@ -768,49 +952,37 @@ static void __init gpio_no_direction(u32 addr)
__raw_writeb(0x00, addr);
}
-static void __init gpio_pull_up(u32 addr)
-{
- u8 data = __raw_readb(addr);
-
- data &= 0x0F;
- data |= 0xC0;
- __raw_writeb(data, addr);
-}
-
/* TouchScreen */
+#ifdef CONFIG_AP4EVB_QHD
+# define GPIO_TSC_IRQ GPIO_FN_IRQ28_123
+# define GPIO_TSC_PORT GPIO_PORT123
+#else /* WVGA */
+# define GPIO_TSC_IRQ GPIO_FN_IRQ7_40
+# define GPIO_TSC_PORT GPIO_PORT40
+#endif
+
#define IRQ28 evt2irq(0x3380) /* IRQ28A */
#define IRQ7 evt2irq(0x02e0) /* IRQ7A */
static int ts_get_pendown_state(void)
{
- int val1, val2;
+ int val;
- gpio_free(GPIO_FN_IRQ28_123);
- gpio_free(GPIO_FN_IRQ7_40);
+ gpio_free(GPIO_TSC_IRQ);
- gpio_request(GPIO_PORT123, NULL);
- gpio_request(GPIO_PORT40, NULL);
+ gpio_request(GPIO_TSC_PORT, NULL);
- gpio_direction_input(GPIO_PORT123);
- gpio_direction_input(GPIO_PORT40);
+ gpio_direction_input(GPIO_TSC_PORT);
- val1 = gpio_get_value(GPIO_PORT123);
- val2 = gpio_get_value(GPIO_PORT40);
+ val = gpio_get_value(GPIO_TSC_PORT);
- gpio_request(GPIO_FN_IRQ28_123, NULL); /* for QHD */
- gpio_request(GPIO_FN_IRQ7_40, NULL); /* for WVGA */
+ gpio_request(GPIO_TSC_IRQ, NULL);
- return val1 ^ val2;
+ return !val;
}
-#define PORT40CR 0xE6051028
-#define PORT123CR 0xE605007B
static int ts_init(void)
{
- gpio_request(GPIO_FN_IRQ28_123, NULL); /* for QHD */
- gpio_request(GPIO_FN_IRQ7_40, NULL); /* for WVGA */
-
- gpio_pull_up(PORT40CR);
- gpio_pull_up(PORT123CR);
+ gpio_request(GPIO_TSC_IRQ, NULL);
return 0;
}
@@ -865,6 +1037,7 @@ static void __init ap4evb_map_io(void)
#define GPIO_PORT9CR 0xE6051009
#define GPIO_PORT10CR 0xE605100A
+#define USCCR1 0xE6058144
static void __init ap4evb_init(void)
{
u32 srcr4;
@@ -935,7 +1108,7 @@ static void __init ap4evb_init(void)
/* setup USB phy */
__raw_writew(0x8a0a, 0xE6058130); /* USBCR2 */
- /* enable FSI2 */
+ /* enable FSI2 port A (ak4643) */
gpio_request(GPIO_FN_FSIAIBT, NULL);
gpio_request(GPIO_FN_FSIAILR, NULL);
gpio_request(GPIO_FN_FSIAISLD, NULL);
@@ -948,6 +1121,14 @@ static void __init ap4evb_init(void)
gpio_no_direction(GPIO_PORT9CR); /* FSIAOBT needs no direction */
gpio_no_direction(GPIO_PORT10CR); /* FSIAOLR needs no direction */
+ /* card detect pin for MMC slot (CN7) */
+ gpio_request(GPIO_PORT41, NULL);
+ gpio_direction_input(GPIO_PORT41);
+
+ /* setup FSI2 port B (HDMI) */
+ gpio_request(GPIO_FN_FSIBCK, NULL);
+ __raw_writew(__raw_readw(USCCR1) & ~(1 << 6), USCCR1); /* use SPDIF */
+
/* set SPU2 clock to 119.6 MHz */
clk = clk_get(NULL, "spu_clk");
if (!IS_ERR(clk)) {
@@ -955,14 +1136,6 @@ static void __init ap4evb_init(void)
clk_put(clk);
}
- /* change parent of FSI A */
- clk = clk_get(NULL, "fsia_clk");
- if (!IS_ERR(clk)) {
- clk_register(&fsiackcr_clk);
- clk_set_parent(clk, &fsiackcr_clk);
- clk_put(clk);
- }
-
/*
* set irq priority, to avoid sound chopping
* when NFS rootfs is used
@@ -977,8 +1150,10 @@ static void __init ap4evb_init(void)
ARRAY_SIZE(i2c1_devices));
#ifdef CONFIG_AP4EVB_QHD
+
/*
- * QHD
+ * For QHD Panel (MIPI-DSI, CONFIG_AP4EVB_QHD=y) and
+ * IRQ28 for Touch Panel, set dip switches S3, S43 as OFF, ON.
*/
/* enable KEYSC */
@@ -1004,17 +1179,6 @@ static void __init ap4evb_init(void)
lcdc_info.ch[0].interface_type = RGB24;
lcdc_info.ch[0].clock_divider = 1;
lcdc_info.ch[0].flags = LCDC_FLAGS_DWPOL;
- lcdc_info.ch[0].lcd_cfg.name = "R63302(QHD)";
- lcdc_info.ch[0].lcd_cfg.xres = 544;
- lcdc_info.ch[0].lcd_cfg.yres = 961;
- lcdc_info.ch[0].lcd_cfg.left_margin = 72;
- lcdc_info.ch[0].lcd_cfg.right_margin = 600;
- lcdc_info.ch[0].lcd_cfg.hsync_len = 16;
- lcdc_info.ch[0].lcd_cfg.upper_margin = 8;
- lcdc_info.ch[0].lcd_cfg.lower_margin = 8;
- lcdc_info.ch[0].lcd_cfg.vsync_len = 2;
- lcdc_info.ch[0].lcd_cfg.sync = FB_SYNC_VERT_HIGH_ACT |
- FB_SYNC_HOR_HIGH_ACT;
lcdc_info.ch[0].lcd_size_cfg.width = 44;
lcdc_info.ch[0].lcd_size_cfg.height = 79;
@@ -1022,8 +1186,10 @@ static void __init ap4evb_init(void)
#else
/*
- * WVGA
+ * For WVGA Panel (18-bit RGB, CONFIG_AP4EVB_WVGA=y) and
+ * IRQ7 for Touch Panel, set dip switches S3, S43 to ON, OFF.
*/
+
gpio_request(GPIO_FN_LCDD17, NULL);
gpio_request(GPIO_FN_LCDD16, NULL);
gpio_request(GPIO_FN_LCDD15, NULL);
@@ -1055,16 +1221,6 @@ static void __init ap4evb_init(void)
lcdc_info.ch[0].interface_type = RGB18;
lcdc_info.ch[0].clock_divider = 2;
lcdc_info.ch[0].flags = 0;
- lcdc_info.ch[0].lcd_cfg.name = "WVGA Panel";
- lcdc_info.ch[0].lcd_cfg.xres = 800;
- lcdc_info.ch[0].lcd_cfg.yres = 480;
- lcdc_info.ch[0].lcd_cfg.left_margin = 220;
- lcdc_info.ch[0].lcd_cfg.right_margin = 110;
- lcdc_info.ch[0].lcd_cfg.hsync_len = 70;
- lcdc_info.ch[0].lcd_cfg.upper_margin = 20;
- lcdc_info.ch[0].lcd_cfg.lower_margin = 5;
- lcdc_info.ch[0].lcd_cfg.vsync_len = 5;
- lcdc_info.ch[0].lcd_cfg.sync = 0;
lcdc_info.ch[0].lcd_size_cfg.width = 152;
lcdc_info.ch[0].lcd_size_cfg.height = 91;
@@ -1075,6 +1231,23 @@ static void __init ap4evb_init(void)
i2c_register_board_info(0, &tsc_device, 1);
#endif /* CONFIG_AP4EVB_QHD */
+ /* CEU */
+
+ /*
+ * TODO: reserve memory for V4L2 DMA buffers, when a suitable API
+ * becomes available
+ */
+
+ /* MIPI-CSI stuff */
+ gpio_request(GPIO_FN_VIO_CKO, NULL);
+
+ clk = clk_get(NULL, "vck1_clk");
+ if (!IS_ERR(clk)) {
+ clk_set_rate(clk, clk_round_rate(clk, 13000000));
+ clk_enable(clk);
+ clk_put(clk);
+ }
+
sh7372_add_standard_devices();
/* HDMI */
@@ -1097,7 +1270,7 @@ static void __init ap4evb_timer_init(void)
shmobile_timer.init();
/* External clock source */
- clk_set_rate(&dv_clki_clk, 27000000);
+ clk_set_rate(&sh7372_dv_clki_clk, 27000000);
}
static struct sys_timer ap4evb_timer = {
diff --git a/arch/arm/mach-shmobile/clock-sh7367.c b/arch/arm/mach-shmobile/clock-sh7367.c
index b6454c9f2ab..9f78729098f 100644
--- a/arch/arm/mach-shmobile/clock-sh7367.c
+++ b/arch/arm/mach-shmobile/clock-sh7367.c
@@ -321,7 +321,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[SYMSTP001]), /* SCIFA3 */
CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[SYMSTP000]), /* SCIFA4 */
CLKDEV_DEV_ID("sh_siu", &mstp_clks[SYMSTP231]), /* SIU */
- CLKDEV_CON_ID("cmt1", &mstp_clks[SYMSTP229]), /* CMT10 */
+ CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[SYMSTP229]), /* CMT10 */
CLKDEV_DEV_ID("sh_irda", &mstp_clks[SYMSTP225]), /* IRDA */
CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[SYMSTP223]), /* IIC1 */
CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[SYMSTP222]), /* USBHS */
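The one-line change above moves the CMT10 clock from a bare connection-id key to a device-name key, so the lookup is anchored to the "sh_cmt.10" device rather than a global name. A standalone sketch of that matching idea, using a toy table rather than the real clkdev API:

#include <stdio.h>
#include <string.h>

struct clk { const char *name; };

struct lookup {
	const char *dev_id;	/* device name, e.g. "sh_cmt.10" */
	const char *con_id;	/* connection id, e.g. "cmt1" */
	struct clk *clk;
};

static struct clk cmt10_clk = { "MSTP229" };

static struct lookup table[] = {
	{ "sh_cmt.10", NULL, &cmt10_clk },	/* keyed by device name, as in the hunk */
};

/* Return the first entry whose non-NULL keys all match the request. */
static struct clk *toy_clk_get(const char *dev, const char *con)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].dev_id && (!dev || strcmp(table[i].dev_id, dev)))
			continue;
		if (table[i].con_id && (!con || strcmp(table[i].con_id, con)))
			continue;
		return table[i].clk;
	}
	return NULL;
}

int main(void)
{
	struct clk *clk = toy_clk_get("sh_cmt.10", NULL);

	printf("%s\n", clk ? clk->name : "not found");
	return 0;
}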
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index 759468992ad..7db31e6c6bf 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -50,8 +50,11 @@
#define SMSTPCR3 0xe615013c
#define SMSTPCR4 0xe6150140
+#define FSIDIVA 0xFE1F8000
+#define FSIDIVB 0xFE1F8008
+
/* Platforms must set frequency on their DV_CLKI pin */
-struct clk dv_clki_clk = {
+struct clk sh7372_dv_clki_clk = {
};
/* Fixed 32 KHz root clock from EXTALR pin */
@@ -86,9 +89,9 @@ static struct clk_ops div2_clk_ops = {
};
/* Divide dv_clki by two */
-struct clk dv_clki_div2_clk = {
+struct clk sh7372_dv_clki_div2_clk = {
.ops = &div2_clk_ops,
- .parent = &dv_clki_clk,
+ .parent = &sh7372_dv_clki_clk,
};
/* Divide extal1 by two */
@@ -150,7 +153,7 @@ static struct clk pllc1_div2_clk = {
static struct clk *pllc2_parent[] = {
[0] = &extal1_div2_clk,
[1] = &extal2_div2_clk,
- [2] = &dv_clki_div2_clk,
+ [2] = &sh7372_dv_clki_div2_clk,
};
/* Only multipliers 20 * 2 to 46 * 2 are valid, last entry for CPUFREQ_TABLE_END */
@@ -284,27 +287,37 @@ static struct clk_ops pllc2_clk_ops = {
.set_parent = pllc2_set_parent,
};
-struct clk pllc2_clk = {
+struct clk sh7372_pllc2_clk = {
.ops = &pllc2_clk_ops,
.parent = &extal1_div2_clk,
.freq_table = pllc2_freq_table,
+ .nr_freqs = ARRAY_SIZE(pllc2_freq_table) - 1,
.parent_table = pllc2_parent,
.parent_num = ARRAY_SIZE(pllc2_parent),
};
+/* External input clock (pin name: FSIACK/FSIBCK) */
+struct clk sh7372_fsiack_clk = {
+};
+
+struct clk sh7372_fsibck_clk = {
+};
+
static struct clk *main_clks[] = {
- &dv_clki_clk,
+ &sh7372_dv_clki_clk,
&r_clk,
&sh7372_extal1_clk,
&sh7372_extal2_clk,
- &dv_clki_div2_clk,
+ &sh7372_dv_clki_div2_clk,
&extal1_div2_clk,
&extal2_div2_clk,
&extal2_div4_clk,
&pllc0_clk,
&pllc1_clk,
&pllc1_div2_clk,
- &pllc2_clk,
+ &sh7372_pllc2_clk,
+ &sh7372_fsiack_clk,
+ &sh7372_fsibck_clk,
};
static void div4_kick(struct clk *clk)
@@ -357,7 +370,7 @@ static struct clk div4_clks[DIV4_NR] = {
};
enum { DIV6_VCK1, DIV6_VCK2, DIV6_VCK3, DIV6_FMSI, DIV6_FMSO,
- DIV6_FSIA, DIV6_FSIB, DIV6_SUB, DIV6_SPU,
+ DIV6_SUB, DIV6_SPU,
DIV6_VOU, DIV6_DSIT, DIV6_DSI0P, DIV6_DSI1P,
DIV6_NR };
@@ -367,8 +380,6 @@ static struct clk div6_clks[DIV6_NR] = {
[DIV6_VCK3] = SH_CLK_DIV6(&pllc1_div2_clk, VCLKCR3, 0),
[DIV6_FMSI] = SH_CLK_DIV6(&pllc1_div2_clk, FMSICKCR, 0),
[DIV6_FMSO] = SH_CLK_DIV6(&pllc1_div2_clk, FMSOCKCR, 0),
- [DIV6_FSIA] = SH_CLK_DIV6(&pllc1_div2_clk, FSIACKCR, 0),
- [DIV6_FSIB] = SH_CLK_DIV6(&pllc1_div2_clk, FSIBCKCR, 0),
[DIV6_SUB] = SH_CLK_DIV6(&sh7372_extal2_clk, SUBCKCR, 0),
[DIV6_SPU] = SH_CLK_DIV6(&pllc1_div2_clk, SPUCKCR, 0),
[DIV6_VOU] = SH_CLK_DIV6(&pllc1_div2_clk, VOUCKCR, 0),
@@ -377,24 +388,137 @@ static struct clk div6_clks[DIV6_NR] = {
[DIV6_DSI1P] = SH_CLK_DIV6(&pllc1_div2_clk, DSI1PCKCR, 0),
};
-enum { DIV6_HDMI, DIV6_REPARENT_NR };
+enum { DIV6_HDMI, DIV6_FSIA, DIV6_FSIB, DIV6_REPARENT_NR };
/* Indices are important - they are the actual src selecting values */
static struct clk *hdmi_parent[] = {
[0] = &pllc1_div2_clk,
- [1] = &pllc2_clk,
- [2] = &dv_clki_clk,
+ [1] = &sh7372_pllc2_clk,
+ [2] = &sh7372_dv_clki_clk,
[3] = NULL, /* pllc2_div4 not implemented yet */
};
+static struct clk *fsiackcr_parent[] = {
+ [0] = &pllc1_div2_clk,
+ [1] = &sh7372_pllc2_clk,
+ [2] = &sh7372_fsiack_clk, /* external input for FSI A */
+ [3] = NULL, /* setting prohibited */
+};
+
+static struct clk *fsibckcr_parent[] = {
+ [0] = &pllc1_div2_clk,
+ [1] = &sh7372_pllc2_clk,
+ [2] = &sh7372_fsibck_clk, /* external input for FSI B */
+ [3] = NULL, /* setting prohibited */
+};
+
static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
[DIV6_HDMI] = SH_CLK_DIV6_EXT(&pllc1_div2_clk, HDMICKCR, 0,
hdmi_parent, ARRAY_SIZE(hdmi_parent), 6, 2),
+ [DIV6_FSIA] = SH_CLK_DIV6_EXT(&pllc1_div2_clk, FSIACKCR, 0,
+ fsiackcr_parent, ARRAY_SIZE(fsiackcr_parent), 6, 2),
+ [DIV6_FSIB] = SH_CLK_DIV6_EXT(&pllc1_div2_clk, FSIBCKCR, 0,
+ fsibckcr_parent, ARRAY_SIZE(fsibckcr_parent), 6, 2),
+};
+
+/* FSI DIV */
+static unsigned long fsidiv_recalc(struct clk *clk)
+{
+ unsigned long value;
+
+ value = __raw_readl(clk->mapping->base);
+
+ if ((value & 0x3) != 0x3)
+ return 0;
+
+ value >>= 16;
+ if (value < 2)
+ return 0;
+
+ return clk->parent->rate / value;
+}
+
+static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
+{
+ return clk_rate_div_range_round(clk, 2, 0xffff, rate);
+}
+
+static void fsidiv_disable(struct clk *clk)
+{
+ __raw_writel(0, clk->mapping->base);
+}
+
+static int fsidiv_enable(struct clk *clk)
+{
+ unsigned long value;
+
+ value = __raw_readl(clk->mapping->base) >> 16;
+ if (value < 2) {
+ fsidiv_disable(clk);
+ return -ENOENT;
+ }
+
+ __raw_writel((value << 16) | 0x3, clk->mapping->base);
+
+ return 0;
+}
+
+static int fsidiv_set_rate(struct clk *clk,
+ unsigned long rate, int algo_id)
+{
+ int idx;
+
+ if (clk->parent->rate == rate) {
+ fsidiv_disable(clk);
+ return 0;
+ }
+
+ idx = (clk->parent->rate / rate) & 0xffff;
+ if (idx < 2)
+ return -ENOENT;
+
+ __raw_writel(idx << 16, clk->mapping->base);
+ return fsidiv_enable(clk);
+}
+
+static struct clk_ops fsidiv_clk_ops = {
+ .recalc = fsidiv_recalc,
+ .round_rate = fsidiv_round_rate,
+ .set_rate = fsidiv_set_rate,
+ .enable = fsidiv_enable,
+ .disable = fsidiv_disable,
+};
+
+static struct clk_mapping sh7372_fsidiva_clk_mapping = {
+ .phys = FSIDIVA,
+ .len = 8,
+};
+
+struct clk sh7372_fsidiva_clk = {
+ .ops = &fsidiv_clk_ops,
+ .parent = &div6_reparent_clks[DIV6_FSIA], /* late install */
+ .mapping = &sh7372_fsidiva_clk_mapping,
+};
+
+static struct clk_mapping sh7372_fsidivb_clk_mapping = {
+ .phys = FSIDIVB,
+ .len = 8,
+};
+
+struct clk sh7372_fsidivb_clk = {
+ .ops = &fsidiv_clk_ops,
+ .parent = &div6_reparent_clks[DIV6_FSIB], /* late install */
+ .mapping = &sh7372_fsidivb_clk_mapping,
+};
+
+static struct clk *late_main_clks[] = {
+ &sh7372_fsidiva_clk,
+ &sh7372_fsidivb_clk,
};
enum { MSTP001,
MSTP131, MSTP130,
- MSTP129, MSTP128, MSTP127, MSTP126,
+ MSTP129, MSTP128, MSTP127, MSTP126, MSTP125,
MSTP118, MSTP117, MSTP116,
MSTP106, MSTP101, MSTP100,
MSTP223,
@@ -414,6 +538,7 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */
[MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU */
[MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2 */
+ [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
[MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */
[MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
[MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
@@ -429,7 +554,7 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
[MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
[MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
- [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSIA */
+ [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSI2 */
[MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
[MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
[MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
@@ -445,10 +570,11 @@ static struct clk mstp_clks[MSTP_NR] = {
#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }
+#define CLKDEV_ICK_ID(_cid, _did, _clk) { .con_id = _cid, .dev_id = _did, .clk = _clk }
static struct clk_lookup lookups[] = {
/* main clocks */
- CLKDEV_CON_ID("dv_clki_div2_clk", &dv_clki_div2_clk),
+ CLKDEV_CON_ID("dv_clki_div2_clk", &sh7372_dv_clki_div2_clk),
CLKDEV_CON_ID("r_clk", &r_clk),
CLKDEV_CON_ID("extal1", &sh7372_extal1_clk),
CLKDEV_CON_ID("extal2", &sh7372_extal2_clk),
@@ -458,7 +584,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("pllc0_clk", &pllc0_clk),
CLKDEV_CON_ID("pllc1_clk", &pllc1_clk),
CLKDEV_CON_ID("pllc1_div2_clk", &pllc1_div2_clk),
- CLKDEV_CON_ID("pllc2_clk", &pllc2_clk),
+ CLKDEV_CON_ID("pllc2_clk", &sh7372_pllc2_clk),
/* DIV4 clocks */
CLKDEV_CON_ID("i_clk", &div4_clks[DIV4_I]),
@@ -483,8 +609,8 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
CLKDEV_CON_ID("fmsi_clk", &div6_clks[DIV6_FMSI]),
CLKDEV_CON_ID("fmso_clk", &div6_clks[DIV6_FMSO]),
- CLKDEV_CON_ID("fsia_clk", &div6_clks[DIV6_FSIA]),
- CLKDEV_CON_ID("fsib_clk", &div6_clks[DIV6_FSIB]),
+ CLKDEV_CON_ID("fsia_clk", &div6_reparent_clks[DIV6_FSIA]),
+ CLKDEV_CON_ID("fsib_clk", &div6_reparent_clks[DIV6_FSIB]),
CLKDEV_CON_ID("sub_clk", &div6_clks[DIV6_SUB]),
CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_SPU]),
CLKDEV_CON_ID("vou_clk", &div6_clks[DIV6_VOU]),
@@ -501,6 +627,8 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */
CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU */
CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */
+ CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */
+ CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */
CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */
CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */
@@ -516,7 +644,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP202]), /* SCIFA2 */
CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), /* SCIFA3 */
CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */
- CLKDEV_CON_ID("cmt1", &mstp_clks[MSTP329]), /* CMT10 */
+ CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), /* FSI2 */
CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */
CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP323]), /* USB0 */
@@ -531,7 +659,10 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("r8a66597_hcd.1", &mstp_clks[MSTP406]), /* USB1 */
CLKDEV_DEV_ID("r8a66597_udc.1", &mstp_clks[MSTP406]), /* USB1 */
CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
- {.con_id = "ick", .dev_id = "sh-mobile-hdmi", .clk = &div6_reparent_clks[DIV6_HDMI]},
+
+ CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]),
+ CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]),
+ CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]),
};
void __init sh7372_clock_init(void)
@@ -548,11 +679,14 @@ void __init sh7372_clock_init(void)
ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_clk_div6_reparent_register(div6_reparent_clks, DIV6_NR);
+ ret = sh_clk_div6_reparent_register(div6_reparent_clks, DIV6_REPARENT_NR);
if (!ret)
ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+ for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++)
+ ret = clk_register(late_main_clks[k]);
+
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
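The FSIDIV ops added above imply a simple register layout: divisor in bits [31:16], enable mask in bits [1:0]. A sketch of the rate calculation mirrored from fsidiv_recalc(), using an illustrative helper that takes the raw register value instead of reading it through clk->mapping:

#include <linux/types.h>

static unsigned long fsidiv_rate_sketch(unsigned long parent_rate, u32 reg)
{
	u32 div = reg >> 16;		/* divisor field, bits [31:16] */

	if ((reg & 0x3) != 0x3)		/* both enable bits must be set */
		return 0;
	if (div < 2)			/* divisors below 2 are invalid */
		return 0;
	return parent_rate / div;	/* parent/2 ... parent/65535 */
}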
diff --git a/arch/arm/mach-shmobile/clock-sh7377.c b/arch/arm/mach-shmobile/clock-sh7377.c
index e007c28cf0a..f91395aeb9a 100644
--- a/arch/arm/mach-shmobile/clock-sh7377.c
+++ b/arch/arm/mach-shmobile/clock-sh7377.c
@@ -333,7 +333,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), /* SCIFA3 */
CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */
CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP331]), /* SCIFA6 */
- CLKDEV_CON_ID("cmt1", &mstp_clks[MSTP329]), /* CMT10 */
+ CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
CLKDEV_DEV_ID("sh_irda", &mstp_clks[MSTP325]), /* IRDA */
CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */
CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USBHS */
diff --git a/arch/arm/mach-shmobile/include/mach/gpio.h b/arch/arm/mach-shmobile/include/mach/gpio.h
index 5bc6bd444d7..2b1bb9e43dd 100644
--- a/arch/arm/mach-shmobile/include/mach/gpio.h
+++ b/arch/arm/mach-shmobile/include/mach/gpio.h
@@ -35,12 +35,12 @@ static inline int gpio_cansleep(unsigned gpio)
static inline int gpio_to_irq(unsigned gpio)
{
- return -ENOSYS;
+ return __gpio_to_irq(gpio);
}
static inline int irq_to_gpio(unsigned int irq)
{
- return -EINVAL;
+ return -ENOSYS;
}
#endif /* CONFIG_GPIOLIB */
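With gpio_to_irq() now routed through gpiolib's __gpio_to_irq(), shmobile board code can map a GPIO to an interrupt in the usual way. A sketch only; the GPIO number, label and handler below are made up:

#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t sketch_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int __init sketch_gpio_irq(unsigned gpio)
{
	int irq, ret;

	ret = gpio_request(gpio, "sketch");
	if (ret)
		return ret;
	gpio_direction_input(gpio);

	irq = gpio_to_irq(gpio);	/* now via __gpio_to_irq(), not -ENOSYS */
	if (irq < 0) {
		gpio_free(gpio);
		return irq;
	}

	return request_irq(irq, sketch_isr, IRQF_TRIGGER_FALLING, "sketch", NULL);
}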
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index 33e9700ded7..e4f9004e710 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -457,8 +457,14 @@ enum {
SHDMA_SLAVE_SDHI2_TX,
};
-extern struct clk dv_clki_clk;
-extern struct clk dv_clki_div2_clk;
-extern struct clk pllc2_clk;
+extern struct clk sh7372_extal1_clk;
+extern struct clk sh7372_extal2_clk;
+extern struct clk sh7372_dv_clki_clk;
+extern struct clk sh7372_dv_clki_div2_clk;
+extern struct clk sh7372_pllc2_clk;
+extern struct clk sh7372_fsiack_clk;
+extern struct clk sh7372_fsibck_clk;
+extern struct clk sh7372_fsidiva_clk;
+extern struct clk sh7372_fsidivb_clk;
#endif /* __ASM_SH7372_H__ */
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index e3551b56cd0..30b2f400666 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -98,7 +98,7 @@ static struct intc_vect intca_vectors[] __initdata = {
INTC_VECT(IRQ14A, 0x03c0), INTC_VECT(IRQ15A, 0x03e0),
INTC_VECT(IRQ16A, 0x3200), INTC_VECT(IRQ17A, 0x3220),
INTC_VECT(IRQ18A, 0x3240), INTC_VECT(IRQ19A, 0x3260),
- INTC_VECT(IRQ20A, 0x3280), INTC_VECT(IRQ31A, 0x32a0),
+ INTC_VECT(IRQ20A, 0x3280), INTC_VECT(IRQ21A, 0x32a0),
INTC_VECT(IRQ22A, 0x32c0), INTC_VECT(IRQ23A, 0x32e0),
INTC_VECT(IRQ24A, 0x3300), INTC_VECT(IRQ25A, 0x3320),
INTC_VECT(IRQ26A, 0x3340), INTC_VECT(IRQ27A, 0x3360),
@@ -369,9 +369,13 @@ enum {
INTCS,
/* interrupt sources INTCS */
+
+ /* IRQ0S - IRQ31S */
VEU_VEU0, VEU_VEU1, VEU_VEU2, VEU_VEU3,
RTDMAC_1_DEI0, RTDMAC_1_DEI1, RTDMAC_1_DEI2, RTDMAC_1_DEI3,
CEU, BEU_BEU0, BEU_BEU1, BEU_BEU2,
+ /* MFI */
+ /* BBIF2 */
VPU,
TSIF1,
_3DG_SGX530,
@@ -379,13 +383,17 @@ enum {
IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2,
IPMMU_IPMMUR, IPMMU_IPMMUR2,
RTDMAC_2_DEI4, RTDMAC_2_DEI5, RTDMAC_2_DADERR,
+ /* KEYSC */
+ /* TTI20 */
MSIOF,
IIC0_ALI0, IIC0_TACKI0, IIC0_WAITI0, IIC0_DTEI0,
TMU_TUNI0, TMU_TUNI1, TMU_TUNI2,
CMT0,
TSIF0,
+ /* CMT2 */
LMB,
CTI,
+ /* RWDT0 */
ICB,
JPU_JPEG,
LCDC,
@@ -397,11 +405,17 @@ enum {
CSIRX,
DSITX_DSITX0,
DSITX_DSITX1,
+ /* SPU2 */
+ /* FSI */
+ /* FMSI */
+ /* HDMI */
TMU1_TUNI0, TMU1_TUNI1, TMU1_TUNI2,
CMT4,
DSITX1_DSITX1_0,
DSITX1_DSITX1_1,
+ /* MFIS2 */
CPORTS2R,
+ /* CEC */
JPU6E,
/* interrupt groups INTCS */
@@ -410,12 +424,15 @@ enum {
};
static struct intc_vect intcs_vectors[] = {
+ /* IRQ0S - IRQ31S */
INTCS_VECT(VEU_VEU0, 0x700), INTCS_VECT(VEU_VEU1, 0x720),
INTCS_VECT(VEU_VEU2, 0x740), INTCS_VECT(VEU_VEU3, 0x760),
INTCS_VECT(RTDMAC_1_DEI0, 0x800), INTCS_VECT(RTDMAC_1_DEI1, 0x820),
INTCS_VECT(RTDMAC_1_DEI2, 0x840), INTCS_VECT(RTDMAC_1_DEI3, 0x860),
INTCS_VECT(CEU, 0x880), INTCS_VECT(BEU_BEU0, 0x8a0),
INTCS_VECT(BEU_BEU1, 0x8c0), INTCS_VECT(BEU_BEU2, 0x8e0),
+ /* MFI */
+ /* BBIF2 */
INTCS_VECT(VPU, 0x980),
INTCS_VECT(TSIF1, 0x9a0),
INTCS_VECT(_3DG_SGX530, 0x9e0),
@@ -425,14 +442,19 @@ static struct intc_vect intcs_vectors[] = {
INTCS_VECT(IPMMU_IPMMUR, 0xb00), INTCS_VECT(IPMMU_IPMMUR2, 0xb20),
INTCS_VECT(RTDMAC_2_DEI4, 0xb80), INTCS_VECT(RTDMAC_2_DEI5, 0xba0),
INTCS_VECT(RTDMAC_2_DADERR, 0xbc0),
+ /* KEYSC */
+ /* TTI20 */
+ INTCS_VECT(MSIOF, 0x0d20),
INTCS_VECT(IIC0_ALI0, 0xe00), INTCS_VECT(IIC0_TACKI0, 0xe20),
INTCS_VECT(IIC0_WAITI0, 0xe40), INTCS_VECT(IIC0_DTEI0, 0xe60),
INTCS_VECT(TMU_TUNI0, 0xe80), INTCS_VECT(TMU_TUNI1, 0xea0),
INTCS_VECT(TMU_TUNI2, 0xec0),
INTCS_VECT(CMT0, 0xf00),
INTCS_VECT(TSIF0, 0xf20),
+ /* CMT2 */
INTCS_VECT(LMB, 0xf60),
INTCS_VECT(CTI, 0x400),
+ /* RWDT0 */
INTCS_VECT(ICB, 0x480),
INTCS_VECT(JPU_JPEG, 0x560),
INTCS_VECT(LCDC, 0x580),
@@ -446,12 +468,18 @@ static struct intc_vect intcs_vectors[] = {
INTCS_VECT(CSIRX, 0x17a0),
INTCS_VECT(DSITX_DSITX0, 0x17c0),
INTCS_VECT(DSITX_DSITX1, 0x17e0),
+ /* SPU2 */
+ /* FSI */
+ /* FMSI */
+ /* HDMI */
INTCS_VECT(TMU1_TUNI0, 0x1900), INTCS_VECT(TMU1_TUNI1, 0x1920),
INTCS_VECT(TMU1_TUNI2, 0x1940),
INTCS_VECT(CMT4, 0x1980),
INTCS_VECT(DSITX1_DSITX1_0, 0x19a0),
INTCS_VECT(DSITX1_DSITX1_1, 0x19c0),
+ /* MFIS2 */
INTCS_VECT(CPORTS2R, 0x1a20),
+ /* CEC */
INTCS_VECT(JPU6E, 0x1a80),
INTC_VECT(INTCS, 0xf80),
diff --git a/arch/arm/mach-shmobile/pfc-sh7372.c b/arch/arm/mach-shmobile/pfc-sh7372.c
index ec420353f8e..9c265dae138 100644
--- a/arch/arm/mach-shmobile/pfc-sh7372.c
+++ b/arch/arm/mach-shmobile/pfc-sh7372.c
@@ -166,12 +166,12 @@ enum {
MSIOF2_TSYNC_MARK, MSIOF2_TSCK_MARK, MSIOF2_RXD_MARK,
MSIOF2_TXD_MARK,
- /* MSIOF3 */
+ /* BBIF1 */
BBIF1_RXD_MARK, BBIF1_TSYNC_MARK, BBIF1_TSCK_MARK,
BBIF1_TXD_MARK, BBIF1_RSCK_MARK, BBIF1_RSYNC_MARK,
BBIF1_FLOW_MARK, BB_RX_FLOW_N_MARK,
- /* MSIOF4 */
+ /* BBIF2 */
BBIF2_TSCK1_MARK, BBIF2_TSYNC1_MARK,
BBIF2_TXD1_MARK, BBIF2_RXD_MARK,
@@ -976,12 +976,12 @@ static struct pinmux_gpio pinmux_gpios[] = {
GPIO_FN(MSIOF2_TSYNC), GPIO_FN(MSIOF2_TSCK), GPIO_FN(MSIOF2_RXD),
GPIO_FN(MSIOF2_TXD),
- /* MSIOF3 */
+ /* BBIF1 */
GPIO_FN(BBIF1_RXD), GPIO_FN(BBIF1_TSYNC), GPIO_FN(BBIF1_TSCK),
GPIO_FN(BBIF1_TXD), GPIO_FN(BBIF1_RSCK), GPIO_FN(BBIF1_RSYNC),
GPIO_FN(BBIF1_FLOW), GPIO_FN(BB_RX_FLOW_N),
- /* MSIOF4 */
+ /* BBIF2 */
GPIO_FN(BBIF2_TSCK1), GPIO_FN(BBIF2_TSYNC1),
GPIO_FN(BBIF2_TXD1), GPIO_FN(BBIF2_RXD),
diff --git a/arch/arm/mach-shmobile/setup-sh7367.c b/arch/arm/mach-shmobile/setup-sh7367.c
index 3148c11a550..003008c1836 100644
--- a/arch/arm/mach-shmobile/setup-sh7367.c
+++ b/arch/arm/mach-shmobile/setup-sh7367.c
@@ -154,7 +154,6 @@ static struct sh_timer_config cmt10_platform_data = {
.name = "CMT10",
.channel_offset = 0x10,
.timer_bit = 0,
- .clk = "r_clk",
.clockevent_rating = 125,
.clocksource_rating = 125,
};
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index e26686c9d0b..564a6d0be47 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -158,7 +158,6 @@ static struct sh_timer_config cmt10_platform_data = {
.name = "CMT10",
.channel_offset = 0x10,
.timer_bit = 0,
- .clk = "cmt1",
.clockevent_rating = 125,
.clocksource_rating = 125,
};
@@ -186,6 +185,67 @@ static struct platform_device cmt10_device = {
.num_resources = ARRAY_SIZE(cmt10_resources),
};
+/* TMU */
+static struct sh_timer_config tmu00_platform_data = {
+ .name = "TMU00",
+ .channel_offset = 0x4,
+ .timer_bit = 0,
+ .clockevent_rating = 200,
+};
+
+static struct resource tmu00_resources[] = {
+ [0] = {
+ .name = "TMU00",
+ .start = 0xfff60008,
+ .end = 0xfff60013,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = intcs_evt2irq(0xe80), /* TMU_TUNI0 */
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu00_device = {
+ .name = "sh_tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu00_platform_data,
+ },
+ .resource = tmu00_resources,
+ .num_resources = ARRAY_SIZE(tmu00_resources),
+};
+
+static struct sh_timer_config tmu01_platform_data = {
+ .name = "TMU01",
+ .channel_offset = 0x10,
+ .timer_bit = 1,
+ .clocksource_rating = 200,
+};
+
+static struct resource tmu01_resources[] = {
+ [0] = {
+ .name = "TMU01",
+ .start = 0xfff60014,
+ .end = 0xfff6001f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = intcs_evt2irq(0xea0), /* TMU_TUNI1 */
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tmu01_device = {
+ .name = "sh_tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu01_platform_data,
+ },
+ .resource = tmu01_resources,
+ .num_resources = ARRAY_SIZE(tmu01_resources),
+};
+
/* I2C */
static struct resource iic0_resources[] = {
[0] = {
@@ -419,14 +479,14 @@ static struct resource sh7372_dmae0_resources[] = {
},
{
/* DMA error IRQ */
- .start = 246,
- .end = 246,
+ .start = evt2irq(0x20c0),
+ .end = evt2irq(0x20c0),
.flags = IORESOURCE_IRQ,
},
{
/* IRQ for channels 0-5 */
- .start = 240,
- .end = 245,
+ .start = evt2irq(0x2000),
+ .end = evt2irq(0x20a0),
.flags = IORESOURCE_IRQ,
},
};
@@ -447,14 +507,14 @@ static struct resource sh7372_dmae1_resources[] = {
},
{
/* DMA error IRQ */
- .start = 254,
- .end = 254,
+ .start = evt2irq(0x21c0),
+ .end = evt2irq(0x21c0),
.flags = IORESOURCE_IRQ,
},
{
/* IRQ for channels 0-5 */
- .start = 248,
- .end = 253,
+ .start = evt2irq(0x2100),
+ .end = evt2irq(0x21a0),
.flags = IORESOURCE_IRQ,
},
};
@@ -475,14 +535,14 @@ static struct resource sh7372_dmae2_resources[] = {
},
{
/* DMA error IRQ */
- .start = 262,
- .end = 262,
+ .start = evt2irq(0x22c0),
+ .end = evt2irq(0x22c0),
.flags = IORESOURCE_IRQ,
},
{
/* IRQ for channels 0-5 */
- .start = 256,
- .end = 261,
+ .start = evt2irq(0x2200),
+ .end = evt2irq(0x22a0),
.flags = IORESOURCE_IRQ,
},
};
@@ -526,6 +586,11 @@ static struct platform_device *sh7372_early_devices[] __initdata = {
&scif5_device,
&scif6_device,
&cmt10_device,
+ &tmu00_device,
+ &tmu01_device,
+};
+
+static struct platform_device *sh7372_late_devices[] __initdata = {
&iic0_device,
&iic1_device,
&dma0_device,
@@ -537,6 +602,9 @@ void __init sh7372_add_standard_devices(void)
{
platform_add_devices(sh7372_early_devices,
ARRAY_SIZE(sh7372_early_devices));
+
+ platform_add_devices(sh7372_late_devices,
+ ARRAY_SIZE(sh7372_late_devices));
}
void __init sh7372_add_early_devices(void)
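The DMAC resources above replace hardcoded IRQ numbers with evt2irq() on the INTC event codes. Assuming the usual shmobile definition of the macro, the numeric values are unchanged and the conversion only makes the event codes explicit:

/* shmobile convention: irq = (evt >> 5) - 16 */
#define sketch_evt2irq(evt)	(((evt) >> 5) - 16)

static const int dmae0_err_irq = sketch_evt2irq(0x20c0);	/* == 246, as before */
static const int dmae0_ch0_irq = sketch_evt2irq(0x2000);	/* == 240, as before */
static const int dmae0_ch5_irq = sketch_evt2irq(0x20a0);	/* == 245, as before */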
diff --git a/arch/arm/mach-shmobile/setup-sh7377.c b/arch/arm/mach-shmobile/setup-sh7377.c
index bb4adf17dbf..575dbd6c2f1 100644
--- a/arch/arm/mach-shmobile/setup-sh7377.c
+++ b/arch/arm/mach-shmobile/setup-sh7377.c
@@ -172,7 +172,6 @@ static struct sh_timer_config cmt10_platform_data = {
.name = "CMT10",
.channel_offset = 0x10,
.timer_bit = 0,
- .clk = "r_clk",
.clockevent_rating = 125,
.clocksource_rating = 125,
};
diff --git a/arch/arm/mach-tegra/timer.c b/arch/arm/mach-tegra/timer.c
index 2f420210d40..9057d6fd1d3 100644
--- a/arch/arm/mach-tegra/timer.c
+++ b/arch/arm/mach-tegra/timer.c
@@ -28,7 +28,6 @@
#include <linux/cnt32_to_63.h>
#include <asm/mach/time.h>
-#include <asm/mach/time.h>
#include <asm/localtimer.h>
#include <mach/iomap.h>
diff --git a/arch/arm/mach-u300/clock.c b/arch/arm/mach-u300/clock.c
index 60acf9e708a..7458fc6df5c 100644
--- a/arch/arm/mach-u300/clock.c
+++ b/arch/arm/mach-u300/clock.c
@@ -66,7 +66,7 @@ static DEFINE_SPINLOCK(syscon_resetreg_lock);
* AMBA bus
* |
* +- CPU
- * +- NANDIF NAND Flash interface
+ * +- FSMC NANDIF NAND Flash interface
* +- SEMI Shared Memory interface
* +- ISP Image Signal Processor (U335 only)
* +- CDS (U335 only)
@@ -726,7 +726,7 @@ static struct clk cpu_clk = {
};
static struct clk nandif_clk = {
- .name = "NANDIF",
+ .name = "FSMC",
.parent = &amba_clk,
.hw_ctrld = false,
.reset = true,
@@ -1259,7 +1259,7 @@ static struct clk_lookup lookups[] = {
/* Connected directly to the AMBA bus */
DEF_LOOKUP("amba", &amba_clk),
DEF_LOOKUP("cpu", &cpu_clk),
- DEF_LOOKUP("fsmc", &nandif_clk),
+ DEF_LOOKUP("fsmc-nand", &nandif_clk),
DEF_LOOKUP("semi", &semi_clk),
#ifdef CONFIG_MACH_U300_BS335
DEF_LOOKUP("isp", &isp_clk),
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index ea41c236be0..aa53ee22438 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -21,7 +21,8 @@
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/err.h>
-#include <mach/coh901318.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/fsmc.h>
#include <asm/types.h>
#include <asm/setup.h>
@@ -30,6 +31,7 @@
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
+#include <mach/coh901318.h>
#include <mach/hardware.h>
#include <mach/syscon.h>
#include <mach/dma_channels.h>
@@ -285,6 +287,13 @@ static struct resource rtc_resources[] = {
*/
static struct resource fsmc_resources[] = {
{
+ .name = "nand_data",
+ .start = U300_NAND_CS0_PHYS_BASE,
+ .end = U300_NAND_CS0_PHYS_BASE + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "fsmc_regs",
.start = U300_NAND_IF_PHYS_BASE,
.end = U300_NAND_IF_PHYS_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
@@ -1429,11 +1438,39 @@ static struct platform_device rtc_device = {
.resource = rtc_resources,
};
-static struct platform_device fsmc_device = {
- .name = "nandif",
+static struct mtd_partition u300_partitions[] = {
+ {
+ .name = "bootrecords",
+ .offset = 0,
+ .size = SZ_128K,
+ },
+ {
+ .name = "free",
+ .offset = SZ_128K,
+ .size = 8064 * SZ_1K,
+ },
+ {
+ .name = "platform",
+ .offset = 8192 * SZ_1K,
+ .size = 253952 * SZ_1K,
+ },
+};
+
+static struct fsmc_nand_platform_data nand_platform_data = {
+ .partitions = u300_partitions,
+ .nr_partitions = ARRAY_SIZE(u300_partitions),
+ .options = NAND_SKIP_BBTSCAN,
+ .width = FSMC_NAND_BW8,
+};
+
+static struct platform_device nand_device = {
+ .name = "fsmc-nand",
.id = -1,
- .num_resources = ARRAY_SIZE(fsmc_resources),
.resource = fsmc_resources,
+ .num_resources = ARRAY_SIZE(fsmc_resources),
+ .dev = {
+ .platform_data = &nand_platform_data,
+ },
};
static struct platform_device ave_device = {
@@ -1465,7 +1502,7 @@ static struct platform_device *platform_devs[] __initdata = {
&keypad_device,
&rtc_device,
&gpio_device,
- &fsmc_device,
+ &nand_device,
&wdog_device,
&ave_device
};
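On the consumer side, the "fsmc-nand" driver is expected to pick the partition table and bus width out of platform_data. A hedged sketch of that pairing; the probe body and function names are illustrative, not taken from the real driver:

#include <linux/platform_device.h>
#include <linux/mtd/fsmc.h>

static int fsmc_nand_sketch_probe(struct platform_device *pdev)
{
	struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);

	if (!pdata || !pdata->nr_partitions)
		return -EINVAL;

	/* pdata->partitions, pdata->width (FSMC_NAND_BW8), etc. used here */
	return 0;
}

static struct platform_driver fsmc_nand_sketch_driver = {
	.driver = {
		.name = "fsmc-nand",	/* must match the platform_device name above */
	},
	.probe = fsmc_nand_sketch_probe,
};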
diff --git a/arch/arm/mach-u300/include/mach/u300-regs.h b/arch/arm/mach-u300/include/mach/u300-regs.h
index 56721a0cd2a..8b85df4c8d8 100644
--- a/arch/arm/mach-u300/include/mach/u300-regs.h
+++ b/arch/arm/mach-u300/include/mach/u300-regs.h
@@ -20,11 +20,9 @@
/* NAND Flash CS0 */
#define U300_NAND_CS0_PHYS_BASE 0x80000000
-#define U300_NAND_CS0_VIRT_BASE 0xff040000
/* NFIF */
#define U300_NAND_IF_PHYS_BASE 0x9f800000
-#define U300_NAND_IF_VIRT_BASE 0xff030000
/* AHB Peripherals */
#define U300_AHB_PER_PHYS_BASE 0xa0000000
diff --git a/arch/arm/mach-u300/spi.c b/arch/arm/mach-u300/spi.c
index edb2c0d255c..00869def542 100644
--- a/arch/arm/mach-u300/spi.c
+++ b/arch/arm/mach-u300/spi.c
@@ -67,7 +67,7 @@ static struct spi_board_info u300_spi_devices[] = {
.bus_num = 0, /* Only one bus on this chip */
.chip_select = 0,
/* Means SPI_CS_HIGH, change if e.g low CS */
- .mode = SPI_MODE_1 | SPI_LSB_FIRST | SPI_LOOP,
+ .mode = SPI_MODE_1 | SPI_LOOP,
},
#endif
};
diff --git a/arch/arm/mach-ux500/devices-db8500.c b/arch/arm/mach-ux500/devices-db8500.c
index cbbe69a76a7..4a94be3304b 100644
--- a/arch/arm/mach-ux500/devices-db8500.c
+++ b/arch/arm/mach-ux500/devices-db8500.c
@@ -208,35 +208,25 @@ static struct resource dma40_resources[] = {
/* Default configuration for physical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
- .channel_type = (STEDMA40_CHANNEL_IN_PHY_MODE |
- STEDMA40_LOW_PRIORITY_CHANNEL |
- STEDMA40_PCHAN_BASIC_MODE),
+ .mode = STEDMA40_MODE_PHYSICAL,
.dir = STEDMA40_MEM_TO_MEM,
- .src_info.endianess = STEDMA40_LITTLE_ENDIAN,
.src_info.data_width = STEDMA40_BYTE_WIDTH,
.src_info.psize = STEDMA40_PSIZE_PHY_1,
.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
- .dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
.dst_info.data_width = STEDMA40_BYTE_WIDTH,
.dst_info.psize = STEDMA40_PSIZE_PHY_1,
.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};
/* Default configuration for logical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_log = {
- .channel_type = (STEDMA40_CHANNEL_IN_LOG_MODE |
- STEDMA40_LOW_PRIORITY_CHANNEL |
- STEDMA40_LCHAN_SRC_LOG_DST_LOG |
- STEDMA40_NO_TIM_FOR_LINK),
.dir = STEDMA40_MEM_TO_MEM,
- .src_info.endianess = STEDMA40_LITTLE_ENDIAN,
.src_info.data_width = STEDMA40_BYTE_WIDTH,
.src_info.psize = STEDMA40_PSIZE_LOG_1,
.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
- .dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
.dst_info.data_width = STEDMA40_BYTE_WIDTH,
.dst_info.psize = STEDMA40_PSIZE_LOG_1,
.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
@@ -269,7 +259,6 @@ static struct stedma40_platform_data dma40_plat_data = {
.memcpy_len = ARRAY_SIZE(dma40_memcpy_event),
.memcpy_conf_phy = &dma40_memcpy_conf_phy,
.memcpy_conf_log = &dma40_memcpy_conf_log,
- .llis_per_log = 8,
.disabled_channels = {-1},
};
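Client drivers describe their channels the same way after this API change: the old channel_type bitmask becomes the .mode enum from ste_dma40.h (see the header hunk near the end of this diff). An illustrative logical-mode configuration, mirroring the memcpy defaults above:

#include <plat/ste_dma40.h>

static struct stedma40_chan_cfg sketch_log_cfg = {
	.mode = STEDMA40_MODE_LOGICAL,		/* was STEDMA40_CHANNEL_IN_LOG_MODE */
	.dir = STEDMA40_MEM_TO_MEM,
	.src_info.data_width = STEDMA40_BYTE_WIDTH,
	.src_info.psize = STEDMA40_PSIZE_LOG_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};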
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index c2e405a9e02..fd25ccd7272 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -54,7 +54,9 @@ static struct map_desc ct_ca9x4_io_desc[] __initdata = {
static void __init ct_ca9x4_map_io(void)
{
+#ifdef CONFIG_LOCAL_TIMERS
twd_base = MMIO_P2V(A9_MPCORE_TWD);
+#endif
v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
}
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 7148e53e607..1fa6f71470d 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -38,6 +38,17 @@
#define CACHE_DLIMIT (CACHE_DSIZE * 2)
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(fa_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(fa_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular address
@@ -233,6 +244,7 @@ ENDPROC(fa_dma_unmap_area)
.type fa_cache_fns, #object
ENTRY(fa_cache_fns)
+ .long fa_flush_icache_all
.long fa_flush_kern_cache_all
.long fa_flush_user_cache_all
.long fa_flush_user_cache_range
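The new flush_icache_all entries in this and the following cache/proc files all come down to one CP15 write (c7, c5, 0) that invalidates the whole instruction cache, or a plain return on cores without one. A C-level sketch of the same operation via inline assembly; the real implementations stay in the per-CPU assembly files:

static inline void sketch_flush_icache_all(void)
{
	unsigned long zero = 0;

	/* CP15 c7, c5, 0: invalidate entire instruction cache */
	asm volatile("mcr p15, 0, %0, c7, c5, 0" : : "r" (zero) : "memory");
}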
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index c2ff3c599fe..2e2bc406a18 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -13,6 +13,15 @@
#include "proc-macros.S"
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v3_flush_icache_all)
+ mov pc, lr
+ENDPROC(v3_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
@@ -122,6 +131,7 @@ ENDPROC(v3_dma_map_area)
.type v3_cache_fns, #object
ENTRY(v3_cache_fns)
+ .long v3_flush_icache_all
.long v3_flush_kern_cache_all
.long v3_flush_user_cache_all
.long v3_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 4810f7e3e81..a8fefb523f1 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -13,6 +13,15 @@
#include "proc-macros.S"
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v4_flush_icache_all)
+ mov pc, lr
+ENDPROC(v4_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
@@ -134,6 +143,7 @@ ENDPROC(v4_dma_map_area)
.type v4_cache_fns, #object
ENTRY(v4_cache_fns)
+ .long v4_flush_icache_all
.long v4_flush_kern_cache_all
.long v4_flush_user_cache_all
.long v4_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index df8368afa10..d3644db467b 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -51,6 +51,17 @@ flush_base:
.text
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v4wb_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(v4wb_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular address
@@ -244,6 +255,7 @@ ENDPROC(v4wb_dma_unmap_area)
.type v4wb_cache_fns, #object
ENTRY(v4wb_cache_fns)
+ .long v4wb_flush_icache_all
.long v4wb_flush_kern_cache_all
.long v4wb_flush_user_cache_all
.long v4wb_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 45c70312f43..49c2b66cf3d 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -41,6 +41,17 @@
#define CACHE_DLIMIT 16384
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v4wt_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(v4wt_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
@@ -188,6 +199,7 @@ ENDPROC(v4wt_dma_map_area)
.type v4wt_cache_fns, #object
ENTRY(v4wt_cache_fns)
+ .long v4wt_flush_icache_all
.long v4wt_flush_kern_cache_all
.long v4wt_flush_user_cache_all
.long v4wt_flush_user_cache_range
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e4dd0646e85..ac6a36142fc 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -198,7 +198,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
* fragmentation of the DMA space, and also prevents allocations
* smaller than a section from crossing a section boundary.
*/
- bit = fls(size - 1) + 1;
+ bit = fls(size - 1);
if (bit > SECTION_SHIFT)
bit = SECTION_SHIFT;
align = 1 << bit;
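A worked example for the fls() fix above: for a 1 MiB buffer, fls(SZ_1M - 1) is 20, so the alignment becomes 1 << 20 = 1 MiB, while the old "fls(size - 1) + 1" gave 2 MiB and wasted half of each remapped region. A sketch of just the alignment arithmetic (the real code additionally clamps the result to SECTION_SHIFT, as shown):

#include <linux/bitops.h>
#include <linux/types.h>

static unsigned long sketch_dma_align(size_t size)
{
	return 1UL << fls(size - 1);	/* smallest power of two >= size */
}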
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index c493d7244d3..83e59f87042 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -66,6 +66,30 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
return ret;
}
+#if USE_SPLIT_PTLOCKS
+/*
+ * If we are using split PTE locks, then we need to take the page
+ * lock here. Otherwise we are using shared mm->page_table_lock
+ * which is already locked, thus cannot take it.
+ */
+static inline void do_pte_lock(spinlock_t *ptl)
+{
+ /*
+ * Use nested version here to indicate that we are already
+ * holding one similar spinlock.
+ */
+ spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+}
+
+static inline void do_pte_unlock(spinlock_t *ptl)
+{
+ spin_unlock(ptl);
+}
+#else /* !USE_SPLIT_PTLOCKS */
+static inline void do_pte_lock(spinlock_t *ptl) {}
+static inline void do_pte_unlock(spinlock_t *ptl) {}
+#endif /* USE_SPLIT_PTLOCKS */
+
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
unsigned long pfn)
{
@@ -90,11 +114,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
*/
ptl = pte_lockptr(vma->vm_mm, pmd);
pte = pte_offset_map(pmd, address);
- spin_lock(ptl);
+ do_pte_lock(ptl);
ret = do_adjust_pte(vma, address, pfn, pte);
- spin_unlock(ptl);
+ do_pte_unlock(ptl);
pte_unmap(pte);
return ret;
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index c00f119babb..c435fd9e1da 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -89,7 +89,7 @@ void __kunmap_atomic(void *kvaddr)
int idx, type;
if (kvaddr >= (void *)FIXADDR_START) {
- type = kmap_atomic_idx_pop();
+ type = kmap_atomic_idx();
idx = type + KM_TYPE_NR * smp_processor_id();
if (cache_is_vivt())
@@ -101,6 +101,7 @@ void __kunmap_atomic(void *kvaddr)
#else
(void) idx; /* to kill a warning */
#endif
+ kmap_atomic_idx_pop();
} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
/* this address was obtained through kmap_high_get() */
kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7fd9b5eb177..5164069ced4 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -18,6 +18,7 @@
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
+#include <linux/sort.h>
#include <asm/mach-types.h>
#include <asm/sections.h>
@@ -121,9 +122,10 @@ void show_mem(void)
printk("%d pages swap cached\n", cached);
}
-static void __init find_limits(struct meminfo *mi,
- unsigned long *min, unsigned long *max_low, unsigned long *max_high)
+static void __init find_limits(unsigned long *min, unsigned long *max_low,
+ unsigned long *max_high)
{
+ struct meminfo *mi = &meminfo;
int i;
*min = -1UL;
@@ -147,14 +149,13 @@ static void __init find_limits(struct meminfo *mi,
}
}
-static void __init arm_bootmem_init(struct meminfo *mi,
- unsigned long start_pfn, unsigned long end_pfn)
+static void __init arm_bootmem_init(unsigned long start_pfn,
+ unsigned long end_pfn)
{
struct memblock_region *reg;
unsigned int boot_pages;
phys_addr_t bitmap;
pg_data_t *pgdat;
- int i;
/*
* Allocate the bootmem bitmap page. This must be in a region
@@ -172,30 +173,39 @@ static void __init arm_bootmem_init(struct meminfo *mi,
pgdat = NODE_DATA(0);
init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
- for_each_bank(i, mi) {
- struct membank *bank = &mi->bank[i];
- if (!bank->highmem)
- free_bootmem(bank_phys_start(bank), bank_phys_size(bank));
+ /* Free the lowmem regions from memblock into bootmem. */
+ for_each_memblock(memory, reg) {
+ unsigned long start = memblock_region_memory_base_pfn(reg);
+ unsigned long end = memblock_region_memory_end_pfn(reg);
+
+ if (end >= end_pfn)
+ end = end_pfn;
+ if (start >= end)
+ break;
+
+ free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
}
- /*
- * Reserve the memblock reserved regions in bootmem.
- */
+ /* Reserve the lowmem memblock reserved regions in bootmem. */
for_each_memblock(reserved, reg) {
- phys_addr_t start = memblock_region_reserved_base_pfn(reg);
- phys_addr_t end = memblock_region_reserved_end_pfn(reg);
- if (start >= start_pfn && end <= end_pfn)
- reserve_bootmem_node(pgdat, __pfn_to_phys(start),
- (end - start) << PAGE_SHIFT,
- BOOTMEM_DEFAULT);
+ unsigned long start = memblock_region_reserved_base_pfn(reg);
+ unsigned long end = memblock_region_reserved_end_pfn(reg);
+
+ if (end >= end_pfn)
+ end = end_pfn;
+ if (start >= end)
+ break;
+
+ reserve_bootmem(__pfn_to_phys(start),
+ (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
}
}
-static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
- unsigned long max_low, unsigned long max_high)
+static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
+ unsigned long max_high)
{
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
- int i;
+ struct memblock_region *reg;
/*
* initialise the zones.
@@ -217,13 +227,20 @@ static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
* holes = node_size - sum(bank_sizes)
*/
memcpy(zhole_size, zone_size, sizeof(zhole_size));
- for_each_bank(i, mi) {
- int idx = 0;
+ for_each_memblock(memory, reg) {
+ unsigned long start = memblock_region_memory_base_pfn(reg);
+ unsigned long end = memblock_region_memory_end_pfn(reg);
+
+ if (start < max_low) {
+ unsigned long low_end = min(end, max_low);
+ zhole_size[0] -= low_end - start;
+ }
#ifdef CONFIG_HIGHMEM
- if (mi->bank[i].highmem)
- idx = ZONE_HIGHMEM;
+ if (end > max_low) {
+ unsigned long high_start = max(start, max_low);
+ zhole_size[ZONE_HIGHMEM] -= end - high_start;
+ }
#endif
- zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
}
/*
@@ -256,10 +273,19 @@ static void arm_memory_present(void)
}
#endif
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+ const struct membank *a = _a, *b = _b;
+ long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
int i;
+ sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
memblock_init();
for (i = 0; i < mi->nr_banks; i++)
memblock_add(mi->bank[i].start, mi->bank[i].size);
@@ -292,14 +318,13 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
void __init bootmem_init(void)
{
- struct meminfo *mi = &meminfo;
unsigned long min, max_low, max_high;
max_low = max_high = 0;
- find_limits(mi, &min, &max_low, &max_high);
+ find_limits(&min, &max_low, &max_high);
- arm_bootmem_init(mi, min, max_low);
+ arm_bootmem_init(min, max_low);
/*
* Sparsemem tries to allocate bootmem in memory_present(),
@@ -317,7 +342,7 @@ void __init bootmem_init(void)
* the sparse mem_map arrays initialized by sparse_init()
* for memmap_init_zone(), otherwise all PFNs are invalid.
*/
- arm_bootmem_free(mi, min, max_low, max_high);
+ arm_bootmem_free(min, max_low, max_high);
high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
@@ -411,6 +436,56 @@ static void __init free_unused_memmap(struct meminfo *mi)
}
}
+static void __init free_highpages(void)
+{
+#ifdef CONFIG_HIGHMEM
+ unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
+ struct memblock_region *mem, *res;
+
+ /* set highmem page free */
+ for_each_memblock(memory, mem) {
+ unsigned long start = memblock_region_memory_base_pfn(mem);
+ unsigned long end = memblock_region_memory_end_pfn(mem);
+
+ /* Ignore complete lowmem entries */
+ if (end <= max_low)
+ continue;
+
+ /* Truncate partial highmem entries */
+ if (start < max_low)
+ start = max_low;
+
+ /* Find and exclude any reserved regions */
+ for_each_memblock(reserved, res) {
+ unsigned long res_start, res_end;
+
+ res_start = memblock_region_reserved_base_pfn(res);
+ res_end = memblock_region_reserved_end_pfn(res);
+
+ if (res_end < start)
+ continue;
+ if (res_start < start)
+ res_start = start;
+ if (res_start > end)
+ res_start = end;
+ if (res_end > end)
+ res_end = end;
+ if (res_start != start)
+ totalhigh_pages += free_area(start, res_start,
+ NULL);
+ start = res_end;
+ if (start == end)
+ break;
+ }
+
+ /* And now free anything which remains */
+ if (start < end)
+ totalhigh_pages += free_area(start, end, NULL);
+ }
+ totalram_pages += totalhigh_pages;
+#endif
+}
+
/*
* mem_init() marks the free areas in the mem_map and tells us how much
* memory is free. This is done after various parts of the system have
@@ -419,6 +494,7 @@ static void __init free_unused_memmap(struct meminfo *mi)
void __init mem_init(void)
{
unsigned long reserved_pages, free_pages;
+ struct memblock_region *reg;
int i;
#ifdef CONFIG_HAVE_TCM
/* These pointers are filled in on TCM detection */
@@ -439,16 +515,7 @@ void __init mem_init(void)
__phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif
-#ifdef CONFIG_HIGHMEM
- /* set highmem page free */
- for_each_bank (i, &meminfo) {
- unsigned long start = bank_pfn_start(&meminfo.bank[i]);
- unsigned long end = bank_pfn_end(&meminfo.bank[i]);
- if (start >= max_low_pfn + PHYS_PFN_OFFSET)
- totalhigh_pages += free_area(start, end, NULL);
- }
- totalram_pages += totalhigh_pages;
-#endif
+ free_highpages();
reserved_pages = free_pages = 0;
@@ -478,9 +545,11 @@ void __init mem_init(void)
*/
printk(KERN_INFO "Memory:");
num_physpages = 0;
- for (i = 0; i < meminfo.nr_banks; i++) {
- num_physpages += bank_pfn_size(&meminfo.bank[i]);
- printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
+ for_each_memblock(memory, reg) {
+ unsigned long pages = memblock_region_memory_end_pfn(reg) -
+ memblock_region_memory_base_pfn(reg);
+ num_physpages += pages;
+ printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
}
printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c32f731d56d..72ad3e1f56c 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -14,7 +14,6 @@
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
-#include <linux/sort.h>
#include <linux/fs.h>
#include <asm/cputype.h>
@@ -265,17 +264,17 @@ static struct mem_type mem_types[] = {
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_DTCM] = {
- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG |
- L_PTE_DIRTY | L_PTE_WRITE,
- .prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
- .domain = DOMAIN_KERNEL,
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_WRITE,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+ .domain = DOMAIN_KERNEL,
},
[MT_MEMORY_ITCM] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_USER | L_PTE_EXEC,
+ L_PTE_WRITE | L_PTE_EXEC,
.prot_l1 = PMD_TYPE_TABLE,
- .domain = DOMAIN_IO,
+ .domain = DOMAIN_KERNEL,
},
};
@@ -745,13 +744,14 @@ static int __init early_vmalloc(char *arg)
}
early_param("vmalloc", early_vmalloc);
-phys_addr_t lowmem_end_addr;
+static phys_addr_t lowmem_limit __initdata = 0;
static void __init sanity_check_meminfo(void)
{
int i, j, highmem = 0;
- lowmem_end_addr = __pa(vmalloc_min - 1) + 1;
+ lowmem_limit = __pa(vmalloc_min - 1) + 1;
+ memblock_set_current_limit(lowmem_limit);
for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
struct membank *bank = &meminfo.bank[j];
@@ -852,6 +852,7 @@ static void __init sanity_check_meminfo(void)
static inline void prepare_page_table(void)
{
unsigned long addr;
+ phys_addr_t end;
/*
* Clear out all the mappings below the kernel image.
@@ -867,10 +868,17 @@ static inline void prepare_page_table(void)
pmd_clear(pmd_off_k(addr));
/*
+ * Find the end of the first block of lowmem.
+ */
+ end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
+ if (end >= lowmem_limit)
+ end = lowmem_limit;
+
+ /*
* Clear out all the kernel space mappings, except for the first
* memory bank, up to the end of the vmalloc region.
*/
- for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
+ for (addr = __phys_to_virt(end);
addr < VMALLOC_END; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
}
@@ -987,37 +995,28 @@ static void __init kmap_init(void)
#endif
}
-static inline void map_memory_bank(struct membank *bank)
-{
- struct map_desc map;
-
- map.pfn = bank_pfn_start(bank);
- map.virtual = __phys_to_virt(bank_phys_start(bank));
- map.length = bank_phys_size(bank);
- map.type = MT_MEMORY;
-
- create_mapping(&map);
-}
-
static void __init map_lowmem(void)
{
- struct meminfo *mi = &meminfo;
- int i;
+ struct memblock_region *reg;
/* Map all the lowmem memory banks. */
- for (i = 0; i < mi->nr_banks; i++) {
- struct membank *bank = &mi->bank[i];
+ for_each_memblock(memory, reg) {
+ phys_addr_t start = reg->base;
+ phys_addr_t end = start + reg->size;
+ struct map_desc map;
+
+ if (end > lowmem_limit)
+ end = lowmem_limit;
+ if (start >= end)
+ break;
- if (!bank->highmem)
- map_memory_bank(bank);
- }
-}
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ map.type = MT_MEMORY;
-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
- const struct membank *a = _a, *b = _b;
- long cmp = bank_pfn_start(a) - bank_pfn_start(b);
- return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+ create_mapping(&map);
+ }
}
/*
@@ -1028,8 +1027,6 @@ void __init paging_init(struct machine_desc *mdesc)
{
void *zero_page;
- sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
-
build_mem_type_table();
sanity_check_meminfo();
prepare_page_table();
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index a6f5f8475b9..bcf748d9f4e 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -119,6 +119,20 @@ ENTRY(cpu_arm1020_do_idle)
/* ================================= CACHE ================================ */
.align 5
+
+/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1020_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+#endif
+ mov pc, lr
+ENDPROC(arm1020_flush_icache_all)
+
/*
* flush_user_cache_all()
*
@@ -351,6 +365,7 @@ ENTRY(arm1020_dma_unmap_area)
ENDPROC(arm1020_dma_unmap_area)
ENTRY(arm1020_cache_fns)
+ .long arm1020_flush_icache_all
.long arm1020_flush_kern_cache_all
.long arm1020_flush_user_cache_all
.long arm1020_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index afc06b9c313..ab7ec26657e 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -119,6 +119,20 @@ ENTRY(cpu_arm1020e_do_idle)
/* ================================= CACHE ================================ */
.align 5
+
+/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1020e_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+#endif
+ mov pc, lr
+ENDPROC(arm1020e_flush_icache_all)
+
/*
* flush_user_cache_all()
*
@@ -337,6 +351,7 @@ ENTRY(arm1020e_dma_unmap_area)
ENDPROC(arm1020e_dma_unmap_area)
ENTRY(arm1020e_cache_fns)
+ .long arm1020e_flush_icache_all
.long arm1020e_flush_kern_cache_all
.long arm1020e_flush_user_cache_all
.long arm1020e_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 8915e0ba3fe..831c5e54e22 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -108,6 +108,20 @@ ENTRY(cpu_arm1022_do_idle)
/* ================================= CACHE ================================ */
.align 5
+
+/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1022_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+#endif
+ mov pc, lr
+ENDPROC(arm1022_flush_icache_all)
+
/*
* flush_user_cache_all()
*
@@ -326,6 +340,7 @@ ENTRY(arm1022_dma_unmap_area)
ENDPROC(arm1022_dma_unmap_area)
ENTRY(arm1022_cache_fns)
+ .long arm1022_flush_icache_all
.long arm1022_flush_kern_cache_all
.long arm1022_flush_user_cache_all
.long arm1022_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index ff446c5d476..e3f7e9a166b 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -108,6 +108,20 @@ ENTRY(cpu_arm1026_do_idle)
/* ================================= CACHE ================================ */
.align 5
+
+/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm1026_flush_icache_all)
+#ifndef CONFIG_CPU_ICACHE_DISABLE
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+#endif
+ mov pc, lr
+ENDPROC(arm1026_flush_icache_all)
+
/*
* flush_user_cache_all()
*
@@ -320,6 +334,7 @@ ENTRY(arm1026_dma_unmap_area)
ENDPROC(arm1026_dma_unmap_area)
ENTRY(arm1026_cache_fns)
+ .long arm1026_flush_icache_all
.long arm1026_flush_kern_cache_all
.long arm1026_flush_user_cache_all
.long arm1026_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index fecf570939f..6109f278a90 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -110,6 +110,17 @@ ENTRY(cpu_arm920_do_idle)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm920_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(arm920_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
@@ -305,6 +316,7 @@ ENTRY(arm920_dma_unmap_area)
ENDPROC(arm920_dma_unmap_area)
ENTRY(arm920_cache_fns)
+ .long arm920_flush_icache_all
.long arm920_flush_kern_cache_all
.long arm920_flush_user_cache_all
.long arm920_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index e3cbf87c948..bb2f0f46a5e 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -112,6 +112,17 @@ ENTRY(cpu_arm922_do_idle)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm922_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(arm922_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular
@@ -307,6 +318,7 @@ ENTRY(arm922_dma_unmap_area)
ENDPROC(arm922_dma_unmap_area)
ENTRY(arm922_cache_fns)
+ .long arm922_flush_icache_all
.long arm922_flush_kern_cache_all
.long arm922_flush_user_cache_all
.long arm922_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 572424c867b..c13e01accfe 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -145,6 +145,17 @@ ENTRY(cpu_arm925_do_idle)
mov pc, lr
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm925_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(arm925_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular
@@ -362,6 +373,7 @@ ENTRY(arm925_dma_unmap_area)
ENDPROC(arm925_dma_unmap_area)
ENTRY(arm925_cache_fns)
+ .long arm925_flush_icache_all
.long arm925_flush_kern_cache_all
.long arm925_flush_user_cache_all
.long arm925_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 63d168b4ebe..42eb4315740 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -111,6 +111,17 @@ ENTRY(cpu_arm926_do_idle)
mov pc, lr
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm926_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(arm926_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular
@@ -325,6 +336,7 @@ ENTRY(arm926_dma_unmap_area)
ENDPROC(arm926_dma_unmap_area)
ENTRY(arm926_cache_fns)
+ .long arm926_flush_icache_all
.long arm926_flush_kern_cache_all
.long arm926_flush_user_cache_all
.long arm926_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index f6a62822418..7b11cdb9935 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -68,6 +68,17 @@ ENTRY(cpu_arm940_do_idle)
mov pc, lr
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm940_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(arm940_flush_icache_all)
+
+/*
* flush_user_cache_all()
*/
ENTRY(arm940_flush_user_cache_all)
@@ -254,6 +265,7 @@ ENTRY(arm940_dma_unmap_area)
ENDPROC(arm940_dma_unmap_area)
ENTRY(arm940_cache_fns)
+ .long arm940_flush_icache_all
.long arm940_flush_kern_cache_all
.long arm940_flush_user_cache_all
.long arm940_flush_user_cache_range
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index ea2e7f2eb95..1a5bbf08034 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -75,6 +75,17 @@ ENTRY(cpu_arm946_do_idle)
mov pc, lr
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(arm946_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(arm946_flush_icache_all)
+
+/*
* flush_user_cache_all()
*/
ENTRY(arm946_flush_user_cache_all)
@@ -296,6 +307,7 @@ ENTRY(arm946_dma_unmap_area)
ENDPROC(arm946_dma_unmap_area)
ENTRY(arm946_cache_fns)
+ .long arm946_flush_icache_all
.long arm946_flush_kern_cache_all
.long arm946_flush_user_cache_all
.long arm946_flush_user_cache_range
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 578da69200c..b4597edbff9 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -124,6 +124,17 @@ ENTRY(cpu_feroceon_do_idle)
mov pc, lr
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(feroceon_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(feroceon_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular
@@ -401,6 +412,7 @@ ENTRY(feroceon_dma_unmap_area)
ENDPROC(feroceon_dma_unmap_area)
ENTRY(feroceon_cache_fns)
+ .long feroceon_flush_icache_all
.long feroceon_flush_kern_cache_all
.long feroceon_flush_user_cache_all
.long feroceon_flush_user_cache_range
@@ -412,6 +424,7 @@ ENTRY(feroceon_cache_fns)
.long feroceon_dma_flush_range
ENTRY(feroceon_range_cache_fns)
+ .long feroceon_flush_icache_all
.long feroceon_flush_kern_cache_all
.long feroceon_flush_user_cache_all
.long feroceon_flush_user_cache_range
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index cad07e40304..ec26355cb7c 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -141,6 +141,17 @@ ENTRY(cpu_xsc3_do_idle)
/* ================================= CACHE ================================ */
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(xsc3_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(xsc3_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
@@ -325,6 +336,7 @@ ENTRY(xsc3_dma_unmap_area)
ENDPROC(xsc3_dma_unmap_area)
ENTRY(xsc3_cache_fns)
+ .long xsc3_flush_icache_all
.long xsc3_flush_kern_cache_all
.long xsc3_flush_user_cache_all
.long xsc3_flush_user_cache_range
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index cb245edb2c2..523408c0bb3 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -181,6 +181,17 @@ ENTRY(cpu_xscale_do_idle)
/* ================================= CACHE ================================ */
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(xscale_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(xscale_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
@@ -397,6 +408,7 @@ ENTRY(xscale_dma_unmap_area)
ENDPROC(xscale_dma_unmap_area)
ENTRY(xscale_cache_fns)
+ .long xscale_flush_icache_all
.long xscale_flush_kern_cache_all
.long xscale_flush_user_cache_all
.long xscale_flush_user_cache_range
diff --git a/arch/arm/plat-mxc/include/mach/dma.h b/arch/arm/plat-mxc/include/mach/dma.h
new file mode 100644
index 00000000000..ef7751546f5
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/dma.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARCH_MXC_DMA_H__
+#define __ASM_ARCH_MXC_DMA_H__
+
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+/*
+ * This enumerates peripheral types. Used for SDMA.
+ */
+enum sdma_peripheral_type {
+ IMX_DMATYPE_SSI, /* MCU domain SSI */
+ IMX_DMATYPE_SSI_SP, /* Shared SSI */
+ IMX_DMATYPE_MMC, /* MMC */
+ IMX_DMATYPE_SDHC, /* SDHC */
+ IMX_DMATYPE_UART, /* MCU domain UART */
+ IMX_DMATYPE_UART_SP, /* Shared UART */
+ IMX_DMATYPE_FIRI, /* FIRI */
+ IMX_DMATYPE_CSPI, /* MCU domain CSPI */
+ IMX_DMATYPE_CSPI_SP, /* Shared CSPI */
+ IMX_DMATYPE_SIM, /* SIM */
+ IMX_DMATYPE_ATA, /* ATA */
+ IMX_DMATYPE_CCM, /* CCM */
+ IMX_DMATYPE_EXT, /* External peripheral */
+ IMX_DMATYPE_MSHC, /* Memory Stick Host Controller */
+ IMX_DMATYPE_MSHC_SP, /* Shared Memory Stick Host Controller */
+ IMX_DMATYPE_DSP, /* DSP */
+ IMX_DMATYPE_MEMORY, /* Memory */
+ IMX_DMATYPE_FIFO_MEMORY,/* FIFO type Memory */
+ IMX_DMATYPE_SPDIF, /* SPDIF */
+ IMX_DMATYPE_IPU_MEMORY, /* IPU Memory */
+ IMX_DMATYPE_ASRC, /* ASRC */
+ IMX_DMATYPE_ESAI, /* ESAI */
+};
+
+enum imx_dma_prio {
+ DMA_PRIO_HIGH = 0,
+ DMA_PRIO_MEDIUM = 1,
+ DMA_PRIO_LOW = 2
+};
+
+struct imx_dma_data {
+ int dma_request; /* DMA request line */
+ enum sdma_peripheral_type peripheral_type;
+ int priority;
+};
+
+static inline int imx_dma_is_ipu(struct dma_chan *chan)
+{
+ return !strcmp(dev_name(chan->device->dev), "ipu-core");
+}
+
+static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
+{
+ return !strcmp(dev_name(chan->device->dev), "imx-sdma") ||
+ !strcmp(dev_name(chan->device->dev), "imx-dma");
+}
+
+#endif
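A minimal sketch of how a client driver might pair these helpers with the dmaengine API; the request line, peripheral type and priority below are illustrative assumptions, not values taken from any board file:

	#include <linux/dmaengine.h>
	#include <mach/dma.h>

	static bool my_sdma_filter(struct dma_chan *chan, void *param)
	{
		/* Only take channels offered by the general purpose engines. */
		if (!imx_dma_is_general_purpose(chan))
			return false;
		chan->private = param;		/* hand imx_dma_data to the driver */
		return true;
	}

	static struct dma_chan *my_request_rx_channel(void)
	{
		static struct imx_dma_data data = {
			.dma_request	 = 1,			/* assumed event line */
			.peripheral_type = IMX_DMATYPE_SSI,
			.priority	 = DMA_PRIO_HIGH,
		};
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		return dma_request_channel(mask, my_sdma_filter, &data);
	}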
diff --git a/arch/arm/plat-mxc/include/mach/sdma.h b/arch/arm/plat-mxc/include/mach/sdma.h
new file mode 100644
index 00000000000..9be112227ac
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/sdma.h
@@ -0,0 +1,17 @@
+#ifndef __MACH_MXC_SDMA_H__
+#define __MACH_MXC_SDMA_H__
+
+/**
+ * struct sdma_platform_data - platform specific data for SDMA engine
+ *
+ * @sdma_version: the version of this SDMA engine
+ * @cpu_name: used to generate the firmware name
+ * @to_version: CPU tape-out version
+ */
+struct sdma_platform_data {
+ int sdma_version;
+ char *cpu_name;
+ int to_version;
+};
+
+#endif /* __MACH_MXC_SDMA_H__ */
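A minimal sketch, with assumed version numbers and device name, of how platform code could hand this structure to an SDMA driver as ordinary platform data:

	#include <linux/platform_device.h>
	#include <mach/sdma.h>

	static struct sdma_platform_data imx_sdma_pdata = {
		.sdma_version	= 2,		/* assumed engine revision */
		.cpu_name	= "imx35",	/* used to build the firmware file name */
		.to_version	= 1,		/* assumed tape-out */
	};

	static struct platform_device imx_sdma_device = {
		.name	= "imx-sdma",		/* assumed driver name */
		.id	= -1,
		.dev	= {
			.platform_data = &imx_sdma_pdata,
		},
	};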
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index 5fbde4b8dc1..74b62f10d07 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -1,10 +1,8 @@
/*
- * arch/arm/plat-nomadik/include/plat/ste_dma40.h
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
*/
@@ -14,43 +12,25 @@
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
/* dev types for memcpy */
#define STEDMA40_DEV_DST_MEMORY (-1)
#define STEDMA40_DEV_SRC_MEMORY (-1)
-/*
- * Description of bitfields of channel_type variable is available in
- * the info structure.
- */
+enum stedma40_mode {
+ STEDMA40_MODE_LOGICAL = 0,
+ STEDMA40_MODE_PHYSICAL,
+ STEDMA40_MODE_OPERATION,
+};
-/* Priority */
-#define STEDMA40_INFO_PRIO_TYPE_POS 2
-#define STEDMA40_HIGH_PRIORITY_CHANNEL (0x1 << STEDMA40_INFO_PRIO_TYPE_POS)
-#define STEDMA40_LOW_PRIORITY_CHANNEL (0x2 << STEDMA40_INFO_PRIO_TYPE_POS)
-
-/* Mode */
-#define STEDMA40_INFO_CH_MODE_TYPE_POS 6
-#define STEDMA40_CHANNEL_IN_PHY_MODE (0x1 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-#define STEDMA40_CHANNEL_IN_LOG_MODE (0x2 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-#define STEDMA40_CHANNEL_IN_OPER_MODE (0x3 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-
-/* Mode options */
-#define STEDMA40_INFO_CH_MODE_OPT_POS 8
-#define STEDMA40_PCHAN_BASIC_MODE (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_PCHAN_MODULO_MODE (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_PCHAN_DOUBLE_DST_MODE (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_PHY_DST_LOG (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_LOG_DST_PHS (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_LOG_DST_LOG (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
-
-/* Interrupt */
-#define STEDMA40_INFO_TIM_POS 10
-#define STEDMA40_NO_TIM_FOR_LINK (0x0 << STEDMA40_INFO_TIM_POS)
-#define STEDMA40_TIM_FOR_LINK (0x1 << STEDMA40_INFO_TIM_POS)
-
-/* End of channel_type configuration */
+enum stedma40_mode_opt {
+ STEDMA40_PCHAN_BASIC_MODE = 0,
+ STEDMA40_LCHAN_SRC_LOG_DST_LOG = 0,
+ STEDMA40_PCHAN_MODULO_MODE,
+ STEDMA40_PCHAN_DOUBLE_DST_MODE,
+ STEDMA40_LCHAN_SRC_PHY_DST_LOG,
+ STEDMA40_LCHAN_SRC_LOG_DST_PHY,
+};
#define STEDMA40_ESIZE_8_BIT 0x0
#define STEDMA40_ESIZE_16_BIT 0x1
@@ -73,16 +53,14 @@
#define STEDMA40_PSIZE_LOG_8 STEDMA40_PSIZE_PHY_8
#define STEDMA40_PSIZE_LOG_16 STEDMA40_PSIZE_PHY_16
+/* Maximum number of possible physical channels */
+#define STEDMA40_MAX_PHYS 32
+
enum stedma40_flow_ctrl {
STEDMA40_NO_FLOW_CTRL,
STEDMA40_FLOW_CTRL,
};
-enum stedma40_endianess {
- STEDMA40_LITTLE_ENDIAN,
- STEDMA40_BIG_ENDIAN
-};
-
enum stedma40_periph_data_width {
STEDMA40_BYTE_WIDTH = STEDMA40_ESIZE_8_BIT,
STEDMA40_HALFWORD_WIDTH = STEDMA40_ESIZE_16_BIT,
@@ -90,15 +68,8 @@ enum stedma40_periph_data_width {
STEDMA40_DOUBLEWORD_WIDTH = STEDMA40_ESIZE_64_BIT
};
-struct stedma40_half_channel_info {
- enum stedma40_endianess endianess;
- enum stedma40_periph_data_width data_width;
- int psize;
- enum stedma40_flow_ctrl flow_ctrl;
-};
-
enum stedma40_xfer_dir {
- STEDMA40_MEM_TO_MEM,
+ STEDMA40_MEM_TO_MEM = 1,
STEDMA40_MEM_TO_PERIPH,
STEDMA40_PERIPH_TO_MEM,
STEDMA40_PERIPH_TO_PERIPH
@@ -106,18 +77,31 @@ enum stedma40_xfer_dir {
/**
+ * struct stedma40_half_channel_info - dst/src channel configuration
+ *
+ * @big_endian: true if the src/dst should be read as big endian
+ * @data_width: Data width of the src/dst hardware
+ * @p_size: Burst size
+ * @flow_ctrl: Flow control on/off.
+ */
+struct stedma40_half_channel_info {
+ bool big_endian;
+ enum stedma40_periph_data_width data_width;
+ int psize;
+ enum stedma40_flow_ctrl flow_ctrl;
+};
+
+/**
* struct stedma40_chan_cfg - Structure to be filled by client drivers.
*
* @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH
- * @channel_type: priority, mode, mode options and interrupt configuration.
+ * @high_priority: true if high-priority
+ * @mode: channel mode: physical, logical, or operation
+ * @mode_opt: options for the chosen channel mode
* @src_dev_type: Src device type
* @dst_dev_type: Dst device type
* @src_info: Parameters for src half channel
 * @dst_info: Parameters for dst half channel
- * @pre_transfer_data: Data to be passed on to the pre_transfer() function.
- * @pre_transfer: Callback used if needed before preparation of transfer.
- * Only called if device is set. size of bytes to transfer
- * (in case of multiple element transfer size is size of the first element).
*
*
* This structure has to be filled by the client drivers.
@@ -126,15 +110,13 @@ enum stedma40_xfer_dir {
*/
struct stedma40_chan_cfg {
enum stedma40_xfer_dir dir;
- unsigned int channel_type;
+ bool high_priority;
+ enum stedma40_mode mode;
+ enum stedma40_mode_opt mode_opt;
int src_dev_type;
int dst_dev_type;
struct stedma40_half_channel_info src_info;
struct stedma40_half_channel_info dst_info;
- void *pre_transfer_data;
- int (*pre_transfer) (struct dma_chan *chan,
- void *data,
- int size);
};
/**
@@ -147,7 +129,6 @@ struct stedma40_chan_cfg {
* @memcpy_len: length of memcpy
* @memcpy_conf_phy: default configuration of physical channel memcpy
* @memcpy_conf_log: default configuration of logical channel memcpy
- * @llis_per_log: number of max linked list items per logical channel
* @disabled_channels: A vector, ending with -1, that marks physical channels
* that are for different reasons not available for the driver.
*/
@@ -159,23 +140,10 @@ struct stedma40_platform_data {
u32 memcpy_len;
struct stedma40_chan_cfg *memcpy_conf_phy;
struct stedma40_chan_cfg *memcpy_conf_log;
- unsigned int llis_per_log;
- int disabled_channels[8];
+ int disabled_channels[STEDMA40_MAX_PHYS];
};
-/**
- * setdma40_set_psize() - Used for changing the package size of an
- * already configured dma channel.
- *
- * @chan: dmaengine handle
- * @src_psize: new package side for src. (STEDMA40_PSIZE*)
- * @src_psize: new package side for dst. (STEDMA40_PSIZE*)
- *
- * returns 0 on ok, otherwise negative error number.
- */
-int stedma40_set_psize(struct dma_chan *chan,
- int src_psize,
- int dst_psize);
+#ifdef CONFIG_STE_DMA40
/**
* stedma40_filter() - Provides stedma40_chan_cfg to the
@@ -238,4 +206,21 @@ dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
direction, flags);
}
+#else
+static inline bool stedma40_filter(struct dma_chan *chan, void *data)
+{
+ return false;
+}
+
+static inline struct
+dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
+ dma_addr_t addr,
+ unsigned int size,
+ enum dma_data_direction direction,
+ unsigned long flags)
+{
+ return NULL;
+}
+#endif
+
#endif
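A minimal sketch of a client filling the reworked stedma40_chan_cfg (high_priority, mode and mode_opt replace the old channel_type bitfield) and requesting a channel through stedma40_filter(); the device types and burst size are assumptions:

	#include <linux/dmaengine.h>
	#include <plat/ste_dma40.h>

	static struct stedma40_chan_cfg periph_rx_cfg = {
		.dir		= STEDMA40_PERIPH_TO_MEM,
		.high_priority	= false,
		.mode		= STEDMA40_MODE_LOGICAL,
		.mode_opt	= STEDMA40_LCHAN_SRC_LOG_DST_LOG,
		.src_dev_type	= 13,				/* assumed event line */
		.dst_dev_type	= STEDMA40_DEV_DST_MEMORY,
		.src_info = {
			.big_endian	= false,
			.data_width	= STEDMA40_BYTE_WIDTH,
			.psize		= STEDMA40_PSIZE_LOG_8,	/* assumed burst */
			.flow_ctrl	= STEDMA40_NO_FLOW_CTRL,
		},
		.dst_info = {
			.big_endian	= false,
			.data_width	= STEDMA40_BYTE_WIDTH,
			.psize		= STEDMA40_PSIZE_LOG_8,
			.flow_ctrl	= STEDMA40_NO_FLOW_CTRL,
		},
	};

	static struct dma_chan *request_periph_rx_chan(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		return dma_request_channel(mask, stedma40_filter, &periph_rx_cfg);
	}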
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index 221a675ebba..f0473182030 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -19,6 +19,7 @@
#include <plat/common.h>
#include <plat/board.h>
#include <plat/vram.h>
+#include <plat/dsp.h>
#define NO_LENGTH_CHECK 0xffffffff
@@ -64,4 +65,5 @@ void __init omap_reserve(void)
{
omapfb_reserve_sdram_memblock();
omap_vram_reserve_sdram_memblock();
+ omap_dsp_reserve_sdram_memblock();
}
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index 1e2383eae63..fc819120978 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/memblock.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
@@ -231,6 +232,77 @@ static void omap_init_uwire(void)
static inline void omap_init_uwire(void) {}
#endif
+/*-------------------------------------------------------------------------*/
+
+#if defined(CONFIG_OMAP_WATCHDOG) || defined(CONFIG_OMAP_WATCHDOG_MODULE)
+
+static struct resource wdt_resources[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device omap_wdt_device = {
+ .name = "omap_wdt",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(wdt_resources),
+ .resource = wdt_resources,
+};
+
+static void omap_init_wdt(void)
+{
+ if (cpu_is_omap16xx())
+ wdt_resources[0].start = 0xfffeb000;
+ else if (cpu_is_omap2420())
+ wdt_resources[0].start = 0x48022000; /* WDT2 */
+ else if (cpu_is_omap2430())
+ wdt_resources[0].start = 0x49016000; /* WDT2 */
+ else if (cpu_is_omap343x())
+ wdt_resources[0].start = 0x48314000; /* WDT2 */
+ else if (cpu_is_omap44xx())
+ wdt_resources[0].start = 0x4a314000;
+ else
+ return;
+
+ wdt_resources[0].end = wdt_resources[0].start + 0x4f;
+
+ (void) platform_device_register(&omap_wdt_device);
+}
+#else
+static inline void omap_init_wdt(void) {}
+#endif
+
+#if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE)
+
+static phys_addr_t omap_dsp_phys_mempool_base;
+
+void __init omap_dsp_reserve_sdram_memblock(void)
+{
+ phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
+ phys_addr_t paddr;
+
+ if (!size)
+ return;
+
+ paddr = memblock_alloc(size, SZ_1M);
+ if (!paddr) {
+ pr_err("%s: failed to reserve %x bytes\n",
+ __func__, size);
+ return;
+ }
+ memblock_free(paddr, size);
+ memblock_remove(paddr, size);
+
+ omap_dsp_phys_mempool_base = paddr;
+}
+
+phys_addr_t omap_dsp_get_mempool_base(void)
+{
+ return omap_dsp_phys_mempool_base;
+}
+EXPORT_SYMBOL(omap_dsp_get_mempool_base);
+#endif
+
/*
* This gets called after board-specific INIT_MACHINE, and initializes most
* on-chip peripherals accessible on this board (except for few like USB):
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index f5c5b8da9a8..2c2826571d4 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -1983,6 +1983,8 @@ static int omap2_dma_handle_ch(int ch)
dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(ch));
dma_write(1 << ch, IRQSTATUS_L0);
+ /* read back the register to flush the write */
+ dma_read(IRQSTATUS_L0);
/* If the ch is not chained then chain_id will be -1 */
if (dma_chan[ch].chain_id != -1) {
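The added dma_read() is the usual read-back that flushes a posted write before the interrupt handler returns; a generic hedged sketch of the same pattern with readl/writel (register offset and mask are placeholders):

	static void ack_irq_and_flush(void __iomem *base)
	{
		writel(0xffffffff, base + 0x08);	/* clear the status bits */
		(void)readl(base + 0x08);		/* read back so the write reaches
							 * the device before we return */
	}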
diff --git a/arch/arm/plat-omap/include/plat/dsp.h b/arch/arm/plat-omap/include/plat/dsp.h
new file mode 100644
index 00000000000..9c604b390f9
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/dsp.h
@@ -0,0 +1,31 @@
+#ifndef __OMAP_DSP_H__
+#define __OMAP_DSP_H__
+
+#include <linux/types.h>
+
+struct omap_dsp_platform_data {
+ void (*dsp_set_min_opp) (u8 opp_id);
+ u8 (*dsp_get_opp) (void);
+ void (*cpu_set_freq) (unsigned long f);
+ unsigned long (*cpu_get_freq) (void);
+ unsigned long mpu_speed[6];
+
+ /* functions to write and read PRCM registers */
+	void (*dsp_prm_write)(u32, s16, u16);
+	u32 (*dsp_prm_read)(s16, u16);
+	u32 (*dsp_prm_rmw_bits)(u32, u32, s16, s16);
+	void (*dsp_cm_write)(u32, s16, u16);
+	u32 (*dsp_cm_read)(s16, u16);
+	u32 (*dsp_cm_rmw_bits)(u32, u32, s16, s16);
+
+ phys_addr_t phys_mempool_base;
+ phys_addr_t phys_mempool_size;
+};
+
+#if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE)
+extern void omap_dsp_reserve_sdram_memblock(void);
+#else
+static inline void omap_dsp_reserve_sdram_memblock(void) { }
+#endif
+
+#endif
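A minimal sketch, with assumed names, of how mach/board code could feed the reserved pool into the platform data consumed by the DSP bridge driver:

	#include <plat/dsp.h>

	extern phys_addr_t omap_dsp_get_mempool_base(void);	/* defined in plat-omap/devices.c */

	static struct omap_dsp_platform_data omap_dsp_pdata;	/* assumed name */

	static int __init board_dsp_init(void)
	{
		omap_dsp_pdata.phys_mempool_base = omap_dsp_get_mempool_base();
		omap_dsp_pdata.phys_mempool_size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
		/* ...register the DSP platform device with &omap_dsp_pdata... */
		return 0;
	}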
diff --git a/arch/arm/plat-orion/include/plat/pcie.h b/arch/arm/plat-orion/include/plat/pcie.h
index 3ebfef72b4e..cc99163e73f 100644
--- a/arch/arm/plat-orion/include/plat/pcie.h
+++ b/arch/arm/plat-orion/include/plat/pcie.h
@@ -11,12 +11,15 @@
#ifndef __PLAT_PCIE_H
#define __PLAT_PCIE_H
+struct pci_bus;
+
u32 orion_pcie_dev_id(void __iomem *base);
u32 orion_pcie_rev(void __iomem *base);
int orion_pcie_link_up(void __iomem *base);
int orion_pcie_x4_mode(void __iomem *base);
int orion_pcie_get_local_bus_nr(void __iomem *base);
void orion_pcie_set_local_bus_nr(void __iomem *base, int nr);
+void orion_pcie_reset(void __iomem *base);
void orion_pcie_setup(void __iomem *base,
struct mbus_dram_target_info *dram);
int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus,
diff --git a/arch/arm/plat-orion/pcie.c b/arch/arm/plat-orion/pcie.c
index 779553a1595..af2d733c50b 100644
--- a/arch/arm/plat-orion/pcie.c
+++ b/arch/arm/plat-orion/pcie.c
@@ -182,11 +182,6 @@ void __init orion_pcie_setup(void __iomem *base,
u32 mask;
/*
- * soft reset PCIe unit
- */
- orion_pcie_reset(base);
-
- /*
* Point PCIe unit MBUS decode windows to DRAM space.
*/
orion_pcie_setup_wins(base, dram);
diff --git a/arch/arm/plat-pxa/include/plat/pxa3xx_nand.h b/arch/arm/plat-pxa/include/plat/pxa3xx_nand.h
index 3478eae32d8..01a8448e471 100644
--- a/arch/arm/plat-pxa/include/plat/pxa3xx_nand.h
+++ b/arch/arm/plat-pxa/include/plat/pxa3xx_nand.h
@@ -30,15 +30,15 @@ struct pxa3xx_nand_cmdset {
};
struct pxa3xx_nand_flash {
- const struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
- const struct pxa3xx_nand_cmdset *cmdset;
-
- uint32_t page_per_block;/* Pages per block (PG_PER_BLK) */
- uint32_t page_size; /* Page size in bytes (PAGE_SZ) */
- uint32_t flash_width; /* Width of Flash memory (DWIDTH_M) */
- uint32_t dfc_width; /* Width of flash controller(DWIDTH_C) */
- uint32_t num_blocks; /* Number of physical blocks in Flash */
- uint32_t chip_id;
+ uint32_t chip_id;
+ unsigned int page_per_block; /* Pages per block (PG_PER_BLK) */
+ unsigned int page_size; /* Page size in bytes (PAGE_SZ) */
+ unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */
+ unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */
+ unsigned int num_blocks; /* Number of physical blocks in Flash */
+
+ struct pxa3xx_nand_cmdset *cmdset; /* NAND command set */
+ struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
};
struct pxa3xx_nand_platform_data {
diff --git a/arch/arm/plat-pxa/include/plat/sdhci.h b/arch/arm/plat-pxa/include/plat/sdhci.h
new file mode 100644
index 00000000000..e49c5b6fc4e
--- /dev/null
+++ b/arch/arm/plat-pxa/include/plat/sdhci.h
@@ -0,0 +1,32 @@
+/* linux/arch/arm/plat-pxa/include/plat/sdhci.h
+ *
+ * Copyright 2010 Marvell
+ * Zhangfei Gao <zhangfei.gao@marvell.com>
+ *
+ * PXA Platform - SDHCI platform data definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __PLAT_PXA_SDHCI_H
+#define __PLAT_PXA_SDHCI_H
+
+/* PXA-specific flags */
+/* Requires the clock to be free-running (no clock gating) */
+#define PXA_FLAG_DISABLE_CLOCK_GATING (1<<0)
+
+/*
+ * struct sdhci_pxa_platdata - Platform device data for PXA SDHCI
+ * @max_speed: the maximum speed supported
+ * @quirks: quirks of specific device
+ * @flags: flags for platform requirement
+ */
+struct sdhci_pxa_platdata {
+ unsigned int max_speed;
+ unsigned int quirks;
+ unsigned int flags;
+};
+
+#endif /* __PLAT_PXA_SDHCI_H */
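A minimal sketch of board code attaching this platform data to an SDHCI controller; the device name, id and maximum speed are assumptions:

	#include <linux/platform_device.h>
	#include <plat/sdhci.h>

	static struct sdhci_pxa_platdata board_sdhci_pdata = {
		.max_speed	= 26000000,			/* assumed */
		.flags		= PXA_FLAG_DISABLE_CLOCK_GATING,
	};

	static struct platform_device board_sdhci_device = {
		.name	= "sdhci-pxa",				/* assumed driver name */
		.id	= 0,
		.dev	= {
			.platform_data = &board_sdhci_pdata,
		},
	};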
diff --git a/arch/arm/plat-s3c24xx/Kconfig b/arch/arm/plat-s3c24xx/Kconfig
index 984bf66826d..5a27b1b538f 100644
--- a/arch/arm/plat-s3c24xx/Kconfig
+++ b/arch/arm/plat-s3c24xx/Kconfig
@@ -69,6 +69,7 @@ config S3C24XX_GPIO_EXTRA
int
default 128 if S3C24XX_GPIO_EXTRA128
default 64 if S3C24XX_GPIO_EXTRA64
+ default 16 if ARCH_H1940
default 0
config S3C24XX_GPIO_EXTRA64
diff --git a/arch/arm/plat-s3c24xx/gpiolib.c b/arch/arm/plat-s3c24xx/gpiolib.c
index 243b6411050..24c6f5a3059 100644
--- a/arch/arm/plat-s3c24xx/gpiolib.c
+++ b/arch/arm/plat-s3c24xx/gpiolib.c
@@ -82,6 +82,8 @@ static struct s3c_gpio_cfg s3c24xx_gpiocfg_banka = {
struct s3c_gpio_cfg s3c24xx_gpiocfg_default = {
.set_config = s3c_gpio_setcfg_s3c24xx,
.get_config = s3c_gpio_getcfg_s3c24xx,
+ .set_pull = s3c_gpio_setpull_1up,
+ .get_pull = s3c_gpio_getpull_1up,
};
struct s3c_gpio_chip s3c24xx_gpios[] = {
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index f0dc5b8075a..313b13073c5 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Linux Kernel Configuration"
-
config AVR32
def_bool y
# With EMBEDDED=n, we get lots of stuff automatically selected
diff --git a/arch/avr32/kernel/ptrace.c b/arch/avr32/kernel/ptrace.c
index 5e73c25f8f8..4aedcab7cd4 100644
--- a/arch/avr32/kernel/ptrace.c
+++ b/arch/avr32/kernel/ptrace.c
@@ -146,9 +146,11 @@ static int ptrace_setregs(struct task_struct *tsk, const void __user *uregs)
return ret;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ void __user *datap = (void __user *) data;
switch (request) {
/* Read the word at location addr in the child process */
@@ -158,8 +160,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_PEEKUSR:
- ret = ptrace_read_user(child, addr,
- (unsigned long __user *)data);
+ ret = ptrace_read_user(child, addr, datap);
break;
/* Write the word in data at location addr */
@@ -173,11 +174,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (void __user *)data);
+ ret = ptrace_getregs(child, datap);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (const void __user *)data);
+ ret = ptrace_setregs(child, datap);
break;
default:
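The avr32 change above is the first instance of a prototype change repeated for every architecture below: addr and data become unsigned long, and a single __user pointer view of data is derived once. A hedged minimal sketch of the resulting shape:

	long arch_ptrace(struct task_struct *child, long request,
			 unsigned long addr, unsigned long data)
	{
		unsigned long __user *datap = (unsigned long __user *)data;
		unsigned long tmp;

		switch (request) {
		case PTRACE_PEEKUSR:
			if (addr & 3)		/* unsigned addr: no "addr < 0" test needed */
				return -EIO;
			tmp = 0;		/* in a real port: get_reg(child, addr >> 2) */
			return put_user(tmp, datap);
		default:
			return ptrace_request(child, request, addr, data);
		}
	}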
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index d9a1cb7ec30..0a221d48152 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Blackfin Kernel Configuration"
-
config SYMBOL_PREFIX
string
default "_"
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index 08bc44ea688..edae461b1c5 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -320,7 +320,7 @@ static void bfin_correct_hw_break(void)
}
}
-void kgdb_disable_hw_debug(struct pt_regs *regs)
+static void bfin_disable_hw_debug(struct pt_regs *regs)
{
/* Disable hardware debugging while we are in kgdb */
bfin_write_WPIACTL(0);
@@ -406,6 +406,7 @@ struct kgdb_arch arch_kgdb_ops = {
#endif
.set_hw_breakpoint = bfin_set_hw_break,
.remove_hw_breakpoint = bfin_remove_hw_break,
+ .disable_hw_break = bfin_disable_hw_debug,
.remove_all_hw_break = bfin_remove_all_hw_break,
.correct_hw_break = bfin_correct_hw_break,
};
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index cd0c090ebc5..b407bc8ad91 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -7,7 +7,6 @@
*/
#include <linux/module.h>
-#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/uaccess.h>
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index b3583935413..75089f80855 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -38,12 +38,13 @@
* Get contents of register REGNO in task TASK.
*/
static inline long
-get_reg(struct task_struct *task, long regno, unsigned long __user *datap)
+get_reg(struct task_struct *task, unsigned long regno,
+ unsigned long __user *datap)
{
long tmp;
struct pt_regs *regs = task_pt_regs(task);
- if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0)
+ if (regno & 3 || regno > PT_LAST_PSEUDO)
return -EIO;
switch (regno) {
@@ -74,11 +75,11 @@ get_reg(struct task_struct *task, long regno, unsigned long __user *datap)
* Write contents of register REGNO in task TASK.
*/
static inline int
-put_reg(struct task_struct *task, long regno, unsigned long data)
+put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
{
struct pt_regs *regs = task_pt_regs(task);
- if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0)
+ if (regno & 3 || regno > PT_LAST_PSEUDO)
return -EIO;
switch (regno) {
@@ -240,7 +241,8 @@ void user_disable_single_step(struct task_struct *child)
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
unsigned long __user *datap = (unsigned long __user *)data;
@@ -368,14 +370,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_bfin_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (void __user *)data);
+ datap);
case PTRACE_SETREGS:
pr_debug("ptrace: PTRACE_SETREGS\n");
return copy_regset_from_user(child, &user_bfin_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (const void __user *)data);
+ datap);
case_default:
default:
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index aefe3b18a07..613e62831c5 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see the Configure script.
-#
-
-mainmenu "Linux/CRIS Kernel Configuration"
-
config MMU
bool
default y
diff --git a/arch/cris/arch-v10/kernel/ptrace.c b/arch/cris/arch-v10/kernel/ptrace.c
index e70c804e937..320065f3cbe 100644
--- a/arch/cris/arch-v10/kernel/ptrace.c
+++ b/arch/cris/arch-v10/kernel/ptrace.c
@@ -76,9 +76,11 @@ ptrace_disable(struct task_struct *child)
* (in user space) where the result of the ptrace call is written (instead of
* being returned).
*/
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ unsigned int regno = addr >> 2;
unsigned long __user *datap = (unsigned long __user *)data;
switch (request) {
@@ -93,10 +95,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
unsigned long tmp;
ret = -EIO;
- if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+ if ((addr & 3) || regno > PT_MAX)
break;
- tmp = get_reg(child, addr >> 2);
+ tmp = get_reg(child, regno);
ret = put_user(tmp, datap);
break;
}
@@ -110,19 +112,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* Write the word at location address in the USER area. */
case PTRACE_POKEUSR:
ret = -EIO;
- if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+ if ((addr & 3) || regno > PT_MAX)
break;
- addr >>= 2;
-
- if (addr == PT_DCCR) {
+ if (regno == PT_DCCR) {
/* don't allow the tracing process to change stuff like
* interrupt enable, kernel/user bit, dma enables etc.
*/
data &= DCCR_MASK;
data |= get_reg(child, PT_DCCR) & ~DCCR_MASK;
}
- if (put_reg(child, addr, data))
+ if (put_reg(child, regno, data))
break;
ret = 0;
break;
@@ -141,7 +141,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
}
- data += sizeof(long);
+ datap++;
}
break;
@@ -165,7 +165,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
put_reg(child, i, tmp);
- data += sizeof(long);
+ datap++;
}
break;
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index f4ebd1e7d0f..511ece94a57 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -126,9 +126,11 @@ ptrace_disable(struct task_struct *child)
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ unsigned int regno = addr >> 2;
unsigned long __user *datap = (unsigned long __user *)data;
switch (request) {
@@ -163,10 +165,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
unsigned long tmp;
ret = -EIO;
- if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+ if ((addr & 3) || regno > PT_MAX)
break;
- tmp = get_reg(child, addr >> 2);
+ tmp = get_reg(child, regno);
ret = put_user(tmp, datap);
break;
}
@@ -180,19 +182,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* Write the word at location address in the USER area. */
case PTRACE_POKEUSR:
ret = -EIO;
- if ((addr & 3) || addr < 0 || addr > PT_MAX << 2)
+ if ((addr & 3) || regno > PT_MAX)
break;
- addr >>= 2;
-
- if (addr == PT_CCS) {
+ if (regno == PT_CCS) {
/* don't allow the tracing process to change stuff like
* interrupt enable, kernel/user bit, dma enables etc.
*/
data &= CCS_MASK;
data |= get_reg(child, PT_CCS) & ~CCS_MASK;
}
- if (put_reg(child, addr, data))
+ if (put_reg(child, regno, data))
break;
ret = 0;
break;
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 0f2417df632..f6bcb039cd6 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -1,7 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
config FRV
bool
default y
@@ -61,8 +57,6 @@ config HZ
int
default 1000
-mainmenu "Fujitsu FR-V Kernel Configuration"
-
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 2b63b0191f5..efad12071c2 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c
index fac028936a0..9d68f7fac73 100644
--- a/arch/frv/kernel/ptrace.c
+++ b/arch/frv/kernel/ptrace.c
@@ -254,23 +254,26 @@ void ptrace_disable(struct task_struct *child)
user_disable_single_step(child);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long tmp;
int ret;
+ int regno = addr >> 2;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
tmp = 0;
ret = -EIO;
- if ((addr & 3) || addr < 0)
+ if (addr & 3)
break;
ret = 0;
- switch (addr >> 2) {
+ switch (regno) {
case 0 ... PT__END - 1:
- tmp = get_reg(child, addr >> 2);
+ tmp = get_reg(child, regno);
break;
case PT__END + 0:
@@ -299,23 +302,18 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
if (ret == 0)
- ret = put_user(tmp, (unsigned long *) data);
+ ret = put_user(tmp, datap);
break;
}
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
- if ((addr & 3) || addr < 0)
+ if (addr & 3)
break;
- ret = 0;
- switch (addr >> 2) {
+ switch (regno) {
case 0 ... PT__END - 1:
- ret = put_reg(child, addr >> 2, data);
- break;
-
- default:
- ret = -EIO;
+ ret = put_reg(child, regno, data);
break;
}
break;
@@ -324,25 +322,25 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_frv_native_view,
REGSET_GENERAL,
0, sizeof(child->thread.user->i),
- (void __user *)data);
+ datap);
case PTRACE_SETREGS: /* Set all integer regs in the child. */
return copy_regset_from_user(child, &user_frv_native_view,
REGSET_GENERAL,
0, sizeof(child->thread.user->i),
- (const void __user *)data);
+ datap);
case PTRACE_GETFPREGS: /* Get the child FP/Media state. */
return copy_regset_to_user(child, &user_frv_native_view,
REGSET_FPMEDIA,
0, sizeof(child->thread.user->f),
- (void __user *)data);
+ datap);
case PTRACE_SETFPREGS: /* Set the child FP/Media state. */
return copy_regset_from_user(child, &user_frv_native_view,
REGSET_FPMEDIA,
0, sizeof(child->thread.user->f),
- (const void __user *)data);
+ datap);
default:
ret = ptrace_request(child, request, addr, data);
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
index 61088dcc159..fd7fcd4c2e3 100644
--- a/arch/frv/mm/highmem.c
+++ b/arch/frv/mm/highmem.c
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(__kmap_atomic);
void __kunmap_atomic(void *kvaddr)
{
- int type = kmap_atomic_idx_pop();
+ int type = kmap_atomic_idx();
switch (type) {
case 0: __kunmap_atomic_primary(4, 6); break;
case 1: __kunmap_atomic_primary(5, 7); break;
@@ -83,6 +83,7 @@ void __kunmap_atomic(void *kvaddr)
default:
BUG();
}
+ kmap_atomic_idx_pop();
pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
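The frv change keeps the current kmap slot index alive while the mapping is torn down and pops it only afterwards; a generic hedged sketch of the ordering this series establishes:

	void __kunmap_atomic(void *kvaddr)
	{
		int type = kmap_atomic_idx();	/* peek at the top-of-stack slot */

		/* ...undo the fixmap/TLB entry selected by 'type'... */

		kmap_atomic_idx_pop();		/* release the slot only when done */
		pagefault_enable();
	}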
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 988b6ff34cc..65f897d8c1e 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "uClinux/h8300 (w/o MMU) Kernel Configuration"
-
config H8300
bool
default y
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index 97478138e36..933bd388efb 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
index df114122ebd..497fa89b5df 100644
--- a/arch/h8300/kernel/ptrace.c
+++ b/arch/h8300/kernel/ptrace.c
@@ -50,27 +50,29 @@ void ptrace_disable(struct task_struct *child)
user_disable_single_step(child);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ int regno = addr >> 2;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp = 0;
- if ((addr & 3) || addr < 0 || addr >= sizeof(struct user)) {
+ if ((addr & 3) || addr >= sizeof(struct user)) {
ret = -EIO;
break ;
}
ret = 0; /* Default return condition */
- addr = addr >> 2; /* temporary hack. */
- if (addr < H8300_REGS_NO)
- tmp = h8300_get_reg(child, addr);
+ if (regno < H8300_REGS_NO)
+ tmp = h8300_get_reg(child, regno);
else {
- switch(addr) {
+ switch (regno) {
case 49:
tmp = child->mm->start_code;
break ;
@@ -88,24 +90,23 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
}
if (!ret)
- ret = put_user(tmp,(unsigned long *) data);
+ ret = put_user(tmp, datap);
break ;
}
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
- if ((addr & 3) || addr < 0 || addr >= sizeof(struct user)) {
+ if ((addr & 3) || addr >= sizeof(struct user)) {
ret = -EIO;
break ;
}
- addr = addr >> 2; /* temporary hack. */
- if (addr == PT_ORIG_ER0) {
+ if (regno == PT_ORIG_ER0) {
ret = -EIO;
break ;
}
- if (addr < H8300_REGS_NO) {
- ret = h8300_put_reg(child, addr, data);
+ if (regno < H8300_REGS_NO) {
+ ret = h8300_put_reg(child, regno, data);
break ;
}
ret = -EIO;
@@ -116,11 +117,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
unsigned long tmp;
for (i = 0; i < H8300_REGS_NO; i++) {
tmp = h8300_get_reg(child, i);
- if (put_user(tmp, (unsigned long *) data)) {
+ if (put_user(tmp, datap)) {
ret = -EFAULT;
break;
}
- data += sizeof(long);
+ datap++;
}
ret = 0;
break;
@@ -130,12 +131,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
int i;
unsigned long tmp;
for (i = 0; i < H8300_REGS_NO; i++) {
- if (get_user(tmp, (unsigned long *) data)) {
+ if (get_user(tmp, datap)) {
ret = -EFAULT;
break;
}
h8300_put_reg(child, i, tmp);
- data += sizeof(long);
+ datap++;
}
ret = 0;
break;
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 7c82fa1fc91..e0f5b6d7f84 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "IA-64 Linux Kernel Configuration"
-
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 3a078ad3aa4..331de723c67 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -202,7 +202,7 @@ simscsi_readwrite10 (struct scsi_cmnd *sc, int mode)
}
static int
-simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+simscsi_queuecommand_lck (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
unsigned int target_id = sc->device->id;
char fname[MAX_ROOT_LEN+16];
@@ -326,6 +326,8 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
return 0;
}
+static DEF_SCSI_QCMD(simscsi_queuecommand)
+
static int
simscsi_host_reset (struct scsi_cmnd *sc)
{
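The simscsi change follows the standard host-lock push-down conversion: the old handler keeps its body under a _lck suffix and DEF_SCSI_QCMD() generates the locked wrapper that the SCSI midlayer calls. A hedged sketch of the same conversion for a hypothetical driver:

	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_host.h>

	static int mydrv_queuecommand_lck(struct scsi_cmnd *sc,
					  void (*done)(struct scsi_cmnd *))
	{
		/* ...existing per-command work, still run with the host lock held... */
		done(sc);
		return 0;
	}

	static DEF_SCSI_QCMD(mydrv_queuecommand)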
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index 7fa8a859466..6073b187528 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -56,10 +56,10 @@ typedef u64 cputime64_t;
#define jiffies64_to_cputime64(__jif) ((__jif) * (NSEC_PER_SEC / HZ))
/*
- * Convert cputime <-> milliseconds
+ * Convert cputime <-> microseconds
*/
-#define cputime_to_msecs(__ct) ((__ct) / NSEC_PER_MSEC)
-#define msecs_to_cputime(__msecs) ((__msecs) * NSEC_PER_MSEC)
+#define cputime_to_usecs(__ct) ((__ct) / NSEC_PER_USEC)
+#define usecs_to_cputime(__usecs) ((__usecs) * NSEC_PER_USEC)
/*
* Convert cputime <-> seconds
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 6b1852f7f97..39e534f5a3b 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -618,16 +618,15 @@ pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
}
-static int
-pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
- struct vfsmount *mnt)
+static struct dentry *
+pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
{
- return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt);
+ return mount_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC);
}
static struct file_system_type pfm_fs_type = {
.name = "pfmfs",
- .get_sb = pfmfs_get_sb,
+ .mount = pfmfs_mount,
.kill_sb = kill_anon_super,
};
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 7c7909f9bc9..8848f43d819 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -1177,7 +1177,8 @@ ptrace_disable (struct task_struct *child)
}
long
-arch_ptrace (struct task_struct *child, long request, long addr, long data)
+arch_ptrace (struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
switch (request) {
case PTRACE_PEEKTEXT:
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 3867fd21f33..5c291d65196 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Linux/M32R Kernel Configuration"
-
config M32R
bool
default y
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 0021ade4cba..20743754f2b 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -622,9 +622,11 @@ void ptrace_disable(struct task_struct *child)
}
long
-arch_ptrace(struct task_struct *child, long request, long addr, long data)
+arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/*
@@ -639,8 +641,7 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
* read the word at location addr in the USER area.
*/
case PTRACE_PEEKUSR:
- ret = ptrace_read_user(child, addr,
- (unsigned long __user *)data);
+ ret = ptrace_read_user(child, addr, datap);
break;
/*
@@ -661,11 +662,11 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (void __user *)data);
+ ret = ptrace_getregs(child, datap);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (void __user *)data);
+ ret = ptrace_setregs(child, datap);
break;
default:
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 77bb0d6baa6..bc9271b8575 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -1,7 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
config M68K
bool
default y
@@ -62,8 +58,6 @@ config HZ
config ARCH_USES_GETTIMEOFFSET
def_bool y
-mainmenu "Linux/68k Kernel Configuration"
-
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/m68k/include/asm/irqflags.h b/arch/m68k/include/asm/irqflags.h
index 4a5b284a155..7ef4115b8c4 100644
--- a/arch/m68k/include/asm/irqflags.h
+++ b/arch/m68k/include/asm/irqflags.h
@@ -2,7 +2,9 @@
#define _M68K_IRQFLAGS_H
#include <linux/types.h>
+#ifdef CONFIG_MMU
#include <linux/hardirq.h>
+#endif
#include <linux/preempt.h>
#include <asm/thread_info.h>
#include <asm/entry.h>
diff --git a/arch/m68k/include/asm/machdep.h b/arch/m68k/include/asm/machdep.h
index 789f3b2de0e..415d5484916 100644
--- a/arch/m68k/include/asm/machdep.h
+++ b/arch/m68k/include/asm/machdep.h
@@ -40,5 +40,6 @@ extern unsigned long hw_timer_offset(void);
extern irqreturn_t arch_timer_interrupt(int irq, void *dummy);
extern void config_BSP(char *command, int len);
+extern void do_IRQ(int irq, struct pt_regs *fp);
#endif /* _M68K_MACHDEP_H */
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index 18732ab2329..c2a1fc23dd7 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -18,7 +18,6 @@
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index 616e59752c2..0b252683cef 100644
--- a/arch/m68k/kernel/ptrace.c
+++ b/arch/m68k/kernel/ptrace.c
@@ -156,55 +156,57 @@ void user_disable_single_step(struct task_struct *child)
singlestep_disable(child);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long tmp;
int i, ret = 0;
+ int regno = addr >> 2; /* temporary hack. */
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR:
if (addr & 3)
goto out_eio;
- addr >>= 2; /* temporary hack. */
- if (addr >= 0 && addr < 19) {
- tmp = get_reg(child, addr);
- } else if (addr >= 21 && addr < 49) {
- tmp = child->thread.fp[addr - 21];
+ if (regno >= 0 && regno < 19) {
+ tmp = get_reg(child, regno);
+ } else if (regno >= 21 && regno < 49) {
+ tmp = child->thread.fp[regno - 21];
/* Convert internal fpu reg representation
* into long double format
*/
- if (FPU_IS_EMU && (addr < 45) && !(addr % 3))
+ if (FPU_IS_EMU && (regno < 45) && !(regno % 3))
tmp = ((tmp & 0xffff0000) << 15) |
((tmp & 0x0000ffff) << 16);
} else
goto out_eio;
- ret = put_user(tmp, (unsigned long *)data);
+ ret = put_user(tmp, datap);
break;
- case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
+ case PTRACE_POKEUSR:
+ /* write the word at location addr in the USER area */
if (addr & 3)
goto out_eio;
- addr >>= 2; /* temporary hack. */
- if (addr == PT_SR) {
+ if (regno == PT_SR) {
data &= SR_MASK;
data |= get_reg(child, PT_SR) & ~SR_MASK;
}
- if (addr >= 0 && addr < 19) {
- if (put_reg(child, addr, data))
+ if (regno >= 0 && regno < 19) {
+ if (put_reg(child, regno, data))
goto out_eio;
- } else if (addr >= 21 && addr < 48) {
+ } else if (regno >= 21 && regno < 48) {
/* Convert long double format
* into internal fpu reg representation
*/
- if (FPU_IS_EMU && (addr < 45) && !(addr % 3)) {
- data = (unsigned long)data << 15;
+ if (FPU_IS_EMU && (regno < 45) && !(regno % 3)) {
+ data <<= 15;
data = (data & 0xffff0000) |
((data & 0x0000ffff) >> 1);
}
- child->thread.fp[addr - 21] = data;
+ child->thread.fp[regno - 21] = data;
} else
goto out_eio;
break;
@@ -212,16 +214,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_GETREGS: /* Get all gp regs from the child. */
for (i = 0; i < 19; i++) {
tmp = get_reg(child, i);
- ret = put_user(tmp, (unsigned long *)data);
+ ret = put_user(tmp, datap);
if (ret)
break;
- data += sizeof(long);
+ datap++;
}
break;
case PTRACE_SETREGS: /* Set all gp regs in the child. */
for (i = 0; i < 19; i++) {
- ret = get_user(tmp, (unsigned long *)data);
+ ret = get_user(tmp, datap);
if (ret)
break;
if (i == PT_SR) {
@@ -229,25 +231,24 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp |= get_reg(child, PT_SR) & ~SR_MASK;
}
put_reg(child, i, tmp);
- data += sizeof(long);
+ datap++;
}
break;
case PTRACE_GETFPREGS: /* Get the child FPU state. */
- if (copy_to_user((void *)data, &child->thread.fp,
+ if (copy_to_user(datap, &child->thread.fp,
sizeof(struct user_m68kfp_struct)))
ret = -EFAULT;
break;
case PTRACE_SETFPREGS: /* Set the child FPU state. */
- if (copy_from_user(&child->thread.fp, (void *)data,
+ if (copy_from_user(&child->thread.fp, datap,
sizeof(struct user_m68kfp_struct)))
ret = -EFAULT;
break;
case PTRACE_GET_THREAD_AREA:
- ret = put_user(task_thread_info(child)->tp_value,
- (unsigned long __user *)data);
+ ret = put_user(task_thread_info(child)->tp_value, datap);
break;
default:
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 9287150e5fb..fa9f746cf4a 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "uClinux/68k (w/o MMU) Kernel Configuration"
-
config M68K
bool
default y
diff --git a/arch/m68knommu/kernel/process.c b/arch/m68knommu/kernel/process.c
index 6d3390590e5..e2a63af5d51 100644
--- a/arch/m68knommu/kernel/process.c
+++ b/arch/m68knommu/kernel/process.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
diff --git a/arch/m68knommu/kernel/ptrace.c b/arch/m68knommu/kernel/ptrace.c
index 6fe7c38cd55..6709fb70733 100644
--- a/arch/m68knommu/kernel/ptrace.c
+++ b/arch/m68knommu/kernel/ptrace.c
@@ -112,9 +112,12 @@ void ptrace_disable(struct task_struct *child)
user_disable_single_step(child);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ int regno = addr >> 2;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
@@ -122,53 +125,48 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
unsigned long tmp;
ret = -EIO;
- if ((addr & 3) || addr < 0 ||
- addr > sizeof(struct user) - 3)
+ if ((addr & 3) || addr > sizeof(struct user) - 3)
break;
tmp = 0; /* Default return condition */
- addr = addr >> 2; /* temporary hack. */
ret = -EIO;
- if (addr < 19) {
- tmp = get_reg(child, addr);
- if (addr == PT_SR)
+ if (regno < 19) {
+ tmp = get_reg(child, regno);
+ if (regno == PT_SR)
tmp >>= 16;
- } else if (addr >= 21 && addr < 49) {
- tmp = child->thread.fp[addr - 21];
- } else if (addr == 49) {
+ } else if (regno >= 21 && regno < 49) {
+ tmp = child->thread.fp[regno - 21];
+ } else if (regno == 49) {
tmp = child->mm->start_code;
- } else if (addr == 50) {
+ } else if (regno == 50) {
tmp = child->mm->start_data;
- } else if (addr == 51) {
+ } else if (regno == 51) {
tmp = child->mm->end_code;
} else
break;
- ret = put_user(tmp,(unsigned long *) data);
+ ret = put_user(tmp, datap);
break;
}
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
- if ((addr & 3) || addr < 0 ||
- addr > sizeof(struct user) - 3)
+ if ((addr & 3) || addr > sizeof(struct user) - 3)
break;
- addr = addr >> 2; /* temporary hack. */
-
- if (addr == PT_SR) {
+ if (regno == PT_SR) {
data &= SR_MASK;
data <<= 16;
data |= get_reg(child, PT_SR) & ~(SR_MASK << 16);
}
- if (addr < 19) {
- if (put_reg(child, addr, data))
+ if (regno < 19) {
+ if (put_reg(child, regno, data))
break;
ret = 0;
break;
}
- if (addr >= 21 && addr < 48)
+ if (regno >= 21 && regno < 48)
{
- child->thread.fp[addr - 21] = data;
+ child->thread.fp[regno - 21] = data;
ret = 0;
}
break;
@@ -180,11 +178,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp = get_reg(child, i);
if (i == PT_SR)
tmp >>= 16;
- if (put_user(tmp, (unsigned long *) data)) {
+ if (put_user(tmp, datap)) {
ret = -EFAULT;
break;
}
- data += sizeof(long);
+ datap++;
}
ret = 0;
break;
@@ -194,7 +192,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
int i;
unsigned long tmp;
for (i = 0; i < 19; i++) {
- if (get_user(tmp, (unsigned long *) data)) {
+ if (get_user(tmp, datap)) {
ret = -EFAULT;
break;
}
@@ -204,7 +202,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp |= get_reg(child, PT_SR) & ~(SR_MASK << 16);
}
put_reg(child, i, tmp);
- data += sizeof(long);
+ datap++;
}
ret = 0;
break;
@@ -213,7 +211,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#ifdef PTRACE_GETFPREGS
case PTRACE_GETFPREGS: { /* Get the child FPU state. */
ret = 0;
- if (copy_to_user((void *)data, &child->thread.fp,
+ if (copy_to_user(datap, &child->thread.fp,
sizeof(struct user_m68kfp_struct)))
ret = -EFAULT;
break;
@@ -223,7 +221,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#ifdef PTRACE_SETFPREGS
case PTRACE_SETFPREGS: { /* Set the child FPU state. */
ret = 0;
- if (copy_from_user(&child->thread.fp, (void *)data,
+ if (copy_from_user(&child->thread.fp, datap,
sizeof(struct user_m68kfp_struct)))
ret = -EFAULT;
break;
@@ -231,8 +229,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#endif
case PTRACE_GET_THREAD_AREA:
- ret = put_user(task_thread_info(child)->tp_value,
- (unsigned long __user *)data);
+ ret = put_user(task_thread_info(child)->tp_value, datap);
break;
default:
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index dad40fc2bef..387d5ffdfd3 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -1,8 +1,3 @@
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-
-mainmenu "Linux/Microblaze Kernel Configuration"
-
config MICROBLAZE
def_bool y
select HAVE_MEMBLOCK
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c
index dc03ffc8174..05ac8cc975d 100644
--- a/arch/microblaze/kernel/ptrace.c
+++ b/arch/microblaze/kernel/ptrace.c
@@ -73,7 +73,8 @@ static microblaze_reg_t *reg_save_addr(unsigned reg_offs,
return (microblaze_reg_t *)((char *)regs + reg_offs);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int rval;
unsigned long val = 0;
@@ -99,7 +100,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
} else {
rval = -EIO;
}
- } else if (addr >= 0 && addr < PT_SIZE && (addr & 0x3) == 0) {
+ } else if (addr < PT_SIZE && (addr & 0x3) == 0) {
microblaze_reg_t *reg_addr = reg_save_addr(addr, child);
if (request == PTRACE_PEEKUSR)
val = *reg_addr;
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 46cae2b163e..67a2fa2caa4 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -4,18 +4,21 @@ config MIPS
select HAVE_GENERIC_DMA_COHERENT
select HAVE_IDE
select HAVE_OPROFILE
+ select HAVE_PERF_EVENTS
+ select PERF_USE_VMALLOC
select HAVE_ARCH_KGDB
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_C_RECORDMCOUNT
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_KPROBES
select HAVE_KRETPROBES
select RTC_LIB if !MACH_LOONGSON
select GENERIC_ATOMIC64 if !64BIT
-
-mainmenu "Linux/MIPS Kernel Configuration"
+ select HAVE_DMA_ATTRS
+ select HAVE_DMA_API_DEBUG
menu "Machine selection"
@@ -693,6 +696,9 @@ config CAVIUM_OCTEON_REFERENCE_BOARD
select SWAP_IO_SPACE
select HW_HAS_PCI
select ARCH_SUPPORTS_MSI
+ select ZONE_DMA32
+ select USB_ARCH_HAS_OHCI
+ select USB_ARCH_HAS_EHCI
help
This option supports all of the Octeon reference boards from Cavium
Networks. It builds a kernel that dynamically determines the Octeon
@@ -1336,6 +1342,57 @@ config CPU_CAVIUM_OCTEON
can have up to 16 Mips64v2 cores and 8 integrated gigabit ethernets.
Full details can be found at http://www.caviumnetworks.com.
+config CPU_BMIPS3300
+ bool "BMIPS3300"
+ depends on SYS_HAS_CPU_BMIPS3300
+ select DMA_NONCOHERENT
+ select IRQ_CPU
+ select SWAP_IO_SPACE
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select WEAK_ORDERING
+ help
+ Broadcom BMIPS3300 processors.
+
+config CPU_BMIPS4350
+ bool "BMIPS4350"
+ depends on SYS_HAS_CPU_BMIPS4350
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select DMA_NONCOHERENT
+ select IRQ_CPU
+ select SWAP_IO_SPACE
+ select SYS_SUPPORTS_SMP
+ select SYS_SUPPORTS_HOTPLUG_CPU
+ select WEAK_ORDERING
+ help
+ Broadcom BMIPS4350 ("VIPER") processors.
+
+config CPU_BMIPS4380
+ bool "BMIPS4380"
+ depends on SYS_HAS_CPU_BMIPS4380
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select DMA_NONCOHERENT
+ select IRQ_CPU
+ select SWAP_IO_SPACE
+ select SYS_SUPPORTS_SMP
+ select SYS_SUPPORTS_HOTPLUG_CPU
+ select WEAK_ORDERING
+ help
+ Broadcom BMIPS4380 processors.
+
+config CPU_BMIPS5000
+ bool "BMIPS5000"
+ depends on SYS_HAS_CPU_BMIPS5000
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select DMA_NONCOHERENT
+ select IRQ_CPU
+ select SWAP_IO_SPACE
+ select SYS_SUPPORTS_SMP
+ select SYS_SUPPORTS_HOTPLUG_CPU
+ select WEAK_ORDERING
+ help
+ Broadcom BMIPS5000 processors.
+
endchoice
if CPU_LOONGSON2F
@@ -1454,6 +1511,18 @@ config SYS_HAS_CPU_SB1
config SYS_HAS_CPU_CAVIUM_OCTEON
bool
+config SYS_HAS_CPU_BMIPS3300
+ bool
+
+config SYS_HAS_CPU_BMIPS4350
+ bool
+
+config SYS_HAS_CPU_BMIPS4380
+ bool
+
+config SYS_HAS_CPU_BMIPS5000
+ bool
+
#
# CPU may reorder R->R, R->W, W->R, W->W
# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
@@ -1930,6 +1999,14 @@ config NODES_SHIFT
default "6"
depends on NEED_MULTIPLE_NODES
+config HW_PERF_EVENTS
+ bool "Enable hardware performance counter support for perf events"
+ depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && CPU_MIPS32
+ default y
+ help
+ Enable hardware performance counter support for perf events. If
+ disabled, perf events will use software events only.
+
source "mm/Kconfig"
config SMP
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 43dc2799773..f437cd1fafb 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -67,6 +67,15 @@ config CMDLINE_OVERRIDE
Normally, you will choose 'N' here.
+config DEBUG_STACKOVERFLOW
+ bool "Check for stack overflows"
+ depends on DEBUG_KERNEL
+ help
+	  This option causes messages to be printed if free stack space
+	  drops below a certain limit (2GB on MIPS). It provides another
+	  way to detect overflows of the kernel-mode stack, which are
+	  usually caused by nested interrupts.
+
config DEBUG_STACK_USAGE
bool "Enable stack utilization instrumentation"
depends on DEBUG_KERNEL
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index f4a4b663ebb..7c1102e41fe 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -48,9 +48,6 @@ ifneq ($(SUBARCH),$(ARCH))
endif
endif
-ifndef CONFIG_FUNCTION_TRACER
-cflags-y := -ffunction-sections
-endif
ifdef CONFIG_FUNCTION_GRAPH_TRACER
ifndef KBUILD_MCOUNT_RA_ADDRESS
ifeq ($(call cc-option-yn,-mmcount-ra-address), y)
@@ -159,6 +156,7 @@ cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += $(call cc-option,-march=octeon) -Wa,--trap
ifeq (,$(findstring march=octeon, $(cflags-$(CONFIG_CPU_CAVIUM_OCTEON))))
cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -Wa,-march=octeon
endif
+cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1
cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,)
diff --git a/arch/mips/ar7/gpio.c b/arch/mips/ar7/gpio.c
index c32fbb57441..425dfa5d6e1 100644
--- a/arch/mips/ar7/gpio.c
+++ b/arch/mips/ar7/gpio.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2007 Eugene Konev <ejka@openwrt.org>
- * Copyright (C) 2009 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2009-2010 Florian Fainelli <florian@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -37,6 +37,16 @@ static int ar7_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
return readl(gpio_in) & (1 << gpio);
}
+static int titan_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ar7_gpio_chip *gpch =
+ container_of(chip, struct ar7_gpio_chip, chip);
+ void __iomem *gpio_in0 = gpch->regs + TITAN_GPIO_INPUT_0;
+ void __iomem *gpio_in1 = gpch->regs + TITAN_GPIO_INPUT_1;
+
+ return readl(gpio >> 5 ? gpio_in1 : gpio_in0) & (1 << (gpio & 0x1f));
+}
+
static void ar7_gpio_set_value(struct gpio_chip *chip,
unsigned gpio, int value)
{
@@ -51,6 +61,21 @@ static void ar7_gpio_set_value(struct gpio_chip *chip,
writel(tmp, gpio_out);
}
+static void titan_gpio_set_value(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct ar7_gpio_chip *gpch =
+ container_of(chip, struct ar7_gpio_chip, chip);
+ void __iomem *gpio_out0 = gpch->regs + TITAN_GPIO_OUTPUT_0;
+ void __iomem *gpio_out1 = gpch->regs + TITAN_GPIO_OUTPUT_1;
+ unsigned tmp;
+
+ tmp = readl(gpio >> 5 ? gpio_out1 : gpio_out0) & ~(1 << (gpio & 0x1f));
+ if (value)
+ tmp |= 1 << (gpio & 0x1f);
+ writel(tmp, gpio >> 5 ? gpio_out1 : gpio_out0);
+}
+
static int ar7_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
struct ar7_gpio_chip *gpch =
@@ -62,6 +87,21 @@ static int ar7_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
return 0;
}
+static int titan_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ar7_gpio_chip *gpch =
+ container_of(chip, struct ar7_gpio_chip, chip);
+ void __iomem *gpio_dir0 = gpch->regs + TITAN_GPIO_DIR_0;
+ void __iomem *gpio_dir1 = gpch->regs + TITAN_GPIO_DIR_1;
+
+ if (gpio >= TITAN_GPIO_MAX)
+ return -EINVAL;
+
+ writel(readl(gpio >> 5 ? gpio_dir1 : gpio_dir0) | (1 << (gpio & 0x1f)),
+ gpio >> 5 ? gpio_dir1 : gpio_dir0);
+ return 0;
+}
+
static int ar7_gpio_direction_output(struct gpio_chip *chip,
unsigned gpio, int value)
{
@@ -75,6 +115,24 @@ static int ar7_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
+static int titan_gpio_direction_output(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct ar7_gpio_chip *gpch =
+ container_of(chip, struct ar7_gpio_chip, chip);
+ void __iomem *gpio_dir0 = gpch->regs + TITAN_GPIO_DIR_0;
+ void __iomem *gpio_dir1 = gpch->regs + TITAN_GPIO_DIR_1;
+
+ if (gpio >= TITAN_GPIO_MAX)
+ return -EINVAL;
+
+ titan_gpio_set_value(chip, gpio, value);
+ writel(readl(gpio >> 5 ? gpio_dir1 : gpio_dir0) & ~(1 <<
+ (gpio & 0x1f)), gpio >> 5 ? gpio_dir1 : gpio_dir0);
+
+ return 0;
+}
+
static struct ar7_gpio_chip ar7_gpio_chip = {
.chip = {
.label = "ar7-gpio",
@@ -87,7 +145,19 @@ static struct ar7_gpio_chip ar7_gpio_chip = {
}
};
-int ar7_gpio_enable(unsigned gpio)
+static struct ar7_gpio_chip titan_gpio_chip = {
+ .chip = {
+ .label = "titan-gpio",
+ .direction_input = titan_gpio_direction_input,
+ .direction_output = titan_gpio_direction_output,
+ .set = titan_gpio_set_value,
+ .get = titan_gpio_get_value,
+ .base = 0,
+ .ngpio = TITAN_GPIO_MAX,
+ }
+};
+
+static inline int ar7_gpio_enable_ar7(unsigned gpio)
{
void __iomem *gpio_en = ar7_gpio_chip.regs + AR7_GPIO_ENABLE;
@@ -95,9 +165,26 @@ int ar7_gpio_enable(unsigned gpio)
return 0;
}
+
+static inline int ar7_gpio_enable_titan(unsigned gpio)
+{
+ void __iomem *gpio_en0 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_0;
+ void __iomem *gpio_en1 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_1;
+
+ writel(readl(gpio >> 5 ? gpio_en1 : gpio_en0) | (1 << (gpio & 0x1f)),
+ gpio >> 5 ? gpio_en1 : gpio_en0);
+
+ return 0;
+}
+
+int ar7_gpio_enable(unsigned gpio)
+{
+ return ar7_is_titan() ? ar7_gpio_enable_titan(gpio) :
+ ar7_gpio_enable_ar7(gpio);
+}
EXPORT_SYMBOL(ar7_gpio_enable);
-int ar7_gpio_disable(unsigned gpio)
+static inline int ar7_gpio_disable_ar7(unsigned gpio)
{
void __iomem *gpio_en = ar7_gpio_chip.regs + AR7_GPIO_ENABLE;
@@ -105,27 +192,159 @@ int ar7_gpio_disable(unsigned gpio)
return 0;
}
+
+static inline int ar7_gpio_disable_titan(unsigned gpio)
+{
+ void __iomem *gpio_en0 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_0;
+ void __iomem *gpio_en1 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_1;
+
+ writel(readl(gpio >> 5 ? gpio_en1 : gpio_en0) & ~(1 << (gpio & 0x1f)),
+ gpio >> 5 ? gpio_en1 : gpio_en0);
+
+ return 0;
+}
+
+int ar7_gpio_disable(unsigned gpio)
+{
+ return ar7_is_titan() ? ar7_gpio_disable_titan(gpio) :
+ ar7_gpio_disable_ar7(gpio);
+}
EXPORT_SYMBOL(ar7_gpio_disable);
-static int __init ar7_gpio_init(void)
+struct titan_gpio_cfg {
+ u32 reg;
+ u32 shift;
+ u32 func;
+};
+
+static struct titan_gpio_cfg titan_gpio_table[] = {
+ /* reg, start bit, mux value */
+ {4, 24, 1},
+ {4, 26, 1},
+ {4, 28, 1},
+ {4, 30, 1},
+ {5, 6, 1},
+ {5, 8, 1},
+ {5, 10, 1},
+ {5, 12, 1},
+ {7, 14, 3},
+ {7, 16, 3},
+ {7, 18, 3},
+ {7, 20, 3},
+ {7, 22, 3},
+ {7, 26, 3},
+ {7, 28, 3},
+ {7, 30, 3},
+ {8, 0, 3},
+ {8, 2, 3},
+ {8, 4, 3},
+ {8, 10, 3},
+ {8, 14, 3},
+ {8, 16, 3},
+ {8, 18, 3},
+ {8, 20, 3},
+ {9, 8, 3},
+ {9, 10, 3},
+ {9, 12, 3},
+ {9, 14, 3},
+ {9, 18, 3},
+ {9, 20, 3},
+ {9, 24, 3},
+ {9, 26, 3},
+ {9, 28, 3},
+ {9, 30, 3},
+ {10, 0, 3},
+ {10, 2, 3},
+ {10, 8, 3},
+ {10, 10, 3},
+ {10, 12, 3},
+ {10, 14, 3},
+ {13, 12, 3},
+ {13, 14, 3},
+ {13, 16, 3},
+ {13, 18, 3},
+ {13, 24, 3},
+ {13, 26, 3},
+ {13, 28, 3},
+ {13, 30, 3},
+ {14, 2, 3},
+ {14, 6, 3},
+ {14, 8, 3},
+ {14, 12, 3}
+};
+
+static int titan_gpio_pinsel(unsigned gpio)
+{
+ struct titan_gpio_cfg gpio_cfg;
+ u32 mux_status, pin_sel_reg, tmp;
+ void __iomem *pin_sel = (void __iomem *)KSEG1ADDR(AR7_REGS_PINSEL);
+
+ if (gpio >= ARRAY_SIZE(titan_gpio_table))
+ return -EINVAL;
+
+ gpio_cfg = titan_gpio_table[gpio];
+ pin_sel_reg = gpio_cfg.reg - 1;
+
+ mux_status = (readl(pin_sel + pin_sel_reg) >> gpio_cfg.shift) & 0x3;
+
+ /* Check the mux status */
+ if (!((mux_status == 0) || (mux_status == gpio_cfg.func)))
+ return 0;
+
+ /* Set the pin sel value */
+ tmp = readl(pin_sel + pin_sel_reg);
+ tmp |= ((gpio_cfg.func & 0x3) << gpio_cfg.shift);
+ writel(tmp, pin_sel + pin_sel_reg);
+
+ return 0;
+}
+
+/* Perform minimal Titan GPIO configuration */
+static void titan_gpio_init(void)
+{
+ unsigned i;
+
+ for (i = 44; i < 48; i++) {
+ titan_gpio_pinsel(i);
+ ar7_gpio_enable_titan(i);
+ titan_gpio_direction_input(&titan_gpio_chip.chip, i);
+ }
+}
+
+int __init ar7_gpio_init(void)
{
int ret;
+ struct ar7_gpio_chip *gpch;
+ unsigned size;
+
+ if (!ar7_is_titan()) {
+ gpch = &ar7_gpio_chip;
+ size = 0x10;
+ } else {
+ gpch = &titan_gpio_chip;
+ size = 0x1f;
+ }
- ar7_gpio_chip.regs = ioremap_nocache(AR7_REGS_GPIO,
+ gpch->regs = ioremap_nocache(AR7_REGS_GPIO,
AR7_REGS_GPIO + 0x10);
- if (!ar7_gpio_chip.regs) {
- printk(KERN_ERR "ar7-gpio: failed to ioremap regs\n");
+ if (!gpch->regs) {
+ printk(KERN_ERR "%s: failed to ioremap regs\n",
+ gpch->chip.label);
return -ENOMEM;
}
- ret = gpiochip_add(&ar7_gpio_chip.chip);
+ ret = gpiochip_add(&gpch->chip);
if (ret) {
- printk(KERN_ERR "ar7-gpio: failed to add gpiochip\n");
+ printk(KERN_ERR "%s: failed to add gpiochip\n",
+ gpch->chip.label);
return ret;
}
- printk(KERN_INFO "ar7-gpio: registered %d GPIOs\n",
- ar7_gpio_chip.chip.ngpio);
+ printk(KERN_INFO "%s: registered %d GPIOs\n",
+ gpch->chip.label, gpch->chip.ngpio);
+
+ if (ar7_is_titan())
+ titan_gpio_init();
+
return ret;
}
-arch_initcall(ar7_gpio_init);
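
The Titan helpers added above all rely on the same bank/bit arithmetic: GPIOs 0-31 live in the *_0 registers and the rest in the *_1 registers, so "gpio >> 5" selects the bank and "1 << (gpio & 0x1f)" forms the mask inside the chosen 32-bit register. A minimal host-side sketch of that arithmetic, with TITAN_GPIO_MAX as an illustrative stand-in for the mach-ar7 definition:

    #include <stdio.h>

    #define TITAN_GPIO_MAX 52   /* illustrative value only */

    int main(void)
    {
        unsigned gpio;

        for (gpio = 0; gpio < TITAN_GPIO_MAX; gpio += 13) {
            unsigned bank = gpio >> 5;    /* 0 selects the *_0 register, 1 the *_1 register */
            unsigned bit  = gpio & 0x1f;  /* bit position inside that 32-bit register */
            printf("gpio %2u -> bank %u, mask 0x%08x\n", gpio, bank, 1u << bit);
        }
        return 0;
    }
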
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 0da5b2b8dd8..7d2fab39232 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -357,6 +357,11 @@ static struct gpio_led default_leds[] = {
},
};
+static struct gpio_led titan_leds[] = {
+ { .name = "status", .gpio = 8, .active_low = 1, },
+ { .name = "wifi", .gpio = 13, .active_low = 1, },
+};
+
static struct gpio_led dsl502t_leds[] = {
{
.name = "status",
@@ -495,6 +500,9 @@ static void __init detect_leds(void)
} else if (strstr(prid, "DG834")) {
ar7_led_data.num_leds = ARRAY_SIZE(dg834g_leds);
ar7_led_data.leds = dg834g_leds;
+ } else if (strstr(prid, "CYWM") || strstr(prid, "CYWL")) {
+ ar7_led_data.num_leds = ARRAY_SIZE(titan_leds);
+ ar7_led_data.leds = titan_leds;
}
}
@@ -560,6 +568,51 @@ static int __init ar7_register_uarts(void)
return 0;
}
+static void __init titan_fixup_devices(void)
+{
+ /* Set vlynq0 data */
+ vlynq_low_data.reset_bit = 15;
+ vlynq_low_data.gpio_bit = 14;
+
+ /* Set vlynq1 data */
+ vlynq_high_data.reset_bit = 16;
+ vlynq_high_data.gpio_bit = 7;
+
+ /* Set vlynq0 resources */
+ vlynq_low_res[0].start = TITAN_REGS_VLYNQ0;
+ vlynq_low_res[0].end = TITAN_REGS_VLYNQ0 + 0xff;
+ vlynq_low_res[1].start = 33;
+ vlynq_low_res[1].end = 33;
+ vlynq_low_res[2].start = 0x0c000000;
+ vlynq_low_res[2].end = 0x0fffffff;
+ vlynq_low_res[3].start = 80;
+ vlynq_low_res[3].end = 111;
+
+ /* Set vlynq1 resources */
+ vlynq_high_res[0].start = TITAN_REGS_VLYNQ1;
+ vlynq_high_res[0].end = TITAN_REGS_VLYNQ1 + 0xff;
+ vlynq_high_res[1].start = 34;
+ vlynq_high_res[1].end = 34;
+ vlynq_high_res[2].start = 0x40000000;
+ vlynq_high_res[2].end = 0x43ffffff;
+ vlynq_high_res[3].start = 112;
+ vlynq_high_res[3].end = 143;
+
+ /* Set cpmac0 data */
+ cpmac_low_data.phy_mask = 0x40000000;
+
+ /* Set cpmac1 data */
+ cpmac_high_data.phy_mask = 0x80000000;
+
+ /* Set cpmac0 resources */
+ cpmac_low_res[0].start = TITAN_REGS_MAC0;
+ cpmac_low_res[0].end = TITAN_REGS_MAC0 + 0x7ff;
+
+ /* Set cpmac1 resources */
+ cpmac_high_res[0].start = TITAN_REGS_MAC1;
+ cpmac_high_res[0].end = TITAN_REGS_MAC1 + 0x7ff;
+}
+
static int __init ar7_register_devices(void)
{
void __iomem *bootcr;
@@ -574,6 +627,9 @@ static int __init ar7_register_devices(void)
if (res)
pr_warning("unable to register physmap-flash: %d\n", res);
+ if (ar7_is_titan())
+ titan_fixup_devices();
+
ar7_device_disable(vlynq_low_data.reset_bit);
res = platform_device_register(&vlynq_low);
if (res)
diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c
index 52385790e5c..23818d29912 100644
--- a/arch/mips/ar7/prom.c
+++ b/arch/mips/ar7/prom.c
@@ -246,6 +246,8 @@ void __init prom_init(void)
ar7_init_cmdline(fw_arg0, (char **)fw_arg1);
ar7_init_env((struct env_var *)fw_arg2);
console_config();
+
+ ar7_gpio_init();
}
#define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4)))
diff --git a/arch/mips/ar7/setup.c b/arch/mips/ar7/setup.c
index 3a801d2cb6e..f20b53e597c 100644
--- a/arch/mips/ar7/setup.c
+++ b/arch/mips/ar7/setup.c
@@ -23,6 +23,7 @@
#include <asm/reboot.h>
#include <asm/mach-ar7/ar7.h>
#include <asm/mach-ar7/prom.h>
+#include <asm/mach-ar7/gpio.h>
static void ar7_machine_restart(char *command)
{
@@ -49,6 +50,8 @@ static void ar7_machine_power_off(void)
const char *get_system_type(void)
{
u16 chip_id = ar7_chip_id();
+ u16 titan_variant_id = titan_chip_id();
+
switch (chip_id) {
case AR7_CHIP_7100:
return "TI AR7 (TNETD7100)";
@@ -56,6 +59,17 @@ const char *get_system_type(void)
return "TI AR7 (TNETD7200)";
case AR7_CHIP_7300:
return "TI AR7 (TNETD7300)";
+ case AR7_CHIP_TITAN:
+ switch (titan_variant_id) {
+ case TITAN_CHIP_1050:
+ return "TI AR7 (TNETV1050)";
+ case TITAN_CHIP_1055:
+ return "TI AR7 (TNETV1055)";
+ case TITAN_CHIP_1056:
+ return "TI AR7 (TNETV1056)";
+ case TITAN_CHIP_1060:
+ return "TI AR7 (TNETV1060)";
+ }
default:
return "TI AR7 (unknown)";
}
diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c
index cbb7caf86d7..7c7e4d4486c 100644
--- a/arch/mips/bcm63xx/cpu.c
+++ b/arch/mips/bcm63xx/cpu.c
@@ -10,7 +10,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpu.h>
+#include <asm/cpu.h>
#include <asm/cpu-info.h>
+#include <asm/mipsregs.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
@@ -296,26 +298,24 @@ void __init bcm63xx_cpu_init(void)
expected_cpu_id = 0;
switch (c->cputype) {
- /*
- * BCM6338 as the same PrId as BCM3302 see arch/mips/kernel/cpu-probe.c
- */
- case CPU_BCM3302:
- __cpu_name[cpu] = "Broadcom BCM6338";
- expected_cpu_id = BCM6338_CPU_ID;
- bcm63xx_regs_base = bcm96338_regs_base;
- bcm63xx_irqs = bcm96338_irqs;
+ case CPU_BMIPS3300:
+ if ((read_c0_prid() & 0xff00) == PRID_IMP_BMIPS3300_ALT) {
+ expected_cpu_id = BCM6348_CPU_ID;
+ bcm63xx_regs_base = bcm96348_regs_base;
+ bcm63xx_irqs = bcm96348_irqs;
+ } else {
+ __cpu_name[cpu] = "Broadcom BCM6338";
+ expected_cpu_id = BCM6338_CPU_ID;
+ bcm63xx_regs_base = bcm96338_regs_base;
+ bcm63xx_irqs = bcm96338_irqs;
+ }
break;
- case CPU_BCM6345:
+ case CPU_BMIPS32:
expected_cpu_id = BCM6345_CPU_ID;
bcm63xx_regs_base = bcm96345_regs_base;
bcm63xx_irqs = bcm96345_irqs;
break;
- case CPU_BCM6348:
- expected_cpu_id = BCM6348_CPU_ID;
- bcm63xx_regs_base = bcm96348_regs_base;
- bcm63xx_irqs = bcm96348_irqs;
- break;
- case CPU_BCM6358:
+ case CPU_BMIPS4350:
expected_cpu_id = BCM6358_CPU_ID;
bcm63xx_regs_base = bcm96358_regs_base;
bcm63xx_irqs = bcm96358_irqs;
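
Both the BCM6338 and the BCM6348 identify themselves as BMIPS3300 cores, so the new CPU_BMIPS3300 case above falls back to the implementation field of the PrId register to tell them apart. A hedged sketch of that test, with an illustrative PrId value standing in for PRID_IMP_BMIPS3300_ALT:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative value; the real constant comes from asm/cpu.h. */
    #define PRID_IMP_BMIPS3300_ALT 0x9100

    static const char *bcm63xx_guess(uint32_t prid)
    {
        /* Mask the implementation field and compare, as in bcm63xx_cpu_init(). */
        if ((prid & 0xff00) == PRID_IMP_BMIPS3300_ALT)
            return "Broadcom BCM6348";
        return "Broadcom BCM6338";
    }

    int main(void)
    {
        printf("%s\n", bcm63xx_guess(0x9100));
        printf("%s\n", bcm63xx_guess(0x9000));
        return 0;
    }
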
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index 47323ca452d..caae2285816 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -3,6 +3,17 @@ config CAVIUM_OCTEON_SPECIFIC_OPTIONS
depends on CPU_CAVIUM_OCTEON
default "y"
+config CAVIUM_CN63XXP1
+ bool "Enable CN63XXP1 errata worarounds"
+ depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
+ default "n"
+ help
+ The CN63XXP1 chip requires build-time workarounds to
+ function reliably; select this option to enable them. These
+ workarounds will cause a slight decrease in performance on
+ non-CN63XXP1 hardware, so it is recommended to select "n"
+ unless it is known the workarounds are needed.
+
config CAVIUM_OCTEON_2ND_KERNEL
bool "Build the kernel to be used as a 2nd kernel on the same chip"
depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
@@ -87,3 +98,15 @@ config ARCH_SPARSEMEM_ENABLE
config CAVIUM_OCTEON_HELPER
def_bool y
depends on OCTEON_ETHERNET || PCI
+
+config IOMMU_HELPER
+ bool
+
+config NEED_SG_DMA_LENGTH
+ bool
+
+config SWIOTLB
+ def_bool y
+ depends on CPU_CAVIUM_OCTEON
+ select IOMMU_HELPER
+ select NEED_SG_DMA_LENGTH
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
index b6847c8e0dd..26bf71130bf 100644
--- a/arch/mips/cavium-octeon/csrc-octeon.c
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -4,14 +4,18 @@
* for more details.
*
* Copyright (C) 2007 by Ralf Baechle
+ * Copyright (C) 2009, 2010 Cavium Networks, Inc.
*/
#include <linux/clocksource.h>
#include <linux/init.h>
+#include <linux/smp.h>
+#include <asm/cpu-info.h>
#include <asm/time.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ipd-defs.h>
+#include <asm/octeon/cvmx-mio-defs.h>
/*
* Set the current core's cvmcount counter to the value of the
@@ -19,11 +23,23 @@
* on-line. This allows for a read from a local cpu register to
* access a synchronized counter.
*
+ * On CPU_CAVIUM_OCTEON2 the IPD_CLK_COUNT is scaled by rdiv/sdiv.
*/
void octeon_init_cvmcount(void)
{
unsigned long flags;
unsigned loops = 2;
+ u64 f = 0;
+ u64 rdiv = 0;
+ u64 sdiv = 0;
+ if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
+ union cvmx_mio_rst_boot rst_boot;
+ rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
+ rdiv = rst_boot.s.c_mul; /* CPU clock */
+ sdiv = rst_boot.s.pnr_mul; /* I/O clock */
+ f = (0x8000000000000000ull / sdiv) * 2;
+ }
+
/* Clobber loops so GCC will not unroll the following while loop. */
asm("" : "+r" (loops));
@@ -33,8 +49,20 @@ void octeon_init_cvmcount(void)
* Loop several times so we are executing from the cache,
* which should give more deterministic timing.
*/
- while (loops--)
- write_c0_cvmcount(cvmx_read_csr(CVMX_IPD_CLK_COUNT));
+ while (loops--) {
+ u64 ipd_clk_count = cvmx_read_csr(CVMX_IPD_CLK_COUNT);
+ if (rdiv != 0) {
+ ipd_clk_count *= rdiv;
+ if (f != 0) {
+ asm("dmultu\t%[cnt],%[f]\n\t"
+ "mfhi\t%[cnt]"
+ : [cnt] "+r" (ipd_clk_count),
+ [f] "=r" (f)
+ : : "hi", "lo");
+ }
+ }
+ write_c0_cvmcount(ipd_clk_count);
+ }
local_irq_restore(flags);
}
@@ -77,7 +105,7 @@ unsigned long long notrace sched_clock(void)
void __init plat_time_init(void)
{
clocksource_mips.rating = 300;
- clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);
+ clocksource_set_clock(&clocksource_mips, octeon_get_clock_rate());
clocksource_register(&clocksource_mips);
}
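
On OCTEON2 the scaling added to octeon_init_cvmcount() turns the I/O-clock based IPD_CLK_COUNT into core-clock ticks, i.e. count * rdiv / sdiv, without a runtime division: f ~= 2^64 / sdiv is precomputed and the dmultu/mfhi pair then keeps the upper 64 bits of count * rdiv * f. A self-contained sketch of the same fixed-point arithmetic, using the GCC/Clang unsigned __int128 extension in place of the HI register and purely illustrative multiplier values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Illustrative multipliers; real ones come from the MIO_RST_BOOT register. */
        uint64_t rdiv = 30;                  /* CPU clock multiplier (c_mul)   */
        uint64_t sdiv = 10;                  /* I/O clock multiplier (pnr_mul) */
        uint64_t ipd_clk_count = 123456789;  /* sample IPD_CLK_COUNT reading   */

        uint64_t f = (0x8000000000000000ull / sdiv) * 2;   /* ~= 2^64 / sdiv */

        uint64_t scaled = ipd_clk_count * rdiv;
        /* Equivalent of "dmultu scaled, f; mfhi scaled": upper 64 bits of the product. */
        scaled = (uint64_t)(((unsigned __int128)scaled * f) >> 64);

        printf("exact   : %llu\n", (unsigned long long)(ipd_clk_count * rdiv / sdiv));
        printf("fixed pt: %llu\n", (unsigned long long)scaled);
        return 0;
    }

The two values agree up to the rounding lost in the truncated reciprocal; the point is simply that the hot path never needs a 64-bit divide.
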
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index d22b5a2d64f..1abb66caaa1 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -8,335 +8,342 @@
* Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
* swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
* IP32 changes by Ilya.
- * Cavium Networks: Create new dma setup for Cavium Networks Octeon based on
- * the kernels original.
+ * Copyright (C) 2010 Cavium Networks, Inc.
*/
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/string.h>
#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
#include <linux/scatterlist.h>
+#include <linux/bootmem.h>
+#include <linux/swiotlb.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
-#include <linux/cache.h>
-#include <linux/io.h>
+#include <asm/bootinfo.h>
#include <asm/octeon/octeon.h>
+
+#ifdef CONFIG_PCI
+#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>
-#include <dma-coherence.h>
+static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
+{
+ if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
+ return paddr - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
+ else
+ return paddr;
+}
-#ifdef CONFIG_PCI
-#include <asm/octeon/pci-octeon.h>
-#endif
+static phys_addr_t octeon_hole_dma_to_phys(dma_addr_t daddr)
+{
+ if (daddr >= CVMX_PCIE_BAR1_RC_BASE)
+ return daddr + CVMX_PCIE_BAR1_PHYS_BASE - CVMX_PCIE_BAR1_RC_BASE;
+ else
+ return daddr;
+}
+
+static dma_addr_t octeon_gen1_phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
+ paddr -= 0x400000000ull;
+ return octeon_hole_phys_to_dma(paddr);
+}
-#define BAR2_PCI_ADDRESS 0x8000000000ul
+static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ daddr = octeon_hole_dma_to_phys(daddr);
-struct bar1_index_state {
- int16_t ref_count; /* Number of PCI mappings using this index */
- uint16_t address_bits; /* Upper bits of physical address. This is
- shifted 22 bits */
-};
+ if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
+ daddr += 0x400000000ull;
-#ifdef CONFIG_PCI
-static DEFINE_RAW_SPINLOCK(bar1_lock);
-static struct bar1_index_state bar1_state[32];
-#endif
+ return daddr;
+}
-dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size)
+static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
-#ifndef CONFIG_PCI
- /* Without PCI/PCIe this function can be called for Octeon internal
- devices such as USB. These devices all support 64bit addressing */
+ if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
+ paddr -= 0x400000000ull;
+
+ /* Anything in the BAR1 hole or above goes via BAR2 */
+ if (paddr >= 0xf0000000ull)
+ paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;
+
+ return paddr;
+}
+
+static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
+ daddr -= OCTEON_BAR2_PCI_ADDRESS;
+
+ if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
+ daddr += 0x400000000ull;
+ return daddr;
+}
+
+static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
+ phys_addr_t paddr)
+{
+ if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
+ paddr -= 0x400000000ull;
+
+ /* Anything not in the BAR1 range goes via BAR2 */
+ if (paddr >= octeon_bar1_pci_phys && paddr < octeon_bar1_pci_phys + 0x8000000ull)
+ paddr = paddr - octeon_bar1_pci_phys;
+ else
+ paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;
+
+ return paddr;
+}
+
+static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
+ dma_addr_t daddr)
+{
+ if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
+ daddr -= OCTEON_BAR2_PCI_ADDRESS;
+ else
+ daddr += octeon_bar1_pci_phys;
+
+ if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
+ daddr += 0x400000000ull;
+ return daddr;
+}
+
+#endif /* CONFIG_PCI */
+
+static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
+ direction, attrs);
mb();
- return virt_to_phys(ptr);
-#else
- unsigned long flags;
- uint64_t dma_mask;
- int64_t start_index;
- dma_addr_t result = -1;
- uint64_t physical = virt_to_phys(ptr);
- int64_t index;
+ return daddr;
+}
+
+static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+{
+ int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
mb();
- /*
- * Use the DMA masks to determine the allowed memory
- * region. For us it doesn't limit the actual memory, just the
- * address visible over PCI. Devices with limits need to use
- * lower indexed Bar1 entries.
- */
- if (dev) {
- dma_mask = dev->coherent_dma_mask;
- if (dev->dma_mask)
- dma_mask = *dev->dma_mask;
- } else {
- dma_mask = 0xfffffffful;
- }
+ return r;
+}
- /*
- * Platform devices, such as the internal USB, skip all
- * translation and use Octeon physical addresses directly.
- */
- if (!dev || dev->bus == &platform_bus_type)
- return physical;
+static void octeon_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
+{
+ swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
+ mb();
+}
- switch (octeon_dma_bar_type) {
- case OCTEON_DMA_BAR_TYPE_PCIE:
- if (unlikely(physical < (16ul << 10)))
- panic("dma_map_single: Not allowed to map first 16KB."
- " It interferes with BAR0 special area\n");
- else if ((physical + size >= (256ul << 20)) &&
- (physical < (512ul << 20)))
- panic("dma_map_single: Not allowed to map bootbus\n");
- else if ((physical + size >= 0x400000000ull) &&
- physical < 0x410000000ull)
- panic("dma_map_single: "
- "Attempt to map illegal memory address 0x%llx\n",
- physical);
- else if (physical >= 0x420000000ull)
- panic("dma_map_single: "
- "Attempt to map illegal memory address 0x%llx\n",
- physical);
- else if (physical >= CVMX_PCIE_BAR1_PHYS_BASE &&
- physical + size < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE)) {
- result = physical - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
-
- if (((result+size-1) & dma_mask) != result+size-1)
- panic("dma_map_single: Attempt to map address 0x%llx-0x%llx, which can't be accessed according to the dma mask 0x%llx\n",
- physical, physical+size-1, dma_mask);
- goto done;
- }
-
- /* The 2nd 256MB is mapped at 256<<20 instead of 0x410000000 */
- if ((physical >= 0x410000000ull) && physical < 0x420000000ull)
- result = physical - 0x400000000ull;
- else
- result = physical;
- if (((result+size-1) & dma_mask) != result+size-1)
- panic("dma_map_single: Attempt to map address "
- "0x%llx-0x%llx, which can't be accessed "
- "according to the dma mask 0x%llx\n",
- physical, physical+size-1, dma_mask);
- goto done;
+static void octeon_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems, enum dma_data_direction direction)
+{
+ swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
+ mb();
+}
- case OCTEON_DMA_BAR_TYPE_BIG:
-#ifdef CONFIG_64BIT
- /* If the device supports 64bit addressing, then use BAR2 */
- if (dma_mask > BAR2_PCI_ADDRESS) {
- result = physical + BAR2_PCI_ADDRESS;
- goto done;
- }
-#endif
- if (unlikely(physical < (4ul << 10))) {
- panic("dma_map_single: Not allowed to map first 4KB. "
- "It interferes with BAR0 special area\n");
- } else if (physical < (256ul << 20)) {
- if (unlikely(physical + size > (256ul << 20)))
- panic("dma_map_single: Requested memory spans "
- "Bar0 0:256MB and bootbus\n");
- result = physical;
- goto done;
- } else if (unlikely(physical < (512ul << 20))) {
- panic("dma_map_single: Not allowed to map bootbus\n");
- } else if (physical < (2ul << 30)) {
- if (unlikely(physical + size > (2ul << 30)))
- panic("dma_map_single: Requested memory spans "
- "Bar0 512MB:2GB and BAR1\n");
- result = physical;
- goto done;
- } else if (physical < (2ul << 30) + (128 << 20)) {
- /* Fall through */
- } else if (physical <
- (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)) {
- if (unlikely
- (physical + size >
- (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)))
- panic("dma_map_single: Requested memory "
- "extends past Bar1 (4GB-%luMB)\n",
- OCTEON_PCI_BAR1_HOLE_SIZE);
- result = physical;
- goto done;
- } else if ((physical >= 0x410000000ull) &&
- (physical < 0x420000000ull)) {
- if (unlikely(physical + size > 0x420000000ull))
- panic("dma_map_single: Requested memory spans "
- "non existant memory\n");
- /* BAR0 fixed mapping 256MB:512MB ->
- * 16GB+256MB:16GB+512MB */
- result = physical - 0x400000000ull;
- goto done;
- } else {
- /* Continued below switch statement */
- }
- break;
+static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ void *ret;
- case OCTEON_DMA_BAR_TYPE_SMALL:
-#ifdef CONFIG_64BIT
- /* If the device supports 64bit addressing, then use BAR2 */
- if (dma_mask > BAR2_PCI_ADDRESS) {
- result = physical + BAR2_PCI_ADDRESS;
- goto done;
- }
+ if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
+ return ret;
+
+ /* ignore region specifiers */
+ gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+#ifdef CONFIG_ZONE_DMA
+ if (dev == NULL)
+ gfp |= __GFP_DMA;
+ else if (dev->coherent_dma_mask <= DMA_BIT_MASK(24))
+ gfp |= __GFP_DMA;
+ else
#endif
- /* Continued below switch statement */
- break;
+#ifdef CONFIG_ZONE_DMA32
+ if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
+ gfp |= __GFP_DMA32;
+ else
+#endif
+ ;
- default:
- panic("dma_map_single: Invalid octeon_dma_bar_type\n");
- }
+ /* Don't invoke OOM killer */
+ gfp |= __GFP_NORETRY;
- /* Don't allow mapping to span multiple Bar entries. The hardware guys
- won't guarantee that DMA across boards work */
- if (unlikely((physical >> 22) != ((physical + size - 1) >> 22)))
- panic("dma_map_single: "
- "Requested memory spans more than one Bar1 entry\n");
+ ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
- if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
- start_index = 31;
- else if (unlikely(dma_mask < (1ul << 27)))
- start_index = (dma_mask >> 22);
- else
- start_index = 31;
-
- /* Only one processor can access the Bar register at once */
- raw_spin_lock_irqsave(&bar1_lock, flags);
-
- /* Look through Bar1 for existing mapping that will work */
- for (index = start_index; index >= 0; index--) {
- if ((bar1_state[index].address_bits == physical >> 22) &&
- (bar1_state[index].ref_count)) {
- /* An existing mapping will work, use it */
- bar1_state[index].ref_count++;
- if (unlikely(bar1_state[index].ref_count < 0))
- panic("dma_map_single: "
- "Bar1[%d] reference count overflowed\n",
- (int) index);
- result = (index << 22) | (physical & ((1 << 22) - 1));
- /* Large BAR1 is offset at 2GB */
- if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
- result += 2ul << 30;
- goto done_unlock;
- }
- }
+ mb();
- /* No existing mappings, look for a free entry */
- for (index = start_index; index >= 0; index--) {
- if (unlikely(bar1_state[index].ref_count == 0)) {
- union cvmx_pci_bar1_indexx bar1_index;
- /* We have a free entry, use it */
- bar1_state[index].ref_count = 1;
- bar1_state[index].address_bits = physical >> 22;
- bar1_index.u32 = 0;
- /* Address bits[35:22] sent to L2C */
- bar1_index.s.addr_idx = physical >> 22;
- /* Don't put PCI accesses in L2. */
- bar1_index.s.ca = 1;
- /* Endian Swap Mode */
- bar1_index.s.end_swp = 1;
- /* Set '1' when the selected address range is valid. */
- bar1_index.s.addr_v = 1;
- octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
- bar1_index.u32);
- /* An existing mapping will work, use it */
- result = (index << 22) | (physical & ((1 << 22) - 1));
- /* Large BAR1 is offset at 2GB */
- if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
- result += 2ul << 30;
- goto done_unlock;
- }
- }
+ return ret;
+}
- pr_err("dma_map_single: "
- "Can't find empty BAR1 index for physical mapping 0x%llx\n",
- (unsigned long long) physical);
+static void octeon_dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ int order = get_order(size);
-done_unlock:
- raw_spin_unlock_irqrestore(&bar1_lock, flags);
-done:
- pr_debug("dma_map_single 0x%llx->0x%llx\n", physical, result);
- return result;
-#endif
+ if (dma_release_from_coherent(dev, order, vaddr))
+ return;
+
+ swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
-void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
+static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
-#ifndef CONFIG_PCI
- /*
- * Without PCI/PCIe this function can be called for Octeon internal
- * devices such as USB. These devices all support 64bit addressing.
- */
- return;
-#else
- unsigned long flags;
- uint64_t index;
+ return paddr;
+}
+static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return daddr;
+}
+
+struct octeon_dma_map_ops {
+ struct dma_map_ops dma_map_ops;
+ dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
+ phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
+};
+
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
+ struct octeon_dma_map_ops,
+ dma_map_ops);
+
+ return ops->phys_to_dma(dev, paddr);
+}
+EXPORT_SYMBOL(phys_to_dma);
+
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
+ struct octeon_dma_map_ops,
+ dma_map_ops);
+
+ return ops->dma_to_phys(dev, daddr);
+}
+EXPORT_SYMBOL(dma_to_phys);
+
+static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
+ .dma_map_ops = {
+ .alloc_coherent = octeon_dma_alloc_coherent,
+ .free_coherent = octeon_dma_free_coherent,
+ .map_page = octeon_dma_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .map_sg = octeon_dma_map_sg,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = octeon_dma_sync_single_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = octeon_dma_sync_sg_for_device,
+ .mapping_error = swiotlb_dma_mapping_error,
+ .dma_supported = swiotlb_dma_supported
+ },
+ .phys_to_dma = octeon_unity_phys_to_dma,
+ .dma_to_phys = octeon_unity_dma_to_phys
+};
+
+char *octeon_swiotlb;
+
+void __init plat_swiotlb_setup(void)
+{
+ int i;
+ phys_t max_addr;
+ phys_t addr_size;
+ size_t swiotlbsize;
+ unsigned long swiotlb_nslabs;
+
+ max_addr = 0;
+ addr_size = 0;
+
+ for (i = 0 ; i < boot_mem_map.nr_map; i++) {
+ struct boot_mem_map_entry *e = &boot_mem_map.map[i];
+ if (e->type != BOOT_MEM_RAM)
+ continue;
+
+ /* These addresses map low for PCI. */
+ if (e->addr > 0x410000000ull)
+ continue;
+
+ addr_size += e->size;
+
+ if (max_addr < e->addr + e->size)
+ max_addr = e->addr + e->size;
+
+ }
+
+ swiotlbsize = PAGE_SIZE;
+
+#ifdef CONFIG_PCI
/*
- * Platform devices, such as the internal USB, skip all
- * translation and use Octeon physical addresses directly.
+ * For OCTEON_DMA_BAR_TYPE_SMALL, size the iotlb at 1/4 memory
+ * size to a maximum of 64MB
*/
- if (dev->bus == &platform_bus_type)
- return;
+ if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+ || OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
+ swiotlbsize = addr_size / 4;
+ if (swiotlbsize > 64 * (1<<20))
+ swiotlbsize = 64 * (1<<20);
+ } else if (max_addr > 0xf0000000ul) {
+ /*
+ * Otherwise only allocate a big iotlb if there is
+ * memory past the BAR1 hole.
+ */
+ swiotlbsize = 64 * (1<<20);
+ }
+#endif
+ swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
+ swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
+ swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
+
+ octeon_swiotlb = alloc_bootmem_low_pages(swiotlbsize);
+ swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1);
+
+ mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
+}
+
+#ifdef CONFIG_PCI
+static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
+ .dma_map_ops = {
+ .alloc_coherent = octeon_dma_alloc_coherent,
+ .free_coherent = octeon_dma_free_coherent,
+ .map_page = octeon_dma_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .map_sg = octeon_dma_map_sg,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = octeon_dma_sync_single_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = octeon_dma_sync_sg_for_device,
+ .mapping_error = swiotlb_dma_mapping_error,
+ .dma_supported = swiotlb_dma_supported
+ },
+};
+
+struct dma_map_ops *octeon_pci_dma_map_ops;
+
+void __init octeon_pci_dma_init(void)
+{
switch (octeon_dma_bar_type) {
case OCTEON_DMA_BAR_TYPE_PCIE:
- /* Nothing to do, all mappings are static */
- goto done;
-
+ _octeon_pci_dma_map_ops.phys_to_dma = octeon_gen1_phys_to_dma;
+ _octeon_pci_dma_map_ops.dma_to_phys = octeon_gen1_dma_to_phys;
+ break;
case OCTEON_DMA_BAR_TYPE_BIG:
-#ifdef CONFIG_64BIT
- /* Nothing to do for addresses using BAR2 */
- if (dma_addr >= BAR2_PCI_ADDRESS)
- goto done;
-#endif
- if (unlikely(dma_addr < (4ul << 10)))
- panic("dma_unmap_single: Unexpect DMA address 0x%llx\n",
- dma_addr);
- else if (dma_addr < (2ul << 30))
- /* Nothing to do for addresses using BAR0 */
- goto done;
- else if (dma_addr < (2ul << 30) + (128ul << 20))
- /* Need to unmap, fall through */
- index = (dma_addr - (2ul << 30)) >> 22;
- else if (dma_addr <
- (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20))
- goto done; /* Nothing to do for the rest of BAR1 */
- else
- panic("dma_unmap_single: Unexpect DMA address 0x%llx\n",
- dma_addr);
- /* Continued below switch statement */
+ _octeon_pci_dma_map_ops.phys_to_dma = octeon_big_phys_to_dma;
+ _octeon_pci_dma_map_ops.dma_to_phys = octeon_big_dma_to_phys;
break;
-
case OCTEON_DMA_BAR_TYPE_SMALL:
-#ifdef CONFIG_64BIT
- /* Nothing to do for addresses using BAR2 */
- if (dma_addr >= BAR2_PCI_ADDRESS)
- goto done;
-#endif
- index = dma_addr >> 22;
- /* Continued below switch statement */
+ _octeon_pci_dma_map_ops.phys_to_dma = octeon_small_phys_to_dma;
+ _octeon_pci_dma_map_ops.dma_to_phys = octeon_small_dma_to_phys;
break;
-
default:
- panic("dma_unmap_single: Invalid octeon_dma_bar_type\n");
+ BUG();
}
-
- if (unlikely(index > 31))
- panic("dma_unmap_single: "
- "Attempt to unmap an invalid address (0x%llx)\n",
- dma_addr);
-
- raw_spin_lock_irqsave(&bar1_lock, flags);
- bar1_state[index].ref_count--;
- if (bar1_state[index].ref_count == 0)
- octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0);
- else if (unlikely(bar1_state[index].ref_count < 0))
- panic("dma_unmap_single: Bar1[%u] reference count < 0\n",
- (int) index);
- raw_spin_unlock_irqrestore(&bar1_lock, flags);
-done:
- pr_debug("dma_unmap_single 0x%llx\n", dma_addr);
- return;
-#endif
+ octeon_pci_dma_map_ops = &_octeon_pci_dma_map_ops.dma_map_ops;
}
+#endif /* CONFIG_PCI */
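
The gen1 helpers introduced above are pure address arithmetic: DRAM physically aliased at 0x410000000-0x41fffffff is presented to PCI at 0x10000000-0x1fffffff, and anything inside the PCIe BAR1 window is rebased onto the root-complex BAR1 aperture. The sketch below exercises that round trip with made-up BAR1 constants; the real CVMX_PCIE_BAR1_* values are defined in the Octeon PCI headers.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative placeholders for the CVMX_PCIE_BAR1_* constants. */
    #define BAR1_PHYS_BASE 0xf0000000ull
    #define BAR1_PHYS_SIZE 0x10000000ull
    #define BAR1_RC_BASE   0x100000000ull

    static uint64_t gen1_phys_to_dma(uint64_t paddr)
    {
        /* Second 256MB alias: 0x410000000-0x41fffffff appears at 0x10000000 on PCI. */
        if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
            paddr -= 0x400000000ull;
        /* Addresses inside the BAR1 window are rebased to the RC BAR1 aperture. */
        if (paddr >= BAR1_PHYS_BASE && paddr < BAR1_PHYS_BASE + BAR1_PHYS_SIZE)
            return paddr - BAR1_PHYS_BASE + BAR1_RC_BASE;
        return paddr;
    }

    static uint64_t gen1_dma_to_phys(uint64_t daddr)
    {
        if (daddr >= BAR1_RC_BASE)
            daddr = daddr + BAR1_PHYS_BASE - BAR1_RC_BASE;
        if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
            daddr += 0x400000000ull;
        return daddr;
    }

    int main(void)
    {
        uint64_t samples[] = { 0x00001000ull, 0x418000000ull, 0xf8000000ull };
        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            uint64_t dma = gen1_phys_to_dma(samples[i]);
            printf("phys 0x%011llx -> dma 0x%011llx -> phys 0x%011llx\n",
                   (unsigned long long)samples[i], (unsigned long long)dma,
                   (unsigned long long)gen1_dma_to_phys(dma));
        }
        return 0;
    }
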
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
index 6abe56f1e09..d38246e33dd 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -26,8 +26,8 @@
***********************license end**************************************/
/*
- * Implementation of the Level 2 Cache (L2C) control, measurement, and
- * debugging facilities.
+ * Implementation of the Level 2 Cache (L2C) control,
+ * measurement, and debugging facilities.
*/
#include <asm/octeon/cvmx.h>
@@ -42,13 +42,7 @@
* if multiple applications or operating systems are running, then it
* is up to the user program to coordinate between them.
*/
-static cvmx_spinlock_t cvmx_l2c_spinlock;
-
-static inline int l2_size_half(void)
-{
- uint64_t val = cvmx_read_csr(CVMX_L2D_FUS3);
- return !!(val & (1ull << 34));
-}
+cvmx_spinlock_t cvmx_l2c_spinlock;
int cvmx_l2c_get_core_way_partition(uint32_t core)
{
@@ -58,6 +52,9 @@ int cvmx_l2c_get_core_way_partition(uint32_t core)
if (core >= cvmx_octeon_num_cores())
return -1;
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ return cvmx_read_csr(CVMX_L2C_WPAR_PPX(core)) & 0xffff;
+
/*
* Use the lower two bits of the coreNumber to determine the
* bit offset of the UMSK[] field in the L2C_SPAR register.
@@ -71,17 +68,13 @@ int cvmx_l2c_get_core_way_partition(uint32_t core)
switch (core & 0xC) {
case 0x0:
- return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >>
- field;
+ return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >> field;
case 0x4:
- return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >>
- field;
+ return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >> field;
case 0x8:
- return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >>
- field;
+ return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >> field;
case 0xC:
- return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >>
- field;
+ return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >> field;
}
return 0;
}
@@ -95,48 +88,50 @@ int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
mask &= valid_mask;
- /* A UMSK setting which blocks all L2C Ways is an error. */
- if (mask == valid_mask)
+ /* A UMSK setting which blocks all L2C Ways is an error on some chips */
+ if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
return -1;
/* Validate the core number */
if (core >= cvmx_octeon_num_cores())
return -1;
- /* Check to make sure current mask & new mask don't block all ways */
- if (((mask | cvmx_l2c_get_core_way_partition(core)) & valid_mask) ==
- valid_mask)
- return -1;
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ cvmx_write_csr(CVMX_L2C_WPAR_PPX(core), mask);
+ return 0;
+ }
- /* Use the lower two bits of core to determine the bit offset of the
+ /*
+ * Use the lower two bits of core to determine the bit offset of the
* UMSK[] field in the L2C_SPAR register.
*/
field = (core & 0x3) * 8;
- /* Assign the new mask setting to the UMSK[] field in the appropriate
+ /*
+ * Assign the new mask setting to the UMSK[] field in the appropriate
* L2C_SPAR register based on the core_num.
*
*/
switch (core & 0xC) {
case 0x0:
cvmx_write_csr(CVMX_L2C_SPAR0,
- (cvmx_read_csr(CVMX_L2C_SPAR0) &
- ~(0xFF << field)) | mask << field);
+ (cvmx_read_csr(CVMX_L2C_SPAR0) & ~(0xFF << field)) |
+ mask << field);
break;
case 0x4:
cvmx_write_csr(CVMX_L2C_SPAR1,
- (cvmx_read_csr(CVMX_L2C_SPAR1) &
- ~(0xFF << field)) | mask << field);
+ (cvmx_read_csr(CVMX_L2C_SPAR1) & ~(0xFF << field)) |
+ mask << field);
break;
case 0x8:
cvmx_write_csr(CVMX_L2C_SPAR2,
- (cvmx_read_csr(CVMX_L2C_SPAR2) &
- ~(0xFF << field)) | mask << field);
+ (cvmx_read_csr(CVMX_L2C_SPAR2) & ~(0xFF << field)) |
+ mask << field);
break;
case 0xC:
cvmx_write_csr(CVMX_L2C_SPAR3,
- (cvmx_read_csr(CVMX_L2C_SPAR3) &
- ~(0xFF << field)) | mask << field);
+ (cvmx_read_csr(CVMX_L2C_SPAR3) & ~(0xFF << field)) |
+ mask << field);
break;
}
return 0;
@@ -146,84 +141,137 @@ int cvmx_l2c_set_hw_way_partition(uint32_t mask)
{
uint32_t valid_mask;
- valid_mask = 0xff;
-
- if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN38XX)) {
- if (l2_size_half())
- valid_mask = 0xf;
- } else if (l2_size_half())
- valid_mask = 0x3;
-
+ valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
mask &= valid_mask;
- /* A UMSK setting which blocks all L2C Ways is an error. */
- if (mask == valid_mask)
- return -1;
- /* Check to make sure current mask & new mask don't block all ways */
- if (((mask | cvmx_l2c_get_hw_way_partition()) & valid_mask) ==
- valid_mask)
+ /* A UMSK setting which blocks all L2C Ways is an error on some chips */
+ if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
return -1;
- cvmx_write_csr(CVMX_L2C_SPAR4,
- (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ cvmx_write_csr(CVMX_L2C_WPAR_IOBX(0), mask);
+ else
+ cvmx_write_csr(CVMX_L2C_SPAR4,
+ (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
return 0;
}
int cvmx_l2c_get_hw_way_partition(void)
{
- return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF);
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ return cvmx_read_csr(CVMX_L2C_WPAR_IOBX(0)) & 0xffff;
+ else
+ return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF);
}
void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
uint32_t clear_on_read)
{
- union cvmx_l2c_pfctl pfctl;
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ union cvmx_l2c_pfctl pfctl;
- pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
+ pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
- switch (counter) {
- case 0:
- pfctl.s.cnt0sel = event;
- pfctl.s.cnt0ena = 1;
- if (!cvmx_octeon_is_pass1())
+ switch (counter) {
+ case 0:
+ pfctl.s.cnt0sel = event;
+ pfctl.s.cnt0ena = 1;
pfctl.s.cnt0rdclr = clear_on_read;
- break;
- case 1:
- pfctl.s.cnt1sel = event;
- pfctl.s.cnt1ena = 1;
- if (!cvmx_octeon_is_pass1())
+ break;
+ case 1:
+ pfctl.s.cnt1sel = event;
+ pfctl.s.cnt1ena = 1;
pfctl.s.cnt1rdclr = clear_on_read;
- break;
- case 2:
- pfctl.s.cnt2sel = event;
- pfctl.s.cnt2ena = 1;
- if (!cvmx_octeon_is_pass1())
+ break;
+ case 2:
+ pfctl.s.cnt2sel = event;
+ pfctl.s.cnt2ena = 1;
pfctl.s.cnt2rdclr = clear_on_read;
- break;
- case 3:
- default:
- pfctl.s.cnt3sel = event;
- pfctl.s.cnt3ena = 1;
- if (!cvmx_octeon_is_pass1())
+ break;
+ case 3:
+ default:
+ pfctl.s.cnt3sel = event;
+ pfctl.s.cnt3ena = 1;
pfctl.s.cnt3rdclr = clear_on_read;
- break;
- }
+ break;
+ }
- cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
+ cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
+ } else {
+ union cvmx_l2c_tadx_prf l2c_tadx_prf;
+ int tad;
+
+ cvmx_dprintf("L2C performance counter events are different for this chip, mapping 'event' to cvmx_l2c_tad_event_t\n");
+ if (clear_on_read)
+ cvmx_dprintf("L2C counters don't support clear on read for this chip\n");
+
+ l2c_tadx_prf.u64 = cvmx_read_csr(CVMX_L2C_TADX_PRF(0));
+
+ switch (counter) {
+ case 0:
+ l2c_tadx_prf.s.cnt0sel = event;
+ break;
+ case 1:
+ l2c_tadx_prf.s.cnt1sel = event;
+ break;
+ case 2:
+ l2c_tadx_prf.s.cnt2sel = event;
+ break;
+ default:
+ case 3:
+ l2c_tadx_prf.s.cnt3sel = event;
+ break;
+ }
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ cvmx_write_csr(CVMX_L2C_TADX_PRF(tad),
+ l2c_tadx_prf.u64);
+ }
}
uint64_t cvmx_l2c_read_perf(uint32_t counter)
{
switch (counter) {
case 0:
- return cvmx_read_csr(CVMX_L2C_PFC0);
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return cvmx_read_csr(CVMX_L2C_PFC0);
+ else {
+ uint64_t counter = 0;
+ int tad;
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad));
+ return counter;
+ }
case 1:
- return cvmx_read_csr(CVMX_L2C_PFC1);
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return cvmx_read_csr(CVMX_L2C_PFC1);
+ else {
+ uint64_t counter = 0;
+ int tad;
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad));
+ return counter;
+ }
case 2:
- return cvmx_read_csr(CVMX_L2C_PFC2);
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return cvmx_read_csr(CVMX_L2C_PFC2);
+ else {
+ uint64_t counter = 0;
+ int tad;
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad));
+ return counter;
+ }
case 3:
default:
- return cvmx_read_csr(CVMX_L2C_PFC3);
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ return cvmx_read_csr(CVMX_L2C_PFC3);
+ else {
+ uint64_t counter = 0;
+ int tad;
+ for (tad = 0; tad < CVMX_L2C_TADS; tad++)
+ counter += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad));
+ return counter;
+ }
}
}
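
On the newer parts each L2C TAD carries its own performance counters, which is why cvmx_l2c_read_perf() above now sums CVMX_L2C_TADX_PFCn over every TAD instead of reading a single global L2C_PFCn register. A trivial sketch of that aggregation, with the TAD count and CSR reads mocked for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define CVMX_L2C_TADS 4   /* illustrative; the real value is per-model */

    /* Stand-in for cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad)). */
    static uint64_t read_tadx_pfc0(int tad)
    {
        static const uint64_t sample[CVMX_L2C_TADS] = { 100, 250, 0, 7 };
        return sample[tad];
    }

    int main(void)
    {
        uint64_t counter = 0;
        int tad;

        for (tad = 0; tad < CVMX_L2C_TADS; tad++)
            counter += read_tadx_pfc0(tad);

        printf("aggregate PFC0 = %llu\n", (unsigned long long)counter);
        return 0;
    }
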
@@ -240,7 +288,7 @@ static void fault_in(uint64_t addr, int len)
volatile char dummy;
/*
* Adjust addr and length so we get all cache lines even for
- * small ranges spanning two cache lines
+ * small ranges spanning two cache lines.
*/
len += addr & CVMX_CACHE_LINE_MASK;
addr &= ~CVMX_CACHE_LINE_MASK;
@@ -259,67 +307,100 @@ static void fault_in(uint64_t addr, int len)
int cvmx_l2c_lock_line(uint64_t addr)
{
- int retval = 0;
- union cvmx_l2c_dbg l2cdbg;
- union cvmx_l2c_lckbase lckbase;
- union cvmx_l2c_lckoff lckoff;
- union cvmx_l2t_err l2t_err;
- l2cdbg.u64 = 0;
- lckbase.u64 = 0;
- lckoff.u64 = 0;
-
- cvmx_spinlock_lock(&cvmx_l2c_spinlock);
-
- /* Clear l2t error bits if set */
- l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
- l2t_err.s.lckerr = 1;
- l2t_err.s.lckerr2 = 1;
- cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ int shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
+ uint64_t assoc = cvmx_l2c_get_num_assoc();
+ uint64_t tag = addr >> shift;
+ uint64_t index = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, cvmx_l2c_address_to_index(addr) << CVMX_L2C_IDX_ADDR_SHIFT);
+ uint64_t way;
+ union cvmx_l2c_tadx_tag l2c_tadx_tag;
+
+ CVMX_CACHE_LCKL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, addr), 0);
+
+ /* Make sure we were able to lock the line */
+ for (way = 0; way < assoc; way++) {
+ CVMX_CACHE_LTGL2I(index | (way << shift), 0);
+ /* make sure CVMX_L2C_TADX_TAG is updated */
+ CVMX_SYNC;
+ l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
+ if (l2c_tadx_tag.s.valid && l2c_tadx_tag.s.tag == tag)
+ break;
+ }
- addr &= ~CVMX_CACHE_LINE_MASK;
+ /* Check if a valid line is found */
+ if (way >= assoc) {
+ /* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: line not found for locking at 0x%llx address\n", (unsigned long long)addr); */
+ return -1;
+ }
- /* Set this core as debug core */
- l2cdbg.s.ppnum = cvmx_get_core_num();
- CVMX_SYNC;
- cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
- cvmx_read_csr(CVMX_L2C_DBG);
-
- lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
- cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
- cvmx_read_csr(CVMX_L2C_LCKOFF);
-
- if (((union cvmx_l2c_cfg) (cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
- int alias_shift =
- CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
- uint64_t addr_tmp =
- addr ^ (addr & ((1 << alias_shift) - 1)) >>
- CVMX_L2_SET_BITS;
- lckbase.s.lck_base = addr_tmp >> 7;
+ /* Check if lock bit is not set */
+ if (!l2c_tadx_tag.s.lock) {
+ /* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: Not able to lock at 0x%llx address\n", (unsigned long long)addr); */
+ return -1;
+ }
+ return way;
} else {
- lckbase.s.lck_base = addr >> 7;
- }
+ int retval = 0;
+ union cvmx_l2c_dbg l2cdbg;
+ union cvmx_l2c_lckbase lckbase;
+ union cvmx_l2c_lckoff lckoff;
+ union cvmx_l2t_err l2t_err;
- lckbase.s.lck_ena = 1;
- cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
- cvmx_read_csr(CVMX_L2C_LCKBASE); /* Make sure it gets there */
+ cvmx_spinlock_lock(&cvmx_l2c_spinlock);
- fault_in(addr, CVMX_CACHE_LINE_SIZE);
+ l2cdbg.u64 = 0;
+ lckbase.u64 = 0;
+ lckoff.u64 = 0;
- lckbase.s.lck_ena = 0;
- cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
- cvmx_read_csr(CVMX_L2C_LCKBASE); /* Make sure it gets there */
+ /* Clear l2t error bits if set */
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ l2t_err.s.lckerr = 1;
+ l2t_err.s.lckerr2 = 1;
+ cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
- /* Stop being debug core */
- cvmx_write_csr(CVMX_L2C_DBG, 0);
- cvmx_read_csr(CVMX_L2C_DBG);
+ addr &= ~CVMX_CACHE_LINE_MASK;
- l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
- if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
- retval = 1; /* We were unable to lock the line */
+ /* Set this core as debug core */
+ l2cdbg.s.ppnum = cvmx_get_core_num();
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
+ cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
+ cvmx_read_csr(CVMX_L2C_LCKOFF);
+
+ if (((union cvmx_l2c_cfg)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
+ int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
+ uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> CVMX_L2_SET_BITS;
+ lckbase.s.lck_base = addr_tmp >> 7;
+ } else {
+ lckbase.s.lck_base = addr >> 7;
+ }
- cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+ lckbase.s.lck_ena = 1;
+ cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+ /* Make sure it gets there */
+ cvmx_read_csr(CVMX_L2C_LCKBASE);
- return retval;
+ fault_in(addr, CVMX_CACHE_LINE_SIZE);
+
+ lckbase.s.lck_ena = 0;
+ cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+ /* Make sure it gets there */
+ cvmx_read_csr(CVMX_L2C_LCKBASE);
+
+ /* Stop being debug core */
+ cvmx_write_csr(CVMX_L2C_DBG, 0);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
+ retval = 1; /* We were unable to lock the line */
+
+ cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+ return retval;
+ }
}
int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
@@ -336,7 +417,6 @@ int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
start += CVMX_CACHE_LINE_SIZE;
len -= CVMX_CACHE_LINE_SIZE;
}
-
return retval;
}
@@ -344,80 +424,73 @@ void cvmx_l2c_flush(void)
{
uint64_t assoc, set;
uint64_t n_assoc, n_set;
- union cvmx_l2c_dbg l2cdbg;
-
- cvmx_spinlock_lock(&cvmx_l2c_spinlock);
- l2cdbg.u64 = 0;
- if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
- l2cdbg.s.ppnum = cvmx_get_core_num();
- l2cdbg.s.finv = 1;
- n_set = CVMX_L2_SETS;
- n_assoc = l2_size_half() ? (CVMX_L2_ASSOC / 2) : CVMX_L2_ASSOC;
- for (set = 0; set < n_set; set++) {
- for (assoc = 0; assoc < n_assoc; assoc++) {
- l2cdbg.s.set = assoc;
- /* Enter debug mode, and make sure all other
- ** writes complete before we enter debug
- ** mode */
- CVMX_SYNCW;
- cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
- cvmx_read_csr(CVMX_L2C_DBG);
-
- CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG
- (CVMX_MIPS_SPACE_XKPHYS,
- set * CVMX_CACHE_LINE_SIZE), 0);
- CVMX_SYNCW; /* Push STF out to L2 */
- /* Exit debug mode */
- CVMX_SYNC;
- cvmx_write_csr(CVMX_L2C_DBG, 0);
- cvmx_read_csr(CVMX_L2C_DBG);
+ n_set = cvmx_l2c_get_num_sets();
+ n_assoc = cvmx_l2c_get_num_assoc();
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ uint64_t address;
+ /* These may look like constants, but they aren't... */
+ int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
+ int set_shift = CVMX_L2C_IDX_ADDR_SHIFT;
+ for (set = 0; set < n_set; set++) {
+ for (assoc = 0; assoc < n_assoc; assoc++) {
+ address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ (assoc << assoc_shift) | (set << set_shift));
+ CVMX_CACHE_WBIL2I(address, 0);
+ }
}
+ } else {
+ for (set = 0; set < n_set; set++)
+ for (assoc = 0; assoc < n_assoc; assoc++)
+ cvmx_l2c_flush_line(assoc, set);
}
-
- cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
}
+
int cvmx_l2c_unlock_line(uint64_t address)
{
- int assoc;
- union cvmx_l2c_tag tag;
- union cvmx_l2c_dbg l2cdbg;
- uint32_t tag_addr;
- uint32_t index = cvmx_l2c_address_to_index(address);
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ int assoc;
+ union cvmx_l2c_tag tag;
+ uint32_t tag_addr;
+ uint32_t index = cvmx_l2c_address_to_index(address);
+
+ tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
+
+ /*
+ * For 63XX, we can flush a line by using the physical
+ * address directly, so finding the cache line used by
+ * the address is only required to provide the proper
+ * return value for the function.
+ */
+ for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
+ tag = cvmx_l2c_get_tag(assoc, index);
+
+ if (tag.s.V && (tag.s.addr == tag_addr)) {
+ CVMX_CACHE_WBIL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, address), 0);
+ return tag.s.L;
+ }
+ }
+ } else {
+ int assoc;
+ union cvmx_l2c_tag tag;
+ uint32_t tag_addr;
- cvmx_spinlock_lock(&cvmx_l2c_spinlock);
- /* Compute portion of address that is stored in tag */
- tag_addr =
- ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) &
- ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
- for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
- tag = cvmx_get_l2c_tag(assoc, index);
+ uint32_t index = cvmx_l2c_address_to_index(address);
- if (tag.s.V && (tag.s.addr == tag_addr)) {
- l2cdbg.u64 = 0;
- l2cdbg.s.ppnum = cvmx_get_core_num();
- l2cdbg.s.set = assoc;
- l2cdbg.s.finv = 1;
+ /* Compute portion of address that is stored in tag */
+ tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
+ for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
+ tag = cvmx_l2c_get_tag(assoc, index);
- CVMX_SYNC;
- /* Enter debug mode */
- cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
- cvmx_read_csr(CVMX_L2C_DBG);
-
- CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG
- (CVMX_MIPS_SPACE_XKPHYS,
- address), 0);
- CVMX_SYNC;
- /* Exit debug mode */
- cvmx_write_csr(CVMX_L2C_DBG, 0);
- cvmx_read_csr(CVMX_L2C_DBG);
- cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
- return tag.s.L;
+ if (tag.s.V && (tag.s.addr == tag_addr)) {
+ cvmx_l2c_flush_line(assoc, index);
+ return tag.s.L;
+ }
}
}
- cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
return 0;
}
@@ -445,48 +518,49 @@ union __cvmx_l2c_tag {
uint64_t u64;
struct cvmx_l2c_tag_cn50xx {
uint64_t reserved:40;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:20; /* Phys mem addr (33..14) */
} cn50xx;
struct cvmx_l2c_tag_cn30xx {
uint64_t reserved:41;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:19; /* Phys mem addr (33..15) */
} cn30xx;
struct cvmx_l2c_tag_cn31xx {
uint64_t reserved:42;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:18; /* Phys mem addr (33..16) */
} cn31xx;
struct cvmx_l2c_tag_cn38xx {
uint64_t reserved:43;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:17; /* Phys mem addr (33..17) */
} cn38xx;
struct cvmx_l2c_tag_cn58xx {
uint64_t reserved:44;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:16; /* Phys mem addr (33..18) */
} cn58xx;
struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */
struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */
};
+
/**
* @INTERNAL
* Function to read a L2C tag. This code make the current core
@@ -503,7 +577,7 @@ union __cvmx_l2c_tag {
static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
{
- uint64_t debug_tag_addr = (((1ULL << 63) | (index << 7)) + 96);
+ uint64_t debug_tag_addr = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (index << 7) + 96);
uint64_t core = cvmx_get_core_num();
union __cvmx_l2c_tag tag_val;
uint64_t dbg_addr = CVMX_L2C_DBG;
@@ -512,12 +586,15 @@ static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
union cvmx_l2c_dbg debug_val;
debug_val.u64 = 0;
/*
- * For low core count parts, the core number is always small enough
- * to stay in the correct field and not set any reserved bits.
+ * For low core count parts, the core number is always small
+ * enough to stay in the correct field and not set any
+ * reserved bits.
*/
debug_val.s.ppnum = core;
debug_val.s.l2t = 1;
debug_val.s.set = assoc;
+
+ local_irq_save(flags);
/*
* Make sure core is quiet (no prefetches, etc.) before
* entering debug mode.
@@ -526,112 +603,139 @@ static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
/* Flush L1 to make sure debug load misses L1 */
CVMX_DCACHE_INVALIDATE;
- local_irq_save(flags);
-
/*
* The following must be done in assembly as when in debug
* mode all data loads from L2 return special debug data, not
- * normal memory contents. Also, interrupts must be
- * disabled, since if an interrupt occurs while in debug mode
- * the ISR will get debug data from all its memory reads
- * instead of the contents of memory
+ * normal memory contents. Also, interrupts must be disabled,
+ * since if an interrupt occurs while in debug mode the ISR
+ * will get debug data from all its memory reads instead of
+ * the contents of memory.
*/
- asm volatile (".set push \n"
- " .set mips64 \n"
- " .set noreorder \n"
- /* Enter debug mode, wait for store */
- " sd %[dbg_val], 0(%[dbg_addr]) \n"
- " ld $0, 0(%[dbg_addr]) \n"
- /* Read L2C tag data */
- " ld %[tag_val], 0(%[tag_addr]) \n"
- /* Exit debug mode, wait for store */
- " sd $0, 0(%[dbg_addr]) \n"
- " ld $0, 0(%[dbg_addr]) \n"
- /* Invalidate dcache to discard debug data */
- " cache 9, 0($0) \n"
- " .set pop" :
- [tag_val] "=r"(tag_val.u64) : [dbg_addr] "r"(dbg_addr),
- [dbg_val] "r"(debug_val.u64),
- [tag_addr] "r"(debug_tag_addr) : "memory");
+ asm volatile (
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noreorder\n\t"
+ "sd %[dbg_val], 0(%[dbg_addr])\n\t" /* Enter debug mode, wait for store */
+ "ld $0, 0(%[dbg_addr])\n\t"
+ "ld %[tag_val], 0(%[tag_addr])\n\t" /* Read L2C tag data */
+ "sd $0, 0(%[dbg_addr])\n\t" /* Exit debug mode, wait for store */
+ "ld $0, 0(%[dbg_addr])\n\t"
+ "cache 9, 0($0)\n\t" /* Invalidate dcache to discard debug data */
+ ".set pop"
+ : [tag_val] "=r" (tag_val)
+ : [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr)
+ : "memory");
local_irq_restore(flags);
- return tag_val;
+ return tag_val;
}
+
union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
{
- union __cvmx_l2c_tag tmp_tag;
union cvmx_l2c_tag tag;
tag.u64 = 0;
if ((int)association >= cvmx_l2c_get_num_assoc()) {
- cvmx_dprintf
- ("ERROR: cvmx_get_l2c_tag association out of range\n");
+ cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
return tag;
}
if ((int)index >= cvmx_l2c_get_num_sets()) {
- cvmx_dprintf("ERROR: cvmx_get_l2c_tag "
- "index out of range (arg: %d, max: %d\n",
- index, cvmx_l2c_get_num_sets());
+ cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n",
+ (int)index, cvmx_l2c_get_num_sets());
return tag;
}
- /* __read_l2_tag is intended for internal use only */
- tmp_tag = __read_l2_tag(association, index);
-
- /*
- * Convert all tag structure types to generic version, as it
- * can represent all models.
- */
- if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- tag.s.V = tmp_tag.cn58xx.V;
- tag.s.D = tmp_tag.cn58xx.D;
- tag.s.L = tmp_tag.cn58xx.L;
- tag.s.U = tmp_tag.cn58xx.U;
- tag.s.addr = tmp_tag.cn58xx.addr;
- } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
- tag.s.V = tmp_tag.cn38xx.V;
- tag.s.D = tmp_tag.cn38xx.D;
- tag.s.L = tmp_tag.cn38xx.L;
- tag.s.U = tmp_tag.cn38xx.U;
- tag.s.addr = tmp_tag.cn38xx.addr;
- } else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
- || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
- tag.s.V = tmp_tag.cn31xx.V;
- tag.s.D = tmp_tag.cn31xx.D;
- tag.s.L = tmp_tag.cn31xx.L;
- tag.s.U = tmp_tag.cn31xx.U;
- tag.s.addr = tmp_tag.cn31xx.addr;
- } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
- tag.s.V = tmp_tag.cn30xx.V;
- tag.s.D = tmp_tag.cn30xx.D;
- tag.s.L = tmp_tag.cn30xx.L;
- tag.s.U = tmp_tag.cn30xx.U;
- tag.s.addr = tmp_tag.cn30xx.addr;
- } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- tag.s.V = tmp_tag.cn50xx.V;
- tag.s.D = tmp_tag.cn50xx.D;
- tag.s.L = tmp_tag.cn50xx.L;
- tag.s.U = tmp_tag.cn50xx.U;
- tag.s.addr = tmp_tag.cn50xx.addr;
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ union cvmx_l2c_tadx_tag l2c_tadx_tag;
+ uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ (association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
+ (index << CVMX_L2C_IDX_ADDR_SHIFT));
+ /*
+ * Use L2 cache Index load tag cache instruction, as
+ * hardware loads the virtual tag for the L2 cache
+ * block with the contents of L2C_TAD0_TAG
+ * register.
+ */
+ CVMX_CACHE_LTGL2I(address, 0);
+ CVMX_SYNC; /* make sure CVMX_L2C_TADX_TAG is updated */
+ l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
+
+ tag.s.V = l2c_tadx_tag.s.valid;
+ tag.s.D = l2c_tadx_tag.s.dirty;
+ tag.s.L = l2c_tadx_tag.s.lock;
+ tag.s.U = l2c_tadx_tag.s.use;
+ tag.s.addr = l2c_tadx_tag.s.tag;
} else {
- cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+ union __cvmx_l2c_tag tmp_tag;
+ /* __read_l2_tag is intended for internal use only */
+ tmp_tag = __read_l2_tag(association, index);
+
+ /*
+ * Convert all tag structure types to generic version,
+ * as it can represent all models.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ tag.s.V = tmp_tag.cn58xx.V;
+ tag.s.D = tmp_tag.cn58xx.D;
+ tag.s.L = tmp_tag.cn58xx.L;
+ tag.s.U = tmp_tag.cn58xx.U;
+ tag.s.addr = tmp_tag.cn58xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+ tag.s.V = tmp_tag.cn38xx.V;
+ tag.s.D = tmp_tag.cn38xx.D;
+ tag.s.L = tmp_tag.cn38xx.L;
+ tag.s.U = tmp_tag.cn38xx.U;
+ tag.s.addr = tmp_tag.cn38xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ tag.s.V = tmp_tag.cn31xx.V;
+ tag.s.D = tmp_tag.cn31xx.D;
+ tag.s.L = tmp_tag.cn31xx.L;
+ tag.s.U = tmp_tag.cn31xx.U;
+ tag.s.addr = tmp_tag.cn31xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
+ tag.s.V = tmp_tag.cn30xx.V;
+ tag.s.D = tmp_tag.cn30xx.D;
+ tag.s.L = tmp_tag.cn30xx.L;
+ tag.s.U = tmp_tag.cn30xx.U;
+ tag.s.addr = tmp_tag.cn30xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ tag.s.V = tmp_tag.cn50xx.V;
+ tag.s.D = tmp_tag.cn50xx.D;
+ tag.s.L = tmp_tag.cn50xx.L;
+ tag.s.U = tmp_tag.cn50xx.U;
+ tag.s.addr = tmp_tag.cn50xx.addr;
+ } else {
+ cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+ }
}
-
return tag;
}
uint32_t cvmx_l2c_address_to_index(uint64_t addr)
{
uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
- union cvmx_l2c_cfg l2c_cfg;
- l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
+ int indxalias = 0;
- if (l2c_cfg.s.idxalias) {
- idx ^=
- ((addr & CVMX_L2C_ALIAS_MASK) >>
- CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ union cvmx_l2c_ctl l2c_ctl;
+ l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
+ indxalias = !l2c_ctl.s.disidxalias;
+ } else {
+ union cvmx_l2c_cfg l2c_cfg;
+ l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
+ indxalias = l2c_cfg.s.idxalias;
+ }
+
+ if (indxalias) {
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
+ idx ^= idx / cvmx_l2c_get_num_sets();
+ idx ^= a_14_12;
+ } else {
+ idx ^= ((addr & CVMX_L2C_ALIAS_MASK) >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
+ }
}
idx &= CVMX_L2C_IDX_MASK;
return idx;
@@ -652,10 +756,9 @@ int cvmx_l2c_get_set_bits(void)
int l2_set_bits;
if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
l2_set_bits = 11; /* 2048 sets */
- else if (OCTEON_IS_MODEL(OCTEON_CN38XX))
+ else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
l2_set_bits = 10; /* 1024 sets */
- else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
- || OCTEON_IS_MODEL(OCTEON_CN52XX))
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
l2_set_bits = 9; /* 512 sets */
else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
l2_set_bits = 8; /* 256 sets */
@@ -666,7 +769,6 @@ int cvmx_l2c_get_set_bits(void)
l2_set_bits = 11; /* 2048 sets */
}
return l2_set_bits;
-
}
/* Return the number of sets in the L2 Cache */
@@ -682,8 +784,11 @@ int cvmx_l2c_get_num_assoc(void)
if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
OCTEON_IS_MODEL(OCTEON_CN52XX) ||
OCTEON_IS_MODEL(OCTEON_CN58XX) ||
- OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN38XX))
+ OCTEON_IS_MODEL(OCTEON_CN50XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN38XX))
l2_assoc = 8;
+ else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
+ l2_assoc = 16;
else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
OCTEON_IS_MODEL(OCTEON_CN30XX))
l2_assoc = 4;
@@ -693,11 +798,42 @@ int cvmx_l2c_get_num_assoc(void)
}
/* Check to see if part of the cache is disabled */
- if (cvmx_fuse_read(265))
- l2_assoc = l2_assoc >> 2;
- else if (cvmx_fuse_read(264))
- l2_assoc = l2_assoc >> 1;
-
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ union cvmx_mio_fus_dat3 mio_fus_dat3;
+
+ mio_fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
+ /*
+ * cvmx_mio_fus_dat3.s.l2c_crip fuses map as follows
+	 * <2> is not used for 63xx
+ * <1> disables 1/2 ways
+ * <0> disables 1/4 ways
+ * They are cumulative, so for 63xx:
+ * <1> <0>
+ * 0 0 16-way 2MB cache
+ * 0 1 12-way 1.5MB cache
+ * 1 0 8-way 1MB cache
+ * 1 1 4-way 512KB cache
+ */
+
+ if (mio_fus_dat3.s.l2c_crip == 3)
+ l2_assoc = 4;
+ else if (mio_fus_dat3.s.l2c_crip == 2)
+ l2_assoc = 8;
+ else if (mio_fus_dat3.s.l2c_crip == 1)
+ l2_assoc = 12;
+ } else {
+ union cvmx_l2d_fus3 val;
+ val.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
+ /*
+ * Using shifts here, as bit position names are
+ * different for each model but they all mean the
+ * same.
+ */
+ if ((val.u64 >> 35) & 0x1)
+ l2_assoc = l2_assoc >> 2;
+ else if ((val.u64 >> 34) & 0x1)
+ l2_assoc = l2_assoc >> 1;
+ }
return l2_assoc;
}
@@ -711,24 +847,54 @@ int cvmx_l2c_get_num_assoc(void)
*/
void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
{
- union cvmx_l2c_dbg l2cdbg;
+ /* Check the range of the index. */
+ if (index > (uint32_t)cvmx_l2c_get_num_sets()) {
+ cvmx_dprintf("ERROR: cvmx_l2c_flush_line index out of range.\n");
+ return;
+ }
- l2cdbg.u64 = 0;
- l2cdbg.s.ppnum = cvmx_get_core_num();
- l2cdbg.s.finv = 1;
+ /* Check the range of association. */
+ if (assoc > (uint32_t)cvmx_l2c_get_num_assoc()) {
+ cvmx_dprintf("ERROR: cvmx_l2c_flush_line association out of range.\n");
+ return;
+ }
- l2cdbg.s.set = assoc;
- /*
- * Enter debug mode, and make sure all other writes complete
- * before we enter debug mode.
- */
- asm volatile ("sync" : : : "memory");
- cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
- cvmx_read_csr(CVMX_L2C_DBG);
-
- CVMX_PREPARE_FOR_STORE(((1ULL << 63) + (index) * 128), 0);
- /* Exit debug mode */
- asm volatile ("sync" : : : "memory");
- cvmx_write_csr(CVMX_L2C_DBG, 0);
- cvmx_read_csr(CVMX_L2C_DBG);
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+ uint64_t address;
+ /* Create the address based on index and association.
+ * Bits<20:17> select the way of the cache block involved in
+ * the operation
+	 * Bits<16:7> of the effective address select the index
+ */
+ address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ (assoc << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
+ (index << CVMX_L2C_IDX_ADDR_SHIFT));
+ CVMX_CACHE_WBIL2I(address, 0);
+ } else {
+ union cvmx_l2c_dbg l2cdbg;
+
+ l2cdbg.u64 = 0;
+ if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
+ l2cdbg.s.ppnum = cvmx_get_core_num();
+ l2cdbg.s.finv = 1;
+
+ l2cdbg.s.set = assoc;
+ cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+ /*
+ * Enter debug mode, and make sure all other writes
+ * complete before we enter debug mode
+ */
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ index * CVMX_CACHE_LINE_SIZE),
+ 0);
+ /* Exit debug mode */
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, 0);
+ cvmx_read_csr(CVMX_L2C_DBG);
+ cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+ }
}
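For reference, a minimal sketch of how the index-based API above might be used to flush the entire L2 by walking every way and set; the helper name is hypothetical and assumes the cvmx-l2c declarations are in scope:

/* Hypothetical helper: flush every L2 line by (way, set) index. */
static void example_l2c_flush_all(void)
{
	int num_sets = cvmx_l2c_get_num_sets();
	int num_ways = cvmx_l2c_get_num_assoc();
	int way, set;

	for (way = 0; way < num_ways; way++)
		for (set = 0; set < num_sets; set++)
			cvmx_l2c_flush_line(way, set);
}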
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 62ac30eef5e..cecaf62aef3 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -3,13 +3,15 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2004-2009 Cavium Networks
+ * Copyright (C) 2004-2010 Cavium Networks
* Copyright (C) 2008 Wind River Systems
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/i2c.h>
+#include <linux/usb.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -198,7 +200,7 @@ static int __init octeon_i2c_device_init(void)
num_ports = 1;
for (port = 0; port < num_ports; port++) {
- octeon_i2c_data[port].sys_freq = octeon_get_clock_rate();
+ octeon_i2c_data[port].sys_freq = octeon_get_io_clock_rate();
/*FIXME: should be examined. At the moment is set for 100Khz */
octeon_i2c_data[port].i2c_freq = 100000;
@@ -301,6 +303,10 @@ static int __init octeon_mgmt_device_init(void)
ret = -ENOMEM;
goto out;
}
+ /* No DMA restrictions */
+ pd->dev.coherent_dma_mask = DMA_BIT_MASK(64);
+ pd->dev.dma_mask = &pd->dev.coherent_dma_mask;
+
switch (port) {
case 0:
mgmt_port_resource.start = OCTEON_IRQ_MII0;
@@ -332,6 +338,108 @@ out:
}
device_initcall(octeon_mgmt_device_init);
+#ifdef CONFIG_USB
+
+static int __init octeon_ehci_device_init(void)
+{
+ struct platform_device *pd;
+ int ret = 0;
+
+ struct resource usb_resources[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
+ }
+ };
+
+ /* Only Octeon2 has ehci/ohci */
+ if (!OCTEON_IS_MODEL(OCTEON_CN63XX))
+ return 0;
+
+ if (octeon_is_simulation() || usb_disabled())
+ return 0; /* No USB in the simulator. */
+
+ pd = platform_device_alloc("octeon-ehci", 0);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ usb_resources[0].start = 0x00016F0000000000ULL;
+ usb_resources[0].end = usb_resources[0].start + 0x100;
+
+ usb_resources[1].start = OCTEON_IRQ_USB0;
+ usb_resources[1].end = OCTEON_IRQ_USB0;
+
+ ret = platform_device_add_resources(pd, usb_resources,
+ ARRAY_SIZE(usb_resources));
+ if (ret)
+ goto fail;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ goto fail;
+
+ return ret;
+fail:
+ platform_device_put(pd);
+out:
+ return ret;
+}
+device_initcall(octeon_ehci_device_init);
+
+static int __init octeon_ohci_device_init(void)
+{
+ struct platform_device *pd;
+ int ret = 0;
+
+ struct resource usb_resources[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
+ }
+ };
+
+ /* Only Octeon2 has ehci/ohci */
+ if (!OCTEON_IS_MODEL(OCTEON_CN63XX))
+ return 0;
+
+ if (octeon_is_simulation() || usb_disabled())
+ return 0; /* No USB in the simulator. */
+
+ pd = platform_device_alloc("octeon-ohci", 0);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ usb_resources[0].start = 0x00016F0000000400ULL;
+ usb_resources[0].end = usb_resources[0].start + 0x100;
+
+ usb_resources[1].start = OCTEON_IRQ_USB0;
+ usb_resources[1].end = OCTEON_IRQ_USB0;
+
+ ret = platform_device_add_resources(pd, usb_resources,
+ ARRAY_SIZE(usb_resources));
+ if (ret)
+ goto fail;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ goto fail;
+
+ return ret;
+fail:
+ platform_device_put(pd);
+out:
+ return ret;
+}
+device_initcall(octeon_ohci_device_init);
+
+#endif /* CONFIG_USB */
+
MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Platform driver for Octeon SOC");
diff --git a/arch/mips/cavium-octeon/serial.c b/arch/mips/cavium-octeon/serial.c
index 12dbf533b77..057f0ae88c9 100644
--- a/arch/mips/cavium-octeon/serial.c
+++ b/arch/mips/cavium-octeon/serial.c
@@ -66,7 +66,7 @@ static void __init octeon_uart_set_common(struct plat_serial8250_port *p)
/* Make simulator output fast*/
p->uartclk = 115200 * 16;
else
- p->uartclk = mips_hpt_frequency;
+ p->uartclk = octeon_get_io_clock_rate();
p->serial_in = octeon_serial_in;
p->serial_out = octeon_serial_out;
}
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 69197cb6c7e..b0c3686c96d 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -33,6 +33,7 @@
#include <asm/octeon/octeon.h>
#include <asm/octeon/pci-octeon.h>
+#include <asm/octeon/cvmx-mio-defs.h>
#ifdef CONFIG_CAVIUM_DECODE_RSL
extern void cvmx_interrupt_rsl_decode(void);
@@ -96,12 +97,21 @@ int octeon_is_pci_host(void)
*/
uint64_t octeon_get_clock_rate(void)
{
- if (octeon_is_simulation())
- octeon_bootinfo->eclock_hz = 6000000;
- return octeon_bootinfo->eclock_hz;
+ struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();
+
+ return sysinfo->cpu_clock_hz;
}
EXPORT_SYMBOL(octeon_get_clock_rate);
+static u64 octeon_io_clock_rate;
+
+u64 octeon_get_io_clock_rate(void)
+{
+ return octeon_io_clock_rate;
+}
+EXPORT_SYMBOL(octeon_get_io_clock_rate);
+
+
/**
* Write to the LCD display connected to the bootbus. This display
* exists on most Cavium evaluation boards. If it doesn't exist, then
@@ -346,8 +356,18 @@ void octeon_user_io_init(void)
cvmmemctl.s.wbfltime = 0;
/* R/W If set, do not put Istream in the L2 cache. */
cvmmemctl.s.istrnol2 = 0;
- /* R/W The write buffer threshold. */
- cvmmemctl.s.wbthresh = 10;
+
+ /*
+ * R/W The write buffer threshold. As per erratum Core-14752
+ * for CN63XX, a sc/scd might fail if the write buffer is
+ * full. Lowering WBTHRESH greatly lowers the chances of the
+ * write buffer ever being full and triggering the erratum.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
+ cvmmemctl.s.wbthresh = 4;
+ else
+ cvmmemctl.s.wbthresh = 10;
+
/* R/W If set, CVMSEG is available for loads/stores in
* kernel/debug mode. */
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
@@ -365,14 +385,13 @@ void octeon_user_io_init(void)
* is max legal value. */
cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;
+ write_c0_cvmmemctl(cvmmemctl.u64);
if (smp_processor_id() == 0)
pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);
- write_c0_cvmmemctl(cvmmemctl.u64);
-
/* Move the performance counter interrupts to IRQ 6 */
cvmctl = read_c0_cvmctl();
cvmctl &= ~(7 << 7);
@@ -416,6 +435,41 @@ void __init prom_init(void)
cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));
+ sysinfo = cvmx_sysinfo_get();
+ memset(sysinfo, 0, sizeof(*sysinfo));
+ sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
+ sysinfo->phy_mem_desc_ptr =
+ cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
+ sysinfo->core_mask = octeon_bootinfo->core_mask;
+ sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
+ sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
+ sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
+ sysinfo->board_type = octeon_bootinfo->board_type;
+ sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
+ sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
+ memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
+ sizeof(sysinfo->mac_addr_base));
+ sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
+ memcpy(sysinfo->board_serial_number,
+ octeon_bootinfo->board_serial_number,
+ sizeof(sysinfo->board_serial_number));
+ sysinfo->compact_flash_common_base_addr =
+ octeon_bootinfo->compact_flash_common_base_addr;
+ sysinfo->compact_flash_attribute_base_addr =
+ octeon_bootinfo->compact_flash_attribute_base_addr;
+ sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
+ sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
+ sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ /* I/O clock runs at a different rate than the CPU. */
+ union cvmx_mio_rst_boot rst_boot;
+ rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
+ octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
+ } else {
+ octeon_io_clock_rate = sysinfo->cpu_clock_hz;
+ }
+
/*
* Only enable the LED controller if we're running on a CN38XX, CN58XX,
* or CN56XX. The CN30XX and CN31XX don't have an LED controller.
@@ -479,33 +533,6 @@ void __init prom_init(void)
}
#endif
- sysinfo = cvmx_sysinfo_get();
- memset(sysinfo, 0, sizeof(*sysinfo));
- sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
- sysinfo->phy_mem_desc_ptr =
- cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
- sysinfo->core_mask = octeon_bootinfo->core_mask;
- sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
- sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
- sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
- sysinfo->board_type = octeon_bootinfo->board_type;
- sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
- sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
- memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
- sizeof(sysinfo->mac_addr_base));
- sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
- memcpy(sysinfo->board_serial_number,
- octeon_bootinfo->board_serial_number,
- sizeof(sysinfo->board_serial_number));
- sysinfo->compact_flash_common_base_addr =
- octeon_bootinfo->compact_flash_common_base_addr;
- sysinfo->compact_flash_attribute_base_addr =
- octeon_bootinfo->compact_flash_attribute_base_addr;
- sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
- sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
- sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
-
-
octeon_check_cpu_bist();
octeon_uart = octeon_get_boot_uart();
@@ -740,6 +767,31 @@ EXPORT_SYMBOL(prom_putchar);
void prom_free_prom_memory(void)
{
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) {
+ /* Check for presence of Core-14449 fix. */
+ u32 insn;
+ u32 *foo;
+
+ foo = &insn;
+
+ asm volatile("# before" : : : "memory");
+ prefetch(foo);
+ asm volatile(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ "bal 1f\n\t"
+ "nop\n"
+ "1:\tlw %0,-12($31)\n\t"
+ ".set pop\n\t"
+ : "=r" (insn) : : "$31", "memory");
+
+ if ((insn >> 26) != 0x33)
+ panic("No PREF instruction at Core-14449 probe point.\n");
+
+ if (((insn >> 16) & 0x1f) != 28)
+ panic("Core-14449 WAR not in place (%04x).\n"
+ "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).\n", insn);
+ }
#ifdef CONFIG_CAVIUM_DECODE_RSL
cvmx_interrupt_rsl_enable();
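The Core-14449 probe above depends on the MIPS PREF encoding: the major opcode sits in bits 31:26 (0x33 for PREF) and the hint field in bits 20:16 (28 is Pref_PrepareForStore). A small illustrative decode of the same fields, not part of the patch:

/* Illustrative: the two field checks performed by the Core-14449 probe. */
static inline int insn_is_pref_prepare_for_store(u32 insn)
{
	u32 opcode = insn >> 26;		/* bits 31:26: major opcode, 0x33 = PREF */
	u32 hint = (insn >> 16) & 0x1f;	/* bits 20:16: PREF hint, 28 = PrepareForStore */

	return opcode == 0x33 && hint == 28;
}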
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 47d87da379f..4a02fe891ab 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -64,18 +64,16 @@ static __inline__ void atomic_add(int i, atomic_t * v)
} else if (kernel_uses_llsc) {
int temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: ll %0, %1 # atomic_add \n"
- " addu %0, %2 \n"
- " sc %0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " ll %0, %1 # atomic_add \n"
+ " addu %0, %2 \n"
+ " sc %0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
+ } while (unlikely(!temp));
} else {
unsigned long flags;
@@ -109,18 +107,16 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
} else if (kernel_uses_llsc) {
int temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: ll %0, %1 # atomic_sub \n"
- " subu %0, %2 \n"
- " sc %0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " ll %0, %1 # atomic_sub \n"
+ " subu %0, %2 \n"
+ " sc %0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
+ } while (unlikely(!temp));
} else {
unsigned long flags;
@@ -156,20 +152,19 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
} else if (kernel_uses_llsc) {
int temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: ll %1, %2 # atomic_add_return \n"
- " addu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " beqz %0, 2f \n"
- " addu %0, %1, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " ll %1, %2 # atomic_add_return \n"
+ " addu %0, %1, %3 \n"
+ " sc %0, %2 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } while (unlikely(!result));
+
+ result = temp + i;
} else {
unsigned long flags;
@@ -205,23 +200,24 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
: "memory");
+
+ result = temp - i;
} else if (kernel_uses_llsc) {
int temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: ll %1, %2 # atomic_sub_return \n"
- " subu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " beqz %0, 2f \n"
- " subu %0, %1, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " ll %1, %2 # atomic_sub_return \n"
+ " subu %0, %1, %3 \n"
+ " sc %0, %2 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } while (unlikely(!result));
+
+ result = temp - i;
} else {
unsigned long flags;
@@ -279,12 +275,9 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
" bltz %0, 1f \n"
" sc %0, %2 \n"
" .set noreorder \n"
- " beqz %0, 2f \n"
+ " beqz %0, 1b \n"
" subu %0, %1, %3 \n"
" .set reorder \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -443,18 +436,16 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
} else if (kernel_uses_llsc) {
long temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: lld %0, %1 # atomic64_add \n"
- " daddu %0, %2 \n"
- " scd %0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " lld %0, %1 # atomic64_add \n"
+ " daddu %0, %2 \n"
+ " scd %0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
+ } while (unlikely(!temp));
} else {
unsigned long flags;
@@ -488,18 +479,16 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
} else if (kernel_uses_llsc) {
long temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: lld %0, %1 # atomic64_sub \n"
- " dsubu %0, %2 \n"
- " scd %0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " lld %0, %1 # atomic64_sub \n"
+ " dsubu %0, %2 \n"
+ " scd %0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
+ } while (unlikely(!temp));
} else {
unsigned long flags;
@@ -535,20 +524,19 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
} else if (kernel_uses_llsc) {
long temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: lld %1, %2 # atomic64_add_return \n"
- " daddu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " beqz %0, 2f \n"
- " daddu %0, %1, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " lld %1, %2 # atomic64_add_return \n"
+ " daddu %0, %1, %3 \n"
+ " scd %0, %2 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } while (unlikely(!result));
+
+ result = temp + i;
} else {
unsigned long flags;
@@ -587,20 +575,19 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
} else if (kernel_uses_llsc) {
long temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: lld %1, %2 # atomic64_sub_return \n"
- " dsubu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " beqz %0, 2f \n"
- " dsubu %0, %1, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " lld %1, %2 # atomic64_sub_return \n"
+ " dsubu %0, %1, %3 \n"
+ " scd %0, %2 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } while (unlikely(!result));
+
+ result = temp - i;
} else {
unsigned long flags;
@@ -658,12 +645,9 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
" bltz %0, 1f \n"
" scd %0, %2 \n"
" .set noreorder \n"
- " beqz %0, 2f \n"
+ " beqz %0, 1b \n"
" dsubu %0, %1, %3 \n"
" .set reorder \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index b0ce7ca2851..50b4ef288c5 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -73,30 +73,26 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
: "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
- __asm__ __volatile__(
- "1: " __LL "%0, %1 # set_bit \n"
- " " __INS "%0, %4, %2, 1 \n"
- " " __SC "%0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- : "=&r" (temp), "=m" (*m)
- : "ir" (bit), "m" (*m), "r" (~0));
+ do {
+ __asm__ __volatile__(
+ " " __LL "%0, %1 # set_bit \n"
+ " " __INS "%0, %3, %2, 1 \n"
+ " " __SC "%0, %1 \n"
+ : "=&r" (temp), "+m" (*m)
+ : "ir" (bit), "r" (~0));
+ } while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
} else if (kernel_uses_llsc) {
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: " __LL "%0, %1 # set_bit \n"
- " or %0, %2 \n"
- " " __SC "%0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (temp), "=m" (*m)
- : "ir" (1UL << bit), "m" (*m));
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " " __LL "%0, %1 # set_bit \n"
+ " or %0, %2 \n"
+ " " __SC "%0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m)
+ : "ir" (1UL << bit));
+ } while (unlikely(!temp));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
@@ -134,34 +130,30 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
" " __SC "%0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (*m)
- : "ir" (~(1UL << bit)), "m" (*m));
+ : "=&r" (temp), "+m" (*m)
+ : "ir" (~(1UL << bit)));
#ifdef CONFIG_CPU_MIPSR2
} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
- __asm__ __volatile__(
- "1: " __LL "%0, %1 # clear_bit \n"
- " " __INS "%0, $0, %2, 1 \n"
- " " __SC "%0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- : "=&r" (temp), "=m" (*m)
- : "ir" (bit), "m" (*m));
+ do {
+ __asm__ __volatile__(
+ " " __LL "%0, %1 # clear_bit \n"
+ " " __INS "%0, $0, %2, 1 \n"
+ " " __SC "%0, %1 \n"
+ : "=&r" (temp), "+m" (*m)
+ : "ir" (bit));
+ } while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
} else if (kernel_uses_llsc) {
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: " __LL "%0, %1 # clear_bit \n"
- " and %0, %2 \n"
- " " __SC "%0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (temp), "=m" (*m)
- : "ir" (~(1UL << bit)), "m" (*m));
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " " __LL "%0, %1 # clear_bit \n"
+ " and %0, %2 \n"
+ " " __SC "%0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m)
+ : "ir" (~(1UL << bit)));
+ } while (unlikely(!temp));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
@@ -213,24 +205,22 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
" " __SC "%0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (*m)
- : "ir" (1UL << bit), "m" (*m));
+ : "=&r" (temp), "+m" (*m)
+ : "ir" (1UL << bit));
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: " __LL "%0, %1 # change_bit \n"
- " xor %0, %2 \n"
- " " __SC "%0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (temp), "=m" (*m)
- : "ir" (1UL << bit), "m" (*m));
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " " __LL "%0, %1 # change_bit \n"
+ " xor %0, %2 \n"
+ " " __SC "%0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m)
+ : "ir" (1UL << bit));
+ } while (unlikely(!temp));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
@@ -272,30 +262,26 @@ static inline int test_and_set_bit(unsigned long nr,
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << bit), "m" (*m)
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "r" (1UL << bit)
: "memory");
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
- __asm__ __volatile__(
- " .set push \n"
- " .set noreorder \n"
- " .set mips3 \n"
- "1: " __LL "%0, %1 # test_and_set_bit \n"
- " or %2, %0, %3 \n"
- " " __SC "%2, %1 \n"
- " beqz %2, 2f \n"
- " and %2, %0, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " nop \n"
- " .previous \n"
- " .set pop \n"
- : "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << bit), "m" (*m)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " " __LL "%0, %1 # test_and_set_bit \n"
+ " or %2, %0, %3 \n"
+ " " __SC "%2, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "r" (1UL << bit)
+ : "memory");
+ } while (unlikely(!res));
+
+ res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
@@ -340,30 +326,26 @@ static inline int test_and_set_bit_lock(unsigned long nr,
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << bit), "m" (*m)
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "r" (1UL << bit)
: "memory");
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
- __asm__ __volatile__(
- " .set push \n"
- " .set noreorder \n"
- " .set mips3 \n"
- "1: " __LL "%0, %1 # test_and_set_bit \n"
- " or %2, %0, %3 \n"
- " " __SC "%2, %1 \n"
- " beqz %2, 2f \n"
- " and %2, %0, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " nop \n"
- " .previous \n"
- " .set pop \n"
- : "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << bit), "m" (*m)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " " __LL "%0, %1 # test_and_set_bit \n"
+ " or %2, %0, %3 \n"
+ " " __SC "%2, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "r" (1UL << bit)
+ : "memory");
+ } while (unlikely(!res));
+
+ res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
@@ -410,49 +392,43 @@ static inline int test_and_clear_bit(unsigned long nr,
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << bit), "m" (*m)
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "r" (1UL << bit)
: "memory");
#ifdef CONFIG_CPU_MIPSR2
} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
- __asm__ __volatile__(
- "1: " __LL "%0, %1 # test_and_clear_bit \n"
- " " __EXT "%2, %0, %3, 1 \n"
- " " __INS "%0, $0, %3, 1 \n"
- " " __SC "%0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- : "=&r" (temp), "=m" (*m), "=&r" (res)
- : "ir" (bit), "m" (*m)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " " __LL "%0, %1 # test_and_clear_bit \n"
+ " " __EXT "%2, %0, %3, 1 \n"
+ " " __INS "%0, $0, %3, 1 \n"
+ " " __SC "%0, %1 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "ir" (bit)
+ : "memory");
+ } while (unlikely(!temp));
#endif
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
- __asm__ __volatile__(
- " .set push \n"
- " .set noreorder \n"
- " .set mips3 \n"
- "1: " __LL "%0, %1 # test_and_clear_bit \n"
- " or %2, %0, %3 \n"
- " xor %2, %3 \n"
- " " __SC "%2, %1 \n"
- " beqz %2, 2f \n"
- " and %2, %0, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " nop \n"
- " .previous \n"
- " .set pop \n"
- : "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << bit), "m" (*m)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " " __LL "%0, %1 # test_and_clear_bit \n"
+ " or %2, %0, %3 \n"
+ " xor %2, %3 \n"
+ " " __SC "%2, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "r" (1UL << bit)
+ : "memory");
+ } while (unlikely(!res));
+
+ res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
@@ -499,30 +475,26 @@ static inline int test_and_change_bit(unsigned long nr,
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << bit), "m" (*m)
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "r" (1UL << bit)
: "memory");
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
- __asm__ __volatile__(
- " .set push \n"
- " .set noreorder \n"
- " .set mips3 \n"
- "1: " __LL "%0, %1 # test_and_change_bit \n"
- " xor %2, %0, %3 \n"
- " " __SC "\t%2, %1 \n"
- " beqz %2, 2f \n"
- " and %2, %0, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " nop \n"
- " .previous \n"
- " .set pop \n"
- : "=&r" (temp), "=m" (*m), "=&r" (res)
- : "r" (1UL << bit), "m" (*m)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " " __LL "%0, %1 # test_and_change_bit \n"
+ " xor %2, %0, %3 \n"
+ " " __SC "\t%2, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "r" (1UL << bit)
+ : "memory");
+ } while (unlikely(!res));
+
+ res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
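The atomic.h and bitops.h hunks above all apply the same conversion: the in-asm beqz retry branch (and its .subsection 2 slow path) is removed, the ll/sc pair is retried from a C do/while loop, and the return value is recomputed outside the asm once sc has succeeded. A condensed sketch of the resulting shape, assuming the usual kernel headers (linux/compiler.h, asm/atomic.h) are in scope; the function name is illustrative only:

static inline int example_atomic_add_return(int i, atomic_t *v)
{
	int result, temp;

	do {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"	ll	%1, %2	# example		\n"
		"	addu	%0, %1, %3			\n"
		"	sc	%0, %2				\n"
		"	.set	mips0				\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} while (unlikely(!result));	/* sc wrote 0: reservation lost, retry */

	return temp + i;		/* value recomputed outside the asm */
}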
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index 15a8ef0707c..35cd1bab69c 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -125,4 +125,16 @@ extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
*/
extern void plat_mem_setup(void);
+#ifdef CONFIG_SWIOTLB
+/*
+ * Optional platform hook to call swiotlb_setup().
+ */
+extern void plat_swiotlb_setup(void);
+
+#else
+
+static inline void plat_swiotlb_setup(void) {}
+
+#endif /* CONFIG_SWIOTLB */
+
#endif /* _ASM_BOOTINFO_H */
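A platform that enables CONFIG_SWIOTLB is expected to supply this hook; below is a minimal sketch of an override, assuming the swiotlb_init(int verbose) interface used by this kernel series (bounce-buffer sizing and placement are platform decisions and omitted here):

#include <linux/init.h>
#include <linux/swiotlb.h>

/* Hypothetical platform override, for illustration only. */
void __init plat_swiotlb_setup(void)
{
	swiotlb_init(1);	/* 1 = print the swiotlb banner at boot */
}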
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 2d28017e95d..d8d1c2805ac 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -44,12 +44,9 @@
" move $1, %z4 \n" \
" .set mips3 \n" \
" " st " $1, %1 \n" \
- " beqz $1, 3f \n" \
- "2: \n" \
- " .subsection 2 \n" \
- "3: b 1b \n" \
- " .previous \n" \
+ " beqz $1, 1b \n" \
" .set pop \n" \
+ "2: \n" \
: "=&r" (__ret), "=R" (*m) \
: "R" (*m), "Jr" (old), "Jr" (new) \
: "memory"); \
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index b201a8f5b12..06d59dcbe24 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -111,14 +111,16 @@
* These are the PRID's for when 23:16 == PRID_COMP_BROADCOM
*/
-#define PRID_IMP_BCM4710 0x4000
-#define PRID_IMP_BCM3302 0x9000
-#define PRID_IMP_BCM6338 0x9000
-#define PRID_IMP_BCM6345 0x8000
-#define PRID_IMP_BCM6348 0x9100
-#define PRID_IMP_BCM4350 0xA000
-#define PRID_REV_BCM6358 0x0010
-#define PRID_REV_BCM6368 0x0030
+#define PRID_IMP_BMIPS4KC 0x4000
+#define PRID_IMP_BMIPS32 0x8000
+#define PRID_IMP_BMIPS3300 0x9000
+#define PRID_IMP_BMIPS3300_ALT 0x9100
+#define PRID_IMP_BMIPS3300_BUG 0x0000
+#define PRID_IMP_BMIPS43XX 0xa000
+#define PRID_IMP_BMIPS5000 0x5a00
+
+#define PRID_REV_BMIPS4380_LO 0x0040
+#define PRID_REV_BMIPS4380_HI 0x006f
/*
* These are the PRID's for when 23:16 == PRID_COMP_CAVIUM
@@ -131,6 +133,7 @@
#define PRID_IMP_CAVIUM_CN56XX 0x0400
#define PRID_IMP_CAVIUM_CN50XX 0x0600
#define PRID_IMP_CAVIUM_CN52XX 0x0700
+#define PRID_IMP_CAVIUM_CN63XX 0x9000
/*
* These are the PRID's for when 23:16 == PRID_COMP_INGENIC
@@ -223,15 +226,14 @@ enum cpu_type_enum {
* MIPS32 class processors
*/
CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
- CPU_ALCHEMY, CPU_PR4450, CPU_BCM3302, CPU_BCM4710,
- CPU_BCM6338, CPU_BCM6345, CPU_BCM6348, CPU_BCM6358,
- CPU_JZRISC,
+ CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
+ CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC,
/*
* MIPS64 class processors
*/
CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
- CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
+ CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2,
CPU_LAST
};
diff --git a/arch/mips/include/asm/device.h b/arch/mips/include/asm/device.h
index 06746c5e809..c94fafba9e6 100644
--- a/arch/mips/include/asm/device.h
+++ b/arch/mips/include/asm/device.h
@@ -3,4 +3,17 @@
*
* This file is released under the GPLv2
*/
-#include <asm-generic/device.h>
+#ifndef _ASM_MIPS_DEVICE_H
+#define _ASM_MIPS_DEVICE_H
+
+struct dma_map_ops;
+
+struct dev_archdata {
+ /* DMA operations on that device */
+ struct dma_map_ops *dma_ops;
+};
+
+struct pdev_archdata {
+};
+
+#endif /* _ASM_MIPS_DEVICE_H */
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 18fbf7af8e9..655f849bd08 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -5,51 +5,41 @@
#include <asm/cache.h>
#include <asm-generic/dma-coherent.h>
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
+#include <dma-coherence.h>
-void dma_free_noncoherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
+extern struct dma_map_ops *mips_dma_map_ops;
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (dev && dev->archdata.dma_ops)
+ return dev->archdata.dma_ops;
+ else
+ return mips_dma_map_ops;
+}
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return 0;
-extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction);
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction direction)
+ return addr + size <= *dev->dma_mask;
+}
+
+static inline void dma_mark_clean(void *addr, size_t size) {}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_supported(struct device *dev, u64 mask)
{
- dma_unmap_single(dev, dma_address, size, direction);
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ return ops->dma_supported(dev, mask);
}
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction direction);
-extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction);
-extern void dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction);
-extern void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction);
-extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction);
-extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
-extern int dma_supported(struct device *dev, u64 mask);
+static inline int dma_mapping_error(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ return ops->mapping_error(dev, mask);
+}
static inline int
dma_set_mask(struct device *dev, u64 mask)
@@ -65,4 +55,34 @@ dma_set_mask(struct device *dev, u64 mask)
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ void *ret;
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ ret = ops->alloc_coherent(dev, size, dma_handle, gfp);
+
+ debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
+
+ return ret;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ ops->free_coherent(dev, size, vaddr, dma_handle);
+
+ debug_dma_free_coherent(dev, size, vaddr, dma_handle);
+}
+
+
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+
+void dma_free_noncoherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
#endif /* _ASM_DMA_MAPPING_H */
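With get_dma_ops() consulting dev->archdata.dma_ops first, a bus or platform layer can install per-device DMA operations and fall back to mips_dma_map_ops otherwise. A hedged sketch of that hand-off; my_bus_dma_map_ops is a placeholder, not an API introduced by this patch:

/* Sketch: attach per-device DMA ops before the device does any DMA. */
static void example_set_device_dma_ops(struct device *dev,
				       struct dma_map_ops *my_bus_dma_map_ops)
{
	dev->archdata.dma_ops = my_bus_dma_map_ops;	/* later returned by get_dma_ops() */
}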
diff --git a/arch/mips/include/asm/dma.h b/arch/mips/include/asm/dma.h
index 1353c81065d..2d47da62d5a 100644
--- a/arch/mips/include/asm/dma.h
+++ b/arch/mips/include/asm/dma.h
@@ -91,7 +91,10 @@
#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x01000000)
#endif
#define MAX_DMA_PFN PFN_DOWN(virt_to_phys((void *)MAX_DMA_ADDRESS))
+
+#ifndef MAX_DMA32_PFN
#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
+#endif
/* 8237 DMA controllers */
#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index bdcdef02d14..fffc8307a80 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -117,7 +117,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
#define local_cmpxchg(l, o, n) \
((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
-#define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
+#define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
/**
* local_add_unless - add unless the number is a given value
diff --git a/arch/mips/include/asm/mach-ar7/ar7.h b/arch/mips/include/asm/mach-ar7/ar7.h
index 483ffea9ecb..7919d76186b 100644
--- a/arch/mips/include/asm/mach-ar7/ar7.h
+++ b/arch/mips/include/asm/mach-ar7/ar7.h
@@ -39,6 +39,7 @@
#define AR7_REGS_UART0 (AR7_REGS_BASE + 0x0e00)
#define AR7_REGS_USB (AR7_REGS_BASE + 0x1200)
#define AR7_REGS_RESET (AR7_REGS_BASE + 0x1600)
+#define AR7_REGS_PINSEL (AR7_REGS_BASE + 0x160C)
#define AR7_REGS_VLYNQ0 (AR7_REGS_BASE + 0x1800)
#define AR7_REGS_DCL (AR7_REGS_BASE + 0x1a00)
#define AR7_REGS_VLYNQ1 (AR7_REGS_BASE + 0x1c00)
@@ -50,6 +51,14 @@
#define UR8_REGS_WDT (AR7_REGS_BASE + 0x0b00)
#define UR8_REGS_UART1 (AR7_REGS_BASE + 0x0f00)
+/* Titan registers */
+#define TITAN_REGS_ESWITCH_BASE (0x08640000)
+#define TITAN_REGS_MAC0 (TITAN_REGS_ESWITCH_BASE)
+#define TITAN_REGS_MAC1 (TITAN_REGS_ESWITCH_BASE + 0x0800)
+#define TITAN_REGS_MDIO (TITAN_REGS_ESWITCH_BASE + 0x02000)
+#define TITAN_REGS_VLYNQ0 (AR7_REGS_BASE + 0x1c00)
+#define TITAN_REGS_VLYNQ1 (AR7_REGS_BASE + 0x1300)
+
#define AR7_RESET_PERIPHERAL 0x0
#define AR7_RESET_SOFTWARE 0x4
#define AR7_RESET_STATUS 0x8
@@ -59,15 +68,30 @@
#define AR7_RESET_BIT_MDIO 22
#define AR7_RESET_BIT_EPHY 26
+#define TITAN_RESET_BIT_EPHY1 28
+
/* GPIO control registers */
#define AR7_GPIO_INPUT 0x0
#define AR7_GPIO_OUTPUT 0x4
#define AR7_GPIO_DIR 0x8
#define AR7_GPIO_ENABLE 0xc
+#define TITAN_GPIO_INPUT_0 0x0
+#define TITAN_GPIO_INPUT_1 0x4
+#define TITAN_GPIO_OUTPUT_0 0x8
+#define TITAN_GPIO_OUTPUT_1 0xc
+#define TITAN_GPIO_DIR_0 0x10
+#define TITAN_GPIO_DIR_1 0x14
+#define TITAN_GPIO_ENBL_0 0x18
+#define TITAN_GPIO_ENBL_1 0x1c
#define AR7_CHIP_7100 0x18
#define AR7_CHIP_7200 0x2b
#define AR7_CHIP_7300 0x05
+#define AR7_CHIP_TITAN 0x07
+#define TITAN_CHIP_1050 0x0f
+#define TITAN_CHIP_1055 0x0e
+#define TITAN_CHIP_1056 0x0d
+#define TITAN_CHIP_1060 0x07
/* Interrupts */
#define AR7_IRQ_UART0 15
@@ -95,14 +119,29 @@ struct plat_dsl_data {
extern int ar7_cpu_clock, ar7_bus_clock, ar7_dsp_clock;
+static inline int ar7_is_titan(void)
+{
+ return (readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x24)) & 0xffff) ==
+ AR7_CHIP_TITAN;
+}
+
static inline u16 ar7_chip_id(void)
{
- return readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x14)) & 0xffff;
+ return ar7_is_titan() ? AR7_CHIP_TITAN : (readl((void *)
+ KSEG1ADDR(AR7_REGS_GPIO + 0x14)) & 0xffff);
+}
+
+static inline u16 titan_chip_id(void)
+{
+ unsigned int val = readl((void *)KSEG1ADDR(AR7_REGS_GPIO +
+ TITAN_GPIO_INPUT_1));
+ return ((val >> 12) & 0x0f);
}
static inline u8 ar7_chip_rev(void)
{
- return (readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x14)) >> 16) & 0xff;
+ return (readl((void *)KSEG1ADDR(AR7_REGS_GPIO + (ar7_is_titan() ? 0x24 :
+ 0x14))) >> 16) & 0xff;
}
struct clk {
@@ -161,4 +200,8 @@ static inline void ar7_device_off(u32 bit)
msleep(20);
}
+int __init ar7_gpio_init(void);
+
#endif /* __AR7_H__ */
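A short usage sketch of the Titan detection helpers above; purely illustrative and not part of the patch:

static void example_ar7_report_chip(void)
{
	if (ar7_is_titan())
		printk(KERN_INFO "AR7: Titan variant 0x%02x, rev 0x%02x\n",
		       titan_chip_id(), ar7_chip_rev());
	else
		printk(KERN_INFO "AR7: chip id 0x%04x, rev 0x%02x\n",
		       ar7_chip_id(), ar7_chip_rev());
}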
diff --git a/arch/mips/include/asm/mach-ar7/gpio.h b/arch/mips/include/asm/mach-ar7/gpio.h
index abc317c0372..c177cd1eed2 100644
--- a/arch/mips/include/asm/mach-ar7/gpio.h
+++ b/arch/mips/include/asm/mach-ar7/gpio.h
@@ -22,7 +22,8 @@
#include <asm/mach-ar7/ar7.h>
#define AR7_GPIO_MAX 32
-#define NR_BUILTIN_GPIO AR7_GPIO_MAX
+#define TITAN_GPIO_MAX 51
+#define NR_BUILTIN_GPIO TITAN_GPIO_MAX
#define gpio_to_irq(gpio) -1
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h b/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
new file mode 100644
index 00000000000..5325084d5c4
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
@@ -0,0 +1,97 @@
+#ifndef __BCM963XX_TAG_H
+#define __BCM963XX_TAG_H
+
+#define TAGVER_LEN 4 /* Length of Tag Version */
+#define TAGLAYOUT_LEN 4 /* Length of FlashLayoutVer */
+#define SIG1_LEN 20 /* Company Signature 1 Length */
+#define SIG2_LEN 14 /* Company Signature 2 Length */
+#define BOARDID_LEN 16 /* Length of BoardId */
+#define ENDIANFLAG_LEN 2 /* Endian Flag Length */
+#define CHIPID_LEN 6 /* Chip Id Length */
+#define IMAGE_LEN 10 /* Length of Length Field */
+#define ADDRESS_LEN 12 /* Length of Address field */
+#define DUALFLAG_LEN 2 /* Dual Image flag Length */
+#define INACTIVEFLAG_LEN 2 /* Inactive Flag Length */
+#define RSASIG_LEN 20 /* Length of RSA Signature in tag */
+#define TAGINFO1_LEN 30 /* Length of vendor information field1 in tag */
+#define FLASHLAYOUTVER_LEN 4 /* Length of Flash Layout Version String tag */
+#define TAGINFO2_LEN 16 /* Length of vendor information field2 in tag */
+#define CRC_LEN 4 /* Length of CRC in bytes */
+#define ALTTAGINFO_LEN 54 /* Alternate length for vendor information; Pirelli */
+
+#define NUM_PIRELLI 2
+#define IMAGETAG_CRC_START 0xFFFFFFFF
+
+#define PIRELLI_BOARDS { \
+ "AGPF-S0", \
+ "DWV-S0", \
+}
+
+/*
+ * The Broadcom firmware assumes the rootfs starts the image, so it uses
+ * the rootfs start (flash_image_address) to determine where to flash the
+ * image.  Since we have the kernel first, we have to give it the kernel
+ * address, but the CRC uses the length associated with this address
+ * (root_length), which is added to the kernel length (kernel_length) to
+ * determine the length of image to flash; it therefore needs to be
+ * rootfs + deadcode (the jffs2 EOF marker).
+ */
+
+struct bcm_tag {
+ /* 0-3: Version of the image tag */
+ char tag_version[TAGVER_LEN];
+ /* 4-23: Company Line 1 */
+ char sig_1[SIG1_LEN];
+ /* 24-37: Company Line 2 */
+ char sig_2[SIG2_LEN];
+ /* 38-43: Chip this image is for */
+ char chip_id[CHIPID_LEN];
+ /* 44-59: Board name */
+ char board_id[BOARDID_LEN];
+ /* 60-61: Map endianness -- 1 BE 0 LE */
+ char big_endian[ENDIANFLAG_LEN];
+ /* 62-71: Total length of image */
+ char total_length[IMAGE_LEN];
+ /* 72-83: Address in memory of CFE */
+ char cfe__address[ADDRESS_LEN];
+ /* 84-93: Size of CFE */
+ char cfe_length[IMAGE_LEN];
+ /* 94-105: Address in memory of image start
+ * (kernel for OpenWRT, rootfs for stock firmware)
+ */
+ char flash_image_start[ADDRESS_LEN];
+ /* 106-115: Size of rootfs */
+ char root_length[IMAGE_LEN];
+ /* 116-127: Address in memory of kernel */
+ char kernel_address[ADDRESS_LEN];
+ /* 128-137: Size of kernel */
+ char kernel_length[IMAGE_LEN];
+ /* 138-139: Unused at the moment */
+ char dual_image[DUALFLAG_LEN];
+ /* 140-141: Unused at the moment */
+ char inactive_flag[INACTIVEFLAG_LEN];
+ /* 142-161: RSA Signature (not used; some vendors may use this) */
+ char rsa_signature[RSASIG_LEN];
+ /* 162-191: Compilation and related information (not used in OpenWrt) */
+ char information1[TAGINFO1_LEN];
+ /* 192-195: Version flash layout */
+ char flash_layout_ver[FLASHLAYOUTVER_LEN];
+ /* 196-199: kernel+rootfs CRC32 */
+ char fskernel_crc[CRC_LEN];
+	/* 200-215: Unused except on Alice Gate, where it is information */
+ char information2[TAGINFO2_LEN];
+ /* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */
+ char image_crc[CRC_LEN];
+ /* 220-223: CRC32 of rootfs partition */
+ char rootfs_crc[CRC_LEN];
+ /* 224-227: CRC32 of kernel partition */
+ char kernel_crc[CRC_LEN];
+ /* 228-235: Unused at present */
+ char reserved1[8];
+ /* 236-239: CRC32 of header excluding tagVersion */
+ char header_crc[CRC_LEN];
+ /* 240-255: Unused at present */
+ char reserved2[16];
+};
+
+#endif /* __BCM63XX_TAG_H */
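The length and address fields in struct bcm_tag are stored as ASCII decimal strings, not binary integers. A hedged sketch of how a consumer might recover the image length described in the comment above, using the in-kernel simple_strtoul() helper (the fields are fixed-width and may not be NUL-terminated, so a real parser should copy and terminate them first):

/* Sketch: bytes covered by fskernel_crc = rootfs length + kernel length. */
static unsigned long example_bcm_tag_image_len(const struct bcm_tag *tag)
{
	unsigned long root_len = simple_strtoul(tag->root_length, NULL, 10);
	unsigned long kernel_len = simple_strtoul(tag->kernel_length, NULL, 10);

	return root_len + kernel_len;
}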
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
index b952fc7215e..0d5a42b5f47 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -59,7 +59,7 @@
#define cpu_has_veic 0
#define cpu_hwrena_impl_bits 0xc0000000
-#define kernel_uses_smartmips_rixi (cpu_data[0].cputype == CPU_CAVIUM_OCTEON_PLUS)
+#define kernel_uses_smartmips_rixi (cpu_data[0].cputype != CPU_CAVIUM_OCTEON)
#define ARCH_HAS_IRQ_PER_CPU 1
#define ARCH_HAS_SPINLOCK_PREFETCH 1
@@ -81,4 +81,10 @@ static inline int octeon_has_saa(void)
return id >= 0x000d0300;
}
+/*
+ * The last 256MB are reserved for device to device mappings and the
+ * BAR1 hole.
+ */
+#define MAX_DMA32_PFN (((1ULL << 32) - (1ULL << 28)) >> PAGE_SHIFT)
+
#endif
diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
index 17d579471ec..be8fb4240ce 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
@@ -15,41 +15,40 @@
struct device;
-dma_addr_t octeon_map_dma_mem(struct device *, void *, size_t);
-void octeon_unmap_dma_mem(struct device *, dma_addr_t);
+extern void octeon_pci_dma_init(void);
static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
size_t size)
{
- return octeon_map_dma_mem(dev, addr, size);
+ BUG();
}
static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
struct page *page)
{
- return octeon_map_dma_mem(dev, page_address(page), PAGE_SIZE);
+ BUG();
}
static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
dma_addr_t dma_addr)
{
- return dma_addr;
+ BUG();
}
static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction)
{
- octeon_unmap_dma_mem(dev, dma_addr);
+ BUG();
}
static inline int plat_dma_supported(struct device *dev, u64 mask)
{
- return 1;
+ BUG();
}
static inline void plat_extra_sync_for_device(struct device *dev)
{
- mb();
+ BUG();
}
static inline int plat_device_is_coherent(struct device *dev)
@@ -60,7 +59,14 @@ static inline int plat_device_is_coherent(struct device *dev)
static inline int plat_dma_mapping_error(struct device *dev,
dma_addr_t dma_addr)
{
- return dma_addr == -1;
+ BUG();
}
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+
+struct dma_map_ops;
+extern struct dma_map_ops *octeon_pci_dma_map_ops;
+extern char *octeon_swiotlb;
+
#endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h
index d3d04018a85..016d0989b14 100644
--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h
@@ -26,14 +26,15 @@ static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
return pa;
}
-static dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
+static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
+ struct page *page)
{
dma_addr_t pa = dev_to_baddr(dev, page_to_phys(page));
return pa;
}
-static unsigned long plat_dma_addr_to_phys(struct device *dev,
+static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
dma_addr_t dma_addr)
{
return dma_addr & ~(0xffUL << 56);
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
index 37855955b31..c8fb5aacf50 100644
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h
@@ -37,7 +37,8 @@ static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
return pa;
}
-static dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
+static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
+ struct page *page)
{
dma_addr_t pa;
@@ -50,7 +51,7 @@ static dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
}
/* This is almost certainly wrong but it's what dma-ip32.c used to use */
-static unsigned long plat_dma_addr_to_phys(struct device *dev,
+static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
dma_addr_t dma_addr)
{
unsigned long addr = dma_addr & RAM_OFFSET_MASK;
diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h
index f93aee59454..302101b54ac 100644
--- a/arch/mips/include/asm/mach-jazz/dma-coherence.h
+++ b/arch/mips/include/asm/mach-jazz/dma-coherence.h
@@ -12,23 +12,24 @@
struct device;
-static dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
+static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
{
return vdma_alloc(virt_to_phys(addr), size);
}
-static dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
+static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
+ struct page *page)
{
return vdma_alloc(page_to_phys(page), PAGE_SIZE);
}
-static unsigned long plat_dma_addr_to_phys(struct device *dev,
+static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
dma_addr_t dma_addr)
{
return vdma_log2phys(dma_addr);
}
-static void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction)
{
vdma_free(dma_addr);
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 335474c155f..4d987097538 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1040,6 +1040,12 @@ do { \
#define read_c0_dtaglo() __read_32bit_c0_register($28, 2)
#define write_c0_dtaglo(val) __write_32bit_c0_register($28, 2, val)
+#define read_c0_ddatalo() __read_32bit_c0_register($28, 3)
+#define write_c0_ddatalo(val) __write_32bit_c0_register($28, 3, val)
+
+#define read_c0_staglo() __read_32bit_c0_register($28, 4)
+#define write_c0_staglo(val) __write_32bit_c0_register($28, 4, val)
+
#define read_c0_taghi() __read_32bit_c0_register($29, 0)
#define write_c0_taghi(val) __write_32bit_c0_register($29, 0, val)
@@ -1082,6 +1088,51 @@ do { \
#define read_octeon_c0_dcacheerr() __read_64bit_c0_register($27, 1)
#define write_octeon_c0_dcacheerr(val) __write_64bit_c0_register($27, 1, val)
+/* BMIPS3300 */
+#define read_c0_brcm_config_0() __read_32bit_c0_register($22, 0)
+#define write_c0_brcm_config_0(val) __write_32bit_c0_register($22, 0, val)
+
+#define read_c0_brcm_bus_pll() __read_32bit_c0_register($22, 4)
+#define write_c0_brcm_bus_pll(val) __write_32bit_c0_register($22, 4, val)
+
+#define read_c0_brcm_reset() __read_32bit_c0_register($22, 5)
+#define write_c0_brcm_reset(val) __write_32bit_c0_register($22, 5, val)
+
+/* BMIPS4380 */
+#define read_c0_brcm_cmt_intr() __read_32bit_c0_register($22, 1)
+#define write_c0_brcm_cmt_intr(val) __write_32bit_c0_register($22, 1, val)
+
+#define read_c0_brcm_cmt_ctrl() __read_32bit_c0_register($22, 2)
+#define write_c0_brcm_cmt_ctrl(val) __write_32bit_c0_register($22, 2, val)
+
+#define read_c0_brcm_cmt_local() __read_32bit_c0_register($22, 3)
+#define write_c0_brcm_cmt_local(val) __write_32bit_c0_register($22, 3, val)
+
+#define read_c0_brcm_config_1() __read_32bit_c0_register($22, 5)
+#define write_c0_brcm_config_1(val) __write_32bit_c0_register($22, 5, val)
+
+#define read_c0_brcm_cbr() __read_32bit_c0_register($22, 6)
+#define write_c0_brcm_cbr(val) __write_32bit_c0_register($22, 6, val)
+
+/* BMIPS5000 */
+#define read_c0_brcm_config() __read_32bit_c0_register($22, 0)
+#define write_c0_brcm_config(val) __write_32bit_c0_register($22, 0, val)
+
+#define read_c0_brcm_mode() __read_32bit_c0_register($22, 1)
+#define write_c0_brcm_mode(val) __write_32bit_c0_register($22, 1, val)
+
+#define read_c0_brcm_action() __read_32bit_c0_register($22, 2)
+#define write_c0_brcm_action(val) __write_32bit_c0_register($22, 2, val)
+
+#define read_c0_brcm_edsp() __read_32bit_c0_register($22, 3)
+#define write_c0_brcm_edsp(val) __write_32bit_c0_register($22, 3, val)
+
+#define read_c0_brcm_bootvec() __read_32bit_c0_register($22, 4)
+#define write_c0_brcm_bootvec(val) __write_32bit_c0_register($22, 4, val)
+
+#define read_c0_brcm_sleepcount() __read_32bit_c0_register($22, 7)
+#define write_c0_brcm_sleepcount(val) __write_32bit_c0_register($22, 7, val)
+
/*
* Macros to access the floating point coprocessor control registers
*/
diff --git a/arch/mips/include/asm/octeon/cvmx-agl-defs.h b/arch/mips/include/asm/octeon/cvmx-agl-defs.h
index ec94b9ab7be..30d68f2365e 100644
--- a/arch/mips/include/asm/octeon/cvmx-agl-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-agl-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,148 +28,80 @@
#ifndef __CVMX_AGL_DEFS_H__
#define __CVMX_AGL_DEFS_H__
-#define CVMX_AGL_GMX_BAD_REG \
- CVMX_ADD_IO_SEG(0x00011800E0000518ull)
-#define CVMX_AGL_GMX_BIST \
- CVMX_ADD_IO_SEG(0x00011800E0000400ull)
-#define CVMX_AGL_GMX_DRV_CTL \
- CVMX_ADD_IO_SEG(0x00011800E00007F0ull)
-#define CVMX_AGL_GMX_INF_MODE \
- CVMX_ADD_IO_SEG(0x00011800E00007F8ull)
-#define CVMX_AGL_GMX_PRTX_CFG(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000010ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_ADR_CAM0(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000180ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_ADR_CAM1(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000188ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_ADR_CAM2(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000190ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_ADR_CAM3(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000198ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_ADR_CAM4(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00001A0ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_ADR_CAM5(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00001A8ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_ADR_CAM_EN(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000108ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_ADR_CTL(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000100ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_DECISION(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000040ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_FRM_CHK(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000020ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_FRM_CTL(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000018ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_FRM_MAX(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000030ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_FRM_MIN(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000028ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_IFG(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000058ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_INT_EN(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000008ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_INT_REG(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000000ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_JABBER(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000038ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000068ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_CTL(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000050ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_OCTS(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000088ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000098ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00000A8ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00000B8ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_PKTS(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000080ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00000C0ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000090ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00000A0ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00000B0ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RXX_UDD_SKP(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000048ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_RX_BP_DROPX(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000420ull + (((offset) & 1) * 8))
-#define CVMX_AGL_GMX_RX_BP_OFFX(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000460ull + (((offset) & 1) * 8))
-#define CVMX_AGL_GMX_RX_BP_ONX(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000440ull + (((offset) & 1) * 8))
-#define CVMX_AGL_GMX_RX_PRT_INFO \
- CVMX_ADD_IO_SEG(0x00011800E00004E8ull)
-#define CVMX_AGL_GMX_RX_TX_STATUS \
- CVMX_ADD_IO_SEG(0x00011800E00007E8ull)
-#define CVMX_AGL_GMX_SMACX(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000230ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_STAT_BP \
- CVMX_ADD_IO_SEG(0x00011800E0000520ull)
-#define CVMX_AGL_GMX_TXX_APPEND(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000218ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_CTL(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000270ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_MIN_PKT(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000240ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000248ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000238ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_PAUSE_TOGO(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000258ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_PAUSE_ZERO(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000260ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_SOFT_PAUSE(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000250ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT0(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000280ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT1(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000288ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT2(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000290ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT3(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000298ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT4(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00002A0ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT5(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00002A8ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT6(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00002B0ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT7(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00002B8ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT8(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00002C0ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STAT9(offset) \
- CVMX_ADD_IO_SEG(0x00011800E00002C8ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_STATS_CTL(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000268ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TXX_THRESH(offset) \
- CVMX_ADD_IO_SEG(0x00011800E0000210ull + (((offset) & 1) * 2048))
-#define CVMX_AGL_GMX_TX_BP \
- CVMX_ADD_IO_SEG(0x00011800E00004D0ull)
-#define CVMX_AGL_GMX_TX_COL_ATTEMPT \
- CVMX_ADD_IO_SEG(0x00011800E0000498ull)
-#define CVMX_AGL_GMX_TX_IFG \
- CVMX_ADD_IO_SEG(0x00011800E0000488ull)
-#define CVMX_AGL_GMX_TX_INT_EN \
- CVMX_ADD_IO_SEG(0x00011800E0000508ull)
-#define CVMX_AGL_GMX_TX_INT_REG \
- CVMX_ADD_IO_SEG(0x00011800E0000500ull)
-#define CVMX_AGL_GMX_TX_JAM \
- CVMX_ADD_IO_SEG(0x00011800E0000490ull)
-#define CVMX_AGL_GMX_TX_LFSR \
- CVMX_ADD_IO_SEG(0x00011800E00004F8ull)
-#define CVMX_AGL_GMX_TX_OVR_BP \
- CVMX_ADD_IO_SEG(0x00011800E00004C8ull)
-#define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC \
- CVMX_ADD_IO_SEG(0x00011800E00004A0ull)
-#define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE \
- CVMX_ADD_IO_SEG(0x00011800E00004A8ull)
+#define CVMX_AGL_GMX_BAD_REG (CVMX_ADD_IO_SEG(0x00011800E0000518ull))
+#define CVMX_AGL_GMX_BIST (CVMX_ADD_IO_SEG(0x00011800E0000400ull))
+#define CVMX_AGL_GMX_DRV_CTL (CVMX_ADD_IO_SEG(0x00011800E00007F0ull))
+#define CVMX_AGL_GMX_INF_MODE (CVMX_ADD_IO_SEG(0x00011800E00007F8ull))
+#define CVMX_AGL_GMX_PRTX_CFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000010ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000180ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000188ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000190ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000198ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM4(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM5(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A8ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CAM_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000108ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_ADR_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000100ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_DECISION(offset) (CVMX_ADD_IO_SEG(0x00011800E0000040ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_FRM_CHK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000020ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_FRM_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000018ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_FRM_MAX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000030ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_FRM_MIN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000028ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_IFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000058ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_INT_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000008ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_INT_REG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000000ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_JABBER(offset) (CVMX_ADD_IO_SEG(0x00011800E0000038ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000068ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_RX_INBND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000060ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000050ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_OCTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000088ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000098ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A8ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B8ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_PKTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000080ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(offset) (CVMX_ADD_IO_SEG(0x00011800E00000C0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000090ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RXX_UDD_SKP(offset) (CVMX_ADD_IO_SEG(0x00011800E0000048ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_RX_BP_DROPX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000420ull) + ((offset) & 1) * 8)
+#define CVMX_AGL_GMX_RX_BP_OFFX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000460ull) + ((offset) & 1) * 8)
+#define CVMX_AGL_GMX_RX_BP_ONX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000440ull) + ((offset) & 1) * 8)
+#define CVMX_AGL_GMX_RX_PRT_INFO (CVMX_ADD_IO_SEG(0x00011800E00004E8ull))
+#define CVMX_AGL_GMX_RX_TX_STATUS (CVMX_ADD_IO_SEG(0x00011800E00007E8ull))
+#define CVMX_AGL_GMX_SMACX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000230ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_STAT_BP (CVMX_ADD_IO_SEG(0x00011800E0000520ull))
+#define CVMX_AGL_GMX_TXX_APPEND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000218ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_CLK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000208ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000270ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_MIN_PKT(offset) (CVMX_ADD_IO_SEG(0x00011800E0000240ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000248ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000238ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_PAUSE_TOGO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000258ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_PAUSE_ZERO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000260ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_SOFT_PAUSE(offset) (CVMX_ADD_IO_SEG(0x00011800E0000250ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000280ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000288ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000290ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000298ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT4(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT5(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A8ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT6(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT7(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B8ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT8(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C0ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STAT9(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C8ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000268ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TXX_THRESH(offset) (CVMX_ADD_IO_SEG(0x00011800E0000210ull) + ((offset) & 1) * 2048)
+#define CVMX_AGL_GMX_TX_BP (CVMX_ADD_IO_SEG(0x00011800E00004D0ull))
+#define CVMX_AGL_GMX_TX_COL_ATTEMPT (CVMX_ADD_IO_SEG(0x00011800E0000498ull))
+#define CVMX_AGL_GMX_TX_IFG (CVMX_ADD_IO_SEG(0x00011800E0000488ull))
+#define CVMX_AGL_GMX_TX_INT_EN (CVMX_ADD_IO_SEG(0x00011800E0000508ull))
+#define CVMX_AGL_GMX_TX_INT_REG (CVMX_ADD_IO_SEG(0x00011800E0000500ull))
+#define CVMX_AGL_GMX_TX_JAM (CVMX_ADD_IO_SEG(0x00011800E0000490ull))
+#define CVMX_AGL_GMX_TX_LFSR (CVMX_ADD_IO_SEG(0x00011800E00004F8ull))
+#define CVMX_AGL_GMX_TX_OVR_BP (CVMX_ADD_IO_SEG(0x00011800E00004C8ull))
+#define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC (CVMX_ADD_IO_SEG(0x00011800E00004A0ull))
+#define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE (CVMX_ADD_IO_SEG(0x00011800E00004A8ull))
+#define CVMX_AGL_PRTX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0002000ull) + ((offset) & 1) * 8)
union cvmx_agl_gmx_bad_reg {
uint64_t u64;
@@ -183,14 +115,29 @@ union cvmx_agl_gmx_bad_reg {
uint64_t ovrflw:1;
uint64_t reserved_27_31:5;
uint64_t statovr:1;
+ uint64_t reserved_24_25:2;
+ uint64_t loststat:2;
+ uint64_t reserved_4_21:18;
+ uint64_t out_ovr:2;
+ uint64_t reserved_0_1:2;
+ } s;
+ struct cvmx_agl_gmx_bad_reg_cn52xx {
+ uint64_t reserved_38_63:26;
+ uint64_t txpsh1:1;
+ uint64_t txpop1:1;
+ uint64_t ovrflw1:1;
+ uint64_t txpsh:1;
+ uint64_t txpop:1;
+ uint64_t ovrflw:1;
+ uint64_t reserved_27_31:5;
+ uint64_t statovr:1;
uint64_t reserved_23_25:3;
uint64_t loststat:1;
uint64_t reserved_4_21:18;
uint64_t out_ovr:2;
uint64_t reserved_0_1:2;
- } s;
- struct cvmx_agl_gmx_bad_reg_s cn52xx;
- struct cvmx_agl_gmx_bad_reg_s cn52xxp1;
+ } cn52xx;
+ struct cvmx_agl_gmx_bad_reg_cn52xx cn52xxp1;
struct cvmx_agl_gmx_bad_reg_cn56xx {
uint64_t reserved_35_63:29;
uint64_t txpsh:1;
@@ -205,18 +152,25 @@ union cvmx_agl_gmx_bad_reg {
uint64_t reserved_0_1:2;
} cn56xx;
struct cvmx_agl_gmx_bad_reg_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_bad_reg_s cn63xx;
+ struct cvmx_agl_gmx_bad_reg_s cn63xxp1;
};
union cvmx_agl_gmx_bist {
uint64_t u64;
struct cvmx_agl_gmx_bist_s {
+ uint64_t reserved_25_63:39;
+ uint64_t status:25;
+ } s;
+ struct cvmx_agl_gmx_bist_cn52xx {
uint64_t reserved_10_63:54;
uint64_t status:10;
- } s;
- struct cvmx_agl_gmx_bist_s cn52xx;
- struct cvmx_agl_gmx_bist_s cn52xxp1;
- struct cvmx_agl_gmx_bist_s cn56xx;
- struct cvmx_agl_gmx_bist_s cn56xxp1;
+ } cn52xx;
+ struct cvmx_agl_gmx_bist_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_bist_cn52xx cn56xx;
+ struct cvmx_agl_gmx_bist_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_bist_s cn63xx;
+ struct cvmx_agl_gmx_bist_s cn63xxp1;
};
union cvmx_agl_gmx_drv_ctl {
@@ -264,7 +218,13 @@ union cvmx_agl_gmx_inf_mode {
union cvmx_agl_gmx_prtx_cfg {
uint64_t u64;
struct cvmx_agl_gmx_prtx_cfg_s {
- uint64_t reserved_6_63:58;
+ uint64_t reserved_14_63:50;
+ uint64_t tx_idle:1;
+ uint64_t rx_idle:1;
+ uint64_t reserved_9_11:3;
+ uint64_t speed_msb:1;
+ uint64_t reserved_7_7:1;
+ uint64_t burst:1;
uint64_t tx_en:1;
uint64_t rx_en:1;
uint64_t slottime:1;
@@ -272,10 +232,20 @@ union cvmx_agl_gmx_prtx_cfg {
uint64_t speed:1;
uint64_t en:1;
} s;
- struct cvmx_agl_gmx_prtx_cfg_s cn52xx;
- struct cvmx_agl_gmx_prtx_cfg_s cn52xxp1;
- struct cvmx_agl_gmx_prtx_cfg_s cn56xx;
- struct cvmx_agl_gmx_prtx_cfg_s cn56xxp1;
+ struct cvmx_agl_gmx_prtx_cfg_cn52xx {
+ uint64_t reserved_6_63:58;
+ uint64_t tx_en:1;
+ uint64_t rx_en:1;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+ } cn52xx;
+ struct cvmx_agl_gmx_prtx_cfg_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_prtx_cfg_cn52xx cn56xx;
+ struct cvmx_agl_gmx_prtx_cfg_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_prtx_cfg_s cn63xx;
+ struct cvmx_agl_gmx_prtx_cfg_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam0 {
@@ -287,6 +257,8 @@ union cvmx_agl_gmx_rxx_adr_cam0 {
struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xxp1;
struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xx;
struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam1 {
@@ -298,6 +270,8 @@ union cvmx_agl_gmx_rxx_adr_cam1 {
struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xxp1;
struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xx;
struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam2 {
@@ -309,6 +283,8 @@ union cvmx_agl_gmx_rxx_adr_cam2 {
struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xxp1;
struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xx;
struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam3 {
@@ -320,6 +296,8 @@ union cvmx_agl_gmx_rxx_adr_cam3 {
struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xxp1;
struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xx;
struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam4 {
@@ -331,6 +309,8 @@ union cvmx_agl_gmx_rxx_adr_cam4 {
struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xxp1;
struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xx;
struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam5 {
@@ -342,6 +322,8 @@ union cvmx_agl_gmx_rxx_adr_cam5 {
struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xxp1;
struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xx;
struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam_en {
@@ -354,6 +336,8 @@ union cvmx_agl_gmx_rxx_adr_cam_en {
struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xxp1;
struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xx;
struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_adr_ctl {
@@ -368,6 +352,8 @@ union cvmx_agl_gmx_rxx_adr_ctl {
struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xxp1;
struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xx;
struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_decision {
@@ -380,11 +366,26 @@ union cvmx_agl_gmx_rxx_decision {
struct cvmx_agl_gmx_rxx_decision_s cn52xxp1;
struct cvmx_agl_gmx_rxx_decision_s cn56xx;
struct cvmx_agl_gmx_rxx_decision_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_decision_s cn63xx;
+ struct cvmx_agl_gmx_rxx_decision_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_frm_chk {
uint64_t u64;
struct cvmx_agl_gmx_rxx_frm_chk_s {
+ uint64_t reserved_10_63:54;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_chk_cn52xx {
uint64_t reserved_9_63:55;
uint64_t skperr:1;
uint64_t rcverr:1;
@@ -395,17 +396,21 @@ union cvmx_agl_gmx_rxx_frm_chk {
uint64_t maxerr:1;
uint64_t reserved_1_1:1;
uint64_t minerr:1;
- } s;
- struct cvmx_agl_gmx_rxx_frm_chk_s cn52xx;
- struct cvmx_agl_gmx_rxx_frm_chk_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_frm_chk_s cn56xx;
- struct cvmx_agl_gmx_rxx_frm_chk_s cn56xxp1;
+ } cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn63xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_frm_ctl {
uint64_t u64;
struct cvmx_agl_gmx_rxx_frm_ctl_s {
- uint64_t reserved_10_63:54;
+ uint64_t reserved_13_63:51;
+ uint64_t ptp_mode:1;
+ uint64_t reserved_11_11:1;
+ uint64_t null_dis:1;
uint64_t pre_align:1;
uint64_t pad_len:1;
uint64_t vlan_len:1;
@@ -417,10 +422,24 @@ union cvmx_agl_gmx_rxx_frm_ctl {
uint64_t pre_strp:1;
uint64_t pre_chk:1;
} s;
- struct cvmx_agl_gmx_rxx_frm_ctl_s cn52xx;
- struct cvmx_agl_gmx_rxx_frm_ctl_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_frm_ctl_s cn56xx;
- struct cvmx_agl_gmx_rxx_frm_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx {
+ uint64_t reserved_10_63:54;
+ uint64_t pre_align:1;
+ uint64_t pad_len:1;
+ uint64_t vlan_len:1;
+ uint64_t pre_free:1;
+ uint64_t ctl_smac:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_drp:1;
+ uint64_t pre_strp:1;
+ uint64_t pre_chk:1;
+ } cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_frm_max {
@@ -433,6 +452,8 @@ union cvmx_agl_gmx_rxx_frm_max {
struct cvmx_agl_gmx_rxx_frm_max_s cn52xxp1;
struct cvmx_agl_gmx_rxx_frm_max_s cn56xx;
struct cvmx_agl_gmx_rxx_frm_max_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn63xx;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_frm_min {
@@ -445,6 +466,8 @@ union cvmx_agl_gmx_rxx_frm_min {
struct cvmx_agl_gmx_rxx_frm_min_s cn52xxp1;
struct cvmx_agl_gmx_rxx_frm_min_s cn56xx;
struct cvmx_agl_gmx_rxx_frm_min_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn63xx;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_ifg {
@@ -457,6 +480,8 @@ union cvmx_agl_gmx_rxx_ifg {
struct cvmx_agl_gmx_rxx_ifg_s cn52xxp1;
struct cvmx_agl_gmx_rxx_ifg_s cn56xx;
struct cvmx_agl_gmx_rxx_ifg_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_ifg_s cn63xx;
+ struct cvmx_agl_gmx_rxx_ifg_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_int_en {
@@ -464,6 +489,29 @@ union cvmx_agl_gmx_rxx_int_en {
struct cvmx_agl_gmx_rxx_int_en_s {
uint64_t reserved_20_63:44;
uint64_t pause_drp:1;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+ } s;
+ struct cvmx_agl_gmx_rxx_int_en_cn52xx {
+ uint64_t reserved_20_63:44;
+ uint64_t pause_drp:1;
uint64_t reserved_16_18:3;
uint64_t ifgerr:1;
uint64_t coldet:1;
@@ -481,11 +529,12 @@ union cvmx_agl_gmx_rxx_int_en {
uint64_t maxerr:1;
uint64_t reserved_1_1:1;
uint64_t minerr:1;
- } s;
- struct cvmx_agl_gmx_rxx_int_en_s cn52xx;
- struct cvmx_agl_gmx_rxx_int_en_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_int_en_s cn56xx;
- struct cvmx_agl_gmx_rxx_int_en_s cn56xxp1;
+ } cn52xx;
+ struct cvmx_agl_gmx_rxx_int_en_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xx;
+ struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_rxx_int_en_s cn63xx;
+ struct cvmx_agl_gmx_rxx_int_en_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_int_reg {
@@ -493,6 +542,29 @@ union cvmx_agl_gmx_rxx_int_reg {
struct cvmx_agl_gmx_rxx_int_reg_s {
uint64_t reserved_20_63:44;
uint64_t pause_drp:1;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+ } s;
+ struct cvmx_agl_gmx_rxx_int_reg_cn52xx {
+ uint64_t reserved_20_63:44;
+ uint64_t pause_drp:1;
uint64_t reserved_16_18:3;
uint64_t ifgerr:1;
uint64_t coldet:1;
@@ -510,11 +582,12 @@ union cvmx_agl_gmx_rxx_int_reg {
uint64_t maxerr:1;
uint64_t reserved_1_1:1;
uint64_t minerr:1;
- } s;
- struct cvmx_agl_gmx_rxx_int_reg_s cn52xx;
- struct cvmx_agl_gmx_rxx_int_reg_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_int_reg_s cn56xx;
- struct cvmx_agl_gmx_rxx_int_reg_s cn56xxp1;
+ } cn52xx;
+ struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn52xxp1;
+ struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xx;
+ struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xxp1;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn63xx;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_jabber {
@@ -527,6 +600,8 @@ union cvmx_agl_gmx_rxx_jabber {
struct cvmx_agl_gmx_rxx_jabber_s cn52xxp1;
struct cvmx_agl_gmx_rxx_jabber_s cn56xx;
struct cvmx_agl_gmx_rxx_jabber_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_jabber_s cn63xx;
+ struct cvmx_agl_gmx_rxx_jabber_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_pause_drop_time {
@@ -539,6 +614,20 @@ union cvmx_agl_gmx_rxx_pause_drop_time {
struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xxp1;
struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xx;
struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xx;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xxp1;
+};
+
+union cvmx_agl_gmx_rxx_rx_inbnd {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_rx_inbnd_s {
+ uint64_t reserved_4_63:60;
+ uint64_t duplex:1;
+ uint64_t speed:2;
+ uint64_t status:1;
+ } s;
+ struct cvmx_agl_gmx_rxx_rx_inbnd_s cn63xx;
+ struct cvmx_agl_gmx_rxx_rx_inbnd_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_ctl {
@@ -551,6 +640,8 @@ union cvmx_agl_gmx_rxx_stats_ctl {
struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_octs {
@@ -563,6 +654,8 @@ union cvmx_agl_gmx_rxx_stats_octs {
struct cvmx_agl_gmx_rxx_stats_octs_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_octs_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_octs_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_octs_ctl {
@@ -575,6 +668,8 @@ union cvmx_agl_gmx_rxx_stats_octs_ctl {
struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_octs_dmac {
@@ -587,6 +682,8 @@ union cvmx_agl_gmx_rxx_stats_octs_dmac {
struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_octs_drp {
@@ -599,6 +696,8 @@ union cvmx_agl_gmx_rxx_stats_octs_drp {
struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_pkts {
@@ -611,6 +710,8 @@ union cvmx_agl_gmx_rxx_stats_pkts {
struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_pkts_bad {
@@ -623,6 +724,8 @@ union cvmx_agl_gmx_rxx_stats_pkts_bad {
struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_pkts_ctl {
@@ -635,6 +738,8 @@ union cvmx_agl_gmx_rxx_stats_pkts_ctl {
struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_pkts_dmac {
@@ -647,6 +752,8 @@ union cvmx_agl_gmx_rxx_stats_pkts_dmac {
struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_stats_pkts_drp {
@@ -659,6 +766,8 @@ union cvmx_agl_gmx_rxx_stats_pkts_drp {
struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xxp1;
struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xx;
struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xxp1;
};
union cvmx_agl_gmx_rxx_udd_skp {
@@ -673,6 +782,8 @@ union cvmx_agl_gmx_rxx_udd_skp {
struct cvmx_agl_gmx_rxx_udd_skp_s cn52xxp1;
struct cvmx_agl_gmx_rxx_udd_skp_s cn56xx;
struct cvmx_agl_gmx_rxx_udd_skp_s cn56xxp1;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn63xx;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn63xxp1;
};
union cvmx_agl_gmx_rx_bp_dropx {
@@ -685,6 +796,8 @@ union cvmx_agl_gmx_rx_bp_dropx {
struct cvmx_agl_gmx_rx_bp_dropx_s cn52xxp1;
struct cvmx_agl_gmx_rx_bp_dropx_s cn56xx;
struct cvmx_agl_gmx_rx_bp_dropx_s cn56xxp1;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn63xx;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn63xxp1;
};
union cvmx_agl_gmx_rx_bp_offx {
@@ -697,6 +810,8 @@ union cvmx_agl_gmx_rx_bp_offx {
struct cvmx_agl_gmx_rx_bp_offx_s cn52xxp1;
struct cvmx_agl_gmx_rx_bp_offx_s cn56xx;
struct cvmx_agl_gmx_rx_bp_offx_s cn56xxp1;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn63xx;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn63xxp1;
};
union cvmx_agl_gmx_rx_bp_onx {
@@ -709,6 +824,8 @@ union cvmx_agl_gmx_rx_bp_onx {
struct cvmx_agl_gmx_rx_bp_onx_s cn52xxp1;
struct cvmx_agl_gmx_rx_bp_onx_s cn56xx;
struct cvmx_agl_gmx_rx_bp_onx_s cn56xxp1;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn63xx;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn63xxp1;
};
union cvmx_agl_gmx_rx_prt_info {
@@ -728,6 +845,8 @@ union cvmx_agl_gmx_rx_prt_info {
uint64_t commit:1;
} cn56xx;
struct cvmx_agl_gmx_rx_prt_info_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_rx_prt_info_s cn63xx;
+ struct cvmx_agl_gmx_rx_prt_info_s cn63xxp1;
};
union cvmx_agl_gmx_rx_tx_status {
@@ -747,6 +866,8 @@ union cvmx_agl_gmx_rx_tx_status {
uint64_t rx:1;
} cn56xx;
struct cvmx_agl_gmx_rx_tx_status_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_rx_tx_status_s cn63xx;
+ struct cvmx_agl_gmx_rx_tx_status_s cn63xxp1;
};
union cvmx_agl_gmx_smacx {
@@ -759,6 +880,8 @@ union cvmx_agl_gmx_smacx {
struct cvmx_agl_gmx_smacx_s cn52xxp1;
struct cvmx_agl_gmx_smacx_s cn56xx;
struct cvmx_agl_gmx_smacx_s cn56xxp1;
+ struct cvmx_agl_gmx_smacx_s cn63xx;
+ struct cvmx_agl_gmx_smacx_s cn63xxp1;
};
union cvmx_agl_gmx_stat_bp {
@@ -772,6 +895,8 @@ union cvmx_agl_gmx_stat_bp {
struct cvmx_agl_gmx_stat_bp_s cn52xxp1;
struct cvmx_agl_gmx_stat_bp_s cn56xx;
struct cvmx_agl_gmx_stat_bp_s cn56xxp1;
+ struct cvmx_agl_gmx_stat_bp_s cn63xx;
+ struct cvmx_agl_gmx_stat_bp_s cn63xxp1;
};
union cvmx_agl_gmx_txx_append {
@@ -787,6 +912,18 @@ union cvmx_agl_gmx_txx_append {
struct cvmx_agl_gmx_txx_append_s cn52xxp1;
struct cvmx_agl_gmx_txx_append_s cn56xx;
struct cvmx_agl_gmx_txx_append_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_append_s cn63xx;
+ struct cvmx_agl_gmx_txx_append_s cn63xxp1;
+};
+
+union cvmx_agl_gmx_txx_clk {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_clk_s {
+ uint64_t reserved_6_63:58;
+ uint64_t clk_cnt:6;
+ } s;
+ struct cvmx_agl_gmx_txx_clk_s cn63xx;
+ struct cvmx_agl_gmx_txx_clk_s cn63xxp1;
};
union cvmx_agl_gmx_txx_ctl {
@@ -800,6 +937,8 @@ union cvmx_agl_gmx_txx_ctl {
struct cvmx_agl_gmx_txx_ctl_s cn52xxp1;
struct cvmx_agl_gmx_txx_ctl_s cn56xx;
struct cvmx_agl_gmx_txx_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_ctl_s cn63xx;
+ struct cvmx_agl_gmx_txx_ctl_s cn63xxp1;
};
union cvmx_agl_gmx_txx_min_pkt {
@@ -812,6 +951,8 @@ union cvmx_agl_gmx_txx_min_pkt {
struct cvmx_agl_gmx_txx_min_pkt_s cn52xxp1;
struct cvmx_agl_gmx_txx_min_pkt_s cn56xx;
struct cvmx_agl_gmx_txx_min_pkt_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn63xx;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn63xxp1;
};
union cvmx_agl_gmx_txx_pause_pkt_interval {
@@ -824,6 +965,8 @@ union cvmx_agl_gmx_txx_pause_pkt_interval {
struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xxp1;
struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xx;
struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xxp1;
};
union cvmx_agl_gmx_txx_pause_pkt_time {
@@ -836,6 +979,8 @@ union cvmx_agl_gmx_txx_pause_pkt_time {
struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xxp1;
struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xx;
struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xxp1;
};
union cvmx_agl_gmx_txx_pause_togo {
@@ -848,6 +993,8 @@ union cvmx_agl_gmx_txx_pause_togo {
struct cvmx_agl_gmx_txx_pause_togo_s cn52xxp1;
struct cvmx_agl_gmx_txx_pause_togo_s cn56xx;
struct cvmx_agl_gmx_txx_pause_togo_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn63xx;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn63xxp1;
};
union cvmx_agl_gmx_txx_pause_zero {
@@ -860,6 +1007,8 @@ union cvmx_agl_gmx_txx_pause_zero {
struct cvmx_agl_gmx_txx_pause_zero_s cn52xxp1;
struct cvmx_agl_gmx_txx_pause_zero_s cn56xx;
struct cvmx_agl_gmx_txx_pause_zero_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn63xx;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn63xxp1;
};
union cvmx_agl_gmx_txx_soft_pause {
@@ -872,6 +1021,8 @@ union cvmx_agl_gmx_txx_soft_pause {
struct cvmx_agl_gmx_txx_soft_pause_s cn52xxp1;
struct cvmx_agl_gmx_txx_soft_pause_s cn56xx;
struct cvmx_agl_gmx_txx_soft_pause_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn63xx;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat0 {
@@ -884,6 +1035,8 @@ union cvmx_agl_gmx_txx_stat0 {
struct cvmx_agl_gmx_txx_stat0_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat0_s cn56xx;
struct cvmx_agl_gmx_txx_stat0_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat0_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat0_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat1 {
@@ -896,6 +1049,8 @@ union cvmx_agl_gmx_txx_stat1 {
struct cvmx_agl_gmx_txx_stat1_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat1_s cn56xx;
struct cvmx_agl_gmx_txx_stat1_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat1_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat1_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat2 {
@@ -908,6 +1063,8 @@ union cvmx_agl_gmx_txx_stat2 {
struct cvmx_agl_gmx_txx_stat2_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat2_s cn56xx;
struct cvmx_agl_gmx_txx_stat2_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat2_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat2_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat3 {
@@ -920,6 +1077,8 @@ union cvmx_agl_gmx_txx_stat3 {
struct cvmx_agl_gmx_txx_stat3_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat3_s cn56xx;
struct cvmx_agl_gmx_txx_stat3_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat3_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat3_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat4 {
@@ -932,6 +1091,8 @@ union cvmx_agl_gmx_txx_stat4 {
struct cvmx_agl_gmx_txx_stat4_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat4_s cn56xx;
struct cvmx_agl_gmx_txx_stat4_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat4_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat4_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat5 {
@@ -944,6 +1105,8 @@ union cvmx_agl_gmx_txx_stat5 {
struct cvmx_agl_gmx_txx_stat5_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat5_s cn56xx;
struct cvmx_agl_gmx_txx_stat5_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat5_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat5_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat6 {
@@ -956,6 +1119,8 @@ union cvmx_agl_gmx_txx_stat6 {
struct cvmx_agl_gmx_txx_stat6_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat6_s cn56xx;
struct cvmx_agl_gmx_txx_stat6_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat6_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat6_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat7 {
@@ -968,6 +1133,8 @@ union cvmx_agl_gmx_txx_stat7 {
struct cvmx_agl_gmx_txx_stat7_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat7_s cn56xx;
struct cvmx_agl_gmx_txx_stat7_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat7_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat7_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat8 {
@@ -980,6 +1147,8 @@ union cvmx_agl_gmx_txx_stat8 {
struct cvmx_agl_gmx_txx_stat8_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat8_s cn56xx;
struct cvmx_agl_gmx_txx_stat8_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat8_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat8_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stat9 {
@@ -992,6 +1161,8 @@ union cvmx_agl_gmx_txx_stat9 {
struct cvmx_agl_gmx_txx_stat9_s cn52xxp1;
struct cvmx_agl_gmx_txx_stat9_s cn56xx;
struct cvmx_agl_gmx_txx_stat9_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stat9_s cn63xx;
+ struct cvmx_agl_gmx_txx_stat9_s cn63xxp1;
};
union cvmx_agl_gmx_txx_stats_ctl {
@@ -1004,6 +1175,8 @@ union cvmx_agl_gmx_txx_stats_ctl {
struct cvmx_agl_gmx_txx_stats_ctl_s cn52xxp1;
struct cvmx_agl_gmx_txx_stats_ctl_s cn56xx;
struct cvmx_agl_gmx_txx_stats_ctl_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn63xx;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn63xxp1;
};
union cvmx_agl_gmx_txx_thresh {
@@ -1016,6 +1189,8 @@ union cvmx_agl_gmx_txx_thresh {
struct cvmx_agl_gmx_txx_thresh_s cn52xxp1;
struct cvmx_agl_gmx_txx_thresh_s cn56xx;
struct cvmx_agl_gmx_txx_thresh_s cn56xxp1;
+ struct cvmx_agl_gmx_txx_thresh_s cn63xx;
+ struct cvmx_agl_gmx_txx_thresh_s cn63xxp1;
};
union cvmx_agl_gmx_tx_bp {
@@ -1031,6 +1206,8 @@ union cvmx_agl_gmx_tx_bp {
uint64_t bp:1;
} cn56xx;
struct cvmx_agl_gmx_tx_bp_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_tx_bp_s cn63xx;
+ struct cvmx_agl_gmx_tx_bp_s cn63xxp1;
};
union cvmx_agl_gmx_tx_col_attempt {
@@ -1043,6 +1220,8 @@ union cvmx_agl_gmx_tx_col_attempt {
struct cvmx_agl_gmx_tx_col_attempt_s cn52xxp1;
struct cvmx_agl_gmx_tx_col_attempt_s cn56xx;
struct cvmx_agl_gmx_tx_col_attempt_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn63xx;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn63xxp1;
};
union cvmx_agl_gmx_tx_ifg {
@@ -1056,12 +1235,16 @@ union cvmx_agl_gmx_tx_ifg {
struct cvmx_agl_gmx_tx_ifg_s cn52xxp1;
struct cvmx_agl_gmx_tx_ifg_s cn56xx;
struct cvmx_agl_gmx_tx_ifg_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_ifg_s cn63xx;
+ struct cvmx_agl_gmx_tx_ifg_s cn63xxp1;
};
union cvmx_agl_gmx_tx_int_en {
uint64_t u64;
struct cvmx_agl_gmx_tx_int_en_s {
- uint64_t reserved_18_63:46;
+ uint64_t reserved_22_63:42;
+ uint64_t ptp_lost:2;
+ uint64_t reserved_18_19:2;
uint64_t late_col:2;
uint64_t reserved_14_15:2;
uint64_t xsdef:2;
@@ -1072,8 +1255,19 @@ union cvmx_agl_gmx_tx_int_en {
uint64_t reserved_1_1:1;
uint64_t pko_nxa:1;
} s;
- struct cvmx_agl_gmx_tx_int_en_s cn52xx;
- struct cvmx_agl_gmx_tx_int_en_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_int_en_cn52xx {
+ uint64_t reserved_18_63:46;
+ uint64_t late_col:2;
+ uint64_t reserved_14_15:2;
+ uint64_t xsdef:2;
+ uint64_t reserved_10_11:2;
+ uint64_t xscol:2;
+ uint64_t reserved_4_7:4;
+ uint64_t undflw:2;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+ } cn52xx;
+ struct cvmx_agl_gmx_tx_int_en_cn52xx cn52xxp1;
struct cvmx_agl_gmx_tx_int_en_cn56xx {
uint64_t reserved_17_63:47;
uint64_t late_col:1;
@@ -1087,12 +1281,16 @@ union cvmx_agl_gmx_tx_int_en {
uint64_t pko_nxa:1;
} cn56xx;
struct cvmx_agl_gmx_tx_int_en_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_tx_int_en_s cn63xx;
+ struct cvmx_agl_gmx_tx_int_en_s cn63xxp1;
};
union cvmx_agl_gmx_tx_int_reg {
uint64_t u64;
struct cvmx_agl_gmx_tx_int_reg_s {
- uint64_t reserved_18_63:46;
+ uint64_t reserved_22_63:42;
+ uint64_t ptp_lost:2;
+ uint64_t reserved_18_19:2;
uint64_t late_col:2;
uint64_t reserved_14_15:2;
uint64_t xsdef:2;
@@ -1103,8 +1301,19 @@ union cvmx_agl_gmx_tx_int_reg {
uint64_t reserved_1_1:1;
uint64_t pko_nxa:1;
} s;
- struct cvmx_agl_gmx_tx_int_reg_s cn52xx;
- struct cvmx_agl_gmx_tx_int_reg_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_int_reg_cn52xx {
+ uint64_t reserved_18_63:46;
+ uint64_t late_col:2;
+ uint64_t reserved_14_15:2;
+ uint64_t xsdef:2;
+ uint64_t reserved_10_11:2;
+ uint64_t xscol:2;
+ uint64_t reserved_4_7:4;
+ uint64_t undflw:2;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+ } cn52xx;
+ struct cvmx_agl_gmx_tx_int_reg_cn52xx cn52xxp1;
struct cvmx_agl_gmx_tx_int_reg_cn56xx {
uint64_t reserved_17_63:47;
uint64_t late_col:1;
@@ -1118,6 +1327,8 @@ union cvmx_agl_gmx_tx_int_reg {
uint64_t pko_nxa:1;
} cn56xx;
struct cvmx_agl_gmx_tx_int_reg_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_tx_int_reg_s cn63xx;
+ struct cvmx_agl_gmx_tx_int_reg_s cn63xxp1;
};
union cvmx_agl_gmx_tx_jam {
@@ -1130,6 +1341,8 @@ union cvmx_agl_gmx_tx_jam {
struct cvmx_agl_gmx_tx_jam_s cn52xxp1;
struct cvmx_agl_gmx_tx_jam_s cn56xx;
struct cvmx_agl_gmx_tx_jam_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_jam_s cn63xx;
+ struct cvmx_agl_gmx_tx_jam_s cn63xxp1;
};
union cvmx_agl_gmx_tx_lfsr {
@@ -1142,6 +1355,8 @@ union cvmx_agl_gmx_tx_lfsr {
struct cvmx_agl_gmx_tx_lfsr_s cn52xxp1;
struct cvmx_agl_gmx_tx_lfsr_s cn56xx;
struct cvmx_agl_gmx_tx_lfsr_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_lfsr_s cn63xx;
+ struct cvmx_agl_gmx_tx_lfsr_s cn63xxp1;
};
union cvmx_agl_gmx_tx_ovr_bp {
@@ -1165,6 +1380,8 @@ union cvmx_agl_gmx_tx_ovr_bp {
uint64_t ign_full:1;
} cn56xx;
struct cvmx_agl_gmx_tx_ovr_bp_cn56xx cn56xxp1;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn63xx;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn63xxp1;
};
union cvmx_agl_gmx_tx_pause_pkt_dmac {
@@ -1177,6 +1394,8 @@ union cvmx_agl_gmx_tx_pause_pkt_dmac {
struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xxp1;
struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xx;
struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xxp1;
};
union cvmx_agl_gmx_tx_pause_pkt_type {
@@ -1189,6 +1408,39 @@ union cvmx_agl_gmx_tx_pause_pkt_type {
struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xxp1;
struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xx;
struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xxp1;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xxp1;
+};
+
+union cvmx_agl_prtx_ctl {
+ uint64_t u64;
+ struct cvmx_agl_prtx_ctl_s {
+ uint64_t drv_byp:1;
+ uint64_t reserved_62_62:1;
+ uint64_t cmp_pctl:6;
+ uint64_t reserved_54_55:2;
+ uint64_t cmp_nctl:6;
+ uint64_t reserved_46_47:2;
+ uint64_t drv_pctl:6;
+ uint64_t reserved_38_39:2;
+ uint64_t drv_nctl:6;
+ uint64_t reserved_29_31:3;
+ uint64_t clk_set:5;
+ uint64_t clkrx_byp:1;
+ uint64_t reserved_21_22:2;
+ uint64_t clkrx_set:5;
+ uint64_t clktx_byp:1;
+ uint64_t reserved_13_14:2;
+ uint64_t clktx_set:5;
+ uint64_t reserved_5_7:3;
+ uint64_t dllrst:1;
+ uint64_t comp:1;
+ uint64_t enable:1;
+ uint64_t clkrst:1;
+ uint64_t mode:1;
+ } s;
+ struct cvmx_agl_prtx_ctl_s cn63xx;
+ struct cvmx_agl_prtx_ctl_s cn63xxp1;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-asm.h b/arch/mips/include/asm/octeon/cvmx-asm.h
index b21d3fc1ef9..5de5de95311 100644
--- a/arch/mips/include/asm/octeon/cvmx-asm.h
+++ b/arch/mips/include/asm/octeon/cvmx-asm.h
@@ -114,6 +114,17 @@
#define CVMX_DCACHE_INVALIDATE \
{ CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); }
+#define CVMX_CACHE(op, address, offset) \
+ asm volatile ("cache " CVMX_TMP_STR(op) ", " CVMX_TMP_STR(offset) "(%[rbase])" \
+ : : [rbase] "d" (address) )
+/* fetch and lock the state. */
+#define CVMX_CACHE_LCKL2(address, offset) CVMX_CACHE(31, address, offset)
+/* unlock the state. */
+#define CVMX_CACHE_WBIL2(address, offset) CVMX_CACHE(23, address, offset)
+/* invalidate the cache block and clear the USED bits for the block */
+#define CVMX_CACHE_WBIL2I(address, offset) CVMX_CACHE(3, address, offset)
+/* load virtual tag and data for the L2 cache block into L2C_TAD0_TAG register */
+#define CVMX_CACHE_LTGL2I(address, offset) CVMX_CACHE(7, address, offset)
#define CVMX_POP(result, input) \
asm ("pop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
diff --git a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
index f8f05b7764b..27cead37041 100644
--- a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,87 +28,61 @@
#ifndef __CVMX_CIU_DEFS_H__
#define __CVMX_CIU_DEFS_H__
-#define CVMX_CIU_BIST \
- CVMX_ADD_IO_SEG(0x0001070000000730ull)
-#define CVMX_CIU_DINT \
- CVMX_ADD_IO_SEG(0x0001070000000720ull)
-#define CVMX_CIU_FUSE \
- CVMX_ADD_IO_SEG(0x0001070000000728ull)
-#define CVMX_CIU_GSTOP \
- CVMX_ADD_IO_SEG(0x0001070000000710ull)
-#define CVMX_CIU_INTX_EN0(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000200ull + (((offset) & 63) * 16))
-#define CVMX_CIU_INTX_EN0_W1C(offset) \
- CVMX_ADD_IO_SEG(0x0001070000002200ull + (((offset) & 63) * 16))
-#define CVMX_CIU_INTX_EN0_W1S(offset) \
- CVMX_ADD_IO_SEG(0x0001070000006200ull + (((offset) & 63) * 16))
-#define CVMX_CIU_INTX_EN1(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000208ull + (((offset) & 63) * 16))
-#define CVMX_CIU_INTX_EN1_W1C(offset) \
- CVMX_ADD_IO_SEG(0x0001070000002208ull + (((offset) & 63) * 16))
-#define CVMX_CIU_INTX_EN1_W1S(offset) \
- CVMX_ADD_IO_SEG(0x0001070000006208ull + (((offset) & 63) * 16))
-#define CVMX_CIU_INTX_EN4_0(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000C80ull + (((offset) & 15) * 16))
-#define CVMX_CIU_INTX_EN4_0_W1C(offset) \
- CVMX_ADD_IO_SEG(0x0001070000002C80ull + (((offset) & 15) * 16))
-#define CVMX_CIU_INTX_EN4_0_W1S(offset) \
- CVMX_ADD_IO_SEG(0x0001070000006C80ull + (((offset) & 15) * 16))
-#define CVMX_CIU_INTX_EN4_1(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000C88ull + (((offset) & 15) * 16))
-#define CVMX_CIU_INTX_EN4_1_W1C(offset) \
- CVMX_ADD_IO_SEG(0x0001070000002C88ull + (((offset) & 15) * 16))
-#define CVMX_CIU_INTX_EN4_1_W1S(offset) \
- CVMX_ADD_IO_SEG(0x0001070000006C88ull + (((offset) & 15) * 16))
-#define CVMX_CIU_INTX_SUM0(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000000ull + (((offset) & 63) * 8))
-#define CVMX_CIU_INTX_SUM4(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000C00ull + (((offset) & 15) * 8))
-#define CVMX_CIU_INT_SUM1 \
- CVMX_ADD_IO_SEG(0x0001070000000108ull)
-#define CVMX_CIU_MBOX_CLRX(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000680ull + (((offset) & 15) * 8))
-#define CVMX_CIU_MBOX_SETX(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000600ull + (((offset) & 15) * 8))
-#define CVMX_CIU_NMI \
- CVMX_ADD_IO_SEG(0x0001070000000718ull)
-#define CVMX_CIU_PCI_INTA \
- CVMX_ADD_IO_SEG(0x0001070000000750ull)
-#define CVMX_CIU_PP_DBG \
- CVMX_ADD_IO_SEG(0x0001070000000708ull)
-#define CVMX_CIU_PP_POKEX(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000580ull + (((offset) & 15) * 8))
-#define CVMX_CIU_PP_RST \
- CVMX_ADD_IO_SEG(0x0001070000000700ull)
-#define CVMX_CIU_QLM_DCOK \
- CVMX_ADD_IO_SEG(0x0001070000000760ull)
-#define CVMX_CIU_QLM_JTGC \
- CVMX_ADD_IO_SEG(0x0001070000000768ull)
-#define CVMX_CIU_QLM_JTGD \
- CVMX_ADD_IO_SEG(0x0001070000000770ull)
-#define CVMX_CIU_SOFT_BIST \
- CVMX_ADD_IO_SEG(0x0001070000000738ull)
-#define CVMX_CIU_SOFT_PRST \
- CVMX_ADD_IO_SEG(0x0001070000000748ull)
-#define CVMX_CIU_SOFT_PRST1 \
- CVMX_ADD_IO_SEG(0x0001070000000758ull)
-#define CVMX_CIU_SOFT_RST \
- CVMX_ADD_IO_SEG(0x0001070000000740ull)
-#define CVMX_CIU_TIMX(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000480ull + (((offset) & 3) * 8))
-#define CVMX_CIU_WDOGX(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000500ull + (((offset) & 15) * 8))
+#define CVMX_CIU_BIST (CVMX_ADD_IO_SEG(0x0001070000000730ull))
+#define CVMX_CIU_BLOCK_INT (CVMX_ADD_IO_SEG(0x00010700000007C0ull))
+#define CVMX_CIU_DINT (CVMX_ADD_IO_SEG(0x0001070000000720ull))
+#define CVMX_CIU_FUSE (CVMX_ADD_IO_SEG(0x0001070000000728ull))
+#define CVMX_CIU_GSTOP (CVMX_ADD_IO_SEG(0x0001070000000710ull))
+#define CVMX_CIU_INT33_SUM0 (CVMX_ADD_IO_SEG(0x0001070000000110ull))
+#define CVMX_CIU_INTX_EN0(offset) (CVMX_ADD_IO_SEG(0x0001070000000200ull) + ((offset) & 63) * 16)
+#define CVMX_CIU_INTX_EN0_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002200ull) + ((offset) & 63) * 16)
+#define CVMX_CIU_INTX_EN0_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006200ull) + ((offset) & 63) * 16)
+#define CVMX_CIU_INTX_EN1(offset) (CVMX_ADD_IO_SEG(0x0001070000000208ull) + ((offset) & 63) * 16)
+#define CVMX_CIU_INTX_EN1_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002208ull) + ((offset) & 63) * 16)
+#define CVMX_CIU_INTX_EN1_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006208ull) + ((offset) & 63) * 16)
+#define CVMX_CIU_INTX_EN4_0(offset) (CVMX_ADD_IO_SEG(0x0001070000000C80ull) + ((offset) & 15) * 16)
+#define CVMX_CIU_INTX_EN4_0_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002C80ull) + ((offset) & 15) * 16)
+#define CVMX_CIU_INTX_EN4_0_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006C80ull) + ((offset) & 15) * 16)
+#define CVMX_CIU_INTX_EN4_1(offset) (CVMX_ADD_IO_SEG(0x0001070000000C88ull) + ((offset) & 15) * 16)
+#define CVMX_CIU_INTX_EN4_1_W1C(offset) (CVMX_ADD_IO_SEG(0x0001070000002C88ull) + ((offset) & 15) * 16)
+#define CVMX_CIU_INTX_EN4_1_W1S(offset) (CVMX_ADD_IO_SEG(0x0001070000006C88ull) + ((offset) & 15) * 16)
+#define CVMX_CIU_INTX_SUM0(offset) (CVMX_ADD_IO_SEG(0x0001070000000000ull) + ((offset) & 63) * 8)
+#define CVMX_CIU_INTX_SUM4(offset) (CVMX_ADD_IO_SEG(0x0001070000000C00ull) + ((offset) & 15) * 8)
+#define CVMX_CIU_INT_DBG_SEL (CVMX_ADD_IO_SEG(0x00010700000007D0ull))
+#define CVMX_CIU_INT_SUM1 (CVMX_ADD_IO_SEG(0x0001070000000108ull))
+#define CVMX_CIU_MBOX_CLRX(offset) (CVMX_ADD_IO_SEG(0x0001070000000680ull) + ((offset) & 15) * 8)
+#define CVMX_CIU_MBOX_SETX(offset) (CVMX_ADD_IO_SEG(0x0001070000000600ull) + ((offset) & 15) * 8)
+#define CVMX_CIU_NMI (CVMX_ADD_IO_SEG(0x0001070000000718ull))
+#define CVMX_CIU_PCI_INTA (CVMX_ADD_IO_SEG(0x0001070000000750ull))
+#define CVMX_CIU_PP_DBG (CVMX_ADD_IO_SEG(0x0001070000000708ull))
+#define CVMX_CIU_PP_POKEX(offset) (CVMX_ADD_IO_SEG(0x0001070000000580ull) + ((offset) & 15) * 8)
+#define CVMX_CIU_PP_RST (CVMX_ADD_IO_SEG(0x0001070000000700ull))
+#define CVMX_CIU_QLM0 (CVMX_ADD_IO_SEG(0x0001070000000780ull))
+#define CVMX_CIU_QLM1 (CVMX_ADD_IO_SEG(0x0001070000000788ull))
+#define CVMX_CIU_QLM2 (CVMX_ADD_IO_SEG(0x0001070000000790ull))
+#define CVMX_CIU_QLM_DCOK (CVMX_ADD_IO_SEG(0x0001070000000760ull))
+#define CVMX_CIU_QLM_JTGC (CVMX_ADD_IO_SEG(0x0001070000000768ull))
+#define CVMX_CIU_QLM_JTGD (CVMX_ADD_IO_SEG(0x0001070000000770ull))
+#define CVMX_CIU_SOFT_BIST (CVMX_ADD_IO_SEG(0x0001070000000738ull))
+#define CVMX_CIU_SOFT_PRST (CVMX_ADD_IO_SEG(0x0001070000000748ull))
+#define CVMX_CIU_SOFT_PRST1 (CVMX_ADD_IO_SEG(0x0001070000000758ull))
+#define CVMX_CIU_SOFT_RST (CVMX_ADD_IO_SEG(0x0001070000000740ull))
+#define CVMX_CIU_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001070000000480ull) + ((offset) & 3) * 8)
+#define CVMX_CIU_WDOGX(offset) (CVMX_ADD_IO_SEG(0x0001070000000500ull) + ((offset) & 15) * 8)
union cvmx_ciu_bist {
uint64_t u64;
struct cvmx_ciu_bist_s {
+ uint64_t reserved_5_63:59;
+ uint64_t bist:5;
+ } s;
+ struct cvmx_ciu_bist_cn30xx {
uint64_t reserved_4_63:60;
uint64_t bist:4;
- } s;
- struct cvmx_ciu_bist_s cn30xx;
- struct cvmx_ciu_bist_s cn31xx;
- struct cvmx_ciu_bist_s cn38xx;
- struct cvmx_ciu_bist_s cn38xxp2;
+ } cn30xx;
+ struct cvmx_ciu_bist_cn30xx cn31xx;
+ struct cvmx_ciu_bist_cn30xx cn38xx;
+ struct cvmx_ciu_bist_cn30xx cn38xxp2;
struct cvmx_ciu_bist_cn50xx {
uint64_t reserved_2_63:62;
uint64_t bist:2;
@@ -118,10 +92,57 @@ union cvmx_ciu_bist {
uint64_t bist:3;
} cn52xx;
struct cvmx_ciu_bist_cn52xx cn52xxp1;
- struct cvmx_ciu_bist_s cn56xx;
- struct cvmx_ciu_bist_s cn56xxp1;
- struct cvmx_ciu_bist_s cn58xx;
- struct cvmx_ciu_bist_s cn58xxp1;
+ struct cvmx_ciu_bist_cn30xx cn56xx;
+ struct cvmx_ciu_bist_cn30xx cn56xxp1;
+ struct cvmx_ciu_bist_cn30xx cn58xx;
+ struct cvmx_ciu_bist_cn30xx cn58xxp1;
+ struct cvmx_ciu_bist_s cn63xx;
+ struct cvmx_ciu_bist_s cn63xxp1;
+};
+
+union cvmx_ciu_block_int {
+ uint64_t u64;
+ struct cvmx_ciu_block_int_s {
+ uint64_t reserved_43_63:21;
+ uint64_t ptp:1;
+ uint64_t dpi:1;
+ uint64_t dfm:1;
+ uint64_t reserved_34_39:6;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t reserved_31_31:1;
+ uint64_t iob:1;
+ uint64_t reserved_29_29:1;
+ uint64_t agl:1;
+ uint64_t reserved_27_27:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t reserved_23_24:2;
+ uint64_t asxpcs0:1;
+ uint64_t reserved_21_21:1;
+ uint64_t pip:1;
+ uint64_t reserved_18_19:2;
+ uint64_t lmc0:1;
+ uint64_t l2c:1;
+ uint64_t reserved_15_15:1;
+ uint64_t rad:1;
+ uint64_t usb:1;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t reserved_8_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t sli:1;
+ uint64_t reserved_2_2:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+ } s;
+ struct cvmx_ciu_block_int_s cn63xx;
+ struct cvmx_ciu_block_int_s cn63xxp1;
};
union cvmx_ciu_dint {
@@ -153,6 +174,11 @@ union cvmx_ciu_dint {
struct cvmx_ciu_dint_cn56xx cn56xxp1;
struct cvmx_ciu_dint_s cn58xx;
struct cvmx_ciu_dint_s cn58xxp1;
+ struct cvmx_ciu_dint_cn63xx {
+ uint64_t reserved_6_63:58;
+ uint64_t dint:6;
+ } cn63xx;
+ struct cvmx_ciu_dint_cn63xx cn63xxp1;
};
union cvmx_ciu_fuse {
@@ -184,6 +210,11 @@ union cvmx_ciu_fuse {
struct cvmx_ciu_fuse_cn56xx cn56xxp1;
struct cvmx_ciu_fuse_s cn58xx;
struct cvmx_ciu_fuse_s cn58xxp1;
+ struct cvmx_ciu_fuse_cn63xx {
+ uint64_t reserved_6_63:58;
+ uint64_t fuse:6;
+ } cn63xx;
+ struct cvmx_ciu_fuse_cn63xx cn63xxp1;
};
union cvmx_ciu_gstop {
@@ -203,6 +234,8 @@ union cvmx_ciu_gstop {
struct cvmx_ciu_gstop_s cn56xxp1;
struct cvmx_ciu_gstop_s cn58xx;
struct cvmx_ciu_gstop_s cn58xxp1;
+ struct cvmx_ciu_gstop_s cn63xx;
+ struct cvmx_ciu_gstop_s cn63xxp1;
};
union cvmx_ciu_intx_en0 {
@@ -343,6 +376,8 @@ union cvmx_ciu_intx_en0 {
struct cvmx_ciu_intx_en0_cn56xx cn56xxp1;
struct cvmx_ciu_intx_en0_cn38xx cn58xx;
struct cvmx_ciu_intx_en0_cn38xx cn58xxp1;
+ struct cvmx_ciu_intx_en0_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en0_cn52xx cn63xxp1;
};
union cvmx_ciu_intx_en0_w1c {
@@ -412,6 +447,8 @@ union cvmx_ciu_intx_en0_w1c {
uint64_t gpio:16;
uint64_t workq:16;
} cn58xx;
+ struct cvmx_ciu_intx_en0_w1c_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en0_w1c_cn52xx cn63xxp1;
};
union cvmx_ciu_intx_en0_w1s {
@@ -481,12 +518,42 @@ union cvmx_ciu_intx_en0_w1s {
uint64_t gpio:16;
uint64_t workq:16;
} cn58xx;
+ struct cvmx_ciu_intx_en0_w1s_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en0_w1s_cn52xx cn63xxp1;
};
union cvmx_ciu_intx_en1 {
uint64_t u64;
struct cvmx_ciu_intx_en1_s {
- uint64_t reserved_20_63:44;
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
uint64_t nand:1;
uint64_t mii1:1;
uint64_t usb1:1;
@@ -531,12 +598,76 @@ union cvmx_ciu_intx_en1 {
struct cvmx_ciu_intx_en1_cn56xx cn56xxp1;
struct cvmx_ciu_intx_en1_cn38xx cn58xx;
struct cvmx_ciu_intx_en1_cn38xx cn58xxp1;
+ struct cvmx_ciu_intx_en1_cn63xx {
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t reserved_6_17:12;
+ uint64_t wdog:6;
+ } cn63xx;
+ struct cvmx_ciu_intx_en1_cn63xx cn63xxp1;
};
union cvmx_ciu_intx_en1_w1c {
uint64_t u64;
struct cvmx_ciu_intx_en1_w1c_s {
- uint64_t reserved_20_63:44;
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
uint64_t nand:1;
uint64_t mii1:1;
uint64_t usb1:1;
@@ -560,12 +691,76 @@ union cvmx_ciu_intx_en1_w1c {
uint64_t reserved_16_63:48;
uint64_t wdog:16;
} cn58xx;
+ struct cvmx_ciu_intx_en1_w1c_cn63xx {
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t reserved_6_17:12;
+ uint64_t wdog:6;
+ } cn63xx;
+ struct cvmx_ciu_intx_en1_w1c_cn63xx cn63xxp1;
};
union cvmx_ciu_intx_en1_w1s {
uint64_t u64;
struct cvmx_ciu_intx_en1_w1s_s {
- uint64_t reserved_20_63:44;
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
uint64_t nand:1;
uint64_t mii1:1;
uint64_t usb1:1;
@@ -589,6 +784,42 @@ union cvmx_ciu_intx_en1_w1s {
uint64_t reserved_16_63:48;
uint64_t wdog:16;
} cn58xx;
+ struct cvmx_ciu_intx_en1_w1s_cn63xx {
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t reserved_6_17:12;
+ uint64_t wdog:6;
+ } cn63xx;
+ struct cvmx_ciu_intx_en1_w1s_cn63xx cn63xxp1;
};
union cvmx_ciu_intx_en4_0 {
@@ -705,6 +936,8 @@ union cvmx_ciu_intx_en4_0 {
uint64_t workq:16;
} cn58xx;
struct cvmx_ciu_intx_en4_0_cn58xx cn58xxp1;
+ struct cvmx_ciu_intx_en4_0_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en4_0_cn52xx cn63xxp1;
};
union cvmx_ciu_intx_en4_0_w1c {
@@ -774,6 +1007,8 @@ union cvmx_ciu_intx_en4_0_w1c {
uint64_t gpio:16;
uint64_t workq:16;
} cn58xx;
+ struct cvmx_ciu_intx_en4_0_w1c_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en4_0_w1c_cn52xx cn63xxp1;
};
union cvmx_ciu_intx_en4_0_w1s {
@@ -843,12 +1078,42 @@ union cvmx_ciu_intx_en4_0_w1s {
uint64_t gpio:16;
uint64_t workq:16;
} cn58xx;
+ struct cvmx_ciu_intx_en4_0_w1s_cn52xx cn63xx;
+ struct cvmx_ciu_intx_en4_0_w1s_cn52xx cn63xxp1;
};
union cvmx_ciu_intx_en4_1 {
uint64_t u64;
struct cvmx_ciu_intx_en4_1_s {
- uint64_t reserved_20_63:44;
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
uint64_t nand:1;
uint64_t mii1:1;
uint64_t usb1:1;
@@ -886,12 +1151,76 @@ union cvmx_ciu_intx_en4_1 {
uint64_t wdog:16;
} cn58xx;
struct cvmx_ciu_intx_en4_1_cn58xx cn58xxp1;
+ struct cvmx_ciu_intx_en4_1_cn63xx {
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t reserved_6_17:12;
+ uint64_t wdog:6;
+ } cn63xx;
+ struct cvmx_ciu_intx_en4_1_cn63xx cn63xxp1;
};
union cvmx_ciu_intx_en4_1_w1c {
uint64_t u64;
struct cvmx_ciu_intx_en4_1_w1c_s {
- uint64_t reserved_20_63:44;
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
uint64_t nand:1;
uint64_t mii1:1;
uint64_t usb1:1;
@@ -915,12 +1244,76 @@ union cvmx_ciu_intx_en4_1_w1c {
uint64_t reserved_16_63:48;
uint64_t wdog:16;
} cn58xx;
+ struct cvmx_ciu_intx_en4_1_w1c_cn63xx {
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t reserved_6_17:12;
+ uint64_t wdog:6;
+ } cn63xx;
+ struct cvmx_ciu_intx_en4_1_w1c_cn63xx cn63xxp1;
};
union cvmx_ciu_intx_en4_1_w1s {
uint64_t u64;
struct cvmx_ciu_intx_en4_1_w1s_s {
- uint64_t reserved_20_63:44;
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
uint64_t nand:1;
uint64_t mii1:1;
uint64_t usb1:1;
@@ -944,6 +1337,42 @@ union cvmx_ciu_intx_en4_1_w1s {
uint64_t reserved_16_63:48;
uint64_t wdog:16;
} cn58xx;
+ struct cvmx_ciu_intx_en4_1_w1s_cn63xx {
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t reserved_6_17:12;
+ uint64_t wdog:6;
+ } cn63xx;
+ struct cvmx_ciu_intx_en4_1_w1s_cn63xx cn63xxp1;
};
union cvmx_ciu_intx_sum0 {
@@ -1084,6 +1513,8 @@ union cvmx_ciu_intx_sum0 {
struct cvmx_ciu_intx_sum0_cn56xx cn56xxp1;
struct cvmx_ciu_intx_sum0_cn38xx cn58xx;
struct cvmx_ciu_intx_sum0_cn38xx cn58xxp1;
+ struct cvmx_ciu_intx_sum0_cn52xx cn63xx;
+ struct cvmx_ciu_intx_sum0_cn52xx cn63xxp1;
};
union cvmx_ciu_intx_sum4 {
@@ -1200,12 +1631,85 @@ union cvmx_ciu_intx_sum4 {
uint64_t workq:16;
} cn58xx;
struct cvmx_ciu_intx_sum4_cn58xx cn58xxp1;
+ struct cvmx_ciu_intx_sum4_cn52xx cn63xx;
+ struct cvmx_ciu_intx_sum4_cn52xx cn63xxp1;
+};
+
+union cvmx_ciu_int33_sum0 {
+ uint64_t u64;
+ struct cvmx_ciu_int33_sum0_s {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t reserved_57_58:2;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t reserved_51_51:1;
+ uint64_t ipd_drp:1;
+ uint64_t reserved_49_49:1;
+ uint64_t gmx_drp:1;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t wdog_sum:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } s;
+ struct cvmx_ciu_int33_sum0_s cn63xx;
+ struct cvmx_ciu_int33_sum0_s cn63xxp1;
+};
+
+union cvmx_ciu_int_dbg_sel {
+ uint64_t u64;
+ struct cvmx_ciu_int_dbg_sel_s {
+ uint64_t reserved_19_63:45;
+ uint64_t sel:3;
+ uint64_t reserved_10_15:6;
+ uint64_t irq:2;
+ uint64_t reserved_3_7:5;
+ uint64_t pp:3;
+ } s;
+ struct cvmx_ciu_int_dbg_sel_s cn63xx;
};
union cvmx_ciu_int_sum1 {
uint64_t u64;
struct cvmx_ciu_int_sum1_s {
- uint64_t reserved_20_63:44;
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
uint64_t nand:1;
uint64_t mii1:1;
uint64_t usb1:1;
@@ -1250,6 +1754,42 @@ union cvmx_ciu_int_sum1 {
struct cvmx_ciu_int_sum1_cn56xx cn56xxp1;
struct cvmx_ciu_int_sum1_cn38xx cn58xx;
struct cvmx_ciu_int_sum1_cn38xx cn58xxp1;
+ struct cvmx_ciu_int_sum1_cn63xx {
+ uint64_t rst:1;
+ uint64_t reserved_57_62:6;
+ uint64_t dfm:1;
+ uint64_t reserved_53_55:3;
+ uint64_t lmc0:1;
+ uint64_t srio1:1;
+ uint64_t srio0:1;
+ uint64_t pem1:1;
+ uint64_t pem0:1;
+ uint64_t ptp:1;
+ uint64_t agl:1;
+ uint64_t reserved_37_45:9;
+ uint64_t agx0:1;
+ uint64_t dpi:1;
+ uint64_t sli:1;
+ uint64_t usb:1;
+ uint64_t dfa:1;
+ uint64_t key:1;
+ uint64_t rad:1;
+ uint64_t tim:1;
+ uint64_t zip:1;
+ uint64_t pko:1;
+ uint64_t pip:1;
+ uint64_t ipd:1;
+ uint64_t l2c:1;
+ uint64_t pow:1;
+ uint64_t fpa:1;
+ uint64_t iob:1;
+ uint64_t mio:1;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t reserved_6_17:12;
+ uint64_t wdog:6;
+ } cn63xx;
+ struct cvmx_ciu_int_sum1_cn63xx cn63xxp1;
};
union cvmx_ciu_mbox_clrx {
@@ -1269,6 +1809,8 @@ union cvmx_ciu_mbox_clrx {
struct cvmx_ciu_mbox_clrx_s cn56xxp1;
struct cvmx_ciu_mbox_clrx_s cn58xx;
struct cvmx_ciu_mbox_clrx_s cn58xxp1;
+ struct cvmx_ciu_mbox_clrx_s cn63xx;
+ struct cvmx_ciu_mbox_clrx_s cn63xxp1;
};
union cvmx_ciu_mbox_setx {
@@ -1288,6 +1830,8 @@ union cvmx_ciu_mbox_setx {
struct cvmx_ciu_mbox_setx_s cn56xxp1;
struct cvmx_ciu_mbox_setx_s cn58xx;
struct cvmx_ciu_mbox_setx_s cn58xxp1;
+ struct cvmx_ciu_mbox_setx_s cn63xx;
+ struct cvmx_ciu_mbox_setx_s cn63xxp1;
};
union cvmx_ciu_nmi {
@@ -1319,6 +1863,11 @@ union cvmx_ciu_nmi {
struct cvmx_ciu_nmi_cn56xx cn56xxp1;
struct cvmx_ciu_nmi_s cn58xx;
struct cvmx_ciu_nmi_s cn58xxp1;
+ struct cvmx_ciu_nmi_cn63xx {
+ uint64_t reserved_6_63:58;
+ uint64_t nmi:6;
+ } cn63xx;
+ struct cvmx_ciu_nmi_cn63xx cn63xxp1;
};
union cvmx_ciu_pci_inta {
@@ -1338,6 +1887,8 @@ union cvmx_ciu_pci_inta {
struct cvmx_ciu_pci_inta_s cn56xxp1;
struct cvmx_ciu_pci_inta_s cn58xx;
struct cvmx_ciu_pci_inta_s cn58xxp1;
+ struct cvmx_ciu_pci_inta_s cn63xx;
+ struct cvmx_ciu_pci_inta_s cn63xxp1;
};
union cvmx_ciu_pp_dbg {
@@ -1369,12 +1920,17 @@ union cvmx_ciu_pp_dbg {
struct cvmx_ciu_pp_dbg_cn56xx cn56xxp1;
struct cvmx_ciu_pp_dbg_s cn58xx;
struct cvmx_ciu_pp_dbg_s cn58xxp1;
+ struct cvmx_ciu_pp_dbg_cn63xx {
+ uint64_t reserved_6_63:58;
+ uint64_t ppdbg:6;
+ } cn63xx;
+ struct cvmx_ciu_pp_dbg_cn63xx cn63xxp1;
};
union cvmx_ciu_pp_pokex {
uint64_t u64;
struct cvmx_ciu_pp_pokex_s {
- uint64_t reserved_0_63:64;
+ uint64_t poke:64;
} s;
struct cvmx_ciu_pp_pokex_s cn30xx;
struct cvmx_ciu_pp_pokex_s cn31xx;
@@ -1387,6 +1943,8 @@ union cvmx_ciu_pp_pokex {
struct cvmx_ciu_pp_pokex_s cn56xxp1;
struct cvmx_ciu_pp_pokex_s cn58xx;
struct cvmx_ciu_pp_pokex_s cn58xxp1;
+ struct cvmx_ciu_pp_pokex_s cn63xx;
+ struct cvmx_ciu_pp_pokex_s cn63xxp1;
};
union cvmx_ciu_pp_rst {
@@ -1422,6 +1980,97 @@ union cvmx_ciu_pp_rst {
struct cvmx_ciu_pp_rst_cn56xx cn56xxp1;
struct cvmx_ciu_pp_rst_s cn58xx;
struct cvmx_ciu_pp_rst_s cn58xxp1;
+ struct cvmx_ciu_pp_rst_cn63xx {
+ uint64_t reserved_6_63:58;
+ uint64_t rst:5;
+ uint64_t rst0:1;
+ } cn63xx;
+ struct cvmx_ciu_pp_rst_cn63xx cn63xxp1;
+};
+
+union cvmx_ciu_qlm0 {
+ uint64_t u64;
+ struct cvmx_ciu_qlm0_s {
+ uint64_t g2bypass:1;
+ uint64_t reserved_53_62:10;
+ uint64_t g2deemph:5;
+ uint64_t reserved_45_47:3;
+ uint64_t g2margin:5;
+ uint64_t reserved_32_39:8;
+ uint64_t txbypass:1;
+ uint64_t reserved_21_30:10;
+ uint64_t txdeemph:5;
+ uint64_t reserved_13_15:3;
+ uint64_t txmargin:5;
+ uint64_t reserved_4_7:4;
+ uint64_t lane_en:4;
+ } s;
+ struct cvmx_ciu_qlm0_s cn63xx;
+ struct cvmx_ciu_qlm0_cn63xxp1 {
+ uint64_t reserved_32_63:32;
+ uint64_t txbypass:1;
+ uint64_t reserved_20_30:11;
+ uint64_t txdeemph:4;
+ uint64_t reserved_13_15:3;
+ uint64_t txmargin:5;
+ uint64_t reserved_4_7:4;
+ uint64_t lane_en:4;
+ } cn63xxp1;
+};
+
+union cvmx_ciu_qlm1 {
+ uint64_t u64;
+ struct cvmx_ciu_qlm1_s {
+ uint64_t g2bypass:1;
+ uint64_t reserved_53_62:10;
+ uint64_t g2deemph:5;
+ uint64_t reserved_45_47:3;
+ uint64_t g2margin:5;
+ uint64_t reserved_32_39:8;
+ uint64_t txbypass:1;
+ uint64_t reserved_21_30:10;
+ uint64_t txdeemph:5;
+ uint64_t reserved_13_15:3;
+ uint64_t txmargin:5;
+ uint64_t reserved_4_7:4;
+ uint64_t lane_en:4;
+ } s;
+ struct cvmx_ciu_qlm1_s cn63xx;
+ struct cvmx_ciu_qlm1_cn63xxp1 {
+ uint64_t reserved_32_63:32;
+ uint64_t txbypass:1;
+ uint64_t reserved_20_30:11;
+ uint64_t txdeemph:4;
+ uint64_t reserved_13_15:3;
+ uint64_t txmargin:5;
+ uint64_t reserved_4_7:4;
+ uint64_t lane_en:4;
+ } cn63xxp1;
+};
+
+union cvmx_ciu_qlm2 {
+ uint64_t u64;
+ struct cvmx_ciu_qlm2_s {
+ uint64_t reserved_32_63:32;
+ uint64_t txbypass:1;
+ uint64_t reserved_21_30:10;
+ uint64_t txdeemph:5;
+ uint64_t reserved_13_15:3;
+ uint64_t txmargin:5;
+ uint64_t reserved_4_7:4;
+ uint64_t lane_en:4;
+ } s;
+ struct cvmx_ciu_qlm2_s cn63xx;
+ struct cvmx_ciu_qlm2_cn63xxp1 {
+ uint64_t reserved_32_63:32;
+ uint64_t txbypass:1;
+ uint64_t reserved_20_30:11;
+ uint64_t txdeemph:4;
+ uint64_t reserved_13_15:3;
+ uint64_t txmargin:5;
+ uint64_t reserved_4_7:4;
+ uint64_t lane_en:4;
+ } cn63xxp1;
};
union cvmx_ciu_qlm_dcok {
@@ -1459,6 +2108,15 @@ union cvmx_ciu_qlm_jtgc {
struct cvmx_ciu_qlm_jtgc_cn52xx cn52xxp1;
struct cvmx_ciu_qlm_jtgc_s cn56xx;
struct cvmx_ciu_qlm_jtgc_s cn56xxp1;
+ struct cvmx_ciu_qlm_jtgc_cn63xx {
+ uint64_t reserved_11_63:53;
+ uint64_t clk_div:3;
+ uint64_t reserved_6_7:2;
+ uint64_t mux_sel:2;
+ uint64_t reserved_3_3:1;
+ uint64_t bypass:3;
+ } cn63xx;
+ struct cvmx_ciu_qlm_jtgc_cn63xx cn63xxp1;
};
union cvmx_ciu_qlm_jtgd {
@@ -1493,6 +2151,17 @@ union cvmx_ciu_qlm_jtgd {
uint64_t shft_cnt:5;
uint64_t shft_reg:32;
} cn56xxp1;
+ struct cvmx_ciu_qlm_jtgd_cn63xx {
+ uint64_t capture:1;
+ uint64_t shift:1;
+ uint64_t update:1;
+ uint64_t reserved_43_60:18;
+ uint64_t select:3;
+ uint64_t reserved_37_39:3;
+ uint64_t shft_cnt:5;
+ uint64_t shft_reg:32;
+ } cn63xx;
+ struct cvmx_ciu_qlm_jtgd_cn63xx cn63xxp1;
};
union cvmx_ciu_soft_bist {
@@ -1512,6 +2181,8 @@ union cvmx_ciu_soft_bist {
struct cvmx_ciu_soft_bist_s cn56xxp1;
struct cvmx_ciu_soft_bist_s cn58xx;
struct cvmx_ciu_soft_bist_s cn58xxp1;
+ struct cvmx_ciu_soft_bist_s cn63xx;
+ struct cvmx_ciu_soft_bist_s cn63xxp1;
};
union cvmx_ciu_soft_prst {
@@ -1536,6 +2207,8 @@ union cvmx_ciu_soft_prst {
struct cvmx_ciu_soft_prst_cn52xx cn56xxp1;
struct cvmx_ciu_soft_prst_s cn58xx;
struct cvmx_ciu_soft_prst_s cn58xxp1;
+ struct cvmx_ciu_soft_prst_cn52xx cn63xx;
+ struct cvmx_ciu_soft_prst_cn52xx cn63xxp1;
};
union cvmx_ciu_soft_prst1 {
@@ -1548,6 +2221,8 @@ union cvmx_ciu_soft_prst1 {
struct cvmx_ciu_soft_prst1_s cn52xxp1;
struct cvmx_ciu_soft_prst1_s cn56xx;
struct cvmx_ciu_soft_prst1_s cn56xxp1;
+ struct cvmx_ciu_soft_prst1_s cn63xx;
+ struct cvmx_ciu_soft_prst1_s cn63xxp1;
};
union cvmx_ciu_soft_rst {
@@ -1567,6 +2242,8 @@ union cvmx_ciu_soft_rst {
struct cvmx_ciu_soft_rst_s cn56xxp1;
struct cvmx_ciu_soft_rst_s cn58xx;
struct cvmx_ciu_soft_rst_s cn58xxp1;
+ struct cvmx_ciu_soft_rst_s cn63xx;
+ struct cvmx_ciu_soft_rst_s cn63xxp1;
};
union cvmx_ciu_timx {
@@ -1587,6 +2264,8 @@ union cvmx_ciu_timx {
struct cvmx_ciu_timx_s cn56xxp1;
struct cvmx_ciu_timx_s cn58xx;
struct cvmx_ciu_timx_s cn58xxp1;
+ struct cvmx_ciu_timx_s cn63xx;
+ struct cvmx_ciu_timx_s cn63xxp1;
};
union cvmx_ciu_wdogx {
@@ -1611,6 +2290,8 @@ union cvmx_ciu_wdogx {
struct cvmx_ciu_wdogx_s cn56xxp1;
struct cvmx_ciu_wdogx_s cn58xx;
struct cvmx_ciu_wdogx_s cn58xxp1;
+ struct cvmx_ciu_wdogx_s cn63xx;
+ struct cvmx_ciu_wdogx_s cn63xxp1;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-gpio-defs.h b/arch/mips/include/asm/octeon/cvmx-gpio-defs.h
index 5fdd6ba48a0..395564e8d1f 100644
--- a/arch/mips/include/asm/octeon/cvmx-gpio-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-gpio-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,29 +28,22 @@
#ifndef __CVMX_GPIO_DEFS_H__
#define __CVMX_GPIO_DEFS_H__
-#define CVMX_GPIO_BIT_CFGX(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000800ull + (((offset) & 15) * 8))
-#define CVMX_GPIO_BOOT_ENA \
- CVMX_ADD_IO_SEG(0x00010700000008A8ull)
-#define CVMX_GPIO_CLK_GENX(offset) \
- CVMX_ADD_IO_SEG(0x00010700000008C0ull + (((offset) & 3) * 8))
-#define CVMX_GPIO_DBG_ENA \
- CVMX_ADD_IO_SEG(0x00010700000008A0ull)
-#define CVMX_GPIO_INT_CLR \
- CVMX_ADD_IO_SEG(0x0001070000000898ull)
-#define CVMX_GPIO_RX_DAT \
- CVMX_ADD_IO_SEG(0x0001070000000880ull)
-#define CVMX_GPIO_TX_CLR \
- CVMX_ADD_IO_SEG(0x0001070000000890ull)
-#define CVMX_GPIO_TX_SET \
- CVMX_ADD_IO_SEG(0x0001070000000888ull)
-#define CVMX_GPIO_XBIT_CFGX(offset) \
- CVMX_ADD_IO_SEG(0x0001070000000900ull + (((offset) & 31) * 8) - 8 * 16)
+#define CVMX_GPIO_BIT_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001070000000800ull) + ((offset) & 15) * 8)
+#define CVMX_GPIO_BOOT_ENA (CVMX_ADD_IO_SEG(0x00010700000008A8ull))
+#define CVMX_GPIO_CLK_GENX(offset) (CVMX_ADD_IO_SEG(0x00010700000008C0ull) + ((offset) & 3) * 8)
+#define CVMX_GPIO_CLK_QLMX(offset) (CVMX_ADD_IO_SEG(0x00010700000008E0ull) + ((offset) & 1) * 8)
+#define CVMX_GPIO_DBG_ENA (CVMX_ADD_IO_SEG(0x00010700000008A0ull))
+#define CVMX_GPIO_INT_CLR (CVMX_ADD_IO_SEG(0x0001070000000898ull))
+#define CVMX_GPIO_RX_DAT (CVMX_ADD_IO_SEG(0x0001070000000880ull))
+#define CVMX_GPIO_TX_CLR (CVMX_ADD_IO_SEG(0x0001070000000890ull))
+#define CVMX_GPIO_TX_SET (CVMX_ADD_IO_SEG(0x0001070000000888ull))
+#define CVMX_GPIO_XBIT_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001070000000900ull) + ((offset) & 31) * 8 - 8*16)
union cvmx_gpio_bit_cfgx {
uint64_t u64;
struct cvmx_gpio_bit_cfgx_s {
- uint64_t reserved_15_63:49;
+ uint64_t reserved_17_63:47;
+ uint64_t synce_sel:2;
uint64_t clk_gen:1;
uint64_t clk_sel:2;
uint64_t fil_sel:4;
@@ -73,12 +66,24 @@ union cvmx_gpio_bit_cfgx {
struct cvmx_gpio_bit_cfgx_cn30xx cn38xx;
struct cvmx_gpio_bit_cfgx_cn30xx cn38xxp2;
struct cvmx_gpio_bit_cfgx_cn30xx cn50xx;
- struct cvmx_gpio_bit_cfgx_s cn52xx;
- struct cvmx_gpio_bit_cfgx_s cn52xxp1;
- struct cvmx_gpio_bit_cfgx_s cn56xx;
- struct cvmx_gpio_bit_cfgx_s cn56xxp1;
+ struct cvmx_gpio_bit_cfgx_cn52xx {
+ uint64_t reserved_15_63:49;
+ uint64_t clk_gen:1;
+ uint64_t clk_sel:2;
+ uint64_t fil_sel:4;
+ uint64_t fil_cnt:4;
+ uint64_t int_type:1;
+ uint64_t int_en:1;
+ uint64_t rx_xor:1;
+ uint64_t tx_oe:1;
+ } cn52xx;
+ struct cvmx_gpio_bit_cfgx_cn52xx cn52xxp1;
+ struct cvmx_gpio_bit_cfgx_cn52xx cn56xx;
+ struct cvmx_gpio_bit_cfgx_cn52xx cn56xxp1;
struct cvmx_gpio_bit_cfgx_cn30xx cn58xx;
struct cvmx_gpio_bit_cfgx_cn30xx cn58xxp1;
+ struct cvmx_gpio_bit_cfgx_s cn63xx;
+ struct cvmx_gpio_bit_cfgx_s cn63xxp1;
};
union cvmx_gpio_boot_ena {
@@ -103,6 +108,19 @@ union cvmx_gpio_clk_genx {
struct cvmx_gpio_clk_genx_s cn52xxp1;
struct cvmx_gpio_clk_genx_s cn56xx;
struct cvmx_gpio_clk_genx_s cn56xxp1;
+ struct cvmx_gpio_clk_genx_s cn63xx;
+ struct cvmx_gpio_clk_genx_s cn63xxp1;
+};
+
+union cvmx_gpio_clk_qlmx {
+ uint64_t u64;
+ struct cvmx_gpio_clk_qlmx_s {
+ uint64_t reserved_3_63:61;
+ uint64_t div:1;
+ uint64_t lane_sel:2;
+ } s;
+ struct cvmx_gpio_clk_qlmx_s cn63xx;
+ struct cvmx_gpio_clk_qlmx_s cn63xxp1;
};
union cvmx_gpio_dbg_ena {
@@ -133,6 +151,8 @@ union cvmx_gpio_int_clr {
struct cvmx_gpio_int_clr_s cn56xxp1;
struct cvmx_gpio_int_clr_s cn58xx;
struct cvmx_gpio_int_clr_s cn58xxp1;
+ struct cvmx_gpio_int_clr_s cn63xx;
+ struct cvmx_gpio_int_clr_s cn63xxp1;
};
union cvmx_gpio_rx_dat {
@@ -155,6 +175,8 @@ union cvmx_gpio_rx_dat {
struct cvmx_gpio_rx_dat_cn38xx cn56xxp1;
struct cvmx_gpio_rx_dat_cn38xx cn58xx;
struct cvmx_gpio_rx_dat_cn38xx cn58xxp1;
+ struct cvmx_gpio_rx_dat_cn38xx cn63xx;
+ struct cvmx_gpio_rx_dat_cn38xx cn63xxp1;
};
union cvmx_gpio_tx_clr {
@@ -177,6 +199,8 @@ union cvmx_gpio_tx_clr {
struct cvmx_gpio_tx_clr_cn38xx cn56xxp1;
struct cvmx_gpio_tx_clr_cn38xx cn58xx;
struct cvmx_gpio_tx_clr_cn38xx cn58xxp1;
+ struct cvmx_gpio_tx_clr_cn38xx cn63xx;
+ struct cvmx_gpio_tx_clr_cn38xx cn63xxp1;
};
union cvmx_gpio_tx_set {
@@ -199,6 +223,8 @@ union cvmx_gpio_tx_set {
struct cvmx_gpio_tx_set_cn38xx cn56xxp1;
struct cvmx_gpio_tx_set_cn38xx cn58xx;
struct cvmx_gpio_tx_set_cn38xx cn58xxp1;
+ struct cvmx_gpio_tx_set_cn38xx cn63xx;
+ struct cvmx_gpio_tx_set_cn38xx cn63xxp1;
};
union cvmx_gpio_xbit_cfgx {
diff --git a/arch/mips/include/asm/octeon/cvmx-iob-defs.h b/arch/mips/include/asm/octeon/cvmx-iob-defs.h
index 0ee36baec50..d7d856c2483 100644
--- a/arch/mips/include/asm/octeon/cvmx-iob-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-iob-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,55 +28,39 @@
#ifndef __CVMX_IOB_DEFS_H__
#define __CVMX_IOB_DEFS_H__
-#define CVMX_IOB_BIST_STATUS \
- CVMX_ADD_IO_SEG(0x00011800F00007F8ull)
-#define CVMX_IOB_CTL_STATUS \
- CVMX_ADD_IO_SEG(0x00011800F0000050ull)
-#define CVMX_IOB_DWB_PRI_CNT \
- CVMX_ADD_IO_SEG(0x00011800F0000028ull)
-#define CVMX_IOB_FAU_TIMEOUT \
- CVMX_ADD_IO_SEG(0x00011800F0000000ull)
-#define CVMX_IOB_I2C_PRI_CNT \
- CVMX_ADD_IO_SEG(0x00011800F0000010ull)
-#define CVMX_IOB_INB_CONTROL_MATCH \
- CVMX_ADD_IO_SEG(0x00011800F0000078ull)
-#define CVMX_IOB_INB_CONTROL_MATCH_ENB \
- CVMX_ADD_IO_SEG(0x00011800F0000088ull)
-#define CVMX_IOB_INB_DATA_MATCH \
- CVMX_ADD_IO_SEG(0x00011800F0000070ull)
-#define CVMX_IOB_INB_DATA_MATCH_ENB \
- CVMX_ADD_IO_SEG(0x00011800F0000080ull)
-#define CVMX_IOB_INT_ENB \
- CVMX_ADD_IO_SEG(0x00011800F0000060ull)
-#define CVMX_IOB_INT_SUM \
- CVMX_ADD_IO_SEG(0x00011800F0000058ull)
-#define CVMX_IOB_N2C_L2C_PRI_CNT \
- CVMX_ADD_IO_SEG(0x00011800F0000020ull)
-#define CVMX_IOB_N2C_RSP_PRI_CNT \
- CVMX_ADD_IO_SEG(0x00011800F0000008ull)
-#define CVMX_IOB_OUTB_COM_PRI_CNT \
- CVMX_ADD_IO_SEG(0x00011800F0000040ull)
-#define CVMX_IOB_OUTB_CONTROL_MATCH \
- CVMX_ADD_IO_SEG(0x00011800F0000098ull)
-#define CVMX_IOB_OUTB_CONTROL_MATCH_ENB \
- CVMX_ADD_IO_SEG(0x00011800F00000A8ull)
-#define CVMX_IOB_OUTB_DATA_MATCH \
- CVMX_ADD_IO_SEG(0x00011800F0000090ull)
-#define CVMX_IOB_OUTB_DATA_MATCH_ENB \
- CVMX_ADD_IO_SEG(0x00011800F00000A0ull)
-#define CVMX_IOB_OUTB_FPA_PRI_CNT \
- CVMX_ADD_IO_SEG(0x00011800F0000048ull)
-#define CVMX_IOB_OUTB_REQ_PRI_CNT \
- CVMX_ADD_IO_SEG(0x00011800F0000038ull)
-#define CVMX_IOB_P2C_REQ_PRI_CNT \
- CVMX_ADD_IO_SEG(0x00011800F0000018ull)
-#define CVMX_IOB_PKT_ERR \
- CVMX_ADD_IO_SEG(0x00011800F0000068ull)
+#define CVMX_IOB_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011800F00007F8ull))
+#define CVMX_IOB_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011800F0000050ull))
+#define CVMX_IOB_DWB_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000028ull))
+#define CVMX_IOB_FAU_TIMEOUT (CVMX_ADD_IO_SEG(0x00011800F0000000ull))
+#define CVMX_IOB_I2C_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000010ull))
+#define CVMX_IOB_INB_CONTROL_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000078ull))
+#define CVMX_IOB_INB_CONTROL_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F0000088ull))
+#define CVMX_IOB_INB_DATA_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000070ull))
+#define CVMX_IOB_INB_DATA_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F0000080ull))
+#define CVMX_IOB_INT_ENB (CVMX_ADD_IO_SEG(0x00011800F0000060ull))
+#define CVMX_IOB_INT_SUM (CVMX_ADD_IO_SEG(0x00011800F0000058ull))
+#define CVMX_IOB_N2C_L2C_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000020ull))
+#define CVMX_IOB_N2C_RSP_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000008ull))
+#define CVMX_IOB_OUTB_COM_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000040ull))
+#define CVMX_IOB_OUTB_CONTROL_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000098ull))
+#define CVMX_IOB_OUTB_CONTROL_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F00000A8ull))
+#define CVMX_IOB_OUTB_DATA_MATCH (CVMX_ADD_IO_SEG(0x00011800F0000090ull))
+#define CVMX_IOB_OUTB_DATA_MATCH_ENB (CVMX_ADD_IO_SEG(0x00011800F00000A0ull))
+#define CVMX_IOB_OUTB_FPA_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000048ull))
+#define CVMX_IOB_OUTB_REQ_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000038ull))
+#define CVMX_IOB_P2C_REQ_PRI_CNT (CVMX_ADD_IO_SEG(0x00011800F0000018ull))
+#define CVMX_IOB_PKT_ERR (CVMX_ADD_IO_SEG(0x00011800F0000068ull))
+#define CVMX_IOB_TO_CMB_CREDITS (CVMX_ADD_IO_SEG(0x00011800F00000B0ull))
union cvmx_iob_bist_status {
uint64_t u64;
struct cvmx_iob_bist_status_s {
- uint64_t reserved_18_63:46;
+ uint64_t reserved_23_63:41;
+ uint64_t xmdfif:1;
+ uint64_t xmcfif:1;
+ uint64_t iorfif:1;
+ uint64_t rsdfif:1;
+ uint64_t iocfif:1;
uint64_t icnrcb:1;
uint64_t icr0:1;
uint64_t icr1:1;
@@ -96,40 +80,81 @@ union cvmx_iob_bist_status {
uint64_t ibd:1;
uint64_t icd:1;
} s;
- struct cvmx_iob_bist_status_s cn30xx;
- struct cvmx_iob_bist_status_s cn31xx;
- struct cvmx_iob_bist_status_s cn38xx;
- struct cvmx_iob_bist_status_s cn38xxp2;
- struct cvmx_iob_bist_status_s cn50xx;
- struct cvmx_iob_bist_status_s cn52xx;
- struct cvmx_iob_bist_status_s cn52xxp1;
- struct cvmx_iob_bist_status_s cn56xx;
- struct cvmx_iob_bist_status_s cn56xxp1;
- struct cvmx_iob_bist_status_s cn58xx;
- struct cvmx_iob_bist_status_s cn58xxp1;
+ struct cvmx_iob_bist_status_cn30xx {
+ uint64_t reserved_18_63:46;
+ uint64_t icnrcb:1;
+ uint64_t icr0:1;
+ uint64_t icr1:1;
+ uint64_t icnr1:1;
+ uint64_t icnr0:1;
+ uint64_t ibdr0:1;
+ uint64_t ibdr1:1;
+ uint64_t ibr0:1;
+ uint64_t ibr1:1;
+ uint64_t icnrt:1;
+ uint64_t ibrq0:1;
+ uint64_t ibrq1:1;
+ uint64_t icrn0:1;
+ uint64_t icrn1:1;
+ uint64_t icrp0:1;
+ uint64_t icrp1:1;
+ uint64_t ibd:1;
+ uint64_t icd:1;
+ } cn30xx;
+ struct cvmx_iob_bist_status_cn30xx cn31xx;
+ struct cvmx_iob_bist_status_cn30xx cn38xx;
+ struct cvmx_iob_bist_status_cn30xx cn38xxp2;
+ struct cvmx_iob_bist_status_cn30xx cn50xx;
+ struct cvmx_iob_bist_status_cn30xx cn52xx;
+ struct cvmx_iob_bist_status_cn30xx cn52xxp1;
+ struct cvmx_iob_bist_status_cn30xx cn56xx;
+ struct cvmx_iob_bist_status_cn30xx cn56xxp1;
+ struct cvmx_iob_bist_status_cn30xx cn58xx;
+ struct cvmx_iob_bist_status_cn30xx cn58xxp1;
+ struct cvmx_iob_bist_status_s cn63xx;
+ struct cvmx_iob_bist_status_s cn63xxp1;
};
union cvmx_iob_ctl_status {
uint64_t u64;
struct cvmx_iob_ctl_status_s {
- uint64_t reserved_5_63:59;
+ uint64_t reserved_10_63:54;
+ uint64_t xmc_per:4;
+ uint64_t rr_mode:1;
uint64_t outb_mat:1;
uint64_t inb_mat:1;
uint64_t pko_enb:1;
uint64_t dwb_enb:1;
uint64_t fau_end:1;
} s;
- struct cvmx_iob_ctl_status_s cn30xx;
- struct cvmx_iob_ctl_status_s cn31xx;
- struct cvmx_iob_ctl_status_s cn38xx;
- struct cvmx_iob_ctl_status_s cn38xxp2;
- struct cvmx_iob_ctl_status_s cn50xx;
- struct cvmx_iob_ctl_status_s cn52xx;
- struct cvmx_iob_ctl_status_s cn52xxp1;
- struct cvmx_iob_ctl_status_s cn56xx;
- struct cvmx_iob_ctl_status_s cn56xxp1;
- struct cvmx_iob_ctl_status_s cn58xx;
- struct cvmx_iob_ctl_status_s cn58xxp1;
+ struct cvmx_iob_ctl_status_cn30xx {
+ uint64_t reserved_5_63:59;
+ uint64_t outb_mat:1;
+ uint64_t inb_mat:1;
+ uint64_t pko_enb:1;
+ uint64_t dwb_enb:1;
+ uint64_t fau_end:1;
+ } cn30xx;
+ struct cvmx_iob_ctl_status_cn30xx cn31xx;
+ struct cvmx_iob_ctl_status_cn30xx cn38xx;
+ struct cvmx_iob_ctl_status_cn30xx cn38xxp2;
+ struct cvmx_iob_ctl_status_cn30xx cn50xx;
+ struct cvmx_iob_ctl_status_cn52xx {
+ uint64_t reserved_6_63:58;
+ uint64_t rr_mode:1;
+ uint64_t outb_mat:1;
+ uint64_t inb_mat:1;
+ uint64_t pko_enb:1;
+ uint64_t dwb_enb:1;
+ uint64_t fau_end:1;
+ } cn52xx;
+ struct cvmx_iob_ctl_status_cn30xx cn52xxp1;
+ struct cvmx_iob_ctl_status_cn30xx cn56xx;
+ struct cvmx_iob_ctl_status_cn30xx cn56xxp1;
+ struct cvmx_iob_ctl_status_cn30xx cn58xx;
+ struct cvmx_iob_ctl_status_cn30xx cn58xxp1;
+ struct cvmx_iob_ctl_status_s cn63xx;
+ struct cvmx_iob_ctl_status_s cn63xxp1;
};
union cvmx_iob_dwb_pri_cnt {
@@ -147,6 +172,8 @@ union cvmx_iob_dwb_pri_cnt {
struct cvmx_iob_dwb_pri_cnt_s cn56xxp1;
struct cvmx_iob_dwb_pri_cnt_s cn58xx;
struct cvmx_iob_dwb_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_dwb_pri_cnt_s cn63xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn63xxp1;
};
union cvmx_iob_fau_timeout {
@@ -167,6 +194,8 @@ union cvmx_iob_fau_timeout {
struct cvmx_iob_fau_timeout_s cn56xxp1;
struct cvmx_iob_fau_timeout_s cn58xx;
struct cvmx_iob_fau_timeout_s cn58xxp1;
+ struct cvmx_iob_fau_timeout_s cn63xx;
+ struct cvmx_iob_fau_timeout_s cn63xxp1;
};
union cvmx_iob_i2c_pri_cnt {
@@ -184,6 +213,8 @@ union cvmx_iob_i2c_pri_cnt {
struct cvmx_iob_i2c_pri_cnt_s cn56xxp1;
struct cvmx_iob_i2c_pri_cnt_s cn58xx;
struct cvmx_iob_i2c_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_i2c_pri_cnt_s cn63xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn63xxp1;
};
union cvmx_iob_inb_control_match {
@@ -206,6 +237,8 @@ union cvmx_iob_inb_control_match {
struct cvmx_iob_inb_control_match_s cn56xxp1;
struct cvmx_iob_inb_control_match_s cn58xx;
struct cvmx_iob_inb_control_match_s cn58xxp1;
+ struct cvmx_iob_inb_control_match_s cn63xx;
+ struct cvmx_iob_inb_control_match_s cn63xxp1;
};
union cvmx_iob_inb_control_match_enb {
@@ -228,6 +261,8 @@ union cvmx_iob_inb_control_match_enb {
struct cvmx_iob_inb_control_match_enb_s cn56xxp1;
struct cvmx_iob_inb_control_match_enb_s cn58xx;
struct cvmx_iob_inb_control_match_enb_s cn58xxp1;
+ struct cvmx_iob_inb_control_match_enb_s cn63xx;
+ struct cvmx_iob_inb_control_match_enb_s cn63xxp1;
};
union cvmx_iob_inb_data_match {
@@ -246,6 +281,8 @@ union cvmx_iob_inb_data_match {
struct cvmx_iob_inb_data_match_s cn56xxp1;
struct cvmx_iob_inb_data_match_s cn58xx;
struct cvmx_iob_inb_data_match_s cn58xxp1;
+ struct cvmx_iob_inb_data_match_s cn63xx;
+ struct cvmx_iob_inb_data_match_s cn63xxp1;
};
union cvmx_iob_inb_data_match_enb {
@@ -264,6 +301,8 @@ union cvmx_iob_inb_data_match_enb {
struct cvmx_iob_inb_data_match_enb_s cn56xxp1;
struct cvmx_iob_inb_data_match_enb_s cn58xx;
struct cvmx_iob_inb_data_match_enb_s cn58xxp1;
+ struct cvmx_iob_inb_data_match_enb_s cn63xx;
+ struct cvmx_iob_inb_data_match_enb_s cn63xxp1;
};
union cvmx_iob_int_enb {
@@ -294,6 +333,8 @@ union cvmx_iob_int_enb {
struct cvmx_iob_int_enb_s cn56xxp1;
struct cvmx_iob_int_enb_s cn58xx;
struct cvmx_iob_int_enb_s cn58xxp1;
+ struct cvmx_iob_int_enb_s cn63xx;
+ struct cvmx_iob_int_enb_s cn63xxp1;
};
union cvmx_iob_int_sum {
@@ -324,6 +365,8 @@ union cvmx_iob_int_sum {
struct cvmx_iob_int_sum_s cn56xxp1;
struct cvmx_iob_int_sum_s cn58xx;
struct cvmx_iob_int_sum_s cn58xxp1;
+ struct cvmx_iob_int_sum_s cn63xx;
+ struct cvmx_iob_int_sum_s cn63xxp1;
};
union cvmx_iob_n2c_l2c_pri_cnt {
@@ -341,6 +384,8 @@ union cvmx_iob_n2c_l2c_pri_cnt {
struct cvmx_iob_n2c_l2c_pri_cnt_s cn56xxp1;
struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xx;
struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn63xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn63xxp1;
};
union cvmx_iob_n2c_rsp_pri_cnt {
@@ -358,6 +403,8 @@ union cvmx_iob_n2c_rsp_pri_cnt {
struct cvmx_iob_n2c_rsp_pri_cnt_s cn56xxp1;
struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xx;
struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn63xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn63xxp1;
};
union cvmx_iob_outb_com_pri_cnt {
@@ -375,6 +422,8 @@ union cvmx_iob_outb_com_pri_cnt {
struct cvmx_iob_outb_com_pri_cnt_s cn56xxp1;
struct cvmx_iob_outb_com_pri_cnt_s cn58xx;
struct cvmx_iob_outb_com_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_outb_com_pri_cnt_s cn63xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn63xxp1;
};
union cvmx_iob_outb_control_match {
@@ -397,6 +446,8 @@ union cvmx_iob_outb_control_match {
struct cvmx_iob_outb_control_match_s cn56xxp1;
struct cvmx_iob_outb_control_match_s cn58xx;
struct cvmx_iob_outb_control_match_s cn58xxp1;
+ struct cvmx_iob_outb_control_match_s cn63xx;
+ struct cvmx_iob_outb_control_match_s cn63xxp1;
};
union cvmx_iob_outb_control_match_enb {
@@ -419,6 +470,8 @@ union cvmx_iob_outb_control_match_enb {
struct cvmx_iob_outb_control_match_enb_s cn56xxp1;
struct cvmx_iob_outb_control_match_enb_s cn58xx;
struct cvmx_iob_outb_control_match_enb_s cn58xxp1;
+ struct cvmx_iob_outb_control_match_enb_s cn63xx;
+ struct cvmx_iob_outb_control_match_enb_s cn63xxp1;
};
union cvmx_iob_outb_data_match {
@@ -437,6 +490,8 @@ union cvmx_iob_outb_data_match {
struct cvmx_iob_outb_data_match_s cn56xxp1;
struct cvmx_iob_outb_data_match_s cn58xx;
struct cvmx_iob_outb_data_match_s cn58xxp1;
+ struct cvmx_iob_outb_data_match_s cn63xx;
+ struct cvmx_iob_outb_data_match_s cn63xxp1;
};
union cvmx_iob_outb_data_match_enb {
@@ -455,6 +510,8 @@ union cvmx_iob_outb_data_match_enb {
struct cvmx_iob_outb_data_match_enb_s cn56xxp1;
struct cvmx_iob_outb_data_match_enb_s cn58xx;
struct cvmx_iob_outb_data_match_enb_s cn58xxp1;
+ struct cvmx_iob_outb_data_match_enb_s cn63xx;
+ struct cvmx_iob_outb_data_match_enb_s cn63xxp1;
};
union cvmx_iob_outb_fpa_pri_cnt {
@@ -472,6 +529,8 @@ union cvmx_iob_outb_fpa_pri_cnt {
struct cvmx_iob_outb_fpa_pri_cnt_s cn56xxp1;
struct cvmx_iob_outb_fpa_pri_cnt_s cn58xx;
struct cvmx_iob_outb_fpa_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn63xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn63xxp1;
};
union cvmx_iob_outb_req_pri_cnt {
@@ -489,6 +548,8 @@ union cvmx_iob_outb_req_pri_cnt {
struct cvmx_iob_outb_req_pri_cnt_s cn56xxp1;
struct cvmx_iob_outb_req_pri_cnt_s cn58xx;
struct cvmx_iob_outb_req_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_outb_req_pri_cnt_s cn63xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn63xxp1;
};
union cvmx_iob_p2c_req_pri_cnt {
@@ -506,25 +567,46 @@ union cvmx_iob_p2c_req_pri_cnt {
struct cvmx_iob_p2c_req_pri_cnt_s cn56xxp1;
struct cvmx_iob_p2c_req_pri_cnt_s cn58xx;
struct cvmx_iob_p2c_req_pri_cnt_s cn58xxp1;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn63xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn63xxp1;
};
union cvmx_iob_pkt_err {
uint64_t u64;
struct cvmx_iob_pkt_err_s {
+ uint64_t reserved_12_63:52;
+ uint64_t vport:6;
+ uint64_t port:6;
+ } s;
+ struct cvmx_iob_pkt_err_cn30xx {
uint64_t reserved_6_63:58;
uint64_t port:6;
+ } cn30xx;
+ struct cvmx_iob_pkt_err_cn30xx cn31xx;
+ struct cvmx_iob_pkt_err_cn30xx cn38xx;
+ struct cvmx_iob_pkt_err_cn30xx cn38xxp2;
+ struct cvmx_iob_pkt_err_cn30xx cn50xx;
+ struct cvmx_iob_pkt_err_cn30xx cn52xx;
+ struct cvmx_iob_pkt_err_cn30xx cn52xxp1;
+ struct cvmx_iob_pkt_err_cn30xx cn56xx;
+ struct cvmx_iob_pkt_err_cn30xx cn56xxp1;
+ struct cvmx_iob_pkt_err_cn30xx cn58xx;
+ struct cvmx_iob_pkt_err_cn30xx cn58xxp1;
+ struct cvmx_iob_pkt_err_s cn63xx;
+ struct cvmx_iob_pkt_err_s cn63xxp1;
+};
+
+union cvmx_iob_to_cmb_credits {
+ uint64_t u64;
+ struct cvmx_iob_to_cmb_credits_s {
+ uint64_t reserved_9_63:55;
+ uint64_t pko_rd:3;
+ uint64_t ncb_rd:3;
+ uint64_t ncb_wr:3;
} s;
- struct cvmx_iob_pkt_err_s cn30xx;
- struct cvmx_iob_pkt_err_s cn31xx;
- struct cvmx_iob_pkt_err_s cn38xx;
- struct cvmx_iob_pkt_err_s cn38xxp2;
- struct cvmx_iob_pkt_err_s cn50xx;
- struct cvmx_iob_pkt_err_s cn52xx;
- struct cvmx_iob_pkt_err_s cn52xxp1;
- struct cvmx_iob_pkt_err_s cn56xx;
- struct cvmx_iob_pkt_err_s cn56xxp1;
- struct cvmx_iob_pkt_err_s cn58xx;
- struct cvmx_iob_pkt_err_s cn58xxp1;
+ struct cvmx_iob_to_cmb_credits_s cn52xx;
+ struct cvmx_iob_to_cmb_credits_s cn63xx;
+ struct cvmx_iob_to_cmb_credits_s cn63xxp1;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-ipd-defs.h b/arch/mips/include/asm/octeon/cvmx-ipd-defs.h
index f8b8fc657d2..e0a5bfe88d0 100644
--- a/arch/mips/include/asm/octeon/cvmx-ipd-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-ipd-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,104 +28,57 @@
#ifndef __CVMX_IPD_DEFS_H__
#define __CVMX_IPD_DEFS_H__
-#define CVMX_IPD_1ST_MBUFF_SKIP \
- CVMX_ADD_IO_SEG(0x00014F0000000000ull)
-#define CVMX_IPD_1st_NEXT_PTR_BACK \
- CVMX_ADD_IO_SEG(0x00014F0000000150ull)
-#define CVMX_IPD_2nd_NEXT_PTR_BACK \
- CVMX_ADD_IO_SEG(0x00014F0000000158ull)
-#define CVMX_IPD_BIST_STATUS \
- CVMX_ADD_IO_SEG(0x00014F00000007F8ull)
-#define CVMX_IPD_BP_PRT_RED_END \
- CVMX_ADD_IO_SEG(0x00014F0000000328ull)
-#define CVMX_IPD_CLK_COUNT \
- CVMX_ADD_IO_SEG(0x00014F0000000338ull)
-#define CVMX_IPD_CTL_STATUS \
- CVMX_ADD_IO_SEG(0x00014F0000000018ull)
-#define CVMX_IPD_INT_ENB \
- CVMX_ADD_IO_SEG(0x00014F0000000160ull)
-#define CVMX_IPD_INT_SUM \
- CVMX_ADD_IO_SEG(0x00014F0000000168ull)
-#define CVMX_IPD_NOT_1ST_MBUFF_SKIP \
- CVMX_ADD_IO_SEG(0x00014F0000000008ull)
-#define CVMX_IPD_PACKET_MBUFF_SIZE \
- CVMX_ADD_IO_SEG(0x00014F0000000010ull)
-#define CVMX_IPD_PKT_PTR_VALID \
- CVMX_ADD_IO_SEG(0x00014F0000000358ull)
-#define CVMX_IPD_PORTX_BP_PAGE_CNT(offset) \
- CVMX_ADD_IO_SEG(0x00014F0000000028ull + (((offset) & 63) * 8))
-#define CVMX_IPD_PORTX_BP_PAGE_CNT2(offset) \
- CVMX_ADD_IO_SEG(0x00014F0000000368ull + (((offset) & 63) * 8) - 8 * 36)
-#define CVMX_IPD_PORT_BP_COUNTERS2_PAIRX(offset) \
- CVMX_ADD_IO_SEG(0x00014F0000000388ull + (((offset) & 63) * 8) - 8 * 36)
-#define CVMX_IPD_PORT_BP_COUNTERS_PAIRX(offset) \
- CVMX_ADD_IO_SEG(0x00014F00000001B8ull + (((offset) & 63) * 8))
-#define CVMX_IPD_PORT_QOS_INTX(offset) \
- CVMX_ADD_IO_SEG(0x00014F0000000808ull + (((offset) & 7) * 8))
-#define CVMX_IPD_PORT_QOS_INT_ENBX(offset) \
- CVMX_ADD_IO_SEG(0x00014F0000000848ull + (((offset) & 7) * 8))
-#define CVMX_IPD_PORT_QOS_X_CNT(offset) \
- CVMX_ADD_IO_SEG(0x00014F0000000888ull + (((offset) & 511) * 8))
-#define CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL \
- CVMX_ADD_IO_SEG(0x00014F0000000348ull)
-#define CVMX_IPD_PRC_PORT_PTR_FIFO_CTL \
- CVMX_ADD_IO_SEG(0x00014F0000000350ull)
-#define CVMX_IPD_PTR_COUNT \
- CVMX_ADD_IO_SEG(0x00014F0000000320ull)
-#define CVMX_IPD_PWP_PTR_FIFO_CTL \
- CVMX_ADD_IO_SEG(0x00014F0000000340ull)
-#define CVMX_IPD_QOS0_RED_MARKS \
- CVMX_ADD_IO_SEG(0x00014F0000000178ull)
-#define CVMX_IPD_QOS1_RED_MARKS \
- CVMX_ADD_IO_SEG(0x00014F0000000180ull)
-#define CVMX_IPD_QOS2_RED_MARKS \
- CVMX_ADD_IO_SEG(0x00014F0000000188ull)
-#define CVMX_IPD_QOS3_RED_MARKS \
- CVMX_ADD_IO_SEG(0x00014F0000000190ull)
-#define CVMX_IPD_QOS4_RED_MARKS \
- CVMX_ADD_IO_SEG(0x00014F0000000198ull)
-#define CVMX_IPD_QOS5_RED_MARKS \
- CVMX_ADD_IO_SEG(0x00014F00000001A0ull)
-#define CVMX_IPD_QOS6_RED_MARKS \
- CVMX_ADD_IO_SEG(0x00014F00000001A8ull)
-#define CVMX_IPD_QOS7_RED_MARKS \
- CVMX_ADD_IO_SEG(0x00014F00000001B0ull)
-#define CVMX_IPD_QOSX_RED_MARKS(offset) \
- CVMX_ADD_IO_SEG(0x00014F0000000178ull + (((offset) & 7) * 8))
-#define CVMX_IPD_QUE0_FREE_PAGE_CNT \
- CVMX_ADD_IO_SEG(0x00014F0000000330ull)
-#define CVMX_IPD_RED_PORT_ENABLE \
- CVMX_ADD_IO_SEG(0x00014F00000002D8ull)
-#define CVMX_IPD_RED_PORT_ENABLE2 \
- CVMX_ADD_IO_SEG(0x00014F00000003A8ull)
-#define CVMX_IPD_RED_QUE0_PARAM \
- CVMX_ADD_IO_SEG(0x00014F00000002E0ull)
-#define CVMX_IPD_RED_QUE1_PARAM \
- CVMX_ADD_IO_SEG(0x00014F00000002E8ull)
-#define CVMX_IPD_RED_QUE2_PARAM \
- CVMX_ADD_IO_SEG(0x00014F00000002F0ull)
-#define CVMX_IPD_RED_QUE3_PARAM \
- CVMX_ADD_IO_SEG(0x00014F00000002F8ull)
-#define CVMX_IPD_RED_QUE4_PARAM \
- CVMX_ADD_IO_SEG(0x00014F0000000300ull)
-#define CVMX_IPD_RED_QUE5_PARAM \
- CVMX_ADD_IO_SEG(0x00014F0000000308ull)
-#define CVMX_IPD_RED_QUE6_PARAM \
- CVMX_ADD_IO_SEG(0x00014F0000000310ull)
-#define CVMX_IPD_RED_QUE7_PARAM \
- CVMX_ADD_IO_SEG(0x00014F0000000318ull)
-#define CVMX_IPD_RED_QUEX_PARAM(offset) \
- CVMX_ADD_IO_SEG(0x00014F00000002E0ull + (((offset) & 7) * 8))
-#define CVMX_IPD_SUB_PORT_BP_PAGE_CNT \
- CVMX_ADD_IO_SEG(0x00014F0000000148ull)
-#define CVMX_IPD_SUB_PORT_FCS \
- CVMX_ADD_IO_SEG(0x00014F0000000170ull)
-#define CVMX_IPD_SUB_PORT_QOS_CNT \
- CVMX_ADD_IO_SEG(0x00014F0000000800ull)
-#define CVMX_IPD_WQE_FPA_QUEUE \
- CVMX_ADD_IO_SEG(0x00014F0000000020ull)
-#define CVMX_IPD_WQE_PTR_VALID \
- CVMX_ADD_IO_SEG(0x00014F0000000360ull)
+#define CVMX_IPD_1ST_MBUFF_SKIP (CVMX_ADD_IO_SEG(0x00014F0000000000ull))
+#define CVMX_IPD_1st_NEXT_PTR_BACK (CVMX_ADD_IO_SEG(0x00014F0000000150ull))
+#define CVMX_IPD_2nd_NEXT_PTR_BACK (CVMX_ADD_IO_SEG(0x00014F0000000158ull))
+#define CVMX_IPD_BIST_STATUS (CVMX_ADD_IO_SEG(0x00014F00000007F8ull))
+#define CVMX_IPD_BP_PRT_RED_END (CVMX_ADD_IO_SEG(0x00014F0000000328ull))
+#define CVMX_IPD_CLK_COUNT (CVMX_ADD_IO_SEG(0x00014F0000000338ull))
+#define CVMX_IPD_CTL_STATUS (CVMX_ADD_IO_SEG(0x00014F0000000018ull))
+#define CVMX_IPD_INT_ENB (CVMX_ADD_IO_SEG(0x00014F0000000160ull))
+#define CVMX_IPD_INT_SUM (CVMX_ADD_IO_SEG(0x00014F0000000168ull))
+#define CVMX_IPD_NOT_1ST_MBUFF_SKIP (CVMX_ADD_IO_SEG(0x00014F0000000008ull))
+#define CVMX_IPD_PACKET_MBUFF_SIZE (CVMX_ADD_IO_SEG(0x00014F0000000010ull))
+#define CVMX_IPD_PKT_PTR_VALID (CVMX_ADD_IO_SEG(0x00014F0000000358ull))
+#define CVMX_IPD_PORTX_BP_PAGE_CNT(offset) (CVMX_ADD_IO_SEG(0x00014F0000000028ull) + ((offset) & 63) * 8)
+#define CVMX_IPD_PORTX_BP_PAGE_CNT2(offset) (CVMX_ADD_IO_SEG(0x00014F0000000368ull) + ((offset) & 63) * 8 - 8*36)
+#define CVMX_IPD_PORTX_BP_PAGE_CNT3(offset) (CVMX_ADD_IO_SEG(0x00014F00000003D0ull) + ((offset) & 63) * 8 - 8*40)
+#define CVMX_IPD_PORT_BP_COUNTERS2_PAIRX(offset) (CVMX_ADD_IO_SEG(0x00014F0000000388ull) + ((offset) & 63) * 8 - 8*36)
+#define CVMX_IPD_PORT_BP_COUNTERS3_PAIRX(offset) (CVMX_ADD_IO_SEG(0x00014F00000003B0ull) + ((offset) & 63) * 8 - 8*40)
+#define CVMX_IPD_PORT_BP_COUNTERS_PAIRX(offset) (CVMX_ADD_IO_SEG(0x00014F00000001B8ull) + ((offset) & 63) * 8)
+#define CVMX_IPD_PORT_QOS_INTX(offset) (CVMX_ADD_IO_SEG(0x00014F0000000808ull) + ((offset) & 7) * 8)
+#define CVMX_IPD_PORT_QOS_INT_ENBX(offset) (CVMX_ADD_IO_SEG(0x00014F0000000848ull) + ((offset) & 7) * 8)
+#define CVMX_IPD_PORT_QOS_X_CNT(offset) (CVMX_ADD_IO_SEG(0x00014F0000000888ull) + ((offset) & 511) * 8)
+#define CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000348ull))
+#define CVMX_IPD_PRC_PORT_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000350ull))
+#define CVMX_IPD_PTR_COUNT (CVMX_ADD_IO_SEG(0x00014F0000000320ull))
+#define CVMX_IPD_PWP_PTR_FIFO_CTL (CVMX_ADD_IO_SEG(0x00014F0000000340ull))
+#define CVMX_IPD_QOS0_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(0)
+#define CVMX_IPD_QOS1_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(1)
+#define CVMX_IPD_QOS2_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(2)
+#define CVMX_IPD_QOS3_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(3)
+#define CVMX_IPD_QOS4_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(4)
+#define CVMX_IPD_QOS5_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(5)
+#define CVMX_IPD_QOS6_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(6)
+#define CVMX_IPD_QOS7_RED_MARKS CVMX_IPD_QOSX_RED_MARKS(7)
+#define CVMX_IPD_QOSX_RED_MARKS(offset) (CVMX_ADD_IO_SEG(0x00014F0000000178ull) + ((offset) & 7) * 8)
+#define CVMX_IPD_QUE0_FREE_PAGE_CNT (CVMX_ADD_IO_SEG(0x00014F0000000330ull))
+#define CVMX_IPD_RED_PORT_ENABLE (CVMX_ADD_IO_SEG(0x00014F00000002D8ull))
+#define CVMX_IPD_RED_PORT_ENABLE2 (CVMX_ADD_IO_SEG(0x00014F00000003A8ull))
+#define CVMX_IPD_RED_QUE0_PARAM CVMX_IPD_RED_QUEX_PARAM(0)
+#define CVMX_IPD_RED_QUE1_PARAM CVMX_IPD_RED_QUEX_PARAM(1)
+#define CVMX_IPD_RED_QUE2_PARAM CVMX_IPD_RED_QUEX_PARAM(2)
+#define CVMX_IPD_RED_QUE3_PARAM CVMX_IPD_RED_QUEX_PARAM(3)
+#define CVMX_IPD_RED_QUE4_PARAM CVMX_IPD_RED_QUEX_PARAM(4)
+#define CVMX_IPD_RED_QUE5_PARAM CVMX_IPD_RED_QUEX_PARAM(5)
+#define CVMX_IPD_RED_QUE6_PARAM CVMX_IPD_RED_QUEX_PARAM(6)
+#define CVMX_IPD_RED_QUE7_PARAM CVMX_IPD_RED_QUEX_PARAM(7)
+#define CVMX_IPD_RED_QUEX_PARAM(offset) (CVMX_ADD_IO_SEG(0x00014F00000002E0ull) + ((offset) & 7) * 8)
+#define CVMX_IPD_SUB_PORT_BP_PAGE_CNT (CVMX_ADD_IO_SEG(0x00014F0000000148ull))
+#define CVMX_IPD_SUB_PORT_FCS (CVMX_ADD_IO_SEG(0x00014F0000000170ull))
+#define CVMX_IPD_SUB_PORT_QOS_CNT (CVMX_ADD_IO_SEG(0x00014F0000000800ull))
+#define CVMX_IPD_WQE_FPA_QUEUE (CVMX_ADD_IO_SEG(0x00014F0000000020ull))
+#define CVMX_IPD_WQE_PTR_VALID (CVMX_ADD_IO_SEG(0x00014F0000000360ull))
union cvmx_ipd_1st_mbuff_skip {
uint64_t u64;
@@ -144,6 +97,8 @@ union cvmx_ipd_1st_mbuff_skip {
struct cvmx_ipd_1st_mbuff_skip_s cn56xxp1;
struct cvmx_ipd_1st_mbuff_skip_s cn58xx;
struct cvmx_ipd_1st_mbuff_skip_s cn58xxp1;
+ struct cvmx_ipd_1st_mbuff_skip_s cn63xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn63xxp1;
};
union cvmx_ipd_1st_next_ptr_back {
@@ -163,6 +118,8 @@ union cvmx_ipd_1st_next_ptr_back {
struct cvmx_ipd_1st_next_ptr_back_s cn56xxp1;
struct cvmx_ipd_1st_next_ptr_back_s cn58xx;
struct cvmx_ipd_1st_next_ptr_back_s cn58xxp1;
+ struct cvmx_ipd_1st_next_ptr_back_s cn63xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn63xxp1;
};
union cvmx_ipd_2nd_next_ptr_back {
@@ -182,6 +139,8 @@ union cvmx_ipd_2nd_next_ptr_back {
struct cvmx_ipd_2nd_next_ptr_back_s cn56xxp1;
struct cvmx_ipd_2nd_next_ptr_back_s cn58xx;
struct cvmx_ipd_2nd_next_ptr_back_s cn58xxp1;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn63xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn63xxp1;
};
union cvmx_ipd_bist_status {
@@ -236,13 +195,15 @@ union cvmx_ipd_bist_status {
struct cvmx_ipd_bist_status_s cn56xxp1;
struct cvmx_ipd_bist_status_cn30xx cn58xx;
struct cvmx_ipd_bist_status_cn30xx cn58xxp1;
+ struct cvmx_ipd_bist_status_s cn63xx;
+ struct cvmx_ipd_bist_status_s cn63xxp1;
};
union cvmx_ipd_bp_prt_red_end {
uint64_t u64;
struct cvmx_ipd_bp_prt_red_end_s {
- uint64_t reserved_40_63:24;
- uint64_t prt_enb:40;
+ uint64_t reserved_44_63:20;
+ uint64_t prt_enb:44;
} s;
struct cvmx_ipd_bp_prt_red_end_cn30xx {
uint64_t reserved_36_63:28;
@@ -252,12 +213,17 @@ union cvmx_ipd_bp_prt_red_end {
struct cvmx_ipd_bp_prt_red_end_cn30xx cn38xx;
struct cvmx_ipd_bp_prt_red_end_cn30xx cn38xxp2;
struct cvmx_ipd_bp_prt_red_end_cn30xx cn50xx;
- struct cvmx_ipd_bp_prt_red_end_s cn52xx;
- struct cvmx_ipd_bp_prt_red_end_s cn52xxp1;
- struct cvmx_ipd_bp_prt_red_end_s cn56xx;
- struct cvmx_ipd_bp_prt_red_end_s cn56xxp1;
+ struct cvmx_ipd_bp_prt_red_end_cn52xx {
+ uint64_t reserved_40_63:24;
+ uint64_t prt_enb:40;
+ } cn52xx;
+ struct cvmx_ipd_bp_prt_red_end_cn52xx cn52xxp1;
+ struct cvmx_ipd_bp_prt_red_end_cn52xx cn56xx;
+ struct cvmx_ipd_bp_prt_red_end_cn52xx cn56xxp1;
struct cvmx_ipd_bp_prt_red_end_cn30xx cn58xx;
struct cvmx_ipd_bp_prt_red_end_cn30xx cn58xxp1;
+ struct cvmx_ipd_bp_prt_red_end_s cn63xx;
+ struct cvmx_ipd_bp_prt_red_end_s cn63xxp1;
};
union cvmx_ipd_clk_count {
@@ -276,12 +242,17 @@ union cvmx_ipd_clk_count {
struct cvmx_ipd_clk_count_s cn56xxp1;
struct cvmx_ipd_clk_count_s cn58xx;
struct cvmx_ipd_clk_count_s cn58xxp1;
+ struct cvmx_ipd_clk_count_s cn63xx;
+ struct cvmx_ipd_clk_count_s cn63xxp1;
};
union cvmx_ipd_ctl_status {
uint64_t u64;
struct cvmx_ipd_ctl_status_s {
- uint64_t reserved_15_63:49;
+ uint64_t reserved_18_63:46;
+ uint64_t use_sop:1;
+ uint64_t rst_done:1;
+ uint64_t clken:1;
uint64_t no_wptr:1;
uint64_t pq_apkt:1;
uint64_t pq_nabuf:1;
@@ -322,11 +293,27 @@ union cvmx_ipd_ctl_status {
uint64_t opc_mode:2;
uint64_t ipd_en:1;
} cn38xxp2;
- struct cvmx_ipd_ctl_status_s cn50xx;
- struct cvmx_ipd_ctl_status_s cn52xx;
- struct cvmx_ipd_ctl_status_s cn52xxp1;
- struct cvmx_ipd_ctl_status_s cn56xx;
- struct cvmx_ipd_ctl_status_s cn56xxp1;
+ struct cvmx_ipd_ctl_status_cn50xx {
+ uint64_t reserved_15_63:49;
+ uint64_t no_wptr:1;
+ uint64_t pq_apkt:1;
+ uint64_t pq_nabuf:1;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn50xx;
+ struct cvmx_ipd_ctl_status_cn50xx cn52xx;
+ struct cvmx_ipd_ctl_status_cn50xx cn52xxp1;
+ struct cvmx_ipd_ctl_status_cn50xx cn56xx;
+ struct cvmx_ipd_ctl_status_cn50xx cn56xxp1;
struct cvmx_ipd_ctl_status_cn58xx {
uint64_t reserved_12_63:52;
uint64_t ipd_full:1;
@@ -342,6 +329,25 @@ union cvmx_ipd_ctl_status {
uint64_t ipd_en:1;
} cn58xx;
struct cvmx_ipd_ctl_status_cn58xx cn58xxp1;
+ struct cvmx_ipd_ctl_status_s cn63xx;
+ struct cvmx_ipd_ctl_status_cn63xxp1 {
+ uint64_t reserved_16_63:48;
+ uint64_t clken:1;
+ uint64_t no_wptr:1;
+ uint64_t pq_apkt:1;
+ uint64_t pq_nabuf:1;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn63xxp1;
};
union cvmx_ipd_int_enb {
@@ -391,6 +397,8 @@ union cvmx_ipd_int_enb {
struct cvmx_ipd_int_enb_s cn56xxp1;
struct cvmx_ipd_int_enb_cn38xx cn58xx;
struct cvmx_ipd_int_enb_cn38xx cn58xxp1;
+ struct cvmx_ipd_int_enb_s cn63xx;
+ struct cvmx_ipd_int_enb_s cn63xxp1;
};
union cvmx_ipd_int_sum {
@@ -440,6 +448,8 @@ union cvmx_ipd_int_sum {
struct cvmx_ipd_int_sum_s cn56xxp1;
struct cvmx_ipd_int_sum_cn38xx cn58xx;
struct cvmx_ipd_int_sum_cn38xx cn58xxp1;
+ struct cvmx_ipd_int_sum_s cn63xx;
+ struct cvmx_ipd_int_sum_s cn63xxp1;
};
union cvmx_ipd_not_1st_mbuff_skip {
@@ -459,6 +469,8 @@ union cvmx_ipd_not_1st_mbuff_skip {
struct cvmx_ipd_not_1st_mbuff_skip_s cn56xxp1;
struct cvmx_ipd_not_1st_mbuff_skip_s cn58xx;
struct cvmx_ipd_not_1st_mbuff_skip_s cn58xxp1;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn63xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn63xxp1;
};
union cvmx_ipd_packet_mbuff_size {
@@ -478,6 +490,8 @@ union cvmx_ipd_packet_mbuff_size {
struct cvmx_ipd_packet_mbuff_size_s cn56xxp1;
struct cvmx_ipd_packet_mbuff_size_s cn58xx;
struct cvmx_ipd_packet_mbuff_size_s cn58xxp1;
+ struct cvmx_ipd_packet_mbuff_size_s cn63xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn63xxp1;
};
union cvmx_ipd_pkt_ptr_valid {
@@ -496,6 +510,8 @@ union cvmx_ipd_pkt_ptr_valid {
struct cvmx_ipd_pkt_ptr_valid_s cn56xxp1;
struct cvmx_ipd_pkt_ptr_valid_s cn58xx;
struct cvmx_ipd_pkt_ptr_valid_s cn58xxp1;
+ struct cvmx_ipd_pkt_ptr_valid_s cn63xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cn63xxp1;
};
union cvmx_ipd_portx_bp_page_cnt {
@@ -516,6 +532,8 @@ union cvmx_ipd_portx_bp_page_cnt {
struct cvmx_ipd_portx_bp_page_cnt_s cn56xxp1;
struct cvmx_ipd_portx_bp_page_cnt_s cn58xx;
struct cvmx_ipd_portx_bp_page_cnt_s cn58xxp1;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn63xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn63xxp1;
};
union cvmx_ipd_portx_bp_page_cnt2 {
@@ -529,6 +547,19 @@ union cvmx_ipd_portx_bp_page_cnt2 {
struct cvmx_ipd_portx_bp_page_cnt2_s cn52xxp1;
struct cvmx_ipd_portx_bp_page_cnt2_s cn56xx;
struct cvmx_ipd_portx_bp_page_cnt2_s cn56xxp1;
+ struct cvmx_ipd_portx_bp_page_cnt2_s cn63xx;
+ struct cvmx_ipd_portx_bp_page_cnt2_s cn63xxp1;
+};
+
+union cvmx_ipd_portx_bp_page_cnt3 {
+ uint64_t u64;
+ struct cvmx_ipd_portx_bp_page_cnt3_s {
+ uint64_t reserved_18_63:46;
+ uint64_t bp_enb:1;
+ uint64_t page_cnt:17;
+ } s;
+ struct cvmx_ipd_portx_bp_page_cnt3_s cn63xx;
+ struct cvmx_ipd_portx_bp_page_cnt3_s cn63xxp1;
};
union cvmx_ipd_port_bp_counters2_pairx {
@@ -541,6 +572,18 @@ union cvmx_ipd_port_bp_counters2_pairx {
struct cvmx_ipd_port_bp_counters2_pairx_s cn52xxp1;
struct cvmx_ipd_port_bp_counters2_pairx_s cn56xx;
struct cvmx_ipd_port_bp_counters2_pairx_s cn56xxp1;
+ struct cvmx_ipd_port_bp_counters2_pairx_s cn63xx;
+ struct cvmx_ipd_port_bp_counters2_pairx_s cn63xxp1;
+};
+
+union cvmx_ipd_port_bp_counters3_pairx {
+ uint64_t u64;
+ struct cvmx_ipd_port_bp_counters3_pairx_s {
+ uint64_t reserved_25_63:39;
+ uint64_t cnt_val:25;
+ } s;
+ struct cvmx_ipd_port_bp_counters3_pairx_s cn63xx;
+ struct cvmx_ipd_port_bp_counters3_pairx_s cn63xxp1;
};
union cvmx_ipd_port_bp_counters_pairx {
@@ -560,6 +603,8 @@ union cvmx_ipd_port_bp_counters_pairx {
struct cvmx_ipd_port_bp_counters_pairx_s cn56xxp1;
struct cvmx_ipd_port_bp_counters_pairx_s cn58xx;
struct cvmx_ipd_port_bp_counters_pairx_s cn58xxp1;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn63xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn63xxp1;
};
union cvmx_ipd_port_qos_x_cnt {
@@ -572,6 +617,8 @@ union cvmx_ipd_port_qos_x_cnt {
struct cvmx_ipd_port_qos_x_cnt_s cn52xxp1;
struct cvmx_ipd_port_qos_x_cnt_s cn56xx;
struct cvmx_ipd_port_qos_x_cnt_s cn56xxp1;
+ struct cvmx_ipd_port_qos_x_cnt_s cn63xx;
+ struct cvmx_ipd_port_qos_x_cnt_s cn63xxp1;
};
union cvmx_ipd_port_qos_intx {
@@ -583,6 +630,8 @@ union cvmx_ipd_port_qos_intx {
struct cvmx_ipd_port_qos_intx_s cn52xxp1;
struct cvmx_ipd_port_qos_intx_s cn56xx;
struct cvmx_ipd_port_qos_intx_s cn56xxp1;
+ struct cvmx_ipd_port_qos_intx_s cn63xx;
+ struct cvmx_ipd_port_qos_intx_s cn63xxp1;
};
union cvmx_ipd_port_qos_int_enbx {
@@ -594,6 +643,8 @@ union cvmx_ipd_port_qos_int_enbx {
struct cvmx_ipd_port_qos_int_enbx_s cn52xxp1;
struct cvmx_ipd_port_qos_int_enbx_s cn56xx;
struct cvmx_ipd_port_qos_int_enbx_s cn56xxp1;
+ struct cvmx_ipd_port_qos_int_enbx_s cn63xx;
+ struct cvmx_ipd_port_qos_int_enbx_s cn63xxp1;
};
union cvmx_ipd_prc_hold_ptr_fifo_ctl {
@@ -616,6 +667,8 @@ union cvmx_ipd_prc_hold_ptr_fifo_ctl {
struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn56xxp1;
struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn58xx;
struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn58xxp1;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn63xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn63xxp1;
};
union cvmx_ipd_prc_port_ptr_fifo_ctl {
@@ -637,6 +690,8 @@ union cvmx_ipd_prc_port_ptr_fifo_ctl {
struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn56xxp1;
struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn58xx;
struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn58xxp1;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn63xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn63xxp1;
};
union cvmx_ipd_ptr_count {
@@ -660,6 +715,8 @@ union cvmx_ipd_ptr_count {
struct cvmx_ipd_ptr_count_s cn56xxp1;
struct cvmx_ipd_ptr_count_s cn58xx;
struct cvmx_ipd_ptr_count_s cn58xxp1;
+ struct cvmx_ipd_ptr_count_s cn63xx;
+ struct cvmx_ipd_ptr_count_s cn63xxp1;
};
union cvmx_ipd_pwp_ptr_fifo_ctl {
@@ -683,6 +740,8 @@ union cvmx_ipd_pwp_ptr_fifo_ctl {
struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn56xxp1;
struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn58xx;
struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn58xxp1;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn63xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn63xxp1;
};
union cvmx_ipd_qosx_red_marks {
@@ -702,6 +761,8 @@ union cvmx_ipd_qosx_red_marks {
struct cvmx_ipd_qosx_red_marks_s cn56xxp1;
struct cvmx_ipd_qosx_red_marks_s cn58xx;
struct cvmx_ipd_qosx_red_marks_s cn58xxp1;
+ struct cvmx_ipd_qosx_red_marks_s cn63xx;
+ struct cvmx_ipd_qosx_red_marks_s cn63xxp1;
};
union cvmx_ipd_que0_free_page_cnt {
@@ -721,6 +782,8 @@ union cvmx_ipd_que0_free_page_cnt {
struct cvmx_ipd_que0_free_page_cnt_s cn56xxp1;
struct cvmx_ipd_que0_free_page_cnt_s cn58xx;
struct cvmx_ipd_que0_free_page_cnt_s cn58xxp1;
+ struct cvmx_ipd_que0_free_page_cnt_s cn63xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn63xxp1;
};
union cvmx_ipd_red_port_enable {
@@ -741,18 +804,25 @@ union cvmx_ipd_red_port_enable {
struct cvmx_ipd_red_port_enable_s cn56xxp1;
struct cvmx_ipd_red_port_enable_s cn58xx;
struct cvmx_ipd_red_port_enable_s cn58xxp1;
+ struct cvmx_ipd_red_port_enable_s cn63xx;
+ struct cvmx_ipd_red_port_enable_s cn63xxp1;
};
union cvmx_ipd_red_port_enable2 {
uint64_t u64;
struct cvmx_ipd_red_port_enable2_s {
+ uint64_t reserved_8_63:56;
+ uint64_t prt_enb:8;
+ } s;
+ struct cvmx_ipd_red_port_enable2_cn52xx {
uint64_t reserved_4_63:60;
uint64_t prt_enb:4;
- } s;
- struct cvmx_ipd_red_port_enable2_s cn52xx;
- struct cvmx_ipd_red_port_enable2_s cn52xxp1;
- struct cvmx_ipd_red_port_enable2_s cn56xx;
- struct cvmx_ipd_red_port_enable2_s cn56xxp1;
+ } cn52xx;
+ struct cvmx_ipd_red_port_enable2_cn52xx cn52xxp1;
+ struct cvmx_ipd_red_port_enable2_cn52xx cn56xx;
+ struct cvmx_ipd_red_port_enable2_cn52xx cn56xxp1;
+ struct cvmx_ipd_red_port_enable2_s cn63xx;
+ struct cvmx_ipd_red_port_enable2_s cn63xxp1;
};
union cvmx_ipd_red_quex_param {
@@ -775,6 +845,8 @@ union cvmx_ipd_red_quex_param {
struct cvmx_ipd_red_quex_param_s cn56xxp1;
struct cvmx_ipd_red_quex_param_s cn58xx;
struct cvmx_ipd_red_quex_param_s cn58xxp1;
+ struct cvmx_ipd_red_quex_param_s cn63xx;
+ struct cvmx_ipd_red_quex_param_s cn63xxp1;
};
union cvmx_ipd_sub_port_bp_page_cnt {
@@ -795,6 +867,8 @@ union cvmx_ipd_sub_port_bp_page_cnt {
struct cvmx_ipd_sub_port_bp_page_cnt_s cn56xxp1;
struct cvmx_ipd_sub_port_bp_page_cnt_s cn58xx;
struct cvmx_ipd_sub_port_bp_page_cnt_s cn58xxp1;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn63xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn63xxp1;
};
union cvmx_ipd_sub_port_fcs {
@@ -822,6 +896,8 @@ union cvmx_ipd_sub_port_fcs {
struct cvmx_ipd_sub_port_fcs_s cn56xxp1;
struct cvmx_ipd_sub_port_fcs_cn38xx cn58xx;
struct cvmx_ipd_sub_port_fcs_cn38xx cn58xxp1;
+ struct cvmx_ipd_sub_port_fcs_s cn63xx;
+ struct cvmx_ipd_sub_port_fcs_s cn63xxp1;
};
union cvmx_ipd_sub_port_qos_cnt {
@@ -835,6 +911,8 @@ union cvmx_ipd_sub_port_qos_cnt {
struct cvmx_ipd_sub_port_qos_cnt_s cn52xxp1;
struct cvmx_ipd_sub_port_qos_cnt_s cn56xx;
struct cvmx_ipd_sub_port_qos_cnt_s cn56xxp1;
+ struct cvmx_ipd_sub_port_qos_cnt_s cn63xx;
+ struct cvmx_ipd_sub_port_qos_cnt_s cn63xxp1;
};
union cvmx_ipd_wqe_fpa_queue {
@@ -854,6 +932,8 @@ union cvmx_ipd_wqe_fpa_queue {
struct cvmx_ipd_wqe_fpa_queue_s cn56xxp1;
struct cvmx_ipd_wqe_fpa_queue_s cn58xx;
struct cvmx_ipd_wqe_fpa_queue_s cn58xxp1;
+ struct cvmx_ipd_wqe_fpa_queue_s cn63xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn63xxp1;
};
union cvmx_ipd_wqe_ptr_valid {
@@ -872,6 +952,8 @@ union cvmx_ipd_wqe_ptr_valid {
struct cvmx_ipd_wqe_ptr_valid_s cn56xxp1;
struct cvmx_ipd_wqe_ptr_valid_s cn58xx;
struct cvmx_ipd_wqe_ptr_valid_s cn58xxp1;
+ struct cvmx_ipd_wqe_ptr_valid_s cn63xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cn63xxp1;
};
#endif
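
The hunks above split CVMX_IPD_CTL_STATUS into per-model bit-field layouts (the common "s" view plus the cn50xx, cn58xx and cn63xxp1 variants) because newer parts grew fields such as clken, rst_done and use_sop. A minimal usage sketch, not part of the patch: it assumes cvmx_read_csr() and the CVMX_IPD_CTL_STATUS address macro from the surrounding Octeon headers, and ipd_is_enabled() is a hypothetical helper name.

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-ipd-defs.h>

static int ipd_is_enabled(void)
{
	union cvmx_ipd_ctl_status ctl;

	/* Read the 64-bit CSR and overlay the bit-field view. */
	ctl.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);

	/*
	 * ipd_en sits at bit 0 in every model-specific layout in this
	 * file, so the common "s" view is sufficient here.
	 */
	return ctl.s.ipd_en;
}
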
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
index 337583842b5..7a50a0beb47 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,70 +28,113 @@
#ifndef __CVMX_L2C_DEFS_H__
#define __CVMX_L2C_DEFS_H__
-#define CVMX_L2C_BST0 \
- CVMX_ADD_IO_SEG(0x00011800800007F8ull)
-#define CVMX_L2C_BST1 \
- CVMX_ADD_IO_SEG(0x00011800800007F0ull)
-#define CVMX_L2C_BST2 \
- CVMX_ADD_IO_SEG(0x00011800800007E8ull)
-#define CVMX_L2C_CFG \
- CVMX_ADD_IO_SEG(0x0001180080000000ull)
-#define CVMX_L2C_DBG \
- CVMX_ADD_IO_SEG(0x0001180080000030ull)
-#define CVMX_L2C_DUT \
- CVMX_ADD_IO_SEG(0x0001180080000050ull)
-#define CVMX_L2C_GRPWRR0 \
- CVMX_ADD_IO_SEG(0x00011800800000C8ull)
-#define CVMX_L2C_GRPWRR1 \
- CVMX_ADD_IO_SEG(0x00011800800000D0ull)
-#define CVMX_L2C_INT_EN \
- CVMX_ADD_IO_SEG(0x0001180080000100ull)
-#define CVMX_L2C_INT_STAT \
- CVMX_ADD_IO_SEG(0x00011800800000F8ull)
-#define CVMX_L2C_LCKBASE \
- CVMX_ADD_IO_SEG(0x0001180080000058ull)
-#define CVMX_L2C_LCKOFF \
- CVMX_ADD_IO_SEG(0x0001180080000060ull)
-#define CVMX_L2C_LFB0 \
- CVMX_ADD_IO_SEG(0x0001180080000038ull)
-#define CVMX_L2C_LFB1 \
- CVMX_ADD_IO_SEG(0x0001180080000040ull)
-#define CVMX_L2C_LFB2 \
- CVMX_ADD_IO_SEG(0x0001180080000048ull)
-#define CVMX_L2C_LFB3 \
- CVMX_ADD_IO_SEG(0x00011800800000B8ull)
-#define CVMX_L2C_OOB \
- CVMX_ADD_IO_SEG(0x00011800800000D8ull)
-#define CVMX_L2C_OOB1 \
- CVMX_ADD_IO_SEG(0x00011800800000E0ull)
-#define CVMX_L2C_OOB2 \
- CVMX_ADD_IO_SEG(0x00011800800000E8ull)
-#define CVMX_L2C_OOB3 \
- CVMX_ADD_IO_SEG(0x00011800800000F0ull)
-#define CVMX_L2C_PFC0 \
- CVMX_ADD_IO_SEG(0x0001180080000098ull)
-#define CVMX_L2C_PFC1 \
- CVMX_ADD_IO_SEG(0x00011800800000A0ull)
-#define CVMX_L2C_PFC2 \
- CVMX_ADD_IO_SEG(0x00011800800000A8ull)
-#define CVMX_L2C_PFC3 \
- CVMX_ADD_IO_SEG(0x00011800800000B0ull)
-#define CVMX_L2C_PFCTL \
- CVMX_ADD_IO_SEG(0x0001180080000090ull)
-#define CVMX_L2C_PFCX(offset) \
- CVMX_ADD_IO_SEG(0x0001180080000098ull + (((offset) & 3) * 8))
-#define CVMX_L2C_PPGRP \
- CVMX_ADD_IO_SEG(0x00011800800000C0ull)
-#define CVMX_L2C_SPAR0 \
- CVMX_ADD_IO_SEG(0x0001180080000068ull)
-#define CVMX_L2C_SPAR1 \
- CVMX_ADD_IO_SEG(0x0001180080000070ull)
-#define CVMX_L2C_SPAR2 \
- CVMX_ADD_IO_SEG(0x0001180080000078ull)
-#define CVMX_L2C_SPAR3 \
- CVMX_ADD_IO_SEG(0x0001180080000080ull)
-#define CVMX_L2C_SPAR4 \
- CVMX_ADD_IO_SEG(0x0001180080000088ull)
+#define CVMX_L2C_BIG_CTL (CVMX_ADD_IO_SEG(0x0001180080800030ull))
+#define CVMX_L2C_BST (CVMX_ADD_IO_SEG(0x00011800808007F8ull))
+#define CVMX_L2C_BST0 (CVMX_ADD_IO_SEG(0x00011800800007F8ull))
+#define CVMX_L2C_BST1 (CVMX_ADD_IO_SEG(0x00011800800007F0ull))
+#define CVMX_L2C_BST2 (CVMX_ADD_IO_SEG(0x00011800800007E8ull))
+#define CVMX_L2C_BST_MEMX(block_id) (CVMX_ADD_IO_SEG(0x0001180080C007F8ull))
+#define CVMX_L2C_BST_TDTX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007F0ull))
+#define CVMX_L2C_BST_TTGX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007F8ull))
+#define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull))
+#define CVMX_L2C_COP0_MAPX(offset) (CVMX_ADD_IO_SEG(0x0001180080940000ull) + ((offset) & 16383) * 8)
+#define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull))
+#define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull))
+#define CVMX_L2C_DUT (CVMX_ADD_IO_SEG(0x0001180080000050ull))
+#define CVMX_L2C_DUT_MAPX(offset) (CVMX_ADD_IO_SEG(0x0001180080E00000ull) + ((offset) & 2047) * 8)
+#define CVMX_L2C_ERR_TDTX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007E0ull))
+#define CVMX_L2C_ERR_TTGX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007E8ull))
+#define CVMX_L2C_ERR_VBFX(block_id) (CVMX_ADD_IO_SEG(0x0001180080C007F0ull))
+#define CVMX_L2C_ERR_XMC (CVMX_ADD_IO_SEG(0x00011800808007D8ull))
+#define CVMX_L2C_GRPWRR0 (CVMX_ADD_IO_SEG(0x00011800800000C8ull))
+#define CVMX_L2C_GRPWRR1 (CVMX_ADD_IO_SEG(0x00011800800000D0ull))
+#define CVMX_L2C_INT_EN (CVMX_ADD_IO_SEG(0x0001180080000100ull))
+#define CVMX_L2C_INT_ENA (CVMX_ADD_IO_SEG(0x0001180080800020ull))
+#define CVMX_L2C_INT_REG (CVMX_ADD_IO_SEG(0x0001180080800018ull))
+#define CVMX_L2C_INT_STAT (CVMX_ADD_IO_SEG(0x00011800800000F8ull))
+#define CVMX_L2C_IOCX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800420ull))
+#define CVMX_L2C_IORX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800428ull))
+#define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull))
+#define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull))
+#define CVMX_L2C_LFB0 (CVMX_ADD_IO_SEG(0x0001180080000038ull))
+#define CVMX_L2C_LFB1 (CVMX_ADD_IO_SEG(0x0001180080000040ull))
+#define CVMX_L2C_LFB2 (CVMX_ADD_IO_SEG(0x0001180080000048ull))
+#define CVMX_L2C_LFB3 (CVMX_ADD_IO_SEG(0x00011800800000B8ull))
+#define CVMX_L2C_OOB (CVMX_ADD_IO_SEG(0x00011800800000D8ull))
+#define CVMX_L2C_OOB1 (CVMX_ADD_IO_SEG(0x00011800800000E0ull))
+#define CVMX_L2C_OOB2 (CVMX_ADD_IO_SEG(0x00011800800000E8ull))
+#define CVMX_L2C_OOB3 (CVMX_ADD_IO_SEG(0x00011800800000F0ull))
+#define CVMX_L2C_PFC0 CVMX_L2C_PFCX(0)
+#define CVMX_L2C_PFC1 CVMX_L2C_PFCX(1)
+#define CVMX_L2C_PFC2 CVMX_L2C_PFCX(2)
+#define CVMX_L2C_PFC3 CVMX_L2C_PFCX(3)
+#define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull))
+#define CVMX_L2C_PFCX(offset) (CVMX_ADD_IO_SEG(0x0001180080000098ull) + ((offset) & 3) * 8)
+#define CVMX_L2C_PPGRP (CVMX_ADD_IO_SEG(0x00011800800000C0ull))
+#define CVMX_L2C_QOS_IOBX(block_id) (CVMX_ADD_IO_SEG(0x0001180080880200ull))
+#define CVMX_L2C_QOS_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080880000ull) + ((offset) & 7) * 8)
+#define CVMX_L2C_QOS_WGT (CVMX_ADD_IO_SEG(0x0001180080800008ull))
+#define CVMX_L2C_RSCX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800410ull))
+#define CVMX_L2C_RSDX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800418ull))
+#define CVMX_L2C_SPAR0 (CVMX_ADD_IO_SEG(0x0001180080000068ull))
+#define CVMX_L2C_SPAR1 (CVMX_ADD_IO_SEG(0x0001180080000070ull))
+#define CVMX_L2C_SPAR2 (CVMX_ADD_IO_SEG(0x0001180080000078ull))
+#define CVMX_L2C_SPAR3 (CVMX_ADD_IO_SEG(0x0001180080000080ull))
+#define CVMX_L2C_SPAR4 (CVMX_ADD_IO_SEG(0x0001180080000088ull))
+#define CVMX_L2C_TADX_ECC0(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00018ull))
+#define CVMX_L2C_TADX_ECC1(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00020ull))
+#define CVMX_L2C_TADX_IEN(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00000ull))
+#define CVMX_L2C_TADX_INT(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00028ull))
+#define CVMX_L2C_TADX_PFC0(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00400ull))
+#define CVMX_L2C_TADX_PFC1(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00408ull))
+#define CVMX_L2C_TADX_PFC2(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00410ull))
+#define CVMX_L2C_TADX_PFC3(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00418ull))
+#define CVMX_L2C_TADX_PRF(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00008ull))
+#define CVMX_L2C_TADX_TAG(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00010ull))
+#define CVMX_L2C_VER_ID (CVMX_ADD_IO_SEG(0x00011800808007E0ull))
+#define CVMX_L2C_VER_IOB (CVMX_ADD_IO_SEG(0x00011800808007F0ull))
+#define CVMX_L2C_VER_MSC (CVMX_ADD_IO_SEG(0x00011800808007D0ull))
+#define CVMX_L2C_VER_PP (CVMX_ADD_IO_SEG(0x00011800808007E8ull))
+#define CVMX_L2C_VIRTID_IOBX(block_id) (CVMX_ADD_IO_SEG(0x00011800808C0200ull))
+#define CVMX_L2C_VIRTID_PPX(offset) (CVMX_ADD_IO_SEG(0x00011800808C0000ull) + ((offset) & 7) * 8)
+#define CVMX_L2C_VRT_CTL (CVMX_ADD_IO_SEG(0x0001180080800010ull))
+#define CVMX_L2C_VRT_MEMX(offset) (CVMX_ADD_IO_SEG(0x0001180080900000ull) + ((offset) & 1023) * 8)
+#define CVMX_L2C_WPAR_IOBX(block_id) (CVMX_ADD_IO_SEG(0x0001180080840200ull))
+#define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + ((offset) & 7) * 8)
+#define CVMX_L2C_XMCX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800400ull))
+#define CVMX_L2C_XMC_CMD (CVMX_ADD_IO_SEG(0x0001180080800028ull))
+#define CVMX_L2C_XMDX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800408ull))
+
+union cvmx_l2c_big_ctl {
+ uint64_t u64;
+ struct cvmx_l2c_big_ctl_s {
+ uint64_t reserved_8_63:56;
+ uint64_t maxdram:4;
+ uint64_t reserved_1_3:3;
+ uint64_t disable:1;
+ } s;
+ struct cvmx_l2c_big_ctl_s cn63xx;
+};
+
+union cvmx_l2c_bst {
+ uint64_t u64;
+ struct cvmx_l2c_bst_s {
+ uint64_t reserved_38_63:26;
+ uint64_t dutfl:6;
+ uint64_t reserved_17_31:15;
+ uint64_t ioccmdfl:1;
+ uint64_t reserved_13_15:3;
+ uint64_t iocdatfl:1;
+ uint64_t reserved_9_11:3;
+ uint64_t dutresfl:1;
+ uint64_t reserved_5_7:3;
+ uint64_t vrtfl:1;
+ uint64_t reserved_1_3:3;
+ uint64_t tdffl:1;
+ } s;
+ struct cvmx_l2c_bst_s cn63xx;
+ struct cvmx_l2c_bst_s cn63xxp1;
+};
union cvmx_l2c_bst0 {
uint64_t u64;
@@ -253,6 +296,48 @@ union cvmx_l2c_bst2 {
struct cvmx_l2c_bst2_cn56xx cn58xxp1;
};
+union cvmx_l2c_bst_memx {
+ uint64_t u64;
+ struct cvmx_l2c_bst_memx_s {
+ uint64_t start_bist:1;
+ uint64_t clear_bist:1;
+ uint64_t reserved_5_61:57;
+ uint64_t rdffl:1;
+ uint64_t vbffl:4;
+ } s;
+ struct cvmx_l2c_bst_memx_s cn63xx;
+ struct cvmx_l2c_bst_memx_s cn63xxp1;
+};
+
+union cvmx_l2c_bst_tdtx {
+ uint64_t u64;
+ struct cvmx_l2c_bst_tdtx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t fbfrspfl:8;
+ uint64_t sbffl:8;
+ uint64_t fbffl:8;
+ uint64_t l2dfl:8;
+ } s;
+ struct cvmx_l2c_bst_tdtx_s cn63xx;
+ struct cvmx_l2c_bst_tdtx_cn63xxp1 {
+ uint64_t reserved_24_63:40;
+ uint64_t sbffl:8;
+ uint64_t fbffl:8;
+ uint64_t l2dfl:8;
+ } cn63xxp1;
+};
+
+union cvmx_l2c_bst_ttgx {
+ uint64_t u64;
+ struct cvmx_l2c_bst_ttgx_s {
+ uint64_t reserved_17_63:47;
+ uint64_t lrufl:1;
+ uint64_t tagfl:16;
+ } s;
+ struct cvmx_l2c_bst_ttgx_s cn63xx;
+ struct cvmx_l2c_bst_ttgx_s cn63xxp1;
+};
+
union cvmx_l2c_cfg {
uint64_t u64;
struct cvmx_l2c_cfg_s {
@@ -333,6 +418,49 @@ union cvmx_l2c_cfg {
} cn58xxp1;
};
+union cvmx_l2c_cop0_mapx {
+ uint64_t u64;
+ struct cvmx_l2c_cop0_mapx_s {
+ uint64_t data:64;
+ } s;
+ struct cvmx_l2c_cop0_mapx_s cn63xx;
+ struct cvmx_l2c_cop0_mapx_s cn63xxp1;
+};
+
+union cvmx_l2c_ctl {
+ uint64_t u64;
+ struct cvmx_l2c_ctl_s {
+ uint64_t reserved_28_63:36;
+ uint64_t disstgl2i:1;
+ uint64_t l2dfsbe:1;
+ uint64_t l2dfdbe:1;
+ uint64_t discclk:1;
+ uint64_t maxvab:4;
+ uint64_t maxlfb:4;
+ uint64_t rsp_arb_mode:1;
+ uint64_t xmc_arb_mode:1;
+ uint64_t ef_ena:1;
+ uint64_t ef_cnt:7;
+ uint64_t vab_thresh:4;
+ uint64_t disecc:1;
+ uint64_t disidxalias:1;
+ } s;
+ struct cvmx_l2c_ctl_s cn63xx;
+ struct cvmx_l2c_ctl_cn63xxp1 {
+ uint64_t reserved_25_63:39;
+ uint64_t discclk:1;
+ uint64_t maxvab:4;
+ uint64_t maxlfb:4;
+ uint64_t rsp_arb_mode:1;
+ uint64_t xmc_arb_mode:1;
+ uint64_t ef_ena:1;
+ uint64_t ef_cnt:7;
+ uint64_t vab_thresh:4;
+ uint64_t disecc:1;
+ uint64_t disidxalias:1;
+ } cn63xxp1;
+};
+
union cvmx_l2c_dbg {
uint64_t u64;
struct cvmx_l2c_dbg_s {
@@ -349,7 +477,9 @@ union cvmx_l2c_dbg {
uint64_t reserved_13_63:51;
uint64_t lfb_enum:2;
uint64_t lfb_dmp:1;
- uint64_t reserved_5_9:5;
+ uint64_t reserved_7_9:3;
+ uint64_t ppnum:1;
+ uint64_t reserved_5_5:1;
uint64_t set:2;
uint64_t finv:1;
uint64_t l2d:1;
@@ -420,6 +550,79 @@ union cvmx_l2c_dut {
struct cvmx_l2c_dut_s cn58xxp1;
};
+union cvmx_l2c_dut_mapx {
+ uint64_t u64;
+ struct cvmx_l2c_dut_mapx_s {
+ uint64_t reserved_38_63:26;
+ uint64_t tag:28;
+ uint64_t reserved_1_9:9;
+ uint64_t valid:1;
+ } s;
+ struct cvmx_l2c_dut_mapx_s cn63xx;
+ struct cvmx_l2c_dut_mapx_s cn63xxp1;
+};
+
+union cvmx_l2c_err_tdtx {
+ uint64_t u64;
+ struct cvmx_l2c_err_tdtx_s {
+ uint64_t dbe:1;
+ uint64_t sbe:1;
+ uint64_t vdbe:1;
+ uint64_t vsbe:1;
+ uint64_t syn:10;
+ uint64_t reserved_21_49:29;
+ uint64_t wayidx:17;
+ uint64_t reserved_2_3:2;
+ uint64_t type:2;
+ } s;
+ struct cvmx_l2c_err_tdtx_s cn63xx;
+ struct cvmx_l2c_err_tdtx_s cn63xxp1;
+};
+
+union cvmx_l2c_err_ttgx {
+ uint64_t u64;
+ struct cvmx_l2c_err_ttgx_s {
+ uint64_t dbe:1;
+ uint64_t sbe:1;
+ uint64_t noway:1;
+ uint64_t reserved_56_60:5;
+ uint64_t syn:6;
+ uint64_t reserved_21_49:29;
+ uint64_t wayidx:14;
+ uint64_t reserved_2_6:5;
+ uint64_t type:2;
+ } s;
+ struct cvmx_l2c_err_ttgx_s cn63xx;
+ struct cvmx_l2c_err_ttgx_s cn63xxp1;
+};
+
+union cvmx_l2c_err_vbfx {
+ uint64_t u64;
+ struct cvmx_l2c_err_vbfx_s {
+ uint64_t reserved_62_63:2;
+ uint64_t vdbe:1;
+ uint64_t vsbe:1;
+ uint64_t vsyn:10;
+ uint64_t reserved_2_49:48;
+ uint64_t type:2;
+ } s;
+ struct cvmx_l2c_err_vbfx_s cn63xx;
+ struct cvmx_l2c_err_vbfx_s cn63xxp1;
+};
+
+union cvmx_l2c_err_xmc {
+ uint64_t u64;
+ struct cvmx_l2c_err_xmc_s {
+ uint64_t cmd:6;
+ uint64_t reserved_52_57:6;
+ uint64_t sid:4;
+ uint64_t reserved_38_47:10;
+ uint64_t addr:38;
+ } s;
+ struct cvmx_l2c_err_xmc_s cn63xx;
+ struct cvmx_l2c_err_xmc_s cn63xxp1;
+};
+
union cvmx_l2c_grpwrr0 {
uint64_t u64;
struct cvmx_l2c_grpwrr0_s {
@@ -464,6 +667,60 @@ union cvmx_l2c_int_en {
struct cvmx_l2c_int_en_s cn56xxp1;
};
+union cvmx_l2c_int_ena {
+ uint64_t u64;
+ struct cvmx_l2c_int_ena_s {
+ uint64_t reserved_8_63:56;
+ uint64_t bigrd:1;
+ uint64_t bigwr:1;
+ uint64_t vrtpe:1;
+ uint64_t vrtadrng:1;
+ uint64_t vrtidrng:1;
+ uint64_t vrtwr:1;
+ uint64_t holewr:1;
+ uint64_t holerd:1;
+ } s;
+ struct cvmx_l2c_int_ena_s cn63xx;
+ struct cvmx_l2c_int_ena_cn63xxp1 {
+ uint64_t reserved_6_63:58;
+ uint64_t vrtpe:1;
+ uint64_t vrtadrng:1;
+ uint64_t vrtidrng:1;
+ uint64_t vrtwr:1;
+ uint64_t holewr:1;
+ uint64_t holerd:1;
+ } cn63xxp1;
+};
+
+union cvmx_l2c_int_reg {
+ uint64_t u64;
+ struct cvmx_l2c_int_reg_s {
+ uint64_t reserved_17_63:47;
+ uint64_t tad0:1;
+ uint64_t reserved_8_15:8;
+ uint64_t bigrd:1;
+ uint64_t bigwr:1;
+ uint64_t vrtpe:1;
+ uint64_t vrtadrng:1;
+ uint64_t vrtidrng:1;
+ uint64_t vrtwr:1;
+ uint64_t holewr:1;
+ uint64_t holerd:1;
+ } s;
+ struct cvmx_l2c_int_reg_s cn63xx;
+ struct cvmx_l2c_int_reg_cn63xxp1 {
+ uint64_t reserved_17_63:47;
+ uint64_t tad0:1;
+ uint64_t reserved_6_15:10;
+ uint64_t vrtpe:1;
+ uint64_t vrtadrng:1;
+ uint64_t vrtidrng:1;
+ uint64_t vrtwr:1;
+ uint64_t holewr:1;
+ uint64_t holerd:1;
+ } cn63xxp1;
+};
+
union cvmx_l2c_int_stat {
uint64_t u64;
struct cvmx_l2c_int_stat_s {
@@ -484,6 +741,24 @@ union cvmx_l2c_int_stat {
struct cvmx_l2c_int_stat_s cn56xxp1;
};
+union cvmx_l2c_iocx_pfc {
+ uint64_t u64;
+ struct cvmx_l2c_iocx_pfc_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_iocx_pfc_s cn63xx;
+ struct cvmx_l2c_iocx_pfc_s cn63xxp1;
+};
+
+union cvmx_l2c_iorx_pfc {
+ uint64_t u64;
+ struct cvmx_l2c_iorx_pfc_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_iorx_pfc_s cn63xx;
+ struct cvmx_l2c_iorx_pfc_s cn63xxp1;
+};
+
union cvmx_l2c_lckbase {
uint64_t u64;
struct cvmx_l2c_lckbase_s {
@@ -855,6 +1130,59 @@ union cvmx_l2c_ppgrp {
struct cvmx_l2c_ppgrp_s cn56xxp1;
};
+union cvmx_l2c_qos_iobx {
+ uint64_t u64;
+ struct cvmx_l2c_qos_iobx_s {
+ uint64_t reserved_6_63:58;
+ uint64_t dwblvl:2;
+ uint64_t reserved_2_3:2;
+ uint64_t lvl:2;
+ } s;
+ struct cvmx_l2c_qos_iobx_s cn63xx;
+ struct cvmx_l2c_qos_iobx_s cn63xxp1;
+};
+
+union cvmx_l2c_qos_ppx {
+ uint64_t u64;
+ struct cvmx_l2c_qos_ppx_s {
+ uint64_t reserved_2_63:62;
+ uint64_t lvl:2;
+ } s;
+ struct cvmx_l2c_qos_ppx_s cn63xx;
+ struct cvmx_l2c_qos_ppx_s cn63xxp1;
+};
+
+union cvmx_l2c_qos_wgt {
+ uint64_t u64;
+ struct cvmx_l2c_qos_wgt_s {
+ uint64_t reserved_32_63:32;
+ uint64_t wgt3:8;
+ uint64_t wgt2:8;
+ uint64_t wgt1:8;
+ uint64_t wgt0:8;
+ } s;
+ struct cvmx_l2c_qos_wgt_s cn63xx;
+ struct cvmx_l2c_qos_wgt_s cn63xxp1;
+};
+
+union cvmx_l2c_rscx_pfc {
+ uint64_t u64;
+ struct cvmx_l2c_rscx_pfc_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_rscx_pfc_s cn63xx;
+ struct cvmx_l2c_rscx_pfc_s cn63xxp1;
+};
+
+union cvmx_l2c_rsdx_pfc {
+ uint64_t u64;
+ struct cvmx_l2c_rsdx_pfc_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_rsdx_pfc_s cn63xx;
+ struct cvmx_l2c_rsdx_pfc_s cn63xxp1;
+};
+
union cvmx_l2c_spar0 {
uint64_t u64;
struct cvmx_l2c_spar0_s {
@@ -960,4 +1288,282 @@ union cvmx_l2c_spar4 {
struct cvmx_l2c_spar4_s cn58xxp1;
};
+union cvmx_l2c_tadx_ecc0 {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_ecc0_s {
+ uint64_t reserved_58_63:6;
+ uint64_t ow3ecc:10;
+ uint64_t reserved_42_47:6;
+ uint64_t ow2ecc:10;
+ uint64_t reserved_26_31:6;
+ uint64_t ow1ecc:10;
+ uint64_t reserved_10_15:6;
+ uint64_t ow0ecc:10;
+ } s;
+ struct cvmx_l2c_tadx_ecc0_s cn63xx;
+ struct cvmx_l2c_tadx_ecc0_s cn63xxp1;
+};
+
+union cvmx_l2c_tadx_ecc1 {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_ecc1_s {
+ uint64_t reserved_58_63:6;
+ uint64_t ow7ecc:10;
+ uint64_t reserved_42_47:6;
+ uint64_t ow6ecc:10;
+ uint64_t reserved_26_31:6;
+ uint64_t ow5ecc:10;
+ uint64_t reserved_10_15:6;
+ uint64_t ow4ecc:10;
+ } s;
+ struct cvmx_l2c_tadx_ecc1_s cn63xx;
+ struct cvmx_l2c_tadx_ecc1_s cn63xxp1;
+};
+
+union cvmx_l2c_tadx_ien {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_ien_s {
+ uint64_t reserved_9_63:55;
+ uint64_t wrdislmc:1;
+ uint64_t rddislmc:1;
+ uint64_t noway:1;
+ uint64_t vbfdbe:1;
+ uint64_t vbfsbe:1;
+ uint64_t tagdbe:1;
+ uint64_t tagsbe:1;
+ uint64_t l2ddbe:1;
+ uint64_t l2dsbe:1;
+ } s;
+ struct cvmx_l2c_tadx_ien_s cn63xx;
+ struct cvmx_l2c_tadx_ien_cn63xxp1 {
+ uint64_t reserved_7_63:57;
+ uint64_t noway:1;
+ uint64_t vbfdbe:1;
+ uint64_t vbfsbe:1;
+ uint64_t tagdbe:1;
+ uint64_t tagsbe:1;
+ uint64_t l2ddbe:1;
+ uint64_t l2dsbe:1;
+ } cn63xxp1;
+};
+
+union cvmx_l2c_tadx_int {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_int_s {
+ uint64_t reserved_9_63:55;
+ uint64_t wrdislmc:1;
+ uint64_t rddislmc:1;
+ uint64_t noway:1;
+ uint64_t vbfdbe:1;
+ uint64_t vbfsbe:1;
+ uint64_t tagdbe:1;
+ uint64_t tagsbe:1;
+ uint64_t l2ddbe:1;
+ uint64_t l2dsbe:1;
+ } s;
+ struct cvmx_l2c_tadx_int_s cn63xx;
+};
+
+union cvmx_l2c_tadx_pfc0 {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_pfc0_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_tadx_pfc0_s cn63xx;
+ struct cvmx_l2c_tadx_pfc0_s cn63xxp1;
+};
+
+union cvmx_l2c_tadx_pfc1 {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_pfc1_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_tadx_pfc1_s cn63xx;
+ struct cvmx_l2c_tadx_pfc1_s cn63xxp1;
+};
+
+union cvmx_l2c_tadx_pfc2 {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_pfc2_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_tadx_pfc2_s cn63xx;
+ struct cvmx_l2c_tadx_pfc2_s cn63xxp1;
+};
+
+union cvmx_l2c_tadx_pfc3 {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_pfc3_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_tadx_pfc3_s cn63xx;
+ struct cvmx_l2c_tadx_pfc3_s cn63xxp1;
+};
+
+union cvmx_l2c_tadx_prf {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_prf_s {
+ uint64_t reserved_32_63:32;
+ uint64_t cnt3sel:8;
+ uint64_t cnt2sel:8;
+ uint64_t cnt1sel:8;
+ uint64_t cnt0sel:8;
+ } s;
+ struct cvmx_l2c_tadx_prf_s cn63xx;
+ struct cvmx_l2c_tadx_prf_s cn63xxp1;
+};
+
+union cvmx_l2c_tadx_tag {
+ uint64_t u64;
+ struct cvmx_l2c_tadx_tag_s {
+ uint64_t reserved_46_63:18;
+ uint64_t ecc:6;
+ uint64_t reserved_36_39:4;
+ uint64_t tag:19;
+ uint64_t reserved_4_16:13;
+ uint64_t use:1;
+ uint64_t valid:1;
+ uint64_t dirty:1;
+ uint64_t lock:1;
+ } s;
+ struct cvmx_l2c_tadx_tag_s cn63xx;
+ struct cvmx_l2c_tadx_tag_s cn63xxp1;
+};
+
+union cvmx_l2c_ver_id {
+ uint64_t u64;
+ struct cvmx_l2c_ver_id_s {
+ uint64_t mask:64;
+ } s;
+ struct cvmx_l2c_ver_id_s cn63xx;
+ struct cvmx_l2c_ver_id_s cn63xxp1;
+};
+
+union cvmx_l2c_ver_iob {
+ uint64_t u64;
+ struct cvmx_l2c_ver_iob_s {
+ uint64_t reserved_1_63:63;
+ uint64_t mask:1;
+ } s;
+ struct cvmx_l2c_ver_iob_s cn63xx;
+ struct cvmx_l2c_ver_iob_s cn63xxp1;
+};
+
+union cvmx_l2c_ver_msc {
+ uint64_t u64;
+ struct cvmx_l2c_ver_msc_s {
+ uint64_t reserved_2_63:62;
+ uint64_t invl2:1;
+ uint64_t dwb:1;
+ } s;
+ struct cvmx_l2c_ver_msc_s cn63xx;
+};
+
+union cvmx_l2c_ver_pp {
+ uint64_t u64;
+ struct cvmx_l2c_ver_pp_s {
+ uint64_t reserved_6_63:58;
+ uint64_t mask:6;
+ } s;
+ struct cvmx_l2c_ver_pp_s cn63xx;
+ struct cvmx_l2c_ver_pp_s cn63xxp1;
+};
+
+union cvmx_l2c_virtid_iobx {
+ uint64_t u64;
+ struct cvmx_l2c_virtid_iobx_s {
+ uint64_t reserved_14_63:50;
+ uint64_t dwbid:6;
+ uint64_t reserved_6_7:2;
+ uint64_t id:6;
+ } s;
+ struct cvmx_l2c_virtid_iobx_s cn63xx;
+ struct cvmx_l2c_virtid_iobx_s cn63xxp1;
+};
+
+union cvmx_l2c_virtid_ppx {
+ uint64_t u64;
+ struct cvmx_l2c_virtid_ppx_s {
+ uint64_t reserved_6_63:58;
+ uint64_t id:6;
+ } s;
+ struct cvmx_l2c_virtid_ppx_s cn63xx;
+ struct cvmx_l2c_virtid_ppx_s cn63xxp1;
+};
+
+union cvmx_l2c_vrt_ctl {
+ uint64_t u64;
+ struct cvmx_l2c_vrt_ctl_s {
+ uint64_t reserved_9_63:55;
+ uint64_t ooberr:1;
+ uint64_t reserved_7_7:1;
+ uint64_t memsz:3;
+ uint64_t numid:3;
+ uint64_t enable:1;
+ } s;
+ struct cvmx_l2c_vrt_ctl_s cn63xx;
+ struct cvmx_l2c_vrt_ctl_s cn63xxp1;
+};
+
+union cvmx_l2c_vrt_memx {
+ uint64_t u64;
+ struct cvmx_l2c_vrt_memx_s {
+ uint64_t reserved_36_63:28;
+ uint64_t parity:4;
+ uint64_t data:32;
+ } s;
+ struct cvmx_l2c_vrt_memx_s cn63xx;
+ struct cvmx_l2c_vrt_memx_s cn63xxp1;
+};
+
+union cvmx_l2c_wpar_iobx {
+ uint64_t u64;
+ struct cvmx_l2c_wpar_iobx_s {
+ uint64_t reserved_16_63:48;
+ uint64_t mask:16;
+ } s;
+ struct cvmx_l2c_wpar_iobx_s cn63xx;
+ struct cvmx_l2c_wpar_iobx_s cn63xxp1;
+};
+
+union cvmx_l2c_wpar_ppx {
+ uint64_t u64;
+ struct cvmx_l2c_wpar_ppx_s {
+ uint64_t reserved_16_63:48;
+ uint64_t mask:16;
+ } s;
+ struct cvmx_l2c_wpar_ppx_s cn63xx;
+ struct cvmx_l2c_wpar_ppx_s cn63xxp1;
+};
+
+union cvmx_l2c_xmcx_pfc {
+ uint64_t u64;
+ struct cvmx_l2c_xmcx_pfc_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_xmcx_pfc_s cn63xx;
+ struct cvmx_l2c_xmcx_pfc_s cn63xxp1;
+};
+
+union cvmx_l2c_xmc_cmd {
+ uint64_t u64;
+ struct cvmx_l2c_xmc_cmd_s {
+ uint64_t inuse:1;
+ uint64_t cmd:6;
+ uint64_t reserved_38_56:19;
+ uint64_t addr:38;
+ } s;
+ struct cvmx_l2c_xmc_cmd_s cn63xx;
+ struct cvmx_l2c_xmc_cmd_s cn63xxp1;
+};
+
+union cvmx_l2c_xmdx_pfc {
+ uint64_t u64;
+ struct cvmx_l2c_xmdx_pfc_s {
+ uint64_t count:64;
+ } s;
+ struct cvmx_l2c_xmdx_pfc_s cn63xx;
+ struct cvmx_l2c_xmdx_pfc_s cn63xxp1;
+};
+
#endif
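
The rewritten macros above collapse each CSR definition onto one line and add the CN63XX (Octeon II) registers; the indexed forms mask the caller's argument before scaling it (for example "((offset) & 7) * 8"). A hedged sketch of how one of the new per-TAD performance counters might be read; cvmx_read_csr() is assumed from cvmx.h, and l2c_tad0_pfc0() is a hypothetical helper name.

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c-defs.h>

static uint64_t l2c_tad0_pfc0(void)
{
	union cvmx_l2c_tadx_pfc0 pfc;

	/* CN63XX has a single tag-and-data section, so TAD index 0. */
	pfc.u64 = cvmx_read_csr(CVMX_L2C_TADX_PFC0(0));
	return pfc.s.count;
}
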
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c.h b/arch/mips/include/asm/octeon/cvmx-l2c.h
index 2a8c0902ea5..0b32c5b118e 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2c.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2c.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -26,7 +26,6 @@
***********************license end**************************************/
/*
- *
* Interface to the Level 2 Cache (L2C) control, measurement, and debugging
* facilities.
*/
@@ -34,93 +33,126 @@
#ifndef __CVMX_L2C_H__
#define __CVMX_L2C_H__
-/* Deprecated macro, use function */
-#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc()
-
-/* Deprecated macro, use function */
-#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits()
+#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc() /* Deprecated macro, use function */
+#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits() /* Deprecated macro, use function */
+#define CVMX_L2_SETS cvmx_l2c_get_num_sets() /* Deprecated macro, use function */
-/* Deprecated macro, use function */
-#define CVMX_L2_SETS cvmx_l2c_get_num_sets()
#define CVMX_L2C_IDX_ADDR_SHIFT 7 /* based on 128 byte cache line size */
#define CVMX_L2C_IDX_MASK (cvmx_l2c_get_num_sets() - 1)
/* Defines for index aliasing computations */
-#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT \
- (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
+#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
+#define CVMX_L2C_ALIAS_MASK (CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT)
+#define CVMX_L2C_MEMBANK_SELECT_SIZE 4096
-#define CVMX_L2C_ALIAS_MASK \
- (CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT)
+/* Defines for Virtualizations, valid only from Octeon II onwards. */
+#define CVMX_L2C_VRT_MAX_VIRTID_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 64 : 0)
+#define CVMX_L2C_VRT_MAX_MEMSZ_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 32 : 0)
union cvmx_l2c_tag {
uint64_t u64;
struct {
uint64_t reserved:28;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:32; /* Phys mem (not all bits valid) */
} s;
};
+/* Number of L2C Tag-and-data sections (TADs) that are connected to LMC. */
+#define CVMX_L2C_TADS 1
+
/* L2C Performance Counter events. */
enum cvmx_l2c_event {
- CVMX_L2C_EVENT_CYCLES = 0,
- CVMX_L2C_EVENT_INSTRUCTION_MISS = 1,
- CVMX_L2C_EVENT_INSTRUCTION_HIT = 2,
- CVMX_L2C_EVENT_DATA_MISS = 3,
- CVMX_L2C_EVENT_DATA_HIT = 4,
- CVMX_L2C_EVENT_MISS = 5,
- CVMX_L2C_EVENT_HIT = 6,
- CVMX_L2C_EVENT_VICTIM_HIT = 7,
- CVMX_L2C_EVENT_INDEX_CONFLICT = 8,
- CVMX_L2C_EVENT_TAG_PROBE = 9,
- CVMX_L2C_EVENT_TAG_UPDATE = 10,
- CVMX_L2C_EVENT_TAG_COMPLETE = 11,
- CVMX_L2C_EVENT_TAG_DIRTY = 12,
- CVMX_L2C_EVENT_DATA_STORE_NOP = 13,
- CVMX_L2C_EVENT_DATA_STORE_READ = 14,
+ CVMX_L2C_EVENT_CYCLES = 0,
+ CVMX_L2C_EVENT_INSTRUCTION_MISS = 1,
+ CVMX_L2C_EVENT_INSTRUCTION_HIT = 2,
+ CVMX_L2C_EVENT_DATA_MISS = 3,
+ CVMX_L2C_EVENT_DATA_HIT = 4,
+ CVMX_L2C_EVENT_MISS = 5,
+ CVMX_L2C_EVENT_HIT = 6,
+ CVMX_L2C_EVENT_VICTIM_HIT = 7,
+ CVMX_L2C_EVENT_INDEX_CONFLICT = 8,
+ CVMX_L2C_EVENT_TAG_PROBE = 9,
+ CVMX_L2C_EVENT_TAG_UPDATE = 10,
+ CVMX_L2C_EVENT_TAG_COMPLETE = 11,
+ CVMX_L2C_EVENT_TAG_DIRTY = 12,
+ CVMX_L2C_EVENT_DATA_STORE_NOP = 13,
+ CVMX_L2C_EVENT_DATA_STORE_READ = 14,
CVMX_L2C_EVENT_DATA_STORE_WRITE = 15,
- CVMX_L2C_EVENT_FILL_DATA_VALID = 16,
- CVMX_L2C_EVENT_WRITE_REQUEST = 17,
- CVMX_L2C_EVENT_READ_REQUEST = 18,
+ CVMX_L2C_EVENT_FILL_DATA_VALID = 16,
+ CVMX_L2C_EVENT_WRITE_REQUEST = 17,
+ CVMX_L2C_EVENT_READ_REQUEST = 18,
CVMX_L2C_EVENT_WRITE_DATA_VALID = 19,
- CVMX_L2C_EVENT_XMC_NOP = 20,
- CVMX_L2C_EVENT_XMC_LDT = 21,
- CVMX_L2C_EVENT_XMC_LDI = 22,
- CVMX_L2C_EVENT_XMC_LDD = 23,
- CVMX_L2C_EVENT_XMC_STF = 24,
- CVMX_L2C_EVENT_XMC_STT = 25,
- CVMX_L2C_EVENT_XMC_STP = 26,
- CVMX_L2C_EVENT_XMC_STC = 27,
- CVMX_L2C_EVENT_XMC_DWB = 28,
- CVMX_L2C_EVENT_XMC_PL2 = 29,
- CVMX_L2C_EVENT_XMC_PSL1 = 30,
- CVMX_L2C_EVENT_XMC_IOBLD = 31,
- CVMX_L2C_EVENT_XMC_IOBST = 32,
- CVMX_L2C_EVENT_XMC_IOBDMA = 33,
- CVMX_L2C_EVENT_XMC_IOBRSP = 34,
- CVMX_L2C_EVENT_XMC_BUS_VALID = 35,
- CVMX_L2C_EVENT_XMC_MEM_DATA = 36,
- CVMX_L2C_EVENT_XMC_REFL_DATA = 37,
- CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38,
- CVMX_L2C_EVENT_RSC_NOP = 39,
- CVMX_L2C_EVENT_RSC_STDN = 40,
- CVMX_L2C_EVENT_RSC_FILL = 41,
- CVMX_L2C_EVENT_RSC_REFL = 42,
- CVMX_L2C_EVENT_RSC_STIN = 43,
- CVMX_L2C_EVENT_RSC_SCIN = 44,
- CVMX_L2C_EVENT_RSC_SCFL = 45,
- CVMX_L2C_EVENT_RSC_SCDN = 46,
- CVMX_L2C_EVENT_RSC_DATA_VALID = 47,
- CVMX_L2C_EVENT_RSC_VALID_FILL = 48,
- CVMX_L2C_EVENT_RSC_VALID_STRSP = 49,
- CVMX_L2C_EVENT_RSC_VALID_REFL = 50,
- CVMX_L2C_EVENT_LRF_REQ = 51,
- CVMX_L2C_EVENT_DT_RD_ALLOC = 52,
- CVMX_L2C_EVENT_DT_WR_INVAL = 53
+ CVMX_L2C_EVENT_XMC_NOP = 20,
+ CVMX_L2C_EVENT_XMC_LDT = 21,
+ CVMX_L2C_EVENT_XMC_LDI = 22,
+ CVMX_L2C_EVENT_XMC_LDD = 23,
+ CVMX_L2C_EVENT_XMC_STF = 24,
+ CVMX_L2C_EVENT_XMC_STT = 25,
+ CVMX_L2C_EVENT_XMC_STP = 26,
+ CVMX_L2C_EVENT_XMC_STC = 27,
+ CVMX_L2C_EVENT_XMC_DWB = 28,
+ CVMX_L2C_EVENT_XMC_PL2 = 29,
+ CVMX_L2C_EVENT_XMC_PSL1 = 30,
+ CVMX_L2C_EVENT_XMC_IOBLD = 31,
+ CVMX_L2C_EVENT_XMC_IOBST = 32,
+ CVMX_L2C_EVENT_XMC_IOBDMA = 33,
+ CVMX_L2C_EVENT_XMC_IOBRSP = 34,
+ CVMX_L2C_EVENT_XMC_BUS_VALID = 35,
+ CVMX_L2C_EVENT_XMC_MEM_DATA = 36,
+ CVMX_L2C_EVENT_XMC_REFL_DATA = 37,
+ CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38,
+ CVMX_L2C_EVENT_RSC_NOP = 39,
+ CVMX_L2C_EVENT_RSC_STDN = 40,
+ CVMX_L2C_EVENT_RSC_FILL = 41,
+ CVMX_L2C_EVENT_RSC_REFL = 42,
+ CVMX_L2C_EVENT_RSC_STIN = 43,
+ CVMX_L2C_EVENT_RSC_SCIN = 44,
+ CVMX_L2C_EVENT_RSC_SCFL = 45,
+ CVMX_L2C_EVENT_RSC_SCDN = 46,
+ CVMX_L2C_EVENT_RSC_DATA_VALID = 47,
+ CVMX_L2C_EVENT_RSC_VALID_FILL = 48,
+ CVMX_L2C_EVENT_RSC_VALID_STRSP = 49,
+ CVMX_L2C_EVENT_RSC_VALID_REFL = 50,
+ CVMX_L2C_EVENT_LRF_REQ = 51,
+ CVMX_L2C_EVENT_DT_RD_ALLOC = 52,
+ CVMX_L2C_EVENT_DT_WR_INVAL = 53,
+ CVMX_L2C_EVENT_MAX
+};
+
+/* L2C Performance Counter events for Octeon2. */
+enum cvmx_l2c_tad_event {
+ CVMX_L2C_TAD_EVENT_NONE = 0,
+ CVMX_L2C_TAD_EVENT_TAG_HIT = 1,
+ CVMX_L2C_TAD_EVENT_TAG_MISS = 2,
+ CVMX_L2C_TAD_EVENT_TAG_NOALLOC = 3,
+ CVMX_L2C_TAD_EVENT_TAG_VICTIM = 4,
+ CVMX_L2C_TAD_EVENT_SC_FAIL = 5,
+ CVMX_L2C_TAD_EVENT_SC_PASS = 6,
+ CVMX_L2C_TAD_EVENT_LFB_VALID = 7,
+ CVMX_L2C_TAD_EVENT_LFB_WAIT_LFB = 8,
+ CVMX_L2C_TAD_EVENT_LFB_WAIT_VAB = 9,
+ CVMX_L2C_TAD_EVENT_QUAD0_INDEX = 128,
+ CVMX_L2C_TAD_EVENT_QUAD0_READ = 129,
+ CVMX_L2C_TAD_EVENT_QUAD0_BANK = 130,
+ CVMX_L2C_TAD_EVENT_QUAD0_WDAT = 131,
+ CVMX_L2C_TAD_EVENT_QUAD1_INDEX = 144,
+ CVMX_L2C_TAD_EVENT_QUAD1_READ = 145,
+ CVMX_L2C_TAD_EVENT_QUAD1_BANK = 146,
+ CVMX_L2C_TAD_EVENT_QUAD1_WDAT = 147,
+ CVMX_L2C_TAD_EVENT_QUAD2_INDEX = 160,
+ CVMX_L2C_TAD_EVENT_QUAD2_READ = 161,
+ CVMX_L2C_TAD_EVENT_QUAD2_BANK = 162,
+ CVMX_L2C_TAD_EVENT_QUAD2_WDAT = 163,
+ CVMX_L2C_TAD_EVENT_QUAD3_INDEX = 176,
+ CVMX_L2C_TAD_EVENT_QUAD3_READ = 177,
+ CVMX_L2C_TAD_EVENT_QUAD3_BANK = 178,
+ CVMX_L2C_TAD_EVENT_QUAD3_WDAT = 179,
+ CVMX_L2C_TAD_EVENT_MAX
};
/**
@@ -132,10 +164,10 @@ enum cvmx_l2c_event {
* @clear_on_read: When asserted, any read of the performance counter
* clears the counter.
*
- * The routine does not clear the counter.
+ * @note The routine does not clear the counter.
*/
-void cvmx_l2c_config_perf(uint32_t counter,
- enum cvmx_l2c_event event, uint32_t clear_on_read);
+void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event, uint32_t clear_on_read);
+
/**
* Read the given L2 Cache performance counter. The counter must be configured
* before reading, but this routine does not enforce this requirement.
@@ -160,18 +192,18 @@ int cvmx_l2c_get_core_way_partition(uint32_t core);
/**
* Partitions the L2 cache for a core
*
- * @core: The core that the partitioning applies to.
+ * @core: The core that the partitioning applies to.
+ * @mask: The partitioning of the ways expressed as a binary
+ * mask. A 0 bit allows the core to evict cache lines from
+ * a way, while a 1 bit blocks the core from evicting any
+ * lines from that way. There must be at least one allowed
+ * way (0 bit) in the mask.
*
- * @mask: The partitioning of the ways expressed as a binary mask. A 0
- * bit allows the core to evict cache lines from a way, while a
- * 1 bit blocks the core from evicting any lines from that
- * way. There must be at least one allowed way (0 bit) in the
- * mask.
- *
- * If any ways are blocked for all cores and the HW blocks, then those
- * ways will never have any cache lines evicted from them. All cores
- * and the hardware blocks are free to read from all ways regardless
- * of the partitioning.
+
+ * @note If any ways are blocked for all cores and the HW blocks, then
+ * those ways will never have any cache lines evicted from them.
+ * All cores and the hardware blocks are free to read from all
+ * ways regardless of the partitioning.
*/
int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);
@@ -187,19 +219,21 @@ int cvmx_l2c_get_hw_way_partition(void);
/**
* Partitions the L2 cache for the hardware blocks.
*
- * @mask: The partitioning of the ways expressed as a binary mask. A 0
- * bit allows the core to evict cache lines from a way, while a
- * 1 bit blocks the core from evicting any lines from that
- * way. There must be at least one allowed way (0 bit) in the
- * mask.
+ * @mask: The partitioning of the ways expressed as a binary
+ * mask. A 0 bit allows the core to evict cache lines from
+ * a way, while a 1 bit blocks the core from evicting any
+ * lines from that way. There must be at least one allowed
+ * way (0 bit) in the mask.
*
- * If any ways are blocked for all cores and the HW blocks, then those
- * ways will never have any cache lines evicted from them. All cores
- * and the hardware blocks are free to read from all ways regardless
- * of the partitioning.
+
+ * @note If any ways are blocked for all cores and the HW blocks, then
+ * those ways will never have any cache lines evicted from them.
+ * All cores and the hardware blocks are free to read from all
+ * ways regardless of the partitioning.
*/
int cvmx_l2c_set_hw_way_partition(uint32_t mask);
+
/**
* Locks a line in the L2 cache at the specified physical address
*
@@ -263,13 +297,14 @@ int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len);
*/
union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index);
-/* Wrapper around deprecated old function name */
-static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association,
- uint32_t index)
+/* Wrapper providing a deprecated old function name */
+static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association, uint32_t index) __attribute__((deprecated));
+static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association, uint32_t index)
{
return cvmx_l2c_get_tag(association, index);
}
+
/**
* Returns the cache index for a given physical address
*
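
The reflowed comment blocks in this hunk describe the way-partition mask: a 1 bit blocks the core (or the hardware blocks) from evicting lines out of that way, a 0 bit allows it, and at least one way must remain allowed. A hedged example, not from the patch, that restricts a core to the lower half of the ways using the declarations above (cvmx_l2c_get_num_assoc() and cvmx_l2c_set_core_way_partition()); restrict_core_to_lower_ways() is a hypothetical helper name.

#include <asm/octeon/cvmx-l2c.h>

static int restrict_core_to_lower_ways(uint32_t core)
{
	uint32_t assoc = cvmx_l2c_get_num_assoc();

	/*
	 * 1 bits block eviction: block the upper half of the ways and
	 * leave the lower half (0 bits) usable by this core.  Octeon L2
	 * associativity is well below 32, so the shifts are safe.
	 */
	uint32_t mask = ((1u << assoc) - 1) & ~((1u << (assoc / 2)) - 1);

	return cvmx_l2c_set_core_way_partition(core, mask);
}
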
diff --git a/arch/mips/include/asm/octeon/cvmx-l2d-defs.h b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
index d7102d455e1..60543e0e77f 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,30 +28,18 @@
#ifndef __CVMX_L2D_DEFS_H__
#define __CVMX_L2D_DEFS_H__
-#define CVMX_L2D_BST0 \
- CVMX_ADD_IO_SEG(0x0001180080000780ull)
-#define CVMX_L2D_BST1 \
- CVMX_ADD_IO_SEG(0x0001180080000788ull)
-#define CVMX_L2D_BST2 \
- CVMX_ADD_IO_SEG(0x0001180080000790ull)
-#define CVMX_L2D_BST3 \
- CVMX_ADD_IO_SEG(0x0001180080000798ull)
-#define CVMX_L2D_ERR \
- CVMX_ADD_IO_SEG(0x0001180080000010ull)
-#define CVMX_L2D_FADR \
- CVMX_ADD_IO_SEG(0x0001180080000018ull)
-#define CVMX_L2D_FSYN0 \
- CVMX_ADD_IO_SEG(0x0001180080000020ull)
-#define CVMX_L2D_FSYN1 \
- CVMX_ADD_IO_SEG(0x0001180080000028ull)
-#define CVMX_L2D_FUS0 \
- CVMX_ADD_IO_SEG(0x00011800800007A0ull)
-#define CVMX_L2D_FUS1 \
- CVMX_ADD_IO_SEG(0x00011800800007A8ull)
-#define CVMX_L2D_FUS2 \
- CVMX_ADD_IO_SEG(0x00011800800007B0ull)
-#define CVMX_L2D_FUS3 \
- CVMX_ADD_IO_SEG(0x00011800800007B8ull)
+#define CVMX_L2D_BST0 (CVMX_ADD_IO_SEG(0x0001180080000780ull))
+#define CVMX_L2D_BST1 (CVMX_ADD_IO_SEG(0x0001180080000788ull))
+#define CVMX_L2D_BST2 (CVMX_ADD_IO_SEG(0x0001180080000790ull))
+#define CVMX_L2D_BST3 (CVMX_ADD_IO_SEG(0x0001180080000798ull))
+#define CVMX_L2D_ERR (CVMX_ADD_IO_SEG(0x0001180080000010ull))
+#define CVMX_L2D_FADR (CVMX_ADD_IO_SEG(0x0001180080000018ull))
+#define CVMX_L2D_FSYN0 (CVMX_ADD_IO_SEG(0x0001180080000020ull))
+#define CVMX_L2D_FSYN1 (CVMX_ADD_IO_SEG(0x0001180080000028ull))
+#define CVMX_L2D_FUS0 (CVMX_ADD_IO_SEG(0x00011800800007A0ull))
+#define CVMX_L2D_FUS1 (CVMX_ADD_IO_SEG(0x00011800800007A8ull))
+#define CVMX_L2D_FUS2 (CVMX_ADD_IO_SEG(0x00011800800007B0ull))
+#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
union cvmx_l2d_bst0 {
uint64_t u64;
diff --git a/arch/mips/include/asm/octeon/cvmx-l2t-defs.h b/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
index 2639a3f5ffc..873968f55ee 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,8 +28,7 @@
#ifndef __CVMX_L2T_DEFS_H__
#define __CVMX_L2T_DEFS_H__
-#define CVMX_L2T_ERR \
- CVMX_ADD_IO_SEG(0x0001180080000008ull)
+#define CVMX_L2T_ERR (CVMX_ADD_IO_SEG(0x0001180080000008ull))
union cvmx_l2t_err {
uint64_t u64;
diff --git a/arch/mips/include/asm/octeon/cvmx-led-defs.h b/arch/mips/include/asm/octeon/cvmx-led-defs.h
index 16f174a4dad..e25173bb8bb 100644
--- a/arch/mips/include/asm/octeon/cvmx-led-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-led-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,32 +28,19 @@
#ifndef __CVMX_LED_DEFS_H__
#define __CVMX_LED_DEFS_H__
-#define CVMX_LED_BLINK \
- CVMX_ADD_IO_SEG(0x0001180000001A48ull)
-#define CVMX_LED_CLK_PHASE \
- CVMX_ADD_IO_SEG(0x0001180000001A08ull)
-#define CVMX_LED_CYLON \
- CVMX_ADD_IO_SEG(0x0001180000001AF8ull)
-#define CVMX_LED_DBG \
- CVMX_ADD_IO_SEG(0x0001180000001A18ull)
-#define CVMX_LED_EN \
- CVMX_ADD_IO_SEG(0x0001180000001A00ull)
-#define CVMX_LED_POLARITY \
- CVMX_ADD_IO_SEG(0x0001180000001A50ull)
-#define CVMX_LED_PRT \
- CVMX_ADD_IO_SEG(0x0001180000001A10ull)
-#define CVMX_LED_PRT_FMT \
- CVMX_ADD_IO_SEG(0x0001180000001A30ull)
-#define CVMX_LED_PRT_STATUSX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001A80ull + (((offset) & 7) * 8))
-#define CVMX_LED_UDD_CNTX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001A20ull + (((offset) & 1) * 8))
-#define CVMX_LED_UDD_DATX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001A38ull + (((offset) & 1) * 8))
-#define CVMX_LED_UDD_DAT_CLRX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001AC8ull + (((offset) & 1) * 16))
-#define CVMX_LED_UDD_DAT_SETX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001AC0ull + (((offset) & 1) * 16))
+#define CVMX_LED_BLINK (CVMX_ADD_IO_SEG(0x0001180000001A48ull))
+#define CVMX_LED_CLK_PHASE (CVMX_ADD_IO_SEG(0x0001180000001A08ull))
+#define CVMX_LED_CYLON (CVMX_ADD_IO_SEG(0x0001180000001AF8ull))
+#define CVMX_LED_DBG (CVMX_ADD_IO_SEG(0x0001180000001A18ull))
+#define CVMX_LED_EN (CVMX_ADD_IO_SEG(0x0001180000001A00ull))
+#define CVMX_LED_POLARITY (CVMX_ADD_IO_SEG(0x0001180000001A50ull))
+#define CVMX_LED_PRT (CVMX_ADD_IO_SEG(0x0001180000001A10ull))
+#define CVMX_LED_PRT_FMT (CVMX_ADD_IO_SEG(0x0001180000001A30ull))
+#define CVMX_LED_PRT_STATUSX(offset) (CVMX_ADD_IO_SEG(0x0001180000001A80ull) + ((offset) & 7) * 8)
+#define CVMX_LED_UDD_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001180000001A20ull) + ((offset) & 1) * 8)
+#define CVMX_LED_UDD_DATX(offset) (CVMX_ADD_IO_SEG(0x0001180000001A38ull) + ((offset) & 1) * 8)
+#define CVMX_LED_UDD_DAT_CLRX(offset) (CVMX_ADD_IO_SEG(0x0001180000001AC8ull) + ((offset) & 1) * 16)
+#define CVMX_LED_UDD_DAT_SETX(offset) (CVMX_ADD_IO_SEG(0x0001180000001AC0ull) + ((offset) & 1) * 16)
union cvmx_led_blink {
uint64_t u64;
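
As in the other files touched by this patch, the LED CSR macros are reduced to one line each, and the indexed forms bound the index before scaling it (for example "((offset) & 7) * 8" for CVMX_LED_PRT_STATUSX). A hedged sketch that dumps the per-port status registers; cvmx_read_csr() is assumed from cvmx.h, and dump_led_port_status() is a hypothetical helper name.

#include <linux/kernel.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-led-defs.h>

static void dump_led_port_status(void)
{
	int port;

	/* The address macro masks the index with "& 7", so ports 0-7 only. */
	for (port = 0; port < 8; port++)
		pr_info("LED_PRT_STATUS%d = 0x%016llx\n", port,
			(unsigned long long)cvmx_read_csr(CVMX_LED_PRT_STATUSX(port)));
}
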
diff --git a/arch/mips/include/asm/octeon/cvmx-mio-defs.h b/arch/mips/include/asm/octeon/cvmx-mio-defs.h
index 6555f053098..52b14a333ad 100644
--- a/arch/mips/include/asm/octeon/cvmx-mio-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-mio-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,191 +28,117 @@
#ifndef __CVMX_MIO_DEFS_H__
#define __CVMX_MIO_DEFS_H__
-#define CVMX_MIO_BOOT_BIST_STAT \
- CVMX_ADD_IO_SEG(0x00011800000000F8ull)
-#define CVMX_MIO_BOOT_COMP \
- CVMX_ADD_IO_SEG(0x00011800000000B8ull)
-#define CVMX_MIO_BOOT_DMA_CFGX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000100ull + (((offset) & 3) * 8))
-#define CVMX_MIO_BOOT_DMA_INTX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000138ull + (((offset) & 3) * 8))
-#define CVMX_MIO_BOOT_DMA_INT_ENX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000150ull + (((offset) & 3) * 8))
-#define CVMX_MIO_BOOT_DMA_TIMX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000120ull + (((offset) & 3) * 8))
-#define CVMX_MIO_BOOT_ERR \
- CVMX_ADD_IO_SEG(0x00011800000000A0ull)
-#define CVMX_MIO_BOOT_INT \
- CVMX_ADD_IO_SEG(0x00011800000000A8ull)
-#define CVMX_MIO_BOOT_LOC_ADR \
- CVMX_ADD_IO_SEG(0x0001180000000090ull)
-#define CVMX_MIO_BOOT_LOC_CFGX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000080ull + (((offset) & 1) * 8))
-#define CVMX_MIO_BOOT_LOC_DAT \
- CVMX_ADD_IO_SEG(0x0001180000000098ull)
-#define CVMX_MIO_BOOT_PIN_DEFS \
- CVMX_ADD_IO_SEG(0x00011800000000C0ull)
-#define CVMX_MIO_BOOT_REG_CFGX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000000ull + (((offset) & 7) * 8))
-#define CVMX_MIO_BOOT_REG_TIMX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000040ull + (((offset) & 7) * 8))
-#define CVMX_MIO_BOOT_THR \
- CVMX_ADD_IO_SEG(0x00011800000000B0ull)
-#define CVMX_MIO_FUS_BNK_DATX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001520ull + (((offset) & 3) * 8))
-#define CVMX_MIO_FUS_DAT0 \
- CVMX_ADD_IO_SEG(0x0001180000001400ull)
-#define CVMX_MIO_FUS_DAT1 \
- CVMX_ADD_IO_SEG(0x0001180000001408ull)
-#define CVMX_MIO_FUS_DAT2 \
- CVMX_ADD_IO_SEG(0x0001180000001410ull)
-#define CVMX_MIO_FUS_DAT3 \
- CVMX_ADD_IO_SEG(0x0001180000001418ull)
-#define CVMX_MIO_FUS_EMA \
- CVMX_ADD_IO_SEG(0x0001180000001550ull)
-#define CVMX_MIO_FUS_PDF \
- CVMX_ADD_IO_SEG(0x0001180000001420ull)
-#define CVMX_MIO_FUS_PLL \
- CVMX_ADD_IO_SEG(0x0001180000001580ull)
-#define CVMX_MIO_FUS_PROG \
- CVMX_ADD_IO_SEG(0x0001180000001510ull)
-#define CVMX_MIO_FUS_PROG_TIMES \
- CVMX_ADD_IO_SEG(0x0001180000001518ull)
-#define CVMX_MIO_FUS_RCMD \
- CVMX_ADD_IO_SEG(0x0001180000001500ull)
-#define CVMX_MIO_FUS_SPR_REPAIR_RES \
- CVMX_ADD_IO_SEG(0x0001180000001548ull)
-#define CVMX_MIO_FUS_SPR_REPAIR_SUM \
- CVMX_ADD_IO_SEG(0x0001180000001540ull)
-#define CVMX_MIO_FUS_UNLOCK \
- CVMX_ADD_IO_SEG(0x0001180000001578ull)
-#define CVMX_MIO_FUS_WADR \
- CVMX_ADD_IO_SEG(0x0001180000001508ull)
-#define CVMX_MIO_NDF_DMA_CFG \
- CVMX_ADD_IO_SEG(0x0001180000000168ull)
-#define CVMX_MIO_NDF_DMA_INT \
- CVMX_ADD_IO_SEG(0x0001180000000170ull)
-#define CVMX_MIO_NDF_DMA_INT_EN \
- CVMX_ADD_IO_SEG(0x0001180000000178ull)
-#define CVMX_MIO_PLL_CTL \
- CVMX_ADD_IO_SEG(0x0001180000001448ull)
-#define CVMX_MIO_PLL_SETTING \
- CVMX_ADD_IO_SEG(0x0001180000001440ull)
-#define CVMX_MIO_TWSX_INT(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001010ull + (((offset) & 1) * 512))
-#define CVMX_MIO_TWSX_SW_TWSI(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001000ull + (((offset) & 1) * 512))
-#define CVMX_MIO_TWSX_SW_TWSI_EXT(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001018ull + (((offset) & 1) * 512))
-#define CVMX_MIO_TWSX_TWSI_SW(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001008ull + (((offset) & 1) * 512))
-#define CVMX_MIO_UART2_DLH \
- CVMX_ADD_IO_SEG(0x0001180000000488ull)
-#define CVMX_MIO_UART2_DLL \
- CVMX_ADD_IO_SEG(0x0001180000000480ull)
-#define CVMX_MIO_UART2_FAR \
- CVMX_ADD_IO_SEG(0x0001180000000520ull)
-#define CVMX_MIO_UART2_FCR \
- CVMX_ADD_IO_SEG(0x0001180000000450ull)
-#define CVMX_MIO_UART2_HTX \
- CVMX_ADD_IO_SEG(0x0001180000000708ull)
-#define CVMX_MIO_UART2_IER \
- CVMX_ADD_IO_SEG(0x0001180000000408ull)
-#define CVMX_MIO_UART2_IIR \
- CVMX_ADD_IO_SEG(0x0001180000000410ull)
-#define CVMX_MIO_UART2_LCR \
- CVMX_ADD_IO_SEG(0x0001180000000418ull)
-#define CVMX_MIO_UART2_LSR \
- CVMX_ADD_IO_SEG(0x0001180000000428ull)
-#define CVMX_MIO_UART2_MCR \
- CVMX_ADD_IO_SEG(0x0001180000000420ull)
-#define CVMX_MIO_UART2_MSR \
- CVMX_ADD_IO_SEG(0x0001180000000430ull)
-#define CVMX_MIO_UART2_RBR \
- CVMX_ADD_IO_SEG(0x0001180000000400ull)
-#define CVMX_MIO_UART2_RFL \
- CVMX_ADD_IO_SEG(0x0001180000000608ull)
-#define CVMX_MIO_UART2_RFW \
- CVMX_ADD_IO_SEG(0x0001180000000530ull)
-#define CVMX_MIO_UART2_SBCR \
- CVMX_ADD_IO_SEG(0x0001180000000620ull)
-#define CVMX_MIO_UART2_SCR \
- CVMX_ADD_IO_SEG(0x0001180000000438ull)
-#define CVMX_MIO_UART2_SFE \
- CVMX_ADD_IO_SEG(0x0001180000000630ull)
-#define CVMX_MIO_UART2_SRR \
- CVMX_ADD_IO_SEG(0x0001180000000610ull)
-#define CVMX_MIO_UART2_SRT \
- CVMX_ADD_IO_SEG(0x0001180000000638ull)
-#define CVMX_MIO_UART2_SRTS \
- CVMX_ADD_IO_SEG(0x0001180000000618ull)
-#define CVMX_MIO_UART2_STT \
- CVMX_ADD_IO_SEG(0x0001180000000700ull)
-#define CVMX_MIO_UART2_TFL \
- CVMX_ADD_IO_SEG(0x0001180000000600ull)
-#define CVMX_MIO_UART2_TFR \
- CVMX_ADD_IO_SEG(0x0001180000000528ull)
-#define CVMX_MIO_UART2_THR \
- CVMX_ADD_IO_SEG(0x0001180000000440ull)
-#define CVMX_MIO_UART2_USR \
- CVMX_ADD_IO_SEG(0x0001180000000538ull)
-#define CVMX_MIO_UARTX_DLH(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000888ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_DLL(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000880ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_FAR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000920ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_FCR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000850ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_HTX(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000B08ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_IER(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000808ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_IIR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000810ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_LCR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000818ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_LSR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000828ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_MCR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000820ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_MSR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000830ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_RBR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000800ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_RFL(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000A08ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_RFW(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000930ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_SBCR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000A20ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_SCR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000838ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_SFE(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000A30ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_SRR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000A10ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_SRT(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000A38ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_SRTS(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000A18ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_STT(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000B00ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_TFL(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000A00ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_TFR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000928ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_THR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000840ull + (((offset) & 1) * 1024))
-#define CVMX_MIO_UARTX_USR(offset) \
- CVMX_ADD_IO_SEG(0x0001180000000938ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_BOOT_BIST_STAT (CVMX_ADD_IO_SEG(0x00011800000000F8ull))
+#define CVMX_MIO_BOOT_COMP (CVMX_ADD_IO_SEG(0x00011800000000B8ull))
+#define CVMX_MIO_BOOT_DMA_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001180000000100ull) + ((offset) & 3) * 8)
+#define CVMX_MIO_BOOT_DMA_INTX(offset) (CVMX_ADD_IO_SEG(0x0001180000000138ull) + ((offset) & 3) * 8)
+#define CVMX_MIO_BOOT_DMA_INT_ENX(offset) (CVMX_ADD_IO_SEG(0x0001180000000150ull) + ((offset) & 3) * 8)
+#define CVMX_MIO_BOOT_DMA_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001180000000120ull) + ((offset) & 3) * 8)
+#define CVMX_MIO_BOOT_ERR (CVMX_ADD_IO_SEG(0x00011800000000A0ull))
+#define CVMX_MIO_BOOT_INT (CVMX_ADD_IO_SEG(0x00011800000000A8ull))
+#define CVMX_MIO_BOOT_LOC_ADR (CVMX_ADD_IO_SEG(0x0001180000000090ull))
+#define CVMX_MIO_BOOT_LOC_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001180000000080ull) + ((offset) & 1) * 8)
+#define CVMX_MIO_BOOT_LOC_DAT (CVMX_ADD_IO_SEG(0x0001180000000098ull))
+#define CVMX_MIO_BOOT_PIN_DEFS (CVMX_ADD_IO_SEG(0x00011800000000C0ull))
+#define CVMX_MIO_BOOT_REG_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001180000000000ull) + ((offset) & 7) * 8)
+#define CVMX_MIO_BOOT_REG_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001180000000040ull) + ((offset) & 7) * 8)
+#define CVMX_MIO_BOOT_THR (CVMX_ADD_IO_SEG(0x00011800000000B0ull))
+#define CVMX_MIO_FUS_BNK_DATX(offset) (CVMX_ADD_IO_SEG(0x0001180000001520ull) + ((offset) & 3) * 8)
+#define CVMX_MIO_FUS_DAT0 (CVMX_ADD_IO_SEG(0x0001180000001400ull))
+#define CVMX_MIO_FUS_DAT1 (CVMX_ADD_IO_SEG(0x0001180000001408ull))
+#define CVMX_MIO_FUS_DAT2 (CVMX_ADD_IO_SEG(0x0001180000001410ull))
+#define CVMX_MIO_FUS_DAT3 (CVMX_ADD_IO_SEG(0x0001180000001418ull))
+#define CVMX_MIO_FUS_EMA (CVMX_ADD_IO_SEG(0x0001180000001550ull))
+#define CVMX_MIO_FUS_PDF (CVMX_ADD_IO_SEG(0x0001180000001420ull))
+#define CVMX_MIO_FUS_PLL (CVMX_ADD_IO_SEG(0x0001180000001580ull))
+#define CVMX_MIO_FUS_PROG (CVMX_ADD_IO_SEG(0x0001180000001510ull))
+#define CVMX_MIO_FUS_PROG_TIMES (CVMX_ADD_IO_SEG(0x0001180000001518ull))
+#define CVMX_MIO_FUS_RCMD (CVMX_ADD_IO_SEG(0x0001180000001500ull))
+#define CVMX_MIO_FUS_READ_TIMES (CVMX_ADD_IO_SEG(0x0001180000001570ull))
+#define CVMX_MIO_FUS_REPAIR_RES0 (CVMX_ADD_IO_SEG(0x0001180000001558ull))
+#define CVMX_MIO_FUS_REPAIR_RES1 (CVMX_ADD_IO_SEG(0x0001180000001560ull))
+#define CVMX_MIO_FUS_REPAIR_RES2 (CVMX_ADD_IO_SEG(0x0001180000001568ull))
+#define CVMX_MIO_FUS_SPR_REPAIR_RES (CVMX_ADD_IO_SEG(0x0001180000001548ull))
+#define CVMX_MIO_FUS_SPR_REPAIR_SUM (CVMX_ADD_IO_SEG(0x0001180000001540ull))
+#define CVMX_MIO_FUS_UNLOCK (CVMX_ADD_IO_SEG(0x0001180000001578ull))
+#define CVMX_MIO_FUS_WADR (CVMX_ADD_IO_SEG(0x0001180000001508ull))
+#define CVMX_MIO_GPIO_COMP (CVMX_ADD_IO_SEG(0x00011800000000C8ull))
+#define CVMX_MIO_NDF_DMA_CFG (CVMX_ADD_IO_SEG(0x0001180000000168ull))
+#define CVMX_MIO_NDF_DMA_INT (CVMX_ADD_IO_SEG(0x0001180000000170ull))
+#define CVMX_MIO_NDF_DMA_INT_EN (CVMX_ADD_IO_SEG(0x0001180000000178ull))
+#define CVMX_MIO_PLL_CTL (CVMX_ADD_IO_SEG(0x0001180000001448ull))
+#define CVMX_MIO_PLL_SETTING (CVMX_ADD_IO_SEG(0x0001180000001440ull))
+#define CVMX_MIO_PTP_CLOCK_CFG (CVMX_ADD_IO_SEG(0x0001070000000F00ull))
+#define CVMX_MIO_PTP_CLOCK_COMP (CVMX_ADD_IO_SEG(0x0001070000000F18ull))
+#define CVMX_MIO_PTP_CLOCK_HI (CVMX_ADD_IO_SEG(0x0001070000000F10ull))
+#define CVMX_MIO_PTP_CLOCK_LO (CVMX_ADD_IO_SEG(0x0001070000000F08ull))
+#define CVMX_MIO_PTP_EVT_CNT (CVMX_ADD_IO_SEG(0x0001070000000F28ull))
+#define CVMX_MIO_PTP_TIMESTAMP (CVMX_ADD_IO_SEG(0x0001070000000F20ull))
+#define CVMX_MIO_RST_BOOT (CVMX_ADD_IO_SEG(0x0001180000001600ull))
+#define CVMX_MIO_RST_CFG (CVMX_ADD_IO_SEG(0x0001180000001610ull))
+#define CVMX_MIO_RST_CTLX(offset) (CVMX_ADD_IO_SEG(0x0001180000001618ull) + ((offset) & 1) * 8)
+#define CVMX_MIO_RST_DELAY (CVMX_ADD_IO_SEG(0x0001180000001608ull))
+#define CVMX_MIO_RST_INT (CVMX_ADD_IO_SEG(0x0001180000001628ull))
+#define CVMX_MIO_RST_INT_EN (CVMX_ADD_IO_SEG(0x0001180000001630ull))
+#define CVMX_MIO_TWSX_INT(offset) (CVMX_ADD_IO_SEG(0x0001180000001010ull) + ((offset) & 1) * 512)
+#define CVMX_MIO_TWSX_SW_TWSI(offset) (CVMX_ADD_IO_SEG(0x0001180000001000ull) + ((offset) & 1) * 512)
+#define CVMX_MIO_TWSX_SW_TWSI_EXT(offset) (CVMX_ADD_IO_SEG(0x0001180000001018ull) + ((offset) & 1) * 512)
+#define CVMX_MIO_TWSX_TWSI_SW(offset) (CVMX_ADD_IO_SEG(0x0001180000001008ull) + ((offset) & 1) * 512)
+#define CVMX_MIO_UART2_DLH (CVMX_ADD_IO_SEG(0x0001180000000488ull))
+#define CVMX_MIO_UART2_DLL (CVMX_ADD_IO_SEG(0x0001180000000480ull))
+#define CVMX_MIO_UART2_FAR (CVMX_ADD_IO_SEG(0x0001180000000520ull))
+#define CVMX_MIO_UART2_FCR (CVMX_ADD_IO_SEG(0x0001180000000450ull))
+#define CVMX_MIO_UART2_HTX (CVMX_ADD_IO_SEG(0x0001180000000708ull))
+#define CVMX_MIO_UART2_IER (CVMX_ADD_IO_SEG(0x0001180000000408ull))
+#define CVMX_MIO_UART2_IIR (CVMX_ADD_IO_SEG(0x0001180000000410ull))
+#define CVMX_MIO_UART2_LCR (CVMX_ADD_IO_SEG(0x0001180000000418ull))
+#define CVMX_MIO_UART2_LSR (CVMX_ADD_IO_SEG(0x0001180000000428ull))
+#define CVMX_MIO_UART2_MCR (CVMX_ADD_IO_SEG(0x0001180000000420ull))
+#define CVMX_MIO_UART2_MSR (CVMX_ADD_IO_SEG(0x0001180000000430ull))
+#define CVMX_MIO_UART2_RBR (CVMX_ADD_IO_SEG(0x0001180000000400ull))
+#define CVMX_MIO_UART2_RFL (CVMX_ADD_IO_SEG(0x0001180000000608ull))
+#define CVMX_MIO_UART2_RFW (CVMX_ADD_IO_SEG(0x0001180000000530ull))
+#define CVMX_MIO_UART2_SBCR (CVMX_ADD_IO_SEG(0x0001180000000620ull))
+#define CVMX_MIO_UART2_SCR (CVMX_ADD_IO_SEG(0x0001180000000438ull))
+#define CVMX_MIO_UART2_SFE (CVMX_ADD_IO_SEG(0x0001180000000630ull))
+#define CVMX_MIO_UART2_SRR (CVMX_ADD_IO_SEG(0x0001180000000610ull))
+#define CVMX_MIO_UART2_SRT (CVMX_ADD_IO_SEG(0x0001180000000638ull))
+#define CVMX_MIO_UART2_SRTS (CVMX_ADD_IO_SEG(0x0001180000000618ull))
+#define CVMX_MIO_UART2_STT (CVMX_ADD_IO_SEG(0x0001180000000700ull))
+#define CVMX_MIO_UART2_TFL (CVMX_ADD_IO_SEG(0x0001180000000600ull))
+#define CVMX_MIO_UART2_TFR (CVMX_ADD_IO_SEG(0x0001180000000528ull))
+#define CVMX_MIO_UART2_THR (CVMX_ADD_IO_SEG(0x0001180000000440ull))
+#define CVMX_MIO_UART2_USR (CVMX_ADD_IO_SEG(0x0001180000000538ull))
+#define CVMX_MIO_UARTX_DLH(offset) (CVMX_ADD_IO_SEG(0x0001180000000888ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_DLL(offset) (CVMX_ADD_IO_SEG(0x0001180000000880ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_FAR(offset) (CVMX_ADD_IO_SEG(0x0001180000000920ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_FCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000850ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_HTX(offset) (CVMX_ADD_IO_SEG(0x0001180000000B08ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_IER(offset) (CVMX_ADD_IO_SEG(0x0001180000000808ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_IIR(offset) (CVMX_ADD_IO_SEG(0x0001180000000810ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_LCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000818ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_LSR(offset) (CVMX_ADD_IO_SEG(0x0001180000000828ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_MCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000820ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_MSR(offset) (CVMX_ADD_IO_SEG(0x0001180000000830ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_RBR(offset) (CVMX_ADD_IO_SEG(0x0001180000000800ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_RFL(offset) (CVMX_ADD_IO_SEG(0x0001180000000A08ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_RFW(offset) (CVMX_ADD_IO_SEG(0x0001180000000930ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_SBCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000A20ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_SCR(offset) (CVMX_ADD_IO_SEG(0x0001180000000838ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_SFE(offset) (CVMX_ADD_IO_SEG(0x0001180000000A30ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_SRR(offset) (CVMX_ADD_IO_SEG(0x0001180000000A10ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_SRT(offset) (CVMX_ADD_IO_SEG(0x0001180000000A38ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_SRTS(offset) (CVMX_ADD_IO_SEG(0x0001180000000A18ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_STT(offset) (CVMX_ADD_IO_SEG(0x0001180000000B00ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_TFL(offset) (CVMX_ADD_IO_SEG(0x0001180000000A00ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_TFR(offset) (CVMX_ADD_IO_SEG(0x0001180000000928ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_THR(offset) (CVMX_ADD_IO_SEG(0x0001180000000840ull) + ((offset) & 1) * 1024)
+#define CVMX_MIO_UARTX_USR(offset) (CVMX_ADD_IO_SEG(0x0001180000000938ull) + ((offset) & 1) * 1024)
union cvmx_mio_boot_bist_stat {
uint64_t u64;
struct cvmx_mio_boot_bist_stat_s {
- uint64_t reserved_2_63:62;
- uint64_t loc:1;
- uint64_t ncbi:1;
+ uint64_t reserved_0_63:64;
} s;
struct cvmx_mio_boot_bist_stat_cn30xx {
uint64_t reserved_4_63:60;
@@ -257,20 +183,33 @@ union cvmx_mio_boot_bist_stat {
struct cvmx_mio_boot_bist_stat_cn52xxp1 cn56xxp1;
struct cvmx_mio_boot_bist_stat_cn38xx cn58xx;
struct cvmx_mio_boot_bist_stat_cn38xx cn58xxp1;
+ struct cvmx_mio_boot_bist_stat_cn63xx {
+ uint64_t reserved_9_63:55;
+ uint64_t stat:9;
+ } cn63xx;
+ struct cvmx_mio_boot_bist_stat_cn63xx cn63xxp1;
};
union cvmx_mio_boot_comp {
uint64_t u64;
struct cvmx_mio_boot_comp_s {
+ uint64_t reserved_0_63:64;
+ } s;
+ struct cvmx_mio_boot_comp_cn50xx {
uint64_t reserved_10_63:54;
uint64_t pctl:5;
uint64_t nctl:5;
- } s;
- struct cvmx_mio_boot_comp_s cn50xx;
- struct cvmx_mio_boot_comp_s cn52xx;
- struct cvmx_mio_boot_comp_s cn52xxp1;
- struct cvmx_mio_boot_comp_s cn56xx;
- struct cvmx_mio_boot_comp_s cn56xxp1;
+ } cn50xx;
+ struct cvmx_mio_boot_comp_cn50xx cn52xx;
+ struct cvmx_mio_boot_comp_cn50xx cn52xxp1;
+ struct cvmx_mio_boot_comp_cn50xx cn56xx;
+ struct cvmx_mio_boot_comp_cn50xx cn56xxp1;
+ struct cvmx_mio_boot_comp_cn63xx {
+ uint64_t reserved_12_63:52;
+ uint64_t pctl:6;
+ uint64_t nctl:6;
+ } cn63xx;
+ struct cvmx_mio_boot_comp_cn63xx cn63xxp1;
};
union cvmx_mio_boot_dma_cfgx {
@@ -291,6 +230,8 @@ union cvmx_mio_boot_dma_cfgx {
struct cvmx_mio_boot_dma_cfgx_s cn52xxp1;
struct cvmx_mio_boot_dma_cfgx_s cn56xx;
struct cvmx_mio_boot_dma_cfgx_s cn56xxp1;
+ struct cvmx_mio_boot_dma_cfgx_s cn63xx;
+ struct cvmx_mio_boot_dma_cfgx_s cn63xxp1;
};
union cvmx_mio_boot_dma_intx {
@@ -304,6 +245,8 @@ union cvmx_mio_boot_dma_intx {
struct cvmx_mio_boot_dma_intx_s cn52xxp1;
struct cvmx_mio_boot_dma_intx_s cn56xx;
struct cvmx_mio_boot_dma_intx_s cn56xxp1;
+ struct cvmx_mio_boot_dma_intx_s cn63xx;
+ struct cvmx_mio_boot_dma_intx_s cn63xxp1;
};
union cvmx_mio_boot_dma_int_enx {
@@ -317,6 +260,8 @@ union cvmx_mio_boot_dma_int_enx {
struct cvmx_mio_boot_dma_int_enx_s cn52xxp1;
struct cvmx_mio_boot_dma_int_enx_s cn56xx;
struct cvmx_mio_boot_dma_int_enx_s cn56xxp1;
+ struct cvmx_mio_boot_dma_int_enx_s cn63xx;
+ struct cvmx_mio_boot_dma_int_enx_s cn63xxp1;
};
union cvmx_mio_boot_dma_timx {
@@ -342,6 +287,8 @@ union cvmx_mio_boot_dma_timx {
struct cvmx_mio_boot_dma_timx_s cn52xxp1;
struct cvmx_mio_boot_dma_timx_s cn56xx;
struct cvmx_mio_boot_dma_timx_s cn56xxp1;
+ struct cvmx_mio_boot_dma_timx_s cn63xx;
+ struct cvmx_mio_boot_dma_timx_s cn63xxp1;
};
union cvmx_mio_boot_err {
@@ -362,6 +309,8 @@ union cvmx_mio_boot_err {
struct cvmx_mio_boot_err_s cn56xxp1;
struct cvmx_mio_boot_err_s cn58xx;
struct cvmx_mio_boot_err_s cn58xxp1;
+ struct cvmx_mio_boot_err_s cn63xx;
+ struct cvmx_mio_boot_err_s cn63xxp1;
};
union cvmx_mio_boot_int {
@@ -382,6 +331,8 @@ union cvmx_mio_boot_int {
struct cvmx_mio_boot_int_s cn56xxp1;
struct cvmx_mio_boot_int_s cn58xx;
struct cvmx_mio_boot_int_s cn58xxp1;
+ struct cvmx_mio_boot_int_s cn63xx;
+ struct cvmx_mio_boot_int_s cn63xxp1;
};
union cvmx_mio_boot_loc_adr {
@@ -402,6 +353,8 @@ union cvmx_mio_boot_loc_adr {
struct cvmx_mio_boot_loc_adr_s cn56xxp1;
struct cvmx_mio_boot_loc_adr_s cn58xx;
struct cvmx_mio_boot_loc_adr_s cn58xxp1;
+ struct cvmx_mio_boot_loc_adr_s cn63xx;
+ struct cvmx_mio_boot_loc_adr_s cn63xxp1;
};
union cvmx_mio_boot_loc_cfgx {
@@ -424,6 +377,8 @@ union cvmx_mio_boot_loc_cfgx {
struct cvmx_mio_boot_loc_cfgx_s cn56xxp1;
struct cvmx_mio_boot_loc_cfgx_s cn58xx;
struct cvmx_mio_boot_loc_cfgx_s cn58xxp1;
+ struct cvmx_mio_boot_loc_cfgx_s cn63xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn63xxp1;
};
union cvmx_mio_boot_loc_dat {
@@ -442,6 +397,8 @@ union cvmx_mio_boot_loc_dat {
struct cvmx_mio_boot_loc_dat_s cn56xxp1;
struct cvmx_mio_boot_loc_dat_s cn58xx;
struct cvmx_mio_boot_loc_dat_s cn58xxp1;
+ struct cvmx_mio_boot_loc_dat_s cn63xx;
+ struct cvmx_mio_boot_loc_dat_s cn63xxp1;
};
union cvmx_mio_boot_pin_defs {
@@ -478,6 +435,8 @@ union cvmx_mio_boot_pin_defs {
uint64_t term:2;
uint64_t reserved_0_8:9;
} cn56xx;
+ struct cvmx_mio_boot_pin_defs_cn52xx cn63xx;
+ struct cvmx_mio_boot_pin_defs_cn52xx cn63xxp1;
};
union cvmx_mio_boot_reg_cfgx {
@@ -539,6 +498,8 @@ union cvmx_mio_boot_reg_cfgx {
struct cvmx_mio_boot_reg_cfgx_s cn56xxp1;
struct cvmx_mio_boot_reg_cfgx_cn30xx cn58xx;
struct cvmx_mio_boot_reg_cfgx_cn30xx cn58xxp1;
+ struct cvmx_mio_boot_reg_cfgx_s cn63xx;
+ struct cvmx_mio_boot_reg_cfgx_s cn63xxp1;
};
union cvmx_mio_boot_reg_timx {
@@ -583,6 +544,8 @@ union cvmx_mio_boot_reg_timx {
struct cvmx_mio_boot_reg_timx_s cn56xxp1;
struct cvmx_mio_boot_reg_timx_s cn58xx;
struct cvmx_mio_boot_reg_timx_s cn58xxp1;
+ struct cvmx_mio_boot_reg_timx_s cn63xx;
+ struct cvmx_mio_boot_reg_timx_s cn63xxp1;
};
union cvmx_mio_boot_thr {
@@ -611,6 +574,8 @@ union cvmx_mio_boot_thr {
struct cvmx_mio_boot_thr_s cn56xxp1;
struct cvmx_mio_boot_thr_cn30xx cn58xx;
struct cvmx_mio_boot_thr_cn30xx cn58xxp1;
+ struct cvmx_mio_boot_thr_s cn63xx;
+ struct cvmx_mio_boot_thr_s cn63xxp1;
};
union cvmx_mio_fus_bnk_datx {
@@ -625,6 +590,8 @@ union cvmx_mio_fus_bnk_datx {
struct cvmx_mio_fus_bnk_datx_s cn56xxp1;
struct cvmx_mio_fus_bnk_datx_s cn58xx;
struct cvmx_mio_fus_bnk_datx_s cn58xxp1;
+ struct cvmx_mio_fus_bnk_datx_s cn63xx;
+ struct cvmx_mio_fus_bnk_datx_s cn63xxp1;
};
union cvmx_mio_fus_dat0 {
@@ -644,6 +611,8 @@ union cvmx_mio_fus_dat0 {
struct cvmx_mio_fus_dat0_s cn56xxp1;
struct cvmx_mio_fus_dat0_s cn58xx;
struct cvmx_mio_fus_dat0_s cn58xxp1;
+ struct cvmx_mio_fus_dat0_s cn63xx;
+ struct cvmx_mio_fus_dat0_s cn63xxp1;
};
union cvmx_mio_fus_dat1 {
@@ -663,12 +632,15 @@ union cvmx_mio_fus_dat1 {
struct cvmx_mio_fus_dat1_s cn56xxp1;
struct cvmx_mio_fus_dat1_s cn58xx;
struct cvmx_mio_fus_dat1_s cn58xxp1;
+ struct cvmx_mio_fus_dat1_s cn63xx;
+ struct cvmx_mio_fus_dat1_s cn63xxp1;
};
union cvmx_mio_fus_dat2 {
uint64_t u64;
struct cvmx_mio_fus_dat2_s {
- uint64_t reserved_34_63:30;
+ uint64_t reserved_35_63:29;
+ uint64_t dorm_crypto:1;
uint64_t fus318:1;
uint64_t raid_en:1;
uint64_t reserved_30_31:2;
@@ -775,14 +747,38 @@ union cvmx_mio_fus_dat2 {
uint64_t pp_dis:16;
} cn58xx;
struct cvmx_mio_fus_dat2_cn58xx cn58xxp1;
+ struct cvmx_mio_fus_dat2_cn63xx {
+ uint64_t reserved_35_63:29;
+ uint64_t dorm_crypto:1;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_29_31:3;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t reserved_24_25:2;
+ uint64_t chip_id:8;
+ uint64_t reserved_6_15:10;
+ uint64_t pp_dis:6;
+ } cn63xx;
+ struct cvmx_mio_fus_dat2_cn63xx cn63xxp1;
};
union cvmx_mio_fus_dat3 {
uint64_t u64;
struct cvmx_mio_fus_dat3_s {
- uint64_t reserved_32_63:32;
+ uint64_t reserved_58_63:6;
+ uint64_t pll_ctl:10;
+ uint64_t dfa_info_dte:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t reserved_40_40:1;
+ uint64_t ema:2;
+ uint64_t efus_lck_rsv:1;
+ uint64_t efus_lck_man:1;
+ uint64_t pll_half_dis:1;
+ uint64_t l2c_crip:3;
uint64_t pll_div4:1;
- uint64_t zip_crip:2;
+ uint64_t reserved_29_30:2;
uint64_t bar2_en:1;
uint64_t efus_lck:1;
uint64_t efus_ign:1;
@@ -801,7 +797,17 @@ union cvmx_mio_fus_dat3 {
uint64_t nodfa_dte:1;
uint64_t icache:24;
} cn30xx;
- struct cvmx_mio_fus_dat3_s cn31xx;
+ struct cvmx_mio_fus_dat3_cn31xx {
+ uint64_t reserved_32_63:32;
+ uint64_t pll_div4:1;
+ uint64_t zip_crip:2;
+ uint64_t bar2_en:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t icache:24;
+ } cn31xx;
struct cvmx_mio_fus_dat3_cn38xx {
uint64_t reserved_31_63:33;
uint64_t zip_crip:2;
@@ -828,6 +834,27 @@ union cvmx_mio_fus_dat3 {
struct cvmx_mio_fus_dat3_cn38xx cn56xxp1;
struct cvmx_mio_fus_dat3_cn38xx cn58xx;
struct cvmx_mio_fus_dat3_cn38xx cn58xxp1;
+ struct cvmx_mio_fus_dat3_cn63xx {
+ uint64_t reserved_58_63:6;
+ uint64_t pll_ctl:10;
+ uint64_t dfa_info_dte:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t reserved_40_40:1;
+ uint64_t ema:2;
+ uint64_t efus_lck_rsv:1;
+ uint64_t efus_lck_man:1;
+ uint64_t pll_half_dis:1;
+ uint64_t l2c_crip:3;
+ uint64_t reserved_31_31:1;
+ uint64_t zip_info:2;
+ uint64_t bar2_en:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t reserved_0_23:24;
+ } cn63xx;
+ struct cvmx_mio_fus_dat3_cn63xx cn63xxp1;
};
union cvmx_mio_fus_ema {
@@ -848,6 +875,8 @@ union cvmx_mio_fus_ema {
uint64_t ema:2;
} cn58xx;
struct cvmx_mio_fus_ema_cn58xx cn58xxp1;
+ struct cvmx_mio_fus_ema_s cn63xx;
+ struct cvmx_mio_fus_ema_s cn63xxp1;
};
union cvmx_mio_fus_pdf {
@@ -861,60 +890,96 @@ union cvmx_mio_fus_pdf {
struct cvmx_mio_fus_pdf_s cn56xx;
struct cvmx_mio_fus_pdf_s cn56xxp1;
struct cvmx_mio_fus_pdf_s cn58xx;
+ struct cvmx_mio_fus_pdf_s cn63xx;
+ struct cvmx_mio_fus_pdf_s cn63xxp1;
};
union cvmx_mio_fus_pll {
uint64_t u64;
struct cvmx_mio_fus_pll_s {
- uint64_t reserved_2_63:62;
+ uint64_t reserved_8_63:56;
+ uint64_t c_cout_rst:1;
+ uint64_t c_cout_sel:2;
+ uint64_t pnr_cout_rst:1;
+ uint64_t pnr_cout_sel:2;
uint64_t rfslip:1;
uint64_t fbslip:1;
} s;
- struct cvmx_mio_fus_pll_s cn50xx;
- struct cvmx_mio_fus_pll_s cn52xx;
- struct cvmx_mio_fus_pll_s cn52xxp1;
- struct cvmx_mio_fus_pll_s cn56xx;
- struct cvmx_mio_fus_pll_s cn56xxp1;
- struct cvmx_mio_fus_pll_s cn58xx;
- struct cvmx_mio_fus_pll_s cn58xxp1;
+ struct cvmx_mio_fus_pll_cn50xx {
+ uint64_t reserved_2_63:62;
+ uint64_t rfslip:1;
+ uint64_t fbslip:1;
+ } cn50xx;
+ struct cvmx_mio_fus_pll_cn50xx cn52xx;
+ struct cvmx_mio_fus_pll_cn50xx cn52xxp1;
+ struct cvmx_mio_fus_pll_cn50xx cn56xx;
+ struct cvmx_mio_fus_pll_cn50xx cn56xxp1;
+ struct cvmx_mio_fus_pll_cn50xx cn58xx;
+ struct cvmx_mio_fus_pll_cn50xx cn58xxp1;
+ struct cvmx_mio_fus_pll_s cn63xx;
+ struct cvmx_mio_fus_pll_s cn63xxp1;
};
union cvmx_mio_fus_prog {
uint64_t u64;
struct cvmx_mio_fus_prog_s {
- uint64_t reserved_1_63:63;
+ uint64_t reserved_2_63:62;
+ uint64_t soft:1;
uint64_t prog:1;
} s;
- struct cvmx_mio_fus_prog_s cn30xx;
- struct cvmx_mio_fus_prog_s cn31xx;
- struct cvmx_mio_fus_prog_s cn38xx;
- struct cvmx_mio_fus_prog_s cn38xxp2;
- struct cvmx_mio_fus_prog_s cn50xx;
- struct cvmx_mio_fus_prog_s cn52xx;
- struct cvmx_mio_fus_prog_s cn52xxp1;
- struct cvmx_mio_fus_prog_s cn56xx;
- struct cvmx_mio_fus_prog_s cn56xxp1;
- struct cvmx_mio_fus_prog_s cn58xx;
- struct cvmx_mio_fus_prog_s cn58xxp1;
+ struct cvmx_mio_fus_prog_cn30xx {
+ uint64_t reserved_1_63:63;
+ uint64_t prog:1;
+ } cn30xx;
+ struct cvmx_mio_fus_prog_cn30xx cn31xx;
+ struct cvmx_mio_fus_prog_cn30xx cn38xx;
+ struct cvmx_mio_fus_prog_cn30xx cn38xxp2;
+ struct cvmx_mio_fus_prog_cn30xx cn50xx;
+ struct cvmx_mio_fus_prog_cn30xx cn52xx;
+ struct cvmx_mio_fus_prog_cn30xx cn52xxp1;
+ struct cvmx_mio_fus_prog_cn30xx cn56xx;
+ struct cvmx_mio_fus_prog_cn30xx cn56xxp1;
+ struct cvmx_mio_fus_prog_cn30xx cn58xx;
+ struct cvmx_mio_fus_prog_cn30xx cn58xxp1;
+ struct cvmx_mio_fus_prog_s cn63xx;
+ struct cvmx_mio_fus_prog_s cn63xxp1;
};
union cvmx_mio_fus_prog_times {
uint64_t u64;
struct cvmx_mio_fus_prog_times_s {
+ uint64_t reserved_35_63:29;
+ uint64_t vgate_pin:1;
+ uint64_t fsrc_pin:1;
+ uint64_t prog_pin:1;
+ uint64_t reserved_6_31:26;
+ uint64_t setup:6;
+ } s;
+ struct cvmx_mio_fus_prog_times_cn50xx {
uint64_t reserved_33_63:31;
uint64_t prog_pin:1;
uint64_t out:8;
uint64_t sclk_lo:4;
uint64_t sclk_hi:12;
uint64_t setup:8;
- } s;
- struct cvmx_mio_fus_prog_times_s cn50xx;
- struct cvmx_mio_fus_prog_times_s cn52xx;
- struct cvmx_mio_fus_prog_times_s cn52xxp1;
- struct cvmx_mio_fus_prog_times_s cn56xx;
- struct cvmx_mio_fus_prog_times_s cn56xxp1;
- struct cvmx_mio_fus_prog_times_s cn58xx;
- struct cvmx_mio_fus_prog_times_s cn58xxp1;
+ } cn50xx;
+ struct cvmx_mio_fus_prog_times_cn50xx cn52xx;
+ struct cvmx_mio_fus_prog_times_cn50xx cn52xxp1;
+ struct cvmx_mio_fus_prog_times_cn50xx cn56xx;
+ struct cvmx_mio_fus_prog_times_cn50xx cn56xxp1;
+ struct cvmx_mio_fus_prog_times_cn50xx cn58xx;
+ struct cvmx_mio_fus_prog_times_cn50xx cn58xxp1;
+ struct cvmx_mio_fus_prog_times_cn63xx {
+ uint64_t reserved_35_63:29;
+ uint64_t vgate_pin:1;
+ uint64_t fsrc_pin:1;
+ uint64_t prog_pin:1;
+ uint64_t out:7;
+ uint64_t sclk_lo:4;
+ uint64_t sclk_hi:15;
+ uint64_t setup:6;
+ } cn63xx;
+ struct cvmx_mio_fus_prog_times_cn63xx cn63xxp1;
};
union cvmx_mio_fus_rcmd {
@@ -948,6 +1013,57 @@ union cvmx_mio_fus_rcmd {
struct cvmx_mio_fus_rcmd_s cn56xxp1;
struct cvmx_mio_fus_rcmd_cn30xx cn58xx;
struct cvmx_mio_fus_rcmd_cn30xx cn58xxp1;
+ struct cvmx_mio_fus_rcmd_s cn63xx;
+ struct cvmx_mio_fus_rcmd_s cn63xxp1;
+};
+
+union cvmx_mio_fus_read_times {
+ uint64_t u64;
+ struct cvmx_mio_fus_read_times_s {
+ uint64_t reserved_26_63:38;
+ uint64_t sch:4;
+ uint64_t fsh:4;
+ uint64_t prh:4;
+ uint64_t sdh:4;
+ uint64_t setup:10;
+ } s;
+ struct cvmx_mio_fus_read_times_s cn63xx;
+ struct cvmx_mio_fus_read_times_s cn63xxp1;
+};
+
+union cvmx_mio_fus_repair_res0 {
+ uint64_t u64;
+ struct cvmx_mio_fus_repair_res0_s {
+ uint64_t reserved_55_63:9;
+ uint64_t too_many:1;
+ uint64_t repair2:18;
+ uint64_t repair1:18;
+ uint64_t repair0:18;
+ } s;
+ struct cvmx_mio_fus_repair_res0_s cn63xx;
+ struct cvmx_mio_fus_repair_res0_s cn63xxp1;
+};
+
+union cvmx_mio_fus_repair_res1 {
+ uint64_t u64;
+ struct cvmx_mio_fus_repair_res1_s {
+ uint64_t reserved_54_63:10;
+ uint64_t repair5:18;
+ uint64_t repair4:18;
+ uint64_t repair3:18;
+ } s;
+ struct cvmx_mio_fus_repair_res1_s cn63xx;
+ struct cvmx_mio_fus_repair_res1_s cn63xxp1;
+};
+
+union cvmx_mio_fus_repair_res2 {
+ uint64_t u64;
+ struct cvmx_mio_fus_repair_res2_s {
+ uint64_t reserved_18_63:46;
+ uint64_t repair6:18;
+ } s;
+ struct cvmx_mio_fus_repair_res2_s cn63xx;
+ struct cvmx_mio_fus_repair_res2_s cn63xxp1;
};
union cvmx_mio_fus_spr_repair_res {
@@ -968,6 +1084,8 @@ union cvmx_mio_fus_spr_repair_res {
struct cvmx_mio_fus_spr_repair_res_s cn56xxp1;
struct cvmx_mio_fus_spr_repair_res_s cn58xx;
struct cvmx_mio_fus_spr_repair_res_s cn58xxp1;
+ struct cvmx_mio_fus_spr_repair_res_s cn63xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn63xxp1;
};
union cvmx_mio_fus_spr_repair_sum {
@@ -986,6 +1104,8 @@ union cvmx_mio_fus_spr_repair_sum {
struct cvmx_mio_fus_spr_repair_sum_s cn56xxp1;
struct cvmx_mio_fus_spr_repair_sum_s cn58xx;
struct cvmx_mio_fus_spr_repair_sum_s cn58xxp1;
+ struct cvmx_mio_fus_spr_repair_sum_s cn63xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn63xxp1;
};
union cvmx_mio_fus_unlock {
@@ -1021,6 +1141,22 @@ union cvmx_mio_fus_wadr {
struct cvmx_mio_fus_wadr_cn52xx cn56xxp1;
struct cvmx_mio_fus_wadr_cn50xx cn58xx;
struct cvmx_mio_fus_wadr_cn50xx cn58xxp1;
+ struct cvmx_mio_fus_wadr_cn63xx {
+ uint64_t reserved_4_63:60;
+ uint64_t addr:4;
+ } cn63xx;
+ struct cvmx_mio_fus_wadr_cn63xx cn63xxp1;
+};
+
+union cvmx_mio_gpio_comp {
+ uint64_t u64;
+ struct cvmx_mio_gpio_comp_s {
+ uint64_t reserved_12_63:52;
+ uint64_t pctl:6;
+ uint64_t nctl:6;
+ } s;
+ struct cvmx_mio_gpio_comp_s cn63xx;
+ struct cvmx_mio_gpio_comp_s cn63xxp1;
};
union cvmx_mio_ndf_dma_cfg {
@@ -1038,6 +1174,8 @@ union cvmx_mio_ndf_dma_cfg {
uint64_t adr:36;
} s;
struct cvmx_mio_ndf_dma_cfg_s cn52xx;
+ struct cvmx_mio_ndf_dma_cfg_s cn63xx;
+ struct cvmx_mio_ndf_dma_cfg_s cn63xxp1;
};
union cvmx_mio_ndf_dma_int {
@@ -1047,6 +1185,8 @@ union cvmx_mio_ndf_dma_int {
uint64_t done:1;
} s;
struct cvmx_mio_ndf_dma_int_s cn52xx;
+ struct cvmx_mio_ndf_dma_int_s cn63xx;
+ struct cvmx_mio_ndf_dma_int_s cn63xxp1;
};
union cvmx_mio_ndf_dma_int_en {
@@ -1056,6 +1196,8 @@ union cvmx_mio_ndf_dma_int_en {
uint64_t done:1;
} s;
struct cvmx_mio_ndf_dma_int_en_s cn52xx;
+ struct cvmx_mio_ndf_dma_int_en_s cn63xx;
+ struct cvmx_mio_ndf_dma_int_en_s cn63xxp1;
};
union cvmx_mio_pll_ctl {
@@ -1078,6 +1220,173 @@ union cvmx_mio_pll_setting {
struct cvmx_mio_pll_setting_s cn31xx;
};
+union cvmx_mio_ptp_clock_cfg {
+ uint64_t u64;
+ struct cvmx_mio_ptp_clock_cfg_s {
+ uint64_t reserved_24_63:40;
+ uint64_t evcnt_in:6;
+ uint64_t evcnt_edge:1;
+ uint64_t evcnt_en:1;
+ uint64_t tstmp_in:6;
+ uint64_t tstmp_edge:1;
+ uint64_t tstmp_en:1;
+ uint64_t ext_clk_in:6;
+ uint64_t ext_clk_en:1;
+ uint64_t ptp_en:1;
+ } s;
+ struct cvmx_mio_ptp_clock_cfg_s cn63xx;
+ struct cvmx_mio_ptp_clock_cfg_s cn63xxp1;
+};
+
+union cvmx_mio_ptp_clock_comp {
+ uint64_t u64;
+ struct cvmx_mio_ptp_clock_comp_s {
+ uint64_t nanosec:32;
+ uint64_t frnanosec:32;
+ } s;
+ struct cvmx_mio_ptp_clock_comp_s cn63xx;
+ struct cvmx_mio_ptp_clock_comp_s cn63xxp1;
+};
+
+union cvmx_mio_ptp_clock_hi {
+ uint64_t u64;
+ struct cvmx_mio_ptp_clock_hi_s {
+ uint64_t nanosec:64;
+ } s;
+ struct cvmx_mio_ptp_clock_hi_s cn63xx;
+ struct cvmx_mio_ptp_clock_hi_s cn63xxp1;
+};
+
+union cvmx_mio_ptp_clock_lo {
+ uint64_t u64;
+ struct cvmx_mio_ptp_clock_lo_s {
+ uint64_t reserved_32_63:32;
+ uint64_t frnanosec:32;
+ } s;
+ struct cvmx_mio_ptp_clock_lo_s cn63xx;
+ struct cvmx_mio_ptp_clock_lo_s cn63xxp1;
+};
+
+union cvmx_mio_ptp_evt_cnt {
+ uint64_t u64;
+ struct cvmx_mio_ptp_evt_cnt_s {
+ uint64_t cntr:64;
+ } s;
+ struct cvmx_mio_ptp_evt_cnt_s cn63xx;
+ struct cvmx_mio_ptp_evt_cnt_s cn63xxp1;
+};
+
+union cvmx_mio_ptp_timestamp {
+ uint64_t u64;
+ struct cvmx_mio_ptp_timestamp_s {
+ uint64_t nanosec:64;
+ } s;
+ struct cvmx_mio_ptp_timestamp_s cn63xx;
+ struct cvmx_mio_ptp_timestamp_s cn63xxp1;
+};
+
+union cvmx_mio_rst_boot {
+ uint64_t u64;
+ struct cvmx_mio_rst_boot_s {
+ uint64_t reserved_36_63:28;
+ uint64_t c_mul:6;
+ uint64_t pnr_mul:6;
+ uint64_t qlm2_spd:4;
+ uint64_t qlm1_spd:4;
+ uint64_t qlm0_spd:4;
+ uint64_t lboot:10;
+ uint64_t rboot:1;
+ uint64_t rboot_pin:1;
+ } s;
+ struct cvmx_mio_rst_boot_s cn63xx;
+ struct cvmx_mio_rst_boot_s cn63xxp1;
+};
+
+union cvmx_mio_rst_cfg {
+ uint64_t u64;
+ struct cvmx_mio_rst_cfg_s {
+ uint64_t bist_delay:58;
+ uint64_t reserved_3_5:3;
+ uint64_t cntl_clr_bist:1;
+ uint64_t warm_clr_bist:1;
+ uint64_t soft_clr_bist:1;
+ } s;
+ struct cvmx_mio_rst_cfg_s cn63xx;
+ struct cvmx_mio_rst_cfg_cn63xxp1 {
+ uint64_t bist_delay:58;
+ uint64_t reserved_2_5:4;
+ uint64_t warm_clr_bist:1;
+ uint64_t soft_clr_bist:1;
+ } cn63xxp1;
+};
+
+union cvmx_mio_rst_ctlx {
+ uint64_t u64;
+ struct cvmx_mio_rst_ctlx_s {
+ uint64_t reserved_10_63:54;
+ uint64_t prst_link:1;
+ uint64_t rst_done:1;
+ uint64_t rst_link:1;
+ uint64_t host_mode:1;
+ uint64_t prtmode:2;
+ uint64_t rst_drv:1;
+ uint64_t rst_rcv:1;
+ uint64_t rst_chip:1;
+ uint64_t rst_val:1;
+ } s;
+ struct cvmx_mio_rst_ctlx_s cn63xx;
+ struct cvmx_mio_rst_ctlx_cn63xxp1 {
+ uint64_t reserved_9_63:55;
+ uint64_t rst_done:1;
+ uint64_t rst_link:1;
+ uint64_t host_mode:1;
+ uint64_t prtmode:2;
+ uint64_t rst_drv:1;
+ uint64_t rst_rcv:1;
+ uint64_t rst_chip:1;
+ uint64_t rst_val:1;
+ } cn63xxp1;
+};
+
+union cvmx_mio_rst_delay {
+ uint64_t u64;
+ struct cvmx_mio_rst_delay_s {
+ uint64_t reserved_32_63:32;
+ uint64_t soft_rst_dly:16;
+ uint64_t warm_rst_dly:16;
+ } s;
+ struct cvmx_mio_rst_delay_s cn63xx;
+ struct cvmx_mio_rst_delay_s cn63xxp1;
+};
+
+union cvmx_mio_rst_int {
+ uint64_t u64;
+ struct cvmx_mio_rst_int_s {
+ uint64_t reserved_10_63:54;
+ uint64_t perst1:1;
+ uint64_t perst0:1;
+ uint64_t reserved_2_7:6;
+ uint64_t rst_link1:1;
+ uint64_t rst_link0:1;
+ } s;
+ struct cvmx_mio_rst_int_s cn63xx;
+ struct cvmx_mio_rst_int_s cn63xxp1;
+};
+
+union cvmx_mio_rst_int_en {
+ uint64_t u64;
+ struct cvmx_mio_rst_int_en_s {
+ uint64_t reserved_10_63:54;
+ uint64_t perst1:1;
+ uint64_t perst0:1;
+ uint64_t reserved_2_7:6;
+ uint64_t rst_link1:1;
+ uint64_t rst_link0:1;
+ } s;
+ struct cvmx_mio_rst_int_en_s cn63xx;
+ struct cvmx_mio_rst_int_en_s cn63xxp1;
+};
+
union cvmx_mio_twsx_int {
uint64_t u64;
struct cvmx_mio_twsx_int_s {
@@ -1115,6 +1424,8 @@ union cvmx_mio_twsx_int {
struct cvmx_mio_twsx_int_s cn56xxp1;
struct cvmx_mio_twsx_int_s cn58xx;
struct cvmx_mio_twsx_int_s cn58xxp1;
+ struct cvmx_mio_twsx_int_s cn63xx;
+ struct cvmx_mio_twsx_int_s cn63xxp1;
};
union cvmx_mio_twsx_sw_twsi {
@@ -1144,6 +1455,8 @@ union cvmx_mio_twsx_sw_twsi {
struct cvmx_mio_twsx_sw_twsi_s cn56xxp1;
struct cvmx_mio_twsx_sw_twsi_s cn58xx;
struct cvmx_mio_twsx_sw_twsi_s cn58xxp1;
+ struct cvmx_mio_twsx_sw_twsi_s cn63xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn63xxp1;
};
union cvmx_mio_twsx_sw_twsi_ext {
@@ -1164,6 +1477,8 @@ union cvmx_mio_twsx_sw_twsi_ext {
struct cvmx_mio_twsx_sw_twsi_ext_s cn56xxp1;
struct cvmx_mio_twsx_sw_twsi_ext_s cn58xx;
struct cvmx_mio_twsx_sw_twsi_ext_s cn58xxp1;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn63xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn63xxp1;
};
union cvmx_mio_twsx_twsi_sw {
@@ -1184,6 +1499,8 @@ union cvmx_mio_twsx_twsi_sw {
struct cvmx_mio_twsx_twsi_sw_s cn56xxp1;
struct cvmx_mio_twsx_twsi_sw_s cn58xx;
struct cvmx_mio_twsx_twsi_sw_s cn58xxp1;
+ struct cvmx_mio_twsx_twsi_sw_s cn63xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn63xxp1;
};
union cvmx_mio_uartx_dlh {
@@ -1203,6 +1520,8 @@ union cvmx_mio_uartx_dlh {
struct cvmx_mio_uartx_dlh_s cn56xxp1;
struct cvmx_mio_uartx_dlh_s cn58xx;
struct cvmx_mio_uartx_dlh_s cn58xxp1;
+ struct cvmx_mio_uartx_dlh_s cn63xx;
+ struct cvmx_mio_uartx_dlh_s cn63xxp1;
};
union cvmx_mio_uartx_dll {
@@ -1222,6 +1541,8 @@ union cvmx_mio_uartx_dll {
struct cvmx_mio_uartx_dll_s cn56xxp1;
struct cvmx_mio_uartx_dll_s cn58xx;
struct cvmx_mio_uartx_dll_s cn58xxp1;
+ struct cvmx_mio_uartx_dll_s cn63xx;
+ struct cvmx_mio_uartx_dll_s cn63xxp1;
};
union cvmx_mio_uartx_far {
@@ -1241,6 +1562,8 @@ union cvmx_mio_uartx_far {
struct cvmx_mio_uartx_far_s cn56xxp1;
struct cvmx_mio_uartx_far_s cn58xx;
struct cvmx_mio_uartx_far_s cn58xxp1;
+ struct cvmx_mio_uartx_far_s cn63xx;
+ struct cvmx_mio_uartx_far_s cn63xxp1;
};
union cvmx_mio_uartx_fcr {
@@ -1265,6 +1588,8 @@ union cvmx_mio_uartx_fcr {
struct cvmx_mio_uartx_fcr_s cn56xxp1;
struct cvmx_mio_uartx_fcr_s cn58xx;
struct cvmx_mio_uartx_fcr_s cn58xxp1;
+ struct cvmx_mio_uartx_fcr_s cn63xx;
+ struct cvmx_mio_uartx_fcr_s cn63xxp1;
};
union cvmx_mio_uartx_htx {
@@ -1284,6 +1609,8 @@ union cvmx_mio_uartx_htx {
struct cvmx_mio_uartx_htx_s cn56xxp1;
struct cvmx_mio_uartx_htx_s cn58xx;
struct cvmx_mio_uartx_htx_s cn58xxp1;
+ struct cvmx_mio_uartx_htx_s cn63xx;
+ struct cvmx_mio_uartx_htx_s cn63xxp1;
};
union cvmx_mio_uartx_ier {
@@ -1308,6 +1635,8 @@ union cvmx_mio_uartx_ier {
struct cvmx_mio_uartx_ier_s cn56xxp1;
struct cvmx_mio_uartx_ier_s cn58xx;
struct cvmx_mio_uartx_ier_s cn58xxp1;
+ struct cvmx_mio_uartx_ier_s cn63xx;
+ struct cvmx_mio_uartx_ier_s cn63xxp1;
};
union cvmx_mio_uartx_iir {
@@ -1329,6 +1658,8 @@ union cvmx_mio_uartx_iir {
struct cvmx_mio_uartx_iir_s cn56xxp1;
struct cvmx_mio_uartx_iir_s cn58xx;
struct cvmx_mio_uartx_iir_s cn58xxp1;
+ struct cvmx_mio_uartx_iir_s cn63xx;
+ struct cvmx_mio_uartx_iir_s cn63xxp1;
};
union cvmx_mio_uartx_lcr {
@@ -1354,6 +1685,8 @@ union cvmx_mio_uartx_lcr {
struct cvmx_mio_uartx_lcr_s cn56xxp1;
struct cvmx_mio_uartx_lcr_s cn58xx;
struct cvmx_mio_uartx_lcr_s cn58xxp1;
+ struct cvmx_mio_uartx_lcr_s cn63xx;
+ struct cvmx_mio_uartx_lcr_s cn63xxp1;
};
union cvmx_mio_uartx_lsr {
@@ -1380,6 +1713,8 @@ union cvmx_mio_uartx_lsr {
struct cvmx_mio_uartx_lsr_s cn56xxp1;
struct cvmx_mio_uartx_lsr_s cn58xx;
struct cvmx_mio_uartx_lsr_s cn58xxp1;
+ struct cvmx_mio_uartx_lsr_s cn63xx;
+ struct cvmx_mio_uartx_lsr_s cn63xxp1;
};
union cvmx_mio_uartx_mcr {
@@ -1404,6 +1739,8 @@ union cvmx_mio_uartx_mcr {
struct cvmx_mio_uartx_mcr_s cn56xxp1;
struct cvmx_mio_uartx_mcr_s cn58xx;
struct cvmx_mio_uartx_mcr_s cn58xxp1;
+ struct cvmx_mio_uartx_mcr_s cn63xx;
+ struct cvmx_mio_uartx_mcr_s cn63xxp1;
};
union cvmx_mio_uartx_msr {
@@ -1430,6 +1767,8 @@ union cvmx_mio_uartx_msr {
struct cvmx_mio_uartx_msr_s cn56xxp1;
struct cvmx_mio_uartx_msr_s cn58xx;
struct cvmx_mio_uartx_msr_s cn58xxp1;
+ struct cvmx_mio_uartx_msr_s cn63xx;
+ struct cvmx_mio_uartx_msr_s cn63xxp1;
};
union cvmx_mio_uartx_rbr {
@@ -1449,6 +1788,8 @@ union cvmx_mio_uartx_rbr {
struct cvmx_mio_uartx_rbr_s cn56xxp1;
struct cvmx_mio_uartx_rbr_s cn58xx;
struct cvmx_mio_uartx_rbr_s cn58xxp1;
+ struct cvmx_mio_uartx_rbr_s cn63xx;
+ struct cvmx_mio_uartx_rbr_s cn63xxp1;
};
union cvmx_mio_uartx_rfl {
@@ -1468,6 +1809,8 @@ union cvmx_mio_uartx_rfl {
struct cvmx_mio_uartx_rfl_s cn56xxp1;
struct cvmx_mio_uartx_rfl_s cn58xx;
struct cvmx_mio_uartx_rfl_s cn58xxp1;
+ struct cvmx_mio_uartx_rfl_s cn63xx;
+ struct cvmx_mio_uartx_rfl_s cn63xxp1;
};
union cvmx_mio_uartx_rfw {
@@ -1489,6 +1832,8 @@ union cvmx_mio_uartx_rfw {
struct cvmx_mio_uartx_rfw_s cn56xxp1;
struct cvmx_mio_uartx_rfw_s cn58xx;
struct cvmx_mio_uartx_rfw_s cn58xxp1;
+ struct cvmx_mio_uartx_rfw_s cn63xx;
+ struct cvmx_mio_uartx_rfw_s cn63xxp1;
};
union cvmx_mio_uartx_sbcr {
@@ -1508,6 +1853,8 @@ union cvmx_mio_uartx_sbcr {
struct cvmx_mio_uartx_sbcr_s cn56xxp1;
struct cvmx_mio_uartx_sbcr_s cn58xx;
struct cvmx_mio_uartx_sbcr_s cn58xxp1;
+ struct cvmx_mio_uartx_sbcr_s cn63xx;
+ struct cvmx_mio_uartx_sbcr_s cn63xxp1;
};
union cvmx_mio_uartx_scr {
@@ -1527,6 +1874,8 @@ union cvmx_mio_uartx_scr {
struct cvmx_mio_uartx_scr_s cn56xxp1;
struct cvmx_mio_uartx_scr_s cn58xx;
struct cvmx_mio_uartx_scr_s cn58xxp1;
+ struct cvmx_mio_uartx_scr_s cn63xx;
+ struct cvmx_mio_uartx_scr_s cn63xxp1;
};
union cvmx_mio_uartx_sfe {
@@ -1546,6 +1895,8 @@ union cvmx_mio_uartx_sfe {
struct cvmx_mio_uartx_sfe_s cn56xxp1;
struct cvmx_mio_uartx_sfe_s cn58xx;
struct cvmx_mio_uartx_sfe_s cn58xxp1;
+ struct cvmx_mio_uartx_sfe_s cn63xx;
+ struct cvmx_mio_uartx_sfe_s cn63xxp1;
};
union cvmx_mio_uartx_srr {
@@ -1567,6 +1918,8 @@ union cvmx_mio_uartx_srr {
struct cvmx_mio_uartx_srr_s cn56xxp1;
struct cvmx_mio_uartx_srr_s cn58xx;
struct cvmx_mio_uartx_srr_s cn58xxp1;
+ struct cvmx_mio_uartx_srr_s cn63xx;
+ struct cvmx_mio_uartx_srr_s cn63xxp1;
};
union cvmx_mio_uartx_srt {
@@ -1586,6 +1939,8 @@ union cvmx_mio_uartx_srt {
struct cvmx_mio_uartx_srt_s cn56xxp1;
struct cvmx_mio_uartx_srt_s cn58xx;
struct cvmx_mio_uartx_srt_s cn58xxp1;
+ struct cvmx_mio_uartx_srt_s cn63xx;
+ struct cvmx_mio_uartx_srt_s cn63xxp1;
};
union cvmx_mio_uartx_srts {
@@ -1605,6 +1960,8 @@ union cvmx_mio_uartx_srts {
struct cvmx_mio_uartx_srts_s cn56xxp1;
struct cvmx_mio_uartx_srts_s cn58xx;
struct cvmx_mio_uartx_srts_s cn58xxp1;
+ struct cvmx_mio_uartx_srts_s cn63xx;
+ struct cvmx_mio_uartx_srts_s cn63xxp1;
};
union cvmx_mio_uartx_stt {
@@ -1624,6 +1981,8 @@ union cvmx_mio_uartx_stt {
struct cvmx_mio_uartx_stt_s cn56xxp1;
struct cvmx_mio_uartx_stt_s cn58xx;
struct cvmx_mio_uartx_stt_s cn58xxp1;
+ struct cvmx_mio_uartx_stt_s cn63xx;
+ struct cvmx_mio_uartx_stt_s cn63xxp1;
};
union cvmx_mio_uartx_tfl {
@@ -1643,6 +2002,8 @@ union cvmx_mio_uartx_tfl {
struct cvmx_mio_uartx_tfl_s cn56xxp1;
struct cvmx_mio_uartx_tfl_s cn58xx;
struct cvmx_mio_uartx_tfl_s cn58xxp1;
+ struct cvmx_mio_uartx_tfl_s cn63xx;
+ struct cvmx_mio_uartx_tfl_s cn63xxp1;
};
union cvmx_mio_uartx_tfr {
@@ -1662,6 +2023,8 @@ union cvmx_mio_uartx_tfr {
struct cvmx_mio_uartx_tfr_s cn56xxp1;
struct cvmx_mio_uartx_tfr_s cn58xx;
struct cvmx_mio_uartx_tfr_s cn58xxp1;
+ struct cvmx_mio_uartx_tfr_s cn63xx;
+ struct cvmx_mio_uartx_tfr_s cn63xxp1;
};
union cvmx_mio_uartx_thr {
@@ -1681,6 +2044,8 @@ union cvmx_mio_uartx_thr {
struct cvmx_mio_uartx_thr_s cn56xxp1;
struct cvmx_mio_uartx_thr_s cn58xx;
struct cvmx_mio_uartx_thr_s cn58xxp1;
+ struct cvmx_mio_uartx_thr_s cn63xx;
+ struct cvmx_mio_uartx_thr_s cn63xxp1;
};
union cvmx_mio_uartx_usr {
@@ -1704,6 +2069,8 @@ union cvmx_mio_uartx_usr {
struct cvmx_mio_uartx_usr_s cn56xxp1;
struct cvmx_mio_uartx_usr_s cn58xx;
struct cvmx_mio_uartx_usr_s cn58xxp1;
+ struct cvmx_mio_uartx_usr_s cn63xx;
+ struct cvmx_mio_uartx_usr_s cn63xxp1;
};
union cvmx_mio_uart2_dlh {
diff --git a/arch/mips/include/asm/octeon/cvmx-mixx-defs.h b/arch/mips/include/asm/octeon/cvmx-mixx-defs.h
index dab6dca492f..7057c447e69 100644
--- a/arch/mips/include/asm/octeon/cvmx-mixx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-mixx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,52 +28,52 @@
#ifndef __CVMX_MIXX_DEFS_H__
#define __CVMX_MIXX_DEFS_H__
-#define CVMX_MIXX_BIST(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100078ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_CTL(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100020ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_INTENA(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100050ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_IRCNT(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100030ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_IRHWM(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100028ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_IRING1(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100010ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_IRING2(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100018ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_ISR(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100048ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_ORCNT(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100040ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_ORHWM(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100038ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_ORING1(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100000ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_ORING2(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100008ull + (((offset) & 1) * 2048))
-#define CVMX_MIXX_REMCNT(offset) \
- CVMX_ADD_IO_SEG(0x0001070000100058ull + (((offset) & 1) * 2048))
+#define CVMX_MIXX_BIST(offset) (CVMX_ADD_IO_SEG(0x0001070000100078ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_CTL(offset) (CVMX_ADD_IO_SEG(0x0001070000100020ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_INTENA(offset) (CVMX_ADD_IO_SEG(0x0001070000100050ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_IRCNT(offset) (CVMX_ADD_IO_SEG(0x0001070000100030ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_IRHWM(offset) (CVMX_ADD_IO_SEG(0x0001070000100028ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_IRING1(offset) (CVMX_ADD_IO_SEG(0x0001070000100010ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_IRING2(offset) (CVMX_ADD_IO_SEG(0x0001070000100018ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_ISR(offset) (CVMX_ADD_IO_SEG(0x0001070000100048ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_ORCNT(offset) (CVMX_ADD_IO_SEG(0x0001070000100040ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_ORHWM(offset) (CVMX_ADD_IO_SEG(0x0001070000100038ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_ORING1(offset) (CVMX_ADD_IO_SEG(0x0001070000100000ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_ORING2(offset) (CVMX_ADD_IO_SEG(0x0001070000100008ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_REMCNT(offset) (CVMX_ADD_IO_SEG(0x0001070000100058ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_TSCTL(offset) (CVMX_ADD_IO_SEG(0x0001070000100068ull) + ((offset) & 1) * 2048)
+#define CVMX_MIXX_TSTAMP(offset) (CVMX_ADD_IO_SEG(0x0001070000100060ull) + ((offset) & 1) * 2048)
union cvmx_mixx_bist {
uint64_t u64;
struct cvmx_mixx_bist_s {
- uint64_t reserved_4_63:60;
+ uint64_t reserved_6_63:58;
+ uint64_t opfdat:1;
+ uint64_t mrgdat:1;
uint64_t mrqdat:1;
uint64_t ipfdat:1;
uint64_t irfdat:1;
uint64_t orfdat:1;
} s;
- struct cvmx_mixx_bist_s cn52xx;
- struct cvmx_mixx_bist_s cn52xxp1;
- struct cvmx_mixx_bist_s cn56xx;
- struct cvmx_mixx_bist_s cn56xxp1;
+ struct cvmx_mixx_bist_cn52xx {
+ uint64_t reserved_4_63:60;
+ uint64_t mrqdat:1;
+ uint64_t ipfdat:1;
+ uint64_t irfdat:1;
+ uint64_t orfdat:1;
+ } cn52xx;
+ struct cvmx_mixx_bist_cn52xx cn52xxp1;
+ struct cvmx_mixx_bist_cn52xx cn56xx;
+ struct cvmx_mixx_bist_cn52xx cn56xxp1;
+ struct cvmx_mixx_bist_s cn63xx;
+ struct cvmx_mixx_bist_s cn63xxp1;
};
union cvmx_mixx_ctl {
uint64_t u64;
struct cvmx_mixx_ctl_s {
- uint64_t reserved_8_63:56;
+ uint64_t reserved_12_63:52;
+ uint64_t ts_thresh:4;
uint64_t crc_strip:1;
uint64_t busy:1;
uint64_t en:1;
@@ -82,16 +82,28 @@ union cvmx_mixx_ctl {
uint64_t nbtarb:1;
uint64_t mrq_hwm:2;
} s;
- struct cvmx_mixx_ctl_s cn52xx;
- struct cvmx_mixx_ctl_s cn52xxp1;
- struct cvmx_mixx_ctl_s cn56xx;
- struct cvmx_mixx_ctl_s cn56xxp1;
+ struct cvmx_mixx_ctl_cn52xx {
+ uint64_t reserved_8_63:56;
+ uint64_t crc_strip:1;
+ uint64_t busy:1;
+ uint64_t en:1;
+ uint64_t reset:1;
+ uint64_t lendian:1;
+ uint64_t nbtarb:1;
+ uint64_t mrq_hwm:2;
+ } cn52xx;
+ struct cvmx_mixx_ctl_cn52xx cn52xxp1;
+ struct cvmx_mixx_ctl_cn52xx cn56xx;
+ struct cvmx_mixx_ctl_cn52xx cn56xxp1;
+ struct cvmx_mixx_ctl_s cn63xx;
+ struct cvmx_mixx_ctl_s cn63xxp1;
};
union cvmx_mixx_intena {
uint64_t u64;
struct cvmx_mixx_intena_s {
- uint64_t reserved_7_63:57;
+ uint64_t reserved_8_63:56;
+ uint64_t tsena:1;
uint64_t orunena:1;
uint64_t irunena:1;
uint64_t data_drpena:1;
@@ -100,10 +112,21 @@ union cvmx_mixx_intena {
uint64_t ivfena:1;
uint64_t ovfena:1;
} s;
- struct cvmx_mixx_intena_s cn52xx;
- struct cvmx_mixx_intena_s cn52xxp1;
- struct cvmx_mixx_intena_s cn56xx;
- struct cvmx_mixx_intena_s cn56xxp1;
+ struct cvmx_mixx_intena_cn52xx {
+ uint64_t reserved_7_63:57;
+ uint64_t orunena:1;
+ uint64_t irunena:1;
+ uint64_t data_drpena:1;
+ uint64_t ithena:1;
+ uint64_t othena:1;
+ uint64_t ivfena:1;
+ uint64_t ovfena:1;
+ } cn52xx;
+ struct cvmx_mixx_intena_cn52xx cn52xxp1;
+ struct cvmx_mixx_intena_cn52xx cn56xx;
+ struct cvmx_mixx_intena_cn52xx cn56xxp1;
+ struct cvmx_mixx_intena_s cn63xx;
+ struct cvmx_mixx_intena_s cn63xxp1;
};
union cvmx_mixx_ircnt {
@@ -116,6 +139,8 @@ union cvmx_mixx_ircnt {
struct cvmx_mixx_ircnt_s cn52xxp1;
struct cvmx_mixx_ircnt_s cn56xx;
struct cvmx_mixx_ircnt_s cn56xxp1;
+ struct cvmx_mixx_ircnt_s cn63xx;
+ struct cvmx_mixx_ircnt_s cn63xxp1;
};
union cvmx_mixx_irhwm {
@@ -129,6 +154,8 @@ union cvmx_mixx_irhwm {
struct cvmx_mixx_irhwm_s cn52xxp1;
struct cvmx_mixx_irhwm_s cn56xx;
struct cvmx_mixx_irhwm_s cn56xxp1;
+ struct cvmx_mixx_irhwm_s cn63xx;
+ struct cvmx_mixx_irhwm_s cn63xxp1;
};
union cvmx_mixx_iring1 {
@@ -136,14 +163,21 @@ union cvmx_mixx_iring1 {
struct cvmx_mixx_iring1_s {
uint64_t reserved_60_63:4;
uint64_t isize:20;
+ uint64_t ibase:37;
+ uint64_t reserved_0_2:3;
+ } s;
+ struct cvmx_mixx_iring1_cn52xx {
+ uint64_t reserved_60_63:4;
+ uint64_t isize:20;
uint64_t reserved_36_39:4;
uint64_t ibase:33;
uint64_t reserved_0_2:3;
- } s;
- struct cvmx_mixx_iring1_s cn52xx;
- struct cvmx_mixx_iring1_s cn52xxp1;
- struct cvmx_mixx_iring1_s cn56xx;
- struct cvmx_mixx_iring1_s cn56xxp1;
+ } cn52xx;
+ struct cvmx_mixx_iring1_cn52xx cn52xxp1;
+ struct cvmx_mixx_iring1_cn52xx cn56xx;
+ struct cvmx_mixx_iring1_cn52xx cn56xxp1;
+ struct cvmx_mixx_iring1_s cn63xx;
+ struct cvmx_mixx_iring1_s cn63xxp1;
};
union cvmx_mixx_iring2 {
@@ -158,12 +192,15 @@ union cvmx_mixx_iring2 {
struct cvmx_mixx_iring2_s cn52xxp1;
struct cvmx_mixx_iring2_s cn56xx;
struct cvmx_mixx_iring2_s cn56xxp1;
+ struct cvmx_mixx_iring2_s cn63xx;
+ struct cvmx_mixx_iring2_s cn63xxp1;
};
union cvmx_mixx_isr {
uint64_t u64;
struct cvmx_mixx_isr_s {
- uint64_t reserved_7_63:57;
+ uint64_t reserved_8_63:56;
+ uint64_t ts:1;
uint64_t orun:1;
uint64_t irun:1;
uint64_t data_drp:1;
@@ -172,10 +209,21 @@ union cvmx_mixx_isr {
uint64_t idblovf:1;
uint64_t odblovf:1;
} s;
- struct cvmx_mixx_isr_s cn52xx;
- struct cvmx_mixx_isr_s cn52xxp1;
- struct cvmx_mixx_isr_s cn56xx;
- struct cvmx_mixx_isr_s cn56xxp1;
+ struct cvmx_mixx_isr_cn52xx {
+ uint64_t reserved_7_63:57;
+ uint64_t orun:1;
+ uint64_t irun:1;
+ uint64_t data_drp:1;
+ uint64_t irthresh:1;
+ uint64_t orthresh:1;
+ uint64_t idblovf:1;
+ uint64_t odblovf:1;
+ } cn52xx;
+ struct cvmx_mixx_isr_cn52xx cn52xxp1;
+ struct cvmx_mixx_isr_cn52xx cn56xx;
+ struct cvmx_mixx_isr_cn52xx cn56xxp1;
+ struct cvmx_mixx_isr_s cn63xx;
+ struct cvmx_mixx_isr_s cn63xxp1;
};
union cvmx_mixx_orcnt {
@@ -188,6 +236,8 @@ union cvmx_mixx_orcnt {
struct cvmx_mixx_orcnt_s cn52xxp1;
struct cvmx_mixx_orcnt_s cn56xx;
struct cvmx_mixx_orcnt_s cn56xxp1;
+ struct cvmx_mixx_orcnt_s cn63xx;
+ struct cvmx_mixx_orcnt_s cn63xxp1;
};
union cvmx_mixx_orhwm {
@@ -200,6 +250,8 @@ union cvmx_mixx_orhwm {
struct cvmx_mixx_orhwm_s cn52xxp1;
struct cvmx_mixx_orhwm_s cn56xx;
struct cvmx_mixx_orhwm_s cn56xxp1;
+ struct cvmx_mixx_orhwm_s cn63xx;
+ struct cvmx_mixx_orhwm_s cn63xxp1;
};
union cvmx_mixx_oring1 {
@@ -207,14 +259,21 @@ union cvmx_mixx_oring1 {
struct cvmx_mixx_oring1_s {
uint64_t reserved_60_63:4;
uint64_t osize:20;
+ uint64_t obase:37;
+ uint64_t reserved_0_2:3;
+ } s;
+ struct cvmx_mixx_oring1_cn52xx {
+ uint64_t reserved_60_63:4;
+ uint64_t osize:20;
uint64_t reserved_36_39:4;
uint64_t obase:33;
uint64_t reserved_0_2:3;
- } s;
- struct cvmx_mixx_oring1_s cn52xx;
- struct cvmx_mixx_oring1_s cn52xxp1;
- struct cvmx_mixx_oring1_s cn56xx;
- struct cvmx_mixx_oring1_s cn56xxp1;
+ } cn52xx;
+ struct cvmx_mixx_oring1_cn52xx cn52xxp1;
+ struct cvmx_mixx_oring1_cn52xx cn56xx;
+ struct cvmx_mixx_oring1_cn52xx cn56xxp1;
+ struct cvmx_mixx_oring1_s cn63xx;
+ struct cvmx_mixx_oring1_s cn63xxp1;
};
union cvmx_mixx_oring2 {
@@ -229,6 +288,8 @@ union cvmx_mixx_oring2 {
struct cvmx_mixx_oring2_s cn52xxp1;
struct cvmx_mixx_oring2_s cn56xx;
struct cvmx_mixx_oring2_s cn56xxp1;
+ struct cvmx_mixx_oring2_s cn63xx;
+ struct cvmx_mixx_oring2_s cn63xxp1;
};
union cvmx_mixx_remcnt {
@@ -243,6 +304,31 @@ union cvmx_mixx_remcnt {
struct cvmx_mixx_remcnt_s cn52xxp1;
struct cvmx_mixx_remcnt_s cn56xx;
struct cvmx_mixx_remcnt_s cn56xxp1;
+ struct cvmx_mixx_remcnt_s cn63xx;
+ struct cvmx_mixx_remcnt_s cn63xxp1;
+};
+
+union cvmx_mixx_tsctl {
+ uint64_t u64;
+ struct cvmx_mixx_tsctl_s {
+ uint64_t reserved_21_63:43;
+ uint64_t tsavl:5;
+ uint64_t reserved_13_15:3;
+ uint64_t tstot:5;
+ uint64_t reserved_5_7:3;
+ uint64_t tscnt:5;
+ } s;
+ struct cvmx_mixx_tsctl_s cn63xx;
+ struct cvmx_mixx_tsctl_s cn63xxp1;
+};
+
+union cvmx_mixx_tstamp {
+ uint64_t u64;
+ struct cvmx_mixx_tstamp_s {
+ uint64_t tstamp:64;
+ } s;
+ struct cvmx_mixx_tstamp_s cn63xx;
+ struct cvmx_mixx_tstamp_s cn63xxp1;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-npei-defs.h b/arch/mips/include/asm/octeon/cvmx-npei-defs.h
index 4b347bb8ce8..9899a9d2ba7 100644
--- a/arch/mips/include/asm/octeon/cvmx-npei-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-npei-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,206 +28,114 @@
#ifndef __CVMX_NPEI_DEFS_H__
#define __CVMX_NPEI_DEFS_H__
-#define CVMX_NPEI_BAR1_INDEXX(offset) \
- (0x0000000000000000ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_BIST_STATUS \
- (0x0000000000000580ull)
-#define CVMX_NPEI_BIST_STATUS2 \
- (0x0000000000000680ull)
-#define CVMX_NPEI_CTL_PORT0 \
- (0x0000000000000250ull)
-#define CVMX_NPEI_CTL_PORT1 \
- (0x0000000000000260ull)
-#define CVMX_NPEI_CTL_STATUS \
- (0x0000000000000570ull)
-#define CVMX_NPEI_CTL_STATUS2 \
- (0x0000000000003C00ull)
-#define CVMX_NPEI_DATA_OUT_CNT \
- (0x00000000000005F0ull)
-#define CVMX_NPEI_DBG_DATA \
- (0x0000000000000510ull)
-#define CVMX_NPEI_DBG_SELECT \
- (0x0000000000000500ull)
-#define CVMX_NPEI_DMA0_INT_LEVEL \
- (0x00000000000005C0ull)
-#define CVMX_NPEI_DMA1_INT_LEVEL \
- (0x00000000000005D0ull)
-#define CVMX_NPEI_DMAX_COUNTS(offset) \
- (0x0000000000000450ull + (((offset) & 7) * 16))
-#define CVMX_NPEI_DMAX_DBELL(offset) \
- (0x00000000000003B0ull + (((offset) & 7) * 16))
-#define CVMX_NPEI_DMAX_IBUFF_SADDR(offset) \
- (0x0000000000000400ull + (((offset) & 7) * 16))
-#define CVMX_NPEI_DMAX_NADDR(offset) \
- (0x00000000000004A0ull + (((offset) & 7) * 16))
-#define CVMX_NPEI_DMA_CNTS \
- (0x00000000000005E0ull)
-#define CVMX_NPEI_DMA_CONTROL \
- (0x00000000000003A0ull)
-#define CVMX_NPEI_INT_A_ENB \
- (0x0000000000000560ull)
-#define CVMX_NPEI_INT_A_ENB2 \
- (0x0000000000003CE0ull)
-#define CVMX_NPEI_INT_A_SUM \
- (0x0000000000000550ull)
-#define CVMX_NPEI_INT_ENB \
- (0x0000000000000540ull)
-#define CVMX_NPEI_INT_ENB2 \
- (0x0000000000003CD0ull)
-#define CVMX_NPEI_INT_INFO \
- (0x0000000000000590ull)
-#define CVMX_NPEI_INT_SUM \
- (0x0000000000000530ull)
-#define CVMX_NPEI_INT_SUM2 \
- (0x0000000000003CC0ull)
-#define CVMX_NPEI_LAST_WIN_RDATA0 \
- (0x0000000000000600ull)
-#define CVMX_NPEI_LAST_WIN_RDATA1 \
- (0x0000000000000610ull)
-#define CVMX_NPEI_MEM_ACCESS_CTL \
- (0x00000000000004F0ull)
-#define CVMX_NPEI_MEM_ACCESS_SUBIDX(offset) \
- (0x0000000000000340ull + (((offset) & 31) * 16) - 16 * 12)
-#define CVMX_NPEI_MSI_ENB0 \
- (0x0000000000003C50ull)
-#define CVMX_NPEI_MSI_ENB1 \
- (0x0000000000003C60ull)
-#define CVMX_NPEI_MSI_ENB2 \
- (0x0000000000003C70ull)
-#define CVMX_NPEI_MSI_ENB3 \
- (0x0000000000003C80ull)
-#define CVMX_NPEI_MSI_RCV0 \
- (0x0000000000003C10ull)
-#define CVMX_NPEI_MSI_RCV1 \
- (0x0000000000003C20ull)
-#define CVMX_NPEI_MSI_RCV2 \
- (0x0000000000003C30ull)
-#define CVMX_NPEI_MSI_RCV3 \
- (0x0000000000003C40ull)
-#define CVMX_NPEI_MSI_RD_MAP \
- (0x0000000000003CA0ull)
-#define CVMX_NPEI_MSI_W1C_ENB0 \
- (0x0000000000003CF0ull)
-#define CVMX_NPEI_MSI_W1C_ENB1 \
- (0x0000000000003D00ull)
-#define CVMX_NPEI_MSI_W1C_ENB2 \
- (0x0000000000003D10ull)
-#define CVMX_NPEI_MSI_W1C_ENB3 \
- (0x0000000000003D20ull)
-#define CVMX_NPEI_MSI_W1S_ENB0 \
- (0x0000000000003D30ull)
-#define CVMX_NPEI_MSI_W1S_ENB1 \
- (0x0000000000003D40ull)
-#define CVMX_NPEI_MSI_W1S_ENB2 \
- (0x0000000000003D50ull)
-#define CVMX_NPEI_MSI_W1S_ENB3 \
- (0x0000000000003D60ull)
-#define CVMX_NPEI_MSI_WR_MAP \
- (0x0000000000003C90ull)
-#define CVMX_NPEI_PCIE_CREDIT_CNT \
- (0x0000000000003D70ull)
-#define CVMX_NPEI_PCIE_MSI_RCV \
- (0x0000000000003CB0ull)
-#define CVMX_NPEI_PCIE_MSI_RCV_B1 \
- (0x0000000000000650ull)
-#define CVMX_NPEI_PCIE_MSI_RCV_B2 \
- (0x0000000000000660ull)
-#define CVMX_NPEI_PCIE_MSI_RCV_B3 \
- (0x0000000000000670ull)
-#define CVMX_NPEI_PKTX_CNTS(offset) \
- (0x0000000000002400ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKTX_INSTR_BADDR(offset) \
- (0x0000000000002800ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKTX_INSTR_BAOFF_DBELL(offset) \
- (0x0000000000002C00ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKTX_INSTR_FIFO_RSIZE(offset) \
- (0x0000000000003000ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKTX_INSTR_HEADER(offset) \
- (0x0000000000003400ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKTX_IN_BP(offset) \
- (0x0000000000003800ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKTX_SLIST_BADDR(offset) \
- (0x0000000000001400ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKTX_SLIST_BAOFF_DBELL(offset) \
- (0x0000000000001800ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKTX_SLIST_FIFO_RSIZE(offset) \
- (0x0000000000001C00ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKT_CNT_INT \
- (0x0000000000001110ull)
-#define CVMX_NPEI_PKT_CNT_INT_ENB \
- (0x0000000000001130ull)
-#define CVMX_NPEI_PKT_DATA_OUT_ES \
- (0x00000000000010B0ull)
-#define CVMX_NPEI_PKT_DATA_OUT_NS \
- (0x00000000000010A0ull)
-#define CVMX_NPEI_PKT_DATA_OUT_ROR \
- (0x0000000000001090ull)
-#define CVMX_NPEI_PKT_DPADDR \
- (0x0000000000001080ull)
-#define CVMX_NPEI_PKT_INPUT_CONTROL \
- (0x0000000000001150ull)
-#define CVMX_NPEI_PKT_INSTR_ENB \
- (0x0000000000001000ull)
-#define CVMX_NPEI_PKT_INSTR_RD_SIZE \
- (0x0000000000001190ull)
-#define CVMX_NPEI_PKT_INSTR_SIZE \
- (0x0000000000001020ull)
-#define CVMX_NPEI_PKT_INT_LEVELS \
- (0x0000000000001100ull)
-#define CVMX_NPEI_PKT_IN_BP \
- (0x00000000000006B0ull)
-#define CVMX_NPEI_PKT_IN_DONEX_CNTS(offset) \
- (0x0000000000002000ull + (((offset) & 31) * 16))
-#define CVMX_NPEI_PKT_IN_INSTR_COUNTS \
- (0x00000000000006A0ull)
-#define CVMX_NPEI_PKT_IN_PCIE_PORT \
- (0x00000000000011A0ull)
-#define CVMX_NPEI_PKT_IPTR \
- (0x0000000000001070ull)
-#define CVMX_NPEI_PKT_OUTPUT_WMARK \
- (0x0000000000001160ull)
-#define CVMX_NPEI_PKT_OUT_BMODE \
- (0x00000000000010D0ull)
-#define CVMX_NPEI_PKT_OUT_ENB \
- (0x0000000000001010ull)
-#define CVMX_NPEI_PKT_PCIE_PORT \
- (0x00000000000010E0ull)
-#define CVMX_NPEI_PKT_PORT_IN_RST \
- (0x0000000000000690ull)
-#define CVMX_NPEI_PKT_SLIST_ES \
- (0x0000000000001050ull)
-#define CVMX_NPEI_PKT_SLIST_ID_SIZE \
- (0x0000000000001180ull)
-#define CVMX_NPEI_PKT_SLIST_NS \
- (0x0000000000001040ull)
-#define CVMX_NPEI_PKT_SLIST_ROR \
- (0x0000000000001030ull)
-#define CVMX_NPEI_PKT_TIME_INT \
- (0x0000000000001120ull)
-#define CVMX_NPEI_PKT_TIME_INT_ENB \
- (0x0000000000001140ull)
-#define CVMX_NPEI_RSL_INT_BLOCKS \
- (0x0000000000000520ull)
-#define CVMX_NPEI_SCRATCH_1 \
- (0x0000000000000270ull)
-#define CVMX_NPEI_STATE1 \
- (0x0000000000000620ull)
-#define CVMX_NPEI_STATE2 \
- (0x0000000000000630ull)
-#define CVMX_NPEI_STATE3 \
- (0x0000000000000640ull)
-#define CVMX_NPEI_WINDOW_CTL \
- (0x0000000000000380ull)
-#define CVMX_NPEI_WIN_RD_ADDR \
- (0x0000000000000210ull)
-#define CVMX_NPEI_WIN_RD_DATA \
- (0x0000000000000240ull)
-#define CVMX_NPEI_WIN_WR_ADDR \
- (0x0000000000000200ull)
-#define CVMX_NPEI_WIN_WR_DATA \
- (0x0000000000000220ull)
-#define CVMX_NPEI_WIN_WR_MASK \
- (0x0000000000000230ull)
+#define CVMX_NPEI_BAR1_INDEXX(offset) (0x0000000000000000ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_BIST_STATUS (0x0000000000000580ull)
+#define CVMX_NPEI_BIST_STATUS2 (0x0000000000000680ull)
+#define CVMX_NPEI_CTL_PORT0 (0x0000000000000250ull)
+#define CVMX_NPEI_CTL_PORT1 (0x0000000000000260ull)
+#define CVMX_NPEI_CTL_STATUS (0x0000000000000570ull)
+#define CVMX_NPEI_CTL_STATUS2 (0x0000000000003C00ull)
+#define CVMX_NPEI_DATA_OUT_CNT (0x00000000000005F0ull)
+#define CVMX_NPEI_DBG_DATA (0x0000000000000510ull)
+#define CVMX_NPEI_DBG_SELECT (0x0000000000000500ull)
+#define CVMX_NPEI_DMA0_INT_LEVEL (0x00000000000005C0ull)
+#define CVMX_NPEI_DMA1_INT_LEVEL (0x00000000000005D0ull)
+#define CVMX_NPEI_DMAX_COUNTS(offset) (0x0000000000000450ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMAX_DBELL(offset) (0x00000000000003B0ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMAX_IBUFF_SADDR(offset) (0x0000000000000400ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMAX_NADDR(offset) (0x00000000000004A0ull + ((offset) & 7) * 16)
+#define CVMX_NPEI_DMA_CNTS (0x00000000000005E0ull)
+#define CVMX_NPEI_DMA_CONTROL (0x00000000000003A0ull)
+#define CVMX_NPEI_DMA_PCIE_REQ_NUM (0x00000000000005B0ull)
+#define CVMX_NPEI_DMA_STATE1 (0x00000000000006C0ull)
+#define CVMX_NPEI_DMA_STATE1_P1 (0x0000000000000680ull)
+#define CVMX_NPEI_DMA_STATE2 (0x00000000000006D0ull)
+#define CVMX_NPEI_DMA_STATE2_P1 (0x0000000000000690ull)
+#define CVMX_NPEI_DMA_STATE3_P1 (0x00000000000006A0ull)
+#define CVMX_NPEI_DMA_STATE4_P1 (0x00000000000006B0ull)
+#define CVMX_NPEI_DMA_STATE5_P1 (0x00000000000006C0ull)
+#define CVMX_NPEI_INT_A_ENB (0x0000000000000560ull)
+#define CVMX_NPEI_INT_A_ENB2 (0x0000000000003CE0ull)
+#define CVMX_NPEI_INT_A_SUM (0x0000000000000550ull)
+#define CVMX_NPEI_INT_ENB (0x0000000000000540ull)
+#define CVMX_NPEI_INT_ENB2 (0x0000000000003CD0ull)
+#define CVMX_NPEI_INT_INFO (0x0000000000000590ull)
+#define CVMX_NPEI_INT_SUM (0x0000000000000530ull)
+#define CVMX_NPEI_INT_SUM2 (0x0000000000003CC0ull)
+#define CVMX_NPEI_LAST_WIN_RDATA0 (0x0000000000000600ull)
+#define CVMX_NPEI_LAST_WIN_RDATA1 (0x0000000000000610ull)
+#define CVMX_NPEI_MEM_ACCESS_CTL (0x00000000000004F0ull)
+#define CVMX_NPEI_MEM_ACCESS_SUBIDX(offset) (0x0000000000000340ull + ((offset) & 31) * 16 - 16*12)
+#define CVMX_NPEI_MSI_ENB0 (0x0000000000003C50ull)
+#define CVMX_NPEI_MSI_ENB1 (0x0000000000003C60ull)
+#define CVMX_NPEI_MSI_ENB2 (0x0000000000003C70ull)
+#define CVMX_NPEI_MSI_ENB3 (0x0000000000003C80ull)
+#define CVMX_NPEI_MSI_RCV0 (0x0000000000003C10ull)
+#define CVMX_NPEI_MSI_RCV1 (0x0000000000003C20ull)
+#define CVMX_NPEI_MSI_RCV2 (0x0000000000003C30ull)
+#define CVMX_NPEI_MSI_RCV3 (0x0000000000003C40ull)
+#define CVMX_NPEI_MSI_RD_MAP (0x0000000000003CA0ull)
+#define CVMX_NPEI_MSI_W1C_ENB0 (0x0000000000003CF0ull)
+#define CVMX_NPEI_MSI_W1C_ENB1 (0x0000000000003D00ull)
+#define CVMX_NPEI_MSI_W1C_ENB2 (0x0000000000003D10ull)
+#define CVMX_NPEI_MSI_W1C_ENB3 (0x0000000000003D20ull)
+#define CVMX_NPEI_MSI_W1S_ENB0 (0x0000000000003D30ull)
+#define CVMX_NPEI_MSI_W1S_ENB1 (0x0000000000003D40ull)
+#define CVMX_NPEI_MSI_W1S_ENB2 (0x0000000000003D50ull)
+#define CVMX_NPEI_MSI_W1S_ENB3 (0x0000000000003D60ull)
+#define CVMX_NPEI_MSI_WR_MAP (0x0000000000003C90ull)
+#define CVMX_NPEI_PCIE_CREDIT_CNT (0x0000000000003D70ull)
+#define CVMX_NPEI_PCIE_MSI_RCV (0x0000000000003CB0ull)
+#define CVMX_NPEI_PCIE_MSI_RCV_B1 (0x0000000000000650ull)
+#define CVMX_NPEI_PCIE_MSI_RCV_B2 (0x0000000000000660ull)
+#define CVMX_NPEI_PCIE_MSI_RCV_B3 (0x0000000000000670ull)
+#define CVMX_NPEI_PKTX_CNTS(offset) (0x0000000000002400ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_BADDR(offset) (0x0000000000002800ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_BAOFF_DBELL(offset) (0x0000000000002C00ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_FIFO_RSIZE(offset) (0x0000000000003000ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_INSTR_HEADER(offset) (0x0000000000003400ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_IN_BP(offset) (0x0000000000003800ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_SLIST_BADDR(offset) (0x0000000000001400ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_SLIST_BAOFF_DBELL(offset) (0x0000000000001800ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKTX_SLIST_FIFO_RSIZE(offset) (0x0000000000001C00ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKT_CNT_INT (0x0000000000001110ull)
+#define CVMX_NPEI_PKT_CNT_INT_ENB (0x0000000000001130ull)
+#define CVMX_NPEI_PKT_DATA_OUT_ES (0x00000000000010B0ull)
+#define CVMX_NPEI_PKT_DATA_OUT_NS (0x00000000000010A0ull)
+#define CVMX_NPEI_PKT_DATA_OUT_ROR (0x0000000000001090ull)
+#define CVMX_NPEI_PKT_DPADDR (0x0000000000001080ull)
+#define CVMX_NPEI_PKT_INPUT_CONTROL (0x0000000000001150ull)
+#define CVMX_NPEI_PKT_INSTR_ENB (0x0000000000001000ull)
+#define CVMX_NPEI_PKT_INSTR_RD_SIZE (0x0000000000001190ull)
+#define CVMX_NPEI_PKT_INSTR_SIZE (0x0000000000001020ull)
+#define CVMX_NPEI_PKT_INT_LEVELS (0x0000000000001100ull)
+#define CVMX_NPEI_PKT_IN_BP (0x00000000000006B0ull)
+#define CVMX_NPEI_PKT_IN_DONEX_CNTS(offset) (0x0000000000002000ull + ((offset) & 31) * 16)
+#define CVMX_NPEI_PKT_IN_INSTR_COUNTS (0x00000000000006A0ull)
+#define CVMX_NPEI_PKT_IN_PCIE_PORT (0x00000000000011A0ull)
+#define CVMX_NPEI_PKT_IPTR (0x0000000000001070ull)
+#define CVMX_NPEI_PKT_OUTPUT_WMARK (0x0000000000001160ull)
+#define CVMX_NPEI_PKT_OUT_BMODE (0x00000000000010D0ull)
+#define CVMX_NPEI_PKT_OUT_ENB (0x0000000000001010ull)
+#define CVMX_NPEI_PKT_PCIE_PORT (0x00000000000010E0ull)
+#define CVMX_NPEI_PKT_PORT_IN_RST (0x0000000000000690ull)
+#define CVMX_NPEI_PKT_SLIST_ES (0x0000000000001050ull)
+#define CVMX_NPEI_PKT_SLIST_ID_SIZE (0x0000000000001180ull)
+#define CVMX_NPEI_PKT_SLIST_NS (0x0000000000001040ull)
+#define CVMX_NPEI_PKT_SLIST_ROR (0x0000000000001030ull)
+#define CVMX_NPEI_PKT_TIME_INT (0x0000000000001120ull)
+#define CVMX_NPEI_PKT_TIME_INT_ENB (0x0000000000001140ull)
+#define CVMX_NPEI_RSL_INT_BLOCKS (0x0000000000000520ull)
+#define CVMX_NPEI_SCRATCH_1 (0x0000000000000270ull)
+#define CVMX_NPEI_STATE1 (0x0000000000000620ull)
+#define CVMX_NPEI_STATE2 (0x0000000000000630ull)
+#define CVMX_NPEI_STATE3 (0x0000000000000640ull)
+#define CVMX_NPEI_WINDOW_CTL (0x0000000000000380ull)
+#define CVMX_NPEI_WIN_RD_ADDR (0x0000000000000210ull)
+#define CVMX_NPEI_WIN_RD_DATA (0x0000000000000240ull)
+#define CVMX_NPEI_WIN_WR_ADDR (0x0000000000000200ull)
+#define CVMX_NPEI_WIN_WR_DATA (0x0000000000000220ull)
+#define CVMX_NPEI_WIN_WR_MASK (0x0000000000000230ull)
union cvmx_npei_bar1_indexx {
uint32_t u32;
@@ -248,9 +156,7 @@ union cvmx_npei_bist_status {
uint64_t u64;
struct cvmx_npei_bist_status_s {
uint64_t pkt_rdf:1;
- uint64_t pkt_pmem:1;
- uint64_t pkt_p1:1;
- uint64_t reserved_60_60:1;
+ uint64_t reserved_60_62:3;
uint64_t pcr_gim:1;
uint64_t pkt_pif:1;
uint64_t pcsr_int:1;
@@ -301,9 +207,7 @@ union cvmx_npei_bist_status {
} s;
struct cvmx_npei_bist_status_cn52xx {
uint64_t pkt_rdf:1;
- uint64_t pkt_pmem:1;
- uint64_t pkt_p1:1;
- uint64_t reserved_60_60:1;
+ uint64_t reserved_60_62:3;
uint64_t pcr_gim:1;
uint64_t pkt_pif:1;
uint64_t pcsr_int:1;
@@ -410,66 +314,7 @@ union cvmx_npei_bist_status {
uint64_t msi:1;
uint64_t ncb_cmd:1;
} cn52xxp1;
- struct cvmx_npei_bist_status_cn56xx {
- uint64_t pkt_rdf:1;
- uint64_t reserved_60_62:3;
- uint64_t pcr_gim:1;
- uint64_t pkt_pif:1;
- uint64_t pcsr_int:1;
- uint64_t pcsr_im:1;
- uint64_t pcsr_cnt:1;
- uint64_t pcsr_id:1;
- uint64_t pcsr_sl:1;
- uint64_t pkt_imem:1;
- uint64_t pkt_pfm:1;
- uint64_t pkt_pof:1;
- uint64_t reserved_48_49:2;
- uint64_t pkt_pop0:1;
- uint64_t pkt_pop1:1;
- uint64_t d0_mem:1;
- uint64_t d1_mem:1;
- uint64_t d2_mem:1;
- uint64_t d3_mem:1;
- uint64_t d4_mem:1;
- uint64_t ds_mem:1;
- uint64_t reserved_36_39:4;
- uint64_t d0_pst:1;
- uint64_t d1_pst:1;
- uint64_t d2_pst:1;
- uint64_t d3_pst:1;
- uint64_t d4_pst:1;
- uint64_t n2p0_c:1;
- uint64_t n2p0_o:1;
- uint64_t n2p1_c:1;
- uint64_t n2p1_o:1;
- uint64_t cpl_p0:1;
- uint64_t cpl_p1:1;
- uint64_t p2n1_po:1;
- uint64_t p2n1_no:1;
- uint64_t p2n1_co:1;
- uint64_t p2n0_po:1;
- uint64_t p2n0_no:1;
- uint64_t p2n0_co:1;
- uint64_t p2n0_c0:1;
- uint64_t p2n0_c1:1;
- uint64_t p2n0_n:1;
- uint64_t p2n0_p0:1;
- uint64_t p2n0_p1:1;
- uint64_t p2n1_c0:1;
- uint64_t p2n1_c1:1;
- uint64_t p2n1_n:1;
- uint64_t p2n1_p0:1;
- uint64_t p2n1_p1:1;
- uint64_t csm0:1;
- uint64_t csm1:1;
- uint64_t dif0:1;
- uint64_t dif1:1;
- uint64_t dif2:1;
- uint64_t dif3:1;
- uint64_t dif4:1;
- uint64_t msi:1;
- uint64_t ncb_cmd:1;
- } cn56xx;
+ struct cvmx_npei_bist_status_cn52xx cn56xx;
struct cvmx_npei_bist_status_cn56xxp1 {
uint64_t reserved_58_63:6;
uint64_t pcsr_int:1;
@@ -536,7 +381,16 @@ union cvmx_npei_bist_status {
union cvmx_npei_bist_status2 {
uint64_t u64;
struct cvmx_npei_bist_status2_s {
- uint64_t reserved_5_63:59;
+ uint64_t reserved_14_63:50;
+ uint64_t prd_tag:1;
+ uint64_t prd_st0:1;
+ uint64_t prd_st1:1;
+ uint64_t prd_err:1;
+ uint64_t nrd_st:1;
+ uint64_t nwe_st:1;
+ uint64_t nwe_wr0:1;
+ uint64_t nwe_wr1:1;
+ uint64_t pkt_rd:1;
uint64_t psc_p0:1;
uint64_t psc_p1:1;
uint64_t pkt_gd:1;
@@ -630,8 +484,7 @@ union cvmx_npei_ctl_status {
} cn52xxp1;
struct cvmx_npei_ctl_status_s cn56xx;
struct cvmx_npei_ctl_status_cn56xxp1 {
- uint64_t reserved_16_63:48;
- uint64_t ring_en:1;
+ uint64_t reserved_15_63:49;
uint64_t lnk_rst:1;
uint64_t arb:1;
uint64_t pkt_bp:4;
@@ -756,14 +609,14 @@ union cvmx_npei_dmax_ibuff_saddr {
uint64_t saddr:29;
uint64_t reserved_0_6:7;
} s;
- struct cvmx_npei_dmax_ibuff_saddr_cn52xx {
+ struct cvmx_npei_dmax_ibuff_saddr_s cn52xx;
+ struct cvmx_npei_dmax_ibuff_saddr_cn52xxp1 {
uint64_t reserved_36_63:28;
uint64_t saddr:29;
uint64_t reserved_0_6:7;
- } cn52xx;
- struct cvmx_npei_dmax_ibuff_saddr_cn52xx cn52xxp1;
+ } cn52xxp1;
struct cvmx_npei_dmax_ibuff_saddr_s cn56xx;
- struct cvmx_npei_dmax_ibuff_saddr_cn52xx cn56xxp1;
+ struct cvmx_npei_dmax_ibuff_saddr_cn52xxp1 cn56xxp1;
};
union cvmx_npei_dmax_naddr {
@@ -817,7 +670,8 @@ union cvmx_npei_dma_cnts {
union cvmx_npei_dma_control {
uint64_t u64;
struct cvmx_npei_dma_control_s {
- uint64_t reserved_39_63:25;
+ uint64_t reserved_40_63:24;
+ uint64_t p_32b_m:1;
uint64_t dma4_enb:1;
uint64_t dma3_enb:1;
uint64_t dma2_enb:1;
@@ -853,7 +707,161 @@ union cvmx_npei_dma_control {
uint64_t csize:14;
} cn52xxp1;
struct cvmx_npei_dma_control_s cn56xx;
- struct cvmx_npei_dma_control_s cn56xxp1;
+ struct cvmx_npei_dma_control_cn56xxp1 {
+ uint64_t reserved_39_63:25;
+ uint64_t dma4_enb:1;
+ uint64_t dma3_enb:1;
+ uint64_t dma2_enb:1;
+ uint64_t dma1_enb:1;
+ uint64_t dma0_enb:1;
+ uint64_t b0_lend:1;
+ uint64_t dwb_denb:1;
+ uint64_t dwb_ichk:9;
+ uint64_t fpa_que:3;
+ uint64_t o_add1:1;
+ uint64_t o_ro:1;
+ uint64_t o_ns:1;
+ uint64_t o_es:2;
+ uint64_t o_mode:1;
+ uint64_t csize:14;
+ } cn56xxp1;
+};
+
+union cvmx_npei_dma_pcie_req_num {
+ uint64_t u64;
+ struct cvmx_npei_dma_pcie_req_num_s {
+ uint64_t dma_arb:1;
+ uint64_t reserved_53_62:10;
+ uint64_t pkt_cnt:5;
+ uint64_t reserved_45_47:3;
+ uint64_t dma4_cnt:5;
+ uint64_t reserved_37_39:3;
+ uint64_t dma3_cnt:5;
+ uint64_t reserved_29_31:3;
+ uint64_t dma2_cnt:5;
+ uint64_t reserved_21_23:3;
+ uint64_t dma1_cnt:5;
+ uint64_t reserved_13_15:3;
+ uint64_t dma0_cnt:5;
+ uint64_t reserved_5_7:3;
+ uint64_t dma_cnt:5;
+ } s;
+ struct cvmx_npei_dma_pcie_req_num_s cn52xx;
+ struct cvmx_npei_dma_pcie_req_num_s cn56xx;
+};
+
+union cvmx_npei_dma_state1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state1_s {
+ uint64_t reserved_40_63:24;
+ uint64_t d4_dwe:8;
+ uint64_t d3_dwe:8;
+ uint64_t d2_dwe:8;
+ uint64_t d1_dwe:8;
+ uint64_t d0_dwe:8;
+ } s;
+ struct cvmx_npei_dma_state1_s cn52xx;
+};
+
+union cvmx_npei_dma_state1_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state1_p1_s {
+ uint64_t reserved_60_63:4;
+ uint64_t d0_difst:7;
+ uint64_t d1_difst:7;
+ uint64_t d2_difst:7;
+ uint64_t d3_difst:7;
+ uint64_t d4_difst:7;
+ uint64_t d0_reqst:5;
+ uint64_t d1_reqst:5;
+ uint64_t d2_reqst:5;
+ uint64_t d3_reqst:5;
+ uint64_t d4_reqst:5;
+ } s;
+ struct cvmx_npei_dma_state1_p1_cn52xxp1 {
+ uint64_t reserved_60_63:4;
+ uint64_t d0_difst:7;
+ uint64_t d1_difst:7;
+ uint64_t d2_difst:7;
+ uint64_t d3_difst:7;
+ uint64_t reserved_25_31:7;
+ uint64_t d0_reqst:5;
+ uint64_t d1_reqst:5;
+ uint64_t d2_reqst:5;
+ uint64_t d3_reqst:5;
+ uint64_t reserved_0_4:5;
+ } cn52xxp1;
+ struct cvmx_npei_dma_state1_p1_s cn56xxp1;
+};
+
+union cvmx_npei_dma_state2 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state2_s {
+ uint64_t reserved_28_63:36;
+ uint64_t ndwe:4;
+ uint64_t reserved_21_23:3;
+ uint64_t ndre:5;
+ uint64_t reserved_10_15:6;
+ uint64_t prd:10;
+ } s;
+ struct cvmx_npei_dma_state2_s cn52xx;
+};
+
+union cvmx_npei_dma_state2_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state2_p1_s {
+ uint64_t reserved_45_63:19;
+ uint64_t d0_dffst:9;
+ uint64_t d1_dffst:9;
+ uint64_t d2_dffst:9;
+ uint64_t d3_dffst:9;
+ uint64_t d4_dffst:9;
+ } s;
+ struct cvmx_npei_dma_state2_p1_cn52xxp1 {
+ uint64_t reserved_45_63:19;
+ uint64_t d0_dffst:9;
+ uint64_t d1_dffst:9;
+ uint64_t d2_dffst:9;
+ uint64_t d3_dffst:9;
+ uint64_t reserved_0_8:9;
+ } cn52xxp1;
+ struct cvmx_npei_dma_state2_p1_s cn56xxp1;
+};
+
+union cvmx_npei_dma_state3_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state3_p1_s {
+ uint64_t reserved_60_63:4;
+ uint64_t d0_drest:15;
+ uint64_t d1_drest:15;
+ uint64_t d2_drest:15;
+ uint64_t d3_drest:15;
+ } s;
+ struct cvmx_npei_dma_state3_p1_s cn52xxp1;
+ struct cvmx_npei_dma_state3_p1_s cn56xxp1;
+};
+
+union cvmx_npei_dma_state4_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state4_p1_s {
+ uint64_t reserved_52_63:12;
+ uint64_t d0_dwest:13;
+ uint64_t d1_dwest:13;
+ uint64_t d2_dwest:13;
+ uint64_t d3_dwest:13;
+ } s;
+ struct cvmx_npei_dma_state4_p1_s cn52xxp1;
+ struct cvmx_npei_dma_state4_p1_s cn56xxp1;
+};
+
+union cvmx_npei_dma_state5_p1 {
+ uint64_t u64;
+ struct cvmx_npei_dma_state5_p1_s {
+ uint64_t reserved_28_63:36;
+ uint64_t d4_drest:15;
+ uint64_t d4_dwest:13;
+ } s;
+ struct cvmx_npei_dma_state5_p1_s cn56xxp1;
};
union cvmx_npei_int_a_enb {
@@ -871,17 +879,7 @@ union cvmx_npei_int_a_enb {
uint64_t dma1_cpl:1;
uint64_t dma0_cpl:1;
} s;
- struct cvmx_npei_int_a_enb_cn52xx {
- uint64_t reserved_8_63:56;
- uint64_t p1_rdlk:1;
- uint64_t p0_rdlk:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t dma1_cpl:1;
- uint64_t dma0_cpl:1;
- } cn52xx;
+ struct cvmx_npei_int_a_enb_s cn52xx;
struct cvmx_npei_int_a_enb_cn52xxp1 {
uint64_t reserved_2_63:62;
uint64_t dma1_cpl:1;
@@ -905,16 +903,7 @@ union cvmx_npei_int_a_enb2 {
uint64_t dma1_cpl:1;
uint64_t dma0_cpl:1;
} s;
- struct cvmx_npei_int_a_enb2_cn52xx {
- uint64_t reserved_8_63:56;
- uint64_t p1_rdlk:1;
- uint64_t p0_rdlk:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t reserved_0_1:2;
- } cn52xx;
+ struct cvmx_npei_int_a_enb2_s cn52xx;
struct cvmx_npei_int_a_enb2_cn52xxp1 {
uint64_t reserved_2_63:62;
uint64_t dma1_cpl:1;
@@ -938,17 +927,7 @@ union cvmx_npei_int_a_sum {
uint64_t dma1_cpl:1;
uint64_t dma0_cpl:1;
} s;
- struct cvmx_npei_int_a_sum_cn52xx {
- uint64_t reserved_8_63:56;
- uint64_t p1_rdlk:1;
- uint64_t p0_rdlk:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t dma1_cpl:1;
- uint64_t dma0_cpl:1;
- } cn52xx;
+ struct cvmx_npei_int_a_sum_s cn52xx;
struct cvmx_npei_int_a_sum_cn52xxp1 {
uint64_t reserved_2_63:62;
uint64_t dma1_cpl:1;
@@ -1550,10 +1529,7 @@ union cvmx_npei_int_sum {
uint64_t c0_se:1;
uint64_t reserved_20_20:1;
uint64_t c0_aeri:1;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
+ uint64_t reserved_15_18:4;
uint64_t dtime1:1;
uint64_t dtime0:1;
uint64_t dcnt1:1;
@@ -1959,7 +1935,6 @@ union cvmx_npei_pktx_cnts {
} s;
struct cvmx_npei_pktx_cnts_s cn52xx;
struct cvmx_npei_pktx_cnts_s cn56xx;
- struct cvmx_npei_pktx_cnts_s cn56xxp1;
};
union cvmx_npei_pktx_in_bp {
@@ -1970,7 +1945,6 @@ union cvmx_npei_pktx_in_bp {
} s;
struct cvmx_npei_pktx_in_bp_s cn52xx;
struct cvmx_npei_pktx_in_bp_s cn56xx;
- struct cvmx_npei_pktx_in_bp_s cn56xxp1;
};
union cvmx_npei_pktx_instr_baddr {
@@ -1981,7 +1955,6 @@ union cvmx_npei_pktx_instr_baddr {
} s;
struct cvmx_npei_pktx_instr_baddr_s cn52xx;
struct cvmx_npei_pktx_instr_baddr_s cn56xx;
- struct cvmx_npei_pktx_instr_baddr_s cn56xxp1;
};
union cvmx_npei_pktx_instr_baoff_dbell {
@@ -1992,7 +1965,6 @@ union cvmx_npei_pktx_instr_baoff_dbell {
} s;
struct cvmx_npei_pktx_instr_baoff_dbell_s cn52xx;
struct cvmx_npei_pktx_instr_baoff_dbell_s cn56xx;
- struct cvmx_npei_pktx_instr_baoff_dbell_s cn56xxp1;
};
union cvmx_npei_pktx_instr_fifo_rsize {
@@ -2006,7 +1978,6 @@ union cvmx_npei_pktx_instr_fifo_rsize {
} s;
struct cvmx_npei_pktx_instr_fifo_rsize_s cn52xx;
struct cvmx_npei_pktx_instr_fifo_rsize_s cn56xx;
- struct cvmx_npei_pktx_instr_fifo_rsize_s cn56xxp1;
};
union cvmx_npei_pktx_instr_header {
@@ -2014,21 +1985,20 @@ union cvmx_npei_pktx_instr_header {
struct cvmx_npei_pktx_instr_header_s {
uint64_t reserved_44_63:20;
uint64_t pbp:1;
- uint64_t rsv_f:5;
+ uint64_t reserved_38_42:5;
uint64_t rparmode:2;
- uint64_t rsv_e:1;
+ uint64_t reserved_35_35:1;
uint64_t rskp_len:7;
- uint64_t rsv_d:6;
+ uint64_t reserved_22_27:6;
uint64_t use_ihdr:1;
- uint64_t rsv_c:5;
+ uint64_t reserved_16_20:5;
uint64_t par_mode:2;
- uint64_t rsv_b:1;
+ uint64_t reserved_13_13:1;
uint64_t skp_len:7;
- uint64_t rsv_a:6;
+ uint64_t reserved_0_5:6;
} s;
struct cvmx_npei_pktx_instr_header_s cn52xx;
struct cvmx_npei_pktx_instr_header_s cn56xx;
- struct cvmx_npei_pktx_instr_header_s cn56xxp1;
};
union cvmx_npei_pktx_slist_baddr {
@@ -2039,7 +2009,6 @@ union cvmx_npei_pktx_slist_baddr {
} s;
struct cvmx_npei_pktx_slist_baddr_s cn52xx;
struct cvmx_npei_pktx_slist_baddr_s cn56xx;
- struct cvmx_npei_pktx_slist_baddr_s cn56xxp1;
};
union cvmx_npei_pktx_slist_baoff_dbell {
@@ -2050,7 +2019,6 @@ union cvmx_npei_pktx_slist_baoff_dbell {
} s;
struct cvmx_npei_pktx_slist_baoff_dbell_s cn52xx;
struct cvmx_npei_pktx_slist_baoff_dbell_s cn56xx;
- struct cvmx_npei_pktx_slist_baoff_dbell_s cn56xxp1;
};
union cvmx_npei_pktx_slist_fifo_rsize {
@@ -2061,7 +2029,6 @@ union cvmx_npei_pktx_slist_fifo_rsize {
} s;
struct cvmx_npei_pktx_slist_fifo_rsize_s cn52xx;
struct cvmx_npei_pktx_slist_fifo_rsize_s cn56xx;
- struct cvmx_npei_pktx_slist_fifo_rsize_s cn56xxp1;
};
union cvmx_npei_pkt_cnt_int {
@@ -2072,7 +2039,6 @@ union cvmx_npei_pkt_cnt_int {
} s;
struct cvmx_npei_pkt_cnt_int_s cn52xx;
struct cvmx_npei_pkt_cnt_int_s cn56xx;
- struct cvmx_npei_pkt_cnt_int_s cn56xxp1;
};
union cvmx_npei_pkt_cnt_int_enb {
@@ -2083,7 +2049,6 @@ union cvmx_npei_pkt_cnt_int_enb {
} s;
struct cvmx_npei_pkt_cnt_int_enb_s cn52xx;
struct cvmx_npei_pkt_cnt_int_enb_s cn56xx;
- struct cvmx_npei_pkt_cnt_int_enb_s cn56xxp1;
};
union cvmx_npei_pkt_data_out_es {
@@ -2093,7 +2058,6 @@ union cvmx_npei_pkt_data_out_es {
} s;
struct cvmx_npei_pkt_data_out_es_s cn52xx;
struct cvmx_npei_pkt_data_out_es_s cn56xx;
- struct cvmx_npei_pkt_data_out_es_s cn56xxp1;
};
union cvmx_npei_pkt_data_out_ns {
@@ -2104,7 +2068,6 @@ union cvmx_npei_pkt_data_out_ns {
} s;
struct cvmx_npei_pkt_data_out_ns_s cn52xx;
struct cvmx_npei_pkt_data_out_ns_s cn56xx;
- struct cvmx_npei_pkt_data_out_ns_s cn56xxp1;
};
union cvmx_npei_pkt_data_out_ror {
@@ -2115,7 +2078,6 @@ union cvmx_npei_pkt_data_out_ror {
} s;
struct cvmx_npei_pkt_data_out_ror_s cn52xx;
struct cvmx_npei_pkt_data_out_ror_s cn56xx;
- struct cvmx_npei_pkt_data_out_ror_s cn56xxp1;
};
union cvmx_npei_pkt_dpaddr {
@@ -2126,7 +2088,6 @@ union cvmx_npei_pkt_dpaddr {
} s;
struct cvmx_npei_pkt_dpaddr_s cn52xx;
struct cvmx_npei_pkt_dpaddr_s cn56xx;
- struct cvmx_npei_pkt_dpaddr_s cn56xxp1;
};
union cvmx_npei_pkt_in_bp {
@@ -2135,6 +2096,7 @@ union cvmx_npei_pkt_in_bp {
uint64_t reserved_32_63:32;
uint64_t bp:32;
} s;
+ struct cvmx_npei_pkt_in_bp_s cn52xx;
struct cvmx_npei_pkt_in_bp_s cn56xx;
};
@@ -2146,7 +2108,6 @@ union cvmx_npei_pkt_in_donex_cnts {
} s;
struct cvmx_npei_pkt_in_donex_cnts_s cn52xx;
struct cvmx_npei_pkt_in_donex_cnts_s cn56xx;
- struct cvmx_npei_pkt_in_donex_cnts_s cn56xxp1;
};
union cvmx_npei_pkt_in_instr_counts {
@@ -2184,7 +2145,6 @@ union cvmx_npei_pkt_input_control {
} s;
struct cvmx_npei_pkt_input_control_s cn52xx;
struct cvmx_npei_pkt_input_control_s cn56xx;
- struct cvmx_npei_pkt_input_control_s cn56xxp1;
};
union cvmx_npei_pkt_instr_enb {
@@ -2195,7 +2155,6 @@ union cvmx_npei_pkt_instr_enb {
} s;
struct cvmx_npei_pkt_instr_enb_s cn52xx;
struct cvmx_npei_pkt_instr_enb_s cn56xx;
- struct cvmx_npei_pkt_instr_enb_s cn56xxp1;
};
union cvmx_npei_pkt_instr_rd_size {
@@ -2215,7 +2174,6 @@ union cvmx_npei_pkt_instr_size {
} s;
struct cvmx_npei_pkt_instr_size_s cn52xx;
struct cvmx_npei_pkt_instr_size_s cn56xx;
- struct cvmx_npei_pkt_instr_size_s cn56xxp1;
};
union cvmx_npei_pkt_int_levels {
@@ -2227,7 +2185,6 @@ union cvmx_npei_pkt_int_levels {
} s;
struct cvmx_npei_pkt_int_levels_s cn52xx;
struct cvmx_npei_pkt_int_levels_s cn56xx;
- struct cvmx_npei_pkt_int_levels_s cn56xxp1;
};
union cvmx_npei_pkt_iptr {
@@ -2238,7 +2195,6 @@ union cvmx_npei_pkt_iptr {
} s;
struct cvmx_npei_pkt_iptr_s cn52xx;
struct cvmx_npei_pkt_iptr_s cn56xx;
- struct cvmx_npei_pkt_iptr_s cn56xxp1;
};
union cvmx_npei_pkt_out_bmode {
@@ -2249,7 +2205,6 @@ union cvmx_npei_pkt_out_bmode {
} s;
struct cvmx_npei_pkt_out_bmode_s cn52xx;
struct cvmx_npei_pkt_out_bmode_s cn56xx;
- struct cvmx_npei_pkt_out_bmode_s cn56xxp1;
};
union cvmx_npei_pkt_out_enb {
@@ -2260,7 +2215,6 @@ union cvmx_npei_pkt_out_enb {
} s;
struct cvmx_npei_pkt_out_enb_s cn52xx;
struct cvmx_npei_pkt_out_enb_s cn56xx;
- struct cvmx_npei_pkt_out_enb_s cn56xxp1;
};
union cvmx_npei_pkt_output_wmark {
@@ -2280,7 +2234,6 @@ union cvmx_npei_pkt_pcie_port {
} s;
struct cvmx_npei_pkt_pcie_port_s cn52xx;
struct cvmx_npei_pkt_pcie_port_s cn56xx;
- struct cvmx_npei_pkt_pcie_port_s cn56xxp1;
};
union cvmx_npei_pkt_port_in_rst {
@@ -2300,7 +2253,6 @@ union cvmx_npei_pkt_slist_es {
} s;
struct cvmx_npei_pkt_slist_es_s cn52xx;
struct cvmx_npei_pkt_slist_es_s cn56xx;
- struct cvmx_npei_pkt_slist_es_s cn56xxp1;
};
union cvmx_npei_pkt_slist_id_size {
@@ -2312,7 +2264,6 @@ union cvmx_npei_pkt_slist_id_size {
} s;
struct cvmx_npei_pkt_slist_id_size_s cn52xx;
struct cvmx_npei_pkt_slist_id_size_s cn56xx;
- struct cvmx_npei_pkt_slist_id_size_s cn56xxp1;
};
union cvmx_npei_pkt_slist_ns {
@@ -2323,7 +2274,6 @@ union cvmx_npei_pkt_slist_ns {
} s;
struct cvmx_npei_pkt_slist_ns_s cn52xx;
struct cvmx_npei_pkt_slist_ns_s cn56xx;
- struct cvmx_npei_pkt_slist_ns_s cn56xxp1;
};
union cvmx_npei_pkt_slist_ror {
@@ -2334,7 +2284,6 @@ union cvmx_npei_pkt_slist_ror {
} s;
struct cvmx_npei_pkt_slist_ror_s cn52xx;
struct cvmx_npei_pkt_slist_ror_s cn56xx;
- struct cvmx_npei_pkt_slist_ror_s cn56xxp1;
};
union cvmx_npei_pkt_time_int {
@@ -2345,7 +2294,6 @@ union cvmx_npei_pkt_time_int {
} s;
struct cvmx_npei_pkt_time_int_s cn52xx;
struct cvmx_npei_pkt_time_int_s cn56xx;
- struct cvmx_npei_pkt_time_int_s cn56xxp1;
};
union cvmx_npei_pkt_time_int_enb {
@@ -2356,7 +2304,6 @@ union cvmx_npei_pkt_time_int_enb {
} s;
struct cvmx_npei_pkt_time_int_enb_s cn52xx;
struct cvmx_npei_pkt_time_int_enb_s cn56xx;
- struct cvmx_npei_pkt_time_int_enb_s cn56xxp1;
};
union cvmx_npei_rsl_int_blocks {
@@ -2371,7 +2318,8 @@ union cvmx_npei_rsl_int_blocks {
uint64_t asxpcs0:1;
uint64_t reserved_21_21:1;
uint64_t pip:1;
- uint64_t reserved_18_19:2;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
uint64_t lmc0:1;
uint64_t l2c:1;
uint64_t usb1:1;
@@ -2383,7 +2331,7 @@ union cvmx_npei_rsl_int_blocks {
uint64_t ipd:1;
uint64_t reserved_8_8:1;
uint64_t zip:1;
- uint64_t reserved_6_6:1;
+ uint64_t dfa:1;
uint64_t fpa:1;
uint64_t key:1;
uint64_t npei:1;
@@ -2393,37 +2341,8 @@ union cvmx_npei_rsl_int_blocks {
} s;
struct cvmx_npei_rsl_int_blocks_s cn52xx;
struct cvmx_npei_rsl_int_blocks_s cn52xxp1;
- struct cvmx_npei_rsl_int_blocks_cn56xx {
- uint64_t reserved_31_63:33;
- uint64_t iob:1;
- uint64_t lmc1:1;
- uint64_t agl:1;
- uint64_t reserved_24_27:4;
- uint64_t asxpcs1:1;
- uint64_t asxpcs0:1;
- uint64_t reserved_21_21:1;
- uint64_t pip:1;
- uint64_t reserved_18_19:2;
- uint64_t lmc0:1;
- uint64_t l2c:1;
- uint64_t reserved_15_15:1;
- uint64_t rad:1;
- uint64_t usb:1;
- uint64_t pow:1;
- uint64_t tim:1;
- uint64_t pko:1;
- uint64_t ipd:1;
- uint64_t reserved_8_8:1;
- uint64_t zip:1;
- uint64_t reserved_6_6:1;
- uint64_t fpa:1;
- uint64_t key:1;
- uint64_t npei:1;
- uint64_t gmx1:1;
- uint64_t gmx0:1;
- uint64_t mio:1;
- } cn56xx;
- struct cvmx_npei_rsl_int_blocks_cn56xx cn56xxp1;
+ struct cvmx_npei_rsl_int_blocks_s cn56xx;
+ struct cvmx_npei_rsl_int_blocks_s cn56xxp1;
};
union cvmx_npei_scratch_1 {
diff --git a/arch/mips/include/asm/octeon/cvmx-npi-defs.h b/arch/mips/include/asm/octeon/cvmx-npi-defs.h
index 4e03cd8561e..f089c780060 100644
--- a/arch/mips/include/asm/octeon/cvmx-npi-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-npi-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,246 +28,126 @@
#ifndef __CVMX_NPI_DEFS_H__
#define __CVMX_NPI_DEFS_H__
-#define CVMX_NPI_BASE_ADDR_INPUT0 \
- CVMX_ADD_IO_SEG(0x00011F0000000070ull)
-#define CVMX_NPI_BASE_ADDR_INPUT1 \
- CVMX_ADD_IO_SEG(0x00011F0000000080ull)
-#define CVMX_NPI_BASE_ADDR_INPUT2 \
- CVMX_ADD_IO_SEG(0x00011F0000000090ull)
-#define CVMX_NPI_BASE_ADDR_INPUT3 \
- CVMX_ADD_IO_SEG(0x00011F00000000A0ull)
-#define CVMX_NPI_BASE_ADDR_INPUTX(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000000070ull + (((offset) & 3) * 16))
-#define CVMX_NPI_BASE_ADDR_OUTPUT0 \
- CVMX_ADD_IO_SEG(0x00011F00000000B8ull)
-#define CVMX_NPI_BASE_ADDR_OUTPUT1 \
- CVMX_ADD_IO_SEG(0x00011F00000000C0ull)
-#define CVMX_NPI_BASE_ADDR_OUTPUT2 \
- CVMX_ADD_IO_SEG(0x00011F00000000C8ull)
-#define CVMX_NPI_BASE_ADDR_OUTPUT3 \
- CVMX_ADD_IO_SEG(0x00011F00000000D0ull)
-#define CVMX_NPI_BASE_ADDR_OUTPUTX(offset) \
- CVMX_ADD_IO_SEG(0x00011F00000000B8ull + (((offset) & 3) * 8))
-#define CVMX_NPI_BIST_STATUS \
- CVMX_ADD_IO_SEG(0x00011F00000003F8ull)
-#define CVMX_NPI_BUFF_SIZE_OUTPUT0 \
- CVMX_ADD_IO_SEG(0x00011F00000000E0ull)
-#define CVMX_NPI_BUFF_SIZE_OUTPUT1 \
- CVMX_ADD_IO_SEG(0x00011F00000000E8ull)
-#define CVMX_NPI_BUFF_SIZE_OUTPUT2 \
- CVMX_ADD_IO_SEG(0x00011F00000000F0ull)
-#define CVMX_NPI_BUFF_SIZE_OUTPUT3 \
- CVMX_ADD_IO_SEG(0x00011F00000000F8ull)
-#define CVMX_NPI_BUFF_SIZE_OUTPUTX(offset) \
- CVMX_ADD_IO_SEG(0x00011F00000000E0ull + (((offset) & 3) * 8))
-#define CVMX_NPI_COMP_CTL \
- CVMX_ADD_IO_SEG(0x00011F0000000218ull)
-#define CVMX_NPI_CTL_STATUS \
- CVMX_ADD_IO_SEG(0x00011F0000000010ull)
-#define CVMX_NPI_DBG_SELECT \
- CVMX_ADD_IO_SEG(0x00011F0000000008ull)
-#define CVMX_NPI_DMA_CONTROL \
- CVMX_ADD_IO_SEG(0x00011F0000000128ull)
-#define CVMX_NPI_DMA_HIGHP_COUNTS \
- CVMX_ADD_IO_SEG(0x00011F0000000148ull)
-#define CVMX_NPI_DMA_HIGHP_NADDR \
- CVMX_ADD_IO_SEG(0x00011F0000000158ull)
-#define CVMX_NPI_DMA_LOWP_COUNTS \
- CVMX_ADD_IO_SEG(0x00011F0000000140ull)
-#define CVMX_NPI_DMA_LOWP_NADDR \
- CVMX_ADD_IO_SEG(0x00011F0000000150ull)
-#define CVMX_NPI_HIGHP_DBELL \
- CVMX_ADD_IO_SEG(0x00011F0000000120ull)
-#define CVMX_NPI_HIGHP_IBUFF_SADDR \
- CVMX_ADD_IO_SEG(0x00011F0000000110ull)
-#define CVMX_NPI_INPUT_CONTROL \
- CVMX_ADD_IO_SEG(0x00011F0000000138ull)
-#define CVMX_NPI_INT_ENB \
- CVMX_ADD_IO_SEG(0x00011F0000000020ull)
-#define CVMX_NPI_INT_SUM \
- CVMX_ADD_IO_SEG(0x00011F0000000018ull)
-#define CVMX_NPI_LOWP_DBELL \
- CVMX_ADD_IO_SEG(0x00011F0000000118ull)
-#define CVMX_NPI_LOWP_IBUFF_SADDR \
- CVMX_ADD_IO_SEG(0x00011F0000000108ull)
-#define CVMX_NPI_MEM_ACCESS_SUBID3 \
- CVMX_ADD_IO_SEG(0x00011F0000000028ull)
-#define CVMX_NPI_MEM_ACCESS_SUBID4 \
- CVMX_ADD_IO_SEG(0x00011F0000000030ull)
-#define CVMX_NPI_MEM_ACCESS_SUBID5 \
- CVMX_ADD_IO_SEG(0x00011F0000000038ull)
-#define CVMX_NPI_MEM_ACCESS_SUBID6 \
- CVMX_ADD_IO_SEG(0x00011F0000000040ull)
-#define CVMX_NPI_MEM_ACCESS_SUBIDX(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000000028ull + (((offset) & 7) * 8) - 8 * 3)
-#define CVMX_NPI_MSI_RCV \
- (0x0000000000000190ull)
-#define CVMX_NPI_NPI_MSI_RCV \
- CVMX_ADD_IO_SEG(0x00011F0000001190ull)
-#define CVMX_NPI_NUM_DESC_OUTPUT0 \
- CVMX_ADD_IO_SEG(0x00011F0000000050ull)
-#define CVMX_NPI_NUM_DESC_OUTPUT1 \
- CVMX_ADD_IO_SEG(0x00011F0000000058ull)
-#define CVMX_NPI_NUM_DESC_OUTPUT2 \
- CVMX_ADD_IO_SEG(0x00011F0000000060ull)
-#define CVMX_NPI_NUM_DESC_OUTPUT3 \
- CVMX_ADD_IO_SEG(0x00011F0000000068ull)
-#define CVMX_NPI_NUM_DESC_OUTPUTX(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000000050ull + (((offset) & 3) * 8))
-#define CVMX_NPI_OUTPUT_CONTROL \
- CVMX_ADD_IO_SEG(0x00011F0000000100ull)
-#define CVMX_NPI_P0_DBPAIR_ADDR \
- CVMX_ADD_IO_SEG(0x00011F0000000180ull)
-#define CVMX_NPI_P0_INSTR_ADDR \
- CVMX_ADD_IO_SEG(0x00011F00000001C0ull)
-#define CVMX_NPI_P0_INSTR_CNTS \
- CVMX_ADD_IO_SEG(0x00011F00000001A0ull)
-#define CVMX_NPI_P0_PAIR_CNTS \
- CVMX_ADD_IO_SEG(0x00011F0000000160ull)
-#define CVMX_NPI_P1_DBPAIR_ADDR \
- CVMX_ADD_IO_SEG(0x00011F0000000188ull)
-#define CVMX_NPI_P1_INSTR_ADDR \
- CVMX_ADD_IO_SEG(0x00011F00000001C8ull)
-#define CVMX_NPI_P1_INSTR_CNTS \
- CVMX_ADD_IO_SEG(0x00011F00000001A8ull)
-#define CVMX_NPI_P1_PAIR_CNTS \
- CVMX_ADD_IO_SEG(0x00011F0000000168ull)
-#define CVMX_NPI_P2_DBPAIR_ADDR \
- CVMX_ADD_IO_SEG(0x00011F0000000190ull)
-#define CVMX_NPI_P2_INSTR_ADDR \
- CVMX_ADD_IO_SEG(0x00011F00000001D0ull)
-#define CVMX_NPI_P2_INSTR_CNTS \
- CVMX_ADD_IO_SEG(0x00011F00000001B0ull)
-#define CVMX_NPI_P2_PAIR_CNTS \
- CVMX_ADD_IO_SEG(0x00011F0000000170ull)
-#define CVMX_NPI_P3_DBPAIR_ADDR \
- CVMX_ADD_IO_SEG(0x00011F0000000198ull)
-#define CVMX_NPI_P3_INSTR_ADDR \
- CVMX_ADD_IO_SEG(0x00011F00000001D8ull)
-#define CVMX_NPI_P3_INSTR_CNTS \
- CVMX_ADD_IO_SEG(0x00011F00000001B8ull)
-#define CVMX_NPI_P3_PAIR_CNTS \
- CVMX_ADD_IO_SEG(0x00011F0000000178ull)
-#define CVMX_NPI_PCI_BAR1_INDEXX(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000001100ull + (((offset) & 31) * 4))
-#define CVMX_NPI_PCI_BIST_REG \
- CVMX_ADD_IO_SEG(0x00011F00000011C0ull)
-#define CVMX_NPI_PCI_BURST_SIZE \
- CVMX_ADD_IO_SEG(0x00011F00000000D8ull)
-#define CVMX_NPI_PCI_CFG00 \
- CVMX_ADD_IO_SEG(0x00011F0000001800ull)
-#define CVMX_NPI_PCI_CFG01 \
- CVMX_ADD_IO_SEG(0x00011F0000001804ull)
-#define CVMX_NPI_PCI_CFG02 \
- CVMX_ADD_IO_SEG(0x00011F0000001808ull)
-#define CVMX_NPI_PCI_CFG03 \
- CVMX_ADD_IO_SEG(0x00011F000000180Cull)
-#define CVMX_NPI_PCI_CFG04 \
- CVMX_ADD_IO_SEG(0x00011F0000001810ull)
-#define CVMX_NPI_PCI_CFG05 \
- CVMX_ADD_IO_SEG(0x00011F0000001814ull)
-#define CVMX_NPI_PCI_CFG06 \
- CVMX_ADD_IO_SEG(0x00011F0000001818ull)
-#define CVMX_NPI_PCI_CFG07 \
- CVMX_ADD_IO_SEG(0x00011F000000181Cull)
-#define CVMX_NPI_PCI_CFG08 \
- CVMX_ADD_IO_SEG(0x00011F0000001820ull)
-#define CVMX_NPI_PCI_CFG09 \
- CVMX_ADD_IO_SEG(0x00011F0000001824ull)
-#define CVMX_NPI_PCI_CFG10 \
- CVMX_ADD_IO_SEG(0x00011F0000001828ull)
-#define CVMX_NPI_PCI_CFG11 \
- CVMX_ADD_IO_SEG(0x00011F000000182Cull)
-#define CVMX_NPI_PCI_CFG12 \
- CVMX_ADD_IO_SEG(0x00011F0000001830ull)
-#define CVMX_NPI_PCI_CFG13 \
- CVMX_ADD_IO_SEG(0x00011F0000001834ull)
-#define CVMX_NPI_PCI_CFG15 \
- CVMX_ADD_IO_SEG(0x00011F000000183Cull)
-#define CVMX_NPI_PCI_CFG16 \
- CVMX_ADD_IO_SEG(0x00011F0000001840ull)
-#define CVMX_NPI_PCI_CFG17 \
- CVMX_ADD_IO_SEG(0x00011F0000001844ull)
-#define CVMX_NPI_PCI_CFG18 \
- CVMX_ADD_IO_SEG(0x00011F0000001848ull)
-#define CVMX_NPI_PCI_CFG19 \
- CVMX_ADD_IO_SEG(0x00011F000000184Cull)
-#define CVMX_NPI_PCI_CFG20 \
- CVMX_ADD_IO_SEG(0x00011F0000001850ull)
-#define CVMX_NPI_PCI_CFG21 \
- CVMX_ADD_IO_SEG(0x00011F0000001854ull)
-#define CVMX_NPI_PCI_CFG22 \
- CVMX_ADD_IO_SEG(0x00011F0000001858ull)
-#define CVMX_NPI_PCI_CFG56 \
- CVMX_ADD_IO_SEG(0x00011F00000018E0ull)
-#define CVMX_NPI_PCI_CFG57 \
- CVMX_ADD_IO_SEG(0x00011F00000018E4ull)
-#define CVMX_NPI_PCI_CFG58 \
- CVMX_ADD_IO_SEG(0x00011F00000018E8ull)
-#define CVMX_NPI_PCI_CFG59 \
- CVMX_ADD_IO_SEG(0x00011F00000018ECull)
-#define CVMX_NPI_PCI_CFG60 \
- CVMX_ADD_IO_SEG(0x00011F00000018F0ull)
-#define CVMX_NPI_PCI_CFG61 \
- CVMX_ADD_IO_SEG(0x00011F00000018F4ull)
-#define CVMX_NPI_PCI_CFG62 \
- CVMX_ADD_IO_SEG(0x00011F00000018F8ull)
-#define CVMX_NPI_PCI_CFG63 \
- CVMX_ADD_IO_SEG(0x00011F00000018FCull)
-#define CVMX_NPI_PCI_CNT_REG \
- CVMX_ADD_IO_SEG(0x00011F00000011B8ull)
-#define CVMX_NPI_PCI_CTL_STATUS_2 \
- CVMX_ADD_IO_SEG(0x00011F000000118Cull)
-#define CVMX_NPI_PCI_INT_ARB_CFG \
- CVMX_ADD_IO_SEG(0x00011F0000000130ull)
-#define CVMX_NPI_PCI_INT_ENB2 \
- CVMX_ADD_IO_SEG(0x00011F00000011A0ull)
-#define CVMX_NPI_PCI_INT_SUM2 \
- CVMX_ADD_IO_SEG(0x00011F0000001198ull)
-#define CVMX_NPI_PCI_READ_CMD \
- CVMX_ADD_IO_SEG(0x00011F0000000048ull)
-#define CVMX_NPI_PCI_READ_CMD_6 \
- CVMX_ADD_IO_SEG(0x00011F0000001180ull)
-#define CVMX_NPI_PCI_READ_CMD_C \
- CVMX_ADD_IO_SEG(0x00011F0000001184ull)
-#define CVMX_NPI_PCI_READ_CMD_E \
- CVMX_ADD_IO_SEG(0x00011F0000001188ull)
-#define CVMX_NPI_PCI_SCM_REG \
- CVMX_ADD_IO_SEG(0x00011F00000011A8ull)
-#define CVMX_NPI_PCI_TSR_REG \
- CVMX_ADD_IO_SEG(0x00011F00000011B0ull)
-#define CVMX_NPI_PORT32_INSTR_HDR \
- CVMX_ADD_IO_SEG(0x00011F00000001F8ull)
-#define CVMX_NPI_PORT33_INSTR_HDR \
- CVMX_ADD_IO_SEG(0x00011F0000000200ull)
-#define CVMX_NPI_PORT34_INSTR_HDR \
- CVMX_ADD_IO_SEG(0x00011F0000000208ull)
-#define CVMX_NPI_PORT35_INSTR_HDR \
- CVMX_ADD_IO_SEG(0x00011F0000000210ull)
-#define CVMX_NPI_PORT_BP_CONTROL \
- CVMX_ADD_IO_SEG(0x00011F00000001F0ull)
-#define CVMX_NPI_PX_DBPAIR_ADDR(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000000180ull + (((offset) & 3) * 8))
-#define CVMX_NPI_PX_INSTR_ADDR(offset) \
- CVMX_ADD_IO_SEG(0x00011F00000001C0ull + (((offset) & 3) * 8))
-#define CVMX_NPI_PX_INSTR_CNTS(offset) \
- CVMX_ADD_IO_SEG(0x00011F00000001A0ull + (((offset) & 3) * 8))
-#define CVMX_NPI_PX_PAIR_CNTS(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000000160ull + (((offset) & 3) * 8))
-#define CVMX_NPI_RSL_INT_BLOCKS \
- CVMX_ADD_IO_SEG(0x00011F0000000000ull)
-#define CVMX_NPI_SIZE_INPUT0 \
- CVMX_ADD_IO_SEG(0x00011F0000000078ull)
-#define CVMX_NPI_SIZE_INPUT1 \
- CVMX_ADD_IO_SEG(0x00011F0000000088ull)
-#define CVMX_NPI_SIZE_INPUT2 \
- CVMX_ADD_IO_SEG(0x00011F0000000098ull)
-#define CVMX_NPI_SIZE_INPUT3 \
- CVMX_ADD_IO_SEG(0x00011F00000000A8ull)
-#define CVMX_NPI_SIZE_INPUTX(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000000078ull + (((offset) & 3) * 16))
-#define CVMX_NPI_WIN_READ_TO \
- CVMX_ADD_IO_SEG(0x00011F00000001E0ull)
+#define CVMX_NPI_BASE_ADDR_INPUT0 CVMX_NPI_BASE_ADDR_INPUTX(0)
+#define CVMX_NPI_BASE_ADDR_INPUT1 CVMX_NPI_BASE_ADDR_INPUTX(1)
+#define CVMX_NPI_BASE_ADDR_INPUT2 CVMX_NPI_BASE_ADDR_INPUTX(2)
+#define CVMX_NPI_BASE_ADDR_INPUT3 CVMX_NPI_BASE_ADDR_INPUTX(3)
+#define CVMX_NPI_BASE_ADDR_INPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000000070ull) + ((offset) & 3) * 16)
+#define CVMX_NPI_BASE_ADDR_OUTPUT0 CVMX_NPI_BASE_ADDR_OUTPUTX(0)
+#define CVMX_NPI_BASE_ADDR_OUTPUT1 CVMX_NPI_BASE_ADDR_OUTPUTX(1)
+#define CVMX_NPI_BASE_ADDR_OUTPUT2 CVMX_NPI_BASE_ADDR_OUTPUTX(2)
+#define CVMX_NPI_BASE_ADDR_OUTPUT3 CVMX_NPI_BASE_ADDR_OUTPUTX(3)
+#define CVMX_NPI_BASE_ADDR_OUTPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F00000000B8ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011F00000003F8ull))
+#define CVMX_NPI_BUFF_SIZE_OUTPUT0 CVMX_NPI_BUFF_SIZE_OUTPUTX(0)
+#define CVMX_NPI_BUFF_SIZE_OUTPUT1 CVMX_NPI_BUFF_SIZE_OUTPUTX(1)
+#define CVMX_NPI_BUFF_SIZE_OUTPUT2 CVMX_NPI_BUFF_SIZE_OUTPUTX(2)
+#define CVMX_NPI_BUFF_SIZE_OUTPUT3 CVMX_NPI_BUFF_SIZE_OUTPUTX(3)
+#define CVMX_NPI_BUFF_SIZE_OUTPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F00000000E0ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_COMP_CTL (CVMX_ADD_IO_SEG(0x00011F0000000218ull))
+#define CVMX_NPI_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011F0000000010ull))
+#define CVMX_NPI_DBG_SELECT (CVMX_ADD_IO_SEG(0x00011F0000000008ull))
+#define CVMX_NPI_DMA_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000000128ull))
+#define CVMX_NPI_DMA_HIGHP_COUNTS (CVMX_ADD_IO_SEG(0x00011F0000000148ull))
+#define CVMX_NPI_DMA_HIGHP_NADDR (CVMX_ADD_IO_SEG(0x00011F0000000158ull))
+#define CVMX_NPI_DMA_LOWP_COUNTS (CVMX_ADD_IO_SEG(0x00011F0000000140ull))
+#define CVMX_NPI_DMA_LOWP_NADDR (CVMX_ADD_IO_SEG(0x00011F0000000150ull))
+#define CVMX_NPI_HIGHP_DBELL (CVMX_ADD_IO_SEG(0x00011F0000000120ull))
+#define CVMX_NPI_HIGHP_IBUFF_SADDR (CVMX_ADD_IO_SEG(0x00011F0000000110ull))
+#define CVMX_NPI_INPUT_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000000138ull))
+#define CVMX_NPI_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000000020ull))
+#define CVMX_NPI_INT_SUM (CVMX_ADD_IO_SEG(0x00011F0000000018ull))
+#define CVMX_NPI_LOWP_DBELL (CVMX_ADD_IO_SEG(0x00011F0000000118ull))
+#define CVMX_NPI_LOWP_IBUFF_SADDR (CVMX_ADD_IO_SEG(0x00011F0000000108ull))
+#define CVMX_NPI_MEM_ACCESS_SUBID3 CVMX_NPI_MEM_ACCESS_SUBIDX(3)
+#define CVMX_NPI_MEM_ACCESS_SUBID4 CVMX_NPI_MEM_ACCESS_SUBIDX(4)
+#define CVMX_NPI_MEM_ACCESS_SUBID5 CVMX_NPI_MEM_ACCESS_SUBIDX(5)
+#define CVMX_NPI_MEM_ACCESS_SUBID6 CVMX_NPI_MEM_ACCESS_SUBIDX(6)
+#define CVMX_NPI_MEM_ACCESS_SUBIDX(offset) (CVMX_ADD_IO_SEG(0x00011F0000000028ull) + ((offset) & 7) * 8 - 8*3)
+#define CVMX_NPI_MSI_RCV (0x0000000000000190ull)
+#define CVMX_NPI_NPI_MSI_RCV (CVMX_ADD_IO_SEG(0x00011F0000001190ull))
+#define CVMX_NPI_NUM_DESC_OUTPUT0 CVMX_NPI_NUM_DESC_OUTPUTX(0)
+#define CVMX_NPI_NUM_DESC_OUTPUT1 CVMX_NPI_NUM_DESC_OUTPUTX(1)
+#define CVMX_NPI_NUM_DESC_OUTPUT2 CVMX_NPI_NUM_DESC_OUTPUTX(2)
+#define CVMX_NPI_NUM_DESC_OUTPUT3 CVMX_NPI_NUM_DESC_OUTPUTX(3)
+#define CVMX_NPI_NUM_DESC_OUTPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000000050ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_OUTPUT_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000000100ull))
+#define CVMX_NPI_P0_DBPAIR_ADDR CVMX_NPI_PX_DBPAIR_ADDR(0)
+#define CVMX_NPI_P0_INSTR_ADDR CVMX_NPI_PX_INSTR_ADDR(0)
+#define CVMX_NPI_P0_INSTR_CNTS CVMX_NPI_PX_INSTR_CNTS(0)
+#define CVMX_NPI_P0_PAIR_CNTS CVMX_NPI_PX_PAIR_CNTS(0)
+#define CVMX_NPI_P1_DBPAIR_ADDR CVMX_NPI_PX_DBPAIR_ADDR(1)
+#define CVMX_NPI_P1_INSTR_ADDR CVMX_NPI_PX_INSTR_ADDR(1)
+#define CVMX_NPI_P1_INSTR_CNTS CVMX_NPI_PX_INSTR_CNTS(1)
+#define CVMX_NPI_P1_PAIR_CNTS CVMX_NPI_PX_PAIR_CNTS(1)
+#define CVMX_NPI_P2_DBPAIR_ADDR CVMX_NPI_PX_DBPAIR_ADDR(2)
+#define CVMX_NPI_P2_INSTR_ADDR CVMX_NPI_PX_INSTR_ADDR(2)
+#define CVMX_NPI_P2_INSTR_CNTS CVMX_NPI_PX_INSTR_CNTS(2)
+#define CVMX_NPI_P2_PAIR_CNTS CVMX_NPI_PX_PAIR_CNTS(2)
+#define CVMX_NPI_P3_DBPAIR_ADDR CVMX_NPI_PX_DBPAIR_ADDR(3)
+#define CVMX_NPI_P3_INSTR_ADDR CVMX_NPI_PX_INSTR_ADDR(3)
+#define CVMX_NPI_P3_INSTR_CNTS CVMX_NPI_PX_INSTR_CNTS(3)
+#define CVMX_NPI_P3_PAIR_CNTS CVMX_NPI_PX_PAIR_CNTS(3)
+#define CVMX_NPI_PCI_BAR1_INDEXX(offset) (CVMX_ADD_IO_SEG(0x00011F0000001100ull) + ((offset) & 31) * 4)
+#define CVMX_NPI_PCI_BIST_REG (CVMX_ADD_IO_SEG(0x00011F00000011C0ull))
+#define CVMX_NPI_PCI_BURST_SIZE (CVMX_ADD_IO_SEG(0x00011F00000000D8ull))
+#define CVMX_NPI_PCI_CFG00 (CVMX_ADD_IO_SEG(0x00011F0000001800ull))
+#define CVMX_NPI_PCI_CFG01 (CVMX_ADD_IO_SEG(0x00011F0000001804ull))
+#define CVMX_NPI_PCI_CFG02 (CVMX_ADD_IO_SEG(0x00011F0000001808ull))
+#define CVMX_NPI_PCI_CFG03 (CVMX_ADD_IO_SEG(0x00011F000000180Cull))
+#define CVMX_NPI_PCI_CFG04 (CVMX_ADD_IO_SEG(0x00011F0000001810ull))
+#define CVMX_NPI_PCI_CFG05 (CVMX_ADD_IO_SEG(0x00011F0000001814ull))
+#define CVMX_NPI_PCI_CFG06 (CVMX_ADD_IO_SEG(0x00011F0000001818ull))
+#define CVMX_NPI_PCI_CFG07 (CVMX_ADD_IO_SEG(0x00011F000000181Cull))
+#define CVMX_NPI_PCI_CFG08 (CVMX_ADD_IO_SEG(0x00011F0000001820ull))
+#define CVMX_NPI_PCI_CFG09 (CVMX_ADD_IO_SEG(0x00011F0000001824ull))
+#define CVMX_NPI_PCI_CFG10 (CVMX_ADD_IO_SEG(0x00011F0000001828ull))
+#define CVMX_NPI_PCI_CFG11 (CVMX_ADD_IO_SEG(0x00011F000000182Cull))
+#define CVMX_NPI_PCI_CFG12 (CVMX_ADD_IO_SEG(0x00011F0000001830ull))
+#define CVMX_NPI_PCI_CFG13 (CVMX_ADD_IO_SEG(0x00011F0000001834ull))
+#define CVMX_NPI_PCI_CFG15 (CVMX_ADD_IO_SEG(0x00011F000000183Cull))
+#define CVMX_NPI_PCI_CFG16 (CVMX_ADD_IO_SEG(0x00011F0000001840ull))
+#define CVMX_NPI_PCI_CFG17 (CVMX_ADD_IO_SEG(0x00011F0000001844ull))
+#define CVMX_NPI_PCI_CFG18 (CVMX_ADD_IO_SEG(0x00011F0000001848ull))
+#define CVMX_NPI_PCI_CFG19 (CVMX_ADD_IO_SEG(0x00011F000000184Cull))
+#define CVMX_NPI_PCI_CFG20 (CVMX_ADD_IO_SEG(0x00011F0000001850ull))
+#define CVMX_NPI_PCI_CFG21 (CVMX_ADD_IO_SEG(0x00011F0000001854ull))
+#define CVMX_NPI_PCI_CFG22 (CVMX_ADD_IO_SEG(0x00011F0000001858ull))
+#define CVMX_NPI_PCI_CFG56 (CVMX_ADD_IO_SEG(0x00011F00000018E0ull))
+#define CVMX_NPI_PCI_CFG57 (CVMX_ADD_IO_SEG(0x00011F00000018E4ull))
+#define CVMX_NPI_PCI_CFG58 (CVMX_ADD_IO_SEG(0x00011F00000018E8ull))
+#define CVMX_NPI_PCI_CFG59 (CVMX_ADD_IO_SEG(0x00011F00000018ECull))
+#define CVMX_NPI_PCI_CFG60 (CVMX_ADD_IO_SEG(0x00011F00000018F0ull))
+#define CVMX_NPI_PCI_CFG61 (CVMX_ADD_IO_SEG(0x00011F00000018F4ull))
+#define CVMX_NPI_PCI_CFG62 (CVMX_ADD_IO_SEG(0x00011F00000018F8ull))
+#define CVMX_NPI_PCI_CFG63 (CVMX_ADD_IO_SEG(0x00011F00000018FCull))
+#define CVMX_NPI_PCI_CNT_REG (CVMX_ADD_IO_SEG(0x00011F00000011B8ull))
+#define CVMX_NPI_PCI_CTL_STATUS_2 (CVMX_ADD_IO_SEG(0x00011F000000118Cull))
+#define CVMX_NPI_PCI_INT_ARB_CFG (CVMX_ADD_IO_SEG(0x00011F0000000130ull))
+#define CVMX_NPI_PCI_INT_ENB2 (CVMX_ADD_IO_SEG(0x00011F00000011A0ull))
+#define CVMX_NPI_PCI_INT_SUM2 (CVMX_ADD_IO_SEG(0x00011F0000001198ull))
+#define CVMX_NPI_PCI_READ_CMD (CVMX_ADD_IO_SEG(0x00011F0000000048ull))
+#define CVMX_NPI_PCI_READ_CMD_6 (CVMX_ADD_IO_SEG(0x00011F0000001180ull))
+#define CVMX_NPI_PCI_READ_CMD_C (CVMX_ADD_IO_SEG(0x00011F0000001184ull))
+#define CVMX_NPI_PCI_READ_CMD_E (CVMX_ADD_IO_SEG(0x00011F0000001188ull))
+#define CVMX_NPI_PCI_SCM_REG (CVMX_ADD_IO_SEG(0x00011F00000011A8ull))
+#define CVMX_NPI_PCI_TSR_REG (CVMX_ADD_IO_SEG(0x00011F00000011B0ull))
+#define CVMX_NPI_PORT32_INSTR_HDR (CVMX_ADD_IO_SEG(0x00011F00000001F8ull))
+#define CVMX_NPI_PORT33_INSTR_HDR (CVMX_ADD_IO_SEG(0x00011F0000000200ull))
+#define CVMX_NPI_PORT34_INSTR_HDR (CVMX_ADD_IO_SEG(0x00011F0000000208ull))
+#define CVMX_NPI_PORT35_INSTR_HDR (CVMX_ADD_IO_SEG(0x00011F0000000210ull))
+#define CVMX_NPI_PORT_BP_CONTROL (CVMX_ADD_IO_SEG(0x00011F00000001F0ull))
+#define CVMX_NPI_PX_DBPAIR_ADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000000180ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_PX_INSTR_ADDR(offset) (CVMX_ADD_IO_SEG(0x00011F00000001C0ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_PX_INSTR_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F00000001A0ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_PX_PAIR_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000000160ull) + ((offset) & 3) * 8)
+#define CVMX_NPI_RSL_INT_BLOCKS (CVMX_ADD_IO_SEG(0x00011F0000000000ull))
+#define CVMX_NPI_SIZE_INPUT0 CVMX_NPI_SIZE_INPUTX(0)
+#define CVMX_NPI_SIZE_INPUT1 CVMX_NPI_SIZE_INPUTX(1)
+#define CVMX_NPI_SIZE_INPUT2 CVMX_NPI_SIZE_INPUTX(2)
+#define CVMX_NPI_SIZE_INPUT3 CVMX_NPI_SIZE_INPUTX(3)
+#define CVMX_NPI_SIZE_INPUTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000000078ull) + ((offset) & 3) * 16)
+#define CVMX_NPI_WIN_READ_TO (CVMX_ADD_IO_SEG(0x00011F00000001E0ull))
union cvmx_npi_base_addr_inputx {
uint64_t u64;
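/*
 * Usage sketch (not part of this patch): the rewritten CVMX_NPI_*
 * macros above already wrap the physical address in CVMX_ADD_IO_SEG(),
 * so they can be handed directly to the generic CSR accessors and the
 * raw value decoded through the matching bitfield union from this
 * header.  cvmx_read_csr() is assumed to come from asm/octeon/cvmx.h;
 * npi_get_base_addr_input() is a hypothetical helper used only for
 * illustration.
 */
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-npi-defs.h>

static uint64_t npi_get_base_addr_input(int port)
{
	union cvmx_npi_base_addr_inputx base;

	/* The indexed macro masks 'port' to the valid range (0-3). */
	base.u64 = cvmx_read_csr(CVMX_NPI_BASE_ADDR_INPUTX(port));
	return base.u64;
}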
diff --git a/arch/mips/include/asm/octeon/cvmx-pci-defs.h b/arch/mips/include/asm/octeon/cvmx-pci-defs.h
index 90f8d653575..6ff6d9d357b 100644
--- a/arch/mips/include/asm/octeon/cvmx-pci-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pci-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,184 +28,91 @@
#ifndef __CVMX_PCI_DEFS_H__
#define __CVMX_PCI_DEFS_H__
-#define CVMX_PCI_BAR1_INDEXX(offset) \
- (0x0000000000000100ull + (((offset) & 31) * 4))
-#define CVMX_PCI_BIST_REG \
- (0x00000000000001C0ull)
-#define CVMX_PCI_CFG00 \
- (0x0000000000000000ull)
-#define CVMX_PCI_CFG01 \
- (0x0000000000000004ull)
-#define CVMX_PCI_CFG02 \
- (0x0000000000000008ull)
-#define CVMX_PCI_CFG03 \
- (0x000000000000000Cull)
-#define CVMX_PCI_CFG04 \
- (0x0000000000000010ull)
-#define CVMX_PCI_CFG05 \
- (0x0000000000000014ull)
-#define CVMX_PCI_CFG06 \
- (0x0000000000000018ull)
-#define CVMX_PCI_CFG07 \
- (0x000000000000001Cull)
-#define CVMX_PCI_CFG08 \
- (0x0000000000000020ull)
-#define CVMX_PCI_CFG09 \
- (0x0000000000000024ull)
-#define CVMX_PCI_CFG10 \
- (0x0000000000000028ull)
-#define CVMX_PCI_CFG11 \
- (0x000000000000002Cull)
-#define CVMX_PCI_CFG12 \
- (0x0000000000000030ull)
-#define CVMX_PCI_CFG13 \
- (0x0000000000000034ull)
-#define CVMX_PCI_CFG15 \
- (0x000000000000003Cull)
-#define CVMX_PCI_CFG16 \
- (0x0000000000000040ull)
-#define CVMX_PCI_CFG17 \
- (0x0000000000000044ull)
-#define CVMX_PCI_CFG18 \
- (0x0000000000000048ull)
-#define CVMX_PCI_CFG19 \
- (0x000000000000004Cull)
-#define CVMX_PCI_CFG20 \
- (0x0000000000000050ull)
-#define CVMX_PCI_CFG21 \
- (0x0000000000000054ull)
-#define CVMX_PCI_CFG22 \
- (0x0000000000000058ull)
-#define CVMX_PCI_CFG56 \
- (0x00000000000000E0ull)
-#define CVMX_PCI_CFG57 \
- (0x00000000000000E4ull)
-#define CVMX_PCI_CFG58 \
- (0x00000000000000E8ull)
-#define CVMX_PCI_CFG59 \
- (0x00000000000000ECull)
-#define CVMX_PCI_CFG60 \
- (0x00000000000000F0ull)
-#define CVMX_PCI_CFG61 \
- (0x00000000000000F4ull)
-#define CVMX_PCI_CFG62 \
- (0x00000000000000F8ull)
-#define CVMX_PCI_CFG63 \
- (0x00000000000000FCull)
-#define CVMX_PCI_CNT_REG \
- (0x00000000000001B8ull)
-#define CVMX_PCI_CTL_STATUS_2 \
- (0x000000000000018Cull)
-#define CVMX_PCI_DBELL_0 \
- (0x0000000000000080ull)
-#define CVMX_PCI_DBELL_1 \
- (0x0000000000000088ull)
-#define CVMX_PCI_DBELL_2 \
- (0x0000000000000090ull)
-#define CVMX_PCI_DBELL_3 \
- (0x0000000000000098ull)
-#define CVMX_PCI_DBELL_X(offset) \
- (0x0000000000000080ull + (((offset) & 3) * 8))
-#define CVMX_PCI_DMA_CNT0 \
- (0x00000000000000A0ull)
-#define CVMX_PCI_DMA_CNT1 \
- (0x00000000000000A8ull)
-#define CVMX_PCI_DMA_CNTX(offset) \
- (0x00000000000000A0ull + (((offset) & 1) * 8))
-#define CVMX_PCI_DMA_INT_LEV0 \
- (0x00000000000000A4ull)
-#define CVMX_PCI_DMA_INT_LEV1 \
- (0x00000000000000ACull)
-#define CVMX_PCI_DMA_INT_LEVX(offset) \
- (0x00000000000000A4ull + (((offset) & 1) * 8))
-#define CVMX_PCI_DMA_TIME0 \
- (0x00000000000000B0ull)
-#define CVMX_PCI_DMA_TIME1 \
- (0x00000000000000B4ull)
-#define CVMX_PCI_DMA_TIMEX(offset) \
- (0x00000000000000B0ull + (((offset) & 1) * 4))
-#define CVMX_PCI_INSTR_COUNT0 \
- (0x0000000000000084ull)
-#define CVMX_PCI_INSTR_COUNT1 \
- (0x000000000000008Cull)
-#define CVMX_PCI_INSTR_COUNT2 \
- (0x0000000000000094ull)
-#define CVMX_PCI_INSTR_COUNT3 \
- (0x000000000000009Cull)
-#define CVMX_PCI_INSTR_COUNTX(offset) \
- (0x0000000000000084ull + (((offset) & 3) * 8))
-#define CVMX_PCI_INT_ENB \
- (0x0000000000000038ull)
-#define CVMX_PCI_INT_ENB2 \
- (0x00000000000001A0ull)
-#define CVMX_PCI_INT_SUM \
- (0x0000000000000030ull)
-#define CVMX_PCI_INT_SUM2 \
- (0x0000000000000198ull)
-#define CVMX_PCI_MSI_RCV \
- (0x00000000000000F0ull)
-#define CVMX_PCI_PKTS_SENT0 \
- (0x0000000000000040ull)
-#define CVMX_PCI_PKTS_SENT1 \
- (0x0000000000000050ull)
-#define CVMX_PCI_PKTS_SENT2 \
- (0x0000000000000060ull)
-#define CVMX_PCI_PKTS_SENT3 \
- (0x0000000000000070ull)
-#define CVMX_PCI_PKTS_SENTX(offset) \
- (0x0000000000000040ull + (((offset) & 3) * 16))
-#define CVMX_PCI_PKTS_SENT_INT_LEV0 \
- (0x0000000000000048ull)
-#define CVMX_PCI_PKTS_SENT_INT_LEV1 \
- (0x0000000000000058ull)
-#define CVMX_PCI_PKTS_SENT_INT_LEV2 \
- (0x0000000000000068ull)
-#define CVMX_PCI_PKTS_SENT_INT_LEV3 \
- (0x0000000000000078ull)
-#define CVMX_PCI_PKTS_SENT_INT_LEVX(offset) \
- (0x0000000000000048ull + (((offset) & 3) * 16))
-#define CVMX_PCI_PKTS_SENT_TIME0 \
- (0x000000000000004Cull)
-#define CVMX_PCI_PKTS_SENT_TIME1 \
- (0x000000000000005Cull)
-#define CVMX_PCI_PKTS_SENT_TIME2 \
- (0x000000000000006Cull)
-#define CVMX_PCI_PKTS_SENT_TIME3 \
- (0x000000000000007Cull)
-#define CVMX_PCI_PKTS_SENT_TIMEX(offset) \
- (0x000000000000004Cull + (((offset) & 3) * 16))
-#define CVMX_PCI_PKT_CREDITS0 \
- (0x0000000000000044ull)
-#define CVMX_PCI_PKT_CREDITS1 \
- (0x0000000000000054ull)
-#define CVMX_PCI_PKT_CREDITS2 \
- (0x0000000000000064ull)
-#define CVMX_PCI_PKT_CREDITS3 \
- (0x0000000000000074ull)
-#define CVMX_PCI_PKT_CREDITSX(offset) \
- (0x0000000000000044ull + (((offset) & 3) * 16))
-#define CVMX_PCI_READ_CMD_6 \
- (0x0000000000000180ull)
-#define CVMX_PCI_READ_CMD_C \
- (0x0000000000000184ull)
-#define CVMX_PCI_READ_CMD_E \
- (0x0000000000000188ull)
-#define CVMX_PCI_READ_TIMEOUT \
- CVMX_ADD_IO_SEG(0x00011F00000000B0ull)
-#define CVMX_PCI_SCM_REG \
- (0x00000000000001A8ull)
-#define CVMX_PCI_TSR_REG \
- (0x00000000000001B0ull)
-#define CVMX_PCI_WIN_RD_ADDR \
- (0x0000000000000008ull)
-#define CVMX_PCI_WIN_RD_DATA \
- (0x0000000000000020ull)
-#define CVMX_PCI_WIN_WR_ADDR \
- (0x0000000000000000ull)
-#define CVMX_PCI_WIN_WR_DATA \
- (0x0000000000000010ull)
-#define CVMX_PCI_WIN_WR_MASK \
- (0x0000000000000018ull)
+#define CVMX_PCI_BAR1_INDEXX(offset) (0x0000000000000100ull + ((offset) & 31) * 4)
+#define CVMX_PCI_BIST_REG (0x00000000000001C0ull)
+#define CVMX_PCI_CFG00 (0x0000000000000000ull)
+#define CVMX_PCI_CFG01 (0x0000000000000004ull)
+#define CVMX_PCI_CFG02 (0x0000000000000008ull)
+#define CVMX_PCI_CFG03 (0x000000000000000Cull)
+#define CVMX_PCI_CFG04 (0x0000000000000010ull)
+#define CVMX_PCI_CFG05 (0x0000000000000014ull)
+#define CVMX_PCI_CFG06 (0x0000000000000018ull)
+#define CVMX_PCI_CFG07 (0x000000000000001Cull)
+#define CVMX_PCI_CFG08 (0x0000000000000020ull)
+#define CVMX_PCI_CFG09 (0x0000000000000024ull)
+#define CVMX_PCI_CFG10 (0x0000000000000028ull)
+#define CVMX_PCI_CFG11 (0x000000000000002Cull)
+#define CVMX_PCI_CFG12 (0x0000000000000030ull)
+#define CVMX_PCI_CFG13 (0x0000000000000034ull)
+#define CVMX_PCI_CFG15 (0x000000000000003Cull)
+#define CVMX_PCI_CFG16 (0x0000000000000040ull)
+#define CVMX_PCI_CFG17 (0x0000000000000044ull)
+#define CVMX_PCI_CFG18 (0x0000000000000048ull)
+#define CVMX_PCI_CFG19 (0x000000000000004Cull)
+#define CVMX_PCI_CFG20 (0x0000000000000050ull)
+#define CVMX_PCI_CFG21 (0x0000000000000054ull)
+#define CVMX_PCI_CFG22 (0x0000000000000058ull)
+#define CVMX_PCI_CFG56 (0x00000000000000E0ull)
+#define CVMX_PCI_CFG57 (0x00000000000000E4ull)
+#define CVMX_PCI_CFG58 (0x00000000000000E8ull)
+#define CVMX_PCI_CFG59 (0x00000000000000ECull)
+#define CVMX_PCI_CFG60 (0x00000000000000F0ull)
+#define CVMX_PCI_CFG61 (0x00000000000000F4ull)
+#define CVMX_PCI_CFG62 (0x00000000000000F8ull)
+#define CVMX_PCI_CFG63 (0x00000000000000FCull)
+#define CVMX_PCI_CNT_REG (0x00000000000001B8ull)
+#define CVMX_PCI_CTL_STATUS_2 (0x000000000000018Cull)
+#define CVMX_PCI_DBELL_X(offset) (0x0000000000000080ull + ((offset) & 3) * 8)
+#define CVMX_PCI_DMA_CNT0 CVMX_PCI_DMA_CNTX(0)
+#define CVMX_PCI_DMA_CNT1 CVMX_PCI_DMA_CNTX(1)
+#define CVMX_PCI_DMA_CNTX(offset) (0x00000000000000A0ull + ((offset) & 1) * 8)
+#define CVMX_PCI_DMA_INT_LEV0 CVMX_PCI_DMA_INT_LEVX(0)
+#define CVMX_PCI_DMA_INT_LEV1 CVMX_PCI_DMA_INT_LEVX(1)
+#define CVMX_PCI_DMA_INT_LEVX(offset) (0x00000000000000A4ull + ((offset) & 1) * 8)
+#define CVMX_PCI_DMA_TIME0 CVMX_PCI_DMA_TIMEX(0)
+#define CVMX_PCI_DMA_TIME1 CVMX_PCI_DMA_TIMEX(1)
+#define CVMX_PCI_DMA_TIMEX(offset) (0x00000000000000B0ull + ((offset) & 1) * 4)
+#define CVMX_PCI_INSTR_COUNT0 CVMX_PCI_INSTR_COUNTX(0)
+#define CVMX_PCI_INSTR_COUNT1 CVMX_PCI_INSTR_COUNTX(1)
+#define CVMX_PCI_INSTR_COUNT2 CVMX_PCI_INSTR_COUNTX(2)
+#define CVMX_PCI_INSTR_COUNT3 CVMX_PCI_INSTR_COUNTX(3)
+#define CVMX_PCI_INSTR_COUNTX(offset) (0x0000000000000084ull + ((offset) & 3) * 8)
+#define CVMX_PCI_INT_ENB (0x0000000000000038ull)
+#define CVMX_PCI_INT_ENB2 (0x00000000000001A0ull)
+#define CVMX_PCI_INT_SUM (0x0000000000000030ull)
+#define CVMX_PCI_INT_SUM2 (0x0000000000000198ull)
+#define CVMX_PCI_MSI_RCV (0x00000000000000F0ull)
+#define CVMX_PCI_PKTS_SENT0 CVMX_PCI_PKTS_SENTX(0)
+#define CVMX_PCI_PKTS_SENT1 CVMX_PCI_PKTS_SENTX(1)
+#define CVMX_PCI_PKTS_SENT2 CVMX_PCI_PKTS_SENTX(2)
+#define CVMX_PCI_PKTS_SENT3 CVMX_PCI_PKTS_SENTX(3)
+#define CVMX_PCI_PKTS_SENTX(offset) (0x0000000000000040ull + ((offset) & 3) * 16)
+#define CVMX_PCI_PKTS_SENT_INT_LEV0 CVMX_PCI_PKTS_SENT_INT_LEVX(0)
+#define CVMX_PCI_PKTS_SENT_INT_LEV1 CVMX_PCI_PKTS_SENT_INT_LEVX(1)
+#define CVMX_PCI_PKTS_SENT_INT_LEV2 CVMX_PCI_PKTS_SENT_INT_LEVX(2)
+#define CVMX_PCI_PKTS_SENT_INT_LEV3 CVMX_PCI_PKTS_SENT_INT_LEVX(3)
+#define CVMX_PCI_PKTS_SENT_INT_LEVX(offset) (0x0000000000000048ull + ((offset) & 3) * 16)
+#define CVMX_PCI_PKTS_SENT_TIME0 CVMX_PCI_PKTS_SENT_TIMEX(0)
+#define CVMX_PCI_PKTS_SENT_TIME1 CVMX_PCI_PKTS_SENT_TIMEX(1)
+#define CVMX_PCI_PKTS_SENT_TIME2 CVMX_PCI_PKTS_SENT_TIMEX(2)
+#define CVMX_PCI_PKTS_SENT_TIME3 CVMX_PCI_PKTS_SENT_TIMEX(3)
+#define CVMX_PCI_PKTS_SENT_TIMEX(offset) (0x000000000000004Cull + ((offset) & 3) * 16)
+#define CVMX_PCI_PKT_CREDITS0 CVMX_PCI_PKT_CREDITSX(0)
+#define CVMX_PCI_PKT_CREDITS1 CVMX_PCI_PKT_CREDITSX(1)
+#define CVMX_PCI_PKT_CREDITS2 CVMX_PCI_PKT_CREDITSX(2)
+#define CVMX_PCI_PKT_CREDITS3 CVMX_PCI_PKT_CREDITSX(3)
+#define CVMX_PCI_PKT_CREDITSX(offset) (0x0000000000000044ull + ((offset) & 3) * 16)
+#define CVMX_PCI_READ_CMD_6 (0x0000000000000180ull)
+#define CVMX_PCI_READ_CMD_C (0x0000000000000184ull)
+#define CVMX_PCI_READ_CMD_E (0x0000000000000188ull)
+#define CVMX_PCI_READ_TIMEOUT (CVMX_ADD_IO_SEG(0x00011F00000000B0ull))
+#define CVMX_PCI_SCM_REG (0x00000000000001A8ull)
+#define CVMX_PCI_TSR_REG (0x00000000000001B0ull)
+#define CVMX_PCI_WIN_RD_ADDR (0x0000000000000008ull)
+#define CVMX_PCI_WIN_RD_DATA (0x0000000000000020ull)
+#define CVMX_PCI_WIN_WR_ADDR (0x0000000000000000ull)
+#define CVMX_PCI_WIN_WR_DATA (0x0000000000000010ull)
+#define CVMX_PCI_WIN_WR_MASK (0x0000000000000018ull)
union cvmx_pci_bar1_indexx {
uint32_t u32;
diff --git a/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h b/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h
index 75574c91894..f8cb88902ef 100644
--- a/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,158 +28,83 @@
#ifndef __CVMX_PCIERCX_DEFS_H__
#define __CVMX_PCIERCX_DEFS_H__
-#define CVMX_PCIERCX_CFG000(offset) \
- (0x0000000000000000ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG001(offset) \
- (0x0000000000000004ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG002(offset) \
- (0x0000000000000008ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG003(offset) \
- (0x000000000000000Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG004(offset) \
- (0x0000000000000010ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG005(offset) \
- (0x0000000000000014ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG006(offset) \
- (0x0000000000000018ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG007(offset) \
- (0x000000000000001Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG008(offset) \
- (0x0000000000000020ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG009(offset) \
- (0x0000000000000024ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG010(offset) \
- (0x0000000000000028ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG011(offset) \
- (0x000000000000002Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG012(offset) \
- (0x0000000000000030ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG013(offset) \
- (0x0000000000000034ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG014(offset) \
- (0x0000000000000038ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG015(offset) \
- (0x000000000000003Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG016(offset) \
- (0x0000000000000040ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG017(offset) \
- (0x0000000000000044ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG020(offset) \
- (0x0000000000000050ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG021(offset) \
- (0x0000000000000054ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG022(offset) \
- (0x0000000000000058ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG023(offset) \
- (0x000000000000005Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG028(offset) \
- (0x0000000000000070ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG029(offset) \
- (0x0000000000000074ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG030(offset) \
- (0x0000000000000078ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG031(offset) \
- (0x000000000000007Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG032(offset) \
- (0x0000000000000080ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG033(offset) \
- (0x0000000000000084ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG034(offset) \
- (0x0000000000000088ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG035(offset) \
- (0x000000000000008Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG036(offset) \
- (0x0000000000000090ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG037(offset) \
- (0x0000000000000094ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG038(offset) \
- (0x0000000000000098ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG039(offset) \
- (0x000000000000009Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG040(offset) \
- (0x00000000000000A0ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG041(offset) \
- (0x00000000000000A4ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG042(offset) \
- (0x00000000000000A8ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG064(offset) \
- (0x0000000000000100ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG065(offset) \
- (0x0000000000000104ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG066(offset) \
- (0x0000000000000108ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG067(offset) \
- (0x000000000000010Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG068(offset) \
- (0x0000000000000110ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG069(offset) \
- (0x0000000000000114ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG070(offset) \
- (0x0000000000000118ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG071(offset) \
- (0x000000000000011Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG072(offset) \
- (0x0000000000000120ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG073(offset) \
- (0x0000000000000124ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG074(offset) \
- (0x0000000000000128ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG075(offset) \
- (0x000000000000012Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG076(offset) \
- (0x0000000000000130ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG077(offset) \
- (0x0000000000000134ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG448(offset) \
- (0x0000000000000700ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG449(offset) \
- (0x0000000000000704ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG450(offset) \
- (0x0000000000000708ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG451(offset) \
- (0x000000000000070Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG452(offset) \
- (0x0000000000000710ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG453(offset) \
- (0x0000000000000714ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG454(offset) \
- (0x0000000000000718ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG455(offset) \
- (0x000000000000071Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG456(offset) \
- (0x0000000000000720ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG458(offset) \
- (0x0000000000000728ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG459(offset) \
- (0x000000000000072Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG460(offset) \
- (0x0000000000000730ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG461(offset) \
- (0x0000000000000734ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG462(offset) \
- (0x0000000000000738ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG463(offset) \
- (0x000000000000073Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG464(offset) \
- (0x0000000000000740ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG465(offset) \
- (0x0000000000000744ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG466(offset) \
- (0x0000000000000748ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG467(offset) \
- (0x000000000000074Cull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG468(offset) \
- (0x0000000000000750ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG490(offset) \
- (0x00000000000007A8ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG491(offset) \
- (0x00000000000007ACull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG492(offset) \
- (0x00000000000007B0ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG516(offset) \
- (0x0000000000000810ull + (((offset) & 1) * 0))
-#define CVMX_PCIERCX_CFG517(offset) \
- (0x0000000000000814ull + (((offset) & 1) * 0))
+#define CVMX_PCIERCX_CFG000(block_id) (0x0000000000000000ull)
+#define CVMX_PCIERCX_CFG001(block_id) (0x0000000000000004ull)
+#define CVMX_PCIERCX_CFG002(block_id) (0x0000000000000008ull)
+#define CVMX_PCIERCX_CFG003(block_id) (0x000000000000000Cull)
+#define CVMX_PCIERCX_CFG004(block_id) (0x0000000000000010ull)
+#define CVMX_PCIERCX_CFG005(block_id) (0x0000000000000014ull)
+#define CVMX_PCIERCX_CFG006(block_id) (0x0000000000000018ull)
+#define CVMX_PCIERCX_CFG007(block_id) (0x000000000000001Cull)
+#define CVMX_PCIERCX_CFG008(block_id) (0x0000000000000020ull)
+#define CVMX_PCIERCX_CFG009(block_id) (0x0000000000000024ull)
+#define CVMX_PCIERCX_CFG010(block_id) (0x0000000000000028ull)
+#define CVMX_PCIERCX_CFG011(block_id) (0x000000000000002Cull)
+#define CVMX_PCIERCX_CFG012(block_id) (0x0000000000000030ull)
+#define CVMX_PCIERCX_CFG013(block_id) (0x0000000000000034ull)
+#define CVMX_PCIERCX_CFG014(block_id) (0x0000000000000038ull)
+#define CVMX_PCIERCX_CFG015(block_id) (0x000000000000003Cull)
+#define CVMX_PCIERCX_CFG016(block_id) (0x0000000000000040ull)
+#define CVMX_PCIERCX_CFG017(block_id) (0x0000000000000044ull)
+#define CVMX_PCIERCX_CFG020(block_id) (0x0000000000000050ull)
+#define CVMX_PCIERCX_CFG021(block_id) (0x0000000000000054ull)
+#define CVMX_PCIERCX_CFG022(block_id) (0x0000000000000058ull)
+#define CVMX_PCIERCX_CFG023(block_id) (0x000000000000005Cull)
+#define CVMX_PCIERCX_CFG028(block_id) (0x0000000000000070ull)
+#define CVMX_PCIERCX_CFG029(block_id) (0x0000000000000074ull)
+#define CVMX_PCIERCX_CFG030(block_id) (0x0000000000000078ull)
+#define CVMX_PCIERCX_CFG031(block_id) (0x000000000000007Cull)
+#define CVMX_PCIERCX_CFG032(block_id) (0x0000000000000080ull)
+#define CVMX_PCIERCX_CFG033(block_id) (0x0000000000000084ull)
+#define CVMX_PCIERCX_CFG034(block_id) (0x0000000000000088ull)
+#define CVMX_PCIERCX_CFG035(block_id) (0x000000000000008Cull)
+#define CVMX_PCIERCX_CFG036(block_id) (0x0000000000000090ull)
+#define CVMX_PCIERCX_CFG037(block_id) (0x0000000000000094ull)
+#define CVMX_PCIERCX_CFG038(block_id) (0x0000000000000098ull)
+#define CVMX_PCIERCX_CFG039(block_id) (0x000000000000009Cull)
+#define CVMX_PCIERCX_CFG040(block_id) (0x00000000000000A0ull)
+#define CVMX_PCIERCX_CFG041(block_id) (0x00000000000000A4ull)
+#define CVMX_PCIERCX_CFG042(block_id) (0x00000000000000A8ull)
+#define CVMX_PCIERCX_CFG064(block_id) (0x0000000000000100ull)
+#define CVMX_PCIERCX_CFG065(block_id) (0x0000000000000104ull)
+#define CVMX_PCIERCX_CFG066(block_id) (0x0000000000000108ull)
+#define CVMX_PCIERCX_CFG067(block_id) (0x000000000000010Cull)
+#define CVMX_PCIERCX_CFG068(block_id) (0x0000000000000110ull)
+#define CVMX_PCIERCX_CFG069(block_id) (0x0000000000000114ull)
+#define CVMX_PCIERCX_CFG070(block_id) (0x0000000000000118ull)
+#define CVMX_PCIERCX_CFG071(block_id) (0x000000000000011Cull)
+#define CVMX_PCIERCX_CFG072(block_id) (0x0000000000000120ull)
+#define CVMX_PCIERCX_CFG073(block_id) (0x0000000000000124ull)
+#define CVMX_PCIERCX_CFG074(block_id) (0x0000000000000128ull)
+#define CVMX_PCIERCX_CFG075(block_id) (0x000000000000012Cull)
+#define CVMX_PCIERCX_CFG076(block_id) (0x0000000000000130ull)
+#define CVMX_PCIERCX_CFG077(block_id) (0x0000000000000134ull)
+#define CVMX_PCIERCX_CFG448(block_id) (0x0000000000000700ull)
+#define CVMX_PCIERCX_CFG449(block_id) (0x0000000000000704ull)
+#define CVMX_PCIERCX_CFG450(block_id) (0x0000000000000708ull)
+#define CVMX_PCIERCX_CFG451(block_id) (0x000000000000070Cull)
+#define CVMX_PCIERCX_CFG452(block_id) (0x0000000000000710ull)
+#define CVMX_PCIERCX_CFG453(block_id) (0x0000000000000714ull)
+#define CVMX_PCIERCX_CFG454(block_id) (0x0000000000000718ull)
+#define CVMX_PCIERCX_CFG455(block_id) (0x000000000000071Cull)
+#define CVMX_PCIERCX_CFG456(block_id) (0x0000000000000720ull)
+#define CVMX_PCIERCX_CFG458(block_id) (0x0000000000000728ull)
+#define CVMX_PCIERCX_CFG459(block_id) (0x000000000000072Cull)
+#define CVMX_PCIERCX_CFG460(block_id) (0x0000000000000730ull)
+#define CVMX_PCIERCX_CFG461(block_id) (0x0000000000000734ull)
+#define CVMX_PCIERCX_CFG462(block_id) (0x0000000000000738ull)
+#define CVMX_PCIERCX_CFG463(block_id) (0x000000000000073Cull)
+#define CVMX_PCIERCX_CFG464(block_id) (0x0000000000000740ull)
+#define CVMX_PCIERCX_CFG465(block_id) (0x0000000000000744ull)
+#define CVMX_PCIERCX_CFG466(block_id) (0x0000000000000748ull)
+#define CVMX_PCIERCX_CFG467(block_id) (0x000000000000074Cull)
+#define CVMX_PCIERCX_CFG468(block_id) (0x0000000000000750ull)
+#define CVMX_PCIERCX_CFG490(block_id) (0x00000000000007A8ull)
+#define CVMX_PCIERCX_CFG491(block_id) (0x00000000000007ACull)
+#define CVMX_PCIERCX_CFG492(block_id) (0x00000000000007B0ull)
+#define CVMX_PCIERCX_CFG515(block_id) (0x000000000000080Cull)
+#define CVMX_PCIERCX_CFG516(block_id) (0x0000000000000810ull)
+#define CVMX_PCIERCX_CFG517(block_id) (0x0000000000000814ull)
union cvmx_pciercx_cfg000 {
uint32_t u32;
@@ -191,6 +116,8 @@ union cvmx_pciercx_cfg000 {
struct cvmx_pciercx_cfg000_s cn52xxp1;
struct cvmx_pciercx_cfg000_s cn56xx;
struct cvmx_pciercx_cfg000_s cn56xxp1;
+ struct cvmx_pciercx_cfg000_s cn63xx;
+ struct cvmx_pciercx_cfg000_s cn63xxp1;
};
union cvmx_pciercx_cfg001 {
@@ -225,6 +152,8 @@ union cvmx_pciercx_cfg001 {
struct cvmx_pciercx_cfg001_s cn52xxp1;
struct cvmx_pciercx_cfg001_s cn56xx;
struct cvmx_pciercx_cfg001_s cn56xxp1;
+ struct cvmx_pciercx_cfg001_s cn63xx;
+ struct cvmx_pciercx_cfg001_s cn63xxp1;
};
union cvmx_pciercx_cfg002 {
@@ -239,6 +168,8 @@ union cvmx_pciercx_cfg002 {
struct cvmx_pciercx_cfg002_s cn52xxp1;
struct cvmx_pciercx_cfg002_s cn56xx;
struct cvmx_pciercx_cfg002_s cn56xxp1;
+ struct cvmx_pciercx_cfg002_s cn63xx;
+ struct cvmx_pciercx_cfg002_s cn63xxp1;
};
union cvmx_pciercx_cfg003 {
@@ -254,6 +185,8 @@ union cvmx_pciercx_cfg003 {
struct cvmx_pciercx_cfg003_s cn52xxp1;
struct cvmx_pciercx_cfg003_s cn56xx;
struct cvmx_pciercx_cfg003_s cn56xxp1;
+ struct cvmx_pciercx_cfg003_s cn63xx;
+ struct cvmx_pciercx_cfg003_s cn63xxp1;
};
union cvmx_pciercx_cfg004 {
@@ -265,6 +198,8 @@ union cvmx_pciercx_cfg004 {
struct cvmx_pciercx_cfg004_s cn52xxp1;
struct cvmx_pciercx_cfg004_s cn56xx;
struct cvmx_pciercx_cfg004_s cn56xxp1;
+ struct cvmx_pciercx_cfg004_s cn63xx;
+ struct cvmx_pciercx_cfg004_s cn63xxp1;
};
union cvmx_pciercx_cfg005 {
@@ -276,6 +211,8 @@ union cvmx_pciercx_cfg005 {
struct cvmx_pciercx_cfg005_s cn52xxp1;
struct cvmx_pciercx_cfg005_s cn56xx;
struct cvmx_pciercx_cfg005_s cn56xxp1;
+ struct cvmx_pciercx_cfg005_s cn63xx;
+ struct cvmx_pciercx_cfg005_s cn63xxp1;
};
union cvmx_pciercx_cfg006 {
@@ -290,6 +227,8 @@ union cvmx_pciercx_cfg006 {
struct cvmx_pciercx_cfg006_s cn52xxp1;
struct cvmx_pciercx_cfg006_s cn56xx;
struct cvmx_pciercx_cfg006_s cn56xxp1;
+ struct cvmx_pciercx_cfg006_s cn63xx;
+ struct cvmx_pciercx_cfg006_s cn63xxp1;
};
union cvmx_pciercx_cfg007 {
@@ -317,6 +256,8 @@ union cvmx_pciercx_cfg007 {
struct cvmx_pciercx_cfg007_s cn52xxp1;
struct cvmx_pciercx_cfg007_s cn56xx;
struct cvmx_pciercx_cfg007_s cn56xxp1;
+ struct cvmx_pciercx_cfg007_s cn63xx;
+ struct cvmx_pciercx_cfg007_s cn63xxp1;
};
union cvmx_pciercx_cfg008 {
@@ -331,6 +272,8 @@ union cvmx_pciercx_cfg008 {
struct cvmx_pciercx_cfg008_s cn52xxp1;
struct cvmx_pciercx_cfg008_s cn56xx;
struct cvmx_pciercx_cfg008_s cn56xxp1;
+ struct cvmx_pciercx_cfg008_s cn63xx;
+ struct cvmx_pciercx_cfg008_s cn63xxp1;
};
union cvmx_pciercx_cfg009 {
@@ -347,6 +290,8 @@ union cvmx_pciercx_cfg009 {
struct cvmx_pciercx_cfg009_s cn52xxp1;
struct cvmx_pciercx_cfg009_s cn56xx;
struct cvmx_pciercx_cfg009_s cn56xxp1;
+ struct cvmx_pciercx_cfg009_s cn63xx;
+ struct cvmx_pciercx_cfg009_s cn63xxp1;
};
union cvmx_pciercx_cfg010 {
@@ -358,6 +303,8 @@ union cvmx_pciercx_cfg010 {
struct cvmx_pciercx_cfg010_s cn52xxp1;
struct cvmx_pciercx_cfg010_s cn56xx;
struct cvmx_pciercx_cfg010_s cn56xxp1;
+ struct cvmx_pciercx_cfg010_s cn63xx;
+ struct cvmx_pciercx_cfg010_s cn63xxp1;
};
union cvmx_pciercx_cfg011 {
@@ -369,6 +316,8 @@ union cvmx_pciercx_cfg011 {
struct cvmx_pciercx_cfg011_s cn52xxp1;
struct cvmx_pciercx_cfg011_s cn56xx;
struct cvmx_pciercx_cfg011_s cn56xxp1;
+ struct cvmx_pciercx_cfg011_s cn63xx;
+ struct cvmx_pciercx_cfg011_s cn63xxp1;
};
union cvmx_pciercx_cfg012 {
@@ -381,6 +330,8 @@ union cvmx_pciercx_cfg012 {
struct cvmx_pciercx_cfg012_s cn52xxp1;
struct cvmx_pciercx_cfg012_s cn56xx;
struct cvmx_pciercx_cfg012_s cn56xxp1;
+ struct cvmx_pciercx_cfg012_s cn63xx;
+ struct cvmx_pciercx_cfg012_s cn63xxp1;
};
union cvmx_pciercx_cfg013 {
@@ -393,6 +344,8 @@ union cvmx_pciercx_cfg013 {
struct cvmx_pciercx_cfg013_s cn52xxp1;
struct cvmx_pciercx_cfg013_s cn56xx;
struct cvmx_pciercx_cfg013_s cn56xxp1;
+ struct cvmx_pciercx_cfg013_s cn63xx;
+ struct cvmx_pciercx_cfg013_s cn63xxp1;
};
union cvmx_pciercx_cfg014 {
@@ -404,6 +357,8 @@ union cvmx_pciercx_cfg014 {
struct cvmx_pciercx_cfg014_s cn52xxp1;
struct cvmx_pciercx_cfg014_s cn56xx;
struct cvmx_pciercx_cfg014_s cn56xxp1;
+ struct cvmx_pciercx_cfg014_s cn63xx;
+ struct cvmx_pciercx_cfg014_s cn63xxp1;
};
union cvmx_pciercx_cfg015 {
@@ -429,6 +384,8 @@ union cvmx_pciercx_cfg015 {
struct cvmx_pciercx_cfg015_s cn52xxp1;
struct cvmx_pciercx_cfg015_s cn56xx;
struct cvmx_pciercx_cfg015_s cn56xxp1;
+ struct cvmx_pciercx_cfg015_s cn63xx;
+ struct cvmx_pciercx_cfg015_s cn63xxp1;
};
union cvmx_pciercx_cfg016 {
@@ -449,6 +406,8 @@ union cvmx_pciercx_cfg016 {
struct cvmx_pciercx_cfg016_s cn52xxp1;
struct cvmx_pciercx_cfg016_s cn56xx;
struct cvmx_pciercx_cfg016_s cn56xxp1;
+ struct cvmx_pciercx_cfg016_s cn63xx;
+ struct cvmx_pciercx_cfg016_s cn63xxp1;
};
union cvmx_pciercx_cfg017 {
@@ -471,6 +430,8 @@ union cvmx_pciercx_cfg017 {
struct cvmx_pciercx_cfg017_s cn52xxp1;
struct cvmx_pciercx_cfg017_s cn56xx;
struct cvmx_pciercx_cfg017_s cn56xxp1;
+ struct cvmx_pciercx_cfg017_s cn63xx;
+ struct cvmx_pciercx_cfg017_s cn63xxp1;
};
union cvmx_pciercx_cfg020 {
@@ -488,6 +449,8 @@ union cvmx_pciercx_cfg020 {
struct cvmx_pciercx_cfg020_s cn52xxp1;
struct cvmx_pciercx_cfg020_s cn56xx;
struct cvmx_pciercx_cfg020_s cn56xxp1;
+ struct cvmx_pciercx_cfg020_s cn63xx;
+ struct cvmx_pciercx_cfg020_s cn63xxp1;
};
union cvmx_pciercx_cfg021 {
@@ -500,6 +463,8 @@ union cvmx_pciercx_cfg021 {
struct cvmx_pciercx_cfg021_s cn52xxp1;
struct cvmx_pciercx_cfg021_s cn56xx;
struct cvmx_pciercx_cfg021_s cn56xxp1;
+ struct cvmx_pciercx_cfg021_s cn63xx;
+ struct cvmx_pciercx_cfg021_s cn63xxp1;
};
union cvmx_pciercx_cfg022 {
@@ -511,6 +476,8 @@ union cvmx_pciercx_cfg022 {
struct cvmx_pciercx_cfg022_s cn52xxp1;
struct cvmx_pciercx_cfg022_s cn56xx;
struct cvmx_pciercx_cfg022_s cn56xxp1;
+ struct cvmx_pciercx_cfg022_s cn63xx;
+ struct cvmx_pciercx_cfg022_s cn63xxp1;
};
union cvmx_pciercx_cfg023 {
@@ -523,6 +490,8 @@ union cvmx_pciercx_cfg023 {
struct cvmx_pciercx_cfg023_s cn52xxp1;
struct cvmx_pciercx_cfg023_s cn56xx;
struct cvmx_pciercx_cfg023_s cn56xxp1;
+ struct cvmx_pciercx_cfg023_s cn63xx;
+ struct cvmx_pciercx_cfg023_s cn63xxp1;
};
union cvmx_pciercx_cfg028 {
@@ -540,6 +509,8 @@ union cvmx_pciercx_cfg028 {
struct cvmx_pciercx_cfg028_s cn52xxp1;
struct cvmx_pciercx_cfg028_s cn56xx;
struct cvmx_pciercx_cfg028_s cn56xxp1;
+ struct cvmx_pciercx_cfg028_s cn63xx;
+ struct cvmx_pciercx_cfg028_s cn63xxp1;
};
union cvmx_pciercx_cfg029 {
@@ -561,6 +532,8 @@ union cvmx_pciercx_cfg029 {
struct cvmx_pciercx_cfg029_s cn52xxp1;
struct cvmx_pciercx_cfg029_s cn56xx;
struct cvmx_pciercx_cfg029_s cn56xxp1;
+ struct cvmx_pciercx_cfg029_s cn63xx;
+ struct cvmx_pciercx_cfg029_s cn63xxp1;
};
union cvmx_pciercx_cfg030 {
@@ -590,6 +563,8 @@ union cvmx_pciercx_cfg030 {
struct cvmx_pciercx_cfg030_s cn52xxp1;
struct cvmx_pciercx_cfg030_s cn56xx;
struct cvmx_pciercx_cfg030_s cn56xxp1;
+ struct cvmx_pciercx_cfg030_s cn63xx;
+ struct cvmx_pciercx_cfg030_s cn63xxp1;
};
union cvmx_pciercx_cfg031 {
@@ -611,6 +586,8 @@ union cvmx_pciercx_cfg031 {
struct cvmx_pciercx_cfg031_s cn52xxp1;
struct cvmx_pciercx_cfg031_s cn56xx;
struct cvmx_pciercx_cfg031_s cn56xxp1;
+ struct cvmx_pciercx_cfg031_s cn63xx;
+ struct cvmx_pciercx_cfg031_s cn63xxp1;
};
union cvmx_pciercx_cfg032 {
@@ -641,6 +618,8 @@ union cvmx_pciercx_cfg032 {
struct cvmx_pciercx_cfg032_s cn52xxp1;
struct cvmx_pciercx_cfg032_s cn56xx;
struct cvmx_pciercx_cfg032_s cn56xxp1;
+ struct cvmx_pciercx_cfg032_s cn63xx;
+ struct cvmx_pciercx_cfg032_s cn63xxp1;
};
union cvmx_pciercx_cfg033 {
@@ -663,6 +642,8 @@ union cvmx_pciercx_cfg033 {
struct cvmx_pciercx_cfg033_s cn52xxp1;
struct cvmx_pciercx_cfg033_s cn56xx;
struct cvmx_pciercx_cfg033_s cn56xxp1;
+ struct cvmx_pciercx_cfg033_s cn63xx;
+ struct cvmx_pciercx_cfg033_s cn63xxp1;
};
union cvmx_pciercx_cfg034 {
@@ -695,6 +676,8 @@ union cvmx_pciercx_cfg034 {
struct cvmx_pciercx_cfg034_s cn52xxp1;
struct cvmx_pciercx_cfg034_s cn56xx;
struct cvmx_pciercx_cfg034_s cn56xxp1;
+ struct cvmx_pciercx_cfg034_s cn63xx;
+ struct cvmx_pciercx_cfg034_s cn63xxp1;
};
union cvmx_pciercx_cfg035 {
@@ -713,6 +696,8 @@ union cvmx_pciercx_cfg035 {
struct cvmx_pciercx_cfg035_s cn52xxp1;
struct cvmx_pciercx_cfg035_s cn56xx;
struct cvmx_pciercx_cfg035_s cn56xxp1;
+ struct cvmx_pciercx_cfg035_s cn63xx;
+ struct cvmx_pciercx_cfg035_s cn63xxp1;
};
union cvmx_pciercx_cfg036 {
@@ -727,6 +712,8 @@ union cvmx_pciercx_cfg036 {
struct cvmx_pciercx_cfg036_s cn52xxp1;
struct cvmx_pciercx_cfg036_s cn56xx;
struct cvmx_pciercx_cfg036_s cn56xxp1;
+ struct cvmx_pciercx_cfg036_s cn63xx;
+ struct cvmx_pciercx_cfg036_s cn63xxp1;
};
union cvmx_pciercx_cfg037 {
@@ -740,6 +727,8 @@ union cvmx_pciercx_cfg037 {
struct cvmx_pciercx_cfg037_s cn52xxp1;
struct cvmx_pciercx_cfg037_s cn56xx;
struct cvmx_pciercx_cfg037_s cn56xxp1;
+ struct cvmx_pciercx_cfg037_s cn63xx;
+ struct cvmx_pciercx_cfg037_s cn63xxp1;
};
union cvmx_pciercx_cfg038 {
@@ -753,28 +742,51 @@ union cvmx_pciercx_cfg038 {
struct cvmx_pciercx_cfg038_s cn52xxp1;
struct cvmx_pciercx_cfg038_s cn56xx;
struct cvmx_pciercx_cfg038_s cn56xxp1;
+ struct cvmx_pciercx_cfg038_s cn63xx;
+ struct cvmx_pciercx_cfg038_s cn63xxp1;
};
union cvmx_pciercx_cfg039 {
uint32_t u32;
struct cvmx_pciercx_cfg039_s {
- uint32_t reserved_0_31:32;
+ uint32_t reserved_9_31:23;
+ uint32_t cls:1;
+ uint32_t slsv:7;
+ uint32_t reserved_0_0:1;
} s;
- struct cvmx_pciercx_cfg039_s cn52xx;
- struct cvmx_pciercx_cfg039_s cn52xxp1;
- struct cvmx_pciercx_cfg039_s cn56xx;
- struct cvmx_pciercx_cfg039_s cn56xxp1;
+ struct cvmx_pciercx_cfg039_cn52xx {
+ uint32_t reserved_0_31:32;
+ } cn52xx;
+ struct cvmx_pciercx_cfg039_cn52xx cn52xxp1;
+ struct cvmx_pciercx_cfg039_cn52xx cn56xx;
+ struct cvmx_pciercx_cfg039_cn52xx cn56xxp1;
+ struct cvmx_pciercx_cfg039_s cn63xx;
+ struct cvmx_pciercx_cfg039_cn52xx cn63xxp1;
};
union cvmx_pciercx_cfg040 {
uint32_t u32;
struct cvmx_pciercx_cfg040_s {
+ uint32_t reserved_17_31:15;
+ uint32_t cdl:1;
+ uint32_t reserved_13_15:3;
+ uint32_t cde:1;
+ uint32_t csos:1;
+ uint32_t emc:1;
+ uint32_t tm:3;
+ uint32_t sde:1;
+ uint32_t hasd:1;
+ uint32_t ec:1;
+ uint32_t tls:4;
+ } s;
+ struct cvmx_pciercx_cfg040_cn52xx {
uint32_t reserved_0_31:32;
- } s;
- struct cvmx_pciercx_cfg040_s cn52xx;
- struct cvmx_pciercx_cfg040_s cn52xxp1;
- struct cvmx_pciercx_cfg040_s cn56xx;
- struct cvmx_pciercx_cfg040_s cn56xxp1;
+ } cn52xx;
+ struct cvmx_pciercx_cfg040_cn52xx cn52xxp1;
+ struct cvmx_pciercx_cfg040_cn52xx cn56xx;
+ struct cvmx_pciercx_cfg040_cn52xx cn56xxp1;
+ struct cvmx_pciercx_cfg040_s cn63xx;
+ struct cvmx_pciercx_cfg040_s cn63xxp1;
};
union cvmx_pciercx_cfg041 {
@@ -786,6 +798,8 @@ union cvmx_pciercx_cfg041 {
struct cvmx_pciercx_cfg041_s cn52xxp1;
struct cvmx_pciercx_cfg041_s cn56xx;
struct cvmx_pciercx_cfg041_s cn56xxp1;
+ struct cvmx_pciercx_cfg041_s cn63xx;
+ struct cvmx_pciercx_cfg041_s cn63xxp1;
};
union cvmx_pciercx_cfg042 {
@@ -797,6 +811,8 @@ union cvmx_pciercx_cfg042 {
struct cvmx_pciercx_cfg042_s cn52xxp1;
struct cvmx_pciercx_cfg042_s cn56xx;
struct cvmx_pciercx_cfg042_s cn56xxp1;
+ struct cvmx_pciercx_cfg042_s cn63xx;
+ struct cvmx_pciercx_cfg042_s cn63xxp1;
};
union cvmx_pciercx_cfg064 {
@@ -810,6 +826,8 @@ union cvmx_pciercx_cfg064 {
struct cvmx_pciercx_cfg064_s cn52xxp1;
struct cvmx_pciercx_cfg064_s cn56xx;
struct cvmx_pciercx_cfg064_s cn56xxp1;
+ struct cvmx_pciercx_cfg064_s cn63xx;
+ struct cvmx_pciercx_cfg064_s cn63xxp1;
};
union cvmx_pciercx_cfg065 {
@@ -834,6 +852,8 @@ union cvmx_pciercx_cfg065 {
struct cvmx_pciercx_cfg065_s cn52xxp1;
struct cvmx_pciercx_cfg065_s cn56xx;
struct cvmx_pciercx_cfg065_s cn56xxp1;
+ struct cvmx_pciercx_cfg065_s cn63xx;
+ struct cvmx_pciercx_cfg065_s cn63xxp1;
};
union cvmx_pciercx_cfg066 {
@@ -858,6 +878,8 @@ union cvmx_pciercx_cfg066 {
struct cvmx_pciercx_cfg066_s cn52xxp1;
struct cvmx_pciercx_cfg066_s cn56xx;
struct cvmx_pciercx_cfg066_s cn56xxp1;
+ struct cvmx_pciercx_cfg066_s cn63xx;
+ struct cvmx_pciercx_cfg066_s cn63xxp1;
};
union cvmx_pciercx_cfg067 {
@@ -882,6 +904,8 @@ union cvmx_pciercx_cfg067 {
struct cvmx_pciercx_cfg067_s cn52xxp1;
struct cvmx_pciercx_cfg067_s cn56xx;
struct cvmx_pciercx_cfg067_s cn56xxp1;
+ struct cvmx_pciercx_cfg067_s cn63xx;
+ struct cvmx_pciercx_cfg067_s cn63xxp1;
};
union cvmx_pciercx_cfg068 {
@@ -901,6 +925,8 @@ union cvmx_pciercx_cfg068 {
struct cvmx_pciercx_cfg068_s cn52xxp1;
struct cvmx_pciercx_cfg068_s cn56xx;
struct cvmx_pciercx_cfg068_s cn56xxp1;
+ struct cvmx_pciercx_cfg068_s cn63xx;
+ struct cvmx_pciercx_cfg068_s cn63xxp1;
};
union cvmx_pciercx_cfg069 {
@@ -920,6 +946,8 @@ union cvmx_pciercx_cfg069 {
struct cvmx_pciercx_cfg069_s cn52xxp1;
struct cvmx_pciercx_cfg069_s cn56xx;
struct cvmx_pciercx_cfg069_s cn56xxp1;
+ struct cvmx_pciercx_cfg069_s cn63xx;
+ struct cvmx_pciercx_cfg069_s cn63xxp1;
};
union cvmx_pciercx_cfg070 {
@@ -936,6 +964,8 @@ union cvmx_pciercx_cfg070 {
struct cvmx_pciercx_cfg070_s cn52xxp1;
struct cvmx_pciercx_cfg070_s cn56xx;
struct cvmx_pciercx_cfg070_s cn56xxp1;
+ struct cvmx_pciercx_cfg070_s cn63xx;
+ struct cvmx_pciercx_cfg070_s cn63xxp1;
};
union cvmx_pciercx_cfg071 {
@@ -947,6 +977,8 @@ union cvmx_pciercx_cfg071 {
struct cvmx_pciercx_cfg071_s cn52xxp1;
struct cvmx_pciercx_cfg071_s cn56xx;
struct cvmx_pciercx_cfg071_s cn56xxp1;
+ struct cvmx_pciercx_cfg071_s cn63xx;
+ struct cvmx_pciercx_cfg071_s cn63xxp1;
};
union cvmx_pciercx_cfg072 {
@@ -958,6 +990,8 @@ union cvmx_pciercx_cfg072 {
struct cvmx_pciercx_cfg072_s cn52xxp1;
struct cvmx_pciercx_cfg072_s cn56xx;
struct cvmx_pciercx_cfg072_s cn56xxp1;
+ struct cvmx_pciercx_cfg072_s cn63xx;
+ struct cvmx_pciercx_cfg072_s cn63xxp1;
};
union cvmx_pciercx_cfg073 {
@@ -969,6 +1003,8 @@ union cvmx_pciercx_cfg073 {
struct cvmx_pciercx_cfg073_s cn52xxp1;
struct cvmx_pciercx_cfg073_s cn56xx;
struct cvmx_pciercx_cfg073_s cn56xxp1;
+ struct cvmx_pciercx_cfg073_s cn63xx;
+ struct cvmx_pciercx_cfg073_s cn63xxp1;
};
union cvmx_pciercx_cfg074 {
@@ -980,6 +1016,8 @@ union cvmx_pciercx_cfg074 {
struct cvmx_pciercx_cfg074_s cn52xxp1;
struct cvmx_pciercx_cfg074_s cn56xx;
struct cvmx_pciercx_cfg074_s cn56xxp1;
+ struct cvmx_pciercx_cfg074_s cn63xx;
+ struct cvmx_pciercx_cfg074_s cn63xxp1;
};
union cvmx_pciercx_cfg075 {
@@ -994,6 +1032,8 @@ union cvmx_pciercx_cfg075 {
struct cvmx_pciercx_cfg075_s cn52xxp1;
struct cvmx_pciercx_cfg075_s cn56xx;
struct cvmx_pciercx_cfg075_s cn56xxp1;
+ struct cvmx_pciercx_cfg075_s cn63xx;
+ struct cvmx_pciercx_cfg075_s cn63xxp1;
};
union cvmx_pciercx_cfg076 {
@@ -1013,6 +1053,8 @@ union cvmx_pciercx_cfg076 {
struct cvmx_pciercx_cfg076_s cn52xxp1;
struct cvmx_pciercx_cfg076_s cn56xx;
struct cvmx_pciercx_cfg076_s cn56xxp1;
+ struct cvmx_pciercx_cfg076_s cn63xx;
+ struct cvmx_pciercx_cfg076_s cn63xxp1;
};
union cvmx_pciercx_cfg077 {
@@ -1025,6 +1067,8 @@ union cvmx_pciercx_cfg077 {
struct cvmx_pciercx_cfg077_s cn52xxp1;
struct cvmx_pciercx_cfg077_s cn56xx;
struct cvmx_pciercx_cfg077_s cn56xxp1;
+ struct cvmx_pciercx_cfg077_s cn63xx;
+ struct cvmx_pciercx_cfg077_s cn63xxp1;
};
union cvmx_pciercx_cfg448 {
@@ -1037,6 +1081,8 @@ union cvmx_pciercx_cfg448 {
struct cvmx_pciercx_cfg448_s cn52xxp1;
struct cvmx_pciercx_cfg448_s cn56xx;
struct cvmx_pciercx_cfg448_s cn56xxp1;
+ struct cvmx_pciercx_cfg448_s cn63xx;
+ struct cvmx_pciercx_cfg448_s cn63xxp1;
};
union cvmx_pciercx_cfg449 {
@@ -1048,6 +1094,8 @@ union cvmx_pciercx_cfg449 {
struct cvmx_pciercx_cfg449_s cn52xxp1;
struct cvmx_pciercx_cfg449_s cn56xx;
struct cvmx_pciercx_cfg449_s cn56xxp1;
+ struct cvmx_pciercx_cfg449_s cn63xx;
+ struct cvmx_pciercx_cfg449_s cn63xxp1;
};
union cvmx_pciercx_cfg450 {
@@ -1064,6 +1112,8 @@ union cvmx_pciercx_cfg450 {
struct cvmx_pciercx_cfg450_s cn52xxp1;
struct cvmx_pciercx_cfg450_s cn56xx;
struct cvmx_pciercx_cfg450_s cn56xxp1;
+ struct cvmx_pciercx_cfg450_s cn63xx;
+ struct cvmx_pciercx_cfg450_s cn63xxp1;
};
union cvmx_pciercx_cfg451 {
@@ -1080,6 +1130,8 @@ union cvmx_pciercx_cfg451 {
struct cvmx_pciercx_cfg451_s cn52xxp1;
struct cvmx_pciercx_cfg451_s cn56xx;
struct cvmx_pciercx_cfg451_s cn56xxp1;
+ struct cvmx_pciercx_cfg451_s cn63xx;
+ struct cvmx_pciercx_cfg451_s cn63xxp1;
};
union cvmx_pciercx_cfg452 {
@@ -1103,6 +1155,8 @@ union cvmx_pciercx_cfg452 {
struct cvmx_pciercx_cfg452_s cn52xxp1;
struct cvmx_pciercx_cfg452_s cn56xx;
struct cvmx_pciercx_cfg452_s cn56xxp1;
+ struct cvmx_pciercx_cfg452_s cn63xx;
+ struct cvmx_pciercx_cfg452_s cn63xxp1;
};
union cvmx_pciercx_cfg453 {
@@ -1118,6 +1172,8 @@ union cvmx_pciercx_cfg453 {
struct cvmx_pciercx_cfg453_s cn52xxp1;
struct cvmx_pciercx_cfg453_s cn56xx;
struct cvmx_pciercx_cfg453_s cn56xxp1;
+ struct cvmx_pciercx_cfg453_s cn63xx;
+ struct cvmx_pciercx_cfg453_s cn63xxp1;
};
union cvmx_pciercx_cfg454 {
@@ -1136,6 +1192,8 @@ union cvmx_pciercx_cfg454 {
struct cvmx_pciercx_cfg454_s cn52xxp1;
struct cvmx_pciercx_cfg454_s cn56xx;
struct cvmx_pciercx_cfg454_s cn56xxp1;
+ struct cvmx_pciercx_cfg454_s cn63xx;
+ struct cvmx_pciercx_cfg454_s cn63xxp1;
};
union cvmx_pciercx_cfg455 {
@@ -1165,6 +1223,8 @@ union cvmx_pciercx_cfg455 {
struct cvmx_pciercx_cfg455_s cn52xxp1;
struct cvmx_pciercx_cfg455_s cn56xx;
struct cvmx_pciercx_cfg455_s cn56xxp1;
+ struct cvmx_pciercx_cfg455_s cn63xx;
+ struct cvmx_pciercx_cfg455_s cn63xxp1;
};
union cvmx_pciercx_cfg456 {
@@ -1178,6 +1238,8 @@ union cvmx_pciercx_cfg456 {
struct cvmx_pciercx_cfg456_s cn52xxp1;
struct cvmx_pciercx_cfg456_s cn56xx;
struct cvmx_pciercx_cfg456_s cn56xxp1;
+ struct cvmx_pciercx_cfg456_s cn63xx;
+ struct cvmx_pciercx_cfg456_s cn63xxp1;
};
union cvmx_pciercx_cfg458 {
@@ -1189,6 +1251,8 @@ union cvmx_pciercx_cfg458 {
struct cvmx_pciercx_cfg458_s cn52xxp1;
struct cvmx_pciercx_cfg458_s cn56xx;
struct cvmx_pciercx_cfg458_s cn56xxp1;
+ struct cvmx_pciercx_cfg458_s cn63xx;
+ struct cvmx_pciercx_cfg458_s cn63xxp1;
};
union cvmx_pciercx_cfg459 {
@@ -1200,6 +1264,8 @@ union cvmx_pciercx_cfg459 {
struct cvmx_pciercx_cfg459_s cn52xxp1;
struct cvmx_pciercx_cfg459_s cn56xx;
struct cvmx_pciercx_cfg459_s cn56xxp1;
+ struct cvmx_pciercx_cfg459_s cn63xx;
+ struct cvmx_pciercx_cfg459_s cn63xxp1;
};
union cvmx_pciercx_cfg460 {
@@ -1213,6 +1279,8 @@ union cvmx_pciercx_cfg460 {
struct cvmx_pciercx_cfg460_s cn52xxp1;
struct cvmx_pciercx_cfg460_s cn56xx;
struct cvmx_pciercx_cfg460_s cn56xxp1;
+ struct cvmx_pciercx_cfg460_s cn63xx;
+ struct cvmx_pciercx_cfg460_s cn63xxp1;
};
union cvmx_pciercx_cfg461 {
@@ -1226,6 +1294,8 @@ union cvmx_pciercx_cfg461 {
struct cvmx_pciercx_cfg461_s cn52xxp1;
struct cvmx_pciercx_cfg461_s cn56xx;
struct cvmx_pciercx_cfg461_s cn56xxp1;
+ struct cvmx_pciercx_cfg461_s cn63xx;
+ struct cvmx_pciercx_cfg461_s cn63xxp1;
};
union cvmx_pciercx_cfg462 {
@@ -1239,6 +1309,8 @@ union cvmx_pciercx_cfg462 {
struct cvmx_pciercx_cfg462_s cn52xxp1;
struct cvmx_pciercx_cfg462_s cn56xx;
struct cvmx_pciercx_cfg462_s cn56xxp1;
+ struct cvmx_pciercx_cfg462_s cn63xx;
+ struct cvmx_pciercx_cfg462_s cn63xxp1;
};
union cvmx_pciercx_cfg463 {
@@ -1253,6 +1325,8 @@ union cvmx_pciercx_cfg463 {
struct cvmx_pciercx_cfg463_s cn52xxp1;
struct cvmx_pciercx_cfg463_s cn56xx;
struct cvmx_pciercx_cfg463_s cn56xxp1;
+ struct cvmx_pciercx_cfg463_s cn63xx;
+ struct cvmx_pciercx_cfg463_s cn63xxp1;
};
union cvmx_pciercx_cfg464 {
@@ -1267,6 +1341,8 @@ union cvmx_pciercx_cfg464 {
struct cvmx_pciercx_cfg464_s cn52xxp1;
struct cvmx_pciercx_cfg464_s cn56xx;
struct cvmx_pciercx_cfg464_s cn56xxp1;
+ struct cvmx_pciercx_cfg464_s cn63xx;
+ struct cvmx_pciercx_cfg464_s cn63xxp1;
};
union cvmx_pciercx_cfg465 {
@@ -1281,6 +1357,8 @@ union cvmx_pciercx_cfg465 {
struct cvmx_pciercx_cfg465_s cn52xxp1;
struct cvmx_pciercx_cfg465_s cn56xx;
struct cvmx_pciercx_cfg465_s cn56xxp1;
+ struct cvmx_pciercx_cfg465_s cn63xx;
+ struct cvmx_pciercx_cfg465_s cn63xxp1;
};
union cvmx_pciercx_cfg466 {
@@ -1298,6 +1376,8 @@ union cvmx_pciercx_cfg466 {
struct cvmx_pciercx_cfg466_s cn52xxp1;
struct cvmx_pciercx_cfg466_s cn56xx;
struct cvmx_pciercx_cfg466_s cn56xxp1;
+ struct cvmx_pciercx_cfg466_s cn63xx;
+ struct cvmx_pciercx_cfg466_s cn63xxp1;
};
union cvmx_pciercx_cfg467 {
@@ -1313,6 +1393,8 @@ union cvmx_pciercx_cfg467 {
struct cvmx_pciercx_cfg467_s cn52xxp1;
struct cvmx_pciercx_cfg467_s cn56xx;
struct cvmx_pciercx_cfg467_s cn56xxp1;
+ struct cvmx_pciercx_cfg467_s cn63xx;
+ struct cvmx_pciercx_cfg467_s cn63xxp1;
};
union cvmx_pciercx_cfg468 {
@@ -1328,6 +1410,8 @@ union cvmx_pciercx_cfg468 {
struct cvmx_pciercx_cfg468_s cn52xxp1;
struct cvmx_pciercx_cfg468_s cn56xx;
struct cvmx_pciercx_cfg468_s cn56xxp1;
+ struct cvmx_pciercx_cfg468_s cn63xx;
+ struct cvmx_pciercx_cfg468_s cn63xxp1;
};
union cvmx_pciercx_cfg490 {
@@ -1342,6 +1426,8 @@ union cvmx_pciercx_cfg490 {
struct cvmx_pciercx_cfg490_s cn52xxp1;
struct cvmx_pciercx_cfg490_s cn56xx;
struct cvmx_pciercx_cfg490_s cn56xxp1;
+ struct cvmx_pciercx_cfg490_s cn63xx;
+ struct cvmx_pciercx_cfg490_s cn63xxp1;
};
union cvmx_pciercx_cfg491 {
@@ -1356,6 +1442,8 @@ union cvmx_pciercx_cfg491 {
struct cvmx_pciercx_cfg491_s cn52xxp1;
struct cvmx_pciercx_cfg491_s cn56xx;
struct cvmx_pciercx_cfg491_s cn56xxp1;
+ struct cvmx_pciercx_cfg491_s cn63xx;
+ struct cvmx_pciercx_cfg491_s cn63xxp1;
};
union cvmx_pciercx_cfg492 {
@@ -1370,6 +1458,23 @@ union cvmx_pciercx_cfg492 {
struct cvmx_pciercx_cfg492_s cn52xxp1;
struct cvmx_pciercx_cfg492_s cn56xx;
struct cvmx_pciercx_cfg492_s cn56xxp1;
+ struct cvmx_pciercx_cfg492_s cn63xx;
+ struct cvmx_pciercx_cfg492_s cn63xxp1;
+};
+
+union cvmx_pciercx_cfg515 {
+ uint32_t u32;
+ struct cvmx_pciercx_cfg515_s {
+ uint32_t reserved_21_31:11;
+ uint32_t s_d_e:1;
+ uint32_t ctcrb:1;
+ uint32_t cpyts:1;
+ uint32_t dsc:1;
+ uint32_t le:9;
+ uint32_t n_fts:8;
+ } s;
+ struct cvmx_pciercx_cfg515_s cn63xx;
+ struct cvmx_pciercx_cfg515_s cn63xxp1;
};
union cvmx_pciercx_cfg516 {
@@ -1381,6 +1486,8 @@ union cvmx_pciercx_cfg516 {
struct cvmx_pciercx_cfg516_s cn52xxp1;
struct cvmx_pciercx_cfg516_s cn56xx;
struct cvmx_pciercx_cfg516_s cn56xxp1;
+ struct cvmx_pciercx_cfg516_s cn63xx;
+ struct cvmx_pciercx_cfg516_s cn63xxp1;
};
union cvmx_pciercx_cfg517 {
@@ -1392,6 +1499,8 @@ union cvmx_pciercx_cfg517 {
struct cvmx_pciercx_cfg517_s cn52xxp1;
struct cvmx_pciercx_cfg517_s cn56xx;
struct cvmx_pciercx_cfg517_s cn56xxp1;
+ struct cvmx_pciercx_cfg517_s cn63xx;
+ struct cvmx_pciercx_cfg517_s cn63xxp1;
};
#endif
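
A minimal usage sketch for the PCIERCX config unions above (not part of the patch itself): these unions are normally used to decode a raw 32-bit PCIe config-space word into named fields. The example assumes <asm/octeon/cvmx-pciercx-defs.h> is included and uses the cvmx_pciercx_cfg515 layout added by this hunk; the helper name and the idea that the caller already holds the raw word are illustrative assumptions.

/*
 * Sketch only: decode the N_FTS field from a raw CFG515 word
 * (config offset 0x80C per CVMX_PCIERCX_CFG515 above).
 */
static inline int pciercx_cfg515_n_fts(uint32_t raw)
{
	union cvmx_pciercx_cfg515 cfg515;

	cfg515.u32 = raw;		/* raw 32-bit value read by the caller */
	return cfg515.s.n_fts;		/* N_FTS occupies the low 8 bits of the layout above */
}

The same .u32 / .s.<field> idiom applies to every cvmx_pciercx_cfgNNN union in this file; only the field names differ.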
diff --git a/arch/mips/include/asm/octeon/cvmx-pescx-defs.h b/arch/mips/include/asm/octeon/cvmx-pescx-defs.h
index f40cfaf8445..aef84851a94 100644
--- a/arch/mips/include/asm/octeon/cvmx-pescx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pescx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,38 +28,22 @@
#ifndef __CVMX_PESCX_DEFS_H__
#define __CVMX_PESCX_DEFS_H__
-#define CVMX_PESCX_BIST_STATUS(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000018ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_BIST_STATUS2(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000418ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_CFG_RD(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000030ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_CFG_WR(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000028ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_CPL_LUT_VALID(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000098ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_CTL_STATUS(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000000ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_CTL_STATUS2(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000400ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_DBG_INFO(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000008ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_DBG_INFO_EN(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C80000A0ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_DIAG_STATUS(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000020ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_P2N_BAR0_START(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000080ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_P2N_BAR1_START(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000088ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_P2N_BAR2_START(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000090ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_P2P_BARX_END(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000048ull + (((offset) & 3) * 16) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_P2P_BARX_START(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000040ull + (((offset) & 3) * 16) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PESCX_TLP_CREDITS(block_id) \
- CVMX_ADD_IO_SEG(0x00011800C8000038ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PESCX_BIST_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000018ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_BIST_STATUS2(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000418ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_CFG_RD(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000030ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_CFG_WR(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000028ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_CPL_LUT_VALID(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000098ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_CTL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000000ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_CTL_STATUS2(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000400ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_DBG_INFO(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000008ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_DBG_INFO_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800C80000A0ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_DIAG_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000020ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_P2N_BAR0_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000080ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_P2N_BAR1_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000088ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_P2N_BAR2_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000090ull) + ((block_id) & 1) * 0x8000000ull)
+#define CVMX_PESCX_P2P_BARX_END(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x800000ull) * 16)
+#define CVMX_PESCX_P2P_BARX_START(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000040ull) + (((offset) & 3) + ((block_id) & 1) * 0x800000ull) * 16)
+#define CVMX_PESCX_TLP_CREDITS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000038ull) + ((block_id) & 1) * 0x8000000ull)
union cvmx_pescx_bist_status {
uint64_t u64;
diff --git a/arch/mips/include/asm/octeon/cvmx-pexp-defs.h b/arch/mips/include/asm/octeon/cvmx-pexp-defs.h
index 5ea5dc571b5..5ab8679d89a 100644
--- a/arch/mips/include/asm/octeon/cvmx-pexp-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pexp-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -35,195 +35,191 @@
#ifndef __CVMX_PEXP_DEFS_H__
#define __CVMX_PEXP_DEFS_H__
-#define CVMX_PEXP_NPEI_BAR1_INDEXX(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000008000ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_BIST_STATUS \
- CVMX_ADD_IO_SEG(0x00011F0000008580ull)
-#define CVMX_PEXP_NPEI_BIST_STATUS2 \
- CVMX_ADD_IO_SEG(0x00011F0000008680ull)
-#define CVMX_PEXP_NPEI_CTL_PORT0 \
- CVMX_ADD_IO_SEG(0x00011F0000008250ull)
-#define CVMX_PEXP_NPEI_CTL_PORT1 \
- CVMX_ADD_IO_SEG(0x00011F0000008260ull)
-#define CVMX_PEXP_NPEI_CTL_STATUS \
- CVMX_ADD_IO_SEG(0x00011F0000008570ull)
-#define CVMX_PEXP_NPEI_CTL_STATUS2 \
- CVMX_ADD_IO_SEG(0x00011F000000BC00ull)
-#define CVMX_PEXP_NPEI_DATA_OUT_CNT \
- CVMX_ADD_IO_SEG(0x00011F00000085F0ull)
-#define CVMX_PEXP_NPEI_DBG_DATA \
- CVMX_ADD_IO_SEG(0x00011F0000008510ull)
-#define CVMX_PEXP_NPEI_DBG_SELECT \
- CVMX_ADD_IO_SEG(0x00011F0000008500ull)
-#define CVMX_PEXP_NPEI_DMA0_INT_LEVEL \
- CVMX_ADD_IO_SEG(0x00011F00000085C0ull)
-#define CVMX_PEXP_NPEI_DMA1_INT_LEVEL \
- CVMX_ADD_IO_SEG(0x00011F00000085D0ull)
-#define CVMX_PEXP_NPEI_DMAX_COUNTS(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000008450ull + (((offset) & 7) * 16))
-#define CVMX_PEXP_NPEI_DMAX_DBELL(offset) \
- CVMX_ADD_IO_SEG(0x00011F00000083B0ull + (((offset) & 7) * 16))
-#define CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000008400ull + (((offset) & 7) * 16))
-#define CVMX_PEXP_NPEI_DMAX_NADDR(offset) \
- CVMX_ADD_IO_SEG(0x00011F00000084A0ull + (((offset) & 7) * 16))
-#define CVMX_PEXP_NPEI_DMA_CNTS \
- CVMX_ADD_IO_SEG(0x00011F00000085E0ull)
-#define CVMX_PEXP_NPEI_DMA_CONTROL \
- CVMX_ADD_IO_SEG(0x00011F00000083A0ull)
-#define CVMX_PEXP_NPEI_INT_A_ENB \
- CVMX_ADD_IO_SEG(0x00011F0000008560ull)
-#define CVMX_PEXP_NPEI_INT_A_ENB2 \
- CVMX_ADD_IO_SEG(0x00011F000000BCE0ull)
-#define CVMX_PEXP_NPEI_INT_A_SUM \
- CVMX_ADD_IO_SEG(0x00011F0000008550ull)
-#define CVMX_PEXP_NPEI_INT_ENB \
- CVMX_ADD_IO_SEG(0x00011F0000008540ull)
-#define CVMX_PEXP_NPEI_INT_ENB2 \
- CVMX_ADD_IO_SEG(0x00011F000000BCD0ull)
-#define CVMX_PEXP_NPEI_INT_INFO \
- CVMX_ADD_IO_SEG(0x00011F0000008590ull)
-#define CVMX_PEXP_NPEI_INT_SUM \
- CVMX_ADD_IO_SEG(0x00011F0000008530ull)
-#define CVMX_PEXP_NPEI_INT_SUM2 \
- CVMX_ADD_IO_SEG(0x00011F000000BCC0ull)
-#define CVMX_PEXP_NPEI_LAST_WIN_RDATA0 \
- CVMX_ADD_IO_SEG(0x00011F0000008600ull)
-#define CVMX_PEXP_NPEI_LAST_WIN_RDATA1 \
- CVMX_ADD_IO_SEG(0x00011F0000008610ull)
-#define CVMX_PEXP_NPEI_MEM_ACCESS_CTL \
- CVMX_ADD_IO_SEG(0x00011F00000084F0ull)
-#define CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000008280ull + (((offset) & 31) * 16) - 16 * 12)
-#define CVMX_PEXP_NPEI_MSI_ENB0 \
- CVMX_ADD_IO_SEG(0x00011F000000BC50ull)
-#define CVMX_PEXP_NPEI_MSI_ENB1 \
- CVMX_ADD_IO_SEG(0x00011F000000BC60ull)
-#define CVMX_PEXP_NPEI_MSI_ENB2 \
- CVMX_ADD_IO_SEG(0x00011F000000BC70ull)
-#define CVMX_PEXP_NPEI_MSI_ENB3 \
- CVMX_ADD_IO_SEG(0x00011F000000BC80ull)
-#define CVMX_PEXP_NPEI_MSI_RCV0 \
- CVMX_ADD_IO_SEG(0x00011F000000BC10ull)
-#define CVMX_PEXP_NPEI_MSI_RCV1 \
- CVMX_ADD_IO_SEG(0x00011F000000BC20ull)
-#define CVMX_PEXP_NPEI_MSI_RCV2 \
- CVMX_ADD_IO_SEG(0x00011F000000BC30ull)
-#define CVMX_PEXP_NPEI_MSI_RCV3 \
- CVMX_ADD_IO_SEG(0x00011F000000BC40ull)
-#define CVMX_PEXP_NPEI_MSI_RD_MAP \
- CVMX_ADD_IO_SEG(0x00011F000000BCA0ull)
-#define CVMX_PEXP_NPEI_MSI_W1C_ENB0 \
- CVMX_ADD_IO_SEG(0x00011F000000BCF0ull)
-#define CVMX_PEXP_NPEI_MSI_W1C_ENB1 \
- CVMX_ADD_IO_SEG(0x00011F000000BD00ull)
-#define CVMX_PEXP_NPEI_MSI_W1C_ENB2 \
- CVMX_ADD_IO_SEG(0x00011F000000BD10ull)
-#define CVMX_PEXP_NPEI_MSI_W1C_ENB3 \
- CVMX_ADD_IO_SEG(0x00011F000000BD20ull)
-#define CVMX_PEXP_NPEI_MSI_W1S_ENB0 \
- CVMX_ADD_IO_SEG(0x00011F000000BD30ull)
-#define CVMX_PEXP_NPEI_MSI_W1S_ENB1 \
- CVMX_ADD_IO_SEG(0x00011F000000BD40ull)
-#define CVMX_PEXP_NPEI_MSI_W1S_ENB2 \
- CVMX_ADD_IO_SEG(0x00011F000000BD50ull)
-#define CVMX_PEXP_NPEI_MSI_W1S_ENB3 \
- CVMX_ADD_IO_SEG(0x00011F000000BD60ull)
-#define CVMX_PEXP_NPEI_MSI_WR_MAP \
- CVMX_ADD_IO_SEG(0x00011F000000BC90ull)
-#define CVMX_PEXP_NPEI_PCIE_CREDIT_CNT \
- CVMX_ADD_IO_SEG(0x00011F000000BD70ull)
-#define CVMX_PEXP_NPEI_PCIE_MSI_RCV \
- CVMX_ADD_IO_SEG(0x00011F000000BCB0ull)
-#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B1 \
- CVMX_ADD_IO_SEG(0x00011F0000008650ull)
-#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B2 \
- CVMX_ADD_IO_SEG(0x00011F0000008660ull)
-#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B3 \
- CVMX_ADD_IO_SEG(0x00011F0000008670ull)
-#define CVMX_PEXP_NPEI_PKTX_CNTS(offset) \
- CVMX_ADD_IO_SEG(0x00011F000000A400ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKTX_INSTR_BADDR(offset) \
- CVMX_ADD_IO_SEG(0x00011F000000A800ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKTX_INSTR_BAOFF_DBELL(offset) \
- CVMX_ADD_IO_SEG(0x00011F000000AC00ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKTX_INSTR_FIFO_RSIZE(offset) \
- CVMX_ADD_IO_SEG(0x00011F000000B000ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKTX_INSTR_HEADER(offset) \
- CVMX_ADD_IO_SEG(0x00011F000000B400ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKTX_IN_BP(offset) \
- CVMX_ADD_IO_SEG(0x00011F000000B800ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKTX_SLIST_BADDR(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000009400ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKTX_SLIST_BAOFF_DBELL(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000009800ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKTX_SLIST_FIFO_RSIZE(offset) \
- CVMX_ADD_IO_SEG(0x00011F0000009C00ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKT_CNT_INT \
- CVMX_ADD_IO_SEG(0x00011F0000009110ull)
-#define CVMX_PEXP_NPEI_PKT_CNT_INT_ENB \
- CVMX_ADD_IO_SEG(0x00011F0000009130ull)
-#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ES \
- CVMX_ADD_IO_SEG(0x00011F00000090B0ull)
-#define CVMX_PEXP_NPEI_PKT_DATA_OUT_NS \
- CVMX_ADD_IO_SEG(0x00011F00000090A0ull)
-#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ROR \
- CVMX_ADD_IO_SEG(0x00011F0000009090ull)
-#define CVMX_PEXP_NPEI_PKT_DPADDR \
- CVMX_ADD_IO_SEG(0x00011F0000009080ull)
-#define CVMX_PEXP_NPEI_PKT_INPUT_CONTROL \
- CVMX_ADD_IO_SEG(0x00011F0000009150ull)
-#define CVMX_PEXP_NPEI_PKT_INSTR_ENB \
- CVMX_ADD_IO_SEG(0x00011F0000009000ull)
-#define CVMX_PEXP_NPEI_PKT_INSTR_RD_SIZE \
- CVMX_ADD_IO_SEG(0x00011F0000009190ull)
-#define CVMX_PEXP_NPEI_PKT_INSTR_SIZE \
- CVMX_ADD_IO_SEG(0x00011F0000009020ull)
-#define CVMX_PEXP_NPEI_PKT_INT_LEVELS \
- CVMX_ADD_IO_SEG(0x00011F0000009100ull)
-#define CVMX_PEXP_NPEI_PKT_IN_BP \
- CVMX_ADD_IO_SEG(0x00011F00000086B0ull)
-#define CVMX_PEXP_NPEI_PKT_IN_DONEX_CNTS(offset) \
- CVMX_ADD_IO_SEG(0x00011F000000A000ull + (((offset) & 31) * 16))
-#define CVMX_PEXP_NPEI_PKT_IN_INSTR_COUNTS \
- CVMX_ADD_IO_SEG(0x00011F00000086A0ull)
-#define CVMX_PEXP_NPEI_PKT_IN_PCIE_PORT \
- CVMX_ADD_IO_SEG(0x00011F00000091A0ull)
-#define CVMX_PEXP_NPEI_PKT_IPTR \
- CVMX_ADD_IO_SEG(0x00011F0000009070ull)
-#define CVMX_PEXP_NPEI_PKT_OUTPUT_WMARK \
- CVMX_ADD_IO_SEG(0x00011F0000009160ull)
-#define CVMX_PEXP_NPEI_PKT_OUT_BMODE \
- CVMX_ADD_IO_SEG(0x00011F00000090D0ull)
-#define CVMX_PEXP_NPEI_PKT_OUT_ENB \
- CVMX_ADD_IO_SEG(0x00011F0000009010ull)
-#define CVMX_PEXP_NPEI_PKT_PCIE_PORT \
- CVMX_ADD_IO_SEG(0x00011F00000090E0ull)
-#define CVMX_PEXP_NPEI_PKT_PORT_IN_RST \
- CVMX_ADD_IO_SEG(0x00011F0000008690ull)
-#define CVMX_PEXP_NPEI_PKT_SLIST_ES \
- CVMX_ADD_IO_SEG(0x00011F0000009050ull)
-#define CVMX_PEXP_NPEI_PKT_SLIST_ID_SIZE \
- CVMX_ADD_IO_SEG(0x00011F0000009180ull)
-#define CVMX_PEXP_NPEI_PKT_SLIST_NS \
- CVMX_ADD_IO_SEG(0x00011F0000009040ull)
-#define CVMX_PEXP_NPEI_PKT_SLIST_ROR \
- CVMX_ADD_IO_SEG(0x00011F0000009030ull)
-#define CVMX_PEXP_NPEI_PKT_TIME_INT \
- CVMX_ADD_IO_SEG(0x00011F0000009120ull)
-#define CVMX_PEXP_NPEI_PKT_TIME_INT_ENB \
- CVMX_ADD_IO_SEG(0x00011F0000009140ull)
-#define CVMX_PEXP_NPEI_RSL_INT_BLOCKS \
- CVMX_ADD_IO_SEG(0x00011F0000008520ull)
-#define CVMX_PEXP_NPEI_SCRATCH_1 \
- CVMX_ADD_IO_SEG(0x00011F0000008270ull)
-#define CVMX_PEXP_NPEI_STATE1 \
- CVMX_ADD_IO_SEG(0x00011F0000008620ull)
-#define CVMX_PEXP_NPEI_STATE2 \
- CVMX_ADD_IO_SEG(0x00011F0000008630ull)
-#define CVMX_PEXP_NPEI_STATE3 \
- CVMX_ADD_IO_SEG(0x00011F0000008640ull)
-#define CVMX_PEXP_NPEI_WINDOW_CTL \
- CVMX_ADD_IO_SEG(0x00011F0000008380ull)
+#define CVMX_PEXP_NPEI_BAR1_INDEXX(offset) (CVMX_ADD_IO_SEG(0x00011F0000008000ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011F0000008580ull))
+#define CVMX_PEXP_NPEI_BIST_STATUS2 (CVMX_ADD_IO_SEG(0x00011F0000008680ull))
+#define CVMX_PEXP_NPEI_CTL_PORT0 (CVMX_ADD_IO_SEG(0x00011F0000008250ull))
+#define CVMX_PEXP_NPEI_CTL_PORT1 (CVMX_ADD_IO_SEG(0x00011F0000008260ull))
+#define CVMX_PEXP_NPEI_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011F0000008570ull))
+#define CVMX_PEXP_NPEI_CTL_STATUS2 (CVMX_ADD_IO_SEG(0x00011F000000BC00ull))
+#define CVMX_PEXP_NPEI_DATA_OUT_CNT (CVMX_ADD_IO_SEG(0x00011F00000085F0ull))
+#define CVMX_PEXP_NPEI_DBG_DATA (CVMX_ADD_IO_SEG(0x00011F0000008510ull))
+#define CVMX_PEXP_NPEI_DBG_SELECT (CVMX_ADD_IO_SEG(0x00011F0000008500ull))
+#define CVMX_PEXP_NPEI_DMA0_INT_LEVEL (CVMX_ADD_IO_SEG(0x00011F00000085C0ull))
+#define CVMX_PEXP_NPEI_DMA1_INT_LEVEL (CVMX_ADD_IO_SEG(0x00011F00000085D0ull))
+#define CVMX_PEXP_NPEI_DMAX_COUNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000008450ull) + ((offset) & 7) * 16)
+#define CVMX_PEXP_NPEI_DMAX_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F00000083B0ull) + ((offset) & 7) * 16)
+#define CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000008400ull) + ((offset) & 7) * 16)
+#define CVMX_PEXP_NPEI_DMAX_NADDR(offset) (CVMX_ADD_IO_SEG(0x00011F00000084A0ull) + ((offset) & 7) * 16)
+#define CVMX_PEXP_NPEI_DMA_CNTS (CVMX_ADD_IO_SEG(0x00011F00000085E0ull))
+#define CVMX_PEXP_NPEI_DMA_CONTROL (CVMX_ADD_IO_SEG(0x00011F00000083A0ull))
+#define CVMX_PEXP_NPEI_DMA_PCIE_REQ_NUM (CVMX_ADD_IO_SEG(0x00011F00000085B0ull))
+#define CVMX_PEXP_NPEI_DMA_STATE1 (CVMX_ADD_IO_SEG(0x00011F00000086C0ull))
+#define CVMX_PEXP_NPEI_DMA_STATE1_P1 (CVMX_ADD_IO_SEG(0x00011F0000008680ull))
+#define CVMX_PEXP_NPEI_DMA_STATE2 (CVMX_ADD_IO_SEG(0x00011F00000086D0ull))
+#define CVMX_PEXP_NPEI_DMA_STATE2_P1 (CVMX_ADD_IO_SEG(0x00011F0000008690ull))
+#define CVMX_PEXP_NPEI_DMA_STATE3_P1 (CVMX_ADD_IO_SEG(0x00011F00000086A0ull))
+#define CVMX_PEXP_NPEI_DMA_STATE4_P1 (CVMX_ADD_IO_SEG(0x00011F00000086B0ull))
+#define CVMX_PEXP_NPEI_DMA_STATE5_P1 (CVMX_ADD_IO_SEG(0x00011F00000086C0ull))
+#define CVMX_PEXP_NPEI_INT_A_ENB (CVMX_ADD_IO_SEG(0x00011F0000008560ull))
+#define CVMX_PEXP_NPEI_INT_A_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BCE0ull))
+#define CVMX_PEXP_NPEI_INT_A_SUM (CVMX_ADD_IO_SEG(0x00011F0000008550ull))
+#define CVMX_PEXP_NPEI_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000008540ull))
+#define CVMX_PEXP_NPEI_INT_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BCD0ull))
+#define CVMX_PEXP_NPEI_INT_INFO (CVMX_ADD_IO_SEG(0x00011F0000008590ull))
+#define CVMX_PEXP_NPEI_INT_SUM (CVMX_ADD_IO_SEG(0x00011F0000008530ull))
+#define CVMX_PEXP_NPEI_INT_SUM2 (CVMX_ADD_IO_SEG(0x00011F000000BCC0ull))
+#define CVMX_PEXP_NPEI_LAST_WIN_RDATA0 (CVMX_ADD_IO_SEG(0x00011F0000008600ull))
+#define CVMX_PEXP_NPEI_LAST_WIN_RDATA1 (CVMX_ADD_IO_SEG(0x00011F0000008610ull))
+#define CVMX_PEXP_NPEI_MEM_ACCESS_CTL (CVMX_ADD_IO_SEG(0x00011F00000084F0ull))
+#define CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(offset) (CVMX_ADD_IO_SEG(0x00011F0000008280ull) + ((offset) & 31) * 16 - 16*12)
+#define CVMX_PEXP_NPEI_MSI_ENB0 (CVMX_ADD_IO_SEG(0x00011F000000BC50ull))
+#define CVMX_PEXP_NPEI_MSI_ENB1 (CVMX_ADD_IO_SEG(0x00011F000000BC60ull))
+#define CVMX_PEXP_NPEI_MSI_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BC70ull))
+#define CVMX_PEXP_NPEI_MSI_ENB3 (CVMX_ADD_IO_SEG(0x00011F000000BC80ull))
+#define CVMX_PEXP_NPEI_MSI_RCV0 (CVMX_ADD_IO_SEG(0x00011F000000BC10ull))
+#define CVMX_PEXP_NPEI_MSI_RCV1 (CVMX_ADD_IO_SEG(0x00011F000000BC20ull))
+#define CVMX_PEXP_NPEI_MSI_RCV2 (CVMX_ADD_IO_SEG(0x00011F000000BC30ull))
+#define CVMX_PEXP_NPEI_MSI_RCV3 (CVMX_ADD_IO_SEG(0x00011F000000BC40ull))
+#define CVMX_PEXP_NPEI_MSI_RD_MAP (CVMX_ADD_IO_SEG(0x00011F000000BCA0ull))
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB0 (CVMX_ADD_IO_SEG(0x00011F000000BCF0ull))
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB1 (CVMX_ADD_IO_SEG(0x00011F000000BD00ull))
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BD10ull))
+#define CVMX_PEXP_NPEI_MSI_W1C_ENB3 (CVMX_ADD_IO_SEG(0x00011F000000BD20ull))
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB0 (CVMX_ADD_IO_SEG(0x00011F000000BD30ull))
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB1 (CVMX_ADD_IO_SEG(0x00011F000000BD40ull))
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BD50ull))
+#define CVMX_PEXP_NPEI_MSI_W1S_ENB3 (CVMX_ADD_IO_SEG(0x00011F000000BD60ull))
+#define CVMX_PEXP_NPEI_MSI_WR_MAP (CVMX_ADD_IO_SEG(0x00011F000000BC90ull))
+#define CVMX_PEXP_NPEI_PCIE_CREDIT_CNT (CVMX_ADD_IO_SEG(0x00011F000000BD70ull))
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV (CVMX_ADD_IO_SEG(0x00011F000000BCB0ull))
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B1 (CVMX_ADD_IO_SEG(0x00011F0000008650ull))
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B2 (CVMX_ADD_IO_SEG(0x00011F0000008660ull))
+#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B3 (CVMX_ADD_IO_SEG(0x00011F0000008670ull))
+#define CVMX_PEXP_NPEI_PKTX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F000000A400ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_INSTR_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F000000A800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_INSTR_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F000000AC00ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_INSTR_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F000000B000ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_INSTR_HEADER(offset) (CVMX_ADD_IO_SEG(0x00011F000000B400ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_IN_BP(offset) (CVMX_ADD_IO_SEG(0x00011F000000B800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_SLIST_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000009400ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_SLIST_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F0000009800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKTX_SLIST_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000009C00ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKT_CNT_INT (CVMX_ADD_IO_SEG(0x00011F0000009110ull))
+#define CVMX_PEXP_NPEI_PKT_CNT_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000009130ull))
+#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ES (CVMX_ADD_IO_SEG(0x00011F00000090B0ull))
+#define CVMX_PEXP_NPEI_PKT_DATA_OUT_NS (CVMX_ADD_IO_SEG(0x00011F00000090A0ull))
+#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ROR (CVMX_ADD_IO_SEG(0x00011F0000009090ull))
+#define CVMX_PEXP_NPEI_PKT_DPADDR (CVMX_ADD_IO_SEG(0x00011F0000009080ull))
+#define CVMX_PEXP_NPEI_PKT_INPUT_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000009150ull))
+#define CVMX_PEXP_NPEI_PKT_INSTR_ENB (CVMX_ADD_IO_SEG(0x00011F0000009000ull))
+#define CVMX_PEXP_NPEI_PKT_INSTR_RD_SIZE (CVMX_ADD_IO_SEG(0x00011F0000009190ull))
+#define CVMX_PEXP_NPEI_PKT_INSTR_SIZE (CVMX_ADD_IO_SEG(0x00011F0000009020ull))
+#define CVMX_PEXP_NPEI_PKT_INT_LEVELS (CVMX_ADD_IO_SEG(0x00011F0000009100ull))
+#define CVMX_PEXP_NPEI_PKT_IN_BP (CVMX_ADD_IO_SEG(0x00011F00000086B0ull))
+#define CVMX_PEXP_NPEI_PKT_IN_DONEX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F000000A000ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_NPEI_PKT_IN_INSTR_COUNTS (CVMX_ADD_IO_SEG(0x00011F00000086A0ull))
+#define CVMX_PEXP_NPEI_PKT_IN_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000091A0ull))
+#define CVMX_PEXP_NPEI_PKT_IPTR (CVMX_ADD_IO_SEG(0x00011F0000009070ull))
+#define CVMX_PEXP_NPEI_PKT_OUTPUT_WMARK (CVMX_ADD_IO_SEG(0x00011F0000009160ull))
+#define CVMX_PEXP_NPEI_PKT_OUT_BMODE (CVMX_ADD_IO_SEG(0x00011F00000090D0ull))
+#define CVMX_PEXP_NPEI_PKT_OUT_ENB (CVMX_ADD_IO_SEG(0x00011F0000009010ull))
+#define CVMX_PEXP_NPEI_PKT_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000090E0ull))
+#define CVMX_PEXP_NPEI_PKT_PORT_IN_RST (CVMX_ADD_IO_SEG(0x00011F0000008690ull))
+#define CVMX_PEXP_NPEI_PKT_SLIST_ES (CVMX_ADD_IO_SEG(0x00011F0000009050ull))
+#define CVMX_PEXP_NPEI_PKT_SLIST_ID_SIZE (CVMX_ADD_IO_SEG(0x00011F0000009180ull))
+#define CVMX_PEXP_NPEI_PKT_SLIST_NS (CVMX_ADD_IO_SEG(0x00011F0000009040ull))
+#define CVMX_PEXP_NPEI_PKT_SLIST_ROR (CVMX_ADD_IO_SEG(0x00011F0000009030ull))
+#define CVMX_PEXP_NPEI_PKT_TIME_INT (CVMX_ADD_IO_SEG(0x00011F0000009120ull))
+#define CVMX_PEXP_NPEI_PKT_TIME_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000009140ull))
+#define CVMX_PEXP_NPEI_RSL_INT_BLOCKS (CVMX_ADD_IO_SEG(0x00011F0000008520ull))
+#define CVMX_PEXP_NPEI_SCRATCH_1 (CVMX_ADD_IO_SEG(0x00011F0000008270ull))
+#define CVMX_PEXP_NPEI_STATE1 (CVMX_ADD_IO_SEG(0x00011F0000008620ull))
+#define CVMX_PEXP_NPEI_STATE2 (CVMX_ADD_IO_SEG(0x00011F0000008630ull))
+#define CVMX_PEXP_NPEI_STATE3 (CVMX_ADD_IO_SEG(0x00011F0000008640ull))
+#define CVMX_PEXP_NPEI_WINDOW_CTL (CVMX_ADD_IO_SEG(0x00011F0000008380ull))
+#define CVMX_PEXP_SLI_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011F0000010580ull))
+#define CVMX_PEXP_SLI_CTL_PORTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000010050ull) + ((offset) & 1) * 16)
+#define CVMX_PEXP_SLI_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011F0000010570ull))
+#define CVMX_PEXP_SLI_DATA_OUT_CNT (CVMX_ADD_IO_SEG(0x00011F00000105F0ull))
+#define CVMX_PEXP_SLI_DBG_DATA (CVMX_ADD_IO_SEG(0x00011F0000010310ull))
+#define CVMX_PEXP_SLI_DBG_SELECT (CVMX_ADD_IO_SEG(0x00011F0000010300ull))
+#define CVMX_PEXP_SLI_DMAX_CNT(offset) (CVMX_ADD_IO_SEG(0x00011F0000010400ull) + ((offset) & 1) * 16)
+#define CVMX_PEXP_SLI_DMAX_INT_LEVEL(offset) (CVMX_ADD_IO_SEG(0x00011F00000103E0ull) + ((offset) & 1) * 16)
+#define CVMX_PEXP_SLI_DMAX_TIM(offset) (CVMX_ADD_IO_SEG(0x00011F0000010420ull) + ((offset) & 1) * 16)
+#define CVMX_PEXP_SLI_INT_ENB_CIU (CVMX_ADD_IO_SEG(0x00011F0000013CD0ull))
+#define CVMX_PEXP_SLI_INT_ENB_PORTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000010340ull) + ((offset) & 1) * 16)
+#define CVMX_PEXP_SLI_INT_SUM (CVMX_ADD_IO_SEG(0x00011F0000010330ull))
+#define CVMX_PEXP_SLI_LAST_WIN_RDATA0 (CVMX_ADD_IO_SEG(0x00011F0000010600ull))
+#define CVMX_PEXP_SLI_LAST_WIN_RDATA1 (CVMX_ADD_IO_SEG(0x00011F0000010610ull))
+#define CVMX_PEXP_SLI_MAC_CREDIT_CNT (CVMX_ADD_IO_SEG(0x00011F0000013D70ull))
+#define CVMX_PEXP_SLI_MEM_ACCESS_CTL (CVMX_ADD_IO_SEG(0x00011F00000102F0ull))
+#define CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(offset) (CVMX_ADD_IO_SEG(0x00011F00000100E0ull) + ((offset) & 31) * 16 - 16*12)
+#define CVMX_PEXP_SLI_MSI_ENB0 (CVMX_ADD_IO_SEG(0x00011F0000013C50ull))
+#define CVMX_PEXP_SLI_MSI_ENB1 (CVMX_ADD_IO_SEG(0x00011F0000013C60ull))
+#define CVMX_PEXP_SLI_MSI_ENB2 (CVMX_ADD_IO_SEG(0x00011F0000013C70ull))
+#define CVMX_PEXP_SLI_MSI_ENB3 (CVMX_ADD_IO_SEG(0x00011F0000013C80ull))
+#define CVMX_PEXP_SLI_MSI_RCV0 (CVMX_ADD_IO_SEG(0x00011F0000013C10ull))
+#define CVMX_PEXP_SLI_MSI_RCV1 (CVMX_ADD_IO_SEG(0x00011F0000013C20ull))
+#define CVMX_PEXP_SLI_MSI_RCV2 (CVMX_ADD_IO_SEG(0x00011F0000013C30ull))
+#define CVMX_PEXP_SLI_MSI_RCV3 (CVMX_ADD_IO_SEG(0x00011F0000013C40ull))
+#define CVMX_PEXP_SLI_MSI_RD_MAP (CVMX_ADD_IO_SEG(0x00011F0000013CA0ull))
+#define CVMX_PEXP_SLI_MSI_W1C_ENB0 (CVMX_ADD_IO_SEG(0x00011F0000013CF0ull))
+#define CVMX_PEXP_SLI_MSI_W1C_ENB1 (CVMX_ADD_IO_SEG(0x00011F0000013D00ull))
+#define CVMX_PEXP_SLI_MSI_W1C_ENB2 (CVMX_ADD_IO_SEG(0x00011F0000013D10ull))
+#define CVMX_PEXP_SLI_MSI_W1C_ENB3 (CVMX_ADD_IO_SEG(0x00011F0000013D20ull))
+#define CVMX_PEXP_SLI_MSI_W1S_ENB0 (CVMX_ADD_IO_SEG(0x00011F0000013D30ull))
+#define CVMX_PEXP_SLI_MSI_W1S_ENB1 (CVMX_ADD_IO_SEG(0x00011F0000013D40ull))
+#define CVMX_PEXP_SLI_MSI_W1S_ENB2 (CVMX_ADD_IO_SEG(0x00011F0000013D50ull))
+#define CVMX_PEXP_SLI_MSI_W1S_ENB3 (CVMX_ADD_IO_SEG(0x00011F0000013D60ull))
+#define CVMX_PEXP_SLI_MSI_WR_MAP (CVMX_ADD_IO_SEG(0x00011F0000013C90ull))
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV (CVMX_ADD_IO_SEG(0x00011F0000013CB0ull))
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV_B1 (CVMX_ADD_IO_SEG(0x00011F0000010650ull))
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV_B2 (CVMX_ADD_IO_SEG(0x00011F0000010660ull))
+#define CVMX_PEXP_SLI_PCIE_MSI_RCV_B3 (CVMX_ADD_IO_SEG(0x00011F0000010670ull))
+#define CVMX_PEXP_SLI_PKTX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000012400ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_INSTR_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000012800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_INSTR_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F0000012C00ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_INSTR_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000013000ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_INSTR_HEADER(offset) (CVMX_ADD_IO_SEG(0x00011F0000013400ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_IN_BP(offset) (CVMX_ADD_IO_SEG(0x00011F0000013800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_OUT_SIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000010C00ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_SLIST_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000011400ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_SLIST_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F0000011800ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKTX_SLIST_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000011C00ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKT_CNT_INT (CVMX_ADD_IO_SEG(0x00011F0000011130ull))
+#define CVMX_PEXP_SLI_PKT_CNT_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000011150ull))
+#define CVMX_PEXP_SLI_PKT_CTL (CVMX_ADD_IO_SEG(0x00011F0000011220ull))
+#define CVMX_PEXP_SLI_PKT_DATA_OUT_ES (CVMX_ADD_IO_SEG(0x00011F00000110B0ull))
+#define CVMX_PEXP_SLI_PKT_DATA_OUT_NS (CVMX_ADD_IO_SEG(0x00011F00000110A0ull))
+#define CVMX_PEXP_SLI_PKT_DATA_OUT_ROR (CVMX_ADD_IO_SEG(0x00011F0000011090ull))
+#define CVMX_PEXP_SLI_PKT_DPADDR (CVMX_ADD_IO_SEG(0x00011F0000011080ull))
+#define CVMX_PEXP_SLI_PKT_INPUT_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000011170ull))
+#define CVMX_PEXP_SLI_PKT_INSTR_ENB (CVMX_ADD_IO_SEG(0x00011F0000011000ull))
+#define CVMX_PEXP_SLI_PKT_INSTR_RD_SIZE (CVMX_ADD_IO_SEG(0x00011F00000111A0ull))
+#define CVMX_PEXP_SLI_PKT_INSTR_SIZE (CVMX_ADD_IO_SEG(0x00011F0000011020ull))
+#define CVMX_PEXP_SLI_PKT_INT_LEVELS (CVMX_ADD_IO_SEG(0x00011F0000011120ull))
+#define CVMX_PEXP_SLI_PKT_IN_BP (CVMX_ADD_IO_SEG(0x00011F0000011210ull))
+#define CVMX_PEXP_SLI_PKT_IN_DONEX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000012000ull) + ((offset) & 31) * 16)
+#define CVMX_PEXP_SLI_PKT_IN_INSTR_COUNTS (CVMX_ADD_IO_SEG(0x00011F0000011200ull))
+#define CVMX_PEXP_SLI_PKT_IN_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000111B0ull))
+#define CVMX_PEXP_SLI_PKT_IPTR (CVMX_ADD_IO_SEG(0x00011F0000011070ull))
+#define CVMX_PEXP_SLI_PKT_OUTPUT_WMARK (CVMX_ADD_IO_SEG(0x00011F0000011180ull))
+#define CVMX_PEXP_SLI_PKT_OUT_BMODE (CVMX_ADD_IO_SEG(0x00011F00000110D0ull))
+#define CVMX_PEXP_SLI_PKT_OUT_ENB (CVMX_ADD_IO_SEG(0x00011F0000011010ull))
+#define CVMX_PEXP_SLI_PKT_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000110E0ull))
+#define CVMX_PEXP_SLI_PKT_PORT_IN_RST (CVMX_ADD_IO_SEG(0x00011F00000111F0ull))
+#define CVMX_PEXP_SLI_PKT_SLIST_ES (CVMX_ADD_IO_SEG(0x00011F0000011050ull))
+#define CVMX_PEXP_SLI_PKT_SLIST_NS (CVMX_ADD_IO_SEG(0x00011F0000011040ull))
+#define CVMX_PEXP_SLI_PKT_SLIST_ROR (CVMX_ADD_IO_SEG(0x00011F0000011030ull))
+#define CVMX_PEXP_SLI_PKT_TIME_INT (CVMX_ADD_IO_SEG(0x00011F0000011140ull))
+#define CVMX_PEXP_SLI_PKT_TIME_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000011160ull))
+#define CVMX_PEXP_SLI_S2M_PORTX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011F0000013D80ull) + ((offset) & 1) * 16)
+#define CVMX_PEXP_SLI_SCRATCH_1 (CVMX_ADD_IO_SEG(0x00011F00000103C0ull))
+#define CVMX_PEXP_SLI_SCRATCH_2 (CVMX_ADD_IO_SEG(0x00011F00000103D0ull))
+#define CVMX_PEXP_SLI_STATE1 (CVMX_ADD_IO_SEG(0x00011F0000010620ull))
+#define CVMX_PEXP_SLI_STATE2 (CVMX_ADD_IO_SEG(0x00011F0000010630ull))
+#define CVMX_PEXP_SLI_STATE3 (CVMX_ADD_IO_SEG(0x00011F0000010640ull))
+#define CVMX_PEXP_SLI_WINDOW_CTL (CVMX_ADD_IO_SEG(0x00011F00000102E0ull))
#endif
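
A hedged sketch of how the flattened CVMX_PEXP_* address macros above are typically consumed (assuming <asm/octeon/cvmx.h> for cvmx_read_csr(); the wrapper name and the "port" parameter are illustrative, not taken from the patch):

/*
 * Sketch only: read the per-ring packet counter. The macro already folds
 * CVMX_ADD_IO_SEG() and the 16-byte per-ring stride, so the caller only
 * supplies the ring index.
 */
static inline uint64_t pexp_npei_pkt_cnts(int port)
{
	return cvmx_read_csr(CVMX_PEXP_NPEI_PKTX_CNTS(port));
}

Fixed-address registers such as CVMX_PEXP_SLI_INT_SUM are read the same way, just without an index argument.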
diff --git a/arch/mips/include/asm/octeon/cvmx-pow-defs.h b/arch/mips/include/asm/octeon/cvmx-pow-defs.h
index 2d82e24be51..39fd75b03f7 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,52 +28,29 @@
#ifndef __CVMX_POW_DEFS_H__
#define __CVMX_POW_DEFS_H__
-#define CVMX_POW_BIST_STAT \
- CVMX_ADD_IO_SEG(0x00016700000003F8ull)
-#define CVMX_POW_DS_PC \
- CVMX_ADD_IO_SEG(0x0001670000000398ull)
-#define CVMX_POW_ECC_ERR \
- CVMX_ADD_IO_SEG(0x0001670000000218ull)
-#define CVMX_POW_INT_CTL \
- CVMX_ADD_IO_SEG(0x0001670000000220ull)
-#define CVMX_POW_IQ_CNTX(offset) \
- CVMX_ADD_IO_SEG(0x0001670000000340ull + (((offset) & 7) * 8))
-#define CVMX_POW_IQ_COM_CNT \
- CVMX_ADD_IO_SEG(0x0001670000000388ull)
-#define CVMX_POW_IQ_INT \
- CVMX_ADD_IO_SEG(0x0001670000000238ull)
-#define CVMX_POW_IQ_INT_EN \
- CVMX_ADD_IO_SEG(0x0001670000000240ull)
-#define CVMX_POW_IQ_THRX(offset) \
- CVMX_ADD_IO_SEG(0x00016700000003A0ull + (((offset) & 7) * 8))
-#define CVMX_POW_NOS_CNT \
- CVMX_ADD_IO_SEG(0x0001670000000228ull)
-#define CVMX_POW_NW_TIM \
- CVMX_ADD_IO_SEG(0x0001670000000210ull)
-#define CVMX_POW_PF_RST_MSK \
- CVMX_ADD_IO_SEG(0x0001670000000230ull)
-#define CVMX_POW_PP_GRP_MSKX(offset) \
- CVMX_ADD_IO_SEG(0x0001670000000000ull + (((offset) & 15) * 8))
-#define CVMX_POW_QOS_RNDX(offset) \
- CVMX_ADD_IO_SEG(0x00016700000001C0ull + (((offset) & 7) * 8))
-#define CVMX_POW_QOS_THRX(offset) \
- CVMX_ADD_IO_SEG(0x0001670000000180ull + (((offset) & 7) * 8))
-#define CVMX_POW_TS_PC \
- CVMX_ADD_IO_SEG(0x0001670000000390ull)
-#define CVMX_POW_WA_COM_PC \
- CVMX_ADD_IO_SEG(0x0001670000000380ull)
-#define CVMX_POW_WA_PCX(offset) \
- CVMX_ADD_IO_SEG(0x0001670000000300ull + (((offset) & 7) * 8))
-#define CVMX_POW_WQ_INT \
- CVMX_ADD_IO_SEG(0x0001670000000200ull)
-#define CVMX_POW_WQ_INT_CNTX(offset) \
- CVMX_ADD_IO_SEG(0x0001670000000100ull + (((offset) & 15) * 8))
-#define CVMX_POW_WQ_INT_PC \
- CVMX_ADD_IO_SEG(0x0001670000000208ull)
-#define CVMX_POW_WQ_INT_THRX(offset) \
- CVMX_ADD_IO_SEG(0x0001670000000080ull + (((offset) & 15) * 8))
-#define CVMX_POW_WS_PCX(offset) \
- CVMX_ADD_IO_SEG(0x0001670000000280ull + (((offset) & 15) * 8))
+#define CVMX_POW_BIST_STAT (CVMX_ADD_IO_SEG(0x00016700000003F8ull))
+#define CVMX_POW_DS_PC (CVMX_ADD_IO_SEG(0x0001670000000398ull))
+#define CVMX_POW_ECC_ERR (CVMX_ADD_IO_SEG(0x0001670000000218ull))
+#define CVMX_POW_INT_CTL (CVMX_ADD_IO_SEG(0x0001670000000220ull))
+#define CVMX_POW_IQ_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001670000000340ull) + ((offset) & 7) * 8)
+#define CVMX_POW_IQ_COM_CNT (CVMX_ADD_IO_SEG(0x0001670000000388ull))
+#define CVMX_POW_IQ_INT (CVMX_ADD_IO_SEG(0x0001670000000238ull))
+#define CVMX_POW_IQ_INT_EN (CVMX_ADD_IO_SEG(0x0001670000000240ull))
+#define CVMX_POW_IQ_THRX(offset) (CVMX_ADD_IO_SEG(0x00016700000003A0ull) + ((offset) & 7) * 8)
+#define CVMX_POW_NOS_CNT (CVMX_ADD_IO_SEG(0x0001670000000228ull))
+#define CVMX_POW_NW_TIM (CVMX_ADD_IO_SEG(0x0001670000000210ull))
+#define CVMX_POW_PF_RST_MSK (CVMX_ADD_IO_SEG(0x0001670000000230ull))
+#define CVMX_POW_PP_GRP_MSKX(offset) (CVMX_ADD_IO_SEG(0x0001670000000000ull) + ((offset) & 15) * 8)
+#define CVMX_POW_QOS_RNDX(offset) (CVMX_ADD_IO_SEG(0x00016700000001C0ull) + ((offset) & 7) * 8)
+#define CVMX_POW_QOS_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000000180ull) + ((offset) & 7) * 8)
+#define CVMX_POW_TS_PC (CVMX_ADD_IO_SEG(0x0001670000000390ull))
+#define CVMX_POW_WA_COM_PC (CVMX_ADD_IO_SEG(0x0001670000000380ull))
+#define CVMX_POW_WA_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000000300ull) + ((offset) & 7) * 8)
+#define CVMX_POW_WQ_INT (CVMX_ADD_IO_SEG(0x0001670000000200ull))
+#define CVMX_POW_WQ_INT_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001670000000100ull) + ((offset) & 15) * 8)
+#define CVMX_POW_WQ_INT_PC (CVMX_ADD_IO_SEG(0x0001670000000208ull))
+#define CVMX_POW_WQ_INT_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000000080ull) + ((offset) & 15) * 8)
+#define CVMX_POW_WS_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000000280ull) + ((offset) & 15) * 8)
union cvmx_pow_bist_stat {
uint64_t u64;
@@ -160,6 +137,19 @@ union cvmx_pow_bist_stat {
struct cvmx_pow_bist_stat_cn56xx cn56xxp1;
struct cvmx_pow_bist_stat_cn38xx cn58xx;
struct cvmx_pow_bist_stat_cn38xx cn58xxp1;
+ struct cvmx_pow_bist_stat_cn63xx {
+ uint64_t reserved_22_63:42;
+ uint64_t pp:6;
+ uint64_t reserved_12_15:4;
+ uint64_t cam:1;
+ uint64_t nbr:3;
+ uint64_t nbt:4;
+ uint64_t index:1;
+ uint64_t fidx:1;
+ uint64_t pend:1;
+ uint64_t adr:1;
+ } cn63xx;
+ struct cvmx_pow_bist_stat_cn63xx cn63xxp1;
};
union cvmx_pow_ds_pc {
@@ -179,6 +169,8 @@ union cvmx_pow_ds_pc {
struct cvmx_pow_ds_pc_s cn56xxp1;
struct cvmx_pow_ds_pc_s cn58xx;
struct cvmx_pow_ds_pc_s cn58xxp1;
+ struct cvmx_pow_ds_pc_s cn63xx;
+ struct cvmx_pow_ds_pc_s cn63xxp1;
};
union cvmx_pow_ecc_err {
@@ -219,6 +211,8 @@ union cvmx_pow_ecc_err {
struct cvmx_pow_ecc_err_s cn56xxp1;
struct cvmx_pow_ecc_err_s cn58xx;
struct cvmx_pow_ecc_err_s cn58xxp1;
+ struct cvmx_pow_ecc_err_s cn63xx;
+ struct cvmx_pow_ecc_err_s cn63xxp1;
};
union cvmx_pow_int_ctl {
@@ -239,6 +233,8 @@ union cvmx_pow_int_ctl {
struct cvmx_pow_int_ctl_s cn56xxp1;
struct cvmx_pow_int_ctl_s cn58xx;
struct cvmx_pow_int_ctl_s cn58xxp1;
+ struct cvmx_pow_int_ctl_s cn63xx;
+ struct cvmx_pow_int_ctl_s cn63xxp1;
};
union cvmx_pow_iq_cntx {
@@ -258,6 +254,8 @@ union cvmx_pow_iq_cntx {
struct cvmx_pow_iq_cntx_s cn56xxp1;
struct cvmx_pow_iq_cntx_s cn58xx;
struct cvmx_pow_iq_cntx_s cn58xxp1;
+ struct cvmx_pow_iq_cntx_s cn63xx;
+ struct cvmx_pow_iq_cntx_s cn63xxp1;
};
union cvmx_pow_iq_com_cnt {
@@ -277,6 +275,8 @@ union cvmx_pow_iq_com_cnt {
struct cvmx_pow_iq_com_cnt_s cn56xxp1;
struct cvmx_pow_iq_com_cnt_s cn58xx;
struct cvmx_pow_iq_com_cnt_s cn58xxp1;
+ struct cvmx_pow_iq_com_cnt_s cn63xx;
+ struct cvmx_pow_iq_com_cnt_s cn63xxp1;
};
union cvmx_pow_iq_int {
@@ -289,6 +289,8 @@ union cvmx_pow_iq_int {
struct cvmx_pow_iq_int_s cn52xxp1;
struct cvmx_pow_iq_int_s cn56xx;
struct cvmx_pow_iq_int_s cn56xxp1;
+ struct cvmx_pow_iq_int_s cn63xx;
+ struct cvmx_pow_iq_int_s cn63xxp1;
};
union cvmx_pow_iq_int_en {
@@ -301,6 +303,8 @@ union cvmx_pow_iq_int_en {
struct cvmx_pow_iq_int_en_s cn52xxp1;
struct cvmx_pow_iq_int_en_s cn56xx;
struct cvmx_pow_iq_int_en_s cn56xxp1;
+ struct cvmx_pow_iq_int_en_s cn63xx;
+ struct cvmx_pow_iq_int_en_s cn63xxp1;
};
union cvmx_pow_iq_thrx {
@@ -313,6 +317,8 @@ union cvmx_pow_iq_thrx {
struct cvmx_pow_iq_thrx_s cn52xxp1;
struct cvmx_pow_iq_thrx_s cn56xx;
struct cvmx_pow_iq_thrx_s cn56xxp1;
+ struct cvmx_pow_iq_thrx_s cn63xx;
+ struct cvmx_pow_iq_thrx_s cn63xxp1;
};
union cvmx_pow_nos_cnt {
@@ -341,6 +347,11 @@ union cvmx_pow_nos_cnt {
struct cvmx_pow_nos_cnt_s cn56xxp1;
struct cvmx_pow_nos_cnt_s cn58xx;
struct cvmx_pow_nos_cnt_s cn58xxp1;
+ struct cvmx_pow_nos_cnt_cn63xx {
+ uint64_t reserved_11_63:53;
+ uint64_t nos_cnt:11;
+ } cn63xx;
+ struct cvmx_pow_nos_cnt_cn63xx cn63xxp1;
};
union cvmx_pow_nw_tim {
@@ -360,6 +371,8 @@ union cvmx_pow_nw_tim {
struct cvmx_pow_nw_tim_s cn56xxp1;
struct cvmx_pow_nw_tim_s cn58xx;
struct cvmx_pow_nw_tim_s cn58xxp1;
+ struct cvmx_pow_nw_tim_s cn63xx;
+ struct cvmx_pow_nw_tim_s cn63xxp1;
};
union cvmx_pow_pf_rst_msk {
@@ -375,6 +388,8 @@ union cvmx_pow_pf_rst_msk {
struct cvmx_pow_pf_rst_msk_s cn56xxp1;
struct cvmx_pow_pf_rst_msk_s cn58xx;
struct cvmx_pow_pf_rst_msk_s cn58xxp1;
+ struct cvmx_pow_pf_rst_msk_s cn63xx;
+ struct cvmx_pow_pf_rst_msk_s cn63xxp1;
};
union cvmx_pow_pp_grp_mskx {
@@ -405,6 +420,8 @@ union cvmx_pow_pp_grp_mskx {
struct cvmx_pow_pp_grp_mskx_s cn56xxp1;
struct cvmx_pow_pp_grp_mskx_s cn58xx;
struct cvmx_pow_pp_grp_mskx_s cn58xxp1;
+ struct cvmx_pow_pp_grp_mskx_s cn63xx;
+ struct cvmx_pow_pp_grp_mskx_s cn63xxp1;
};
union cvmx_pow_qos_rndx {
@@ -427,6 +444,8 @@ union cvmx_pow_qos_rndx {
struct cvmx_pow_qos_rndx_s cn56xxp1;
struct cvmx_pow_qos_rndx_s cn58xx;
struct cvmx_pow_qos_rndx_s cn58xxp1;
+ struct cvmx_pow_qos_rndx_s cn63xx;
+ struct cvmx_pow_qos_rndx_s cn63xxp1;
};
union cvmx_pow_qos_thrx {
@@ -485,6 +504,19 @@ union cvmx_pow_qos_thrx {
struct cvmx_pow_qos_thrx_s cn56xxp1;
struct cvmx_pow_qos_thrx_s cn58xx;
struct cvmx_pow_qos_thrx_s cn58xxp1;
+ struct cvmx_pow_qos_thrx_cn63xx {
+ uint64_t reserved_59_63:5;
+ uint64_t des_cnt:11;
+ uint64_t reserved_47_47:1;
+ uint64_t buf_cnt:11;
+ uint64_t reserved_35_35:1;
+ uint64_t free_cnt:11;
+ uint64_t reserved_22_23:2;
+ uint64_t max_thr:10;
+ uint64_t reserved_10_11:2;
+ uint64_t min_thr:10;
+ } cn63xx;
+ struct cvmx_pow_qos_thrx_cn63xx cn63xxp1;
};
union cvmx_pow_ts_pc {
@@ -504,6 +536,8 @@ union cvmx_pow_ts_pc {
struct cvmx_pow_ts_pc_s cn56xxp1;
struct cvmx_pow_ts_pc_s cn58xx;
struct cvmx_pow_ts_pc_s cn58xxp1;
+ struct cvmx_pow_ts_pc_s cn63xx;
+ struct cvmx_pow_ts_pc_s cn63xxp1;
};
union cvmx_pow_wa_com_pc {
@@ -523,6 +557,8 @@ union cvmx_pow_wa_com_pc {
struct cvmx_pow_wa_com_pc_s cn56xxp1;
struct cvmx_pow_wa_com_pc_s cn58xx;
struct cvmx_pow_wa_com_pc_s cn58xxp1;
+ struct cvmx_pow_wa_com_pc_s cn63xx;
+ struct cvmx_pow_wa_com_pc_s cn63xxp1;
};
union cvmx_pow_wa_pcx {
@@ -542,6 +578,8 @@ union cvmx_pow_wa_pcx {
struct cvmx_pow_wa_pcx_s cn56xxp1;
struct cvmx_pow_wa_pcx_s cn58xx;
struct cvmx_pow_wa_pcx_s cn58xxp1;
+ struct cvmx_pow_wa_pcx_s cn63xx;
+ struct cvmx_pow_wa_pcx_s cn63xxp1;
};
union cvmx_pow_wq_int {
@@ -562,6 +600,8 @@ union cvmx_pow_wq_int {
struct cvmx_pow_wq_int_s cn56xxp1;
struct cvmx_pow_wq_int_s cn58xx;
struct cvmx_pow_wq_int_s cn58xxp1;
+ struct cvmx_pow_wq_int_s cn63xx;
+ struct cvmx_pow_wq_int_s cn63xxp1;
};
union cvmx_pow_wq_int_cntx {
@@ -604,6 +644,15 @@ union cvmx_pow_wq_int_cntx {
struct cvmx_pow_wq_int_cntx_s cn56xxp1;
struct cvmx_pow_wq_int_cntx_s cn58xx;
struct cvmx_pow_wq_int_cntx_s cn58xxp1;
+ struct cvmx_pow_wq_int_cntx_cn63xx {
+ uint64_t reserved_28_63:36;
+ uint64_t tc_cnt:4;
+ uint64_t reserved_23_23:1;
+ uint64_t ds_cnt:11;
+ uint64_t reserved_11_11:1;
+ uint64_t iq_cnt:11;
+ } cn63xx;
+ struct cvmx_pow_wq_int_cntx_cn63xx cn63xxp1;
};
union cvmx_pow_wq_int_pc {
@@ -626,6 +675,8 @@ union cvmx_pow_wq_int_pc {
struct cvmx_pow_wq_int_pc_s cn56xxp1;
struct cvmx_pow_wq_int_pc_s cn58xx;
struct cvmx_pow_wq_int_pc_s cn58xxp1;
+ struct cvmx_pow_wq_int_pc_s cn63xx;
+ struct cvmx_pow_wq_int_pc_s cn63xxp1;
};
union cvmx_pow_wq_int_thrx {
@@ -674,6 +725,16 @@ union cvmx_pow_wq_int_thrx {
struct cvmx_pow_wq_int_thrx_s cn56xxp1;
struct cvmx_pow_wq_int_thrx_s cn58xx;
struct cvmx_pow_wq_int_thrx_s cn58xxp1;
+ struct cvmx_pow_wq_int_thrx_cn63xx {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_22_23:2;
+ uint64_t ds_thr:10;
+ uint64_t reserved_10_11:2;
+ uint64_t iq_thr:10;
+ } cn63xx;
+ struct cvmx_pow_wq_int_thrx_cn63xx cn63xxp1;
};
union cvmx_pow_ws_pcx {
@@ -693,6 +754,8 @@ union cvmx_pow_ws_pcx {
struct cvmx_pow_ws_pcx_s cn56xxp1;
struct cvmx_pow_ws_pcx_s cn58xx;
struct cvmx_pow_ws_pcx_s cn58xxp1;
+ struct cvmx_pow_ws_pcx_s cn63xx;
+ struct cvmx_pow_ws_pcx_s cn63xxp1;
};
#endif
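
For reference, the indexed register macros in these headers all follow one pattern: a fixed base address plus a masked index times the register stride. A small standalone sketch makes the arithmetic concrete; CVMX_ADD_IO_SEG() is stubbed out as the identity purely for illustration (the real macro ORs in the XKPHYS I/O segment bits), so only the offset math is shown.

#include <assert.h>

/* Illustration only: stub, not the kernel's CVMX_ADD_IO_SEG(). */
#define CVMX_ADD_IO_SEG(p) (p)
#define CVMX_POW_IQ_CNTX(offset) \
	(CVMX_ADD_IO_SEG(0x0001670000000340ull) + ((offset) & 7) * 8)

int main(void)
{
	assert(CVMX_POW_IQ_CNTX(0) == 0x0001670000000340ull);
	assert(CVMX_POW_IQ_CNTX(3) == 0x0001670000000358ull); /* base + 3 * 8 */
	assert(CVMX_POW_IQ_CNTX(9) == 0x0001670000000348ull); /* index is masked: 9 & 7 == 1 */
	return 0;
}
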
diff --git a/arch/mips/include/asm/octeon/cvmx-rnm-defs.h b/arch/mips/include/asm/octeon/cvmx-rnm-defs.h
index 4586958c97b..c45da1f35ea 100644
--- a/arch/mips/include/asm/octeon/cvmx-rnm-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-rnm-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -30,10 +30,11 @@
#include <linux/types.h>
-#define CVMX_RNM_BIST_STATUS \
- CVMX_ADD_IO_SEG(0x0001180040000008ull)
-#define CVMX_RNM_CTL_STATUS \
- CVMX_ADD_IO_SEG(0x0001180040000000ull)
+#define CVMX_RNM_BIST_STATUS (CVMX_ADD_IO_SEG(0x0001180040000008ull))
+#define CVMX_RNM_CTL_STATUS (CVMX_ADD_IO_SEG(0x0001180040000000ull))
+#define CVMX_RNM_EER_DBG (CVMX_ADD_IO_SEG(0x0001180040000018ull))
+#define CVMX_RNM_EER_KEY (CVMX_ADD_IO_SEG(0x0001180040000010ull))
+#define CVMX_RNM_SERIAL_NUM (CVMX_ADD_IO_SEG(0x0001180040000020ull))
union cvmx_rnm_bist_status {
uint64_t u64;
@@ -53,12 +54,16 @@ union cvmx_rnm_bist_status {
struct cvmx_rnm_bist_status_s cn56xxp1;
struct cvmx_rnm_bist_status_s cn58xx;
struct cvmx_rnm_bist_status_s cn58xxp1;
+ struct cvmx_rnm_bist_status_s cn63xx;
+ struct cvmx_rnm_bist_status_s cn63xxp1;
};
union cvmx_rnm_ctl_status {
uint64_t u64;
struct cvmx_rnm_ctl_status_s {
- uint64_t reserved_9_63:55;
+ uint64_t reserved_11_63:53;
+ uint64_t eer_lck:1;
+ uint64_t eer_val:1;
uint64_t ent_sel:4;
uint64_t exp_ent:1;
uint64_t rng_rst:1;
@@ -76,13 +81,49 @@ union cvmx_rnm_ctl_status {
struct cvmx_rnm_ctl_status_cn30xx cn31xx;
struct cvmx_rnm_ctl_status_cn30xx cn38xx;
struct cvmx_rnm_ctl_status_cn30xx cn38xxp2;
- struct cvmx_rnm_ctl_status_s cn50xx;
- struct cvmx_rnm_ctl_status_s cn52xx;
- struct cvmx_rnm_ctl_status_s cn52xxp1;
- struct cvmx_rnm_ctl_status_s cn56xx;
- struct cvmx_rnm_ctl_status_s cn56xxp1;
- struct cvmx_rnm_ctl_status_s cn58xx;
- struct cvmx_rnm_ctl_status_s cn58xxp1;
+ struct cvmx_rnm_ctl_status_cn50xx {
+ uint64_t reserved_9_63:55;
+ uint64_t ent_sel:4;
+ uint64_t exp_ent:1;
+ uint64_t rng_rst:1;
+ uint64_t rnm_rst:1;
+ uint64_t rng_en:1;
+ uint64_t ent_en:1;
+ } cn50xx;
+ struct cvmx_rnm_ctl_status_cn50xx cn52xx;
+ struct cvmx_rnm_ctl_status_cn50xx cn52xxp1;
+ struct cvmx_rnm_ctl_status_cn50xx cn56xx;
+ struct cvmx_rnm_ctl_status_cn50xx cn56xxp1;
+ struct cvmx_rnm_ctl_status_cn50xx cn58xx;
+ struct cvmx_rnm_ctl_status_cn50xx cn58xxp1;
+ struct cvmx_rnm_ctl_status_s cn63xx;
+ struct cvmx_rnm_ctl_status_s cn63xxp1;
+};
+
+union cvmx_rnm_eer_dbg {
+ uint64_t u64;
+ struct cvmx_rnm_eer_dbg_s {
+ uint64_t dat:64;
+ } s;
+ struct cvmx_rnm_eer_dbg_s cn63xx;
+ struct cvmx_rnm_eer_dbg_s cn63xxp1;
+};
+
+union cvmx_rnm_eer_key {
+ uint64_t u64;
+ struct cvmx_rnm_eer_key_s {
+ uint64_t key:64;
+ } s;
+ struct cvmx_rnm_eer_key_s cn63xx;
+ struct cvmx_rnm_eer_key_s cn63xxp1;
+};
+
+union cvmx_rnm_serial_num {
+ uint64_t u64;
+ struct cvmx_rnm_serial_num_s {
+ uint64_t dat:64;
+ } s;
+ struct cvmx_rnm_serial_num_s cn63xx;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-smix-defs.h b/arch/mips/include/asm/octeon/cvmx-smix-defs.h
index 9ae45fcbe3e..4f3c0666e94 100644
--- a/arch/mips/include/asm/octeon/cvmx-smix-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-smix-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,16 +28,11 @@
#ifndef __CVMX_SMIX_DEFS_H__
#define __CVMX_SMIX_DEFS_H__
-#define CVMX_SMIX_CLK(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001818ull + (((offset) & 1) * 256))
-#define CVMX_SMIX_CMD(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001800ull + (((offset) & 1) * 256))
-#define CVMX_SMIX_EN(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001820ull + (((offset) & 1) * 256))
-#define CVMX_SMIX_RD_DAT(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001810ull + (((offset) & 1) * 256))
-#define CVMX_SMIX_WR_DAT(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001808ull + (((offset) & 1) * 256))
+#define CVMX_SMIX_CLK(offset) (CVMX_ADD_IO_SEG(0x0001180000001818ull) + ((offset) & 1) * 256)
+#define CVMX_SMIX_CMD(offset) (CVMX_ADD_IO_SEG(0x0001180000001800ull) + ((offset) & 1) * 256)
+#define CVMX_SMIX_EN(offset) (CVMX_ADD_IO_SEG(0x0001180000001820ull) + ((offset) & 1) * 256)
+#define CVMX_SMIX_RD_DAT(offset) (CVMX_ADD_IO_SEG(0x0001180000001810ull) + ((offset) & 1) * 256)
+#define CVMX_SMIX_WR_DAT(offset) (CVMX_ADD_IO_SEG(0x0001180000001808ull) + ((offset) & 1) * 256)
union cvmx_smix_clk {
uint64_t u64;
@@ -56,7 +51,8 @@ union cvmx_smix_clk {
struct cvmx_smix_clk_cn30xx {
uint64_t reserved_21_63:43;
uint64_t sample_hi:5;
- uint64_t reserved_14_15:2;
+ uint64_t sample_mode:1;
+ uint64_t reserved_14_14:1;
uint64_t clk_idle:1;
uint64_t preamble:1;
uint64_t sample:4;
@@ -65,23 +61,15 @@ union cvmx_smix_clk {
struct cvmx_smix_clk_cn30xx cn31xx;
struct cvmx_smix_clk_cn30xx cn38xx;
struct cvmx_smix_clk_cn30xx cn38xxp2;
- struct cvmx_smix_clk_cn50xx {
- uint64_t reserved_25_63:39;
- uint64_t mode:1;
- uint64_t reserved_21_23:3;
- uint64_t sample_hi:5;
- uint64_t reserved_14_15:2;
- uint64_t clk_idle:1;
- uint64_t preamble:1;
- uint64_t sample:4;
- uint64_t phase:8;
- } cn50xx;
+ struct cvmx_smix_clk_s cn50xx;
struct cvmx_smix_clk_s cn52xx;
- struct cvmx_smix_clk_cn50xx cn52xxp1;
+ struct cvmx_smix_clk_s cn52xxp1;
struct cvmx_smix_clk_s cn56xx;
- struct cvmx_smix_clk_cn50xx cn56xxp1;
+ struct cvmx_smix_clk_s cn56xxp1;
struct cvmx_smix_clk_cn30xx cn58xx;
struct cvmx_smix_clk_cn30xx cn58xxp1;
+ struct cvmx_smix_clk_s cn63xx;
+ struct cvmx_smix_clk_s cn63xxp1;
};
union cvmx_smix_cmd {
@@ -112,6 +100,8 @@ union cvmx_smix_cmd {
struct cvmx_smix_cmd_s cn56xxp1;
struct cvmx_smix_cmd_cn30xx cn58xx;
struct cvmx_smix_cmd_cn30xx cn58xxp1;
+ struct cvmx_smix_cmd_s cn63xx;
+ struct cvmx_smix_cmd_s cn63xxp1;
};
union cvmx_smix_en {
@@ -131,6 +121,8 @@ union cvmx_smix_en {
struct cvmx_smix_en_s cn56xxp1;
struct cvmx_smix_en_s cn58xx;
struct cvmx_smix_en_s cn58xxp1;
+ struct cvmx_smix_en_s cn63xx;
+ struct cvmx_smix_en_s cn63xxp1;
};
union cvmx_smix_rd_dat {
@@ -152,6 +144,8 @@ union cvmx_smix_rd_dat {
struct cvmx_smix_rd_dat_s cn56xxp1;
struct cvmx_smix_rd_dat_s cn58xx;
struct cvmx_smix_rd_dat_s cn58xxp1;
+ struct cvmx_smix_rd_dat_s cn63xx;
+ struct cvmx_smix_rd_dat_s cn63xxp1;
};
union cvmx_smix_wr_dat {
@@ -173,6 +167,8 @@ union cvmx_smix_wr_dat {
struct cvmx_smix_wr_dat_s cn56xxp1;
struct cvmx_smix_wr_dat_s cn58xx;
struct cvmx_smix_wr_dat_s cn58xxp1;
+ struct cvmx_smix_wr_dat_s cn63xx;
+ struct cvmx_smix_wr_dat_s cn63xxp1;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-uctlx-defs.h b/arch/mips/include/asm/octeon/cvmx-uctlx-defs.h
new file mode 100644
index 00000000000..594f1b68cd6
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-uctlx-defs.h
@@ -0,0 +1,261 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2010 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_UCTLX_TYPEDEFS_H__
+#define __CVMX_UCTLX_TYPEDEFS_H__
+
+#define CVMX_UCTLX_BIST_STATUS(block_id) (CVMX_ADD_IO_SEG(0x000118006F0000A0ull))
+#define CVMX_UCTLX_CLK_RST_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000000ull))
+#define CVMX_UCTLX_EHCI_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000080ull))
+#define CVMX_UCTLX_EHCI_FLA(block_id) (CVMX_ADD_IO_SEG(0x000118006F0000A8ull))
+#define CVMX_UCTLX_ERTO_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000090ull))
+#define CVMX_UCTLX_IF_ENA(block_id) (CVMX_ADD_IO_SEG(0x000118006F000030ull))
+#define CVMX_UCTLX_INT_ENA(block_id) (CVMX_ADD_IO_SEG(0x000118006F000028ull))
+#define CVMX_UCTLX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x000118006F000020ull))
+#define CVMX_UCTLX_OHCI_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000088ull))
+#define CVMX_UCTLX_ORTO_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000098ull))
+#define CVMX_UCTLX_PPAF_WM(block_id) (CVMX_ADD_IO_SEG(0x000118006F000038ull))
+#define CVMX_UCTLX_UPHY_CTL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x000118006F000008ull))
+#define CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(offset, block_id) (CVMX_ADD_IO_SEG(0x000118006F000010ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8)
+
+union cvmx_uctlx_bist_status {
+ uint64_t u64;
+ struct cvmx_uctlx_bist_status_s {
+ uint64_t reserved_6_63:58;
+ uint64_t data_bis:1;
+ uint64_t desc_bis:1;
+ uint64_t erbm_bis:1;
+ uint64_t orbm_bis:1;
+ uint64_t wrbm_bis:1;
+ uint64_t ppaf_bis:1;
+ } s;
+ struct cvmx_uctlx_bist_status_s cn63xx;
+ struct cvmx_uctlx_bist_status_s cn63xxp1;
+};
+
+union cvmx_uctlx_clk_rst_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_clk_rst_ctl_s {
+ uint64_t reserved_25_63:39;
+ uint64_t clear_bist:1;
+ uint64_t start_bist:1;
+ uint64_t ehci_sm:1;
+ uint64_t ohci_clkcktrst:1;
+ uint64_t ohci_sm:1;
+ uint64_t ohci_susp_lgcy:1;
+ uint64_t app_start_clk:1;
+ uint64_t o_clkdiv_rst:1;
+ uint64_t h_clkdiv_byp:1;
+ uint64_t h_clkdiv_rst:1;
+ uint64_t h_clkdiv_en:1;
+ uint64_t o_clkdiv_en:1;
+ uint64_t h_div:4;
+ uint64_t p_refclk_sel:2;
+ uint64_t p_refclk_div:2;
+ uint64_t reserved_4_4:1;
+ uint64_t p_com_on:1;
+ uint64_t p_por:1;
+ uint64_t p_prst:1;
+ uint64_t hrst:1;
+ } s;
+ struct cvmx_uctlx_clk_rst_ctl_s cn63xx;
+ struct cvmx_uctlx_clk_rst_ctl_s cn63xxp1;
+};
+
+union cvmx_uctlx_ehci_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_ehci_ctl_s {
+ uint64_t reserved_20_63:44;
+ uint64_t desc_rbm:1;
+ uint64_t reg_nb:1;
+ uint64_t l2c_dc:1;
+ uint64_t l2c_bc:1;
+ uint64_t l2c_0pag:1;
+ uint64_t l2c_stt:1;
+ uint64_t l2c_buff_emod:2;
+ uint64_t l2c_desc_emod:2;
+ uint64_t inv_reg_a2:1;
+ uint64_t ehci_64b_addr_en:1;
+ uint64_t l2c_addr_msb:8;
+ } s;
+ struct cvmx_uctlx_ehci_ctl_s cn63xx;
+ struct cvmx_uctlx_ehci_ctl_s cn63xxp1;
+};
+
+union cvmx_uctlx_ehci_fla {
+ uint64_t u64;
+ struct cvmx_uctlx_ehci_fla_s {
+ uint64_t reserved_6_63:58;
+ uint64_t fla:6;
+ } s;
+ struct cvmx_uctlx_ehci_fla_s cn63xx;
+ struct cvmx_uctlx_ehci_fla_s cn63xxp1;
+};
+
+union cvmx_uctlx_erto_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_erto_ctl_s {
+ uint64_t reserved_32_63:32;
+ uint64_t to_val:27;
+ uint64_t reserved_0_4:5;
+ } s;
+ struct cvmx_uctlx_erto_ctl_s cn63xx;
+ struct cvmx_uctlx_erto_ctl_s cn63xxp1;
+};
+
+union cvmx_uctlx_if_ena {
+ uint64_t u64;
+ struct cvmx_uctlx_if_ena_s {
+ uint64_t reserved_1_63:63;
+ uint64_t en:1;
+ } s;
+ struct cvmx_uctlx_if_ena_s cn63xx;
+ struct cvmx_uctlx_if_ena_s cn63xxp1;
+};
+
+union cvmx_uctlx_int_ena {
+ uint64_t u64;
+ struct cvmx_uctlx_int_ena_s {
+ uint64_t reserved_8_63:56;
+ uint64_t ec_ovf_e:1;
+ uint64_t oc_ovf_e:1;
+ uint64_t wb_pop_e:1;
+ uint64_t wb_psh_f:1;
+ uint64_t cf_psh_f:1;
+ uint64_t or_psh_f:1;
+ uint64_t er_psh_f:1;
+ uint64_t pp_psh_f:1;
+ } s;
+ struct cvmx_uctlx_int_ena_s cn63xx;
+ struct cvmx_uctlx_int_ena_s cn63xxp1;
+};
+
+union cvmx_uctlx_int_reg {
+ uint64_t u64;
+ struct cvmx_uctlx_int_reg_s {
+ uint64_t reserved_8_63:56;
+ uint64_t ec_ovf_e:1;
+ uint64_t oc_ovf_e:1;
+ uint64_t wb_pop_e:1;
+ uint64_t wb_psh_f:1;
+ uint64_t cf_psh_f:1;
+ uint64_t or_psh_f:1;
+ uint64_t er_psh_f:1;
+ uint64_t pp_psh_f:1;
+ } s;
+ struct cvmx_uctlx_int_reg_s cn63xx;
+ struct cvmx_uctlx_int_reg_s cn63xxp1;
+};
+
+union cvmx_uctlx_ohci_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_ohci_ctl_s {
+ uint64_t reserved_19_63:45;
+ uint64_t reg_nb:1;
+ uint64_t l2c_dc:1;
+ uint64_t l2c_bc:1;
+ uint64_t l2c_0pag:1;
+ uint64_t l2c_stt:1;
+ uint64_t l2c_buff_emod:2;
+ uint64_t l2c_desc_emod:2;
+ uint64_t inv_reg_a2:1;
+ uint64_t reserved_8_8:1;
+ uint64_t l2c_addr_msb:8;
+ } s;
+ struct cvmx_uctlx_ohci_ctl_s cn63xx;
+ struct cvmx_uctlx_ohci_ctl_s cn63xxp1;
+};
+
+union cvmx_uctlx_orto_ctl {
+ uint64_t u64;
+ struct cvmx_uctlx_orto_ctl_s {
+ uint64_t reserved_32_63:32;
+ uint64_t to_val:24;
+ uint64_t reserved_0_7:8;
+ } s;
+ struct cvmx_uctlx_orto_ctl_s cn63xx;
+ struct cvmx_uctlx_orto_ctl_s cn63xxp1;
+};
+
+union cvmx_uctlx_ppaf_wm {
+ uint64_t u64;
+ struct cvmx_uctlx_ppaf_wm_s {
+ uint64_t reserved_5_63:59;
+ uint64_t wm:5;
+ } s;
+ struct cvmx_uctlx_ppaf_wm_s cn63xx;
+ struct cvmx_uctlx_ppaf_wm_s cn63xxp1;
+};
+
+union cvmx_uctlx_uphy_ctl_status {
+ uint64_t u64;
+ struct cvmx_uctlx_uphy_ctl_status_s {
+ uint64_t reserved_10_63:54;
+ uint64_t bist_done:1;
+ uint64_t bist_err:1;
+ uint64_t hsbist:1;
+ uint64_t fsbist:1;
+ uint64_t lsbist:1;
+ uint64_t siddq:1;
+ uint64_t vtest_en:1;
+ uint64_t uphy_bist:1;
+ uint64_t bist_en:1;
+ uint64_t ate_reset:1;
+ } s;
+ struct cvmx_uctlx_uphy_ctl_status_s cn63xx;
+ struct cvmx_uctlx_uphy_ctl_status_s cn63xxp1;
+};
+
+union cvmx_uctlx_uphy_portx_ctl_status {
+ uint64_t u64;
+ struct cvmx_uctlx_uphy_portx_ctl_status_s {
+ uint64_t reserved_43_63:21;
+ uint64_t tdata_out:4;
+ uint64_t txbiststuffenh:1;
+ uint64_t txbiststuffen:1;
+ uint64_t dmpulldown:1;
+ uint64_t dppulldown:1;
+ uint64_t vbusvldext:1;
+ uint64_t portreset:1;
+ uint64_t txhsvxtune:2;
+ uint64_t txvreftune:4;
+ uint64_t txrisetune:1;
+ uint64_t txpreemphasistune:1;
+ uint64_t txfslstune:4;
+ uint64_t sqrxtune:3;
+ uint64_t compdistune:3;
+ uint64_t loop_en:1;
+ uint64_t tclk:1;
+ uint64_t tdata_sel:1;
+ uint64_t taddr_in:4;
+ uint64_t tdata_in:8;
+ } s;
+ struct cvmx_uctlx_uphy_portx_ctl_status_s cn63xx;
+ struct cvmx_uctlx_uphy_portx_ctl_status_s cn63xxp1;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h
index cf50336eca2..700f88e31ca 100644
--- a/arch/mips/include/asm/octeon/octeon-model.h
+++ b/arch/mips/include/asm/octeon/octeon-model.h
@@ -35,14 +35,6 @@
#ifndef __OCTEON_MODEL_H__
#define __OCTEON_MODEL_H__
-/* NOTE: These must match what is checked in common-config.mk */
-/* Defines to represent the different versions of Octeon. */
-
-/*
- * IMPORTANT: When the default pass is updated for an Octeon Model,
- * the corresponding change must also be made in the oct-sim script.
- */
-
/*
* The defines below should be used with the OCTEON_IS_MODEL() macro
* to determine what model of chip the software is running on. Models
@@ -71,6 +63,21 @@
#define OM_IGNORE_MINOR_REVISION 0x08000000
#define OM_FLAG_MASK 0xff000000
+#define OM_MATCH_5XXX_FAMILY_MODELS 0x20000000 /* Match all cn5XXX Octeon models. */
+#define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000 /* Match all cn6XXX Octeon models. */
+
+/*
+ * CN6XXX models with new revision encoding
+ */
+#define OCTEON_CN63XX_PASS1_0 0x000d9000
+#define OCTEON_CN63XX_PASS1_1 0x000d9001
+#define OCTEON_CN63XX_PASS1_2 0x000d9002
+#define OCTEON_CN63XX_PASS2_0 0x000d9008
+
+#define OCTEON_CN63XX (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+
/*
* CN5XXX models with new revision encoding
*/
@@ -189,6 +196,9 @@
| OM_MATCH_PREVIOUS_MODELS \
| OM_IGNORE_REVISION)
+#define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
+#define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
+
/* The revision byte (low byte) has two different encodings.
* CN3XXX:
*
@@ -222,6 +232,7 @@
| OCTEON_58XX_MODEL_MASK)
#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK \
& 0x00fffff8)
+#define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0
#define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z)))
@@ -273,6 +284,15 @@ static inline int __OCTEON_IS_MODEL_COMPILE__(uint32_t arg_model,
__OCTEON_MATCH_MASK__((chip_model), (arg_model),
OCTEON_58XX_MODEL_REV_MASK))
return 1;
+
+ if (((arg_model & OM_MATCH_5XXX_FAMILY_MODELS) == OM_MATCH_5XXX_FAMILY_MODELS) &&
+ ((chip_model) >= OCTEON_CN58XX_PASS1_0) && ((chip_model) < OCTEON_CN63XX_PASS1_0))
+ return 1;
+
+ if (((arg_model & OM_MATCH_6XXX_FAMILY_MODELS) == OM_MATCH_6XXX_FAMILY_MODELS) &&
+ ((chip_model) >= OCTEON_CN63XX_PASS1_0))
+ return 1;
+
if ((arg_model & OM_MATCH_PREVIOUS_MODELS) &&
((chip_model & OCTEON_58XX_MODEL_MASK) <
(arg_model & OCTEON_58XX_MODEL_MASK)))
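
The two new family-match flags let code test for a whole Octeon generation instead of a single model. A hedged usage sketch follows; octeon_probe_board() is a made-up caller for illustration, while OCTEON_IS_MODEL() and the model constants are the ones defined in this header.

#include <linux/kernel.h>
#include <asm/octeon/octeon-model.h>

/* Hypothetical caller, for illustration only. */
static void octeon_probe_board(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
		pr_info("Octeon II cn63xx pass 1.x\n");
	else if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
		pr_info("another cn6XXX family part\n");
	else if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
		pr_info("cn5XXX family part\n");
}
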
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index 917a6c413b1..6b34afd0d4e 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -35,6 +35,7 @@ extern int octeon_is_simulation(void);
extern int octeon_is_pci_host(void);
extern int octeon_usb_is_ref_clk(void);
extern uint64_t octeon_get_clock_rate(void);
+extern u64 octeon_get_io_clock_rate(void);
extern const char *octeon_board_type_string(void);
extern const char *octeon_get_pci_interrupts(void);
extern int octeon_get_southbridge_interrupt(void);
diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h
index ece78043acf..fba2ba200f5 100644
--- a/arch/mips/include/asm/octeon/pci-octeon.h
+++ b/arch/mips/include/asm/octeon/pci-octeon.h
@@ -36,6 +36,16 @@ extern int (*octeon_pcibios_map_irq)(const struct pci_dev *dev,
u8 slot, u8 pin);
/*
+ * For PCI (not PCIe) the BAR2 base address.
+ */
+#define OCTEON_BAR2_PCI_ADDRESS 0x8000000000ull
+
+/*
+ * For PCI (not PCIe) the base of the memory mapped by BAR1
+ */
+extern u64 octeon_bar1_pci_phys;
+
+/*
* The following defines are used when octeon_dma_bar_type =
* OCTEON_DMA_BAR_TYPE_BIG
*/
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index 5f4b9d4e411..f1f508e4f97 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -839,7 +839,7 @@ struct bridge_controller {
nasid_t nasid;
unsigned int widget_id;
unsigned int irq_cpu;
- dma64_addr_t baddr;
+ u64 baddr;
unsigned int pci_int[8];
};
diff --git a/arch/mips/include/asm/perf_event.h b/arch/mips/include/asm/perf_event.h
new file mode 100644
index 00000000000..e00007cf816
--- /dev/null
+++ b/arch/mips/include/asm/perf_event.h
@@ -0,0 +1,25 @@
+/*
+ * linux/arch/mips/include/asm/perf_event.h
+ *
+ * Copyright (C) 2010 MIPS Technologies, Inc.
+ * Author: Deng-Cheng Zhu
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MIPS_PERF_EVENT_H__
+#define __MIPS_PERF_EVENT_H__
+
+/*
+ * MIPS performance counters do not raise NMI upon overflow, a regular
+ * interrupt will be signaled. Hence we can do the pending perf event
+ * work at the tail of the irq handler.
+ */
+static inline void
+set_perf_event_pending(void)
+{
+}
+
+#endif /* __MIPS_PERF_EVENT_H__ */
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index f00896087dd..55908fd56b1 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -113,10 +113,10 @@
#endif
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
-#if PGDIR_SIZE >= TASK_SIZE
+#if PGDIR_SIZE >= TASK_SIZE64
#define USER_PTRS_PER_PGD (1)
#else
-#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+#define USER_PTRS_PER_PGD (TASK_SIZE64 / PGDIR_SIZE)
#endif
#define FIRST_USER_ADDRESS 0UL
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 0d629bb93cb..ead6928fa6b 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -50,13 +50,10 @@ extern unsigned int vced_count, vcei_count;
* so don't change it unless you know what you are doing.
*/
#define TASK_SIZE 0x7fff8000UL
-#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - SPECIAL_PAGES_SIZE)
-/*
- * This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE ((TASK_SIZE / 3) & ~(PAGE_SIZE))
+#ifdef __KERNEL__
+#define STACK_TOP_MAX TASK_SIZE
+#endif
#define TASK_IS_32BIT_ADDR 1
@@ -71,28 +68,29 @@ extern unsigned int vced_count, vcei_count;
* 8192EB ...
*/
#define TASK_SIZE32 0x7fff8000UL
-#define TASK_SIZE 0x10000000000UL
-#define STACK_TOP \
- (((test_thread_flag(TIF_32BIT_ADDR) ? \
- TASK_SIZE32 : TASK_SIZE) & PAGE_MASK) - SPECIAL_PAGES_SIZE)
+#define TASK_SIZE64 0x10000000000UL
+#define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
+
+#ifdef __KERNEL__
+#define STACK_TOP_MAX TASK_SIZE64
+#endif
+
-/*
- * This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE \
- (test_thread_flag(TIF_32BIT_ADDR) ? \
- PAGE_ALIGN(TASK_SIZE32 / 3) : PAGE_ALIGN(TASK_SIZE / 3))
#define TASK_SIZE_OF(tsk) \
- (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE)
+ (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR)
#endif
-#ifdef __KERNEL__
-#define STACK_TOP_MAX TASK_SIZE
-#endif
+#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - SPECIAL_PAGES_SIZE)
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
+
#define NUM_FPU_REGS 32
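
On a 64-bit kernel, TASK_SIZE is now chosen at run time from the calling thread's TIF_32BIT_ADDR flag, and STACK_TOP and TASK_UNMAPPED_BASE follow it, while STACK_TOP_MAX is pinned to TASK_SIZE64 so it does not depend on the current thread. Worked values, assuming 4 KiB pages (an assumption, not something this hunk fixes):

/* 32-bit task: TASK_SIZE = TASK_SIZE32 = 0x7fff8000
 *              TASK_UNMAPPED_BASE = PAGE_ALIGN(0x7fff8000 / 3) = 0x2aaa8000
 * 64-bit task: TASK_SIZE = TASK_SIZE64 = 0x10000000000 (1 TiB)
 *              TASK_UNMAPPED_BASE = PAGE_ALIGN(0x10000000000 / 3) = 0x5555556000
 */
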
diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
index bb937ccfba1..6018c80ce37 100644
--- a/arch/mips/include/asm/system.h
+++ b/arch/mips/include/asm/system.h
@@ -115,21 +115,19 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
} else if (kernel_uses_llsc) {
unsigned long dummy;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: ll %0, %3 # xchg_u32 \n"
- " .set mips0 \n"
- " move %2, %z4 \n"
- " .set mips3 \n"
- " sc %2, %1 \n"
- " beqz %2, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (retval), "=m" (*m), "=&r" (dummy)
- : "R" (*m), "Jr" (val)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " ll %0, %3 # xchg_u32 \n"
+ " .set mips0 \n"
+ " move %2, %z4 \n"
+ " .set mips3 \n"
+ " sc %2, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (retval), "=m" (*m), "=&r" (dummy)
+ : "R" (*m), "Jr" (val)
+ : "memory");
+ } while (unlikely(!dummy));
} else {
unsigned long flags;
@@ -167,19 +165,17 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
} else if (kernel_uses_llsc) {
unsigned long dummy;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: lld %0, %3 # xchg_u64 \n"
- " move %2, %z4 \n"
- " scd %2, %1 \n"
- " beqz %2, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (retval), "=m" (*m), "=&r" (dummy)
- : "R" (*m), "Jr" (val)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " lld %0, %3 # xchg_u64 \n"
+ " move %2, %z4 \n"
+ " scd %2, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (retval), "=m" (*m), "=&r" (dummy)
+ : "R" (*m), "Jr" (val)
+ : "memory");
+ } while (unlikely(!dummy));
} else {
unsigned long flags;
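
In the rewrite above, the out-of-line branch trick (.subsection 2) is replaced by a C do/while loop: sc/scd writes 0 into dummy when it loses the reservation, so the loop retries until the store succeeds. Purely as a point of comparison (this is not what the kernel uses), the same exchange semantics can be written with a modern compiler builtin:

/* Comparison only: __atomic_exchange_n() returns the previous value,
 * like xchg_u32() above; the kernel keeps the explicit ll/sc loop. */
static inline unsigned int xchg_u32_builtin(volatile unsigned int *m,
					    unsigned int val)
{
	return __atomic_exchange_n(m, val, __ATOMIC_SEQ_CST);
}
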
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 70df9c0d3c5..d309556cacf 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -83,6 +83,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_MASK (THREAD_SIZE - 1UL)
+#define STACK_WARN (THREAD_SIZE / 8)
+
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
#ifdef CONFIG_DEBUG_STACK_USAGE
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index c2d53c18fd3..653a412c036 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -35,7 +35,9 @@
#ifdef CONFIG_64BIT
-#define __UA_LIMIT (- TASK_SIZE)
+extern u64 __ua_limit;
+
+#define __UA_LIMIT __ua_limit
#define __UA_ADDR ".dword"
#define __UA_LA "dla"
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 80884983270..22b2e0e3861 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -104,4 +104,6 @@ obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
obj-$(CONFIG_MIPS_CPUFREQ) += cpufreq/
+obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
+
CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index b1b304ea212..71620e19827 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -25,6 +25,8 @@
#include <asm/system.h>
#include <asm/watch.h>
#include <asm/spram.h>
+#include <asm/uaccess.h>
+
/*
* Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
* the implementation of the "wait" feature differs between CPU families. This
@@ -181,12 +183,13 @@ void __init check_wait(void)
case CPU_5KC:
case CPU_25KF:
case CPU_PR4450:
- case CPU_BCM3302:
- case CPU_BCM6338:
- case CPU_BCM6348:
- case CPU_BCM6358:
+ case CPU_BMIPS3300:
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ case CPU_BMIPS5000:
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
case CPU_JZRISC:
cpu_wait = r4k_wait;
break;
@@ -902,33 +905,37 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
switch (c->processor_id & 0xff00) {
- case PRID_IMP_BCM3302:
- /* same as PRID_IMP_BCM6338 */
- c->cputype = CPU_BCM3302;
- __cpu_name[cpu] = "Broadcom BCM3302";
- break;
- case PRID_IMP_BCM4710:
- c->cputype = CPU_BCM4710;
- __cpu_name[cpu] = "Broadcom BCM4710";
- break;
- case PRID_IMP_BCM6345:
- c->cputype = CPU_BCM6345;
- __cpu_name[cpu] = "Broadcom BCM6345";
+ case PRID_IMP_BMIPS32:
+ c->cputype = CPU_BMIPS32;
+ __cpu_name[cpu] = "Broadcom BMIPS32";
+ break;
+ case PRID_IMP_BMIPS3300:
+ case PRID_IMP_BMIPS3300_ALT:
+ case PRID_IMP_BMIPS3300_BUG:
+ c->cputype = CPU_BMIPS3300;
+ __cpu_name[cpu] = "Broadcom BMIPS3300";
+ break;
+ case PRID_IMP_BMIPS43XX: {
+ int rev = c->processor_id & 0xff;
+
+ if (rev >= PRID_REV_BMIPS4380_LO &&
+ rev <= PRID_REV_BMIPS4380_HI) {
+ c->cputype = CPU_BMIPS4380;
+ __cpu_name[cpu] = "Broadcom BMIPS4380";
+ } else {
+ c->cputype = CPU_BMIPS4350;
+ __cpu_name[cpu] = "Broadcom BMIPS4350";
+ }
break;
- case PRID_IMP_BCM6348:
- c->cputype = CPU_BCM6348;
- __cpu_name[cpu] = "Broadcom BCM6348";
+ }
+ case PRID_IMP_BMIPS5000:
+ c->cputype = CPU_BMIPS5000;
+ __cpu_name[cpu] = "Broadcom BMIPS5000";
+ c->options |= MIPS_CPU_ULRI;
break;
- case PRID_IMP_BCM4350:
- switch (c->processor_id & 0xf0) {
- case PRID_REV_BCM6358:
- c->cputype = CPU_BCM6358;
- __cpu_name[cpu] = "Broadcom BCM6358";
- break;
- default:
- c->cputype = CPU_UNKNOWN;
- break;
- }
+ case PRID_IMP_BMIPS4KC:
+ c->cputype = CPU_4KC;
+ __cpu_name[cpu] = "MIPS 4Kc";
break;
}
}
@@ -953,6 +960,12 @@ platform:
if (cpu == 0)
__elf_platform = "octeon";
break;
+ case PRID_IMP_CAVIUM_CN63XX:
+ c->cputype = CPU_CAVIUM_OCTEON2;
+ __cpu_name[cpu] = "Cavium Octeon II";
+ if (cpu == 0)
+ __elf_platform = "octeon2";
+ break;
default:
printk(KERN_INFO "Unknown Octeon chip!\n");
c->cputype = CPU_UNKNOWN;
@@ -976,6 +989,12 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
}
}
+#ifdef CONFIG_64BIT
+/* For use by uaccess.h */
+u64 __ua_limit;
+EXPORT_SYMBOL(__ua_limit);
+#endif
+
const char *__cpu_name[NR_CPUS];
const char *__elf_platform;
@@ -1053,6 +1072,11 @@ __cpuinit void cpu_probe(void)
c->srsets = 1;
cpu_probe_vmbits(c);
+
+#ifdef CONFIG_64BIT
+ if (cpu == 0)
+ __ua_limit = ~((1ull << cpu_vmbits) - 1);
+#endif
}
__cpuinit void cpu_report(void)
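
The __ua_limit value set in cpu_probe() above feeds the new runtime __UA_LIMIT in uaccess.h: ~((1ull << cpu_vmbits) - 1) is a mask of every address bit above the implemented virtual address range, which the uaccess checks use to reject pointers outside it. A worked value, assuming a part that implements 40 virtual address bits (cpu_vmbits = 40 is only an example):

/* Example only: cpu_vmbits == 40 is an assumed value.
 *   (1ull << 40) - 1     == 0x000000ffffffffff
 *   ~((1ull << 40) - 1)  == 0xffffff0000000000   <-- __ua_limit
 */
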
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index c6345f579a8..4f93db58a79 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -151,6 +151,29 @@ void __init init_IRQ(void)
#endif
}
+#ifdef DEBUG_STACKOVERFLOW
+static inline void check_stack_overflow(void)
+{
+ unsigned long sp;
+
+ __asm__ __volatile__("move %0, $sp" : "=r" (sp));
+ sp &= THREAD_MASK;
+
+ /*
+ * Check for stack overflow: is there less than STACK_WARN free?
+ * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
+ */
+ if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+ printk("do_IRQ: stack overflow: %ld\n",
+ sp - sizeof(struct thread_info));
+ dump_stack();
+ }
+}
+#else
+static inline void check_stack_overflow(void) {}
+#endif
+
+
/*
* do_IRQ handles all normal device IRQ's (the special
* SMP cross-CPU interrupts have their own specific
@@ -159,6 +182,7 @@ void __init init_IRQ(void)
void __irq_entry do_IRQ(unsigned int irq)
{
irq_enter();
+ check_stack_overflow();
__DO_IRQ_SMTC_HOOK(irq);
generic_handle_irq(irq);
irq_exit();
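
check_stack_overflow() masks the stack pointer down to its offset within the current kernel stack; struct thread_info lives at the bottom of that stack, so the offset minus sizeof(struct thread_info) is the space left before an overflow starts corrupting it. A rough worked example with assumed sizes (none of these numbers come from this patch):

/* Assumed for illustration: THREAD_SIZE = 8192 (two 4 KiB pages),
 * so STACK_WARN = 8192 / 8 = 1024, and take sizeof(struct thread_info)
 * as roughly 128 bytes.  sp & THREAD_MASK is the offset of $sp inside
 * the 8 KiB stack; once it drops below ~128 + 1024 bytes, do_IRQ()
 * prints the warning and dumps the stack. */
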
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
new file mode 100644
index 00000000000..2b7f3f703b8
--- /dev/null
+++ b/arch/mips/kernel/perf_event.c
@@ -0,0 +1,601 @@
+/*
+ * Linux performance counter support for MIPS.
+ *
+ * Copyright (C) 2010 MIPS Technologies, Inc.
+ * Author: Deng-Cheng Zhu
+ *
+ * This code is based on the implementation for ARM, which is in turn
+ * based on the sparc64 perf event code and the x86 code. Performance
+ * counter access is based on the MIPS Oprofile code. And the callchain
+ * support references the code of MIPS stacktrace.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/stacktrace.h>
+#include <asm/time.h> /* For perf_irq */
+
+/* These are for 32bit counters. For 64bit ones, define them accordingly. */
+#define MAX_PERIOD ((1ULL << 32) - 1)
+#define VALID_COUNT 0x7fffffff
+#define TOTAL_BITS 32
+#define HIGHEST_BIT 31
+
+#define MIPS_MAX_HWEVENTS 4
+
+struct cpu_hw_events {
+ /* Array of events on this cpu. */
+ struct perf_event *events[MIPS_MAX_HWEVENTS];
+
+ /*
+ * Set the bit (indexed by the counter number) when the counter
+ * is used for an event.
+ */
+ unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
+
+ /*
+ * The borrowed MSB for the performance counter. A MIPS performance
+ * counter uses its bit 31 (for 32bit counters) or bit 63 (for 64bit
+ * counters) as a factor of determining whether a counter overflow
+ * should be signaled. So here we use a separate MSB for each
+ * counter to make things easy.
+ */
+ unsigned long msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
+
+ /*
+ * Software copy of the control register for each performance counter.
+ * MIPS CPUs vary in performance counters. They use this differently,
+ * and even may not use it.
+ */
+ unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
+};
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
+ .saved_ctrl = {0},
+};
+
+/* The description of MIPS performance events. */
+struct mips_perf_event {
+ unsigned int event_id;
+ /*
+ * MIPS performance counters are indexed starting from 0.
+ * CNTR_EVEN indicates the indexes of the counters to be used are
+ * even numbers.
+ */
+ unsigned int cntr_mask;
+ #define CNTR_EVEN 0x55555555
+ #define CNTR_ODD 0xaaaaaaaa
+#ifdef CONFIG_MIPS_MT_SMP
+ enum {
+ T = 0,
+ V = 1,
+ P = 2,
+ } range;
+#else
+ #define T
+ #define V
+ #define P
+#endif
+};
+
+static struct mips_perf_event raw_event;
+static DEFINE_MUTEX(raw_event_mutex);
+
+#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+struct mips_pmu {
+ const char *name;
+ int irq;
+ irqreturn_t (*handle_irq)(int irq, void *dev);
+ int (*handle_shared_irq)(void);
+ void (*start)(void);
+ void (*stop)(void);
+ int (*alloc_counter)(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *hwc);
+ u64 (*read_counter)(unsigned int idx);
+ void (*write_counter)(unsigned int idx, u64 val);
+ void (*enable_event)(struct hw_perf_event *evt, int idx);
+ void (*disable_event)(int idx);
+ const struct mips_perf_event *(*map_raw_event)(u64 config);
+ const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
+ const struct mips_perf_event (*cache_event_map)
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+ unsigned int num_counters;
+};
+
+static const struct mips_pmu *mipspmu;
+
+static int
+mipspmu_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ s64 left = local64_read(&hwc->period_left);
+ s64 period = hwc->sample_period;
+ int ret = 0;
+ u64 uleft;
+ unsigned long flags;
+
+ if (unlikely(left <= -period)) {
+ left = period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (unlikely(left <= 0)) {
+ left += period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (left > (s64)MAX_PERIOD)
+ left = MAX_PERIOD;
+
+ local64_set(&hwc->prev_count, (u64)-left);
+
+ local_irq_save(flags);
+ uleft = (u64)(-left) & MAX_PERIOD;
+ uleft > VALID_COUNT ?
+ set_bit(idx, cpuc->msbs) : clear_bit(idx, cpuc->msbs);
+ mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT);
+ local_irq_restore(flags);
+
+ perf_event_update_userpage(event);
+
+ return ret;
+}
+
+static int mipspmu_enable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+ int err = 0;
+
+ /* To look for a free counter for this event. */
+ idx = mipspmu->alloc_counter(cpuc, hwc);
+ if (idx < 0) {
+ err = idx;
+ goto out;
+ }
+
+ /*
+ * If there is an event in the counter we are going to use then
+ * make sure it is disabled.
+ */
+ event->hw.idx = idx;
+ mipspmu->disable_event(idx);
+ cpuc->events[idx] = event;
+
+ /* Set the period for the event. */
+ mipspmu_event_set_period(event, hwc, idx);
+
+ /* Enable the event. */
+ mipspmu->enable_event(hwc, idx);
+
+ /* Propagate our changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+out:
+ return err;
+}
+
+static void mipspmu_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ unsigned long flags;
+ int shift = 64 - TOTAL_BITS;
+ s64 prev_raw_count, new_raw_count;
+ s64 delta;
+
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ local_irq_save(flags);
+ /* Make the counter value be a "real" one. */
+ new_raw_count = mipspmu->read_counter(idx);
+ if (new_raw_count & (test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) {
+ new_raw_count &= VALID_COUNT;
+ clear_bit(idx, cpuc->msbs);
+ } else
+ new_raw_count |= (test_bit(idx, cpuc->msbs) << HIGHEST_BIT);
+ local_irq_restore(flags);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ delta = (new_raw_count << shift) - (prev_raw_count << shift);
+ delta >>= shift;
+
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+
+ return;
+}
+
+static void mipspmu_disable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+
+ WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+
+ /* We are working on a local event. */
+ mipspmu->disable_event(idx);
+
+ barrier();
+
+ mipspmu_event_update(event, hwc, idx);
+ cpuc->events[idx] = NULL;
+ clear_bit(idx, cpuc->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static void mipspmu_unthrottle(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ mipspmu->enable_event(hwc, hwc->idx);
+}
+
+static void mipspmu_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Don't read disabled counters! */
+ if (hwc->idx < 0)
+ return;
+
+ mipspmu_event_update(event, hwc, hwc->idx);
+}
+
+static struct pmu pmu = {
+ .enable = mipspmu_enable,
+ .disable = mipspmu_disable,
+ .unthrottle = mipspmu_unthrottle,
+ .read = mipspmu_read,
+};
+
+static atomic_t active_events = ATOMIC_INIT(0);
+static DEFINE_MUTEX(pmu_reserve_mutex);
+static int (*save_perf_irq)(void);
+
+static int mipspmu_get_irq(void)
+{
+ int err;
+
+ if (mipspmu->irq >= 0) {
+ /* Request my own irq handler. */
+ err = request_irq(mipspmu->irq, mipspmu->handle_irq,
+ IRQF_DISABLED | IRQF_NOBALANCING,
+ "mips_perf_pmu", NULL);
+ if (err) {
+ pr_warning("Unable to request IRQ%d for MIPS "
+ "performance counters!\n", mipspmu->irq);
+ }
+ } else if (cp0_perfcount_irq < 0) {
+ /*
+ * We are sharing the irq number with the timer interrupt.
+ */
+ save_perf_irq = perf_irq;
+ perf_irq = mipspmu->handle_shared_irq;
+ err = 0;
+ } else {
+ pr_warning("The platform hasn't properly defined its "
+ "interrupt controller.\n");
+ err = -ENOENT;
+ }
+
+ return err;
+}
+
+static void mipspmu_free_irq(void)
+{
+ if (mipspmu->irq >= 0)
+ free_irq(mipspmu->irq, NULL);
+ else if (cp0_perfcount_irq < 0)
+ perf_irq = save_perf_irq;
+}
+
+static inline unsigned int
+mipspmu_perf_event_encode(const struct mips_perf_event *pev)
+{
+/*
+ * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
+ * event_id.
+ */
+#ifdef CONFIG_MIPS_MT_SMP
+ return ((unsigned int)pev->range << 24) |
+ (pev->cntr_mask & 0xffff00) |
+ (pev->event_id & 0xff);
+#else
+ return (pev->cntr_mask & 0xffff00) |
+ (pev->event_id & 0xff);
+#endif
+}
+
+static const struct mips_perf_event *
+mipspmu_map_general_event(int idx)
+{
+ const struct mips_perf_event *pev;
+
+ pev = ((*mipspmu->general_event_map)[idx].event_id ==
+ UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
+ &(*mipspmu->general_event_map)[idx]);
+
+ return pev;
+}
+
+static const struct mips_perf_event *
+mipspmu_map_cache_event(u64 config)
+{
+ unsigned int cache_type, cache_op, cache_result;
+ const struct mips_perf_event *pev;
+
+ cache_type = (config >> 0) & 0xff;
+ if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+ return ERR_PTR(-EINVAL);
+
+ cache_op = (config >> 8) & 0xff;
+ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ return ERR_PTR(-EINVAL);
+
+ cache_result = (config >> 16) & 0xff;
+ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ pev = &((*mipspmu->cache_event_map)
+ [cache_type]
+ [cache_op]
+ [cache_result]);
+
+ if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return pev;
+
+}
+
+static int validate_event(struct cpu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ struct hw_perf_event fake_hwc = event->hw;
+
+ if (event->pmu && event->pmu != &pmu)
+ return 0;
+
+ return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
+}
+
+static int validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct cpu_hw_events fake_cpuc;
+
+ memset(&fake_cpuc, 0, sizeof(fake_cpuc));
+
+ if (!validate_event(&fake_cpuc, leader))
+ return -ENOSPC;
+
+ list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+ if (!validate_event(&fake_cpuc, sibling))
+ return -ENOSPC;
+ }
+
+ if (!validate_event(&fake_cpuc, event))
+ return -ENOSPC;
+
+ return 0;
+}
+
+/*
+ * mipsxx/rm9000/loongson2 have different performance counters, they have
+ * specific low-level init routines.
+ */
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+ if (atomic_dec_and_mutex_lock(&active_events,
+ &pmu_reserve_mutex)) {
+ /*
+ * We must not call the destroy function with interrupts
+ * disabled.
+ */
+ on_each_cpu(reset_counters,
+ (void *)(long)mipspmu->num_counters, 1);
+ mipspmu_free_irq();
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+}
+
+const struct pmu *hw_perf_event_init(struct perf_event *event)
+{
+ int err = 0;
+
+ if (!mipspmu || event->cpu >= nr_cpumask_bits ||
+ (event->cpu >= 0 && !cpu_online(event->cpu)))
+ return ERR_PTR(-ENODEV);
+
+ if (!atomic_inc_not_zero(&active_events)) {
+ if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
+ atomic_dec(&active_events);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ mutex_lock(&pmu_reserve_mutex);
+ if (atomic_read(&active_events) == 0)
+ err = mipspmu_get_irq();
+
+ if (!err)
+ atomic_inc(&active_events);
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+
+ if (err)
+ return ERR_PTR(err);
+
+ err = __hw_perf_event_init(event);
+ if (err)
+ hw_perf_event_destroy(event);
+
+ return err ? ERR_PTR(err) : &pmu;
+}
+
+void hw_perf_enable(void)
+{
+ if (mipspmu)
+ mipspmu->start();
+}
+
+void hw_perf_disable(void)
+{
+ if (mipspmu)
+ mipspmu->stop();
+}
+
+/* This is needed by specific irq handlers in perf_event_*.c */
+static void
+handle_associated_event(struct cpu_hw_events *cpuc,
+ int idx, struct perf_sample_data *data, struct pt_regs *regs)
+{
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc = &event->hw;
+
+ mipspmu_event_update(event, hwc, idx);
+ data->period = event->hw.last_period;
+ if (!mipspmu_event_set_period(event, hwc, idx))
+ return;
+
+ if (perf_event_overflow(event, 0, data, regs))
+ mipspmu->disable_event(idx);
+}
+
+#include "perf_event_mipsxx.c"
+
+/* Callchain handling code. */
+static inline void
+callchain_store(struct perf_callchain_entry *entry,
+ u64 ip)
+{
+ if (entry->nr < PERF_MAX_STACK_DEPTH)
+ entry->ip[entry->nr++] = ip;
+}
+
+/*
+ * Leave userspace callchain empty for now. When we find a way to trace
+ * the user stack callchains, we add here.
+ */
+static void
+perf_callchain_user(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+}
+
+static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
+ unsigned long reg29)
+{
+ unsigned long *sp = (unsigned long *)reg29;
+ unsigned long addr;
+
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr)) {
+ callchain_store(entry, addr);
+ if (entry->nr >= PERF_MAX_STACK_DEPTH)
+ break;
+ }
+ }
+}
+
+static void
+perf_callchain_kernel(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+ unsigned long sp = regs->regs[29];
+#ifdef CONFIG_KALLSYMS
+ unsigned long ra = regs->regs[31];
+ unsigned long pc = regs->cp0_epc;
+
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
+ if (raw_show_trace || !__kernel_text_address(pc)) {
+ unsigned long stack_page =
+ (unsigned long)task_stack_page(current);
+ if (stack_page && sp >= stack_page &&
+ sp <= stack_page + THREAD_SIZE - 32)
+ save_raw_perf_callchain(entry, sp);
+ return;
+ }
+ do {
+ callchain_store(entry, pc);
+ if (entry->nr >= PERF_MAX_STACK_DEPTH)
+ break;
+ pc = unwind_stack(current, &sp, pc, &ra);
+ } while (pc);
+#else
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
+ save_raw_perf_callchain(entry, sp);
+#endif
+}
+
+static void
+perf_do_callchain(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+ int is_user;
+
+ if (!regs)
+ return;
+
+ is_user = user_mode(regs);
+
+ if (!current || !current->pid)
+ return;
+
+ if (is_user && current->state != TASK_RUNNING)
+ return;
+
+ if (!is_user) {
+ perf_callchain_kernel(regs, entry);
+ if (current->mm)
+ regs = task_pt_regs(current);
+ else
+ regs = NULL;
+ }
+ if (regs)
+ perf_callchain_user(regs, entry);
+}
+
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+
+struct perf_callchain_entry *
+perf_callchain(struct pt_regs *regs)
+{
+ struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
+
+ entry->nr = 0;
+ perf_do_callchain(regs, entry);
+ return entry;
+}
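
With this core in place (and the CPU-specific parts in perf_event_mipsxx.c below), the generic perf_event syscall interface behaves on MIPS as on other architectures. Below is a minimal user-space sketch that counts CPU cycles around a workload; it is standard perf_event_open boilerplate, not something specific to this patch.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* There is no glibc wrapper for perf_event_open; call it directly. */
	fd = syscall(__NR_perf_event_open, &attr, 0 /* this thread */,
		     -1 /* any cpu */, -1 /* no group */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload to measure goes here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %lld\n", count);
	close(fd);
	return 0;
}
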
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
new file mode 100644
index 00000000000..5c7c6fc0756
--- /dev/null
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -0,0 +1,1052 @@
+#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) || \
+ defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_SB1)
+
+#define M_CONFIG1_PC (1 << 4)
+
+#define M_PERFCTL_EXL (1UL << 0)
+#define M_PERFCTL_KERNEL (1UL << 1)
+#define M_PERFCTL_SUPERVISOR (1UL << 2)
+#define M_PERFCTL_USER (1UL << 3)
+#define M_PERFCTL_INTERRUPT_ENABLE (1UL << 4)
+#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
+#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
+#define M_PERFCTL_MT_EN(filter) ((filter) << 20)
+#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
+#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
+#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
+#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
+#define M_PERFCTL_WIDE (1UL << 30)
+#define M_PERFCTL_MORE (1UL << 31)
+
+#define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \
+ M_PERFCTL_KERNEL | \
+ M_PERFCTL_USER | \
+ M_PERFCTL_SUPERVISOR | \
+ M_PERFCTL_INTERRUPT_ENABLE)
+
+#ifdef CONFIG_MIPS_MT_SMP
+#define M_PERFCTL_CONFIG_MASK 0x3fff801f
+#else
+#define M_PERFCTL_CONFIG_MASK 0x1f
+#endif
+#define M_PERFCTL_EVENT_MASK 0xfe0
+
+#define M_COUNTER_OVERFLOW (1UL << 31)
+
+#ifdef CONFIG_MIPS_MT_SMP
+static int cpu_has_mipsmt_pertccounters;
+
+/*
+ * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
+ * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
+ */
+#if defined(CONFIG_HW_PERF_EVENTS)
+#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
+ 0 : smp_processor_id())
+#else
+#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
+ 0 : cpu_data[smp_processor_id()].vpe_id)
+#endif
+
+/* Copied from op_model_mipsxx.c */
+static inline unsigned int vpe_shift(void)
+{
+ if (num_possible_cpus() > 1)
+ return 1;
+
+ return 0;
+}
+#else /* !CONFIG_MIPS_MT_SMP */
+#define vpe_id() 0
+
+static inline unsigned int vpe_shift(void)
+{
+ return 0;
+}
+#endif /* CONFIG_MIPS_MT_SMP */
+
+static inline unsigned int
+counters_total_to_per_cpu(unsigned int counters)
+{
+ return counters >> vpe_shift();
+}
+
+static inline unsigned int
+counters_per_cpu_to_total(unsigned int counters)
+{
+ return counters << vpe_shift();
+}
+
+#define __define_perf_accessors(r, n, np) \
+ \
+static inline unsigned int r_c0_ ## r ## n(void) \
+{ \
+ unsigned int cpu = vpe_id(); \
+ \
+ switch (cpu) { \
+ case 0: \
+ return read_c0_ ## r ## n(); \
+ case 1: \
+ return read_c0_ ## r ## np(); \
+ default: \
+ BUG(); \
+ } \
+ return 0; \
+} \
+ \
+static inline void w_c0_ ## r ## n(unsigned int value) \
+{ \
+ unsigned int cpu = vpe_id(); \
+ \
+ switch (cpu) { \
+ case 0: \
+ write_c0_ ## r ## n(value); \
+ return; \
+ case 1: \
+ write_c0_ ## r ## np(value); \
+ return; \
+ default: \
+ BUG(); \
+ } \
+ return; \
+} \
+
+__define_perf_accessors(perfcntr, 0, 2)
+__define_perf_accessors(perfcntr, 1, 3)
+__define_perf_accessors(perfcntr, 2, 0)
+__define_perf_accessors(perfcntr, 3, 1)
+
+__define_perf_accessors(perfctrl, 0, 2)
+__define_perf_accessors(perfctrl, 1, 3)
+__define_perf_accessors(perfctrl, 2, 0)
+__define_perf_accessors(perfctrl, 3, 1)
+
+static inline int __n_counters(void)
+{
+ if (!(read_c0_config1() & M_CONFIG1_PC))
+ return 0;
+ if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
+ return 1;
+ if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
+ return 2;
+ if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
+ return 3;
+
+ return 4;
+}
+
+static inline int n_counters(void)
+{
+ int counters;
+
+ switch (current_cpu_type()) {
+ case CPU_R10000:
+ counters = 2;
+ break;
+
+ case CPU_R12000:
+ case CPU_R14000:
+ counters = 4;
+ break;
+
+ default:
+ counters = __n_counters();
+ }
+
+ return counters;
+}
+
+static void reset_counters(void *arg)
+{
+ int counters = (int)(long)arg;
+ switch (counters) {
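+ /* Cases fall through: passing N clears counters N-1 down to 0. */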
+ case 4:
+ w_c0_perfctrl3(0);
+ w_c0_perfcntr3(0);
+ case 3:
+ w_c0_perfctrl2(0);
+ w_c0_perfcntr2(0);
+ case 2:
+ w_c0_perfctrl1(0);
+ w_c0_perfcntr1(0);
+ case 1:
+ w_c0_perfctrl0(0);
+ w_c0_perfcntr0(0);
+ }
+}
+
+static inline u64
+mipsxx_pmu_read_counter(unsigned int idx)
+{
+ switch (idx) {
+ case 0:
+ return r_c0_perfcntr0();
+ case 1:
+ return r_c0_perfcntr1();
+ case 2:
+ return r_c0_perfcntr2();
+ case 3:
+ return r_c0_perfcntr3();
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+}
+
+static inline void
+mipsxx_pmu_write_counter(unsigned int idx, u64 val)
+{
+ switch (idx) {
+ case 0:
+ w_c0_perfcntr0(val);
+ return;
+ case 1:
+ w_c0_perfcntr1(val);
+ return;
+ case 2:
+ w_c0_perfcntr2(val);
+ return;
+ case 3:
+ w_c0_perfcntr3(val);
+ return;
+ }
+}
+
+static inline unsigned int
+mipsxx_pmu_read_control(unsigned int idx)
+{
+ switch (idx) {
+ case 0:
+ return r_c0_perfctrl0();
+ case 1:
+ return r_c0_perfctrl1();
+ case 2:
+ return r_c0_perfctrl2();
+ case 3:
+ return r_c0_perfctrl3();
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+}
+
+static inline void
+mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
+{
+ switch (idx) {
+ case 0:
+ w_c0_perfctrl0(val);
+ return;
+ case 1:
+ w_c0_perfctrl1(val);
+ return;
+ case 2:
+ w_c0_perfctrl2(val);
+ return;
+ case 3:
+ w_c0_perfctrl3(val);
+ return;
+ }
+}
+
+#ifdef CONFIG_MIPS_MT_SMP
+static DEFINE_RWLOCK(pmuint_rwlock);
+#endif
+
+/* 24K/34K/1004K cores can share the same event map. */
+static const struct mips_perf_event mipsxxcore_event_map
+ [PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
+ [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
+ [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
+};
+
+/* 74K core has different branch event code. */
+static const struct mips_perf_event mipsxx74Kcore_event_map
+ [PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
+ [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
+ [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
+};
+
+/* 24K/34K/1004K cores can share the same cache event map. */
+static const struct mips_perf_event mipsxxcore_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T },
+ /*
+ * Note that MIPS has only "hit" events countable for
+ * the prefetch operation.
+ */
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+};
+
+/* 74K core has completely different cache event map. */
+static const struct mips_perf_event mipsxx74Kcore_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
+ [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
+ [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T },
+ /*
+ * Note that MIPS has only "hit" events countable for
+ * the prefetch operation.
+ */
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(DTLB)] = {
+ /* 74K core does not have specific DTLB events. */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
+ },
+},
+};
+
+#ifdef CONFIG_MIPS_MT_SMP
+static void
+check_and_calc_range(struct perf_event *event,
+ const struct mips_perf_event *pev)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (event->cpu >= 0) {
+ if (pev->range > V) {
+ /*
+ * The user selected an event that is processor
+ * wide, while expecting it to be VPE wide.
+ */
+ hwc->config_base |= M_TC_EN_ALL;
+ } else {
+ /*
+ * FIXME: cpu_data[event->cpu].vpe_id reports 0
+ * for both CPUs.
+ */
+ hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
+ hwc->config_base |= M_TC_EN_VPE;
+ }
+ } else
+ hwc->config_base |= M_TC_EN_ALL;
+}
+#else
+static void
+check_and_calc_range(struct perf_event *event,
+ const struct mips_perf_event *pev)
+{
+}
+#endif
+
+static int __hw_perf_event_init(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
+ const struct mips_perf_event *pev;
+ int err;
+
+ /* Return the MIPS event descriptor for a generic perf event. */
+ if (PERF_TYPE_HARDWARE == event->attr.type) {
+ if (event->attr.config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+ pev = mipspmu_map_general_event(event->attr.config);
+ } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
+ pev = mipspmu_map_cache_event(event->attr.config);
+ } else if (PERF_TYPE_RAW == event->attr.type) {
+ /* We are working on the global raw event. */
+ mutex_lock(&raw_event_mutex);
+ pev = mipspmu->map_raw_event(event->attr.config);
+ } else {
+ /* The event type is not (yet) supported. */
+ return -EOPNOTSUPP;
+ }
+
+ if (IS_ERR(pev)) {
+ if (PERF_TYPE_RAW == event->attr.type)
+ mutex_unlock(&raw_event_mutex);
+ return PTR_ERR(pev);
+ }
+
+ /*
+ * We allow maximum flexibility in how each individual counter shared
+ * by a single CPU operates (the exclusion mode and the range).
+ */
+ hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;
+
+ /* Calculate range bits and validate it. */
+ if (num_possible_cpus() > 1)
+ check_and_calc_range(event, pev);
+
+ hwc->event_base = mipspmu_perf_event_encode(pev);
+ if (PERF_TYPE_RAW == event->attr.type)
+ mutex_unlock(&raw_event_mutex);
+
+ if (!attr->exclude_user)
+ hwc->config_base |= M_PERFCTL_USER;
+ if (!attr->exclude_kernel) {
+ hwc->config_base |= M_PERFCTL_KERNEL;
+ /* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
+ hwc->config_base |= M_PERFCTL_EXL;
+ }
+ if (!attr->exclude_hv)
+ hwc->config_base |= M_PERFCTL_SUPERVISOR;
+
+ hwc->config_base &= M_PERFCTL_CONFIG_MASK;
+ /*
+ * The event may belong to another CPU. We do not assign a local
+ * counter to it for now.
+ */
+ hwc->idx = -1;
+ hwc->config = 0;
+
+ if (!hwc->sample_period) {
+ hwc->sample_period = MAX_PERIOD;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+ }
+
+ err = 0;
+ if (event->group_leader != event) {
+ err = validate_group(event);
+ if (err)
+ return -EINVAL;
+ }
+
+ event->destroy = hw_perf_event_destroy;
+
+ return err;
+}
+
+static void pause_local_counters(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int counters = mipspmu->num_counters;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch (counters) {
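+ /* Cases fall through, saving and stopping each implemented counter. */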
+ case 4:
+ cpuc->saved_ctrl[3] = r_c0_perfctrl3();
+ w_c0_perfctrl3(cpuc->saved_ctrl[3] &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+ case 3:
+ cpuc->saved_ctrl[2] = r_c0_perfctrl2();
+ w_c0_perfctrl2(cpuc->saved_ctrl[2] &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+ case 2:
+ cpuc->saved_ctrl[1] = r_c0_perfctrl1();
+ w_c0_perfctrl1(cpuc->saved_ctrl[1] &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+ case 1:
+ cpuc->saved_ctrl[0] = r_c0_perfctrl0();
+ w_c0_perfctrl0(cpuc->saved_ctrl[0] &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+ }
+ local_irq_restore(flags);
+}
+
+static void resume_local_counters(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int counters = mipspmu->num_counters;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch (counters) {
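+ /* Cases fall through, restoring each implemented counter's control. */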
+ case 4:
+ w_c0_perfctrl3(cpuc->saved_ctrl[3]);
+ case 3:
+ w_c0_perfctrl2(cpuc->saved_ctrl[2]);
+ case 2:
+ w_c0_perfctrl1(cpuc->saved_ctrl[1]);
+ case 1:
+ w_c0_perfctrl0(cpuc->saved_ctrl[0]);
+ }
+ local_irq_restore(flags);
+}
+
+static int mipsxx_pmu_handle_shared_irq(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct perf_sample_data data;
+ unsigned int counters = mipspmu->num_counters;
+ unsigned int counter;
+ int handled = IRQ_NONE;
+ struct pt_regs *regs;
+
+ if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
+ return handled;
+
+ /*
+ * First we pause the local counters, so that if we block on the
+ * lock here, the counters are all paused already. When the lock is
+ * taken for perf_disable(), the timer interrupt handler will simply
+ * be delayed.
+ *
+ * See also mipsxx_pmu_start().
+ */
+ pause_local_counters();
+#ifdef CONFIG_MIPS_MT_SMP
+ read_lock(&pmuint_rwlock);
+#endif
+
+ regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0);
+
+ switch (counters) {
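+ /* The generated cases fall through so every implemented counter is checked. */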
+#define HANDLE_COUNTER(n) \
+ case n + 1: \
+ if (test_bit(n, cpuc->used_mask)) { \
+ counter = r_c0_perfcntr ## n(); \
+ if (counter & M_COUNTER_OVERFLOW) { \
+ w_c0_perfcntr ## n(counter & \
+ VALID_COUNT); \
+ if (test_and_change_bit(n, cpuc->msbs)) \
+ handle_associated_event(cpuc, \
+ n, &data, regs); \
+ handled = IRQ_HANDLED; \
+ } \
+ }
+ HANDLE_COUNTER(3)
+ HANDLE_COUNTER(2)
+ HANDLE_COUNTER(1)
+ HANDLE_COUNTER(0)
+ }
+
+ /*
+ * Do all the work for the pending perf events. We can do this
+ * in here because the performance counter interrupt is a regular
+ * interrupt, not an NMI.
+ */
+ if (handled == IRQ_HANDLED)
+ perf_event_do_pending();
+
+#ifdef CONFIG_MIPS_MT_SMP
+ read_unlock(&pmuint_rwlock);
+#endif
+ resume_local_counters();
+ return handled;
+}
+
+static irqreturn_t
+mipsxx_pmu_handle_irq(int irq, void *dev)
+{
+ return mipsxx_pmu_handle_shared_irq();
+}
+
+static void mipsxx_pmu_start(void)
+{
+#ifdef CONFIG_MIPS_MT_SMP
+ write_unlock(&pmuint_rwlock);
+#endif
+ resume_local_counters();
+}
+
+/*
+ * MIPS performance counters can be per-TC. The control registers cannot
+ * be directly accessed across CPUs. Hence if we want to do global
+ * control, we need cross-CPU calls. on_each_cpu() can help us, but we
+ * cannot guarantee that this function is called with interrupts
+ * enabled. So here we pause the local counters, then grab an rwlock
+ * and leave the counters on other CPUs alone. If a counter interrupt
+ * is raised while we hold the write lock, it simply pauses the local
+ * counters on that CPU and spins in the handler. Also we know we won't
+ * be switched to another CPU after pausing the local counters and
+ * before grabbing the lock.
+ */
+static void mipsxx_pmu_stop(void)
+{
+ pause_local_counters();
+#ifdef CONFIG_MIPS_MT_SMP
+ write_lock(&pmuint_rwlock);
+#endif
+}
+
+static int
+mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *hwc)
+{
+ int i;
+
+ /*
+ * We only need to care about the counter mask. The range has
+ * already been checked.
+ */
+ unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;
+
+ for (i = mipspmu->num_counters - 1; i >= 0; i--) {
+ /*
+ * Note that some MIPS perf events can be counted by both
+ * even and odd counters, whereas many others can only be
+ * counted by even _or_ odd counters. This introduces an
+ * issue: when the former kind of event takes the counter
+ * that the latter kind of event wants to use, the "counter
+ * allocation" for the latter event will fail. If the two
+ * could be swapped dynamically, both would be satisfied,
+ * but we leave this issue alone for now.
+ */
+ if (test_bit(i, &cntr_mask) &&
+ !test_and_set_bit(i, cpuc->used_mask))
+ return i;
+ }
+
+ return -EAGAIN;
+}
+
+static void
+mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ unsigned long flags;
+
+ WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+
+ local_irq_save(flags);
+ cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
+ (evt->config_base & M_PERFCTL_CONFIG_MASK) |
+ /* Make sure the interrupt is enabled. */
+ M_PERFCTL_INTERRUPT_ENABLE;
+ /*
+ * We do not actually let the counter run. Leave it until start().
+ */
+ local_irq_restore(flags);
+}
+
+static void
+mipsxx_pmu_disable_event(int idx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ unsigned long flags;
+
+ WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+
+ local_irq_save(flags);
+ cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER;
+ mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
+ local_irq_restore(flags);
+}
+
+/* 24K */
+#define IS_UNSUPPORTED_24K_EVENT(r, b) \
+ ((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 || \
+ (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 || \
+ (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 || \
+ (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) || \
+ ((b) >= 68 && (b) <= 127))
+#define IS_BOTH_COUNTERS_24K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+
+/* 34K */
+#define IS_UNSUPPORTED_34K_EVENT(r, b) \
+ ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 || \
+ (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) || \
+ ((b) >= 68 && (b) <= 127))
+#define IS_BOTH_COUNTERS_34K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+#define IS_RANGE_P_34K_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \
+ (r) == 176 || ((b) >= 50 && (b) <= 55) || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
+#endif
+
+/* 74K */
+#define IS_UNSUPPORTED_74K_EVENT(r, b) \
+ ((r) == 5 || ((r) >= 135 && (r) <= 137) || \
+ ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 || \
+ (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) || \
+ (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 || \
+ (b) == 61 || (r) == 62 || (r) == 191 || \
+ ((b) >= 64 && (b) <= 127))
+#define IS_BOTH_COUNTERS_74K_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+
+/* 1004K */
+#define IS_UNSUPPORTED_1004K_EVENT(r, b) \
+ ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 || \
+ (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127))
+#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+#define IS_RANGE_P_1004K_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \
+ (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \
+ (r) == 188 || (b) == 61 || (b) == 62 || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
+#endif
+
+/*
+ * The user can use raw events 0-255, where 0-127 select events for the
+ * even counters and 128-255 select events for the odd counters. Note
+ * that bit 7 is used to indicate the parity. So, for example, when the
+ * user wants Event Num 15 on the odd counters (as listed in the user
+ * manual), 128 needs to be added to 15 as the input for the event
+ * config, i.e. 143 (0x8F) is to be used.
+ */
+static const struct mips_perf_event *
+mipsxx_pmu_map_raw_event(u64 config)
+{
+ unsigned int raw_id = config & 0xff;
+ unsigned int base_id = raw_id & 0x7f;
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id))
+ return ERR_PTR(-EOPNOTSUPP);
+ raw_event.event_id = base_id;
+ if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ /*
+ * This is actually doing nothing: non-multithreading
+ * CPUs do not check or calculate the range.
+ */
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_34K:
+ if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id))
+ return ERR_PTR(-EOPNOTSUPP);
+ raw_event.event_id = base_id;
+ if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
+ case CPU_74K:
+ if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id))
+ return ERR_PTR(-EOPNOTSUPP);
+ raw_event.event_id = base_id;
+ if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_1004K:
+ if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id))
+ return ERR_PTR(-EOPNOTSUPP);
+ raw_event.event_id = base_id;
+ if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
+ }
+
+ return &raw_event;
+}
+
+static struct mips_pmu mipsxxcore_pmu = {
+ .handle_irq = mipsxx_pmu_handle_irq,
+ .handle_shared_irq = mipsxx_pmu_handle_shared_irq,
+ .start = mipsxx_pmu_start,
+ .stop = mipsxx_pmu_stop,
+ .alloc_counter = mipsxx_pmu_alloc_counter,
+ .read_counter = mipsxx_pmu_read_counter,
+ .write_counter = mipsxx_pmu_write_counter,
+ .enable_event = mipsxx_pmu_enable_event,
+ .disable_event = mipsxx_pmu_disable_event,
+ .map_raw_event = mipsxx_pmu_map_raw_event,
+ .general_event_map = &mipsxxcore_event_map,
+ .cache_event_map = &mipsxxcore_cache_map,
+};
+
+static struct mips_pmu mipsxx74Kcore_pmu = {
+ .handle_irq = mipsxx_pmu_handle_irq,
+ .handle_shared_irq = mipsxx_pmu_handle_shared_irq,
+ .start = mipsxx_pmu_start,
+ .stop = mipsxx_pmu_stop,
+ .alloc_counter = mipsxx_pmu_alloc_counter,
+ .read_counter = mipsxx_pmu_read_counter,
+ .write_counter = mipsxx_pmu_write_counter,
+ .enable_event = mipsxx_pmu_enable_event,
+ .disable_event = mipsxx_pmu_disable_event,
+ .map_raw_event = mipsxx_pmu_map_raw_event,
+ .general_event_map = &mipsxx74Kcore_event_map,
+ .cache_event_map = &mipsxx74Kcore_cache_map,
+};
+
+static int __init
+init_hw_perf_events(void)
+{
+ int counters, irq;
+
+ pr_info("Performance counters: ");
+
+ counters = n_counters();
+ if (counters == 0) {
+ pr_cont("No available PMU.\n");
+ return -ENODEV;
+ }
+
+#ifdef CONFIG_MIPS_MT_SMP
+ cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
+ if (!cpu_has_mipsmt_pertccounters)
+ counters = counters_total_to_per_cpu(counters);
+#endif
+
+#ifdef MSC01E_INT_BASE
+ if (cpu_has_veic) {
+ /*
+ * Using platform specific interrupt controller defines.
+ */
+ irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
+ } else {
+#endif
+ if (cp0_perfcount_irq >= 0)
+ irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+ else
+ irq = -1;
+#ifdef MSC01E_INT_BASE
+ }
+#endif
+
+ on_each_cpu(reset_counters, (void *)(long)counters, 1);
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ mipsxxcore_pmu.name = "mips/24K";
+ mipsxxcore_pmu.num_counters = counters;
+ mipsxxcore_pmu.irq = irq;
+ mipspmu = &mipsxxcore_pmu;
+ break;
+ case CPU_34K:
+ mipsxxcore_pmu.name = "mips/34K";
+ mipsxxcore_pmu.num_counters = counters;
+ mipsxxcore_pmu.irq = irq;
+ mipspmu = &mipsxxcore_pmu;
+ break;
+ case CPU_74K:
+ mipsxx74Kcore_pmu.name = "mips/74K";
+ mipsxx74Kcore_pmu.num_counters = counters;
+ mipsxx74Kcore_pmu.irq = irq;
+ mipspmu = &mipsxx74Kcore_pmu;
+ break;
+ case CPU_1004K:
+ mipsxxcore_pmu.name = "mips/1004K";
+ mipsxxcore_pmu.num_counters = counters;
+ mipsxxcore_pmu.irq = irq;
+ mipspmu = &mipsxxcore_pmu;
+ break;
+ default:
+ pr_cont("Either hardware does not support performance "
+ "counters, or not yet implemented.\n");
+ return -ENODEV;
+ }
+
+ if (mipspmu)
+ pr_cont("%s PMU enabled, %d counters available to each "
+ "CPU, irq %d%s\n", mipspmu->name, counters, irq,
+ irq < 0 ? " (shared with timer interrupt)" : "");
+
+ return 0;
+}
+arch_initcall(init_hw_perf_events);
+
+#endif /* defined(CONFIG_CPU_MIPS32)... */
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index c8777333e19..d21c388c011 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -255,9 +255,13 @@ int ptrace_set_watch_regs(struct task_struct *child,
return 0;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ void __user *addrp = (void __user *) addr;
+ void __user *datavp = (void __user *) data;
+ unsigned long __user *datalp = (void __user *) data;
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
@@ -386,7 +390,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = -EIO;
goto out;
}
- ret = put_user(tmp, (unsigned long __user *) data);
+ ret = put_user(tmp, datalp);
break;
}
@@ -478,34 +482,31 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (__s64 __user *) data);
+ ret = ptrace_getregs(child, datavp);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (__s64 __user *) data);
+ ret = ptrace_setregs(child, datavp);
break;
case PTRACE_GETFPREGS:
- ret = ptrace_getfpregs(child, (__u32 __user *) data);
+ ret = ptrace_getfpregs(child, datavp);
break;
case PTRACE_SETFPREGS:
- ret = ptrace_setfpregs(child, (__u32 __user *) data);
+ ret = ptrace_setfpregs(child, datavp);
break;
case PTRACE_GET_THREAD_AREA:
- ret = put_user(task_thread_info(child)->tp_value,
- (unsigned long __user *) data);
+ ret = put_user(task_thread_info(child)->tp_value, datalp);
break;
case PTRACE_GET_WATCH_REGS:
- ret = ptrace_get_watch_regs(child,
- (struct pt_watch_regs __user *) addr);
+ ret = ptrace_get_watch_regs(child, addrp);
break;
case PTRACE_SET_WATCH_REGS:
- ret = ptrace_set_watch_regs(child,
- (struct pt_watch_regs __user *) addr);
+ ret = ptrace_set_watch_regs(child, addrp);
break;
default:
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index a6b900f2962..acd3f2c49c0 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -490,6 +490,7 @@ static void __init arch_mem_init(char **cmdline_p)
bootmem_init();
device_tree_init();
sparse_init();
+ plat_swiotlb_setup();
paging_init();
}
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d053bf4759e..8e9fbe75894 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -29,6 +29,7 @@
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
+#include <linux/perf_event.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
@@ -576,10 +577,16 @@ static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
*/
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
- if ((opcode & OPCODE) == LL)
+ if ((opcode & OPCODE) == LL) {
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, 0, regs, 0);
return simulate_ll(regs, opcode);
- if ((opcode & OPCODE) == SC)
+ }
+ if ((opcode & OPCODE) == SC) {
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, 0, regs, 0);
return simulate_sc(regs, opcode);
+ }
return -1; /* Must be something else ... */
}
@@ -595,6 +602,8 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
int rd = (opcode & RD) >> 11;
int rt = (opcode & RT) >> 16;
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, 0, regs, 0);
switch (rd) {
case 0: /* CPU number */
regs->regs[rt] = smp_processor_id();
@@ -630,8 +639,11 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
- if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC)
+ if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, 0, regs, 0);
return 0;
+ }
return -1; /* Must be something else ... */
}
@@ -1469,6 +1481,7 @@ void __cpuinit per_cpu_trap_init(void)
{
unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0;
+ unsigned int hwrena = cpu_hwrena_impl_bits;
#ifdef CONFIG_MIPS_MT_SMTC
int secondaryTC = 0;
int bootTC = (cpu == 0);
@@ -1501,14 +1514,14 @@ void __cpuinit per_cpu_trap_init(void)
change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
- if (cpu_has_mips_r2) {
- unsigned int enable = 0x0000000f | cpu_hwrena_impl_bits;
+ if (cpu_has_mips_r2)
+ hwrena |= 0x0000000f;
- if (!noulri && cpu_has_userlocal)
- enable |= (1 << 29);
+ if (!noulri && cpu_has_userlocal)
+ hwrena |= (1 << 29);
- write_c0_hwrena(enable);
- }
+ if (hwrena)
+ write_c0_hwrena(hwrena);
#ifdef CONFIG_MIPS_MT_SMTC
if (!secondaryTC) {
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 33d5a5ce4a2..cfea1adfa15 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -78,6 +78,8 @@
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
+#include <linux/perf_event.h>
+
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
@@ -109,6 +111,9 @@ static void emulate_load_store_insn(struct pt_regs *regs,
unsigned long value;
unsigned int res;
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, 0, regs, 0);
+
/*
* This load never faults.
*/
@@ -511,6 +516,8 @@ asmlinkage void do_ade(struct pt_regs *regs)
unsigned int __user *pc;
mm_segment_t seg;
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
+ 1, 0, regs, regs->cp0_badvaddr);
/*
* Did we catch a fault trying to load an instruction?
* Or are we running in MIPS16 mode?
diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig
index c97ca69b94e..6e1b77fec7e 100644
--- a/arch/mips/loongson/Kconfig
+++ b/arch/mips/loongson/Kconfig
@@ -20,7 +20,6 @@ config LEMOTE_FULOONG2E
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_HIGHMEM
select SYS_HAS_EARLY_PRINTK
- select GENERIC_HARDIRQS_NO__DO_IRQ
select GENERIC_ISA_DMA_SUPPORT_BROKEN
select CPU_HAS_WB
select LOONGSON_MC146818
@@ -40,7 +39,6 @@ config LEMOTE_MACH2F
select CS5536
select CSRC_R4K if ! MIPS_EXTERNAL_TIMER
select DMA_NONCOHERENT
- select GENERIC_HARDIRQS_NO__DO_IRQ
select GENERIC_ISA_DMA_SUPPORT_BROKEN
select HW_HAS_PCI
select I8259
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index ec3faa413f3..b2ad1b0910f 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -36,6 +36,7 @@
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/debugfs.h>
+#include <linux/perf_event.h>
#include <asm/inst.h>
#include <asm/bootinfo.h>
@@ -258,6 +259,8 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
}
emul:
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, 0, xcp, 0);
MIPS_FPU_EMU_INC_STATS(emulated);
switch (MIPSInst_OPCODE(ir)) {
case ldc1_op:{
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 0f9c488044d..16c4d256b76 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -181,10 +181,10 @@ static void __cpuinit probe_octeon(void)
unsigned int config1;
struct cpuinfo_mips *c = &current_cpu_data;
+ config1 = read_c0_config1();
switch (c->cputype) {
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
- config1 = read_c0_config1();
c->icache.linesz = 2 << ((config1 >> 19) & 7);
c->icache.sets = 64 << ((config1 >> 22) & 7);
c->icache.ways = 1 + ((config1 >> 16) & 7);
@@ -204,6 +204,20 @@ static void __cpuinit probe_octeon(void)
c->options |= MIPS_CPU_PREFETCH;
break;
+ case CPU_CAVIUM_OCTEON2:
+ c->icache.linesz = 2 << ((config1 >> 19) & 7);
+ c->icache.sets = 8;
+ c->icache.ways = 37;
+ c->icache.flags |= MIPS_CACHE_VTAG;
+ icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
+
+ c->dcache.linesz = 128;
+ c->dcache.ways = 32;
+ c->dcache.sets = 8;
+ dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
+ c->options |= MIPS_CPU_PREFETCH;
+ break;
+
default:
panic("Unsupported Cavium Networks CPU type\n");
break;
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 6721ee2b1e8..b4923a75cb4 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -42,14 +42,14 @@
* o collapses to normal function call on UP kernels
* o collapses to normal function call on systems with a single shared
* primary cache.
+ * o doesn't disable interrupts on the local CPU
*/
-static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
- int wait)
+static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
{
preempt_disable();
#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
- smp_call_function(func, info, wait);
+ smp_call_function(func, info, 1);
#endif
func(info);
preempt_enable();
@@ -363,7 +363,7 @@ static inline void local_r4k___flush_cache_all(void * args)
static void r4k___flush_cache_all(void)
{
- r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
+ r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
}
static inline int has_valid_asid(const struct mm_struct *mm)
@@ -410,7 +410,7 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma,
int exec = vma->vm_flags & VM_EXEC;
if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
- r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
}
static inline void local_r4k_flush_cache_mm(void * args)
@@ -442,7 +442,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
if (!cpu_has_dc_aliases)
return;
- r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
}
struct flush_cache_page_args {
@@ -534,7 +534,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
args.addr = addr;
args.pfn = pfn;
- r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
}
static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -547,8 +547,7 @@ static void r4k_flush_data_cache_page(unsigned long addr)
if (in_atomic())
local_r4k_flush_data_cache_page((void *)addr);
else
- r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
- 1);
+ r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
}
struct flush_icache_range_args {
@@ -589,7 +588,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
args.start = start;
args.end = end;
- r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
+ r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
instruction_hazard();
}
@@ -710,7 +709,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
- r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
+ r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
}
static void r4k_flush_icache_all(void)
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 469d4019f79..4fc1a0fbe00 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -95,10 +95,9 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
return ret;
}
-
EXPORT_SYMBOL(dma_alloc_noncoherent);
-void *dma_alloc_coherent(struct device *dev, size_t size,
+static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t * dma_handle, gfp_t gfp)
{
void *ret;
@@ -123,7 +122,6 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
return ret;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle)
@@ -131,10 +129,9 @@ void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
free_pages((unsigned long) vaddr, get_order(size));
}
-
EXPORT_SYMBOL(dma_free_noncoherent);
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
unsigned long addr = (unsigned long) vaddr;
@@ -151,8 +148,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
free_pages(addr, get_order(size));
}
-EXPORT_SYMBOL(dma_free_coherent);
-
static inline void __dma_sync(unsigned long addr, size_t size,
enum dma_data_direction direction)
{
@@ -174,21 +169,8 @@ static inline void __dma_sync(unsigned long addr, size_t size,
}
}
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
-{
- unsigned long addr = (unsigned long) ptr;
-
- if (!plat_device_is_coherent(dev))
- __dma_sync(addr, size, direction);
-
- return plat_map_dma_mem(dev, ptr, size);
-}
-
-EXPORT_SYMBOL(dma_map_single);
-
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
+static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
if (cpu_is_noncoherent_r10000(dev))
__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
@@ -197,15 +179,11 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
plat_unmap_dma_mem(dev, dma_addr, size, direction);
}
-EXPORT_SYMBOL(dma_unmap_single);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
+static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
int i;
- BUG_ON(direction == DMA_NONE);
-
for (i = 0; i < nents; i++, sg++) {
unsigned long addr;
@@ -219,33 +197,27 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
return nents;
}
-EXPORT_SYMBOL(dma_map_sg);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction)
+static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
- BUG_ON(direction == DMA_NONE);
+ unsigned long addr;
- if (!plat_device_is_coherent(dev)) {
- unsigned long addr;
+ addr = (unsigned long) page_address(page) + offset;
- addr = (unsigned long) page_address(page) + offset;
+ if (!plat_device_is_coherent(dev))
__dma_sync(addr, size, direction);
- }
- return plat_map_dma_mem_page(dev, page) + offset;
+ return plat_map_dma_mem(dev, (void *)addr, size);
}
-EXPORT_SYMBOL(dma_map_page);
-
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
+static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nhwentries, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
unsigned long addr;
int i;
- BUG_ON(direction == DMA_NONE);
-
for (i = 0; i < nhwentries; i++, sg++) {
if (!plat_device_is_coherent(dev) &&
direction != DMA_TO_DEVICE) {
@@ -257,13 +229,9 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
}
}
-EXPORT_SYMBOL(dma_unmap_sg);
-
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
+static void mips_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
- BUG_ON(direction == DMA_NONE);
-
if (cpu_is_noncoherent_r10000(dev)) {
unsigned long addr;
@@ -272,13 +240,9 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
}
}
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
+static void mips_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
- BUG_ON(direction == DMA_NONE);
-
plat_extra_sync_for_device(dev);
if (!plat_device_is_coherent(dev)) {
unsigned long addr;
@@ -288,46 +252,11 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
}
}
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size, enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- if (cpu_is_noncoherent_r10000(dev)) {
- unsigned long addr;
-
- addr = dma_addr_to_virt(dev, dma_handle);
- __dma_sync(addr + offset, size, direction);
- }
-}
-
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size, enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- plat_extra_sync_for_device(dev);
- if (!plat_device_is_coherent(dev)) {
- unsigned long addr;
-
- addr = dma_addr_to_virt(dev, dma_handle);
- __dma_sync(addr + offset, size, direction);
- }
-}
-
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
+static void mips_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
int i;
- BUG_ON(direction == DMA_NONE);
-
/* Make sure that gcc doesn't leave the empty loop body. */
for (i = 0; i < nelems; i++, sg++) {
if (cpu_is_noncoherent_r10000(dev))
@@ -336,15 +265,11 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
}
}
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
+static void mips_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
int i;
- BUG_ON(direction == DMA_NONE);
-
/* Make sure that gcc doesn't leave the empty loop body. */
for (i = 0; i < nelems; i++, sg++) {
if (!plat_device_is_coherent(dev))
@@ -353,24 +278,18 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
}
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return plat_dma_mapping_error(dev, dma_addr);
}
-EXPORT_SYMBOL(dma_mapping_error);
-
-int dma_supported(struct device *dev, u64 mask)
+int mips_dma_supported(struct device *dev, u64 mask)
{
return plat_dma_supported(dev, mask);
}
-EXPORT_SYMBOL(dma_supported);
-
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
+void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
@@ -379,4 +298,30 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
__dma_sync((unsigned long)vaddr, size, direction);
}
-EXPORT_SYMBOL(dma_cache_sync);
+static struct dma_map_ops mips_default_dma_map_ops = {
+ .alloc_coherent = mips_dma_alloc_coherent,
+ .free_coherent = mips_dma_free_coherent,
+ .map_page = mips_dma_map_page,
+ .unmap_page = mips_dma_unmap_page,
+ .map_sg = mips_dma_map_sg,
+ .unmap_sg = mips_dma_unmap_sg,
+ .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
+ .sync_single_for_device = mips_dma_sync_single_for_device,
+ .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = mips_dma_sync_sg_for_device,
+ .mapping_error = mips_dma_mapping_error,
+ .dma_supported = mips_dma_supported
+};
+
+struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+EXPORT_SYMBOL(mips_dma_map_ops);
+
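+/* Number of entries preallocated for the dma-debug tracking table. */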
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init mips_dma_init(void)
+{
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+ return 0;
+}
+fs_initcall(mips_dma_init);
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 783ad0065fd..137ee76a004 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -18,6 +18,7 @@
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/kprobes.h>
+#include <linux/perf_event.h>
#include <asm/branch.h>
#include <asm/mmu_context.h>
@@ -144,6 +145,7 @@ good_area:
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -151,10 +153,15 @@ good_area:
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
+ if (fault & VM_FAULT_MAJOR) {
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+ 1, 0, regs, address);
tsk->maj_flt++;
- else
+ } else {
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+ 1, 0, regs, address);
tsk->min_flt++;
+ }
up_read(&mm->mmap_sem);
return;
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 1e69b1fb4b8..3634c7ea06a 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -74,7 +74,7 @@ void __kunmap_atomic(void *kvaddr)
return;
}
- type = kmap_atomic_idx_pop();
+ type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
{
int idx = type + KM_TYPE_NR * smp_processor_id();
@@ -89,6 +89,7 @@ void __kunmap_atomic(void *kvaddr)
local_flush_tlb_one(vaddr);
}
#endif
+ kmap_atomic_idx_pop();
pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 5ab5fa8c1d8..505fecad468 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -57,6 +57,34 @@ static struct bcache_ops mips_sc_ops = {
.bc_inv = mips_sc_inv
};
+/*
+ * Check if the L2 cache controller is activated on a particular platform.
+ * MTI's L2 controller and the L2 cache controller of Broadcom's BMIPS
+ * cores both use bit 12 of c0_config2 as an "L2 Bypass" bit, i.e. the
+ * cache is disabled when the bit is set. However there is no guarantee
+ * that this holds on all platforms. In an act of stupidity the spec
+ * defines bits 12..15 as implementation defined, so the function below
+ * will eventually have to be replaced by a platform-specific probe.
+ */
+static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
+{
+ unsigned int config2 = read_c0_config2();
+ unsigned int tmp;
+
+ /* Check the bypass bit (L2B) */
+ switch (c->cputype) {
+ case CPU_34K:
+ case CPU_74K:
+ case CPU_1004K:
+ case CPU_BMIPS5000:
+ if (config2 & (1 << 12))
+ return 0;
+ }
+
+ /* Probe the line size; a zero field means no L2 is present. */
+ tmp = (config2 >> 4) & 0x0f;
+ if (0 < tmp && tmp <= 7)
+ c->scache.linesz = 2 << tmp;
+ else
+ return 0;
+
+ return 1;
+}
+
static inline int __init mips_sc_probe(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -79,10 +107,8 @@ static inline int __init mips_sc_probe(void)
return 0;
config2 = read_c0_config2();
- tmp = (config2 >> 4) & 0x0f;
- if (0 < tmp && tmp <= 7)
- c->scache.linesz = 2 << tmp;
- else
+
+ if (!mips_sc_is_activated(c))
return 0;
tmp = (config2 >> 8) & 0x0f;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 4510e61883e..93816f3bca6 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -338,13 +338,12 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_4KSC:
case CPU_20KC:
case CPU_25KF:
- case CPU_BCM3302:
- case CPU_BCM4710:
+ case CPU_BMIPS32:
+ case CPU_BMIPS3300:
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ case CPU_BMIPS5000:
case CPU_LOONGSON2:
- case CPU_BCM6338:
- case CPU_BCM6345:
- case CPU_BCM6348:
- case CPU_BCM6358:
case CPU_R5500:
if (m4kc_tlbp_war())
uasm_i_nop(p);
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index d2647a4e012..23afdebc8e5 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -405,7 +405,6 @@ I_u1u2u3(_mfc0)
I_u1u2u3(_mtc0)
I_u2u1u3(_ori)
I_u3u1u2(_or)
-I_u2s3u1(_pref)
I_0(_rfe)
I_u2s3u1(_sc)
I_u2s3u1(_scd)
@@ -427,6 +426,25 @@ I_u1(_syscall);
I_u1u2s3(_bbit0);
I_u1u2s3(_bbit1);
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+#include <asm/octeon/octeon.h>
+void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
+ unsigned int c)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
+ /*
+ * As per erratum Core-14449, replace prefetches 0-4,
+ * 6-24 with 'pref 28'.
+ */
+ build_insn(buf, insn_pref, c, 28, b);
+ else
+ build_insn(buf, insn_pref, c, a, b);
+}
+UASM_EXPORT_SYMBOL(uasm_i_pref);
+#else
+I_u2s3u1(_pref)
+#endif
+
/* Handle labels. */
void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
{
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index d248b707eff..2d74fc9ae3b 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/delay.h>
+#include <linux/swiotlb.h>
#include <asm/time.h>
@@ -19,6 +20,8 @@
#include <asm/octeon/cvmx-pci-defs.h>
#include <asm/octeon/pci-octeon.h>
+#include <dma-coherence.h>
+
#define USE_OCTEON_INTERNAL_ARBITER
/*
@@ -32,6 +35,8 @@
/* Octeon's PCI controller uses did=3, subdid=3 for PCI memory. */
#define OCTEON_PCI_MEMSPACE_OFFSET (0x00011b0000000000ull)
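+/* Physical address targeted by the BAR1 movable mappings (set in octeon_pci_setup()). */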
+u64 octeon_bar1_pci_phys;
+
/**
* This is the bit decoding used for the Octeon PCI controller addresses
*/
@@ -170,6 +175,8 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
}
+ dev->dev.archdata.dma_ops = octeon_pci_dma_map_ops;
+
return 0;
}
@@ -618,12 +625,10 @@ static int __init octeon_pci_setup(void)
* before the readl()'s below. We don't want BAR2 overlapping
* with BAR0/BAR1 during these reads.
*/
- octeon_npi_write32(CVMX_NPI_PCI_CFG08, 0);
- octeon_npi_write32(CVMX_NPI_PCI_CFG09, 0x80);
-
- /* Disable the BAR1 movable mappings */
- for (index = 0; index < 32; index++)
- octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0);
+ octeon_npi_write32(CVMX_NPI_PCI_CFG08,
+ (u32)(OCTEON_BAR2_PCI_ADDRESS & 0xffffffffull));
+ octeon_npi_write32(CVMX_NPI_PCI_CFG09,
+ (u32)(OCTEON_BAR2_PCI_ADDRESS >> 32));
if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
/* Remap the Octeon BAR 0 to 0-2GB */
@@ -637,6 +642,25 @@ static int __init octeon_pci_setup(void)
octeon_npi_write32(CVMX_NPI_PCI_CFG06, 2ul << 30);
octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);
+ /* BAR1 movable mappings set for identity mapping */
+ octeon_bar1_pci_phys = 0x80000000ull;
+ for (index = 0; index < 32; index++) {
+ union cvmx_pci_bar1_indexx bar1_index;
+
+ bar1_index.u32 = 0;
+ /* Address bits[35:22] sent to L2C */
+ bar1_index.s.addr_idx =
+ (octeon_bar1_pci_phys >> 22) + index;
+ /* Don't put PCI accesses in L2. */
+ bar1_index.s.ca = 1;
+ /* Endian Swap Mode */
+ bar1_index.s.end_swp = 1;
+ /* Set '1' when the selected address range is valid. */
+ bar1_index.s.addr_v = 1;
+ octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
+ bar1_index.u32);
+ }
+
/* Devices go after BAR1 */
octeon_pci_mem_resource.start =
OCTEON_PCI_MEMSPACE_OFFSET + (4ul << 30) -
@@ -652,6 +676,27 @@ static int __init octeon_pci_setup(void)
octeon_npi_write32(CVMX_NPI_PCI_CFG06, 0);
octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);
+ /* BAR1 movable regions contiguous to cover the swiotlb */
+ octeon_bar1_pci_phys =
+ virt_to_phys(octeon_swiotlb) & ~((1ull << 22) - 1);
+
+ for (index = 0; index < 32; index++) {
+ union cvmx_pci_bar1_indexx bar1_index;
+
+ bar1_index.u32 = 0;
+ /* Address bits[35:22] sent to L2C */
+ bar1_index.s.addr_idx =
+ (octeon_bar1_pci_phys >> 22) + index;
+ /* Don't put PCI accesses in L2. */
+ bar1_index.s.ca = 1;
+ /* Endian Swap Mode */
+ bar1_index.s.end_swp = 1;
+ /* Set '1' when the selected address range is valid. */
+ bar1_index.s.addr_v = 1;
+ octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
+ bar1_index.u32);
+ }
+
/* Devices go after BAR0 */
octeon_pci_mem_resource.start =
OCTEON_PCI_MEMSPACE_OFFSET + (128ul << 20) +
@@ -667,6 +712,9 @@ static int __init octeon_pci_setup(void)
* was setup properly.
*/
cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, -1);
+
+ octeon_pci_dma_init();
+
return 0;
}
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 861361e0c9a..385f035b24e 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -75,6 +75,8 @@ union cvmx_pcie_address {
} mem;
};
+#include <dma-coherence.h>
+
/**
* Return the Core virtual base address for PCIe IO access. IOs are
* read/written as an offset from this address.
@@ -1391,6 +1393,9 @@ static int __init octeon_pcie_setup(void)
cvmx_pcie_get_io_size(1) - 1;
register_pci_controller(&octeon_pcie1_controller);
}
+
+ octeon_pci_dma_init();
+
return 0;
}
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 7c2a2f7f8dc..41ba38513c8 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -1,16 +1,20 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Linux Kernel Configuration"
-
config MN10300
def_bool y
select HAVE_OPROFILE
-config AM33
- def_bool y
+config AM33_2
+ def_bool n
+
+config AM33_3
+ def_bool n
+
+config AM34_2
+ def_bool n
+ select MN10300_HAS_ATOMIC_OPS_UNIT
+ select MN10300_HAS_CACHE_SNOOP
+
+config ERRATUM_NEED_TO_RELOAD_MMUCTR
+ def_bool y if AM33_3 || AM34_2
config MMU
def_bool y
@@ -37,7 +41,7 @@ config GENERIC_CALIBRATE_DELAY
def_bool y
config GENERIC_CMOS_UPDATE
- def_bool y
+ def_bool n
config GENERIC_FIND_NEXT_BIT
def_bool y
@@ -45,6 +49,27 @@ config GENERIC_FIND_NEXT_BIT
config GENERIC_HWEIGHT
def_bool y
+config GENERIC_TIME
+ def_bool y
+
+config GENERIC_CLOCKEVENTS
+ def_bool y
+
+config GENERIC_CLOCKEVENTS_BUILD
+ def_bool y
+ depends on GENERIC_CLOCKEVENTS
+
+config GENERIC_CLOCKEVENTS_BROADCAST
+ bool
+
+config CEVT_MN10300
+ def_bool y
+ depends on GENERIC_CLOCKEVENTS
+
+config CSRC_MN10300
+ def_bool y
+ depends on GENERIC_TIME
+
config GENERIC_BUG
def_bool y
@@ -61,18 +86,12 @@ config GENERIC_HARDIRQS
config HOTPLUG_CPU
def_bool n
-config HZ
- int
- default 1000
-
-mainmenu "Matsushita MN10300/AM33 Kernel Configuration"
-
source "init/Kconfig"
source "kernel/Kconfig.freezer"
-menu "Matsushita MN10300 system setup"
+menu "Panasonic MN10300 system setup"
choice
prompt "Unit type"
@@ -87,6 +106,10 @@ config MN10300_UNIT_ASB2303
config MN10300_UNIT_ASB2305
bool "ASB2305"
+config MN10300_UNIT_ASB2364
+ bool "ASB2364"
+ select SMSC911X_ARCH_HOOKS if SMSC911X
+
endchoice
choice
@@ -99,57 +122,51 @@ choice
config MN10300_PROC_MN103E010
bool "MN103E010"
depends on MN10300_UNIT_ASB2303 || MN10300_UNIT_ASB2305
+ select AM33_2
+ select MN10300_PROC_HAS_TTYSM0
+ select MN10300_PROC_HAS_TTYSM1
+ select MN10300_PROC_HAS_TTYSM2
+
+config MN10300_PROC_MN2WS0050
+ bool "MN2WS0050"
+ depends on MN10300_UNIT_ASB2364
+ select AM34_2
select MN10300_PROC_HAS_TTYSM0
select MN10300_PROC_HAS_TTYSM1
select MN10300_PROC_HAS_TTYSM2
endchoice
-choice
- prompt "Processor core support"
- default MN10300_CPU_AM33V2
+config MN10300_HAS_ATOMIC_OPS_UNIT
+ def_bool n
help
- This option specifies the processor core for which the kernel will be
- compiled. It affects the instruction set used.
-
-config MN10300_CPU_AM33V2
- bool "AM33v2"
-
-endchoice
+ This should be enabled if the processor has an atomic ops unit
+ capable of doing LL/SC equivalent operations.
config FPU
bool "FPU present"
default y
- depends on MN10300_PROC_MN103E010
+ depends on MN10300_PROC_MN103E010 || MN10300_PROC_MN2WS0050
-choice
- prompt "CPU Caching mode"
- default MN10300_CACHE_WBACK
+config LAZY_SAVE_FPU
+ bool "Save FPU state lazily"
+ default y
+ depends on FPU && !SMP
help
- This option determines the caching mode for the kernel.
+ Enable this to save the FPU state lazily to the owning task's
+ thread struct. This is useful if most tasks on the system don't
+ use the FPU, as only those tasks that use it will pass it between
+ them, and the state needn't be saved for a task that isn't using
+ it.
- Write-Back caching mode involves the all reads and writes causing
- the affected cacheline to be read into the cache first before being
- operated upon. Memory is not then updated by a write until the cache
- is filled and a cacheline needs to be displaced from the cache to
- make room. Only at that point is it written back.
+ This can't be so easily used on SMP as the process that owns the FPU
+ state on a CPU may be currently running on another CPU, so for the
+ moment, it is disabled.
- Write-Through caching only fetches cachelines from memory on a
- read. Writes always get written directly to memory. If the affected
- cacheline is also in cache, it will be updated too.
+source "arch/mn10300/mm/Kconfig.cache"
- The final option is to turn of caching entirely.
-
-config MN10300_CACHE_WBACK
- bool "Write-Back"
-
-config MN10300_CACHE_WTHRU
- bool "Write-Through"
-
-config MN10300_CACHE_DISABLED
- bool "Disabled"
-
-endchoice
+config MN10300_TLB_USE_PIDR
+ def_bool y
menu "Memory layout options"
@@ -170,24 +187,55 @@ config KERNEL_TEXT_ADDRESS
config KERNEL_ZIMAGE_BASE_ADDRESS
hex "Base address of compressed vmlinux image"
- default "0x90700000"
+ default "0x50700000"
+
+config BOOT_STACK_OFFSET
+ hex
+ default "0xF00" if SMP
+ default "0xFF0" if !SMP
+config BOOT_STACK_SIZE
+ hex
+ depends on SMP
+ default "0x100"
endmenu
-config PREEMPT
- bool "Preemptible Kernel"
- help
- This option reduces the latency of the kernel when reacting to
- real-time or interactive events by allowing a low priority process to
- be preempted even if it is in kernel mode executing a system call.
- This allows applications to run more reliably even when the system is
- under load.
+config SMP
+ bool "Symmetric multi-processing support"
+ default y
+ depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050
+ ---help---
+ This enables support for systems with more than one CPU. If you have
+ a system with only one CPU, like most personal computers, say N. If
+ you have a system with more than one CPU, say Y.
- Say Y here if you are building a kernel for a desktop, embedded
- or real-time system. Say N if you are unsure.
+ If you say N here, the kernel will run on single and multiprocessor
+ machines, but will use only one CPU of a multiprocessor machine. If
+ you say Y here, the kernel will run on many, but not all,
+ singleprocessor machines. On a singleprocessor machine, the kernel
+ will run faster if you say N here.
+
+ See also <file:Documentation/i386/IO-APIC.txt>,
+ <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+ <http://www.tldp.org/docs.html#howto>.
+
+ If you don't know what to do here, say N.
+
+config NR_CPUS
+ int
+ depends on SMP
+ default "2"
+
+config USE_GENERIC_SMP_HELPERS
+ bool
+ depends on SMP
+ default y
+
+source "kernel/Kconfig.preempt"
config MN10300_CURRENT_IN_E2
bool "Hold current task address in E2 register"
+ depends on !SMP
default y
help
This option removes the E2/R2 register from the set available to gcc
@@ -209,12 +257,15 @@ config MN10300_USING_JTAG
suppresses the use of certain hardware debugging features, such as
single-stepping, which are taken over completely by the JTAG unit.
+source "kernel/Kconfig.hz"
+source "kernel/time/Kconfig"
+
config MN10300_RTC
bool "Using MN10300 RTC"
- depends on MN10300_PROC_MN103E010
+ depends on MN10300_PROC_MN103E010 || MN10300_PROC_MN2WS0050
+ select GENERIC_CMOS_UPDATE
default n
help
-
This option enables support for the RTC, thus enabling time to be
tracked, even when system is powered down. This is available on-chip
on the MN103E010.
@@ -306,14 +357,23 @@ config MN10300_TTYSM1
choice
prompt "Select the timer to supply the clock for SIF1"
- default MN10300_TTYSM0_TIMER9
+ default MN10300_TTYSM1_TIMER12 \
+ if !(AM33_2 || AM33_3)
+ default MN10300_TTYSM1_TIMER9 \
+ if AM33_2 || AM33_3
depends on MN10300_TTYSM1
+config MN10300_TTYSM1_TIMER12
+ bool "Use timer 12 (16-bit)"
+ depends on !(AM33_2 || AM33_3)
+
config MN10300_TTYSM1_TIMER9
bool "Use timer 9 (16-bit)"
+ depends on AM33_2 || AM33_3
config MN10300_TTYSM1_TIMER3
bool "Use timer 3 (8-bit)"
+ depends on AM33_2 || AM33_3
endchoice
@@ -328,17 +388,107 @@ config MN10300_TTYSM2
choice
prompt "Select the timer to supply the clock for SIF2"
- default MN10300_TTYSM0_TIMER10
+ default MN10300_TTYSM2_TIMER3 \
+ if !(AM33_2 || AM33_3)
+ default MN10300_TTYSM2_TIMER10 \
+ if AM33_2 || AM33_3
depends on MN10300_TTYSM2
+config MN10300_TTYSM2_TIMER9
+ bool "Use timer 9 (16-bit)"
+ depends on !(AM33_2 || AM33_3)
+
+config MN10300_TTYSM2_TIMER1
+ bool "Use timer 1 (8-bit)"
+ depends on !(AM33_2 || AM33_3)
+
+config MN10300_TTYSM2_TIMER3
+ bool "Use timer 3 (8-bit)"
+ depends on !(AM33_2 || AM33_3)
+
config MN10300_TTYSM2_TIMER10
bool "Use timer 10 (16-bit)"
+ depends on AM33_2 || AM33_3
endchoice
config MN10300_TTYSM2_CTS
bool "Enable the use of the CTS line /dev/ttySM2"
- depends on MN10300_TTYSM2
+ depends on MN10300_TTYSM2 && AM33_2
+
+endmenu
+
+menu "Interrupt request priority options"
+
+comment "[!] NOTE: A lower number/level indicates a higher priority (0 is highest, 6 is lowest)"
+
+comment "____Non-maskable interrupt levels____"
+comment "The following must be set to a higher priority than local_irq_disable() and on-chip serial"
+
+config GDBSTUB_IRQ_LEVEL
+ int "GDBSTUB interrupt priority"
+ depends on GDBSTUB
+ range 0 1 if LINUX_CLI_LEVEL = 2
+ range 0 2 if LINUX_CLI_LEVEL = 3
+ range 0 3 if LINUX_CLI_LEVEL = 4
+ range 0 4 if LINUX_CLI_LEVEL = 5
+ range 0 5 if LINUX_CLI_LEVEL = 6
+ default 0
+
+comment "The following must be set to a higher priority than local_irq_disable()"
+
+config MN10300_SERIAL_IRQ_LEVEL
+ int "MN10300 on-chip serial interrupt priority"
+ depends on MN10300_TTYSM
+ range 1 1 if LINUX_CLI_LEVEL = 2
+ range 1 2 if LINUX_CLI_LEVEL = 3
+ range 1 3 if LINUX_CLI_LEVEL = 4
+ range 1 4 if LINUX_CLI_LEVEL = 5
+ range 1 5 if LINUX_CLI_LEVEL = 6
+ default 1
+
+comment "-"
+comment "____Maskable interrupt levels____"
+
+config LINUX_CLI_LEVEL
+ int "The highest interrupt priority excluded by local_irq_disable() (2-6)"
+ range 2 6
+ default 2
+ help
+ local_irq_disable() doesn't actually disable maskable interrupts -
+ what it does is restrict the levels of interrupt which are permitted
+ (a lower level indicates a higher priority) by lowering the value in
+ EPSW.IM from 7. Any interrupt is permitted for which the level is
+ lower than EPSW.IM.
+
+ Certain interrupts, such as GDBSTUB and virtual MN10300 on-chip
+ serial DMA interrupts, are allowed to interrupt normal disabled
+ sections.
+
+comment "The following must be set to a equal to or lower priority than LINUX_CLI_LEVEL"
+
+config TIMER_IRQ_LEVEL
+ int "Kernel timer interrupt priority"
+ range LINUX_CLI_LEVEL 6
+ default 4
+
+config PCI_IRQ_LEVEL
+ int "PCI interrupt priority"
+ depends on PCI
+ range LINUX_CLI_LEVEL 6
+ default 5
+
+config ETHERNET_IRQ_LEVEL
+ int "Ethernet interrupt priority"
+ depends on SMC91X || SMC911X || SMSC911X
+ range LINUX_CLI_LEVEL 6
+ default 6
+
+config EXT_SERIAL_IRQ_LEVEL
+ int "External serial port interrupt priority"
+ depends on SERIAL_8250
+ range LINUX_CLI_LEVEL 6
+ default 6
endmenu
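
The priority options above feed straight into the EPSW.IM interrupt-mask field. A small stand-alone sketch of that mapping (EPSW_IM_SHIFT and NUM2EPSW_IM mirror the asm/cpu-regs.h additions later in this patch; everything else is illustrative only, not kernel code):

#include <stdio.h>

/* Mirrors the definitions added to asm/cpu-regs.h by this patch. */
#define EPSW_IM_SHIFT		8
#define NUM2EPSW_IM(num)	((num) << EPSW_IM_SHIFT)

int main(void)
{
	/* With CONFIG_LINUX_CLI_LEVEL=2, local_irq_disable() lowers EPSW.IM
	 * to 2, so only level-0 and level-1 sources (GDBSTUB and the
	 * on-chip serial ports) remain deliverable in "disabled" sections. */
	int level = 2;

	printf("EPSW.IM field for level %d: 0x%04x\n", level, NUM2EPSW_IM(level));
	return 0;
}

With level 2 this prints 0x0200, i.e. bits 8 and up of EPSW carry the mask level.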
diff --git a/arch/mn10300/Makefile b/arch/mn10300/Makefile
index ac5c6bdb2f0..7120282bf0d 100644
--- a/arch/mn10300/Makefile
+++ b/arch/mn10300/Makefile
@@ -36,6 +36,9 @@ endif
ifeq ($(CONFIG_MN10300_PROC_MN103E010),y)
PROCESSOR := mn103e010
endif
+ifeq ($(CONFIG_MN10300_PROC_MN2WS0050),y)
+PROCESSOR := mn2ws0050
+endif
ifeq ($(CONFIG_MN10300_UNIT_ASB2303),y)
UNIT := asb2303
@@ -43,6 +46,9 @@ endif
ifeq ($(CONFIG_MN10300_UNIT_ASB2305),y)
UNIT := asb2305
endif
+ifeq ($(CONFIG_MN10300_UNIT_ASB2364),y)
+UNIT := asb2364
+endif
head-y := arch/mn10300/kernel/head.o arch/mn10300/kernel/init_task.o
diff --git a/arch/mn10300/boot/compressed/head.S b/arch/mn10300/boot/compressed/head.S
index 502e1eb5670..7b50345b9e8 100644
--- a/arch/mn10300/boot/compressed/head.S
+++ b/arch/mn10300/boot/compressed/head.S
@@ -14,10 +14,29 @@
#include <linux/linkage.h>
#include <asm/cpu-regs.h>
+#include <asm/cache.h>
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+#endif
.globl startup_32
startup_32:
- # first save off parameters from bootloader
+#ifdef CONFIG_SMP
+ #
+ # Secondary CPUs jump directly to the kernel entry point
+ #
+ # Must save primary CPU's D0-D2 registers as they hold boot parameters
+ #
+ mov (CPUID), d3
+ and CPUID_MASK,d3
+ beq startup_primary
+ mov CONFIG_KERNEL_TEXT_ADDRESS,a0
+ jmp (a0)
+
+startup_primary:
+#endif /* CONFIG_SMP */
+
+ # first save parameters from bootloader
mov param_save_area,a0
mov d0,(a0)
mov d1,(4,a0)
@@ -37,8 +56,15 @@ startup_32:
mov (a0),d0
btst CHCTR_ICBUSY|CHCTR_DCBUSY,d0 # wait till not busy
lne
- mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD,d0 # writethru dcache
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
+#else
+ mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
+#endif /* WBACK */
movhu d0,(a0) # enable
+#endif /* !ENABLED */
# clear the BSS area
mov __bss_start,a0
@@ -54,6 +80,9 @@ bssclear_end:
# decompress the kernel
call decompress_kernel[],0
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ call mn10300_dcache_flush_inv[],0
+#endif
# disable caches again
mov CHCTR,a0
@@ -69,10 +98,46 @@ bssclear_end:
mov (4,a0),d1
mov (8,a0),d2
+ # jump to the kernel proper entry point
mov a3,sp
mov CONFIG_KERNEL_TEXT_ADDRESS,a0
jmp (a0)
+
+###############################################################################
+#
+# Cache flush routines
+#
+###############################################################################
+#ifdef CONFIG_MN10300_CACHE_WBACK
+mn10300_dcache_flush_inv:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_dcache_flush_inv_end
+
+ mov L1_CACHE_NENTRIES,d1
+ clr a1
+
+mn10300_dcache_flush_inv_loop:
+ mov (DCACHE_PURGE_WAY0(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY1(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY2(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY3(0),a1),d0 # unconditional purge
+
+ add L1_CACHE_BYTES,a1
+ add -1,d1
+ bne mn10300_dcache_flush_inv_loop
+
+mn10300_dcache_flush_inv_end:
+ ret [],0
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+
+###############################################################################
+#
+# Data areas
+#
+###############################################################################
.data
.align 4
param_save_area:
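
For readers who do not speak MN10300 assembly, here is a rough C rendering of what the new mn10300_dcache_flush_inv loop above does. It is a sketch under assumptions: the purge-window base addresses are invented placeholders, the entry count stands in for L1_CACHE_NENTRIES, and the flushing happens as a side effect of the MMIO reads, so this must not be run outside the boot environment it models:

/* Sketch only: walk every entry of each of the four ways through its
 * purge window; reading one word per cache line flushes and invalidates
 * that line, and the value read is discarded. */
static void dcache_flush_inv_sketch(void)
{
	volatile unsigned char *purge_way[4] = {
		(volatile unsigned char *)0xc8400000,	/* hypothetical way 0 window */
		(volatile unsigned char *)0xc8401000,	/* hypothetical way 1 window */
		(volatile unsigned char *)0xc8402000,	/* hypothetical way 2 window */
		(volatile unsigned char *)0xc8403000,	/* hypothetical way 3 window */
	};
	unsigned int entry, way;

	for (entry = 0; entry < 256 /* assumed L1_CACHE_NENTRIES */; entry++)
		for (way = 0; way < 4; way++)
			(void)*(volatile unsigned long *)
				(purge_way[way] + entry * 16 /* L1_CACHE_BYTES */);
}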
diff --git a/arch/mn10300/configs/asb2303_defconfig b/arch/mn10300/configs/asb2303_defconfig
index d80dfcb2c90..3f749b69ca7 100644
--- a/arch/mn10300/configs/asb2303_defconfig
+++ b/arch/mn10300/configs/asb2303_defconfig
@@ -12,6 +12,8 @@ CONFIG_SLAB=y
CONFIG_PROFILING=y
# CONFIG_BLOCK is not set
CONFIG_PREEMPT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_MN10300_RTC=y
CONFIG_MN10300_TTYSM_CONSOLE=y
CONFIG_MN10300_TTYSM0=y
diff --git a/arch/mn10300/configs/asb2364_defconfig b/arch/mn10300/configs/asb2364_defconfig
new file mode 100644
index 00000000000..83ce2f27b12
--- /dev/null
+++ b/arch/mn10300/configs/asb2364_defconfig
@@ -0,0 +1,98 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_NS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_RELAY=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_EMBEDDED=y
+# CONFIG_KALLSYMS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLOCK is not set
+CONFIG_MN10300_UNIT_ASB2364=y
+CONFIG_PREEMPT=y
+# CONFIG_MN10300_USING_JTAG is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_MN10300_TTYSM_CONSOLE=y
+CONFIG_MN10300_TTYSM0=y
+CONFIG_MN10300_TTYSM0_TIMER2=y
+CONFIG_MN10300_TTYSM1=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6=y
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_BEET is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_DEBUG=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_CFI_I4=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_PROC_KCORE=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index f0cc1f84a72..92d2f9298e3 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -1 +1,351 @@
+/* MN10300 Atomic counter operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_ATOMIC_H
+#define _ASM_ATOMIC_H
+
+#include <asm/irqflags.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+ unsigned long status;
+ unsigned long oldval;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " mov %5,(_ADR,%3) \n"
+ " mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(oldval), "=m"(*m)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
+ : "memory", "cc");
+
+ return oldval;
+}
+
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+ unsigned long old, unsigned long new)
+{
+ unsigned long status;
+ unsigned long oldval;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " cmp %5,%1 \n"
+ " bne 2f \n"
+ " mov %6,(_ADR,%3) \n"
+ "2: mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(oldval), "=m"(*m)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m),
+ "r"(old), "r"(new)
+ : "memory", "cc");
+
+ return oldval;
+}
+#else /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+#error "No SMP atomic operation support!"
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
+#else /* CONFIG_SMP */
+
+/*
+ * Emulate xchg for non-SMP MN10300
+ */
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+ unsigned long oldval;
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ oldval = *m;
+ *m = val;
+ arch_local_irq_restore(flags);
+ return oldval;
+}
+
+/*
+ * Emulate cmpxchg for non-SMP MN10300
+ */
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+ unsigned long old, unsigned long new)
+{
+ unsigned long oldval;
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ oldval = *m;
+ if (oldval == old)
+ *m = new;
+ arch_local_irq_restore(flags);
+ return oldval;
+}
+
+#endif /* CONFIG_SMP */
+
+#define xchg(ptr, v) \
+ ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \
+ (unsigned long)(v)))
+
+#define cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n)))
+
+#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_SMP
#include <asm-generic/atomic.h>
+#else
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc.
+ */
+
+#define ATOMIC_INIT(i) { (i) }
+
+#ifdef __KERNEL__
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_read(v) ((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+/**
+ * atomic_add_return - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ int retval;
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " add %5,%1 \n"
+ " mov %1,(_ADR,%3) \n"
+ " mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+ : "memory", "cc");
+
+#else
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ retval = v->counter;
+ retval += i;
+ v->counter = retval;
+ arch_local_irq_restore(flags);
+#endif
+ return retval;
+}
+
+/**
+ * atomic_sub_return - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ int retval;
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %4,(_AAR,%3) \n"
+ " mov (_ADR,%3),%1 \n"
+ " sub %5,%1 \n"
+ " mov %1,(_ADR,%3) \n"
+ " mov (_ADR,%3),%0 \n" /* flush */
+ " mov (_ASR,%3),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+ : "memory", "cc");
+
+#else
+ unsigned long flags;
+ flags = arch_local_cli_save();
+ retval = v->counter;
+ retval -= i;
+ v->counter = retval;
+ arch_local_irq_restore(flags);
+#endif
+ return retval;
+}
+
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+ return atomic_add_return(i, v) < 0;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ atomic_add_return(i, v);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ atomic_sub_return(i, v);
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+ atomic_add_return(1, v);
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+ atomic_sub_return(1, v);
+}
+
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+
+#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
+
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+/**
+ * atomic_clear_mask - Atomically clear bits in memory
+ * @mask: Mask of the bits to be cleared
+ * @addr: pointer to word in memory
+ *
+ * Atomically clears the bits set in mask from the memory word specified.
+ */
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %3,(_AAR,%2) \n"
+ " mov (_ADR,%2),%0 \n"
+ " and %4,%0 \n"
+ " mov %0,(_ADR,%2) \n"
+ " mov (_ADR,%2),%0 \n" /* flush */
+ " mov (_ASR,%2),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=m"(*addr)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
+ : "memory", "cc");
+#else
+ unsigned long flags;
+
+ mask = ~mask;
+ flags = arch_local_cli_save();
+ *addr &= mask;
+ arch_local_irq_restore(flags);
+#endif
+}
+
+/**
+ * atomic_set_mask - Atomically set bits in memory
+ * @mask: Mask of the bits to be set
+ * @addr: pointer to word in memory
+ *
+ * Atomically sets the bits given in mask in the memory word specified.
+ */
+static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+ int status;
+
+ asm volatile(
+ "1: mov %3,(_AAR,%2) \n"
+ " mov (_ADR,%2),%0 \n"
+ " or %4,%0 \n"
+ " mov %0,(_ADR,%2) \n"
+ " mov (_ADR,%2),%0 \n" /* flush */
+ " mov (_ASR,%2),%0 \n"
+ " or %0,%0 \n"
+ " bne 1b \n"
+ : "=&r"(status), "=m"(*addr)
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
+ : "memory", "cc");
+#else
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+ *addr |= mask;
+ arch_local_irq_restore(flags);
+#endif
+}
+
+/* Atomic operations are already serializing on MN10300??? */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+#include <asm-generic/atomic-long.h>
+
+#endif /* __KERNEL__ */
+#endif /* CONFIG_SMP */
+#endif /* _ASM_ATOMIC_H */
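
The atomic_add_unless() macro above is the classic cmpxchg() retry loop. A stand-alone approximation of the same pattern, built on a GCC builtin rather than the __cmpxchg() helper defined in this header so it can be compiled and run anywhere:

#include <stdio.h>

/* add_unless(): add 'a' to *v unless *v == u; a non-zero return means the
 * add happened.  Same retry shape as atomic_add_unless() above. */
static int add_unless(int *v, int a, int u)
{
	int c = *v, old;

	while (c != u && (old = __sync_val_compare_and_swap(v, c, c + a)) != c)
		c = old;
	return c != u;
}

int main(void)
{
	int counter = 1, added;

	added = add_unless(&counter, 1, 0);
	printf("added=%d counter=%d\n", added, counter);

	counter = 0;
	added = add_unless(&counter, 1, 0);
	printf("added=%d counter=%d\n", added, counter);
	return 0;
}

The first call succeeds (counter becomes 2); the second refuses because the counter already holds the excluded value 0.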
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
index 3f50e966107..3b8a868188f 100644
--- a/arch/mn10300/include/asm/bitops.h
+++ b/arch/mn10300/include/asm/bitops.h
@@ -57,7 +57,7 @@
#define clear_bit(nr, addr) ___clear_bit((nr), (addr))
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(unsigned long nr, volatile void *addr)
{
unsigned int *a = (unsigned int *) addr;
int mask;
@@ -70,15 +70,15 @@ static inline void __clear_bit(int nr, volatile void *addr)
/*
* test bit
*/
-static inline int test_bit(int nr, const volatile void *addr)
+static inline int test_bit(unsigned long nr, const volatile void *addr)
{
- return 1UL & (((const unsigned int *) addr)[nr >> 5] >> (nr & 31));
+ return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
}
/*
* change bit
*/
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(unsigned long nr, volatile void *addr)
{
int mask;
unsigned int *a = (unsigned int *) addr;
@@ -88,7 +88,7 @@ static inline void __change_bit(int nr, volatile void *addr)
*a ^= mask;
}
-extern void change_bit(int nr, volatile void *addr);
+extern void change_bit(unsigned long nr, volatile void *addr);
/*
* test and set bit
@@ -135,7 +135,7 @@ extern void change_bit(int nr, volatile void *addr);
/*
* test and change bit
*/
-static inline int __test_and_change_bit(int nr, volatile void *addr)
+static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
{
int mask, retval;
unsigned int *a = (unsigned int *)addr;
@@ -148,7 +148,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
return retval;
}
-extern int test_and_change_bit(int nr, volatile void *addr);
+extern int test_and_change_bit(unsigned long nr, volatile void *addr);
#include <asm-generic/bitops/lock.h>
diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h
index 781bf613366..f29cde2cfc9 100644
--- a/arch/mn10300/include/asm/cache.h
+++ b/arch/mn10300/include/asm/cache.h
@@ -43,14 +43,18 @@
/* instruction cache access registers */
#define ICACHE_DATA(WAY, ENTRY, OFF) \
- __SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+ __SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
#define ICACHE_TAG(WAY, ENTRY) \
- __SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+ __SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES, u32)
-/* instruction cache access registers */
+/* data cache access registers */
#define DCACHE_DATA(WAY, ENTRY, OFF) \
- __SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+ __SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
#define DCACHE_TAG(WAY, ENTRY) \
- __SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+ __SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + \
+ (ENTRY) * L1_CACHE_BYTES, u32)
#endif /* _ASM_CACHE_H */
diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h
index 29e692f7f03..faed90240de 100644
--- a/arch/mn10300/include/asm/cacheflush.h
+++ b/arch/mn10300/include/asm/cacheflush.h
@@ -17,66 +17,55 @@
#include <linux/mm.h>
/*
- * virtually-indexed cache management (our cache is physically indexed)
+ * Primitive routines
*/
-#define flush_cache_all() do {} while (0)
-#define flush_cache_mm(mm) do {} while (0)
-#define flush_cache_dup_mm(mm) do {} while (0)
-#define flush_cache_range(mm, start, end) do {} while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
-#define flush_cache_vmap(start, end) do {} while (0)
-#define flush_cache_vunmap(start, end) do {} while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do {} while (0)
-#define flush_dcache_mmap_lock(mapping) do {} while (0)
-#define flush_dcache_mmap_unlock(mapping) do {} while (0)
-
-/*
- * physically-indexed cache management
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
-
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
-
-#else
-
-#define flush_icache_range(start, end) do {} while (0)
-#define flush_icache_page(vma, pg) do {} while (0)
-
-#endif
-
-#define flush_icache_user_range(vma, pg, adr, len) \
- flush_icache_range(adr, adr + len)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- do { \
- memcpy(dst, src, len); \
- flush_icache_page(vma, page); \
- } while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-
-/*
- * primitive routines
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+extern void mn10300_local_icache_inv(void);
+extern void mn10300_local_icache_inv_page(unsigned long start);
+extern void mn10300_local_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_inv(void);
+extern void mn10300_local_dcache_inv_page(unsigned long start);
+extern void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size);
extern void mn10300_icache_inv(void);
+extern void mn10300_icache_inv_page(unsigned long start);
+extern void mn10300_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_icache_inv_range2(unsigned long start, unsigned long size);
extern void mn10300_dcache_inv(void);
-extern void mn10300_dcache_inv_page(unsigned start);
-extern void mn10300_dcache_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_inv_page(unsigned long start);
+extern void mn10300_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_inv_range2(unsigned long start, unsigned long size);
#ifdef CONFIG_MN10300_CACHE_WBACK
+extern void mn10300_local_dcache_flush(void);
+extern void mn10300_local_dcache_flush_page(unsigned long start);
+extern void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_flush_inv(void);
+extern void mn10300_local_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size);
extern void mn10300_dcache_flush(void);
-extern void mn10300_dcache_flush_page(unsigned start);
-extern void mn10300_dcache_flush_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_page(unsigned long start);
+extern void mn10300_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_range2(unsigned long start, unsigned long size);
extern void mn10300_dcache_flush_inv(void);
-extern void mn10300_dcache_flush_inv_page(unsigned start);
-extern void mn10300_dcache_flush_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size);
#else
+#define mn10300_local_dcache_flush() do {} while (0)
+#define mn10300_local_dcache_flush_page(start) do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end) do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush_inv() \
+ mn10300_local_dcache_inv()
+#define mn10300_local_dcache_flush_inv_page(start) \
+ mn10300_local_dcache_inv_page(start)
+#define mn10300_local_dcache_flush_inv_range(start, end) \
+ mn10300_local_dcache_inv_range(start, end)
+#define mn10300_local_dcache_flush_inv_range2(start, size) \
+ mn10300_local_dcache_inv_range2(start, size)
#define mn10300_dcache_flush() do {} while (0)
#define mn10300_dcache_flush_page(start) do {} while (0)
#define mn10300_dcache_flush_range(start, end) do {} while (0)
@@ -90,7 +79,26 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
mn10300_dcache_inv_range2((start), (size))
#endif /* CONFIG_MN10300_CACHE_WBACK */
#else
+#define mn10300_local_icache_inv() do {} while (0)
+#define mn10300_local_icache_inv_page(start) do {} while (0)
+#define mn10300_local_icache_inv_range(start, end) do {} while (0)
+#define mn10300_local_icache_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_inv() do {} while (0)
+#define mn10300_local_dcache_inv_page(start) do {} while (0)
+#define mn10300_local_dcache_inv_range(start, end) do {} while (0)
+#define mn10300_local_dcache_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush() do {} while (0)
+#define mn10300_local_dcache_flush_inv_page(start) do {} while (0)
+#define mn10300_local_dcache_flush_inv() do {} while (0)
+#define mn10300_local_dcache_flush_inv_range(start, end) do {} while (0)
+#define mn10300_local_dcache_flush_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush_page(start) do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end) do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size) do {} while (0)
#define mn10300_icache_inv() do {} while (0)
+#define mn10300_icache_inv_page(start) do {} while (0)
+#define mn10300_icache_inv_range(start, end) do {} while (0)
+#define mn10300_icache_inv_range2(start, size) do {} while (0)
#define mn10300_dcache_inv() do {} while (0)
#define mn10300_dcache_inv_page(start) do {} while (0)
#define mn10300_dcache_inv_range(start, end) do {} while (0)
@@ -103,10 +111,56 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
#define mn10300_dcache_flush_page(start) do {} while (0)
#define mn10300_dcache_flush_range(start, end) do {} while (0)
#define mn10300_dcache_flush_range2(start, size) do {} while (0)
-#endif /* CONFIG_MN10300_CACHE_DISABLED */
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+
+/*
+ * Virtually-indexed cache management (our cache is physically indexed)
+ */
+#define flush_cache_all() do {} while (0)
+#define flush_cache_mm(mm) do {} while (0)
+#define flush_cache_dup_mm(mm) do {} while (0)
+#define flush_cache_range(mm, start, end) do {} while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
+#define flush_cache_vmap(start, end) do {} while (0)
+#define flush_cache_vunmap(start, end) do {} while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page) do {} while (0)
+#define flush_dcache_mmap_lock(mapping) do {} while (0)
+#define flush_dcache_mmap_unlock(mapping) do {} while (0)
+
+/*
+ * Physically-indexed cache management
+ */
+#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE)
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#elif defined(CONFIG_MN10300_CACHE_INV_ICACHE)
+static inline void flush_icache_page(struct vm_area_struct *vma,
+ struct page *page)
+{
+ mn10300_icache_inv_page(page_to_phys(page));
+}
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#else
+#define flush_icache_range(start, end) do {} while (0)
+#define flush_icache_page(vma, pg) do {} while (0)
+#endif
+
+
+#define flush_icache_user_range(vma, pg, adr, len) \
+ flush_icache_range(adr, adr + len)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ memcpy(dst, src, len); \
+ flush_icache_page(vma, page); \
+ } while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
/*
- * internal debugging function
+ * Internal debugging function
*/
#ifdef CONFIG_DEBUG_PAGEALLOC
extern void kernel_map_pages(struct page *page, int numpages, int enable);
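
The flush_icache_page()/flush_icache_range() variants above exist for callers that write instructions into memory and then execute them. A hedged kernel-context sketch of such a caller (the buffer handling is invented; GDBSTUB_BKPT is the opcode named in asm/exceptions.h):

/* After patching an instruction in memory, the modified data must reach
 * RAM (on write-back configs) and any stale I-cache line must be dropped
 * before the CPU may fetch it: flush_icache_range() covers both steps. */
static void install_breakpoint_sketch(unsigned char *insn)
{
	insn[0] = 0xff;			/* GDBSTUB_BKPT */
	flush_icache_range((unsigned long)insn, (unsigned long)insn + 1);
}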
diff --git a/arch/mn10300/include/asm/cpu-regs.h b/arch/mn10300/include/asm/cpu-regs.h
index 757e9b5388e..90ed4a365c9 100644
--- a/arch/mn10300/include/asm/cpu-regs.h
+++ b/arch/mn10300/include/asm/cpu-regs.h
@@ -15,7 +15,6 @@
#include <linux/types.h>
#endif
-#ifdef CONFIG_MN10300_CPU_AM33V2
/* we tell the compiler to pretend to be AM33 so that it doesn't try and use
* the FP regs, but tell the assembler that we're actually allowed AM33v2
* instructions */
@@ -24,7 +23,6 @@ asm(" .am33_2\n");
#else
.am33_2
#endif
-#endif
#ifdef __KERNEL__
@@ -58,6 +56,9 @@ asm(" .am33_2\n");
#define EPSW_nAR 0x00040000 /* register bank control */
#define EPSW_ML 0x00080000 /* monitor level */
#define EPSW_FE 0x00100000 /* FPU enable */
+#define EPSW_IM_SHIFT 8 /* bit position of the EPSW.IM interrupt mask level field */
+
+#define NUM2EPSW_IM(num) ((num) << EPSW_IM_SHIFT)
/* FPU registers */
#define FPCR_EF_I 0x00000001 /* inexact result FPU exception flag */
@@ -99,9 +100,11 @@ asm(" .am33_2\n");
#define CPUREV __SYSREGC(0xc0000050, u32) /* CPU revision register */
#define CPUREV_TYPE 0x0000000f /* CPU type */
#define CPUREV_TYPE_S 0
-#define CPUREV_TYPE_AM33V1 0x00000000 /* - AM33 V1 core, AM33/1.00 arch */
-#define CPUREV_TYPE_AM33V2 0x00000001 /* - AM33 V2 core, AM33/2.00 arch */
-#define CPUREV_TYPE_AM34V1 0x00000002 /* - AM34 V1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_1 0x00000000 /* - AM33-1 core, AM33/1.00 arch */
+#define CPUREV_TYPE_AM33_2 0x00000001 /* - AM33-2 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_1 0x00000002 /* - AM34-1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_3 0x00000003 /* - AM33-3 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_2 0x00000004 /* - AM34-2 core, AM33/3.00 arch */
#define CPUREV_REVISION 0x000000f0 /* CPU revision */
#define CPUREV_REVISION_S 4
#define CPUREV_ICWAY 0x00000f00 /* number of instruction cache ways */
@@ -180,6 +183,21 @@ asm(" .am33_2\n");
#define CHCTR_ICWMD 0x0f00 /* instruction cache way mode */
#define CHCTR_DCWMD 0xf000 /* data cache way mode */
+#ifdef CONFIG_AM34_2
+#define ICIVCR __SYSREG(0xc0000c00, u32) /* icache area invalidate control */
+#define ICIVCR_ICIVBSY 0x00000008 /* icache area invalidate busy */
+#define ICIVCR_ICI 0x00000001 /* icache area invalidate */
+
+#define ICIVMR __SYSREG(0xc0000c04, u32) /* icache area invalidate mask */
+
+#define DCPGCR __SYSREG(0xc0000c10, u32) /* data cache area purge control */
+#define DCPGCR_DCPGBSY 0x00000008 /* data cache area purge busy */
+#define DCPGCR_DCP 0x00000002 /* data cache area purge */
+#define DCPGCR_DCI 0x00000001 /* data cache area invalidate */
+
+#define DCPGMR __SYSREG(0xc0000c14, u32) /* data cache area purge mask */
+#endif /* CONFIG_AM34_2 */
+
/* MMU control registers */
#define MMUCTR __SYSREG(0xc0000090, u32) /* MMU control register */
#define MMUCTR_IRP 0x0000003f /* instruction TLB replace pointer */
@@ -203,6 +221,9 @@ asm(" .am33_2\n");
#define MMUCTR_DTL_LOCK0_3 0x03000000 /* - entry 0-3 locked */
#define MMUCTR_DTL_LOCK0_7 0x04000000 /* - entry 0-7 locked */
#define MMUCTR_DTL_LOCK0_15 0x05000000 /* - entry 0-15 locked */
+#ifdef CONFIG_AM34_2
+#define MMUCTR_WTE 0x80000000 /* write-through cache TLB entry bit enable */
+#endif
#define PIDR __SYSREG(0xc0000094, u16) /* PID register */
#define PIDR_PID 0x00ff /* process identifier */
@@ -231,14 +252,6 @@ asm(" .am33_2\n");
#define xPTEL_PS_4Mb 0x00000c00 /* - 4Mb page */
#define xPTEL_PPN 0xfffff006 /* physical page number */
-#define xPTEL_V_BIT 0 /* bit numbers corresponding to above masks */
-#define xPTEL_UNUSED1_BIT 1
-#define xPTEL_UNUSED2_BIT 2
-#define xPTEL_C_BIT 3
-#define xPTEL_PV_BIT 4
-#define xPTEL_D_BIT 5
-#define xPTEL_G_BIT 9
-
#define IPTEU __SYSREG(0xc00000a4, u32) /* instruction TLB virtual addr */
#define DPTEU __SYSREG(0xc00000b4, u32) /* data TLB virtual addr */
#define xPTEU_VPN 0xfffffc00 /* virtual page number */
@@ -262,7 +275,16 @@ asm(" .am33_2\n");
#define xPTEL2_PS_128Kb 0x00000100 /* - 128Kb page */
#define xPTEL2_PS_1Kb 0x00000200 /* - 1Kb page */
#define xPTEL2_PS_4Mb 0x00000300 /* - 4Mb page */
-#define xPTEL2_PPN 0xfffffc00 /* physical page number */
+#define xPTEL2_CWT 0x00000400 /* cacheable write-through */
+#define xPTEL2_UNUSED1 0x00000800 /* unused bit (broadcast mask) */
+#define xPTEL2_PPN 0xfffff000 /* physical page number */
+
+#define xPTEL2_V_BIT 0 /* bit numbers corresponding to above masks */
+#define xPTEL2_C_BIT 1
+#define xPTEL2_PV_BIT 2
+#define xPTEL2_D_BIT 3
+#define xPTEL2_G_BIT 7
+#define xPTEL2_UNUSED1_BIT 11
#define MMUFCR __SYSREGC(0xc000009c, u32) /* MMU exception cause */
#define MMUFCR_IFC __SYSREGC(0xc000009c, u16) /* MMU instruction excep cause */
@@ -285,6 +307,47 @@ asm(" .am33_2\n");
#define MMUFCR_xFC_PR_RWK_RWU 0x01c0 /* - R/W kernel and R/W user */
#define MMUFCR_xFC_ILLADDR 0x0200 /* illegal address excep flag */
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+/* atomic operation registers */
+#define AAR __SYSREG(0xc0000a00, u32) /* cacheable address */
+#define AAR2 __SYSREG(0xc0000a04, u32) /* uncacheable address */
+#define ADR __SYSREG(0xc0000a08, u32) /* data */
+#define ASR __SYSREG(0xc0000a0c, u32) /* status */
+#define AARU __SYSREG(0xd400aa00, u32) /* user address */
+#define ADRU __SYSREG(0xd400aa08, u32) /* user data */
+#define ASRU __SYSREG(0xd400aa0c, u32) /* user status */
+
+#define ASR_RW 0x00000008 /* read */
+#define ASR_BW 0x00000004 /* bus error */
+#define ASR_IW 0x00000002 /* interrupt */
+#define ASR_LW 0x00000001 /* bus lock */
+
+#define ASRU_RW ASR_RW /* read */
+#define ASRU_BW ASR_BW /* bus error */
+#define ASRU_IW ASR_IW /* interrupt */
+#define ASRU_LW ASR_LW /* bus lock */
+
+/* in inline ASM, we stick the base pointer into a reg and use offsets from
+ * it */
+#define ATOMIC_OPS_BASE_ADDR 0xc0000a00
+#ifndef __ASSEMBLY__
+asm(
+ "_AAR = 0\n"
+ "_AAR2 = 4\n"
+ "_ADR = 8\n"
+ "_ASR = 12\n");
+#else
+#define _AAR 0
+#define _AAR2 4
+#define _ADR 8
+#define _ASR 12
+#endif
+
+/* physical page address for userspace atomic operations registers */
+#define USER_ATOMIC_OPS_PAGE_ADDR 0xd400a000
+
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
#endif /* __KERNEL__ */
#endif /* _ASM_CPU_REGS_H */
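
To make the _AAR/_ADR/_ASR offsets above concrete, a trivial stand-alone check of the register layout that the atomic-ops-unit inline assembly relies on (it only prints addresses; the values are exactly those defined above):

#include <stdio.h>

#define ATOMIC_OPS_BASE_ADDR	0xc0000a00UL
#define _AAR	0	/* cacheable address register */
#define _ADR	8	/* data register */
#define _ASR	12	/* status register */

int main(void)
{
	/* The asm in asm/atomic.h keeps ATOMIC_OPS_BASE_ADDR in an address
	 * register and reaches AAR/ADR/ASR as (offset,reg) operands. */
	printf("AAR = 0x%08lx\n", ATOMIC_OPS_BASE_ADDR + _AAR);
	printf("ADR = 0x%08lx\n", ATOMIC_OPS_BASE_ADDR + _ADR);
	printf("ASR = 0x%08lx\n", ATOMIC_OPS_BASE_ADDR + _ASR);
	return 0;
}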
diff --git a/arch/mn10300/include/asm/dmactl-regs.h b/arch/mn10300/include/asm/dmactl-regs.h
index 58a199da0f4..80337b339c9 100644
--- a/arch/mn10300/include/asm/dmactl-regs.h
+++ b/arch/mn10300/include/asm/dmactl-regs.h
@@ -11,91 +11,6 @@
#ifndef _ASM_DMACTL_REGS_H
#define _ASM_DMACTL_REGS_H
-#include <asm/cpu-regs.h>
-
-#ifdef __KERNEL__
-
-/* DMA registers */
-#define DMxCTR(N) __SYSREG(0xd2000000 + ((N) * 0x100), u32) /* control reg */
-#define DMxCTR_BG 0x0000001f /* transfer request source */
-#define DMxCTR_BG_SOFT 0x00000000 /* - software source */
-#define DMxCTR_BG_SC0TX 0x00000002 /* - serial port 0 transmission */
-#define DMxCTR_BG_SC0RX 0x00000003 /* - serial port 0 reception */
-#define DMxCTR_BG_SC1TX 0x00000004 /* - serial port 1 transmission */
-#define DMxCTR_BG_SC1RX 0x00000005 /* - serial port 1 reception */
-#define DMxCTR_BG_SC2TX 0x00000006 /* - serial port 2 transmission */
-#define DMxCTR_BG_SC2RX 0x00000007 /* - serial port 2 reception */
-#define DMxCTR_BG_TM0UFLOW 0x00000008 /* - timer 0 underflow */
-#define DMxCTR_BG_TM1UFLOW 0x00000009 /* - timer 1 underflow */
-#define DMxCTR_BG_TM2UFLOW 0x0000000a /* - timer 2 underflow */
-#define DMxCTR_BG_TM3UFLOW 0x0000000b /* - timer 3 underflow */
-#define DMxCTR_BG_TM6ACMPCAP 0x0000000c /* - timer 6A compare/capture */
-#define DMxCTR_BG_AFE 0x0000000d /* - analogue front-end interrupt source */
-#define DMxCTR_BG_ADC 0x0000000e /* - A/D conversion end interrupt source */
-#define DMxCTR_BG_IRDA 0x0000000f /* - IrDA interrupt source */
-#define DMxCTR_BG_RTC 0x00000010 /* - RTC interrupt source */
-#define DMxCTR_BG_XIRQ0 0x00000011 /* - XIRQ0 pin interrupt source */
-#define DMxCTR_BG_XIRQ1 0x00000012 /* - XIRQ1 pin interrupt source */
-#define DMxCTR_BG_XDMR0 0x00000013 /* - external request 0 source (XDMR0 pin) */
-#define DMxCTR_BG_XDMR1 0x00000014 /* - external request 1 source (XDMR1 pin) */
-#define DMxCTR_SAM 0x000000e0 /* DMA transfer src addr mode */
-#define DMxCTR_SAM_INCR 0x00000000 /* - increment */
-#define DMxCTR_SAM_DECR 0x00000020 /* - decrement */
-#define DMxCTR_SAM_FIXED 0x00000040 /* - fixed */
-#define DMxCTR_DAM 0x00000000 /* DMA transfer dest addr mode */
-#define DMxCTR_DAM_INCR 0x00000000 /* - increment */
-#define DMxCTR_DAM_DECR 0x00000100 /* - decrement */
-#define DMxCTR_DAM_FIXED 0x00000200 /* - fixed */
-#define DMxCTR_TM 0x00001800 /* DMA transfer mode */
-#define DMxCTR_TM_BATCH 0x00000000 /* - batch transfer */
-#define DMxCTR_TM_INTERM 0x00001000 /* - intermittent transfer */
-#define DMxCTR_UT 0x00006000 /* DMA transfer unit */
-#define DMxCTR_UT_1 0x00000000 /* - 1 byte */
-#define DMxCTR_UT_2 0x00002000 /* - 2 byte */
-#define DMxCTR_UT_4 0x00004000 /* - 4 byte */
-#define DMxCTR_UT_16 0x00006000 /* - 16 byte */
-#define DMxCTR_TEN 0x00010000 /* DMA channel transfer enable */
-#define DMxCTR_RQM 0x00060000 /* external request input source mode */
-#define DMxCTR_RQM_FALLEDGE 0x00000000 /* - falling edge */
-#define DMxCTR_RQM_RISEEDGE 0x00020000 /* - rising edge */
-#define DMxCTR_RQM_LOLEVEL 0x00040000 /* - low level */
-#define DMxCTR_RQM_HILEVEL 0x00060000 /* - high level */
-#define DMxCTR_RQF 0x01000000 /* DMA transfer request flag */
-#define DMxCTR_XEND 0x80000000 /* DMA transfer end flag */
-
-#define DMxSRC(N) __SYSREG(0xd2000004 + ((N) * 0x100), u32) /* control reg */
-
-#define DMxDST(N) __SYSREG(0xd2000008 + ((N) * 0x100), u32) /* src addr reg */
-
-#define DMxSIZ(N) __SYSREG(0xd200000c + ((N) * 0x100), u32) /* dest addr reg */
-#define DMxSIZ_CT 0x000fffff /* number of bytes to transfer */
-
-#define DMxCYC(N) __SYSREG(0xd2000010 + ((N) * 0x100), u32) /* intermittent
- * size reg */
-#define DMxCYC_CYC 0x000000ff /* number of interrmittent transfers -1 */
-
-#define DM0IRQ 16 /* DMA channel 0 complete IRQ */
-#define DM1IRQ 17 /* DMA channel 1 complete IRQ */
-#define DM2IRQ 18 /* DMA channel 2 complete IRQ */
-#define DM3IRQ 19 /* DMA channel 3 complete IRQ */
-
-#define DM0ICR GxICR(DM0IRQ) /* DMA channel 0 complete intr ctrl reg */
-#define DM1ICR GxICR(DM0IR1) /* DMA channel 1 complete intr ctrl reg */
-#define DM2ICR GxICR(DM0IR2) /* DMA channel 2 complete intr ctrl reg */
-#define DM3ICR GxICR(DM0IR3) /* DMA channel 3 complete intr ctrl reg */
-
-#ifndef __ASSEMBLY__
-
-struct mn10300_dmactl_regs {
- u32 ctr;
- const void *src;
- void *dst;
- u32 siz;
- u32 cyc;
-} __attribute__((aligned(0x100)));
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
+#include <proc/dmactl-regs.h>
#endif /* _ASM_DMACTL_REGS_H */
diff --git a/arch/mn10300/include/asm/elf.h b/arch/mn10300/include/asm/elf.h
index e5fa97cd9a1..8157c9267f4 100644
--- a/arch/mn10300/include/asm/elf.h
+++ b/arch/mn10300/include/asm/elf.h
@@ -32,6 +32,12 @@
#define R_MN10300_ALIGN 34 /* Alignment requirement. */
/*
+ * AM33/AM34 HW Capabilities
+ */
+#define HWCAP_MN10300_ATOMIC_OP_UNIT 1 /* Has AM34 Atomic Operations */
+
+
+/*
* ELF register definitions..
*/
typedef unsigned long elf_greg_t;
@@ -47,8 +53,6 @@ typedef struct {
u_int32_t fpcr;
} elf_fpregset_t;
-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-
/*
* This is used to ensure we don't load something for the wrong architecture
*/
@@ -130,7 +134,11 @@ do { \
* instruction set this CPU supports. This could be done in user space,
* but it's not easy, and we've already done it here.
*/
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+#define ELF_HWCAP (HWCAP_MN10300_ATOMIC_OP_UNIT)
+#else
#define ELF_HWCAP (0)
+#endif
/*
* This yields a string that ld.so will use to load implementation
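
ELF_HWCAP is how userspace discovers the atomic-ops unit. A hedged userspace sketch of the check (getauxval() needs a reasonably modern glibc; the HWCAP bit value matches the definition added above):

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_MN10300_ATOMIC_OP_UNIT	1	/* as in asm/elf.h above */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	if (hwcap & HWCAP_MN10300_ATOMIC_OP_UNIT)
		printf("AM34 atomic operations unit reported by the kernel\n");
	else
		printf("no atomic operations unit reported\n");
	return 0;
}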
diff --git a/arch/mn10300/include/asm/exceptions.h b/arch/mn10300/include/asm/exceptions.h
index fa16466ef3f..ca3e20508c7 100644
--- a/arch/mn10300/include/asm/exceptions.h
+++ b/arch/mn10300/include/asm/exceptions.h
@@ -15,8 +15,8 @@
/*
* define the breakpoint instruction opcode to use
- * - note that the JTAG unit steals 0xFF, so we want to avoid that if we can
- * (can use 0xF7)
+ * - note that the JTAG unit steals 0xFF, so you can't use JTAG and GDBSTUB at
+ * the same time.
*/
#define GDBSTUB_BKPT 0xFF
@@ -90,7 +90,6 @@ enum exception_code {
extern void __set_intr_stub(enum exception_code code, void *handler);
extern void set_intr_stub(enum exception_code code, void *handler);
-extern void set_jtag_stub(enum exception_code code, void *handler);
struct pt_regs;
@@ -102,7 +101,6 @@ extern asmlinkage void dtlb_aerror(void);
extern asmlinkage void raw_bus_error(void);
extern asmlinkage void double_fault(void);
extern asmlinkage int system_call(struct pt_regs *);
-extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
extern asmlinkage void nmi(struct pt_regs *, enum exception_code);
extern asmlinkage void uninitialised_exception(struct pt_regs *,
enum exception_code);
@@ -116,6 +114,8 @@ extern void die(const char *, struct pt_regs *, enum exception_code)
extern int die_if_no_fixup(const char *, struct pt_regs *, enum exception_code);
+#define NUM2EXCEP_IRQ_LEVEL(num) (EXCEP_IRQ_LEVEL0 + (num) * 8)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_EXCEPTIONS_H */
diff --git a/arch/mn10300/include/asm/fpu.h b/arch/mn10300/include/asm/fpu.h
index 64a2b83a7a6..b7625de8ead 100644
--- a/arch/mn10300/include/asm/fpu.h
+++ b/arch/mn10300/include/asm/fpu.h
@@ -12,74 +12,125 @@
#ifndef _ASM_FPU_H
#define _ASM_FPU_H
-#include <asm/processor.h>
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <asm/exceptions.h>
#include <asm/sigcontext.h>
-#include <asm/user.h>
#ifdef __KERNEL__
-/* the task that owns the FPU state */
+extern asmlinkage void fpu_disabled(void);
+
+#ifdef CONFIG_FPU
+
+#ifdef CONFIG_LAZY_SAVE_FPU
+/* the task that currently owns the FPU state */
extern struct task_struct *fpu_state_owner;
+#endif
-#define set_using_fpu(tsk) \
-do { \
- (tsk)->thread.fpu_flags |= THREAD_USING_FPU; \
-} while (0)
+#if (THREAD_USING_FPU & ~0xff)
+#error THREAD_USING_FPU must be smaller than 0x100.
+#endif
-#define clear_using_fpu(tsk) \
-do { \
- (tsk)->thread.fpu_flags &= ~THREAD_USING_FPU; \
-} while (0)
+static inline void set_using_fpu(struct task_struct *tsk)
+{
+ asm volatile(
+ "bset %0,(0,%1)"
+ :
+ : "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags)
+ : "memory", "cc");
+}
-#define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)
+static inline void clear_using_fpu(struct task_struct *tsk)
+{
+ asm volatile(
+ "bclr %0,(0,%1)"
+ :
+ : "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags)
+ : "memory", "cc");
+}
-#define unlazy_fpu(tsk) \
-do { \
- preempt_disable(); \
- if (fpu_state_owner == (tsk)) \
- fpu_save(&tsk->thread.fpu_state); \
- preempt_enable(); \
-} while (0)
-
-#define exit_fpu() \
-do { \
- struct task_struct *__tsk = current; \
- preempt_disable(); \
- if (fpu_state_owner == __tsk) \
- fpu_state_owner = NULL; \
- preempt_enable(); \
-} while (0)
-
-#define flush_fpu() \
-do { \
- struct task_struct *__tsk = current; \
- preempt_disable(); \
- if (fpu_state_owner == __tsk) { \
- fpu_state_owner = NULL; \
- __tsk->thread.uregs->epsw &= ~EPSW_FE; \
- } \
- preempt_enable(); \
- clear_using_fpu(__tsk); \
-} while (0)
+#define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)
-extern asmlinkage void fpu_init_state(void);
extern asmlinkage void fpu_kill_state(struct task_struct *);
-extern asmlinkage void fpu_disabled(struct pt_regs *, enum exception_code);
extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
-
-#ifdef CONFIG_FPU
+extern asmlinkage void fpu_invalid_op(struct pt_regs *, enum exception_code);
+extern asmlinkage void fpu_init_state(void);
extern asmlinkage void fpu_save(struct fpu_state_struct *);
-extern asmlinkage void fpu_restore(struct fpu_state_struct *);
-#else
-#define fpu_save(a)
-#define fpu_restore(a)
-#endif /* CONFIG_FPU */
-
-/*
- * signal frame handlers
- */
extern int fpu_setup_sigcontext(struct fpucontext *buf);
extern int fpu_restore_sigcontext(struct fpucontext *buf);
+static inline void unlazy_fpu(struct task_struct *tsk)
+{
+ preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+ if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+ fpu_save(&tsk->thread.fpu_state);
+ tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ }
+#else
+ if (fpu_state_owner == tsk)
+ fpu_save(&tsk->thread.fpu_state);
+#endif
+ preempt_enable();
+}
+
+static inline void exit_fpu(void)
+{
+#ifdef CONFIG_LAZY_SAVE_FPU
+ struct task_struct *tsk = current;
+
+ preempt_disable();
+ if (fpu_state_owner == tsk)
+ fpu_state_owner = NULL;
+ preempt_enable();
+#endif
+}
+
+static inline void flush_fpu(void)
+{
+ struct task_struct *tsk = current;
+
+ preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+ if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+ tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ }
+#else
+ if (fpu_state_owner == tsk) {
+ fpu_state_owner = NULL;
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ }
+#endif
+ preempt_enable();
+ clear_using_fpu(tsk);
+}
+
+#else /* CONFIG_FPU */
+
+extern asmlinkage
+void unexpected_fpu_exception(struct pt_regs *, enum exception_code);
+#define fpu_invalid_op unexpected_fpu_exception
+#define fpu_exception unexpected_fpu_exception
+
+struct task_struct;
+struct fpu_state_struct;
+static inline bool is_using_fpu(struct task_struct *tsk) { return false; }
+static inline void set_using_fpu(struct task_struct *tsk) {}
+static inline void clear_using_fpu(struct task_struct *tsk) {}
+static inline void fpu_init_state(void) {}
+static inline void fpu_save(struct fpu_state_struct *s) {}
+static inline void fpu_kill_state(struct task_struct *tsk) {}
+static inline void unlazy_fpu(struct task_struct *tsk) {}
+static inline void exit_fpu(void) {}
+static inline void flush_fpu(void) {}
+static inline int fpu_setup_sigcontext(struct fpucontext *buf) { return 0; }
+static inline int fpu_restore_sigcontext(struct fpucontext *buf) { return 0; }
+#endif /* CONFIG_FPU */
+
#endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
#endif /* _ASM_FPU_H */
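
The point of CONFIG_LAZY_SAVE_FPU is that FPU state changes hands only when a second task actually touches the FPU, not on every context switch. A stand-alone model of that hand-over (the struct and function names here are invented for illustration; in the kernel the equivalent logic sits around fpu_state_owner and the FPU-disabled exception path):

#include <stdio.h>

/* Minimal model of lazy FPU ownership. */
struct task { const char *name; int saved_fpu; int live_fpu; };

static struct task *fpu_state_owner;	/* mirrors the kernel variable */

/* Called when a task traps because it used the FPU while EPSW.FE was off. */
static void fpu_handover(struct task *next, int new_live_value)
{
	if (fpu_state_owner && fpu_state_owner != next)
		fpu_state_owner->saved_fpu = fpu_state_owner->live_fpu; /* save */
	fpu_state_owner = next;
	next->live_fpu = new_live_value;
}

int main(void)
{
	struct task a = { "a", 0, 0 }, b = { "b", 0, 0 };

	fpu_handover(&a, 42);	/* a starts using the FPU */
	/* ...any number of a<->b context switches: nothing is saved here... */
	fpu_handover(&b, 7);	/* b touches the FPU: only now is a's state saved */
	printf("task a saved FPU state: %d\n", a.saved_fpu);
	return 0;
}

Prints 42, the value that was still live in the FPU when ownership moved to b.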
diff --git a/arch/mn10300/include/asm/frame.inc b/arch/mn10300/include/asm/frame.inc
index 5b1949bdf03..2ee58e3eb6b 100644
--- a/arch/mn10300/include/asm/frame.inc
+++ b/arch/mn10300/include/asm/frame.inc
@@ -18,6 +18,7 @@
#ifndef __ASM_OFFSETS_H__
#include <asm/asm-offsets.h>
#endif
+#include <asm/thread_info.h>
#define pi break
@@ -37,11 +38,15 @@
movm [d2,d3,a2,a3,exreg0,exreg1,exother],(sp)
mov sp,fp # FRAME pointer in A3
add -12,sp # allow for calls to be made
- mov (__frame),a1
- mov a1,(REG_NEXT,fp)
- mov fp,(__frame)
- and ~EPSW_FE,epsw # disable the FPU inside the kernel
+ # push the exception frame onto the front of the list
+ GET_THREAD_INFO a1
+ mov (TI_frame,a1),a0
+ mov a0,(REG_NEXT,fp)
+ mov fp,(TI_frame,a1)
+
+ # disable the FPU inside the kernel
+ and ~EPSW_FE,epsw
# we may be holding current in E2
#ifdef CONFIG_MN10300_CURRENT_IN_E2
@@ -57,10 +62,11 @@
.macro RESTORE_ALL
# peel back the stack to the calling frame
# - this permits execve() to discard extra frames due to kernel syscalls
- mov (__frame),fp
+ GET_THREAD_INFO a0
+ mov (TI_frame,a0),fp
mov fp,sp
- mov (REG_NEXT,fp),d0 # userspace has regs->next == 0
- mov d0,(__frame)
+ mov (REG_NEXT,fp),d0
+ mov d0,(TI_frame,a0) # userspace has regs->next == 0
#ifndef CONFIG_MN10300_USING_JTAG
mov (REG_EPSW,fp),d0
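
In C terms, the SAVE_ALL/RESTORE_ALL change above replaces the global __frame pointer with a per-thread chain of exception frames rooted in thread_info. A simplified model of that list (type and field names are abbreviations, not the real kernel structures):

/* Each nested exception pushes a register frame whose ->next points at the
 * frame it interrupted; the outermost, userspace frame has next == NULL. */
struct frame_model { struct frame_model *next; /* ...saved registers... */ };
struct thread_info_model { struct frame_model *frame; /* innermost frame */ };

/* What SAVE_ALL's stores to (REG_NEXT,fp) and (TI_frame,a1) amount to. */
static void push_frame(struct thread_info_model *ti, struct frame_model *new)
{
	new->next = ti->frame;
	ti->frame = new;
}

/* What RESTORE_ALL's reload of TI_frame amounts to. */
static void pop_frame(struct thread_info_model *ti)
{
	ti->frame = ti->frame->next;
}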
diff --git a/arch/mn10300/include/asm/gdb-stub.h b/arch/mn10300/include/asm/gdb-stub.h
index 41ed2676396..f5495ad82b7 100644
--- a/arch/mn10300/include/asm/gdb-stub.h
+++ b/arch/mn10300/include/asm/gdb-stub.h
@@ -110,7 +110,7 @@ extern asmlinkage void gdbstub_exception(struct pt_regs *, enum exception_code);
extern asmlinkage void __gdbstub_bug_trap(void);
extern asmlinkage void __gdbstub_pause(void);
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
extern asmlinkage void gdbstub_purge_cache(void);
#else
#define gdbstub_purge_cache() do {} while (0)
diff --git a/arch/mn10300/include/asm/hardirq.h b/arch/mn10300/include/asm/hardirq.h
index 54d95011767..0000d650b55 100644
--- a/arch/mn10300/include/asm/hardirq.h
+++ b/arch/mn10300/include/asm/hardirq.h
@@ -19,9 +19,10 @@
/* assembly code in softirq.h is sensitive to the offsets of these fields */
typedef struct {
unsigned int __softirq_pending;
- unsigned long idle_timestamp;
+#ifdef CONFIG_MN10300_WD_TIMER
unsigned int __nmi_count; /* arch dependent */
unsigned int __irq_count; /* arch dependent */
+#endif
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index f577ba2268c..bfe2d88604d 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -87,7 +87,7 @@ static inline unsigned long __kmap_atomic(struct page *page)
BUG();
#endif
set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
- __flush_tlb_one(vaddr);
+ local_flush_tlb_one(vaddr);
return vaddr;
}
@@ -101,7 +101,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
return;
}
- type = kmap_atomic_idx_pop();
+ type = kmap_atomic_idx();
#if HIGHMEM_DEBUG
{
@@ -116,9 +116,11 @@ static inline void __kunmap_atomic(unsigned long vaddr)
* this pte without first remap it
*/
pte_clear(kmap_pte - idx);
- __flush_tlb_one(vaddr);
+ local_flush_tlb_one(vaddr);
}
#endif
+
+ kmap_atomic_idx_pop();
pagefault_enable();
}
#endif /* __KERNEL__ */
diff --git a/arch/mn10300/include/asm/intctl-regs.h b/arch/mn10300/include/asm/intctl-regs.h
index ba544c796c5..585b708c2bc 100644
--- a/arch/mn10300/include/asm/intctl-regs.h
+++ b/arch/mn10300/include/asm/intctl-regs.h
@@ -15,24 +15,19 @@
#ifdef __KERNEL__
-/* interrupt controller registers */
-#define GxICR(X) __SYSREG(0xd4000000 + (X) * 4, u16) /* group irq ctrl regs */
-
-#define IAGR __SYSREG(0xd4000100, u16) /* intr acceptance group reg */
-#define IAGR_GN 0x00fc /* group number register
- * (documentation _has_ to be wrong)
- */
+/*
+ * Interrupt controller registers
+ * - Registers 64-191 are at addresses offset from the main array
+ */
+#define GxICR(X) \
+ __SYSREG(0xd4000000 + (X) * 4 + \
+ (((X) >= 64) && ((X) < 192)) * 0xf00, u16)
-#define EXTMD __SYSREG(0xd4000200, u16) /* external pin intr spec reg */
-#define GET_XIRQ_TRIGGER(X) ((EXTMD >> ((X) * 2)) & 3)
+#define GxICR_u8(X) \
+ __SYSREG(0xd4000000 + (X) * 4 + \
+ (((X) >= 64) && ((X) < 192)) * 0xf00, u8)
-#define SET_XIRQ_TRIGGER(X,Y) \
-do { \
- u16 x = EXTMD; \
- x &= ~(3 << ((X) * 2)); \
- x |= ((Y) & 3) << ((X) * 2); \
- EXTMD = x; \
-} while (0)
+#include <proc/intctl-regs.h>
#define XIRQ_TRIGGER_LOWLEVEL 0
#define XIRQ_TRIGGER_HILEVEL 1
@@ -59,10 +54,18 @@ do { \
#define GxICR_LEVEL_5 0x5000 /* - level 5 */
#define GxICR_LEVEL_6 0x6000 /* - level 6 */
#define GxICR_LEVEL_SHIFT 12
+#define GxICR_NMI 0x8000 /* nmi request flag */
+
+#define NUM2GxICR_LEVEL(num) ((num) << GxICR_LEVEL_SHIFT)
#ifndef __ASSEMBLY__
extern void set_intr_level(int irq, u16 level);
-extern void set_intr_postackable(int irq);
+extern void mn10300_intc_set_level(unsigned int irq, unsigned int level);
+extern void mn10300_intc_clear(unsigned int irq);
+extern void mn10300_intc_set(unsigned int irq);
+extern void mn10300_intc_enable(unsigned int irq);
+extern void mn10300_intc_disable(unsigned int irq);
+extern void mn10300_set_lateack_irq_type(int irq);
#endif
/* external interrupts */
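Note on the remapped GxICR() macro above: IRQs 64-191 now pick up an extra 0xf00 offset in the address calculation. A standalone arithmetic check (illustrative sketch only; GxICR_ADDR is a hypothetical stand-in, not part of this patch):

/* Hypothetical user-space check of the GxICR() address arithmetic above. */
#include <stdio.h>

#define GxICR_ADDR(X) \
	(0xd4000000UL + (X) * 4 + ((((X) >= 64) && ((X) < 192)) ? 0xf00UL : 0))

int main(void)
{
	printf("GxICR(10)  -> %#lx\n", GxICR_ADDR(10));  /* 0xd4000028: below 64, no offset   */
	printf("GxICR(70)  -> %#lx\n", GxICR_ADDR(70));  /* 0xd4001018: 64..191, +0xf00       */
	printf("GxICR(200) -> %#lx\n", GxICR_ADDR(200)); /* 0xd4000320: 192 and up, no offset */
	return 0;
}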
diff --git a/arch/mn10300/include/asm/io.h b/arch/mn10300/include/asm/io.h
index c1a4119e649..787255da744 100644
--- a/arch/mn10300/include/asm/io.h
+++ b/arch/mn10300/include/asm/io.h
@@ -206,6 +206,19 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
#define iowrite32_rep(p, src, count) \
outsl((unsigned long) (p), (src), (count))
+#define readsb(p, dst, count) \
+ insb((unsigned long) (p), (dst), (count))
+#define readsw(p, dst, count) \
+ insw((unsigned long) (p), (dst), (count))
+#define readsl(p, dst, count) \
+ insl((unsigned long) (p), (dst), (count))
+
+#define writesb(p, src, count) \
+ outsb((unsigned long) (p), (src), (count))
+#define writesw(p, src, count) \
+ outsw((unsigned long) (p), (src), (count))
+#define writesl(p, src, count) \
+ outsl((unsigned long) (p), (src), (count))
#define IO_SPACE_LIMIT 0xffffffff
diff --git a/arch/mn10300/include/asm/irq.h b/arch/mn10300/include/asm/irq.h
index 25c045d16d1..1a73fb3f60c 100644
--- a/arch/mn10300/include/asm/irq.h
+++ b/arch/mn10300/include/asm/irq.h
@@ -21,8 +21,16 @@
/* this number is used when no interrupt has been assigned */
#define NO_IRQ INT_MAX
-/* hardware irq numbers */
-#define NR_IRQS GxICR_NUM_IRQS
+/*
+ * hardware irq numbers
+ * - the ASB2364 has an FPGA with an IRQ multiplexer on it
+ */
+#ifdef CONFIG_MN10300_UNIT_ASB2364
+#include <unit/irq.h>
+#else
+#define NR_CPU_IRQS GxICR_NUM_IRQS
+#define NR_IRQS NR_CPU_IRQS
+#endif
/* external hardware irq numbers */
#define NR_XIRQS GxICR_NUM_XIRQS
diff --git a/arch/mn10300/include/asm/irq_regs.h b/arch/mn10300/include/asm/irq_regs.h
index a848cd232eb..97d0cb5af80 100644
--- a/arch/mn10300/include/asm/irq_regs.h
+++ b/arch/mn10300/include/asm/irq_regs.h
@@ -18,7 +18,11 @@
#define ARCH_HAS_OWN_IRQ_REGS
#ifndef __ASSEMBLY__
-#define get_irq_regs() (__frame)
+static inline __attribute__((const))
+struct pt_regs *get_irq_regs(void)
+{
+ return current_frame();
+}
#endif
#endif /* _ASM_IRQ_REGS_H */
diff --git a/arch/mn10300/include/asm/irqflags.h b/arch/mn10300/include/asm/irqflags.h
index 5e529a117cb..7a7ae12c711 100644
--- a/arch/mn10300/include/asm/irqflags.h
+++ b/arch/mn10300/include/asm/irqflags.h
@@ -13,6 +13,9 @@
#define _ASM_IRQFLAGS_H
#include <asm/cpu-regs.h>
+#ifndef __ASSEMBLY__
+#include <linux/smp.h>
+#endif
/*
* interrupt control
@@ -23,11 +26,7 @@
* - level 6 - timer interrupt
* - "enabled": run in IM7
*/
-#ifdef CONFIG_MN10300_TTYSM
-#define MN10300_CLI_LEVEL EPSW_IM_2
-#else
-#define MN10300_CLI_LEVEL EPSW_IM_1
-#endif
+#define MN10300_CLI_LEVEL (CONFIG_LINUX_CLI_LEVEL << EPSW_IM_SHIFT)
#ifndef __ASSEMBLY__
@@ -64,11 +63,12 @@ static inline unsigned long arch_local_irq_save(void)
/*
* we make sure arch_irq_enable() doesn't cause priority inversion
*/
-extern unsigned long __mn10300_irq_enabled_epsw;
+extern unsigned long __mn10300_irq_enabled_epsw[];
static inline void arch_local_irq_enable(void)
{
unsigned long tmp;
+ int cpu = raw_smp_processor_id();
asm volatile(
" mov epsw,%0 \n"
@@ -76,8 +76,8 @@ static inline void arch_local_irq_enable(void)
" or %2,%0 \n"
" mov %0,epsw \n"
: "=&d"(tmp)
- : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw)
- : "memory");
+ : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw[cpu])
+ : "memory", "cc");
}
static inline void arch_local_irq_restore(unsigned long flags)
@@ -94,7 +94,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
- return (flags & EPSW_IM) <= MN10300_CLI_LEVEL;
+ return (flags & (EPSW_IE | EPSW_IM)) != (EPSW_IE | EPSW_IM_7);
}
static inline bool arch_irqs_disabled(void)
@@ -109,6 +109,9 @@ static inline bool arch_irqs_disabled(void)
*/
static inline void arch_safe_halt(void)
{
+#ifdef CONFIG_SMP
+ arch_local_irq_enable();
+#else
asm volatile(
" or %0,epsw \n"
" nop \n"
@@ -117,7 +120,97 @@ static inline void arch_safe_halt(void)
:
: "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)
: "cc");
+#endif
}
+#define __sleep_cpu() \
+do { \
+ asm volatile( \
+ " bset %1,(%0)\n" \
+ "1: btst %1,(%0)\n" \
+ " bne 1b\n" \
+ : \
+ : "i"(&CPUM), "i"(CPUM_SLEEP) \
+ : "cc" \
+ ); \
+} while (0)
+
+static inline void arch_local_cli(void)
+{
+ asm volatile(
+ " and %0,epsw \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ :
+ : "i"(~EPSW_IE)
+ : "memory"
+ );
+}
+
+static inline unsigned long arch_local_cli_save(void)
+{
+ unsigned long flags = arch_local_save_flags();
+ arch_local_cli();
+ return flags;
+}
+
+static inline void arch_local_sti(void)
+{
+ asm volatile(
+ " or %0,epsw \n"
+ :
+ : "i"(EPSW_IE)
+ : "memory");
+}
+
+static inline void arch_local_change_intr_mask_level(unsigned long level)
+{
+ asm volatile(
+ " and %0,epsw \n"
+ " or %1,epsw \n"
+ :
+ : "i"(~EPSW_IM), "i"(EPSW_IE | level)
+ : "cc", "memory");
+}
+
+#else /* !__ASSEMBLY__ */
+
+#define LOCAL_SAVE_FLAGS(reg) \
+ mov epsw,reg
+
+#define LOCAL_IRQ_DISABLE \
+ and ~EPSW_IM,epsw; \
+ or EPSW_IE|MN10300_CLI_LEVEL,epsw; \
+ nop; \
+ nop; \
+ nop
+
+#define LOCAL_IRQ_ENABLE \
+ or EPSW_IE|EPSW_IM_7,epsw
+
+#define LOCAL_IRQ_RESTORE(reg) \
+ mov reg,epsw
+
+#define LOCAL_CLI_SAVE(reg) \
+ mov epsw,reg; \
+ and ~EPSW_IE,epsw; \
+ nop; \
+ nop; \
+ nop
+
+#define LOCAL_CLI \
+ and ~EPSW_IE,epsw; \
+ nop; \
+ nop; \
+ nop
+
+#define LOCAL_STI \
+ or EPSW_IE,epsw
+
+#define LOCAL_CHANGE_INTR_MASK_LEVEL(level) \
+ and ~EPSW_IM,epsw; \
+ or EPSW_IE|(level),epsw
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_IRQFLAGS_H */
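For orientation (a hedged sketch, not part of this commit), the new CLI helpers above pair up in the familiar save/restore pattern: arch_local_cli_save() captures EPSW before clearing EPSW_IE, and the existing arch_local_irq_restore() puts it back.

	/* Sketch: a critical section built from the helpers defined above. */
	unsigned long flags = arch_local_cli_save();  /* save EPSW, then clear EPSW_IE */
	/* ... code that must not be interrupted on this CPU ... */
	arch_local_irq_restore(flags);                /* restore the saved EPSW */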
diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h
index cb294c244de..c8f6c82672a 100644
--- a/arch/mn10300/include/asm/mmu_context.h
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -27,28 +27,38 @@
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
+#define MMU_CONTEXT_TLBPID_NR 256
#define MMU_CONTEXT_TLBPID_MASK 0x000000ffUL
#define MMU_CONTEXT_VERSION_MASK 0xffffff00UL
#define MMU_CONTEXT_FIRST_VERSION 0x00000100UL
#define MMU_NO_CONTEXT 0x00000000UL
-
-extern unsigned long mmu_context_cache[NR_CPUS];
-#define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])
+#define MMU_CONTEXT_TLBPID_LOCK_NR 0
#define enter_lazy_tlb(mm, tsk) do {} while (0)
+static inline void cpu_ran_vm(int cpu, struct mm_struct *mm)
+{
#ifdef CONFIG_SMP
-#define cpu_ran_vm(cpu, mm) \
- cpumask_set_cpu((cpu), mm_cpumask(mm))
-#define cpu_maybe_ran_vm(cpu, mm) \
- cpumask_test_and_set_cpu((cpu), mm_cpumask(mm))
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+#endif
+}
+
+static inline bool cpu_maybe_ran_vm(int cpu, struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+ return cpumask_test_and_set_cpu(cpu, mm_cpumask(mm));
#else
-#define cpu_ran_vm(cpu, mm) do {} while (0)
-#define cpu_maybe_ran_vm(cpu, mm) true
-#endif /* CONFIG_SMP */
+ return true;
+#endif
+}
-/*
- * allocate an MMU context
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+extern unsigned long mmu_context_cache[NR_CPUS];
+#define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])
+
+/**
+ * allocate_mmu_context - Allocate storage for the arch-specific MMU data
+ * @mm: The userspace VM context being set up
*/
static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
{
@@ -58,7 +68,7 @@ static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
/* we exhausted the TLB PIDs of this version on this CPU, so we
* flush this CPU's TLB in its entirety and start new cycle */
- flush_tlb_all();
+ local_flush_tlb_all();
/* fix the TLB version if needed (we avoid version #0 so as to
* distingush MMU_NO_CONTEXT) */
@@ -101,22 +111,34 @@ static inline int init_new_context(struct task_struct *tsk,
}
/*
- * destroy context related info for an mm_struct that is about to be put to
- * rest
- */
-#define destroy_context(mm) do { } while (0)
-
-/*
* after we have set current->mm to a new value, this activates the context for
* the new mm so we see the new mappings.
*/
-static inline void activate_context(struct mm_struct *mm, int cpu)
+static inline void activate_context(struct mm_struct *mm)
{
PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK;
}
+#else /* CONFIG_MN10300_TLB_USE_PIDR */
-/*
- * change between virtual memory sets
+#define init_new_context(tsk, mm) (0)
+#define activate_context(mm) local_flush_tlb()
+
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
+
+/**
+ * destroy_context - Destroy mm context information
+ * @mm: The MM being destroyed.
+ *
+ * Destroy context related info for an mm_struct that is about to be put to
+ * rest
+ */
+#define destroy_context(mm) do {} while (0)
+
+/**
+ * switch_mm - Change between userspace virtual memory contexts
+ * @prev: The outgoing MM context.
+ * @next: The incoming MM context.
+ * @tsk: The incoming task.
*/
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
@@ -124,11 +146,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
int cpu = smp_processor_id();
if (prev != next) {
+#ifdef CONFIG_SMP
+ per_cpu(cpu_tlbstate, cpu).active_mm = next;
+#endif
cpu_ran_vm(cpu, next);
- activate_context(next, cpu);
PTBR = (unsigned long) next->pgd;
- } else if (!cpu_maybe_ran_vm(cpu, next)) {
- activate_context(next, cpu);
+ activate_context(next);
}
}
diff --git a/arch/mn10300/include/asm/pgalloc.h b/arch/mn10300/include/asm/pgalloc.h
index a19f11327cd..146bacf193e 100644
--- a/arch/mn10300/include/asm/pgalloc.h
+++ b/arch/mn10300/include/asm/pgalloc.h
@@ -11,7 +11,6 @@
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H
-#include <asm/processor.h>
#include <asm/page.h>
#include <linux/threads.h>
#include <linux/mm.h> /* for struct page */
diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h
index b049a8bd157..a1e894b5f65 100644
--- a/arch/mn10300/include/asm/pgtable.h
+++ b/arch/mn10300/include/asm/pgtable.h
@@ -90,46 +90,58 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
* The vmalloc() routines also leaves a hole of 4kB between each vmalloced
* area to catch addressing errors.
*/
+#ifndef __ASSEMBLY__
+#define VMALLOC_OFFSET (8UL * 1024 * 1024)
+#define VMALLOC_START (0x70000000UL)
+#define VMALLOC_END (0x7C000000UL)
+#else
#define VMALLOC_OFFSET (8 * 1024 * 1024)
#define VMALLOC_START (0x70000000)
#define VMALLOC_END (0x7C000000)
+#endif
#ifndef __ASSEMBLY__
extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
#endif
-/* IPTEL/DPTEL bit assignments */
-#define _PAGE_BIT_VALID xPTEL_V_BIT
-#define _PAGE_BIT_ACCESSED xPTEL_UNUSED1_BIT /* mustn't be loaded into IPTEL/DPTEL */
-#define _PAGE_BIT_NX xPTEL_UNUSED2_BIT /* mustn't be loaded into IPTEL/DPTEL */
-#define _PAGE_BIT_CACHE xPTEL_C_BIT
-#define _PAGE_BIT_PRESENT xPTEL_PV_BIT
-#define _PAGE_BIT_DIRTY xPTEL_D_BIT
-#define _PAGE_BIT_GLOBAL xPTEL_G_BIT
-
-#define _PAGE_VALID xPTEL_V
-#define _PAGE_ACCESSED xPTEL_UNUSED1
-#define _PAGE_NX xPTEL_UNUSED2 /* no-execute bit */
-#define _PAGE_CACHE xPTEL_C
-#define _PAGE_PRESENT xPTEL_PV
-#define _PAGE_DIRTY xPTEL_D
-#define _PAGE_PROT xPTEL_PR
-#define _PAGE_PROT_RKNU xPTEL_PR_ROK
-#define _PAGE_PROT_WKNU xPTEL_PR_RWK
-#define _PAGE_PROT_RKRU xPTEL_PR_ROK_ROU
-#define _PAGE_PROT_WKRU xPTEL_PR_RWK_ROU
-#define _PAGE_PROT_WKWU xPTEL_PR_RWK_RWU
-#define _PAGE_GLOBAL xPTEL_G
-#define _PAGE_PSE xPTEL_PS_4Mb /* 4MB page */
-
-#define _PAGE_FILE xPTEL_UNUSED1_BIT /* set:pagecache unset:swap */
-
-#define __PAGE_PROT_UWAUX 0x040
-#define __PAGE_PROT_USER 0x080
-#define __PAGE_PROT_WRITE 0x100
+/* IPTEL2/DPTEL2 bit assignments */
+#define _PAGE_BIT_VALID xPTEL2_V_BIT
+#define _PAGE_BIT_CACHE xPTEL2_C_BIT
+#define _PAGE_BIT_PRESENT xPTEL2_PV_BIT
+#define _PAGE_BIT_DIRTY xPTEL2_D_BIT
+#define _PAGE_BIT_GLOBAL xPTEL2_G_BIT
+#define _PAGE_BIT_ACCESSED xPTEL2_UNUSED1_BIT /* mustn't be loaded into IPTEL2/DPTEL2 */
+
+#define _PAGE_VALID xPTEL2_V
+#define _PAGE_CACHE xPTEL2_C
+#define _PAGE_PRESENT xPTEL2_PV
+#define _PAGE_DIRTY xPTEL2_D
+#define _PAGE_PROT xPTEL2_PR
+#define _PAGE_PROT_RKNU xPTEL2_PR_ROK
+#define _PAGE_PROT_WKNU xPTEL2_PR_RWK
+#define _PAGE_PROT_RKRU xPTEL2_PR_ROK_ROU
+#define _PAGE_PROT_WKRU xPTEL2_PR_RWK_ROU
+#define _PAGE_PROT_WKWU xPTEL2_PR_RWK_RWU
+#define _PAGE_GLOBAL xPTEL2_G
+#define _PAGE_PS_MASK xPTEL2_PS
+#define _PAGE_PS_4Kb xPTEL2_PS_4Kb
+#define _PAGE_PS_128Kb xPTEL2_PS_128Kb
+#define _PAGE_PS_1Kb xPTEL2_PS_1Kb
+#define _PAGE_PS_4Mb xPTEL2_PS_4Mb
+#define _PAGE_PSE xPTEL2_PS_4Mb /* 4MB page */
+#define _PAGE_CACHE_WT xPTEL2_CWT
+#define _PAGE_ACCESSED xPTEL2_UNUSED1
+#define _PAGE_NX 0 /* no-execute bit */
+
+/* If _PAGE_VALID is clear, we use these: */
+#define _PAGE_FILE xPTEL2_C /* set:pagecache unset:swap */
+#define _PAGE_PROTNONE 0x000 /* If not present */
+
+#define __PAGE_PROT_UWAUX 0x010
+#define __PAGE_PROT_USER 0x020
+#define __PAGE_PROT_WRITE 0x040
#define _PAGE_PRESENTV (_PAGE_PRESENT|_PAGE_VALID)
-#define _PAGE_PROTNONE 0x000 /* If not present */
#ifndef __ASSEMBLY__
@@ -170,6 +182,9 @@ extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
+#define __PAGE_USERIO (__PAGE_KERNEL_BASE | _PAGE_PROT_WKWU | _PAGE_NX)
+#define PAGE_USERIO __pgprot(__PAGE_USERIO)
+
/*
* Whilst the MN10300 can do page protection for execute (given separate data
* and insn TLBs), we are not supporting it at the moment. Write permission,
@@ -323,11 +338,7 @@ static inline int pte_exec_kernel(pte_t pte)
return 1;
}
-/*
- * Bits 0 and 1 are taken, split up the 29 bits of offset
- * into this range:
- */
-#define PTE_FILE_MAX_BITS 29
+#define PTE_FILE_MAX_BITS 30
#define pte_to_pgoff(pte) (pte_val(pte) >> 2)
#define pgoff_to_pte(off) __pte((off) << 2 | _PAGE_FILE)
@@ -373,8 +384,13 @@ static inline void ptep_mkdirty(pte_t *ptep)
* Macro to mark a page protection value as "uncacheable". On processors which
* do not support it, this is a no-op.
*/
-#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_CACHE)
+#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHE)
+/*
+ * Macro to mark a page protection value as "Write-Through".
+ * On processors which do not support it, this is a no-op.
+ */
+#define pgprot_through(prot) __pgprot(pgprot_val(prot) | _PAGE_CACHE_WT)
/*
* Conversion functions: convert a page and protection to a page entry,
diff --git a/arch/mn10300/include/asm/processor.h b/arch/mn10300/include/asm/processor.h
index f7d4b0d285e..4c1b5cc14c1 100644
--- a/arch/mn10300/include/asm/processor.h
+++ b/arch/mn10300/include/asm/processor.h
@@ -13,10 +13,13 @@
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H
+#include <linux/threads.h>
+#include <linux/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cpu-regs.h>
-#include <linux/threads.h>
+#include <asm/uaccess.h>
+#include <asm/current.h>
/* Forward declaration, a strange C thing */
struct task_struct;
@@ -33,6 +36,8 @@ struct mm_struct;
__pc; \
})
+extern void get_mem_info(unsigned long *mem_base, unsigned long *mem_size);
+
extern void show_registers(struct pt_regs *regs);
/*
@@ -43,17 +48,22 @@ extern void show_registers(struct pt_regs *regs);
struct mn10300_cpuinfo {
int type;
- unsigned long loops_per_sec;
+ unsigned long loops_per_jiffy;
char hard_math;
- unsigned long *pgd_quick;
- unsigned long *pte_quick;
- unsigned long pgtable_cache_sz;
};
extern struct mn10300_cpuinfo boot_cpu_data;
+#ifdef CONFIG_SMP
+#if CONFIG_NR_CPUS < 2 || CONFIG_NR_CPUS > 8
+# error Sorry, NR_CPUS should be 2 to 8
+#endif
+extern struct mn10300_cpuinfo cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else /* CONFIG_SMP */
#define cpu_data &boot_cpu_data
#define current_cpu_data boot_cpu_data
+#endif /* CONFIG_SMP */
extern void identify_cpu(struct mn10300_cpuinfo *);
extern void print_cpu_info(struct mn10300_cpuinfo *);
@@ -76,10 +86,6 @@ extern void dodgy_tsc(void);
*/
#define TASK_UNMAPPED_BASE 0x30000000
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
struct fpu_state_struct {
unsigned long fs[32]; /* fpu registers */
unsigned long fpcr; /* fpu control register */
@@ -92,20 +98,19 @@ struct thread_struct {
unsigned long a3; /* kernel FP */
unsigned long wchan;
unsigned long usp;
- struct pt_regs *__frame;
unsigned long fpu_flags;
#define THREAD_USING_FPU 0x00000001 /* T if this task is using the FPU */
+#define THREAD_HAS_FPU 0x00000002 /* T if this task owns the FPU right now */
struct fpu_state_struct fpu_state;
};
-#define INIT_THREAD \
-{ \
- .uregs = init_uregs, \
- .pc = 0, \
- .sp = 0, \
- .a3 = 0, \
- .wchan = 0, \
- .__frame = NULL, \
+#define INIT_THREAD \
+{ \
+ .uregs = init_uregs, \
+ .pc = 0, \
+ .sp = 0, \
+ .a3 = 0, \
+ .wchan = 0, \
}
#define INIT_MMAP \
@@ -117,13 +122,20 @@ struct thread_struct {
* - need to discard the frame stacked by the kernel thread invoking the execve
* syscall (see RESTORE_ALL macro)
*/
-#define start_thread(regs, new_pc, new_sp) do { \
- set_fs(USER_DS); \
- __frame = current->thread.uregs; \
- __frame->epsw = EPSW_nSL | EPSW_IE | EPSW_IM; \
- __frame->pc = new_pc; \
- __frame->sp = new_sp; \
-} while (0)
+static inline void start_thread(struct pt_regs *regs,
+ unsigned long new_pc, unsigned long new_sp)
+{
+ struct thread_info *ti = current_thread_info();
+ struct pt_regs *frame0;
+ set_fs(USER_DS);
+
+ frame0 = thread_info_to_uregs(ti);
+ frame0->epsw = EPSW_nSL | EPSW_IE | EPSW_IM;
+ frame0->pc = new_pc;
+ frame0->sp = new_sp;
+ ti->frame = frame0;
+}
+
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
@@ -157,7 +169,7 @@ unsigned long get_wchan(struct task_struct *p);
static inline void prefetch(const void *x)
{
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
#ifdef CONFIG_MN10300_PROC_MN103E010
asm volatile ("nop; nop; dcpf (%0)" : : "r"(x));
#else
@@ -168,7 +180,7 @@ static inline void prefetch(const void *x)
static inline void prefetchw(const void *x)
{
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
#ifdef CONFIG_MN10300_PROC_MN103E010
asm volatile ("nop; nop; dcpf (%0)" : : "r"(x));
#else
diff --git a/arch/mn10300/include/asm/ptrace.h b/arch/mn10300/include/asm/ptrace.h
index 7c2e911052b..b6961811d44 100644
--- a/arch/mn10300/include/asm/ptrace.h
+++ b/arch/mn10300/include/asm/ptrace.h
@@ -40,7 +40,6 @@
#define PT_PC 26
#define NR_PTREGS 27
-#ifndef __ASSEMBLY__
/*
* This defines the way registers are stored in the event of an exception
* - the strange order is due to the MOVM instruction
@@ -75,7 +74,6 @@ struct pt_regs {
unsigned long epsw;
unsigned long pc;
};
-#endif
/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
#define PTRACE_GETREGS 12
@@ -86,12 +84,7 @@ struct pt_regs {
/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
-#if defined(__KERNEL__)
-
-extern struct pt_regs *__frame; /* current frame pointer */
-
-#if !defined(__ASSEMBLY__)
-struct task_struct;
+#ifdef __KERNEL__
#define user_mode(regs) (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
#define instruction_pointer(regs) ((regs)->pc)
@@ -100,9 +93,7 @@ extern void show_regs(struct pt_regs *);
#define arch_has_single_step() (1)
-#endif /* !__ASSEMBLY */
-
#define profile_pc(regs) ((regs)->pc)
-#endif /* __KERNEL__ */
+#endif /* __KERNEL__ */
#endif /* _ASM_PTRACE_H */
diff --git a/arch/mn10300/include/asm/reset-regs.h b/arch/mn10300/include/asm/reset-regs.h
index 174523d5013..10c7502a113 100644
--- a/arch/mn10300/include/asm/reset-regs.h
+++ b/arch/mn10300/include/asm/reset-regs.h
@@ -50,7 +50,7 @@ static inline void mn10300_proc_hard_reset(void)
RSTCTR |= RSTCTR_CHIPRST;
}
-extern unsigned int watchdog_alert_counter;
+extern unsigned int watchdog_alert_counter[];
extern void watchdog_go(void);
extern asmlinkage void watchdog_handler(void);
diff --git a/arch/mn10300/include/asm/rtc.h b/arch/mn10300/include/asm/rtc.h
index c295194cc70..6c14bb1d0d9 100644
--- a/arch/mn10300/include/asm/rtc.h
+++ b/arch/mn10300/include/asm/rtc.h
@@ -15,25 +15,14 @@
#include <linux/init.h>
-extern void check_rtc_time(void);
extern void __init calibrate_clock(void);
-extern unsigned long __init get_initial_rtc_time(void);
#else /* !CONFIG_MN10300_RTC */
-static inline void check_rtc_time(void)
-{
-}
-
static inline void calibrate_clock(void)
{
}
-static inline unsigned long get_initial_rtc_time(void)
-{
- return 0;
-}
-
#endif /* !CONFIG_MN10300_RTC */
#include <asm-generic/rtc.h>
diff --git a/arch/mn10300/include/asm/rwlock.h b/arch/mn10300/include/asm/rwlock.h
new file mode 100644
index 00000000000..6d594d4a0e1
--- /dev/null
+++ b/arch/mn10300/include/asm/rwlock.h
@@ -0,0 +1,125 @@
+/*
+ * Helpers used by both rw spinlocks and rw semaphores.
+ *
+ * Based in part on code from semaphore.h and
+ * spinlock.h Copyright 1996 Linus Torvalds.
+ *
+ * Copyright 1999 Red Hat, Inc.
+ *
+ * Written by Benjamin LaHaise.
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 13-Nov-2006 MEI Temporarily delete lock functions for SMP support.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+#ifndef _ASM_RWLOCK_H
+#define _ASM_RWLOCK_H
+
+#define RW_LOCK_BIAS 0x01000000
+
+#ifndef CONFIG_SMP
+
+typedef struct { unsigned long a[100]; } __dummy_lock_t;
+#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
+
+#define RW_LOCK_BIAS_STR "0x01000000"
+
+#define __build_read_lock_ptr(rw, helper) \
+ do { \
+ asm volatile( \
+ " mov (%0),d3 \n" \
+ " sub 1,d3 \n" \
+ " mov d3,(%0) \n" \
+ " blt 1f \n" \
+ " bra 2f \n" \
+ "1: jmp 3f \n" \
+ "2: \n" \
+ " .section .text.lock,\"ax\" \n" \
+ "3: call "helper"[],0 \n" \
+ " jmp 2b \n" \
+ " .previous" \
+ : \
+ : "d" (rw) \
+ : "memory", "d3", "cc"); \
+ } while (0)
+
+#define __build_read_lock_const(rw, helper) \
+ do { \
+ asm volatile( \
+ " mov (%0),d3 \n" \
+ " sub 1,d3 \n" \
+ " mov d3,(%0) \n" \
+ " blt 1f \n" \
+ " bra 2f \n" \
+ "1: jmp 3f \n" \
+ "2: \n" \
+ " .section .text.lock,\"ax\" \n" \
+ "3: call "helper"[],0 \n" \
+ " jmp 2b \n" \
+ " .previous" \
+ : \
+ : "d" (rw) \
+ : "memory", "d3", "cc"); \
+ } while (0)
+
+#define __build_read_lock(rw, helper) \
+ do { \
+ if (__builtin_constant_p(rw)) \
+ __build_read_lock_const(rw, helper); \
+ else \
+ __build_read_lock_ptr(rw, helper); \
+ } while (0)
+
+#define __build_write_lock_ptr(rw, helper) \
+ do { \
+ asm volatile( \
+ " mov (%0),d3 \n" \
+ " sub 1,d3 \n" \
+ " mov d3,(%0) \n" \
+ " blt 1f \n" \
+ " bra 2f \n" \
+ "1: jmp 3f \n" \
+ "2: \n" \
+ " .section .text.lock,\"ax\" \n" \
+ "3: call "helper"[],0 \n" \
+ " jmp 2b \n" \
+ " .previous" \
+ : \
+ : "d" (rw) \
+ : "memory", "d3", "cc"); \
+ } while (0)
+
+#define __build_write_lock_const(rw, helper) \
+ do { \
+ asm volatile( \
+ " mov (%0),d3 \n" \
+ " sub 1,d3 \n" \
+ " mov d3,(%0) \n" \
+ " blt 1f \n" \
+ " bra 2f \n" \
+ "1: jmp 3f \n" \
+ "2: \n" \
+ " .section .text.lock,\"ax\" \n" \
+ "3: call "helper"[],0 \n" \
+ " jmp 2b \n" \
+ " .previous" \
+ : \
+ : "d" (rw) \
+ : "memory", "d3", "cc"); \
+ } while (0)
+
+#define __build_write_lock(rw, helper) \
+ do { \
+ if (__builtin_constant_p(rw)) \
+ __build_write_lock_const(rw, helper); \
+ else \
+ __build_write_lock_ptr(rw, helper); \
+ } while (0)
+
+#endif /* CONFIG_SMP */
+#endif /* _ASM_RWLOCK_H */
diff --git a/arch/mn10300/include/asm/serial-regs.h b/arch/mn10300/include/asm/serial-regs.h
index 6498469e93a..8320cda32f5 100644
--- a/arch/mn10300/include/asm/serial-regs.h
+++ b/arch/mn10300/include/asm/serial-regs.h
@@ -20,18 +20,25 @@
/* serial port 0 */
#define SC0CTR __SYSREG(0xd4002000, u16) /* control reg */
#define SC01CTR_CK 0x0007 /* clock source select */
-#define SC0CTR_CK_TM8UFLOW_8 0x0000 /* - 1/8 timer 8 underflow (serial port 0 only) */
-#define SC1CTR_CK_TM9UFLOW_8 0x0000 /* - 1/8 timer 9 underflow (serial port 1 only) */
#define SC01CTR_CK_IOCLK_8 0x0001 /* - 1/8 IOCLK */
#define SC01CTR_CK_IOCLK_32 0x0002 /* - 1/32 IOCLK */
+#define SC01CTR_CK_EXTERN_8 0x0006 /* - 1/8 external clock */
+#define SC01CTR_CK_EXTERN 0x0007 /* - external clock */
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+#define SC0CTR_CK_TM8UFLOW_8 0x0000 /* - 1/8 timer 8 underflow (serial port 0 only) */
#define SC0CTR_CK_TM2UFLOW_2 0x0003 /* - 1/2 timer 2 underflow (serial port 0 only) */
-#define SC1CTR_CK_TM3UFLOW_2 0x0003 /* - 1/2 timer 3 underflow (serial port 1 only) */
-#define SC0CTR_CK_TM0UFLOW_8 0x0004 /* - 1/8 timer 1 underflow (serial port 0 only) */
-#define SC1CTR_CK_TM1UFLOW_8 0x0004 /* - 1/8 timer 2 underflow (serial port 1 only) */
+#define SC0CTR_CK_TM0UFLOW_8 0x0004 /* - 1/8 timer 0 underflow (serial port 0 only) */
#define SC0CTR_CK_TM2UFLOW_8 0x0005 /* - 1/8 timer 2 underflow (serial port 0 only) */
+#define SC1CTR_CK_TM9UFLOW_8 0x0000 /* - 1/8 timer 9 underflow (serial port 1 only) */
+#define SC1CTR_CK_TM3UFLOW_2 0x0003 /* - 1/2 timer 3 underflow (serial port 1 only) */
+#define SC1CTR_CK_TM1UFLOW_8 0x0004 /* - 1/8 timer 1 underflow (serial port 1 only) */
#define SC1CTR_CK_TM3UFLOW_8 0x0005 /* - 1/8 timer 3 underflow (serial port 1 only) */
-#define SC01CTR_CK_EXTERN_8 0x0006 /* - 1/8 external closk */
-#define SC01CTR_CK_EXTERN 0x0007 /* - external closk */
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+#define SC0CTR_CK_TM8UFLOW_8 0x0000 /* - 1/8 timer 8 underflow (serial port 0 only) */
+#define SC0CTR_CK_TM0UFLOW_8 0x0004 /* - 1/8 timer 0 underflow (serial port 0 only) */
+#define SC0CTR_CK_TM2UFLOW_8 0x0005 /* - 1/8 timer 2 underflow (serial port 0 only) */
+#define SC1CTR_CK_TM12UFLOW_8 0x0000 /* - 1/8 timer 12 underflow (serial port 1 only) */
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
#define SC01CTR_STB 0x0008 /* stop bit select */
#define SC01CTR_STB_1BIT 0x0000 /* - 1 stop bit */
#define SC01CTR_STB_2BIT 0x0008 /* - 2 stop bits */
@@ -100,11 +107,23 @@
/* serial port 2 */
#define SC2CTR __SYSREG(0xd4002020, u16) /* control reg */
+#ifdef CONFIG_AM33_2
#define SC2CTR_CK 0x0003 /* clock source select */
#define SC2CTR_CK_TM10UFLOW 0x0000 /* - timer 10 underflow */
#define SC2CTR_CK_TM2UFLOW 0x0001 /* - timer 2 underflow */
#define SC2CTR_CK_EXTERN 0x0002 /* - external closk */
#define SC2CTR_CK_TM3UFLOW 0x0003 /* - timer 3 underflow */
+#else /* CONFIG_AM33_2 */
+#define SC2CTR_CK 0x0007 /* clock source select */
+#define SC2CTR_CK_TM9UFLOW_8 0x0000 /* - 1/8 timer 9 underflow */
+#define SC2CTR_CK_IOCLK_8 0x0001 /* - 1/8 IOCLK */
+#define SC2CTR_CK_IOCLK_32 0x0002 /* - 1/32 IOCLK */
+#define SC2CTR_CK_TM3UFLOW_2 0x0003 /* - 1/2 timer 3 underflow */
+#define SC2CTR_CK_TM1UFLOW_8 0x0004 /* - 1/8 timer 1 underflow */
+#define SC2CTR_CK_TM3UFLOW_8 0x0005 /* - 1/8 timer 3 underflow */
+#define SC2CTR_CK_EXTERN_8 0x0006 /* - 1/8 external clock */
+#define SC2CTR_CK_EXTERN 0x0007 /* - external clock */
+#endif /* CONFIG_AM33_2 */
#define SC2CTR_STB 0x0008 /* stop bit select */
#define SC2CTR_STB_1BIT 0x0000 /* - 1 stop bit */
#define SC2CTR_STB_2BIT 0x0008 /* - 2 stop bits */
@@ -134,9 +153,14 @@
#define SC2ICR_RES 0x04 /* receive error select */
#define SC2ICR_RI 0x01 /* receive interrupt cause */
-#define SC2TXB __SYSREG(0xd4002018, u8) /* transmit buffer reg */
-#define SC2RXB __SYSREG(0xd4002019, u8) /* receive buffer reg */
-#define SC2STR __SYSREG(0xd400201c, u8) /* status reg */
+#define SC2TXB __SYSREG(0xd4002028, u8) /* transmit buffer reg */
+#define SC2RXB __SYSREG(0xd4002029, u8) /* receive buffer reg */
+
+#ifdef CONFIG_AM33_2
+#define SC2STR __SYSREG(0xd400202c, u8) /* status reg */
+#else /* CONFIG_AM33_2 */
+#define SC2STR __SYSREG(0xd400202c, u16) /* status reg */
+#endif /* CONFIG_AM33_2 */
#define SC2STR_OEF 0x0001 /* overrun error found */
#define SC2STR_PEF 0x0002 /* parity error found */
#define SC2STR_FEF 0x0004 /* framing error found */
@@ -146,10 +170,17 @@
#define SC2STR_RXF 0x0040 /* receive status */
#define SC2STR_TXF 0x0080 /* transmit status */
+#ifdef CONFIG_AM33_2
#define SC2TIM __SYSREG(0xd400202d, u8) /* status reg */
+#endif
+#ifdef CONFIG_AM33_2
#define SC2RXIRQ 24 /* serial 2 Receive IRQ */
#define SC2TXIRQ 25 /* serial 2 Transmit IRQ */
+#else /* CONFIG_AM33_2 */
+#define SC2RXIRQ 68 /* serial 2 Receive IRQ */
+#define SC2TXIRQ 69 /* serial 2 Transmit IRQ */
+#endif /* CONFIG_AM33_2 */
#define SC2RXICR GxICR(SC2RXIRQ) /* serial 2 receive intr ctrl reg */
#define SC2TXICR GxICR(SC2TXIRQ) /* serial 2 transmit intr ctrl reg */
diff --git a/arch/mn10300/include/asm/serial.h b/arch/mn10300/include/asm/serial.h
index a29445cddd6..23a79929359 100644
--- a/arch/mn10300/include/asm/serial.h
+++ b/arch/mn10300/include/asm/serial.h
@@ -9,10 +9,8 @@
* 2 of the Licence, or (at your option) any later version.
*/
-/*
- * The ASB2305 has an 18.432 MHz clock the UART
- */
-#define BASE_BAUD (18432000 / 16)
+#ifndef _ASM_SERIAL_H
+#define _ASM_SERIAL_H
/* Standard COM flags (except for COM4, because of the 8514 problem) */
#ifdef CONFIG_SERIAL_DETECT_IRQ
@@ -34,3 +32,5 @@
#endif
#include <unit/serial.h>
+
+#endif /* _ASM_SERIAL_H */
diff --git a/arch/mn10300/include/asm/smp.h b/arch/mn10300/include/asm/smp.h
index 4eb8c61b7da..a3930e43a95 100644
--- a/arch/mn10300/include/asm/smp.h
+++ b/arch/mn10300/include/asm/smp.h
@@ -3,6 +3,16 @@
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 13-Nov-2006 MEI Define IPI-IRQ number and add inline/macro function
+ * for SMP support.
+ * 22-Jan-2007 MEI Add the define related to SMP_BOOT_IRQ.
+ * 23-Feb-2007 MEI Add the define related to SMP icache invalidate.
+ * 23-Jun-2008 MEI Delete INTC_IPI.
+ * 22-Jul-2008 MEI Add smp_nmi_call_function and related defines.
+ * 04-Aug-2008 MEI Delete USE_DOIRQ_CACHE_IPI.
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
@@ -11,8 +21,85 @@
#ifndef _ASM_SMP_H
#define _ASM_SMP_H
-#ifdef CONFIG_SMP
-#error SMP not yet supported for MN10300
+#ifndef __ASSEMBLY__
+#include <linux/threads.h>
+#include <linux/cpumask.h>
#endif
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+
+#define RESCHEDULE_IPI 63
+#define CALL_FUNC_SINGLE_IPI 192
+#define LOCAL_TIMER_IPI 193
+#define FLUSH_CACHE_IPI 194
+#define CALL_FUNCTION_NMI_IPI 195
+#define GDB_NMI_IPI 196
+
+#define SMP_BOOT_IRQ 195
+
+#define RESCHEDULE_GxICR_LV GxICR_LEVEL_6
+#define CALL_FUNCTION_GxICR_LV GxICR_LEVEL_4
+#define LOCAL_TIMER_GxICR_LV GxICR_LEVEL_4
+#define FLUSH_CACHE_GxICR_LV GxICR_LEVEL_0
+#define SMP_BOOT_GxICR_LV GxICR_LEVEL_0
+
+#define TIME_OUT_COUNT_BOOT_IPI 100
+#define DELAY_TIME_BOOT_IPI 75000
+
+
+#ifndef __ASSEMBLY__
+
+/**
+ * raw_smp_processor_id - Determine the raw CPU ID of the CPU running it
+ *
+ * What we really want to do is to use the CPUID hardware CPU register to get
+ * this information, but accesses to that aren't cached, and run at system bus
+ * speed, not CPU speed. A copy of this value is, however, stored in the
+ * thread_info struct, and that can be cached.
+ *
+ * An alternate way of dealing with this could be to use the EPSW.S bits to
+ * cache this information for systems with up to four CPUs.
+ */
+#if 0
+#define raw_smp_processor_id() (CPUID)
+#else
+#define raw_smp_processor_id() (current_thread_info()->cpu)
#endif
+
+static inline int cpu_logical_map(int cpu)
+{
+ return cpu;
+}
+
+static inline int cpu_number_map(int cpu)
+{
+ return cpu;
+}
+
+
+extern cpumask_t cpu_boot_map;
+
+extern void smp_init_cpus(void);
+extern void smp_cache_interrupt(void);
+extern void send_IPI_allbutself(int irq);
+extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait);
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#endif /* __ASSEMBLY__ */
+#else /* CONFIG_SMP */
+#ifndef __ASSEMBLY__
+
+static inline void smp_init_cpus(void) {}
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_SMP_H */
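As the raw_smp_processor_id() comment above explains, the CPU number comes from the cached thread_info rather than the uncached CPUID register. A minimal sketch of the two forms (illustrative only):

	/* Sketch: the two ways of reading the CPU number discussed above. */
	int cached_cpu   = current_thread_info()->cpu;  /* cacheable; what raw_smp_processor_id() uses */
	int uncached_cpu = CPUID;                       /* system register read at bus speed */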
diff --git a/arch/mn10300/include/asm/smsc911x.h b/arch/mn10300/include/asm/smsc911x.h
new file mode 100644
index 00000000000..2fcd1080322
--- /dev/null
+++ b/arch/mn10300/include/asm/smsc911x.h
@@ -0,0 +1 @@
+#include <unit/smsc911x.h>
diff --git a/arch/mn10300/include/asm/spinlock.h b/arch/mn10300/include/asm/spinlock.h
index 4bf9c8b169e..93429154e89 100644
--- a/arch/mn10300/include/asm/spinlock.h
+++ b/arch/mn10300/include/asm/spinlock.h
@@ -11,6 +11,183 @@
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H
-#error SMP spinlocks not implemented for MN10300
+#include <asm/atomic.h>
+#include <asm/rwlock.h>
+#include <asm/page.h>
+/*
+ * Simple spin lock operations. There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) != 0)
+#define arch_spin_unlock_wait(x) do { barrier(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ asm volatile(
+ " bclr 1,(0,%0) \n"
+ :
+ : "a"(&lock->slock)
+ : "memory", "cc");
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ int ret;
+
+ asm volatile(
+ " mov 1,%0 \n"
+ " bset %0,(%1) \n"
+ " bne 1f \n"
+ " clr %0 \n"
+ "1: xor 1,%0 \n"
+ : "=d"(ret)
+ : "a"(&lock->slock)
+ : "memory", "cc");
+
+ return ret;
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ asm volatile(
+ "1: bset 1,(0,%0) \n"
+ " bne 1b \n"
+ :
+ : "a"(&lock->slock)
+ : "memory", "cc");
+}
+
+static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
+ unsigned long flags)
+{
+ int temp;
+
+ asm volatile(
+ "1: bset 1,(0,%2) \n"
+ " beq 3f \n"
+ " mov %1,epsw \n"
+ "2: mov (0,%2),%0 \n"
+ " or %0,%0 \n"
+ " bne 2b \n"
+ " mov %3,%0 \n"
+ " mov %0,epsw \n"
+ " nop \n"
+ " nop \n"
+ " bra 1b\n"
+ "3: \n"
+ : "=&d" (temp)
+ : "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
+ : "memory", "cc");
+}
+
+#ifdef __KERNEL__
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_read_can_lock(x) ((int)(x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+
+/*
+ * On mn10300, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ __build_read_lock(rw, "__read_lock_failed");
+#else
+ {
+ atomic_t *count = (atomic_t *)rw;
+ while (atomic_dec_return(count) < 0)
+ atomic_inc(count);
+ }
+#endif
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ __build_write_lock(rw, "__write_lock_failed");
+#else
+ {
+ atomic_t *count = (atomic_t *)rw;
+ while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
+ atomic_add(RW_LOCK_BIAS, count);
+ }
+#endif
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ __build_read_unlock(rw);
+#else
+ {
+ atomic_t *count = (atomic_t *)rw;
+ atomic_inc(count);
+ }
+#endif
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ __build_write_unlock(rw);
+#else
+ {
+ atomic_t *count = (atomic_t *)rw;
+ atomic_add(RW_LOCK_BIAS, count);
+ }
+#endif
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *lock)
+{
+ atomic_t *count = (atomic_t *)lock;
+ atomic_dec(count);
+ if (atomic_read(count) >= 0)
+ return 1;
+ atomic_inc(count);
+ return 0;
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *lock)
+{
+ atomic_t *count = (atomic_t *)lock;
+ if (atomic_sub_and_test(RW_LOCK_BIAS, count))
+ return 1;
+ atomic_add(RW_LOCK_BIAS, count);
+ return 0;
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define _raw_spin_relax(lock) cpu_relax()
+#define _raw_read_relax(lock) cpu_relax()
+#define _raw_write_relax(lock) cpu_relax()
+
+#endif /* __KERNEL__ */
#endif /* _ASM_SPINLOCK_H */
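The rwlock paths above keep all state in one counter biased by RW_LOCK_BIAS: readers decrement it, a writer subtracts the whole bias, and success is judged from the result. A standalone user-space sketch of that arithmetic (hypothetical, not part of this patch):

/* Illustration of the RW_LOCK_BIAS counter arithmetic used above. */
#include <stdio.h>

#define RW_LOCK_BIAS 0x01000000

int main(void)
{
	int count = RW_LOCK_BIAS;   /* unlocked */

	count--;                    /* one reader in: count stays positive */
	printf("one reader : %#x\n", (unsigned int)count);

	count++;                    /* reader out */
	count -= RW_LOCK_BIAS;      /* writer in: zero means no readers held it */
	printf("writer held: %#x\n", (unsigned int)count);
	return 0;
}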
diff --git a/arch/mn10300/include/asm/spinlock_types.h b/arch/mn10300/include/asm/spinlock_types.h
new file mode 100644
index 00000000000..653dc519b40
--- /dev/null
+++ b/arch/mn10300/include/asm/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_SPINLOCK_TYPES_H
+#define _ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct arch_spinlock {
+ unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ unsigned int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+
+#endif /* _ASM_SPINLOCK_TYPES_H */
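A minimal usage sketch of the new initialisers (assumed declarations for illustration, not taken from the patch); note that __ARCH_RW_LOCK_UNLOCKED relies on RW_LOCK_BIAS from the new asm/rwlock.h:

	/* Hypothetical static lock declarations using the initialisers above. */
	static arch_spinlock_t demo_lock   = __ARCH_SPIN_LOCK_UNLOCKED;
	static arch_rwlock_t   demo_rwlock = __ARCH_RW_LOCK_UNLOCKED;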
diff --git a/arch/mn10300/include/asm/system.h b/arch/mn10300/include/asm/system.h
index 9f7c7e17c01..8ff3e5aaca4 100644
--- a/arch/mn10300/include/asm/system.h
+++ b/arch/mn10300/include/asm/system.h
@@ -12,12 +12,29 @@
#define _ASM_SYSTEM_H
#include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <linux/irqflags.h>
+#include <asm/atomic.h>
+
+#if !defined(CONFIG_LAZY_SAVE_FPU)
+struct fpu_state_struct;
+extern asmlinkage void fpu_save(struct fpu_state_struct *);
+#define switch_fpu(prev, next) \
+ do { \
+ if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) { \
+ (prev)->thread.fpu_flags &= ~THREAD_HAS_FPU; \
+ (prev)->thread.uregs->epsw &= ~EPSW_FE; \
+ fpu_save(&(prev)->thread.fpu_state); \
+ } \
+ } while (0)
+#else
+#define switch_fpu(prev, next) do {} while (0)
+#endif
struct task_struct;
struct thread_struct;
@@ -30,6 +47,7 @@ struct task_struct *__switch_to(struct thread_struct *prev,
/* context switching is now performed out-of-line in switch_to.S */
#define switch_to(prev, next, last) \
do { \
+ switch_fpu(prev, next); \
current->thread.wchan = (u_long) __builtin_return_address(0); \
(last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
mb(); \
@@ -40,8 +58,6 @@ do { \
#define nop() asm volatile ("nop")
-#endif /* !__ASSEMBLY__ */
-
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
@@ -68,64 +84,19 @@ do { \
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
-#else
+#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#else /* CONFIG_SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
-#endif
-
#define set_mb(var, value) do { var = value; mb(); } while (0)
+#endif /* CONFIG_SMP */
+
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define read_barrier_depends() do {} while (0)
#define smp_read_barrier_depends() do {} while (0)
-/*****************************************************************************/
-/*
- * MN10300 doesn't actually have an exchange instruction
- */
-#ifndef __ASSEMBLY__
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-static inline
-unsigned long __xchg(volatile unsigned long *m, unsigned long val)
-{
- unsigned long retval;
- unsigned long flags;
-
- local_irq_save(flags);
- retval = *m;
- *m = val;
- local_irq_restore(flags);
- return retval;
-}
-
-#define xchg(ptr, v) \
- ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \
- (unsigned long)(v)))
-
-static inline unsigned long __cmpxchg(volatile unsigned long *m,
- unsigned long old, unsigned long new)
-{
- unsigned long retval;
- unsigned long flags;
-
- local_irq_save(flags);
- retval = *m;
- if (retval == old)
- *m = new;
- local_irq_restore(flags);
- return retval;
-}
-
-#define cmpxchg(ptr, o, n) \
- ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
- (unsigned long)(o), \
- (unsigned long)(n)))
-
#endif /* !__ASSEMBLY__ */
-
#endif /* __KERNEL__ */
#endif /* _ASM_SYSTEM_H */
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 2001cb657a9..aa07a4a5d79 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -16,10 +16,6 @@
#include <asm/page.h>
-#ifndef __ASSEMBLY__
-#include <asm/processor.h>
-#endif
-
#define PREEMPT_ACTIVE 0x10000000
#ifdef CONFIG_4KSTACKS
@@ -38,10 +34,14 @@
* must also be changed
*/
#ifndef __ASSEMBLY__
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
+ struct pt_regs *frame; /* current exception frame */
unsigned long flags; /* low level flags */
__u32 cpu; /* current CPU */
__s32 preempt_count; /* 0 => preemptable, <0 => BUG */
@@ -55,6 +55,10 @@ struct thread_info {
__u8 supervisor_stack[0];
};
+#define thread_info_to_uregs(ti) \
+ ((struct pt_regs *) \
+ ((unsigned long)ti + THREAD_SIZE - sizeof(struct pt_regs)))
+
#else /* !__ASSEMBLY__ */
#ifndef __ASM_OFFSETS_H__
@@ -102,6 +106,12 @@ struct thread_info *current_thread_info(void)
return ti;
}
+static inline __attribute__((const))
+struct pt_regs *current_frame(void)
+{
+ return current_thread_info()->frame;
+}
+
/* how to get the current stack pointer from C */
static inline unsigned long current_stack_pointer(void)
{
diff --git a/arch/mn10300/include/asm/timer-regs.h b/arch/mn10300/include/asm/timer-regs.h
index 1d883b7f94a..c634977caf6 100644
--- a/arch/mn10300/include/asm/timer-regs.h
+++ b/arch/mn10300/include/asm/timer-regs.h
@@ -17,21 +17,27 @@
#ifdef __KERNEL__
-/* timer prescalar control */
+/*
+ * Timer prescaler control
+ */
#define TMPSCNT __SYSREG(0xd4003071, u8) /* timer prescaler control */
#define TMPSCNT_ENABLE 0x80 /* timer prescaler enable */
#define TMPSCNT_DISABLE 0x00 /* timer prescaler disable */
-/* 8 bit timers */
+/*
+ * 8-bit timers
+ */
#define TM0MD __SYSREG(0xd4003000, u8) /* timer 0 mode register */
#define TM0MD_SRC 0x07 /* timer source */
#define TM0MD_SRC_IOCLK 0x00 /* - IOCLK */
#define TM0MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
#define TM0MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
-#define TM0MD_SRC_TM2IO 0x03 /* - TM2IO pin input */
#define TM0MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM0MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
+#define TM0MD_SRC_TM2IO 0x03 /* - TM2IO pin input */
#define TM0MD_SRC_TM0IO 0x07 /* - TM0IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM0MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM0MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -43,7 +49,9 @@
#define TM1MD_SRC_TM0CASCADE 0x03 /* - cascade with timer 0 */
#define TM1MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM1MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM1MD_SRC_TM1IO 0x07 /* - TM1IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM1MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM1MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -55,7 +63,9 @@
#define TM2MD_SRC_TM1CASCADE 0x03 /* - cascade with timer 1 */
#define TM2MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM2MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#if defined(CONFIG_AM33_2)
#define TM2MD_SRC_TM2IO 0x07 /* - TM2IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM2MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM2MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -64,11 +74,13 @@
#define TM3MD_SRC_IOCLK 0x00 /* - IOCLK */
#define TM3MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
#define TM3MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
-#define TM3MD_SRC_TM1CASCADE 0x03 /* - cascade with timer 2 */
+#define TM3MD_SRC_TM2CASCADE 0x03 /* - cascade with timer 2 */
#define TM3MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM3MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM3MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM3MD_SRC_TM3IO 0x07 /* - TM3IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM3MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM3MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -96,7 +108,9 @@
#define TM2ICR GxICR(TM2IRQ) /* timer 2 uflow intr ctrl reg */
#define TM3ICR GxICR(TM3IRQ) /* timer 3 uflow intr ctrl reg */
-/* 16-bit timers 4,5 & 7-11 */
+/*
+ * 16-bit timers 4,5 & 7-15
+ */
#define TM4MD __SYSREG(0xd4003080, u8) /* timer 4 mode register */
#define TM4MD_SRC 0x07 /* timer source */
#define TM4MD_SRC_IOCLK 0x00 /* - IOCLK */
@@ -105,7 +119,9 @@
#define TM4MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM4MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM4MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM4MD_SRC_TM4IO 0x07 /* - TM4IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM4MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM4MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -118,7 +134,11 @@
#define TM5MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM5MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM5MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM5MD_SRC_TM5IO 0x07 /* - TM5IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM5MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM5MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM5MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -130,7 +150,9 @@
#define TM7MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM7MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM7MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM7MD_SRC_TM7IO 0x07 /* - TM7IO pin input */
+#endif /* CONFIG_AM33_2 */
#define TM7MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM7MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -143,7 +165,11 @@
#define TM8MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM8MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM8MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM8MD_SRC_TM8IO 0x07 /* - TM8IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM8MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM8MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM8MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -156,7 +182,11 @@
#define TM9MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM9MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM9MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM9MD_SRC_TM9IO 0x07 /* - TM9IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM9MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM9MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM9MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -169,7 +199,11 @@
#define TM10MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM10MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM10MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM10MD_SRC_TM10IO 0x07 /* - TM10IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM10MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM10MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM10MD_COUNT_ENABLE 0x80 /* timer count enable */
@@ -178,32 +212,101 @@
#define TM11MD_SRC_IOCLK 0x00 /* - IOCLK */
#define TM11MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
#define TM11MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
-#define TM11MD_SRC_TM7CASCADE 0x03 /* - cascade with timer 7 */
#define TM11MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
#define TM11MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
#define TM11MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
#define TM11MD_SRC_TM11IO 0x07 /* - TM11IO pin input */
+#else /* !CONFIG_AM33_2 */
+#define TM11MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
#define TM11MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
#define TM11MD_COUNT_ENABLE 0x80 /* timer count enable */
+#if defined(CONFIG_AM34_2)
+#define TM12MD __SYSREG(0xd4003180, u8) /* timer 12 mode register */
+#define TM12MD_SRC 0x07 /* timer source */
+#define TM12MD_SRC_IOCLK 0x00 /* - IOCLK */
+#define TM12MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
+#define TM12MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
+#define TM12MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
+#define TM12MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#define TM12MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#define TM12MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#define TM12MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
+#define TM12MD_COUNT_ENABLE 0x80 /* timer count enable */
+
+#define TM13MD __SYSREG(0xd4003182, u8) /* timer 13 mode register */
+#define TM13MD_SRC 0x07 /* timer source */
+#define TM13MD_SRC_IOCLK 0x00 /* - IOCLK */
+#define TM13MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
+#define TM13MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
+#define TM13MD_SRC_TM12CASCADE 0x03 /* - cascade with timer 12 */
+#define TM13MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
+#define TM13MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#define TM13MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#define TM13MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#define TM13MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
+#define TM13MD_COUNT_ENABLE 0x80 /* timer count enable */
+
+#define TM14MD __SYSREG(0xd4003184, u8) /* timer 14 mode register */
+#define TM14MD_SRC 0x07 /* timer source */
+#define TM14MD_SRC_IOCLK 0x00 /* - IOCLK */
+#define TM14MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
+#define TM14MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
+#define TM14MD_SRC_TM13CASCADE 0x03 /* - cascade with timer 13 */
+#define TM14MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
+#define TM14MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#define TM14MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#define TM14MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#define TM14MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
+#define TM14MD_COUNT_ENABLE 0x80 /* timer count enable */
+
+#define TM15MD __SYSREG(0xd4003186, u8) /* timer 15 mode register */
+#define TM15MD_SRC 0x07 /* timer source */
+#define TM15MD_SRC_IOCLK 0x00 /* - IOCLK */
+#define TM15MD_SRC_IOCLK_8 0x01 /* - 1/8 IOCLK */
+#define TM15MD_SRC_IOCLK_32 0x02 /* - 1/32 IOCLK */
+#define TM15MD_SRC_TM0UFLOW 0x04 /* - timer 0 underflow */
+#define TM15MD_SRC_TM1UFLOW 0x05 /* - timer 1 underflow */
+#define TM15MD_SRC_TM2UFLOW 0x06 /* - timer 2 underflow */
+#define TM15MD_SRC_TM7UFLOW 0x07 /* - timer 7 underflow */
+#define TM15MD_INIT_COUNTER 0x40 /* initialize TMnBC = TMnBR */
+#define TM15MD_COUNT_ENABLE 0x80 /* timer count enable */
+#endif /* CONFIG_AM34_2 */
+
+
#define TM4BR __SYSREG(0xd4003090, u16) /* timer 4 base register */
#define TM5BR __SYSREG(0xd4003092, u16) /* timer 5 base register */
+#define TM45BR __SYSREG(0xd4003090, u32) /* timer 4:5 base register */
#define TM7BR __SYSREG(0xd4003096, u16) /* timer 7 base register */
#define TM8BR __SYSREG(0xd4003098, u16) /* timer 8 base register */
#define TM9BR __SYSREG(0xd400309a, u16) /* timer 9 base register */
+#define TM89BR __SYSREG(0xd4003098, u32) /* timer 8:9 base register */
#define TM10BR __SYSREG(0xd400309c, u16) /* timer 10 base register */
#define TM11BR __SYSREG(0xd400309e, u16) /* timer 11 base register */
-#define TM45BR __SYSREG(0xd4003090, u32) /* timer 4:5 base register */
+#if defined(CONFIG_AM34_2)
+#define TM12BR __SYSREG(0xd4003190, u16) /* timer 12 base register */
+#define TM13BR __SYSREG(0xd4003192, u16) /* timer 13 base register */
+#define TM14BR __SYSREG(0xd4003194, u16) /* timer 14 base register */
+#define TM15BR __SYSREG(0xd4003196, u16) /* timer 15 base register */
+#endif /* CONFIG_AM34_2 */
#define TM4BC __SYSREG(0xd40030a0, u16) /* timer 4 binary counter */
#define TM5BC __SYSREG(0xd40030a2, u16) /* timer 5 binary counter */
#define TM45BC __SYSREG(0xd40030a0, u32) /* timer 4:5 binary counter */
-
#define TM7BC __SYSREG(0xd40030a6, u16) /* timer 7 binary counter */
#define TM8BC __SYSREG(0xd40030a8, u16) /* timer 8 binary counter */
#define TM9BC __SYSREG(0xd40030aa, u16) /* timer 9 binary counter */
+#define TM89BC __SYSREG(0xd40030a8, u32) /* timer 8:9 binary counter */
#define TM10BC __SYSREG(0xd40030ac, u16) /* timer 10 binary counter */
#define TM11BC __SYSREG(0xd40030ae, u16) /* timer 11 binary counter */
+#if defined(CONFIG_AM34_2)
+#define TM12BC __SYSREG(0xd40031a0, u16) /* timer 12 binary counter */
+#define TM13BC __SYSREG(0xd40031a2, u16) /* timer 13 binary counter */
+#define TM14BC __SYSREG(0xd40031a4, u16) /* timer 14 binary counter */
+#define TM15BC __SYSREG(0xd40031a6, u16) /* timer 15 binary counter */
+#endif /* CONFIG_AM34_2 */
#define TM4IRQ 6 /* timer 4 IRQ */
#define TM5IRQ 7 /* timer 5 IRQ */
@@ -212,6 +315,12 @@
#define TM9IRQ 13 /* timer 9 IRQ */
#define TM10IRQ 14 /* timer 10 IRQ */
#define TM11IRQ 15 /* timer 11 IRQ */
+#if defined(CONFIG_AM34_2)
+#define TM12IRQ 64 /* timer 12 IRQ */
+#define TM13IRQ 65 /* timer 13 IRQ */
+#define TM14IRQ 66 /* timer 14 IRQ */
+#define TM15IRQ 67 /* timer 15 IRQ */
+#endif /* CONFIG_AM34_2 */
#define TM4ICR GxICR(TM4IRQ) /* timer 4 uflow intr ctrl reg */
#define TM5ICR GxICR(TM5IRQ) /* timer 5 uflow intr ctrl reg */
@@ -220,8 +329,16 @@
#define TM9ICR GxICR(TM9IRQ) /* timer 9 uflow intr ctrl reg */
#define TM10ICR GxICR(TM10IRQ) /* timer 10 uflow intr ctrl reg */
#define TM11ICR GxICR(TM11IRQ) /* timer 11 uflow intr ctrl reg */
-
-/* 16-bit timer 6 */
+#if defined(CONFIG_AM34_2)
+#define TM12ICR GxICR(TM12IRQ) /* timer 12 uflow intr ctrl reg */
+#define TM13ICR GxICR(TM13IRQ) /* timer 13 uflow intr ctrl reg */
+#define TM14ICR GxICR(TM14IRQ) /* timer 14 uflow intr ctrl reg */
+#define TM15ICR GxICR(TM15IRQ) /* timer 15 uflow intr ctrl reg */
+#endif /* CONFIG_AM34_2 */
+
+/*
+ * 16-bit timer 6
+ */
#define TM6MD __SYSREG(0xd4003084, u16) /* timer6 mode register */
#define TM6MD_SRC 0x0007 /* timer source */
#define TM6MD_SRC_IOCLK 0x0000 /* - IOCLK */
@@ -229,10 +346,14 @@
#define TM6MD_SRC_IOCLK_32 0x0002 /* - 1/32 IOCLK */
#define TM6MD_SRC_TM0UFLOW 0x0004 /* - timer 0 underflow */
#define TM6MD_SRC_TM1UFLOW 0x0005 /* - timer 1 underflow */
-#define TM6MD_SRC_TM6IOB_BOTH 0x0006 /* - TM6IOB pin input (both edges) */
+#define TM6MD_SRC_TM2UFLOW 0x0006 /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
+/* #define TM6MD_SRC_TM6IOB_BOTH 0x0006 */ /* - TM6IOB pin input (both edges) */
#define TM6MD_SRC_TM6IOB_SINGLE 0x0007 /* - TM6IOB pin input (single edge) */
-#define TM6MD_CLR_ENABLE 0x0010 /* clear count enable */
+#endif /* CONFIG_AM33_2 */
#define TM6MD_ONESHOT_ENABLE 0x0040 /* oneshot count */
+#define TM6MD_CLR_ENABLE 0x0010 /* clear count enable */
+#if defined(CONFIG_AM33_2)
#define TM6MD_TRIG_ENABLE 0x0080 /* TM6IOB pin trigger enable */
#define TM6MD_PWM 0x3800 /* PWM output mode */
#define TM6MD_PWM_DIS 0x0000 /* - disabled */
@@ -240,10 +361,15 @@
#define TM6MD_PWM_11BIT 0x1800 /* - 11 bits mode */
#define TM6MD_PWM_12BIT 0x3000 /* - 12 bits mode */
#define TM6MD_PWM_14BIT 0x3800 /* - 14 bits mode */
+#endif /* CONFIG_AM33_2 */
+
#define TM6MD_INIT_COUNTER 0x4000 /* initialize TMnBC to zero */
#define TM6MD_COUNT_ENABLE 0x8000 /* timer count enable */
#define TM6MDA __SYSREG(0xd40030b4, u8) /* timer6 cmp/cap A mode reg */
+#define TM6MDA_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
+#define TM6MDA_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
+#if defined(CONFIG_AM33_2)
#define TM6MDA_OUT 0x07 /* output select */
#define TM6MDA_OUT_SETA_RESETB 0x00 /* - set at match A, reset at match B */
#define TM6MDA_OUT_SETA_RESETOV 0x01 /* - set at match A, reset at overflow */
@@ -251,30 +377,35 @@
#define TM6MDA_OUT_RESETA 0x03 /* - reset at match A */
#define TM6MDA_OUT_TOGGLE 0x04 /* - toggle on match A */
#define TM6MDA_MODE 0xc0 /* compare A register mode */
-#define TM6MDA_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
-#define TM6MDA_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
#define TM6MDA_MODE_CAP_S_EDGE 0x80 /* - capture, single edge mode */
#define TM6MDA_MODE_CAP_D_EDGE 0xc0 /* - capture, double edge mode */
#define TM6MDA_EDGE 0x20 /* compare A edge select */
#define TM6MDA_EDGE_FALLING 0x00 /* capture on falling edge */
#define TM6MDA_EDGE_RISING 0x20 /* capture on rising edge */
#define TM6MDA_CAPTURE_ENABLE 0x10 /* capture enable */
+#else /* !CONFIG_AM33_2 */
+#define TM6MDA_MODE 0x40 /* compare A register mode */
+#endif /* CONFIG_AM33_2 */
#define TM6MDB __SYSREG(0xd40030b5, u8) /* timer6 cmp/cap B mode reg */
+#define TM6MDB_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
+#define TM6MDB_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
+#if defined(CONFIG_AM33_2)
#define TM6MDB_OUT 0x07 /* output select */
#define TM6MDB_OUT_SETB_RESETA 0x00 /* - set at match B, reset at match A */
#define TM6MDB_OUT_SETB_RESETOV 0x01 /* - set at match B, reset at overflow */
#define TM6MDB_OUT_RESETB 0x03 /* - reset at match B */
#define TM6MDB_OUT_TOGGLE 0x04 /* - toggle on match B */
#define TM6MDB_MODE 0xc0 /* compare B register mode */
-#define TM6MDB_MODE_CMP_SINGLE 0x00 /* - compare, single buffer mode */
-#define TM6MDB_MODE_CMP_DOUBLE 0x40 /* - compare, double buffer mode */
#define TM6MDB_MODE_CAP_S_EDGE 0x80 /* - capture, single edge mode */
#define TM6MDB_MODE_CAP_D_EDGE 0xc0 /* - capture, double edge mode */
#define TM6MDB_EDGE 0x20 /* compare B edge select */
#define TM6MDB_EDGE_FALLING 0x00 /* capture on falling edge */
#define TM6MDB_EDGE_RISING 0x20 /* capture on rising edge */
#define TM6MDB_CAPTURE_ENABLE 0x10 /* capture enable */
+#else /* !CONFIG_AM33_2 */
+#define TM6MDB_MODE 0x40 /* compare B register mode */
+#endif /* CONFIG_AM33_2 */
#define TM6CA __SYSREG(0xd40030c4, u16) /* timer6 cmp/capture reg A */
#define TM6CB __SYSREG(0xd40030d4, u16) /* timer6 cmp/capture reg B */
@@ -288,6 +419,34 @@
#define TM6AICR GxICR(TM6AIRQ) /* timer 6A intr control reg */
#define TM6BICR GxICR(TM6BIRQ) /* timer 6B intr control reg */
+#if defined(CONFIG_AM34_2)
+/*
+ * MTM: OS Tick-Timer
+ */
+#define TMTMD __SYSREG(0xd4004100, u8) /* Tick Timer mode register */
+#define TMTMD_TMTLDE 0x40 /* initialize TMTBC = TMTBR */
+#define TMTMD_TMTCNE 0x80 /* timer count enable */
+
+#define TMTBR __SYSREG(0xd4004110, u32) /* Tick Timer base register */
+#define TMTBC __SYSREG(0xd4004120, u32) /* Tick Timer binary counter */
+
+/*
+ * MTM: OS Timestamp-Timer
+ */
+#define TMSMD __SYSREG(0xd4004140, u8) /* Timestamp Timer mode register */
+#define TMSMD_TMSLDE 0x40 /* initialize TMSBC = TMSBR */
+#define TMSMD_TMSCNE 0x80 /* timer count enable */
+
+#define TMSBR __SYSREG(0xd4004150, u32) /* Timestamp Timer base register */
+#define TMSBC __SYSREG(0xd4004160, u32) /* Timestamp Timer binary counter */
+
+#define TMTIRQ 119 /* OS Tick timer IRQ */
+#define TMSIRQ 120 /* Timestamp timer IRQ */
+
+#define TMTICR GxICR(TMTIRQ) /* OS Tick timer uflow intr ctrl reg */
+#define TMSICR GxICR(TMSIRQ) /* Timestamp timer uflow intr ctrl reg */
+#endif /* CONFIG_AM34_2 */
+
#endif /* __KERNEL__ */
#endif /* _ASM_TIMER_REGS_H */
diff --git a/arch/mn10300/include/asm/timex.h b/arch/mn10300/include/asm/timex.h
index 8d031f9e117..bd4e90dfe6c 100644
--- a/arch/mn10300/include/asm/timex.h
+++ b/arch/mn10300/include/asm/timex.h
@@ -16,18 +16,30 @@
#define TICK_SIZE (tick_nsec / 1000)
-#define CLOCK_TICK_RATE 1193180 /* Underlying HZ - this should probably be set
- * to something appropriate, but what? */
-
-extern cycles_t cacheflush_time;
+#define CLOCK_TICK_RATE MN10300_JCCLK /* Underlying HZ */
#ifdef __KERNEL__
+extern cycles_t cacheflush_time;
+
static inline cycles_t get_cycles(void)
{
return read_timestamp_counter();
}
+extern int init_clockevents(void);
+extern int init_clocksource(void);
+
+static inline void setup_jiffies_interrupt(int irq,
+ struct irqaction *action)
+{
+ u16 tmp;
+ setup_irq(irq, action);
+ set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
+ GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
+ tmp = GxICR(irq);
+}
+
#endif /* __KERNEL__ */
#endif /* _ASM_TIMEX_H */
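
The setup_jiffies_interrupt() inline added above wraps the usual MN10300 interrupt-controller idiom: install the irqaction with setup_irq(), raise the ICR priority to CONFIG_TIMER_IRQ_LEVEL, OR in the enable/detect/request bits, and read the register back so the posted write is flushed before returning. A minimal caller, sketched here only for illustration (the handler and irqaction names are invented; the real user is init_clockevents() in cevt-mn10300.c later in this series):

#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/timex.h>

static irqreturn_t example_jiffies_tick(int irq, void *dev_id)
{
	/* acknowledge the timer and run the clockevent handler here */
	return IRQ_HANDLED;
}

static struct irqaction example_jiffies_irq = {
	.name		= "example jiffies timer",	/* illustrative only */
	.handler	= example_jiffies_tick,
	.flags		= IRQF_DISABLED | IRQF_TIMER,
};

static void __init example_timer_setup(void)
{
	/* TMJCIRQ is the jiffies timer IRQ used elsewhere in this patch */
	setup_jiffies_interrupt(TMJCIRQ, &example_jiffies_irq);
}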
diff --git a/arch/mn10300/include/asm/tlbflush.h b/arch/mn10300/include/asm/tlbflush.h
index 1a7e29281c5..efddd6e1ade 100644
--- a/arch/mn10300/include/asm/tlbflush.h
+++ b/arch/mn10300/include/asm/tlbflush.h
@@ -11,24 +11,78 @@
#ifndef _ASM_TLBFLUSH_H
#define _ASM_TLBFLUSH_H
+#include <linux/mm.h>
#include <asm/processor.h>
-#define __flush_tlb() \
-do { \
- int w; \
- __asm__ __volatile__ \
- (" mov %1,%0 \n" \
- " or %2,%0 \n" \
- " mov %0,%1 \n" \
- : "=d"(w) \
- : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV) \
- : "cc", "memory" \
- ); \
-} while (0)
+struct tlb_state {
+ struct mm_struct *active_mm;
+ int state;
+};
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
-#define __flush_tlb_all() __flush_tlb()
-#define __flush_tlb_one(addr) __flush_tlb()
+/**
+ * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb(void)
+{
+ int w;
+ asm volatile(
+ " mov %1,%0 \n"
+ " or %2,%0 \n"
+ " mov %0,%1 \n"
+ : "=d"(w)
+ : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)
+ : "cc", "memory");
+}
+
+/**
+ * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb_all(void)
+{
+ local_flush_tlb();
+}
+/**
+ * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
+ */
+static inline void local_flush_tlb_one(unsigned long addr)
+{
+ local_flush_tlb();
+}
+
+/**
+ * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
+ * @mm: The MM to flush for
+ * @addr: The address of the target page in RAM (not its page struct)
+ */
+static inline
+void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
+{
+ unsigned long pteu, flags, cnx;
+
+ addr &= PAGE_MASK;
+
+ local_irq_save(flags);
+
+ cnx = 1;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+ cnx = mm->context.tlbpid[smp_processor_id()];
+#endif
+ if (cnx) {
+ pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+ pteu |= cnx & xPTEU_PID;
+#endif
+ IPTEU = pteu;
+ DPTEU = pteu;
+ if (IPTEL & xPTEL_V)
+ IPTEL = 0;
+ if (DPTEL & xPTEL_V)
+ DPTEL = 0;
+ }
+ local_irq_restore(flags);
+}
/*
* TLB flushing:
@@ -40,41 +94,61 @@ do { \
* - flush_tlb_range(mm, start, end) flushes a range of pages
* - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
*/
-#define flush_tlb_all() \
-do { \
- preempt_disable(); \
- __flush_tlb_all(); \
- preempt_enable(); \
-} while (0)
-
-#define flush_tlb_mm(mm) \
-do { \
- preempt_disable(); \
- __flush_tlb_all(); \
- preempt_enable(); \
-} while (0)
-
-#define flush_tlb_range(vma, start, end) \
-do { \
- unsigned long __s __attribute__((unused)) = (start); \
- unsigned long __e __attribute__((unused)) = (end); \
- preempt_disable(); \
- __flush_tlb_all(); \
- preempt_enable(); \
-} while (0)
-
-
-#define __flush_tlb_global() flush_tlb_all()
-#define flush_tlb() flush_tlb_all()
-#define flush_tlb_kernel_range(start, end) \
-do { \
- unsigned long __s __attribute__((unused)) = (start); \
- unsigned long __e __attribute__((unused)) = (end); \
- flush_tlb_all(); \
-} while (0)
-
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
-
-#define flush_tlb_pgtables(mm, start, end) do {} while (0)
+#ifdef CONFIG_SMP
+
+#include <asm/smp.h>
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+
+#define flush_tlb() flush_tlb_current_task()
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ flush_tlb_mm(vma->vm_mm);
+}
+
+#else /* CONFIG_SMP */
+
+static inline void flush_tlb_all(void)
+{
+ preempt_disable();
+ local_flush_tlb_all();
+ preempt_enable();
+}
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ preempt_disable();
+ local_flush_tlb_all();
+ preempt_enable();
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ preempt_disable();
+ local_flush_tlb_all();
+ preempt_enable();
+}
+
+#define flush_tlb_page(vma, addr) local_flush_tlb_page((vma)->vm_mm, addr)
+#define flush_tlb() flush_tlb_all()
+
+#endif /* CONFIG_SMP */
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_all();
+}
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
#endif /* _ASM_TLBFLUSH_H */
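
On the uniprocessor side above, flush_tlb_page() resolves to local_flush_tlb_page(), which writes the page's virtual address (plus the TLB PID when CONFIG_MN10300_TLB_USE_PIDR is set) into IPTEU/DPTEU and clears any matching valid entry with interrupts disabled. A sketch of the usual caller pattern, using a hypothetical helper that has just installed a new PTE for a VMA:

#include <linux/mm.h>
#include <asm/tlbflush.h>

static void example_update_pte(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep, pte_t pte)
{
	set_pte_at(vma->vm_mm, addr, ptep, pte);	/* install translation */
	flush_tlb_page(vma, addr);			/* drop any stale TLB entry */
}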
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 197a7af3dd8..679dee0bbd0 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -14,9 +14,8 @@
/*
* User space memory access functions
*/
-#include <linux/sched.h>
+#include <linux/thread_info.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/errno.h>
#define VERIFY_READ 0
@@ -29,7 +28,6 @@
*
* For historical reasons, these macros are grossly misnamed.
*/
-
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#define KERNEL_XDS MAKE_MM_SEG(0xBFFFFFFF)
@@ -377,7 +375,7 @@ unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
#if 0
-#error don't use - these macros don't increment to & from pointers
+#error "don't use - these macros don't increment to & from pointers"
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size) \
do { \
diff --git a/arch/mn10300/kernel/Makefile b/arch/mn10300/kernel/Makefile
index 23f2ab67574..8f5f1e81baf 100644
--- a/arch/mn10300/kernel/Makefile
+++ b/arch/mn10300/kernel/Makefile
@@ -3,13 +3,16 @@
#
extra-y := head.o init_task.o vmlinux.lds
-obj-y := process.o signal.o entry.o fpu.o traps.o irq.o \
+fpu-obj-y := fpu-nofpu.o fpu-nofpu-low.o
+fpu-obj-$(CONFIG_FPU) := fpu.o fpu-low.o
+
+obj-y := process.o signal.o entry.o traps.o irq.o \
ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \
- switch_to.o mn10300_ksyms.o kernel_execve.o
+ switch_to.o mn10300_ksyms.o kernel_execve.o $(fpu-obj-y)
-obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o
+obj-$(CONFIG_SMP) += smp.o smp-low.o
-obj-$(CONFIG_FPU) += fpu-low.o
+obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o
obj-$(CONFIG_MN10300_TTYSM) += mn10300-serial.o mn10300-serial-low.o \
mn10300-debug.o
@@ -17,7 +20,7 @@ obj-$(CONFIG_GDBSTUB) += gdb-stub.o gdb-low.o
obj-$(CONFIG_GDBSTUB_ON_TTYSx) += gdb-io-serial.o gdb-io-serial-low.o
obj-$(CONFIG_GDBSTUB_ON_TTYSMx) += gdb-io-ttysm.o gdb-io-ttysm-low.o
-ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
+ifeq ($(CONFIG_MN10300_CACHE_ENABLED),y)
obj-$(CONFIG_GDBSTUB) += gdb-cache.o
endif
@@ -25,3 +28,5 @@ obj-$(CONFIG_MN10300_RTC) += rtc.o
obj-$(CONFIG_PROFILE) += profile.o profile-low.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_CSRC_MN10300) += csrc-mn10300.o
+obj-$(CONFIG_CEVT_MN10300) += cevt-mn10300.o
diff --git a/arch/mn10300/kernel/asm-offsets.c b/arch/mn10300/kernel/asm-offsets.c
index 02dc7e461fe..96f24fab7de 100644
--- a/arch/mn10300/kernel/asm-offsets.c
+++ b/arch/mn10300/kernel/asm-offsets.c
@@ -23,6 +23,7 @@ void foo(void)
OFFSET(TI_task, thread_info, task);
OFFSET(TI_exec_domain, thread_info, exec_domain);
+ OFFSET(TI_frame, thread_info, frame);
OFFSET(TI_flags, thread_info, flags);
OFFSET(TI_cpu, thread_info, cpu);
OFFSET(TI_preempt_count, thread_info, preempt_count);
@@ -66,7 +67,15 @@ void foo(void)
OFFSET(THREAD_SP, thread_struct, sp);
OFFSET(THREAD_A3, thread_struct, a3);
OFFSET(THREAD_USP, thread_struct, usp);
- OFFSET(THREAD_FRAME, thread_struct, __frame);
+#ifdef CONFIG_FPU
+ OFFSET(THREAD_FPU_FLAGS, thread_struct, fpu_flags);
+ OFFSET(THREAD_FPU_STATE, thread_struct, fpu_state);
+ DEFINE(__THREAD_USING_FPU, THREAD_USING_FPU);
+ DEFINE(__THREAD_HAS_FPU, THREAD_HAS_FPU);
+#endif /* CONFIG_FPU */
+ BLANK();
+
+ OFFSET(TASK_THREAD, task_struct, thread);
BLANK();
DEFINE(CLONE_VM_asm, CLONE_VM);
diff --git a/arch/mn10300/kernel/cevt-mn10300.c b/arch/mn10300/kernel/cevt-mn10300.c
new file mode 100644
index 00000000000..d4cb535bf78
--- /dev/null
+++ b/arch/mn10300/kernel/cevt-mn10300.c
@@ -0,0 +1,131 @@
+/* MN10300 clockevents
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <asm/timex.h>
+#include "internal.h"
+
+#ifdef CONFIG_SMP
+#if (CONFIG_NR_CPUS > 2) && !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+#error "This doesn't scale well! Need per-core local timers."
+#endif
+#else /* CONFIG_SMP */
+#define stop_jiffies_counter1()
+#define reload_jiffies_counter1(x)
+#define TMJC1IRQ TMJCIRQ
+#endif
+
+
+static int next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if (cpu == 0) {
+ stop_jiffies_counter();
+ reload_jiffies_counter(delta - 1);
+ } else {
+ stop_jiffies_counter1();
+ reload_jiffies_counter1(delta - 1);
+ }
+ return 0;
+}
+
+static void set_clock_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ /* Nothing to do ... */
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, mn10300_clockevent_device);
+static DEFINE_PER_CPU(struct irqaction, timer_irq);
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd;
+ unsigned int cpu = smp_processor_id();
+
+ if (cpu == 0)
+ stop_jiffies_counter();
+ else
+ stop_jiffies_counter1();
+
+ cd = &per_cpu(mn10300_clockevent_device, cpu);
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static void event_handler(struct clock_event_device *dev)
+{
+}
+
+int __init init_clockevents(void)
+{
+ struct clock_event_device *cd;
+ struct irqaction *iact;
+ unsigned int cpu = smp_processor_id();
+
+ cd = &per_cpu(mn10300_clockevent_device, cpu);
+
+ if (cpu == 0) {
+ stop_jiffies_counter();
+ cd->irq = TMJCIRQ;
+ } else {
+ stop_jiffies_counter1();
+ cd->irq = TMJC1IRQ;
+ }
+
+ cd->name = "Timestamp";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT;
+
+ /* Calculate the min / max delta */
+ clockevent_set_clock(cd, MN10300_JCCLK);
+
+ cd->max_delta_ns = clockevent_delta2ns(TMJCBR_MAX, cd);
+ cd->min_delta_ns = clockevent_delta2ns(100, cd);
+
+ cd->rating = 200;
+ cd->cpumask = cpumask_of(smp_processor_id());
+ cd->set_mode = set_clock_mode;
+ cd->event_handler = event_handler;
+ cd->set_next_event = next_event;
+
+ iact = &per_cpu(timer_irq, cpu);
+ iact->flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER;
+ iact->handler = timer_interrupt;
+
+ clockevents_register_device(cd);
+
+#if defined(CONFIG_SMP) && !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+ /* setup timer irq affinity so it only runs on this cpu */
+ {
+ struct irq_desc *desc;
+ desc = irq_to_desc(cd->irq);
+ cpumask_copy(desc->affinity, cpumask_of(cpu));
+ iact->flags |= IRQF_NOBALANCING;
+ }
+#endif
+
+ if (cpu == 0) {
+ reload_jiffies_counter(MN10300_JC_PER_HZ - 1);
+ iact->name = "CPU0 Timer";
+ } else {
+ reload_jiffies_counter1(MN10300_JC_PER_HZ - 1);
+ iact->name = "CPU1 Timer";
+ }
+
+ setup_jiffies_interrupt(cd->irq, iact);
+
+ return 0;
+}
diff --git a/arch/mn10300/kernel/csrc-mn10300.c b/arch/mn10300/kernel/csrc-mn10300.c
new file mode 100644
index 00000000000..ba2f0c4d6e0
--- /dev/null
+++ b/arch/mn10300/kernel/csrc-mn10300.c
@@ -0,0 +1,35 @@
+/* MN10300 clocksource
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <asm/timex.h>
+#include "internal.h"
+
+static cycle_t mn10300_read(struct clocksource *cs)
+{
+ return read_timestamp_counter();
+}
+
+static struct clocksource clocksource_mn10300 = {
+ .name = "TSC",
+ .rating = 200,
+ .read = mn10300_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+int __init init_clocksource(void)
+{
+ startup_timestamp_counter();
+ clocksource_set_clock(&clocksource_mn10300, MN10300_TSCCLK);
+ clocksource_register(&clocksource_mn10300);
+ return 0;
+}
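
The two new files split timekeeping into a clocksource (this file, reading the free-running timestamp counter) and a per-CPU oneshot clockevent (cevt-mn10300.c above). The arch's time.c is not part of these hunks, so the caller below is only a sketch of how the init hooks declared in <asm/timex.h> are expected to be wired together at boot:

#include <linux/init.h>
#include <asm/timex.h>

static void __init example_time_init(void)
{
	init_clocksource();	/* register the TSC clocksource (csrc-mn10300.c) */
	init_clockevents();	/* register this CPU's clockevent (cevt-mn10300.c) */
}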
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index 3d394b4eefb..f00b9bafcd3 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -28,25 +28,17 @@
#include <asm/asm-offsets.h>
#include <asm/frame.inc>
+#if defined(CONFIG_SMP) && defined(CONFIG_GDBSTUB)
+#include <asm/gdb-stub.h>
+#endif /* CONFIG_SMP && CONFIG_GDBSTUB */
+
#ifdef CONFIG_PREEMPT
-#define preempt_stop __cli
+#define preempt_stop LOCAL_IRQ_DISABLE
#else
#define preempt_stop
#define resume_kernel restore_all
#endif
- .macro __cli
- and ~EPSW_IM,epsw
- or EPSW_IE|MN10300_CLI_LEVEL,epsw
- nop
- nop
- nop
- .endm
- .macro __sti
- or EPSW_IE|EPSW_IM_7,epsw
- .endm
-
-
.am33_2
###############################################################################
@@ -88,7 +80,7 @@ syscall_call:
syscall_exit:
# make sure we don't miss an interrupt setting need_resched or
# sigpending between sampling and the rti
- __cli
+ LOCAL_IRQ_DISABLE
mov (TI_flags,a2),d2
btst _TIF_ALLWORK_MASK,d2
bne syscall_exit_work
@@ -105,7 +97,7 @@ restore_all:
syscall_exit_work:
btst _TIF_SYSCALL_TRACE,d2
beq work_pending
- __sti # could let syscall_trace_exit() call
+ LOCAL_IRQ_ENABLE # could let syscall_trace_exit() call
# schedule() instead
mov fp,d0
call syscall_trace_exit[],0 # do_syscall_trace(regs)
@@ -121,7 +113,7 @@ work_resched:
# make sure we don't miss an interrupt setting need_resched or
# sigpending between sampling and the rti
- __cli
+ LOCAL_IRQ_DISABLE
# is there any work to be done other than syscall tracing?
mov (TI_flags,a2),d2
@@ -168,7 +160,7 @@ ret_from_intr:
ENTRY(resume_userspace)
# make sure we don't miss an interrupt setting need_resched or
# sigpending between sampling and the rti
- __cli
+ LOCAL_IRQ_DISABLE
# is there any work to be done on int/exception return?
mov (TI_flags,a2),d2
@@ -178,7 +170,7 @@ ENTRY(resume_userspace)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
- __cli
+ LOCAL_IRQ_DISABLE
mov (TI_preempt_count,a2),d0 # non-zero preempt_count ?
cmp 0,d0
bne restore_all
@@ -216,31 +208,6 @@ ENTRY(irq_handler)
###############################################################################
#
-# Monitor Signal handler entry point
-#
-###############################################################################
-ENTRY(monitor_signal)
- movbu (0xae000001),d1
- cmp 1,d1
- beq monsignal
- ret [],0
-
-monsignal:
- or EPSW_NMID,epsw
- mov d0,a0
- mov a0,sp
- mov (REG_EPSW,fp),d1
- and ~EPSW_nSL,d1
- mov d1,(REG_EPSW,fp)
- movm (sp),[d2,d3,a2,a3,exreg0,exreg1,exother]
- mov (sp),a1
- mov a1,usp
- movm (sp),[other]
- add 4,sp
-here: jmp 0x8e000008-here+0x8e000008
-
-###############################################################################
-#
# Double Fault handler entry point
# - note that there will not be a stack, D0/A0 will hold EPSW/PC as were
#
@@ -276,6 +243,10 @@ double_fault_loop:
ENTRY(raw_bus_error)
add -4,sp
mov d0,(sp)
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d0
+ mov d0,(MMUCTR)
+#endif
mov (BCBERR),d0 # what
btst BCBERR_BEMR_DMA,d0 # see if it was an external bus error
beq __common_exception_aux # it wasn't
@@ -302,11 +273,88 @@ ENTRY(nmi_handler)
add -4,sp
mov d0,(sp)
mov (TBR),d0
+
+#ifdef CONFIG_SMP
+ add -4,sp
+ mov d0,(sp) # save d0(TBR)
+ movhu (NMIAGR),d0
+ and NMIAGR_GN,d0
+ lsr 0x2,d0
+ cmp CALL_FUNCTION_NMI_IPI,d0
+ bne 5f # if not call function, jump
+
+ # function call nmi ipi
+ add 4,sp # no need to store TBR
+ mov GxICR_DETECT,d0 # clear NMI request
+ movbu d0,(GxICR(CALL_FUNCTION_NMI_IPI))
+ movhu (GxICR(CALL_FUNCTION_NMI_IPI)),d0
+ and ~EPSW_NMID,epsw # enable NMI
+
+ mov (sp),d0 # restore d0
+ SAVE_ALL
+ call smp_nmi_call_function_interrupt[],0
+ RESTORE_ALL
+
+5:
+#ifdef CONFIG_GDBSTUB
+ cmp GDB_NMI_IPI,d0
+ bne 3f # if not gdb nmi ipi, jump
+
+ # gdb nmi ipi
+ add 4,sp # no need to store TBR
+ mov GxICR_DETECT,d0 # clear NMI
+ movbu d0,(GxICR(GDB_NMI_IPI))
+ movhu (GxICR(GDB_NMI_IPI)),d0
+ and ~EPSW_NMID,epsw # enable NMI
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+ mov (gdbstub_nmi_opr_type),d0
+ cmp GDBSTUB_NMI_CACHE_PURGE,d0
+ bne 4f # if not gdb cache purge, jump
+
+ # gdb cache purge nmi ipi
+ add -20,sp
+ mov d1,(4,sp)
+ mov a0,(8,sp)
+ mov a1,(12,sp)
+ mov mdr,d0
+ mov d0,(16,sp)
+ call gdbstub_local_purge_cache[],0
+ mov 0x1,d0
+ mov (CPUID),d1
+ asl d1,d0
+ mov gdbstub_nmi_cpumask,a0
+ bclr d0,(a0)
+ mov (4,sp),d1
+ mov (8,sp),a0
+ mov (12,sp),a1
+ mov (16,sp),d0
+ mov d0,mdr
+ add 20,sp
+ mov (sp),d0
+ add 4,sp
+ rti
+4:
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+ # gdb wait nmi ipi
+ mov (sp),d0
+ SAVE_ALL
+ call gdbstub_nmi_wait[],0
+ RESTORE_ALL
+3:
+#endif /* CONFIG_GDBSTUB */
+ mov (sp),d0 # restore TBR to d0
+ add 4,sp
+#endif /* CONFIG_SMP */
+
bra __common_exception_nonmi
ENTRY(__common_exception)
add -4,sp
mov d0,(sp)
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d0
+ mov d0,(MMUCTR)
+#endif
__common_exception_aux:
mov (TBR),d0
@@ -331,15 +379,21 @@ __common_exception_nonmi:
mov d0,(REG_ORIG_D0,fp)
#ifdef CONFIG_GDBSTUB
+#ifdef CONFIG_SMP
+ call gdbstub_busy_check[],0
+ and d0,d0 # check return value
+ beq 2f
+#else /* CONFIG_SMP */
btst 0x01,(gdbstub_busy)
beq 2f
+#endif /* CONFIG_SMP */
and ~EPSW_IE,epsw
mov fp,d0
mov a2,d1
call gdbstub_exception[],0 # gdbstub itself caused an exception
bra restore_all
2:
-#endif
+#endif /* CONFIG_GDBSTUB */
mov fp,d0 # arg 0: stacked register file
mov a2,d1 # arg 1: exception number
@@ -374,11 +428,7 @@ ENTRY(set_excp_vector)
add exception_table,d0
mov d1,(d0)
mov 4,d1
-#if defined(CONFIG_MN10300_CACHE_WBACK)
- jmp mn10300_dcache_flush_inv_range2
-#else
ret [],0
-#endif
###############################################################################
#
diff --git a/arch/mn10300/kernel/fpu-low.S b/arch/mn10300/kernel/fpu-low.S
index 96cfd47e68d..78df25cfae2 100644
--- a/arch/mn10300/kernel/fpu-low.S
+++ b/arch/mn10300/kernel/fpu-low.S
@@ -8,25 +8,14 @@
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
+#include <linux/linkage.h>
#include <asm/cpu-regs.h>
+#include <asm/smp.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
-###############################################################################
-#
-# void fpu_init_state(void)
-# - initialise the FPU
-#
-###############################################################################
- .globl fpu_init_state
- .type fpu_init_state,@function
-fpu_init_state:
- mov epsw,d0
- or EPSW_FE,epsw
-
-#ifdef CONFIG_MN10300_PROC_MN103E010
- nop
- nop
- nop
-#endif
+.macro FPU_INIT_STATE_ALL
fmov 0,fs0
fmov fs0,fs1
fmov fs0,fs2
@@ -60,7 +49,100 @@ fpu_init_state:
fmov fs0,fs30
fmov fs0,fs31
fmov FPCR_INIT,fpcr
+.endm
+
+.macro FPU_SAVE_ALL areg,dreg
+ fmov fs0,(\areg+)
+ fmov fs1,(\areg+)
+ fmov fs2,(\areg+)
+ fmov fs3,(\areg+)
+ fmov fs4,(\areg+)
+ fmov fs5,(\areg+)
+ fmov fs6,(\areg+)
+ fmov fs7,(\areg+)
+ fmov fs8,(\areg+)
+ fmov fs9,(\areg+)
+ fmov fs10,(\areg+)
+ fmov fs11,(\areg+)
+ fmov fs12,(\areg+)
+ fmov fs13,(\areg+)
+ fmov fs14,(\areg+)
+ fmov fs15,(\areg+)
+ fmov fs16,(\areg+)
+ fmov fs17,(\areg+)
+ fmov fs18,(\areg+)
+ fmov fs19,(\areg+)
+ fmov fs20,(\areg+)
+ fmov fs21,(\areg+)
+ fmov fs22,(\areg+)
+ fmov fs23,(\areg+)
+ fmov fs24,(\areg+)
+ fmov fs25,(\areg+)
+ fmov fs26,(\areg+)
+ fmov fs27,(\areg+)
+ fmov fs28,(\areg+)
+ fmov fs29,(\areg+)
+ fmov fs30,(\areg+)
+ fmov fs31,(\areg+)
+ fmov fpcr,\dreg
+ mov \dreg,(\areg)
+.endm
+
+.macro FPU_RESTORE_ALL areg,dreg
+ fmov (\areg+),fs0
+ fmov (\areg+),fs1
+ fmov (\areg+),fs2
+ fmov (\areg+),fs3
+ fmov (\areg+),fs4
+ fmov (\areg+),fs5
+ fmov (\areg+),fs6
+ fmov (\areg+),fs7
+ fmov (\areg+),fs8
+ fmov (\areg+),fs9
+ fmov (\areg+),fs10
+ fmov (\areg+),fs11
+ fmov (\areg+),fs12
+ fmov (\areg+),fs13
+ fmov (\areg+),fs14
+ fmov (\areg+),fs15
+ fmov (\areg+),fs16
+ fmov (\areg+),fs17
+ fmov (\areg+),fs18
+ fmov (\areg+),fs19
+ fmov (\areg+),fs20
+ fmov (\areg+),fs21
+ fmov (\areg+),fs22
+ fmov (\areg+),fs23
+ fmov (\areg+),fs24
+ fmov (\areg+),fs25
+ fmov (\areg+),fs26
+ fmov (\areg+),fs27
+ fmov (\areg+),fs28
+ fmov (\areg+),fs29
+ fmov (\areg+),fs30
+ fmov (\areg+),fs31
+ mov (\areg),\dreg
+ fmov \dreg,fpcr
+.endm
+###############################################################################
+#
+# void fpu_init_state(void)
+# - initialise the FPU
+#
+###############################################################################
+ .globl fpu_init_state
+ .type fpu_init_state,@function
+fpu_init_state:
+ mov epsw,d0
+ or EPSW_FE,epsw
+
+#ifdef CONFIG_MN10300_PROC_MN103E010
+ nop
+ nop
+ nop
+#endif
+ FPU_INIT_STATE_ALL
#ifdef CONFIG_MN10300_PROC_MN103E010
nop
nop
@@ -89,40 +171,7 @@ fpu_save:
nop
#endif
mov d0,a0
- fmov fs0,(a0+)
- fmov fs1,(a0+)
- fmov fs2,(a0+)
- fmov fs3,(a0+)
- fmov fs4,(a0+)
- fmov fs5,(a0+)
- fmov fs6,(a0+)
- fmov fs7,(a0+)
- fmov fs8,(a0+)
- fmov fs9,(a0+)
- fmov fs10,(a0+)
- fmov fs11,(a0+)
- fmov fs12,(a0+)
- fmov fs13,(a0+)
- fmov fs14,(a0+)
- fmov fs15,(a0+)
- fmov fs16,(a0+)
- fmov fs17,(a0+)
- fmov fs18,(a0+)
- fmov fs19,(a0+)
- fmov fs20,(a0+)
- fmov fs21,(a0+)
- fmov fs22,(a0+)
- fmov fs23,(a0+)
- fmov fs24,(a0+)
- fmov fs25,(a0+)
- fmov fs26,(a0+)
- fmov fs27,(a0+)
- fmov fs28,(a0+)
- fmov fs29,(a0+)
- fmov fs30,(a0+)
- fmov fs31,(a0+)
- fmov fpcr,d0
- mov d0,(a0)
+ FPU_SAVE_ALL a0,d0
#ifdef CONFIG_MN10300_PROC_MN103E010
nop
nop
@@ -135,63 +184,75 @@ fpu_save:
###############################################################################
#
-# void fpu_restore(struct fpu_state_struct *)
-# - restore the fpu state
-# - note that an FPU Operational exception might occur during this process
+# void fpu_disabled(void)
+# - handle an exception due to the FPU being disabled
+# when CONFIG_FPU is enabled
#
###############################################################################
- .globl fpu_restore
- .type fpu_restore,@function
-fpu_restore:
- mov epsw,d1
- or EPSW_FE,epsw /* enable the FPU so we can access it */
-
-#ifdef CONFIG_MN10300_PROC_MN103E010
+ .type fpu_disabled,@function
+ .globl fpu_disabled
+fpu_disabled:
+ or EPSW_nAR|EPSW_FE,epsw
nop
nop
-#endif
- mov d0,a0
- fmov (a0+),fs0
- fmov (a0+),fs1
- fmov (a0+),fs2
- fmov (a0+),fs3
- fmov (a0+),fs4
- fmov (a0+),fs5
- fmov (a0+),fs6
- fmov (a0+),fs7
- fmov (a0+),fs8
- fmov (a0+),fs9
- fmov (a0+),fs10
- fmov (a0+),fs11
- fmov (a0+),fs12
- fmov (a0+),fs13
- fmov (a0+),fs14
- fmov (a0+),fs15
- fmov (a0+),fs16
- fmov (a0+),fs17
- fmov (a0+),fs18
- fmov (a0+),fs19
- fmov (a0+),fs20
- fmov (a0+),fs21
- fmov (a0+),fs22
- fmov (a0+),fs23
- fmov (a0+),fs24
- fmov (a0+),fs25
- fmov (a0+),fs26
- fmov (a0+),fs27
- fmov (a0+),fs28
- fmov (a0+),fs29
- fmov (a0+),fs30
- fmov (a0+),fs31
- mov (a0),d0
- fmov d0,fpcr
-#ifdef CONFIG_MN10300_PROC_MN103E010
nop
+
+ mov sp,a1
+ mov (a1),d1 /* get epsw of user context */
+ and ~(THREAD_SIZE-1),a1 /* a1: (thread_info *ti) */
+ mov (TI_task,a1),a2 /* a2: (task_struct *tsk) */
+ btst EPSW_nSL,d1
+ beq fpu_used_in_kernel
+
+ or EPSW_FE,d1
+ mov d1,(sp)
+ mov (TASK_THREAD+THREAD_FPU_FLAGS,a2),d1
+#ifndef CONFIG_LAZY_SAVE_FPU
+ or __THREAD_HAS_FPU,d1
+ mov d1,(TASK_THREAD+THREAD_FPU_FLAGS,a2)
+#else /* !CONFIG_LAZY_SAVE_FPU */
+ mov (fpu_state_owner),a0
+ cmp 0,a0
+ beq fpu_regs_save_end
+
+ mov (TASK_THREAD+THREAD_UREGS,a0),a1
+ add TASK_THREAD+THREAD_FPU_STATE,a0
+ FPU_SAVE_ALL a0,d0
+
+ mov (REG_EPSW,a1),d0
+ and ~EPSW_FE,d0
+ mov d0,(REG_EPSW,a1)
+
+fpu_regs_save_end:
+ mov a2,(fpu_state_owner)
+#endif /* !CONFIG_LAZY_SAVE_FPU */
+
+ btst __THREAD_USING_FPU,d1
+ beq fpu_regs_init
+ add TASK_THREAD+THREAD_FPU_STATE,a2
+ FPU_RESTORE_ALL a2,d0
+ rti
+
+fpu_regs_init:
+ FPU_INIT_STATE_ALL
+ add TASK_THREAD+THREAD_FPU_FLAGS,a2
+ bset __THREAD_USING_FPU,(0,a2)
+ rti
+
+fpu_used_in_kernel:
+ and ~(EPSW_nAR|EPSW_FE),epsw
nop
nop
-#endif
- mov d1,epsw
- ret [],0
+ add -4,sp
+ SAVE_ALL
+ mov -1,d0
+ mov d0,(REG_ORIG_D0,fp)
+
+ and ~EPSW_NMID,epsw
+
+ mov fp,d0
+ call fpu_disabled_in_kernel[],0
+ jmp ret_from_exception
- .size fpu_restore,.-fpu_restore
+ .size fpu_disabled,.-fpu_disabled
diff --git a/arch/mn10300/kernel/fpu-nofpu-low.S b/arch/mn10300/kernel/fpu-nofpu-low.S
new file mode 100644
index 00000000000..7ea087a549f
--- /dev/null
+++ b/arch/mn10300/kernel/fpu-nofpu-low.S
@@ -0,0 +1,39 @@
+/* MN10300 Low level FPU management operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/linkage.h>
+#include <asm/cpu-regs.h>
+#include <asm/smp.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
+
+###############################################################################
+#
+# void fpu_disabled(void)
+# - handle an exception due to the FPU being disabled
+# when CONFIG_FPU is disabled
+#
+###############################################################################
+ .type fpu_disabled,@function
+ .globl fpu_disabled
+fpu_disabled:
+ add -4,sp
+ SAVE_ALL
+ mov -1,d0
+ mov d0,(REG_ORIG_D0,fp)
+
+ and ~EPSW_NMID,epsw
+
+ mov fp,d0
+ call unexpected_fpu_exception[],0
+ jmp ret_from_exception
+
+ .size fpu_disabled,.-fpu_disabled
diff --git a/arch/mn10300/kernel/fpu-nofpu.c b/arch/mn10300/kernel/fpu-nofpu.c
new file mode 100644
index 00000000000..31c765b92c5
--- /dev/null
+++ b/arch/mn10300/kernel/fpu-nofpu.c
@@ -0,0 +1,30 @@
+/* MN10300 FPU management
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <asm/fpu.h>
+
+/*
+ * handle an FPU operational exception
+ * - there's a possibility that if the FPU is asynchronous, the signal might
+ * be meant for a process other than the current one
+ */
+asmlinkage
+void unexpected_fpu_exception(struct pt_regs *regs, enum exception_code code)
+{
+ panic("An FPU exception was received, but there's no FPU enabled.");
+}
+
+/*
+ * fill in the FPU structure for a core dump
+ */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpreg)
+{
+ return 0; /* not valid */
+}
diff --git a/arch/mn10300/kernel/fpu.c b/arch/mn10300/kernel/fpu.c
index e705f25ad5f..5f9c3fa19a8 100644
--- a/arch/mn10300/kernel/fpu.c
+++ b/arch/mn10300/kernel/fpu.c
@@ -12,56 +12,19 @@
#include <asm/fpu.h>
#include <asm/elf.h>
#include <asm/exceptions.h>
+#include <asm/system.h>
+#ifdef CONFIG_LAZY_SAVE_FPU
struct task_struct *fpu_state_owner;
+#endif
/*
- * handle an exception due to the FPU being disabled
+ * error functions in FPU disabled exception
*/
-asmlinkage void fpu_disabled(struct pt_regs *regs, enum exception_code code)
+asmlinkage void fpu_disabled_in_kernel(struct pt_regs *regs)
{
- struct task_struct *tsk = current;
-
- if (!user_mode(regs))
- die_if_no_fixup("An FPU Disabled exception happened in"
- " kernel space\n",
- regs, code);
-
-#ifdef CONFIG_FPU
- preempt_disable();
-
- /* transfer the last process's FPU state to memory */
- if (fpu_state_owner) {
- fpu_save(&fpu_state_owner->thread.fpu_state);
- fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
- }
-
- /* the current process now owns the FPU state */
- fpu_state_owner = tsk;
- regs->epsw |= EPSW_FE;
-
- /* load the FPU with the current process's FPU state or invent a new
- * clean one if the process doesn't have one */
- if (is_using_fpu(tsk)) {
- fpu_restore(&tsk->thread.fpu_state);
- } else {
- fpu_init_state();
- set_using_fpu(tsk);
- }
-
- preempt_enable();
-#else
- {
- siginfo_t info;
-
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_addr = (void *) tsk->thread.uregs->pc;
- info.si_code = FPE_FLTINV;
-
- force_sig_info(SIGFPE, &info, tsk);
- }
-#endif /* CONFIG_FPU */
+ die_if_no_fixup("An FPU Disabled exception happened in kernel space\n",
+ regs, EXCEP_FPU_DISABLED);
}
/*
@@ -71,15 +34,16 @@ asmlinkage void fpu_disabled(struct pt_regs *regs, enum exception_code code)
*/
asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
{
- struct task_struct *tsk = fpu_state_owner;
+ struct task_struct *tsk = current;
siginfo_t info;
+ u32 fpcr;
if (!user_mode(regs))
die_if_no_fixup("An FPU Operation exception happened in"
" kernel space\n",
regs, code);
- if (!tsk)
+ if (!is_using_fpu(tsk))
die_if_no_fixup("An FPU Operation exception happened,"
" but the FPU is not in use",
regs, code);
@@ -89,48 +53,45 @@ asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
info.si_addr = (void *) tsk->thread.uregs->pc;
info.si_code = FPE_FLTINV;
-#ifdef CONFIG_FPU
- {
- u32 fpcr;
+ unlazy_fpu(tsk);
- /* get FPCR (we need to enable the FPU whilst we do this) */
- asm volatile(" or %1,epsw \n"
-#ifdef CONFIG_MN10300_PROC_MN103E010
- " nop \n"
- " nop \n"
- " nop \n"
-#endif
- " fmov fpcr,%0 \n"
-#ifdef CONFIG_MN10300_PROC_MN103E010
- " nop \n"
- " nop \n"
- " nop \n"
-#endif
- " and %2,epsw \n"
- : "=&d"(fpcr)
- : "i"(EPSW_FE), "i"(~EPSW_FE)
- );
-
- if (fpcr & FPCR_EC_Z)
- info.si_code = FPE_FLTDIV;
- else if (fpcr & FPCR_EC_O)
- info.si_code = FPE_FLTOVF;
- else if (fpcr & FPCR_EC_U)
- info.si_code = FPE_FLTUND;
- else if (fpcr & FPCR_EC_I)
- info.si_code = FPE_FLTRES;
- }
-#endif
+ fpcr = tsk->thread.fpu_state.fpcr;
+
+ if (fpcr & FPCR_EC_Z)
+ info.si_code = FPE_FLTDIV;
+ else if (fpcr & FPCR_EC_O)
+ info.si_code = FPE_FLTOVF;
+ else if (fpcr & FPCR_EC_U)
+ info.si_code = FPE_FLTUND;
+ else if (fpcr & FPCR_EC_I)
+ info.si_code = FPE_FLTRES;
force_sig_info(SIGFPE, &info, tsk);
}
/*
+ * handle an FPU invalid_op exception
+ * - Derived from DO_EINFO() macro in arch/mn10300/kernel/traps.c
+ */
+asmlinkage void fpu_invalid_op(struct pt_regs *regs, enum exception_code code)
+{
+ siginfo_t info;
+
+ if (!user_mode(regs))
+ die_if_no_fixup("FPU invalid opcode", regs, code);
+
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = ILL_COPROC;
+ info.si_addr = (void *) regs->pc;
+ force_sig_info(info.si_signo, &info, current);
+}
+
+/*
* save the FPU state to a signal context
*/
int fpu_setup_sigcontext(struct fpucontext *fpucontext)
{
-#ifdef CONFIG_FPU
struct task_struct *tsk = current;
if (!is_using_fpu(tsk))
@@ -142,11 +103,19 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
*/
preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+ if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+ fpu_save(&tsk->thread.fpu_state);
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+ }
+#else /* !CONFIG_LAZY_SAVE_FPU */
if (fpu_state_owner == tsk) {
fpu_save(&tsk->thread.fpu_state);
fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
fpu_state_owner = NULL;
}
+#endif /* !CONFIG_LAZY_SAVE_FPU */
preempt_enable();
@@ -161,9 +130,6 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
return -1;
return 1;
-#else
- return 0;
-#endif
}
/*
@@ -171,17 +137,23 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
*/
void fpu_kill_state(struct task_struct *tsk)
{
-#ifdef CONFIG_FPU
/* disown anything left in the FPU */
preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+ if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+ tsk->thread.uregs->epsw &= ~EPSW_FE;
+ tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+ }
+#else /* !CONFIG_LAZY_SAVE_FPU */
if (fpu_state_owner == tsk) {
fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
fpu_state_owner = NULL;
}
+#endif /* !CONFIG_LAZY_SAVE_FPU */
preempt_enable();
-#endif
+
/* we no longer have a valid current FPU state */
clear_using_fpu(tsk);
}
@@ -195,8 +167,7 @@ int fpu_restore_sigcontext(struct fpucontext *fpucontext)
int ret;
/* load up the old FPU state */
- ret = copy_from_user(&tsk->thread.fpu_state,
- fpucontext,
+ ret = copy_from_user(&tsk->thread.fpu_state, fpucontext,
min(sizeof(struct fpu_state_struct),
sizeof(struct fpucontext)));
if (!ret)
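
fpu_exception() now reads the exception cause from tsk->thread.fpu_state.fpcr after calling unlazy_fpu(tsk), so the saved image rather than the live FPCR register is inspected. unlazy_fpu() itself lives in <asm/fpu.h> and is not shown in this diff; the sketch below is only an assumption of its shape, modelled on the two save paths visible in fpu_setup_sigcontext() above:

/* hypothetical shape of unlazy_fpu() -- not the actual <asm/fpu.h> code */
static inline void example_unlazy_fpu(struct task_struct *tsk)
{
	preempt_disable();
#ifndef CONFIG_LAZY_SAVE_FPU
	if (tsk->thread.fpu_flags & THREAD_HAS_FPU)
		fpu_save(&tsk->thread.fpu_state);
#else
	if (fpu_state_owner == tsk)
		fpu_save(&tsk->thread.fpu_state);
#endif
	preempt_enable();
}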
diff --git a/arch/mn10300/kernel/gdb-io-serial-low.S b/arch/mn10300/kernel/gdb-io-serial-low.S
index 4998b24f5d3..b1d0152e96c 100644
--- a/arch/mn10300/kernel/gdb-io-serial-low.S
+++ b/arch/mn10300/kernel/gdb-io-serial-low.S
@@ -18,6 +18,7 @@
#include <asm/thread_info.h>
#include <asm/frame.inc>
#include <asm/intctl-regs.h>
+#include <asm/irqflags.h>
#include <unit/serial.h>
.text
@@ -69,7 +70,7 @@ gdbstub_io_rx_overflow:
bra gdbstub_io_rx_done
gdbstub_io_rx_enter:
- or EPSW_IE|EPSW_IM_1,epsw
+ LOCAL_CHANGE_INTR_MASK_LEVEL(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL+1))
add -4,sp
SAVE_ALL
@@ -80,7 +81,7 @@ gdbstub_io_rx_enter:
mov fp,d0
call gdbstub_rx_irq[],0 # gdbstub_rx_irq(regs,excep)
- and ~EPSW_IE,epsw
+ LOCAL_CLI
bclr 0x01,(gdbstub_busy)
.globl gdbstub_return
diff --git a/arch/mn10300/kernel/gdb-io-serial.c b/arch/mn10300/kernel/gdb-io-serial.c
index ae663dc717e..0d5d63c91dc 100644
--- a/arch/mn10300/kernel/gdb-io-serial.c
+++ b/arch/mn10300/kernel/gdb-io-serial.c
@@ -23,6 +23,7 @@
#include <asm/exceptions.h>
#include <asm/serial-regs.h>
#include <unit/serial.h>
+#include <asm/smp.h>
/*
* initialise the GDB stub
@@ -45,22 +46,34 @@ void gdbstub_io_init(void)
XIRQxICR(GDBPORT_SERIAL_IRQ) = 0;
tmp = XIRQxICR(GDBPORT_SERIAL_IRQ);
+#if CONFIG_GDBSTUB_IRQ_LEVEL == 0
IVAR0 = EXCEP_IRQ_LEVEL0;
- set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler);
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 1
+ IVAR1 = EXCEP_IRQ_LEVEL1;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 2
+ IVAR2 = EXCEP_IRQ_LEVEL2;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 3
+ IVAR3 = EXCEP_IRQ_LEVEL3;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 4
+ IVAR4 = EXCEP_IRQ_LEVEL4;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 5
+ IVAR5 = EXCEP_IRQ_LEVEL5;
+#else
+#error "Unknown irq level for gdbstub."
+#endif
+
+ set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
+ gdbstub_io_rx_handler);
XIRQxICR(GDBPORT_SERIAL_IRQ) &= ~GxICR_REQUEST;
- XIRQxICR(GDBPORT_SERIAL_IRQ) = GxICR_ENABLE | GxICR_LEVEL_0;
+ XIRQxICR(GDBPORT_SERIAL_IRQ) =
+ GxICR_ENABLE | NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL);
tmp = XIRQxICR(GDBPORT_SERIAL_IRQ);
GDBPORT_SERIAL_IER = UART_IER_RDI | UART_IER_RLSI;
/* permit level 0 IRQs to take place */
- asm volatile(
- " and %0,epsw \n"
- " or %1,epsw \n"
- :
- : "i"(~EPSW_IM), "i"(EPSW_IE | EPSW_IM_1)
- );
+ local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
}
/*
@@ -87,6 +100,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
{
unsigned ix;
u8 ch, st;
+#if defined(CONFIG_MN10300_WD_TIMER)
+ int cpu;
+#endif
*_ch = 0xff;
@@ -104,8 +120,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
if (nonblock)
return -EAGAIN;
#ifdef CONFIG_MN10300_WD_TIMER
- watchdog_alert_counter = 0;
-#endif /* CONFIG_MN10300_WD_TIMER */
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ watchdog_alert_counter[cpu] = 0;
+#endif
goto try_again;
}
diff --git a/arch/mn10300/kernel/gdb-io-ttysm.c b/arch/mn10300/kernel/gdb-io-ttysm.c
index a560bbc3137..97dfda23342 100644
--- a/arch/mn10300/kernel/gdb-io-ttysm.c
+++ b/arch/mn10300/kernel/gdb-io-ttysm.c
@@ -58,9 +58,12 @@ void __init gdbstub_io_init(void)
gdbstub_io_set_baud(115200);
/* we want to get serial receive interrupts */
- set_intr_level(gdbstub_port->rx_irq, GxICR_LEVEL_0);
- set_intr_level(gdbstub_port->tx_irq, GxICR_LEVEL_0);
- set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler);
+ set_intr_level(gdbstub_port->rx_irq,
+ NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
+ set_intr_level(gdbstub_port->tx_irq,
+ NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
+ set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
+ gdbstub_io_rx_handler);
*gdbstub_port->rx_icr |= GxICR_ENABLE;
tmp = *gdbstub_port->rx_icr;
@@ -84,12 +87,7 @@ void __init gdbstub_io_init(void)
tmp = *gdbstub_port->_control;
/* permit level 0 IRQs only */
- asm volatile(
- " and %0,epsw \n"
- " or %1,epsw \n"
- :
- : "i"(~EPSW_IM), "i"(EPSW_IE|EPSW_IM_1)
- );
+ local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
}
/*
@@ -184,6 +182,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
{
unsigned ix;
u8 ch, st;
+#if defined(CONFIG_MN10300_WD_TIMER)
+ int cpu;
+#endif
*_ch = 0xff;
@@ -201,8 +202,9 @@ try_again:
if (nonblock)
return -EAGAIN;
#ifdef CONFIG_MN10300_WD_TIMER
- watchdog_alert_counter = 0;
-#endif /* CONFIG_MN10300_WD_TIMER */
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ watchdog_alert_counter[cpu] = 0;
+#endif
goto try_again;
}
diff --git a/arch/mn10300/kernel/gdb-stub.c b/arch/mn10300/kernel/gdb-stub.c
index 41b11706c8e..a5fc3f05309 100644
--- a/arch/mn10300/kernel/gdb-stub.c
+++ b/arch/mn10300/kernel/gdb-stub.c
@@ -440,15 +440,11 @@ static const unsigned char gdbstub_insn_sizes[256] =
static int __gdbstub_mark_bp(u8 *addr, int ix)
{
- if (addr < (u8 *) 0x70000000UL)
- return 0;
- /* 70000000-7fffffff: vmalloc area */
- if (addr < (u8 *) 0x80000000UL)
+ /* vmalloc area */
+ if (((u8 *) VMALLOC_START <= addr) && (addr < (u8 *) VMALLOC_END))
goto okay;
- if (addr < (u8 *) 0x8c000000UL)
- return 0;
- /* 8c000000-93ffffff: SRAM, SDRAM */
- if (addr < (u8 *) 0x94000000UL)
+ /* SRAM, SDRAM */
+ if (((u8 *) 0x80000000UL <= addr) && (addr < (u8 *) 0xa0000000UL))
goto okay;
return 0;
@@ -1197,9 +1193,8 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
mn10300_set_gdbleds(1);
asm volatile("mov mdr,%0" : "=d"(mdr));
- asm volatile("mov epsw,%0" : "=d"(epsw));
- asm volatile("mov %0,epsw"
- :: "d"((epsw & ~EPSW_IM) | EPSW_IE | EPSW_IM_1));
+ local_save_flags(epsw);
+ local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
gdbstub_store_fpu();
diff --git a/arch/mn10300/kernel/head.S b/arch/mn10300/kernel/head.S
index 14f27f3bfaf..73e00fc7807 100644
--- a/arch/mn10300/kernel/head.S
+++ b/arch/mn10300/kernel/head.S
@@ -19,6 +19,12 @@
#include <asm/frame.inc>
#include <asm/param.h>
#include <unit/serial.h>
+#ifdef CONFIG_SMP
+#include <asm/smp.h>
+#include <asm/intctl-regs.h>
+#include <asm/cpu-regs.h>
+#include <proc/smp-regs.h>
+#endif /* CONFIG_SMP */
__HEAD
@@ -30,17 +36,51 @@
.globl _start
.type _start,@function
_start:
+#ifdef CONFIG_SMP
+ #
+ # If this is a secondary CPU (AP), then deal with that elsewhere
+ #
+ mov (CPUID),d3
+ and CPUID_MASK,d3
+ bne startup_secondary
+
+ #
+ # We're dealing with the primary CPU (BP) here, then.
+ # Keep BP's D0,D1,D2 register for boot check.
+ #
+
+ # Set up the Boot IPI for each secondary CPU
+ mov 0x1,a0
+loop_set_secondary_icr:
+ mov a0,a1
+ asl CROSS_ICR_CPU_SHIFT,a1
+ add CROSS_GxICR(SMP_BOOT_IRQ,0),a1
+ movhu (a1),d3
+ or GxICR_ENABLE|GxICR_LEVEL_0,d3
+ movhu d3,(a1)
+ movhu (a1),d3 # flush
+ inc a0
+ cmp NR_CPUS,a0
+ bne loop_set_secondary_icr
+#endif /* CONFIG_SMP */
+
# save commandline pointer
mov d0,a3
# preload the PGD pointer register
mov swapper_pg_dir,d0
mov d0,(PTBR)
+ clr d0
+ movbu d0,(PIDR)
# turn on the TLBs
mov MMUCTR_IIV|MMUCTR_DIV,d0
mov d0,(MMUCTR)
+#ifdef CONFIG_AM34_2
+ mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0
+#else
mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
+#endif
mov d0,(MMUCTR)
# turn on AM33v2 exception handling mode and set the trap table base
@@ -51,6 +91,11 @@ _start:
mov d0,(TBR)
# invalidate and enable both of the caches
+#ifdef CONFIG_SMP
+ mov ECHCTR,a0
+ clr d0
+ mov d0,(a0)
+#endif
mov CHCTR,a0
clr d0
movhu d0,(a0) # turn off first
@@ -61,18 +106,18 @@ _start:
btst CHCTR_ICBUSY|CHCTR_DCBUSY,d0 # wait till not busy
lne
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
#ifdef CONFIG_MN10300_CACHE_WBACK
#ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC
mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
#else
mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0
-#endif /* CACHE_DISABLED */
+#endif /* NOWRALLOC */
#else
mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
#endif /* WBACK */
movhu d0,(a0) # enable
-#endif /* NOWRALLOC */
+#endif /* ENABLED */
# turn on RTS on the debug serial port if applicable
#ifdef CONFIG_MN10300_UNIT_ASB2305
@@ -206,6 +251,44 @@ __no_parameters:
call processor_init[],0
call unit_init[],0
+#ifdef CONFIG_SMP
+ # mark the primary CPU in cpu_boot_map
+ mov cpu_boot_map,a0
+ mov 0x1,d0
+ mov d0,(a0)
+
+ # signal each secondary CPU to begin booting
+ mov 0x1,d2 # CPU ID
+
+loop_request_boot_secondary:
+ mov d2,a0
+ # send SMP_BOOT_IPI to secondary CPU
+ asl CROSS_ICR_CPU_SHIFT,a0
+ add CROSS_GxICR(SMP_BOOT_IRQ,0),a0
+ movhu (a0),d0
+ or GxICR_REQUEST|GxICR_DETECT,d0
+ movhu d0,(a0)
+ movhu (a0),d0 # flush
+
+ # wait up to 100ms for AP's IPI to be received
+ clr d3
+wait_on_secondary_boot:
+ mov DELAY_TIME_BOOT_IPI,d0
+ call __delay[],0
+ inc d3
+ mov cpu_boot_map,a0
+ mov (a0),d0
+ lsr d2,d0
+ btst 0x1,d0
+ bne 1f
+ cmp TIME_OUT_COUNT_BOOT_IPI,d3
+ bne wait_on_secondary_boot
+1:
+ inc d2
+ cmp NR_CPUS,d2
+ bne loop_request_boot_secondary
+#endif /* CONFIG_SMP */
+
#ifdef CONFIG_GDBSTUB
call gdbstub_init[],0
@@ -217,7 +300,118 @@ __gdbstub_pause:
#endif
jmp start_kernel
- .size _start, _start-.
+ .size _start,.-_start
+
+###############################################################################
+#
+# Secondary CPU boot point
+#
+###############################################################################
+#ifdef CONFIG_SMP
+startup_secondary:
+ # preload the PGD pointer register
+ mov swapper_pg_dir,d0
+ mov d0,(PTBR)
+ clr d0
+ movbu d0,(PIDR)
+
+ # turn on the TLBs
+ mov MMUCTR_IIV|MMUCTR_DIV,d0
+ mov d0,(MMUCTR)
+#ifdef CONFIG_AM34_2
+ mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0
+#else
+ mov MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
+#endif
+ mov d0,(MMUCTR)
+
+ # turn on AM33v2 exception handling mode and set the trap table base
+ movhu (CPUP),d0
+ or CPUP_EXM_AM33V2,d0
+ movhu d0,(CPUP)
+
+ # set the interrupt vector table
+ mov CONFIG_INTERRUPT_VECTOR_BASE,d0
+ mov d0,(TBR)
+
+ # invalidate and enable both of the caches
+ mov ECHCTR,a0
+ clr d0
+ mov d0,(a0)
+ mov CHCTR,a0
+ clr d0
+ movhu d0,(a0) # turn off first
+ mov CHCTR_ICINV|CHCTR_DCINV,d0
+ movhu d0,(a0)
+ setlb
+ mov (a0),d0
+ btst CHCTR_ICBUSY|CHCTR_DCBUSY,d0 # wait till not busy (use CPU loop buffer)
+ lne
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+#ifdef CONFIG_MN10300_CACHE_WBACK
+#ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC
+ mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
+#else
+ mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0
+#endif /* !NOWRALLOC */
+#else
+ mov CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
+#endif /* WBACK */
+ movhu d0,(a0) # enable
+#endif /* ENABLED */
+
+ # Clear the boot IPI interrupt for this CPU
+ movhu (GxICR(SMP_BOOT_IRQ)),d0
+ and ~GxICR_REQUEST,d0
+ movhu d0,(GxICR(SMP_BOOT_IRQ))
+ movhu (GxICR(SMP_BOOT_IRQ)),d0 # flush
+
+ /* get stack */
+ mov CONFIG_INTERRUPT_VECTOR_BASE + CONFIG_BOOT_STACK_OFFSET,a0
+ mov (CPUID),d0
+ and CPUID_MASK,d0
+ mulu CONFIG_BOOT_STACK_SIZE,d0
+ sub d0,a0
+ mov a0,sp
+
+ # init interrupt for AP
+ call smp_prepare_cpu_init[],0
+
+ # mark this secondary CPU in cpu_boot_map
+ mov (CPUID),d0
+ mov 0x1,d1
+ asl d0,d1
+ mov cpu_boot_map,a0
+ bset d1,(a0)
+
+ or EPSW_IE|EPSW_IM_1,epsw # permit level 0 interrupts
+ nop
+ nop
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ # flush the local cache if it's in writeback mode
+ call mn10300_local_dcache_flush_inv[],0
+ setlb
+ mov (CHCTR),d0
+ btst CHCTR_DCBUSY,d0 # wait till not busy (use CPU loop buffer)
+ lne
+#endif
+
+ # now sleep waiting for further instructions
+secondary_sleep:
+ mov CPUM_SLEEP,d0
+ movhu d0,(CPUM)
+ nop
+ nop
+ bra secondary_sleep
+ .size startup_secondary,.-startup_secondary
+#endif /* CONFIG_SMP */
+
+###############################################################################
+#
+#
+#
+###############################################################################
ENTRY(__head_end)
/*
diff --git a/arch/mn10300/kernel/internal.h b/arch/mn10300/kernel/internal.h
index eee2eee8626..6a064ab5af0 100644
--- a/arch/mn10300/kernel/internal.h
+++ b/arch/mn10300/kernel/internal.h
@@ -9,6 +9,9 @@
* 2 of the Licence, or (at your option) any later version.
*/
+struct clocksource;
+struct clock_event_device;
+
/*
* kthread.S
*/
@@ -18,3 +21,25 @@ extern int kernel_thread_helper(int);
* entry.S
*/
extern void ret_from_fork(struct task_struct *) __attribute__((noreturn));
+
+/*
+ * smp-low.S
+ */
+#ifdef CONFIG_SMP
+extern void mn10300_low_ipi_handler(void);
+#endif
+
+/*
+ * time.c
+ */
+extern irqreturn_t local_timer_interrupt(void);
+
+/*
+ * time.c
+ */
+#ifdef CONFIG_CEVT_MN10300
+extern void clockevent_set_clock(struct clock_event_device *, unsigned int);
+#endif
+#ifdef CONFIG_CSRC_MN10300
+extern void clocksource_set_clock(struct clocksource *, unsigned int);
+#endif
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index e2d5ed891f3..c2e44597c22 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -12,11 +12,26 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
+#include <linux/cpumask.h>
#include <asm/setup.h>
+#include <asm/serial-regs.h>
-unsigned long __mn10300_irq_enabled_epsw = EPSW_IE | EPSW_IM_7;
+unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
+ [0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
+};
EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);
+#ifdef CONFIG_SMP
+static char irq_affinity_online[NR_IRQS] = {
+ [0 ... NR_IRQS - 1] = 0
+};
+
+#define NR_IRQ_WORDS ((NR_IRQS + 31) / 32)
+static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
+ [0 ... NR_IRQ_WORDS - 1] = 0
+};
+#endif /* CONFIG_SMP */
+
atomic_t irq_err_count;
/*
@@ -24,30 +39,67 @@ atomic_t irq_err_count;
*/
static void mn10300_cpupic_ack(unsigned int irq)
{
+ unsigned long flags;
u16 tmp;
- *(volatile u8 *) &GxICR(irq) = GxICR_DETECT;
+
+ flags = arch_local_cli_save();
+ GxICR_u8(irq) = GxICR_DETECT;
tmp = GxICR(irq);
+ arch_local_irq_restore(flags);
}
-static void mn10300_cpupic_mask(unsigned int irq)
+static void __mask_and_set_icr(unsigned int irq,
+ unsigned int mask, unsigned int set)
{
- u16 tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_LEVEL);
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+ tmp = GxICR(irq);
+ GxICR(irq) = (tmp & mask) | set;
tmp = GxICR(irq);
+ arch_local_irq_restore(flags);
+}
+
+static void mn10300_cpupic_mask(unsigned int irq)
+{
+ __mask_and_set_icr(irq, GxICR_LEVEL, 0);
}
static void mn10300_cpupic_mask_ack(unsigned int irq)
{
- u16 tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
- tmp = GxICR(irq);
+#ifdef CONFIG_SMP
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+
+ if (!test_and_clear_bit(irq, irq_affinity_request)) {
+ tmp = GxICR(irq);
+ GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+ tmp = GxICR(irq);
+ } else {
+ u16 tmp2;
+ tmp = GxICR(irq);
+ GxICR(irq) = (tmp & GxICR_LEVEL);
+ tmp2 = GxICR(irq);
+
+ irq_affinity_online[irq] =
+ any_online_cpu(*irq_desc[irq].affinity);
+ CROSS_GxICR(irq, irq_affinity_online[irq]) =
+ (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
+ tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
+ }
+
+ arch_local_irq_restore(flags);
+#else /* CONFIG_SMP */
+ __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
+#endif /* CONFIG_SMP */
}
static void mn10300_cpupic_unmask(unsigned int irq)
{
- u16 tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
- tmp = GxICR(irq);
+ __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE);
}
static void mn10300_cpupic_unmask_clear(unsigned int irq)
@@ -56,11 +108,89 @@ static void mn10300_cpupic_unmask_clear(unsigned int irq)
* device has ceased to assert its interrupt line and the interrupt
* channel has been disabled in the PIC, so for level-triggered
* interrupts we need to clear the request bit when we re-enable */
- u16 tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
- tmp = GxICR(irq);
+#ifdef CONFIG_SMP
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+
+ if (!test_and_clear_bit(irq, irq_affinity_request)) {
+ tmp = GxICR(irq);
+ GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
+ tmp = GxICR(irq);
+ } else {
+ tmp = GxICR(irq);
+
+ irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
+ CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
+ tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
+ }
+
+ arch_local_irq_restore(flags);
+#else /* CONFIG_SMP */
+ __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
+#endif /* CONFIG_SMP */
}
+#ifdef CONFIG_SMP
+static int
+mn10300_cpupic_setaffinity(unsigned int irq, const struct cpumask *mask)
+{
+ unsigned long flags;
+ int err;
+
+ flags = arch_local_cli_save();
+
+ /* check irq no */
+ switch (irq) {
+ case TMJCIRQ:
+ case RESCHEDULE_IPI:
+ case CALL_FUNC_SINGLE_IPI:
+ case LOCAL_TIMER_IPI:
+ case FLUSH_CACHE_IPI:
+ case CALL_FUNCTION_NMI_IPI:
+ case GDB_NMI_IPI:
+#ifdef CONFIG_MN10300_TTYSM0
+ case SC0RXIRQ:
+ case SC0TXIRQ:
+#ifdef CONFIG_MN10300_TTYSM0_TIMER8
+ case TM8IRQ:
+#elif CONFIG_MN10300_TTYSM0_TIMER2
+ case TM2IRQ:
+#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
+#endif /* CONFIG_MN10300_TTYSM0 */
+
+#ifdef CONFIG_MN10300_TTYSM1
+ case SC1RXIRQ:
+ case SC1TXIRQ:
+#ifdef CONFIG_MN10300_TTYSM1_TIMER12
+ case TM12IRQ:
+#elif CONFIG_MN10300_TTYSM1_TIMER9
+ case TM9IRQ:
+#elif CONFIG_MN10300_TTYSM1_TIMER3
+ case TM3IRQ:
+#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
+#endif /* CONFIG_MN10300_TTYSM1 */
+
+#ifdef CONFIG_MN10300_TTYSM2
+ case SC2RXIRQ:
+ case SC2TXIRQ:
+ case TM10IRQ:
+#endif /* CONFIG_MN10300_TTYSM2 */
+ err = -1;
+ break;
+
+ default:
+ set_bit(irq, irq_affinity_request);
+ err = 0;
+ break;
+ }
+
+ arch_local_irq_restore(flags);
+ return err;
+}
+#endif /* CONFIG_SMP */
+
/*
* MN10300 PIC level-triggered IRQ handling.
*
@@ -79,6 +209,9 @@ static struct irq_chip mn10300_cpu_pic_level = {
.mask = mn10300_cpupic_mask,
.mask_ack = mn10300_cpupic_mask,
.unmask = mn10300_cpupic_unmask_clear,
+#ifdef CONFIG_SMP
+ .set_affinity = mn10300_cpupic_setaffinity,
+#endif
};
/*
@@ -94,6 +227,9 @@ static struct irq_chip mn10300_cpu_pic_edge = {
.mask = mn10300_cpupic_mask,
.mask_ack = mn10300_cpupic_mask_ack,
.unmask = mn10300_cpupic_unmask,
+#ifdef CONFIG_SMP
+ .set_affinity = mn10300_cpupic_setaffinity,
+#endif
};
/*
@@ -111,14 +247,34 @@ void ack_bad_irq(int irq)
*/
void set_intr_level(int irq, u16 level)
{
- u16 tmp;
+ BUG_ON(in_interrupt());
- if (in_interrupt())
- BUG();
+ __mask_and_set_icr(irq, GxICR_ENABLE, level);
+}
- tmp = GxICR(irq);
- GxICR(irq) = (tmp & GxICR_ENABLE) | level;
- tmp = GxICR(irq);
+void mn10300_intc_set_level(unsigned int irq, unsigned int level)
+{
+ set_intr_level(irq, NUM2GxICR_LEVEL(level) & GxICR_LEVEL);
+}
+
+void mn10300_intc_clear(unsigned int irq)
+{
+ __mask_and_set_icr(irq, GxICR_LEVEL | GxICR_ENABLE, GxICR_DETECT);
+}
+
+void mn10300_intc_set(unsigned int irq)
+{
+ __mask_and_set_icr(irq, 0, GxICR_REQUEST | GxICR_DETECT);
+}
+
+void mn10300_intc_enable(unsigned int irq)
+{
+ mn10300_cpupic_unmask(irq);
+}
+
+void mn10300_intc_disable(unsigned int irq)
+{
+ mn10300_cpupic_mask(irq);
}
/*
@@ -126,7 +282,7 @@ void set_intr_level(int irq, u16 level)
* than before
* - see Documentation/mn10300/features.txt
*/
-void set_intr_postackable(int irq)
+void mn10300_set_lateack_irq_type(int irq)
{
set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
handle_level_irq);
@@ -147,6 +303,7 @@ void __init init_IRQ(void)
* interrupts */
set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
handle_level_irq);
+
unit_init_IRQ();
}
@@ -156,20 +313,22 @@ void __init init_IRQ(void)
asmlinkage void do_IRQ(void)
{
unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
+ unsigned int cpu_id = smp_processor_id();
int irq;
sp = current_stack_pointer();
- if (sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN)
- BUG();
+ BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);
/* make sure local_irq_enable() doesn't muck up the interrupt priority
* setting in EPSW */
- old_irq_enabled_epsw = __mn10300_irq_enabled_epsw;
+ old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
local_save_flags(epsw);
- __mn10300_irq_enabled_epsw = EPSW_IE | (EPSW_IM & epsw);
+ __mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;
- __IRQ_STAT(smp_processor_id(), __irq_count)++;
+#ifdef CONFIG_MN10300_WD_TIMER
+ __IRQ_STAT(cpu_id, __irq_count)++;
+#endif
irq_enter();
@@ -189,7 +348,7 @@ asmlinkage void do_IRQ(void)
local_irq_restore(epsw);
}
- __mn10300_irq_enabled_epsw = old_irq_enabled_epsw;
+ __mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;
irq_exit();
}
@@ -222,9 +381,16 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%3d: ", i);
for_each_present_cpu(cpu)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
- seq_printf(p, " %14s.%u", irq_desc[i].chip->name,
- (GxICR(i) & GxICR_LEVEL) >>
- GxICR_LEVEL_SHIFT);
+
+ if (i < NR_CPU_IRQS)
+ seq_printf(p, " %14s.%u",
+ irq_desc[i].chip->name,
+ (GxICR(i) & GxICR_LEVEL) >>
+ GxICR_LEVEL_SHIFT);
+ else
+ seq_printf(p, " %14s",
+ irq_desc[i].chip->name);
+
seq_printf(p, " %s", action->name);
for (action = action->next;
@@ -240,11 +406,13 @@ int show_interrupts(struct seq_file *p, void *v)
/* polish off with NMI and error counters */
case NR_IRQS:
+#ifdef CONFIG_MN10300_WD_TIMER
seq_printf(p, "NMI: ");
for (j = 0; j < NR_CPUS; j++)
if (cpu_online(j))
seq_printf(p, "%10u ", nmi_count(j));
seq_putc(p, '\n');
+#endif
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
break;
@@ -252,3 +420,51 @@ int show_interrupts(struct seq_file *p, void *v)
return 0;
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void migrate_irqs(void)
+{
+ irq_desc_t *desc;
+ int irq;
+ unsigned int self, new;
+ unsigned long flags;
+
+ self = smp_processor_id();
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ desc = irq_desc + irq;
+
+ if (desc->status == IRQ_PER_CPU)
+ continue;
+
+ if (cpu_isset(self, irq_desc[irq].affinity) &&
+ !cpus_intersects(irq_affinity[irq], cpu_online_map)) {
+ int cpu_id;
+ cpu_id = first_cpu(cpu_online_map);
+ cpu_set(cpu_id, irq_desc[irq].affinity);
+ }
+ /* We need to operate irq_affinity_online atomically. */
+ arch_local_cli_save(flags);
+ if (irq_affinity_online[irq] == self) {
+ u16 x, tmp;
+
+ x = GxICR(irq);
+ GxICR(irq) = x & GxICR_LEVEL;
+ tmp = GxICR(irq);
+
+ new = any_online_cpu(irq_desc[irq].affinity);
+ irq_affinity_online[irq] = new;
+
+ CROSS_GxICR(irq, new) =
+ (x & GxICR_LEVEL) | GxICR_DETECT;
+ tmp = CROSS_GxICR(irq, new);
+
+ x &= GxICR_LEVEL | GxICR_ENABLE;
+ if (GxICR(irq) & GxICR_REQUEST) {
+ x |= GxICR_REQUEST | GxICR_DETECT;
+ CROSS_GxICR(irq, new) = x;
+ tmp = CROSS_GxICR(irq, new);
+ }
+ }
+ arch_local_irq_restore(flags);
+ }
+}
+#endif /* CONFIG_HOTPLUG_CPU */
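The set_affinity handler above never retargets a live interrupt directly: it only records the request in irq_affinity_request, and the unmask path later picks an online CPU with any_online_cpu() and re-programs that CPU's copy of the ICR through CROSS_GxICR(). A stand-alone C model of that defer-until-unmask pattern is sketched below; the function names and the single-CPU argument are illustrative only, not kernel API.

#include <stdbool.h>
#include <stdio.h>

#define NR_LINES 8

static bool pending_move[NR_LINES];    /* models irq_affinity_request */
static int  routed_to[NR_LINES];       /* models irq_affinity_online[] */

/* a set_affinity that merely records the wish */
static void set_affinity(int line, int cpu_wanted)
{
    (void)cpu_wanted;          /* the real code picks the CPU from the mask later */
    pending_move[line] = true;
}

/* the unmask path applies any pending move before re-enabling the line */
static void unmask(int line, int first_online_cpu)
{
    if (pending_move[line]) {
        pending_move[line] = false;
        routed_to[line] = first_online_cpu;
    }
    printf("line %d unmasked, now routed to cpu %d\n", line, routed_to[line]);
}

int main(void)
{
    set_affinity(3, 1);    /* nothing is re-routed yet */
    unmask(3, 1);          /* the move is applied here */
    return 0;
}

Splitting the operation this way keeps the ICR rewrite in a path where the line is known to be masked, which appears to be why set_affinity() itself only sets a bit.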
diff --git a/arch/mn10300/kernel/kprobes.c b/arch/mn10300/kernel/kprobes.c
index 67e6389d625..0311a7fcea1 100644
--- a/arch/mn10300/kernel/kprobes.c
+++ b/arch/mn10300/kernel/kprobes.c
@@ -377,8 +377,10 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
+#ifndef CONFIG_MN10300_CACHE_SNOOP
mn10300_dcache_flush();
mn10300_icache_inv();
+#endif
}
void arch_remove_kprobe(struct kprobe *p)
@@ -390,8 +392,10 @@ void __kprobes disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
{
*p->addr = p->opcode;
regs->pc = (unsigned long) p->addr;
+#ifndef CONFIG_MN10300_CACHE_SNOOP
mn10300_dcache_flush();
mn10300_icache_inv();
+#endif
}
static inline
diff --git a/arch/mn10300/kernel/mn10300-serial-low.S b/arch/mn10300/kernel/mn10300-serial-low.S
index 66702d25661..dfc1b6f2fa9 100644
--- a/arch/mn10300/kernel/mn10300-serial-low.S
+++ b/arch/mn10300/kernel/mn10300-serial-low.S
@@ -39,7 +39,7 @@
###############################################################################
.balign L1_CACHE_BYTES
ENTRY(mn10300_serial_vdma_interrupt)
- or EPSW_IE,psw # permit overriding by
+# or EPSW_IE,psw # permit overriding by
# debugging interrupts
movm [d2,d3,a2,a3,exreg0],(sp)
@@ -164,7 +164,7 @@ mnsc_vdma_tx_noint:
rti
mnsc_vdma_tx_empty:
- mov +(GxICR_LEVEL_1|GxICR_DETECT),d2
+ mov +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2
movhu d2,(e3) # disable the interrupt
movhu (e3),d2 # flush
@@ -175,7 +175,7 @@ mnsc_vdma_tx_break:
movhu (SCxCTR,e2),d2 # turn on break mode
or SC01CTR_BKE,d2
movhu d2,(SCxCTR,e2)
- mov +(GxICR_LEVEL_1|GxICR_DETECT),d2
+ mov +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2
movhu d2,(e3) # disable transmit interrupts on this
# channel
movhu (e3),d2 # flush
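Both transmit-interrupt hunks above stop hard-coding GxICR_LEVEL_1 and instead derive the ICR level field from CONFIG_MN10300_SERIAL_IRQ_LEVEL via NUM2GxICR_LEVEL(). The sketch below shows how such a helper composes an ICR value; the bit positions used here are assumptions made for illustration, the authoritative definitions being in asm/intctl-regs.h.

#include <stdio.h>

/* assumed ICR field layout, for illustration only */
#define GxICR_DETECT            0x0001
#define GxICR_ENABLE            0x0100
#define GxICR_LEVEL_SHIFT       12
#define NUM2GxICR_LEVEL(num)    ((num) << GxICR_LEVEL_SHIFT)

int main(void)
{
    int level = 1;    /* stands in for CONFIG_MN10300_SERIAL_IRQ_LEVEL */

    printf("disable/ack word: %#06x\n", NUM2GxICR_LEVEL(level) | GxICR_DETECT);
    printf("enable word:      %#06x\n", NUM2GxICR_LEVEL(level) | GxICR_ENABLE);
    return 0;
}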
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c
index db509dd8056..996384dba45 100644
--- a/arch/mn10300/kernel/mn10300-serial.c
+++ b/arch/mn10300/kernel/mn10300-serial.c
@@ -44,6 +44,11 @@ static const char serial_revdate[] = "2007-11-06";
#include <unit/timex.h>
#include "mn10300-serial.h"
+#ifdef CONFIG_SMP
+#undef GxICR
+#define GxICR(X) CROSS_GxICR(X, 0)
+#endif /* CONFIG_SMP */
+
#define kenter(FMT, ...) \
printk(KERN_DEBUG "-->%s(" FMT ")\n", __func__, ##__VA_ARGS__)
#define _enter(FMT, ...) \
@@ -57,6 +62,11 @@ static const char serial_revdate[] = "2007-11-06";
#define _proto(FMT, ...) \
no_printk(KERN_DEBUG "### MNSERIAL " FMT " ###\n", ##__VA_ARGS__)
+#ifndef CODMSB
+/* c_cflag bit meaning */
+#define CODMSB 004000000000 /* change Transfer bit-order */
+#endif
+
#define NR_UARTS 3
#ifdef CONFIG_MN10300_TTYSM_CONSOLE
@@ -152,26 +162,35 @@ struct mn10300_serial_port mn10300_serial_port_sif0 = {
.name = "ttySM0",
._iobase = &SC0CTR,
._control = &SC0CTR,
- ._status = (volatile u8 *) &SC0STR,
+ ._status = (volatile u8 *)&SC0STR,
._intr = &SC0ICR,
._rxb = &SC0RXB,
._txb = &SC0TXB,
.rx_name = "ttySM0:Rx",
.tx_name = "ttySM0:Tx",
-#ifdef CONFIG_MN10300_TTYSM0_TIMER8
+#if defined(CONFIG_MN10300_TTYSM0_TIMER8)
.tm_name = "ttySM0:Timer8",
._tmxmd = &TM8MD,
._tmxbr = &TM8BR,
._tmicr = &TM8ICR,
.tm_irq = TM8IRQ,
.div_timer = MNSCx_DIV_TIMER_16BIT,
-#else /* CONFIG_MN10300_TTYSM0_TIMER2 */
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER0)
+ .tm_name = "ttySM0:Timer0",
+ ._tmxmd = &TM0MD,
+ ._tmxbr = (volatile u16 *)&TM0BR,
+ ._tmicr = &TM0ICR,
+ .tm_irq = TM0IRQ,
+ .div_timer = MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
.tm_name = "ttySM0:Timer2",
._tmxmd = &TM2MD,
- ._tmxbr = (volatile u16 *) &TM2BR,
+ ._tmxbr = (volatile u16 *)&TM2BR,
._tmicr = &TM2ICR,
.tm_irq = TM2IRQ,
.div_timer = MNSCx_DIV_TIMER_8BIT,
+#else
+#error "Unknown config for ttySM0"
#endif
.rx_irq = SC0RXIRQ,
.tx_irq = SC0TXIRQ,
@@ -205,26 +224,35 @@ struct mn10300_serial_port mn10300_serial_port_sif1 = {
.name = "ttySM1",
._iobase = &SC1CTR,
._control = &SC1CTR,
- ._status = (volatile u8 *) &SC1STR,
+ ._status = (volatile u8 *)&SC1STR,
._intr = &SC1ICR,
._rxb = &SC1RXB,
._txb = &SC1TXB,
.rx_name = "ttySM1:Rx",
.tx_name = "ttySM1:Tx",
-#ifdef CONFIG_MN10300_TTYSM1_TIMER9
+#if defined(CONFIG_MN10300_TTYSM1_TIMER9)
.tm_name = "ttySM1:Timer9",
._tmxmd = &TM9MD,
._tmxbr = &TM9BR,
._tmicr = &TM9ICR,
.tm_irq = TM9IRQ,
.div_timer = MNSCx_DIV_TIMER_16BIT,
-#else /* CONFIG_MN10300_TTYSM1_TIMER3 */
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
.tm_name = "ttySM1:Timer3",
._tmxmd = &TM3MD,
- ._tmxbr = (volatile u16 *) &TM3BR,
+ ._tmxbr = (volatile u16 *)&TM3BR,
._tmicr = &TM3ICR,
.tm_irq = TM3IRQ,
.div_timer = MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER12)
+ .tm_name = "ttySM1/Timer12",
+ ._tmxmd = &TM12MD,
+ ._tmxbr = &TM12BR,
+ ._tmicr = &TM12ICR,
+ .tm_irq = TM12IRQ,
+ .div_timer = MNSCx_DIV_TIMER_16BIT,
+#else
+#error "Unknown config for ttySM1"
#endif
.rx_irq = SC1RXIRQ,
.tx_irq = SC1TXIRQ,
@@ -260,20 +288,45 @@ struct mn10300_serial_port mn10300_serial_port_sif2 = {
.uart.lock =
__SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock),
.name = "ttySM2",
- .rx_name = "ttySM2:Rx",
- .tx_name = "ttySM2:Tx",
- .tm_name = "ttySM2:Timer10",
._iobase = &SC2CTR,
._control = &SC2CTR,
- ._status = &SC2STR,
+ ._status = (volatile u8 *)&SC2STR,
._intr = &SC2ICR,
._rxb = &SC2RXB,
._txb = &SC2TXB,
+ .rx_name = "ttySM2:Rx",
+ .tx_name = "ttySM2:Tx",
+#if defined(CONFIG_MN10300_TTYSM2_TIMER10)
+ .tm_name = "ttySM2/Timer10",
._tmxmd = &TM10MD,
._tmxbr = &TM10BR,
._tmicr = &TM10ICR,
.tm_irq = TM10IRQ,
.div_timer = MNSCx_DIV_TIMER_16BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER9)
+ .tm_name = "ttySM2/Timer9",
+ ._tmxmd = &TM9MD,
+ ._tmxbr = &TM9BR,
+ ._tmicr = &TM9ICR,
+ .tm_irq = TM9IRQ,
+ .div_timer = MNSCx_DIV_TIMER_16BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER1)
+ .tm_name = "ttySM2/Timer1",
+ ._tmxmd = &TM1MD,
+ ._tmxbr = (volatile u16 *)&TM1BR,
+ ._tmicr = &TM1ICR,
+ .tm_irq = TM1IRQ,
+ .div_timer = MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER3)
+ .tm_name = "ttySM2/Timer3",
+ ._tmxmd = &TM3MD,
+ ._tmxbr = (volatile u16 *)&TM3BR,
+ ._tmicr = &TM3ICR,
+ .tm_irq = TM3IRQ,
+ .div_timer = MNSCx_DIV_TIMER_8BIT,
+#else
+#error "Unknown config for ttySM2"
+#endif
.rx_irq = SC2RXIRQ,
.tx_irq = SC2TXIRQ,
.rx_icr = &GxICR(SC2RXIRQ),
@@ -322,9 +375,13 @@ struct mn10300_serial_port *mn10300_serial_ports[NR_UARTS + 1] = {
*/
static void mn10300_serial_mask_ack(unsigned int irq)
{
+ unsigned long flags;
u16 tmp;
+
+ flags = arch_local_cli_save();
GxICR(irq) = GxICR_LEVEL_6;
tmp = GxICR(irq); /* flush write buffer */
+ arch_local_irq_restore(flags);
}
static void mn10300_serial_nop(unsigned int irq)
@@ -348,23 +405,36 @@ struct mn10300_serial_int mn10300_serial_int_tbl[NR_IRQS];
static void mn10300_serial_dis_tx_intr(struct mn10300_serial_port *port)
{
+ unsigned long flags;
u16 x;
- *port->tx_icr = GxICR_LEVEL_1 | GxICR_DETECT;
+
+ flags = arch_local_cli_save();
+ *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
x = *port->tx_icr;
+ arch_local_irq_restore(flags);
}
static void mn10300_serial_en_tx_intr(struct mn10300_serial_port *port)
{
+ unsigned long flags;
u16 x;
- *port->tx_icr = GxICR_LEVEL_1 | GxICR_ENABLE;
+
+ flags = arch_local_cli_save();
+ *port->tx_icr =
+ NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL) | GxICR_ENABLE;
x = *port->tx_icr;
+ arch_local_irq_restore(flags);
}
static void mn10300_serial_dis_rx_intr(struct mn10300_serial_port *port)
{
+ unsigned long flags;
u16 x;
- *port->rx_icr = GxICR_LEVEL_1 | GxICR_DETECT;
+
+ flags = arch_local_cli_save();
+ *port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
x = *port->rx_icr;
+ arch_local_irq_restore(flags);
}
/*
@@ -650,7 +720,7 @@ static unsigned int mn10300_serial_tx_empty(struct uart_port *_port)
static void mn10300_serial_set_mctrl(struct uart_port *_port,
unsigned int mctrl)
{
- struct mn10300_serial_port *port =
+ struct mn10300_serial_port *port __attribute__ ((unused)) =
container_of(_port, struct mn10300_serial_port, uart);
_enter("%s,%x", port->name, mctrl);
@@ -706,6 +776,7 @@ static void mn10300_serial_start_tx(struct uart_port *_port)
UART_XMIT_SIZE));
/* kick the virtual DMA controller */
+ arch_local_cli();
x = *port->tx_icr;
x |= GxICR_ENABLE;
@@ -716,10 +787,14 @@ static void mn10300_serial_start_tx(struct uart_port *_port)
_debug("CTR=%04hx ICR=%02hx STR=%04x TMD=%02hx TBR=%04hx ICR=%04hx",
*port->_control, *port->_intr, *port->_status,
- *port->_tmxmd, *port->_tmxbr, *port->tx_icr);
+ *port->_tmxmd,
+ (port->div_timer == MNSCx_DIV_TIMER_8BIT) ?
+ *(volatile u8 *)port->_tmxbr : *port->_tmxbr,
+ *port->tx_icr);
*port->tx_icr = x;
x = *port->tx_icr;
+ arch_local_sti();
}
/*
@@ -842,8 +917,10 @@ static int mn10300_serial_startup(struct uart_port *_port)
pint->port = port;
pint->vdma = mn10300_serial_vdma_tx_handler;
- set_intr_level(port->rx_irq, GxICR_LEVEL_1);
- set_intr_level(port->tx_irq, GxICR_LEVEL_1);
+ set_intr_level(port->rx_irq,
+ NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL));
+ set_intr_level(port->tx_irq,
+ NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL));
set_irq_chip(port->tm_irq, &mn10300_serial_pic);
if (request_irq(port->rx_irq, mn10300_serial_interrupt,
@@ -876,6 +953,7 @@ error:
*/
static void mn10300_serial_shutdown(struct uart_port *_port)
{
+ u16 x;
struct mn10300_serial_port *port =
container_of(_port, struct mn10300_serial_port, uart);
@@ -897,8 +975,12 @@ static void mn10300_serial_shutdown(struct uart_port *_port)
free_irq(port->rx_irq, port);
free_irq(port->tx_irq, port);
- *port->rx_icr = GxICR_LEVEL_1;
- *port->tx_icr = GxICR_LEVEL_1;
+ arch_local_cli();
+ *port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
+ x = *port->rx_icr;
+ *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
+ x = *port->tx_icr;
+ arch_local_sti();
}
/*
@@ -947,11 +1029,66 @@ static void mn10300_serial_change_speed(struct mn10300_serial_port *port,
/* Determine divisor based on baud rate */
battempt = 0;
- if (div_timer == MNSCx_DIV_TIMER_16BIT)
- scxctr |= SC0CTR_CK_TM8UFLOW_8; /* ( == SC1CTR_CK_TM9UFLOW_8
- * == SC2CTR_CK_TM10UFLOW) */
- else if (div_timer == MNSCx_DIV_TIMER_8BIT)
+ switch (port->uart.line) {
+#ifdef CONFIG_MN10300_TTYSM0
+ case 0: /* ttySM0 */
+#if defined(CONFIG_MN10300_TTYSM0_TIMER8)
+ scxctr |= SC0CTR_CK_TM8UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER0)
+ scxctr |= SC0CTR_CK_TM0UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
scxctr |= SC0CTR_CK_TM2UFLOW_8;
+#else
+#error "Unknown config for ttySM0"
+#endif
+ break;
+#endif /* CONFIG_MN10300_TTYSM0 */
+
+#ifdef CONFIG_MN10300_TTYSM1
+ case 1: /* ttySM1 */
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+#if defined(CONFIG_MN10300_TTYSM1_TIMER9)
+ scxctr |= SC1CTR_CK_TM9UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
+ scxctr |= SC1CTR_CK_TM3UFLOW_8;
+#else
+#error "Unknown config for ttySM1"
+#endif
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+#if defined(CONFIG_MN10300_TTYSM1_TIMER12)
+ scxctr |= SC1CTR_CK_TM12UFLOW_8;
+#else
+#error "Unknown config for ttySM1"
+#endif
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+ break;
+#endif /* CONFIG_MN10300_TTYSM1 */
+
+#ifdef CONFIG_MN10300_TTYSM2
+ case 2: /* ttySM2 */
+#if defined(CONFIG_AM33_2)
+#if defined(CONFIG_MN10300_TTYSM2_TIMER10)
+ scxctr |= SC2CTR_CK_TM10UFLOW;
+#else
+#error "Unknown config for ttySM2"
+#endif
+#else /* CONFIG_AM33_2 */
+#if defined(CONFIG_MN10300_TTYSM2_TIMER9)
+ scxctr |= SC2CTR_CK_TM9UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER1)
+ scxctr |= SC2CTR_CK_TM1UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER3)
+ scxctr |= SC2CTR_CK_TM3UFLOW_8;
+#else
+#error "Unknown config for ttySM2"
+#endif
+#endif /* CONFIG_AM33_2 */
+ break;
+#endif /* CONFIG_MN10300_TTYSM2 */
+
+ default:
+ break;
+ }
try_alternative:
baud = uart_get_baud_rate(&port->uart, new, old, 0,
@@ -1195,6 +1332,12 @@ static void mn10300_serial_set_termios(struct uart_port *_port,
ctr &= ~SC2CTR_TWE;
*port->_control = ctr;
}
+
+ /* change Transfer bit-order (LSB/MSB) */
+ if (new->c_cflag & CODMSB)
+ *port->_control |= SC01CTR_OD_MSBFIRST; /* MSB MODE */
+ else
+ *port->_control &= ~SC01CTR_OD_MSBFIRST; /* LSB MODE */
}
/*
@@ -1302,11 +1445,16 @@ static int __init mn10300_serial_init(void)
printk(KERN_INFO "%s version %s (%s)\n",
serial_name, serial_version, serial_revdate);
-#ifdef CONFIG_MN10300_TTYSM2
- SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */
+#if defined(CONFIG_MN10300_TTYSM2) && defined(CONFIG_AM33_2)
+ {
+ int tmp;
+ SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */
+ tmp = SC2TIM;
+ }
#endif
- set_intr_stub(EXCEP_IRQ_LEVEL1, mn10300_serial_vdma_interrupt);
+ set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL),
+ mn10300_serial_vdma_interrupt);
ret = uart_register_driver(&mn10300_serial_driver);
if (!ret) {
@@ -1366,9 +1514,11 @@ static void mn10300_serial_console_write(struct console *co,
port = mn10300_serial_ports[co->index];
/* firstly hijack the serial port from the "virtual DMA" controller */
+ arch_local_cli();
txicr = *port->tx_icr;
- *port->tx_icr = GxICR_LEVEL_1;
+ *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
tmp = *port->tx_icr;
+ arch_local_sti();
/* the transmitter may be disabled */
scxctr = *port->_control;
@@ -1422,8 +1572,10 @@ static void mn10300_serial_console_write(struct console *co,
if (!(scxctr & SC01CTR_TXE))
*port->_control = scxctr;
+ arch_local_cli();
*port->tx_icr = txicr;
tmp = *port->tx_icr;
+ arch_local_sti();
}
/*
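The set_termios addition above lets user space flip the serial transfer bit-order through the new CODMSB c_cflag bit, which the driver defines itself when the C library does not provide it. A minimal sketch of the user-space side follows; it assumes a board where /dev/ttySM0 exists and simply reuses the constant from the driver.

#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

#ifndef CODMSB
#define CODMSB 004000000000    /* value taken from the driver above */
#endif

int main(void)
{
    struct termios tio;
    int fd = open("/dev/ttySM0", O_RDWR | O_NOCTTY);

    if (fd < 0 || tcgetattr(fd, &tio) < 0) {
        perror("ttySM0");
        return 1;
    }

    tio.c_cflag |= CODMSB;    /* request MSB-first transfer order */
    if (tcsetattr(fd, TCSANOW, &tio) < 0)
        perror("tcsetattr");

    close(fd);
    return 0;
}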
diff --git a/arch/mn10300/kernel/mn10300-watchdog-low.S b/arch/mn10300/kernel/mn10300-watchdog-low.S
index 996244745cc..f2f5c9cfaab 100644
--- a/arch/mn10300/kernel/mn10300-watchdog-low.S
+++ b/arch/mn10300/kernel/mn10300-watchdog-low.S
@@ -16,6 +16,7 @@
#include <asm/intctl-regs.h>
#include <asm/timer-regs.h>
#include <asm/frame.inc>
+#include <linux/threads.h>
.text
@@ -53,7 +54,13 @@ watchdog_handler:
.type touch_nmi_watchdog,@function
touch_nmi_watchdog:
clr d0
- mov d0,(watchdog_alert_counter)
+ clr d1
+ mov watchdog_alert_counter, a0
+ setlb
+ mov d0, (a0+)
+ inc d1
+ cmp NR_CPUS, d1
+ lne
ret [],0
.size touch_nmi_watchdog,.-touch_nmi_watchdog
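The setlb/lne loop added to touch_nmi_watchdog simply zeroes one alert counter per CPU now that the counter is an array. In C terms it is equivalent to the following model, with NR_CPUS stubbed out so the snippet builds on its own:

#include <stdio.h>

#define NR_CPUS 2    /* stand-in for the kernel's NR_CPUS */

static unsigned int watchdog_alert_counter[NR_CPUS];

/* what the assembly loop above does, written out in C */
static void touch_nmi_watchdog_model(void)
{
    for (int i = 0; i < NR_CPUS; i++)
        watchdog_alert_counter[i] = 0;
}

int main(void)
{
    watchdog_alert_counter[1] = 3;
    touch_nmi_watchdog_model();
    printf("counter[1] after touch: %u\n", watchdog_alert_counter[1]);
    return 0;
}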
diff --git a/arch/mn10300/kernel/mn10300-watchdog.c b/arch/mn10300/kernel/mn10300-watchdog.c
index f362d9d138f..c5e12bfd9fc 100644
--- a/arch/mn10300/kernel/mn10300-watchdog.c
+++ b/arch/mn10300/kernel/mn10300-watchdog.c
@@ -30,7 +30,7 @@
static DEFINE_SPINLOCK(watchdog_print_lock);
static unsigned int watchdog;
static unsigned int watchdog_hz = 1;
-unsigned int watchdog_alert_counter;
+unsigned int watchdog_alert_counter[NR_CPUS];
EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -39,9 +39,6 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
* is to check its timer makes IRQ counts. If they are not
* changing then that CPU has some problem.
*
- * as these watchdog NMI IRQs are generated on every CPU, we only
- * have to check the current processor.
- *
* since NMIs don't listen to _any_ locks, we have to be extremely
* careful not to rely on unsafe variables. The printk might lock
* up though, so we have to break up any console locks first ...
@@ -69,8 +66,8 @@ int __init check_watchdog(void)
printk(KERN_INFO "OK.\n");
- /* now that we know it works we can reduce NMI frequency to
- * something more reasonable; makes a difference in some configs
+ /* now that we know it works we can reduce NMI frequency to something
+ * more reasonable; makes a difference in some configs
*/
watchdog_hz = 1;
@@ -121,15 +118,22 @@ void __init watchdog_go(void)
}
}
+#ifdef CONFIG_SMP
+static void watchdog_dump_register(void *dummy)
+{
+ printk(KERN_ERR "--- Register Dump (CPU%d) ---\n", CPUID);
+ show_registers(current_frame());
+}
+#endif
+
asmlinkage
void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
{
-
/*
* Since current-> is always on the stack, and we always switch
* the stack NMI-atomically, it's safe to use smp_processor_id().
*/
- int sum, cpu = smp_processor_id();
+ int sum, cpu;
int irq = NMIIRQ;
u8 wdt, tmp;
@@ -138,43 +142,61 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
tmp = WDCTR;
NMICR = NMICR_WDIF;
- nmi_count(cpu)++;
+ nmi_count(smp_processor_id())++;
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
- sum = irq_stat[cpu].__irq_count;
-
- if (last_irq_sums[cpu] == sum) {
- /*
- * Ayiee, looks like this CPU is stuck ...
- * wait a few IRQs (5 seconds) before doing the oops ...
- */
- watchdog_alert_counter++;
- if (watchdog_alert_counter == 5 * watchdog_hz) {
- spin_lock(&watchdog_print_lock);
+
+ for_each_online_cpu(cpu) {
+
+ sum = irq_stat[cpu].__irq_count;
+
+ if ((last_irq_sums[cpu] == sum)
+#if defined(CONFIG_GDBSTUB) && defined(CONFIG_SMP)
+ && !(CHK_GDBSTUB_BUSY()
+ || atomic_read(&cpu_doing_single_step))
+#endif
+ ) {
/*
- * We are in trouble anyway, lets at least try
- * to get a message out.
+ * Ayiee, looks like this CPU is stuck ...
+ * wait a few IRQs (5 seconds) before doing the oops ...
*/
- bust_spinlocks(1);
- printk(KERN_ERR
- "NMI Watchdog detected LOCKUP on CPU%d,"
- " pc %08lx, registers:\n",
- cpu, regs->pc);
- show_registers(regs);
- printk("console shuts up ...\n");
- console_silent();
- spin_unlock(&watchdog_print_lock);
- bust_spinlocks(0);
+ watchdog_alert_counter[cpu]++;
+ if (watchdog_alert_counter[cpu] == 5 * watchdog_hz) {
+ spin_lock(&watchdog_print_lock);
+ /*
+ * We are in trouble anyway, let's at least try
+ * to get a message out.
+ */
+ bust_spinlocks(1);
+ printk(KERN_ERR
+ "NMI Watchdog detected LOCKUP on CPU%d,"
+ " pc %08lx, registers:\n",
+ cpu, regs->pc);
+#ifdef CONFIG_SMP
+ printk(KERN_ERR
+ "--- Register Dump (CPU%d) ---\n",
+ CPUID);
+#endif
+ show_registers(regs);
+#ifdef CONFIG_SMP
+ smp_nmi_call_function(watchdog_dump_register,
+ NULL, 1);
+#endif
+ printk(KERN_NOTICE "console shuts up ...\n");
+ console_silent();
+ spin_unlock(&watchdog_print_lock);
+ bust_spinlocks(0);
#ifdef CONFIG_GDBSTUB
- if (gdbstub_busy)
- gdbstub_exception(regs, excep);
- else
- gdbstub_intercept(regs, excep);
+ if (CHK_GDBSTUB_BUSY_AND_ACTIVE())
+ gdbstub_exception(regs, excep);
+ else
+ gdbstub_intercept(regs, excep);
#endif
- do_exit(SIGSEGV);
+ do_exit(SIGSEGV);
+ }
+ } else {
+ last_irq_sums[cpu] = sum;
+ watchdog_alert_counter[cpu] = 0;
}
- } else {
- last_irq_sums[cpu] = sum;
- watchdog_alert_counter = 0;
}
WDCTR = wdt | WDCTR_WDRST;
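With last_irq_sums and watchdog_alert_counter both per-CPU, the NMI handler now walks every online CPU and complains about any whose interrupt count has stopped moving for 5 * watchdog_hz consecutive ticks. Stripped of the kernel plumbing (locks, gdbstub checks, register dumps), the bookkeeping reduces to this stand-alone model:

#include <stdio.h>

#define NR_CPUS     2
#define WATCHDOG_HZ 1

static unsigned int last_irq_sums[NR_CPUS];
static unsigned int alert_counter[NR_CPUS];

/* one watchdog tick; irq_count[] holds each CPU's current interrupt total */
static void watchdog_tick(const unsigned int *irq_count)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        if (last_irq_sums[cpu] == irq_count[cpu]) {
            if (++alert_counter[cpu] == 5 * WATCHDOG_HZ)
                printf("CPU%d appears stuck\n", cpu);
        } else {
            last_irq_sums[cpu] = irq_count[cpu];
            alert_counter[cpu] = 0;
        }
    }
}

int main(void)
{
    unsigned int counts[NR_CPUS] = { 0, 0 };

    for (int tick = 0; tick < 6; tick++) {
        counts[0]++;    /* CPU0 keeps taking interrupts */
        /* counts[1] never moves, so CPU1 trips the detector */
        watchdog_tick(counts);
    }
    return 0;
}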
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index f48373e2bc1..e1b14a6ed54 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
@@ -57,6 +56,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
+#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/*
* we use this if we don't have any better idle routine
*/
@@ -69,6 +69,35 @@ static void default_idle(void)
local_irq_enable();
}
+#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static inline void poll_idle(void)
+{
+ int oldval;
+
+ local_irq_enable();
+
+ /*
+ * Deal with another CPU just having chosen a thread to
+ * run here:
+ */
+ oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+
+ if (!oldval) {
+ set_thread_flag(TIF_POLLING_NRFLAG);
+ while (!need_resched())
+ cpu_relax();
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ } else {
+ set_need_resched();
+ }
+}
+#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
+
/*
* the idle thread
* - there's no useful work to be done, so just try to conserve power and have
@@ -77,8 +106,6 @@ static void default_idle(void)
*/
void cpu_idle(void)
{
- int cpu = smp_processor_id();
-
/* endless idle loop with no priority at all */
for (;;) {
while (!need_resched()) {
@@ -86,10 +113,13 @@ void cpu_idle(void)
smp_rmb();
idle = pm_idle;
- if (!idle)
+ if (!idle) {
+#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
+ idle = poll_idle;
+#else /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
idle = default_idle;
-
- irq_stat[cpu].idle_timestamp = jiffies;
+#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
+ }
idle();
}
@@ -197,6 +227,7 @@ int copy_thread(unsigned long clone_flags,
unsigned long c_usp, unsigned long ustk_size,
struct task_struct *p, struct pt_regs *kregs)
{
+ struct thread_info *ti = task_thread_info(p);
struct pt_regs *c_uregs, *c_kregs, *uregs;
unsigned long c_ksp;
@@ -217,7 +248,7 @@ int copy_thread(unsigned long clone_flags,
/* the new TLS pointer is passed in as arg #5 to sys_clone() */
if (clone_flags & CLONE_SETTLS)
- c_uregs->e2 = __frame->d3;
+ c_uregs->e2 = current_frame()->d3;
/* set up the return kernel frame if called from kernel_thread() */
c_kregs = c_uregs;
@@ -235,7 +266,7 @@ int copy_thread(unsigned long clone_flags,
}
/* set up things up so the scheduler can start the new task */
- p->thread.__frame = c_kregs;
+ ti->frame = c_kregs;
p->thread.a3 = (unsigned long) c_kregs;
p->thread.sp = c_ksp;
p->thread.pc = (unsigned long) ret_from_fork;
@@ -247,25 +278,26 @@ int copy_thread(unsigned long clone_flags,
/*
* clone a process
- * - tlsptr is retrieved by copy_thread() from __frame->d3
+ * - tlsptr is retrieved by copy_thread() from current_frame()->d3
*/
asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
int __user *parent_tidptr, int __user *child_tidptr,
int __user *tlsptr)
{
- return do_fork(clone_flags, newsp ?: __frame->sp, __frame, 0,
- parent_tidptr, child_tidptr);
+ return do_fork(clone_flags, newsp ?: current_frame()->sp,
+ current_frame(), 0, parent_tidptr, child_tidptr);
}
asmlinkage long sys_fork(void)
{
- return do_fork(SIGCHLD, __frame->sp, __frame, 0, NULL, NULL);
+ return do_fork(SIGCHLD, current_frame()->sp,
+ current_frame(), 0, NULL, NULL);
}
asmlinkage long sys_vfork(void)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, __frame->sp, __frame,
- 0, NULL, NULL);
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, current_frame()->sp,
+ current_frame(), 0, NULL, NULL);
}
asmlinkage long sys_execve(const char __user *name,
@@ -279,7 +311,7 @@ asmlinkage long sys_execve(const char __user *name,
error = PTR_ERR(filename);
if (IS_ERR(filename))
return error;
- error = do_execve(filename, argv, envp, __frame);
+ error = do_execve(filename, argv, envp, current_frame());
putname(filename);
return error;
}
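The new poll_idle() used on SMP kernels without CPU hotplug trades power for wake-up latency: instead of halting and waiting for the reschedule IPI, the idle CPU keeps polling the need-resched flag. A rough user-space analogue of that polling idea, with a thread standing in for the idle CPU and C11 atomics standing in for the thread flags (illustrative only):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool need_resched_flag;

/* models poll_idle(): spin on the flag instead of sleeping until an IPI */
static void *idle_cpu(void *unused)
{
    (void)unused;
    while (!atomic_load_explicit(&need_resched_flag, memory_order_acquire))
        ;    /* a cpu_relax() equivalent would sit here */
    printf("idle loop noticed pending work without an IPI\n");
    return NULL;
}

int main(void)
{
    pthread_t tid;

    pthread_create(&tid, NULL, idle_cpu, NULL);
    usleep(1000);    /* another "CPU" picks a task to run here */
    atomic_store_explicit(&need_resched_flag, true, memory_order_release);
    pthread_join(tid, NULL);
    return 0;
}

Built with -pthread, the program exits as soon as the flag is set, without any cross-thread signalling.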
diff --git a/arch/mn10300/kernel/profile.c b/arch/mn10300/kernel/profile.c
index 20d7d0306b1..4f342f75d00 100644
--- a/arch/mn10300/kernel/profile.c
+++ b/arch/mn10300/kernel/profile.c
@@ -41,7 +41,7 @@ static __init int profile_init(void)
tmp = TM11ICR;
printk(KERN_INFO "Profiling initiated on timer 11, priority 0, %uHz\n",
- mn10300_ioclk / 8 / (TM11BR + 1));
+ MN10300_IOCLK / 8 / (TM11BR + 1));
printk(KERN_INFO "Profile histogram stored %p-%p\n",
prof_buffer, (u8 *)(prof_buffer + prof_len) - 1);
diff --git a/arch/mn10300/kernel/ptrace.c b/arch/mn10300/kernel/ptrace.c
index cf847dabc1b..5c0b07e6100 100644
--- a/arch/mn10300/kernel/ptrace.c
+++ b/arch/mn10300/kernel/ptrace.c
@@ -295,31 +295,31 @@ void ptrace_disable(struct task_struct *child)
/*
* handle the arch-specific side of process tracing
*/
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long tmp;
int ret;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR:
ret = -EIO;
- if ((addr & 3) || addr < 0 ||
- addr > sizeof(struct user) - 3)
+ if ((addr & 3) || addr > sizeof(struct user) - 3)
break;
tmp = 0; /* Default return condition */
if (addr < NR_PTREGS << 2)
tmp = get_stack_long(child,
ptrace_regid_to_frame[addr]);
- ret = put_user(tmp, (unsigned long *) data);
+ ret = put_user(tmp, datap);
break;
/* write the word at location addr in the USER area */
case PTRACE_POKEUSR:
ret = -EIO;
- if ((addr & 3) || addr < 0 ||
- addr > sizeof(struct user) - 3)
+ if ((addr & 3) || addr > sizeof(struct user) - 3)
break;
ret = 0;
@@ -332,25 +332,25 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_mn10300_native_view,
REGSET_GENERAL,
0, NR_PTREGS * sizeof(long),
- (void __user *)data);
+ datap);
case PTRACE_SETREGS: /* Set all integer regs in the child. */
return copy_regset_from_user(child, &user_mn10300_native_view,
REGSET_GENERAL,
0, NR_PTREGS * sizeof(long),
- (const void __user *)data);
+ datap);
case PTRACE_GETFPREGS: /* Get the child FPU state. */
return copy_regset_to_user(child, &user_mn10300_native_view,
REGSET_FPU,
0, sizeof(struct fpu_state_struct),
- (void __user *)data);
+ datap);
case PTRACE_SETFPREGS: /* Set the child FPU state. */
return copy_regset_from_user(child, &user_mn10300_native_view,
REGSET_FPU,
0, sizeof(struct fpu_state_struct),
- (const void __user *)data);
+ datap);
default:
ret = ptrace_request(child, request, addr, data);
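The arch_ptrace() rework above takes addr and data as unsigned long and drops the now-meaningless addr < 0 checks, but the user-visible contract stays that of ordinary ptrace(2). For reference, a generic caller of the USER-area peek path (PTRACE_PEEKUSR in the kernel, spelled PTRACE_PEEKUSER by glibc) looks roughly like this; it uses only the standard Linux API and nothing in it is mn10300-specific:

#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    pid_t child = fork();

    if (child == 0) {
        ptrace(PTRACE_TRACEME, 0, NULL, NULL);
        execlp("true", "true", (char *)NULL);
        _exit(1);
    }

    waitpid(child, NULL, 0);    /* child stops at exec */

    /* read a word from the USER area; the offset must be word-aligned */
    errno = 0;
    long word = ptrace(PTRACE_PEEKUSER, child, (void *)0, NULL);
    if (errno)
        perror("PTRACE_PEEKUSER");
    else
        printf("USER area word at offset 0: %#lx\n", word);

    ptrace(PTRACE_DETACH, child, NULL, NULL);
    waitpid(child, NULL, 0);
    return 0;
}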
diff --git a/arch/mn10300/kernel/rtc.c b/arch/mn10300/kernel/rtc.c
index 4eef0e7224f..e9e20f9a4dd 100644
--- a/arch/mn10300/kernel/rtc.c
+++ b/arch/mn10300/kernel/rtc.c
@@ -20,18 +20,22 @@
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
-/* time for RTC to update itself in ioclks */
-static unsigned long mn10300_rtc_update_period;
-
+/*
+ * Read the current RTC time
+ */
void read_persistent_clock(struct timespec *ts)
{
struct rtc_time tm;
get_rtc_time(&tm);
- ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
- tm.tm_hour, tm.tm_min, tm.tm_sec);
ts->tv_nsec = 0;
+ ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+ /* if rtc is way off in the past, set something reasonable */
+ if (ts->tv_sec < 0)
+ ts->tv_sec = mktime(2009, 1, 1, 12, 0, 0);
}
/*
@@ -115,39 +119,14 @@ int update_persistent_clock(struct timespec now)
*/
void __init calibrate_clock(void)
{
- unsigned long count0, counth, count1;
unsigned char status;
/* make sure the RTC is running and is set to operate in 24hr mode */
status = RTSRC;
RTCRB |= RTCRB_SET;
RTCRB |= RTCRB_TM_24HR;
+ RTCRB &= ~RTCRB_DM_BINARY;
RTCRA |= RTCRA_DVR;
RTCRA &= ~RTCRA_DVR;
RTCRB &= ~RTCRB_SET;
-
- /* work out the clock speed by counting clock cycles between ends of
- * the RTC update cycle - track the RTC through one complete update
- * cycle (1 second)
- */
- startup_timestamp_counter();
-
- while (!(RTCRA & RTCRA_UIP)) {}
- while ((RTCRA & RTCRA_UIP)) {}
-
- count0 = TMTSCBC;
-
- while (!(RTCRA & RTCRA_UIP)) {}
-
- counth = TMTSCBC;
-
- while ((RTCRA & RTCRA_UIP)) {}
-
- count1 = TMTSCBC;
-
- shutdown_timestamp_counter();
-
- MN10300_TSCCLK = count0 - count1; /* the timers count down */
- mn10300_rtc_update_period = counth - count1;
- MN10300_TSC_PER_HZ = MN10300_TSCCLK / HZ;
}
diff --git a/arch/mn10300/kernel/setup.c b/arch/mn10300/kernel/setup.c
index d464affcba0..9e7a3209a3e 100644
--- a/arch/mn10300/kernel/setup.c
+++ b/arch/mn10300/kernel/setup.c
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
+#include <linux/cpu.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <asm/uaccess.h>
@@ -30,7 +31,6 @@
#include <asm/io.h>
#include <asm/smp.h>
#include <proc/proc.h>
-#include <asm/busctl-regs.h>
#include <asm/fpu.h>
#include <asm/sections.h>
@@ -64,11 +64,13 @@ unsigned long memory_size;
struct thread_info *__current_ti = &init_thread_union.thread_info;
struct task_struct *__current = &init_task;
-#define mn10300_known_cpus 3
+#define mn10300_known_cpus 5
static const char *const mn10300_cputypes[] = {
- "am33v1",
- "am33v2",
- "am34v1",
+ "am33-1",
+ "am33-2",
+ "am34-1",
+ "am33-3",
+ "am34-2",
"unknown"
};
@@ -123,6 +125,7 @@ void __init setup_arch(char **cmdline_p)
cpu_init();
unit_setup();
+ smp_init_cpus();
parse_mem_cmdline(cmdline_p);
init_mm.start_code = (unsigned long)&_text;
@@ -179,57 +182,55 @@ void __init setup_arch(char **cmdline_p)
void __init cpu_init(void)
{
unsigned long cpurev = CPUREV, type;
- unsigned long base, size;
type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S;
if (type > mn10300_known_cpus)
type = mn10300_known_cpus;
- printk(KERN_INFO "Matsushita %s, rev %ld\n",
+ printk(KERN_INFO "Panasonic %s, rev %ld\n",
mn10300_cputypes[type],
(cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S);
- /* determine the memory size and base from the memory controller regs */
- memory_size = 0;
-
- base = SDBASE(0);
- if (base & SDBASE_CE) {
- size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
- size = ~size + 1;
- base &= SDBASE_CBA;
+ get_mem_info(&phys_memory_base, &memory_size);
+ phys_memory_end = phys_memory_base + memory_size;
- printk(KERN_INFO "SDRAM[0]: %luMb @%08lx\n", size >> 20, base);
- memory_size += size;
- phys_memory_base = base;
- }
+ fpu_init_state();
+}
- base = SDBASE(1);
- if (base & SDBASE_CE) {
- size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
- size = ~size + 1;
- base &= SDBASE_CBA;
+static struct cpu cpu_devices[NR_CPUS];
- printk(KERN_INFO "SDRAM[1]: %luMb @%08lx\n", size >> 20, base);
- memory_size += size;
- if (phys_memory_base == 0)
- phys_memory_base = base;
- }
+static int __init topology_init(void)
+{
+ int i;
- phys_memory_end = phys_memory_base + memory_size;
+ for_each_present_cpu(i)
+ register_cpu(&cpu_devices[i], i);
-#ifdef CONFIG_FPU
- fpu_init_state();
-#endif
+ return 0;
}
+subsys_initcall(topology_init);
+
/*
* Get CPU information for use by the procfs.
*/
static int show_cpuinfo(struct seq_file *m, void *v)
{
+#ifdef CONFIG_SMP
+ struct mn10300_cpuinfo *c = v;
+ unsigned long cpu_id = c - cpu_data;
+ unsigned long cpurev = c->type, type, icachesz, dcachesz;
+#else /* CONFIG_SMP */
+ unsigned long cpu_id = 0;
unsigned long cpurev = CPUREV, type, icachesz, dcachesz;
+#endif /* CONFIG_SMP */
- type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S;
+#ifdef CONFIG_SMP
+ if (!cpu_online(cpu_id))
+ return 0;
+#endif
+
+ type = (cpurev & CPUREV_TYPE) >> CPUREV_TYPE_S;
if (type > mn10300_known_cpus)
type = mn10300_known_cpus;
@@ -244,13 +245,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1024;
seq_printf(m,
- "processor : 0\n"
- "vendor_id : Matsushita\n"
+ "processor : %ld\n"
+ "vendor_id : " PROCESSOR_VENDOR_NAME "\n"
"cpu core : %s\n"
"cpu rev : %lu\n"
"model name : " PROCESSOR_MODEL_NAME "\n"
"icache size: %lu\n"
"dcache size: %lu\n",
+ cpu_id,
mn10300_cputypes[type],
(cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S,
icachesz,
@@ -262,8 +264,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
"bogomips : %lu.%02lu\n\n",
MN10300_IOCLK / 1000000,
(MN10300_IOCLK / 10000) % 100,
+#ifdef CONFIG_SMP
+ c->loops_per_jiffy / (500000 / HZ),
+ (c->loops_per_jiffy / (5000 / HZ)) % 100
+#else /* CONFIG_SMP */
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100
+#endif /* CONFIG_SMP */
);
return 0;
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c
index d4de05ab786..690f4e9507d 100644
--- a/arch/mn10300/kernel/signal.c
+++ b/arch/mn10300/kernel/signal.c
@@ -91,7 +91,7 @@ asmlinkage long sys_sigaction(int sig,
*/
asmlinkage long sys_sigaltstack(const stack_t __user *uss, stack_t *uoss)
{
- return do_sigaltstack(uss, uoss, __frame->sp);
+ return do_sigaltstack(uss, uoss, current_frame()->sp);
}
/*
@@ -156,10 +156,11 @@ badframe:
*/
asmlinkage long sys_sigreturn(void)
{
- struct sigframe __user *frame = (struct sigframe __user *) __frame->sp;
+ struct sigframe __user *frame;
sigset_t set;
long d0;
+ frame = (struct sigframe __user *) current_frame()->sp;
if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__get_user(set.sig[0], &frame->sc.oldmask))
@@ -176,7 +177,7 @@ asmlinkage long sys_sigreturn(void)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- if (restore_sigcontext(__frame, &frame->sc, &d0))
+ if (restore_sigcontext(current_frame(), &frame->sc, &d0))
goto badframe;
return d0;
@@ -191,11 +192,11 @@ badframe:
*/
asmlinkage long sys_rt_sigreturn(void)
{
- struct rt_sigframe __user *frame =
- (struct rt_sigframe __user *) __frame->sp;
+ struct rt_sigframe __user *frame;
sigset_t set;
- unsigned long d0;
+ long d0;
+ frame = (struct rt_sigframe __user *) current_frame()->sp;
if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
@@ -207,10 +208,11 @@ asmlinkage long sys_rt_sigreturn(void)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- if (restore_sigcontext(__frame, &frame->uc.uc_mcontext, &d0))
+ if (restore_sigcontext(current_frame(), &frame->uc.uc_mcontext, &d0))
goto badframe;
- if (do_sigaltstack(&frame->uc.uc_stack, NULL, __frame->sp) == -EFAULT)
+ if (do_sigaltstack(&frame->uc.uc_stack, NULL, current_frame()->sp) ==
+ -EFAULT)
goto badframe;
return d0;
@@ -572,7 +574,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
- tracehook_notify_resume(__frame);
+ tracehook_notify_resume(current_frame());
if (current->replacement_session_keyring)
key_replace_session_keyring();
}
diff --git a/arch/mn10300/kernel/smp-low.S b/arch/mn10300/kernel/smp-low.S
new file mode 100644
index 00000000000..72938cefc05
--- /dev/null
+++ b/arch/mn10300/kernel/smp-low.S
@@ -0,0 +1,97 @@
+/* SMP IPI low-level handler
+ *
+ * Copyright (C) 2006-2007 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/system.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <proc/smp-regs.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
+
+ .am33_2
+
+###############################################################################
+#
+# IPI interrupt handler
+#
+###############################################################################
+ .globl mn10300_low_ipi_handler
+mn10300_low_ipi_handler:
+ add -4,sp
+ mov d0,(sp)
+ movhu (IAGR),d0
+ and IAGR_GN,d0
+ lsr 0x2,d0
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+ cmp FLUSH_CACHE_IPI,d0
+ beq mn10300_flush_cache_ipi
+#endif
+ cmp SMP_BOOT_IRQ,d0
+ beq mn10300_smp_boot_ipi
+ /* OTHERS */
+ mov (sp),d0
+ add 4,sp
+#ifdef CONFIG_GDBSTUB
+ jmp gdbstub_io_rx_handler
+#else
+ jmp end
+#endif
+
+###############################################################################
+#
+# Cache flush IPI interrupt handler
+#
+###############################################################################
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+mn10300_flush_cache_ipi:
+ mov (sp),d0
+ add 4,sp
+
+ /* FLUSH_CACHE_IPI */
+ add -4,sp
+ SAVE_ALL
+ mov GxICR_DETECT,d2
+ movbu d2,(GxICR(FLUSH_CACHE_IPI)) # ACK the interrupt
+ movhu (GxICR(FLUSH_CACHE_IPI)),d2
+ call smp_cache_interrupt[],0
+ RESTORE_ALL
+ jmp end
+#endif
+
+###############################################################################
+#
+# SMP boot CPU IPI interrupt handler
+#
+###############################################################################
+mn10300_smp_boot_ipi:
+ /* clear interrupt */
+ movhu (GxICR(SMP_BOOT_IRQ)),d0
+ and ~GxICR_REQUEST,d0
+ movhu d0,(GxICR(SMP_BOOT_IRQ))
+ mov (sp),d0
+ add 4,sp
+
+ # get stack
+ mov (CPUID),a0
+ add -1,a0
+ add a0,a0
+ add a0,a0
+ mov (start_stack,a0),a0
+ mov a0,sp
+ jmp initialize_secondary
+
+
+# Jump here after RTI to suppress the icache lookahead
+end:
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
new file mode 100644
index 00000000000..0dcd1c686ba
--- /dev/null
+++ b/arch/mn10300/kernel/smp.c
@@ -0,0 +1,1152 @@
+/* SMP support routines.
+ *
+ * Copyright (C) 2006-2008 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+#include <asm/tlbflush.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+#include <asm/bug.h>
+#include <asm/exceptions.h>
+#include <asm/hardirq.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
+#include "internal.h"
+
+#ifdef CONFIG_HOTPLUG_CPU
+#include <linux/cpu.h>
+#include <asm/cacheflush.h>
+
+static unsigned long sleep_mode[NR_CPUS];
+
+static void run_sleep_cpu(unsigned int cpu);
+static void run_wakeup_cpu(unsigned int cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * Debug Message function
+ */
+
+#undef DEBUG_SMP
+#ifdef DEBUG_SMP
+#define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+#else
+#define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+#endif
+
+/* timeout value in msec for smp_nmi_call_function. zero is no timeout. */
+#define CALL_FUNCTION_NMI_IPI_TIMEOUT 0
+
+/*
+ * Structure and data for smp_nmi_call_function().
+ */
+struct nmi_call_data_struct {
+ smp_call_func_t func;
+ void *info;
+ cpumask_t started;
+ cpumask_t finished;
+ int wait;
+ char size_alignment[0]
+ __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+
+static DEFINE_SPINLOCK(smp_nmi_call_lock);
+static struct nmi_call_data_struct *nmi_call_data;
+
+/*
+ * Data structures and variables
+ */
+static cpumask_t cpu_callin_map; /* Bitmask of callin CPUs */
+static cpumask_t cpu_callout_map; /* Bitmask of callout CPUs */
+cpumask_t cpu_boot_map; /* Bitmask of boot APs */
+unsigned long start_stack[NR_CPUS - 1];
+
+/*
+ * Per CPU parameters
+ */
+struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned;
+
+static int cpucount; /* The count of boot CPUs */
+static cpumask_t smp_commenced_mask;
+cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+
+/*
+ * Function Prototypes
+ */
+static int do_boot_cpu(int);
+static void smp_show_cpu_info(int cpu_id);
+static void smp_callin(void);
+static void smp_online(void);
+static void smp_store_cpu_info(int);
+static void smp_cpu_init(void);
+static void smp_tune_scheduling(void);
+static void send_IPI_mask(const cpumask_t *cpumask, int irq);
+static void init_ipi(void);
+
+/*
+ * IPI Initialization interrupt definitions
+ */
+static void mn10300_ipi_disable(unsigned int irq);
+static void mn10300_ipi_enable(unsigned int irq);
+static void mn10300_ipi_ack(unsigned int irq);
+static void mn10300_ipi_nop(unsigned int irq);
+
+static struct irq_chip mn10300_ipi_type = {
+ .name = "cpu_ipi",
+ .disable = mn10300_ipi_disable,
+ .enable = mn10300_ipi_enable,
+ .ack = mn10300_ipi_ack,
+ .eoi = mn10300_ipi_nop
+};
+
+static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);
+static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id);
+
+static struct irqaction reschedule_ipi = {
+ .handler = smp_reschedule_interrupt,
+ .name = "smp reschedule IPI"
+};
+static struct irqaction call_function_ipi = {
+ .handler = smp_call_function_interrupt,
+ .name = "smp call function IPI"
+};
+
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
+static struct irqaction local_timer_ipi = {
+ .handler = smp_ipi_timer_interrupt,
+ .flags = IRQF_DISABLED,
+ .name = "smp local timer IPI"
+};
+#endif
+
+/**
+ * init_ipi - Initialise the IPI mechanism
+ */
+static void init_ipi(void)
+{
+ unsigned long flags;
+ u16 tmp16;
+
+ /* set up the reschedule IPI */
+ set_irq_chip_and_handler(RESCHEDULE_IPI,
+ &mn10300_ipi_type, handle_percpu_irq);
+ setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
+ set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
+ mn10300_ipi_enable(RESCHEDULE_IPI);
+
+ /* set up the call function IPI */
+ set_irq_chip_and_handler(CALL_FUNC_SINGLE_IPI,
+ &mn10300_ipi_type, handle_percpu_irq);
+ setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
+ set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
+ mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
+
+ /* set up the local timer IPI */
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
+ defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+ set_irq_chip_and_handler(LOCAL_TIMER_IPI,
+ &mn10300_ipi_type, handle_percpu_irq);
+ setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
+ set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
+ mn10300_ipi_enable(LOCAL_TIMER_IPI);
+#endif
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+ /* set up the cache flush IPI */
+ flags = arch_local_cli_save();
+ __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV),
+ mn10300_low_ipi_handler);
+ GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
+ mn10300_ipi_enable(FLUSH_CACHE_IPI);
+ arch_local_irq_restore(flags);
+#endif
+
+ /* set up the NMI call function IPI */
+ flags = arch_local_cli_save();
+ GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+ tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
+ arch_local_irq_restore(flags);
+
+ /* set up the SMP boot IPI */
+ flags = arch_local_cli_save();
+ __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV),
+ mn10300_low_ipi_handler);
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_shutdown - Shut down handling of an IPI
+ * @irq: The IPI to be shut down.
+ */
+static void mn10300_ipi_shutdown(unsigned int irq)
+{
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+
+ tmp = GxICR(irq);
+ GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+ tmp = GxICR(irq);
+
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_enable - Enable an IPI
+ * @irq: The IPI to be enabled.
+ */
+static void mn10300_ipi_enable(unsigned int irq)
+{
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+
+ tmp = GxICR(irq);
+ GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
+ tmp = GxICR(irq);
+
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_disable - Disable an IPI
+ * @irq: The IPI to be disabled.
+ */
+static void mn10300_ipi_disable(unsigned int irq)
+{
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+
+ tmp = GxICR(irq);
+ GxICR(irq) = tmp & GxICR_LEVEL;
+ tmp = GxICR(irq);
+
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC
+ * @irq: The IPI to be acknowledged.
+ *
+ * Clear the interrupt detection flag for the IPI on the appropriate interrupt
+ * channel in the PIC.
+ */
+static void mn10300_ipi_ack(unsigned int irq)
+{
+ unsigned long flags;
+ u16 tmp;
+
+ flags = arch_local_cli_save();
+ GxICR_u8(irq) = GxICR_DETECT;
+ tmp = GxICR(irq);
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_nop - Dummy IPI action
+ * @irq: The IPI to be acted upon.
+ */
+static void mn10300_ipi_nop(unsigned int irq)
+{
+}
+
+/**
+ * send_IPI_mask - Send IPIs to all CPUs in list
+ * @cpumask: The list of CPUs to target.
+ * @irq: The IPI request to be sent.
+ *
+ * Send the specified IPI to all the CPUs in the list, not waiting for them to
+ * finish before returning. The caller is responsible for synchronisation if
+ * that is needed.
+ */
+static void send_IPI_mask(const cpumask_t *cpumask, int irq)
+{
+ int i;
+ u16 tmp;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_isset(i, *cpumask)) {
+ /* send IPI */
+ tmp = CROSS_GxICR(irq, i);
+ CROSS_GxICR(irq, i) =
+ tmp | GxICR_REQUEST | GxICR_DETECT;
+ tmp = CROSS_GxICR(irq, i); /* flush write buffer */
+ }
+ }
+}
+
+/**
+ * send_IPI_self - Send an IPI to this CPU.
+ * @irq: The IPI request to be sent.
+ *
+ * Send the specified IPI to the current CPU.
+ */
+void send_IPI_self(int irq)
+{
+ send_IPI_mask(cpumask_of(smp_processor_id()), irq);
+}
+
+/**
+ * send_IPI_allbutself - Send IPIs to all the other CPUs.
+ * @irq: The IPI request to be sent.
+ *
+ * Send the specified IPI to all CPUs in the system barring the current one,
+ * not waiting for them to finish before returning. The caller is responsible
+ * for synchronisation if that is needed.
+ */
+void send_IPI_allbutself(int irq)
+{
+ cpumask_t cpumask;
+
+ cpumask = cpu_online_map;
+ cpu_clear(smp_processor_id(), cpumask);
+ send_IPI_mask(&cpumask, irq);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ BUG();
+ /*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI);
+}
+
+/**
+ * smp_send_reschedule - Send reschedule IPI to a CPU
+ * @cpu: The CPU to target.
+ */
+void smp_send_reschedule(int cpu)
+{
+ send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI);
+}
+
+/**
+ * smp_nmi_call_function - Send a call function NMI IPI to all CPUs
+ * @func: The function to ask to be run.
+ * @info: The context data to pass to that function.
+ * @wait: If true, wait (atomically) until function is run on all CPUs.
+ *
+ * Send a non-maskable request to all CPUs in the system, requesting them to
+ * run the specified function with the given context data, and, potentially, to
+ * wait for completion of that function on all CPUs.
+ *
+ * Returns 0 if successful, -ETIMEDOUT if we were asked to wait, but hit the
+ * timeout.
+ */
+int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
+{
+ struct nmi_call_data_struct data;
+ unsigned long flags;
+ unsigned int cnt;
+ int cpus, ret = 0;
+
+ cpus = num_online_cpus() - 1;
+ if (cpus < 1)
+ return 0;
+
+ data.func = func;
+ data.info = info;
+ data.started = cpu_online_map;
+ cpu_clear(smp_processor_id(), data.started);
+ data.wait = wait;
+ if (wait)
+ data.finished = data.started;
+
+ spin_lock_irqsave(&smp_nmi_call_lock, flags);
+ nmi_call_data = &data;
+ smp_mb();
+
+ /* Send a message to all other CPUs and wait for them to respond */
+ send_IPI_allbutself(CALL_FUNCTION_NMI_IPI);
+
+ /* Wait for response */
+ if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
+ for (cnt = 0;
+ cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
+ !cpus_empty(data.started);
+ cnt++)
+ mdelay(1);
+
+ if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
+ for (cnt = 0;
+ cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
+ !cpus_empty(data.finished);
+ cnt++)
+ mdelay(1);
+ }
+
+ if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT)
+ ret = -ETIMEDOUT;
+
+ } else {
+ /* If timeout value is zero, wait until cpumask has been
+ * cleared */
+ while (!cpus_empty(data.started))
+ barrier();
+ if (wait)
+ while (!cpus_empty(data.finished))
+ barrier();
+ }
+
+ spin_unlock_irqrestore(&smp_nmi_call_lock, flags);
+ return ret;
+}
+
+/**
+ * stop_this_cpu - Callback to stop a CPU.
+ * @unused: Callback context (ignored).
+ */
+void stop_this_cpu(void *unused)
+{
+ static volatile int stopflag;
+ unsigned long flags;
+
+#ifdef CONFIG_GDBSTUB
+ /* In case of single stepping smp_send_stop by other CPU,
+ * clear procindebug to avoid deadlock.
+ */
+ atomic_set(&procindebug[smp_processor_id()], 0);
+#endif /* CONFIG_GDBSTUB */
+
+ flags = arch_local_cli_save();
+ cpu_clear(smp_processor_id(), cpu_online_map);
+
+ while (!stopflag)
+ cpu_relax();
+
+ cpu_set(smp_processor_id(), cpu_online_map);
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * smp_send_stop - Send a stop request to all CPUs.
+ */
+void smp_send_stop(void)
+{
+ smp_nmi_call_function(stop_this_cpu, NULL, 0);
+}
+
+/**
+ * smp_reschedule_interrupt - Reschedule IPI handler
+ * @irq: The interrupt number.
+ * @dev_id: The device ID.
+ *
+ * We need do nothing here, since the scheduling will be effected on our way
+ * back through entry.S.
+ *
+ * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
+ */
+static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
+{
+ /* do nothing */
+ return IRQ_HANDLED;
+}
+
+/**
+ * smp_call_function_interrupt - Call function IPI handler
+ * @irq: The interrupt number.
+ * @dev_id: The device ID.
+ *
+ * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
+ */
+static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
+{
+ /* generic_smp_call_function_interrupt(); */
+ generic_smp_call_function_single_interrupt();
+ return IRQ_HANDLED;
+}
+
+/**
+ * smp_nmi_call_function_interrupt - Non-maskable call function IPI handler
+ */
+void smp_nmi_call_function_interrupt(void)
+{
+ smp_call_func_t func = nmi_call_data->func;
+ void *info = nmi_call_data->info;
+ int wait = nmi_call_data->wait;
+
+ /* Notify the initiating CPU that I've grabbed the data and am about to
+ * execute the function
+ */
+ smp_mb();
+ cpu_clear(smp_processor_id(), nmi_call_data->started);
+ (*func)(info);
+
+ if (wait) {
+ smp_mb();
+ cpu_clear(smp_processor_id(), nmi_call_data->finished);
+ }
+}
+
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
+ defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+/**
+ * smp_ipi_timer_interrupt - Local timer IPI handler
+ * @irq: The interrupt number.
+ * @dev_id: The device ID.
+ *
+ * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
+ */
+static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id)
+{
+ return local_timer_interrupt();
+}
+#endif
+
+void __init smp_init_cpus(void)
+{
+ int i;
+ for (i = 0; i < NR_CPUS; i++) {
+ set_cpu_possible(i, true);
+ set_cpu_present(i, true);
+ }
+}
+
+/**
+ * smp_cpu_init - Initialise AP in start_secondary.
+ *
+ * For this Application Processor (AP), set up init_mm, initialise the FPU
+ * and apply the interrupt level 0-6 settings.
+ */
+static void __init smp_cpu_init(void)
+{
+ unsigned long flags;
+ int cpu_id = smp_processor_id();
+ u16 tmp16;
+
+ if (test_and_set_bit(cpu_id, &cpu_initialized)) {
+ printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
+ for (;;)
+ local_irq_enable();
+ }
+ printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
+
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ BUG_ON(current->mm);
+
+ enter_lazy_tlb(&init_mm, current);
+
+ /* Force FPU initialization */
+ clear_using_fpu(current);
+
+ GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT;
+ mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
+
+ GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT;
+ mn10300_ipi_enable(LOCAL_TIMER_IPI);
+
+ GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT;
+ mn10300_ipi_enable(RESCHEDULE_IPI);
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+ GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
+ mn10300_ipi_enable(FLUSH_CACHE_IPI);
+#endif
+
+ mn10300_ipi_shutdown(SMP_BOOT_IRQ);
+
+ /* Set up the non-maskable call function IPI */
+ flags = arch_local_cli_save();
+ GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+ tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
+ arch_local_irq_restore(flags);
+}
+
+/**
+ * smp_prepare_cpu_init - Initialise CPU in startup_secondary
+ *
+ * Apply the interrupt level 0-6 settings and initialise the gdbstub ICR.
+ */
+void smp_prepare_cpu_init(void)
+{
+ int loop;
+
+ /* Set the interrupt vector registers */
+ IVAR0 = EXCEP_IRQ_LEVEL0;
+ IVAR1 = EXCEP_IRQ_LEVEL1;
+ IVAR2 = EXCEP_IRQ_LEVEL2;
+ IVAR3 = EXCEP_IRQ_LEVEL3;
+ IVAR4 = EXCEP_IRQ_LEVEL4;
+ IVAR5 = EXCEP_IRQ_LEVEL5;
+ IVAR6 = EXCEP_IRQ_LEVEL6;
+
+ /* Disable all interrupts and set to priority 6 (lowest) */
+ for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
+ GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
+
+#ifdef CONFIG_GDBSTUB
+ /* initialise GDB-stub */
+ do {
+ unsigned long flags;
+ u16 tmp16;
+
+ flags = arch_local_cli_save();
+ GxICR(GDB_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+ tmp16 = GxICR(GDB_NMI_IPI);
+ arch_local_irq_restore(flags);
+ } while (0);
+#endif
+}
+
+/**
+ * start_secondary - Activate a secondary CPU (AP)
+ * @unused: Thread parameter (ignored).
+ */
+int __init start_secondary(void *unused)
+{
+ smp_cpu_init();
+ smp_callin();
+ while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
+ cpu_relax();
+
+ local_flush_tlb();
+ preempt_disable();
+ smp_online();
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+ init_clockevents();
+#endif
+ cpu_idle();
+ return 0;
+}
+
+/**
+ * smp_prepare_cpus - Boot up secondary CPUs (APs)
+ * @max_cpus: Maximum number of CPUs to boot.
+ *
+ * Call do_boot_cpu, and boot up APs.
+ */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ int phy_id;
+
+ /* Setup boot CPU information */
+ smp_store_cpu_info(0);
+ smp_tune_scheduling();
+
+ init_ipi();
+
+ /* If SMP should be disabled, then finish */
+ if (max_cpus == 0) {
+ printk(KERN_INFO "SMP mode deactivated.\n");
+ goto smp_done;
+ }
+
+ /* Boot secondary CPUs (for which phy_id > 0) */
+ for (phy_id = 0; phy_id < NR_CPUS; phy_id++) {
+ /* Don't boot primary CPU */
+ if (max_cpus <= cpucount + 1)
+ continue;
+ if (phy_id != 0)
+ do_boot_cpu(phy_id);
+ set_cpu_possible(phy_id, true);
+ smp_show_cpu_info(phy_id);
+ }
+
+smp_done:
+ Dprintk("Boot done.\n");
+}
+
+/**
+ * smp_store_cpu_info - Save a CPU's information
+ * @cpu: The CPU to save for.
+ *
+ * Save boot_cpu_data and loops_per_jiffy for the specified CPU.
+ */
+static void __init smp_store_cpu_info(int cpu)
+{
+ struct mn10300_cpuinfo *ci = &cpu_data[cpu];
+
+ *ci = boot_cpu_data;
+ ci->loops_per_jiffy = loops_per_jiffy;
+ ci->type = CPUREV;
+}
+
+/**
+ * smp_tune_scheduling - Set time slice value
+ *
+ * Nothing to do here.
+ */
+static void __init smp_tune_scheduling(void)
+{
+}
+
+/**
+ * do_boot_cpu - Boot up one CPU
+ * @phy_id: Physical ID of CPU to boot.
+ *
+ * Send an IPI to a secondary CPU to boot it. Returns 0 on success, 1
+ * otherwise.
+ */
+static int __init do_boot_cpu(int phy_id)
+{
+ struct task_struct *idle;
+ unsigned long send_status, callin_status;
+ int timeout, cpu_id;
+
+ send_status = GxICR_REQUEST;
+ callin_status = 0;
+ timeout = 0;
+ cpu_id = phy_id;
+
+ cpucount++;
+
+ /* Create idle thread for this CPU */
+ idle = fork_idle(cpu_id);
+ if (IS_ERR(idle))
+ panic("Failed fork for CPU#%d.", cpu_id);
+
+ idle->thread.pc = (unsigned long)start_secondary;
+
+ printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id);
+ start_stack[cpu_id - 1] = idle->thread.sp;
+
+ task_thread_info(idle)->cpu = cpu_id;
+
+ /* Send boot IPI to AP */
+ send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ);
+
+ Dprintk("Waiting for send to finish...\n");
+
+ /* Wait up to 100ms for the AP to acknowledge the boot IPI */
+ do {
+ udelay(1000);
+ send_status =
+ CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
+ } while (send_status == GxICR_REQUEST && timeout++ < 100);
+
+ Dprintk("Waiting for cpu_callin_map.\n");
+
+ if (send_status == 0) {
+ /* Allow AP to start initializing */
+ cpu_set(cpu_id, cpu_callout_map);
+
+ /* Wait for the AP to set its bit in cpu_callin_map */
+ timeout = 0;
+ do {
+ udelay(1000);
+ callin_status = cpu_isset(cpu_id, cpu_callin_map);
+ } while (callin_status == 0 && timeout++ < 5000);
+
+ if (callin_status == 0)
+ Dprintk("Not responding.\n");
+ } else {
+ printk(KERN_WARNING "IPI not delivered.\n");
+ }
+
+ if (send_status == GxICR_REQUEST || callin_status == 0) {
+ cpu_clear(cpu_id, cpu_callout_map);
+ cpu_clear(cpu_id, cpu_callin_map);
+ cpu_clear(cpu_id, cpu_initialized);
+ cpucount--;
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * smp_show_cpu_info - Show SMP CPU information
+ * @cpu: The CPU of interest.
+ */
+static void __init smp_show_cpu_info(int cpu)
+{
+ struct mn10300_cpuinfo *ci = &cpu_data[cpu];
+
+ printk(KERN_INFO
+ "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n",
+ cpu,
+ MN10300_IOCLK / 1000000,
+ (MN10300_IOCLK / 10000) % 100,
+ ci->loops_per_jiffy / (500000 / HZ),
+ (ci->loops_per_jiffy / (5000 / HZ)) % 100);
+}
+
+/**
+ * smp_callin - Set cpu_callin_map of the current CPU ID
+ */
+static void __init smp_callin(void)
+{
+ unsigned long timeout;
+ int cpu;
+
+ cpu = smp_processor_id();
+ timeout = jiffies + (2 * HZ);
+
+ if (cpu_isset(cpu, cpu_callin_map)) {
+ printk(KERN_ERR "CPU#%d already present.\n", cpu);
+ BUG();
+ }
+ Dprintk("CPU#%d waiting for CALLOUT\n", cpu);
+
+ /* Wait up to 2s total for the boot CPU to call us out */
+ while (time_before(jiffies, timeout)) {
+ if (cpu_isset(cpu, cpu_callout_map))
+ break;
+ cpu_relax();
+ }
+
+ if (!time_before(jiffies, timeout)) {
+ printk(KERN_ERR
+ "BUG: CPU#%d started up but did not get a callout!\n",
+ cpu);
+ BUG();
+ }
+
+#ifdef CONFIG_CALIBRATE_DELAY
+ calibrate_delay(); /* Get our bogomips */
+#endif
+
+ /* Save our processor parameters */
+ smp_store_cpu_info(cpu);
+
+ /* Allow the boot processor to continue */
+ cpu_set(cpu, cpu_callin_map);
+}
+
+/**
+ * smp_online - Set cpu_online_map
+ */
+static void __init smp_online(void)
+{
+ int cpu;
+
+ cpu = smp_processor_id();
+
+ local_irq_enable();
+
+ cpu_set(cpu, cpu_online_map);
+ smp_wmb();
+}
+
+/**
+ * smp_cpus_done - Called once all CPUs have been brought up
+ * @max_cpus: Maximum CPU count.
+ *
+ * Do nothing.
+ */
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+/*
+ * smp_prepare_boot_cpu - Set up state for the boot processor.
+ *
+ * Set up the cpu_online_map, cpu_callout_map and cpu_callin_map of the boot
+ * processor (CPU 0).
+ */
+void __devinit smp_prepare_boot_cpu(void)
+{
+ cpu_set(0, cpu_callout_map);
+ cpu_set(0, cpu_callin_map);
+ current_thread_info()->cpu = 0;
+}
+
+/*
+ * initialize_secondary - Initialise a secondary CPU (Application Processor).
+ *
+ * Set SP register and jump to thread's PC address.
+ */
+void initialize_secondary(void)
+{
+ asm volatile (
+ "mov %0,sp \n"
+ "jmp (%1) \n"
+ :
+ : "a"(current->thread.sp), "a"(current->thread.pc));
+}
+
+/**
+ * __cpu_up - Set smp_commenced_mask for the nominated CPU
+ * @cpu: The target CPU.
+ */
+int __devinit __cpu_up(unsigned int cpu)
+{
+ int timeout;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ if (num_online_cpus() == 1)
+ disable_hlt();
+ if (sleep_mode[cpu])
+ run_wakeup_cpu(cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+ cpu_set(cpu, smp_commenced_mask);
+
+ /* Wait 5s total for a response */
+ for (timeout = 0 ; timeout < 5000 ; timeout++) {
+ if (cpu_isset(cpu, cpu_online_map))
+ break;
+ udelay(1000);
+ }
+
+ BUG_ON(!cpu_isset(cpu, cpu_online_map));
+ return 0;
+}
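
The callout/callin handshake implemented by do_boot_cpu(), smp_callin(), smp_online() and __cpu_up() above amounts to two CPUs releasing each other through shared flags. The following user-space toy model is an illustrative sketch only, not kernel code; the flag names merely mirror cpu_callout_map, cpu_callin_map, smp_commenced_mask and cpu_online_map:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int callout, callin, commenced, online;

static void *ap_thread(void *unused)
{
	(void)unused;
	while (!atomic_load(&callout))		/* smp_callin(): wait for the BP's callout */
		;
	atomic_store(&callin, 1);		/* report in to the BP */
	while (!atomic_load(&commenced))	/* start_secondary(): wait to be commenced */
		;
	atomic_store(&online, 1);		/* smp_online(): mark ourselves online */
	return NULL;
}

int main(void)
{
	pthread_t ap;

	pthread_create(&ap, NULL, ap_thread, NULL);
	atomic_store(&callout, 1);		/* do_boot_cpu(): allow the AP to proceed */
	while (!atomic_load(&callin))		/* ...and wait for it to check in */
		;
	atomic_store(&commenced, 1);		/* __cpu_up(): let the AP go online */
	while (!atomic_load(&online))
		;
	puts("AP online");
	return pthread_join(ap, NULL);
}

The real code additionally bounds each wait with a timeout and backs the bring-up out on failure, as shown above.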
+
+/**
+ * setup_profiling_timer - Set up the profiling timer
+ * @multiplier: The frequency multiplier to use
+ *
+ * The frequency of the profiling timer can be changed by writing a multiplier
+ * value into /proc/profile.
+ */
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
+}
+
+/*
+ * CPU hotplug routines
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static int __init topology_init(void)
+{
+ int cpu, ret;
+
+ for_each_present_cpu(cpu) {
+ ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
+ if (ret)
+ printk(KERN_WARNING
+ "topology_init: register_cpu %d failed (%d)\n",
+ cpu, ret);
+ }
+ return 0;
+}
+
+subsys_initcall(topology_init);
+
+int __cpu_disable(void)
+{
+ int cpu = smp_processor_id();
+ if (cpu == 0)
+ return -EBUSY;
+
+ migrate_irqs();
+ cpu_clear(cpu, current->active_mm->cpu_vm_mask);
+ return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+ run_sleep_cpu(cpu);
+
+ if (num_online_cpus() == 1)
+ enable_hlt();
+}
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+static inline void hotplug_cpu_disable_cache(void)
+{
+ int tmp;
+ asm volatile(
+ " movhu (%1),%0 \n"
+ " and %2,%0 \n"
+ " movhu %0,(%1) \n"
+ "1: movhu (%1),%0 \n"
+ " btst %3,%0 \n"
+ " bne 1b \n"
+ : "=&r"(tmp)
+ : "a"(&CHCTR),
+ "i"(~(CHCTR_ICEN | CHCTR_DCEN)),
+ "i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
+ : "memory", "cc");
+}
+
+static inline void hotplug_cpu_enable_cache(void)
+{
+ int tmp;
+ asm volatile(
+ "movhu (%1),%0 \n"
+ "or %2,%0 \n"
+ "movhu %0,(%1) \n"
+ : "=&r"(tmp)
+ : "a"(&CHCTR),
+ "i"(CHCTR_ICEN | CHCTR_DCEN)
+ : "memory", "cc");
+}
+
+static inline void hotplug_cpu_invalidate_cache(void)
+{
+ int tmp;
+ asm volatile (
+ "movhu (%1),%0 \n"
+ "or %2,%0 \n"
+ "movhu %0,(%1) \n"
+ : "=&r"(tmp)
+ : "a"(&CHCTR),
+ "i"(CHCTR_ICINV | CHCTR_DCINV)
+ : "cc");
+}
+
+#else /* CONFIG_MN10300_CACHE_ENABLED */
+#define hotplug_cpu_disable_cache() do {} while (0)
+#define hotplug_cpu_enable_cache() do {} while (0)
+#define hotplug_cpu_invalidate_cache() do {} while (0)
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
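
For readability, here is a hedged C rendering of what the hotplug_cpu_disable_cache() assembly above does. It assumes CHCTR expands to a volatile memory-mapped u16 lvalue and reuses the same bit macros as the assembly; the real code stays in inline assembly, presumably to guarantee the exact halfword access sequence:

static inline void hotplug_cpu_disable_cache_c_sketch(void)
{
	u16 val;

	val = CHCTR;				/* read the cache control register */
	val &= ~(CHCTR_ICEN | CHCTR_DCEN);	/* clear both cache-enable bits */
	CHCTR = val;				/* write the new value back */

	/* spin until the hardware reports both caches idle */
	while (CHCTR & (CHCTR_ICBUSY | CHCTR_DCBUSY))
		;
}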
+
+/**
+ * hotplug_cpu_nmi_call_function - Call a function on other CPUs for hotplug
+ * @cpumask: List of target CPUs.
+ * @func: The function to call on those CPUs.
+ * @info: The context data for the function to be called.
+ * @wait: Whether to wait for the calls to complete.
+ *
+ * Non-maskably call a function on another CPU for hotplug purposes.
+ *
+ * This function must be called with maskable interrupts disabled.
+ */
+static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
+ smp_call_func_t func, void *info,
+ int wait)
+{
+ /*
+ * The address and the size of nmi_call_func_mask_data
+ * need to be aligned on L1_CACHE_BYTES.
+ */
+ static struct nmi_call_data_struct nmi_call_func_mask_data
+ __cacheline_aligned;
+ unsigned long start, end;
+
+ start = (unsigned long)&nmi_call_func_mask_data;
+ end = start + sizeof(struct nmi_call_data_struct);
+
+ nmi_call_func_mask_data.func = func;
+ nmi_call_func_mask_data.info = info;
+ nmi_call_func_mask_data.started = cpumask;
+ nmi_call_func_mask_data.wait = wait;
+ if (wait)
+ nmi_call_func_mask_data.finished = cpumask;
+
+ spin_lock(&smp_nmi_call_lock);
+ nmi_call_data = &nmi_call_func_mask_data;
+ mn10300_local_dcache_flush_range(start, end);
+ smp_wmb();
+
+ send_IPI_mask(cpumask, CALL_FUNCTION_NMI_IPI);
+
+ do {
+ mn10300_local_dcache_inv_range(start, end);
+ barrier();
+ } while (!cpus_empty(nmi_call_func_mask_data.started));
+
+ if (wait) {
+ do {
+ mn10300_local_dcache_inv_range(start, end);
+ barrier();
+ } while (!cpus_empty(nmi_call_func_mask_data.finished));
+ }
+
+ spin_unlock(&smp_nmi_call_lock);
+ return 0;
+}
+
+static void restart_wakeup_cpu(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ cpu_set(cpu, cpu_callin_map);
+ local_flush_tlb();
+ cpu_set(cpu, cpu_online_map);
+ smp_wmb();
+}
+
+static void prepare_sleep_cpu(void *unused)
+{
+ sleep_mode[smp_processor_id()] = 1;
+ smp_mb();
+ mn10300_local_dcache_flush_inv();
+ hotplug_cpu_disable_cache();
+ hotplug_cpu_invalidate_cache();
+}
+
+/* When this function is called, IE=0 and NMID=0. */
+static void sleep_cpu(void *unused)
+{
+ unsigned int cpu_id = smp_processor_id();
+ /*
+ * The CALL_FUNCTION_NMI_IPI for wakeup_cpu() must not be requested
+ * before this CPU goes into SLEEP mode.
+ */
+ do {
+ smp_mb();
+ __sleep_cpu();
+ } while (sleep_mode[cpu_id]);
+ restart_wakeup_cpu();
+}
+
+static void run_sleep_cpu(unsigned int cpu)
+{
+ unsigned long flags;
+ cpumask_t cpumask = cpumask_of(cpu);
+
+ flags = arch_local_cli_save();
+ hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
+ hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
+ udelay(1); /* delay for the cpu to sleep. */
+ arch_local_irq_restore(flags);
+}
+
+static void wakeup_cpu(void)
+{
+ hotplug_cpu_invalidate_cache();
+ hotplug_cpu_enable_cache();
+ smp_mb();
+ sleep_mode[smp_processor_id()] = 0;
+}
+
+static void run_wakeup_cpu(unsigned int cpu)
+{
+ unsigned long flags;
+
+ flags = arch_local_cli_save();
+#if NR_CPUS == 2
+ mn10300_local_dcache_flush_inv();
+#else
+ /*
+ * Before waking up the CPU, all online CPUs must stop and flush
+ * their dcaches so that global data is consistent in RAM.
+ */
+#error NR_CPUS > 2 is not supported when CONFIG_HOTPLUG_CPU=y
+#endif
+ hotplug_cpu_nmi_call_function(cpumask_of(cpu), wakeup_cpu, NULL, 1);
+ arch_local_irq_restore(flags);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/mn10300/kernel/switch_to.S b/arch/mn10300/kernel/switch_to.S
index 630aad71b94..9074d0fb878 100644
--- a/arch/mn10300/kernel/switch_to.S
+++ b/arch/mn10300/kernel/switch_to.S
@@ -15,6 +15,9 @@
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/cpu-regs.h>
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+#endif /* CONFIG_SMP */
.text
@@ -35,8 +38,6 @@ ENTRY(__switch_to)
mov d1,a1
# save prev context
- mov (__frame),d0
- mov d0,(THREAD_FRAME,a0)
mov __switch_back,d0
mov d0,(THREAD_PC,a0)
mov sp,a2
@@ -58,8 +59,6 @@ ENTRY(__switch_to)
mov a2,e2
#endif
- mov (THREAD_FRAME,a1),a2
- mov a2,(__frame)
mov (THREAD_PC,a1),a2
mov d2,d0 # for ret_from_fork
mov d0,a0 # for __switch_to
diff --git a/arch/mn10300/kernel/time.c b/arch/mn10300/kernel/time.c
index 8f7f6d22783..f860a340acc 100644
--- a/arch/mn10300/kernel/time.c
+++ b/arch/mn10300/kernel/time.c
@@ -17,29 +17,18 @@
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/cnt32_to_63.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/processor.h>
#include <asm/intctl-regs.h>
#include <asm/rtc.h>
-
-#ifdef CONFIG_MN10300_RTC
-unsigned long mn10300_ioclk; /* system I/O clock frequency */
-unsigned long mn10300_iobclk; /* system I/O clock frequency */
-unsigned long mn10300_tsc_per_HZ; /* number of ioclks per jiffy */
-#endif /* CONFIG_MN10300_RTC */
+#include "internal.h"
static unsigned long mn10300_last_tsc; /* time-stamp counter at last time
* interrupt occurred */
-static irqreturn_t timer_interrupt(int irq, void *dev_id);
-
-static struct irqaction timer_irq = {
- .handler = timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER,
- .name = "timer",
-};
-
static unsigned long sched_clock_multiplier;
/*
@@ -54,9 +43,12 @@ unsigned long long sched_clock(void)
unsigned long tsc, tmp;
unsigned product[3]; /* 96-bit intermediate value */
+ /* cnt32_to_63() is not safe with preemption */
+ preempt_disable();
+
/* read the TSC value
*/
- tsc = 0 - get_cycles(); /* get_cycles() counts down */
+ tsc = get_cycles();
/* expand to 64-bits.
* - sched_clock() must be called once a minute or better or the
@@ -64,6 +56,8 @@ unsigned long long sched_clock(void)
*/
tsc64.ll = cnt32_to_63(tsc) & 0x7fffffffffffffffULL;
+ preempt_enable();
+
/* scale the 64-bit TSC value to a nanosecond value via a 96-bit
* intermediate
*/
@@ -90,6 +84,20 @@ static void __init mn10300_sched_clock_init(void)
__muldiv64u(NSEC_PER_SEC, 1 << 16, MN10300_TSCCLK);
}
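
The 96-bit scaling step referred to above can be pictured in plain C: split the 64-bit cycle count into halves, multiply each half by the 32-bit multiplier (which carries a 2^16 scale factor), then divide the scale back out. This is a hedged user-space sketch, not the kernel's code, which does the same thing with __muldiv64u() and an unsigned product[3] array:

#include <stdint.h>

/* mult is assumed to be (NSEC_PER_SEC << 16) / MN10300_TSCCLK */
static uint64_t cycles_to_ns(uint64_t tsc, uint32_t mult)
{
	uint64_t lo = (uint32_t)tsc;	/* low 32 bits of the cycle count */
	uint64_t hi = tsc >> 32;	/* high 32 bits of the cycle count */

	/*
	 * Full product = hi*mult*2^32 + lo*mult (up to 96 bits wide);
	 * dividing out the 2^16 scale gives hi*mult*2^16 + (lo*mult >> 16).
	 */
	return ((hi * mult) << 16) + ((lo * mult) >> 16);
}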
+/**
+ * local_timer_interrupt - Local timer interrupt handler
+ *
+ * Handle local timer interrupts for this CPU. They may have been propagated
+ * to this CPU from the CPU that actually gets them by way of an IPI.
+ */
+irqreturn_t local_timer_interrupt(void)
+{
+ profile_tick(CPU_PROFILING);
+ update_process_times(user_mode(get_irq_regs()));
+ return IRQ_HANDLED;
+}
+
+#ifndef CONFIG_GENERIC_TIME
/*
* advance the kernel's time keeping clocks (xtime and jiffies)
* - we use Timer 0 & 1 cascaded as a clock to nudge us the next time
@@ -98,27 +106,73 @@ static void __init mn10300_sched_clock_init(void)
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
unsigned tsc, elapse;
+ irqreturn_t ret;
write_seqlock(&xtime_lock);
while (tsc = get_cycles(),
- elapse = mn10300_last_tsc - tsc, /* time elapsed since last
+ elapse = tsc - mn10300_last_tsc, /* time elapsed since last
* tick */
elapse > MN10300_TSC_PER_HZ
) {
- mn10300_last_tsc -= MN10300_TSC_PER_HZ;
+ mn10300_last_tsc += MN10300_TSC_PER_HZ;
/* advance the kernel's time tracking system */
- profile_tick(CPU_PROFILING);
do_timer(1);
}
write_sequnlock(&xtime_lock);
- update_process_times(user_mode(get_irq_regs()));
+ ret = local_timer_interrupt();
+#ifdef CONFIG_SMP
+ send_IPI_allbutself(LOCAL_TIMER_IPI);
+#endif
+ return ret;
+}
- return IRQ_HANDLED;
+static struct irqaction timer_irq = {
+ .handler = timer_interrupt,
+ .flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER,
+ .name = "timer",
+};
+#endif /* CONFIG_GENERIC_TIME */
+
+#ifdef CONFIG_CSRC_MN10300
+void __init clocksource_set_clock(struct clocksource *cs, unsigned int clock)
+{
+ u64 temp;
+ u32 shift;
+
+ /* Find a shift value */
+ for (shift = 32; shift > 0; shift--) {
+ temp = (u64) NSEC_PER_SEC << shift;
+ do_div(temp, clock);
+ if ((temp >> 32) == 0)
+ break;
+ }
+ cs->shift = shift;
+ cs->mult = (u32) temp;
}
+#endif
+
+#ifdef CONFIG_CEVT_MN10300
+void __cpuinit clockevent_set_clock(struct clock_event_device *cd,
+ unsigned int clock)
+{
+ u64 temp;
+ u32 shift;
+
+ /* Find a shift value */
+ for (shift = 32; shift > 0; shift--) {
+ temp = (u64) clock << shift;
+ do_div(temp, NSEC_PER_SEC);
+ if ((temp >> 32) == 0)
+ break;
+ }
+ cd->shift = shift;
+ cd->mult = (u32) temp;
+}
+#endif
/*
* initialise the various timers used by the main part of the kernel
@@ -131,21 +185,25 @@ void __init time_init(void)
*/
TMPSCNT |= TMPSCNT_ENABLE;
+#ifdef CONFIG_GENERIC_TIME
+ init_clocksource();
+#else
startup_timestamp_counter();
+#endif
printk(KERN_INFO
"timestamp counter I/O clock running at %lu.%02lu"
" (calibrated against RTC)\n",
MN10300_TSCCLK / 1000000, (MN10300_TSCCLK / 10000) % 100);
- mn10300_last_tsc = TMTSCBC;
-
- /* use timer 0 & 1 cascaded to tick at as close to HZ as possible */
- setup_irq(TMJCIRQ, &timer_irq);
+ mn10300_last_tsc = read_timestamp_counter();
- set_intr_level(TMJCIRQ, TMJCICR_LEVEL);
-
- startup_jiffies_counter();
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+ init_clockevents();
+#else
+ reload_jiffies_counter(MN10300_JC_PER_HZ - 1);
+ setup_jiffies_interrupt(TMJCIRQ, &timer_irq, CONFIG_TIMER_IRQ_LEVEL);
+#endif
#ifdef CONFIG_MN10300_WD_TIMER
/* start the watchdog timer */
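
The clocksource_set_clock()/clockevent_set_clock() helpers added above both search for the largest shift for which the scaled ratio still fits in 32 bits; the resulting mult/shift pair then converts cycles to nanoseconds as ns = (cycles * mult) >> shift (the clock event device uses the inverted ratio to go from nanoseconds back to ticks). A small user-space demonstration of the clocksource direction, illustrative only, with a 33MHz clock picked purely as an example:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static void clocksource_calc(uint32_t *mult, uint32_t *shift, uint32_t clock)
{
	uint64_t temp = 0;
	uint32_t sh;

	/* largest shift for which (NSEC_PER_SEC << shift) / clock fits in 32 bits */
	for (sh = 32; sh > 0; sh--) {
		temp = (NSEC_PER_SEC << sh) / clock;
		if ((temp >> 32) == 0)
			break;
	}
	*shift = sh;
	*mult = (uint32_t)temp;
}

int main(void)
{
	uint32_t mult, shift;

	clocksource_calc(&mult, &shift, 33000000);	/* e.g. a 33MHz ioclk */

	/* one second's worth of cycles should come back as roughly 1e9 ns */
	printf("mult=%u shift=%u -> %llu ns\n", mult, shift,
	       (unsigned long long)(((uint64_t)33000000 * mult) >> shift));
	return 0;
}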
diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c
index 91365adba4f..b90c3f160c7 100644
--- a/arch/mn10300/kernel/traps.c
+++ b/arch/mn10300/kernel/traps.c
@@ -45,9 +45,6 @@
#error "INTERRUPT_VECTOR_BASE not aligned to 16MiB boundary!"
#endif
-struct pt_regs *__frame; /* current frame pointer */
-EXPORT_SYMBOL(__frame);
-
int kstack_depth_to_print = 24;
spinlock_t die_lock = __SPIN_LOCK_UNLOCKED(die_lock);
@@ -101,7 +98,6 @@ DO_EINFO(SIGILL, {}, "invalid opcode", invalid_op, ILL_ILLOPC);
DO_EINFO(SIGILL, {}, "invalid ex opcode", invalid_exop, ILL_ILLOPC);
DO_EINFO(SIGBUS, {}, "invalid address", mem_error, BUS_ADRERR);
DO_EINFO(SIGBUS, {}, "bus error", bus_error, BUS_ADRERR);
-DO_EINFO(SIGILL, {}, "FPU invalid opcode", fpu_invalid_op, ILL_COPROC);
DO_ERROR(SIGTRAP,
#ifndef CONFIG_MN10300_USING_JTAG
@@ -222,11 +218,14 @@ void show_registers_only(struct pt_regs *regs)
printk(KERN_EMERG "threadinfo=%p task=%p)\n",
current_thread_info(), current);
- if ((unsigned long) current >= 0x90000000UL &&
- (unsigned long) current < 0x94000000UL)
+ if ((unsigned long) current >= PAGE_OFFSET &&
+ (unsigned long) current < (unsigned long)high_memory)
printk(KERN_EMERG "Process %s (pid: %d)\n",
current->comm, current->pid);
+#ifdef CONFIG_SMP
+ printk(KERN_EMERG "CPUID: %08x\n", CPUID);
+#endif
printk(KERN_EMERG "CPUP: %04hx\n", CPUP);
printk(KERN_EMERG "TBR: %08x\n", TBR);
printk(KERN_EMERG "DEAR: %08x\n", DEAR);
@@ -522,8 +521,12 @@ void __init set_intr_stub(enum exception_code code, void *handler)
{
unsigned long addr;
u8 *vector = (u8 *)(CONFIG_INTERRUPT_VECTOR_BASE + code);
+ unsigned long flags;
addr = (unsigned long) handler - (unsigned long) vector;
+
+ flags = arch_local_cli_save();
+
vector[0] = 0xdc; /* JMP handler */
vector[1] = addr;
vector[2] = addr >> 8;
@@ -533,30 +536,12 @@ void __init set_intr_stub(enum exception_code code, void *handler)
vector[6] = 0xcb;
vector[7] = 0xcb;
- mn10300_dcache_flush_inv();
- mn10300_icache_inv();
-}
-
-/*
- * set an interrupt stub to invoke the JTAG unit and then jump to a handler
- */
-void __init set_jtag_stub(enum exception_code code, void *handler)
-{
- unsigned long addr;
- u8 *vector = (u8 *)(CONFIG_INTERRUPT_VECTOR_BASE + code);
-
- addr = (unsigned long) handler - ((unsigned long) vector + 1);
- vector[0] = 0xff; /* PI to jump into JTAG debugger */
- vector[1] = 0xdc; /* jmp handler */
- vector[2] = addr;
- vector[3] = addr >> 8;
- vector[4] = addr >> 16;
- vector[5] = addr >> 24;
- vector[6] = 0xcb;
- vector[7] = 0xcb;
+ arch_local_irq_restore(flags);
+#ifndef CONFIG_MN10300_CACHE_SNOOP
mn10300_dcache_flush_inv();
- flush_icache_range((unsigned long) vector, (unsigned long) vector + 8);
+ mn10300_icache_inv();
+#endif
}
/*
@@ -581,7 +566,6 @@ void __init trap_init(void)
set_excp_vector(EXCEP_PRIVINSACC, insn_acc_error);
set_excp_vector(EXCEP_PRIVDATACC, data_acc_error);
set_excp_vector(EXCEP_DATINSACC, insn_acc_error);
- set_excp_vector(EXCEP_FPU_DISABLED, fpu_disabled);
set_excp_vector(EXCEP_FPU_UNIMPINS, fpu_invalid_op);
set_excp_vector(EXCEP_FPU_OPERATION, fpu_exception);
diff --git a/arch/mn10300/lib/bitops.c b/arch/mn10300/lib/bitops.c
index 440a7dcbf87..a66c6cdaf44 100644
--- a/arch/mn10300/lib/bitops.c
+++ b/arch/mn10300/lib/bitops.c
@@ -15,7 +15,7 @@
/*
* try flipping a bit using BSET and BCLR
*/
-void change_bit(int nr, volatile void *addr)
+void change_bit(unsigned long nr, volatile void *addr)
{
if (test_bit(nr, addr))
goto try_clear_bit;
@@ -34,7 +34,7 @@ try_clear_bit:
/*
* try flipping a bit using BSET and BCLR and returning the old value
*/
-int test_and_change_bit(int nr, volatile void *addr)
+int test_and_change_bit(unsigned long nr, volatile void *addr)
{
if (test_bit(nr, addr))
goto try_clear_bit;
diff --git a/arch/mn10300/lib/delay.c b/arch/mn10300/lib/delay.c
index fdf6f710f94..8e7ceb8ba33 100644
--- a/arch/mn10300/lib/delay.c
+++ b/arch/mn10300/lib/delay.c
@@ -38,14 +38,14 @@ EXPORT_SYMBOL(__delay);
*/
void __udelay(unsigned long usecs)
{
- signed long ioclk, stop;
+ unsigned long start, stop, cnt;
/* usecs * CLK / 1E6 */
stop = __muldiv64u(usecs, MN10300_TSCCLK, 1000000);
- stop = TMTSCBC - stop;
+ start = TMTSCBC;
do {
- ioclk = TMTSCBC;
- } while (stop < ioclk);
+ cnt = start - TMTSCBC;
+ } while (cnt < stop);
}
EXPORT_SYMBOL(__udelay);
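
The rewritten __udelay() above measures elapsed time as an unsigned difference from a down-counting, free-running counter, which stays correct across counter wrap; the old signed comparison against a precomputed stop value did not. A minimal sketch of the same pattern, with a hypothetical read_down_counter() standing in for reading TMTSCBC:

#include <stdint.h>

/* hypothetical accessor standing in for reading the TMTSCBC register */
extern uint32_t read_down_counter(void);

static void spin_for(uint32_t ticks)
{
	uint32_t start = read_down_counter();

	/* down-counter: elapsed = start - now, taken modulo 2^32 */
	while ((uint32_t)(start - read_down_counter()) < ticks)
		;
}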
diff --git a/arch/mn10300/lib/do_csum.S b/arch/mn10300/lib/do_csum.S
index e138994e166..1d27bba0cd8 100644
--- a/arch/mn10300/lib/do_csum.S
+++ b/arch/mn10300/lib/do_csum.S
@@ -10,26 +10,25 @@
*/
#include <asm/cache.h>
- .section .text
- .balign L1_CACHE_BYTES
+ .section .text
+ .balign L1_CACHE_BYTES
###############################################################################
#
-# unsigned int do_csum(const unsigned char *buff, size_t len)
+# unsigned int do_csum(const unsigned char *buff, int len)
#
###############################################################################
.globl do_csum
- .type do_csum,@function
+ .type do_csum,@function
do_csum:
movm [d2,d3],(sp)
- mov d0,(12,sp)
- mov d1,(16,sp)
mov d1,d2 # count
mov d0,a0 # buff
+ mov a0,a1
clr d1 # accumulator
cmp +0,d2
- beq do_csum_done # return if zero-length buffer
+ ble do_csum_done # check for zero length or negative
# 4-byte align the buffer pointer
btst +3,a0
@@ -41,17 +40,15 @@ do_csum:
inc a0
asl +8,d0
add d0,d1
- addc +0,d1
add -1,d2
-do_csum_addr_not_odd:
+do_csum_addr_not_odd:
cmp +2,d2
bcs do_csum_fewer_than_4
btst +2,a0
beq do_csum_now_4b_aligned
movhu (a0+),d0
add d0,d1
- addc +0,d1
add -2,d2
cmp +4,d2
bcs do_csum_fewer_than_4
@@ -66,20 +63,20 @@ do_csum_now_4b_aligned:
do_csum_loop:
mov (a0+),d0
- add d0,d1
mov (a0+),e0
- addc e0,d1
mov (a0+),e1
- addc e1,d1
mov (a0+),e3
+ add d0,d1
+ addc e0,d1
+ addc e1,d1
addc e3,d1
mov (a0+),d0
- addc d0,d1
mov (a0+),e0
- addc e0,d1
mov (a0+),e1
- addc e1,d1
mov (a0+),e3
+ addc d0,d1
+ addc e0,d1
+ addc e1,d1
addc e3,d1
addc +0,d1
@@ -94,12 +91,12 @@ do_csum_remainder:
cmp +16,d2
bcs do_csum_fewer_than_16
mov (a0+),d0
- add d0,d1
mov (a0+),e0
- addc e0,d1
mov (a0+),e1
- addc e1,d1
mov (a0+),e3
+ add d0,d1
+ addc e0,d1
+ addc e1,d1
addc e3,d1
addc +0,d1
add -16,d2
@@ -131,9 +128,9 @@ do_csum_fewer_than_4:
xor_cmp d0,d0,+2,d2
bcs do_csum_fewer_than_2
movhu (a0+),d0
-do_csum_fewer_than_2:
and +1,d2
beq do_csum_add_last_bit
+do_csum_fewer_than_2:
movbu (a0),d3
add d3,d0
do_csum_add_last_bit:
@@ -142,21 +139,19 @@ do_csum_add_last_bit:
do_csum_done:
# compress the checksum down to 16 bits
- mov +0xffff0000,d2
- and d1,d2
+ mov +0xffff0000,d0
+ and d1,d0
asl +16,d1
- add d2,d1,d0
+ add d1,d0
addc +0xffff,d0
lsr +16,d0
# flip the halves of the word result if the buffer was oddly aligned
- mov (12,sp),d1
- and +1,d1
+ and +1,a1
beq do_csum_not_oddly_aligned
swaph d0,d0 # exchange bits 15:8 with 7:0
do_csum_not_oddly_aligned:
ret [d2,d3],8
-do_csum_end:
- .size do_csum, do_csum_end-do_csum
+ .size do_csum, .-do_csum
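
For reference, here is a plain C sketch of the sum-and-fold that do_csum performs. It is illustrative only: the unrolled 32-byte loop, the alignment fix-ups and the odd-address byte swap handled by the assembly above are omitted, and the buffer is assumed 16-bit aligned:

#include <stdint.h>

static unsigned int do_csum_c(const unsigned char *buff, int len)
{
	uint32_t sum = 0;

	while (len > 1) {			/* sum whole 16-bit words */
		sum += *(const uint16_t *)buff;
		buff += 2;
		len -= 2;
	}
	if (len == 1)				/* trailing odd byte (little-endian) */
		sum += *buff;

	sum = (sum >> 16) + (sum & 0xffff);	/* fold carries back in */
	sum += sum >> 16;
	return sum & 0xffff;			/* "compress the checksum down to 16 bits" */
}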
diff --git a/arch/mn10300/mm/Kconfig.cache b/arch/mn10300/mm/Kconfig.cache
new file mode 100644
index 00000000000..c4fd923a55a
--- /dev/null
+++ b/arch/mn10300/mm/Kconfig.cache
@@ -0,0 +1,101 @@
+#
+# MN10300 CPU cache options
+#
+
+choice
+ prompt "CPU Caching mode"
+ default MN10300_CACHE_WBACK
+ help
+ This option determines the caching mode for the kernel.
+
+ Write-Back caching mode involves all reads and writes causing
+ the affected cacheline to be read into the cache first before being
+ operated upon. Memory is not then updated by a write until the cache
+ is filled and a cacheline needs to be displaced from the cache to
+ make room. Only at that point is it written back.
+
+ Write-Through caching only fetches cachelines from memory on a
+ read. Writes always get written directly to memory. If the affected
+ cacheline is also in cache, it will be updated too.
+
+ The final option is to turn off caching entirely.
+
+config MN10300_CACHE_WBACK
+ bool "Write-Back"
+ help
+ The dcache operates in delayed write-back mode. It must be manually
+ flushed if writes are made that subsequently need to be executed or
+ to be DMA'd by a device.
+
+config MN10300_CACHE_WTHRU
+ bool "Write-Through"
+ help
+ The dcache operates in immediate write-through mode. Writes are
+ committed to RAM immediately in addition to being stored in the
+ cache. This means that the written data is immediately available for
+ execution or DMA.
+
+ This is not available for use with an SMP kernel if cache flushing
+ and invalidation by automatic purge register is not selected.
+
+config MN10300_CACHE_DISABLED
+ bool "Disabled"
+ help
+ The icache and dcache are disabled.
+
+endchoice
+
+config MN10300_CACHE_ENABLED
+ def_bool y if !MN10300_CACHE_DISABLED
+
+
+choice
+ prompt "CPU cache flush/invalidate method"
+ default MN10300_CACHE_MANAGE_BY_TAG if !AM34_2
+ default MN10300_CACHE_MANAGE_BY_REG if AM34_2
+ depends on MN10300_CACHE_ENABLED
+ help
+ This determines the method by which CPU cache flushing and
+ invalidation is performed.
+
+config MN10300_CACHE_MANAGE_BY_TAG
+ bool "Use the cache tag registers directly"
+ depends on !(SMP && MN10300_CACHE_WTHRU)
+
+config MN10300_CACHE_MANAGE_BY_REG
+ bool "Flush areas by way of automatic purge registers (AM34 only)"
+ depends on AM34_2
+
+endchoice
+
+config MN10300_CACHE_INV_BY_TAG
+ def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_ENABLED
+
+config MN10300_CACHE_INV_BY_REG
+ def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_ENABLED
+
+config MN10300_CACHE_FLUSH_BY_TAG
+ def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_WBACK
+
+config MN10300_CACHE_FLUSH_BY_REG
+ def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_WBACK
+
+
+config MN10300_HAS_CACHE_SNOOP
+ def_bool n
+
+config MN10300_CACHE_SNOOP
+ bool "Use CPU Cache Snooping"
+ depends on MN10300_CACHE_ENABLED && MN10300_HAS_CACHE_SNOOP
+ default y
+
+config MN10300_CACHE_FLUSH_ICACHE
+ def_bool y if MN10300_CACHE_WBACK && !MN10300_CACHE_SNOOP
+ help
+ Set if the dcache needs to be flushed before the icache is invalidated.
+
+config MN10300_CACHE_INV_ICACHE
+ def_bool y if MN10300_CACHE_WTHRU && !MN10300_CACHE_SNOOP
+ help
+ Set if we need the icache to be invalidated, even if the dcache is in
+ write-through mode and doesn't need flushing.
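
As the MN10300_CACHE_WBACK help text notes, dirty data sitting in a write-back dcache is invisible to a DMA engine until it has been written back. A hedged example of the kind of manual flush that implies, using the mn10300_dcache_flush_range() routine this series provides; the surrounding prepare_buffer_for_dma() helper and its placement are hypothetical:

#include <linux/types.h>
#include <asm/cacheflush.h>

static void prepare_buffer_for_dma(void *buf, size_t len)
{
	unsigned long start = (unsigned long)buf;

	/* push any dirty cachelines covering [start, start + len) out to RAM */
	mn10300_dcache_flush_range(start, start + len);
}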
diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile
index 1557277fbc5..203fee23f7d 100644
--- a/arch/mn10300/mm/Makefile
+++ b/arch/mn10300/mm/Makefile
@@ -2,11 +2,21 @@
# Makefile for the MN10300-specific memory management code
#
-cacheflush-y := cache.o cache-mn10300.o
-cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
+cache-smp-wback-$(CONFIG_MN10300_CACHE_WBACK) := cache-smp-flush.o
+
+cacheflush-y := cache.o
+cacheflush-$(CONFIG_SMP) += cache-smp.o cache-smp-inv.o $(cache-smp-wback-y)
+cacheflush-$(CONFIG_MN10300_CACHE_INV_ICACHE) += cache-inv-icache.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_ICACHE) += cache-flush-icache.o
+cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_TAG) += cache-inv-by-tag.o
+cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_REG) += cache-inv-by-reg.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_TAG) += cache-flush-by-tag.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_REG) += cache-flush-by-reg.o
cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
obj-y := \
init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
misalignment.o dma-alloc.o $(cacheflush-y)
+
+obj-$(CONFIG_SMP) += tlb-smp.o
diff --git a/arch/mn10300/mm/cache-flush-by-reg.S b/arch/mn10300/mm/cache-flush-by-reg.S
new file mode 100644
index 00000000000..1dcae021167
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-by-reg.S
@@ -0,0 +1,308 @@
+/* MN10300 CPU core caching routines, using indirect regs on cache controller
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+
+ .am33_2
+
+#ifndef CONFIG_SMP
+ .globl mn10300_dcache_flush
+ .globl mn10300_dcache_flush_page
+ .globl mn10300_dcache_flush_range
+ .globl mn10300_dcache_flush_range2
+ .globl mn10300_dcache_flush_inv
+ .globl mn10300_dcache_flush_inv_page
+ .globl mn10300_dcache_flush_inv_range
+ .globl mn10300_dcache_flush_inv_range2
+
+mn10300_dcache_flush = mn10300_local_dcache_flush
+mn10300_dcache_flush_page = mn10300_local_dcache_flush_page
+mn10300_dcache_flush_range = mn10300_local_dcache_flush_range
+mn10300_dcache_flush_range2 = mn10300_local_dcache_flush_range2
+mn10300_dcache_flush_inv = mn10300_local_dcache_flush_inv
+mn10300_dcache_flush_inv_page = mn10300_local_dcache_flush_inv_page
+mn10300_dcache_flush_inv_range = mn10300_local_dcache_flush_inv_range
+mn10300_dcache_flush_inv_range2 = mn10300_local_dcache_flush_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush(void)
+# Flush the entire data cache back to RAM
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush
+ .type mn10300_local_dcache_flush,@function
+mn10300_local_dcache_flush:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_flush_end
+
+ mov DCPGCR,a0
+
+ LOCAL_CLI_SAVE(d1)
+
+ # wait for busy bit of area purge
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ # set mask
+ clr d0
+ mov d0,(DCPGMR)
+
+ # area purge
+ #
+ # DCPGCR = DCPGCR_DCP
+ #
+ mov DCPGCR_DCP,d0
+ mov d0,(a0)
+
+ # wait for busy bit of area purge
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ LOCAL_IRQ_RESTORE(d1)
+
+mn10300_local_dcache_flush_end:
+ ret [],0
+ .size mn10300_local_dcache_flush,.-mn10300_local_dcache_flush
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_page(unsigned long start)
+# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size)
+# Flush a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_page
+ .globl mn10300_local_dcache_flush_range
+ .globl mn10300_local_dcache_flush_range2
+ .type mn10300_local_dcache_flush_page,@function
+ .type mn10300_local_dcache_flush_range,@function
+ .type mn10300_local_dcache_flush_range2,@function
+mn10300_local_dcache_flush_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_flush_range2:
+ add d0,d1
+mn10300_local_dcache_flush_range:
+ movm [d2,d3,a2],(sp)
+
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_flush_range_end
+
+ # calculate alignsize
+ #
+ # alignsize = L1_CACHE_BYTES;
+ # for (i = (end - start - 1) / L1_CACHE_BYTES ; i > 0; i >>= 1)
+ # alignsize <<= 1;
+ # d2 = alignsize;
+ #
+ mov L1_CACHE_BYTES,d2
+ sub d0,d1,d3
+ add -1,d3
+ lsr L1_CACHE_SHIFT,d3
+ beq 2f
+1:
+ add d2,d2
+ lsr 1,d3
+ bne 1b
+2:
+ mov d1,a1 # a1 = end
+
+ LOCAL_CLI_SAVE(d3)
+ mov DCPGCR,a0
+
+ # wait for busy bit of area purge
+ setlb
+ mov (a0),d1
+ btst DCPGCR_DCPGBSY,d1
+ lne
+
+ # determine the mask
+ mov d2,d1
+ add -1,d1
+ not d1 # d1 = mask = ~(alignsize-1)
+ mov d1,(DCPGMR)
+
+ and d1,d0,a2 # a2 = mask & start
+
+dcpgloop:
+ # area purge
+ mov a2,d0
+ or DCPGCR_DCP,d0
+ mov d0,(a0) # DCPGCR = (mask & start) | DCPGCR_DCP
+
+ # wait for busy bit of area purge
+ setlb
+ mov (a0),d1
+ btst DCPGCR_DCPGBSY,d1
+ lne
+
+ # check purge of end address
+ add d2,a2 # a2 += alignsize
+ cmp a1,a2 # if (a2 < end) goto dcpgloop
+ bns dcpgloop
+
+ LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_flush_range_end:
+ ret [d2,d3,a2],12
+
+ .size mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page
+ .size mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range
+ .size mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv(void)
+# Flush the entire data cache and invalidate all entries
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_inv
+ .type mn10300_local_dcache_flush_inv,@function
+mn10300_local_dcache_flush_inv:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_flush_inv_end
+
+ mov DCPGCR,a0
+
+ LOCAL_CLI_SAVE(d1)
+
+ # wait for busy bit of area purge & invalidate
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ # set the mask to cover everything
+ clr d0
+ mov d0,(DCPGMR)
+
+ # area purge & invalidate
+ mov DCPGCR_DCP|DCPGCR_DCI,d0
+ mov d0,(a0)
+
+ # wait for busy bit of area purge & invalidate
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ LOCAL_IRQ_RESTORE(d1)
+
+mn10300_local_dcache_flush_inv_end:
+ ret [],0
+ .size mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv_page(unsigned long start)
+# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+# Flush and invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_inv_page
+ .globl mn10300_local_dcache_flush_inv_range
+ .globl mn10300_local_dcache_flush_inv_range2
+ .type mn10300_local_dcache_flush_inv_page,@function
+ .type mn10300_local_dcache_flush_inv_range,@function
+ .type mn10300_local_dcache_flush_inv_range2,@function
+mn10300_local_dcache_flush_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_flush_inv_range2:
+ add d0,d1
+mn10300_local_dcache_flush_inv_range:
+ movm [d2,d3,a2],(sp)
+
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_flush_inv_range_end
+
+ # calculate alignsize
+ #
+ # alignsize = L1_CACHE_BYTES;
+ # for (i = (end - start - 1) / L1_CACHE_BYTES; i > 0; i >>= 1)
+ # alignsize <<= 1;
+ # d2 = alignsize
+ #
+ mov L1_CACHE_BYTES,d2
+ sub d0,d1,d3
+ add -1,d3
+ lsr L1_CACHE_SHIFT,d3
+ beq 2f
+1:
+ add d2,d2
+ lsr 1,d3
+ bne 1b
+2:
+ mov d1,a1 # a1 = end
+
+ LOCAL_CLI_SAVE(d3)
+ mov DCPGCR,a0
+
+ # wait for busy bit of area purge & invalidate
+ setlb
+ mov (a0),d1
+ btst DCPGCR_DCPGBSY,d1
+ lne
+
+ # set the mask
+ mov d2,d1
+ add -1,d1
+ not d1 # d1 = mask = ~(alignsize-1)
+ mov d1,(DCPGMR)
+
+ and d1,d0,a2 # a2 = mask & start
+
+dcpgivloop:
+ # area purge & invalidate
+ mov a2,d0
+ or DCPGCR_DCP|DCPGCR_DCI,d0
+ mov d0,(a0) # DCPGCR = (mask & start)|DCPGCR_DCP|DCPGCR_DCI
+
+ # wait for busy bit of area purge & invalidate
+ setlb
+ mov (a0),d1
+ btst DCPGCR_DCPGBSY,d1
+ lne
+
+ # check purge & invalidate of end address
+ add d2,a2 # a2 += alignsize
+ cmp a1,a2 # if (a2 < end) goto dcpgivloop
+ bns dcpgivloop
+
+ LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_flush_inv_range_end:
+ ret [d2,d3,a2],12
+ .size mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page
+ .size mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range
+ .size mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2
diff --git a/arch/mn10300/mm/cache-flush-by-tag.S b/arch/mn10300/mm/cache-flush-by-tag.S
new file mode 100644
index 00000000000..5cd6a27dd63
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-by-tag.S
@@ -0,0 +1,251 @@
+/* MN10300 CPU core caching routines, using direct tag flushing
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+
+ .am33_2
+
+#ifndef CONFIG_SMP
+ .globl mn10300_dcache_flush
+ .globl mn10300_dcache_flush_page
+ .globl mn10300_dcache_flush_range
+ .globl mn10300_dcache_flush_range2
+ .globl mn10300_dcache_flush_inv
+ .globl mn10300_dcache_flush_inv_page
+ .globl mn10300_dcache_flush_inv_range
+ .globl mn10300_dcache_flush_inv_range2
+
+mn10300_dcache_flush = mn10300_local_dcache_flush
+mn10300_dcache_flush_page = mn10300_local_dcache_flush_page
+mn10300_dcache_flush_range = mn10300_local_dcache_flush_range
+mn10300_dcache_flush_range2 = mn10300_local_dcache_flush_range2
+mn10300_dcache_flush_inv = mn10300_local_dcache_flush_inv
+mn10300_dcache_flush_inv_page = mn10300_local_dcache_flush_inv_page
+mn10300_dcache_flush_inv_range = mn10300_local_dcache_flush_inv_range
+mn10300_dcache_flush_inv_range2 = mn10300_local_dcache_flush_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush(void)
+# Flush the entire data cache back to RAM
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush
+ .type mn10300_local_dcache_flush,@function
+mn10300_local_dcache_flush:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_flush_end
+
+ # read the addresses tagged in the cache's tag RAM and attempt to flush
+ # those addresses specifically
+ # - we rely on the hardware to filter out invalid tag entry addresses
+ mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
+ mov DCACHE_PURGE(0,0),a1 # dcache purge request address
+ mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
+
+mn10300_local_dcache_flush_loop:
+ mov (a0),d0
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
+ or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
+ # cache
+ mov d0,(a1) # conditional purge
+
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ add -1,d1
+ bne mn10300_local_dcache_flush_loop
+
+mn10300_local_dcache_flush_end:
+ ret [],0
+ .size mn10300_local_dcache_flush,.-mn10300_local_dcache_flush
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_page(unsigned long start)
+# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size)
+# Flush a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_page
+ .globl mn10300_local_dcache_flush_range
+ .globl mn10300_local_dcache_flush_range2
+ .type mn10300_local_dcache_flush_page,@function
+ .type mn10300_local_dcache_flush_range,@function
+ .type mn10300_local_dcache_flush_range2,@function
+mn10300_local_dcache_flush_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_flush_range2:
+ add d0,d1
+mn10300_local_dcache_flush_range:
+ movm [d2],(sp)
+
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_flush_range_end
+
+ sub d0,d1,a0
+ cmp MN10300_DCACHE_FLUSH_BORDER,a0
+ ble 1f
+
+ movm (sp),[d2]
+ bra mn10300_local_dcache_flush
+1:
+
+ # round start addr down
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
+ mov d0,a1
+
+ add L1_CACHE_BYTES,d1 # round end addr up
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+ # write a request to flush all instances of an address from the cache
+ mov DCACHE_PURGE(0,0),a0
+ mov a1,d0
+ and L1_CACHE_TAG_ENTRY,d0
+ add d0,a0 # starting dcache purge control
+ # reg address
+
+ sub a1,d1
+ lsr L1_CACHE_SHIFT,d1 # total number of entries to
+ # examine
+
+ or L1_CACHE_TAG_VALID,a1 # retain valid entries in the
+ # cache
+
+mn10300_local_dcache_flush_range_loop:
+ mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
+ # all ways
+
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
+ add -1,d1
+ bne mn10300_local_dcache_flush_range_loop
+
+mn10300_local_dcache_flush_range_end:
+ ret [d2],4
+
+ .size mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page
+ .size mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range
+ .size mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv(void)
+# Flush the entire data cache and invalidate all entries
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_inv
+ .type mn10300_local_dcache_flush_inv,@function
+mn10300_local_dcache_flush_inv:
+ movhu (CHCTR),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_flush_inv_end
+
+ mov L1_CACHE_NENTRIES,d1
+ clr a1
+
+mn10300_local_dcache_flush_inv_loop:
+ mov (DCACHE_PURGE_WAY0(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY1(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY2(0),a1),d0 # unconditional purge
+ mov (DCACHE_PURGE_WAY3(0),a1),d0 # unconditional purge
+
+ add L1_CACHE_BYTES,a1
+ add -1,d1
+ bne mn10300_local_dcache_flush_inv_loop
+
+mn10300_local_dcache_flush_inv_end:
+ ret [],0
+ .size mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv_page(unsigned long start)
+# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+# Flush and invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_flush_inv_page
+ .globl mn10300_local_dcache_flush_inv_range
+ .globl mn10300_local_dcache_flush_inv_range2
+ .type mn10300_local_dcache_flush_inv_page,@function
+ .type mn10300_local_dcache_flush_inv_range,@function
+ .type mn10300_local_dcache_flush_inv_range2,@function
+mn10300_local_dcache_flush_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_flush_inv_range2:
+ add d0,d1
+mn10300_local_dcache_flush_inv_range:
+ movm [d2],(sp)
+
+ movhu (CHCTR),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_flush_inv_range_end
+
+ sub d0,d1,a0
+ cmp MN10300_DCACHE_FLUSH_INV_BORDER,a0
+ ble 1f
+
+ movm (sp),[d2]
+ bra mn10300_local_dcache_flush_inv
+1:
+
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
+ # addr down
+ mov d0,a1
+
+ add L1_CACHE_BYTES,d1 # round end addr up
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+ # write a request to flush and invalidate all instances of an address
+ # from the cache
+ mov DCACHE_PURGE(0,0),a0
+ mov a1,d0
+ and L1_CACHE_TAG_ENTRY,d0
+ add d0,a0 # starting dcache purge control
+ # reg address
+
+ sub a1,d1
+ lsr L1_CACHE_SHIFT,d1 # total number of entries to
+ # examine
+
+mn10300_local_dcache_flush_inv_range_loop:
+ mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
+ # in all ways
+
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
+ add -1,d1
+ bne mn10300_local_dcache_flush_inv_range_loop
+
+mn10300_local_dcache_flush_inv_range_end:
+ ret [d2],4
+ .size mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page
+ .size mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range
+ .size mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2
diff --git a/arch/mn10300/mm/cache-flush-icache.c b/arch/mn10300/mm/cache-flush-icache.c
new file mode 100644
index 00000000000..fdb1a9db20f
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-icache.c
@@ -0,0 +1,155 @@
+/* Flush dcache and invalidate icache when the dcache is in writeback mode
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+/**
+ * flush_icache_page - Flush a page from the dcache and invalidate the icache
+ * @vma: The VMA the page is part of.
+ * @page: The page to be flushed.
+ *
+ * Write a page back from the dcache and invalidate the icache so that we can
+ * run code from it that we've just written into it.
+ */
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ unsigned long start = page_to_phys(page);
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+
+ mn10300_local_dcache_flush_page(start);
+ mn10300_local_icache_inv_page(start);
+
+ smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_page);
+
+/**
+ * flush_icache_page_range - Flush dcache and invalidate icache for part of a
+ * single page
+ * @start: The starting virtual address of the page part.
+ * @end: The ending virtual address of the page part.
+ *
+ * Flush the dcache and invalidate the icache for part of a single page, as
+ * determined by the virtual addresses given. The page must be in the paged
+ * area.
+ */
+static void flush_icache_page_range(unsigned long start, unsigned long end)
+{
+ unsigned long addr, size, off;
+ struct page *page;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ppte, pte;
+
+ /* work out how much of the page to flush */
+ off = start & ~PAGE_MASK;
+ size = end - start;
+
+ /* get the physical address the page is mapped to from the page
+ * tables */
+ pgd = pgd_offset(current->mm, start);
+ if (!pgd || !pgd_val(*pgd))
+ return;
+
+ pud = pud_offset(pgd, start);
+ if (!pud || !pud_val(*pud))
+ return;
+
+ pmd = pmd_offset(pud, start);
+ if (!pmd || !pmd_val(*pmd))
+ return;
+
+ ppte = pte_offset_map(pmd, start);
+ if (!ppte)
+ return;
+ pte = *ppte;
+ pte_unmap(ppte);
+
+ if (pte_none(pte))
+ return;
+
+ page = pte_page(pte);
+ if (!page)
+ return;
+
+ addr = page_to_phys(page);
+
+ /* flush the dcache and invalidate the icache coverage on that
+ * region */
+ mn10300_local_dcache_flush_range2(addr + off, size);
+ mn10300_local_icache_inv_range2(addr + off, size);
+ smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
+}
+
+/**
+ * flush_icache_range - Globally flush dcache and invalidate icache for region
+ * @start: The starting virtual address of the region.
+ * @end: The ending virtual address of the region.
+ *
+ * This is used by the kernel to globally flush some code it has just written
+ * from the dcache back to RAM and then to globally invalidate the icache over
+ * that region so that that code can be run on all CPUs in the system.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long start_page, end_page;
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+
+ if (end > 0x80000000UL) {
+ /* addresses above 0xa0000000 do not go through the cache */
+ if (end > 0xa0000000UL) {
+ end = 0xa0000000UL;
+ if (start >= end)
+ goto done;
+ }
+
+ /* kernel addresses between 0x80000000 and 0x9fffffff do not
+ * require page tables, so we just map such addresses
+ * directly */
+ start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
+ mn10300_local_dcache_flush_range(start_page, end);
+ mn10300_local_icache_inv_range(start_page, end);
+ smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start_page, end);
+ if (start_page == start)
+ goto done;
+ end = start_page;
+ }
+
+ start_page = start & PAGE_MASK;
+ end_page = (end - 1) & PAGE_MASK;
+
+ if (start_page == end_page) {
+ /* the first and last bytes are on the same page */
+ flush_icache_page_range(start, end);
+ } else if (start_page + 1 == end_page) {
+ /* split over two virtually contiguous pages */
+ flush_icache_page_range(start, end_page);
+ flush_icache_page_range(end_page, end);
+ } else {
+ /* more than 2 pages; just flush the entire cache */
+ mn10300_dcache_flush();
+ mn10300_icache_inv();
+ smp_cache_call(SMP_IDCACHE_INV_FLUSH, 0, 0);
+ }
+
+done:
+ smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
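
A typical caller of flush_icache_range() is code that has just written instructions into memory, for example a module loader or a trampoline installer. A hedged usage sketch follows; install_trampoline() and its arguments are hypothetical, and only the flush call reflects the interface added above:

#include <linux/string.h>
#include <asm/cacheflush.h>

static void install_trampoline(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);		/* the new code may land only in the dcache */

	/* write it back to RAM and discard any stale icache lines over it */
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}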
diff --git a/arch/mn10300/mm/cache-flush-mn10300.S b/arch/mn10300/mm/cache-flush-mn10300.S
deleted file mode 100644
index c8ed1cbac10..00000000000
--- a/arch/mn10300/mm/cache-flush-mn10300.S
+++ /dev/null
@@ -1,192 +0,0 @@
-/* MN10300 CPU core caching routines
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#include <linux/sys.h>
-#include <linux/linkage.h>
-#include <asm/smp.h>
-#include <asm/page.h>
-#include <asm/cache.h>
-
- .am33_2
- .globl mn10300_dcache_flush
- .globl mn10300_dcache_flush_page
- .globl mn10300_dcache_flush_range
- .globl mn10300_dcache_flush_range2
- .globl mn10300_dcache_flush_inv
- .globl mn10300_dcache_flush_inv_page
- .globl mn10300_dcache_flush_inv_range
- .globl mn10300_dcache_flush_inv_range2
-
-###############################################################################
-#
-# void mn10300_dcache_flush(void)
-# Flush the entire data cache back to RAM
-#
-###############################################################################
- ALIGN
-mn10300_dcache_flush:
- movhu (CHCTR),d0
- btst CHCTR_DCEN,d0
- beq mn10300_dcache_flush_end
-
- # read the addresses tagged in the cache's tag RAM and attempt to flush
- # those addresses specifically
- # - we rely on the hardware to filter out invalid tag entry addresses
- mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
- mov DCACHE_PURGE(0,0),a1 # dcache purge request address
- mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
-
-mn10300_dcache_flush_loop:
- mov (a0),d0
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
- or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
- # cache
- mov d0,(a1) # conditional purge
-
-mn10300_dcache_flush_skip:
- add L1_CACHE_BYTES,a0
- add L1_CACHE_BYTES,a1
- add -1,d1
- bne mn10300_dcache_flush_loop
-
-mn10300_dcache_flush_end:
- ret [],0
-
-###############################################################################
-#
-# void mn10300_dcache_flush_page(unsigned start)
-# void mn10300_dcache_flush_range(unsigned start, unsigned end)
-# void mn10300_dcache_flush_range2(unsigned start, unsigned size)
-# Flush a range of addresses on a page in the dcache
-#
-###############################################################################
- ALIGN
-mn10300_dcache_flush_page:
- mov PAGE_SIZE,d1
-mn10300_dcache_flush_range2:
- add d0,d1
-mn10300_dcache_flush_range:
- movm [d2,d3],(sp)
-
- movhu (CHCTR),d2
- btst CHCTR_DCEN,d2
- beq mn10300_dcache_flush_range_end
-
- # round start addr down
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
- mov d0,a1
-
- add L1_CACHE_BYTES,d1 # round end addr up
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
- # write a request to flush all instances of an address from the cache
- mov DCACHE_PURGE(0,0),a0
- mov a1,d0
- and L1_CACHE_TAG_ENTRY,d0
- add d0,a0 # starting dcache purge control
- # reg address
-
- sub a1,d1
- lsr L1_CACHE_SHIFT,d1 # total number of entries to
- # examine
-
- or L1_CACHE_TAG_VALID,a1 # retain valid entries in the
- # cache
-
-mn10300_dcache_flush_range_loop:
- mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
- # all ways
-
- add L1_CACHE_BYTES,a0
- add L1_CACHE_BYTES,a1
- and ~L1_CACHE_WAYDISP,a0 # make sure way stay on way 0
- add -1,d1
- bne mn10300_dcache_flush_range_loop
-
-mn10300_dcache_flush_range_end:
- ret [d2,d3],8
-
-###############################################################################
-#
-# void mn10300_dcache_flush_inv(void)
-# Flush the entire data cache and invalidate all entries
-#
-###############################################################################
- ALIGN
-mn10300_dcache_flush_inv:
- movhu (CHCTR),d0
- btst CHCTR_DCEN,d0
- beq mn10300_dcache_flush_inv_end
-
- # hit each line in the dcache with an unconditional purge
- mov DCACHE_PURGE(0,0),a1 # dcache purge request address
- mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
-
-mn10300_dcache_flush_inv_loop:
- mov (a1),d0 # unconditional purge
-
- add L1_CACHE_BYTES,a1
- add -1,d1
- bne mn10300_dcache_flush_inv_loop
-
-mn10300_dcache_flush_inv_end:
- ret [],0
-
-###############################################################################
-#
-# void mn10300_dcache_flush_inv_page(unsigned start)
-# void mn10300_dcache_flush_inv_range(unsigned start, unsigned end)
-# void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size)
-# Flush and invalidate a range of addresses on a page in the dcache
-#
-###############################################################################
- ALIGN
-mn10300_dcache_flush_inv_page:
- mov PAGE_SIZE,d1
-mn10300_dcache_flush_inv_range2:
- add d0,d1
-mn10300_dcache_flush_inv_range:
- movm [d2,d3],(sp)
- movhu (CHCTR),d2
- btst CHCTR_DCEN,d2
- beq mn10300_dcache_flush_inv_range_end
-
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
- # addr down
- mov d0,a1
-
- add L1_CACHE_BYTES,d1 # round end addr up
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
- # write a request to flush and invalidate all instances of an address
- # from the cache
- mov DCACHE_PURGE(0,0),a0
- mov a1,d0
- and L1_CACHE_TAG_ENTRY,d0
- add d0,a0 # starting dcache purge control
- # reg address
-
- sub a1,d1
- lsr L1_CACHE_SHIFT,d1 # total number of entries to
- # examine
-
-mn10300_dcache_flush_inv_range_loop:
- mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
- # in all ways
-
- add L1_CACHE_BYTES,a0
- add L1_CACHE_BYTES,a1
- and ~L1_CACHE_WAYDISP,a0 # make sure way stay on way 0
- add -1,d1
- bne mn10300_dcache_flush_inv_range_loop
-
-mn10300_dcache_flush_inv_range_end:
- ret [d2,d3],8
diff --git a/arch/mn10300/mm/cache-inv-by-reg.S b/arch/mn10300/mm/cache-inv-by-reg.S
new file mode 100644
index 00000000000..c8950861ed7
--- /dev/null
+++ b/arch/mn10300/mm/cache-inv-by-reg.S
@@ -0,0 +1,356 @@
+/* MN10300 CPU cache invalidation routines, using automatic purge registers
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+#include <asm/cacheflush.h>
+
+#define mn10300_local_dcache_inv_range_intr_interval \
+ +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
+
+#if mn10300_local_dcache_inv_range_intr_interval > 0xff
+#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
+#endif
+
+ .am33_2
+
+#ifndef CONFIG_SMP
+ .globl mn10300_icache_inv
+ .globl mn10300_icache_inv_page
+ .globl mn10300_icache_inv_range
+ .globl mn10300_icache_inv_range2
+ .globl mn10300_dcache_inv
+ .globl mn10300_dcache_inv_page
+ .globl mn10300_dcache_inv_range
+ .globl mn10300_dcache_inv_range2
+
+mn10300_icache_inv = mn10300_local_icache_inv
+mn10300_icache_inv_page = mn10300_local_icache_inv_page
+mn10300_icache_inv_range = mn10300_local_icache_inv_range
+mn10300_icache_inv_range2 = mn10300_local_icache_inv_range2
+mn10300_dcache_inv = mn10300_local_dcache_inv
+mn10300_dcache_inv_page = mn10300_local_dcache_inv_page
+mn10300_dcache_inv_range = mn10300_local_dcache_inv_range
+mn10300_dcache_inv_range2 = mn10300_local_dcache_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_icache_inv(void)
+# Invalidate the entire icache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_icache_inv
+ .type mn10300_local_icache_inv,@function
+mn10300_local_icache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_ICEN,d0
+ beq mn10300_local_icache_inv_end
+
+ # invalidate
+ or CHCTR_ICINV,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+mn10300_local_icache_inv_end:
+ ret [],0
+ .size mn10300_local_icache_inv,.-mn10300_local_icache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv(void)
+# Invalidate the entire dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_inv
+ .type mn10300_local_dcache_inv,@function
+mn10300_local_dcache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_inv_end
+
+ # invalidate
+ or CHCTR_DCINV,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+mn10300_local_dcache_inv_end:
+ ret [],0
+ .size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_dcache_inv_page(unsigned long start)
+# Invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_inv_page
+ .globl mn10300_local_dcache_inv_range
+ .globl mn10300_local_dcache_inv_range2
+ .type mn10300_local_dcache_inv_page,@function
+ .type mn10300_local_dcache_inv_range,@function
+ .type mn10300_local_dcache_inv_range2,@function
+mn10300_local_dcache_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_inv_range2:
+ add d0,d1
+mn10300_local_dcache_inv_range:
+ # If we are in writeback mode we check the start and end alignments,
+ # and if they're not cacheline-aligned, we must flush any bits outside
+ # the range that share cachelines with stuff inside the range
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ btst ~(L1_CACHE_BYTES-1),d0
+ bne 1f
+ btst ~(L1_CACHE_BYTES-1),d1
+ beq 2f
+1:
+ bra mn10300_local_dcache_flush_inv_range
+2:
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+ movm [d2,d3,a2],(sp)
+
+ mov CHCTR,a0
+ movhu (a0),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_inv_range_end
+
+ # round the addresses out to be full cachelines, unless we're in
+ # writeback mode, in which case we would be in flush and invalidate by
+ # now
+#ifndef CONFIG_MN10300_CACHE_WBACK
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
+ # addr down
+
+ mov L1_CACHE_BYTES-1,d2
+ add d2,d1
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1 # round end addr up
+#endif /* !CONFIG_MN10300_CACHE_WBACK */
+
+ sub d0,d1,d2 # calculate the total size
+ mov d0,a2 # A2 = start address
+ mov d1,a1 # A1 = end address
+
+ LOCAL_CLI_SAVE(d3)
+
+ mov DCPGCR,a0 # make sure the purger isn't busy
+ setlb
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ # skip initial address alignment calculation if address is zero
+ mov d2,d1
+ cmp 0,a2
+ beq 1f
+
+dcivloop:
+ /* calculate alignsize
+ *
+ * alignsize = L1_CACHE_BYTES;
+ * while (!(start & alignsize)) {
+ * alignsize <<= 1;
+ * }
+ * d1 = alignsize;
+ */
+ mov L1_CACHE_BYTES,d1
+ lsr 1,d1
+ setlb
+ add d1,d1
+ mov d1,d0
+ and a2,d0
+ leq
+
+1:
+ /* calculate invsize
+ *
+ * if (totalsize > alignsize) {
+ * invsize = alignsize;
+ * } else {
+ * invsize = totalsize;
+ * tmp = 0x80000000;
+ * while (!(invsize & tmp)) {
+ * tmp >>= 1;
+ * }
+ * invsize = tmp;
+ * }
+ * d1 = invsize
+ */
+ cmp d2,d1
+ bns 2f
+ mov d2,d1
+
+ mov 0x80000000,d0 # start from 31bit=1
+ setlb
+ lsr 1,d0
+ mov d0,e0
+ and d1,e0
+ leq
+ mov d0,d1
+
+2:
+ /* set mask
+ *
+ * mask = ~(invsize-1);
+ * DCPGMR = mask;
+ */
+ mov d1,d0
+ add -1,d0
+ not d0
+ mov d0,(DCPGMR)
+
+ # invalidate area
+ mov a2,d0
+ or DCPGCR_DCI,d0
+ mov d0,(a0) # DCPGCR = (mask & start) | DCPGCR_DCI
+
+ setlb # wait for the purge to complete
+ mov (a0),d0
+ btst DCPGCR_DCPGBSY,d0
+ lne
+
+ sub d1,d2 # decrease size remaining
+ add d1,a2 # increase next start address
+
+ /* check whether we have reached the end address
+ *
+ * a2 = a2 + invsize
+ * if (a2 < end) {
+ * goto dcivloop;
+ * } */
+ cmp a1,a2
+ bns dcivloop
+
+ LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_inv_range_end:
+ ret [d2,d3,a2],12
+ .size mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
+ .size mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
+ .size mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2
+
+###############################################################################
+#
+# void mn10300_local_icache_inv_page(unsigned long start)
+# void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_icache_inv_range(unsigned long start, unsigned long end)
+# Invalidate a range of addresses on a page in the icache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_icache_inv_page
+ .globl mn10300_local_icache_inv_range
+ .globl mn10300_local_icache_inv_range2
+ .type mn10300_local_icache_inv_page,@function
+ .type mn10300_local_icache_inv_range,@function
+ .type mn10300_local_icache_inv_range2,@function
+mn10300_local_icache_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_icache_inv_range2:
+ add d0,d1
+mn10300_local_icache_inv_range:
+ movm [d2,d3,a2],(sp)
+
+ mov CHCTR,a0
+ movhu (a0),d2
+ btst CHCTR_ICEN,d2
+ beq mn10300_local_icache_inv_range_reg_end
+
+ /* calculate alignsize
+ *
+ * alignsize = L1_CACHE_BYTES;
+ * for (i = (end - start - 1) / L1_CACHE_BYTES ; i > 0; i >>= 1) {
+ * alignsize <<= 1;
+ * }
+ * d2 = alignsize;
+ */
+ mov L1_CACHE_BYTES,d2
+ sub d0,d1,d3
+ add -1,d3
+ lsr L1_CACHE_SHIFT,d3
+ beq 2f
+1:
+ add d2,d2
+ lsr 1,d3
+ bne 1b
+2:
+
+ /* a1 = end */
+ mov d1,a1
+
+ LOCAL_CLI_SAVE(d3)
+
+ mov ICIVCR,a0
+ /* wait for busy bit of area invalidation */
+ setlb
+ mov (a0),d1
+ btst ICIVCR_ICIVBSY,d1
+ lne
+
+ /* set mask
+ *
+ * mask = ~(alignsize-1);
+ * ICIVMR = mask;
+ */
+ mov d2,d1
+ add -1,d1
+ not d1
+ mov d1,(ICIVMR)
+ /* a2 = mask & start */
+ and d1,d0,a2
+
+icivloop:
+ /* area invalidate
+ *
+ * ICIVCR = (mask & start) | ICIVCR_ICI
+ */
+ mov a2,d0
+ or ICIVCR_ICI,d0
+ mov d0,(a0)
+
+ /* wait for busy bit of area invalidation */
+ setlb
+ mov (a0),d1
+ btst ICIVCR_ICIVBSY,d1
+ lne
+
+ /* check whether we have reached the end address
+ *
+ * a2 = a2 + alignsize
+ * if (a2 < end) {
+ * goto icivloop;
+ * } */
+ add d2,a2
+ cmp a1,a2
+ bns icivloop
+
+ LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_icache_inv_range_reg_end:
+ ret [d2,d3,a2],12
+ .size mn10300_local_icache_inv_page,.-mn10300_local_icache_inv_page
+ .size mn10300_local_icache_inv_range,.-mn10300_local_icache_inv_range
+ .size mn10300_local_icache_inv_range2,.-mn10300_local_icache_inv_range2
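
The purge loop above is driven by the pseudocode in its comments; as a quick sanity check, here is a hypothetical stand-alone C model of the same block-size selection (CACHE_LINE and pick_block are made-up names for illustration, not part of the patch). Each pass purges the largest power-of-two block that is naturally aligned at the current start address and does not exceed the bytes remaining, which is what the alignsize/invsize calculations compute.

#include <stdio.h>

#define CACHE_LINE 32UL			/* stand-in for L1_CACHE_BYTES */

/* pick the size of the next block to hand to the purge registers */
static unsigned long pick_block(unsigned long start, unsigned long remaining)
{
	unsigned long align = remaining;	/* start == 0: any alignment fits */
	unsigned long inv, tmp;

	if (start) {
		/* alignsize: double until it meets a set bit of start */
		align = CACHE_LINE;
		while (!(start & align))
			align <<= 1;
	}

	if (remaining > align) {
		inv = align;
	} else {
		/* clamp to the remaining size, rounded down to a power of two */
		tmp = 0x80000000UL;
		while (!(remaining & tmp))
			tmp >>= 1;
		inv = tmp;
	}
	return inv;
}

int main(void)
{
	unsigned long start = 0x1040, end = 0x2000;

	while (start < end) {
		unsigned long n = pick_block(start, end - start);

		printf("purge %#lx..%#lx\n", start, start + n);
		start += n;
	}
	return 0;
}

Running the model on an unaligned range shows the block size growing as the start address becomes more aligned, matching the loop structure of dcivloop above.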
diff --git a/arch/mn10300/mm/cache-inv-by-tag.S b/arch/mn10300/mm/cache-inv-by-tag.S
new file mode 100644
index 00000000000..e9713b40c0f
--- /dev/null
+++ b/arch/mn10300/mm/cache-inv-by-tag.S
@@ -0,0 +1,348 @@
+/* MN10300 CPU core caching routines
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+#include <asm/cacheflush.h>
+
+#define mn10300_local_dcache_inv_range_intr_interval \
+ +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
+
+#if mn10300_local_dcache_inv_range_intr_interval > 0xff
+#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
+#endif
+
+ .am33_2
+
+ .globl mn10300_local_icache_inv_page
+ .globl mn10300_local_icache_inv_range
+ .globl mn10300_local_icache_inv_range2
+
+mn10300_local_icache_inv_page = mn10300_local_icache_inv
+mn10300_local_icache_inv_range = mn10300_local_icache_inv
+mn10300_local_icache_inv_range2 = mn10300_local_icache_inv
+
+#ifndef CONFIG_SMP
+ .globl mn10300_icache_inv
+ .globl mn10300_icache_inv_page
+ .globl mn10300_icache_inv_range
+ .globl mn10300_icache_inv_range2
+ .globl mn10300_dcache_inv
+ .globl mn10300_dcache_inv_page
+ .globl mn10300_dcache_inv_range
+ .globl mn10300_dcache_inv_range2
+
+mn10300_icache_inv = mn10300_local_icache_inv
+mn10300_icache_inv_page = mn10300_local_icache_inv_page
+mn10300_icache_inv_range = mn10300_local_icache_inv_range
+mn10300_icache_inv_range2 = mn10300_local_icache_inv_range2
+mn10300_dcache_inv = mn10300_local_dcache_inv
+mn10300_dcache_inv_page = mn10300_local_dcache_inv_page
+mn10300_dcache_inv_range = mn10300_local_dcache_inv_range
+mn10300_dcache_inv_range2 = mn10300_local_dcache_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_icache_inv(void)
+# Invalidate the entire icache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_icache_inv
+ .type mn10300_local_icache_inv,@function
+mn10300_local_icache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_ICEN,d0
+ beq mn10300_local_icache_inv_end
+
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+ LOCAL_CLI_SAVE(d1)
+
+ # disable the icache
+ and ~CHCTR_ICEN,d0
+ movhu d0,(a0)
+
+ # and wait for it to calm down
+ setlb
+ movhu (a0),d0
+ btst CHCTR_ICBUSY,d0
+ lne
+
+ # invalidate
+ or CHCTR_ICINV,d0
+ movhu d0,(a0)
+
+ # wait for the cache to finish
+ mov CHCTR,a0
+ setlb
+ movhu (a0),d0
+ btst CHCTR_ICBUSY,d0
+ lne
+
+ # and reenable it
+ and ~CHCTR_ICINV,d0
+ or CHCTR_ICEN,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+ LOCAL_IRQ_RESTORE(d1)
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+ # invalidate
+ or CHCTR_ICINV,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+
+mn10300_local_icache_inv_end:
+ ret [],0
+ .size mn10300_local_icache_inv,.-mn10300_local_icache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv(void)
+# Invalidate the entire dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_inv
+ .type mn10300_local_dcache_inv,@function
+mn10300_local_dcache_inv:
+ mov CHCTR,a0
+
+ movhu (a0),d0
+ btst CHCTR_DCEN,d0
+ beq mn10300_local_dcache_inv_end
+
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+ LOCAL_CLI_SAVE(d1)
+
+ # disable the dcache
+ and ~CHCTR_DCEN,d0
+ movhu d0,(a0)
+
+ # and wait for it to calm down
+ setlb
+ movhu (a0),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+ # invalidate
+ or CHCTR_DCINV,d0
+ movhu d0,(a0)
+
+ # wait for the cache to finish
+ mov CHCTR,a0
+ setlb
+ movhu (a0),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+ # and reenable it
+ and ~CHCTR_DCINV,d0
+ or CHCTR_DCEN,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+
+ LOCAL_IRQ_RESTORE(d1)
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+ # invalidate
+ or CHCTR_DCINV,d0
+ movhu d0,(a0)
+ movhu (a0),d0
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+
+mn10300_local_dcache_inv_end:
+ ret [],0
+ .size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_dcache_inv_page(unsigned long start)
+# Invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+ ALIGN
+ .globl mn10300_local_dcache_inv_page
+ .globl mn10300_local_dcache_inv_range
+ .globl mn10300_local_dcache_inv_range2
+ .type mn10300_local_dcache_inv_page,@function
+ .type mn10300_local_dcache_inv_range,@function
+ .type mn10300_local_dcache_inv_range2,@function
+mn10300_local_dcache_inv_page:
+ and ~(PAGE_SIZE-1),d0
+ mov PAGE_SIZE,d1
+mn10300_local_dcache_inv_range2:
+ add d0,d1
+mn10300_local_dcache_inv_range:
+ # If we are in writeback mode we check the start and end alignments,
+ # and if they're not cacheline-aligned, we must flush any bits outside
+ # the range that share cachelines with stuff inside the range
+#ifdef CONFIG_MN10300_CACHE_WBACK
+ btst ~(L1_CACHE_BYTES-1),d0
+ bne 1f
+ btst ~(L1_CACHE_BYTES-1),d1
+ beq 2f
+1:
+ bra mn10300_local_dcache_flush_inv_range
+2:
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+ movm [d2,d3,a2],(sp)
+
+ mov CHCTR,a2
+ movhu (a2),d2
+ btst CHCTR_DCEN,d2
+ beq mn10300_local_dcache_inv_range_end
+
+#ifndef CONFIG_MN10300_CACHE_WBACK
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
+ # addr down
+
+ add L1_CACHE_BYTES,d1 # round end addr up
+ and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+#endif /* !CONFIG_MN10300_CACHE_WBACK */
+ mov d0,a1
+
+ clr d2 # we're going to clear tag RAM
+ # entries
+
+ # read the tags from the tag RAM, and if they indicate a valid dirty
+ # cache line then invalidate that line
+ mov DCACHE_TAG(0,0),a0
+ mov a1,d0
+ and L1_CACHE_TAG_ENTRY,d0
+ add d0,a0 # starting dcache tag RAM
+ # access address
+
+ sub a1,d1
+ lsr L1_CACHE_SHIFT,d1 # total number of entries to
+ # examine
+
+ and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base
+
+mn10300_local_dcache_inv_range_outer_loop:
+ LOCAL_CLI_SAVE(d3)
+
+ # disable the dcache
+ movhu (a2),d0
+ and ~CHCTR_DCEN,d0
+ movhu d0,(a2)
+
+ # and wait for it to calm down
+ setlb
+ movhu (a2),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+mn10300_local_dcache_inv_range_loop:
+
+ # process the way 0 slot
+ mov (L1_CACHE_WAYDISP*0,a0),d0 # read the tag in the way 0 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_local_dcache_inv_range_skip_0 # jump if this cacheline
+ # is not valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_local_dcache_inv_range_skip_0 # jump if not this cacheline
+
+ mov d2,(L1_CACHE_WAYDISP*0,a0) # kill the tag
+
+mn10300_local_dcache_inv_range_skip_0:
+
+ # process the way 1 slot
+ mov (L1_CACHE_WAYDISP*1,a0),d0 # read the tag in the way 1 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_local_dcache_inv_range_skip_1 # jump if this cacheline
+ # is not valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_local_dcache_inv_range_skip_1 # jump if not this cacheline
+
+ mov d2,(L1_CACHE_WAYDISP*1,a0) # kill the tag
+
+mn10300_local_dcache_inv_range_skip_1:
+
+ # process the way 2 slot
+ mov (L1_CACHE_WAYDISP*2,a0),d0 # read the tag in the way 2 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_local_dcache_inv_range_skip_2 # jump if this cacheline
+ # is not valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_local_dcache_inv_range_skip_2 # jump if not this cacheline
+
+ mov d2,(L1_CACHE_WAYDISP*2,a0) # kill the tag
+
+mn10300_local_dcache_inv_range_skip_2:
+
+ # process the way 3 slot
+ mov (L1_CACHE_WAYDISP*3,a0),d0 # read the tag in the way 3 slot
+ btst L1_CACHE_TAG_VALID,d0
+ beq mn10300_local_dcache_inv_range_skip_3 # jump if this cacheline
+ # is not valid
+
+ xor a1,d0
+ lsr 12,d0
+ bne mn10300_local_dcache_inv_range_skip_3 # jump if not this cacheline
+
+ mov d2,(L1_CACHE_WAYDISP*3,a0) # kill the tag
+
+mn10300_local_dcache_inv_range_skip_3:
+
+ # approx every N steps we re-enable the cache and see if there are any
+ # interrupts to be processed
+ # we also break out if we've reached the end of the loop
+ # (the bottom nibble of the count is zero in both cases)
+ add L1_CACHE_BYTES,a0
+ add L1_CACHE_BYTES,a1
+ and ~L1_CACHE_WAYDISP,a0
+ add -1,d1
+ btst mn10300_local_dcache_inv_range_intr_interval,d1
+ bne mn10300_local_dcache_inv_range_loop
+
+ # wait for the cache to finish what it's doing
+ setlb
+ movhu (a2),d0
+ btst CHCTR_DCBUSY,d0
+ lne
+
+ # and reenable it
+ or CHCTR_DCEN,d0
+ movhu d0,(a2)
+ movhu (a2),d0
+
+ # re-enable interrupts
+ # - we don't bother with delay NOPs as we'll have enough instructions
+ # before we disable interrupts again to give the interrupts a chance
+ # to happen
+ LOCAL_IRQ_RESTORE(d3)
+
+ # go around again if the counter hasn't yet reached zero
+ add 0,d1
+ bne mn10300_local_dcache_inv_range_outer_loop
+
+mn10300_local_dcache_inv_range_end:
+ ret [d2,d3,a2],12
+ .size mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
+ .size mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
+ .size mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2
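
For readers following the tag-walk variant above: each of the four way slots is tested with the same valid-bit check and XOR/shift comparison before its tag word is overwritten. A hypothetical C rendering of that per-way test (TAG_VALID and tag_matches are illustrative names, not the kernel's) might look like this:

#include <stdbool.h>
#include <stdint.h>

#define TAG_VALID 0x1u			/* stand-in for L1_CACHE_TAG_VALID */

/* a line is a candidate for invalidation when its tag is valid and the
 * address bits above bit 11 (bits 12 and up) match the comparator base
 * built from the target address */
static bool tag_matches(uint32_t tag, uint32_t comparator)
{
	if (!(tag & TAG_VALID))
		return false;		/* nothing cached in this way */
	return ((tag ^ comparator) >> 12) == 0;
}

On a match the loop stores the cleared d2 register over the tag word, which is what invalidates the line in that way.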
diff --git a/arch/mn10300/mm/cache-inv-icache.c b/arch/mn10300/mm/cache-inv-icache.c
new file mode 100644
index 00000000000..a8933a60b2d
--- /dev/null
+++ b/arch/mn10300/mm/cache-inv-icache.c
@@ -0,0 +1,129 @@
+/* Invalidate icache when dcache doesn't need invalidation as it's in
+ * write-through mode
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+/**
+ * flush_icache_page_range - Invalidate the icache for part of a single page
+ * @start: The starting virtual address of the page part.
+ * @end: The ending virtual address of the page part.
+ *
+ * Invalidate the icache for part of a single page, as determined by the
+ * virtual addresses given. The page must be in the paged area. The dcache is
+ * not flushed as the cache must be in write-through mode to get here.
+ */
+static void flush_icache_page_range(unsigned long start, unsigned long end)
+{
+ unsigned long addr, size, off;
+ struct page *page;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ppte, pte;
+
+ /* work out how much of the page to flush */
+ off = start & ~PAGE_MASK;
+ size = end - start;
+
+ /* get the physical address the page is mapped to from the page
+ * tables */
+ pgd = pgd_offset(current->mm, start);
+ if (!pgd || !pgd_val(*pgd))
+ return;
+
+ pud = pud_offset(pgd, start);
+ if (!pud || !pud_val(*pud))
+ return;
+
+ pmd = pmd_offset(pud, start);
+ if (!pmd || !pmd_val(*pmd))
+ return;
+
+ ppte = pte_offset_map(pmd, start);
+ if (!ppte)
+ return;
+ pte = *ppte;
+ pte_unmap(ppte);
+
+ if (pte_none(pte))
+ return;
+
+ page = pte_page(pte);
+ if (!page)
+ return;
+
+ addr = page_to_phys(page);
+
+ /* invalidate the icache coverage on that region */
+ mn10300_local_icache_inv_range2(addr + off, size);
+ smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
+}
+
+/**
+ * flush_icache_range - Globally flush dcache and invalidate icache for region
+ * @start: The starting virtual address of the region.
+ * @end: The ending virtual address of the region.
+ *
+ * This is used by the kernel to globally flush some code it has just written
+ * from the dcache back to RAM and then to globally invalidate the icache over
+ * that region so that that code can be run on all CPUs in the system.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long start_page, end_page;
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+
+ if (end > 0x80000000UL) {
+ /* addresses above 0xa0000000 do not go through the cache */
+ if (end > 0xa0000000UL) {
+ end = 0xa0000000UL;
+ if (start >= end)
+ goto done;
+ }
+
+ /* kernel addresses between 0x80000000 and 0x9fffffff do not
+ * require page tables, so we just map such addresses
+ * directly */
+ start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
+ mn10300_icache_inv_range(start_page, end);
+ smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
+ if (start_page == start)
+ goto done;
+ end = start_page;
+ }
+
+ start_page = start & PAGE_MASK;
+ end_page = (end - 1) & PAGE_MASK;
+
+ if (start_page == end_page) {
+ /* the first and last bytes are on the same page */
+ flush_icache_page_range(start, end);
+ } else if (start_page + 1 == end_page) {
+ /* split over two virtually contiguous pages */
+ flush_icache_page_range(start, end_page);
+ flush_icache_page_range(end_page, end);
+ } else {
+ /* more than 2 pages; just flush the entire cache */
+ mn10300_local_icache_inv();
+ smp_cache_call(SMP_ICACHE_INV, 0, 0);
+ }
+
+done:
+ smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
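
As a usage note (not part of the patch): the typical caller writes instructions through the dcache and then hands the byte range to flush_icache_range() so every CPU's icache sees the new code. A minimal hypothetical sketch, assuming the usual declaration in <asm/cacheflush.h> (install_code is a made-up helper):

#include <linux/string.h>
#include <asm/cacheflush.h>

/* illustrative helper, not from the kernel tree */
static void install_code(void *dst, const void *insns, size_t len)
{
	memcpy(dst, insns, len);		/* new code lands via the dcache */
	flush_icache_range((unsigned long)dst,
			   (unsigned long)dst + len);	/* make it fetchable everywhere */
}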
diff --git a/arch/mn10300/mm/cache-mn10300.S b/arch/mn10300/mm/cache-mn10300.S
deleted file mode 100644
index e839d0aedd6..00000000000
--- a/arch/mn10300/mm/cache-mn10300.S
+++ /dev/null
@@ -1,289 +0,0 @@
-/* MN10300 CPU core caching routines
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#include <linux/sys.h>
-#include <linux/linkage.h>
-#include <asm/smp.h>
-#include <asm/page.h>
-#include <asm/cache.h>
-
-#define mn10300_dcache_inv_range_intr_interval \
- +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
-
-#if mn10300_dcache_inv_range_intr_interval > 0xff
-#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
-#endif
-
- .am33_2
-
- .globl mn10300_icache_inv
- .globl mn10300_dcache_inv
- .globl mn10300_dcache_inv_range
- .globl mn10300_dcache_inv_range2
- .globl mn10300_dcache_inv_page
-
-###############################################################################
-#
-# void mn10300_icache_inv(void)
-# Invalidate the entire icache
-#
-###############################################################################
- ALIGN
-mn10300_icache_inv:
- mov CHCTR,a0
-
- movhu (a0),d0
- btst CHCTR_ICEN,d0
- beq mn10300_icache_inv_end
-
- mov epsw,d1
- and ~EPSW_IE,epsw
- nop
- nop
-
- # disable the icache
- and ~CHCTR_ICEN,d0
- movhu d0,(a0)
-
- # and wait for it to calm down
- setlb
- movhu (a0),d0
- btst CHCTR_ICBUSY,d0
- lne
-
- # invalidate
- or CHCTR_ICINV,d0
- movhu d0,(a0)
-
- # wait for the cache to finish
- mov CHCTR,a0
- setlb
- movhu (a0),d0
- btst CHCTR_ICBUSY,d0
- lne
-
- # and reenable it
- and ~CHCTR_ICINV,d0
- or CHCTR_ICEN,d0
- movhu d0,(a0)
- movhu (a0),d0
-
- mov d1,epsw
-
-mn10300_icache_inv_end:
- ret [],0
-
-###############################################################################
-#
-# void mn10300_dcache_inv(void)
-# Invalidate the entire dcache
-#
-###############################################################################
- ALIGN
-mn10300_dcache_inv:
- mov CHCTR,a0
-
- movhu (a0),d0
- btst CHCTR_DCEN,d0
- beq mn10300_dcache_inv_end
-
- mov epsw,d1
- and ~EPSW_IE,epsw
- nop
- nop
-
- # disable the dcache
- and ~CHCTR_DCEN,d0
- movhu d0,(a0)
-
- # and wait for it to calm down
- setlb
- movhu (a0),d0
- btst CHCTR_DCBUSY,d0
- lne
-
- # invalidate
- or CHCTR_DCINV,d0
- movhu d0,(a0)
-
- # wait for the cache to finish
- mov CHCTR,a0
- setlb
- movhu (a0),d0
- btst CHCTR_DCBUSY,d0
- lne
-
- # and reenable it
- and ~CHCTR_DCINV,d0
- or CHCTR_DCEN,d0
- movhu d0,(a0)
- movhu (a0),d0
-
- mov d1,epsw
-
-mn10300_dcache_inv_end:
- ret [],0
-
-###############################################################################
-#
-# void mn10300_dcache_inv_range(unsigned start, unsigned end)
-# void mn10300_dcache_inv_range2(unsigned start, unsigned size)
-# void mn10300_dcache_inv_page(unsigned start)
-# Invalidate a range of addresses on a page in the dcache
-#
-###############################################################################
- ALIGN
-mn10300_dcache_inv_page:
- mov PAGE_SIZE,d1
-mn10300_dcache_inv_range2:
- add d0,d1
-mn10300_dcache_inv_range:
- movm [d2,d3,a2],(sp)
- mov CHCTR,a2
-
- movhu (a2),d2
- btst CHCTR_DCEN,d2
- beq mn10300_dcache_inv_range_end
-
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
- # addr down
- mov d0,a1
-
- add L1_CACHE_BYTES,d1 # round end addr up
- and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
- clr d2 # we're going to clear tag ram
- # entries
-
- # read the tags from the tag RAM, and if they indicate a valid dirty
- # cache line then invalidate that line
- mov DCACHE_TAG(0,0),a0
- mov a1,d0
- and L1_CACHE_TAG_ENTRY,d0
- add d0,a0 # starting dcache tag RAM
- # access address
-
- sub a1,d1
- lsr L1_CACHE_SHIFT,d1 # total number of entries to
- # examine
-
- and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base
-
-mn10300_dcache_inv_range_outer_loop:
- # disable interrupts
- mov epsw,d3
- and ~EPSW_IE,epsw
- nop # note that reading CHCTR and
- # AND'ing D0 occupy two delay
- # slots after disabling
- # interrupts
-
- # disable the dcache
- movhu (a2),d0
- and ~CHCTR_DCEN,d0
- movhu d0,(a2)
-
- # and wait for it to calm down
- setlb
- movhu (a2),d0
- btst CHCTR_DCBUSY,d0
- lne
-
-mn10300_dcache_inv_range_loop:
-
- # process the way 0 slot
- mov (L1_CACHE_WAYDISP*0,a0),d0 # read the tag in the way 0 slot
- btst L1_CACHE_TAG_VALID,d0
- beq mn10300_dcache_inv_range_skip_0 # jump if this cacheline is not
- # valid
-
- xor a1,d0
- lsr 12,d0
- bne mn10300_dcache_inv_range_skip_0 # jump if not this cacheline
-
- mov d2,(a0) # kill the tag
-
-mn10300_dcache_inv_range_skip_0:
-
- # process the way 1 slot
- mov (L1_CACHE_WAYDISP*1,a0),d0 # read the tag in the way 1 slot
- btst L1_CACHE_TAG_VALID,d0
- beq mn10300_dcache_inv_range_skip_1 # jump if this cacheline is not
- # valid
-
- xor a1,d0
- lsr 12,d0
- bne mn10300_dcache_inv_range_skip_1 # jump if not this cacheline
-
- mov d2,(a0) # kill the tag
-
-mn10300_dcache_inv_range_skip_1:
-
- # process the way 2 slot
- mov (L1_CACHE_WAYDISP*2,a0),d0 # read the tag in the way 2 slot
- btst L1_CACHE_TAG_VALID,d0
- beq mn10300_dcache_inv_range_skip_2 # jump if this cacheline is not
- # valid
-
- xor a1,d0
- lsr 12,d0
- bne mn10300_dcache_inv_range_skip_2 # jump if not this cacheline
-
- mov d2,(a0) # kill the tag
-
-mn10300_dcache_inv_range_skip_2:
-
- # process the way 3 slot
- mov (L1_CACHE_WAYDISP*3,a0),d0 # read the tag in the way 3 slot
- btst L1_CACHE_TAG_VALID,d0
- beq mn10300_dcache_inv_range_skip_3 # jump if this cacheline is not
- # valid
-
- xor a1,d0
- lsr 12,d0
- bne mn10300_dcache_inv_range_skip_3 # jump if not this cacheline
-
- mov d2,(a0) # kill the tag
-
-mn10300_dcache_inv_range_skip_3:
-
- # approx every N steps we re-enable the cache and see if there are any
- # interrupts to be processed
- # we also break out if we've reached the end of the loop
- # (the bottom nibble of the count is zero in both cases)
- add L1_CACHE_BYTES,a0
- add L1_CACHE_BYTES,a1
- add -1,d1
- btst mn10300_dcache_inv_range_intr_interval,d1
- bne mn10300_dcache_inv_range_loop
-
- # wait for the cache to finish what it's doing
- setlb
- movhu (a2),d0
- btst CHCTR_DCBUSY,d0
- lne
-
- # and reenable it
- or CHCTR_DCEN,d0
- movhu d0,(a2)
- movhu (a2),d0
-
- # re-enable interrupts
- # - we don't bother with delay NOPs as we'll have enough instructions
- # before we disable interrupts again to give the interrupts a chance
- # to happen
- mov d3,epsw
-
- # go around again if the counter hasn't yet reached zero
- add 0,d1
- bne mn10300_dcache_inv_range_outer_loop
-
-mn10300_dcache_inv_range_end:
- ret [d2,d3,a2],12
diff --git a/arch/mn10300/mm/cache-smp-flush.c b/arch/mn10300/mm/cache-smp-flush.c
new file mode 100644
index 00000000000..fd51af5eaf7
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp-flush.c
@@ -0,0 +1,156 @@
+/* Functions for global dcache flush when writeback caching in SMP
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include "cache-smp.h"
+
+/**
+ * mn10300_dcache_flush - Globally flush data cache
+ *
+ * Flush the data cache on all CPUs.
+ */
+void mn10300_dcache_flush(void)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush();
+ smp_cache_call(SMP_DCACHE_FLUSH, 0, 0);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_page - Globally flush a page of data cache
+ * @start: The address of the page of memory to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs covering
+ * the page that includes the given address.
+ */
+void mn10300_dcache_flush_page(unsigned long start)
+{
+ unsigned long flags;
+
+ start &= ~(PAGE_SIZE-1);
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_page(start);
+ smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_range - Globally flush range of data cache
+ * @start: The start address of the region to be flushed.
+ * @end: The end address of the region to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs, between start and
+ * end-1 inclusive.
+ */
+void mn10300_dcache_flush_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_range(start, end);
+ smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, end);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_range2 - Globally flush range of data cache
+ * @start: The start address of the region to be flushed.
+ * @size: The size of the region to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs, between start and
+ * start+size-1 inclusive.
+ */
+void mn10300_dcache_flush_range2(unsigned long start, unsigned long size)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_range2(start, size);
+ smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + size);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv - Globally flush and invalidate data cache
+ *
+ * Flush and invalidate the data cache on all CPUs.
+ */
+void mn10300_dcache_flush_inv(void)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_inv();
+ smp_cache_call(SMP_DCACHE_FLUSH_INV, 0, 0);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_page - Globally flush and invalidate a page of data
+ * cache
+ * @start: The address of the page of memory to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs
+ * covering the page that includes the given address.
+ */
+void mn10300_dcache_flush_inv_page(unsigned long start)
+{
+ unsigned long flags;
+
+ start &= ~(PAGE_SIZE-1);
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_inv_page(start);
+ smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_range - Globally flush and invalidate range of data
+ * cache
+ * @start: The start address of the region to be flushed and invalidated.
+ * @end: The end address of the region to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs,
+ * between start and end-1 inclusive.
+ */
+void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_inv_range(start, end);
+ smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, end);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_range2 - Globally flush and invalidate range of data
+ * cache
+ * @start: The start address of the region to be flushed and invalidated.
+ * @size: The size of the region to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs,
+ * between start and start+size-1 inclusive.
+ */
+void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_flush_inv_range2(start, size);
+ smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + size);
+ smp_unlock_cache(flags);
+}
diff --git a/arch/mn10300/mm/cache-smp-inv.c b/arch/mn10300/mm/cache-smp-inv.c
new file mode 100644
index 00000000000..ff1787358c8
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp-inv.c
@@ -0,0 +1,153 @@
+/* Functions for global i/dcache invalidation when caching in SMP
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include "cache-smp.h"
+
+/**
+ * mn10300_icache_inv - Globally invalidate instruction cache
+ *
+ * Invalidate the instruction cache on all CPUs.
+ */
+void mn10300_icache_inv(void)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_icache_inv();
+ smp_cache_call(SMP_ICACHE_INV, 0, 0);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_page - Globally invalidate a page of instruction cache
+ * @start: The address of the page of memory to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs
+ * covering the page that includes the given address.
+ */
+void mn10300_icache_inv_page(unsigned long start)
+{
+ unsigned long flags;
+
+ start &= ~(PAGE_SIZE-1);
+
+ flags = smp_lock_cache();
+ mn10300_local_icache_inv_page(start);
+ smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_range - Globally invalidate range of instruction cache
+ * @start: The start address of the region to be invalidated.
+ * @end: The end address of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs,
+ * between start and end-1 inclusive.
+ */
+void mn10300_icache_inv_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_icache_inv_range(start, end);
+ smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_range2 - Globally invalidate range of instruction cache
+ * @start: The start address of the region to be invalidated.
+ * @size: The size of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs,
+ * between start and start+size-1 inclusive.
+ */
+void mn10300_icache_inv_range2(unsigned long start, unsigned long size)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_icache_inv_range2(start, size);
+ smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + size);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv - Globally invalidate data cache
+ *
+ * Invalidate the data cache on all CPUs.
+ */
+void mn10300_dcache_inv(void)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_inv();
+ smp_cache_call(SMP_DCACHE_INV, 0, 0);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_page - Globally invalidate a page of data cache
+ * @start: The address of the page of memory to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs covering the
+ * page that includes the given address.
+ */
+void mn10300_dcache_inv_page(unsigned long start)
+{
+ unsigned long flags;
+
+ start &= ~(PAGE_SIZE-1);
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_inv_page(start);
+ smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + PAGE_SIZE);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_range - Globally invalidate range of data cache
+ * @start: The start address of the region to be invalidated.
+ * @end: The end address of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs, between start
+ * and end-1 inclusive.
+ */
+void mn10300_dcache_inv_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_inv_range(start, end);
+ smp_cache_call(SMP_DCACHE_INV_RANGE, start, end);
+ smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_range2 - Globally invalidate range of data cache
+ * @start: The start address of the region to be invalidated.
+ * @size: The size of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs, between start
+ * and start+size-1 inclusive.
+ */
+void mn10300_dcache_inv_range2(unsigned long start, unsigned long size)
+{
+ unsigned long flags;
+
+ flags = smp_lock_cache();
+ mn10300_local_dcache_inv_range2(start, size);
+ smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + size);
+ smp_unlock_cache(flags);
+}
diff --git a/arch/mn10300/mm/cache-smp.c b/arch/mn10300/mm/cache-smp.c
new file mode 100644
index 00000000000..4a6e9a4b5b2
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp.c
@@ -0,0 +1,105 @@
+/* SMP global caching code
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/threads.h>
+#include <linux/interrupt.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+DEFINE_SPINLOCK(smp_cache_lock);
+static unsigned long smp_cache_mask;
+static unsigned long smp_cache_start;
+static unsigned long smp_cache_end;
+static cpumask_t smp_cache_ipi_map; /* Bitmask of cache IPI done CPUs */
+
+/**
+ * smp_cache_interrupt - Handle IPI request to flush caches.
+ *
+ * Handle a request delivered by IPI to flush the current CPU's
+ * caches. The parameters are stored in smp_cache_*.
+ */
+void smp_cache_interrupt(void)
+{
+ unsigned long opr_mask = smp_cache_mask;
+
+ switch ((enum smp_dcache_ops)(opr_mask & SMP_DCACHE_OP_MASK)) {
+ case SMP_DCACHE_NOP:
+ break;
+ case SMP_DCACHE_INV:
+ mn10300_local_dcache_inv();
+ break;
+ case SMP_DCACHE_INV_RANGE:
+ mn10300_local_dcache_inv_range(smp_cache_start, smp_cache_end);
+ break;
+ case SMP_DCACHE_FLUSH:
+ mn10300_local_dcache_flush();
+ break;
+ case SMP_DCACHE_FLUSH_RANGE:
+ mn10300_local_dcache_flush_range(smp_cache_start,
+ smp_cache_end);
+ break;
+ case SMP_DCACHE_FLUSH_INV:
+ mn10300_local_dcache_flush_inv();
+ break;
+ case SMP_DCACHE_FLUSH_INV_RANGE:
+ mn10300_local_dcache_flush_inv_range(smp_cache_start,
+ smp_cache_end);
+ break;
+ }
+
+ switch ((enum smp_icache_ops)(opr_mask & SMP_ICACHE_OP_MASK)) {
+ case SMP_ICACHE_NOP:
+ break;
+ case SMP_ICACHE_INV:
+ mn10300_local_icache_inv();
+ break;
+ case SMP_ICACHE_INV_RANGE:
+ mn10300_local_icache_inv_range(smp_cache_start, smp_cache_end);
+ break;
+ }
+
+ cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+}
+
+/**
+ * smp_cache_call - Issue an IPI to request the other CPUs flush caches
+ * @opr_mask: Cache operation flags
+ * @start: Start address of request
+ * @end: End address of request
+ *
+ * Send cache flush IPI to other CPUs. This invokes smp_cache_interrupt()
+ * above on those other CPUs and then waits for them to finish.
+ *
+ * The caller must hold smp_cache_lock.
+ */
+void smp_cache_call(unsigned long opr_mask,
+ unsigned long start, unsigned long end)
+{
+ smp_cache_mask = opr_mask;
+ smp_cache_start = start;
+ smp_cache_end = end;
+ smp_cache_ipi_map = cpu_online_map;
+ cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+
+ send_IPI_allbutself(FLUSH_CACHE_IPI);
+
+ while (!cpus_empty(smp_cache_ipi_map))
+ /* nothing. lockup detection does not belong here */
+ mb();
+}
diff --git a/arch/mn10300/mm/cache-smp.h b/arch/mn10300/mm/cache-smp.h
new file mode 100644
index 00000000000..cb52892aa66
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp.h
@@ -0,0 +1,69 @@
+/* SMP caching definitions
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+
+/*
+ * Operation requests for smp_cache_call().
+ *
+ * One of smp_icache_ops and one of smp_dcache_ops can be OR'd together.
+ */
+enum smp_icache_ops {
+ SMP_ICACHE_NOP = 0x0000,
+ SMP_ICACHE_INV = 0x0001,
+ SMP_ICACHE_INV_RANGE = 0x0002,
+};
+#define SMP_ICACHE_OP_MASK 0x0003
+
+enum smp_dcache_ops {
+ SMP_DCACHE_NOP = 0x0000,
+ SMP_DCACHE_INV = 0x0004,
+ SMP_DCACHE_INV_RANGE = 0x0008,
+ SMP_DCACHE_FLUSH = 0x000c,
+ SMP_DCACHE_FLUSH_RANGE = 0x0010,
+ SMP_DCACHE_FLUSH_INV = 0x0014,
+ SMP_DCACHE_FLUSH_INV_RANGE = 0x0018,
+};
+#define SMP_DCACHE_OP_MASK 0x001c
+
+#define SMP_IDCACHE_INV_FLUSH (SMP_ICACHE_INV | SMP_DCACHE_FLUSH)
+#define SMP_IDCACHE_INV_FLUSH_RANGE (SMP_ICACHE_INV_RANGE | SMP_DCACHE_FLUSH_RANGE)
+
+/*
+ * cache-smp.c
+ */
+#ifdef CONFIG_SMP
+extern spinlock_t smp_cache_lock;
+
+extern void smp_cache_call(unsigned long opr_mask,
+ unsigned long addr, unsigned long end);
+
+static inline unsigned long smp_lock_cache(void)
+ __acquires(&smp_cache_lock)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&smp_cache_lock, flags);
+ return flags;
+}
+
+static inline void smp_unlock_cache(unsigned long flags)
+ __releases(&smp_cache_lock)
+{
+ spin_unlock_irqrestore(&smp_cache_lock, flags);
+}
+
+#else
+static inline unsigned long smp_lock_cache(void) { return 0; }
+static inline void smp_unlock_cache(unsigned long flags) {}
+static inline void smp_cache_call(unsigned long opr_mask,
+ unsigned long addr, unsigned long end)
+{
+}
+#endif /* CONFIG_SMP */
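
The header comment notes that one icache op and one dcache op may be OR'd into a single request; a hypothetical combined helper (example_flush_inv_range is a made-up name, and this exact combination does not appear in the patch) would look like:

#include <asm/cacheflush.h>
#include "cache-smp.h"

/* flush the dcache and invalidate the icache over one range, paying for
 * only a single IPI round trip to the other CPUs */
static void example_flush_inv_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	flags = smp_lock_cache();
	mn10300_local_dcache_flush_range(start, end);
	mn10300_local_icache_inv_range(start, end);
	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
	smp_unlock_cache(flags);
}

This appears to be the pattern the SMP_IDCACHE_INV_FLUSH_RANGE convenience macro is defined for.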
diff --git a/arch/mn10300/mm/cache.c b/arch/mn10300/mm/cache.c
index 9261217e8d2..0a1f0aa92eb 100644
--- a/arch/mn10300/mm/cache.c
+++ b/arch/mn10300/mm/cache.c
@@ -18,8 +18,13 @@
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
EXPORT_SYMBOL(mn10300_icache_inv);
+EXPORT_SYMBOL(mn10300_icache_inv_range);
+EXPORT_SYMBOL(mn10300_icache_inv_range2);
+EXPORT_SYMBOL(mn10300_icache_inv_page);
EXPORT_SYMBOL(mn10300_dcache_inv);
EXPORT_SYMBOL(mn10300_dcache_inv_range);
EXPORT_SYMBOL(mn10300_dcache_inv_range2);
@@ -37,96 +42,6 @@ EXPORT_SYMBOL(mn10300_dcache_flush_page);
#endif
/*
- * write a page back from the dcache and invalidate the icache so that we can
- * run code from it that we've just written into it
- */
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
- mn10300_dcache_flush_page(page_to_phys(page));
- mn10300_icache_inv();
-}
-EXPORT_SYMBOL(flush_icache_page);
-
-/*
- * write some code we've just written back from the dcache and invalidate the
- * icache so that we can run that code
- */
-void flush_icache_range(unsigned long start, unsigned long end)
-{
-#ifdef CONFIG_MN10300_CACHE_WBACK
- unsigned long addr, size, base, off;
- struct page *page;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *ppte, pte;
-
- if (end > 0x80000000UL) {
- /* addresses above 0xa0000000 do not go through the cache */
- if (end > 0xa0000000UL) {
- end = 0xa0000000UL;
- if (start >= end)
- return;
- }
-
- /* kernel addresses between 0x80000000 and 0x9fffffff do not
- * require page tables, so we just map such addresses directly */
- base = (start >= 0x80000000UL) ? start : 0x80000000UL;
- mn10300_dcache_flush_range(base, end);
- if (base == start)
- goto invalidate;
- end = base;
- }
-
- for (; start < end; start += size) {
- /* work out how much of the page to flush */
- off = start & (PAGE_SIZE - 1);
-
- size = end - start;
- if (size > PAGE_SIZE - off)
- size = PAGE_SIZE - off;
-
- /* get the physical address the page is mapped to from the page
- * tables */
- pgd = pgd_offset(current->mm, start);
- if (!pgd || !pgd_val(*pgd))
- continue;
-
- pud = pud_offset(pgd, start);
- if (!pud || !pud_val(*pud))
- continue;
-
- pmd = pmd_offset(pud, start);
- if (!pmd || !pmd_val(*pmd))
- continue;
-
- ppte = pte_offset_map(pmd, start);
- if (!ppte)
- continue;
- pte = *ppte;
- pte_unmap(ppte);
-
- if (pte_none(pte))
- continue;
-
- page = pte_page(pte);
- if (!page)
- continue;
-
- addr = page_to_phys(page);
-
- /* flush the dcache and invalidate the icache coverage on that
- * region */
- mn10300_dcache_flush_range2(addr + off, size);
- }
-#endif
-
-invalidate:
- mn10300_icache_inv();
-}
-EXPORT_SYMBOL(flush_icache_range);
-
-/*
* allow userspace to flush the instruction cache
*/
asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 81f153fa51b..59c3da49d9d 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -39,10 +39,6 @@ void bust_spinlocks(int yes)
{
if (yes) {
oops_in_progress = 1;
-#ifdef CONFIG_SMP
- /* Many serial drivers do __global_cli() */
- global_irq_lock = 0;
-#endif
} else {
int loglevel_save = console_loglevel;
#ifdef CONFIG_VT
@@ -100,8 +96,6 @@ static void print_pagetable_entries(pgd_t *pgdir, unsigned long address)
}
#endif
-asmlinkage void monitor_signal(struct pt_regs *);
-
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
@@ -279,7 +273,6 @@ good_area:
*/
bad_area:
up_read(&mm->mmap_sem);
- monitor_signal(regs);
/* User mode accesses just cause a SIGSEGV */
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
@@ -292,7 +285,6 @@ bad_area:
}
no_context:
- monitor_signal(regs);
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
@@ -338,14 +330,13 @@ no_context:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if ((fault_code & MMUFCR_xFC_ACCESS) != MMUFCR_xFC_ACCESS_USR)
- goto no_context;
- pagefault_out_of_memory();
- return;
+ printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
+ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
+ do_exit(SIGKILL);
+ goto no_context;
do_sigbus:
up_read(&mm->mmap_sem);
- monitor_signal(regs);
/*
* Send a sigbus, regardless of whether we were in kernel
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index 6e6bc0e5152..48907cc3bdb 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -41,6 +41,10 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static struct vm_struct user_iomap_vm;
+#endif
+
/*
* set up paging
*/
@@ -73,7 +77,24 @@ void __init paging_init(void)
/* pass the memory from the bootmem allocator to the main allocator */
free_area_init(zones_size);
- __flush_tlb_all();
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+ /* The Atomic Operation Unit registers need to be mapped to userspace
+ * for all processes. The following uses vm_area_register_early() to
+ * reserve the first page of the vmalloc area and sets the pte for that
+ * page.
+ *
+ * glibc hardcodes this virtual mapping, so we're pretty much stuck with
+ * it from now on.
+ */
+ user_iomap_vm.flags = VM_USERMAP;
+ user_iomap_vm.size = 1 << PAGE_SHIFT;
+ vm_area_register_early(&user_iomap_vm, PAGE_SIZE);
+ ppte = kernel_vmalloc_ptes;
+ set_pte(ppte, pfn_pte(USER_ATOMIC_OPS_PAGE_ADDR >> PAGE_SHIFT,
+ PAGE_USERIO));
+#endif
+
+ local_flush_tlb_all();
}
/*
@@ -84,8 +105,7 @@ void __init mem_init(void)
int codesize, reservedpages, datasize, initsize;
int tmp;
- if (!mem_map)
- BUG();
+ BUG_ON(!mem_map);
#define START_PFN (contig_page_data.bdata->node_min_pfn)
#define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn)
diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c
index 6dffbf97ac2..eef989c1d0c 100644
--- a/arch/mn10300/mm/misalignment.c
+++ b/arch/mn10300/mm/misalignment.c
@@ -449,8 +449,7 @@ found_opcode:
regs->pc, opcode, pop->opcode, pop->params[0], pop->params[1]);
tmp = format_tbl[pop->format].opsz;
- if (tmp > noc)
- BUG(); /* match was less complete than it ought to have been */
+ BUG_ON(tmp > noc); /* match was less complete than it ought to have been */
if (tmp < noc) {
tmp = noc - tmp;
diff --git a/arch/mn10300/mm/mmu-context.c b/arch/mn10300/mm/mmu-context.c
index 36ba02191d4..a4f7d3dcc6e 100644
--- a/arch/mn10300/mm/mmu-context.c
+++ b/arch/mn10300/mm/mmu-context.c
@@ -13,40 +13,15 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
/*
* list of the MMU contexts last allocated on each CPU
*/
unsigned long mmu_context_cache[NR_CPUS] = {
- [0 ... NR_CPUS - 1] = MMU_CONTEXT_FIRST_VERSION * 2 - 1,
+ [0 ... NR_CPUS - 1] =
+ MMU_CONTEXT_FIRST_VERSION * 2 - (1 - MMU_CONTEXT_TLBPID_LOCK_NR),
};
-
-/*
- * flush the specified TLB entry
- */
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
-{
- unsigned long pteu, cnx, flags;
-
- addr &= PAGE_MASK;
-
- /* make sure the context doesn't migrate and defend against
- * interference from vmalloc'd regions */
- local_irq_save(flags);
-
- cnx = mm_context(vma->vm_mm);
-
- if (cnx != MMU_NO_CONTEXT) {
- pteu = addr | (cnx & 0x000000ffUL);
- IPTEU = pteu;
- DPTEU = pteu;
- if (IPTEL & xPTEL_V)
- IPTEL = 0;
- if (DPTEL & xPTEL_V)
- DPTEL = 0;
- }
-
- local_irq_restore(flags);
-}
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
/*
* preemptively set a TLB entry
@@ -63,10 +38,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *pte
* interference from vmalloc'd regions */
local_irq_save(flags);
+ cnx = ~MMU_NO_CONTEXT;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
cnx = mm_context(vma->vm_mm);
+#endif
if (cnx != MMU_NO_CONTEXT) {
- pteu = addr | (cnx & 0x000000ffUL);
+ pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+ pteu |= cnx & MMU_CONTEXT_TLBPID_MASK;
+#endif
if (!(pte_val(pte) & _PAGE_NX)) {
IPTEU = pteu;
if (IPTEL & xPTEL_V)
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
index 9c1624c9e4e..450f7ba3f8f 100644
--- a/arch/mn10300/mm/pgtable.c
+++ b/arch/mn10300/mm/pgtable.c
@@ -59,7 +59,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
* It's enough to flush this one mapping.
* (PGE mappings get flushed as well)
*/
- __flush_tlb_one(vaddr);
+ local_flush_tlb_one(vaddr);
}
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
diff --git a/arch/mn10300/mm/tlb-mn10300.S b/arch/mn10300/mm/tlb-mn10300.S
index 7095147dcb8..b9940177d81 100644
--- a/arch/mn10300/mm/tlb-mn10300.S
+++ b/arch/mn10300/mm/tlb-mn10300.S
@@ -27,7 +27,6 @@
###############################################################################
.type itlb_miss,@function
ENTRY(itlb_miss)
- and ~EPSW_NMID,epsw
#ifdef CONFIG_GDBSTUB
movm [d2,d3,a2],(sp)
#else
@@ -38,6 +37,12 @@ ENTRY(itlb_miss)
nop
#endif
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d2
+ mov d2,(MMUCTR)
+#endif
+
+ and ~EPSW_NMID,epsw
mov (IPTEU),d3
mov (PTBR),a2
mov d3,d2
@@ -56,10 +61,16 @@ ENTRY(itlb_miss)
btst _PAGE_VALID,d2
beq itlb_miss_fault # jump if doesn't point to a page
# (might be a swap id)
+#if ((_PAGE_ACCESSED & 0xffffff00) == 0)
bset _PAGE_ACCESSED,(0,a2)
- and ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
+#elif ((_PAGE_ACCESSED & 0xffff00ff) == 0)
+ bset +(_PAGE_ACCESSED >> 8),(1,a2)
+#else
+#error "_PAGE_ACCESSED value is out of range"
+#endif
+ and ~xPTEL2_UNUSED1,d2
itlb_miss_set:
- mov d2,(IPTEL) # change the TLB
+ mov d2,(IPTEL2) # change the TLB
#ifdef CONFIG_GDBSTUB
movm (sp),[d2,d3,a2]
#endif
@@ -79,7 +90,6 @@ itlb_miss_fault:
###############################################################################
.type dtlb_miss,@function
ENTRY(dtlb_miss)
- and ~EPSW_NMID,epsw
#ifdef CONFIG_GDBSTUB
movm [d2,d3,a2],(sp)
#else
@@ -90,6 +100,12 @@ ENTRY(dtlb_miss)
nop
#endif
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d2
+ mov d2,(MMUCTR)
+#endif
+
+ and ~EPSW_NMID,epsw
mov (DPTEU),d3
mov (PTBR),a2
mov d3,d2
@@ -108,10 +124,16 @@ ENTRY(dtlb_miss)
btst _PAGE_VALID,d2
beq dtlb_miss_fault # jump if doesn't point to a page
# (might be a swap id)
+#if ((_PAGE_ACCESSED & 0xffffff00) == 0)
bset _PAGE_ACCESSED,(0,a2)
- and ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
+#elif ((_PAGE_ACCESSED & 0xffff00ff) == 0)
+ bset +(_PAGE_ACCESSED >> 8),(1,a2)
+#else
+#error "_PAGE_ACCESSED value is out of range"
+#endif
+ and ~xPTEL2_UNUSED1,d2
dtlb_miss_set:
- mov d2,(DPTEL) # change the TLB
+ mov d2,(DPTEL2) # change the TLB
#ifdef CONFIG_GDBSTUB
movm (sp),[d2,d3,a2]
#endif
@@ -130,9 +152,15 @@ dtlb_miss_fault:
###############################################################################
.type itlb_aerror,@function
ENTRY(itlb_aerror)
- and ~EPSW_NMID,epsw
add -4,sp
SAVE_ALL
+
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d1
+ mov d1,(MMUCTR)
+#endif
+
+ and ~EPSW_NMID,epsw
add -4,sp # need to pass three params
# calculate the fault code
@@ -140,15 +168,13 @@ ENTRY(itlb_aerror)
or 0x00010000,d1 # it's an instruction fetch
# determine the page address
- mov (IPTEU),a2
- mov a2,d0
+ mov (IPTEU),d0
and PAGE_MASK,d0
mov d0,(12,sp)
clr d0
- mov d0,(IPTEL)
+ mov d0,(IPTEL2)
- and ~EPSW_NMID,epsw
or EPSW_IE,epsw
mov fp,d0
call do_page_fault[],0 # do_page_fault(regs,code,addr
@@ -163,10 +189,16 @@ ENTRY(itlb_aerror)
###############################################################################
.type dtlb_aerror,@function
ENTRY(dtlb_aerror)
- and ~EPSW_NMID,epsw
add -4,sp
SAVE_ALL
+
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+ mov (MMUCTR),d1
+ mov d1,(MMUCTR)
+#endif
+
add -4,sp # need to pass three params
+ and ~EPSW_NMID,epsw
# calculate the fault code
movhu (MMUFCR_DFC),d1
@@ -178,9 +210,8 @@ ENTRY(dtlb_aerror)
mov d0,(12,sp)
clr d0
- mov d0,(DPTEL)
+ mov d0,(DPTEL2)
- and ~EPSW_NMID,epsw
or EPSW_IE,epsw
mov fp,d0
call do_page_fault[],0 # do_page_fault(regs,code,addr
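
The _PAGE_ACCESSED update in the two TLB-miss handlers above is easy to misread: bset can only reach an 8-bit field, so the patch selects at build time which byte of the PTE holds the accessed bit and sets the bit within that byte. A hypothetical C equivalent (set_accessed and pte_bytes are illustrative names, with the PTE viewed as the same byte offsets the assembly addresses):

#include <stdint.h>

static void set_accessed(uint8_t *pte_bytes, uint32_t page_accessed)
{
	if ((page_accessed & 0xffffff00) == 0)
		pte_bytes[0] |= page_accessed;		/* accessed bit in byte 0 */
	else if ((page_accessed & 0xffff00ff) == 0)
		pte_bytes[1] |= page_accessed >> 8;	/* accessed bit in byte 1 */
	/* any other placement trips the #error in the assembly */
}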
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
new file mode 100644
index 00000000000..0b6a5ad1960
--- /dev/null
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -0,0 +1,214 @@
+/* SMP TLB support routines.
+ *
+ * Copyright (C) 2006-2008 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+#include <asm/tlbflush.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+#include <asm/bug.h>
+#include <asm/exceptions.h>
+#include <asm/hardirq.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
+
+/*
+ * For flush TLB
+ */
+#define FLUSH_ALL 0xffffffff
+
+static cpumask_t flush_cpumask;
+static struct mm_struct *flush_mm;
+static unsigned long flush_va;
+static DEFINE_SPINLOCK(tlbstate_lock);
+
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+ &init_mm, 0
+};
+
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+ unsigned long va);
+static void do_flush_tlb_all(void *info);
+
+/**
+ * smp_flush_tlb - Callback to invalidate the TLB.
+ * @unused: Callback context (ignored).
+ */
+void smp_flush_tlb(void *unused)
+{
+ unsigned long cpu_id;
+
+ cpu_id = get_cpu();
+
+ if (!cpu_isset(cpu_id, flush_cpumask))
+ /* This was a BUG() but until someone can quote me the line
+ * from the intel manual that guarantees an IPI to multiple
+ * CPUs is retried _only_ on the erroring CPUs its staying as a
+ * return
+ *
+ * BUG();
+ */
+ goto out;
+
+ if (flush_va == FLUSH_ALL)
+ local_flush_tlb();
+ else
+ local_flush_tlb_page(flush_mm, flush_va);
+
+ smp_mb__before_clear_bit();
+ cpu_clear(cpu_id, flush_cpumask);
+ smp_mb__after_clear_bit();
+out:
+ put_cpu();
+}
+
+/**
+ * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
+ * @cpumask: The list of CPUs to target.
+ * @mm: The VM context to flush from (if va!=FLUSH_ALL).
+ * @va: Virtual address to flush or FLUSH_ALL to flush everything.
+ */
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+ unsigned long va)
+{
+ cpumask_t tmp;
+
+ /* A couple of sanity checks (to be removed):
+ * - mask must not be empty
+ * - current CPU must not be in mask
+ * - we do not send IPIs to as-yet unbooted CPUs.
+ */
+ BUG_ON(!mm);
+ BUG_ON(cpus_empty(cpumask));
+ BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+
+ cpus_and(tmp, cpumask, cpu_online_map);
+ BUG_ON(!cpus_equal(cpumask, tmp));
+
+ /* I'm not happy about this global shared spinlock in the MM hot path,
+ * but we'll see how contended it is.
+ *
+ * Temporarily this turns IRQs off, so that lockups are detected by the
+ * NMI watchdog.
+ */
+ spin_lock(&tlbstate_lock);
+
+ flush_mm = mm;
+ flush_va = va;
+#if NR_CPUS <= BITS_PER_LONG
+ atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+#else
+#error Not supported.
+#endif
+
+ /* FIXME: if NR_CPUS>=3, change send_IPI_mask */
+ smp_call_function(smp_flush_tlb, NULL, 1);
+
+ while (!cpus_empty(flush_cpumask))
+ /* Lockup detection does not belong here */
+ smp_mb();
+
+ flush_mm = NULL;
+ flush_va = 0;
+ spin_unlock(&tlbstate_lock);
+}
+
+/**
+ * flush_tlb_mm - Invalidate TLB of specified VM context
+ * @mm: The VM context to invalidate.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ local_flush_tlb();
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+ preempt_enable();
+}
+
+/**
+ * flush_tlb_current_task - Invalidate TLB of current task
+ */
+void flush_tlb_current_task(void)
+{
+ struct mm_struct *mm = current->mm;
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ local_flush_tlb();
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+ preempt_enable();
+}
+
+/**
+ * flush_tlb_page - Invalidate TLB of page
+ * @vma: The VM context to invalidate the page for.
+ * @va: The virtual address of the page to invalidate.
+ */
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ cpumask_t cpu_mask;
+
+ preempt_disable();
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+
+ local_flush_tlb_page(mm, va);
+ if (!cpus_empty(cpu_mask))
+ flush_tlb_others(cpu_mask, mm, va);
+
+ preempt_enable();
+}
+
+/**
+ * do_flush_tlb_all - Callback to completely invalidate a TLB
+ * @unused: Callback context (ignored).
+ */
+static void do_flush_tlb_all(void *unused)
+{
+ local_flush_tlb_all();
+}
+
+/**
+ * flush_tlb_all - Completely invalidate TLBs on all CPUs
+ */
+void flush_tlb_all(void)
+{
+ on_each_cpu(do_flush_tlb_all, 0, 1);
+}
diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
index bdc1f9a59b4..c1528004163 100644
--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
@@ -30,4 +30,13 @@
*/
#define MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL 4
+/*
+ * The size of the range at which it becomes more economical to just flush the
+ * whole cache rather than trying to flush the specified range.
+ */
+#define MN10300_DCACHE_FLUSH_BORDER \
+ +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+#define MN10300_DCACHE_FLUSH_INV_BORDER \
+ +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+
#endif /* _ASM_PROC_CACHE_H */
diff --git a/arch/mn10300/proc-mn103e010/include/proc/clock.h b/arch/mn10300/proc-mn103e010/include/proc/clock.h
index aa23e147d62..704a819f1f4 100644
--- a/arch/mn10300/proc-mn103e010/include/proc/clock.h
+++ b/arch/mn10300/proc-mn103e010/include/proc/clock.h
@@ -13,6 +13,4 @@
#include <unit/clock.h>
-#define MN10300_WDCLK MN10300_IOCLK
-
#endif /* _ASM_PROC_CLOCK_H */
diff --git a/arch/mn10300/proc-mn103e010/include/proc/dmactl-regs.h b/arch/mn10300/proc-mn103e010/include/proc/dmactl-regs.h
new file mode 100644
index 00000000000..d72d328d1f9
--- /dev/null
+++ b/arch/mn10300/proc-mn103e010/include/proc/dmactl-regs.h
@@ -0,0 +1,102 @@
+/* MN103E010 on-board DMA controller registers
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PROC_DMACTL_REGS_H
+#define _ASM_PROC_DMACTL_REGS_H
+
+#include <asm/cpu-regs.h>
+
+#ifdef __KERNEL__
+
+/* DMA registers */
+#define DMxCTR(N) __SYSREG(0xd2000000 + ((N) * 0x100), u32) /* control reg */
+#define DMxCTR_BG 0x0000001f /* transfer request source */
+#define DMxCTR_BG_SOFT 0x00000000 /* - software source */
+#define DMxCTR_BG_SC0TX 0x00000002 /* - serial port 0 transmission */
+#define DMxCTR_BG_SC0RX 0x00000003 /* - serial port 0 reception */
+#define DMxCTR_BG_SC1TX 0x00000004 /* - serial port 1 transmission */
+#define DMxCTR_BG_SC1RX 0x00000005 /* - serial port 1 reception */
+#define DMxCTR_BG_SC2TX 0x00000006 /* - serial port 2 transmission */
+#define DMxCTR_BG_SC2RX 0x00000007 /* - serial port 2 reception */
+#define DMxCTR_BG_TM0UFLOW 0x00000008 /* - timer 0 underflow */
+#define DMxCTR_BG_TM1UFLOW 0x00000009 /* - timer 1 underflow */
+#define DMxCTR_BG_TM2UFLOW 0x0000000a /* - timer 2 underflow */
+#define DMxCTR_BG_TM3UFLOW 0x0000000b /* - timer 3 underflow */
+#define DMxCTR_BG_TM6ACMPCAP 0x0000000c /* - timer 6A compare/capture */
+#define DMxCTR_BG_AFE 0x0000000d /* - analogue front-end interrupt source */
+#define DMxCTR_BG_ADC 0x0000000e /* - A/D conversion end interrupt source */
+#define DMxCTR_BG_IRDA 0x0000000f /* - IrDA interrupt source */
+#define DMxCTR_BG_RTC 0x00000010 /* - RTC interrupt source */
+#define DMxCTR_BG_XIRQ0 0x00000011 /* - XIRQ0 pin interrupt source */
+#define DMxCTR_BG_XIRQ1 0x00000012 /* - XIRQ1 pin interrupt source */
+#define DMxCTR_BG_XDMR0 0x00000013 /* - external request 0 source (XDMR0 pin) */
+#define DMxCTR_BG_XDMR1 0x00000014 /* - external request 1 source (XDMR1 pin) */
+#define DMxCTR_SAM 0x000000e0 /* DMA transfer src addr mode */
+#define DMxCTR_SAM_INCR 0x00000000 /* - increment */
+#define DMxCTR_SAM_DECR 0x00000020 /* - decrement */
+#define DMxCTR_SAM_FIXED 0x00000040 /* - fixed */
+#define DMxCTR_DAM 0x00000000 /* DMA transfer dest addr mode */
+#define DMxCTR_DAM_INCR 0x00000000 /* - increment */
+#define DMxCTR_DAM_DECR 0x00000100 /* - decrement */
+#define DMxCTR_DAM_FIXED 0x00000200 /* - fixed */
+#define DMxCTR_TM 0x00001800 /* DMA transfer mode */
+#define DMxCTR_TM_BATCH 0x00000000 /* - batch transfer */
+#define DMxCTR_TM_INTERM 0x00001000 /* - intermittent transfer */
+#define DMxCTR_UT 0x00006000 /* DMA transfer unit */
+#define DMxCTR_UT_1 0x00000000 /* - 1 byte */
+#define DMxCTR_UT_2 0x00002000 /* - 2 byte */
+#define DMxCTR_UT_4 0x00004000 /* - 4 byte */
+#define DMxCTR_UT_16 0x00006000 /* - 16 byte */
+#define DMxCTR_TEN 0x00010000 /* DMA channel transfer enable */
+#define DMxCTR_RQM 0x00060000 /* external request input source mode */
+#define DMxCTR_RQM_FALLEDGE 0x00000000 /* - falling edge */
+#define DMxCTR_RQM_RISEEDGE 0x00020000 /* - rising edge */
+#define DMxCTR_RQM_LOLEVEL 0x00040000 /* - low level */
+#define DMxCTR_RQM_HILEVEL 0x00060000 /* - high level */
+#define DMxCTR_RQF 0x01000000 /* DMA transfer request flag */
+#define DMxCTR_XEND 0x80000000 /* DMA transfer end flag */
+
+#define DMxSRC(N) __SYSREG(0xd2000004 + ((N) * 0x100), u32) /* source addr reg */
+
+#define DMxDST(N) __SYSREG(0xd2000008 + ((N) * 0x100), u32) /* dest addr reg */
+
+#define DMxSIZ(N) __SYSREG(0xd200000c + ((N) * 0x100), u32) /* transfer size reg */
+#define DMxSIZ_CT 0x000fffff /* number of bytes to transfer */
+
+#define DMxCYC(N) __SYSREG(0xd2000010 + ((N) * 0x100), u32) /* intermittent
+ * size reg */
+#define DMxCYC_CYC 0x000000ff /* number of intermittent transfers - 1 */
+
+#define DM0IRQ 16 /* DMA channel 0 complete IRQ */
+#define DM1IRQ 17 /* DMA channel 1 complete IRQ */
+#define DM2IRQ 18 /* DMA channel 2 complete IRQ */
+#define DM3IRQ 19 /* DMA channel 3 complete IRQ */
+
+#define DM0ICR GxICR(DM0IRQ) /* DMA channel 0 complete intr ctrl reg */
+#define DM1ICR GxICR(DM1IRQ) /* DMA channel 1 complete intr ctrl reg */
+#define DM2ICR GxICR(DM2IRQ) /* DMA channel 2 complete intr ctrl reg */
+#define DM3ICR GxICR(DM3IRQ) /* DMA channel 3 complete intr ctrl reg */
+
+#ifndef __ASSEMBLY__
+
+struct mn10300_dmactl_regs {
+ u32 ctr;
+ const void *src;
+ void *dst;
+ u32 siz;
+ u32 cyc;
+} __attribute__((aligned(0x100)));
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_PROC_DMACTL_REGS_H */
diff --git a/arch/mn10300/proc-mn103e010/include/proc/intctl-regs.h b/arch/mn10300/proc-mn103e010/include/proc/intctl-regs.h
new file mode 100644
index 00000000000..f537801a44b
--- /dev/null
+++ b/arch/mn10300/proc-mn103e010/include/proc/intctl-regs.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_PROC_INTCTL_REGS_H
+#define _ASM_PROC_INTCTL_REGS_H
+
+#ifndef _ASM_INTCTL_REGS_H
+# error "please don't include this file directly"
+#endif
+
+/* intr acceptance group reg */
+#define IAGR __SYSREG(0xd4000100, u16)
+
+/* group number register */
+#define IAGR_GN 0x00fc
+
+#define __GET_XIRQ_TRIGGER(X, Z) (((Z) >> ((X) * 2)) & 3)
+
+#define __SET_XIRQ_TRIGGER(X, Y, Z) \
+({ \
+ typeof(Z) x = (Z); \
+ x &= ~(3 << ((X) * 2)); \
+ x |= ((Y) & 3) << ((X) * 2); \
+ (Z) = x; \
+})
+
+/* external pin intr spec reg */
+#define EXTMD __SYSREG(0xd4000200, u16)
+#define GET_XIRQ_TRIGGER(X) __GET_XIRQ_TRIGGER(X, EXTMD)
+#define SET_XIRQ_TRIGGER(X, Y) __SET_XIRQ_TRIGGER(X, Y, EXTMD)
+
+#endif /* _ASM_PROC_INTCTL_REGS_H */
diff --git a/arch/mn10300/proc-mn103e010/include/proc/proc.h b/arch/mn10300/proc-mn103e010/include/proc/proc.h
index 22a2b93f70b..39c4f8e7d2d 100644
--- a/arch/mn10300/proc-mn103e010/include/proc/proc.h
+++ b/arch/mn10300/proc-mn103e010/include/proc/proc.h
@@ -12,7 +12,7 @@
#ifndef _ASM_PROC_PROC_H
#define _ASM_PROC_PROC_H
-#define PROCESSOR_VENDOR_NAME "Matsushita"
+#define PROCESSOR_VENDOR_NAME "Panasonic"
#define PROCESSOR_MODEL_NAME "mn103e010"
#endif /* _ASM_PROC_PROC_H */
diff --git a/arch/mn10300/proc-mn103e010/proc-init.c b/arch/mn10300/proc-mn103e010/proc-init.c
index 9a482efafa8..27b97980dca 100644
--- a/arch/mn10300/proc-mn103e010/proc-init.c
+++ b/arch/mn10300/proc-mn103e010/proc-init.c
@@ -9,7 +9,9 @@
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/kernel.h>
+#include <asm/fpu.h>
#include <asm/rtc.h>
+#include <asm/busctl-regs.h>
/*
* initialise the on-silicon processor peripherals
@@ -28,6 +30,7 @@ asmlinkage void __init processor_init(void)
__set_intr_stub(EXCEP_DAERROR, dtlb_aerror);
__set_intr_stub(EXCEP_BUSERROR, raw_bus_error);
__set_intr_stub(EXCEP_DOUBLE_FAULT, double_fault);
+ __set_intr_stub(EXCEP_FPU_DISABLED, fpu_disabled);
__set_intr_stub(EXCEP_SYSCALL0, system_call);
__set_intr_stub(EXCEP_NMI, nmi_handler);
@@ -73,3 +76,37 @@ asmlinkage void __init processor_init(void)
calibrate_clock();
}
+
+/*
+ * determine the memory size and base from the memory controller regs
+ */
+void __init get_mem_info(unsigned long *mem_base, unsigned long *mem_size)
+{
+ unsigned long base, size;
+
+ *mem_base = 0;
+ *mem_size = 0;
+
+ base = SDBASE(0);
+ if (base & SDBASE_CE) {
+ size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
+ size = ~size + 1;
+ base &= SDBASE_CBA;
+
+ printk(KERN_INFO "SDRAM[0]: %luMb @%08lx\n", size >> 20, base);
+ *mem_size += size;
+ *mem_base = base;
+ }
+
+ base = SDBASE(1);
+ if (base & SDBASE_CE) {
+ size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
+ size = ~size + 1;
+ base &= SDBASE_CBA;
+
+ printk(KERN_INFO "SDRAM[1]: %luMb @%08lx\n", size >> 20, base);
+ *mem_size += size;
+ if (*mem_base == 0)
+ *mem_base = base;
+ }
+}
diff --git a/arch/mn10300/proc-mn2ws0050/Makefile b/arch/mn10300/proc-mn2ws0050/Makefile
new file mode 100644
index 00000000000..d4ca13309a8
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the linux kernel.
+#
+
+obj-y := proc-init.o
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
new file mode 100644
index 00000000000..cafd7b5b55b
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
@@ -0,0 +1,48 @@
+/* Cache specification
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 13-Nov-2006 MEI Add L1_CACHE_SHIFT_MAX definition.
+ * 29-Jul-2008 MEI Add define for MN10300_HAS_AREAPURGE_REG.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_PROC_CACHE_H
+#define _ASM_PROC_CACHE_H
+
+/*
+ * L1 cache
+ */
+#define L1_CACHE_NWAYS 4 /* number of ways in caches */
+#define L1_CACHE_NENTRIES 128 /* number of entries in each way */
+#define L1_CACHE_BYTES 32 /* bytes per entry */
+#define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
+#define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
+
+#define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
+#define L1_CACHE_TAG_DIRTY 0x00000008 /* data cache tag dirty bit */
+#define L1_CACHE_TAG_ENTRY 0x00000fe0 /* cache tag entry address mask */
+#define L1_CACHE_TAG_ADDRESS 0xfffff000 /* cache tag line address mask */
+
+/*
+ * specification of the interval between interrupt checks whilst managing the
+ * cache with interrupts disabled
+ */
+#define MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL 4
+
+/*
+ * The size of the range at which it becomes more economical to just flush the
+ * whole cache rather than trying to flush the specified range.
+ */
+#define MN10300_DCACHE_FLUSH_BORDER \
+ +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+#define MN10300_DCACHE_FLUSH_INV_BORDER \
+ +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+
+#endif /* _ASM_PROC_CACHE_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/clock.h b/arch/mn10300/proc-mn2ws0050/include/proc/clock.h
new file mode 100644
index 00000000000..fe4c0a4a53a
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/clock.h
@@ -0,0 +1,20 @@
+/* clock.h: proc-specific clocks
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 23-Feb-2007 MEI Delete define for watchdog timer.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_PROC_CLOCK_H
+#define _ASM_PROC_CLOCK_H
+
+#include <unit/clock.h>
+
+#endif /* _ASM_PROC_CLOCK_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/dmactl-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/dmactl-regs.h
new file mode 100644
index 00000000000..4c4319e241d
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/dmactl-regs.h
@@ -0,0 +1,103 @@
+/* MN2WS0050 on-board DMA controller registers
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_PROC_DMACTL_REGS_H
+#define _ASM_PROC_DMACTL_REGS_H
+
+#include <asm/cpu-regs.h>
+
+#ifdef __KERNEL__
+
+/* DMA registers */
+#define DMxCTR(N) __SYSREG(0xd4005000+(N*0x100), u32) /* control reg */
+#define DMxCTR_BG 0x0000001f /* transfer request source */
+#define DMxCTR_BG_SOFT 0x00000000 /* - software source */
+#define DMxCTR_BG_SC0TX 0x00000002 /* - serial port 0 transmission */
+#define DMxCTR_BG_SC0RX 0x00000003 /* - serial port 0 reception */
+#define DMxCTR_BG_SC1TX 0x00000004 /* - serial port 1 transmission */
+#define DMxCTR_BG_SC1RX 0x00000005 /* - serial port 1 reception */
+#define DMxCTR_BG_SC2TX 0x00000006 /* - serial port 2 transmission */
+#define DMxCTR_BG_SC2RX 0x00000007 /* - serial port 2 reception */
+#define DMxCTR_BG_TM0UFLOW 0x00000008 /* - timer 0 underflow */
+#define DMxCTR_BG_TM1UFLOW 0x00000009 /* - timer 1 underflow */
+#define DMxCTR_BG_TM2UFLOW 0x0000000a /* - timer 2 underflow */
+#define DMxCTR_BG_TM3UFLOW 0x0000000b /* - timer 3 underflow */
+#define DMxCTR_BG_TM6ACMPCAP 0x0000000c /* - timer 6A compare/capture */
+#define DMxCTR_BG_RYBY 0x0000000d /* - NAND Flash RY/BY request source */
+#define DMxCTR_BG_RMC 0x0000000e /* - remote controller output */
+#define DMxCTR_BG_XIRQ12 0x00000011 /* - XIRQ12 pin interrupt source */
+#define DMxCTR_BG_XIRQ13 0x00000012 /* - XIRQ13 pin interrupt source */
+#define DMxCTR_BG_TCK 0x00000014 /* - tick timer underflow */
+#define DMxCTR_BG_SC4TX 0x00000019 /* - serial port4 transmission */
+#define DMxCTR_BG_SC4RX 0x0000001a /* - serial port4 reception */
+#define DMxCTR_BG_SC5TX 0x0000001b /* - serial port5 transmission */
+#define DMxCTR_BG_SC5RX 0x0000001c /* - serial port5 reception */
+#define DMxCTR_BG_SC6TX 0x0000001d /* - serial port6 transmission */
+#define DMxCTR_BG_SC6RX 0x0000001e /* - serial port6 reception */
+#define DMxCTR_BG_TMSUFLOW 0x0000001f /* - timestamp timer underflow */
+#define DMxCTR_SAM 0x00000060 /* DMA transfer src addr mode */
+#define DMxCTR_SAM_INCR 0x00000000 /* - increment */
+#define DMxCTR_SAM_DECR 0x00000020 /* - decrement */
+#define DMxCTR_SAM_FIXED 0x00000040 /* - fixed */
+#define DMxCTR_DAM 0x00000300 /* DMA transfer dest addr mode */
+#define DMxCTR_DAM_INCR 0x00000000 /* - increment */
+#define DMxCTR_DAM_DECR 0x00000100 /* - decrement */
+#define DMxCTR_DAM_FIXED 0x00000200 /* - fixed */
+#define DMxCTR_UT 0x00006000 /* DMA transfer unit */
+#define DMxCTR_UT_1 0x00000000 /* - 1 byte */
+#define DMxCTR_UT_2 0x00002000 /* - 2 byte */
+#define DMxCTR_UT_4 0x00004000 /* - 4 byte */
+#define DMxCTR_UT_16 0x00006000 /* - 16 byte */
+#define DMxCTR_RRE 0x00008000 /* DMA round robin enable */
+#define DMxCTR_TEN 0x00010000 /* DMA channel transfer enable */
+#define DMxCTR_RQM 0x00060000 /* external request input source mode */
+#define DMxCTR_RQM_FALLEDGE 0x00000000 /* - falling edge */
+#define DMxCTR_RQM_RISEEDGE 0x00020000 /* - rising edge */
+#define DMxCTR_RQM_LOLEVEL 0x00040000 /* - low level */
+#define DMxCTR_RQM_HILEVEL 0x00060000 /* - high level */
+#define DMxCTR_RQF 0x01000000 /* DMA transfer request flag */
+#define DMxCTR_PERR 0x40000000 /* DMA transfer parameter error flag */
+#define DMxCTR_XEND 0x80000000 /* DMA transfer end flag */
+
+#define DMxSRC(N) __SYSREG(0xd4005004+(N*0x100), u32) /* source addr reg */
+
+#define DMxDST(N) __SYSREG(0xd4005008+(N*0x100), u32) /* dest addr reg */
+
+#define DMxSIZ(N) __SYSREG(0xd400500c+(N*0x100), u32) /* transfer size reg */
+#define DMxSIZ_CT 0x000fffff /* number of bytes to transfer */
+
+#define DMxCYC(N) __SYSREG(0xd4005010+(N*0x100), u32) /* intermittent size reg */
+#define DMxCYC_CYC 0x000000ff /* number of intermittent transfers - 1 */
+
+#define DM0IRQ 16 /* DMA channel 0 complete IRQ */
+#define DM1IRQ 17 /* DMA channel 1 complete IRQ */
+#define DM2IRQ 18 /* DMA channel 2 complete IRQ */
+#define DM3IRQ 19 /* DMA channel 3 complete IRQ */
+
+#define DM0ICR GxICR(DM0IRQ) /* DMA channel 0 complete intr ctrl reg */
+#define DM1ICR GxICR(DM1IRQ) /* DMA channel 1 complete intr ctrl reg */
+#define DM2ICR GxICR(DM2IRQ) /* DMA channel 2 complete intr ctrl reg */
+#define DM3ICR GxICR(DM3IRQ) /* DMA channel 3 complete intr ctrl reg */
+
+#ifndef __ASSEMBLY__
+
+struct mn10300_dmactl_regs {
+ u32 ctr;
+ const void *src;
+ void *dst;
+ u32 siz;
+ u32 cyc;
+} __attribute__((aligned(0x100)));
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_PROC_DMACTL_REGS_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/intctl-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/intctl-regs.h
new file mode 100644
index 00000000000..a1e977273d1
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/intctl-regs.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_PROC_INTCTL_REGS_H
+#define _ASM_PROC_INTCTL_REGS_H
+
+#ifndef _ASM_INTCTL_REGS_H
+# error "please don't include this file directly"
+#endif
+
+/* intr acceptance group reg */
+#define IAGR __SYSREG(0xd4000100, u16)
+
+/* group number register */
+#define IAGR_GN 0x003fc
+
+#define __GET_XIRQ_TRIGGER(X, Z) (((Z) >> ((X) * 2)) & 3)
+
+#define __SET_XIRQ_TRIGGER(X, Y, Z) \
+({ \
+ typeof(Z) x = (Z); \
+ x &= ~(3 << ((X) * 2)); \
+ x |= ((Y) & 3) << ((X) * 2); \
+ (Z) = x; \
+})
+
+/* external pin intr spec reg */
+#define EXTMD0 __SYSREG(0xd4000200, u32)
+#define GET_XIRQ_TRIGGER(X) __GET_XIRQ_TRIGGER(X, EXTMD0)
+#define SET_XIRQ_TRIGGER(X, Y) __SET_XIRQ_TRIGGER(X, Y, EXTMD0)
+
+#endif /* _ASM_PROC_INTCTL_REGS_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/irq.h b/arch/mn10300/proc-mn2ws0050/include/proc/irq.h
new file mode 100644
index 00000000000..37777a85ab6
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/irq.h
@@ -0,0 +1,49 @@
+/* MN2WS0050 on-board interrupt controller registers
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 13-Nov-2006 MEI Define extended IRQ number for SMP support.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _PROC_IRQ_H
+#define _PROC_IRQ_H
+
+#ifdef __KERNEL__
+
+#define GxICR_NUM_IRQS 163
+#ifdef CONFIG_SMP
+#define GxICR_NUM_EXT_IRQS 197
+#endif /* CONFIG_SMP */
+
+#define GxICR_NUM_XIRQS 16
+
+#define XIRQ0 34
+#define XIRQ1 35
+#define XIRQ2 36
+#define XIRQ3 37
+#define XIRQ4 38
+#define XIRQ5 39
+#define XIRQ6 40
+#define XIRQ7 41
+#define XIRQ8 42
+#define XIRQ9 43
+#define XIRQ10 44
+#define XIRQ11 45
+#define XIRQ12 46
+#define XIRQ13 47
+#define XIRQ14 48
+#define XIRQ15 49
+
+#define XIRQ2IRQ(num) (XIRQ0 + num)
+
+#endif /* __KERNEL__ */
+
+#endif /* _PROC_IRQ_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/nand-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/nand-regs.h
new file mode 100644
index 00000000000..84448f3828b
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/nand-regs.h
@@ -0,0 +1,120 @@
+/* NAND flash interface register definitions
+ *
+ * Copyright (C) 2008-2009 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PROC_NAND_REGS_H_
+#define _PROC_NAND_REGS_H_
+
+/* command register */
+#define FCOMMAND_0 __SYSREG(0xd8f00000, u8) /* fcommand[24:31] */
+#define FCOMMAND_1 __SYSREG(0xd8f00001, u8) /* fcommand[16:23] */
+#define FCOMMAND_2 __SYSREG(0xd8f00002, u8) /* fcommand[8:15] */
+#define FCOMMAND_3 __SYSREG(0xd8f00003, u8) /* fcommand[0:7] */
+
+/* for DMA 16-byte transfers, use the FCOMMAND2 register */
+#define FCOMMAND2_0 __SYSREG(0xd8f00110, u8) /* fcommand2[24:31] */
+#define FCOMMAND2_1 __SYSREG(0xd8f00111, u8) /* fcommand2[16:23] */
+#define FCOMMAND2_2 __SYSREG(0xd8f00112, u8) /* fcommand2[8:15] */
+#define FCOMMAND2_3 __SYSREG(0xd8f00113, u8) /* fcommand2[0:7] */
+
+#define FCOMMAND_FIEN 0x80 /* nand flash I/F enable */
+#define FCOMMAND_BW_8BIT 0x00 /* 8bit bus width */
+#define FCOMMAND_BW_16BIT 0x40 /* 16bit bus width */
+#define FCOMMAND_BLOCKSZ_SMALL 0x00 /* small block */
+#define FCOMMAND_BLOCKSZ_LARGE 0x20 /* large block */
+#define FCOMMAND_DMASTART 0x10 /* dma start */
+#define FCOMMAND_RYBY 0x08 /* ready/busy flag */
+#define FCOMMAND_RYBYINTMSK 0x04 /* mask ready/busy interrupt */
+#define FCOMMAND_XFWP 0x02 /* write protect enable */
+#define FCOMMAND_XFCE 0x01 /* flash device disable */
+#define FCOMMAND_SEQKILL 0x10 /* stop seq-read */
+#define FCOMMAND_ANUM 0x07 /* address cycle */
+#define FCOMMAND_ANUM_NONE 0x00 /* address cycle none */
+#define FCOMMAND_ANUM_1CYC 0x01 /* address cycle 1cycle */
+#define FCOMMAND_ANUM_2CYC 0x02 /* address cycle 2cycle */
+#define FCOMMAND_ANUM_3CYC 0x03 /* address cycle 3cycle */
+#define FCOMMAND_ANUM_4CYC 0x04 /* address cycle 4cycle */
+#define FCOMMAND_ANUM_5CYC 0x05 /* address cycle 5cycle */
+#define FCOMMAND_FCMD_READ0 0x00 /* read1 command */
+#define FCOMMAND_FCMD_SEQIN 0x80 /* page program 1st command */
+#define FCOMMAND_FCMD_PAGEPROG 0x10 /* page program 2nd command */
+#define FCOMMAND_FCMD_RESET 0xff /* reset command */
+#define FCOMMAND_FCMD_ERASE1 0x60 /* erase 1st command */
+#define FCOMMAND_FCMD_ERASE2 0xd0 /* erase 2nd command */
+#define FCOMMAND_FCMD_STATUS 0x70 /* read status command */
+#define FCOMMAND_FCMD_READID 0x90 /* read id command */
+#define FCOMMAND_FCMD_READOOB 0x50 /* read3 command */
+/* address register */
+#define FADD __SYSREG(0xd8f00004, u32)
+/* address register 2 */
+#define FADD2 __SYSREG(0xd8f00008, u32)
+/* error judgement register */
+#define FJUDGE __SYSREG(0xd8f0000c, u32)
+#define FJUDGE_NOERR 0x0 /* no error */
+#define FJUDGE_1BITERR 0x1 /* 1bit error in data area */
+#define FJUDGE_PARITYERR 0x2 /* parity error */
+#define FJUDGE_UNCORRECTABLE 0x3 /* uncorrectable error */
+#define FJUDGE_ERRJDG_MSK 0x3 /* mask of judgement result */
+/* 1st ECC store register */
+#define FECC11 __SYSREG(0xd8f00010, u32)
+/* 2nd ECC store register */
+#define FECC12 __SYSREG(0xd8f00014, u32)
+/* 3rd ECC store register */
+#define FECC21 __SYSREG(0xd8f00018, u32)
+/* 4th ECC store register */
+#define FECC22 __SYSREG(0xd8f0001c, u32)
+/* 5th ECC store register */
+#define FECC31 __SYSREG(0xd8f00020, u32)
+/* 6th ECC store register */
+#define FECC32 __SYSREG(0xd8f00024, u32)
+/* 7th ECC store register */
+#define FECC41 __SYSREG(0xd8f00028, u32)
+/* 8th ECC store register */
+#define FECC42 __SYSREG(0xd8f0002c, u32)
+/* data register */
+#define FDATA __SYSREG(0xd8f00030, u32)
+/* access pulse register */
+#define FPWS __SYSREG(0xd8f00100, u32)
+#define FPWS_PWS1W_2CLK 0x00000000 /* write pulse width 1clock */
+#define FPWS_PWS1W_3CLK 0x01000000 /* write pulse width 2clock */
+#define FPWS_PWS1W_4CLK 0x02000000 /* write pulse width 4clock */
+#define FPWS_PWS1W_5CLK 0x03000000 /* write pulse width 5clock */
+#define FPWS_PWS1W_6CLK 0x04000000 /* write pulse width 6clock */
+#define FPWS_PWS1W_7CLK 0x05000000 /* write pulse width 7clock */
+#define FPWS_PWS1W_8CLK 0x06000000 /* write pulse width 8clock */
+#define FPWS_PWS1R_3CLK 0x00010000 /* read pulse width 3clock */
+#define FPWS_PWS1R_4CLK 0x00020000 /* read pulse width 4clock */
+#define FPWS_PWS1R_5CLK 0x00030000 /* read pulse width 5clock */
+#define FPWS_PWS1R_6CLK 0x00040000 /* read pulse width 6clock */
+#define FPWS_PWS1R_7CLK 0x00050000 /* read pulse width 7clock */
+#define FPWS_PWS1R_8CLK 0x00060000 /* read pulse width 8clock */
+#define FPWS_PWS2W_2CLK 0x00000100 /* write pulse interval 2clock */
+#define FPWS_PWS2W_3CLK 0x00000200 /* write pulse interval 3clock */
+#define FPWS_PWS2W_4CLK 0x00000300 /* write pulse interval 4clock */
+#define FPWS_PWS2W_5CLK 0x00000400 /* write pulse interval 5clock */
+#define FPWS_PWS2W_6CLK 0x00000500 /* write pulse interval 6clock */
+#define FPWS_PWS2R_2CLK 0x00000001 /* read pulse interval 2clock */
+#define FPWS_PWS2R_3CLK 0x00000002 /* read pulse interval 3clock */
+#define FPWS_PWS2R_4CLK 0x00000003 /* read pulse interval 4clock */
+#define FPWS_PWS2R_5CLK 0x00000004 /* read pulse interval 5clock */
+#define FPWS_PWS2R_6CLK 0x00000005 /* read pulse interval 6clock */
+/* command register 2 */
+#define FCOMMAND2 __SYSREG(0xd8f00110, u32)
+/* transfer frequency register */
+#define FNUM __SYSREG(0xd8f00114, u32)
+#define FSDATA_ADDR 0xd8f00400
+/* active data register */
+#define FSDATA __SYSREG(FSDATA_ADDR, u32)
+
+#endif /* _PROC_NAND_REGS_H_ */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/proc.h b/arch/mn10300/proc-mn2ws0050/include/proc/proc.h
new file mode 100644
index 00000000000..90d5cadd05b
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/proc.h
@@ -0,0 +1,18 @@
+/* proc.h: MN2WS0050 processor description
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PROC_PROC_H
+#define _ASM_PROC_PROC_H
+
+#define PROCESSOR_VENDOR_NAME "Panasonic"
+#define PROCESSOR_MODEL_NAME "mn2ws0050"
+
+#endif /* _ASM_PROC_PROC_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/smp-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/smp-regs.h
new file mode 100644
index 00000000000..22f277fbb4d
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/include/proc/smp-regs.h
@@ -0,0 +1,51 @@
+/* MN10300/AM33v2 Microcontroller SMP registers
+ *
+ * Copyright (C) 2006 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ * Created:
+ * 13-Nov-2006 MEI Add extended cache and atomic operation register
+ * for SMP support.
+ * 23-Feb-2007 MEI Add define for gdbstub SMP.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PROC_SMP_REGS_H
+#define _ASM_PROC_SMP_REGS_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#endif
+#include <asm/cpu-regs.h>
+
+/*
+ * Reference to the interrupt controllers of other CPUs
+ */
+#define CROSS_ICR_CPU_SHIFT 16
+
+#define CROSS_GxICR(X, CPU) __SYSREG(0xc4000000 + (X) * 4 + \
+ ((X) >= 64 && (X) < 192) * 0xf00 + ((CPU) << CROSS_ICR_CPU_SHIFT), u16)
+#define CROSS_GxICR_u8(X, CPU) __SYSREG(0xc4000000 + (X) * 4 + \
+ (((X) >= 64) && ((X) < 192)) * 0xf00 + ((CPU) << CROSS_ICR_CPU_SHIFT), u8)
+
+/* CPU ID register */
+#define CPUID __SYSREGC(0xc0000054, u32)
+#define CPUID_MASK 0x00000007 /* CPU ID mask */
+
+/* extended cache control register */
+#define ECHCTR __SYSREG(0xc0000c20, u32)
+#define ECHCTR_IBCM 0x00000001 /* instruction cache broadcast mask */
+#define ECHCTR_DBCM 0x00000002 /* data cache broadcast mask */
+#define ECHCTR_ISPM 0x00000004 /* instruction cache snoop mask */
+#define ECHCTR_DSPM 0x00000008 /* data cache snoop mask */
+
+#define NMIAGR __SYSREG(0xd400013c, u16)
+#define NMIAGR_GN 0x03fc
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_PROC_SMP_REGS_H */
diff --git a/arch/mn10300/proc-mn2ws0050/proc-init.c b/arch/mn10300/proc-mn2ws0050/proc-init.c
new file mode 100644
index 00000000000..c58249b9525
--- /dev/null
+++ b/arch/mn10300/proc-mn2ws0050/proc-init.c
@@ -0,0 +1,134 @@
+/* MN2WS0050 processor initialisation
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/smp.h>
+#include <asm/pgalloc.h>
+#include <asm/busctl-regs.h>
+#include <unit/timex.h>
+#include <asm/fpu.h>
+#include <asm/rtc.h>
+
+#define MEMCONF __SYSREGC(0xdf800400, u32)
+
+/*
+ * initialise the on-silicon processor peripherals
+ */
+asmlinkage void __init processor_init(void)
+{
+ int loop;
+
+ /* set up the exception table first */
+ for (loop = 0x000; loop < 0x400; loop += 8)
+ __set_intr_stub(loop, __common_exception);
+
+ __set_intr_stub(EXCEP_ITLBMISS, itlb_miss);
+ __set_intr_stub(EXCEP_DTLBMISS, dtlb_miss);
+ __set_intr_stub(EXCEP_IAERROR, itlb_aerror);
+ __set_intr_stub(EXCEP_DAERROR, dtlb_aerror);
+ __set_intr_stub(EXCEP_BUSERROR, raw_bus_error);
+ __set_intr_stub(EXCEP_DOUBLE_FAULT, double_fault);
+ __set_intr_stub(EXCEP_FPU_DISABLED, fpu_disabled);
+ __set_intr_stub(EXCEP_SYSCALL0, system_call);
+
+ __set_intr_stub(EXCEP_NMI, nmi_handler);
+ __set_intr_stub(EXCEP_WDT, nmi_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL0, irq_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL1, irq_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL2, irq_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL3, irq_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL4, irq_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL5, irq_handler);
+ __set_intr_stub(EXCEP_IRQ_LEVEL6, irq_handler);
+
+ IVAR0 = EXCEP_IRQ_LEVEL0;
+ IVAR1 = EXCEP_IRQ_LEVEL1;
+ IVAR2 = EXCEP_IRQ_LEVEL2;
+ IVAR3 = EXCEP_IRQ_LEVEL3;
+ IVAR4 = EXCEP_IRQ_LEVEL4;
+ IVAR5 = EXCEP_IRQ_LEVEL5;
+ IVAR6 = EXCEP_IRQ_LEVEL6;
+
+#ifndef CONFIG_MN10300_HAS_CACHE_SNOOP
+ mn10300_dcache_flush_inv();
+ mn10300_icache_inv();
+#endif
+
+ /* disable all interrupts and set to priority 6 (lowest) */
+#ifdef CONFIG_SMP
+ for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
+ GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
+#else /* !CONFIG_SMP */
+ for (loop = 0; loop < NR_IRQS; loop++)
+ GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
+#endif /* !CONFIG_SMP */
+
+ /* clear the timers */
+ TM0MD = 0;
+ TM1MD = 0;
+ TM2MD = 0;
+ TM3MD = 0;
+ TM4MD = 0;
+ TM5MD = 0;
+ TM6MD = 0;
+ TM6MDA = 0;
+ TM6MDB = 0;
+ TM7MD = 0;
+ TM8MD = 0;
+ TM9MD = 0;
+ TM10MD = 0;
+ TM11MD = 0;
+ TM12MD = 0;
+ TM13MD = 0;
+ TM14MD = 0;
+ TM15MD = 0;
+
+ calibrate_clock();
+}
+
+/*
+ * determine the memory size and base from the memory controller regs
+ */
+void __init get_mem_info(unsigned long *mem_base, unsigned long *mem_size)
+{
+ unsigned long memconf = MEMCONF;
+ unsigned long size = 0; /* size in megabytes */
+
+ *mem_base = 0x90000000; /* fixed address */
+
+ switch (memconf & 0x00000003) {
+ case 0x01:
+ size = 256 / 8; /* 256 Mbit per chip */
+ break;
+ case 0x02:
+ size = 512 / 8; /* 512 Mbit per chip */
+ break;
+ case 0x03:
+ size = 1024 / 8; /* 1 Gbit per chip */
+ break;
+ default:
+ panic("Invalid SDRAM size");
+ break;
+ }
+
+ printk(KERN_INFO "DDR2-SDRAM: %luMB x 2 @%08lx\n", size, *mem_base);
+
+ *mem_size = (size * 2) << 20;
+}
diff --git a/arch/mn10300/unit-asb2303/include/unit/clock.h b/arch/mn10300/unit-asb2303/include/unit/clock.h
index 2a0bf79ab96..0316907a012 100644
--- a/arch/mn10300/unit-asb2303/include/unit/clock.h
+++ b/arch/mn10300/unit-asb2303/include/unit/clock.h
@@ -14,32 +14,11 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_MN10300_RTC
-
-extern unsigned long mn10300_ioclk; /* IOCLK (crystal speed) in HZ */
-extern unsigned long mn10300_iobclk;
-extern unsigned long mn10300_tsc_per_HZ;
-
-#define MN10300_IOCLK mn10300_ioclk
-/* If this processors has a another clock, uncomment the below. */
-/* #define MN10300_IOBCLK mn10300_iobclk */
-
-#else /* !CONFIG_MN10300_RTC */
-
#define MN10300_IOCLK 33333333UL
/* #define MN10300_IOBCLK 66666666UL */
-#endif /* !CONFIG_MN10300_RTC */
-
-#define MN10300_JCCLK MN10300_IOCLK
-#define MN10300_TSCCLK MN10300_IOCLK
-
-#ifdef CONFIG_MN10300_RTC
-#define MN10300_TSC_PER_HZ mn10300_tsc_per_HZ
-#else /* !CONFIG_MN10300_RTC */
-#define MN10300_TSC_PER_HZ (MN10300_TSCCLK/HZ)
-#endif /* !CONFIG_MN10300_RTC */
-
#endif /* !__ASSEMBLY__ */
+#define MN10300_WDCLK MN10300_IOCLK
+
#endif /* _ASM_UNIT_CLOCK_H */
diff --git a/arch/mn10300/unit-asb2303/include/unit/serial.h b/arch/mn10300/unit-asb2303/include/unit/serial.h
index 047566cd2e3..991e356bac5 100644
--- a/arch/mn10300/unit-asb2303/include/unit/serial.h
+++ b/arch/mn10300/unit-asb2303/include/unit/serial.h
@@ -22,6 +22,11 @@
#define SERIAL_IRQ XIRQ0 /* Dual serial (PC16552) (Hi) */
/*
+ * The ASB2303 has an 18.432 MHz clock for the UART
+ */
+#define BASE_BAUD (18432000 / 16)
+
+/*
* dispose of the /dev/ttyS0 and /dev/ttyS1 serial ports
*/
#ifndef CONFIG_GDBSTUB_ON_TTYSx
diff --git a/arch/mn10300/unit-asb2303/include/unit/timex.h b/arch/mn10300/unit-asb2303/include/unit/timex.h
index f206b63c95b..cc18fe7d8b9 100644
--- a/arch/mn10300/unit-asb2303/include/unit/timex.h
+++ b/arch/mn10300/unit-asb2303/include/unit/timex.h
@@ -1,6 +1,6 @@
-/* ASB2303-specific timer specifcations
+/* ASB2303-specific timer specifications
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2007, 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -17,67 +17,72 @@
#include <asm/timer-regs.h>
#include <unit/clock.h>
+#include <asm/param.h>
/*
* jiffies counter specifications
*/
#define TMJCBR_MAX 0xffff
-#define TMJCBC TM01BC
-
-#define TMJCMD TM01MD
-#define TMJCBR TM01BR
#define TMJCIRQ TM1IRQ
#define TMJCICR TM1ICR
-#define TMJCICR_LEVEL GxICR_LEVEL_5
#ifndef __ASSEMBLY__
-static inline void startup_jiffies_counter(void)
+#define MN10300_SRC_IOCLK MN10300_IOCLK
+
+#ifndef HZ
+# error HZ undeclared.
+#endif /* !HZ */
+/* use as little prescaling as possible to avoid losing accuracy */
+#if (MN10300_SRC_IOCLK + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE 1
+# define JC_TIMER_CLKSRC TM0MD_SRC_IOCLK
+# define TSC_TIMER_CLKSRC TM4MD_SRC_IOCLK
+#elif (MN10300_SRC_IOCLK / 8 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE 8
+# define JC_TIMER_CLKSRC TM0MD_SRC_IOCLK_8
+# define TSC_TIMER_CLKSRC TM4MD_SRC_IOCLK_8
+#elif (MN10300_SRC_IOCLK / 32 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE 32
+# define JC_TIMER_CLKSRC TM0MD_SRC_IOCLK_32
+# define TSC_TIMER_CLKSRC TM4MD_SRC_IOCLK_32
+#else
+# error You lose.
+#endif
+
+#define MN10300_JCCLK (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+#define MN10300_TSCCLK (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+
+#define MN10300_JC_PER_HZ ((MN10300_JCCLK + HZ / 2) / HZ)
+#define MN10300_TSC_PER_HZ ((MN10300_TSCCLK + HZ / 2) / HZ)
+
+static inline void stop_jiffies_counter(void)
{
- unsigned rate;
- u16 md, t16;
-
- /* use as little prescaling as possible to avoid losing accuracy */
- md = TM0MD_SRC_IOCLK;
- rate = MN10300_JCCLK / HZ;
-
- if (rate > TMJCBR_MAX) {
- md = TM0MD_SRC_IOCLK_8;
- rate = MN10300_JCCLK / 8 / HZ;
-
- if (rate > TMJCBR_MAX) {
- md = TM0MD_SRC_IOCLK_32;
- rate = MN10300_JCCLK / 32 / HZ;
-
- if (rate > TMJCBR_MAX)
- BUG();
- }
- }
+ u16 tmp;
+ TM01MD = JC_TIMER_CLKSRC | TM1MD_SRC_TM0CASCADE << 8;
+ tmp = TM01MD;
+}
- TMJCBR = rate - 1;
- t16 = TMJCBR;
+static inline void reload_jiffies_counter(u32 cnt)
+{
+ u32 tmp;
- TMJCMD =
- md |
- TM1MD_SRC_TM0CASCADE << 8 |
- TM0MD_INIT_COUNTER |
- TM1MD_INIT_COUNTER << 8;
+ TM01BR = cnt;
+ tmp = TM01BR;
- TMJCMD =
- md |
- TM1MD_SRC_TM0CASCADE << 8 |
- TM0MD_COUNT_ENABLE |
- TM1MD_COUNT_ENABLE << 8;
+ TM01MD = JC_TIMER_CLKSRC | \
+ TM1MD_SRC_TM0CASCADE << 8 | \
+ TM0MD_INIT_COUNTER | \
+ TM1MD_INIT_COUNTER << 8;
- t16 = TMJCMD;
- TMJCICR |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
- t16 = TMJCICR;
-}
+ TM01MD = JC_TIMER_CLKSRC | \
+ TM1MD_SRC_TM0CASCADE << 8 | \
+ TM0MD_COUNT_ENABLE | \
+ TM1MD_COUNT_ENABLE << 8;
-static inline void shutdown_jiffies_counter(void)
-{
+ tmp = TM01MD;
}
#endif /* !__ASSEMBLY__ */
@@ -94,29 +99,39 @@ static inline void shutdown_jiffies_counter(void)
static inline void startup_timestamp_counter(void)
{
+ u32 t32;
+
/* set up timer 4 & 5 cascaded as a 32-bit counter to count real time
* - count down from 4Gig-1 to 0 and wrap at IOCLK rate
*/
TM45BR = TMTSCBR_MAX;
+ t32 = TM45BR;
- TM4MD = TM4MD_SRC_IOCLK;
+ TM4MD = TSC_TIMER_CLKSRC;
TM4MD |= TM4MD_INIT_COUNTER;
TM4MD &= ~TM4MD_INIT_COUNTER;
TM4ICR = 0;
+ t32 = TM4ICR;
TM5MD = TM5MD_SRC_TM4CASCADE;
TM5MD |= TM5MD_INIT_COUNTER;
TM5MD &= ~TM5MD_INIT_COUNTER;
TM5ICR = 0;
+ t32 = TM5ICR;
TM5MD |= TM5MD_COUNT_ENABLE;
TM4MD |= TM4MD_COUNT_ENABLE;
+ t32 = TM5MD;
+ t32 = TM4MD;
}
static inline void shutdown_timestamp_counter(void)
{
+ u8 t8;
TM4MD = 0;
TM5MD = 0;
+ t8 = TM4MD;
+ t8 = TM5MD;
}
/*
@@ -127,7 +142,7 @@ typedef unsigned long cycles_t;
static inline cycles_t read_timestamp_counter(void)
{
- return (cycles_t)TMTSCBC;
+ return (cycles_t)~TMTSCBC;
}
#endif /* !__ASSEMBLY__ */
diff --git a/arch/mn10300/unit-asb2303/unit-init.c b/arch/mn10300/unit-asb2303/unit-init.c
index 70e8cb4ea26..834a76aa551 100644
--- a/arch/mn10300/unit-asb2303/unit-init.c
+++ b/arch/mn10300/unit-asb2303/unit-init.c
@@ -31,6 +31,14 @@ asmlinkage void __init unit_init(void)
SET_XIRQ_TRIGGER(3, XIRQ_TRIGGER_HILEVEL);
SET_XIRQ_TRIGGER(4, XIRQ_TRIGGER_LOWLEVEL);
SET_XIRQ_TRIGGER(5, XIRQ_TRIGGER_LOWLEVEL);
+
+#ifdef CONFIG_EXT_SERIAL_IRQ_LEVEL
+ set_intr_level(XIRQ0, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL));
+#endif
+
+#ifdef CONFIG_ETHERNET_IRQ_LEVEL
+ set_intr_level(XIRQ3, NUM2GxICR_LEVEL(CONFIG_ETHERNET_IRQ_LEVEL));
+#endif
}
/*
@@ -51,7 +59,7 @@ void __init unit_init_IRQ(void)
switch (GET_XIRQ_TRIGGER(extnum)) {
case XIRQ_TRIGGER_HILEVEL:
case XIRQ_TRIGGER_LOWLEVEL:
- set_intr_postackable(XIRQ2IRQ(extnum));
+ mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum));
break;
default:
break;
diff --git a/arch/mn10300/unit-asb2305/include/unit/clock.h b/arch/mn10300/unit-asb2305/include/unit/clock.h
index 67be3f2eb18..29e3425431c 100644
--- a/arch/mn10300/unit-asb2305/include/unit/clock.h
+++ b/arch/mn10300/unit-asb2305/include/unit/clock.h
@@ -14,32 +14,11 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_MN10300_RTC
-
-extern unsigned long mn10300_ioclk; /* IOCLK (crystal speed) in HZ */
-extern unsigned long mn10300_iobclk;
-extern unsigned long mn10300_tsc_per_HZ;
-
-#define MN10300_IOCLK mn10300_ioclk
-/* If this processors has a another clock, uncomment the below. */
-/* #define MN10300_IOBCLK mn10300_iobclk */
-
-#else /* !CONFIG_MN10300_RTC */
-
#define MN10300_IOCLK 33333333UL
/* #define MN10300_IOBCLK 66666666UL */
-#endif /* !CONFIG_MN10300_RTC */
-
-#define MN10300_JCCLK MN10300_IOCLK
-#define MN10300_TSCCLK MN10300_IOCLK
-
-#ifdef CONFIG_MN10300_RTC
-#define MN10300_TSC_PER_HZ mn10300_tsc_per_HZ
-#else /* !CONFIG_MN10300_RTC */
-#define MN10300_TSC_PER_HZ (MN10300_TSCCLK/HZ)
-#endif /* !CONFIG_MN10300_RTC */
-
#endif /* !__ASSEMBLY__ */
+#define MN10300_WDCLK MN10300_IOCLK
+
#endif /* _ASM_UNIT_CLOCK_H */
diff --git a/arch/mn10300/unit-asb2305/include/unit/serial.h b/arch/mn10300/unit-asb2305/include/unit/serial.h
index 8086cc092ce..88c08219315 100644
--- a/arch/mn10300/unit-asb2305/include/unit/serial.h
+++ b/arch/mn10300/unit-asb2305/include/unit/serial.h
@@ -21,6 +21,11 @@
#define SERIAL_IRQ XIRQ0 /* Dual serial (PC16552) (Hi) */
/*
+ * The ASB2305 has an 18.432 MHz clock for the UART
+ */
+#define BASE_BAUD (18432000 / 16)
+
+/*
* dispose of the /dev/ttyS0 serial port
*/
#ifndef CONFIG_GDBSTUB_ON_TTYSx
diff --git a/arch/mn10300/unit-asb2305/include/unit/timex.h b/arch/mn10300/unit-asb2305/include/unit/timex.h
index d1c72d59fa9..758af30d1a1 100644
--- a/arch/mn10300/unit-asb2305/include/unit/timex.h
+++ b/arch/mn10300/unit-asb2305/include/unit/timex.h
@@ -1,6 +1,6 @@
-/* ASB2305 timer specifcations
+/* ASB2305-specific timer specifications
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2007, 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -17,67 +17,72 @@
#include <asm/timer-regs.h>
#include <unit/clock.h>
+#include <asm/param.h>
/*
* jiffies counter specifications
*/
#define TMJCBR_MAX 0xffff
-#define TMJCBC TM01BC
-
-#define TMJCMD TM01MD
-#define TMJCBR TM01BR
#define TMJCIRQ TM1IRQ
#define TMJCICR TM1ICR
-#define TMJCICR_LEVEL GxICR_LEVEL_5
#ifndef __ASSEMBLY__
-static inline void startup_jiffies_counter(void)
+#define MN10300_SRC_IOCLK MN10300_IOCLK
+
+#ifndef HZ
+# error HZ undeclared.
+#endif /* !HZ */
+/* use as little prescaling as possible to avoid losing accuracy */
+#if (MN10300_SRC_IOCLK + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE 1
+# define JC_TIMER_CLKSRC TM0MD_SRC_IOCLK
+# define TSC_TIMER_CLKSRC TM4MD_SRC_IOCLK
+#elif (MN10300_SRC_IOCLK / 8 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE 8
+# define JC_TIMER_CLKSRC TM0MD_SRC_IOCLK_8
+# define TSC_TIMER_CLKSRC TM4MD_SRC_IOCLK_8
+#elif (MN10300_SRC_IOCLK / 32 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE 32
+# define JC_TIMER_CLKSRC TM0MD_SRC_IOCLK_32
+# define TSC_TIMER_CLKSRC TM4MD_SRC_IOCLK_32
+#else
+# error You lose.
+#endif
+
+#define MN10300_JCCLK (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+#define MN10300_TSCCLK (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+
+#define MN10300_JC_PER_HZ ((MN10300_JCCLK + HZ / 2) / HZ)
+#define MN10300_TSC_PER_HZ ((MN10300_TSCCLK + HZ / 2) / HZ)
+
+static inline void stop_jiffies_counter(void)
{
- unsigned rate;
- u16 md, t16;
-
- /* use as little prescaling as possible to avoid losing accuracy */
- md = TM0MD_SRC_IOCLK;
- rate = MN10300_JCCLK / HZ;
-
- if (rate > TMJCBR_MAX) {
- md = TM0MD_SRC_IOCLK_8;
- rate = MN10300_JCCLK / 8 / HZ;
-
- if (rate > TMJCBR_MAX) {
- md = TM0MD_SRC_IOCLK_32;
- rate = MN10300_JCCLK / 32 / HZ;
-
- if (rate > TMJCBR_MAX)
- BUG();
- }
- }
+ u16 tmp;
+ TM01MD = JC_TIMER_CLKSRC | TM1MD_SRC_TM0CASCADE << 8;
+ tmp = TM01MD;
+}
- TMJCBR = rate - 1;
- t16 = TMJCBR;
+static inline void reload_jiffies_counter(u32 cnt)
+{
+ u32 tmp;
- TMJCMD =
- md |
- TM1MD_SRC_TM0CASCADE << 8 |
- TM0MD_INIT_COUNTER |
- TM1MD_INIT_COUNTER << 8;
+ TM01BR = cnt;
+ tmp = TM01BR;
- TMJCMD =
- md |
- TM1MD_SRC_TM0CASCADE << 8 |
- TM0MD_COUNT_ENABLE |
- TM1MD_COUNT_ENABLE << 8;
+ TM01MD = JC_TIMER_CLKSRC | \
+ TM1MD_SRC_TM0CASCADE << 8 | \
+ TM0MD_INIT_COUNTER | \
+ TM1MD_INIT_COUNTER << 8;
- t16 = TMJCMD;
- TMJCICR |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
- t16 = TMJCICR;
-}
+ TM01MD = JC_TIMER_CLKSRC | \
+ TM1MD_SRC_TM0CASCADE << 8 | \
+ TM0MD_COUNT_ENABLE | \
+ TM1MD_COUNT_ENABLE << 8;
-static inline void shutdown_jiffies_counter(void)
-{
+ tmp = TM01MD;
}
#endif /* !__ASSEMBLY__ */
@@ -94,29 +99,39 @@ static inline void shutdown_jiffies_counter(void)
static inline void startup_timestamp_counter(void)
{
+ u32 t32;
+
/* set up timer 4 & 5 cascaded as a 32-bit counter to count real time
* - count down from 4Gig-1 to 0 and wrap at IOCLK rate
*/
TM45BR = TMTSCBR_MAX;
+ t32 = TM45BR;
- TM4MD = TM4MD_SRC_IOCLK;
+ TM4MD = TSC_TIMER_CLKSRC;
TM4MD |= TM4MD_INIT_COUNTER;
TM4MD &= ~TM4MD_INIT_COUNTER;
TM4ICR = 0;
+ t32 = TM4ICR;
TM5MD = TM5MD_SRC_TM4CASCADE;
TM5MD |= TM5MD_INIT_COUNTER;
TM5MD &= ~TM5MD_INIT_COUNTER;
TM5ICR = 0;
+ t32 = TM5ICR;
TM5MD |= TM5MD_COUNT_ENABLE;
TM4MD |= TM4MD_COUNT_ENABLE;
+ t32 = TM5MD;
+ t32 = TM4MD;
}
static inline void shutdown_timestamp_counter(void)
{
+ u8 t8;
TM4MD = 0;
TM5MD = 0;
+ t8 = TM4MD;
+ t8 = TM5MD;
}
/*
@@ -127,7 +142,7 @@ typedef unsigned long cycles_t;
static inline cycles_t read_timestamp_counter(void)
{
- return (cycles_t) TMTSCBC;
+ return (cycles_t)~TMTSCBC;
}
#endif /* !__ASSEMBLY__ */
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.c b/arch/mn10300/unit-asb2305/pci-asb2305.c
index 45b40ac6c46..8e6763e6f25 100644
--- a/arch/mn10300/unit-asb2305/pci-asb2305.c
+++ b/arch/mn10300/unit-asb2305/pci-asb2305.c
@@ -93,7 +93,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
struct pci_bus *bus;
struct pci_dev *dev;
int idx;
- struct resource *r, *pr;
+ struct resource *r;
/* Depth-First Search on bus tree */
list_for_each_entry(bus, bus_list, node) {
@@ -105,10 +105,8 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
r = &dev->resource[idx];
if (!r->flags)
continue;
- pr = pci_find_parent_resource(dev, r);
if (!r->start ||
- !pr ||
- request_resource(pr, r) < 0) {
+ pci_claim_resource(dev, idx) < 0) {
printk(KERN_ERR "PCI:"
" Cannot allocate resource"
" region %d of bridge %s\n",
@@ -131,7 +129,7 @@ static void __init pcibios_allocate_resources(int pass)
struct pci_dev *dev = NULL;
int idx, disabled;
u16 command;
- struct resource *r, *pr;
+ struct resource *r;
for_each_pci_dev(dev) {
pci_read_config_word(dev, PCI_COMMAND, &command);
@@ -150,8 +148,7 @@ static void __init pcibios_allocate_resources(int pass)
" (f=%lx, d=%d, p=%d)\n",
pci_name(dev), r->start, r->end, r->flags,
disabled, pass);
- pr = pci_find_parent_resource(dev, r);
- if (!pr || request_resource(pr, r) < 0) {
+ if (pci_claim_resource(dev, idx) < 0) {
printk(KERN_ERR "PCI:"
" Cannot allocate resource"
" region %d of device %s\n",
@@ -184,7 +181,7 @@ static void __init pcibios_allocate_resources(int pass)
static int __init pcibios_assign_resources(void)
{
struct pci_dev *dev = NULL;
- struct resource *r, *pr;
+ struct resource *r;
if (!(pci_probe & PCI_ASSIGN_ROMS)) {
/* Try to use BIOS settings for ROMs, otherwise let
@@ -194,8 +191,7 @@ static int __init pcibios_assign_resources(void)
r = &dev->resource[PCI_ROM_RESOURCE];
if (!r->flags || !r->start)
continue;
- pr = pci_find_parent_resource(dev, r);
- if (!pr || request_resource(pr, r) < 0) {
+ if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
r->end -= r->start;
r->start = 0;
}
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index 6d8720a0a59..a4954fe8209 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -503,7 +503,7 @@ asmlinkage void __init unit_pci_init(void)
struct pci_ops *o = &pci_direct_ampci;
u32 x;
- set_intr_level(XIRQ1, GxICR_LEVEL_3);
+ set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_PCI_IRQ_LEVEL));
memset(&bus, 0, sizeof(bus));
diff --git a/arch/mn10300/unit-asb2305/unit-init.c b/arch/mn10300/unit-asb2305/unit-init.c
index a76c8e0ab90..e1becd6b757 100644
--- a/arch/mn10300/unit-asb2305/unit-init.c
+++ b/arch/mn10300/unit-asb2305/unit-init.c
@@ -26,8 +26,10 @@ asmlinkage void __init unit_init(void)
{
#ifndef CONFIG_GDBSTUB_ON_TTYSx
/* set the 16550 interrupt line to level 3 if not being used for GDB */
- set_intr_level(XIRQ0, GxICR_LEVEL_3);
+#ifdef CONFIG_EXT_SERIAL_IRQ_LEVEL
+ set_intr_level(XIRQ0, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL));
#endif
+#endif /* CONFIG_GDBSTUB_ON_TTYSx */
}
/*
@@ -51,7 +53,7 @@ void __init unit_init_IRQ(void)
switch (GET_XIRQ_TRIGGER(extnum)) {
case XIRQ_TRIGGER_HILEVEL:
case XIRQ_TRIGGER_LOWLEVEL:
- set_intr_postackable(XIRQ2IRQ(extnum));
+ mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum));
break;
default:
break;
diff --git a/arch/mn10300/unit-asb2364/Makefile b/arch/mn10300/unit-asb2364/Makefile
new file mode 100644
index 00000000000..b3263ecfc4f
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the linux kernel.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are now in the main makefile...
+
+obj-y := unit-init.o leds.o irq-fpga.o
+
+obj-$(CONFIG_SMSC911X) += smsc911x.o
diff --git a/arch/mn10300/unit-asb2364/include/unit/clock.h b/arch/mn10300/unit-asb2364/include/unit/clock.h
new file mode 100644
index 00000000000..d34ac9a7508
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/clock.h
@@ -0,0 +1,29 @@
+/* clock.h: unit-specific clocks
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 23-Feb-2007 MEI Add define for watchdog timer.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_UNIT_CLOCK_H
+#define _ASM_UNIT_CLOCK_H
+
+#ifndef __ASSEMBLY__
+
+#define MN10300_IOCLK 100000000UL /* for DDR800 */
+/*#define MN10300_IOCLK 83333333UL */ /* for DDR667 */
+#define MN10300_IOBCLK MN10300_IOCLK /* IOBCLK is equal to IOCLK */
+
+#endif /* !__ASSEMBLY__ */
+
+#define MN10300_WDCLK 27000000UL
+
+#endif /* _ASM_UNIT_CLOCK_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/fpga-regs.h b/arch/mn10300/unit-asb2364/include/unit/fpga-regs.h
new file mode 100644
index 00000000000..7cf12054db6
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/fpga-regs.h
@@ -0,0 +1,52 @@
+/* ASB2364 FPGA registers
+ */
+
+#ifndef _ASM_UNIT_FPGA_REGS_H
+#define _ASM_UNIT_FPGA_REGS_H
+
+#include <asm/cpu-regs.h>
+
+#ifdef __KERNEL__
+
+#define ASB2364_FPGA_REG_RESET_LAN __SYSREG(0xa9001300, u16)
+#define ASB2364_FPGA_REG_RESET_UART __SYSREG(0xa9001304, u16)
+#define ASB2364_FPGA_REG_RESET_I2C __SYSREG(0xa9001308, u16)
+#define ASB2364_FPGA_REG_RESET_USB __SYSREG(0xa900130c, u16)
+#define ASB2364_FPGA_REG_RESET_AV __SYSREG(0xa9001310, u16)
+
+#define ASB2364_FPGA_REG_IRQ(X) __SYSREG(0xa9001590+((X)*4), u16)
+#define ASB2364_FPGA_REG_IRQ_LAN ASB2364_FPGA_REG_IRQ(0)
+#define ASB2364_FPGA_REG_IRQ_UART ASB2364_FPGA_REG_IRQ(1)
+#define ASB2364_FPGA_REG_IRQ_I2C ASB2364_FPGA_REG_IRQ(2)
+#define ASB2364_FPGA_REG_IRQ_USB ASB2364_FPGA_REG_IRQ(3)
+#define ASB2364_FPGA_REG_IRQ_FPGA ASB2364_FPGA_REG_IRQ(5)
+
+#define ASB2364_FPGA_REG_MASK(X) __SYSREG(0xa9001590+((X)*4), u16)
+#define ASB2364_FPGA_REG_MASK_LAN ASB2364_FPGA_REG_MASK(0)
+#define ASB2364_FPGA_REG_MASK_UART ASB2364_FPGA_REG_MASK(1)
+#define ASB2364_FPGA_REG_MASK_I2C ASB2364_FPGA_REG_MASK(2)
+#define ASB2364_FPGA_REG_MASK_USB ASB2364_FPGA_REG_MASK(3)
+#define ASB2364_FPGA_REG_MASK_FPGA ASB2364_FPGA_REG_MASK(5)
+
+#define ASB2364_FPGA_REG_CPLD5_SET1 __SYSREG(0xa9002500, u16)
+#define ASB2364_FPGA_REG_CPLD5_SET2 __SYSREG(0xa9002504, u16)
+#define ASB2364_FPGA_REG_CPLD6_SET1 __SYSREG(0xa9002600, u16)
+#define ASB2364_FPGA_REG_CPLD6_SET2 __SYSREG(0xa9002604, u16)
+#define ASB2364_FPGA_REG_CPLD7_SET1 __SYSREG(0xa9002700, u16)
+#define ASB2364_FPGA_REG_CPLD7_SET2 __SYSREG(0xa9002704, u16)
+#define ASB2364_FPGA_REG_CPLD8_SET1 __SYSREG(0xa9002800, u16)
+#define ASB2364_FPGA_REG_CPLD8_SET2 __SYSREG(0xa9002804, u16)
+#define ASB2364_FPGA_REG_CPLD9_SET1 __SYSREG(0xa9002900, u16)
+#define ASB2364_FPGA_REG_CPLD9_SET2 __SYSREG(0xa9002904, u16)
+#define ASB2364_FPGA_REG_CPLD10_SET1 __SYSREG(0xa9002a00, u16)
+#define ASB2364_FPGA_REG_CPLD10_SET2 __SYSREG(0xa9002a04, u16)
+
+#define SyncExBus() \
+ do { \
+ unsigned short w; \
+ w = *(volatile short *)0xa9000000; \
+ } while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_UNIT_FPGA_REGS_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/irq.h b/arch/mn10300/unit-asb2364/include/unit/irq.h
new file mode 100644
index 00000000000..786148e4656
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/irq.h
@@ -0,0 +1,35 @@
+/* ASB2364 FPGA irq numbers
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _UNIT_IRQ_H
+#define _UNIT_IRQ_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+#define NR_CPU_IRQS GxICR_NUM_EXT_IRQS
+#else
+#define NR_CPU_IRQS GxICR_NUM_IRQS
+#endif
+
+enum {
+ FPGA_LAN_IRQ = NR_CPU_IRQS,
+ FPGA_UART_IRQ,
+ FPGA_I2C_IRQ,
+ FPGA_USB_IRQ,
+ FPGA_RESERVED_IRQ,
+ FPGA_FPGA_IRQ,
+ NR_IRQS
+};
+
+extern void __init irq_fpga_init(void);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _UNIT_IRQ_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/leds.h b/arch/mn10300/unit-asb2364/include/unit/leds.h
new file mode 100644
index 00000000000..03a3933ad32
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/leds.h
@@ -0,0 +1,54 @@
+/* Unit-specific leds
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_UNIT_LEDS_H
+#define _ASM_UNIT_LEDS_H
+
+#include <asm/pio-regs.h>
+#include <asm/cpu-regs.h>
+#include <asm/exceptions.h>
+
+#define MN10300_USE_7SEGLEDS 0
+
+#define ASB2364_7SEGLEDS __SYSREG(0xA9001630, u32)
+
+/*
+ * use the 7-segment LEDs to indicate states
+ */
+
+#if MN10300_USE_7SEGLEDS
+/* flip the 7-segment LEDs between "Gdb-" and "----" */
+#define mn10300_set_gdbleds(ONOFF) \
+ do { \
+ ASB2364_7SEGLEDS = (ONOFF) ? 0x8543077f : 0x7f7f7f7f; \
+ } while (0)
+#else
+#define mn10300_set_gdbleds(ONOFF) do {} while (0)
+#endif
+
+#if MN10300_USE_7SEGLEDS
+/* indicate double-fault by displaying "db-f" on the LEDs */
+#define mn10300_set_dbfleds \
+ mov 0x43077f1d,d0 ; \
+ mov d0,(ASB2364_7SEGLEDS)
+#else
+#define mn10300_set_dbfleds
+#endif
+
+#ifndef __ASSEMBLY__
+extern void peripheral_leds_display_exception(enum exception_code);
+extern void peripheral_leds_led_chase(void);
+extern void peripheral_leds7x4_display_dec(unsigned int, unsigned int);
+extern void peripheral_leds7x4_display_hex(unsigned int, unsigned int);
+extern void debug_to_serial(const char *, int);
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_UNIT_LEDS_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/serial.h b/arch/mn10300/unit-asb2364/include/unit/serial.h
new file mode 100644
index 00000000000..7f048bbfdfd
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/serial.h
@@ -0,0 +1,151 @@
+/* Unit-specific 8250 serial ports
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_UNIT_SERIAL_H
+#define _ASM_UNIT_SERIAL_H
+
+#include <asm/cpu-regs.h>
+#include <proc/irq.h>
+#include <unit/fpga-regs.h>
+#include <linux/serial_reg.h>
+
+#define SERIAL_PORT0_BASE_ADDRESS 0xA8200000
+
+#define SERIAL_IRQ XIRQ1 /* single serial (TL16C550C) (Lo) */
+
+/*
+ * The ASB2364 has a 12.288 MHz clock
+ * for the UART.
+ *
+ * It'd be nice if someone built a serial card with a 24.576 MHz
+ * clock, since the 16550A is capable of handling a top speed of 1.5
+ * megabits/second; but this requires the faster clock.
+ */
+#define BASE_BAUD (12288000 / 16)
+
+/*
+ * dispose of the /dev/ttyS0 and /dev/ttyS1 serial ports
+ */
+#ifndef CONFIG_GDBSTUB_ON_TTYSx
+
+#define SERIAL_PORT_DFNS \
+ { \
+ .baud_base = BASE_BAUD, \
+ .irq = SERIAL_IRQ, \
+ .flags = STD_COM_FLAGS, \
+ .iomem_base = (u8 *) SERIAL_PORT0_BASE_ADDRESS, \
+ .iomem_reg_shift = 1, \
+ .io_type = SERIAL_IO_MEM, \
+ },
+
+#ifndef __ASSEMBLY__
+
+static inline void __debug_to_serial(const char *p, int n)
+{
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#else /* CONFIG_GDBSTUB_ON_TTYSx */
+
+#define SERIAL_PORT_DFNS /* stolen by gdb-stub */
+
+#if defined(CONFIG_GDBSTUB_ON_TTYS0)
+#define GDBPORT_SERIAL_RX __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_RX * 4, u8)
+#define GDBPORT_SERIAL_TX __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_TX * 4, u8)
+#define GDBPORT_SERIAL_DLL __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_DLL * 4, u8)
+#define GDBPORT_SERIAL_DLM __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_DLM * 4, u8)
+#define GDBPORT_SERIAL_IER __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_IER * 4, u8)
+#define GDBPORT_SERIAL_IIR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_IIR * 4, u8)
+#define GDBPORT_SERIAL_FCR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_FCR * 4, u8)
+#define GDBPORT_SERIAL_LCR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_LCR * 4, u8)
+#define GDBPORT_SERIAL_MCR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_MCR * 4, u8)
+#define GDBPORT_SERIAL_LSR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_LSR * 4, u8)
+#define GDBPORT_SERIAL_MSR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_MSR * 4, u8)
+#define GDBPORT_SERIAL_SCR __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_SCR * 4, u8)
+#define GDBPORT_SERIAL_IRQ SERIAL_IRQ
+
+#elif defined(CONFIG_GDBSTUB_ON_TTYS1)
+#error The ASB2364 does not have a /dev/ttyS1
+#endif
+
+#ifndef __ASSEMBLY__
+
+static inline void __debug_to_serial(const char *p, int n)
+{
+ char ch;
+
+#define LSR_WAIT_FOR(STATE) \
+ do {} while (!(GDBPORT_SERIAL_LSR & UART_LSR_##STATE))
+#define FLOWCTL_QUERY(LINE) \
+ ({ GDBPORT_SERIAL_MSR & UART_MSR_##LINE; })
+#define FLOWCTL_WAIT_FOR(LINE) \
+ do {} while (!(GDBPORT_SERIAL_MSR & UART_MSR_##LINE))
+#define FLOWCTL_CLEAR(LINE) \
+ do { GDBPORT_SERIAL_MCR &= ~UART_MCR_##LINE; } while (0)
+#define FLOWCTL_SET(LINE) \
+ do { GDBPORT_SERIAL_MCR |= UART_MCR_##LINE; } while (0)
+
+ FLOWCTL_SET(DTR);
+
+ for (; n > 0; n--) {
+ LSR_WAIT_FOR(THRE);
+ FLOWCTL_WAIT_FOR(CTS);
+
+ ch = *p++;
+ if (ch == 0x0a) {
+ GDBPORT_SERIAL_TX = 0x0d;
+ LSR_WAIT_FOR(THRE);
+ FLOWCTL_WAIT_FOR(CTS);
+ }
+ GDBPORT_SERIAL_TX = ch;
+ }
+
+ FLOWCTL_CLEAR(DTR);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_GDBSTUB_ON_TTYSx */
+
+#define SERIAL_INITIALIZE \
+do { \
+ /* release reset */ \
+ ASB2364_FPGA_REG_RESET_UART = 0x0001; \
+ SyncExBus(); \
+} while (0)
+
+#define SERIAL_CHECK_INTERRUPT \
+do { \
+ if ((ASB2364_FPGA_REG_IRQ_UART & 0x0001) == 0x0001) { \
+ return IRQ_NONE; \
+ } \
+} while (0)
+
+#define SERIAL_CLEAR_INTERRUPT \
+do { \
+ ASB2364_FPGA_REG_IRQ_UART = 0x0001; \
+ SyncExBus(); \
+} while (0)
+
+#define SERIAL_SET_INT_MASK \
+do { \
+ ASB2364_FPGA_REG_MASK_UART = 0x0001; \
+ SyncExBus(); \
+} while (0)
+
+#define SERIAL_CLEAR_INT_MASK \
+do { \
+ ASB2364_FPGA_REG_MASK_UART = 0x0000; \
+ SyncExBus(); \
+} while (0)
+
+#endif /* _ASM_UNIT_SERIAL_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/smsc911x.h b/arch/mn10300/unit-asb2364/include/unit/smsc911x.h
new file mode 100644
index 00000000000..4c1ede535fa
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/smsc911x.h
@@ -0,0 +1,171 @@
+/* Support for the SMSC911x NIC
+ *
+ * Copyright (C) 2006 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_UNIT_SMSC911X_H
+#define _ASM_UNIT_SMSC911X_H
+
+#include <linux/netdevice.h>
+#include <proc/irq.h>
+#include <unit/fpga-regs.h>
+
+#define MN10300_USE_EXT_EEPROM
+
+
+#define SMSC911X_BASE 0xA8000000UL
+#define SMSC911X_BASE_END 0xA8000100UL
+#define SMSC911X_IRQ FPGA_LAN_IRQ
+
+/*
+ * Allow the FPGA to be initialised by the SMSC911x driver
+ */
+#undef SMSC_INITIALIZE
+#define SMSC_INITIALIZE() \
+do { \
+ /* release reset */ \
+ ASB2364_FPGA_REG_RESET_LAN = 0x0001; \
+ SyncExBus(); \
+} while (0)
+
+#ifdef MN10300_USE_EXT_EEPROM
+#include <linux/delay.h>
+#include <unit/clock.h>
+
+#define EEPROM_ADDRESS 0xA0
+#define MAC_OFFSET 0x0008
+#define USE_IIC_CH 0 /* 0 or 1 */
+#define IIC_OFFSET (0x80000 * USE_IIC_CH)
+#define IIC_DTRM __SYSREG(0xd8400000 + IIC_OFFSET, u32)
+#define IIC_DREC __SYSREG(0xd8400004 + IIC_OFFSET, u32)
+#define IIC_MYADD __SYSREG(0xd8400008 + IIC_OFFSET, u32)
+#define IIC_CLK __SYSREG(0xd840000c + IIC_OFFSET, u32)
+#define IIC_BRST __SYSREG(0xd8400010 + IIC_OFFSET, u32)
+#define IIC_HOLD __SYSREG(0xd8400014 + IIC_OFFSET, u32)
+#define IIC_BSTS __SYSREG(0xd8400018 + IIC_OFFSET, u32)
+#define IIC_ICR __SYSREG(0xd4000080 + 4 * USE_IIC_CH, u16)
+
+#define IIC_CLK_PLS ((unsigned short)(MN10300_IOCLK / 100000 - 1))
+#define IIC_CLK_LOW ((unsigned short)(IIC_CLK_PLS / 2))
+
+#define SYS_IIC_DTRM_Bit_STA ((unsigned short)0x0400)
+#define SYS_IIC_DTRM_Bit_STO ((unsigned short)0x0200)
+#define SYS_IIC_DTRM_Bit_ACK ((unsigned short)0x0100)
+#define SYS_IIC_DTRM_Bit_DATA ((unsigned short)0x00FF)
+
+static inline void POLL_INT_REQ(volatile u16 *icr)
+{
+ unsigned long flags;
+ u16 tmp;
+
+ while (!(*icr & GxICR_REQUEST))
+ ;
+ flags = arch_local_cli_save();
+ tmp = *icr;
+ *icr = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+ tmp = *icr;
+ arch_local_irq_restore(flags);
+}
+
+/*
+ * Implement the SMSC911x hook for MAC address retrieval
+ */
+#undef smsc_get_mac
+static inline int smsc_get_mac(struct net_device *dev)
+{
+ unsigned char *mac_buf = dev->dev_addr;
+ int i;
+ unsigned short value;
+ unsigned int data;
+ int mac_length = 6;
+ int check;
+ u16 orig_gicr, tmp;
+ unsigned long flags;
+
+ /* save original GnICR and clear GnICR.IE */
+ flags = arch_local_cli_save();
+ orig_gicr = IIC_ICR;
+ IIC_ICR = orig_gicr & GxICR_LEVEL;
+ tmp = IIC_ICR;
+ arch_local_irq_restore(flags);
+
+ IIC_MYADD = 0x00000008;
+ IIC_CLK = (IIC_CLK_LOW << 16) + (IIC_CLK_PLS);
+ /* bus hung recovery */
+
+ while (1) {
+ check = 0;
+ for (i = 0; i < 3; i++) {
+ if ((IIC_BSTS & 0x00000003) == 0x00000003)
+ check++;
+ udelay(3);
+ }
+
+ if (check == 3) {
+ IIC_BRST = 0x00000003;
+ break;
+ } else {
+ for (i = 0; i < 3; i++) {
+ IIC_BRST = 0x00000002;
+ udelay(8);
+ IIC_BRST = 0x00000003;
+ udelay(8);
+ }
+ }
+ }
+
+ IIC_BRST = 0x00000002;
+ IIC_BRST = 0x00000003;
+
+ value = SYS_IIC_DTRM_Bit_STA | SYS_IIC_DTRM_Bit_ACK;
+ value |= (((unsigned short)EEPROM_ADDRESS & SYS_IIC_DTRM_Bit_DATA) |
+ (unsigned short)0x0000);
+ IIC_DTRM = value;
+ POLL_INT_REQ(&IIC_ICR);
+
+ /** send offset of MAC address in EEPROM **/
+ IIC_DTRM = (unsigned char)((MAC_OFFSET & 0xFF00) >> 8);
+ POLL_INT_REQ(&IIC_ICR);
+
+ IIC_DTRM = (unsigned char)(MAC_OFFSET & 0x00FF);
+ POLL_INT_REQ(&IIC_ICR);
+
+ udelay(1000);
+
+ value = SYS_IIC_DTRM_Bit_STA;
+ value |= (((unsigned short)EEPROM_ADDRESS & SYS_IIC_DTRM_Bit_DATA) |
+ (unsigned short)0x0001);
+ IIC_DTRM = value;
+ POLL_INT_REQ(&IIC_ICR);
+
+ IIC_DTRM = 0x00000000;
+ while (mac_length > 0) {
+ POLL_INT_REQ(&IIC_ICR);
+
+ data = IIC_DREC;
+ mac_length--;
+ if (mac_length == 0)
+ value = 0x00000300; /* stop IIC bus */
+ else if (mac_length == 1)
+ value = 0x00000100; /* no ack */
+ else
+ value = 0x00000000; /* ack */
+ IIC_DTRM = value;
+ *mac_buf++ = (unsigned char)(data & 0xff);
+ }
+
+ /* restore GnICR.LV and GnICR.IE */
+ flags = arch_local_cli_save();
+ IIC_ICR = (orig_gicr & (GxICR_LEVEL | GxICR_ENABLE));
+ tmp = IIC_ICR;
+ arch_local_irq_restore(flags);
+
+ return 0;
+}
+#endif /* MN10300_USE_EXT_EEPROM */
+#endif /* _ASM_UNIT_SMSC911X_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/timex.h b/arch/mn10300/unit-asb2364/include/unit/timex.h
new file mode 100644
index 00000000000..ddb7ed01070
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/include/unit/timex.h
@@ -0,0 +1,159 @@
+/* timex.h: MN2WS0038 architecture timer specifications
+ *
+ * Copyright (C) 2002, 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_UNIT_TIMEX_H
+#define _ASM_UNIT_TIMEX_H
+
+#ifndef __ASSEMBLY__
+#include <linux/irq.h>
+#endif /* __ASSEMBLY__ */
+
+#include <asm/timer-regs.h>
+#include <unit/clock.h>
+#include <asm/param.h>
+
+/*
+ * jiffies counter specifications
+ */
+
+#define TMJCBR_MAX 0xffffff /* 24bit */
+#define TMJCIRQ TMTIRQ
+
+#ifndef __ASSEMBLY__
+
+#define MN10300_SRC_IOBCLK MN10300_IOBCLK
+
+#ifndef HZ
+# error HZ undeclared.
+#endif /* !HZ */
+
+#define MN10300_JCCLK (MN10300_SRC_IOBCLK)
+#define MN10300_TSCCLK (MN10300_SRC_IOBCLK)
+
+#define MN10300_JC_PER_HZ ((MN10300_JCCLK + HZ / 2) / HZ)
+#define MN10300_TSC_PER_HZ ((MN10300_TSCCLK + HZ / 2) / HZ)
+
+/* Check that the MTM interval value fits in the base register */
+#if (MN10300_JC_PER_HZ - 1) > TMJCBR_MAX
+# error MTM tick timer interval value exceeds TMJCBR_MAX.
+#endif
+
+static inline void stop_jiffies_counter(void)
+{
+ u16 tmp;
+ TMTMD = 0;
+ tmp = TMTMD;
+}
+
+static inline void reload_jiffies_counter(u32 cnt)
+{
+ u32 tmp;
+
+ TMTBR = cnt;
+ tmp = TMTBR;
+
+ TMTMD = TMTMD_TMTLDE;
+ TMTMD = TMTMD_TMTCNE;
+ tmp = TMTMD;
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_CLOCKEVENTS) && \
+ !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+/*
+ * If we aren't using broadcasting, each core needs its own event timer.
+ * Since CPU0 uses the tick timer, which is 24 bits wide, we use timers 4
+ * and 5 cascaded to 32 bits for CPU1 (though only 24 bits are really
+ * used, to match CPU0).
+ */
+
+#define TMJC1IRQ TM5IRQ
+
+static inline void stop_jiffies_counter1(void)
+{
+ u8 tmp;
+ TM4MD = 0;
+ TM5MD = 0;
+ tmp = TM4MD;
+ tmp = TM5MD;
+}
+
+static inline void reload_jiffies_counter1(u32 cnt)
+{
+ u32 tmp;
+
+ TM45BR = cnt;
+ tmp = TM45BR;
+
+ TM4MD = TM4MD_INIT_COUNTER;
+ tmp = TM4MD;
+
+ TM5MD = TM5MD_SRC_TM4CASCADE | TM5MD_INIT_COUNTER;
+ TM5MD = TM5MD_SRC_TM4CASCADE | TM5MD_COUNT_ENABLE;
+ tmp = TM5MD;
+
+ TM4MD = TM4MD_COUNT_ENABLE;
+ tmp = TM4MD;
+}
+#endif /* CONFIG_SMP&GENERIC_CLOCKEVENTS&!GENERIC_CLOCKEVENTS_BROADCAST */
+
+#endif /* !__ASSEMBLY__ */
+
+
+/*
+ * timestamp counter specifications
+ */
+#define TMTSCBR_MAX 0xffffffff
+
+#ifndef __ASSEMBLY__
+
+/* Use 32-bit timestamp counter */
+#define TMTSCMD TMSMD
+#define TMTSCBR TMSBR
+#define TMTSCBC TMSBC
+#define TMTSCICR TMSICR
+
+static inline void startup_timestamp_counter(void)
+{
+ u32 sync;
+
+ /* set up TMS(Timestamp) 32bit timer register to count real time
+ * - count down from 4Gig-1 to 0 and wrap at IOBCLK rate
+ */
+
+ TMTSCBR = TMTSCBR_MAX;
+ sync = TMTSCBR;
+
+ TMTSCICR = 0;
+ sync = TMTSCICR;
+
+ TMTSCMD = TMTMD_TMTLDE;
+ TMTSCMD = TMTMD_TMTCNE;
+ sync = TMTSCMD;
+}
+
+static inline void shutdown_timestamp_counter(void)
+{
+ TMTSCMD = 0;
+}
+
+/*
+ * we use the 32-bit timestamp counter (TMS) to count I/O clock cycles
+ * for the purposes of timekeeping
+ */
+typedef unsigned long cycles_t;
+
+static inline cycles_t read_timestamp_counter(void)
+{
+ return (cycles_t)~TMTSCBC;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_UNIT_TIMEX_H */
diff --git a/arch/mn10300/unit-asb2364/irq-fpga.c b/arch/mn10300/unit-asb2364/irq-fpga.c
new file mode 100644
index 00000000000..fcf29754e4d
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/irq-fpga.c
@@ -0,0 +1,96 @@
+/* ASB2364 FPGA interrupt multiplexing
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <unit/fpga-regs.h>
+
+/*
+ * FPGA PIC operations
+ */
+static void asb2364_fpga_mask(unsigned int irq)
+{
+ ASB2364_FPGA_REG_MASK(irq - NR_CPU_IRQS) = 0x0001;
+ SyncExBus();
+}
+
+static void asb2364_fpga_ack(unsigned int irq)
+{
+ ASB2364_FPGA_REG_IRQ(irq - NR_CPU_IRQS) = 0x0001;
+ SyncExBus();
+}
+
+static void asb2364_fpga_mask_ack(unsigned int irq)
+{
+ ASB2364_FPGA_REG_MASK(irq - NR_CPU_IRQS) = 0x0001;
+ SyncExBus();
+ ASB2364_FPGA_REG_IRQ(irq - NR_CPU_IRQS) = 0x0001;
+ SyncExBus();
+}
+
+static void asb2364_fpga_unmask(unsigned int irq)
+{
+ ASB2364_FPGA_REG_MASK(irq - NR_CPU_IRQS) = 0x0000;
+ SyncExBus();
+}
+
+static struct irq_chip asb2364_fpga_pic = {
+ .name = "fpga",
+ .ack = asb2364_fpga_ack,
+ .mask = asb2364_fpga_mask,
+ .mask_ack = asb2364_fpga_mask_ack,
+ .unmask = asb2364_fpga_unmask,
+};
+
+/*
+ * FPGA PIC interrupt handler
+ */
+static irqreturn_t fpga_interrupt(int irq, void *_mask)
+{
+ if ((ASB2364_FPGA_REG_IRQ_LAN & 0x0001) != 0x0001)
+ generic_handle_irq(FPGA_LAN_IRQ);
+ if ((ASB2364_FPGA_REG_IRQ_UART & 0x0001) != 0x0001)
+ generic_handle_irq(FPGA_UART_IRQ);
+ if ((ASB2364_FPGA_REG_IRQ_I2C & 0x0001) != 0x0001)
+ generic_handle_irq(FPGA_I2C_IRQ);
+ if ((ASB2364_FPGA_REG_IRQ_USB & 0x0001) != 0x0001)
+ generic_handle_irq(FPGA_USB_IRQ);
+ if ((ASB2364_FPGA_REG_IRQ_FPGA & 0x0001) != 0x0001)
+ generic_handle_irq(FPGA_FPGA_IRQ);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Define an interrupt action for each FPGA PIC output
+ */
+static struct irqaction fpga_irq[] = {
+ [0] = {
+ .handler = fpga_interrupt,
+ .flags = IRQF_DISABLED | IRQF_SHARED,
+ .name = "fpga",
+ },
+};
+
+/*
+ * Initialise the FPGA's PIC
+ */
+void __init irq_fpga_init(void)
+{
+ int irq;
+
+ for (irq = NR_CPU_IRQS; irq < NR_IRQS; irq++)
+ set_irq_chip_and_handler(irq, &asb2364_fpga_pic, handle_level_irq);
+
+ /* the FPGA drives the XIRQ1 input on the CPU PIC */
+ setup_irq(XIRQ1, &fpga_irq[0]);
+}
diff --git a/arch/mn10300/unit-asb2364/leds.c b/arch/mn10300/unit-asb2364/leds.c
new file mode 100644
index 00000000000..1ff830c372b
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/leds.c
@@ -0,0 +1,98 @@
+/* leds.c: ASB2364 peripheral 7seg LEDs x4 support
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/intctl-regs.h>
+#include <asm/rtc-regs.h>
+#include <unit/leds.h>
+
+#if MN10300_USE_7SEGLEDS
+static const u8 asb2364_led_hex_tbl[16] = {
+ 0x80, 0xf2, 0x48, 0x60, 0x32, 0x24, 0x04, 0xf0,
+ 0x00, 0x20, 0x10, 0x06, 0x8c, 0x42, 0x0c, 0x1c
+};
+
+static const u32 asb2364_led_chase_tbl[6] = {
+ ~0x02020202, /* top - segA */
+ ~0x04040404, /* right top - segB */
+ ~0x08080808, /* right bottom - segC */
+ ~0x10101010, /* bottom - segD */
+ ~0x20202020, /* left bottom - segE */
+ ~0x40404040, /* left top - segF */
+};
+
+static unsigned asb2364_led_chase;
+
+void peripheral_leds7x4_display_dec(unsigned int val, unsigned int points)
+{
+ u32 leds;
+
+ leds = asb2364_led_hex_tbl[(val/1000) % 10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[(val/100) % 10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[(val/10) % 10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[val % 10];
+ leds |= points^0x01010101;
+
+ ASB2364_7SEGLEDS = leds;
+}
+
+void peripheral_leds7x4_display_hex(unsigned int val, unsigned int points)
+{
+ u32 leds;
+
+ leds = asb2364_led_hex_tbl[(val/1000) % 10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[(val/100) % 10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[(val/10) % 10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[val % 10];
+ leds |= points^0x01010101;
+
+ ASB2364_7SEGLEDS = leds;
+}
+
+/* display triple horizontal bar and exception code */
+void peripheral_leds_display_exception(enum exception_code code)
+{
+ u32 leds;
+
+ leds = asb2364_led_hex_tbl[(code/0x100) % 0x10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[(code/0x10) % 0x10];
+ leds <<= 8;
+ leds |= asb2364_led_hex_tbl[code % 0x10];
+ leds |= 0x6d010101;
+
+ ASB2364_7SEGLEDS = leds;
+}
+
+void peripheral_leds_led_chase(void)
+{
+ ASB2364_7SEGLEDS = asb2364_led_chase_tbl[asb2364_led_chase];
+ asb2364_led_chase++;
+ if (asb2364_led_chase >= 6)
+ asb2364_led_chase = 0;
+}
+#else /* MN10300_USE_7SEGLEDS */
+void peripheral_leds7x4_display_dec(unsigned int val, unsigned int points) { }
+void peripheral_leds7x4_display_hex(unsigned int val, unsigned int points) { }
+void peripheral_leds_display_exception(enum exception_code code) { }
+void peripheral_leds_led_chase(void) { }
+#endif /* MN10300_USE_7SEGLEDS */
diff --git a/arch/mn10300/unit-asb2364/smsc911x.c b/arch/mn10300/unit-asb2364/smsc911x.c
new file mode 100644
index 00000000000..544a73e94c8
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/smsc911x.c
@@ -0,0 +1,58 @@
+/* Specification for the SMSC911x NIC
+ *
+ * Copyright (C) 2006 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/smsc911x.h>
+#include <unit/smsc911x.h>
+
+static struct smsc911x_platform_config smsc911x_config = {
+ .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+ .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+ .flags = SMSC911X_USE_32BIT,
+};
+
+static struct resource smsc911x_resources[] = {
+ [0] = {
+ .start = SMSC911X_BASE,
+ .end = SMSC911X_BASE_END,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = SMSC911X_IRQ,
+ .end = SMSC911X_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device smsc911x_device = {
+ .name = "smsc911x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smsc911x_resources),
+ .resource = smsc911x_resources,
+ .dev = {
+ .platform_data = &smsc911x_config,
+ }
+};
+
+/*
+ * add platform devices
+ */
+static int __init unit_device_init(void)
+{
+ platform_device_register(&smsc911x_device);
+ return 0;
+}
+
+device_initcall(unit_device_init);
diff --git a/arch/mn10300/unit-asb2364/unit-init.c b/arch/mn10300/unit-asb2364/unit-init.c
new file mode 100644
index 00000000000..11440803db1
--- /dev/null
+++ b/arch/mn10300/unit-asb2364/unit-init.c
@@ -0,0 +1,88 @@
+/* ASB2364 initialisation
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/setup.h>
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <asm/intctl-regs.h>
+#include <unit/fpga-regs.h>
+
+/*
+ * initialise some of the unit hardware before gdbstub is set up
+ */
+asmlinkage void __init unit_init(void)
+{
+ /* set up the external interrupts */
+
+ /* XIRQ[0]: NAND RXBY */
+ /* SET_XIRQ_TRIGGER(0, XIRQ_TRIGGER_LOWLEVEL); */
+
+ /* XIRQ[1]: LAN, UART, I2C, USB, PCI, FPGA */
+ SET_XIRQ_TRIGGER(1, XIRQ_TRIGGER_LOWLEVEL);
+
+ /* XIRQ[2]: Extend Slot 1-9 */
+ /* SET_XIRQ_TRIGGER(2, XIRQ_TRIGGER_LOWLEVEL); */
+
+#if defined(CONFIG_EXT_SERIAL_IRQ_LEVEL) && \
+ defined(CONFIG_ETHERNET_IRQ_LEVEL) && \
+ (CONFIG_EXT_SERIAL_IRQ_LEVEL != CONFIG_ETHERNET_IRQ_LEVEL)
+# error CONFIG_EXT_SERIAL_IRQ_LEVEL != CONFIG_ETHERNET_IRQ_LEVEL
+#endif
+
+#if defined(CONFIG_EXT_SERIAL_IRQ_LEVEL)
+ set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL));
+#elif defined(CONFIG_ETHERNET_IRQ_LEVEL)
+ set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_ETHERNET_IRQ_LEVEL));
+#endif
+}
+
+/*
+ * initialise the rest of the unit hardware after gdbstub is ready
+ */
+asmlinkage void __init unit_setup(void)
+{
+
+}
+
+/*
+ * initialise the external interrupts used by a unit of this type
+ */
+void __init unit_init_IRQ(void)
+{
+ unsigned int extnum;
+
+ for (extnum = 0 ; extnum < NR_XIRQS ; extnum++) {
+ switch (GET_XIRQ_TRIGGER(extnum)) {
+ /* LEVEL triggered interrupts should be made
+ * post-ACK'able as they hold their lines until
+ * serviced
+ */
+ case XIRQ_TRIGGER_HILEVEL:
+ case XIRQ_TRIGGER_LOWLEVEL:
+ mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum));
+ break;
+ default:
+ break;
+ }
+ }
+
+#define IRQCTL __SYSREG(0xd5000090, u32)
+ IRQCTL |= 0x02;
+
+ irq_fpga_init();
+}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 79a04a9394d..0888675c98d 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Linux/PA-RISC Kernel Configuration"
-
config PARISC
def_bool y
select HAVE_IDE
@@ -19,6 +12,7 @@ config PARISC
select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select GENERIC_ATOMIC64 if !64BIT
+ select GENERIC_HARDIRQS_NO__DO_IRQ
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
in many of their workstations & servers (HP9000 700 and 800 series,
@@ -85,6 +79,9 @@ config IRQ_PER_CPU
bool
default y
+config GENERIC_HARDIRQS_NO__DO_IRQ
+ def_bool y
+
# unless you want to implement ACPI on PA-RISC ... ;-)
config PM
bool
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
index ba430a03bc7..30394081d9b 100644
--- a/arch/parisc/hpux/sys_hpux.c
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -28,7 +28,6 @@
#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/vfs.h>
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index 039880e7d2c..47f11c707b6 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -24,8 +24,6 @@
#ifndef __ASSEMBLY__
-#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
-
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index dba11aedce1..f388a85bba1 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -126,20 +126,20 @@ static inline void *kmap(struct page *page)
#define kunmap(page) kunmap_parisc(page_address(page))
-static inline void *kmap_atomic(struct page *page, enum km_type idx)
+static inline void *__kmap_atomic(struct page *page)
{
pagefault_disable();
return page_address(page);
}
-static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
+static inline void __kunmap_atomic(void *addr)
{
kunmap_parisc(addr);
pagefault_enable();
}
-#define kmap_atomic_prot(page, idx, prot) kmap_atomic(page, idx)
-#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx))
+#define kmap_atomic_prot(page, prot) kmap_atomic(page)
+#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
#endif
diff --git a/arch/parisc/include/asm/irq.h b/arch/parisc/include/asm/irq.h
index dfa26b67f91..c67dccf2e31 100644
--- a/arch/parisc/include/asm/irq.h
+++ b/arch/parisc/include/asm/irq.h
@@ -40,7 +40,7 @@ struct irq_chip;
void no_ack_irq(unsigned int irq);
void no_end_irq(unsigned int irq);
void cpu_ack_irq(unsigned int irq);
-void cpu_end_irq(unsigned int irq);
+void cpu_eoi_irq(unsigned int irq);
extern int txn_alloc_irq(unsigned int nbits);
extern int txn_claim_irq(int);
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index 1ce7d2851d9..3eb82c2a5ec 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -813,8 +813,9 @@
#define __NR_perf_event_open (__NR_Linux + 318)
#define __NR_recvmmsg (__NR_Linux + 319)
#define __NR_accept4 (__NR_Linux + 320)
+#define __NR_prlimit64 (__NR_Linux + 321)
-#define __NR_Linux_syscalls (__NR_accept4 + 1)
+#define __NR_Linux_syscalls (__NR_prlimit64 + 1)
#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index efbcee5d222..5024f643b3b 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -52,7 +52,7 @@ static volatile unsigned long cpu_eiem = 0;
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
-static void cpu_disable_irq(unsigned int irq)
+static void cpu_mask_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
@@ -63,7 +63,7 @@ static void cpu_disable_irq(unsigned int irq)
* then gets disabled */
}
-static void cpu_enable_irq(unsigned int irq)
+static void cpu_unmask_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
@@ -75,12 +75,6 @@ static void cpu_enable_irq(unsigned int irq)
smp_send_all_nop();
}
-static unsigned int cpu_startup_irq(unsigned int irq)
-{
- cpu_enable_irq(irq);
- return 0;
-}
-
void no_ack_irq(unsigned int irq) { }
void no_end_irq(unsigned int irq) { }
@@ -99,7 +93,7 @@ void cpu_ack_irq(unsigned int irq)
mtctl(mask, 23);
}
-void cpu_end_irq(unsigned int irq)
+void cpu_eoi_irq(unsigned int irq)
{
unsigned long mask = EIEM_MASK(irq);
int cpu = smp_processor_id();
@@ -146,12 +140,10 @@ static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
static struct irq_chip cpu_interrupt_type = {
.name = "CPU",
- .startup = cpu_startup_irq,
- .shutdown = cpu_disable_irq,
- .enable = cpu_enable_irq,
- .disable = cpu_disable_irq,
+ .mask = cpu_mask_irq,
+ .unmask = cpu_unmask_irq,
.ack = cpu_ack_irq,
- .end = cpu_end_irq,
+ .eoi = cpu_eoi_irq,
#ifdef CONFIG_SMP
.set_affinity = cpu_set_affinity_irq,
#endif
@@ -247,10 +239,11 @@ int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
if (irq_desc[irq].chip != &cpu_interrupt_type)
return -EBUSY;
+ /* for iosapic interrupts */
if (type) {
- irq_desc[irq].chip = type;
- irq_desc[irq].chip_data = data;
- cpu_interrupt_type.enable(irq);
+ set_irq_chip_and_handler(irq, type, handle_level_irq);
+ set_irq_chip_data(irq, data);
+ cpu_unmask_irq(irq);
}
return 0;
}
@@ -368,7 +361,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
goto set_out;
}
#endif
- __do_IRQ(irq);
+ generic_handle_irq(irq);
out:
irq_exit();
@@ -398,14 +391,15 @@ static void claim_cpu_irqs(void)
{
int i;
for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
- irq_desc[i].chip = &cpu_interrupt_type;
+ set_irq_chip_and_handler(i, &cpu_interrupt_type,
+ handle_level_irq);
}
- irq_desc[TIMER_IRQ].action = &timer_action;
- irq_desc[TIMER_IRQ].status = IRQ_PER_CPU;
+ set_irq_handler(TIMER_IRQ, handle_percpu_irq);
+ setup_irq(TIMER_IRQ, &timer_action);
#ifdef CONFIG_SMP
- irq_desc[IPI_IRQ].action = &ipi_action;
- irq_desc[IPI_IRQ].status = IRQ_PER_CPU;
+ set_irq_handler(IPI_IRQ, handle_percpu_irq);
+ setup_irq(IPI_IRQ, &ipi_action);
#endif
}
@@ -423,3 +417,4 @@ void __init init_IRQ(void)
set_eiem(cpu_eiem); /* EIEM : enable all external intr */
}
+
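
For reference, the registration pattern that this conversion moves to looks roughly like the sketch below; the example_* chip and callbacks are hypothetical and not part of the patch. The chip only supplies mask/unmask/ack/eoi operations, and flow handling is delegated to handle_level_irq(), which is what allows the deprecated __do_IRQ() path to be dropped.

#include <linux/irq.h>
#include <linux/init.h>

static void example_mask_irq(unsigned int irq)   { /* mask the line in hardware */ }
static void example_unmask_irq(unsigned int irq) { /* unmask the line in hardware */ }
static void example_ack_irq(unsigned int irq)    { /* clear the pending condition */ }
static void example_eoi_irq(unsigned int irq)    { /* signal end-of-interrupt */ }

static struct irq_chip example_chip = {
	.name	= "example",
	.mask	= example_mask_irq,
	.unmask	= example_unmask_irq,
	.ack	= example_ack_irq,
	.eoi	= example_eoi_irq,
};

static void __init example_init_irq(unsigned int irq)
{
	/* level-triggered line: the generic flow handler masks/acks around
	 * the action handlers, so no per-driver flow code is needed */
	set_irq_chip_and_handler(irq, &example_chip, handle_level_irq);
}
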
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index 1ff366cb968..66d1f17fdb9 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -12,6 +12,7 @@
* Copyright (C) 2001 Helge Deller <deller at parisc-linux.org>
* Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ * Copyright (C) 2010 Guy Martin <gmsoft at tuxicoman.be>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -31,12 +32,11 @@
/*
* The PDC console is a simple console, which can be used for debugging
- * boot related problems on HP PA-RISC machines.
+ * boot related problems on HP PA-RISC machines. It is also useful when no
+ * other console works.
*
* This code uses the ROM (=PDC) based functions to read and write characters
* from and to PDC's boot path.
- * Since all character read from that path must be polled, this code never
- * can or will be a fully functional linux console.
*/
/* Define EARLY_BOOTUP_DEBUG to debug kernel related boot problems.
@@ -53,6 +53,7 @@
#include <asm/pdc.h> /* for iodc_call() proto and friends */
static DEFINE_SPINLOCK(pdc_console_lock);
+static struct console pdc_cons;
static void pdc_console_write(struct console *co, const char *s, unsigned count)
{
@@ -85,12 +86,138 @@ static int pdc_console_setup(struct console *co, char *options)
#if defined(CONFIG_PDC_CONSOLE)
#include <linux/vt_kern.h>
+#include <linux/tty_flip.h>
+
+#define PDC_CONS_POLL_DELAY (30 * HZ / 1000)
+
+static struct timer_list pdc_console_timer;
+
+extern struct console * console_drivers;
+
+static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp)
+{
+
+ mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY);
+
+ return 0;
+}
+
+static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ if (!tty->count)
+ del_timer(&pdc_console_timer);
+}
+
+static int pdc_console_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ pdc_console_write(NULL, buf, count);
+ return count;
+}
+
+static int pdc_console_tty_write_room(struct tty_struct *tty)
+{
+ return 32768; /* no limit, no buffer used */
+}
+
+static int pdc_console_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ return 0; /* no buffer */
+}
+
+static struct tty_driver *pdc_console_tty_driver;
+
+static const struct tty_operations pdc_console_tty_ops = {
+ .open = pdc_console_tty_open,
+ .close = pdc_console_tty_close,
+ .write = pdc_console_tty_write,
+ .write_room = pdc_console_tty_write_room,
+ .chars_in_buffer = pdc_console_tty_chars_in_buffer,
+};
+
+static void pdc_console_poll(unsigned long unused)
+{
+
+ int data, count = 0;
+
+ struct tty_struct *tty = pdc_console_tty_driver->ttys[0];
+
+ if (!tty)
+ return;
+
+ while (1) {
+ data = pdc_console_poll_key(NULL);
+ if (data == -1)
+ break;
+ tty_insert_flip_char(tty, data & 0xFF, TTY_NORMAL);
+ count ++;
+ }
+
+ if (count)
+ tty_flip_buffer_push(tty);
+
+ if (tty->count && (pdc_cons.flags & CON_ENABLED))
+ mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY);
+}
+
+static int __init pdc_console_tty_driver_init(void)
+{
+
+ int err;
+ struct tty_driver *drv;
+
+ /* Check if the console driver is still registered.
+ * It is unregistered if the pdc console was not selected as the
+ * primary console. */
+
+ struct console *tmp = console_drivers;
+
+ for (tmp = console_drivers; tmp; tmp = tmp->next)
+ if (tmp == &pdc_cons)
+ break;
+
+ if (!tmp) {
+ printk(KERN_INFO "PDC console driver not registered anymore, not creating %s\n", pdc_cons.name);
+ return -ENODEV;
+ }
+
+ printk(KERN_INFO "The PDC console driver is still registered, removing CON_BOOT flag\n");
+ pdc_cons.flags &= ~CON_BOOT;
+
+ drv = alloc_tty_driver(1);
+
+ if (!drv)
+ return -ENOMEM;
+
+ drv->driver_name = "pdc_cons";
+ drv->name = "ttyB";
+ drv->major = MUX_MAJOR;
+ drv->minor_start = 0;
+ drv->type = TTY_DRIVER_TYPE_SYSTEM;
+ drv->init_termios = tty_std_termios;
+ drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS;
+ tty_set_operations(drv, &pdc_console_tty_ops);
+
+ err = tty_register_driver(drv);
+ if (err) {
+ printk(KERN_ERR "Unable to register the PDC console TTY driver\n");
+ return err;
+ }
+
+ pdc_console_tty_driver = drv;
+
+ /* No need to initialize the pdc_console_timer if tty isn't allocated */
+ init_timer(&pdc_console_timer);
+ pdc_console_timer.function = pdc_console_poll;
+
+ return 0;
+}
+
+module_init(pdc_console_tty_driver_init);
static struct tty_driver * pdc_console_device (struct console *c, int *index)
{
- extern struct tty_driver console_driver;
- *index = c->index ? c->index-1 : fg_console;
- return &console_driver;
+ *index = c->index;
+ return pdc_console_tty_driver;
}
#else
#define pdc_console_device NULL
@@ -101,7 +228,7 @@ static struct console pdc_cons = {
.write = pdc_console_write,
.device = pdc_console_device,
.setup = pdc_console_setup,
- .flags = CON_BOOT | CON_PRINTBUFFER | CON_ENABLED,
+ .flags = CON_BOOT | CON_PRINTBUFFER,
.index = -1,
};
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index c4f49e45129..2905b1f52d3 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -110,7 +110,8 @@ void user_enable_block_step(struct task_struct *task)
pa_psw(task)->l = 0;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long tmp;
long ret = -EIO;
@@ -120,11 +121,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* Read the word at location addr in the USER area. For ptraced
processes, the kernel saves all regs on a syscall. */
case PTRACE_PEEKUSR:
- if ((addr & (sizeof(long)-1)) ||
- (unsigned long) addr >= sizeof(struct pt_regs))
+ if ((addr & (sizeof(unsigned long)-1)) ||
+ addr >= sizeof(struct pt_regs))
break;
tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
- ret = put_user(tmp, (unsigned long *) data);
+ ret = put_user(tmp, (unsigned long __user *) data);
break;
/* Write the word at location addr in the USER area. This will need
@@ -151,8 +152,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
}
- if ((addr & (sizeof(long)-1)) ||
- (unsigned long) addr >= sizeof(struct pt_regs))
+ if ((addr & (sizeof(unsigned long)-1)) ||
+ addr >= sizeof(struct pt_regs))
break;
if ((addr >= PT_GR1 && addr <= PT_GR31) ||
addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 9779ece2b07..88a0ad14a9c 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -20,7 +20,6 @@
#include <linux/times.h>
#include <linux/time.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 3d52c978738..74867dfdabe 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -419,6 +419,7 @@
ENTRY_SAME(perf_event_open)
ENTRY_COMP(recvmmsg)
ENTRY_SAME(accept4) /* 320 */
+ ENTRY_SAME(prlimit64)
/* Nothing yet */
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index 92d977bb5ea..234e3682cf0 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -619,15 +619,12 @@ void handle_unaligned(struct pt_regs *regs)
flop=1;
ret = emulate_std(regs, R2(regs->iir),1);
break;
-
-#ifdef CONFIG_PA20
case OPCODE_LDD_L:
ret = emulate_ldd(regs, R2(regs->iir),0);
break;
case OPCODE_STD_L:
ret = emulate_std(regs, R2(regs->iir),0);
break;
-#endif
}
#endif
switch (regs->iir & OPCODE3_MASK)
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index d58eac1a828..76ed62ed785 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -80,8 +80,11 @@ find_unwind_entry(unsigned long addr)
if (addr >= table->start &&
addr <= table->end)
e = find_unwind_entry_in_table(table, addr);
- if (e)
+ if (e) {
+ /* Move-to-front to exploit common traces */
+ list_move(&table->list, &unwind_tables);
break;
+ }
}
return e;
diff --git a/arch/parisc/math-emu/Makefile b/arch/parisc/math-emu/Makefile
index 1f3f225897f..0bd63b08a79 100644
--- a/arch/parisc/math-emu/Makefile
+++ b/arch/parisc/math-emu/Makefile
@@ -3,7 +3,7 @@
#
# See arch/parisc/math-emu/README
-EXTRA_CFLAGS += -Wno-parentheses -Wno-implicit-function-declaration \
+ccflags-y := -Wno-parentheses -Wno-implicit-function-declaration \
-Wno-uninitialized -Wno-strict-prototypes -Wno-return-type \
-Wno-implicit-int
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4b1e521d966..e625e9e034a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -1,15 +1,13 @@
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Linux/PowerPC Kernel Configuration"
-
source "arch/powerpc/platforms/Kconfig.cputype"
config PPC32
bool
default y if !PPC64
+config 32BIT
+ bool
+ default y if PPC32
+
config 64BIT
bool
default y if PPC64
@@ -688,9 +686,12 @@ config 4xx_SOC
bool
config FSL_LBC
- bool
+ bool "Freescale Local Bus support"
+ depends on FSL_SOC
help
- Freescale Localbus support
+ Enables reporting of errors from the Freescale local bus
+ controller. Also contains some common code used by
+ drivers for specific local bus peripherals.
config FSL_GTM
bool
diff --git a/arch/powerpc/boot/div64.S b/arch/powerpc/boot/div64.S
index 722f360a32a..d271ab54267 100644
--- a/arch/powerpc/boot/div64.S
+++ b/arch/powerpc/boot/div64.S
@@ -33,9 +33,10 @@ __div64_32:
cntlzw r0,r5 # we are shifting the dividend right
li r10,-1 # to make it < 2^32, and shifting
srw r10,r10,r0 # the divisor right the same amount,
- add r9,r4,r10 # rounding up (so the estimate cannot
+ addc r9,r4,r10 # rounding up (so the estimate cannot
andc r11,r6,r10 # ever be too large, only too small)
andc r9,r9,r10
+ addze r9,r9
or r11,r5,r11
rotlw r9,r9,r0
rotlw r11,r11,r0
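
The change above swaps the plain add for an addc/addze pair so that the carry generated when the rounding mask is added is folded into the result rather than silently dropped. A minimal stand-alone C model of that carry issue (an illustration only, not a line-for-line translation of the assembly; the values are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* round 'val' up to a multiple of (mask + 1), keeping the carry bit */
static uint64_t round_up_keeping_carry(uint32_t val, uint32_t mask)
{
	uint64_t sum = (uint64_t)val + mask;	/* addc: the carry is recorded */
	return sum & ~(uint64_t)mask;		/* andc + addze: carry folded back in */
}

/* the pre-fix behaviour: the carry out of the 32-bit add is discarded */
static uint32_t round_up_dropping_carry(uint32_t val, uint32_t mask)
{
	return (val + mask) & ~mask;
}

int main(void)
{
	uint32_t val = 0xfffffff0u, mask = 0xff;

	printf("carry kept:    %#llx\n",
	       (unsigned long long)round_up_keeping_carry(val, mask));
	printf("carry dropped: %#x\n",
	       (unsigned)round_up_dropping_carry(val, mask));
	return 0;
}
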
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 8bdc6a9e577..1cf20bdfbec 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -124,23 +124,23 @@ static inline u64 cputime64_to_jiffies64(const cputime_t ct)
}
/*
- * Convert cputime <-> milliseconds
+ * Convert cputime <-> microseconds
*/
extern u64 __cputime_msec_factor;
-static inline unsigned long cputime_to_msecs(const cputime_t ct)
+static inline unsigned long cputime_to_usecs(const cputime_t ct)
{
- return mulhdu(ct, __cputime_msec_factor);
+ return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC;
}
-static inline cputime_t msecs_to_cputime(const unsigned long ms)
+static inline cputime_t usecs_to_cputime(const unsigned long us)
{
cputime_t ct;
unsigned long sec;
/* have to be a little careful about overflow */
- ct = ms % 1000;
- sec = ms / 1000;
+ ct = us % 1000000;
+ sec = us / 1000000;
if (ct) {
ct *= tb_ticks_per_sec;
do_div(ct, 1000);
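
Note that the new cputime_to_usecs() still goes through the millisecond factor and then scales by USEC_PER_MSEC, so the value is reported in microseconds but retains millisecond granularity. A rough user-space model of the forward conversion (the timebase frequency is an assumed example value, and plain division stands in for mulhdu()):

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_MSEC	1000ULL

static const uint64_t tb_ticks_per_sec = 512000000ULL;	/* assumed for the example */

static uint64_t model_cputime_to_usecs(uint64_t ct)
{
	uint64_t msecs = ct / (tb_ticks_per_sec / 1000);	/* stands in for mulhdu() */
	return msecs * USEC_PER_MSEC;
}

int main(void)
{
	/* 1.5 ms worth of timebase ticks reports 1000 us, not 1500 us */
	uint64_t ct = tb_ticks_per_sec * 3 / 2000;

	printf("%llu\n", (unsigned long long)model_cputime_to_usecs(ct));
	return 0;
}
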
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h
index 1b5a21041f9..5c1bf346674 100644
--- a/arch/powerpc/include/asm/fsl_lbc.h
+++ b/arch/powerpc/include/asm/fsl_lbc.h
@@ -1,9 +1,10 @@
/* Freescale Local Bus Controller
*
- * Copyright (c) 2006-2007 Freescale Semiconductor
+ * Copyright © 2006-2007, 2010 Freescale Semiconductor
*
* Authors: Nick Spence <nick.spence@freescale.com>,
* Scott Wood <scottwood@freescale.com>
+ * Jack Lan <jack.lan@freescale.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -26,6 +27,8 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
struct fsl_lbc_bank {
__be32 br; /**< Base Register */
@@ -125,13 +128,23 @@ struct fsl_lbc_regs {
#define LTESR_ATMW 0x00800000
#define LTESR_ATMR 0x00400000
#define LTESR_CS 0x00080000
+#define LTESR_UPM 0x00000002
#define LTESR_CC 0x00000001
#define LTESR_NAND_MASK (LTESR_FCT | LTESR_PAR | LTESR_CC)
+#define LTESR_MASK (LTESR_BM | LTESR_FCT | LTESR_PAR | LTESR_WP \
+ | LTESR_ATMW | LTESR_ATMR | LTESR_CS | LTESR_UPM \
+ | LTESR_CC)
+#define LTESR_CLEAR 0xFFFFFFFF
+#define LTECCR_CLEAR 0xFFFFFFFF
+#define LTESR_STATUS LTESR_MASK
+#define LTEIR_ENABLE LTESR_MASK
+#define LTEDR_ENABLE 0x00000000
__be32 ltedr; /**< Transfer Error Disable Register */
__be32 lteir; /**< Transfer Error Interrupt Register */
__be32 lteatr; /**< Transfer Error Attributes Register */
__be32 ltear; /**< Transfer Error Address Register */
- u8 res6[0xC];
+ __be32 lteccr; /**< Transfer Error ECC Register */
+ u8 res6[0x8];
__be32 lbcr; /**< Configuration Register */
#define LBCR_LDIS 0x80000000
#define LBCR_LDIS_SHIFT 31
@@ -235,6 +248,7 @@ struct fsl_upm {
int width;
};
+extern u32 fsl_lbc_addr(phys_addr_t addr_base);
extern int fsl_lbc_find(phys_addr_t addr_base);
extern int fsl_upm_find(phys_addr_t addr_base, struct fsl_upm *upm);
@@ -265,7 +279,23 @@ static inline void fsl_upm_end_pattern(struct fsl_upm *upm)
cpu_relax();
}
+/* overview of the fsl lbc controller */
+
+struct fsl_lbc_ctrl {
+ /* device info */
+ struct device *dev;
+ struct fsl_lbc_regs __iomem *regs;
+ int irq;
+ wait_queue_head_t irq_wait;
+ spinlock_t lock;
+ void *nand;
+
+ /* status read from LTESR by irq handler */
+ unsigned int irq_status;
+};
+
extern int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base,
u32 mar);
+extern struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev;
#endif /* __ASM_FSL_LBC_H */
diff --git a/arch/powerpc/include/asm/fsldma.h b/arch/powerpc/include/asm/fsldma.h
deleted file mode 100644
index debc5ed96d6..00000000000
--- a/arch/powerpc/include/asm/fsldma.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Freescale MPC83XX / MPC85XX DMA Controller
- *
- * Copyright (c) 2009 Ira W. Snyder <iws@ovro.caltech.edu>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
-#define __ARCH_POWERPC_ASM_FSLDMA_H__
-
-#include <linux/slab.h>
-#include <linux/dmaengine.h>
-
-/*
- * Definitions for the Freescale DMA controller's DMA_SLAVE implemention
- *
- * The Freescale DMA_SLAVE implementation was designed to handle many-to-many
- * transfers. An example usage would be an accelerated copy between two
- * scatterlists. Another example use would be an accelerated copy from
- * multiple non-contiguous device buffers into a single scatterlist.
- *
- * A DMA_SLAVE transaction is defined by a struct fsl_dma_slave. This
- * structure contains a list of hardware addresses that should be copied
- * to/from the scatterlist passed into device_prep_slave_sg(). The structure
- * also has some fields to enable hardware-specific features.
- */
-
-/**
- * struct fsl_dma_hw_addr
- * @entry: linked list entry
- * @address: the hardware address
- * @length: length to transfer
- *
- * Holds a single physical hardware address / length pair for use
- * with the DMAEngine DMA_SLAVE API.
- */
-struct fsl_dma_hw_addr {
- struct list_head entry;
-
- dma_addr_t address;
- size_t length;
-};
-
-/**
- * struct fsl_dma_slave
- * @addresses: a linked list of struct fsl_dma_hw_addr structures
- * @request_count: value for DMA request count
- * @src_loop_size: setup and enable constant source-address DMA transfers
- * @dst_loop_size: setup and enable constant destination address DMA transfers
- * @external_start: enable externally started DMA transfers
- * @external_pause: enable externally paused DMA transfers
- *
- * Holds a list of address / length pairs for use with the DMAEngine
- * DMA_SLAVE API implementation for the Freescale DMA controller.
- */
-struct fsl_dma_slave {
-
- /* List of hardware address/length pairs */
- struct list_head addresses;
-
- /* Support for extra controller features */
- unsigned int request_count;
- unsigned int src_loop_size;
- unsigned int dst_loop_size;
- bool external_start;
- bool external_pause;
-};
-
-/**
- * fsl_dma_slave_append - add an address/length pair to a struct fsl_dma_slave
- * @slave: the &struct fsl_dma_slave to add to
- * @address: the hardware address to add
- * @length: the length of bytes to transfer from @address
- *
- * Add a hardware address/length pair to a struct fsl_dma_slave. Returns 0 on
- * success, -ERRNO otherwise.
- */
-static inline int fsl_dma_slave_append(struct fsl_dma_slave *slave,
- dma_addr_t address, size_t length)
-{
- struct fsl_dma_hw_addr *addr;
-
- addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
- if (!addr)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&addr->entry);
- addr->address = address;
- addr->length = length;
-
- list_add_tail(&addr->entry, &slave->addresses);
- return 0;
-}
-
-/**
- * fsl_dma_slave_free - free a struct fsl_dma_slave
- * @slave: the struct fsl_dma_slave to free
- *
- * Free a struct fsl_dma_slave and all associated address/length pairs
- */
-static inline void fsl_dma_slave_free(struct fsl_dma_slave *slave)
-{
- struct fsl_dma_hw_addr *addr, *tmp;
-
- if (slave) {
- list_for_each_entry_safe(addr, tmp, &slave->addresses, entry) {
- list_del(&addr->entry);
- kfree(addr);
- }
-
- kfree(slave);
- }
-}
-
-/**
- * fsl_dma_slave_alloc - allocate a struct fsl_dma_slave
- * @gfp: the flags to pass to kmalloc when allocating this structure
- *
- * Allocate a struct fsl_dma_slave for use by the DMA_SLAVE API. Returns a new
- * struct fsl_dma_slave on success, or NULL on failure.
- */
-static inline struct fsl_dma_slave *fsl_dma_slave_alloc(gfp_t gfp)
-{
- struct fsl_dma_slave *slave;
-
- slave = kzalloc(sizeof(*slave), gfp);
- if (!slave)
- return NULL;
-
- INIT_LIST_HEAD(&slave->addresses);
- return slave;
-}
-
-#endif /* __ARCH_POWERPC_ASM_FSLDMA_H__ */
diff --git a/arch/powerpc/include/asm/kgdb.h b/arch/powerpc/include/asm/kgdb.h
index edd217006d2..9db24e77b9f 100644
--- a/arch/powerpc/include/asm/kgdb.h
+++ b/arch/powerpc/include/asm/kgdb.h
@@ -31,6 +31,7 @@ static inline void arch_kgdb_breakpoint(void)
asm(".long 0x7d821008"); /* twge r2, r2 */
}
#define CACHE_FLUSH_IS_SAFE 1
+#define DBG_MAX_REG_NUM 70
/* The number bytes of registers we have to save depends on a few
* things. For 64bit we default to not including vector registers and
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 7f61a3ac787..42850ee00ad 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -194,40 +194,6 @@ static int kgdb_dabr_match(struct pt_regs *regs)
ptr = (unsigned long *)ptr32; \
} while (0)
-
-void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
-{
- unsigned long *ptr = gdb_regs;
- int reg;
-
- memset(gdb_regs, 0, NUMREGBYTES);
-
- for (reg = 0; reg < 32; reg++)
- PACK64(ptr, regs->gpr[reg]);
-
-#ifdef CONFIG_FSL_BOOKE
-#ifdef CONFIG_SPE
- for (reg = 0; reg < 32; reg++)
- PACK64(ptr, current->thread.evr[reg]);
-#else
- ptr += 32;
-#endif
-#else
- /* fp registers not used by kernel, leave zero */
- ptr += 32 * 8 / sizeof(long);
-#endif
-
- PACK64(ptr, regs->nip);
- PACK64(ptr, regs->msr);
- PACK32(ptr, regs->ccr);
- PACK64(ptr, regs->link);
- PACK64(ptr, regs->ctr);
- PACK32(ptr, regs->xer);
-
- BUG_ON((unsigned long)ptr >
- (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
-}
-
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp +
@@ -271,44 +237,140 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
(unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
}
-#define UNPACK64(dest, ptr) do { dest = *(ptr++); } while (0)
+#define GDB_SIZEOF_REG sizeof(unsigned long)
+#define GDB_SIZEOF_REG_U32 sizeof(u32)
-#define UNPACK32(dest, ptr) do { \
- u32 *ptr32; \
- ptr32 = (u32 *)ptr; \
- dest = *(ptr32++); \
- ptr = (unsigned long *)ptr32; \
- } while (0)
+#ifdef CONFIG_FSL_BOOKE
+#define GDB_SIZEOF_FLOAT_REG sizeof(unsigned long)
+#else
+#define GDB_SIZEOF_FLOAT_REG sizeof(u64)
+#endif
-void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
- unsigned long *ptr = gdb_regs;
- int reg;
-
- for (reg = 0; reg < 32; reg++)
- UNPACK64(regs->gpr[reg], ptr);
+ { "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[0]) },
+ { "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[1]) },
+ { "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[2]) },
+ { "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[3]) },
+ { "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[4]) },
+ { "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[5]) },
+ { "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[6]) },
+ { "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[7]) },
+ { "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[8]) },
+ { "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[9]) },
+ { "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[10]) },
+ { "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[11]) },
+ { "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[12]) },
+ { "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[13]) },
+ { "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[14]) },
+ { "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[15]) },
+ { "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[16]) },
+ { "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[17]) },
+ { "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[18]) },
+ { "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[19]) },
+ { "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[20]) },
+ { "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[21]) },
+ { "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[22]) },
+ { "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[23]) },
+ { "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[24]) },
+ { "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[25]) },
+ { "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[26]) },
+ { "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[27]) },
+ { "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[28]) },
+ { "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[29]) },
+ { "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[30]) },
+ { "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[31]) },
+
+ { "f0", GDB_SIZEOF_FLOAT_REG, 0 },
+ { "f1", GDB_SIZEOF_FLOAT_REG, 1 },
+ { "f2", GDB_SIZEOF_FLOAT_REG, 2 },
+ { "f3", GDB_SIZEOF_FLOAT_REG, 3 },
+ { "f4", GDB_SIZEOF_FLOAT_REG, 4 },
+ { "f5", GDB_SIZEOF_FLOAT_REG, 5 },
+ { "f6", GDB_SIZEOF_FLOAT_REG, 6 },
+ { "f7", GDB_SIZEOF_FLOAT_REG, 7 },
+ { "f8", GDB_SIZEOF_FLOAT_REG, 8 },
+ { "f9", GDB_SIZEOF_FLOAT_REG, 9 },
+ { "f10", GDB_SIZEOF_FLOAT_REG, 10 },
+ { "f11", GDB_SIZEOF_FLOAT_REG, 11 },
+ { "f12", GDB_SIZEOF_FLOAT_REG, 12 },
+ { "f13", GDB_SIZEOF_FLOAT_REG, 13 },
+ { "f14", GDB_SIZEOF_FLOAT_REG, 14 },
+ { "f15", GDB_SIZEOF_FLOAT_REG, 15 },
+ { "f16", GDB_SIZEOF_FLOAT_REG, 16 },
+ { "f17", GDB_SIZEOF_FLOAT_REG, 17 },
+ { "f18", GDB_SIZEOF_FLOAT_REG, 18 },
+ { "f19", GDB_SIZEOF_FLOAT_REG, 19 },
+ { "f20", GDB_SIZEOF_FLOAT_REG, 20 },
+ { "f21", GDB_SIZEOF_FLOAT_REG, 21 },
+ { "f22", GDB_SIZEOF_FLOAT_REG, 22 },
+ { "f23", GDB_SIZEOF_FLOAT_REG, 23 },
+ { "f24", GDB_SIZEOF_FLOAT_REG, 24 },
+ { "f25", GDB_SIZEOF_FLOAT_REG, 25 },
+ { "f26", GDB_SIZEOF_FLOAT_REG, 26 },
+ { "f27", GDB_SIZEOF_FLOAT_REG, 27 },
+ { "f28", GDB_SIZEOF_FLOAT_REG, 28 },
+ { "f29", GDB_SIZEOF_FLOAT_REG, 29 },
+ { "f30", GDB_SIZEOF_FLOAT_REG, 30 },
+ { "f31", GDB_SIZEOF_FLOAT_REG, 31 },
+
+ { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, nip) },
+ { "msr", GDB_SIZEOF_REG, offsetof(struct pt_regs, msr) },
+ { "cr", GDB_SIZEOF_REG_U32, offsetof(struct pt_regs, ccr) },
+ { "lr", GDB_SIZEOF_REG, offsetof(struct pt_regs, link) },
+ { "ctr", GDB_SIZEOF_REG_U32, offsetof(struct pt_regs, ctr) },
+ { "xer", GDB_SIZEOF_REG, offsetof(struct pt_regs, xer) },
+};
-#ifdef CONFIG_FSL_BOOKE
-#ifdef CONFIG_SPE
- for (reg = 0; reg < 32; reg++)
- UNPACK64(current->thread.evr[reg], ptr);
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return NULL;
+
+ if (regno < 32 || regno >= 64)
+ /* First 0 -> 31 gpr registers*/
+ /* pc, msr, ls... registers 64 -> 69 */
+ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+ dbg_reg_def[regno].size);
+
+ if (regno >= 32 && regno < 64) {
+ /* FP registers 32 -> 63 */
+#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE)
+ if (current)
+ memcpy(mem, &current->thread.evr[regno-32],
+ dbg_reg_def[regno].size);
#else
- ptr += 32;
+ /* fp registers not used by kernel, leave zero */
+ memset(mem, 0, dbg_reg_def[regno].size);
#endif
+ }
+
+ return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return -EINVAL;
+
+ if (regno < 32 || regno >= 64)
+ /* First 0 -> 31 gpr registers*/
+ /* pc, msr, ls... registers 64 -> 69 */
+ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+ dbg_reg_def[regno].size);
+
+ if (regno >= 32 && regno < 64) {
+ /* FP registers 32 -> 63 */
+#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE)
+ memcpy(&current->thread.evr[regno-32], mem,
+ dbg_reg_def[regno].size);
#else
- /* fp registers not used by kernel, leave zero */
- ptr += 32 * 8 / sizeof(int);
+ /* fp registers not used by kernel, leave zero */
+ return 0;
#endif
+ }
- UNPACK64(regs->nip, ptr);
- UNPACK64(regs->msr, ptr);
- UNPACK32(regs->ccr, ptr);
- UNPACK64(regs->link, ptr);
- UNPACK64(regs->ctr, ptr);
- UNPACK32(regs->xer, ptr);
-
- BUG_ON((unsigned long)ptr >
- (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
+ return 0;
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
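
The dbg_reg_def[] table replaces the hand-rolled PACK64/PACK32 marshalling with a generic name/size/offset description of each register that the kgdb core can walk one register at a time. A small stand-alone sketch of that offset-table technique (the frame layout and register names here are invented for illustration):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct frame {
	unsigned long gpr[4];
	unsigned long pc;
};

struct reg_def {
	const char *name;
	int size;
	int offset;
};

static const struct reg_def regs_def[] = {
	{ "r0", sizeof(unsigned long), offsetof(struct frame, gpr[0]) },
	{ "r1", sizeof(unsigned long), offsetof(struct frame, gpr[1]) },
	{ "r2", sizeof(unsigned long), offsetof(struct frame, gpr[2]) },
	{ "pc", sizeof(unsigned long), offsetof(struct frame, pc) },
};

/* copy one register out of the frame purely from its table entry */
static const char *get_reg(const struct frame *f, int regno, void *mem)
{
	memcpy(mem, (const char *)f + regs_def[regno].offset, regs_def[regno].size);
	return regs_def[regno].name;
}

int main(void)
{
	struct frame f = { { 1, 2, 3, 4 }, 0x1234 };
	unsigned long val;
	const char *name = get_reg(&f, 3, &val);

	printf("%s = %#lx\n", name, val);
	return 0;
}
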
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 428d0e538ae..b06bdae0406 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -127,7 +127,7 @@ static void kvm_patch_ins_nop(u32 *inst)
static void kvm_patch_ins_b(u32 *inst, int addr)
{
-#ifdef CONFIG_RELOCATABLE
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
 /* On relocatable kernels interrupt handlers and our code
 can be in different regions, so we don't patch them */
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 286d9783d93..a9b32967cff 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1406,37 +1406,42 @@ static long ppc_del_hwdebug(struct task_struct *child, long addr, long data)
* Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
* we mark them as obsolete now, they will be removed in a future version
*/
-static long arch_ptrace_old(struct task_struct *child, long request, long addr,
- long data)
+static long arch_ptrace_old(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
+ void __user *datavp = (void __user *) data;
+
switch (request) {
case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_GPR, 0, 32 * sizeof(long),
- (void __user *) data);
+ datavp);
case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_GPR, 0, 32 * sizeof(long),
- (const void __user *) data);
+ datavp);
case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_FPR, 0, 32 * sizeof(double),
- (void __user *) data);
+ datavp);
case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_FPR, 0, 32 * sizeof(double),
- (const void __user *) data);
+ datavp);
}
return -EPERM;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret = -EPERM;
+ void __user *datavp = (void __user *) data;
+ unsigned long __user *datalp = datavp;
switch (request) {
/* read the word at location addr in the USER area. */
@@ -1446,11 +1451,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = -EIO;
/* convert to index and check */
#ifdef CONFIG_PPC32
- index = (unsigned long) addr >> 2;
+ index = addr >> 2;
if ((addr & 3) || (index > PT_FPSCR)
|| (child->thread.regs == NULL))
#else
- index = (unsigned long) addr >> 3;
+ index = addr >> 3;
if ((addr & 7) || (index > PT_FPSCR))
#endif
break;
@@ -1463,7 +1468,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp = ((unsigned long *)child->thread.fpr)
[TS_FPRWIDTH * (index - PT_FPR0)];
}
- ret = put_user(tmp,(unsigned long __user *) data);
+ ret = put_user(tmp, datalp);
break;
}
@@ -1474,11 +1479,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = -EIO;
/* convert to index and check */
#ifdef CONFIG_PPC32
- index = (unsigned long) addr >> 2;
+ index = addr >> 2;
if ((addr & 3) || (index > PT_FPSCR)
|| (child->thread.regs == NULL))
#else
- index = (unsigned long) addr >> 3;
+ index = addr >> 3;
if ((addr & 7) || (index > PT_FPSCR))
#endif
break;
@@ -1525,11 +1530,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
dbginfo.features = 0;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
- if (!access_ok(VERIFY_WRITE, data,
+ if (!access_ok(VERIFY_WRITE, datavp,
sizeof(struct ppc_debug_info)))
return -EFAULT;
- ret = __copy_to_user((struct ppc_debug_info __user *)data,
- &dbginfo, sizeof(struct ppc_debug_info)) ?
+ ret = __copy_to_user(datavp, &dbginfo,
+ sizeof(struct ppc_debug_info)) ?
-EFAULT : 0;
break;
}
@@ -1537,11 +1542,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PPC_PTRACE_SETHWDEBUG: {
struct ppc_hw_breakpoint bp_info;
- if (!access_ok(VERIFY_READ, data,
+ if (!access_ok(VERIFY_READ, datavp,
sizeof(struct ppc_hw_breakpoint)))
return -EFAULT;
- ret = __copy_from_user(&bp_info,
- (struct ppc_hw_breakpoint __user *)data,
+ ret = __copy_from_user(&bp_info, datavp,
sizeof(struct ppc_hw_breakpoint)) ?
-EFAULT : 0;
if (!ret)
@@ -1560,11 +1564,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
if (addr > 0)
break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- ret = put_user(child->thread.dac1,
- (unsigned long __user *)data);
+ ret = put_user(child->thread.dac1, datalp);
#else
- ret = put_user(child->thread.dabr,
- (unsigned long __user *)data);
+ ret = put_user(child->thread.dabr, datalp);
#endif
break;
}
@@ -1580,7 +1582,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_GPR,
0, sizeof(struct pt_regs),
- (void __user *) data);
+ datavp);
#ifdef CONFIG_PPC64
case PTRACE_SETREGS64:
@@ -1589,19 +1591,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_GPR,
0, sizeof(struct pt_regs),
- (const void __user *) data);
+ datavp);
case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_FPR,
0, sizeof(elf_fpregset_t),
- (void __user *) data);
+ datavp);
case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_FPR,
0, sizeof(elf_fpregset_t),
- (const void __user *) data);
+ datavp);
#ifdef CONFIG_ALTIVEC
case PTRACE_GETVRREGS:
@@ -1609,40 +1611,40 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
REGSET_VMX,
0, (33 * sizeof(vector128) +
sizeof(u32)),
- (void __user *) data);
+ datavp);
case PTRACE_SETVRREGS:
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_VMX,
0, (33 * sizeof(vector128) +
sizeof(u32)),
- (const void __user *) data);
+ datavp);
#endif
#ifdef CONFIG_VSX
case PTRACE_GETVSRREGS:
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_VSX,
0, 32 * sizeof(double),
- (void __user *) data);
+ datavp);
case PTRACE_SETVSRREGS:
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_VSX,
0, 32 * sizeof(double),
- (const void __user *) data);
+ datavp);
#endif
#ifdef CONFIG_SPE
case PTRACE_GETEVRREGS:
/* Get the child spe register state. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_SPE, 0, 35 * sizeof(u32),
- (void __user *) data);
+ datavp);
case PTRACE_SETEVRREGS:
/* Set the child spe register state. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_SPE, 0, 35 * sizeof(u32),
- (const void __user *) data);
+ datavp);
#endif
 /* Old reverse args ptrace calls */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 2a178b0ebcd..ce6f61c6f87 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -497,9 +497,8 @@ static void __init emergency_stack_init(void)
}
/*
- * Called into from start_kernel, after lock_kernel has been called.
- * Initializes bootmem, which is unsed to manage page allocation until
- * mem_init is called.
+ * Called into from start_kernel, this initializes bootmem, which is used
+ * to manage page allocation until mem_init is called.
*/
void __init setup_arch(char **cmdline_p)
{
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index b1b6043a56c..4e5bf1edc0f 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -23,7 +23,6 @@
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 049846911ce..1cc471faac2 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -416,7 +416,7 @@ lightweight_exit:
lwz r3, VCPU_PC(r4)
mtsrr0 r3
lwz r3, VCPU_SHARED(r4)
- lwz r3, VCPU_SHARED_MSR(r3)
+ lwz r3, (VCPU_SHARED_MSR + 4)(r3)
oris r3, r3, KVMPPC_MSR_MASK@h
ori r3, r3, KVMPPC_MSR_MASK@l
mtsrr1 r3
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 71750f2dd5d..e3768ee9b59 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -138,8 +138,8 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
free_page((unsigned long)vcpu->arch.shared);
- kvmppc_e500_tlb_uninit(vcpu_e500);
kvm_vcpu_uninit(vcpu);
+ kvmppc_e500_tlb_uninit(vcpu_e500);
kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2f87a1627f6..38f756f2505 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -617,6 +617,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
switch (ioctl) {
case KVM_PPC_GET_PVINFO: {
struct kvm_ppc_pvinfo pvinfo;
+ memset(&pvinfo, 0, sizeof(pvinfo));
r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
r = -EFAULT;
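The memset added above closes a potential kernel stack infoleak: pvinfo is a stack structure that is copied to user space in full, so any uninitialized bytes must be cleared first. A minimal stand-alone sketch of the same pattern, using a hypothetical struct rather than the KVM one:

#include <stdio.h>
#include <string.h>

struct reply {
	unsigned int flags;
	unsigned int hcall[4];
	unsigned char pad[8];	/* bytes that would otherwise carry stack garbage */
};

static void fill_reply(struct reply *r)
{
	memset(r, 0, sizeof(*r));	/* clear everything before filling it in */
	r->hcall[0] = 0x44000022;	/* example value only */
}

int main(void)
{
	struct reply r;

	fill_reply(&r);
	printf("flags=%u pad[0]=%u\n", r.flags, r.pad[0]);
	return 0;
}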
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 46fa04f12a9..a021f5827a3 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -35,7 +35,6 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
int i;
/* pause guest execution to avoid concurrent updates */
- local_irq_disable();
mutex_lock(&vcpu->mutex);
vcpu->arch.last_exit_type = 0xDEAD;
@@ -51,7 +50,6 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
vcpu->arch.timing_last_enter.tv64 = 0;
mutex_unlock(&vcpu->mutex);
- local_irq_enable();
}
static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 83f534d862d..5e9584405c4 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1123,7 +1123,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
else
#endif /* CONFIG_PPC_HAS_HASH_64K */
rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
- subpage_protection(pgdir, ea));
+ subpage_protection(mm, ea));
/* Dump some info in case of hash insertion failure, they should
* never happen so it is really useful to know if/when they do
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
index b0848b462bb..e7450bdbe83 100644
--- a/arch/powerpc/mm/highmem.c
+++ b/arch/powerpc/mm/highmem.c
@@ -62,7 +62,7 @@ void __kunmap_atomic(void *kvaddr)
return;
}
- type = kmap_atomic_idx_pop();
+ type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
{
@@ -79,6 +79,8 @@ void __kunmap_atomic(void *kvaddr)
local_flush_tlb_page(NULL, vaddr);
}
#endif
+
+ kmap_atomic_idx_pop();
pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 8b04c54e596..8526bd9d2aa 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -138,8 +138,11 @@
cmpldi cr0,r15,0 /* Check for user region */
std r14,EX_TLB_ESR(r12) /* write crazy -1 to frame */
beq normal_tlb_miss
+
+ li r11,_PAGE_PRESENT|_PAGE_BAP_SX /* Base perm */
+ oris r11,r11,_PAGE_ACCESSED@h
/* XXX replace the RMW cycles with immediate loads + writes */
-1: mfspr r10,SPRN_MAS1
+ mfspr r10,SPRN_MAS1
cmpldi cr0,r15,8 /* Check for vmalloc region */
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 36c0c449a89..2a030d89bbc 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -585,6 +585,6 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
/* Finally limit subsequent allocations */
- memblock_set_current_limit(ppc64_memblock_base + ppc64_rma_size);
+ memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 5dec408d670..3532b92de98 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -798,17 +798,17 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
return spufs_create_root(sb, data);
}
-static int
-spufs_get_sb(struct file_system_type *fstype, int flags,
- const char *name, void *data, struct vfsmount *mnt)
+static struct dentry *
+spufs_mount(struct file_system_type *fstype, int flags,
+ const char *name, void *data)
{
- return get_sb_single(fstype, flags, data, spufs_fill_super, mnt);
+ return mount_single(fstype, flags, data, spufs_fill_super);
}
static struct file_system_type spufs_type = {
.owner = THIS_MODULE,
.name = "spufs",
- .get_sb = spufs_get_sb,
+ .mount = spufs_mount,
.kill_sb = kill_litter_super,
};
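This hunk follows the tree-wide VFS conversion from .get_sb to .mount. A generic sketch of the pattern for a hypothetical filesystem (names are assumptions, not from this patch): the .mount callback returns the root dentry directly instead of filling in a vfsmount.

#include <linux/fs.h>
#include <linux/module.h>

static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	return -ENOSYS;		/* placeholder: a real fs would build the root here */
}

static struct dentry *examplefs_mount(struct file_system_type *fstype,
				      int flags, const char *name, void *data)
{
	return mount_single(fstype, flags, data, examplefs_fill_super);
}

static struct file_system_type examplefs_type = {
	.owner   = THIS_MODULE,
	.name    = "examplefs",
	.mount   = examplefs_mount,
	.kill_sb = kill_litter_super,
};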
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index c667f0f02c3..3139814f643 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -47,6 +47,12 @@ config LPARCFG
config PPC_PSERIES_DEBUG
depends on PPC_PSERIES && PPC_EARLY_DEBUG
bool "Enable extra debug logging in platforms/pseries"
+ help
+ Say Y here if you want the pseries core to produce a bunch of
+ debug messages to the system log. Select this if you are having a
+ problem with the pseries core and want to see more of what is
+ going on. This does not enable debugging in lpar.c, which must
+ be manually done due to its verbosity.
default y
config PPC_SMLPAR
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 34b7dc12e73..17a11c82e6f 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -21,8 +21,6 @@
* Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com>
*/
-#undef DEBUG
-
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 4b7a062dee1..5fcc92a12d3 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -25,8 +25,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#undef DEBUG
-
#include <linux/pci.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index dceb8d1a843..4fcb5a4e60d 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -1,9 +1,12 @@
/*
* Freescale LBC and UPM routines.
*
- * Copyright (c) 2007-2008 MontaVista Software, Inc.
+ * Copyright © 2007-2008 MontaVista Software, Inc.
+ * Copyright © 2010 Freescale Semiconductor
*
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ * Author: Jack Lan <Jack.Lan@freescale.com>
+ * Author: Roy Zang <tie-fei.zang@freescale.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,39 +22,37 @@
#include <linux/types.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <asm/prom.h>
#include <asm/fsl_lbc.h>
static spinlock_t fsl_lbc_lock = __SPIN_LOCK_UNLOCKED(fsl_lbc_lock);
-static struct fsl_lbc_regs __iomem *fsl_lbc_regs;
+struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev;
+EXPORT_SYMBOL(fsl_lbc_ctrl_dev);
-static char __initdata *compat_lbc[] = {
- "fsl,pq2-localbus",
- "fsl,pq2pro-localbus",
- "fsl,pq3-localbus",
- "fsl,elbc",
-};
-
-static int __init fsl_lbc_init(void)
+/**
+ * fsl_lbc_addr - convert the base address
+ * @addr_base: base address of the memory bank
+ *
+ * This function converts an LBC base address into the right format for the
+ * BR register. If the SoC has an eLBC it returns the 32-bit physical address
+ * unchanged; otherwise it converts a 34-bit local bus physical address into
+ * the 32-bit format expected by the BR register (example: MPC8641).
+ */
+u32 fsl_lbc_addr(phys_addr_t addr_base)
{
- struct device_node *lbus;
- int i;
+ struct device_node *np = fsl_lbc_ctrl_dev->dev->of_node;
+ u32 addr = addr_base & 0xffff8000;
- for (i = 0; i < ARRAY_SIZE(compat_lbc); i++) {
- lbus = of_find_compatible_node(NULL, NULL, compat_lbc[i]);
- if (lbus)
- goto found;
- }
- return -ENODEV;
+ if (of_device_is_compatible(np, "fsl,elbc"))
+ return addr;
-found:
- fsl_lbc_regs = of_iomap(lbus, 0);
- of_node_put(lbus);
- if (!fsl_lbc_regs)
- return -ENOMEM;
- return 0;
+ return addr | ((addr_base & 0x300000000ull) >> 19);
}
-arch_initcall(fsl_lbc_init);
+EXPORT_SYMBOL(fsl_lbc_addr);
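For clarity, the sketch below (stand-alone user-space C with an illustrative address, not the driver code) reproduces the 34-bit to 32-bit folding that fsl_lbc_addr() performs for non-eLBC parts:

#include <stdio.h>
#include <stdint.h>

static uint32_t br_base(uint64_t addr_base, int has_elbc)
{
	uint32_t addr = addr_base & 0xffff8000;		/* BR keeps bits 31..15 */

	if (has_elbc)
		return addr;				/* eLBC: plain 32-bit base */

	/* fold address bits 33..32 down into the BR field (MPC8641-style) */
	return addr | ((addr_base & 0x300000000ull) >> 19);
}

int main(void)
{
	printf("0x%08x\n", br_base(0x3f0000000ull, 0));	/* hypothetical 34-bit base */
	return 0;
}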
/**
* fsl_lbc_find - find Localbus bank
@@ -65,15 +66,17 @@ arch_initcall(fsl_lbc_init);
int fsl_lbc_find(phys_addr_t addr_base)
{
int i;
+ struct fsl_lbc_regs __iomem *lbc;
- if (!fsl_lbc_regs)
+ if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
return -ENODEV;
- for (i = 0; i < ARRAY_SIZE(fsl_lbc_regs->bank); i++) {
- __be32 br = in_be32(&fsl_lbc_regs->bank[i].br);
- __be32 or = in_be32(&fsl_lbc_regs->bank[i].or);
+ lbc = fsl_lbc_ctrl_dev->regs;
+ for (i = 0; i < ARRAY_SIZE(lbc->bank); i++) {
+ __be32 br = in_be32(&lbc->bank[i].br);
+ __be32 or = in_be32(&lbc->bank[i].or);
- if (br & BR_V && (br & or & BR_BA) == addr_base)
+ if (br & BR_V && (br & or & BR_BA) == fsl_lbc_addr(addr_base))
return i;
}
@@ -94,22 +97,27 @@ int fsl_upm_find(phys_addr_t addr_base, struct fsl_upm *upm)
{
int bank;
__be32 br;
+ struct fsl_lbc_regs __iomem *lbc;
bank = fsl_lbc_find(addr_base);
if (bank < 0)
return bank;
- br = in_be32(&fsl_lbc_regs->bank[bank].br);
+ if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
+ return -ENODEV;
+
+ lbc = fsl_lbc_ctrl_dev->regs;
+ br = in_be32(&lbc->bank[bank].br);
switch (br & BR_MSEL) {
case BR_MS_UPMA:
- upm->mxmr = &fsl_lbc_regs->mamr;
+ upm->mxmr = &lbc->mamr;
break;
case BR_MS_UPMB:
- upm->mxmr = &fsl_lbc_regs->mbmr;
+ upm->mxmr = &lbc->mbmr;
break;
case BR_MS_UPMC:
- upm->mxmr = &fsl_lbc_regs->mcmr;
+ upm->mxmr = &lbc->mcmr;
break;
default:
return -EINVAL;
@@ -148,9 +156,12 @@ int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar)
int ret = 0;
unsigned long flags;
+ if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
+ return -ENODEV;
+
spin_lock_irqsave(&fsl_lbc_lock, flags);
- out_be32(&fsl_lbc_regs->mar, mar);
+ out_be32(&fsl_lbc_ctrl_dev->regs->mar, mar);
switch (upm->width) {
case 8:
@@ -172,3 +183,166 @@ int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar)
return ret;
}
EXPORT_SYMBOL(fsl_upm_run_pattern);
+
+static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl)
+{
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+ /* clear event registers */
+ setbits32(&lbc->ltesr, LTESR_CLEAR);
+ out_be32(&lbc->lteatr, 0);
+ out_be32(&lbc->ltear, 0);
+ out_be32(&lbc->lteccr, LTECCR_CLEAR);
+ out_be32(&lbc->ltedr, LTEDR_ENABLE);
+
+ /* Enable interrupts for any detected events */
+ out_be32(&lbc->lteir, LTEIR_ENABLE);
+
+ return 0;
+}
+
+/*
+ * NOTE: This interrupt is used to report localbus events of various kinds,
+ * such as transaction errors on the chipselects.
+ */
+
+static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data)
+{
+ struct fsl_lbc_ctrl *ctrl = data;
+ struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ u32 status;
+
+ status = in_be32(&lbc->ltesr);
+ if (!status)
+ return IRQ_NONE;
+
+ out_be32(&lbc->ltesr, LTESR_CLEAR);
+ out_be32(&lbc->lteatr, 0);
+ out_be32(&lbc->ltear, 0);
+ ctrl->irq_status = status;
+
+ if (status & LTESR_BM)
+ dev_err(ctrl->dev, "Local bus monitor time-out: "
+ "LTESR 0x%08X\n", status);
+ if (status & LTESR_WP)
+ dev_err(ctrl->dev, "Write protect error: "
+ "LTESR 0x%08X\n", status);
+ if (status & LTESR_ATMW)
+ dev_err(ctrl->dev, "Atomic write error: "
+ "LTESR 0x%08X\n", status);
+ if (status & LTESR_ATMR)
+ dev_err(ctrl->dev, "Atomic read error: "
+ "LTESR 0x%08X\n", status);
+ if (status & LTESR_CS)
+ dev_err(ctrl->dev, "Chip select error: "
+ "LTESR 0x%08X\n", status);
+ if (status & LTESR_UPM)
+ ;
+ if (status & LTESR_FCT) {
+ dev_err(ctrl->dev, "FCM command time-out: "
+ "LTESR 0x%08X\n", status);
+ smp_wmb();
+ wake_up(&ctrl->irq_wait);
+ }
+ if (status & LTESR_PAR) {
+ dev_err(ctrl->dev, "Parity or Uncorrectable ECC error: "
+ "LTESR 0x%08X\n", status);
+ smp_wmb();
+ wake_up(&ctrl->irq_wait);
+ }
+ if (status & LTESR_CC) {
+ smp_wmb();
+ wake_up(&ctrl->irq_wait);
+ }
+ if (status & ~LTESR_MASK)
+ dev_err(ctrl->dev, "Unknown error: "
+ "LTESR 0x%08X\n", status);
+ return IRQ_HANDLED;
+}
+
+/*
+ * fsl_lbc_ctrl_probe
+ *
+ * called by the device layer when it finds a device matching
+ * one our driver can handle. This code allocates all of
+ * the resources needed for the controller only. The
+ * resources for the NAND banks themselves are allocated
+ * in the chip probe function.
+*/
+
+static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
+{
+ int ret;
+
+ if (!dev->dev.of_node) {
+ dev_err(&dev->dev, "Device OF-Node is NULL");
+ return -EFAULT;
+ }
+
+ fsl_lbc_ctrl_dev = kzalloc(sizeof(*fsl_lbc_ctrl_dev), GFP_KERNEL);
+ if (!fsl_lbc_ctrl_dev)
+ return -ENOMEM;
+
+ dev_set_drvdata(&dev->dev, fsl_lbc_ctrl_dev);
+
+ spin_lock_init(&fsl_lbc_ctrl_dev->lock);
+ init_waitqueue_head(&fsl_lbc_ctrl_dev->irq_wait);
+
+ fsl_lbc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0);
+ if (!fsl_lbc_ctrl_dev->regs) {
+ dev_err(&dev->dev, "failed to get memory region\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ fsl_lbc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
+ if (fsl_lbc_ctrl_dev->irq == NO_IRQ) {
+ dev_err(&dev->dev, "failed to get irq resource\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ fsl_lbc_ctrl_dev->dev = &dev->dev;
+
+ ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev);
+ if (ret < 0)
+ goto err;
+
+ ret = request_irq(fsl_lbc_ctrl_dev->irq, fsl_lbc_ctrl_irq, 0,
+ "fsl-lbc", fsl_lbc_ctrl_dev);
+ if (ret != 0) {
+ dev_err(&dev->dev, "failed to install irq (%d)\n",
+ fsl_lbc_ctrl_dev->irq);
+ ret = fsl_lbc_ctrl_dev->irq;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ iounmap(fsl_lbc_ctrl_dev->regs);
+ kfree(fsl_lbc_ctrl_dev);
+ return ret;
+}
+
+static const struct of_device_id fsl_lbc_match[] = {
+ { .compatible = "fsl,elbc", },
+ { .compatible = "fsl,pq3-localbus", },
+ { .compatible = "fsl,pq2-localbus", },
+ { .compatible = "fsl,pq2pro-localbus", },
+ {},
+};
+
+static struct platform_driver fsl_lbc_ctrl_driver = {
+ .driver = {
+ .name = "fsl-lbc",
+ .of_match_table = fsl_lbc_match,
+ },
+ .probe = fsl_lbc_ctrl_probe,
+};
+
+static int __init fsl_lbc_init(void)
+{
+ return platform_driver_register(&fsl_lbc_ctrl_driver);
+}
+module_init(fsl_lbc_init);
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 412763672d2..9725369d432 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -50,6 +50,7 @@
#define RIO_ATMU_REGS_OFFSET 0x10c00
#define RIO_P_MSG_REGS_OFFSET 0x11000
#define RIO_S_MSG_REGS_OFFSET 0x13000
+#define RIO_GCCSR 0x13c
#define RIO_ESCSR 0x158
#define RIO_CCSR 0x15c
#define RIO_LTLEDCSR 0x0608
@@ -87,6 +88,9 @@
#define RIO_IPWSR_PWD 0x00000008
#define RIO_IPWSR_PWB 0x00000004
+#define RIO_EPWISR_PINT 0x80000000
+#define RIO_EPWISR_PW 0x00000001
+
#define RIO_MSG_DESC_SIZE 32
#define RIO_MSG_BUFFER_SIZE 4096
#define RIO_MIN_TX_RING_SIZE 2
@@ -1082,18 +1086,12 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
struct rio_priv *priv = port->priv;
u32 epwisr, tmp;
- ipwmr = in_be32(&priv->msg_regs->pwmr);
- ipwsr = in_be32(&priv->msg_regs->pwsr);
-
epwisr = in_be32(priv->regs_win + RIO_EPWISR);
- if (epwisr & 0x80000000) {
- tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
- pr_info("RIO_LTLEDCSR = 0x%x\n", tmp);
- out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
- }
+ if (!(epwisr & RIO_EPWISR_PW))
+ goto pw_done;
- if (!(epwisr & 0x00000001))
- return IRQ_HANDLED;
+ ipwmr = in_be32(&priv->msg_regs->pwmr);
+ ipwsr = in_be32(&priv->msg_regs->pwsr);
#ifdef DEBUG_PW
pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
@@ -1109,20 +1107,6 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
pr_debug(" PWB");
pr_debug(" )\n");
#endif
- out_be32(&priv->msg_regs->pwsr,
- ipwsr & (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
-
- if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
- priv->port_write_msg.err_count++;
- pr_info("RIO: Port-Write Transaction Err (%d)\n",
- priv->port_write_msg.err_count);
- }
- if (ipwsr & RIO_IPWSR_PWD) {
- priv->port_write_msg.discard_count++;
- pr_info("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
- priv->port_write_msg.discard_count);
- }
-
/* Schedule deferred processing if PW was received */
if (ipwsr & RIO_IPWSR_QFI) {
/* Save PW message (if there is room in FIFO),
@@ -1134,16 +1118,43 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
RIO_PW_MSG_SIZE);
} else {
priv->port_write_msg.discard_count++;
- pr_info("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
+ pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
priv->port_write_msg.discard_count);
}
+ /* Clear interrupt and issue Clear Queue command. This allows
+ * another port-write to be received.
+ */
+ out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_QFI);
+ out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
+
schedule_work(&priv->pw_work);
}
- /* Issue Clear Queue command. This allows another
- * port-write to be received.
- */
- out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
+ if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
+ priv->port_write_msg.err_count++;
+ pr_debug("RIO: Port-Write Transaction Err (%d)\n",
+ priv->port_write_msg.err_count);
+ /* Clear Transaction Error: port-write controller should be
+ * disabled when clearing this error
+ */
+ out_be32(&priv->msg_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
+ out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_TE);
+ out_be32(&priv->msg_regs->pwmr, ipwmr);
+ }
+
+ if (ipwsr & RIO_IPWSR_PWD) {
+ priv->port_write_msg.discard_count++;
+ pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
+ priv->port_write_msg.discard_count);
+ out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_PWD);
+ }
+
+pw_done:
+ if (epwisr & RIO_EPWISR_PINT) {
+ tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
+ pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
+ out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
+ }
return IRQ_HANDLED;
}
@@ -1461,6 +1472,7 @@ int fsl_rio_setup(struct platform_device *dev)
port->host_deviceid = fsl_rio_get_hdid(port->id);
port->priv = priv;
+ port->phys_efptr = 0x100;
rio_register_mport(port);
priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
@@ -1508,6 +1520,12 @@ int fsl_rio_setup(struct platform_device *dev)
dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
port->sys_size ? 65536 : 256);
+ if (port->host_deviceid >= 0)
+ out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST |
+ RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED);
+ else
+ out_be32(priv->regs_win + RIO_GCCSR, 0x00000000);
+
priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win
+ RIO_ATMU_REGS_OFFSET);
priv->maint_atmu_regs = priv->atmu_regs + 1;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 068e55d1bba..e0b98e71ff4 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -1,8 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
config SCHED_MC
def_bool y
depends on SMP
@@ -78,8 +73,6 @@ config VIRT_CPU_ACCOUNTING
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y
-mainmenu "Linux Kernel Configuration"
-
config S390
def_bool y
select USE_GENERIC_SMP_HELPERS if SMP
@@ -87,6 +80,7 @@ config S390
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_C_RECORDMCOUNT
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_GRAPH_TRACER
@@ -151,7 +145,7 @@ source "kernel/time/Kconfig"
config 64BIT
bool "64 bit kernel"
help
- Select this option if you have a 64 bit IBM zSeries machine
+ Select this option if you have an IBM z/Architecture machine
and want to use the 64 bit addressing mode.
config 32BIT
@@ -203,9 +197,18 @@ config HOTPLUG_CPU
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
+config SCHED_MC
+ def_bool y
+ prompt "Multi-core scheduler support"
+ depends on SMP
+ help
+ Multi-core scheduler support improves the CPU scheduler's decision
+ making when dealing with multi-core CPU chips at a cost of slightly
+ increased overhead in some places.
+
config SCHED_BOOK
bool "Book scheduler support"
- depends on SMP
+ depends on SMP && SCHED_MC
help
Book scheduler support improves the CPU scheduler's decision making
when dealing with machines that have several books.
@@ -215,7 +218,7 @@ config MATHEMU
depends on MARCH_G5
help
This option is required for IEEE compliant floating point arithmetic
- on older S/390 machines. Say Y unless you know your machine doesn't
+ on older ESA/390 machines. Say Y unless you know your machine doesn't
need this.
config COMPAT
@@ -244,8 +247,8 @@ config S390_EXEC_PROTECT
space programs and it also selects the addressing mode option above.
The kernel parameter noexec=on will enable this feature and also
switch the addressing modes, default is disabled. Enabling this (via
- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
- will reduce system performance.
+ kernel parameter) on machines earlier than IBM System z9 will
+ reduce system performance.
comment "Code generation options"
@@ -254,49 +257,46 @@ choice
default MARCH_G5
config MARCH_G5
- bool "S/390 model G5 and G6"
+ bool "System/390 model G5 and G6"
depends on !64BIT
help
Select this to build a 31 bit kernel that works
- on all S/390 and zSeries machines.
+ on all ESA/390 and z/Architecture machines.
config MARCH_Z900
- bool "IBM eServer zSeries model z800 and z900"
+ bool "IBM zSeries model z800 and z900"
help
- Select this to optimize for zSeries machines. This
- will enable some optimizations that are not available
- on older 31 bit only CPUs.
+ Select this to enable optimizations for model z800/z900 (2064 and
+ 2066 series). This will enable some optimizations that are not
+ available on older ESA/390 (31 Bit) only CPUs.
config MARCH_Z990
- bool "IBM eServer zSeries model z890 and z990"
+ bool "IBM zSeries model z890 and z990"
help
- Select this enable optimizations for model z890/z990.
- This will be slightly faster but does not work on
- older machines such as the z900.
+ Select this to enable optimizations for model z890/z990 (2084 and
+ 2086 series). The kernel will be slightly faster but will not work
+ on older machines.
config MARCH_Z9_109
bool "IBM System z9"
help
- Select this to enable optimizations for IBM System z9-109, IBM
- System z9 Enterprise Class (z9 EC), and IBM System z9 Business
- Class (z9 BC). The kernel will be slightly faster but will not
- work on older machines such as the z990, z890, z900, and z800.
+ Select this to enable optimizations for IBM System z9 (2094 and
+ 2096 series). The kernel will be slightly faster but will not work
+ on older machines.
config MARCH_Z10
bool "IBM System z10"
help
- Select this to enable optimizations for IBM System z10. The
- kernel will be slightly faster but will not work on older
- machines such as the z990, z890, z900, z800, z9-109, z9-ec
- and z9-bc.
+ Select this to enable optimizations for IBM System z10 (2097 and
+ 2098 series). The kernel will be slightly faster but will not work
+ on older machines.
config MARCH_Z196
bool "IBM zEnterprise 196"
help
- Select this to enable optimizations for IBM zEnterprise 196.
- The kernel will be slightly faster but will not work on older
- machines such as the z990, z890, z900, z800, z9-109, z9-ec,
- z9-bc, z10-ec and z10-bc.
+ Select this to enable optimizations for IBM zEnterprise 196
+ (2817 series). The kernel will be slightly faster but will not work
+ on older machines.
endchoice
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 45e0c6199f3..05221b13ffb 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -6,6 +6,18 @@ config TRACE_IRQFLAGS_SUPPORT
source "lib/Kconfig.debug"
+config STRICT_DEVMEM
+ def_bool y
+ prompt "Filter access to /dev/mem"
+ ---help---
+ This option restricts access to /dev/mem. If this option is
+ disabled, you allow userspace access to all memory, including
+ kernel and userspace memory. Accidental memory access is likely
+ to be disastrous.
+ Memory access is required for experts who want to debug the kernel.
+
+ If you are unsure, say Y.
+
config DEBUG_STRICT_USER_COPY_CHECKS
bool "Strict user copy size checks"
---help---
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 020e51c063d..cd4a81be9cf 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -638,18 +638,21 @@ __init int hypfs_diag_init(void)
pr_err("The hardware system does not support hypfs\n");
return -ENODATA;
}
- rc = diag224_get_name_table();
- if (rc) {
- diag204_free_buffer();
- pr_err("The hardware system does not provide all "
- "functions required by hypfs\n");
- }
if (diag204_info_type == INFO_EXT) {
rc = hypfs_dbfs_init();
if (rc)
- diag204_free_buffer();
+ return rc;
}
- return rc;
+ if (MACHINE_IS_LPAR) {
+ rc = diag224_get_name_table();
+ if (rc) {
+ pr_err("The hardware system does not provide all "
+ "functions required by hypfs\n");
+ debugfs_remove(dbfs_d204_file);
+ return rc;
+ }
+ }
+ return 0;
}
void hypfs_diag_exit(void)
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 74d98670be2..47cc446dab8 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -316,10 +316,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-static int hypfs_get_super(struct file_system_type *fst, int flags,
- const char *devname, void *data, struct vfsmount *mnt)
+static struct dentry *hypfs_mount(struct file_system_type *fst, int flags,
+ const char *devname, void *data)
{
- return get_sb_single(fst, flags, data, hypfs_fill_super, mnt);
+ return mount_single(fst, flags, data, hypfs_fill_super);
}
static void hypfs_kill_super(struct super_block *sb)
@@ -455,7 +455,7 @@ static const struct file_operations hypfs_file_ops = {
static struct file_system_type hypfs_type = {
.owner = THIS_MODULE,
.name = "s390_hypfs",
- .get_sb = hypfs_get_super,
+ .mount = hypfs_mount,
.kill_sb = hypfs_kill_super
};
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 8b1a52a137c..40e2ab0fa3f 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -73,18 +73,18 @@ cputime64_to_jiffies64(cputime64_t cputime)
}
/*
- * Convert cputime to milliseconds and back.
+ * Convert cputime to microseconds and back.
*/
static inline unsigned int
-cputime_to_msecs(const cputime_t cputime)
+cputime_to_usecs(const cputime_t cputime)
{
- return cputime_div(cputime, 4096000);
+ return cputime_div(cputime, 4096);
}
static inline cputime_t
-msecs_to_cputime(const unsigned int m)
+usecs_to_cputime(const unsigned int m)
{
- return (cputime_t) m * 4096000;
+ return (cputime_t) m * 4096;
}
/*
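The change above renames the millisecond helpers to microsecond helpers: one s390 cputime tick is 1/4096 of a microsecond, so the scale factor drops from 4096000 to 4096. A stand-alone sketch of the round trip, assuming that tick size (not kernel code):

#include <stdio.h>

typedef unsigned long long cputime_t;	/* ticks of 1/4096 microsecond */

static unsigned int cputime_to_usecs(cputime_t c)
{
	return c / 4096;
}

static cputime_t usecs_to_cputime(unsigned int us)
{
	return (cputime_t)us * 4096;
}

int main(void)
{
	cputime_t c = usecs_to_cputime(1500);	/* 1.5 ms of CPU time */

	printf("%u usecs\n", cputime_to_usecs(c));
	return 0;
}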
diff --git a/arch/s390/include/asm/dasd.h b/arch/s390/include/asm/dasd.h
index 218bce81ec7..b604a9186f8 100644
--- a/arch/s390/include/asm/dasd.h
+++ b/arch/s390/include/asm/dasd.h
@@ -217,6 +217,25 @@ typedef struct dasd_symmio_parms {
int rssd_result_len;
} __attribute__ ((packed)) dasd_symmio_parms_t;
+/*
+ * Data returned by Sense Path Group ID (SNID)
+ */
+struct dasd_snid_data {
+ struct {
+ __u8 group:2;
+ __u8 reserve:2;
+ __u8 mode:1;
+ __u8 res:3;
+ } __attribute__ ((packed)) path_state;
+ __u8 pgid[11];
+} __attribute__ ((packed));
+
+struct dasd_snid_ioctl_data {
+ struct dasd_snid_data data;
+ __u8 path_mask;
+} __attribute__ ((packed));
+
+
/********************************************************************************
* SECTION: Definition of IOCTLs
*
@@ -261,25 +280,10 @@ typedef struct dasd_symmio_parms {
/* Set Attributes (cache operations) */
#define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t)
+/* Get Sense Path Group ID (SNID) data */
+#define BIODASDSNID _IOWR(DASD_IOCTL_LETTER, 1, struct dasd_snid_ioctl_data)
+
#define BIODASDSYMMIO _IOWR(DASD_IOCTL_LETTER, 240, dasd_symmio_parms_t)
#endif /* DASD_H */
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-indent-level: 4
- * c-brace-imaginary-offset: 0
- * c-brace-offset: -4
- * c-argdecl-indent: 4
- * c-label-offset: -4
- * c-continued-statement-offset: 4
- * c-continued-brace-offset: 0
- * indent-tabs-mode: nil
- * tab-width: 8
- * End:
- */
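The new BIODASDSNID ioctl exposes Sense Path Group ID data to user space. A hypothetical user-space caller might look like the sketch below; the device node, path mask value, and header location are assumptions for illustration, not part of the patch:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/dasd.h>		/* assumed location of the exported header */

int main(void)
{
	struct dasd_snid_ioctl_data snid = { .path_mask = 0x80 };
	int fd = open("/dev/dasda", O_RDONLY);	/* hypothetical device node */

	if (fd < 0 || ioctl(fd, BIODASDSNID, &snid) < 0) {
		perror("BIODASDSNID");
		return 1;
	}
	printf("path group state: group=%u mode=%u\n",
	       (unsigned)snid.data.path_state.group,
	       (unsigned)snid.data.path_state.mode);
	close(fd);
	return 0;
}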
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index a8729ea7e9a..3c987e9ec8d 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -130,6 +130,11 @@ struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
+static inline int devmem_is_allowed(unsigned long pfn)
+{
+ return 0;
+}
+
#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index f3c1b823c9a..33982e7ce04 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -66,9 +66,9 @@ int main(void)
DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
/* constants used by the vdso */
- DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
- DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
- DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
+ DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
+ DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
+ DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
BLANK();
/* constants for SIGP */
DEFINE(__SIGP_STOP, sigp_stop);
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 1e6449c79ab..53acaa86dd9 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -25,7 +25,6 @@
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index d149609e46e..3b7e7dddc32 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -282,8 +282,6 @@ static noinline __init void setup_facility_list(void)
static noinline __init void setup_hpage(void)
{
#ifndef CONFIG_DEBUG_PAGEALLOC
- unsigned int facilities;
-
if (!test_facility(2) || !test_facility(8))
return;
S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 5efce720298..1ecc337fb67 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -557,6 +557,7 @@ pgm_svcper:
# per was called from kernel, must be kprobes
#
kernel_per:
+ REENABLE_IRQS
mvi SP_SVCNR(%r15),0xff # set trap indication to pgm check
mvi SP_SVCNR+1(%r15),0xff
la %r2,SP_PTREGS(%r15) # address of register-save area
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index a2be23922f4..8f3e802174d 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -568,6 +568,7 @@ pgm_svcper:
# per was called from kernel, must be kprobes
#
kernel_per:
+ REENABLE_IRQS
xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
la %r2,SP_PTREGS(%r15) # address of register-save area
brasl %r14,do_single_step
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 2a3d2bf6f08..2564793ec2b 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -30,6 +30,7 @@
#include <asm/sections.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/hardirq.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -212,7 +213,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
/* Set the PER control regs, turns on single step for this address */
__ctl_load(kprobe_per_regs, 9, 11);
regs->psw.mask |= PSW_MASK_PER;
- regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+ regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
@@ -239,7 +240,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
__get_cpu_var(current_kprobe) = p;
/* Save the interrupt and per flags */
kcb->kprobe_saved_imask = regs->psw.mask &
- (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+ (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
/* Save the control regs that govern PER */
__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
}
@@ -348,6 +349,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct hlist_node *node, *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+ kprobe_opcode_t *correct_ret_addr = NULL;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
@@ -370,10 +372,32 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
/* another task is sharing our hash bucket */
continue;
- if (ri->rp && ri->rp->handler)
- ri->rp->handler(ri, regs);
+ orig_ret_address = (unsigned long)ri->ret_addr;
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+ correct_ret_addr = ri->ret_addr;
+ hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
orig_ret_address = (unsigned long)ri->ret_addr;
+
+ if (ri->rp && ri->rp->handler) {
+ ri->ret_addr = correct_ret_addr;
+ ri->rp->handler(ri, regs);
+ }
+
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address) {
@@ -385,7 +409,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
break;
}
}
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
reset_current_kprobe();
@@ -478,7 +502,7 @@ out:
return 1;
}
-int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -502,8 +526,9 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
regs->psw.mask |= kcb->kprobe_saved_imask;
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
- else
+ else {
reset_current_kprobe();
+ }
preempt_enable_no_resched();
break;
case KPROBE_HIT_ACTIVE:
@@ -546,6 +571,18 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
return 0;
}
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ int ret;
+
+ if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+ local_irq_disable();
+ ret = kprobe_trap_handler(regs, trapnr);
+ if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+ local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
+ return ret;
+}
+
/*
* Wrapper routine to for handling exceptions.
*/
@@ -553,8 +590,12 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
+ struct pt_regs *regs = args->regs;
int ret = NOTIFY_DONE;
+ if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+ local_irq_disable();
+
switch (val) {
case DIE_BPT:
if (kprobe_handler(args->regs))
@@ -565,16 +606,17 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
ret = NOTIFY_STOP;
break;
case DIE_TRAP:
- /* kprobe_running() needs smp_processor_id() */
- preempt_disable();
- if (kprobe_running() &&
- kprobe_fault_handler(args->regs, args->trapnr))
+ if (!preemptible() && kprobe_running() &&
+ kprobe_trap_handler(args->regs, args->trapnr))
ret = NOTIFY_STOP;
- preempt_enable();
break;
default:
break;
}
+
+ if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+ local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
+
return ret;
}
@@ -588,6 +630,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
/* setup return addr to the jprobe handler routine */
regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
+ regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
/* r14 is the function return address */
kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 83339d33c4b..019bb714db4 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -343,7 +343,8 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
return __poke_user(child, addr, data);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
ptrace_area parea;
int copied, ret;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e3ceb911dc7..6f6350826c8 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -761,6 +761,9 @@ static void __init setup_hwcaps(void)
case 0x2098:
strcpy(elf_platform, "z10");
break;
+ case 0x2817:
+ strcpy(elf_platform, "z196");
+ break;
}
}
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index f04d93aa48e..5c9e439bf3f 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -106,11 +106,13 @@ static int stsi_15_1_x(struct sysinfo_15_1_x *info, char *page, int len)
for (i = 0; i < TOPOLOGY_NR_MAG; i++)
len += sprintf(page + len, " %d", info->mag[i]);
len += sprintf(page + len, "\n");
+#ifdef CONFIG_SCHED_MC
store_topology(info);
len += sprintf(page + len, "CPU Topology SW: ");
for (i = 0; i < TOPOLOGY_NR_MAG; i++)
len += sprintf(page + len, " %d", info->mag[i]);
len += sprintf(page + len, "\n");
+#endif
return len;
}
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index a9dee9048ee..94b06c31fc8 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -53,8 +53,10 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
cpumask_t mask;
cpus_clear(mask);
- if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
- return cpu_possible_map;
+ if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
+ cpumask_copy(&mask, cpumask_of(cpu));
+ return mask;
+ }
while (info) {
if (cpu_isset(cpu, info->mask)) {
mask = info->mask;
diff --git a/arch/s390/kernel/vdso32/clock_getres.S b/arch/s390/kernel/vdso32/clock_getres.S
index 9532c4e6a9d..36aaa25d05d 100644
--- a/arch/s390/kernel/vdso32/clock_getres.S
+++ b/arch/s390/kernel/vdso32/clock_getres.S
@@ -19,9 +19,9 @@
.type __kernel_clock_getres,@function
__kernel_clock_getres:
.cfi_startproc
- chi %r2,CLOCK_REALTIME
+ chi %r2,__CLOCK_REALTIME
je 0f
- chi %r2,CLOCK_MONOTONIC
+ chi %r2,__CLOCK_MONOTONIC
jne 3f
0: ltr %r3,%r3
jz 2f /* res == NULL */
@@ -34,6 +34,6 @@ __kernel_clock_getres:
3: lhi %r1,__NR_clock_getres /* fallback to svc */
svc 0
br %r14
-4: .long CLOCK_REALTIME_RES
+4: .long __CLOCK_REALTIME_RES
.cfi_endproc
.size __kernel_clock_getres,.-__kernel_clock_getres
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 96964395427..b2224e0b974 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -21,9 +21,9 @@ __kernel_clock_gettime:
.cfi_startproc
basr %r5,0
0: al %r5,21f-0b(%r5) /* get &_vdso_data */
- chi %r2,CLOCK_REALTIME
+ chi %r2,__CLOCK_REALTIME
je 10f
- chi %r2,CLOCK_MONOTONIC
+ chi %r2,__CLOCK_MONOTONIC
jne 19f
/* CLOCK_MONOTONIC */
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
index 9ce8caafdb4..176e1f75f9a 100644
--- a/arch/s390/kernel/vdso64/clock_getres.S
+++ b/arch/s390/kernel/vdso64/clock_getres.S
@@ -19,9 +19,9 @@
.type __kernel_clock_getres,@function
__kernel_clock_getres:
.cfi_startproc
- cghi %r2,CLOCK_REALTIME
+ cghi %r2,__CLOCK_REALTIME
je 0f
- cghi %r2,CLOCK_MONOTONIC
+ cghi %r2,__CLOCK_MONOTONIC
je 0f
cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */
jne 2f
@@ -39,6 +39,6 @@ __kernel_clock_getres:
2: lghi %r1,__NR_clock_getres /* fallback to svc */
svc 0
br %r14
-3: .quad CLOCK_REALTIME_RES
+3: .quad __CLOCK_REALTIME_RES
.cfi_endproc
.size __kernel_clock_getres,.-__kernel_clock_getres
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index f40467884a0..d46c95ed5f1 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -20,11 +20,11 @@
__kernel_clock_gettime:
.cfi_startproc
larl %r5,_vdso_data
- cghi %r2,CLOCK_REALTIME
+ cghi %r2,__CLOCK_REALTIME
je 4f
cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */
je 9f
- cghi %r2,CLOCK_MONOTONIC
+ cghi %r2,__CLOCK_MONOTONIC
jne 12f
/* CLOCK_MONOTONIC */
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 38e641cdd97..45b405ca256 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -20,18 +20,17 @@
static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
- unsigned long mask, result;
+ unsigned long mask;
pte_t *ptep, pte;
struct page *page;
- result = write ? 0 : _PAGE_RO;
- mask = result | _PAGE_INVALID | _PAGE_SPECIAL;
+ mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
do {
pte = *ptep;
barrier();
- if ((pte_val(pte) & mask) != result)
+ if ((pte_val(pte) & mask) != 0)
return 0;
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
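The simplification above folds the write-protection requirement into a single mask, so one non-zero test rejects read-only (when writing), invalid, and special PTEs in the fast GUP path. Stand-alone sketch with illustrative bit values (not the real s390 definitions):

#include <stdio.h>

#define _PAGE_RO	0x200	/* illustrative values only */
#define _PAGE_INVALID	0x400
#define _PAGE_SPECIAL	0x004

static int gup_pte_ok(unsigned long pte_val, int write)
{
	unsigned long mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;

	return (pte_val & mask) == 0;	/* all disqualifying bits must be clear */
}

int main(void)
{
	printf("%d %d %d\n",
	       gup_pte_ok(0x000, 1),	/* writable, valid -> ok */
	       gup_pte_ok(_PAGE_RO, 1),	/* read-only but write requested -> reject */
	       gup_pte_ok(_PAGE_RO, 0));/* read-only, read access -> ok */
	return 0;
}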
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index be4a1558475..4293fdcb539 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -1,8 +1,3 @@
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-
-mainmenu "Linux/SCORE Kernel Configuration"
-
menu "Machine selection"
choice
diff --git a/arch/score/kernel/ptrace.c b/arch/score/kernel/ptrace.c
index 174c6422b09..55836188b21 100644
--- a/arch/score/kernel/ptrace.c
+++ b/arch/score/kernel/ptrace.c
@@ -325,7 +325,8 @@ void ptrace_disable(struct task_struct *child)
}
long
-arch_ptrace(struct task_struct *child, long request, long addr, long data)
+arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
unsigned long __user *datap = (void __user *)data;
@@ -335,14 +336,14 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = copy_regset_to_user(child, &user_score_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (void __user *)datap);
+ datap);
break;
case PTRACE_SETREGS:
ret = copy_regset_from_user(child, &user_score_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (const void __user *)datap);
+ datap);
break;
default:
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 0f40fc35d0a..7f217b3a50a 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -1,10 +1,3 @@
-#
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Linux/SuperH Kernel Configuration"
-
config SUPERH
def_bool y
select EMBEDDED
@@ -25,8 +18,11 @@ config SUPERH
select HAVE_KERNEL_LZO
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_GENERIC_HARDIRQS
+ select HAVE_SPARSE_IRQ
select RTC_LIB
select GENERIC_ATOMIC64
+ select GENERIC_HARDIRQS_NO_DEPRECATED
help
The SuperH is a RISC processor targeted for use in embedded systems
and consumer electronics; it was also used in the Sega Dreamcast
@@ -49,6 +45,7 @@ config SUPERH32
select HAVE_MIXED_BREAKPOINTS_REGS
select PERF_EVENTS
select ARCH_HIBERNATION_POSSIBLE if MMU
+ select SPARSE_IRQ
config SUPERH64
def_bool ARCH = "sh64"
@@ -78,19 +75,9 @@ config GENERIC_FIND_NEXT_BIT
config GENERIC_HWEIGHT
def_bool y
-config GENERIC_HARDIRQS
- def_bool y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
- def_bool y
-
config IRQ_PER_CPU
def_bool y
-config SPARSE_IRQ
- def_bool y
- depends on SUPERH32
-
config GENERIC_GPIO
def_bool n
@@ -206,6 +193,7 @@ config CPU_SH2
config CPU_SH2A
bool
select CPU_SH2
+ select UNCACHED_MAPPING
config CPU_SH3
bool
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 307b3a4a790..9c8c6e1a2a1 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -133,10 +133,7 @@ machdir-$(CONFIG_SOLUTION_ENGINE) += mach-se
machdir-$(CONFIG_SH_HP6XX) += mach-hp6xx
machdir-$(CONFIG_SH_DREAMCAST) += mach-dreamcast
machdir-$(CONFIG_SH_SH03) += mach-sh03
-machdir-$(CONFIG_SH_SECUREEDGE5410) += mach-snapgear
machdir-$(CONFIG_SH_RTS7751R2D) += mach-r2d
-machdir-$(CONFIG_SH_7751_SYSTEMH) += mach-systemh
-machdir-$(CONFIG_SH_EDOSK7705) += mach-edosk7705
machdir-$(CONFIG_SH_HIGHLANDER) += mach-highlander
machdir-$(CONFIG_SH_MIGOR) += mach-migor
machdir-$(CONFIG_SH_AP325RXA) += mach-ap325rxa
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index 9c94711aa6c..2018c7ea4c9 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -81,13 +81,6 @@ config SH_7343_SOLUTION_ENGINE
Select 7343 SolutionEngine if configuring for a Hitachi
SH7343 (SH-Mobile 3AS) evaluation board.
-config SH_7751_SYSTEMH
- bool "SystemH7751R"
- depends on CPU_SUBTYPE_SH7751R
- help
- Select SystemH if you are configuring for a Renesas SystemH
- 7751R evaluation board.
-
config SH_HP6XX
bool "HP6XX"
select SYS_SUPPORTS_APM_EMULATION
diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile
index 38ef655cc0f..be7d11d04b2 100644
--- a/arch/sh/boards/Makefile
+++ b/arch/sh/boards/Makefile
@@ -2,10 +2,12 @@
# Specific board support, not covered by a mach group.
#
obj-$(CONFIG_SH_MAGIC_PANEL_R2) += board-magicpanelr2.o
+obj-$(CONFIG_SH_SECUREEDGE5410) += board-secureedge5410.o
obj-$(CONFIG_SH_SH2007) += board-sh2007.o
obj-$(CONFIG_SH_SH7785LCR) += board-sh7785lcr.o
obj-$(CONFIG_SH_URQUELL) += board-urquell.o
obj-$(CONFIG_SH_SHMIN) += board-shmin.o
+obj-$(CONFIG_SH_EDOSK7705) += board-edosk7705.o
obj-$(CONFIG_SH_EDOSK7760) += board-edosk7760.o
obj-$(CONFIG_SH_ESPT) += board-espt.o
obj-$(CONFIG_SH_POLARIS) += board-polaris.o
diff --git a/arch/sh/boards/board-edosk7705.c b/arch/sh/boards/board-edosk7705.c
new file mode 100644
index 00000000000..4cb3bb74c36
--- /dev/null
+++ b/arch/sh/boards/board-edosk7705.c
@@ -0,0 +1,78 @@
+/*
+ * arch/sh/boards/renesas/edosk7705/setup.c
+ *
+ * Copyright (C) 2000 Kazumoto Kojima
+ *
+ * Hitachi SolutionEngine Support.
+ *
+ * Modified for edosk7705 development
+ * board by S. Dunn, 2003.
+ */
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/smc91x.h>
+#include <asm/machvec.h>
+#include <asm/sizes.h>
+
+#define SMC_IOBASE 0xA2000000
+#define SMC_IO_OFFSET 0x300
+#define SMC_IOADDR (SMC_IOBASE + SMC_IO_OFFSET)
+
+#define ETHERNET_IRQ 0x09
+
+static void __init sh_edosk7705_init_irq(void)
+{
+ make_imask_irq(ETHERNET_IRQ);
+}
+
+/* eth initialization functions */
+static struct smc91x_platdata smc91x_info = {
+ .flags = SMC91X_USE_16BIT | SMC91X_IO_SHIFT_1 | IORESOURCE_IRQ_LOWLEVEL,
+};
+
+static struct resource smc91x_res[] = {
+ [0] = {
+ .start = SMC_IOADDR,
+ .end = SMC_IOADDR + SZ_32 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = ETHERNET_IRQ,
+ .end = ETHERNET_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device smc91x_dev = {
+ .name = "smc91x",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(smc91x_res),
+ .resource = smc91x_res,
+
+ .dev = {
+ .platform_data = &smc91x_info,
+ },
+};
+
+/* platform init code */
+static struct platform_device *edosk7705_devices[] __initdata = {
+ &smc91x_dev,
+};
+
+static int __init init_edosk7705_devices(void)
+{
+ return platform_add_devices(edosk7705_devices,
+ ARRAY_SIZE(edosk7705_devices));
+}
+__initcall(init_edosk7705_devices);
+
+/*
+ * The Machine Vector
+ */
+static struct sh_machine_vector mv_edosk7705 __initmv = {
+ .mv_name = "EDOSK7705",
+ .mv_nr_irqs = 80,
+ .mv_init_irq = sh_edosk7705_init_irq,
+};
diff --git a/arch/sh/boards/mach-snapgear/setup.c b/arch/sh/boards/board-secureedge5410.c
index 331745dee37..32f875e8493 100644
--- a/arch/sh/boards/mach-snapgear/setup.c
+++ b/arch/sh/boards/board-secureedge5410.c
@@ -1,6 +1,4 @@
/*
- * linux/arch/sh/boards/snapgear/setup.c
- *
* Copyright (C) 2002 David McCullough <davidm@snapgear.com>
* Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org>
*
@@ -19,18 +17,19 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/machvec.h>
-#include <mach/snapgear.h>
+#include <mach/secureedge5410.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <cpu/timer.h>
+unsigned short secureedge5410_ioport;
+
/*
* EraseConfig handling functions
*/
-
static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id)
{
- (void)__raw_readb(0xb8000000); /* dummy read */
+ ctrl_delay(); /* dummy read */
printk("SnapGear: erase switch interrupt!\n");
@@ -39,21 +38,22 @@ static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id)
static int __init eraseconfig_init(void)
{
+ unsigned int irq = evt2irq(0x240);
+
printk("SnapGear: EraseConfig init\n");
+
/* Setup "EraseConfig" switch on external IRQ 0 */
- if (request_irq(IRL0_IRQ, eraseconfig_interrupt, IRQF_DISABLED,
+ if (request_irq(irq, eraseconfig_interrupt, IRQF_DISABLED,
"Erase Config", NULL))
printk("SnapGear: failed to register IRQ%d for Reset witch\n",
- IRL0_IRQ);
+ irq);
else
printk("SnapGear: registered EraseConfig switch on IRQ%d\n",
- IRL0_IRQ);
- return(0);
+ irq);
+ return 0;
}
-
module_init(eraseconfig_init);
-/****************************************************************************/
/*
* Initialize IRQ setting
*
@@ -62,7 +62,6 @@ module_init(eraseconfig_init);
* IRL2 = eth1
* IRL3 = crypto
*/
-
static void __init init_snapgear_IRQ(void)
{
printk("Setup SnapGear IRQ/IPR ...\n");
@@ -76,20 +75,5 @@ static void __init init_snapgear_IRQ(void)
static struct sh_machine_vector mv_snapgear __initmv = {
.mv_name = "SnapGear SecureEdge5410",
.mv_nr_irqs = 72,
-
- .mv_inb = snapgear_inb,
- .mv_inw = snapgear_inw,
- .mv_inl = snapgear_inl,
- .mv_outb = snapgear_outb,
- .mv_outw = snapgear_outw,
- .mv_outl = snapgear_outl,
-
- .mv_inb_p = snapgear_inb_p,
- .mv_inw_p = snapgear_inw,
- .mv_inl_p = snapgear_inl,
- .mv_outb_p = snapgear_outb_p,
- .mv_outw_p = snapgear_outw,
- .mv_outl_p = snapgear_outl,
-
.mv_init_irq = init_snapgear_IRQ,
};
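For context on the request_irq() change above: evt2irq() translates an INTEVT exception vector into the virtual irq number expected by the genirq layer, which is what lets the hard-coded IRL0_IRQ constant go away. A minimal sketch of the pattern, assuming only the 0x240 vector shown in the hunk; the handler and names are illustrative:

	#include <linux/init.h>
	#include <linux/interrupt.h>
	#include <asm/irq.h>		/* evt2irq() */

	static irqreturn_t example_isr(int irq, void *dev_id)
	{
		/* acknowledge / handle the board event here */
		return IRQ_HANDLED;
	}

	static int __init example_init(void)
	{
		unsigned int irq = evt2irq(0x240);	/* IRL0 vector, per the hunk above */

		/* request_irq() now takes the translated number, not IRL0_IRQ */
		return request_irq(irq, example_isr, IRQF_DISABLED,
				   "example", NULL);
	}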
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 3da116f47f0..07ea908c510 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -176,6 +176,21 @@ static void ap320_wvga_power_off(void *board_data)
__raw_writew(0, FPGA_LCDREG);
}
+const static struct fb_videomode ap325rxa_lcdc_modes[] = {
+ {
+ .name = "LB070WV1",
+ .xres = 800,
+ .yres = 480,
+ .left_margin = 32,
+ .right_margin = 160,
+ .hsync_len = 8,
+ .upper_margin = 63,
+ .lower_margin = 80,
+ .vsync_len = 1,
+ .sync = 0, /* hsync and vsync are active low */
+ },
+};
+
static struct sh_mobile_lcdc_info lcdc_info = {
.clock_source = LCDC_CLK_EXTERNAL,
.ch[0] = {
@@ -183,18 +198,8 @@ static struct sh_mobile_lcdc_info lcdc_info = {
.bpp = 16,
.interface_type = RGB18,
.clock_divider = 1,
- .lcd_cfg = {
- .name = "LB070WV1",
- .xres = 800,
- .yres = 480,
- .left_margin = 32,
- .right_margin = 160,
- .hsync_len = 8,
- .upper_margin = 63,
- .lower_margin = 80,
- .vsync_len = 1,
- .sync = 0, /* hsync and vsync are active low */
- },
+ .lcd_cfg = ap325rxa_lcdc_modes,
+ .num_cfg = ARRAY_SIZE(ap325rxa_lcdc_modes),
.lcd_size_cfg = { /* 7.0 inch */
.width = 152,
.height = 91,
@@ -481,7 +486,6 @@ static struct soc_camera_link ov7725_link = {
.power = ov7725_power,
.board_info = &ap325rxa_i2c_camera[0],
.i2c_adapter_id = 0,
- .module_name = "ov772x",
.priv = &ov7725_info,
};
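The LCDC hunks above (and the ecovec24, kfr2r09, migor and se7724 ones further down) all follow the same shape: the panel timings move out of the embedded lcd_cfg struct into a const struct fb_videomode array, and the channel now carries a pointer plus a num_cfg count. A reduced sketch of that pattern with placeholder timing values; only the field names are taken from the hunks:

	#include <linux/kernel.h>	/* ARRAY_SIZE() */
	#include <linux/fb.h>
	#include <video/sh_mobile_lcdc.h>

	static const struct fb_videomode example_modes[] = {
		{
			.name = "EXAMPLE-PANEL",	/* placeholder name */
			.xres = 800,
			.yres = 480,
			/* margins, sync lengths and .sync flags go here,
			 * exactly as in the board hunks above */
		},
	};

	static struct sh_mobile_lcdc_info example_lcdc_info = {
		.ch[0] = {
			.chan    = LCDC_CHAN_MAINLCD,
			.bpp     = 16,
			.lcd_cfg = example_modes,		/* was an embedded struct */
			.num_cfg = ARRAY_SIZE(example_modes),	/* new field */
		},
	};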
diff --git a/arch/sh/boards/mach-cayman/irq.c b/arch/sh/boards/mach-cayman/irq.c
index 1394b078db3..d7ac5af9d10 100644
--- a/arch/sh/boards/mach-cayman/irq.c
+++ b/arch/sh/boards/mach-cayman/irq.c
@@ -55,8 +55,9 @@ static struct irqaction cayman_action_pci2 = {
.flags = IRQF_DISABLED,
};
-static void enable_cayman_irq(unsigned int irq)
+static void enable_cayman_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned long flags;
unsigned long mask;
unsigned int reg;
@@ -72,8 +73,9 @@ static void enable_cayman_irq(unsigned int irq)
local_irq_restore(flags);
}
-void disable_cayman_irq(unsigned int irq)
+static void disable_cayman_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned long flags;
unsigned long mask;
unsigned int reg;
@@ -89,16 +91,10 @@ void disable_cayman_irq(unsigned int irq)
local_irq_restore(flags);
}
-static void ack_cayman_irq(unsigned int irq)
-{
- disable_cayman_irq(irq);
-}
-
struct irq_chip cayman_irq_type = {
.name = "Cayman-IRQ",
- .unmask = enable_cayman_irq,
- .mask = disable_cayman_irq,
- .mask_ack = ack_cayman_irq,
+ .irq_unmask = enable_cayman_irq,
+ .irq_mask = disable_cayman_irq,
};
int cayman_irq_demux(int evt)
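The cayman hunks above are the first instance of a conversion repeated in most of the board irq code below: irq_chip callbacks switch from the legacy .mask/.unmask/.mask_ack hooks taking an irq number to the irq_* hooks taking a struct irq_data pointer, and redundant ack helpers that merely called the mask function are dropped. A bare-bones sketch of the new shape; the register accesses and names are illustrative only:

	#include <linux/kernel.h>
	#include <linux/irq.h>

	static void example_mask(struct irq_data *data)
	{
		unsigned int irq = data->irq;	/* the irq number now lives in irq_data */

		pr_debug("masking board irq %u\n", irq);
		/* mask 'irq' in the board's interrupt controller registers */
	}

	static void example_unmask(struct irq_data *data)
	{
		unsigned int irq = data->irq;

		pr_debug("unmasking board irq %u\n", irq);
		/* unmask 'irq' in the board's interrupt controller registers */
	}

	static struct irq_chip example_irq_chip = {
		.name       = "example",
		.irq_mask   = example_mask,	/* was .mask(unsigned int irq) */
		.irq_unmask = example_unmask,	/* was .unmask(unsigned int irq) */
	};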
diff --git a/arch/sh/boards/mach-dreamcast/irq.c b/arch/sh/boards/mach-dreamcast/irq.c
index d932667410a..72e7ac9549d 100644
--- a/arch/sh/boards/mach-dreamcast/irq.c
+++ b/arch/sh/boards/mach-dreamcast/irq.c
@@ -60,8 +60,9 @@
*/
/* Disable the hardware event by masking its bit in its EMR */
-static inline void disable_systemasic_irq(unsigned int irq)
+static inline void disable_systemasic_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
__u32 emr = EMR_BASE + (LEVEL(irq) << 4) + (LEVEL(irq) << 2);
__u32 mask;
@@ -71,8 +72,9 @@ static inline void disable_systemasic_irq(unsigned int irq)
}
/* Enable the hardware event by setting its bit in its EMR */
-static inline void enable_systemasic_irq(unsigned int irq)
+static inline void enable_systemasic_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
__u32 emr = EMR_BASE + (LEVEL(irq) << 4) + (LEVEL(irq) << 2);
__u32 mask;
@@ -82,18 +84,19 @@ static inline void enable_systemasic_irq(unsigned int irq)
}
/* Acknowledge a hardware event by writing its bit back to its ESR */
-static void mask_ack_systemasic_irq(unsigned int irq)
+static void mask_ack_systemasic_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
__u32 esr = ESR_BASE + (LEVEL(irq) << 2);
- disable_systemasic_irq(irq);
+ disable_systemasic_irq(data);
outl((1 << EVENT_BIT(irq)), esr);
}
struct irq_chip systemasic_int = {
.name = "System ASIC",
- .mask = disable_systemasic_irq,
- .mask_ack = mask_ack_systemasic_irq,
- .unmask = enable_systemasic_irq,
+ .irq_mask = disable_systemasic_irq,
+ .irq_mask_ack = mask_ack_systemasic_irq,
+ .irq_unmask = enable_systemasic_irq,
};
/*
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 71a3368ab1f..2eaeb9e5958 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -231,14 +231,41 @@ static struct platform_device usb1_common_device = {
};
/* LCDC */
+const static struct fb_videomode ecovec_lcd_modes[] = {
+ {
+ .name = "Panel",
+ .xres = 800,
+ .yres = 480,
+ .left_margin = 220,
+ .right_margin = 110,
+ .hsync_len = 70,
+ .upper_margin = 20,
+ .lower_margin = 5,
+ .vsync_len = 5,
+ .sync = 0, /* hsync and vsync are active low */
+ },
+};
+
+const static struct fb_videomode ecovec_dvi_modes[] = {
+ {
+ .name = "DVI",
+ .xres = 1280,
+ .yres = 720,
+ .left_margin = 220,
+ .right_margin = 110,
+ .hsync_len = 40,
+ .upper_margin = 20,
+ .lower_margin = 5,
+ .vsync_len = 5,
+ .sync = 0, /* hsync and vsync are active low */
+ },
+};
+
static struct sh_mobile_lcdc_info lcdc_info = {
.ch[0] = {
.interface_type = RGB18,
.chan = LCDC_CHAN_MAINLCD,
.bpp = 16,
- .lcd_cfg = {
- .sync = 0, /* hsync and vsync are active low */
- },
.lcd_size_cfg = { /* 7.0 inch */
.width = 152,
.height = 91,
@@ -620,7 +647,6 @@ static struct soc_camera_link tw9910_link = {
.bus_id = 1,
.power = tw9910_power,
.board_info = &i2c_camera[0],
- .module_name = "tw9910",
.priv = &tw9910_info,
};
@@ -644,7 +670,6 @@ static struct soc_camera_link mt9t112_link1 = {
.power = mt9t112_power1,
.bus_id = 0,
.board_info = &i2c_camera[1],
- .module_name = "mt9t112",
.priv = &mt9t112_info1,
};
@@ -667,7 +692,6 @@ static struct soc_camera_link mt9t112_link2 = {
.power = mt9t112_power2,
.bus_id = 1,
.board_info = &i2c_camera[2],
- .module_name = "mt9t112",
.priv = &mt9t112_info2,
};
@@ -793,7 +817,6 @@ static struct sh_vou_pdata sh_vou_pdata = {
.flags = SH_VOU_HSYNC_LOW | SH_VOU_VSYNC_LOW,
.board_info = &ak8813,
.i2c_adap = 0,
- .module_name = "ak881x",
};
static struct resource sh_vou_resources[] = {
@@ -1079,33 +1102,18 @@ static int __init arch_setup(void)
if (gpio_get_value(GPIO_PTE6)) {
/* DVI */
lcdc_info.clock_source = LCDC_CLK_EXTERNAL;
- lcdc_info.ch[0].clock_divider = 1,
- lcdc_info.ch[0].lcd_cfg.name = "DVI";
- lcdc_info.ch[0].lcd_cfg.xres = 1280;
- lcdc_info.ch[0].lcd_cfg.yres = 720;
- lcdc_info.ch[0].lcd_cfg.left_margin = 220;
- lcdc_info.ch[0].lcd_cfg.right_margin = 110;
- lcdc_info.ch[0].lcd_cfg.hsync_len = 40;
- lcdc_info.ch[0].lcd_cfg.upper_margin = 20;
- lcdc_info.ch[0].lcd_cfg.lower_margin = 5;
- lcdc_info.ch[0].lcd_cfg.vsync_len = 5;
+ lcdc_info.ch[0].clock_divider = 1;
+ lcdc_info.ch[0].lcd_cfg = ecovec_dvi_modes;
+ lcdc_info.ch[0].num_cfg = ARRAY_SIZE(ecovec_dvi_modes);
gpio_set_value(GPIO_PTA2, 1);
gpio_set_value(GPIO_PTU1, 1);
} else {
/* Panel */
-
lcdc_info.clock_source = LCDC_CLK_PERIPHERAL;
- lcdc_info.ch[0].clock_divider = 2,
- lcdc_info.ch[0].lcd_cfg.name = "Panel";
- lcdc_info.ch[0].lcd_cfg.xres = 800;
- lcdc_info.ch[0].lcd_cfg.yres = 480;
- lcdc_info.ch[0].lcd_cfg.left_margin = 220;
- lcdc_info.ch[0].lcd_cfg.right_margin = 110;
- lcdc_info.ch[0].lcd_cfg.hsync_len = 70;
- lcdc_info.ch[0].lcd_cfg.upper_margin = 20;
- lcdc_info.ch[0].lcd_cfg.lower_margin = 5;
- lcdc_info.ch[0].lcd_cfg.vsync_len = 5;
+ lcdc_info.ch[0].clock_divider = 2;
+ lcdc_info.ch[0].lcd_cfg = ecovec_lcd_modes;
+ lcdc_info.ch[0].num_cfg = ARRAY_SIZE(ecovec_lcd_modes);
gpio_set_value(GPIO_PTR1, 1);
diff --git a/arch/sh/boards/mach-edosk7705/Makefile b/arch/sh/boards/mach-edosk7705/Makefile
deleted file mode 100644
index cd54acb5149..00000000000
--- a/arch/sh/boards/mach-edosk7705/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the EDOSK7705 specific parts of the kernel
-#
-
-obj-y := setup.o io.o
diff --git a/arch/sh/boards/mach-edosk7705/io.c b/arch/sh/boards/mach-edosk7705/io.c
deleted file mode 100644
index 5b9c57c4324..00000000000
--- a/arch/sh/boards/mach-edosk7705/io.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * arch/sh/boards/renesas/edosk7705/io.c
- *
- * Copyright (C) 2001 Ian da Silva, Jeremy Siegel
- * Based largely on io_se.c.
- *
- * I/O routines for Hitachi EDOSK7705 board.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/io.h>
-#include <mach/edosk7705.h>
-#include <asm/addrspace.h>
-
-#define SMC_IOADDR 0xA2000000
-
-/* Map the Ethernet addresses as if it is at 0x300 - 0x320 */
-static unsigned long sh_edosk7705_isa_port2addr(unsigned long port)
-{
- /*
- * SMC91C96 registers are 4 byte aligned rather than the
- * usual 2 byte!
- */
- if (port >= 0x300 && port < 0x320)
- return SMC_IOADDR + ((port - 0x300) * 2);
-
- maybebadio(port);
- return port;
-}
-
-/* Trying to read / write bytes on odd-byte boundaries to the Ethernet
- * registers causes problems. So we bit-shift the value and read / write
- * in 2 byte chunks. Setting the low byte to 0 does not cause problems
- * now as odd byte writes are only made on the bit mask / interrupt
- * register. This may not be the case in future Mar-2003 SJD
- */
-unsigned char sh_edosk7705_inb(unsigned long port)
-{
- if (port >= 0x300 && port < 0x320 && port & 0x01)
- return __raw_readw(port - 1) >> 8;
-
- return __raw_readb(sh_edosk7705_isa_port2addr(port));
-}
-
-void sh_edosk7705_outb(unsigned char value, unsigned long port)
-{
- if (port >= 0x300 && port < 0x320 && port & 0x01) {
- __raw_writew(((unsigned short)value << 8), port - 1);
- return;
- }
-
- __raw_writeb(value, sh_edosk7705_isa_port2addr(port));
-}
-
-void sh_edosk7705_insb(unsigned long port, void *addr, unsigned long count)
-{
- unsigned char *p = addr;
-
- while (count--)
- *p++ = sh_edosk7705_inb(port);
-}
-
-void sh_edosk7705_outsb(unsigned long port, const void *addr, unsigned long count)
-{
- unsigned char *p = (unsigned char *)addr;
-
- while (count--)
- sh_edosk7705_outb(*p++, port);
-}
diff --git a/arch/sh/boards/mach-edosk7705/setup.c b/arch/sh/boards/mach-edosk7705/setup.c
deleted file mode 100644
index d59225e26fb..00000000000
--- a/arch/sh/boards/mach-edosk7705/setup.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * arch/sh/boards/renesas/edosk7705/setup.c
- *
- * Copyright (C) 2000 Kazumoto Kojima
- *
- * Hitachi SolutionEngine Support.
- *
- * Modified for edosk7705 development
- * board by S. Dunn, 2003.
- */
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <asm/machvec.h>
-#include <mach/edosk7705.h>
-
-static void __init sh_edosk7705_init_irq(void)
-{
- /* This is the Ethernet interrupt */
- make_imask_irq(0x09);
-}
-
-/*
- * The Machine Vector
- */
-static struct sh_machine_vector mv_edosk7705 __initmv = {
- .mv_name = "EDOSK7705",
- .mv_nr_irqs = 80,
-
- .mv_inb = sh_edosk7705_inb,
- .mv_outb = sh_edosk7705_outb,
-
- .mv_insb = sh_edosk7705_insb,
- .mv_outsb = sh_edosk7705_outsb,
-
- .mv_init_irq = sh_edosk7705_init_irq,
-};
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 68994a163f6..9b60eaabf8f 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -126,6 +126,21 @@ static struct platform_device kfr2r09_sh_keysc_device = {
},
};
+const static struct fb_videomode kfr2r09_lcdc_modes[] = {
+ {
+ .name = "TX07D34VM0AAA",
+ .xres = 240,
+ .yres = 400,
+ .left_margin = 0,
+ .right_margin = 16,
+ .hsync_len = 8,
+ .upper_margin = 0,
+ .lower_margin = 1,
+ .vsync_len = 1,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ },
+};
+
static struct sh_mobile_lcdc_info kfr2r09_sh_lcdc_info = {
.clock_source = LCDC_CLK_BUS,
.ch[0] = {
@@ -134,18 +149,8 @@ static struct sh_mobile_lcdc_info kfr2r09_sh_lcdc_info = {
.interface_type = SYS18,
.clock_divider = 6,
.flags = LCDC_FLAGS_DWPOL,
- .lcd_cfg = {
- .name = "TX07D34VM0AAA",
- .xres = 240,
- .yres = 400,
- .left_margin = 0,
- .right_margin = 16,
- .hsync_len = 8,
- .upper_margin = 0,
- .lower_margin = 1,
- .vsync_len = 1,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- },
+ .lcd_cfg = kfr2r09_lcdc_modes,
+ .num_cfg = ARRAY_SIZE(kfr2r09_lcdc_modes),
.lcd_size_cfg = {
.width = 35,
.height = 58,
@@ -333,7 +338,6 @@ static struct soc_camera_link rj54n1_link = {
.power = camera_power,
.board_info = &kfr2r09_i2c_camera,
.i2c_adapter_id = 1,
- .module_name = "rj54n1cb0c",
.priv = &rj54n1_priv,
};
diff --git a/arch/sh/boards/mach-landisk/irq.c b/arch/sh/boards/mach-landisk/irq.c
index 96f38a4187d..e79412a4049 100644
--- a/arch/sh/boards/mach-landisk/irq.c
+++ b/arch/sh/boards/mach-landisk/irq.c
@@ -18,25 +18,24 @@
#include <linux/io.h>
#include <mach-landisk/mach/iodata_landisk.h>
-static void disable_landisk_irq(unsigned int irq)
+static void disable_landisk_irq(struct irq_data *data)
{
- unsigned char mask = 0xff ^ (0x01 << (irq - 5));
+ unsigned char mask = 0xff ^ (0x01 << (data->irq - 5));
__raw_writeb(__raw_readb(PA_IMASK) & mask, PA_IMASK);
}
-static void enable_landisk_irq(unsigned int irq)
+static void enable_landisk_irq(struct irq_data *data)
{
- unsigned char value = (0x01 << (irq - 5));
+ unsigned char value = (0x01 << (data->irq - 5));
__raw_writeb(__raw_readb(PA_IMASK) | value, PA_IMASK);
}
static struct irq_chip landisk_irq_chip __read_mostly = {
.name = "LANDISK",
- .mask = disable_landisk_irq,
- .unmask = enable_landisk_irq,
- .mask_ack = disable_landisk_irq,
+ .irq_mask = disable_landisk_irq,
+ .irq_unmask = enable_landisk_irq,
};
/*
@@ -50,7 +49,7 @@ void __init init_landisk_IRQ(void)
disable_irq_nosync(i);
set_irq_chip_and_handler_name(i, &landisk_irq_chip,
handle_level_irq, "level");
- enable_landisk_irq(i);
+ enable_landisk_irq(irq_get_irq_data(i));
}
__raw_writeb(0x00, PA_PWRINT_CLR);
}
diff --git a/arch/sh/boards/mach-microdev/io.c b/arch/sh/boards/mach-microdev/io.c
index 2960c659020..acdafb0c640 100644
--- a/arch/sh/boards/mach-microdev/io.c
+++ b/arch/sh/boards/mach-microdev/io.c
@@ -54,7 +54,7 @@
/*
* map I/O ports to memory-mapped addresses
*/
-static unsigned long microdev_isa_port2addr(unsigned long offset)
+void __iomem *microdev_ioport_map(unsigned long offset, unsigned int len)
{
unsigned long result;
@@ -72,16 +72,6 @@ static unsigned long microdev_isa_port2addr(unsigned long offset)
* Configuration Registers
*/
result = IO_SUPERIO_PHYS + (offset << 1);
-#if 0
- } else if (offset == KBD_DATA_REG || offset == KBD_CNTL_REG ||
- offset == KBD_STATUS_REG) {
- /*
- * SMSC FDC37C93xAPM SuperIO chip
- *
- * PS/2 Keyboard + Mouse (ports 0x60 and 0x64).
- */
- result = IO_SUPERIO_PHYS + (offset << 1);
-#endif
} else if (((offset >= IO_IDE1_BASE) &&
(offset < IO_IDE1_BASE + IO_IDE_EXTENT)) ||
(offset == IO_IDE1_MISC)) {
@@ -131,237 +121,5 @@ static unsigned long microdev_isa_port2addr(unsigned long offset)
result = PVR;
}
- return result;
-}
-
-#define PORT2ADDR(x) (microdev_isa_port2addr(x))
-
-static inline void delay(void)
-{
-#if defined(CONFIG_PCI)
- /* System board present, just make a dummy SRAM access. (CS0 will be
- mapped to PCI memory, probably good to avoid it.) */
- __raw_readw(0xa6800000);
-#else
- /* CS0 will be mapped to flash, ROM etc so safe to access it. */
- __raw_readw(0xa0000000);
-#endif
-}
-
-unsigned char microdev_inb(unsigned long port)
-{
-#ifdef CONFIG_PCI
- if (port >= PCIBIOS_MIN_IO)
- return microdev_pci_inb(port);
-#endif
- return *(volatile unsigned char*)PORT2ADDR(port);
-}
-
-unsigned short microdev_inw(unsigned long port)
-{
-#ifdef CONFIG_PCI
- if (port >= PCIBIOS_MIN_IO)
- return microdev_pci_inw(port);
-#endif
- return *(volatile unsigned short*)PORT2ADDR(port);
-}
-
-unsigned int microdev_inl(unsigned long port)
-{
-#ifdef CONFIG_PCI
- if (port >= PCIBIOS_MIN_IO)
- return microdev_pci_inl(port);
-#endif
- return *(volatile unsigned int*)PORT2ADDR(port);
-}
-
-void microdev_outw(unsigned short b, unsigned long port)
-{
-#ifdef CONFIG_PCI
- if (port >= PCIBIOS_MIN_IO) {
- microdev_pci_outw(b, port);
- return;
- }
-#endif
- *(volatile unsigned short*)PORT2ADDR(port) = b;
-}
-
-void microdev_outb(unsigned char b, unsigned long port)
-{
-#ifdef CONFIG_PCI
- if (port >= PCIBIOS_MIN_IO) {
- microdev_pci_outb(b, port);
- return;
- }
-#endif
-
- /*
- * There is a board feature with the current SH4-202 MicroDev in
- * that the 2 byte enables (nBE0 and nBE1) are tied together (and
- * to the Chip Select Line (Ethernet_CS)). Due to this connectivity,
- * it is not possible to safely perform 8-bit writes to the
- * Ethernet registers, as 16-bits will be consumed from the Data
- * lines (corrupting the other byte). Hence, this function is
- * written to implement 16-bit read/modify/write for all byte-wide
- * accesses.
- *
- * Note: there is no problem with byte READS (even or odd).
- *
- * Sean McGoogan - 16th June 2003.
- */
- if ((port >= IO_LAN91C111_BASE) &&
- (port < IO_LAN91C111_BASE + IO_LAN91C111_EXTENT)) {
- /*
- * Then are trying to perform a byte-write to the
- * LAN91C111. This needs special care.
- */
- if (port % 2 == 1) { /* is the port odd ? */
- /* unset bit-0, i.e. make even */
- const unsigned long evenPort = port-1;
- unsigned short word;
-
- /*
- * do a 16-bit read/write to write to 'port',
- * preserving even byte.
- *
- * Even addresses are bits 0-7
- * Odd addresses are bits 8-15
- */
- word = microdev_inw(evenPort);
- word = (word & 0xffu) | (b << 8);
- microdev_outw(word, evenPort);
- } else {
- /* else, we are trying to do an even byte write */
- unsigned short word;
-
- /*
- * do a 16-bit read/write to write to 'port',
- * preserving odd byte.
- *
- * Even addresses are bits 0-7
- * Odd addresses are bits 8-15
- */
- word = microdev_inw(port);
- word = (word & 0xff00u) | (b);
- microdev_outw(word, port);
- }
- } else {
- *(volatile unsigned char*)PORT2ADDR(port) = b;
- }
-}
-
-void microdev_outl(unsigned int b, unsigned long port)
-{
-#ifdef CONFIG_PCI
- if (port >= PCIBIOS_MIN_IO) {
- microdev_pci_outl(b, port);
- return;
- }
-#endif
- *(volatile unsigned int*)PORT2ADDR(port) = b;
-}
-
-unsigned char microdev_inb_p(unsigned long port)
-{
- unsigned char v = microdev_inb(port);
- delay();
- return v;
-}
-
-unsigned short microdev_inw_p(unsigned long port)
-{
- unsigned short v = microdev_inw(port);
- delay();
- return v;
-}
-
-unsigned int microdev_inl_p(unsigned long port)
-{
- unsigned int v = microdev_inl(port);
- delay();
- return v;
-}
-
-void microdev_outb_p(unsigned char b, unsigned long port)
-{
- microdev_outb(b, port);
- delay();
-}
-
-void microdev_outw_p(unsigned short b, unsigned long port)
-{
- microdev_outw(b, port);
- delay();
-}
-
-void microdev_outl_p(unsigned int b, unsigned long port)
-{
- microdev_outl(b, port);
- delay();
-}
-
-void microdev_insb(unsigned long port, void *buffer, unsigned long count)
-{
- volatile unsigned char *port_addr;
- unsigned char *buf = buffer;
-
- port_addr = (volatile unsigned char *)PORT2ADDR(port);
-
- while (count--)
- *buf++ = *port_addr;
-}
-
-void microdev_insw(unsigned long port, void *buffer, unsigned long count)
-{
- volatile unsigned short *port_addr;
- unsigned short *buf = buffer;
-
- port_addr = (volatile unsigned short *)PORT2ADDR(port);
-
- while (count--)
- *buf++ = *port_addr;
-}
-
-void microdev_insl(unsigned long port, void *buffer, unsigned long count)
-{
- volatile unsigned long *port_addr;
- unsigned int *buf = buffer;
-
- port_addr = (volatile unsigned long *)PORT2ADDR(port);
-
- while (count--)
- *buf++ = *port_addr;
-}
-
-void microdev_outsb(unsigned long port, const void *buffer, unsigned long count)
-{
- volatile unsigned char *port_addr;
- const unsigned char *buf = buffer;
-
- port_addr = (volatile unsigned char *)PORT2ADDR(port);
-
- while (count--)
- *port_addr = *buf++;
-}
-
-void microdev_outsw(unsigned long port, const void *buffer, unsigned long count)
-{
- volatile unsigned short *port_addr;
- const unsigned short *buf = buffer;
-
- port_addr = (volatile unsigned short *)PORT2ADDR(port);
-
- while (count--)
- *port_addr = *buf++;
-}
-
-void microdev_outsl(unsigned long port, const void *buffer, unsigned long count)
-{
- volatile unsigned long *port_addr;
- const unsigned int *buf = buffer;
-
- port_addr = (volatile unsigned long *)PORT2ADDR(port);
-
- while (count--)
- *port_addr = *buf++;
+ return (void __iomem *)result;
}
diff --git a/arch/sh/boards/mach-microdev/irq.c b/arch/sh/boards/mach-microdev/irq.c
index a26d16669aa..c35001fd903 100644
--- a/arch/sh/boards/mach-microdev/irq.c
+++ b/arch/sh/boards/mach-microdev/irq.c
@@ -65,19 +65,9 @@ static const struct {
 # error Inconsistency in defining the IRQ# for primary IDE!
#endif
-static void enable_microdev_irq(unsigned int irq);
-static void disable_microdev_irq(unsigned int irq);
-static void mask_and_ack_microdev(unsigned int);
-
-static struct irq_chip microdev_irq_type = {
- .name = "MicroDev-IRQ",
- .unmask = enable_microdev_irq,
- .mask = disable_microdev_irq,
- .ack = mask_and_ack_microdev,
-};
-
-static void disable_microdev_irq(unsigned int irq)
+static void disable_microdev_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned int fpgaIrq;
if (irq >= NUM_EXTERNAL_IRQS)
@@ -91,8 +81,9 @@ static void disable_microdev_irq(unsigned int irq)
__raw_writel(MICRODEV_FPGA_INTC_MASK(fpgaIrq), MICRODEV_FPGA_INTDSB_REG);
}
-static void enable_microdev_irq(unsigned int irq)
+static void enable_microdev_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned long priorityReg, priorities, pri;
unsigned int fpgaIrq;
@@ -116,17 +107,18 @@ static void enable_microdev_irq(unsigned int irq)
__raw_writel(MICRODEV_FPGA_INTC_MASK(fpgaIrq), MICRODEV_FPGA_INTENB_REG);
}
+static struct irq_chip microdev_irq_type = {
+ .name = "MicroDev-IRQ",
+ .irq_unmask = enable_microdev_irq,
+ .irq_mask = disable_microdev_irq,
+};
+
/* This function sets the desired irq handler to be a MicroDev type */
static void __init make_microdev_irq(unsigned int irq)
{
disable_irq_nosync(irq);
set_irq_chip_and_handler(irq, &microdev_irq_type, handle_level_irq);
- disable_microdev_irq(irq);
-}
-
-static void mask_and_ack_microdev(unsigned int irq)
-{
- disable_microdev_irq(irq);
+ disable_microdev_irq(irq_get_irq_data(irq));
}
extern void __init init_microdev_irq(void)
diff --git a/arch/sh/boards/mach-microdev/setup.c b/arch/sh/boards/mach-microdev/setup.c
index d1df2a4fb9b..d8a747291e0 100644
--- a/arch/sh/boards/mach-microdev/setup.c
+++ b/arch/sh/boards/mach-microdev/setup.c
@@ -195,27 +195,6 @@ device_initcall(microdev_devices_setup);
static struct sh_machine_vector mv_sh4202_microdev __initmv = {
.mv_name = "SH4-202 MicroDev",
.mv_nr_irqs = 72,
-
- .mv_inb = microdev_inb,
- .mv_inw = microdev_inw,
- .mv_inl = microdev_inl,
- .mv_outb = microdev_outb,
- .mv_outw = microdev_outw,
- .mv_outl = microdev_outl,
-
- .mv_inb_p = microdev_inb_p,
- .mv_inw_p = microdev_inw_p,
- .mv_inl_p = microdev_inl_p,
- .mv_outb_p = microdev_outb_p,
- .mv_outw_p = microdev_outw_p,
- .mv_outl_p = microdev_outl_p,
-
- .mv_insb = microdev_insb,
- .mv_insw = microdev_insw,
- .mv_insl = microdev_insl,
- .mv_outsb = microdev_outsb,
- .mv_outsw = microdev_outsw,
- .mv_outsl = microdev_outsl,
-
+ .mv_ioport_map = microdev_ioport_map,
.mv_init_irq = init_microdev_irq,
};
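The microdev io.c and setup.c diffs above show the other recurring theme of this series: instead of supplying a full table of mv_inb/mv_outb/... port accessors, a board now provides a single mv_ioport_map hook that turns a legacy port number into an __iomem cookie, and the generic ioread/iowrite helpers perform the accesses. A schematic example assuming a flat port-to-address offset; the window constant is made up, not taken from any board here:

	#include <linux/io.h>
	#include <asm/machvec.h>

	#define EXAMPLE_IO_BASE	0xa2000000UL	/* hypothetical I/O window */

	static void __iomem *example_ioport_map(unsigned long port, unsigned int size)
	{
		/* map every legacy port linearly into the board's I/O window */
		return (void __iomem *)(EXAMPLE_IO_BASE + port);
	}

	static struct sh_machine_vector mv_example __initmv = {
		.mv_name       = "example",
		.mv_ioport_map = example_ioport_map,	/* replaces the per-board mv_inb()/mv_outb() table */
	};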
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 662debe4ead..c8acfec9869 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -213,51 +213,55 @@ static struct platform_device migor_nand_flash_device = {
}
};
+const static struct fb_videomode migor_lcd_modes[] = {
+ {
+#if defined(CONFIG_SH_MIGOR_RTA_WVGA)
+ .name = "LB070WV1",
+ .xres = 800,
+ .yres = 480,
+ .left_margin = 64,
+ .right_margin = 16,
+ .hsync_len = 120,
+ .sync = 0,
+#elif defined(CONFIG_SH_MIGOR_QVGA)
+ .name = "PH240320T",
+ .xres = 320,
+ .yres = 240,
+ .left_margin = 0,
+ .right_margin = 16,
+ .hsync_len = 8,
+ .sync = FB_SYNC_HOR_HIGH_ACT,
+#endif
+ .upper_margin = 1,
+ .lower_margin = 17,
+ .vsync_len = 2,
+ },
+};
+
static struct sh_mobile_lcdc_info sh_mobile_lcdc_info = {
-#ifdef CONFIG_SH_MIGOR_RTA_WVGA
+#if defined(CONFIG_SH_MIGOR_RTA_WVGA)
.clock_source = LCDC_CLK_BUS,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
.bpp = 16,
.interface_type = RGB16,
.clock_divider = 2,
- .lcd_cfg = {
- .name = "LB070WV1",
- .xres = 800,
- .yres = 480,
- .left_margin = 64,
- .right_margin = 16,
- .hsync_len = 120,
- .upper_margin = 1,
- .lower_margin = 17,
- .vsync_len = 2,
- .sync = 0,
- },
+ .lcd_cfg = migor_lcd_modes,
+ .num_cfg = ARRAY_SIZE(migor_lcd_modes),
.lcd_size_cfg = { /* 7.0 inch */
.width = 152,
.height = 91,
},
}
-#endif
-#ifdef CONFIG_SH_MIGOR_QVGA
+#elif defined(CONFIG_SH_MIGOR_QVGA)
.clock_source = LCDC_CLK_PERIPHERAL,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
.bpp = 16,
.interface_type = SYS16A,
.clock_divider = 10,
- .lcd_cfg = {
- .name = "PH240320T",
- .xres = 320,
- .yres = 240,
- .left_margin = 0,
- .right_margin = 16,
- .hsync_len = 8,
- .upper_margin = 1,
- .lower_margin = 17,
- .vsync_len = 2,
- .sync = FB_SYNC_HOR_HIGH_ACT,
- },
+ .lcd_cfg = migor_lcd_modes,
+ .num_cfg = ARRAY_SIZE(migor_lcd_modes),
.lcd_size_cfg = { /* 2.4 inch */
.width = 49,
.height = 37,
@@ -450,7 +454,6 @@ static struct soc_camera_link ov7725_link = {
.power = ov7725_power,
.board_info = &migor_i2c_camera[0],
.i2c_adapter_id = 0,
- .module_name = "ov772x",
.priv = &ov7725_info,
};
@@ -463,7 +466,6 @@ static struct soc_camera_link tw9910_link = {
.power = tw9910_power,
.board_info = &migor_i2c_camera[1],
.i2c_adapter_id = 0,
- .module_name = "tw9910",
.priv = &tw9910_info,
};
diff --git a/arch/sh/boards/mach-se/7206/Makefile b/arch/sh/boards/mach-se/7206/Makefile
index 63e7ed699f3..5c9eaa0535b 100644
--- a/arch/sh/boards/mach-se/7206/Makefile
+++ b/arch/sh/boards/mach-se/7206/Makefile
@@ -2,4 +2,4 @@
# Makefile for the 7206 SolutionEngine specific parts of the kernel
#
-obj-y := setup.o io.o irq.o
+obj-y := setup.o irq.o
diff --git a/arch/sh/boards/mach-se/7206/io.c b/arch/sh/boards/mach-se/7206/io.c
deleted file mode 100644
index adadc77532e..00000000000
--- a/arch/sh/boards/mach-se/7206/io.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/* $Id: io.c,v 1.5 2004/02/22 23:08:43 kkojima Exp $
- *
- * linux/arch/sh/boards/se/7206/io.c
- *
- * Copyright (C) 2006 Yoshinori Sato
- *
- * I/O routine for Hitachi 7206 SolutionEngine.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <asm/io.h>
-#include <mach-se/mach/se7206.h>
-
-
-static inline void delay(void)
-{
- __raw_readw(0x20000000); /* P2 ROM Area */
-}
-
-/* MS7750 requires special versions of in*, out* routines, since
- PC-like io ports are located at upper half byte of 16-bit word which
- can be accessed only with 16-bit wide. */
-
-static inline volatile __u16 *
-port2adr(unsigned int port)
-{
- if (port >= 0x2000 && port < 0x2020)
- return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
- else if (port >= 0x300 && port < 0x310)
- return (volatile __u16 *) (PA_SMSC + (port - 0x300));
-
- return (volatile __u16 *)port;
-}
-
-unsigned char se7206_inb(unsigned long port)
-{
- return (*port2adr(port)) & 0xff;
-}
-
-unsigned char se7206_inb_p(unsigned long port)
-{
- unsigned long v;
-
- v = (*port2adr(port)) & 0xff;
- delay();
- return v;
-}
-
-unsigned short se7206_inw(unsigned long port)
-{
- return *port2adr(port);
-}
-
-void se7206_outb(unsigned char value, unsigned long port)
-{
- *(port2adr(port)) = value;
-}
-
-void se7206_outb_p(unsigned char value, unsigned long port)
-{
- *(port2adr(port)) = value;
- delay();
-}
-
-void se7206_outw(unsigned short value, unsigned long port)
-{
- *port2adr(port) = value;
-}
-
-void se7206_insb(unsigned long port, void *addr, unsigned long count)
-{
- volatile __u16 *p = port2adr(port);
- __u8 *ap = addr;
-
- while (count--)
- *ap++ = *p;
-}
-
-void se7206_insw(unsigned long port, void *addr, unsigned long count)
-{
- volatile __u16 *p = port2adr(port);
- __u16 *ap = addr;
- while (count--)
- *ap++ = *p;
-}
-
-void se7206_outsb(unsigned long port, const void *addr, unsigned long count)
-{
- volatile __u16 *p = port2adr(port);
- const __u8 *ap = addr;
-
- while (count--)
- *p = *ap++;
-}
-
-void se7206_outsw(unsigned long port, const void *addr, unsigned long count)
-{
- volatile __u16 *p = port2adr(port);
- const __u16 *ap = addr;
- while (count--)
- *p = *ap++;
-}
diff --git a/arch/sh/boards/mach-se/7206/irq.c b/arch/sh/boards/mach-se/7206/irq.c
index 8d82175d83a..d961949600f 100644
--- a/arch/sh/boards/mach-se/7206/irq.c
+++ b/arch/sh/boards/mach-se/7206/irq.c
@@ -25,8 +25,9 @@
#define INTC_IPR01 0xfffe0818
#define INTC_ICR1 0xfffe0802
-static void disable_se7206_irq(unsigned int irq)
+static void disable_se7206_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned short val;
unsigned short mask = 0xffff ^ (0x0f << 4 * (3 - (IRQ0_IRQ - irq)));
unsigned short msk0,msk1;
@@ -55,8 +56,9 @@ static void disable_se7206_irq(unsigned int irq)
__raw_writew(msk1, INTMSK1);
}
-static void enable_se7206_irq(unsigned int irq)
+static void enable_se7206_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned short val;
unsigned short value = (0x0001 << 4 * (3 - (IRQ0_IRQ - irq)));
unsigned short msk0,msk1;
@@ -86,13 +88,14 @@ static void enable_se7206_irq(unsigned int irq)
__raw_writew(msk1, INTMSK1);
}
-static void eoi_se7206_irq(unsigned int irq)
+static void eoi_se7206_irq(struct irq_data *data)
{
unsigned short sts0,sts1;
+ unsigned int irq = data->irq;
struct irq_desc *desc = irq_to_desc(irq);
if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- enable_se7206_irq(irq);
+ enable_se7206_irq(data);
/* FPGA isr clear */
sts0 = __raw_readw(INTSTS0);
sts1 = __raw_readw(INTSTS1);
@@ -115,10 +118,9 @@ static void eoi_se7206_irq(unsigned int irq)
static struct irq_chip se7206_irq_chip __read_mostly = {
.name = "SE7206-FPGA",
- .mask = disable_se7206_irq,
- .unmask = enable_se7206_irq,
- .mask_ack = disable_se7206_irq,
- .eoi = eoi_se7206_irq,
+ .irq_mask = disable_se7206_irq,
+ .irq_unmask = enable_se7206_irq,
+ .irq_eoi = eoi_se7206_irq,
};
static void make_se7206_irq(unsigned int irq)
@@ -126,7 +128,7 @@ static void make_se7206_irq(unsigned int irq)
disable_irq_nosync(irq);
set_irq_chip_and_handler_name(irq, &se7206_irq_chip,
handle_level_irq, "level");
- disable_se7206_irq(irq);
+ disable_se7206_irq(irq_get_irq_data(irq));
}
/*
@@ -137,11 +139,13 @@ void __init init_se7206_IRQ(void)
make_se7206_irq(IRQ0_IRQ); /* SMC91C111 */
make_se7206_irq(IRQ1_IRQ); /* ATA */
make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */
- __raw_writew(inw(INTC_ICR1) | 0x000b ,INTC_ICR1 ) ; /* ICR1 */
+
+ __raw_writew(__raw_readw(INTC_ICR1) | 0x000b, INTC_ICR1); /* ICR1 */
/* FPGA System register setup*/
__raw_writew(0x0000,INTSTS0); /* Clear INTSTS0 */
__raw_writew(0x0000,INTSTS1); /* Clear INTSTS1 */
+
/* IRQ0=LAN, IRQ1=ATA, IRQ3=SLT,PCM */
__raw_writew(0x0001,INTSEL);
}
diff --git a/arch/sh/boards/mach-se/7206/setup.c b/arch/sh/boards/mach-se/7206/setup.c
index 8f5c65d43d1..7f4871c71a0 100644
--- a/arch/sh/boards/mach-se/7206/setup.c
+++ b/arch/sh/boards/mach-se/7206/setup.c
@@ -86,20 +86,5 @@ __initcall(se7206_devices_setup);
static struct sh_machine_vector mv_se __initmv = {
.mv_name = "SolutionEngine",
.mv_nr_irqs = 256,
- .mv_inb = se7206_inb,
- .mv_inw = se7206_inw,
- .mv_outb = se7206_outb,
- .mv_outw = se7206_outw,
-
- .mv_inb_p = se7206_inb_p,
- .mv_inw_p = se7206_inw,
- .mv_outb_p = se7206_outb_p,
- .mv_outw_p = se7206_outw,
-
- .mv_insb = se7206_insb,
- .mv_insw = se7206_insw,
- .mv_outsb = se7206_outsb,
- .mv_outsw = se7206_outsw,
-
.mv_init_irq = init_se7206_IRQ,
};
diff --git a/arch/sh/boards/mach-se/7343/irq.c b/arch/sh/boards/mach-se/7343/irq.c
index d4305c26e9f..76255a19417 100644
--- a/arch/sh/boards/mach-se/7343/irq.c
+++ b/arch/sh/boards/mach-se/7343/irq.c
@@ -18,23 +18,22 @@
unsigned int se7343_fpga_irq[SE7343_FPGA_IRQ_NR] = { 0, };
-static void disable_se7343_irq(unsigned int irq)
+static void disable_se7343_irq(struct irq_data *data)
{
- unsigned int bit = (unsigned int)get_irq_chip_data(irq);
+ unsigned int bit = (unsigned int)irq_data_get_irq_chip_data(data);
__raw_writew(__raw_readw(PA_CPLD_IMSK) | 1 << bit, PA_CPLD_IMSK);
}
-static void enable_se7343_irq(unsigned int irq)
+static void enable_se7343_irq(struct irq_data *data)
{
- unsigned int bit = (unsigned int)get_irq_chip_data(irq);
+ unsigned int bit = (unsigned int)irq_data_get_irq_chip_data(data);
__raw_writew(__raw_readw(PA_CPLD_IMSK) & ~(1 << bit), PA_CPLD_IMSK);
}
static struct irq_chip se7343_irq_chip __read_mostly = {
- .name = "SE7343-FPGA",
- .mask = disable_se7343_irq,
- .unmask = enable_se7343_irq,
- .mask_ack = disable_se7343_irq,
+ .name = "SE7343-FPGA",
+ .irq_mask = disable_se7343_irq,
+ .irq_unmask = enable_se7343_irq,
};
static void se7343_irq_demux(unsigned int irq, struct irq_desc *desc)
diff --git a/arch/sh/boards/mach-se/770x/Makefile b/arch/sh/boards/mach-se/770x/Makefile
index 8e624b06d5e..43ea14feef5 100644
--- a/arch/sh/boards/mach-se/770x/Makefile
+++ b/arch/sh/boards/mach-se/770x/Makefile
@@ -2,4 +2,4 @@
# Makefile for the 770x SolutionEngine specific parts of the kernel
#
-obj-y := setup.o io.o irq.o
+obj-y := setup.o irq.o
diff --git a/arch/sh/boards/mach-se/770x/io.c b/arch/sh/boards/mach-se/770x/io.c
deleted file mode 100644
index 28833c8786e..00000000000
--- a/arch/sh/boards/mach-se/770x/io.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (C) 2000 Kazumoto Kojima
- *
- * I/O routine for Hitachi SolutionEngine.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <asm/io.h>
-#include <mach-se/mach/se.h>
-
-/* MS7750 requires special versions of in*, out* routines, since
- PC-like io ports are located at upper half byte of 16-bit word which
- can be accessed only with 16-bit wide. */
-
-static inline volatile __u16 *
-port2adr(unsigned int port)
-{
- if (port & 0xff000000)
- return ( volatile __u16 *) port;
- if (port >= 0x2000)
- return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
- else if (port >= 0x1000)
- return (volatile __u16 *) (PA_83902 + (port << 1));
- else
- return (volatile __u16 *) (PA_SUPERIO + (port << 1));
-}
-
-static inline int
-shifted_port(unsigned long port)
-{
- /* For IDE registers, value is not shifted */
- if ((0x1f0 <= port && port < 0x1f8) || port == 0x3f6)
- return 0;
- else
- return 1;
-}
-
-unsigned char se_inb(unsigned long port)
-{
- if (shifted_port(port))
- return (*port2adr(port) >> 8);
- else
- return (*port2adr(port))&0xff;
-}
-
-unsigned char se_inb_p(unsigned long port)
-{
- unsigned long v;
-
- if (shifted_port(port))
- v = (*port2adr(port) >> 8);
- else
- v = (*port2adr(port))&0xff;
- ctrl_delay();
- return v;
-}
-
-unsigned short se_inw(unsigned long port)
-{
- if (port >= 0x2000)
- return *port2adr(port);
- else
- maybebadio(port);
- return 0;
-}
-
-unsigned int se_inl(unsigned long port)
-{
- maybebadio(port);
- return 0;
-}
-
-void se_outb(unsigned char value, unsigned long port)
-{
- if (shifted_port(port))
- *(port2adr(port)) = value << 8;
- else
- *(port2adr(port)) = value;
-}
-
-void se_outb_p(unsigned char value, unsigned long port)
-{
- if (shifted_port(port))
- *(port2adr(port)) = value << 8;
- else
- *(port2adr(port)) = value;
- ctrl_delay();
-}
-
-void se_outw(unsigned short value, unsigned long port)
-{
- if (port >= 0x2000)
- *port2adr(port) = value;
- else
- maybebadio(port);
-}
-
-void se_outl(unsigned int value, unsigned long port)
-{
- maybebadio(port);
-}
-
-void se_insb(unsigned long port, void *addr, unsigned long count)
-{
- volatile __u16 *p = port2adr(port);
- __u8 *ap = addr;
-
- if (shifted_port(port)) {
- while (count--)
- *ap++ = *p >> 8;
- } else {
- while (count--)
- *ap++ = *p;
- }
-}
-
-void se_insw(unsigned long port, void *addr, unsigned long count)
-{
- volatile __u16 *p = port2adr(port);
- __u16 *ap = addr;
- while (count--)
- *ap++ = *p;
-}
-
-void se_insl(unsigned long port, void *addr, unsigned long count)
-{
- maybebadio(port);
-}
-
-void se_outsb(unsigned long port, const void *addr, unsigned long count)
-{
- volatile __u16 *p = port2adr(port);
- const __u8 *ap = addr;
-
- if (shifted_port(port)) {
- while (count--)
- *p = *ap++ << 8;
- } else {
- while (count--)
- *p = *ap++;
- }
-}
-
-void se_outsw(unsigned long port, const void *addr, unsigned long count)
-{
- volatile __u16 *p = port2adr(port);
- const __u16 *ap = addr;
-
- while (count--)
- *p = *ap++;
-}
-
-void se_outsl(unsigned long port, const void *addr, unsigned long count)
-{
- maybebadio(port);
-}
diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c
index 66d39d1b090..31330c65c0c 100644
--- a/arch/sh/boards/mach-se/770x/setup.c
+++ b/arch/sh/boards/mach-se/770x/setup.c
@@ -195,27 +195,5 @@ static struct sh_machine_vector mv_se __initmv = {
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
.mv_nr_irqs = 104,
#endif
-
- .mv_inb = se_inb,
- .mv_inw = se_inw,
- .mv_inl = se_inl,
- .mv_outb = se_outb,
- .mv_outw = se_outw,
- .mv_outl = se_outl,
-
- .mv_inb_p = se_inb_p,
- .mv_inw_p = se_inw,
- .mv_inl_p = se_inl,
- .mv_outb_p = se_outb_p,
- .mv_outw_p = se_outw,
- .mv_outl_p = se_outl,
-
- .mv_insb = se_insb,
- .mv_insw = se_insw,
- .mv_insl = se_insl,
- .mv_outsb = se_outsb,
- .mv_outsw = se_outsw,
- .mv_outsl = se_outsl,
-
.mv_init_irq = init_se_IRQ,
};
diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c
index 61605db04ee..c013f95628e 100644
--- a/arch/sh/boards/mach-se/7722/irq.c
+++ b/arch/sh/boards/mach-se/7722/irq.c
@@ -18,23 +18,22 @@
unsigned int se7722_fpga_irq[SE7722_FPGA_IRQ_NR] = { 0, };
-static void disable_se7722_irq(unsigned int irq)
+static void disable_se7722_irq(struct irq_data *data)
{
- unsigned int bit = (unsigned int)get_irq_chip_data(irq);
+ unsigned int bit = (unsigned int)irq_data_get_irq_chip_data(data);
__raw_writew(__raw_readw(IRQ01_MASK) | 1 << bit, IRQ01_MASK);
}
-static void enable_se7722_irq(unsigned int irq)
+static void enable_se7722_irq(struct irq_data *data)
{
- unsigned int bit = (unsigned int)get_irq_chip_data(irq);
+ unsigned int bit = (unsigned int)irq_data_get_irq_chip_data(data);
__raw_writew(__raw_readw(IRQ01_MASK) & ~(1 << bit), IRQ01_MASK);
}
static struct irq_chip se7722_irq_chip __read_mostly = {
- .name = "SE7722-FPGA",
- .mask = disable_se7722_irq,
- .unmask = enable_se7722_irq,
- .mask_ack = disable_se7722_irq,
+ .name = "SE7722-FPGA",
+ .irq_mask = disable_se7722_irq,
+ .irq_unmask = enable_se7722_irq,
};
static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc)
diff --git a/arch/sh/boards/mach-se/7724/irq.c b/arch/sh/boards/mach-se/7724/irq.c
index 0942be2daef..5bd87c22b65 100644
--- a/arch/sh/boards/mach-se/7724/irq.c
+++ b/arch/sh/boards/mach-se/7724/irq.c
@@ -68,25 +68,26 @@ static struct fpga_irq get_fpga_irq(unsigned int irq)
return set;
}
-static void disable_se7724_irq(unsigned int irq)
+static void disable_se7724_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
struct fpga_irq set = get_fpga_irq(fpga2irq(irq));
unsigned int bit = irq - set.base;
__raw_writew(__raw_readw(set.mraddr) | 0x0001 << bit, set.mraddr);
}
-static void enable_se7724_irq(unsigned int irq)
+static void enable_se7724_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
struct fpga_irq set = get_fpga_irq(fpga2irq(irq));
unsigned int bit = irq - set.base;
__raw_writew(__raw_readw(set.mraddr) & ~(0x0001 << bit), set.mraddr);
}
static struct irq_chip se7724_irq_chip __read_mostly = {
- .name = "SE7724-FPGA",
- .mask = disable_se7724_irq,
- .unmask = enable_se7724_irq,
- .mask_ack = disable_se7724_irq,
+ .name = "SE7724-FPGA",
+ .irq_mask = disable_se7724_irq,
+ .irq_unmask = enable_se7724_irq,
};
static void se7724_irq_demux(unsigned int irq, struct irq_desc *desc)
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 552ebd9ba82..c31d228fdfc 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -144,16 +144,42 @@ static struct platform_device nor_flash_device = {
};
/* LCDC */
+const static struct fb_videomode lcdc_720p_modes[] = {
+ {
+ .name = "LB070WV1",
+ .sync = 0, /* hsync and vsync are active low */
+ .xres = 1280,
+ .yres = 720,
+ .left_margin = 220,
+ .right_margin = 110,
+ .hsync_len = 40,
+ .upper_margin = 20,
+ .lower_margin = 5,
+ .vsync_len = 5,
+ },
+};
+
+const static struct fb_videomode lcdc_vga_modes[] = {
+ {
+ .name = "LB070WV1",
+ .sync = 0, /* hsync and vsync are active low */
+ .xres = 640,
+ .yres = 480,
+ .left_margin = 105,
+ .right_margin = 50,
+ .hsync_len = 96,
+ .upper_margin = 33,
+ .lower_margin = 10,
+ .vsync_len = 2,
+ },
+};
+
static struct sh_mobile_lcdc_info lcdc_info = {
.clock_source = LCDC_CLK_EXTERNAL,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
.bpp = 16,
.clock_divider = 1,
- .lcd_cfg = {
- .name = "LB070WV1",
- .sync = 0, /* hsync and vsync are active low */
- },
.lcd_size_cfg = { /* 7.0 inch */
.width = 152,
.height = 91,
@@ -550,7 +576,6 @@ static struct sh_vou_pdata sh_vou_pdata = {
.flags = SH_VOU_HSYNC_LOW | SH_VOU_VSYNC_LOW,
.board_info = &ak8813,
.i2c_adap = 0,
- .module_name = "ak881x",
};
static struct resource sh_vou_resources[] = {
@@ -909,24 +934,12 @@ static int __init devices_setup(void)
if (sw & SW41_B) {
/* 720p */
- lcdc_info.ch[0].lcd_cfg.xres = 1280;
- lcdc_info.ch[0].lcd_cfg.yres = 720;
- lcdc_info.ch[0].lcd_cfg.left_margin = 220;
- lcdc_info.ch[0].lcd_cfg.right_margin = 110;
- lcdc_info.ch[0].lcd_cfg.hsync_len = 40;
- lcdc_info.ch[0].lcd_cfg.upper_margin = 20;
- lcdc_info.ch[0].lcd_cfg.lower_margin = 5;
- lcdc_info.ch[0].lcd_cfg.vsync_len = 5;
+ lcdc_info.ch[0].lcd_cfg = lcdc_720p_modes;
+ lcdc_info.ch[0].num_cfg = ARRAY_SIZE(lcdc_720p_modes);
} else {
/* VGA */
- lcdc_info.ch[0].lcd_cfg.xres = 640;
- lcdc_info.ch[0].lcd_cfg.yres = 480;
- lcdc_info.ch[0].lcd_cfg.left_margin = 105;
- lcdc_info.ch[0].lcd_cfg.right_margin = 50;
- lcdc_info.ch[0].lcd_cfg.hsync_len = 96;
- lcdc_info.ch[0].lcd_cfg.upper_margin = 33;
- lcdc_info.ch[0].lcd_cfg.lower_margin = 10;
- lcdc_info.ch[0].lcd_cfg.vsync_len = 2;
+ lcdc_info.ch[0].lcd_cfg = lcdc_vga_modes;
+ lcdc_info.ch[0].num_cfg = ARRAY_SIZE(lcdc_vga_modes);
}
if (sw & SW41_A) {
diff --git a/arch/sh/boards/mach-se/7751/Makefile b/arch/sh/boards/mach-se/7751/Makefile
index e6f4341bfe6..a338fd9d503 100644
--- a/arch/sh/boards/mach-se/7751/Makefile
+++ b/arch/sh/boards/mach-se/7751/Makefile
@@ -2,4 +2,4 @@
# Makefile for the 7751 SolutionEngine specific parts of the kernel
#
-obj-y := setup.o io.o irq.o
+obj-y := setup.o irq.o
diff --git a/arch/sh/boards/mach-se/7751/io.c b/arch/sh/boards/mach-se/7751/io.c
deleted file mode 100644
index 6e75bd4459e..00000000000
--- a/arch/sh/boards/mach-se/7751/io.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (C) 2001 Ian da Silva, Jeremy Siegel
- * Based largely on io_se.c.
- *
- * I/O routine for Hitachi 7751 SolutionEngine.
- *
- * Initial version only to support LAN access; some
- * placeholder code from io_se.c left in with the
- * expectation of later SuperIO and PCMCIA access.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <asm/io.h>
-#include <mach-se/mach/se7751.h>
-#include <asm/addrspace.h>
-
-static inline volatile u16 *port2adr(unsigned int port)
-{
- if (port >= 0x2000)
- return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
- maybebadio((unsigned long)port);
- return (volatile __u16*)port;
-}
-
-/*
- * General outline: remap really low stuff [eventually] to SuperIO,
- * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
- * is mapped through the PCI IO window. Stuff with high bits (PXSEG)
- * should be way beyond the window, and is used w/o translation for
- * compatibility.
- */
-unsigned char sh7751se_inb(unsigned long port)
-{
- if (PXSEG(port))
- return *(volatile unsigned char *)port;
- else
- return (*port2adr(port)) & 0xff;
-}
-
-unsigned char sh7751se_inb_p(unsigned long port)
-{
- unsigned char v;
-
- if (PXSEG(port))
- v = *(volatile unsigned char *)port;
- else
- v = (*port2adr(port)) & 0xff;
- ctrl_delay();
- return v;
-}
-
-unsigned short sh7751se_inw(unsigned long port)
-{
- if (PXSEG(port))
- return *(volatile unsigned short *)port;
- else if (port >= 0x2000)
- return *port2adr(port);
- else
- maybebadio(port);
- return 0;
-}
-
-unsigned int sh7751se_inl(unsigned long port)
-{
- if (PXSEG(port))
- return *(volatile unsigned long *)port;
- else if (port >= 0x2000)
- return *port2adr(port);
- else
- maybebadio(port);
- return 0;
-}
-
-void sh7751se_outb(unsigned char value, unsigned long port)
-{
-
- if (PXSEG(port))
- *(volatile unsigned char *)port = value;
- else
- *(port2adr(port)) = value;
-}
-
-void sh7751se_outb_p(unsigned char value, unsigned long port)
-{
- if (PXSEG(port))
- *(volatile unsigned char *)port = value;
- else
- *(port2adr(port)) = value;
- ctrl_delay();
-}
-
-void sh7751se_outw(unsigned short value, unsigned long port)
-{
- if (PXSEG(port))
- *(volatile unsigned short *)port = value;
- else if (port >= 0x2000)
- *port2adr(port) = value;
- else
- maybebadio(port);
-}
-
-void sh7751se_outl(unsigned int value, unsigned long port)
-{
- if (PXSEG(port))
- *(volatile unsigned long *)port = value;
- else
- maybebadio(port);
-}
-
-void sh7751se_insl(unsigned long port, void *addr, unsigned long count)
-{
- maybebadio(port);
-}
-
-void sh7751se_outsl(unsigned long port, const void *addr, unsigned long count)
-{
- maybebadio(port);
-}
diff --git a/arch/sh/boards/mach-se/7751/setup.c b/arch/sh/boards/mach-se/7751/setup.c
index 50572512e3e..9fbc51beb18 100644
--- a/arch/sh/boards/mach-se/7751/setup.c
+++ b/arch/sh/boards/mach-se/7751/setup.c
@@ -56,23 +56,5 @@ __initcall(se7751_devices_setup);
static struct sh_machine_vector mv_7751se __initmv = {
.mv_name = "7751 SolutionEngine",
.mv_nr_irqs = 72,
-
- .mv_inb = sh7751se_inb,
- .mv_inw = sh7751se_inw,
- .mv_inl = sh7751se_inl,
- .mv_outb = sh7751se_outb,
- .mv_outw = sh7751se_outw,
- .mv_outl = sh7751se_outl,
-
- .mv_inb_p = sh7751se_inb_p,
- .mv_inw_p = sh7751se_inw,
- .mv_inl_p = sh7751se_inl,
- .mv_outb_p = sh7751se_outb_p,
- .mv_outw_p = sh7751se_outw,
- .mv_outl_p = sh7751se_outl,
-
- .mv_insl = sh7751se_insl,
- .mv_outsl = sh7751se_outsl,
-
.mv_init_irq = init_7751se_IRQ,
};
diff --git a/arch/sh/boards/mach-snapgear/Makefile b/arch/sh/boards/mach-snapgear/Makefile
deleted file mode 100644
index d2d2f4b6a50..00000000000
--- a/arch/sh/boards/mach-snapgear/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the SnapGear specific parts of the kernel
-#
-
-obj-y := setup.o io.o
diff --git a/arch/sh/boards/mach-snapgear/io.c b/arch/sh/boards/mach-snapgear/io.c
deleted file mode 100644
index 476650e42db..00000000000
--- a/arch/sh/boards/mach-snapgear/io.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2002 David McCullough <davidm@snapgear.com>
- * Copyright (C) 2001 Ian da Silva, Jeremy Siegel
- * Based largely on io_se.c.
- *
- * I/O routine for Hitachi 7751 SolutionEngine.
- *
- * Initial version only to support LAN access; some
- * placeholder code from io_se.c left in with the
- * expectation of later SuperIO and PCMCIA access.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <asm/io.h>
-#include <asm/addrspace.h>
-
-#ifdef CONFIG_SH_SECUREEDGE5410
-unsigned short secureedge5410_ioport;
-#endif
-
-static inline volatile __u16 *port2adr(unsigned int port)
-{
- maybebadio((unsigned long)port);
- return (volatile __u16*)port;
-}
-
-/*
- * General outline: remap really low stuff [eventually] to SuperIO,
- * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
- * is mapped through the PCI IO window. Stuff with high bits (PXSEG)
- * should be way beyond the window, and is used w/o translation for
- * compatibility.
- */
-unsigned char snapgear_inb(unsigned long port)
-{
- if (PXSEG(port))
- return *(volatile unsigned char *)port;
- else
- return (*port2adr(port)) & 0xff;
-}
-
-unsigned char snapgear_inb_p(unsigned long port)
-{
- unsigned char v;
-
- if (PXSEG(port))
- v = *(volatile unsigned char *)port;
- else
- v = (*port2adr(port))&0xff;
- ctrl_delay();
- return v;
-}
-
-unsigned short snapgear_inw(unsigned long port)
-{
- if (PXSEG(port))
- return *(volatile unsigned short *)port;
- else if (port >= 0x2000)
- return *port2adr(port);
- else
- maybebadio(port);
- return 0;
-}
-
-unsigned int snapgear_inl(unsigned long port)
-{
- if (PXSEG(port))
- return *(volatile unsigned long *)port;
- else if (port >= 0x2000)
- return *port2adr(port);
- else
- maybebadio(port);
- return 0;
-}
-
-void snapgear_outb(unsigned char value, unsigned long port)
-{
-
- if (PXSEG(port))
- *(volatile unsigned char *)port = value;
- else
- *(port2adr(port)) = value;
-}
-
-void snapgear_outb_p(unsigned char value, unsigned long port)
-{
- if (PXSEG(port))
- *(volatile unsigned char *)port = value;
- else
- *(port2adr(port)) = value;
- ctrl_delay();
-}
-
-void snapgear_outw(unsigned short value, unsigned long port)
-{
- if (PXSEG(port))
- *(volatile unsigned short *)port = value;
- else if (port >= 0x2000)
- *port2adr(port) = value;
- else
- maybebadio(port);
-}
-
-void snapgear_outl(unsigned int value, unsigned long port)
-{
- if (PXSEG(port))
- *(volatile unsigned long *)port = value;
- else
- maybebadio(port);
-}
-
-void snapgear_insl(unsigned long port, void *addr, unsigned long count)
-{
- maybebadio(port);
-}
-
-void snapgear_outsl(unsigned long port, const void *addr, unsigned long count)
-{
- maybebadio(port);
-}
diff --git a/arch/sh/boards/mach-systemh/Makefile b/arch/sh/boards/mach-systemh/Makefile
deleted file mode 100644
index 2cc6a23d9d3..00000000000
--- a/arch/sh/boards/mach-systemh/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Makefile for the SystemH specific parts of the kernel
-#
-
-obj-y := setup.o irq.o io.o
-
-# XXX: This wants to be consolidated in arch/sh/drivers/pci, and more
-# importantly, with the generic sh7751_pcic_init() code. For now, we'll
-# just abuse the hell out of kbuild, because we can..
-
-obj-$(CONFIG_PCI) += pci.o
-pci-y := ../../se/7751/pci.o
-
diff --git a/arch/sh/boards/mach-systemh/io.c b/arch/sh/boards/mach-systemh/io.c
deleted file mode 100644
index 15577ff1f71..00000000000
--- a/arch/sh/boards/mach-systemh/io.c
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * linux/arch/sh/boards/renesas/systemh/io.c
- *
- * Copyright (C) 2001 Ian da Silva, Jeremy Siegel
- * Based largely on io_se.c.
- *
- * I/O routine for Hitachi 7751 Systemh.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <mach/systemh7751.h>
-#include <asm/addrspace.h>
-#include <asm/io.h>
-
-#define ETHER_IOMAP(adr) (0xB3000000 + (adr)) /*map to 16bits access area
- of smc lan chip*/
-static inline volatile __u16 *
-port2adr(unsigned int port)
-{
- if (port >= 0x2000)
- return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
- maybebadio((unsigned long)port);
- return (volatile __u16*)port;
-}
-
-/*
- * General outline: remap really low stuff [eventually] to SuperIO,
- * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
- * is mapped through the PCI IO window. Stuff with high bits (PXSEG)
- * should be way beyond the window, and is used w/o translation for
- * compatibility.
- */
-unsigned char sh7751systemh_inb(unsigned long port)
-{
- if (PXSEG(port))
- return *(volatile unsigned char *)port;
- else if (port <= 0x3F1)
- return *(volatile unsigned char *)ETHER_IOMAP(port);
- else
- return (*port2adr(port))&0xff;
-}
-
-unsigned char sh7751systemh_inb_p(unsigned long port)
-{
- unsigned char v;
-
- if (PXSEG(port))
- v = *(volatile unsigned char *)port;
- else if (port <= 0x3F1)
- v = *(volatile unsigned char *)ETHER_IOMAP(port);
- else
- v = (*port2adr(port))&0xff;
- ctrl_delay();
- return v;
-}
-
-unsigned short sh7751systemh_inw(unsigned long port)
-{
- if (PXSEG(port))
- return *(volatile unsigned short *)port;
- else if (port >= 0x2000)
- return *port2adr(port);
- else if (port <= 0x3F1)
- return *(volatile unsigned int *)ETHER_IOMAP(port);
- else
- maybebadio(port);
- return 0;
-}
-
-unsigned int sh7751systemh_inl(unsigned long port)
-{
- if (PXSEG(port))
- return *(volatile unsigned long *)port;
- else if (port >= 0x2000)
- return *port2adr(port);
- else if (port <= 0x3F1)
- return *(volatile unsigned int *)ETHER_IOMAP(port);
- else
- maybebadio(port);
- return 0;
-}
-
-void sh7751systemh_outb(unsigned char value, unsigned long port)
-{
-
- if (PXSEG(port))
- *(volatile unsigned char *)port = value;
- else if (port <= 0x3F1)
- *(volatile unsigned char *)ETHER_IOMAP(port) = value;
- else
- *(port2adr(port)) = value;
-}
-
-void sh7751systemh_outb_p(unsigned char value, unsigned long port)
-{
- if (PXSEG(port))
- *(volatile unsigned char *)port = value;
- else if (port <= 0x3F1)
- *(volatile unsigned char *)ETHER_IOMAP(port) = value;
- else
- *(port2adr(port)) = value;
- ctrl_delay();
-}
-
-void sh7751systemh_outw(unsigned short value, unsigned long port)
-{
- if (PXSEG(port))
- *(volatile unsigned short *)port = value;
- else if (port >= 0x2000)
- *port2adr(port) = value;
- else if (port <= 0x3F1)
- *(volatile unsigned short *)ETHER_IOMAP(port) = value;
- else
- maybebadio(port);
-}
-
-void sh7751systemh_outl(unsigned int value, unsigned long port)
-{
- if (PXSEG(port))
- *(volatile unsigned long *)port = value;
- else
- maybebadio(port);
-}
-
-void sh7751systemh_insb(unsigned long port, void *addr, unsigned long count)
-{
- unsigned char *p = addr;
- while (count--) *p++ = sh7751systemh_inb(port);
-}
-
-void sh7751systemh_insw(unsigned long port, void *addr, unsigned long count)
-{
- unsigned short *p = addr;
- while (count--) *p++ = sh7751systemh_inw(port);
-}
-
-void sh7751systemh_insl(unsigned long port, void *addr, unsigned long count)
-{
- maybebadio(port);
-}
-
-void sh7751systemh_outsb(unsigned long port, const void *addr, unsigned long count)
-{
- unsigned char *p = (unsigned char*)addr;
- while (count--) sh7751systemh_outb(*p++, port);
-}
-
-void sh7751systemh_outsw(unsigned long port, const void *addr, unsigned long count)
-{
- unsigned short *p = (unsigned short*)addr;
- while (count--) sh7751systemh_outw(*p++, port);
-}
-
-void sh7751systemh_outsl(unsigned long port, const void *addr, unsigned long count)
-{
- maybebadio(port);
-}
diff --git a/arch/sh/boards/mach-systemh/irq.c b/arch/sh/boards/mach-systemh/irq.c
deleted file mode 100644
index 523aea5dc94..00000000000
--- a/arch/sh/boards/mach-systemh/irq.c
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * linux/arch/sh/boards/renesas/systemh/irq.c
- *
- * Copyright (C) 2000 Kazumoto Kojima
- *
- * Hitachi SystemH Support.
- *
- * Modified for 7751 SystemH by
- * Jonathan Short.
- */
-
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-
-#include <mach/systemh7751.h>
-#include <asm/smc37c93x.h>
-
-/* address of external interrupt mask register
- * address must be set prior to use these (maybe in init_XXX_irq())
- * XXX : is it better to use .config than specifying it in code? */
-static unsigned long *systemh_irq_mask_register = (unsigned long *)0xB3F10004;
-static unsigned long *systemh_irq_request_register = (unsigned long *)0xB3F10000;
-
-/* forward declaration */
-static void enable_systemh_irq(unsigned int irq);
-static void disable_systemh_irq(unsigned int irq);
-static void mask_and_ack_systemh(unsigned int);
-
-static struct irq_chip systemh_irq_type = {
- .name = " SystemH Register",
- .unmask = enable_systemh_irq,
- .mask = disable_systemh_irq,
- .ack = mask_and_ack_systemh,
-};
-
-static void disable_systemh_irq(unsigned int irq)
-{
- if (systemh_irq_mask_register) {
- unsigned long val, mask = 0x01 << 1;
-
- /* Clear the "irq"th bit in the mask and set it in the request */
- val = __raw_readl((unsigned long)systemh_irq_mask_register);
- val &= ~mask;
- __raw_writel(val, (unsigned long)systemh_irq_mask_register);
-
- val = __raw_readl((unsigned long)systemh_irq_request_register);
- val |= mask;
- __raw_writel(val, (unsigned long)systemh_irq_request_register);
- }
-}
-
-static void enable_systemh_irq(unsigned int irq)
-{
- if (systemh_irq_mask_register) {
- unsigned long val, mask = 0x01 << 1;
-
- /* Set "irq"th bit in the mask register */
- val = __raw_readl((unsigned long)systemh_irq_mask_register);
- val |= mask;
- __raw_writel(val, (unsigned long)systemh_irq_mask_register);
- }
-}
-
-static void mask_and_ack_systemh(unsigned int irq)
-{
- disable_systemh_irq(irq);
-}
-
-void make_systemh_irq(unsigned int irq)
-{
- disable_irq_nosync(irq);
- set_irq_chip_and_handler(irq, &systemh_irq_type, handle_level_irq);
- disable_systemh_irq(irq);
-}
diff --git a/arch/sh/boards/mach-systemh/setup.c b/arch/sh/boards/mach-systemh/setup.c
deleted file mode 100644
index 219fd800a43..00000000000
--- a/arch/sh/boards/mach-systemh/setup.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * linux/arch/sh/boards/renesas/systemh/setup.c
- *
- * Copyright (C) 2000 Kazumoto Kojima
- * Copyright (C) 2003 Paul Mundt
- *
- * Hitachi SystemH Support.
- *
- * Modified for 7751 SystemH by Jonathan Short.
- *
- * Rewritten for 2.6 by Paul Mundt.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <asm/machvec.h>
-#include <mach/systemh7751.h>
-
-extern void make_systemh_irq(unsigned int irq);
-
-/*
- * Initialize IRQ setting
- */
-static void __init sh7751systemh_init_irq(void)
-{
- make_systemh_irq(0xb); /* Ethernet interrupt */
-}
-
-static struct sh_machine_vector mv_7751systemh __initmv = {
- .mv_name = "7751 SystemH",
- .mv_nr_irqs = 72,
-
- .mv_inb = sh7751systemh_inb,
- .mv_inw = sh7751systemh_inw,
- .mv_inl = sh7751systemh_inl,
- .mv_outb = sh7751systemh_outb,
- .mv_outw = sh7751systemh_outw,
- .mv_outl = sh7751systemh_outl,
-
- .mv_inb_p = sh7751systemh_inb_p,
- .mv_inw_p = sh7751systemh_inw,
- .mv_inl_p = sh7751systemh_inl,
- .mv_outb_p = sh7751systemh_outb_p,
- .mv_outw_p = sh7751systemh_outw,
- .mv_outl_p = sh7751systemh_outl,
-
- .mv_insb = sh7751systemh_insb,
- .mv_insw = sh7751systemh_insw,
- .mv_insl = sh7751systemh_insl,
- .mv_outsb = sh7751systemh_outsb,
- .mv_outsw = sh7751systemh_outsw,
- .mv_outsl = sh7751systemh_outsl,
-
- .mv_init_irq = sh7751systemh_init_irq,
-};
diff --git a/arch/sh/boards/mach-x3proto/gpio.c b/arch/sh/boards/mach-x3proto/gpio.c
index 594adf76e46..239e7406625 100644
--- a/arch/sh/boards/mach-x3proto/gpio.c
+++ b/arch/sh/boards/mach-x3proto/gpio.c
@@ -54,18 +54,19 @@ static int x3proto_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
static void x3proto_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_data *data = irq_get_irq_data(irq);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
unsigned long mask;
int pin;
- chip->mask_ack(irq);
+ chip->irq_mask_ack(data);
mask = __raw_readw(KEYDETR);
for_each_set_bit(pin, &mask, NR_BASEBOARD_GPIOS)
generic_handle_irq(x3proto_gpio_to_irq(NULL, pin));
- chip->unmask(irq);
+ chip->irq_unmask(data);
}
struct gpio_chip x3proto_gpio_chip = {
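The x3proto handler above and the hd64461, imask, intc-sh5 and ipr conversions below all follow the same genirq transition: chip callbacks stop taking a bare IRQ number and take a struct irq_data * instead, and are wired up through the irq_*-named members. A minimal sketch of the resulting shape, using a hypothetical "foo" controller that is not part of this patch:

static void foo_mask_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;	/* the number is still reachable if needed */

	/* ... set the mask bit for 'irq' in the controller register ... */
}

static void foo_unmask_irq(struct irq_data *data)
{
	/* ... clear the mask bit again ... */
}

static struct irq_chip foo_irq_chip = {
	.name		= "FOO",
	.irq_mask	= foo_mask_irq,
	.irq_unmask	= foo_unmask_irq,
};

/* registration is unchanged: */
set_irq_chip_and_handler(irq, &foo_irq_chip, handle_level_irq);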
diff --git a/arch/sh/cchips/hd6446x/hd64461.c b/arch/sh/cchips/hd6446x/hd64461.c
index bcb31ae84a5..177a10b25ca 100644
--- a/arch/sh/cchips/hd6446x/hd64461.c
+++ b/arch/sh/cchips/hd6446x/hd64461.c
@@ -17,8 +17,9 @@
/* This belongs in cpu specific */
#define INTC_ICR1 0xA4140010UL
-static void hd64461_mask_irq(unsigned int irq)
+static void hd64461_mask_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned short nimr;
unsigned short mask = 1 << (irq - HD64461_IRQBASE);
@@ -27,8 +28,9 @@ static void hd64461_mask_irq(unsigned int irq)
__raw_writew(nimr, HD64461_NIMR);
}
-static void hd64461_unmask_irq(unsigned int irq)
+static void hd64461_unmask_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned short nimr;
unsigned short mask = 1 << (irq - HD64461_IRQBASE);
@@ -37,20 +39,21 @@ static void hd64461_unmask_irq(unsigned int irq)
__raw_writew(nimr, HD64461_NIMR);
}
-static void hd64461_mask_and_ack_irq(unsigned int irq)
+static void hd64461_mask_and_ack_irq(struct irq_data *data)
{
- hd64461_mask_irq(irq);
+ hd64461_mask_irq(data);
+
#ifdef CONFIG_HD64461_ENABLER
- if (irq == HD64461_IRQBASE + 13)
+ if (data->irq == HD64461_IRQBASE + 13)
__raw_writeb(0x00, HD64461_PCC1CSCR);
#endif
}
static struct irq_chip hd64461_irq_chip = {
.name = "HD64461-IRQ",
- .mask = hd64461_mask_irq,
- .mask_ack = hd64461_mask_and_ack_irq,
- .unmask = hd64461_unmask_irq,
+ .irq_mask = hd64461_mask_irq,
+ .irq_mask_ack = hd64461_mask_and_ack_irq,
+ .irq_unmask = hd64461_unmask_irq,
};
static void hd64461_irq_demux(unsigned int irq, struct irq_desc *desc)
diff --git a/arch/sh/configs/snapgear_defconfig b/arch/sh/configs/secureedge5410_defconfig
index 7eae4e59d7f..7eae4e59d7f 100644
--- a/arch/sh/configs/snapgear_defconfig
+++ b/arch/sh/configs/secureedge5410_defconfig
diff --git a/arch/sh/configs/systemh_defconfig b/arch/sh/configs/systemh_defconfig
deleted file mode 100644
index b58dfc505ef..00000000000
--- a/arch/sh/configs/systemh_defconfig
+++ /dev/null
@@ -1,28 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL_SYSCALL is not set
-# CONFIG_HOTPLUG is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_CPU_SUBTYPE_SH7751R=y
-CONFIG_MEMORY_START=0x0c000000
-CONFIG_MEMORY_SIZE=0x00400000
-CONFIG_FLATMEM_MANUAL=y
-CONFIG_SH_7751_SYSTEMH=y
-CONFIG_PREEMPT=y
-# CONFIG_STANDALONE is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=1024
-# CONFIG_INPUT is not set
-# CONFIG_SERIO_SERPORT is not set
-# CONFIG_VT is not set
-CONFIG_HW_RANDOM=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_ROMFS_FS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h
index 446b3831c21..3d1ae2bfaa6 100644
--- a/arch/sh/include/asm/addrspace.h
+++ b/arch/sh/include/asm/addrspace.h
@@ -44,10 +44,10 @@
/*
* These will never work in 32-bit, don't even bother.
*/
-#define P1SEGADDR(a) __futile_remapping_attempt
-#define P2SEGADDR(a) __futile_remapping_attempt
-#define P3SEGADDR(a) __futile_remapping_attempt
-#define P4SEGADDR(a) __futile_remapping_attempt
+#define P1SEGADDR(a) ({ (void)(a); BUG(); NULL; })
+#define P2SEGADDR(a) ({ (void)(a); BUG(); NULL; })
+#define P3SEGADDR(a) ({ (void)(a); BUG(); NULL; })
+#define P4SEGADDR(a) ({ (void)(a); BUG(); NULL; })
#endif
#endif /* P1SEG */
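The old 32-bit expansions were a deliberately undefined symbol, so any reference failed to build even when the branch could never run; the new forms compile everywhere and only trap if actually executed. That is what lets common code keep a single unconditional expression, as the dma_cache_sync() rework later in this series does (condensed sketch of that hunk):

	void *addr;

	/* builds in both modes; the P1SEGADDR() arm only matters when the
	 * CPU is genuinely running with 29-bit physical addressing */
	addr = __in_29bit_mode() ?
		(void *)P1SEGADDR((unsigned long)vaddr) : vaddr;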
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 02f77450cd8..083ea068e81 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -66,7 +66,6 @@ static inline unsigned long long neff_sign_extend(unsigned long val)
#define PHYS_ADDR_MASK29 0x1fffffff
#define PHYS_ADDR_MASK32 0xffffffff
-#ifdef CONFIG_PMB
static inline unsigned long phys_addr_mask(void)
{
/* Is the MMU in 29bit mode? */
@@ -75,17 +74,6 @@ static inline unsigned long phys_addr_mask(void)
return PHYS_ADDR_MASK32;
}
-#elif defined(CONFIG_32BIT)
-static inline unsigned long phys_addr_mask(void)
-{
- return PHYS_ADDR_MASK32;
-}
-#else
-static inline unsigned long phys_addr_mask(void)
-{
- return PHYS_ADDR_MASK29;
-}
-#endif
#define PTE_PHYS_MASK (phys_addr_mask() & PAGE_MASK)
#define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT)
@@ -169,6 +157,8 @@ extern void page_table_range_init(unsigned long start, unsigned long end,
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+#define __HAVE_ARCH_PTE_SPECIAL
+
#include <asm-generic/pgtable.h>
#endif /* __ASM_SH_PGTABLE_H */
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index 69fdfbf14ea..43528ec656b 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -378,8 +378,6 @@ PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED);
PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);
PTE_BIT_FUNC(low, mkspecial, |= _PAGE_SPECIAL);
-#define __HAVE_ARCH_PTE_SPECIAL
-
/*
* Macro and implementation to make a page protection as uncachable.
*/
diff --git a/arch/sh/include/asm/pgtable_64.h b/arch/sh/include/asm/pgtable_64.h
index 10a48111226..42cb9dd5216 100644
--- a/arch/sh/include/asm/pgtable_64.h
+++ b/arch/sh/include/asm/pgtable_64.h
@@ -130,6 +130,7 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
* anything above the PPN field.
*/
#define _PAGE_WIRED _PAGE_EXT(0x001) /* software: wire the tlb entry */
+#define _PAGE_SPECIAL _PAGE_EXT(0x002)
#define _PAGE_CLEAR_FLAGS (_PAGE_PRESENT | _PAGE_FILE | _PAGE_SHARED | \
_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)
@@ -173,7 +174,8 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
/* Default flags for a User page */
#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER)
-#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
+ _PAGE_SPECIAL)
/*
* We have full permissions (Read/Write/Execute/Shared).
@@ -263,7 +265,7 @@ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
-static inline int pte_special(pte_t pte){ return 0; }
+static inline int pte_special(pte_t pte){ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
@@ -272,8 +274,7 @@ static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) |
static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
-static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
-
+static inline pte_t pte_mkspecial(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SPECIAL)); return pte; }
/*
* Conversion functions: convert a page and protection to a page entry.
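Adding _PAGE_SPECIAL to _PAGE_CHG_MASK matters because pte_modify() preserves only the bits in that mask when protections are changed; roughly, as a sketch of the usual pattern rather than a copy of this file:

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/* bits outside _PAGE_CHG_MASK are replaced by the new protection,
	 * so the special bit survives an mprotect()-style update only
	 * because _PAGE_SPECIAL is now part of the mask */
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}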
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index 0a58cb25a65..c9e7cbc4768 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -89,6 +89,7 @@ struct sh_cpuinfo {
struct task_struct *idle;
#endif
+ unsigned int phys_bits;
unsigned long flags;
} __attribute__ ((aligned(L1_CACHE_BYTES)));
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index 1f1af5afff0..10c8b1823a1 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -10,6 +10,7 @@
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <asm/types.h>
+#include <asm/uncached.h>
#define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
@@ -137,9 +138,6 @@ extern unsigned int instruction_size(unsigned int insn);
#define instruction_size(insn) (4)
#endif
-extern unsigned long cached_to_uncached;
-extern unsigned long uncached_size;
-
void per_cpu_trap_init(void);
void default_idle(void);
void cpu_idle_wait(void);
diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index c941b273940..a4ad1cd9bc4 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -145,42 +145,6 @@ do { \
__restore_dsp(prev); \
} while (0)
-/*
- * Jump to uncached area.
- * When handling TLB or caches, we need to do it from an uncached area.
- */
-#define jump_to_uncached() \
-do { \
- unsigned long __dummy; \
- \
- __asm__ __volatile__( \
- "mova 1f, %0\n\t" \
- "add %1, %0\n\t" \
- "jmp @%0\n\t" \
- " nop\n\t" \
- ".balign 4\n" \
- "1:" \
- : "=&z" (__dummy) \
- : "r" (cached_to_uncached)); \
-} while (0)
-
-/*
- * Back to cached area.
- */
-#define back_to_cached() \
-do { \
- unsigned long __dummy; \
- ctrl_barrier(); \
- __asm__ __volatile__( \
- "mov.l 1f, %0\n\t" \
- "jmp @%0\n\t" \
- " nop\n\t" \
- ".balign 4\n" \
- "1: .long 2f\n" \
- "2:" \
- : "=&r" (__dummy)); \
-} while (0)
-
#ifdef CONFIG_CPU_HAS_SR_RB
#define lookup_exception_vector() \
({ \
diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h
index 36338646dfc..8593bc8d1a4 100644
--- a/arch/sh/include/asm/system_64.h
+++ b/arch/sh/include/asm/system_64.h
@@ -34,9 +34,6 @@ do { \
&next->thread); \
} while (0)
-#define jump_to_uncached() do { } while (0)
-#define back_to_cached() do { } while (0)
-
#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
diff --git a/arch/sh/include/asm/uncached.h b/arch/sh/include/asm/uncached.h
index e3419f96626..6f8816b79cf 100644
--- a/arch/sh/include/asm/uncached.h
+++ b/arch/sh/include/asm/uncached.h
@@ -4,15 +4,55 @@
#include <linux/bug.h>
#ifdef CONFIG_UNCACHED_MAPPING
+extern unsigned long cached_to_uncached;
+extern unsigned long uncached_size;
extern unsigned long uncached_start, uncached_end;
extern int virt_addr_uncached(unsigned long kaddr);
extern void uncached_init(void);
extern void uncached_resize(unsigned long size);
+
+/*
+ * Jump to uncached area.
+ * When handling TLB or caches, we need to do it from an uncached area.
+ */
+#define jump_to_uncached() \
+do { \
+ unsigned long __dummy; \
+ \
+ __asm__ __volatile__( \
+ "mova 1f, %0\n\t" \
+ "add %1, %0\n\t" \
+ "jmp @%0\n\t" \
+ " nop\n\t" \
+ ".balign 4\n" \
+ "1:" \
+ : "=&z" (__dummy) \
+ : "r" (cached_to_uncached)); \
+} while (0)
+
+/*
+ * Back to cached area.
+ */
+#define back_to_cached() \
+do { \
+ unsigned long __dummy; \
+ ctrl_barrier(); \
+ __asm__ __volatile__( \
+ "mov.l 1f, %0\n\t" \
+ "jmp @%0\n\t" \
+ " nop\n\t" \
+ ".balign 4\n" \
+ "1: .long 2f\n" \
+ "2:" \
+ : "=&r" (__dummy)); \
+} while (0)
#else
#define virt_addr_uncached(kaddr) (0)
#define uncached_init() do { } while (0)
#define uncached_resize(size) BUG()
+#define jump_to_uncached() do { } while (0)
+#define back_to_cached() do { } while (0)
#endif
#endif /* __ASM_SH_UNCACHED_H */
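With the definitions consolidated in <asm/uncached.h>, callers look the same as before. A typical, purely illustrative cache-control sequence (the register written here is a stand-in, not taken from this patch):

	#include <asm/uncached.h>

	/* run the update from the uncached alias so the store is not
	 * itself affected by the cache being reconfigured */
	jump_to_uncached();
	__raw_writel(flags, CCR);	/* CCR stands in for a cache control register */
	back_to_cached();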
diff --git a/arch/sh/include/mach-common/mach/edosk7705.h b/arch/sh/include/mach-common/mach/edosk7705.h
deleted file mode 100644
index efc43b32346..00000000000
--- a/arch/sh/include/mach-common/mach/edosk7705.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_SH_EDOSK7705_H
-#define __ASM_SH_EDOSK7705_H
-
-#define __IO_PREFIX sh_edosk7705
-#include <asm/io_generic.h>
-
-#endif /* __ASM_SH_EDOSK7705_H */
diff --git a/arch/sh/include/mach-common/mach/microdev.h b/arch/sh/include/mach-common/mach/microdev.h
index 1aed15856e1..dcb05fa8c16 100644
--- a/arch/sh/include/mach-common/mach/microdev.h
+++ b/arch/sh/include/mach-common/mach/microdev.h
@@ -68,13 +68,4 @@ extern void microdev_print_fpga_intc_status(void);
#define __IO_PREFIX microdev
#include <asm/io_generic.h>
-#if defined(CONFIG_PCI)
-unsigned char microdev_pci_inb(unsigned long port);
-unsigned short microdev_pci_inw(unsigned long port);
-unsigned long microdev_pci_inl(unsigned long port);
-void microdev_pci_outb(unsigned char data, unsigned long port);
-void microdev_pci_outw(unsigned short data, unsigned long port);
-void microdev_pci_outl(unsigned long data, unsigned long port);
-#endif
-
#endif /* __ASM_SH_MICRODEV_H */
diff --git a/arch/sh/include/mach-common/mach/snapgear.h b/arch/sh/include/mach-common/mach/secureedge5410.h
index 042d95f51c4..3653b9a4bac 100644
--- a/arch/sh/include/mach-common/mach/snapgear.h
+++ b/arch/sh/include/mach-common/mach/secureedge5410.h
@@ -12,30 +12,9 @@
#ifndef _ASM_SH_IO_SNAPGEAR_H
#define _ASM_SH_IO_SNAPGEAR_H
-#if defined(CONFIG_CPU_SH4)
-/*
- * The external interrupt lines, these take up ints 0 - 15 inclusive
- * depending on the priority for the interrupt. In fact the priority
- * is the interrupt :-)
- */
-
-#define IRL0_IRQ 2
-#define IRL0_PRIORITY 13
-
-#define IRL1_IRQ 5
-#define IRL1_PRIORITY 10
-
-#define IRL2_IRQ 8
-#define IRL2_PRIORITY 7
-
-#define IRL3_IRQ 11
-#define IRL3_PRIORITY 4
-#endif
-
#define __IO_PREFIX snapgear
#include <asm/io_generic.h>
-#ifdef CONFIG_SH_SECUREEDGE5410
/*
* We need to remember what was written to the ioport as some bits
* are shared with other functions and you cannot read back what was
@@ -66,6 +45,5 @@ extern unsigned short secureedge5410_ioport;
((secureedge5410_ioport & ~(mask)) | ((val) & (mask)))))
#define SECUREEDGE_READ_IOPORT() \
((*SECUREEDGE_IOPORT_ADDR&0x0817) | (secureedge5410_ioport&~0x0817))
-#endif
#endif /* _ASM_SH_IO_SNAPGEAR_H */
diff --git a/arch/sh/include/mach-common/mach/systemh7751.h b/arch/sh/include/mach-common/mach/systemh7751.h
deleted file mode 100644
index 4161122c84e..00000000000
--- a/arch/sh/include/mach-common/mach/systemh7751.h
+++ /dev/null
@@ -1,71 +0,0 @@
-#ifndef __ASM_SH_SYSTEMH_7751SYSTEMH_H
-#define __ASM_SH_SYSTEMH_7751SYSTEMH_H
-
-/*
- * linux/include/asm-sh/systemh/7751systemh.h
- *
- * Copyright (C) 2000 Kazumoto Kojima
- *
- * Hitachi SystemH support
-
- * Modified for 7751 SystemH by
- * Jonathan Short, 2002.
- */
-
-/* Box specific addresses. */
-
-#define PA_ROM 0x00000000 /* EPROM */
-#define PA_ROM_SIZE 0x00400000 /* EPROM size 4M byte */
-#define PA_FROM 0x01000000 /* EPROM */
-#define PA_FROM_SIZE 0x00400000 /* EPROM size 4M byte */
-#define PA_EXT1 0x04000000
-#define PA_EXT1_SIZE 0x04000000
-#define PA_EXT2 0x08000000
-#define PA_EXT2_SIZE 0x04000000
-#define PA_SDRAM 0x0c000000
-#define PA_SDRAM_SIZE 0x04000000
-
-#define PA_EXT4 0x12000000
-#define PA_EXT4_SIZE 0x02000000
-#define PA_EXT5 0x14000000
-#define PA_EXT5_SIZE 0x04000000
-#define PA_PCIC 0x18000000 /* MR-SHPC-01 PCMCIA */
-
-#define PA_DIPSW0 0xb9000000 /* Dip switch 5,6 */
-#define PA_DIPSW1 0xb9000002 /* Dip switch 7,8 */
-#define PA_LED 0xba000000 /* LED */
-#define PA_BCR 0xbb000000 /* FPGA on the MS7751SE01 */
-
-#define PA_MRSHPC 0xb83fffe0 /* MR-SHPC-01 PCMCIA controller */
-#define PA_MRSHPC_MW1 0xb8400000 /* MR-SHPC-01 memory window base */
-#define PA_MRSHPC_MW2 0xb8500000 /* MR-SHPC-01 attribute window base */
-#define PA_MRSHPC_IO 0xb8600000 /* MR-SHPC-01 I/O window base */
-#define MRSHPC_MODE (PA_MRSHPC + 4)
-#define MRSHPC_OPTION (PA_MRSHPC + 6)
-#define MRSHPC_CSR (PA_MRSHPC + 8)
-#define MRSHPC_ISR (PA_MRSHPC + 10)
-#define MRSHPC_ICR (PA_MRSHPC + 12)
-#define MRSHPC_CPWCR (PA_MRSHPC + 14)
-#define MRSHPC_MW0CR1 (PA_MRSHPC + 16)
-#define MRSHPC_MW1CR1 (PA_MRSHPC + 18)
-#define MRSHPC_IOWCR1 (PA_MRSHPC + 20)
-#define MRSHPC_MW0CR2 (PA_MRSHPC + 22)
-#define MRSHPC_MW1CR2 (PA_MRSHPC + 24)
-#define MRSHPC_IOWCR2 (PA_MRSHPC + 26)
-#define MRSHPC_CDCR (PA_MRSHPC + 28)
-#define MRSHPC_PCIC_INFO (PA_MRSHPC + 30)
-
-#define BCR_ILCRA (PA_BCR + 0)
-#define BCR_ILCRB (PA_BCR + 2)
-#define BCR_ILCRC (PA_BCR + 4)
-#define BCR_ILCRD (PA_BCR + 6)
-#define BCR_ILCRE (PA_BCR + 8)
-#define BCR_ILCRF (PA_BCR + 10)
-#define BCR_ILCRG (PA_BCR + 12)
-
-#define IRQ_79C973 13
-
-#define __IO_PREFIX sh7751systemh
-#include <asm/io_generic.h>
-
-#endif /* __ASM_SH_SYSTEMH_7751SYSTEMH_H */
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 97661061ff2..fac742e514e 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -340,6 +340,8 @@ asmlinkage void __cpuinit cpu_init(void)
*/
current_cpu_data.asid_cache = NO_CONTEXT;
+ current_cpu_data.phys_bits = __in_29bit_mode() ? 29 : 32;
+
speculative_execution_init();
expmask_init();
diff --git a/arch/sh/kernel/cpu/irq/imask.c b/arch/sh/kernel/cpu/irq/imask.c
index a351ed84eec..32c825c9488 100644
--- a/arch/sh/kernel/cpu/irq/imask.c
+++ b/arch/sh/kernel/cpu/irq/imask.c
@@ -51,16 +51,20 @@ static inline void set_interrupt_registers(int ip)
: "t");
}
-static void mask_imask_irq(unsigned int irq)
+static void mask_imask_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
+
clear_bit(irq, imask_mask);
if (interrupt_priority < IMASK_PRIORITY - irq)
interrupt_priority = IMASK_PRIORITY - irq;
set_interrupt_registers(interrupt_priority);
}
-static void unmask_imask_irq(unsigned int irq)
+static void unmask_imask_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
+
set_bit(irq, imask_mask);
interrupt_priority = IMASK_PRIORITY -
find_first_zero_bit(imask_mask, IMASK_PRIORITY);
@@ -69,9 +73,9 @@ static void unmask_imask_irq(unsigned int irq)
static struct irq_chip imask_irq_chip = {
.name = "SR.IMASK",
- .mask = mask_imask_irq,
- .unmask = unmask_imask_irq,
- .mask_ack = mask_imask_irq,
+ .irq_mask = mask_imask_irq,
+ .irq_unmask = unmask_imask_irq,
+ .irq_mask_ack = mask_imask_irq,
};
void make_imask_irq(unsigned int irq)
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
index 96a23958394..5af48f8357e 100644
--- a/arch/sh/kernel/cpu/irq/intc-sh5.c
+++ b/arch/sh/kernel/cpu/irq/intc-sh5.c
@@ -76,39 +76,11 @@ int intc_evt_to_irq[(0xE20/0x20)+1] = {
};
static unsigned long intc_virt;
-
-static unsigned int startup_intc_irq(unsigned int irq);
-static void shutdown_intc_irq(unsigned int irq);
-static void enable_intc_irq(unsigned int irq);
-static void disable_intc_irq(unsigned int irq);
-static void mask_and_ack_intc(unsigned int);
-static void end_intc_irq(unsigned int irq);
-
-static struct irq_chip intc_irq_type = {
- .name = "INTC",
- .startup = startup_intc_irq,
- .shutdown = shutdown_intc_irq,
- .enable = enable_intc_irq,
- .disable = disable_intc_irq,
- .ack = mask_and_ack_intc,
- .end = end_intc_irq
-};
-
static int irlm; /* IRL mode */
-static unsigned int startup_intc_irq(unsigned int irq)
-{
- enable_intc_irq(irq);
- return 0; /* never anything pending */
-}
-
-static void shutdown_intc_irq(unsigned int irq)
-{
- disable_intc_irq(irq);
-}
-
-static void enable_intc_irq(unsigned int irq)
+static void enable_intc_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned long reg;
unsigned long bitmask;
@@ -126,8 +98,9 @@ static void enable_intc_irq(unsigned int irq)
__raw_writel(bitmask, reg);
}
-static void disable_intc_irq(unsigned int irq)
+static void disable_intc_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned long reg;
unsigned long bitmask;
@@ -142,15 +115,11 @@ static void disable_intc_irq(unsigned int irq)
__raw_writel(bitmask, reg);
}
-static void mask_and_ack_intc(unsigned int irq)
-{
- disable_intc_irq(irq);
-}
-
-static void end_intc_irq(unsigned int irq)
-{
- enable_intc_irq(irq);
-}
+static struct irq_chip intc_irq_type = {
+ .name = "INTC",
+ .irq_enable = enable_intc_irq,
+ .irq_disable = disable_intc_irq,
+};
void __init plat_irq_setup(void)
{
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
index 9282d965a1b..7516c35ee51 100644
--- a/arch/sh/kernel/cpu/irq/ipr.c
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -24,25 +24,25 @@
#include <linux/module.h>
#include <linux/topology.h>
-static inline struct ipr_desc *get_ipr_desc(unsigned int irq)
+static inline struct ipr_desc *get_ipr_desc(struct irq_data *data)
{
- struct irq_chip *chip = get_irq_chip(irq);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
return container_of(chip, struct ipr_desc, chip);
}
-static void disable_ipr_irq(unsigned int irq)
+static void disable_ipr_irq(struct irq_data *data)
{
- struct ipr_data *p = get_irq_chip_data(irq);
- unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx];
+ struct ipr_data *p = irq_data_get_irq_chip_data(data);
+ unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx];
/* Set the priority in IPR to 0 */
__raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr);
(void)__raw_readw(addr); /* Read back to flush write posting */
}
-static void enable_ipr_irq(unsigned int irq)
+static void enable_ipr_irq(struct irq_data *data)
{
- struct ipr_data *p = get_irq_chip_data(irq);
- unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx];
+ struct ipr_data *p = irq_data_get_irq_chip_data(data);
+ unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx];
/* Set priority in IPR back to original value */
__raw_writew(__raw_readw(addr) | (p->priority << p->shift), addr);
}
@@ -56,19 +56,18 @@ void register_ipr_controller(struct ipr_desc *desc)
{
int i;
- desc->chip.mask = disable_ipr_irq;
- desc->chip.unmask = enable_ipr_irq;
- desc->chip.mask_ack = disable_ipr_irq;
+ desc->chip.irq_mask = disable_ipr_irq;
+ desc->chip.irq_unmask = enable_ipr_irq;
for (i = 0; i < desc->nr_irqs; i++) {
struct ipr_data *p = desc->ipr_data + i;
- struct irq_desc *irq_desc;
+ int res;
BUG_ON(p->ipr_idx >= desc->nr_offsets);
BUG_ON(!desc->ipr_offsets[p->ipr_idx]);
- irq_desc = irq_to_desc_alloc_node(p->irq, numa_node_id());
- if (unlikely(!irq_desc)) {
+ res = irq_alloc_desc_at(p->irq, numa_node_id());
+ if (unlikely(res != p->irq && res != -EEXIST)) {
printk(KERN_INFO "can not get irq_desc for %d\n",
p->irq);
continue;
@@ -78,7 +77,7 @@ void register_ipr_controller(struct ipr_desc *desc)
set_irq_chip_and_handler_name(p->irq, &desc->chip,
handle_level_irq, "level");
set_irq_chip_data(p->irq, p);
- disable_ipr_irq(p->irq);
+ disable_ipr_irq(irq_get_irq_data(p->irq));
}
}
EXPORT_SYMBOL(register_ipr_controller);
diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c
index 7f9ecc9c2d0..dbf3b4bb71f 100644
--- a/arch/sh/kernel/cpu/sh4/perf_event.c
+++ b/arch/sh/kernel/cpu/sh4/perf_event.c
@@ -225,7 +225,7 @@ static void sh7750_pmu_enable_all(void)
}
static struct sh_pmu sh7750_pmu = {
- .name = "SH7750",
+ .name = "sh7750",
.num_events = 2,
.event_map = sh7750_event_map,
.max_events = ARRAY_SIZE(sh7750_general_events),
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index 2d9700c6b53..0fe2e9329cb 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -48,7 +48,7 @@ static struct clk r_clk = {
* Default rate for the root input clock, reset this with clk_set_rate()
* from the platform code.
*/
-struct clk extal_clk = {
+static struct clk extal_clk = {
.rate = 33333333,
};
@@ -111,7 +111,7 @@ static struct clk div3_clk = {
.parent = &pll_clk,
};
-struct clk *main_clks[] = {
+static struct clk *main_clks[] = {
&r_clk,
&extal_clk,
&fll_clk,
@@ -156,7 +156,7 @@ struct clk div4_clks[DIV4_NR] = {
enum { DIV6_V, DIV6_FA, DIV6_FB, DIV6_I, DIV6_S, DIV6_NR };
-struct clk div6_clks[DIV6_NR] = {
+static struct clk div6_clks[DIV6_NR] = {
[DIV6_V] = SH_CLK_DIV6(&div3_clk, VCLKCR, 0),
[DIV6_FA] = SH_CLK_DIV6(&div3_clk, FCLKACR, 0),
[DIV6_FB] = SH_CLK_DIV6(&div3_clk, FCLKBCR, 0),
diff --git a/arch/sh/kernel/cpu/sh4a/perf_event.c b/arch/sh/kernel/cpu/sh4a/perf_event.c
index b8b873d8d6b..58027652573 100644
--- a/arch/sh/kernel/cpu/sh4a/perf_event.c
+++ b/arch/sh/kernel/cpu/sh4a/perf_event.c
@@ -259,7 +259,7 @@ static void sh4a_pmu_enable_all(void)
}
static struct sh_pmu sh4a_pmu = {
- .name = "SH-4A",
+ .name = "sh4a",
.num_events = 2,
.event_map = sh4a_event_map,
.max_events = ARRAY_SIZE(sh4a_general_events),
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 9dc447db8a4..68ecbe6c881 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -56,6 +56,8 @@ int show_interrupts(struct seq_file *p, void *v)
int i = *(loff_t *)v, j, prec;
struct irqaction *action;
struct irq_desc *desc;
+ struct irq_data *data;
+ struct irq_chip *chip;
if (i > nr_irqs)
return 0;
@@ -77,6 +79,9 @@ int show_interrupts(struct seq_file *p, void *v)
if (!desc)
return 0;
+ data = irq_get_irq_data(i);
+ chip = irq_data_get_irq_chip(data);
+
raw_spin_lock_irqsave(&desc->lock, flags);
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
@@ -87,7 +92,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%*d: ", prec, i);
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
- seq_printf(p, " %14s", desc->chip->name);
+ seq_printf(p, " %14s", chip->name);
seq_printf(p, "-%-8s", desc->name);
if (action) {
@@ -273,12 +278,6 @@ void __init init_IRQ(void)
{
plat_irq_setup();
- /*
- * Pin any of the legacy IRQ vectors that haven't already been
- * grabbed by the platform
- */
- reserve_irq_legacy();
-
/* Perform the machine specific initialisation */
if (sh_mv.mv_init_irq)
sh_mv.mv_init_irq();
@@ -297,13 +296,16 @@ int __init arch_probe_nr_irqs(void)
#endif
#ifdef CONFIG_HOTPLUG_CPU
-static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
+static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
{
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+
printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
- irq, desc->node, cpu);
+ irq, data->node, cpu);
raw_spin_lock_irq(&desc->lock);
- desc->chip->set_affinity(irq, cpumask_of(cpu));
+ chip->irq_set_affinity(data, cpumask_of(cpu), false);
raw_spin_unlock_irq(&desc->lock);
}
@@ -314,24 +316,25 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
*/
void migrate_irqs(void)
{
- struct irq_desc *desc;
unsigned int irq, cpu = smp_processor_id();
- for_each_irq_desc(irq, desc) {
- if (desc->node == cpu) {
- unsigned int newcpu = cpumask_any_and(desc->affinity,
+ for_each_active_irq(irq) {
+ struct irq_data *data = irq_get_irq_data(irq);
+
+ if (data->node == cpu) {
+ unsigned int newcpu = cpumask_any_and(data->affinity,
cpu_online_mask);
if (newcpu >= nr_cpu_ids) {
if (printk_ratelimit())
printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
irq, cpu);
- cpumask_setall(desc->affinity);
- newcpu = cpumask_any_and(desc->affinity,
+ cpumask_setall(data->affinity);
+ newcpu = cpumask_any_and(data->affinity,
cpu_online_mask);
}
- route_irq(desc, irq, newcpu);
+ route_irq(data, irq, newcpu);
}
}
}
diff --git a/arch/sh/kernel/irq_64.c b/arch/sh/kernel/irq_64.c
index 32365ba0e03..8fc05b997b6 100644
--- a/arch/sh/kernel/irq_64.c
+++ b/arch/sh/kernel/irq_64.c
@@ -11,17 +11,17 @@
#include <linux/module.h>
#include <cpu/registers.h>
-void notrace raw_local_irq_restore(unsigned long flags)
+void notrace arch_local_irq_restore(unsigned long flags)
{
unsigned long long __dummy;
- if (flags == RAW_IRQ_DISABLED) {
+ if (flags == ARCH_IRQ_DISABLED) {
__asm__ __volatile__ (
"getcon " __SR ", %0\n\t"
"or %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy)
- : "r" (RAW_IRQ_DISABLED)
+ : "r" (ARCH_IRQ_DISABLED)
);
} else {
__asm__ __volatile__ (
@@ -29,13 +29,13 @@ void notrace raw_local_irq_restore(unsigned long flags)
"and %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy)
- : "r" (~RAW_IRQ_DISABLED)
+ : "r" (~ARCH_IRQ_DISABLED)
);
}
}
-EXPORT_SYMBOL(raw_local_irq_restore);
+EXPORT_SYMBOL(arch_local_irq_restore);
-unsigned long notrace __raw_local_save_flags(void)
+unsigned long notrace arch_local_save_flags(void)
{
unsigned long flags;
@@ -43,9 +43,9 @@ unsigned long notrace __raw_local_save_flags(void)
"getcon " __SR ", %0\n\t"
"and %0, %1, %0"
: "=&r" (flags)
- : "r" (RAW_IRQ_DISABLED)
+ : "r" (ARCH_IRQ_DISABLED)
);
return flags;
}
-EXPORT_SYMBOL(__raw_local_save_flags);
+EXPORT_SYMBOL(arch_local_save_flags);
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 2cd42b58cb2..90a15d29fee 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -365,9 +365,9 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
return &user_sh_native_view;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
- struct user * dummy = NULL;
unsigned long __user *datap = (unsigned long __user *)data;
int ret;
@@ -383,17 +383,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
if (addr < sizeof(struct pt_regs))
tmp = get_stack_long(child, addr);
- else if (addr >= (long) &dummy->fpu &&
- addr < (long) &dummy->u_fpvalid) {
+ else if (addr >= offsetof(struct user, fpu) &&
+ addr < offsetof(struct user, u_fpvalid)) {
if (!tsk_used_math(child)) {
- if (addr == (long)&dummy->fpu.fpscr)
+ if (addr == offsetof(struct user, fpu.fpscr))
tmp = FPSCR_INIT;
else
tmp = 0;
- } else
- tmp = ((long *)child->thread.xstate)
- [(addr - (long)&dummy->fpu) >> 2];
- } else if (addr == (long) &dummy->u_fpvalid)
+ } else {
+ unsigned long index;
+ index = addr - offsetof(struct user, fpu);
+ tmp = ((unsigned long *)child->thread.xstate)
+ [index >> 2];
+ }
+ } else if (addr == offsetof(struct user, u_fpvalid))
tmp = !!tsk_used_math(child);
else if (addr == PT_TEXT_ADDR)
tmp = child->mm->start_code;
@@ -417,13 +420,15 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
if (addr < sizeof(struct pt_regs))
ret = put_stack_long(child, addr, data);
- else if (addr >= (long) &dummy->fpu &&
- addr < (long) &dummy->u_fpvalid) {
+ else if (addr >= offsetof(struct user, fpu) &&
+ addr < offsetof(struct user, u_fpvalid)) {
+ unsigned long index;
+ index = addr - offsetof(struct user, fpu);
set_stopped_child_used_math(child);
- ((long *)child->thread.xstate)
- [(addr - (long)&dummy->fpu) >> 2] = data;
+ ((unsigned long *)child->thread.xstate)
+ [index >> 2] = data;
ret = 0;
- } else if (addr == (long) &dummy->u_fpvalid) {
+ } else if (addr == offsetof(struct user, u_fpvalid)) {
conditional_stopped_child_used_math(data, child);
ret = 0;
}
@@ -433,35 +438,35 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (void __user *)data);
+ datap);
case PTRACE_SETREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (const void __user *)data);
+ datap);
#ifdef CONFIG_SH_FPU
case PTRACE_GETFPREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
- (void __user *)data);
+ datap);
case PTRACE_SETFPREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
- (const void __user *)data);
+ datap);
#endif
#ifdef CONFIG_SH_DSP
case PTRACE_GETDSPREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_DSP,
0, sizeof(struct pt_dspregs),
- (void __user *)data);
+ datap);
case PTRACE_SETDSPREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_DSP,
0, sizeof(struct pt_dspregs),
- (const void __user *)data);
+ datap);
#endif
default:
ret = ptrace_request(child, request, addr, data);
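The PEEKUSR/POKEUSR rework above replaces arithmetic on a NULL "struct user *dummy" (formally undefined behaviour) with offsetof(); both spell the same offset, as this illustrative comparison shows:

	struct user *dummy = NULL;

	long old_off = (long)&dummy->fpu;		/* old style: NULL-based, UB  */
	size_t new_off = offsetof(struct user, fpu);	/* new style: well defined    */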
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index e0fb065914a..4436eacddb1 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -383,9 +383,11 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
return &user_sh64_native_view;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
@@ -400,13 +402,15 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp = get_stack_long(child, addr);
else if ((addr >= offsetof(struct user, fpu)) &&
(addr < offsetof(struct user, u_fpvalid))) {
- tmp = get_fpu_long(child, addr - offsetof(struct user, fpu));
+ unsigned long index;
+ index = addr - offsetof(struct user, fpu);
+ tmp = get_fpu_long(child, index);
} else if (addr == offsetof(struct user, u_fpvalid)) {
tmp = !!tsk_used_math(child);
} else {
break;
}
- ret = put_user(tmp, (unsigned long *)data);
+ ret = put_user(tmp, datap);
break;
}
@@ -437,7 +441,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
else if ((addr >= offsetof(struct user, fpu)) &&
(addr < offsetof(struct user, u_fpvalid))) {
- ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data);
+ unsigned long index;
+ index = addr - offsetof(struct user, fpu);
+ ret = put_fpu_long(child, index, data);
}
break;
@@ -445,23 +451,23 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return copy_regset_to_user(child, &user_sh64_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (void __user *)data);
+ datap);
case PTRACE_SETREGS:
return copy_regset_from_user(child, &user_sh64_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
- (const void __user *)data);
+ datap);
#ifdef CONFIG_SH_FPU
case PTRACE_GETFPREGS:
return copy_regset_to_user(child, &user_sh64_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
- (void __user *)data);
+ datap);
case PTRACE_SETFPREGS:
return copy_regset_from_user(child, &user_sh64_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
- (const void __user *)data);
+ datap);
#endif
default:
ret = ptrace_request(child, request, addr, data);
@@ -471,7 +477,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return ret;
}
-asmlinkage int sh64_ptrace(long request, long pid, long addr, long data)
+asmlinkage int sh64_ptrace(long request, long pid,
+ unsigned long addr, unsigned long data)
{
#define WPC_DBRMODE 0x0d104008
static unsigned long first_call;
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 4e278467f76..d6b018c7ebd 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -41,6 +41,7 @@
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
+#include <asm/sparsemem.h>
/*
* Initialize loops_per_jiffy as 10000000 (1000MIPS).
@@ -52,6 +53,7 @@ struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = {
.type = CPU_SH_NONE,
.family = CPU_FAMILY_UNKNOWN,
.loops_per_jiffy = 10000000,
+ .phys_bits = MAX_PHYSMEM_BITS,
},
};
EXPORT_SYMBOL(cpu_data);
@@ -432,6 +434,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (c->flags & CPU_HAS_L2_CACHE)
show_cacheinfo(m, "scache", c->scache);
+ seq_printf(m, "address sizes\t: %u bits physical\n", c->phys_bits);
+
seq_printf(m, "bogomips\t: %lu.%02lu\n",
c->loops_per_jiffy/(500000/HZ),
(c->loops_per_jiffy/(5000/HZ)) % 100);
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 09370392aff..c3e61b36649 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -79,7 +79,7 @@ config 29BIT
config 32BIT
bool
- default y if CPU_SH5
+ default y if CPU_SH5 || !MMU
config PMB
bool "Support 32-bit physical addressing through PMB"
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index ab89ea4f941..150aa326aff 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -15,7 +15,7 @@ cacheops-$(CONFIG_CPU_SHX3) += cache-shx3.o
obj-y += $(cacheops-y)
mmu-y := nommu.o extable_32.o
-mmu-$(CONFIG_MMU) := extable_$(BITS).o fault_$(BITS).o \
+mmu-$(CONFIG_MMU) := extable_$(BITS).o fault_$(BITS).o gup.o \
ioremap.o kmap.o pgtable.o tlbflush_$(BITS).o
obj-y += $(mmu-y)
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 03879328699..40733a95240 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -79,21 +79,20 @@ void dma_generic_free_coherent(struct device *dev, size_t size,
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
-#if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB)
- void *p1addr = vaddr;
-#else
- void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
-#endif
+ void *addr;
+
+ addr = __in_29bit_mode() ?
+ (void *)P1SEGADDR((unsigned long)vaddr) : vaddr;
switch (direction) {
case DMA_FROM_DEVICE: /* invalidate only */
- __flush_invalidate_region(p1addr, size);
+ __flush_invalidate_region(addr, size);
break;
case DMA_TO_DEVICE: /* writeback only */
- __flush_wback_region(p1addr, size);
+ __flush_wback_region(addr, size);
break;
case DMA_BIDIRECTIONAL: /* writeback and invalidate */
- __flush_purge_region(p1addr, size);
+ __flush_purge_region(addr, size);
break;
default:
BUG();
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
new file mode 100644
index 00000000000..bf8daf9d9c9
--- /dev/null
+++ b/arch/sh/mm/gup.c
@@ -0,0 +1,273 @@
+/*
+ * Lockless get_user_pages_fast for SuperH
+ *
+ * Copyright (C) 2009 - 2010 Paul Mundt
+ *
+ * Cloned from the x86 and PowerPC versions, by:
+ *
+ * Copyright (C) 2008 Nick Piggin
+ * Copyright (C) 2008 Novell Inc.
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmstat.h>
+#include <linux/highmem.h>
+#include <asm/pgtable.h>
+
+static inline pte_t gup_get_pte(pte_t *ptep)
+{
+#ifndef CONFIG_X2TLB
+ return ACCESS_ONCE(*ptep);
+#else
+ /*
+ * With get_user_pages_fast, we walk down the pagetables without
+ * taking any locks. For this we would like to load the pointers
+ * atomically, but that is not possible with 64-bit PTEs. What
+ * we do have is the guarantee that a pte will only either go
+ * from not present to present, or present to not present or both
+ * -- it will not switch to a completely different present page
+ * without a TLB flush in between; something that we are blocking
+ * by holding interrupts off.
+ *
+ * Setting ptes from not present to present goes:
+ * ptep->pte_high = h;
+ * smp_wmb();
+ * ptep->pte_low = l;
+ *
+ * And present to not present goes:
+ * ptep->pte_low = 0;
+ * smp_wmb();
+ * ptep->pte_high = 0;
+ *
+ * We must ensure here that the load of pte_low sees l iff pte_high
+ * sees h. We load pte_high *after* loading pte_low, which ensures we
+ * don't see an older value of pte_high. *Then* we recheck pte_low,
+ * which ensures that we haven't picked up a changed pte high. We might
+ * have got rubbish values from pte_low and pte_high, but we are
+ * guaranteed that pte_low will not have the present bit set *unless*
+ * it is 'l'. And get_user_pages_fast only operates on present ptes, so
+ * we're safe.
+ *
+ * gup_get_pte should not be used or copied outside gup.c without being
+ * very careful -- it does not atomically load the pte or anything that
+ * is likely to be useful for you.
+ */
+ pte_t pte;
+
+retry:
+ pte.pte_low = ptep->pte_low;
+ smp_rmb();
+ pte.pte_high = ptep->pte_high;
+ smp_rmb();
+ if (unlikely(pte.pte_low != ptep->pte_low))
+ goto retry;
+
+ return pte;
+#endif
+}
+
+/*
+ * The performance critical leaf functions are made noinline otherwise gcc
+ * inlines everything into a single function which results in too much
+ * register pressure.
+ */
+static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ u64 mask, result;
+ pte_t *ptep;
+
+#ifdef CONFIG_X2TLB
+ result = _PAGE_PRESENT | _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
+ if (write)
+ result |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
+#elif defined(CONFIG_SUPERH64)
+ result = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
+ if (write)
+ result |= _PAGE_WRITE;
+#else
+ result = _PAGE_PRESENT | _PAGE_USER;
+ if (write)
+ result |= _PAGE_RW;
+#endif
+
+ mask = result | _PAGE_SPECIAL;
+
+ ptep = pte_offset_map(&pmd, addr);
+ do {
+ pte_t pte = gup_get_pte(ptep);
+ struct page *page;
+
+ if ((pte_val(pte) & mask) != result) {
+ pte_unmap(ptep);
+ return 0;
+ }
+ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+ page = pte_page(pte);
+ get_page(page);
+ pages[*nr] = page;
+ (*nr)++;
+
+ } while (ptep++, addr += PAGE_SIZE, addr != end);
+ pte_unmap(ptep - 1);
+
+ return 1;
+}
+
+static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ unsigned long next;
+ pmd_t *pmdp;
+
+ pmdp = pmd_offset(&pud, addr);
+ do {
+ pmd_t pmd = *pmdp;
+
+ next = pmd_addr_end(addr, end);
+ if (pmd_none(pmd))
+ return 0;
+ if (!gup_pte_range(pmd, addr, next, write, pages, nr))
+ return 0;
+ } while (pmdp++, addr = next, addr != end);
+
+ return 1;
+}
+
+static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ unsigned long next;
+ pud_t *pudp;
+
+ pudp = pud_offset(&pgd, addr);
+ do {
+ pud_t pud = *pudp;
+
+ next = pud_addr_end(addr, end);
+ if (pud_none(pud))
+ return 0;
+ if (!gup_pmd_range(pud, addr, next, write, pages, nr))
+ return 0;
+ } while (pudp++, addr = next, addr != end);
+
+ return 1;
+}
+
+/*
+ * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
+ * back to the regular GUP.
+ */
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr, len, end;
+ unsigned long next;
+ unsigned long flags;
+ pgd_t *pgdp;
+ int nr = 0;
+
+ start &= PAGE_MASK;
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+ if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+ (void __user *)start, len)))
+ return 0;
+
+ /*
+ * This doesn't prevent pagetable teardown, but does prevent
+ * the pagetables and pages from being freed.
+ */
+ local_irq_save(flags);
+ pgdp = pgd_offset(mm, addr);
+ do {
+ pgd_t pgd = *pgdp;
+
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(pgd))
+ break;
+ if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+ break;
+ } while (pgdp++, addr = next, addr != end);
+ local_irq_restore(flags);
+
+ return nr;
+}
+
+/**
+ * get_user_pages_fast() - pin user pages in memory
+ * @start: starting user address
+ * @nr_pages: number of pages from start to pin
+ * @write: whether pages will be written to
+ * @pages: array that receives pointers to the pages pinned.
+ * Should be at least nr_pages long.
+ *
+ * Attempt to pin user pages in memory without taking mm->mmap_sem.
+ * If not successful, it will fall back to taking the lock and
+ * calling get_user_pages().
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno.
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr, len, end;
+ unsigned long next;
+ pgd_t *pgdp;
+ int nr = 0;
+
+ start &= PAGE_MASK;
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+
+ end = start + len;
+ if (end < start)
+ goto slow_irqon;
+
+ local_irq_disable();
+ pgdp = pgd_offset(mm, addr);
+ do {
+ pgd_t pgd = *pgdp;
+
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(pgd))
+ goto slow;
+ if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+ goto slow;
+ } while (pgdp++, addr = next, addr != end);
+ local_irq_enable();
+
+ VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
+ return nr;
+
+ {
+ int ret;
+
+slow:
+ local_irq_enable();
+slow_irqon:
+ /* Try to get the remaining pages with get_user_pages */
+ start += nr << PAGE_SHIFT;
+ pages += nr;
+
+ down_read(&mm->mmap_sem);
+ ret = get_user_pages(current, mm, start,
+ (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
+ up_read(&mm->mmap_sem);
+
+ /* Have to be a bit careful with return values */
+ if (nr > 0) {
+ if (ret < 0)
+ ret = nr;
+ else
+ ret += nr;
+ }
+
+ return ret;
+ }
+}
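For reference, a minimal hypothetical caller of the new fast-GUP path; "uaddr" is a user virtual address supplied elsewhere and is not defined by this patch:

	struct page *page;
	int ret;

	ret = get_user_pages_fast(uaddr, 1, 1 /* write */, &page);
	if (ret == 1) {
		/* page is pinned here; drop the reference when done */
		put_page(page);
	}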
diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c
index 8a4eca551fc..a7767da815e 100644
--- a/arch/sh/mm/uncached.c
+++ b/arch/sh/mm/uncached.c
@@ -28,7 +28,7 @@ EXPORT_SYMBOL(virt_addr_uncached);
void __init uncached_init(void)
{
-#ifdef CONFIG_29BIT
+#if defined(CONFIG_29BIT) || !defined(CONFIG_MMU)
uncached_start = P2SEG;
#else
uncached_start = memory_end;
diff --git a/arch/sh/oprofile/Makefile b/arch/sh/oprofile/Makefile
index e85aae73e3d..ce3b119021e 100644
--- a/arch/sh/oprofile/Makefile
+++ b/arch/sh/oprofile/Makefile
@@ -1,5 +1,7 @@
obj-$(CONFIG_OPROFILE) += oprofile.o
+CFLAGS_common.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
+
DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprof.o cpu_buffer.o buffer_sync.o \
event_buffer.o oprofile_files.o \
diff --git a/arch/sh/oprofile/backtrace.c b/arch/sh/oprofile/backtrace.c
index 2bc74de23f0..37f3a75ea6c 100644
--- a/arch/sh/oprofile/backtrace.c
+++ b/arch/sh/oprofile/backtrace.c
@@ -91,7 +91,7 @@ void sh_backtrace(struct pt_regs * const regs, unsigned int depth)
if (depth > backtrace_limit)
depth = backtrace_limit;
- stackaddr = (unsigned long *)regs->regs[15];
+ stackaddr = (unsigned long *)kernel_stack_pointer(regs);
if (!user_mode(regs)) {
if (depth)
unwind_stack(NULL, regs, stackaddr,
diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c
index e10d89376f9..b4c2d2b946d 100644
--- a/arch/sh/oprofile/common.c
+++ b/arch/sh/oprofile/common.c
@@ -1,7 +1,7 @@
/*
* arch/sh/oprofile/init.c
*
- * Copyright (C) 2003 - 2008 Paul Mundt
+ * Copyright (C) 2003 - 2010 Paul Mundt
*
* Based on arch/mips/oprofile/common.c:
*
@@ -18,43 +18,46 @@
#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
+#include <linux/slab.h>
#include <asm/processor.h>
-#ifdef CONFIG_HW_PERF_EVENTS
extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
+#ifdef CONFIG_HW_PERF_EVENTS
+/*
+ * This will need to be reworked when multiple PMUs are supported.
+ */
+static char *sh_pmu_op_name;
+
char *op_name_from_perf_id(void)
{
- const char *pmu;
- char buf[20];
- int size;
-
- pmu = perf_pmu_name();
- if (!pmu)
- return NULL;
-
- size = snprintf(buf, sizeof(buf), "sh/%s", pmu);
- if (size > -1 && size < sizeof(buf))
- return buf;
-
- return NULL;
+ return sh_pmu_op_name;
}
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
ops->backtrace = sh_backtrace;
+ if (perf_num_counters() == 0)
+ return -ENODEV;
+
+ sh_pmu_op_name = kasprintf(GFP_KERNEL, "%s/%s",
+ UTS_MACHINE, perf_pmu_name());
+ if (unlikely(!sh_pmu_op_name))
+ return -ENOMEM;
+
return oprofile_perf_init(ops);
}
void __exit oprofile_arch_exit(void)
{
oprofile_perf_exit();
+ kfree(sh_pmu_op_name);
}
#else
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
- pr_info("oprofile: hardware counters not available\n");
+ ops->backtrace = sh_backtrace;
return -ENODEV;
}
void __exit oprofile_arch_exit(void) {}
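The op_name_from_perf_id() rework also fixes a latent bug: the old version returned "buf", an array on its own stack, which is dead storage as soon as the function returns. The new scheme allocates the name once at init time and frees it on exit; the same pattern in isolation, with made-up strings:

static char *op_name;

static int __init op_name_init(void)
{
	op_name = kasprintf(GFP_KERNEL, "%s/%s", "sh", "sh4a");
	return op_name ? 0 : -ENOMEM;
}

static void __exit op_name_exit(void)
{
	kfree(op_name);
}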
diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types
index 9f56eb97802..0e68465e7b5 100644
--- a/arch/sh/tools/mach-types
+++ b/arch/sh/tools/mach-types
@@ -26,7 +26,6 @@ HD64461 HD64461
7724SE SH_7724_SOLUTION_ENGINE
7751SE SH_7751_SOLUTION_ENGINE
7780SE SH_7780_SOLUTION_ENGINE
-7751SYSTEMH SH_7751_SYSTEMH
HP6XX SH_HP6XX
DREAMCAST SH_DREAMCAST
SNAPGEAR SH_SECUREEDGE5410
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 8e7bafc5dd0..45d9c87d083 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -1,9 +1,3 @@
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-
-mainmenu "Linux/SPARC Kernel Configuration"
-
config 64BIT
bool "64-bit kernel" if ARCH = "sparc"
default ARCH = "sparc64"
@@ -28,8 +22,6 @@ config SPARC
select RTC_CLASS
select RTC_DRV_M48T59
select HAVE_IRQ_WORK
- select HAVE_PERF_EVENTS
- select PERF_USE_VMALLOC
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select HAVE_ARCH_JUMP_LABEL
@@ -56,7 +48,6 @@ config SPARC64
select RTC_DRV_BQ4802
select RTC_DRV_SUN4V
select RTC_DRV_STARFIRE
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h
index 2889574608d..c2ced21c9dc 100644
--- a/arch/sparc/include/asm/io_32.h
+++ b/arch/sparc/include/asm/io_32.h
@@ -208,6 +208,21 @@ _memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
#define memset_io(d,c,sz) _memset_io(d,c,sz)
static inline void
+_sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
+ __kernel_size_t n)
+{
+ char *d = dst;
+
+ while (n--) {
+ char tmp = sbus_readb(src);
+ *d++ = tmp;
+ src++;
+ }
+}
+
+#define sbus_memcpy_fromio(d, s, sz) _sbus_memcpy_fromio(d, s, sz)
+
+static inline void
_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
{
char *d = dst;
@@ -222,6 +237,22 @@ _memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
static inline void
+_sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
+ __kernel_size_t n)
+{
+ const char *s = src;
+ volatile void __iomem *d = dst;
+
+ while (n--) {
+ char tmp = *s++;
+ sbus_writeb(tmp, d);
+ d++;
+ }
+}
+
+#define sbus_memcpy_toio(d, s, sz) _sbus_memcpy_toio(d, s, sz)
+
+static inline void
_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
{
const char *s = src;
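A minimal usage sketch of the new SBUS copy helpers; "regs" is a hypothetical __iomem mapping obtained elsewhere:

	u8 buf[16];

	sbus_memcpy_fromio(buf, regs, sizeof(buf));	/* device -> memory */
	sbus_memcpy_toio(regs, buf, sizeof(buf));	/* memory -> device */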
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 9517d063c79..9c8965415f0 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -419,6 +419,21 @@ _memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
#define memset_io(d,c,sz) _memset_io(d,c,sz)
static inline void
+_sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
+ __kernel_size_t n)
+{
+ char *d = dst;
+
+ while (n--) {
+ char tmp = sbus_readb(src);
+ *d++ = tmp;
+ src++;
+ }
+}
+
+#define sbus_memcpy_fromio(d, s, sz) _sbus_memcpy_fromio(d, s, sz)
+
+static inline void
_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
{
char *d = dst;
@@ -433,6 +448,22 @@ _memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
static inline void
+_sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
+ __kernel_size_t n)
+{
+ const char *s = src;
+ volatile void __iomem *d = dst;
+
+ while (n--) {
+ char tmp = *s++;
+ sbus_writeb(tmp, d);
+ d++;
+ }
+}
+
+#define sbus_memcpy_toio(d, s, sz) _sbus_memcpy_toio(d, s, sz)
+
+static inline void
_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
{
const char *s = src;
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index 62e66d7b2fb..427d4684e0d 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -4,7 +4,6 @@
#ifdef __KERNEL__
#include <linux/types.h>
-#include <asm/system.h>
#define JUMP_LABEL_NOP_SIZE 4
@@ -14,6 +13,7 @@
"nop\n\t" \
"nop\n\t" \
".pushsection __jump_table, \"a\"\n\t"\
+ ".align 4\n\t" \
".word 1b, %l[" #label "], %c0\n\t" \
".popsection \n\t" \
: : "i" (key) : : label);\
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 5312782f0b5..948b686ec08 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -38,7 +38,7 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
* types on sparc64. However, it requires that the device
* can drive enough of the 64 bits.
*/
-#define PCI64_REQUIRED_MASK (~(dma64_addr_t)0)
+#define PCI64_REQUIRED_MASK (~(u64)0)
#define PCI64_ADDR_BASE 0xfffc000000000000UL
#ifdef CONFIG_PCI
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index 0116d8d10de..5ad6e5c5dbb 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -365,7 +365,7 @@ static int request_fast_irq(unsigned int irq,
unsigned long flags;
unsigned int cpu_irq;
int ret;
-#ifdef CONFIG_SMP
+#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
struct tt_entry *trap_table;
extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3;
#endif
@@ -425,7 +425,7 @@ static int request_fast_irq(unsigned int irq,
table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
INSTANTIATE(sparc_ttable)
-#ifdef CONFIG_SMP
+#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
trap_table = &trapbase_cpu1; INSTANTIATE(trap_table)
trap_table = &trapbase_cpu2; INSTANTIATE(trap_table)
trap_table = &trapbase_cpu3; INSTANTIATE(trap_table)
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index e1656fc41cc..16582d85368 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -12,7 +12,6 @@
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
@@ -56,8 +55,8 @@ void __init leon_configure_cache_smp(void);
static inline unsigned long do_swap(volatile unsigned long *ptr,
unsigned long val)
{
- __asm__ __volatile__("swapa [%1] %2, %0\n\t" : "=&r"(val)
- : "r"(ptr), "i"(ASI_LEON_DCACHE_MISS)
+ __asm__ __volatile__("swapa [%2] %3, %0\n\t" : "=&r"(val)
+ : "0"(val), "r"(ptr), "i"(ASI_LEON_DCACHE_MISS)
: "memory");
return val;
}
diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
index e608f397e11..27b9e93d012 100644
--- a/arch/sparc/kernel/ptrace_32.c
+++ b/arch/sparc/kernel/ptrace_32.c
@@ -323,18 +323,35 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
return &user_sparc32_view;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+struct fps {
+ unsigned long regs[32];
+ unsigned long fsr;
+ unsigned long flags;
+ unsigned long extra;
+ unsigned long fpqd;
+ struct fq {
+ unsigned long *insnaddr;
+ unsigned long insn;
+ } fpq[16];
+};
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long addr2 = current->thread.kregs->u_regs[UREG_I4];
+ void __user *addr2p;
const struct user_regset_view *view;
+ struct pt_regs __user *pregs;
+ struct fps __user *fps;
int ret;
view = task_user_regset_view(current);
+ addr2p = (void __user *) addr2;
+ pregs = (struct pt_regs __user *) addr;
+ fps = (struct fps __user *) addr;
switch(request) {
case PTRACE_GETREGS: {
- struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
-
ret = copy_regset_to_user(child, view, REGSET_GENERAL,
32 * sizeof(u32),
4 * sizeof(u32),
@@ -348,8 +365,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
case PTRACE_SETREGS: {
- struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
-
ret = copy_regset_from_user(child, view, REGSET_GENERAL,
32 * sizeof(u32),
4 * sizeof(u32),
@@ -363,19 +378,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
case PTRACE_GETFPREGS: {
- struct fps {
- unsigned long regs[32];
- unsigned long fsr;
- unsigned long flags;
- unsigned long extra;
- unsigned long fpqd;
- struct fq {
- unsigned long *insnaddr;
- unsigned long insn;
- } fpq[16];
- };
- struct fps __user *fps = (struct fps __user *) addr;
-
ret = copy_regset_to_user(child, view, REGSET_FP,
0 * sizeof(u32),
32 * sizeof(u32),
@@ -397,19 +399,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
case PTRACE_SETFPREGS: {
- struct fps {
- unsigned long regs[32];
- unsigned long fsr;
- unsigned long flags;
- unsigned long extra;
- unsigned long fpqd;
- struct fq {
- unsigned long *insnaddr;
- unsigned long insn;
- } fpq[16];
- };
- struct fps __user *fps = (struct fps __user *) addr;
-
ret = copy_regset_from_user(child, view, REGSET_FP,
0 * sizeof(u32),
32 * sizeof(u32),
@@ -424,8 +413,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_READTEXT:
case PTRACE_READDATA:
- ret = ptrace_readdata(child, addr,
- (void __user *) addr2, data);
+ ret = ptrace_readdata(child, addr, addr2p, data);
if (ret == data)
ret = 0;
@@ -435,8 +423,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_WRITETEXT:
case PTRACE_WRITEDATA:
- ret = ptrace_writedata(child, (void __user *) addr2,
- addr, data);
+ ret = ptrace_writedata(child, addr2p, addr, data);
if (ret == data)
ret = 0;
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index aa90da08bf6..9ccc812bc09 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -969,16 +969,19 @@ struct fps {
unsigned long fsr;
};
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
const struct user_regset_view *view = task_user_regset_view(current);
unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4];
struct pt_regs __user *pregs;
struct fps __user *fps;
+ void __user *addr2p;
int ret;
- pregs = (struct pt_regs __user *) (unsigned long) addr;
- fps = (struct fps __user *) (unsigned long) addr;
+ pregs = (struct pt_regs __user *) addr;
+ fps = (struct fps __user *) addr;
+ addr2p = (void __user *) addr2;
switch (request) {
case PTRACE_PEEKUSR:
@@ -1029,8 +1032,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_READTEXT:
case PTRACE_READDATA:
- ret = ptrace_readdata(child, addr,
- (char __user *)addr2, data);
+ ret = ptrace_readdata(child, addr, addr2p, data);
if (ret == data)
ret = 0;
else if (ret >= 0)
@@ -1039,8 +1041,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_WRITETEXT:
case PTRACE_WRITEDATA:
- ret = ptrace_writedata(child, (char __user *) addr2,
- addr, data);
+ ret = ptrace_writedata(child, addr2p, addr, data);
if (ret == data)
ret = 0;
else if (ret >= 0)
diff --git a/arch/sparc/kernel/rtrap_32.S b/arch/sparc/kernel/rtrap_32.S
index 4da2e1f6629..5f5f74c2c2c 100644
--- a/arch/sparc/kernel/rtrap_32.S
+++ b/arch/sparc/kernel/rtrap_32.S
@@ -78,9 +78,9 @@ signal_p:
call do_notify_resume
add %sp, STACKFRAME_SZ, %o0 ! pt_regs ptr
- /* Fall through. */
- ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr
- clr %l6
+ b signal_p
+ ld [%curptr + TI_FLAGS], %g2
+
ret_trap_continue:
sethi %hi(PSR_SYSCALL), %g1
andn %t_psr, %g1, %t_psr
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 090b9e9ad5e..77f1b95e080 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -34,37 +34,9 @@ __handle_preemption:
__handle_user_windows:
call fault_in_user_windows
wrpr %g0, RTRAP_PSTATE, %pstate
- wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
- /* Redo sched+sig checks */
- ldx [%g6 + TI_FLAGS], %l0
- andcc %l0, _TIF_NEED_RESCHED, %g0
-
- be,pt %xcc, 1f
- nop
- call schedule
- wrpr %g0, RTRAP_PSTATE, %pstate
- wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
- ldx [%g6 + TI_FLAGS], %l0
-
-1: andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
- be,pt %xcc, __handle_user_windows_continue
- nop
- mov %l5, %o1
- add %sp, PTREGS_OFF, %o0
- mov %l0, %o2
-
- call do_notify_resume
- wrpr %g0, RTRAP_PSTATE, %pstate
- wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
- /* Signal delivery can modify pt_regs tstate, so we must
- * reload it.
- */
- ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
- sethi %hi(0xf << 20), %l4
- and %l1, %l4, %l4
- ba,pt %xcc, __handle_user_windows_continue
+ ba,pt %xcc, __handle_preemption_continue
+ wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
- andn %l1, %l4, %l1
__handle_userfpu:
rd %fprs, %l5
andcc %l5, FPRS_FEF, %g0
@@ -87,7 +59,7 @@ __handle_signal:
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
sethi %hi(0xf << 20), %l4
and %l1, %l4, %l4
- ba,pt %xcc, __handle_signal_continue
+ ba,pt %xcc, __handle_preemption_continue
andn %l1, %l4, %l1
/* When returning from a NMI (%pil==15) interrupt we want to
@@ -177,11 +149,9 @@ __handle_preemption_continue:
bne,pn %xcc, __handle_preemption
andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
bne,pn %xcc, __handle_signal
-__handle_signal_continue:
ldub [%g6 + TI_WSAVED], %o2
brnz,pn %o2, __handle_user_windows
nop
-__handle_user_windows_continue:
sethi %hi(TSTATE_PEF), %o0
andcc %l1, %o0, %g0
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index e6375a750d9..6db18c6927f 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -17,7 +17,6 @@
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 675c9e11ada..42b282fa611 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -19,7 +19,6 @@
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/ipc.h>
#include <asm/uaccess.h>
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index 12b9f352595..4491f4cb269 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -16,7 +16,6 @@
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/perf_event.h>
enum direction {
diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c
index b351770cbdd..3107381e576 100644
--- a/arch/sparc/kernel/windows.c
+++ b/arch/sparc/kernel/windows.c
@@ -9,7 +9,6 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <asm/uaccess.h>
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index bd8601601af..5b836f5aea9 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -539,6 +539,12 @@ do_sigbus:
__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}
+static void check_stack_aligned(unsigned long sp)
+{
+ if (sp & 0x7UL)
+ force_sig(SIGILL, current);
+}
+
void window_overflow_fault(void)
{
unsigned long sp;
@@ -547,6 +553,8 @@ void window_overflow_fault(void)
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 1);
force_user_fault(sp, 1);
+
+ check_stack_aligned(sp);
}
void window_underflow_fault(unsigned long sp)
@@ -554,6 +562,8 @@ void window_underflow_fault(unsigned long sp)
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 0);
force_user_fault(sp, 0);
+
+ check_stack_aligned(sp);
}
void window_ret_fault(struct pt_regs *regs)
@@ -564,4 +574,6 @@ void window_ret_fault(struct pt_regs *regs)
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 0);
force_user_fault(sp, 0);
+
+ check_stack_aligned(sp);
}
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 5e50c09b7dc..4730eac0747 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -75,7 +75,7 @@ void __kunmap_atomic(void *kvaddr)
return;
}
- type = kmap_atomic_idx_pop();
+ type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
{
@@ -104,6 +104,8 @@ void __kunmap_atomic(void *kvaddr)
#endif
}
#endif
+
+ kmap_atomic_idx_pop();
pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 89cfee07efa..07ec8a865c1 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -58,6 +58,9 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING
config ARCH_PHYS_ADDR_T_64BIT
def_bool y
+config ARCH_DMA_ADDR_T_64BIT
+ def_bool y
+
config LOCKDEP_SUPPORT
def_bool y
@@ -114,8 +117,6 @@ config TILE
# config HUGETLB_PAGE_SIZE_VARIABLE
-mainmenu "Linux/TILE Kernel Configuration"
-
# Please note: TILE-Gx support is not yet finalized; this is
# the preliminary support. TILE-Gx drivers are only provided
# with the alpha or beta test versions for Tilera customers.
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index e0f7ee18672..b2a6c5de79a 100644
--- a/arch/tile/include/asm/highmem.h
+++ b/arch/tile/include/asm/highmem.h
@@ -23,7 +23,6 @@
#include <linux/interrupt.h>
#include <linux/threads.h>
-#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
diff --git a/arch/tile/include/asm/kmap_types.h b/arch/tile/include/asm/kmap_types.h
index 1480106d1c0..3d0f2024626 100644
--- a/arch/tile/include/asm/kmap_types.h
+++ b/arch/tile/include/asm/kmap_types.h
@@ -16,28 +16,42 @@
#define _ASM_TILE_KMAP_TYPES_H
/*
- * In TILE Linux each set of four of these uses another 16MB chunk of
- * address space, given 64 tiles and 64KB pages, so we only enable
- * ones that are required by the kernel configuration.
+ * In 32-bit TILE Linux we have to balance the desire to have a lot of
+ * nested atomic mappings with the fact that large page sizes and many
+ * processors chew up address space quickly. In a typical
+ * 64-processor, 64KB-page layout build, making KM_TYPE_NR one larger
+ * adds 4MB of required address-space. For now we leave KM_TYPE_NR
+ * set to depth 8.
*/
enum km_type {
+ KM_TYPE_NR = 8
+};
+
+/*
+ * We provide dummy definitions of all the stray values that used to be
+ * required for kmap_atomic() and no longer are.
+ */
+enum {
KM_BOUNCE_READ,
KM_SKB_SUNRPC_DATA,
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
KM_BIO_SRC_IRQ,
+ KM_BIO_DST_IRQ,
+ KM_PTE0,
+ KM_PTE1,
KM_IRQ0,
KM_IRQ1,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
- KM_MEMCPY0,
- KM_MEMCPY1,
-#if defined(CONFIG_HIGHPTE)
- KM_PTE0,
- KM_PTE1,
-#endif
- KM_TYPE_NR
+ KM_SYNC_ICACHE,
+ KM_SYNC_DCACHE,
+ KM_UML_USERCOPY,
+ KM_IRQ_PTE,
+ KM_NMI,
+ KM_NMI_PTE,
+ KM_KDB
};
#endif /* _ASM_TILE_KMAP_TYPES_H */
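
The comment in the hunk above quantifies why KM_TYPE_NR stays at depth 8: each slot needs one page per CPU of fixmap address space. A small standalone sketch of that arithmetic, assuming the 64-processor, 64KB-page figures quoted in the comment (the numbers are illustrative, not probed from a real configuration):

#include <stdio.h>

int main(void)
{
        const unsigned long cpus = 64;              /* assumed tile count */
        const unsigned long page_size = 64 * 1024;  /* assumed 64KB pages */
        const unsigned long km_type_nr = 8;         /* depth chosen above */

        printf("cost of one extra KM_TYPE_NR slot: %lu MB\n",
               cpus * page_size >> 20);
        printf("fixmap space used at depth %lu: %lu MB\n",
               km_type_nr, km_type_nr * cpus * page_size >> 20);
        return 0;
}
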
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index dc4ccdd855b..a6604e9485d 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -344,10 +344,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#if defined(CONFIG_HIGHPTE)
-extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type);
-#define pte_offset_map(dir, address) \
- _pte_offset_map(dir, address, KM_PTE0)
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
+#define pte_unmap(pte) kunmap_atomic(pte)
#else
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
#define pte_unmap(pte) do { } while (0)
diff --git a/arch/tile/include/asm/stat.h b/arch/tile/include/asm/stat.h
index 3dc90fa92c7..b16e5db8f0e 100644
--- a/arch/tile/include/asm/stat.h
+++ b/arch/tile/include/asm/stat.h
@@ -1 +1,4 @@
+#ifdef CONFIG_COMPAT
+#define __ARCH_WANT_STAT64 /* Used for compat_sys_stat64() etc. */
+#endif
#include <asm-generic/stat.h>
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
index f2e3ff48533..b35c2db7119 100644
--- a/arch/tile/include/asm/unistd.h
+++ b/arch/tile/include/asm/unistd.h
@@ -41,6 +41,7 @@ __SYSCALL(__NR_cmpxchg_badaddr, sys_cmpxchg_badaddr)
#ifdef CONFIG_COMPAT
#define __ARCH_WANT_SYS_LLSEEK
#endif
+#define __ARCH_WANT_SYS_NEWFSTATAT
#endif
#endif /* _ASM_TILE_UNISTD_H */
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index 77739cdd946..dbc213adf5e 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -21,7 +21,6 @@
#include <linux/kdev_t.h>
#include <linux/fs.h>
#include <linux/fcntl.h>
-#include <linux/smp_lock.h>
#include <linux/uaccess.h>
#include <linux/signal.h>
#include <asm/syscalls.h>
@@ -148,11 +147,11 @@ long tile_compat_sys_msgrcv(int msqid,
#define compat_sys_readahead sys32_readahead
#define compat_sys_sync_file_range compat_sys_sync_file_range2
-/* The native 64-bit "struct stat" matches the 32-bit "struct stat64". */
-#define compat_sys_stat64 sys_newstat
-#define compat_sys_lstat64 sys_newlstat
-#define compat_sys_fstat64 sys_newfstat
-#define compat_sys_fstatat64 sys_newfstatat
+/* We leverage the "struct stat64" type for 32-bit time_t/nsec. */
+#define compat_sys_stat64 sys_stat64
+#define compat_sys_lstat64 sys_lstat64
+#define compat_sys_fstat64 sys_fstat64
+#define compat_sys_fstatat64 sys_fstatat64
/* The native sys_ptrace dynamically handles compat binaries. */
#define compat_sys_ptrace sys_ptrace
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index fb64b99959d..543d6a33aa2 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -15,7 +15,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
index 2c54fd43a8a..493a0e66d91 100644
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -54,7 +54,7 @@ void early_printk(const char *fmt, ...)
void early_panic(const char *fmt, ...)
{
va_list ap;
- raw_local_irq_disable_all();
+ arch_local_irq_disable_all();
va_start(ap, fmt);
early_printk("Kernel panic - not syncing: ");
early_vprintk(fmt, ap);
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 1e54a784341..e910530436e 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -151,12 +151,12 @@ enum direction_protect {
static void enable_firewall_interrupts(void)
{
- raw_local_irq_unmask_now(INT_UDN_FIREWALL);
+ arch_local_irq_unmask_now(INT_UDN_FIREWALL);
}
static void disable_firewall_interrupts(void)
{
- raw_local_irq_mask_now(INT_UDN_FIREWALL);
+ arch_local_irq_mask_now(INT_UDN_FIREWALL);
}
/* Set up hardwall on this cpu based on the passed hardwall_info. */
@@ -768,13 +768,13 @@ static int hardwall_release(struct inode *inode, struct file *file)
}
static const struct file_operations dev_hardwall_fops = {
+ .open = nonseekable_open,
.unlocked_ioctl = hardwall_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = hardwall_compat_ioctl,
#endif
.flush = hardwall_flush,
.release = hardwall_release,
- .llseek = noop_llseek,
};
static struct cdev hardwall_dev;
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index e63917687e9..128805ef8f2 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -26,7 +26,7 @@
#define IS_HW_CLEARED 1
/*
- * The set of interrupts we enable for raw_local_irq_enable().
+ * The set of interrupts we enable for arch_local_irq_enable().
* This is initialized to have just a single interrupt that the kernel
* doesn't actually use as a sentinel. During kernel init,
* interrupts are added as the kernel gets prepared to support them.
@@ -225,7 +225,7 @@ void __cpuinit setup_irq_regs(void)
/* Enable interrupt delivery. */
unmask_irqs(~0UL);
#if CHIP_HAS_IPI()
- raw_local_irq_unmask(INT_IPI_K);
+ arch_local_irq_unmask(INT_IPI_K);
#endif
}
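
The comment touched above describes a mask of interrupts that arch_local_irq_enable() is allowed to unmask, seeded with a harmless sentinel bit and grown as subsystems initialize. A simplified single-CPU userspace sketch of that bookkeeping (the interrupt numbers and helper names are assumptions for illustration only):

#include <stdio.h>

#define INT_SENTINEL    0       /* never actually raised */
#define INT_TIMER       3
#define INT_IPI         5

static unsigned long interrupts_enabled_mask = 1UL << INT_SENTINEL;

static void local_irq_unmask(int intno)
{
        interrupts_enabled_mask |= 1UL << intno;
}

static unsigned long local_irq_enable_mask(void)
{
        /* what a real irq-enable would program into the hardware */
        return interrupts_enabled_mask;
}

int main(void)
{
        local_irq_unmask(INT_TIMER);    /* timer init adds its interrupt */
        local_irq_unmask(INT_IPI);      /* SMP init adds its interrupt */
        printf("mask unmasked on irq_enable: 0x%lx\n",
               local_irq_enable_mask());
        return 0;
}
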
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index ba7a265d617..0d8b9e93348 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -182,13 +182,13 @@ static void kexec_find_and_set_command_line(struct kimage *image)
if ((entry & IND_SOURCE)) {
void *va =
- kmap_atomic_pfn(entry >> PAGE_SHIFT, KM_USER0);
+ kmap_atomic_pfn(entry >> PAGE_SHIFT);
r = kexec_bn2cl(va);
if (r) {
command_line = r;
break;
}
- kunmap_atomic(va, KM_USER0);
+ kunmap_atomic(va);
}
}
@@ -198,7 +198,7 @@ static void kexec_find_and_set_command_line(struct kimage *image)
hverr = hv_set_command_line(
(HV_VirtAddr) command_line, strlen(command_line));
- kunmap_atomic(command_line, KM_USER0);
+ kunmap_atomic(command_line);
} else {
pr_info("%s: no command line found; making empty\n",
__func__);
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 997e3933f72..0858ee6b520 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -34,7 +34,7 @@ void __cpuinit init_messaging(void)
panic("hv_register_message_state: error %d", rc);
/* Make sure downcall interrupts will be enabled. */
- raw_local_irq_unmask(INT_INTCTRL_K);
+ arch_local_irq_unmask(INT_INTCTRL_K);
}
void hv_message_intr(struct pt_regs *regs, int intnum)
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 5b20c2874d5..e92e40527d6 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -45,19 +45,20 @@ void ptrace_disable(struct task_struct *child)
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
unsigned long __user *datap = (long __user __force *)data;
unsigned long tmp;
- int i;
long ret = -EIO;
- unsigned long *childregs;
char *childreg;
+ struct pt_regs copyregs;
+ int ex1_offset;
switch (request) {
case PTRACE_PEEKUSR: /* Read register from pt_regs. */
- if (addr < 0 || addr >= PTREGS_SIZE)
+ if (addr >= PTREGS_SIZE)
break;
childreg = (char *)task_pt_regs(child) + addr;
#ifdef CONFIG_COMPAT
@@ -76,9 +77,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_POKEUSR: /* Write register in pt_regs. */
- if (addr < 0 || addr >= PTREGS_SIZE)
+ if (addr >= PTREGS_SIZE)
break;
childreg = (char *)task_pt_regs(child) + addr;
+
+ /* Guard against overwrites of the privilege level. */
+ ex1_offset = PTREGS_OFFSET_EX1;
+#if defined(CONFIG_COMPAT) && defined(__BIG_ENDIAN)
+ if (is_compat_task()) /* point at low word */
+ ex1_offset += sizeof(compat_long_t);
+#endif
+ if (addr == ex1_offset)
+ data = PL_ICS_EX1(USER_PL, EX1_ICS(data));
+
#ifdef CONFIG_COMPAT
if (is_compat_task()) {
if (addr & (sizeof(compat_long_t)-1))
@@ -95,24 +106,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_GETREGS: /* Get all registers from the child. */
- if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE))
- break;
- childregs = (long *)task_pt_regs(child);
- for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) {
- ret = __put_user(childregs[i], &datap[i]);
- if (ret != 0)
- break;
+ if (copy_to_user(datap, task_pt_regs(child),
+ sizeof(struct pt_regs)) == 0) {
+ ret = 0;
}
break;
case PTRACE_SETREGS: /* Set all registers in the child. */
- if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE))
- break;
- childregs = (long *)task_pt_regs(child);
- for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) {
- ret = __get_user(childregs[i], &datap[i]);
- if (ret != 0)
- break;
+ if (copy_from_user(&copyregs, datap,
+ sizeof(struct pt_regs)) == 0) {
+ copyregs.ex1 =
+ PL_ICS_EX1(USER_PL, EX1_ICS(copyregs.ex1));
+ *task_pt_regs(child) = copyregs;
+ ret = 0;
}
break;
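
The PTRACE_POKEUSR hunk above sanitizes writes that land on ex1 so a tracer cannot raise the traced task's privilege level. A userspace sketch of that fixup under an assumed bit layout (PL in bits 0-1, ICS in bit 2; the real SPR encoding may differ):

#include <stdio.h>

#define USER_PL         0u
#define PL_MASK         0x3u
#define ICS_BIT         (1u << 2)

static unsigned int ex1_ics(unsigned int ex1) { return ex1 & ICS_BIT; }

static unsigned int pl_ics_ex1(unsigned int pl, unsigned int ics)
{
        return (pl & PL_MASK) | ics;
}

int main(void)
{
        unsigned int attacker_ex1 = 0x1 | ICS_BIT;  /* tries to claim PL 1 */
        unsigned int sanitized = pl_ics_ex1(USER_PL, ex1_ics(attacker_ex1));

        /* ICS is preserved, the privilege level is forced back to user */
        printf("requested ex1 0x%x -> stored ex1 0x%x\n",
               attacker_ex1, sanitized);
        return 0;
}
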
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c
index acd86d20beb..baa3d905fee 100644
--- a/arch/tile/kernel/reboot.c
+++ b/arch/tile/kernel/reboot.c
@@ -27,7 +27,7 @@
void machine_halt(void)
{
warn_early_printk();
- raw_local_irq_disable_all();
+ arch_local_irq_disable_all();
smp_send_stop();
hv_halt();
}
@@ -35,14 +35,14 @@ void machine_halt(void)
void machine_power_off(void)
{
warn_early_printk();
- raw_local_irq_disable_all();
+ arch_local_irq_disable_all();
smp_send_stop();
hv_power_off();
}
void machine_restart(char *cmd)
{
- raw_local_irq_disable_all();
+ arch_local_irq_disable_all();
smp_send_stop();
hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd);
}
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index f3a50e74f9a..fb0b3cbeae1 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -30,8 +30,6 @@
#include <linux/timex.h>
#include <asm/setup.h>
#include <asm/sections.h>
-#include <asm/sections.h>
-#include <asm/cacheflush.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
@@ -870,14 +868,14 @@ void __cpuinit setup_cpu(int boot)
/* Allow asynchronous TLB interrupts. */
#if CHIP_HAS_TILE_DMA()
- raw_local_irq_unmask(INT_DMATLB_MISS);
- raw_local_irq_unmask(INT_DMATLB_ACCESS);
+ arch_local_irq_unmask(INT_DMATLB_MISS);
+ arch_local_irq_unmask(INT_DMATLB_ACCESS);
#endif
#if CHIP_HAS_SN_PROC()
- raw_local_irq_unmask(INT_SNITLB_MISS);
+ arch_local_irq_unmask(INT_SNITLB_MISS);
#endif
#ifdef __tilegx__
- raw_local_irq_unmask(INT_SINGLE_STEP_K);
+ arch_local_irq_unmask(INT_SINGLE_STEP_K);
#endif
/*
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index fb28e85ae3a..757407e3669 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -16,7 +16,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
@@ -71,6 +70,9 @@ int restore_sigcontext(struct pt_regs *regs,
for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
err |= __get_user(regs->regs[i], &sc->gregs[i]);
+ /* Ensure that the PL is always set to USER_PL. */
+ regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));
+
regs->faultnum = INT_SWINT_1_SIGRETURN;
err |= __get_user(*pr0, &sc->gregs[0]);
@@ -330,7 +332,7 @@ void do_signal(struct pt_regs *regs)
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
}
- return;
+ goto done;
}
/* Did we come from a system call? */
@@ -358,4 +360,8 @@ void do_signal(struct pt_regs *regs)
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
+
+done:
+ /* Avoid double syscall restart if there are nested signals. */
+ regs->faultnum = INT_SWINT_1_SIGRETURN;
}
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 75255d90aff..9575b37a8b7 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -115,7 +115,7 @@ static void smp_start_cpu_interrupt(void)
static void smp_stop_cpu_interrupt(void)
{
set_cpu_online(smp_processor_id(), 0);
- raw_local_irq_disable_all();
+ arch_local_irq_disable_all();
for (;;)
asm("nap");
}
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 74d62d098ed..b949edcec20 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -18,7 +18,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
-#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index 7e764669a02..e2187d24a9b 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -20,7 +20,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 6bed820e142..f2e156e4469 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -132,7 +132,7 @@ static int tile_timer_set_next_event(unsigned long ticks,
{
BUG_ON(ticks > MAX_TICK);
__insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks);
- raw_local_irq_unmask_now(INT_TILE_TIMER);
+ arch_local_irq_unmask_now(INT_TILE_TIMER);
return 0;
}
@@ -143,7 +143,7 @@ static int tile_timer_set_next_event(unsigned long ticks,
static void tile_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
- raw_local_irq_mask_now(INT_TILE_TIMER);
+ arch_local_irq_mask_now(INT_TILE_TIMER);
}
/*
@@ -172,7 +172,7 @@ void __cpuinit setup_tile_timer(void)
evt->cpumask = cpumask_of(smp_processor_id());
/* Start out with timer not firing. */
- raw_local_irq_mask_now(INT_TILE_TIMER);
+ arch_local_irq_mask_now(INT_TILE_TIMER);
/* Register tile timer. */
clockevents_register_device(evt);
@@ -188,7 +188,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
* Mask the timer interrupt here, since we are a oneshot timer
* and there are now by definition no events pending.
*/
- raw_local_irq_mask(INT_TILE_TIMER);
+ arch_local_irq_mask(INT_TILE_TIMER);
/* Track time spent here in an interrupt context */
irq_enter();
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c
index dfedea7b266..f7d4a6ad61e 100644
--- a/arch/tile/lib/memcpy_tile64.c
+++ b/arch/tile/lib/memcpy_tile64.c
@@ -54,7 +54,7 @@ typedef unsigned long (*memcpy_t)(void *, const void *, unsigned long);
* we must run with interrupts disabled to avoid the risk of some
* other code seeing the incoherent data in our cache. (Recall that
* our cache is indexed by PA, so even if the other code doesn't use
- * our KM_MEMCPY virtual addresses, they'll still hit in cache using
+ * our kmap_atomic virtual addresses, they'll still hit in cache using
* the normal VAs that aren't supposed to hit in cache.)
*/
static void memcpy_multicache(void *dest, const void *source,
@@ -64,6 +64,7 @@ static void memcpy_multicache(void *dest, const void *source,
unsigned long flags, newsrc, newdst;
pmd_t *pmdp;
pte_t *ptep;
+ int type0, type1;
int cpu = get_cpu();
/*
@@ -77,7 +78,8 @@ static void memcpy_multicache(void *dest, const void *source,
sim_allow_multiple_caching(1);
/* Set up the new dest mapping */
- idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + KM_MEMCPY0;
+ type0 = kmap_atomic_idx_push();
+ idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type0;
newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1));
pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst);
ptep = pte_offset_kernel(pmdp, newdst);
@@ -87,7 +89,8 @@ static void memcpy_multicache(void *dest, const void *source,
}
/* Set up the new source mapping */
- idx += (KM_MEMCPY0 - KM_MEMCPY1);
+ type1 = kmap_atomic_idx_push();
+ idx += (type0 - type1);
src_pte = hv_pte_set_nc(src_pte);
src_pte = hv_pte_clear_writable(src_pte); /* be paranoid */
newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1));
@@ -119,6 +122,8 @@ static void memcpy_multicache(void *dest, const void *source,
* We're done: notify the simulator that all is back to normal,
* and re-enable interrupts and pre-emption.
*/
+ kmap_atomic_idx_pop();
+ kmap_atomic_idx_pop();
sim_allow_multiple_caching(0);
local_irq_restore(flags);
put_cpu();
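
The hunks above drop the dedicated KM_MEMCPY0/1 slots in favour of kmap_atomic_idx_push()/kmap_atomic_idx_pop(), which hand out temporary mapping indices in strict LIFO order. A minimal single-threaded sketch of that discipline (a simplification of the kernel's per-CPU implementation, not a copy of it):

#include <assert.h>
#include <stdio.h>

#define KM_TYPE_NR 8

static int kmap_idx;

static int kmap_atomic_idx_push(void)
{
        assert(kmap_idx < KM_TYPE_NR);
        return kmap_idx++;
}

static void kmap_atomic_idx_pop(void)
{
        assert(kmap_idx > 0);
        kmap_idx--;
}

int main(void)
{
        int type0 = kmap_atomic_idx_push();     /* destination mapping */
        int type1 = kmap_atomic_idx_push();     /* source mapping */

        printf("dest slot %d, src slot %d\n", type0, type1);

        /* tear down in LIFO order, as memcpy_multicache() now does */
        kmap_atomic_idx_pop();
        kmap_atomic_idx_pop();
        return 0;
}
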
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index f295b4ac941..dcebfc831cd 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -24,7 +24,6 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 8ef6595e162..31dbbd9afe4 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -227,7 +227,7 @@ EXPORT_SYMBOL(kmap_atomic_prot);
void *__kmap_atomic(struct page *page)
{
/* PAGE_NONE is a magic value that tells us to check immutability. */
- return kmap_atomic_prot(page, type, PAGE_NONE);
+ return kmap_atomic_prot(page, PAGE_NONE);
}
EXPORT_SYMBOL(__kmap_atomic);
@@ -241,7 +241,7 @@ void __kunmap_atomic(void *kvaddr)
pte_t pteval = *pte;
int idx, type;
- type = kmap_atomic_idx_pop();
+ type = kmap_atomic_idx();
idx = type + KM_TYPE_NR*smp_processor_id();
/*
@@ -252,6 +252,7 @@ void __kunmap_atomic(void *kvaddr)
BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
kmap_atomic_unregister(pte_page(pteval), vaddr);
kpte_clear_flush(pte, vaddr);
+ kmap_atomic_idx_pop();
} else {
/* Must be a lowmem page */
BUG_ON(vaddr < PAGE_OFFSET);
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 24688b697a8..201a582c413 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -21,7 +21,6 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 78e1982cb6c..0b9ce69b0ee 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -988,8 +988,12 @@ static long __write_once initfree = 1;
/* Select whether to free (1) or mark unusable (0) the __init pages. */
static int __init set_initfree(char *str)
{
- strict_strtol(str, 0, &initfree);
- pr_info("initfree: %s free init pages\n", initfree ? "will" : "won't");
+ long val;
+ if (strict_strtol(str, 0, &val) == 0) {
+ initfree = val;
+ pr_info("initfree: %s free init pages\n",
+ initfree ? "will" : "won't");
+ }
return 1;
}
__setup("initfree=", set_initfree);
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 335c24621c4..1f5430c53d0 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -134,9 +134,9 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
}
#if defined(CONFIG_HIGHPTE)
-pte_t *_pte_offset_map(pmd_t *dir, unsigned long address, enum km_type type)
+pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
{
- pte_t *pte = kmap_atomic(pmd_page(*dir), type) +
+ pte_t *pte = kmap_atomic(pmd_page(*dir)) +
(pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
return &pte[pte_index(address)];
}
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index 7c8e277f6d3..049d048b070 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -19,8 +19,6 @@ config MMU
config NO_IOMEM
def_bool y
-mainmenu "Linux/Usermode Kernel Configuration"
-
config ISA
bool
diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h
index 2cd899f75a3..b7c5bab9bd7 100644
--- a/arch/um/include/asm/ptrace-generic.h
+++ b/arch/um/include/asm/ptrace-generic.h
@@ -38,8 +38,8 @@ struct pt_regs {
struct task_struct;
-extern long subarch_ptrace(struct task_struct *child, long request, long addr,
- long data);
+extern long subarch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data);
extern unsigned long getreg(struct task_struct *child, int regno);
extern int putreg(struct task_struct *child, int regno, unsigned long value);
extern int get_fpregs(struct user_i387_struct __user *buf,
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index 340268be00b..09bd7b58572 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -5,7 +5,6 @@
#include "linux/stddef.h"
#include "linux/fs.h"
-#include "linux/smp_lock.h"
#include "linux/ptrace.h"
#include "linux/sched.h"
#include "linux/slab.h"
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index e0510496596..701b672c112 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -42,10 +42,12 @@ void ptrace_disable(struct task_struct *child)
extern int peek_user(struct task_struct * child, long addr, long data);
extern int poke_user(struct task_struct * child, long addr, long data);
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int i, ret;
- unsigned long __user *p = (void __user *)(unsigned long)data;
+ unsigned long __user *p = (void __user *)data;
+ void __user *vp = p;
switch (request) {
/* read word at location addr. */
@@ -107,24 +109,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#endif
#ifdef PTRACE_GETFPREGS
case PTRACE_GETFPREGS: /* Get the child FPU state. */
- ret = get_fpregs((struct user_i387_struct __user *) data,
- child);
+ ret = get_fpregs(vp, child);
break;
#endif
#ifdef PTRACE_SETFPREGS
case PTRACE_SETFPREGS: /* Set the child FPU state. */
- ret = set_fpregs((struct user_i387_struct __user *) data,
- child);
+ ret = set_fpregs(vp, child);
break;
#endif
case PTRACE_GET_THREAD_AREA:
- ret = ptrace_get_thread_area(child, addr,
- (struct user_desc __user *) data);
+ ret = ptrace_get_thread_area(child, addr, vp);
break;
case PTRACE_SET_THREAD_AREA:
- ret = ptrace_set_thread_area(child, addr,
- (struct user_desc __user *) data);
+ ret = ptrace_set_thread_area(child, addr, vp);
break;
case PTRACE_FAULTINFO: {
@@ -134,7 +132,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
* On i386, ptrace_faultinfo is smaller!
*/
ret = copy_to_user(p, &child->thread.arch.faultinfo,
- sizeof(struct ptrace_faultinfo));
+ sizeof(struct ptrace_faultinfo)) ?
+ -EIO : 0;
break;
}
@@ -158,7 +157,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#ifdef PTRACE_ARCH_PRCTL
case PTRACE_ARCH_PRCTL:
/* XXX Calls ptrace on the host - needs some SMP thinking */
- ret = arch_prctl(child, data, (void *) addr);
+ ret = arch_prctl(child, data, (void __user *) addr);
break;
#endif
default:
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c
index c9b176534d6..d23b2d3ea38 100644
--- a/arch/um/sys-i386/ptrace.c
+++ b/arch/um/sys-i386/ptrace.c
@@ -203,8 +203,8 @@ int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
(unsigned long *) &fpregs);
}
-long subarch_ptrace(struct task_struct *child, long request, long addr,
- long data)
+long subarch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
return -EIO;
}
diff --git a/arch/um/sys-x86_64/ptrace.c b/arch/um/sys-x86_64/ptrace.c
index f3458d7d1c5..f43613643cd 100644
--- a/arch/um/sys-x86_64/ptrace.c
+++ b/arch/um/sys-x86_64/ptrace.c
@@ -175,19 +175,18 @@ int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
return restore_fp_registers(userspace_pid[cpu], fpregs);
}
-long subarch_ptrace(struct task_struct *child, long request, long addr,
- long data)
+long subarch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret = -EIO;
+ void __user *datap = (void __user *) data;
switch (request) {
case PTRACE_GETFPXREGS: /* Get the child FPU state. */
- ret = get_fpregs((struct user_i387_struct __user *) data,
- child);
+ ret = get_fpregs(datap, child);
break;
case PTRACE_SETFPXREGS: /* Set the child FPU state. */
- ret = set_fpregs((struct user_i387_struct __user *) data,
- child);
+ ret = set_fpregs(datap, child);
break;
}
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index ad8ec356fb3..0e103236b75 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -14,3 +14,4 @@ obj-y += crypto/
obj-y += vdso/
obj-$(CONFIG_IA32_EMULATION) += ia32/
+obj-y += platform/
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index dfabfefc21c..e8327686d3c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1,6 +1,3 @@
-# x86 configuration
-mainmenu "Linux Kernel Configuration for x86"
-
# Select 32 or 64 bit
config 64BIT
bool "64-bit kernel" if ARCH = "x86"
@@ -347,6 +344,7 @@ endif
config X86_VSMP
bool "ScaleMP vSMP"
+ select PARAVIRT_GUEST
select PARAVIRT
depends on X86_64 && PCI
depends on X86_EXTENDED_PLATFORM
@@ -1895,6 +1893,11 @@ config PCI_OLPC
def_bool y
depends on PCI && OLPC && (PCI_GOOLPC || PCI_GOANY)
+config PCI_XEN
+ def_bool y
+ depends on PCI && XEN
+ select SWIOTLB_XEN
+
config PCI_DOMAINS
def_bool y
depends on PCI
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index 1255d953c65..f2ee1abb1df 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -51,7 +51,18 @@ cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686))
# prologue (push %ebp, mov %esp, %ebp) which breaks the function graph
# tracer assumptions. For i686, generic, core2 this is set by the
# compiler anyway
-cflags-$(CONFIG_FUNCTION_GRAPH_TRACER) += $(call cc-option,-maccumulate-outgoing-args)
+ifeq ($(CONFIG_FUNCTION_GRAPH_TRACER), y)
+ADD_ACCUMULATE_OUTGOING_ARGS := y
+endif
+
+# Work around a bug in early gcc implementations of asm goto that
+# caused gcc to mess up the push and pop of the stack in some uses
+# of asm goto.
+ifeq ($(CONFIG_JUMP_LABEL), y)
+ADD_ACCUMULATE_OUTGOING_ARGS := y
+endif
+
+cflags-$(ADD_ACCUMULATE_OUTGOING_ARGS) += $(call cc-option,-maccumulate-outgoing-args)
# Bug fix for binutils: this option is required in order to keep
# binutils from generating NOPL instructions against our will.
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 849813f398e..5852519b2d0 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -28,7 +28,6 @@
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/utsname.h>
-#include <linux/smp_lock.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/poll.h>
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 92091de1111..55d106b5e31 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -93,6 +93,9 @@ extern u8 acpi_sci_flags;
extern int acpi_sci_override_gsi;
void acpi_pic_sci_set_trigger(unsigned int, u16);
+extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
+ int trigger, int polarity);
+
static inline void disable_acpi(void)
{
acpi_disabled = 1;
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 286de34b0ed..f6ce0bda3b9 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -141,13 +141,13 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
static inline u32 native_apic_msr_read(u32 reg)
{
- u32 low, high;
+ u64 msr;
if (reg == APIC_DFR)
return -1;
- rdmsr(APIC_BASE_MSR + (reg >> 4), low, high);
- return low;
+ rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
+ return (u32)msr;
}
static inline void native_x2apic_wait_icr_idle(void)
@@ -181,12 +181,12 @@ extern void enable_x2apic(void);
extern void x2apic_icr_write(u32 low, u32 id);
static inline int x2apic_enabled(void)
{
- int msr, msr2;
+ u64 msr;
if (!cpu_has_x2apic)
return 0;
- rdmsr(MSR_IA32_APICBASE, msr, msr2);
+ rdmsrl(MSR_IA32_APICBASE, msr);
if (msr & X2APIC_ENABLE)
return 1;
return 0;
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index f0203f4791a..07227308252 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -41,6 +41,8 @@
#include <asm-generic/int-ll64.h>
#include <asm/page.h>
+#include <xen/xen.h>
+
#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
@@ -351,6 +353,17 @@ extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void fixup_early_ioremap(void);
extern bool is_early_ioremap_ptep(pte_t *ptep);
+#ifdef CONFIG_XEN
+struct bio_vec;
+
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+ const struct bio_vec *vec2);
+
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
+ (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+#endif /* CONFIG_XEN */
+
#define IO_SPACE_LIMIT 0xffff
#endif /* _ASM_X86_IO_H */
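
The BIOVEC_PHYS_MERGEABLE override added above layers a Xen-specific veto on top of the generic physical-contiguity test. A userspace sketch of how the two conditions compose; generic_mergeable() and xen_mergeable() are stand-ins (assumptions), not the kernel helpers:

#include <stdbool.h>
#include <stdio.h>

static bool xen_domain_active;          /* pretend we may run under Xen */

static bool generic_mergeable(int a, int b) { return a + 1 == b; }
static bool xen_mergeable(int a, int b)     { return (a / 4) == (b / 4); }

#define BIOVEC_PHYS_MERGEABLE(a, b) \
        (generic_mergeable(a, b) && \
         (!xen_domain_active || xen_mergeable(a, b)))

int main(void)
{
        xen_domain_active = true;
        /* pages 3 and 4 are adjacent, but cross a (pretend) machine-frame
         * boundary, so the Xen hook vetoes the merge */
        printf("3,4 mergeable: %d\n", BIOVEC_PHYS_MERGEABLE(3, 4));
        printf("1,2 mergeable: %d\n", BIOVEC_PHYS_MERGEABLE(1, 2));
        return 0;
}
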
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index c8be4566c3d..a6b28d017c2 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -169,6 +169,7 @@ extern void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
extern void probe_nr_irqs_gsi(void);
+extern int get_nr_irqs_gsi(void);
extern void setup_ioapic_ids_from_mpc(void);
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 0bf5b008365..13b0ebaa512 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -21,10 +21,8 @@ static inline int irq_canonicalize(int irq)
#ifdef CONFIG_X86_32
extern void irq_ctx_init(int cpu);
-extern void irq_ctx_exit(int cpu);
#else
# define irq_ctx_init(cpu) do { } while (0)
-# define irq_ctx_exit(cpu) do { } while (0)
#endif
#define __ARCH_HAS_DO_SOFTIRQ
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 83c4bb1d917..3ea3dc48704 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -121,6 +121,7 @@
#define MSR_AMD64_IBSDCLINAD 0xc0011038
#define MSR_AMD64_IBSDCPHYSAD 0xc0011039
#define MSR_AMD64_IBSCTL 0xc001103a
+#define MSR_AMD64_IBSBRTARGET 0xc001103b
/* Fam 10h MSRs */
#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index d395540ff89..ca0437c714b 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -7,6 +7,7 @@
#include <linux/string.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
+#include <asm/x86_init.h>
#ifdef __KERNEL__
@@ -94,8 +95,36 @@ static inline void early_quirks(void) { }
extern void pci_iommu_alloc(void);
-/* MSI arch hook */
-#define arch_setup_msi_irqs arch_setup_msi_irqs
+#ifdef CONFIG_PCI_MSI
+/* MSI arch specific hooks */
+static inline int x86_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ return x86_msi.setup_msi_irqs(dev, nvec, type);
+}
+
+static inline void x86_teardown_msi_irqs(struct pci_dev *dev)
+{
+ x86_msi.teardown_msi_irqs(dev);
+}
+
+static inline void x86_teardown_msi_irq(unsigned int irq)
+{
+ x86_msi.teardown_msi_irq(irq);
+}
+#define arch_setup_msi_irqs x86_setup_msi_irqs
+#define arch_teardown_msi_irqs x86_teardown_msi_irqs
+#define arch_teardown_msi_irq x86_teardown_msi_irq
+/* implemented in arch/x86/kernel/apic/io_apic.c */
+int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
+void native_teardown_msi_irq(unsigned int irq);
+/* default to the implementation in drivers/pci/msi.c */
+#define HAVE_DEFAULT_MSI_TEARDOWN_IRQS
+void default_teardown_msi_irqs(struct pci_dev *dev);
+#else
+#define native_setup_msi_irqs NULL
+#define native_teardown_msi_irq NULL
+#define default_teardown_msi_irqs NULL
+#endif
#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
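
The MSI hooks above route through the x86_msi ops table declared in x86_init.h, so an alternative backend can repoint them at boot while callers keep using the same wrappers. A userspace sketch of that indirection; struct fake_pci_dev and the xen_setup() override are stand-ins (assumptions), not the real kernel symbols:

#include <stdio.h>

struct fake_pci_dev { int devfn; };

struct msi_ops {
        int  (*setup_msi_irqs)(struct fake_pci_dev *dev, int nvec, int type);
        void (*teardown_msi_irq)(unsigned int irq);
};

static int native_setup(struct fake_pci_dev *dev, int nvec, int type)
{
        printf("native setup: dev %d, %d vectors\n", dev->devfn, nvec);
        return 0;
}

static void native_teardown(unsigned int irq)
{
        printf("native teardown: irq %u\n", irq);
}

static int xen_setup(struct fake_pci_dev *dev, int nvec, int type)
{
        printf("xen setup: dev %d, %d vectors\n", dev->devfn, nvec);
        return 0;
}

static struct msi_ops msi = {
        .setup_msi_irqs   = native_setup,
        .teardown_msi_irq = native_teardown,
};

int main(void)
{
        struct fake_pci_dev dev = { .devfn = 42 };

        msi.setup_msi_irqs(&dev, 1, 0);   /* default, native path */
        msi.setup_msi_irqs = xen_setup;   /* a backend overrides at init */
        msi.setup_msi_irqs(&dev, 1, 0);   /* now routed to the override */
        msi.teardown_msi_irq(16);
        return 0;
}
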
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 49c7219826f..704526734be 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -47,6 +47,7 @@ enum pci_bf_sort_state {
extern unsigned int pcibios_max_latency;
void pcibios_resource_survey(void);
+void pcibios_set_cache_line_size(void);
/* pci-pc.c */
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 6e742cc4251..550e26b1dbb 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -111,17 +111,18 @@ union cpuid10_edx {
#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16)
/* IbsFetchCtl bits/masks */
-#define IBS_FETCH_RAND_EN (1ULL<<57)
-#define IBS_FETCH_VAL (1ULL<<49)
-#define IBS_FETCH_ENABLE (1ULL<<48)
-#define IBS_FETCH_CNT 0xFFFF0000ULL
-#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
+#define IBS_FETCH_RAND_EN (1ULL<<57)
+#define IBS_FETCH_VAL (1ULL<<49)
+#define IBS_FETCH_ENABLE (1ULL<<48)
+#define IBS_FETCH_CNT 0xFFFF0000ULL
+#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
/* IbsOpCtl bits */
-#define IBS_OP_CNT_CTL (1ULL<<19)
-#define IBS_OP_VAL (1ULL<<18)
-#define IBS_OP_ENABLE (1ULL<<17)
-#define IBS_OP_MAX_CNT 0x0000FFFFULL
+#define IBS_OP_CNT_CTL (1ULL<<19)
+#define IBS_OP_VAL (1ULL<<18)
+#define IBS_OP_ENABLE (1ULL<<17)
+#define IBS_OP_MAX_CNT 0x0000FFFFULL
+#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */
#ifdef CONFIG_PERF_EVENTS
extern void init_hw_perf_events(void);
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 4cfc9082406..4c2f63c7fc1 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -50,7 +50,7 @@ struct smp_ops {
void (*smp_prepare_cpus)(unsigned max_cpus);
void (*smp_cpus_done)(unsigned max_cpus);
- void (*smp_send_stop)(void);
+ void (*stop_other_cpus)(int wait);
void (*smp_send_reschedule)(int cpu);
int (*cpu_up)(unsigned cpu);
@@ -73,7 +73,12 @@ extern struct smp_ops smp_ops;
static inline void smp_send_stop(void)
{
- smp_ops.smp_send_stop();
+ smp_ops.stop_other_cpus(0);
+}
+
+static inline void stop_other_cpus(void)
+{
+ smp_ops.stop_other_cpus(1);
}
static inline void smp_prepare_boot_cpu(void)
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index bf6b88ef8ee..e969f691cbf 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -5,7 +5,7 @@
*
* SGI UV architectural definitions
*
- * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_X86_UV_UV_HUB_H
@@ -77,7 +77,8 @@
*
* 1111110000000000
* 5432109876543210
- * pppppppppplc0cch
+ * pppppppppplc0cch Nehalem-EX
+ * ppppppppplcc0cch Westmere-EX
* sssssssssss
*
* p = pnode bits
@@ -148,12 +149,25 @@ struct uv_hub_info_s {
unsigned char m_val;
unsigned char n_val;
struct uv_scir_s scir;
+ unsigned char apic_pnode_shift;
};
DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
+union uvh_apicid {
+ unsigned long v;
+ struct uvh_apicid_s {
+ unsigned long local_apic_mask : 24;
+ unsigned long local_apic_shift : 5;
+ unsigned long unused1 : 3;
+ unsigned long pnode_mask : 24;
+ unsigned long pnode_shift : 5;
+ unsigned long unused2 : 3;
+ } s;
+};
+
/*
* Local & Global MMR space macros.
* Note: macros are intended to be used ONLY by inline functions
@@ -182,6 +196,7 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
#define UV_GLOBAL_MMR64_PNODE_BITS(p) \
(((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)
+#define UVH_APICID 0x002D0E00L
#define UV_APIC_PNODE_SHIFT 6
/* Local Bus from cpu's perspective */
@@ -280,7 +295,7 @@ static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
*/
static inline int uv_apicid_to_pnode(int apicid)
{
- return (apicid >> UV_APIC_PNODE_SHIFT);
+ return (apicid >> uv_hub_info->apic_pnode_shift);
}
/*
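
The uv_hub.h hunk above replaces the fixed UV_APIC_PNODE_SHIFT with a per-hub apic_pnode_shift, because Nehalem-EX and Westmere-EX place the pnode bits differently (see the bit diagram earlier in the hunk). A worked example of the extraction, with assumed apicid and shift values:

#include <stdio.h>

static int apic_pnode_shift = 6;        /* would come from uv_hub_info */

static int uv_apicid_to_pnode(int apicid)
{
        return apicid >> apic_pnode_shift;
}

int main(void)
{
        int apicid = 0x5c;

        printf("shift 6: apicid 0x%x -> pnode %d\n",
               apicid, uv_apicid_to_pnode(apicid));

        apic_pnode_shift = 7;           /* e.g. a different socket layout */
        printf("shift 7: apicid 0x%x -> pnode %d\n",
               apicid, uv_apicid_to_pnode(apicid));
        return 0;
}
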
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index b2f2d2e05ce..6d90adf4428 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -806,6 +806,78 @@ union uvh_node_present_table_u {
};
/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_alias210_overlay_config_0_mmr_s {
+ unsigned long rsvd_0_23: 24; /* */
+ unsigned long base : 8; /* RW */
+ unsigned long rsvd_32_47: 16; /* */
+ unsigned long m_alias : 5; /* RW */
+ unsigned long rsvd_53_62: 10; /* */
+ unsigned long enable : 1; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_alias210_overlay_config_1_mmr_s {
+ unsigned long rsvd_0_23: 24; /* */
+ unsigned long base : 8; /* RW */
+ unsigned long rsvd_32_47: 16; /* */
+ unsigned long m_alias : 5; /* RW */
+ unsigned long rsvd_53_62: 10; /* */
+ unsigned long enable : 1; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
+ unsigned long rsvd_0_23: 24; /* */
+ unsigned long base : 8; /* RW */
+ unsigned long rsvd_32_47: 16; /* */
+ unsigned long m_alias : 5; /* RW */
+ unsigned long rsvd_53_62: 10; /* */
+ unsigned long enable : 1; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
/* ========================================================================= */
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
@@ -857,6 +929,29 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
};
/* ========================================================================= */
+/* UVH_RH_GAM_CONFIG_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
+
+#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
+#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
+
+union uvh_rh_gam_config_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_config_mmr_s {
+ unsigned long m_skt : 6; /* RW */
+ unsigned long n_skt : 4; /* RW */
+ unsigned long rsvd_10_11: 2; /* */
+ unsigned long mmiol_cfg : 1; /* RW */
+ unsigned long rsvd_13_63: 51; /* */
+ } s;
+};
+
+/* ========================================================================= */
/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
@@ -987,97 +1082,5 @@ union uvh_rtc1_int_config_u {
} s;
};
-/* ========================================================================= */
-/* UVH_SI_ADDR_MAP_CONFIG */
-/* ========================================================================= */
-#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
-
-#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0
-#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
-#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8
-#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL
-
-union uvh_si_addr_map_config_u {
- unsigned long v;
- struct uvh_si_addr_map_config_s {
- unsigned long m_skt : 6; /* RW */
- unsigned long rsvd_6_7: 2; /* */
- unsigned long n_skt : 4; /* RW */
- unsigned long rsvd_12_63: 52; /* */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_SI_ALIAS0_OVERLAY_CONFIG */
-/* ========================================================================= */
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL
-
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias0_overlay_config_u {
- unsigned long v;
- struct uvh_si_alias0_overlay_config_s {
- unsigned long rsvd_0_23: 24; /* */
- unsigned long base : 8; /* RW */
- unsigned long rsvd_32_47: 16; /* */
- unsigned long m_alias : 5; /* RW */
- unsigned long rsvd_53_62: 10; /* */
- unsigned long enable : 1; /* RW */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_SI_ALIAS1_OVERLAY_CONFIG */
-/* ========================================================================= */
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL
-
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias1_overlay_config_u {
- unsigned long v;
- struct uvh_si_alias1_overlay_config_s {
- unsigned long rsvd_0_23: 24; /* */
- unsigned long base : 8; /* RW */
- unsigned long rsvd_32_47: 16; /* */
- unsigned long m_alias : 5; /* RW */
- unsigned long rsvd_53_62: 10; /* */
- unsigned long enable : 1; /* RW */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_SI_ALIAS2_OVERLAY_CONFIG */
-/* ========================================================================= */
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL
-
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias2_overlay_config_u {
- unsigned long v;
- struct uvh_si_alias2_overlay_config_s {
- unsigned long rsvd_0_23: 24; /* */
- unsigned long base : 8; /* RW */
- unsigned long rsvd_32_47: 16; /* */
- unsigned long m_alias : 5; /* RW */
- unsigned long rsvd_53_62: 10; /* */
- unsigned long enable : 1; /* RW */
- } s;
-};
-
-#endif /* _ASM_X86_UV_UV_MMRS_H */
+#endif /* __ASM_UV_MMRS_X86_H__ */
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index baa579c8e03..64642ad019f 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -154,9 +154,18 @@ struct x86_platform_ops {
int (*i8042_detect)(void);
};
+struct pci_dev;
+
+struct x86_msi_ops {
+ int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
+ void (*teardown_msi_irq)(unsigned int irq);
+ void (*teardown_msi_irqs)(struct pci_dev *dev);
+};
+
extern struct x86_init_ops x86_init;
extern struct x86_cpuinit_ops x86_cpuinit;
extern struct x86_platform_ops x86_platform;
+extern struct x86_msi_ops x86_msi;
extern void x86_init_noop(void);
extern void x86_init_uint_noop(unsigned int unused);
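
The hunk above turns the MSI entry points into a per-platform ops table; the native defaults are filled into x86_msi by x86_init.c later in this patch. A minimal, hedged sketch of how a platform might override them — the myplat_* names are hypothetical and only illustrate the indirection:

/*
 * Sketch only: the myplat_* handlers and myplat_msi_init() are invented
 * placeholders, not part of this patch.
 */
#include <linux/pci.h>
#include <asm/x86_init.h>

static int myplat_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	/* ask the platform/hypervisor for vectors instead of the IO-APIC */
	return -ENOSYS;
}

static void myplat_teardown_msi_irq(unsigned int irq)
{
	/* release whatever myplat_setup_msi_irqs() allocated */
}

void __init myplat_msi_init(void)
{
	x86_msi.setup_msi_irqs   = myplat_setup_msi_irqs;
	x86_msi.teardown_msi_irq = myplat_teardown_msi_irq;
	/* .teardown_msi_irqs keeps the default_teardown_msi_irqs fallback */
}
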
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h
new file mode 100644
index 00000000000..2329b3eaf8d
--- /dev/null
+++ b/arch/x86/include/asm/xen/pci.h
@@ -0,0 +1,65 @@
+#ifndef _ASM_X86_XEN_PCI_H
+#define _ASM_X86_XEN_PCI_H
+
+#if defined(CONFIG_PCI_XEN)
+extern int __init pci_xen_init(void);
+extern int __init pci_xen_hvm_init(void);
+#define pci_xen 1
+#else
+#define pci_xen 0
+#define pci_xen_init (0)
+static inline int pci_xen_hvm_init(void)
+{
+ return -1;
+}
+#endif
+#if defined(CONFIG_XEN_DOM0)
+void __init xen_setup_pirqs(void);
+#else
+static inline void __init xen_setup_pirqs(void)
+{
+}
+#endif
+
+#if defined(CONFIG_PCI_MSI)
+#if defined(CONFIG_PCI_XEN)
+/* drivers/pci/xen-pcifront.c fills in this structure with
+ * its own functions.
+ */
+struct xen_pci_frontend_ops {
+ int (*enable_msi)(struct pci_dev *dev, int **vectors);
+ void (*disable_msi)(struct pci_dev *dev);
+ int (*enable_msix)(struct pci_dev *dev, int **vectors, int nvec);
+ void (*disable_msix)(struct pci_dev *dev);
+};
+
+extern struct xen_pci_frontend_ops *xen_pci_frontend;
+
+static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev,
+ int **vectors)
+{
+ if (xen_pci_frontend && xen_pci_frontend->enable_msi)
+ return xen_pci_frontend->enable_msi(dev, vectors);
+ return -ENODEV;
+}
+static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev)
+{
+ if (xen_pci_frontend && xen_pci_frontend->disable_msi)
+ xen_pci_frontend->disable_msi(dev);
+}
+static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev,
+ int **vectors, int nvec)
+{
+ if (xen_pci_frontend && xen_pci_frontend->enable_msix)
+ return xen_pci_frontend->enable_msix(dev, vectors, nvec);
+ return -ENODEV;
+}
+static inline void xen_pci_frontend_disable_msix(struct pci_dev *dev)
+{
+ if (xen_pci_frontend && xen_pci_frontend->disable_msix)
+ xen_pci_frontend->disable_msix(dev);
+}
+#endif /* CONFIG_PCI_XEN */
+#endif /* CONFIG_PCI_MSI */
+
+#endif /* _ASM_X86_XEN_PCI_H */
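
The inline wrappers above only dispatch once a frontend has published its ops; until then they fall back to -ENODEV or do nothing, which is why each one NULL-checks both the pointer and the individual hook. A hedged sketch of roughly how drivers/pci/xen-pcifront.c is expected to plug in (handler names here are placeholders):

#include <linux/pci.h>
#include <asm/xen/pci.h>

static int pcifront_enable_msi_stub(struct pci_dev *dev, int **vectors)
{
	return -ENODEV;		/* the real driver asks the backend here */
}

static struct xen_pci_frontend_ops pcifront_msi_ops = {
	.enable_msi = pcifront_enable_msi_stub,
	/* .disable_msi, .enable_msix, .disable_msix filled in likewise */
};

static void pcifront_publish_msi_ops(void)
{
	xen_pci_frontend = &pcifront_msi_ops;
}
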
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 2c833d8c414..9e13763b609 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -36,7 +36,6 @@ obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
obj-y += time.o ioport.o ldt.o dumpstack.o
obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
obj-$(CONFIG_IRQ_WORK) += irq_work.o
-obj-$(CONFIG_X86_VISWS) += visws_quirks.o
obj-$(CONFIG_X86_32) += probe_roms_32.o
obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
@@ -58,7 +57,6 @@ obj-$(CONFIG_INTEL_TXT) += tboot.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += cpu/
obj-y += acpi/
-obj-$(CONFIG_SFI) += sfi.o
obj-y += reboot.o
obj-$(CONFIG_MCA) += mca_32.o
obj-$(CONFIG_X86_MSR) += msr.o
@@ -82,7 +80,6 @@ obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o
obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_VM86) += vm86_32.o
@@ -104,14 +101,6 @@ obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o
-obj-$(CONFIG_SCx200) += scx200.o
-scx200-y += scx200_32.o
-
-obj-$(CONFIG_OLPC) += olpc.o
-obj-$(CONFIG_OLPC_XO1) += olpc-xo1.o
-obj-$(CONFIG_OLPC_OPENFIRMWARE) += olpc_ofw.o
-obj-$(CONFIG_X86_MRST) += mrst.o
-
microcode-y := microcode_core.o
microcode-$(CONFIG_MICROCODE_INTEL) += microcode_intel.o
microcode-$(CONFIG_MICROCODE_AMD) += microcode_amd.o
@@ -124,7 +113,6 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
###
# 64 bit specific files
ifeq ($(CONFIG_X86_64),y)
- obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o
obj-$(CONFIG_AUDIT) += audit_64.o
obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index c05872aa3ce..71232b941b6 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -513,35 +513,62 @@ int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
return 0;
}
-/*
- * success: return IRQ number (>=0)
- * failure: return < 0
- */
-int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
+static int acpi_register_gsi_pic(struct device *dev, u32 gsi,
+ int trigger, int polarity)
{
- unsigned int irq;
- unsigned int plat_gsi = gsi;
-
#ifdef CONFIG_PCI
/*
* Make sure all (legacy) PCI IRQs are set as level-triggered.
*/
- if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
- if (trigger == ACPI_LEVEL_SENSITIVE)
- eisa_set_level_irq(gsi);
- }
+ if (trigger == ACPI_LEVEL_SENSITIVE)
+ eisa_set_level_irq(gsi);
#endif
+ return gsi;
+}
+
+static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
+ int trigger, int polarity)
+{
#ifdef CONFIG_X86_IO_APIC
- if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
- plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
- }
+ gsi = mp_register_gsi(dev, gsi, trigger, polarity);
#endif
+
+ return gsi;
+}
+
+int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
+ int trigger, int polarity) = acpi_register_gsi_pic;
+
+/*
+ * success: return IRQ number (>=0)
+ * failure: return < 0
+ */
+int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
+{
+ unsigned int irq;
+ unsigned int plat_gsi = gsi;
+
+ plat_gsi = (*__acpi_register_gsi)(dev, gsi, trigger, polarity);
irq = gsi_to_irq(plat_gsi);
return irq;
}
+void __init acpi_set_irq_model_pic(void)
+{
+ acpi_irq_model = ACPI_IRQ_MODEL_PIC;
+ __acpi_register_gsi = acpi_register_gsi_pic;
+ acpi_ioapic = 0;
+}
+
+void __init acpi_set_irq_model_ioapic(void)
+{
+ acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
+ __acpi_register_gsi = acpi_register_gsi_ioapic;
+ acpi_ioapic = 1;
+}
+
/*
* ACPI based hotplug support for CPU
*/
@@ -1259,8 +1286,7 @@ static void __init acpi_process_madt(void)
*/
error = acpi_parse_madt_ioapic_entries();
if (!error) {
- acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
- acpi_ioapic = 1;
+ acpi_set_irq_model_ioapic();
smp_found_config = 1;
}
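
With acpi_register_gsi() reduced to a call through __acpi_register_gsi, adding another interrupt model only means installing its own registrar. A hedged sketch of that pattern, using purely hypothetical names:

static int acpi_register_gsi_paravirt(struct device *dev, u32 gsi,
				      int trigger, int polarity)
{
	/* map trigger/polarity onto the hypervisor's view of the GSI and
	 * return the GSI that gsi_to_irq() should translate */
	return gsi;
}

void __init acpi_set_irq_model_paravirt(void)
{
	__acpi_register_gsi = acpi_register_gsi_paravirt;
	/* acpi_irq_model / acpi_ioapic would be set to match the platform */
}
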
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a36bb90aef5..5079f24c955 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -638,71 +638,32 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
atomic_set(&stop_machine_first, 1);
wrote_text = 0;
/* Use __stop_machine() because the caller already got online_cpus. */
- __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
+ __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
return addr;
}
#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
-unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
+#ifdef CONFIG_X86_64
+unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
+#else
+unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
+#endif
void __init arch_init_ideal_nop5(void)
{
- extern const unsigned char ftrace_test_p6nop[];
- extern const unsigned char ftrace_test_nop5[];
- extern const unsigned char ftrace_test_jmp[];
- int faulted = 0;
-
/*
- * There is no good nop for all x86 archs.
- * We will default to using the P6_NOP5, but first we
- * will test to make sure that the nop will actually
- * work on this CPU. If it faults, we will then
- * go to a lesser efficient 5 byte nop. If that fails
- * we then just use a jmp as our nop. This isn't the most
- * efficient nop, but we can not use a multi part nop
- * since we would then risk being preempted in the middle
- * of that nop, and if we enabled tracing then, it might
- * cause a system crash.
+ * There is no good nop for all x86 archs. This selection
+ * algorithm should be unified with the one in find_nop_table(),
+ * but this should be good enough for now.
*
- * TODO: check the cpuid to determine the best nop.
+ * For cases other than the ones below, use the safe (as in
+ * always functional) defaults above.
*/
- asm volatile (
- "ftrace_test_jmp:"
- "jmp ftrace_test_p6nop\n"
- "nop\n"
- "nop\n"
- "nop\n" /* 2 byte jmp + 3 bytes */
- "ftrace_test_p6nop:"
- P6_NOP5
- "jmp 1f\n"
- "ftrace_test_nop5:"
- ".byte 0x66,0x66,0x66,0x66,0x90\n"
- "1:"
- ".section .fixup, \"ax\"\n"
- "2: movl $1, %0\n"
- " jmp ftrace_test_nop5\n"
- "3: movl $2, %0\n"
- " jmp 1b\n"
- ".previous\n"
- _ASM_EXTABLE(ftrace_test_p6nop, 2b)
- _ASM_EXTABLE(ftrace_test_nop5, 3b)
- : "=r"(faulted) : "0" (faulted));
-
- switch (faulted) {
- case 0:
- pr_info("converting mcount calls to 0f 1f 44 00 00\n");
- memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5);
- break;
- case 1:
- pr_info("converting mcount calls to 66 66 66 66 90\n");
- memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5);
- break;
- case 2:
- pr_info("converting mcount calls to jmp . + 5\n");
- memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5);
- break;
- }
-
+#ifdef CONFIG_X86_64
+ /* Don't use these on 32 bits due to broken virtualizers */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ memcpy(ideal_nop5, p6_nops[5], 5);
+#endif
}
#endif
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 850657d1b0e..3f838d53739 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -52,7 +52,6 @@
#include <asm/mce.h>
#include <asm/kvm_para.h>
#include <asm/tsc.h>
-#include <asm/atomic.h>
unsigned int num_processors;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 8ae808d110f..7cc0a721f62 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -3109,7 +3109,7 @@ void destroy_irq(unsigned int irq)
irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE);
- if (intr_remapping_enabled)
+ if (irq_remapped(cfg))
free_irte(irq);
raw_spin_lock_irqsave(&vector_lock, flags);
__clear_irq_vector(irq, cfg);
@@ -3331,7 +3331,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
return 0;
}
-int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
int node, ret, sub_handle, index = 0;
unsigned int irq, irq_want;
@@ -3389,7 +3389,7 @@ error:
return ret;
}
-void arch_teardown_msi_irq(unsigned int irq)
+void native_teardown_msi_irq(unsigned int irq)
{
destroy_irq(irq);
}
@@ -3650,6 +3650,11 @@ void __init probe_nr_irqs_gsi(void)
printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
}
+int get_nr_irqs_gsi(void)
+{
+ return nr_irqs_gsi;
+}
+
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index f744f54cb24..194539aea17 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -5,7 +5,7 @@
*
* SGI UV APIC functions (note: not an Intel compatible APIC)
*
- * Copyright (C) 2007-2009 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/cpumask.h>
#include <linux/hardirq.h>
@@ -41,6 +41,7 @@ DEFINE_PER_CPU(int, x2apic_extra_bits);
static enum uv_system_type uv_system_type;
static u64 gru_start_paddr, gru_end_paddr;
+static union uvh_apicid uvh_apicid;
int uv_min_hub_revision_id;
EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
static DEFINE_SPINLOCK(uv_nmi_lock);
@@ -70,12 +71,27 @@ static int early_get_nodeid(void)
return node_id.s.node_id;
}
+static void __init early_get_apic_pnode_shift(void)
+{
+ unsigned long *mmr;
+
+ mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_APICID, sizeof(*mmr));
+ uvh_apicid.v = *mmr;
+ early_iounmap(mmr, sizeof(*mmr));
+ if (!uvh_apicid.v)
+ /*
+		 * Old BIOS: use the default value
+ */
+ uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT;
+}
+
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
int nodeid;
if (!strcmp(oem_id, "SGI")) {
nodeid = early_get_nodeid();
+ early_get_apic_pnode_shift();
x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
x86_platform.nmi_init = uv_nmi_init;
if (!strcmp(oem_table_id, "UVL"))
@@ -84,7 +100,7 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
uv_system_type = UV_X2APIC;
else if (!strcmp(oem_table_id, "UVH")) {
__get_cpu_var(x2apic_extra_bits) =
- nodeid << (UV_APIC_PNODE_SHIFT - 1);
+ nodeid << (uvh_apicid.s.pnode_shift - 1);
uv_system_type = UV_NON_UNIQUE_APIC;
return 1;
}
@@ -363,14 +379,14 @@ struct redir_addr {
#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
static __initdata struct redir_addr redir_addrs[] = {
- {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
- {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
- {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
+ {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
+ {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
+ {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
};
static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
- union uvh_si_alias0_overlay_config_u alias;
+ union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
int i;
@@ -644,7 +660,7 @@ void uv_nmi_init(void)
void __init uv_system_init(void)
{
- union uvh_si_addr_map_config_u m_n_config;
+ union uvh_rh_gam_config_mmr_u m_n_config;
union uvh_node_id_u node_id;
unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
@@ -654,7 +670,7 @@ void __init uv_system_init(void)
map_low_mmrs();
- m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
+	m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
m_val = m_n_config.s.m_skt;
n_val = m_n_config.s.n_skt;
mmr_base =
@@ -716,6 +732,10 @@ void __init uv_system_init(void)
int apicid = per_cpu(x86_cpu_to_apicid, cpu);
nid = cpu_to_node(cpu);
+ /*
+	 * apic_pnode_shift must be set before calling uv_apicid_to_pnode().
+ */
+ uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
pnode = uv_apicid_to_pnode(apicid);
blade = boot_pnode_to_blade(pnode);
lcpu = uv_blade_info[blade].nr_possible_cpus;
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index c1e8c7a5116..ed6310183ef 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -237,6 +237,7 @@ struct x86_pmu {
* Intel DebugStore bits
*/
int bts, pebs;
+ int bts_active, pebs_active;
int pebs_record_size;
void (*drain_pebs)(struct pt_regs *regs);
struct event_constraint *pebs_constraints;
@@ -380,7 +381,7 @@ static void release_pmc_hardware(void) {}
#endif
-static int reserve_ds_buffers(void);
+static void reserve_ds_buffers(void);
static void release_ds_buffers(void);
static void hw_perf_event_destroy(struct perf_event *event)
@@ -477,7 +478,7 @@ static int x86_setup_perfctr(struct perf_event *event)
if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
(hwc->sample_period == 1)) {
/* BTS is not supported by this architecture. */
- if (!x86_pmu.bts)
+ if (!x86_pmu.bts_active)
return -EOPNOTSUPP;
/* BTS is currently only allowed for user-mode. */
@@ -496,12 +497,13 @@ static int x86_pmu_hw_config(struct perf_event *event)
int precise = 0;
/* Support for constant skid */
- if (x86_pmu.pebs)
+ if (x86_pmu.pebs_active) {
precise++;
- /* Support for IP fixup */
- if (x86_pmu.lbr_nr)
- precise++;
+ /* Support for IP fixup */
+ if (x86_pmu.lbr_nr)
+ precise++;
+ }
if (event->attr.precise_ip > precise)
return -EOPNOTSUPP;
@@ -543,11 +545,8 @@ static int __x86_pmu_event_init(struct perf_event *event)
if (atomic_read(&active_events) == 0) {
if (!reserve_pmc_hardware())
err = -EBUSY;
- else {
- err = reserve_ds_buffers();
- if (err)
- release_pmc_hardware();
- }
+ else
+ reserve_ds_buffers();
}
if (!err)
atomic_inc(&active_events);
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 46d58448c3a..e421b8cd694 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -280,11 +280,11 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
struct amd_nb *nb;
int i;
- nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL);
+ nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
+ cpu_to_node(cpu));
if (!nb)
return NULL;
- memset(nb, 0, sizeof(*nb));
nb->nb_id = nb_id;
/*
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 4977f9c400e..b7dcd9f2b8a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -74,6 +74,107 @@ static void fini_debug_store_on_cpu(int cpu)
wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}
+static int alloc_pebs_buffer(int cpu)
+{
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+ int node = cpu_to_node(cpu);
+ int max, thresh = 1; /* always use a single PEBS record */
+ void *buffer;
+
+ if (!x86_pmu.pebs)
+ return 0;
+
+ buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
+ if (unlikely(!buffer))
+ return -ENOMEM;
+
+ max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
+
+ ds->pebs_buffer_base = (u64)(unsigned long)buffer;
+ ds->pebs_index = ds->pebs_buffer_base;
+ ds->pebs_absolute_maximum = ds->pebs_buffer_base +
+ max * x86_pmu.pebs_record_size;
+
+ ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
+ thresh * x86_pmu.pebs_record_size;
+
+ return 0;
+}
+
+static void release_pebs_buffer(int cpu)
+{
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+ if (!ds || !x86_pmu.pebs)
+ return;
+
+ kfree((void *)(unsigned long)ds->pebs_buffer_base);
+ ds->pebs_buffer_base = 0;
+}
+
+static int alloc_bts_buffer(int cpu)
+{
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+ int node = cpu_to_node(cpu);
+ int max, thresh;
+ void *buffer;
+
+ if (!x86_pmu.bts)
+ return 0;
+
+ buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
+ if (unlikely(!buffer))
+ return -ENOMEM;
+
+ max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
+ thresh = max / 16;
+
+ ds->bts_buffer_base = (u64)(unsigned long)buffer;
+ ds->bts_index = ds->bts_buffer_base;
+ ds->bts_absolute_maximum = ds->bts_buffer_base +
+ max * BTS_RECORD_SIZE;
+ ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
+ thresh * BTS_RECORD_SIZE;
+
+ return 0;
+}
+
+static void release_bts_buffer(int cpu)
+{
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+ if (!ds || !x86_pmu.bts)
+ return;
+
+ kfree((void *)(unsigned long)ds->bts_buffer_base);
+ ds->bts_buffer_base = 0;
+}
+
+static int alloc_ds_buffer(int cpu)
+{
+ int node = cpu_to_node(cpu);
+ struct debug_store *ds;
+
+ ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
+ if (unlikely(!ds))
+ return -ENOMEM;
+
+ per_cpu(cpu_hw_events, cpu).ds = ds;
+
+ return 0;
+}
+
+static void release_ds_buffer(int cpu)
+{
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+ if (!ds)
+ return;
+
+ per_cpu(cpu_hw_events, cpu).ds = NULL;
+ kfree(ds);
+}
+
static void release_ds_buffers(void)
{
int cpu;
@@ -82,93 +183,77 @@ static void release_ds_buffers(void)
return;
get_online_cpus();
-
for_each_online_cpu(cpu)
fini_debug_store_on_cpu(cpu);
for_each_possible_cpu(cpu) {
- struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
- if (!ds)
- continue;
-
- per_cpu(cpu_hw_events, cpu).ds = NULL;
-
- kfree((void *)(unsigned long)ds->pebs_buffer_base);
- kfree((void *)(unsigned long)ds->bts_buffer_base);
- kfree(ds);
+ release_pebs_buffer(cpu);
+ release_bts_buffer(cpu);
+ release_ds_buffer(cpu);
}
-
put_online_cpus();
}
-static int reserve_ds_buffers(void)
+static void reserve_ds_buffers(void)
{
- int cpu, err = 0;
+ int bts_err = 0, pebs_err = 0;
+ int cpu;
+
+ x86_pmu.bts_active = 0;
+ x86_pmu.pebs_active = 0;
if (!x86_pmu.bts && !x86_pmu.pebs)
- return 0;
+ return;
+
+ if (!x86_pmu.bts)
+ bts_err = 1;
+
+ if (!x86_pmu.pebs)
+ pebs_err = 1;
get_online_cpus();
for_each_possible_cpu(cpu) {
- struct debug_store *ds;
- void *buffer;
- int max, thresh;
+ if (alloc_ds_buffer(cpu)) {
+ bts_err = 1;
+ pebs_err = 1;
+ }
+
+ if (!bts_err && alloc_bts_buffer(cpu))
+ bts_err = 1;
- err = -ENOMEM;
- ds = kzalloc(sizeof(*ds), GFP_KERNEL);
- if (unlikely(!ds))
+ if (!pebs_err && alloc_pebs_buffer(cpu))
+ pebs_err = 1;
+
+ if (bts_err && pebs_err)
break;
- per_cpu(cpu_hw_events, cpu).ds = ds;
-
- if (x86_pmu.bts) {
- buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
- if (unlikely(!buffer))
- break;
-
- max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
- thresh = max / 16;
-
- ds->bts_buffer_base = (u64)(unsigned long)buffer;
- ds->bts_index = ds->bts_buffer_base;
- ds->bts_absolute_maximum = ds->bts_buffer_base +
- max * BTS_RECORD_SIZE;
- ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
- thresh * BTS_RECORD_SIZE;
- }
+ }
- if (x86_pmu.pebs) {
- buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL);
- if (unlikely(!buffer))
- break;
-
- max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
-
- ds->pebs_buffer_base = (u64)(unsigned long)buffer;
- ds->pebs_index = ds->pebs_buffer_base;
- ds->pebs_absolute_maximum = ds->pebs_buffer_base +
- max * x86_pmu.pebs_record_size;
- /*
- * Always use single record PEBS
- */
- ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
- x86_pmu.pebs_record_size;
- }
+ if (bts_err) {
+ for_each_possible_cpu(cpu)
+ release_bts_buffer(cpu);
+ }
- err = 0;
+ if (pebs_err) {
+ for_each_possible_cpu(cpu)
+ release_pebs_buffer(cpu);
}
- if (err)
- release_ds_buffers();
- else {
+ if (bts_err && pebs_err) {
+ for_each_possible_cpu(cpu)
+ release_ds_buffer(cpu);
+ } else {
+ if (x86_pmu.bts && !bts_err)
+ x86_pmu.bts_active = 1;
+
+ if (x86_pmu.pebs && !pebs_err)
+ x86_pmu.pebs_active = 1;
+
for_each_online_cpu(cpu)
init_debug_store_on_cpu(cpu);
}
put_online_cpus();
-
- return err;
}
/*
@@ -233,7 +318,7 @@ static int intel_pmu_drain_bts_buffer(void)
if (!event)
return 0;
- if (!ds)
+ if (!x86_pmu.bts_active)
return 0;
at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
@@ -503,7 +588,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
struct pebs_record_core *at, *top;
int n;
- if (!ds || !x86_pmu.pebs)
+ if (!x86_pmu.pebs_active)
return;
at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
@@ -545,7 +630,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
u64 status = 0;
int bit, n;
- if (!ds || !x86_pmu.pebs)
+ if (!x86_pmu.pebs_active)
return;
at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
@@ -630,9 +715,8 @@ static void intel_ds_init(void)
#else /* CONFIG_CPU_SUP_INTEL */
-static int reserve_ds_buffers(void)
+static void reserve_ds_buffers(void)
{
- return 0;
}
static void release_ds_buffers(void)
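
The rewritten reserve_ds_buffers() no longer fails everything when one allocation fails: BTS and PEBS are tracked with separate error flags and whichever survives is marked active. A standalone, hedged sketch of that degradation policy, with alloc_a()/alloc_b() standing in for the per-CPU BTS/PEBS allocations (the patch additionally releases the failed feature's buffers afterwards, which this sketch omits):

#include <stdbool.h>
#include <stdio.h>

static bool alloc_a(int cpu) { (void)cpu; return true;  }	/* pretend BTS allocs succeed */
static bool alloc_b(int cpu) { (void)cpu; return false; }	/* pretend PEBS allocs fail   */

static void reserve(int ncpus, bool *a_active, bool *b_active)
{
	bool a_err = false, b_err = false;
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		if (!a_err && !alloc_a(cpu))
			a_err = true;		/* keep going: B may still work */
		if (!b_err && !alloc_b(cpu))
			b_err = true;
		if (a_err && b_err)
			break;			/* nothing left worth trying */
	}
	*a_active = !a_err;			/* only fully reserved features go active */
	*b_active = !b_err;
}

int main(void)
{
	bool a, b;

	reserve(4, &a, &b);
	printf("A active: %d, B active: %d\n", a, b);	/* -> 1, 0 */
	return 0;
}
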
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 1b7b31ab7d8..212a6a42527 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -33,7 +33,6 @@
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/device.h>
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 0f6376ffa2d..1bc7f75a5bd 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -82,11 +82,11 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
if (kstack_end(stack))
break;
if (i && ((i % STACKSLOTS_PER_LINE) == 0))
- printk("\n%s", log_lvl);
- printk(" %08lx", *stack++);
+ printk(KERN_CONT "\n");
+ printk(KERN_CONT " %08lx", *stack++);
touch_nmi_watchdog();
}
- printk("\n");
+ printk(KERN_CONT "\n");
show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 57a21f11c79..6a340485249 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -265,20 +265,20 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
if (stack >= irq_stack && stack <= irq_stack_end) {
if (stack == irq_stack_end) {
stack = (unsigned long *) (irq_stack_end[-1]);
- printk(" <EOI> ");
+ printk(KERN_CONT " <EOI> ");
}
} else {
if (((long) stack & (THREAD_SIZE-1)) == 0)
break;
}
if (i && ((i % STACKSLOTS_PER_LINE) == 0))
- printk("\n%s", log_lvl);
- printk(" %016lx", *stack++);
+ printk(KERN_CONT "\n");
+ printk(KERN_CONT " %016lx", *stack++);
touch_nmi_watchdog();
}
preempt_enable();
- printk("\n");
+ printk(KERN_CONT "\n");
show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 50fbbe60e50..96656f20775 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
+#include <linux/mm.h>
#include <asm/apic.h>
@@ -60,9 +61,6 @@ union irq_ctx {
static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
-static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, hardirq_stack, THREAD_SIZE);
-static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, softirq_stack, THREAD_SIZE);
-
static void call_on_stack(void *func, void *stack)
{
asm volatile("xchgl %%ebx,%%esp \n"
@@ -128,7 +126,9 @@ void __cpuinit irq_ctx_init(int cpu)
if (per_cpu(hardirq_ctx, cpu))
return;
- irqctx = &per_cpu(hardirq_stack, cpu);
+ irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+ THREAD_FLAGS,
+ THREAD_ORDER));
irqctx->tinfo.task = NULL;
irqctx->tinfo.exec_domain = NULL;
irqctx->tinfo.cpu = cpu;
@@ -137,7 +137,9 @@ void __cpuinit irq_ctx_init(int cpu)
per_cpu(hardirq_ctx, cpu) = irqctx;
- irqctx = &per_cpu(softirq_stack, cpu);
+ irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+ THREAD_FLAGS,
+ THREAD_ORDER));
irqctx->tinfo.task = NULL;
irqctx->tinfo.exec_domain = NULL;
irqctx->tinfo.cpu = cpu;
@@ -150,11 +152,6 @@ void __cpuinit irq_ctx_init(int cpu)
cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}
-void irq_ctx_exit(int cpu)
-{
- per_cpu(hardirq_ctx, cpu) = NULL;
-}
-
asmlinkage void do_softirq(void)
{
unsigned long flags;
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index d81cfebb848..cd21b654dec 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -315,14 +315,18 @@ static void kgdb_remove_all_hw_break(void)
if (!breakinfo[i].enabled)
continue;
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
- if (bp->attr.disabled == 1)
+ if (!bp->attr.disabled) {
+ arch_uninstall_hw_breakpoint(bp);
+ bp->attr.disabled = 1;
continue;
+ }
if (dbg_is_early)
early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
breakinfo[i].type);
- else
- arch_uninstall_hw_breakpoint(bp);
- bp->attr.disabled = 1;
+ else if (hw_break_release_slot(i))
+ printk(KERN_ERR "KGDB: hw bpt remove failed %lx\n",
+ breakinfo[i].addr);
+ breakinfo[i].enabled = 0;
}
}
@@ -387,7 +391,7 @@ kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
* disable hardware debugging while it is processing gdb packets or
* handling exception.
*/
-void kgdb_disable_hw_debug(struct pt_regs *regs)
+static void kgdb_disable_hw_debug(struct pt_regs *regs)
{
int i;
int cpu = raw_smp_processor_id();
@@ -724,6 +728,7 @@ struct kgdb_arch arch_kgdb_ops = {
.flags = KGDB_HW_BREAKPOINT,
.set_hw_breakpoint = kgdb_set_hw_break,
.remove_hw_breakpoint = kgdb_remove_hw_break,
+ .disable_hw_break = kgdb_disable_hw_debug,
.remove_all_hw_break = kgdb_remove_all_hw_break,
.correct_hw_break = kgdb_correct_hw_break,
};
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index e1af7c055c7..ce0cb4721c9 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -212,7 +212,7 @@ static int install_equiv_cpu_table(const u8 *buf)
return 0;
}
- equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size);
+ equiv_cpu_table = vmalloc(size);
if (!equiv_cpu_table) {
pr_err("failed to allocate equivalent CPU table\n");
return 0;
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index 71825806cd4..6da143c2a6b 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -217,13 +217,13 @@ void __cpuinit fam10h_check_enable_mmcfg(void)
wrmsrl(address, val);
}
-static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d)
+static int __init set_check_enable_amd_mmconf(const struct dmi_system_id *d)
{
pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
return 0;
}
-static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
+static const struct dmi_system_id __initconst mmconf_dmi_table[] = {
{
.callback = set_check_enable_amd_mmconf,
.ident = "Sun Microsystems Machine",
@@ -234,7 +234,8 @@ static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
{}
};
-void __cpuinit check_enable_amd_mmconf_dmi(void)
+/* Called from a __cpuinit function, but only on the BSP. */
+void __ref check_enable_amd_mmconf_dmi(void)
{
dmi_check_system(mmconf_dmi_table);
}
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 7bf2dc4c8f7..12fcbe2c143 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -30,7 +30,6 @@
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/device.h>
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 70c4872cd8a..45892dc4b72 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -801,7 +801,8 @@ void ptrace_disable(struct task_struct *child)
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
unsigned long __user *datap = (unsigned long __user *)data;
@@ -812,8 +813,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
unsigned long tmp;
ret = -EIO;
- if ((addr & (sizeof(data) - 1)) || addr < 0 ||
- addr >= sizeof(struct user))
+ if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
break;
tmp = 0; /* Default return condition */
@@ -830,8 +830,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
- if ((addr & (sizeof(data) - 1)) || addr < 0 ||
- addr >= sizeof(struct user))
+ if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
break;
if (addr < sizeof(struct user_regs_struct))
@@ -888,17 +887,17 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
case PTRACE_GET_THREAD_AREA:
- if (addr < 0)
+ if ((int) addr < 0)
return -EIO;
ret = do_get_thread_area(child, addr,
- (struct user_desc __user *) data);
+ (struct user_desc __user *)data);
break;
case PTRACE_SET_THREAD_AREA:
- if (addr < 0)
+ if ((int) addr < 0)
return -EIO;
ret = do_set_thread_area(child, addr,
- (struct user_desc __user *) data, 0);
+ (struct user_desc __user *)data, 0);
break;
#endif
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index bab3b9e6f66..008b91eefa1 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -41,44 +41,6 @@ void pvclock_set_flags(u8 flags)
valid_flags = flags;
}
-/*
- * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
- * yielding a 64-bit result.
- */
-static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
-{
- u64 product;
-#ifdef __i386__
- u32 tmp1, tmp2;
-#endif
-
- if (shift < 0)
- delta >>= -shift;
- else
- delta <<= shift;
-
-#ifdef __i386__
- __asm__ (
- "mul %5 ; "
- "mov %4,%%eax ; "
- "mov %%edx,%4 ; "
- "mul %5 ; "
- "xor %5,%5 ; "
- "add %4,%%eax ; "
- "adc %5,%%edx ; "
- : "=A" (product), "=r" (tmp1), "=r" (tmp2)
- : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
-#elif defined(__x86_64__)
- __asm__ (
- "mul %%rdx ; shrd $32,%%rdx,%%rax"
- : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
-#else
-#error implement me!
-#endif
-
- return product;
-}
-
static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
{
u64 delta = native_read_tsc() - shadow->tsc_timestamp;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index f7f53dcd3e0..c495aa8d481 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -635,7 +635,7 @@ void native_machine_shutdown(void)
/* O.K Now that I'm on the appropriate processor,
* stop all of the others.
*/
- smp_send_stop();
+ stop_other_cpus();
#endif
lapic_shutdown();
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 95a32746fbf..21c6746338a 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -769,6 +769,8 @@ void __init setup_arch(char **cmdline_p)
x86_init.oem.arch_setup();
+ resource_alloc_from_bottom = 0;
+ iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
setup_memory_map();
parse_setup_data();
/* update the e820_saved too */
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index d801210945d..513deac7228 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -159,10 +159,10 @@ asmlinkage void smp_reboot_interrupt(void)
irq_exit();
}
-static void native_smp_send_stop(void)
+static void native_stop_other_cpus(int wait)
{
unsigned long flags;
- unsigned long wait;
+ unsigned long timeout;
if (reboot_force)
return;
@@ -179,9 +179,12 @@ static void native_smp_send_stop(void)
if (num_online_cpus() > 1) {
apic->send_IPI_allbutself(REBOOT_VECTOR);
- /* Don't wait longer than a second */
- wait = USEC_PER_SEC;
- while (num_online_cpus() > 1 && wait--)
+ /*
+ * Don't wait longer than a second if the caller
+ * didn't ask us to wait.
+ */
+ timeout = USEC_PER_SEC;
+ while (num_online_cpus() > 1 && (wait || timeout--))
udelay(1);
}
@@ -227,7 +230,7 @@ struct smp_ops smp_ops = {
.smp_prepare_cpus = native_smp_prepare_cpus,
.smp_cpus_done = native_smp_cpus_done,
- .smp_send_stop = native_smp_send_stop,
+ .stop_other_cpus = native_stop_other_cpus,
.smp_send_reschedule = native_smp_send_reschedule,
.cpu_up = native_cpu_up,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6c7faecd9e4..083e99d1b7d 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1373,7 +1373,6 @@ void play_dead_common(void)
{
idle_task_exit();
reset_lazy_tlbstate();
- irq_ctx_exit(raw_smp_processor_id());
c1e_remove_cpu(raw_smp_processor_id());
mb();
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index cd6da6bf3ec..ceb2911aa43 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -6,10 +6,12 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <asm/bios_ebda.h>
#include <asm/paravirt.h>
#include <asm/pci_x86.h>
+#include <asm/pci.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
#include <asm/apic.h>
@@ -99,3 +101,8 @@ struct x86_platform_ops x86_platform = {
};
EXPORT_SYMBOL_GPL(x86_platform);
+struct x86_msi_ops x86_msi = {
+ .setup_msi_irqs = native_setup_msi_irqs,
+ .teardown_msi_irq = native_teardown_msi_irq,
+ .teardown_msi_irqs = default_teardown_msi_irqs,
+};
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 908ea5464a5..fb8b376bf28 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -720,7 +720,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
}
}
-static void set_spte_track_bits(u64 *sptep, u64 new_spte)
+static int set_spte_track_bits(u64 *sptep, u64 new_spte)
{
pfn_t pfn;
u64 old_spte = *sptep;
@@ -731,19 +731,20 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
old_spte = __xchg_spte(sptep, new_spte);
if (!is_rmap_spte(old_spte))
- return;
+ return 0;
pfn = spte_to_pfn(old_spte);
if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
kvm_set_pfn_accessed(pfn);
if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
kvm_set_pfn_dirty(pfn);
+ return 1;
}
static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
{
- set_spte_track_bits(sptep, new_spte);
- rmap_remove(kvm, sptep);
+ if (set_spte_track_bits(sptep, new_spte))
+ rmap_remove(kvm, sptep);
}
static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 82e144a4e51..1ca12298ffc 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3395,6 +3395,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
load_host_msrs(vcpu);
+ kvm_load_ldt(ldt_selector);
loadsegment(fs, fs_selector);
#ifdef CONFIG_X86_64
load_gs_index(gs_selector);
@@ -3402,7 +3403,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
#else
loadsegment(gs, gs_selector);
#endif
- kvm_load_ldt(ldt_selector);
reload_tss(vcpu);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8da0e45ff7c..ff21fdda0c5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -821,10 +821,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
#endif
#ifdef CONFIG_X86_64
- if (is_long_mode(&vmx->vcpu)) {
- rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+ rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+ if (is_long_mode(&vmx->vcpu))
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
- }
#endif
for (i = 0; i < vmx->save_nmsrs; ++i)
kvm_set_shared_msr(vmx->guest_msrs[i].index,
@@ -839,23 +838,23 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
++vmx->vcpu.stat.host_state_reload;
vmx->host_state.loaded = 0;
- if (vmx->host_state.fs_reload_needed)
- loadsegment(fs, vmx->host_state.fs_sel);
+#ifdef CONFIG_X86_64
+ if (is_long_mode(&vmx->vcpu))
+ rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+#endif
if (vmx->host_state.gs_ldt_reload_needed) {
kvm_load_ldt(vmx->host_state.ldt_sel);
#ifdef CONFIG_X86_64
load_gs_index(vmx->host_state.gs_sel);
- wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
#else
loadsegment(gs, vmx->host_state.gs_sel);
#endif
}
+ if (vmx->host_state.fs_reload_needed)
+ loadsegment(fs, vmx->host_state.fs_sel);
reload_tss();
#ifdef CONFIG_X86_64
- if (is_long_mode(&vmx->vcpu)) {
- rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
- wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
- }
+ wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
if (current_thread_info()->status & TS_USEDFPU)
clts();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2288ad829b3..cdac9e592aa 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2560,6 +2560,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
!kvm_exception_is_soft(vcpu->arch.exception.nr);
events->exception.nr = vcpu->arch.exception.nr;
events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+ events->exception.pad = 0;
events->exception.error_code = vcpu->arch.exception.error_code;
events->interrupt.injected =
@@ -2573,12 +2574,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
events->nmi.injected = vcpu->arch.nmi_injected;
events->nmi.pending = vcpu->arch.nmi_pending;
events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+ events->nmi.pad = 0;
events->sipi_vector = vcpu->arch.sipi_vector;
events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
| KVM_VCPUEVENT_VALID_SIPI_VECTOR
| KVM_VCPUEVENT_VALID_SHADOW);
+ memset(&events->reserved, 0, sizeof(events->reserved));
}
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
@@ -2623,6 +2626,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
dbgregs->dr6 = vcpu->arch.dr6;
dbgregs->dr7 = vcpu->arch.dr7;
dbgregs->flags = 0;
+ memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
}
static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@@ -3106,6 +3110,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
sizeof(ps->channels));
ps->flags = kvm->arch.vpit->pit_state.flags;
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+ memset(&ps->reserved, 0, sizeof(ps->reserved));
return r;
}
@@ -3169,10 +3174,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_memslots *slots, *old_slots;
unsigned long *dirty_bitmap;
- spin_lock(&kvm->mmu_lock);
- kvm_mmu_slot_remove_write_access(kvm, log->slot);
- spin_unlock(&kvm->mmu_lock);
-
r = -ENOMEM;
dirty_bitmap = vmalloc(n);
if (!dirty_bitmap)
@@ -3194,6 +3195,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
kfree(old_slots);
+ spin_lock(&kvm->mmu_lock);
+ kvm_mmu_slot_remove_write_access(kvm, log->slot);
+ spin_unlock(&kvm->mmu_lock);
+
r = -EFAULT;
if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
vfree(dirty_bitmap);
@@ -3486,6 +3491,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
local_irq_enable();
user_ns.flags = 0;
+ memset(&user_ns.pad, 0, sizeof(user_ns.pad));
r = -EFAULT;
if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
@@ -3972,8 +3978,10 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
return X86EMUL_CONTINUE;
if (kvm_x86_ops->has_wbinvd_exit()) {
+ preempt_disable();
smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
wbinvd_ipi, NULL, 1);
+ preempt_enable();
cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
}
wbinvd();
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index d723e369003..b4996266210 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -74,7 +74,7 @@ void __kunmap_atomic(void *kvaddr)
vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
int idx, type;
- type = kmap_atomic_idx_pop();
+ type = kmap_atomic_idx();
idx = type + KM_TYPE_NR * smp_processor_id();
#ifdef CONFIG_DEBUG_HIGHMEM
@@ -87,6 +87,7 @@ void __kunmap_atomic(void *kvaddr)
* attributes or becomes a protected page in a hypervisor.
*/
kpte_clear_flush(kmap_pte-idx, vaddr);
+ kmap_atomic_idx_pop();
}
#ifdef CONFIG_DEBUG_HIGHMEM
else {
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 84346200e78..71a59296af8 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -51,7 +51,6 @@
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
-#include <linux/bootmem.h>
static int __init parse_direct_gbpages_off(char *arg)
{
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 75a3d7f24a2..7b179b499fa 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -98,7 +98,7 @@ iounmap_atomic(void __iomem *kvaddr)
vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
int idx, type;
- type = kmap_atomic_idx_pop();
+ type = kmap_atomic_idx();
idx = type + KM_TYPE_NR * smp_processor_id();
#ifdef CONFIG_DEBUG_HIGHMEM
@@ -111,6 +111,7 @@ iounmap_atomic(void __iomem *kvaddr)
* attributes or becomes a protected page in a hypervisor.
*/
kpte_clear_flush(kmap_pte-idx, vaddr);
+ kmap_atomic_idx_pop();
}
pagefault_enable();
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 60f498511dd..7ffc9b727ef 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -178,11 +178,8 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
/* extend the search scope */
end = max_pfn_mapped << PAGE_SHIFT;
- if (end > (MAX_DMA32_PFN<<PAGE_SHIFT))
- start = MAX_DMA32_PFN<<PAGE_SHIFT;
- else
- start = MAX_DMA_PFN<<PAGE_SHIFT;
- mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
+ start = MAX_DMA_PFN << PAGE_SHIFT;
+ mem = memblock_find_in_range(start, end, size, align);
if (mem != MEMBLOCK_ERROR)
return __va(mem);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 49358481c73..12cdbb17ad1 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -251,7 +251,7 @@ static void __cpuinit calculate_tlb_offset(void)
}
}
-static int tlb_cpuhp_notify(struct notifier_block *n,
+static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
unsigned long action, void *hcpu)
{
switch (action & 0xf) {
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index bd1489c3ce0..4e8baad36d3 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -726,6 +726,12 @@ int __init op_nmi_init(struct oprofile_operations *ops)
case 0x11:
cpu_type = "x86-64/family11h";
break;
+ case 0x12:
+ cpu_type = "x86-64/family12h";
+ break;
+ case 0x14:
+ cpu_type = "x86-64/family14h";
+ break;
default:
return -ENODEV;
}
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 42fb46f8388..a011bcc0f94 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -48,17 +48,24 @@ static unsigned long reset_value[NUM_VIRT_COUNTERS];
static u32 ibs_caps;
-struct op_ibs_config {
+struct ibs_config {
unsigned long op_enabled;
unsigned long fetch_enabled;
unsigned long max_cnt_fetch;
unsigned long max_cnt_op;
unsigned long rand_en;
unsigned long dispatched_ops;
+ unsigned long branch_target;
};
-static struct op_ibs_config ibs_config;
-static u64 ibs_op_ctl;
+struct ibs_state {
+ u64 ibs_op_ctl;
+ int branch_target;
+ unsigned long sample_size;
+};
+
+static struct ibs_config ibs_config;
+static struct ibs_state ibs_state;
/*
* IBS cpuid feature detection
@@ -71,8 +78,16 @@ static u64 ibs_op_ctl;
* bit 0 is used to indicate the existence of IBS.
*/
#define IBS_CAPS_AVAIL (1U<<0)
+#define IBS_CAPS_FETCHSAM (1U<<1)
+#define IBS_CAPS_OPSAM (1U<<2)
#define IBS_CAPS_RDWROPCNT (1U<<3)
#define IBS_CAPS_OPCNT (1U<<4)
+#define IBS_CAPS_BRNTRGT (1U<<5)
+#define IBS_CAPS_OPCNTEXT (1U<<6)
+
+#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
+ | IBS_CAPS_FETCHSAM \
+ | IBS_CAPS_OPSAM)
/*
* IBS APIC setup
@@ -99,12 +114,12 @@ static u32 get_ibs_caps(void)
/* check IBS cpuid feature flags */
max_level = cpuid_eax(0x80000000);
if (max_level < IBS_CPUID_FEATURES)
- return IBS_CAPS_AVAIL;
+ return IBS_CAPS_DEFAULT;
ibs_caps = cpuid_eax(IBS_CPUID_FEATURES);
if (!(ibs_caps & IBS_CAPS_AVAIL))
/* cpuid flags not valid */
- return IBS_CAPS_AVAIL;
+ return IBS_CAPS_DEFAULT;
return ibs_caps;
}
@@ -197,8 +212,8 @@ op_amd_handle_ibs(struct pt_regs * const regs,
rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
if (ctl & IBS_OP_VAL) {
rdmsrl(MSR_AMD64_IBSOPRIP, val);
- oprofile_write_reserve(&entry, regs, val,
- IBS_OP_CODE, IBS_OP_SIZE);
+ oprofile_write_reserve(&entry, regs, val, IBS_OP_CODE,
+ ibs_state.sample_size);
oprofile_add_data64(&entry, val);
rdmsrl(MSR_AMD64_IBSOPDATA, val);
oprofile_add_data64(&entry, val);
@@ -210,10 +225,14 @@ op_amd_handle_ibs(struct pt_regs * const regs,
oprofile_add_data64(&entry, val);
rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
oprofile_add_data64(&entry, val);
+ if (ibs_state.branch_target) {
+ rdmsrl(MSR_AMD64_IBSBRTARGET, val);
+ oprofile_add_data(&entry, (unsigned long)val);
+ }
oprofile_write_commit(&entry);
/* reenable the IRQ */
- ctl = op_amd_randomize_ibs_op(ibs_op_ctl);
+ ctl = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
}
}
@@ -226,21 +245,32 @@ static inline void op_amd_start_ibs(void)
if (!ibs_caps)
return;
+ memset(&ibs_state, 0, sizeof(ibs_state));
+
+ /*
+	 * Note: since the max count settings may be out of range, we
+	 * write back the values actually used so that userland can
+	 * read them.
+ */
+
if (ibs_config.fetch_enabled) {
- val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT;
+ val = ibs_config.max_cnt_fetch >> 4;
+ val = min(val, IBS_FETCH_MAX_CNT);
+ ibs_config.max_cnt_fetch = val << 4;
val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
val |= IBS_FETCH_ENABLE;
wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
}
if (ibs_config.op_enabled) {
- ibs_op_ctl = ibs_config.max_cnt_op >> 4;
+ val = ibs_config.max_cnt_op >> 4;
if (!(ibs_caps & IBS_CAPS_RDWROPCNT)) {
/*
* IbsOpCurCnt not supported. See
* op_amd_randomize_ibs_op() for details.
*/
- ibs_op_ctl = clamp(ibs_op_ctl, 0x0081ULL, 0xFF80ULL);
+ val = clamp(val, 0x0081ULL, 0xFF80ULL);
+ ibs_config.max_cnt_op = val << 4;
} else {
/*
* The start value is randomized with a
@@ -248,13 +278,24 @@ static inline void op_amd_start_ibs(void)
* with the half of the randomized range. Also
* avoid underflows.
*/
- ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET,
- IBS_OP_MAX_CNT);
+ val += IBS_RANDOM_MAXCNT_OFFSET;
+ if (ibs_caps & IBS_CAPS_OPCNTEXT)
+ val = min(val, IBS_OP_MAX_CNT_EXT);
+ else
+ val = min(val, IBS_OP_MAX_CNT);
+ ibs_config.max_cnt_op =
+ (val - IBS_RANDOM_MAXCNT_OFFSET) << 4;
+ }
+ val = ((val & ~IBS_OP_MAX_CNT) << 4) | (val & IBS_OP_MAX_CNT);
+ val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
+ val |= IBS_OP_ENABLE;
+ ibs_state.ibs_op_ctl = val;
+ ibs_state.sample_size = IBS_OP_SIZE;
+ if (ibs_config.branch_target) {
+ ibs_state.branch_target = 1;
+ ibs_state.sample_size++;
}
- if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops)
- ibs_op_ctl |= IBS_OP_CNT_CTL;
- ibs_op_ctl |= IBS_OP_ENABLE;
- val = op_amd_randomize_ibs_op(ibs_op_ctl);
+ val = op_amd_randomize_ibs_op(ibs_state.ibs_op_ctl);
wrmsrl(MSR_AMD64_IBSOPCTL, val);
}
}
@@ -281,29 +322,25 @@ static inline int eilvt_is_available(int offset)
static inline int ibs_eilvt_valid(void)
{
- u64 val;
int offset;
+ u64 val;
rdmsrl(MSR_AMD64_IBSCTL, val);
+ offset = val & IBSCTL_LVT_OFFSET_MASK;
+
if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
- pr_err(FW_BUG "cpu %d, invalid IBS "
- "interrupt offset %d (MSR%08X=0x%016llx)",
- smp_processor_id(), offset,
- MSR_AMD64_IBSCTL, val);
+ pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
+ smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
return 0;
}
- offset = val & IBSCTL_LVT_OFFSET_MASK;
-
- if (eilvt_is_available(offset))
- return !0;
-
- pr_err(FW_BUG "cpu %d, IBS interrupt offset %d "
- "not available (MSR%08X=0x%016llx)",
- smp_processor_id(), offset,
- MSR_AMD64_IBSCTL, val);
+ if (!eilvt_is_available(offset)) {
+ pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
+ smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
+ return 0;
+ }
- return 0;
+ return 1;
}
static inline int get_ibs_offset(void)
@@ -630,28 +667,33 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
/* model specific files */
/* setup some reasonable defaults */
+ memset(&ibs_config, 0, sizeof(ibs_config));
ibs_config.max_cnt_fetch = 250000;
- ibs_config.fetch_enabled = 0;
ibs_config.max_cnt_op = 250000;
- ibs_config.op_enabled = 0;
- ibs_config.dispatched_ops = 0;
-
- dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
- oprofilefs_create_ulong(sb, dir, "enable",
- &ibs_config.fetch_enabled);
- oprofilefs_create_ulong(sb, dir, "max_count",
- &ibs_config.max_cnt_fetch);
- oprofilefs_create_ulong(sb, dir, "rand_enable",
- &ibs_config.rand_en);
-
- dir = oprofilefs_mkdir(sb, root, "ibs_op");
- oprofilefs_create_ulong(sb, dir, "enable",
- &ibs_config.op_enabled);
- oprofilefs_create_ulong(sb, dir, "max_count",
- &ibs_config.max_cnt_op);
- if (ibs_caps & IBS_CAPS_OPCNT)
- oprofilefs_create_ulong(sb, dir, "dispatched_ops",
- &ibs_config.dispatched_ops);
+
+ if (ibs_caps & IBS_CAPS_FETCHSAM) {
+ dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
+ oprofilefs_create_ulong(sb, dir, "enable",
+ &ibs_config.fetch_enabled);
+ oprofilefs_create_ulong(sb, dir, "max_count",
+ &ibs_config.max_cnt_fetch);
+ oprofilefs_create_ulong(sb, dir, "rand_enable",
+ &ibs_config.rand_en);
+ }
+
+ if (ibs_caps & IBS_CAPS_OPSAM) {
+ dir = oprofilefs_mkdir(sb, root, "ibs_op");
+ oprofilefs_create_ulong(sb, dir, "enable",
+ &ibs_config.op_enabled);
+ oprofilefs_create_ulong(sb, dir, "max_count",
+ &ibs_config.max_cnt_op);
+ if (ibs_caps & IBS_CAPS_OPCNT)
+ oprofilefs_create_ulong(sb, dir, "dispatched_ops",
+ &ibs_config.dispatched_ops);
+ if (ibs_caps & IBS_CAPS_BRNTRGT)
+ oprofilefs_create_ulong(sb, dir, "branch_target",
+ &ibs_config.branch_target);
+ }
return 0;
}
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index a0207a7fdf3..effd96e33f1 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_PCI_BIOS) += pcbios.o
obj-$(CONFIG_PCI_MMCONFIG) += mmconfig_$(BITS).o direct.o mmconfig-shared.o
obj-$(CONFIG_PCI_DIRECT) += direct.o
obj-$(CONFIG_PCI_OLPC) += olpc.o
+obj-$(CONFIG_PCI_XEN) += xen.o
obj-y += fixup.o
obj-$(CONFIG_ACPI) += acpi.o
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 15466c096ba..0972315c386 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -138,7 +138,6 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
struct acpi_resource_address64 addr;
acpi_status status;
unsigned long flags;
- struct resource *root, *conflict;
u64 start, end;
status = resource_to_addr(acpi_res, &addr);
@@ -146,12 +145,10 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
return AE_OK;
if (addr.resource_type == ACPI_MEMORY_RANGE) {
- root = &iomem_resource;
flags = IORESOURCE_MEM;
if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
flags |= IORESOURCE_PREFETCH;
} else if (addr.resource_type == ACPI_IO_RANGE) {
- root = &ioport_resource;
flags = IORESOURCE_IO;
} else
return AE_OK;
@@ -172,25 +169,90 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
return AE_OK;
}
- conflict = insert_resource_conflict(root, res);
- if (conflict) {
- dev_err(&info->bridge->dev,
- "address space collision: host bridge window %pR "
- "conflicts with %s %pR\n",
- res, conflict->name, conflict);
- } else {
- pci_bus_add_resource(info->bus, res, 0);
- info->res_num++;
- if (addr.translation_offset)
- dev_info(&info->bridge->dev, "host bridge window %pR "
- "(PCI address [%#llx-%#llx])\n",
- res, res->start - addr.translation_offset,
- res->end - addr.translation_offset);
+ info->res_num++;
+ if (addr.translation_offset)
+ dev_info(&info->bridge->dev, "host bridge window %pR "
+ "(PCI address [%#llx-%#llx])\n",
+ res, res->start - addr.translation_offset,
+ res->end - addr.translation_offset);
+ else
+ dev_info(&info->bridge->dev, "host bridge window %pR\n", res);
+
+ return AE_OK;
+}
+
+static bool resource_contains(struct resource *res, resource_size_t point)
+{
+ if (res->start <= point && point <= res->end)
+ return true;
+ return false;
+}
+
+static void coalesce_windows(struct pci_root_info *info, int type)
+{
+ int i, j;
+ struct resource *res1, *res2;
+
+ for (i = 0; i < info->res_num; i++) {
+ res1 = &info->res[i];
+ if (!(res1->flags & type))
+ continue;
+
+ for (j = i + 1; j < info->res_num; j++) {
+ res2 = &info->res[j];
+ if (!(res2->flags & type))
+ continue;
+
+ /*
+ * I don't like throwing away windows because then
+ * our resources no longer match the ACPI _CRS, but
+ * the kernel resource tree doesn't allow overlaps.
+ */
+ if (resource_contains(res1, res2->start) ||
+ resource_contains(res1, res2->end) ||
+ resource_contains(res2, res1->start) ||
+ resource_contains(res2, res1->end)) {
+ res1->start = min(res1->start, res2->start);
+ res1->end = max(res1->end, res2->end);
+ dev_info(&info->bridge->dev,
+ "host bridge window expanded to %pR; %pR ignored\n",
+ res1, res2);
+ res2->flags = 0;
+ }
+ }
+ }
+}
+
+static void add_resources(struct pci_root_info *info)
+{
+ int i;
+ struct resource *res, *root, *conflict;
+
+ if (!pci_use_crs)
+ return;
+
+ coalesce_windows(info, IORESOURCE_MEM);
+ coalesce_windows(info, IORESOURCE_IO);
+
+ for (i = 0; i < info->res_num; i++) {
+ res = &info->res[i];
+
+ if (res->flags & IORESOURCE_MEM)
+ root = &iomem_resource;
+ else if (res->flags & IORESOURCE_IO)
+ root = &ioport_resource;
else
- dev_info(&info->bridge->dev,
- "host bridge window %pR\n", res);
+ continue;
+
+ conflict = insert_resource_conflict(root, res);
+ if (conflict)
+ dev_err(&info->bridge->dev,
+ "address space collision: host bridge window %pR "
+ "conflicts with %s %pR\n",
+ res, conflict->name, conflict);
+ else
+ pci_bus_add_resource(info->bus, res, 0);
}
- return AE_OK;
}
static void
@@ -224,6 +286,7 @@ get_current_resources(struct acpi_device *device, int busnum,
acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
&info);
+ add_resources(&info);
return;
name_alloc_fail:
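The coalesce_windows() logic added above merges any _CRS host bridge windows that overlap, because the kernel resource tree rejects overlapping siblings. A minimal standalone sketch of that overlap-and-merge test, using plain integers in place of struct resource (the window values and helper names below are hypothetical, not part of the patch):

#include <stdio.h>
#include <stdbool.h>

struct window { unsigned long start, end; };	/* closed interval, like struct resource */

static bool window_contains(const struct window *w, unsigned long point)
{
	return w->start <= point && point <= w->end;
}

/* Merge w2 into w1 if the two closed intervals overlap, as coalesce_windows() does. */
static bool coalesce(struct window *w1, const struct window *w2)
{
	if (window_contains(w1, w2->start) || window_contains(w1, w2->end) ||
	    window_contains(w2, w1->start) || window_contains(w2, w1->end)) {
		if (w2->start < w1->start)
			w1->start = w2->start;
		if (w2->end > w1->end)
			w1->end = w2->end;
		return true;
	}
	return false;
}

int main(void)
{
	struct window a = { 0xa0000000UL, 0xbfffffffUL };
	struct window b = { 0xb0000000UL, 0xcfffffffUL };

	if (coalesce(&a, &b))
		printf("merged window: [%#lx-%#lx]\n", a.start, a.end);
	return 0;
}

With the hypothetical values above, the second window is folded into the first, giving [0xa0000000-0xcfffffff]; the kernel then inserts only the merged window into iomem_resource.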
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index a0772af64ef..f7c8a399978 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -421,16 +421,10 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum)
return bus;
}
-
-int __init pcibios_init(void)
+void __init pcibios_set_cache_line_size(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
- if (!raw_pci_ops) {
- printk(KERN_WARNING "PCI: System does not support PCI\n");
- return 0;
- }
-
/*
* Set PCI cacheline size to that of the CPU if the CPU has reported it.
* (For older CPUs that don't support cpuid, we set it to 32 bytes
@@ -445,7 +439,16 @@ int __init pcibios_init(void)
pci_dfl_cache_line_size = 32 >> 2;
printk(KERN_DEBUG "PCI: Unknown cacheline size. Setting to 32 bytes\n");
}
+}
+
+int __init pcibios_init(void)
+{
+ if (!raw_pci_ops) {
+ printk(KERN_WARNING "PCI: System does not support PCI\n");
+ return 0;
+ }
+ pcibios_set_cache_line_size();
pcibios_resource_survey();
if (pci_bf_sort >= pci_force_bf)
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 55253095be8..c4bb261c106 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -65,16 +65,21 @@ pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
- resource_size_t start = res->start;
+ resource_size_t start = round_down(res->end - size + 1, align);
if (res->flags & IORESOURCE_IO) {
- if (skip_isa_ioresource_align(dev))
- return start;
- if (start & 0x300)
- start = (start + 0x3ff) & ~0x3ff;
+
+ /*
+ * If we're avoiding ISA aliases, the largest contiguous I/O
+ * port space is 256 bytes. Clearing bits 9 and 10 preserves
+ * all 256-byte and smaller alignments, so the result will
+ * still be correctly aligned.
+ */
+ if (!skip_isa_ioresource_align(dev))
+ start &= ~0x300;
} else if (res->flags & IORESOURCE_MEM) {
if (start < BIOS_END)
- start = BIOS_END;
+ start = res->end; /* fail; no space */
}
return start;
}
@@ -311,6 +316,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
*/
prot |= _PAGE_CACHE_UC_MINUS;
+ prot |= _PAGE_IOMAP; /* creating a mapping for IO */
+
vma->vm_page_prot = __pgprot(prot);
if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
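The new pcibios_align_resource() places a candidate at the highest aligned address that still fits in the window (round_down(res->end - size + 1, align)), then masks off 0x300 for I/O resources that must avoid ISA aliases. A small standalone sketch of that arithmetic with hypothetical window values; round_down_pow2() stands in for the kernel's round_down() helper:

#include <stdio.h>

typedef unsigned long long resource_size_t;

/* Round x down to a multiple of 'align' (align must be a power of two). */
static resource_size_t round_down_pow2(resource_size_t x, resource_size_t align)
{
	return x & ~(align - 1);
}

int main(void)
{
	resource_size_t res_end = 0xdfff;	/* last usable port of the window */
	resource_size_t size = 0x100, align = 0x100;

	/* Highest aligned start so that [start, start + size - 1] still fits. */
	resource_size_t start = round_down_pow2(res_end - size + 1, align);
	printf("top-down candidate:   %#llx\n", start);	/* 0xdf00 */

	/* Avoid ISA aliases: clearing 0x300 keeps any 256-byte-or-smaller alignment. */
	start &= ~0x300ULL;
	printf("ISA-alias safe start: %#llx\n", start);	/* 0xdc00 */
	return 0;
}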
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index f547ee05f71..9f9bfb705cf 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -584,27 +584,28 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
case PCI_DEVICE_ID_INTEL_ICH9_3:
case PCI_DEVICE_ID_INTEL_ICH9_4:
case PCI_DEVICE_ID_INTEL_ICH9_5:
- case PCI_DEVICE_ID_INTEL_TOLAPAI_0:
+ case PCI_DEVICE_ID_INTEL_EP80579_0:
case PCI_DEVICE_ID_INTEL_ICH10_0:
case PCI_DEVICE_ID_INTEL_ICH10_1:
case PCI_DEVICE_ID_INTEL_ICH10_2:
case PCI_DEVICE_ID_INTEL_ICH10_3:
+ case PCI_DEVICE_ID_INTEL_PATSBURG_LPC:
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
return 1;
}
- if ((device >= PCI_DEVICE_ID_INTEL_PCH_LPC_MIN) &&
- (device <= PCI_DEVICE_ID_INTEL_PCH_LPC_MAX)) {
+ if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN) &&
+ (device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX)) {
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
return 1;
}
- if ((device >= PCI_DEVICE_ID_INTEL_CPT_LPC_MIN) &&
- (device <= PCI_DEVICE_ID_INTEL_CPT_LPC_MAX)) {
+ if ((device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN) &&
+ (device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX)) {
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index a918553ebc7..e282886616a 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -65,7 +65,6 @@ static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
int end, u64 addr)
{
struct pci_mmcfg_region *new;
- int num_buses;
struct resource *res;
if (addr == 0)
@@ -82,10 +81,9 @@ static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
list_add_sorted(new);
- num_buses = end - start + 1;
res = &new->res;
res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
- res->end = addr + PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
+ res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
"PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
new file mode 100644
index 00000000000..d7b5109f7a9
--- /dev/null
+++ b/arch/x86/pci/xen.c
@@ -0,0 +1,416 @@
+/*
+ * Xen PCI Frontend Stub - puts some "dummy" functions into the Linux
+ * x86 PCI core to support the Xen PCI Frontend
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+
+#include <linux/io.h>
+#include <asm/io_apic.h>
+#include <asm/pci_x86.h>
+
+#include <asm/xen/hypervisor.h>
+
+#include <xen/features.h>
+#include <xen/events.h>
+#include <asm/xen/pci.h>
+
+#ifdef CONFIG_ACPI
+static int xen_hvm_register_pirq(u32 gsi, int triggering)
+{
+ int rc, irq;
+ struct physdev_map_pirq map_irq;
+ int shareable = 0;
+ char *name;
+
+ if (!xen_hvm_domain())
+ return -1;
+
+ map_irq.domid = DOMID_SELF;
+ map_irq.type = MAP_PIRQ_TYPE_GSI;
+ map_irq.index = gsi;
+ map_irq.pirq = -1;
+
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
+ if (rc) {
+ printk(KERN_WARNING "xen map irq failed %d\n", rc);
+ return -1;
+ }
+
+ if (triggering == ACPI_EDGE_SENSITIVE) {
+ shareable = 0;
+ name = "ioapic-edge";
+ } else {
+ shareable = 1;
+ name = "ioapic-level";
+ }
+
+ irq = xen_map_pirq_gsi(map_irq.pirq, gsi, shareable, name);
+
+ printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
+
+ return irq;
+}
+
+static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
+ int trigger, int polarity)
+{
+ return xen_hvm_register_pirq(gsi, trigger);
+}
+#endif
+
+#if defined(CONFIG_PCI_MSI)
+#include <linux/msi.h>
+#include <asm/msidef.h>
+
+struct xen_pci_frontend_ops *xen_pci_frontend;
+EXPORT_SYMBOL_GPL(xen_pci_frontend);
+
+static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
+ struct msi_msg *msg)
+{
+ /* We set vector == 0 to tell the hypervisor we don't care about it,
+ * but we want a pirq setup instead.
+ * We use the dest_id field to pass the pirq that we want. */
+ msg->address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(pirq);
+ msg->address_lo =
+ MSI_ADDR_BASE_LO |
+ MSI_ADDR_DEST_MODE_PHYSICAL |
+ MSI_ADDR_REDIRECTION_CPU |
+ MSI_ADDR_DEST_ID(pirq);
+
+ msg->data =
+ MSI_DATA_TRIGGER_EDGE |
+ MSI_DATA_LEVEL_ASSERT |
+ /* delivery mode reserved */
+ (3 << 8) |
+ MSI_DATA_VECTOR(0);
+}
+
+static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ int irq, pirq, ret = 0;
+ struct msi_desc *msidesc;
+ struct msi_msg msg;
+
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+ xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ?
+ "msi-x" : "msi", &irq, &pirq);
+ if (irq < 0 || pirq < 0)
+ goto error;
+ printk(KERN_DEBUG "xen: msi --> irq=%d, pirq=%d\n", irq, pirq);
+ xen_msi_compose_msg(dev, pirq, &msg);
+ ret = set_irq_msi(irq, msidesc);
+ if (ret < 0)
+ goto error_while;
+ write_msi_msg(irq, &msg);
+ }
+ return 0;
+
+error_while:
+ unbind_from_irqhandler(irq, NULL);
+error:
+ if (ret == -ENODEV)
+ dev_err(&dev->dev, "Xen PCI frontend has not registered" \
+ " MSI/MSI-X support!\n");
+
+ return ret;
+}
+
+/*
+ * For MSI interrupts we have to use the functions in drivers/xen/events.c
+ * to allocate an irq_desc and set it up correctly.
+ */
+
+
+static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ int irq, ret, i;
+ struct msi_desc *msidesc;
+ int *v;
+
+ v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+
+ if (type == PCI_CAP_ID_MSIX)
+ ret = xen_pci_frontend_enable_msix(dev, &v, nvec);
+ else
+ ret = xen_pci_frontend_enable_msi(dev, &v);
+ if (ret)
+ goto error;
+ i = 0;
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+ irq = xen_allocate_pirq(v[i], 0, /* not sharable */
+ (type == PCI_CAP_ID_MSIX) ?
+ "pcifront-msi-x" : "pcifront-msi");
+ if (irq < 0) {
+ ret = -1;
+ goto free;
+ }
+
+ ret = set_irq_msi(irq, msidesc);
+ if (ret)
+ goto error_while;
+ i++;
+ }
+ kfree(v);
+ return 0;
+
+error_while:
+ unbind_from_irqhandler(irq, NULL);
+error:
+ if (ret == -ENODEV)
+ dev_err(&dev->dev, "Xen PCI frontend has not registered" \
+ " MSI/MSI-X support!\n");
+free:
+ kfree(v);
+ return ret;
+}
+
+static void xen_teardown_msi_irqs(struct pci_dev *dev)
+{
+ struct msi_desc *msidesc;
+
+ msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
+ if (msidesc->msi_attrib.is_msix)
+ xen_pci_frontend_disable_msix(dev);
+ else
+ xen_pci_frontend_disable_msi(dev);
+}
+
+static void xen_teardown_msi_irq(unsigned int irq)
+{
+ xen_destroy_irq(irq);
+}
+
+static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ int irq, ret;
+ struct msi_desc *msidesc;
+
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+ irq = xen_create_msi_irq(dev, msidesc, type);
+ if (irq < 0)
+ return -1;
+
+ ret = set_irq_msi(irq, msidesc);
+ if (ret)
+ goto error;
+ }
+ return 0;
+
+error:
+ xen_destroy_irq(irq);
+ return ret;
+}
+#endif
+
+static int xen_pcifront_enable_irq(struct pci_dev *dev)
+{
+ int rc;
+ int share = 1;
+
+ dev_info(&dev->dev, "Xen PCI enabling IRQ: %d\n", dev->irq);
+
+ if (dev->irq < 0)
+ return -EINVAL;
+
+ if (dev->irq < NR_IRQS_LEGACY)
+ share = 0;
+
+ rc = xen_allocate_pirq(dev->irq, share, "pcifront");
+ if (rc < 0) {
+ dev_warn(&dev->dev, "Xen PCI IRQ: %d, failed to register:%d\n",
+ dev->irq, rc);
+ return rc;
+ }
+ return 0;
+}
+
+int __init pci_xen_init(void)
+{
+ if (!xen_pv_domain() || xen_initial_domain())
+ return -ENODEV;
+
+ printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");
+
+ pcibios_set_cache_line_size();
+
+ pcibios_enable_irq = xen_pcifront_enable_irq;
+ pcibios_disable_irq = NULL;
+
+#ifdef CONFIG_ACPI
+ /* Keep ACPI out of the picture */
+ acpi_noirq = 1;
+#endif
+
+#ifdef CONFIG_PCI_MSI
+ x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
+ x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+ x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs;
+#endif
+ return 0;
+}
+
+int __init pci_xen_hvm_init(void)
+{
+ if (!xen_feature(XENFEAT_hvm_pirqs))
+ return 0;
+
+#ifdef CONFIG_ACPI
+ /*
+ * We don't want to change the actual ACPI delivery model,
+ * just how GSIs get registered.
+ */
+ __acpi_register_gsi = acpi_register_gsi_xen_hvm;
+#endif
+
+#ifdef CONFIG_PCI_MSI
+ x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
+ x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_XEN_DOM0
+static int xen_register_pirq(u32 gsi, int triggering)
+{
+ int rc, irq;
+ struct physdev_map_pirq map_irq;
+ int shareable = 0;
+ char *name;
+
+ if (!xen_pv_domain())
+ return -1;
+
+ if (triggering == ACPI_EDGE_SENSITIVE) {
+ shareable = 0;
+ name = "ioapic-edge";
+ } else {
+ shareable = 1;
+ name = "ioapic-level";
+ }
+
+ irq = xen_allocate_pirq(gsi, shareable, name);
+
+ printk(KERN_DEBUG "xen: --> irq=%d\n", irq);
+
+ if (irq < 0)
+ goto out;
+
+ map_irq.domid = DOMID_SELF;
+ map_irq.type = MAP_PIRQ_TYPE_GSI;
+ map_irq.index = gsi;
+ map_irq.pirq = irq;
+
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
+ if (rc) {
+ printk(KERN_WARNING "xen map irq failed %d\n", rc);
+ return -1;
+ }
+
+out:
+ return irq;
+}
+
+static int xen_register_gsi(u32 gsi, int triggering, int polarity)
+{
+ int rc, irq;
+ struct physdev_setup_gsi setup_gsi;
+
+ if (!xen_pv_domain())
+ return -1;
+
+ printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
+ gsi, triggering, polarity);
+
+ irq = xen_register_pirq(gsi, triggering);
+
+ setup_gsi.gsi = gsi;
+ setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
+ setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
+
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
+ if (rc == -EEXIST)
+ printk(KERN_INFO "Already setup the GSI :%d\n", gsi);
+ else if (rc) {
+ printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n",
+ gsi, rc);
+ }
+
+ return irq;
+}
+
+static __init void xen_setup_acpi_sci(void)
+{
+ int rc;
+ int trigger, polarity;
+ int gsi = acpi_sci_override_gsi;
+
+ if (!gsi)
+ return;
+
+ rc = acpi_get_override_irq(gsi, &trigger, &polarity);
+ if (rc) {
+ printk(KERN_WARNING "xen: acpi_get_override_irq failed for acpi"
+ " sci, rc=%d\n", rc);
+ return;
+ }
+ trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
+ polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
+
+ printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
+ "polarity=%d\n", gsi, trigger, polarity);
+
+ gsi = xen_register_gsi(gsi, trigger, polarity);
+ printk(KERN_INFO "xen: acpi sci %d\n", gsi);
+
+ return;
+}
+
+static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
+ int trigger, int polarity)
+{
+ return xen_register_gsi(gsi, trigger, polarity);
+}
+
+static int __init pci_xen_initial_domain(void)
+{
+#ifdef CONFIG_PCI_MSI
+ x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
+ x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
+#endif
+ xen_setup_acpi_sci();
+ __acpi_register_gsi = acpi_register_gsi_xen;
+
+ return 0;
+}
+
+void __init xen_setup_pirqs(void)
+{
+ int irq;
+
+ pci_xen_initial_domain();
+
+ if (0 == nr_ioapics) {
+ for (irq = 0; irq < NR_IRQS_LEGACY; irq++)
+ xen_allocate_pirq(irq, 0, "xt-pic");
+ return;
+ }
+
+ /* Pre-allocate legacy irqs */
+ for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
+ int trigger, polarity;
+
+ if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
+ continue;
+
+ xen_register_pirq(irq,
+ trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE);
+ }
+}
+#endif
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile
new file mode 100644
index 00000000000..7bf70b812fa
--- /dev/null
+++ b/arch/x86/platform/Makefile
@@ -0,0 +1,8 @@
+# Platform specific code goes here
+obj-y += efi/
+obj-y += mrst/
+obj-y += olpc/
+obj-y += scx200/
+obj-y += sfi/
+obj-y += visws/
+obj-y += uv/
diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile
new file mode 100644
index 00000000000..73b8be0f367
--- /dev/null
+++ b/arch/x86/platform/efi/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o
diff --git a/arch/x86/kernel/efi.c b/arch/x86/platform/efi/efi.c
index 0fe27d7c625..0fe27d7c625 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/platform/efi/efi.c
diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 5cab48ee61a..5cab48ee61a 100644
--- a/arch/x86/kernel/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/platform/efi/efi_64.c
index ac0621a7ac3..ac0621a7ac3 100644
--- a/arch/x86/kernel/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
index fbe66e626c0..fbe66e626c0 100644
--- a/arch/x86/kernel/efi_stub_32.S
+++ b/arch/x86/platform/efi/efi_stub_32.S
diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index 4c07ccab814..4c07ccab814 100644
--- a/arch/x86/kernel/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
diff --git a/arch/x86/platform/mrst/Makefile b/arch/x86/platform/mrst/Makefile
new file mode 100644
index 00000000000..efbbc552fa9
--- /dev/null
+++ b/arch/x86/platform/mrst/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_X86_MRST) += mrst.o
diff --git a/arch/x86/kernel/mrst.c b/arch/x86/platform/mrst/mrst.c
index 79ae68154e8..79ae68154e8 100644
--- a/arch/x86/kernel/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
diff --git a/arch/x86/platform/olpc/Makefile b/arch/x86/platform/olpc/Makefile
new file mode 100644
index 00000000000..c31b8fcb5a8
--- /dev/null
+++ b/arch/x86/platform/olpc/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_OLPC) += olpc.o
+obj-$(CONFIG_OLPC_XO1) += olpc-xo1.o
+obj-$(CONFIG_OLPC_OPENFIRMWARE) += olpc_ofw.o
diff --git a/arch/x86/kernel/olpc-xo1.c b/arch/x86/platform/olpc/olpc-xo1.c
index f5442c03abc..f5442c03abc 100644
--- a/arch/x86/kernel/olpc-xo1.c
+++ b/arch/x86/platform/olpc/olpc-xo1.c
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/platform/olpc/olpc.c
index edaf3fe8dc5..edaf3fe8dc5 100644
--- a/arch/x86/kernel/olpc.c
+++ b/arch/x86/platform/olpc/olpc.c
diff --git a/arch/x86/kernel/olpc_ofw.c b/arch/x86/platform/olpc/olpc_ofw.c
index 78732046437..78732046437 100644
--- a/arch/x86/kernel/olpc_ofw.c
+++ b/arch/x86/platform/olpc/olpc_ofw.c
diff --git a/arch/x86/platform/scx200/Makefile b/arch/x86/platform/scx200/Makefile
new file mode 100644
index 00000000000..762b4c7f431
--- /dev/null
+++ b/arch/x86/platform/scx200/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_SCx200) += scx200.o
+scx200-y += scx200_32.o
diff --git a/arch/x86/kernel/scx200_32.c b/arch/x86/platform/scx200/scx200_32.c
index 7e004acbe52..7e004acbe52 100644
--- a/arch/x86/kernel/scx200_32.c
+++ b/arch/x86/platform/scx200/scx200_32.c
diff --git a/arch/x86/platform/sfi/Makefile b/arch/x86/platform/sfi/Makefile
new file mode 100644
index 00000000000..cc5db1168a5
--- /dev/null
+++ b/arch/x86/platform/sfi/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SFI) += sfi.o
diff --git a/arch/x86/kernel/sfi.c b/arch/x86/platform/sfi/sfi.c
index dd4c281ffe5..dd4c281ffe5 100644
--- a/arch/x86/kernel/sfi.c
+++ b/arch/x86/platform/sfi/sfi.c
diff --git a/arch/x86/platform/uv/Makefile b/arch/x86/platform/uv/Makefile
new file mode 100644
index 00000000000..6c40995fefb
--- /dev/null
+++ b/arch/x86/platform/uv/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_X86_UV) += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index 8bc57baaa9a..8bc57baaa9a 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 20ea20a39e2..a318194002b 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1343,8 +1343,8 @@ uv_activation_descriptor_init(int node, int pnode)
* each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
* per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
*/
- bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
- UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
+ bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE
+ * UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
BUG_ON(!bau_desc);
pa = uv_gpa(bau_desc); /* need the real nasid*/
@@ -1402,9 +1402,9 @@ uv_payload_queue_init(int node, int pnode)
struct bau_payload_queue_entry *pqp_malloc;
struct bau_control *bcp;
- pqp = (struct bau_payload_queue_entry *) kmalloc_node(
- (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
- GFP_KERNEL, node);
+ pqp = kmalloc_node((DEST_Q_SIZE + 1)
+ * sizeof(struct bau_payload_queue_entry),
+ GFP_KERNEL, node);
BUG_ON(!pqp);
pqp_malloc = pqp;
@@ -1520,8 +1520,7 @@ static void __init uv_init_per_cpu(int nuvhubs)
timeout_us = calculate_destination_timeout();
- uvhub_descs = (struct uvhub_desc *)
- kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
+ uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
for_each_present_cpu(cpu) {
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index 7b24460917d..7b24460917d 100644
--- a/arch/x86/kernel/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
diff --git a/arch/x86/kernel/uv_sysfs.c b/arch/x86/platform/uv/uv_sysfs.c
index 309c70fb775..309c70fb775 100644
--- a/arch/x86/kernel/uv_sysfs.c
+++ b/arch/x86/platform/uv/uv_sysfs.c
diff --git a/arch/x86/kernel/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 56e421bc379..56e421bc379 100644
--- a/arch/x86/kernel/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
diff --git a/arch/x86/platform/visws/Makefile b/arch/x86/platform/visws/Makefile
new file mode 100644
index 00000000000..91bc17ab2fd
--- /dev/null
+++ b/arch/x86/platform/visws/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_X86_VISWS) += visws_quirks.o
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/platform/visws/visws_quirks.c
index 3371bd053b8..3371bd053b8 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/platform/visws/visws_quirks.c
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 90a7f5ad691..5b54892e4bc 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -13,6 +13,16 @@ config XEN
kernel to boot in a paravirtualized environment under the
Xen hypervisor.
+config XEN_DOM0
+ def_bool y
+ depends on XEN && PCI_XEN && SWIOTLB_XEN
+ depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
+
+# Dummy symbol since people have come to rely on the PRIVILEGED_GUEST
+# name in tools.
+config XEN_PRIVILEGED_GUEST
+ def_bool XEN_DOM0
+
config XEN_PVHVM
def_bool y
depends on XEN
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 44ab12dc2a1..235c0f4d386 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -46,6 +46,7 @@
#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
+#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/fixmap.h>
@@ -59,7 +60,6 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
-#include <asm/setup.h>
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>
@@ -237,6 +237,7 @@ static __init void xen_init_cpuid_mask(void)
cpuid_leaf1_edx_mask =
~((1 << X86_FEATURE_MCE) | /* disable MCE */
(1 << X86_FEATURE_MCA) | /* disable MCA */
+ (1 << X86_FEATURE_MTRR) | /* disable MTRR */
(1 << X86_FEATURE_ACC)); /* thermal monitoring */
if (!xen_initial_domain())
@@ -1016,7 +1017,7 @@ static void xen_reboot(int reason)
struct sched_shutdown r = { .reason = reason };
#ifdef CONFIG_SMP
- smp_send_stop();
+ stop_other_cpus();
#endif
if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
@@ -1185,6 +1186,7 @@ asmlinkage void __init xen_start_kernel(void)
xen_raw_console_write("mapping kernel into physical memory\n");
pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
+ xen_ident_map_ISA();
/* Allocate and initialize top and mid mfn levels for p2m structure */
xen_build_mfn_list_list();
@@ -1223,6 +1225,8 @@ asmlinkage void __init xen_start_kernel(void)
add_preferred_console("xenboot", 0, NULL);
add_preferred_console("tty", 0, NULL);
add_preferred_console("hvc", 0, NULL);
+ if (pci_xen)
+ x86_init.pci.arch_init = pci_xen_init;
} else {
/* Make sure ACS will be enabled */
pci_request_acs();
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 9631c90907e..21ed8d7f75a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1975,6 +1975,7 @@ static void *m2v(phys_addr_t maddr)
return __ka(m2p(maddr));
}
+/* Set the page permissions on identity-mapped pages */
static void set_page_prot(void *addr, pgprot_t prot)
{
unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
@@ -2125,7 +2126,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
{
pmd_t *kernel_pmd;
- level2_kernel_pgt = extend_brk(sizeof(pmd_t *) * PTRS_PER_PMD, PAGE_SIZE);
+ level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
xen_start_info->nr_pt_frames * PAGE_SIZE +
@@ -2159,6 +2160,8 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
}
#endif /* CONFIG_X86_64 */
+static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
+
static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
pte_t pte;
@@ -2179,15 +2182,28 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
#else
case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
#endif
-#ifdef CONFIG_X86_LOCAL_APIC
- case FIX_APIC_BASE: /* maps dummy local APIC */
-#endif
case FIX_TEXT_POKE0:
case FIX_TEXT_POKE1:
/* All local page mappings */
pte = pfn_pte(phys, prot);
break;
+#ifdef CONFIG_X86_LOCAL_APIC
+ case FIX_APIC_BASE: /* maps dummy local APIC */
+ pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
+ break;
+#endif
+
+#ifdef CONFIG_X86_IO_APIC
+ case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
+ /*
+ * We just don't map the IO APIC - all access is via
+ * hypercalls. Keep the address in the pte for reference.
+ */
+ pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
+ break;
+#endif
+
case FIX_PARAVIRT_BOOTMAP:
/* This is an MFN, but it isn't an IO mapping from the
IO domain */
@@ -2212,6 +2228,29 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
#endif
}
+__init void xen_ident_map_ISA(void)
+{
+ unsigned long pa;
+
+ /*
+ * If we're dom0, then linear map the ISA machine addresses into
+ * the kernel's address space.
+ */
+ if (!xen_initial_domain())
+ return;
+
+ xen_raw_printk("Xen: setup ISA identity maps\n");
+
+ for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
+ pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
+
+ if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
+ BUG();
+ }
+
+ xen_flush_tlb();
+}
+
static __init void xen_post_allocator_init(void)
{
pv_mmu_ops.set_pte = xen_set_pte;
@@ -2320,6 +2359,8 @@ void __init xen_init_mmu_ops(void)
pv_mmu_ops = xen_mmu_ops;
vmap_lazy_unmap = false;
+
+ memset(dummy_mapping, 0xff, PAGE_SIZE);
}
/* Protected by xen_reservation_lock. */
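Under Xen the local APIC and IO APICs are not reachable via MMIO, so the fixmap slots above are pointed at dummy_mapping, a page that xen_init_mmu_ops() pre-fills with 0xff; reads through those fixmaps then come back as all-ones instead of faulting. A trivial standalone illustration of that behaviour (not part of the patch):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096

static unsigned char dummy_mapping[PAGE_SIZE];

int main(void)
{
	/* Mirrors the memset(dummy_mapping, 0xff, PAGE_SIZE) in xen_init_mmu_ops(). */
	memset(dummy_mapping, 0xff, PAGE_SIZE);

	/* A 32-bit "register" read through the dummy page returns all-ones. */
	uint32_t reg;
	memcpy(&reg, dummy_mapping + 0x20, sizeof(reg));
	printf("read through dummy APIC page at offset 0x20: %#x\n", reg);	/* 0xffffffff */
	return 0;
}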
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index 22471001b74..bfd0632fe65 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -1,6 +1,7 @@
/* Glue code to lib/swiotlb-xen.c */
#include <linux/dma-mapping.h>
+#include <linux/pci.h>
#include <xen/swiotlb-xen.h>
#include <asm/xen/hypervisor.h>
@@ -55,6 +56,9 @@ void __init pci_xen_swiotlb_init(void)
if (xen_swiotlb) {
xen_swiotlb_init(1);
dma_ops = &xen_swiotlb_dma_ops;
+
+ /* Make sure ACS will be enabled */
+ pci_request_acs();
}
}
IOMMU_INIT_FINISH(pci_xen_swiotlb_detect,
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 105db250105..769c4b01fa3 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -118,16 +118,18 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
const struct e820map *e820)
{
phys_addr_t max_addr = PFN_PHYS(max_pfn);
- phys_addr_t last_end = 0;
+ phys_addr_t last_end = ISA_END_ADDRESS;
unsigned long released = 0;
int i;
+ /* Free any unused memory above the low 1Mbyte. */
for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
phys_addr_t end = e820->map[i].addr;
end = min(max_addr, end);
- released += xen_release_chunk(last_end, end);
- last_end = e820->map[i].addr + e820->map[i].size;
+ if (last_end < end)
+ released += xen_release_chunk(last_end, end);
+ last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
}
if (last_end < max_addr)
@@ -164,6 +166,7 @@ char * __init xen_memory_setup(void)
XENMEM_memory_map;
rc = HYPERVISOR_memory_op(op, &memmap);
if (rc == -ENOSYS) {
+ BUG_ON(xen_initial_domain());
memmap.nr_entries = 1;
map[0].addr = 0ULL;
map[0].size = mem_end;
@@ -201,9 +204,13 @@ char * __init xen_memory_setup(void)
}
/*
- * Even though this is normal, usable memory under Xen, reserve
- * ISA memory anyway because too many things think they can poke
+ * In domU, the ISA region is normal, usable memory, but we
+ * reserve ISA memory anyway because too many things poke
* about in there.
+ *
+ * In Dom0, the host E820 information can leave gaps in the
+ * ISA range, which would cause us to release those pages. To
+ * avoid this, we unconditionally reserve them here.
*/
e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
E820_RESERVED);
@@ -367,7 +374,5 @@ void __init xen_arch_setup(void)
pm_idle = xen_idle;
- paravirt_disable_iospace();
-
fiddle_vdso();
}
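The xen_return_unused_memory() change starts last_end at ISA_END_ADDRESS and only advances it forward, so the low 1 MB is never released and only genuine gaps between E820 entries above it are handed back to the hypervisor. A minimal sketch of that walk over a hypothetical host E820 map with a hole (release_chunk() simply reports what would be returned; it stands in for xen_release_chunk()):

#include <stdio.h>

#define ISA_END_ADDRESS	0x100000ULL	/* end of the low 1 MB */

struct e820_entry { unsigned long long addr, size; };

static unsigned long long release_chunk(unsigned long long start, unsigned long long end)
{
	printf("release [%#llx-%#llx)\n", start, end);
	return (end - start) >> 12;	/* pages */
}

int main(void)
{
	/* Hypothetical host E820 with a hole between 0x20000000 and 0x40000000. */
	struct e820_entry map[] = {
		{ 0x00000000ULL, 0x20000000ULL },
		{ 0x40000000ULL, 0x20000000ULL },
	};
	unsigned long long max_addr = 0x60000000ULL;
	unsigned long long last_end = ISA_END_ADDRESS;	/* never release the low 1 MB */
	unsigned long long released = 0;
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]) && last_end < max_addr; i++) {
		unsigned long long end = map[i].addr < max_addr ? map[i].addr : max_addr;

		if (last_end < end)
			released += release_chunk(last_end, end);
		if (map[i].addr + map[i].size > last_end)
			last_end = map[i].addr + map[i].size;
	}
	if (last_end < max_addr)
		released += release_chunk(last_end, max_addr);

	printf("released %llu pages\n", released);
	return 0;
}

With these values only the hole [0x20000000-0x40000000) is released; the ISA range stays mapped, matching the unconditional E820_RESERVED region added in xen_memory_setup().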
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 25f232b18a8..72a4c795904 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -28,6 +28,7 @@
#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>
+#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>
@@ -156,11 +157,35 @@ static void __init xen_fill_possible_map(void)
{
int i, rc;
+ if (xen_initial_domain())
+ return;
+
+ for (i = 0; i < nr_cpu_ids; i++) {
+ rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
+ if (rc >= 0) {
+ num_processors++;
+ set_cpu_possible(i, true);
+ }
+ }
+}
+
+static void __init xen_filter_cpu_maps(void)
+{
+ int i, rc;
+
+ if (!xen_initial_domain())
+ return;
+
+ num_processors = 0;
+ disabled_cpus = 0;
for (i = 0; i < nr_cpu_ids; i++) {
rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
if (rc >= 0) {
num_processors++;
set_cpu_possible(i, true);
+ } else {
+ set_cpu_possible(i, false);
+ set_cpu_present(i, false);
}
}
}
@@ -174,6 +199,7 @@ static void __init xen_smp_prepare_boot_cpu(void)
old memory can be recycled */
make_lowmem_page_readwrite(xen_initial_gdt);
+ xen_filter_cpu_maps();
xen_setup_vcpu_info_placement();
}
@@ -400,9 +426,9 @@ static void stop_self(void *v)
BUG();
}
-static void xen_smp_send_stop(void)
+static void xen_stop_other_cpus(int wait)
{
- smp_call_function(stop_self, NULL, 0);
+ smp_call_function(stop_self, NULL, wait);
}
static void xen_smp_send_reschedule(int cpu)
@@ -470,7 +496,7 @@ static const struct smp_ops xen_smp_ops __initdata = {
.cpu_disable = xen_cpu_disable,
.play_dead = xen_play_dead,
- .smp_send_stop = xen_smp_send_stop,
+ .stop_other_cpus = xen_stop_other_cpus,
.smp_send_reschedule = xen_smp_send_reschedule,
.send_call_func_ipi = xen_smp_send_call_function_ipi,
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 0859bfd8ae9..d373d159e75 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -1,8 +1,3 @@
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-
-mainmenu "Linux/Xtensa Kernel Configuration"
-
config FRAME_POINTER
def_bool n
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 9d4e1ceb3f0..c72c9473ef9 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -256,9 +256,11 @@ int ptrace_pokeusr(struct task_struct *child, long regno, long val)
return 0;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret = -EPERM;
+ void __user *datap = (void __user *) data;
switch (request) {
case PTRACE_PEEKTEXT: /* read word at location addr. */
@@ -267,7 +269,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_PEEKUSR: /* read register specified by addr. */
- ret = ptrace_peekusr(child, addr, (void __user *) data);
+ ret = ptrace_peekusr(child, addr, datap);
break;
case PTRACE_POKETEXT: /* write the word at location addr. */
@@ -280,19 +282,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (void __user *) data);
+ ret = ptrace_getregs(child, datap);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (void __user *) data);
+ ret = ptrace_setregs(child, datap);
break;
case PTRACE_GETXTREGS:
- ret = ptrace_getxregs(child, (void __user *) data);
+ ret = ptrace_getxregs(child, datap);
break;
case PTRACE_SETXTREGS:
- ret = ptrace_setxregs(child, (void __user *) data);
+ ret = ptrace_setxregs(child, datap);
break;
default: